// SPDX-License-Identifier: GPL-2.0 /**************************************/ /* this file adapted from font_8x16.c */ /* by Jurriaan Kalkman 05-2005 */ /**************************************/ #include <linux/font.h> #define FONTDATAMAX 3584 static const struct font_data fontdata_7x14 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 1 0x01 '^A' */ 0x00, /* 0000000 */ 0x7c, /* 0111110 */ 0x82, /* 1000001 */ 0xaa, /* 1010101 */ 0x82, /* 1000001 */ 0x82, /* 1000001 */ 0xba, /* 1011101 */ 0x92, /* 1001001 */ 0x82, /* 1000001 */ 0x7c, /* 0111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 2 0x02 '^B' */ 0x00, /* 0000000 */ 0x7c, /* 0111110 */ 0xfe, /* 1111111 */ 0xd6, /* 1101011 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xc6, /* 1100011 */ 0xee, /* 1110111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0x7c, /* 0111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 3 0x03 '^C' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x6c, /* 0110110 */ 0x7c, /* 0111110 */ 0xfe, /* 1111111 */ 0x7c, /* 0111110 */ 0x38, /* 0011100 */ 0x18, /* 0001100 */ 0x10, /* 0001000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 4 0x04 '^D' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x10, /* 0001000 */ 0x38, /* 0011100 */ 0x7c, /* 0111110 */ 0xfe, /* 1111111 */ 0x7c, /* 0111110 */ 0x38, /* 0011100 */ 0x10, /* 0001000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 5 0x05 '^E' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x38, /* 0011100 */ 0x38, /* 0011100 */ 0xee, /* 1110111 */ 0xee, /* 1110111 */ 0xee, /* 1110111 */ 0x10, /* 0001000 */ 0x10, /* 0001000 */ 0x38, /* 0011100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 6 0x06 '^F' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x10, /* 0001000 */ 0x38, /* 0011100 */ 0x7c, /* 0111110 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0x7c, /* 0111110 */ 0x10, /* 0001000 */ 0x10, /* 0001000 */ 0x38, /* 0011100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 7 0x07 '^G' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x3c, /* 0011110 */ 0x3c, /* 0011110 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 8 0x08 '^H' */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xe6, /* 1110011 */ 0xc2, /* 1100001 */ 0xc2, /* 1100001 */ 0xe6, /* 1110011 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ /* 9 0x09 '^I' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x44, /* 0100010 */ 0x6c, /* 0110110 */ 0x38, /* 0011100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 10 0x0a '^J' */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xc6, /* 1100011 */ 0x92, /* 1001001 */ 0xba, /* 1011101 */ 0x92, /* 1001001 */ 0xc6, /* 1100011 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 
*/ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ /* 11 0x0b '^K' */ 0x00, /* 0000000 */ 0x1e, /* 0001111 */ 0x0e, /* 0000111 */ 0x1a, /* 0001101 */ 0x1a, /* 0001101 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 12 0x0c '^L' */ 0x00, /* 0000000 */ 0x3c, /* 0011110 */ 0x66, /* 0110011 */ 0x66, /* 0110011 */ 0x66, /* 0110011 */ 0x66, /* 0110011 */ 0x3c, /* 0011110 */ 0x18, /* 0001100 */ 0x7e, /* 0111111 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 13 0x0d '^M' */ 0x00, /* 0000000 */ 0x3e, /* 0011111 */ 0x36, /* 0011011 */ 0x3e, /* 0011111 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x70, /* 0111000 */ 0xf0, /* 1111000 */ 0xe0, /* 1110000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 14 0x0e '^N' */ 0x00, /* 0000000 */ 0x7e, /* 0111111 */ 0x66, /* 0110011 */ 0x7e, /* 0111111 */ 0x66, /* 0110011 */ 0x66, /* 0110011 */ 0x66, /* 0110011 */ 0x66, /* 0110011 */ 0x6e, /* 0110111 */ 0xee, /* 1110111 */ 0xec, /* 1110110 */ 0xc0, /* 1100000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 15 0x0f '^O' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x10, /* 0001000 */ 0x10, /* 0001000 */ 0xd6, /* 1101011 */ 0x38, /* 0011100 */ 0xee, /* 1110111 */ 0x38, /* 0011100 */ 0xd6, /* 1101011 */ 0x10, /* 0001000 */ 0x10, /* 0001000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 16 0x10 '^P' */ 0x00, /* 0000000 */ 0x80, /* 1000000 */ 0xc0, /* 1100000 */ 0xe0, /* 1110000 */ 0xf0, /* 1111000 */ 0xfc, /* 1111110 */ 0xf0, /* 1111000 */ 0xe0, /* 1110000 */ 0xc0, /* 1100000 */ 0x80, /* 1000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 17 0x11 '^Q' */ 0x00, /* 0000000 */ 0x04, /* 0000010 */ 0x0c, /* 0000110 */ 0x1c, /* 0001110 */ 0x3c, /* 0011110 */ 0xfc, /* 1111110 */ 0x3c, /* 0011110 */ 0x1c, /* 0001110 */ 0x0c, /* 0000110 */ 0x04, /* 0000010 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 18 0x12 '^R' */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x3c, /* 0011110 */ 0x7e, /* 0111111 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x7e, /* 0111111 */ 0x3c, /* 0011110 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 19 0x13 '^S' */ 0x00, /* 0000000 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 20 0x14 '^T' */ 0x00, /* 0000000 */ 0x7e, /* 0111111 */ 0xd4, /* 1101010 */ 0xd4, /* 1101010 */ 0xd4, /* 1101010 */ 0x74, /* 0111010 */ 0x14, /* 0001010 */ 0x14, /* 0001010 */ 0x14, /* 0001010 */ 0x14, /* 0001010 */ 0x16, /* 0001011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 21 0x15 '^U' */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0x60, /* 0110000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0x6c, /* 0110110 */ 0x38, /* 0011100 */ 0x18, /* 0001100 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 22 0x16 '^V' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xfc, /* 1111110 */ 0xfc, /* 1111110 */ 
0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 23 0x17 '^W' */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x3c, /* 0011110 */ 0x7e, /* 0111111 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x7e, /* 0111111 */ 0x3c, /* 0011110 */ 0x18, /* 0001100 */ 0x7e, /* 0111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 24 0x18 '^X' */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x3c, /* 0011110 */ 0x7e, /* 0111111 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 25 0x19 '^Y' */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x7e, /* 0111111 */ 0x3c, /* 0011110 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 26 0x1a '^Z' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0xfc, /* 1111110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 27 0x1b '^[' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xfc, /* 1111110 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 28 0x1c '^\' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 29 0x1d '^]' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x28, /* 0010100 */ 0x6c, /* 0110110 */ 0xfe, /* 1111111 */ 0x6c, /* 0110110 */ 0x28, /* 0010100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 30 0x1e '^^' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0x78, /* 0111100 */ 0xfc, /* 1111110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 31 0x1f '^_' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xfc, /* 1111110 */ 0x78, /* 0111100 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 32 0x20 ' ' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 33 0x21 '!' 
*/ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x3c, /* 0011110 */ 0x3c, /* 0011110 */ 0x3c, /* 0011110 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 34 0x22 '"' */ 0x00, /* 0000000 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x28, /* 0010100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 35 0x23 '#' */ 0x00, /* 0000000 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 36 0x24 '$' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xc4, /* 1100010 */ 0xc0, /* 1100000 */ 0x78, /* 0111100 */ 0x0c, /* 0000110 */ 0x8c, /* 1000110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ /* 37 0x25 '%' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xc0, /* 1100000 */ 0xc4, /* 1100010 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xcc, /* 1100110 */ 0x8c, /* 1000110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 38 0x26 '&' */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x38, /* 0011100 */ 0x78, /* 0111100 */ 0xde, /* 1101111 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xdc, /* 1101110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 39 0x27 ''' */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 40 0x28 '(' */ 0x00, /* 0000000 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x0c, /* 0000110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 41 0x29 ')' */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 42 0x2a '*' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x6c, /* 0110110 */ 0x38, /* 0011100 */ 0xfe, /* 1111111 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 43 0x2b '+' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x10, /* 0001000 */ 0x10, /* 0001000 */ 0x7c, /* 0111110 */ 0x10, /* 0001000 */ 0x10, /* 0001000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 44 0x2c ',' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 
0000000 */ /* 45 0x2d '-' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 46 0x2e '.' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 47 0x2f '/' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x04, /* 0000010 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xc0, /* 1100000 */ 0x80, /* 1000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 48 0x30 '0' */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xdc, /* 1101110 */ 0xec, /* 1110110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 49 0x31 '1' */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x38, /* 0011100 */ 0x78, /* 0111100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x7c, /* 0111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 50 0x32 '2' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 51 0x33 '3' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x38, /* 0011100 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 52 0x34 '4' */ 0x00, /* 0000000 */ 0x0c, /* 0000110 */ 0x1c, /* 0001110 */ 0x3c, /* 0011110 */ 0x6c, /* 0110110 */ 0xcc, /* 1100110 */ 0xfe, /* 1111111 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 53 0x35 '5' */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xf8, /* 1111100 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 54 0x36 '6' */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xf8, /* 1111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 55 0x37 '7' */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 56 0x38 '8' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, 
/* 0000000 */ 0x00, /* 0000000 */ /* 57 0x39 '9' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x7c, /* 0111110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x70, /* 0111000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 58 0x3a ':' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 59 0x3b ';' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 60 0x3c '<' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x04, /* 0000010 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x0c, /* 0000110 */ 0x04, /* 0000010 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 61 0x3d '=' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x7c, /* 0111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x7c, /* 0111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 62 0x3e '>' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x40, /* 0100000 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0x40, /* 0100000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 63 0x3f '?' 
*/ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 64 0x40 '@' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xdc, /* 1101110 */ 0xdc, /* 1101110 */ 0xd8, /* 1101100 */ 0xc0, /* 1100000 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 65 0x41 'A' */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 66 0x42 'B' */ 0x00, /* 0000000 */ 0xf8, /* 1111100 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x78, /* 0111100 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xf8, /* 1111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 67 0x43 'C' */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0xc4, /* 1100010 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc4, /* 1100010 */ 0x6c, /* 0110110 */ 0x38, /* 0011100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 68 0x44 'D' */ 0x00, /* 0000000 */ 0xf0, /* 1111000 */ 0xd8, /* 1101100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xd8, /* 1101100 */ 0xf0, /* 1111000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 69 0x45 'E' */ 0x00, /* 0000000 */ 0x7c, /* 0111110 */ 0x6c, /* 0110110 */ 0x64, /* 0110010 */ 0x68, /* 0110100 */ 0x78, /* 0111100 */ 0x68, /* 0110100 */ 0x60, /* 0110000 */ 0x64, /* 0110010 */ 0x6c, /* 0110110 */ 0x7c, /* 0111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 70 0x46 'F' */ 0x00, /* 0000000 */ 0x7c, /* 0111110 */ 0x64, /* 0110010 */ 0x60, /* 0110000 */ 0x68, /* 0110100 */ 0x78, /* 0111100 */ 0x68, /* 0110100 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x70, /* 0111000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 71 0x47 'G' */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0xc4, /* 1100010 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xdc, /* 1101110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x6c, /* 0110110 */ 0x34, /* 0011010 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 72 0x48 'H' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 73 0x49 'I' */ 0x00, /* 0000000 */ 0x3c, /* 0011110 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x3c, /* 0011110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 74 0x4a 'J' */ 0x00, /* 0000000 */ 0x1c, /* 0001110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 
0000000 */ /* 75 0x4b 'K' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xd8, /* 1101100 */ 0xf0, /* 1111000 */ 0xf0, /* 1111000 */ 0xd8, /* 1101100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 76 0x4c 'L' */ 0x00, /* 0000000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc4, /* 1100010 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 77 0x4d 'M' */ 0x00, /* 0000000 */ 0xc6, /* 1100011 */ 0xee, /* 1110111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xd6, /* 1101011 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 78 0x4e 'N' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xec, /* 1110110 */ 0xec, /* 1110110 */ 0xfc, /* 1111110 */ 0xdc, /* 1101110 */ 0xdc, /* 1101110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 79 0x4f 'O' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 80 0x50 'P' */ 0x00, /* 0000000 */ 0xf8, /* 1111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xf8, /* 1111100 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 81 0x51 'Q' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xdc, /* 1101110 */ 0x78, /* 0111100 */ 0x18, /* 0001100 */ 0x1c, /* 0001110 */ 0x00, /* 0000000 */ /* 82 0x52 'R' */ 0x00, /* 0000000 */ 0xf8, /* 1111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xf8, /* 1111100 */ 0xd8, /* 1101100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 83 0x53 'S' */ 0x00, /* 0000000 */ 0x7c, /* 0111110 */ 0xc4, /* 1100010 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0x60, /* 0110000 */ 0x38, /* 0011100 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x8c, /* 1000110 */ 0xf8, /* 1111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 84 0x54 'T' */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xfc, /* 1111110 */ 0xb4, /* 1011010 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 85 0x55 'U' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 86 0x56 'V' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, 
/* 0000000 */ 0x00, /* 0000000 */ /* 87 0x57 'W' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xfc, /* 1111110 */ 0x48, /* 0100100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 88 0x58 'X' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 89 0x59 'Y' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 90 0x5a 'Z' */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0x8c, /* 1000110 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xc4, /* 1100010 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 91 0x5b '[' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 92 0x5c '\' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x80, /* 1000000 */ 0xc0, /* 1100000 */ 0xe0, /* 1110000 */ 0x70, /* 0111000 */ 0x38, /* 0011100 */ 0x1c, /* 0001110 */ 0x0c, /* 0000110 */ 0x04, /* 0000010 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 93 0x5d ']' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 94 0x5e '^' */ 0x10, /* 0001000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0xc6, /* 1100011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 95 0x5f '_' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ /* 96 0x60 '`' */ 0x00, /* 0000000 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 97 0x61 'a' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0x0c, /* 0000110 */ 0x7c, /* 0111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 98 0x62 'b' */ 0x00, /* 0000000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xf0, /* 1111000 */ 0xd8, /* 1101100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xf8, /* 1111100 */ 
0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 99 0x63 'c' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 100 0x64 'd' */ 0x00, /* 0000000 */ 0x1c, /* 0001110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x3c, /* 0011110 */ 0x6c, /* 0110110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 101 0x65 'e' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 102 0x66 'f' */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x64, /* 0110010 */ 0x60, /* 0110000 */ 0xf0, /* 1111000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0xf0, /* 1111000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 103 0x67 'g' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x76, /* 0111011 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x7c, /* 0111110 */ 0x0c, /* 0000110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ /* 104 0x68 'h' */ 0x00, /* 0000000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xd8, /* 1101100 */ 0xec, /* 1110110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 105 0x69 'i' */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x70, /* 0111000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 106 0x6a 'j' */ 0x00, /* 0000000 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x00, /* 0000000 */ 0x1c, /* 0001110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ /* 107 0x6b 'k' */ 0x00, /* 0000000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0xd8, /* 1101100 */ 0xf0, /* 1111000 */ 0xf0, /* 1111000 */ 0xd8, /* 1101100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 108 0x6c 'l' */ 0x00, /* 0000000 */ 0x70, /* 0111000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 109 0x6d 'm' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xec, /* 1110110 */ 0xfe, /* 1111111 */ 0xd6, /* 1101011 */ 0xd6, /* 1101011 */ 0xd6, /* 1101011 */ 0xd6, /* 1101011 */ 0xd6, /* 1101011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 110 0x6e 'n' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xb8, /* 1011100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, 
/* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 111 0x6f 'o' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 112 0x70 'p' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xb8, /* 1011100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xf8, /* 1111100 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ /* 113 0x71 'q' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x74, /* 0111010 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x7c, /* 0111110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ /* 114 0x72 'r' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xb8, /* 1011100 */ 0xec, /* 1110110 */ 0xcc, /* 1100110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 115 0x73 's' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 116 0x74 't' */ 0x00, /* 0000000 */ 0x10, /* 0001000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xfc, /* 1111110 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x36, /* 0011011 */ 0x1c, /* 0001110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 117 0x75 'u' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 118 0x76 'v' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 119 0x77 'w' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0xd6, /* 1101011 */ 0xd6, /* 1101011 */ 0xd6, /* 1101011 */ 0xfe, /* 1111111 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 120 0x78 'x' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 121 0x79 'y' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x7c, /* 0111110 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0xf0, /* 1111000 */ /* 122 0x7a 'z' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0x18, /* 0001100 */ 0x30, /* 
0011000 */ 0x60, /* 0110000 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 123 0x7b '{' */ 0x00, /* 0000000 */ 0x1c, /* 0001110 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xe0, /* 1110000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x1c, /* 0001110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 124 0x7c '|' */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 125 0x7d '}' */ 0x00, /* 0000000 */ 0x70, /* 0111000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x0e, /* 0000111 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x70, /* 0111000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 126 0x7e '~' */ 0x00, /* 0000000 */ 0xec, /* 1110110 */ 0xb8, /* 1011100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 127 0x7f '' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x10, /* 0001000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 128 0x80 'Ç' */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0xc4, /* 1100010 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc4, /* 1100010 */ 0x6c, /* 0110110 */ 0x38, /* 0011100 */ 0x18, /* 0001100 */ 0x70, /* 0111000 */ 0x00, /* 0000000 */ /* 129 0x81 'ü' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 130 0x82 'é' */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 131 0x83 'â' */ 0x10, /* 0001000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0x0c, /* 0000110 */ 0x7c, /* 0111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 132 0x84 'ä' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0x0c, /* 0000110 */ 0x7c, /* 0111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 133 0x85 'à' */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0x0c, /* 0000110 */ 0x7c, /* 0111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 134 0x86 'å' */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x38, /* 0011100 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0x0c, /* 0000110 
*/ 0x7c, /* 0111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 135 0x87 'ç' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xe0, /* 1110000 */ /* 136 0x88 'ê' */ 0x10, /* 0001000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 137 0x89 'ë' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 138 0x8a 'è' */ 0xc0, /* 1100000 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 139 0x8b 'ï' */ 0x00, /* 0000000 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x3c, /* 0011110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 140 0x8c 'î' */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x70, /* 0111000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 141 0x8d 'ì' */ 0xc0, /* 1100000 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x70, /* 0111000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 142 0x8e 'Ä' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 143 0x8f 'Å' */ 0x30, /* 0011000 */ 0x48, /* 0100100 */ 0x48, /* 0100100 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 144 0x90 'É' */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0xc4, /* 1100010 */ 0xd0, /* 1101000 */ 0xf0, /* 1111000 */ 0xd0, /* 1101000 */ 0xc4, /* 1100010 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 145 0x91 'æ' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xec, /* 1110110 */ 0x36, /* 0011011 */ 0x36, /* 0011011 */ 0x7e, /* 0111111 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0x6e, /* 0110111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 146 0x92 'Æ' */ 0x00, /* 0000000 */ 0x3e, /* 0011111 */ 0x6c, /* 0110110 */ 0xcc, /* 1100110 */ 
0xcc, /* 1100110 */ 0xfe, /* 1111111 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xce, /* 1100111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 147 0x93 'ô' */ 0x10, /* 0001000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 148 0x94 'ö' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 149 0x95 'ò' */ 0xc0, /* 1100000 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 150 0x96 'û' */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 151 0x97 'ù' */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 152 0x98 'ÿ' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x7c, /* 0111110 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x70, /* 0111000 */ /* 153 0x99 'Ö' */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 154 0x9a 'Ü' */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 155 0x9b '¢' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x7c, /* 0111110 */ 0xcc, /* 1100110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0x7c, /* 0111110 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 156 0x9c '£' */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x64, /* 0110010 */ 0x60, /* 0110000 */ 0xf0, /* 1111000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0xe6, /* 1110011 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 157 0x9d '¥' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0xfc, /* 1111110 */ 0x30, /* 0011000 */ 0xfc, /* 1111110 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 158 0x9e '₧' */ 0xf8, /* 1111100 */ 0xcc, /* 1100110 */ 0xcc, 
/* 1100110 */ 0xf8, /* 1111100 */ 0xc4, /* 1100010 */ 0xcc, /* 1100110 */ 0xde, /* 1101111 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xc6, /* 1100011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 159 0x9f 'ƒ' */ 0x1c, /* 0001110 */ 0x36, /* 0011011 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xfc, /* 1111110 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xb0, /* 1011000 */ 0xe0, /* 1110000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 160 0xa0 'á' */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0x0c, /* 0000110 */ 0x7c, /* 0111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 161 0xa1 'í' */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0x00, /* 0000000 */ 0x70, /* 0111000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 162 0xa2 'ó' */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 163 0xa3 'ú' */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 164 0xa4 'ñ' */ 0x00, /* 0000000 */ 0x76, /* 0111011 */ 0xdc, /* 1101110 */ 0x00, /* 0000000 */ 0xb8, /* 1011100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 165 0xa5 'Ñ' */ 0x76, /* 0111011 */ 0xdc, /* 1101110 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xec, /* 1110110 */ 0xec, /* 1110110 */ 0xfc, /* 1111110 */ 0xdc, /* 1101110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 166 0xa6 'ª' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0x7c, /* 0111110 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 167 0xa7 'º' */ 0x00, /* 0000000 */ 0x70, /* 0111000 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0x70, /* 0111000 */ 0x00, /* 0000000 */ 0xf8, /* 1111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 168 0xa8 '¿' */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 169 0xa9 '⌐' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 170 0xaa '¬' */ 0x00, /* 
0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 171 0xab '½' */ 0x60, /* 0110000 */ 0xe0, /* 1110000 */ 0x62, /* 0110001 */ 0x66, /* 0110011 */ 0x6c, /* 0110110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xc0, /* 1100000 */ 0xb8, /* 1011100 */ 0x4c, /* 0100110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x7c, /* 0111110 */ /* 172 0xac '¼' */ 0x60, /* 0110000 */ 0xe0, /* 1110000 */ 0x62, /* 0110001 */ 0x66, /* 0110011 */ 0x6c, /* 0110110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x6c, /* 0110110 */ 0xdc, /* 1101110 */ 0xb4, /* 1011010 */ 0x7e, /* 0111111 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x00, /* 0000000 */ /* 173 0xad '¡' */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0x78, /* 0111100 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 174 0xae '«' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x36, /* 0011011 */ 0x6c, /* 0110110 */ 0xd8, /* 1101100 */ 0x6c, /* 0110110 */ 0x36, /* 0011011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 175 0xaf '»' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xd8, /* 1101100 */ 0x6c, /* 0110110 */ 0x36, /* 0011011 */ 0x6c, /* 0110110 */ 0xd8, /* 1101100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 176 0xb0 '░' */ 0x88, /* 1000100 */ 0x22, /* 0010001 */ 0x88, /* 1000100 */ 0x22, /* 0010001 */ 0x88, /* 1000100 */ 0x22, /* 0010001 */ 0x88, /* 1000100 */ 0x22, /* 0010001 */ 0x88, /* 1000100 */ 0x22, /* 0010001 */ 0x88, /* 1000100 */ 0x22, /* 0010001 */ 0x88, /* 1000100 */ 0x22, /* 0010001 */ /* 177 0xb1 '▒' */ 0x54, /* 0101010 */ 0xaa, /* 1010101 */ 0x54, /* 0101010 */ 0xaa, /* 1010101 */ 0x54, /* 0101010 */ 0xaa, /* 1010101 */ 0x54, /* 0101010 */ 0xaa, /* 1010101 */ 0x54, /* 0101010 */ 0xaa, /* 1010101 */ 0x54, /* 0101010 */ 0xaa, /* 1010101 */ 0x54, /* 0101010 */ 0xaa, /* 1010101 */ /* 178 0xb2 '▓' */ 0xee, /* 1110111 */ 0xba, /* 1011101 */ 0xee, /* 1110111 */ 0xba, /* 1011101 */ 0xee, /* 1110111 */ 0xba, /* 1011101 */ 0xee, /* 1110111 */ 0xba, /* 1011101 */ 0xee, /* 1110111 */ 0xba, /* 1011101 */ 0xee, /* 1110111 */ 0xba, /* 1011101 */ 0xee, /* 1110111 */ 0xba, /* 1011101 */ /* 179 0xb3 '│' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 180 0xb4 '┤' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xf0, /* 1111000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 181 0xb5 '╡' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xf0, /* 1111000 */ 0x30, /* 0011000 */ 0xf0, /* 1111000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 
0011000 */ /* 182 0xb6 '╢' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xec, /* 1110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 183 0xb7 '╖' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 184 0xb8 '╕' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xf0, /* 1111000 */ 0x30, /* 0011000 */ 0xf0, /* 1111000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 185 0xb9 '╣' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xec, /* 1110110 */ 0x0c, /* 0000110 */ 0xec, /* 1110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 186 0xba '║' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 187 0xbb '╗' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x0c, /* 0000110 */ 0xec, /* 1110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 188 0xbc '╝' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xec, /* 1110110 */ 0x0c, /* 0000110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 189 0xbd '╜' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 190 0xbe '╛' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xf0, /* 1111000 */ 0x30, /* 0011000 */ 0xf0, /* 1111000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 191 0xbf '┐' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xf0, /* 1111000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 192 0xc0 '└' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x3e, /* 0011111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 193 0xc1 '┴' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 
0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 194 0xc2 '┬' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 195 0xc3 '├' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x3e, /* 0011111 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 196 0xc4 '─' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 197 0xc5 '┼' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xfe, /* 1111111 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 198 0xc6 '╞' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x3e, /* 0011111 */ 0x30, /* 0011000 */ 0x3e, /* 0011111 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 199 0xc7 '╟' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6e, /* 0110111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 200 0xc8 '╚' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6e, /* 0110111 */ 0x60, /* 0110000 */ 0x7e, /* 0111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 201 0xc9 '╔' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x7e, /* 0111111 */ 0x60, /* 0110000 */ 0x6e, /* 0110111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 202 0xca '╩' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xee, /* 1110111 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 203 0xcb '╦' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0xee, /* 1110111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 204 0xcc '╠' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6e, /* 0110111 */ 0x60, /* 0110000 */ 0x6e, /* 0110111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 205 0xcd '═' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 
0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 206 0xce '╬' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xee, /* 1110111 */ 0x00, /* 0000000 */ 0xee, /* 1110111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 207 0xcf '╧' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 208 0xd0 '╨' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 209 0xd1 '╤' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 210 0xd2 '╥' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 211 0xd3 '╙' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x7e, /* 0111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 212 0xd4 '╘' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x3e, /* 0011111 */ 0x30, /* 0011000 */ 0x3e, /* 0011111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 213 0xd5 '╒' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x3e, /* 0011111 */ 0x30, /* 0011000 */ 0x3e, /* 0011111 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 214 0xd6 '╓' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x7e, /* 0111111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 215 0xd7 '╫' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xfe, /* 1111111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 216 0xd8 '╪' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xfe, /* 1111111 */ 0x30, /* 0011000 */ 0xfe, /* 1111111 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 217 0xd9 '┘' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xf0, /* 1111000 */ 0x00, /* 
0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 218 0xda '┌' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x3e, /* 0011111 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 219 0xdb '█' */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ /* 220 0xdc '▄' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ /* 221 0xdd '▌' */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ /* 222 0xde '▐' */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ /* 223 0xdf '▀' */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 224 0xe0 'α' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x76, /* 0111011 */ 0xdc, /* 1101110 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0xdc, /* 1101110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 225 0xe1 'ß' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xd8, /* 1101100 */ 0xcc, /* 1100110 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 226 0xe2 'Γ' */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 227 0xe3 'π' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 228 0xe4 'Σ' */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 229 0xe5 'σ' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x7e, /* 0111111 */ 0xd8, /* 
1101100 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0x70, /* 0111000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 230 0xe6 'µ' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xf8, /* 1111100 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0x80, /* 1000000 */ /* 231 0xe7 'τ' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x76, /* 0111011 */ 0xdc, /* 1101110 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 232 0xe8 'Φ' */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 233 0xe9 'Θ' */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x6c, /* 0110110 */ 0x38, /* 0011100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 234 0xea 'Ω' */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xee, /* 1110111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 235 0xeb 'δ' */ 0x00, /* 0000000 */ 0x3c, /* 0011110 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x7c, /* 0111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 236 0xec '∞' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x7c, /* 0111110 */ 0xd6, /* 1101011 */ 0xd6, /* 1101011 */ 0xd6, /* 1101011 */ 0x7c, /* 0111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 237 0xed 'φ' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x06, /* 0000011 */ 0x0c, /* 0000110 */ 0x7c, /* 0111110 */ 0xd6, /* 1101011 */ 0xd6, /* 1101011 */ 0xe6, /* 1110011 */ 0x7c, /* 0111110 */ 0x60, /* 0110000 */ 0xc0, /* 1100000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 238 0xee 'ε' */ 0x00, /* 0000000 */ 0x1c, /* 0001110 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x7c, /* 0111110 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x1c, /* 0001110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 239 0xef '∩' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 240 0xf0 '≡' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 241 0xf1 '±' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x30, /* 
0011000 */ 0x30, /* 0011000 */ 0xfc, /* 1111110 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 242 0xf2 '≥' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 243 0xf3 '≤' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xc0, /* 1100000 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 244 0xf4 '⌠' */ 0x00, /* 0000000 */ 0x1c, /* 0001110 */ 0x36, /* 0011011 */ 0x36, /* 0011011 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 245 0xf5 '⌡' */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0x70, /* 0111000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 246 0xf6 '÷' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 247 0xf7 '≈' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x76, /* 0111011 */ 0xdc, /* 1101110 */ 0x00, /* 0000000 */ 0x76, /* 0111011 */ 0xdc, /* 1101110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 248 0xf8 '°' */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x38, /* 0011100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 249 0xf9 '·' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 250 0xfa '•' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 251 0xfb '√' */ 0x1e, /* 0001111 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0x78, /* 0111100 */ 0x38, /* 0011100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 252 0xfc 'ⁿ' */ 0xd8, /* 1101100 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 253 0xfd '²' */ 0x78, /* 0111100 */ 0xcc, /* 
1100110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x64, /* 0110010 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 254 0xfe '■' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x7c, /* 0111110 */ 0x7c, /* 0111110 */ 0x7c, /* 0111110 */ 0x7c, /* 0111110 */ 0x7c, /* 0111110 */ 0x7c, /* 0111110 */ 0x7c, /* 0111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 255 0xff ' ' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ } }; const struct font_desc font_7x14 = { .idx = FONT7x14_IDX, .name = "7x14", .width = 7, .height = 14, .charcount = 256, .data = fontdata_7x14.data, .pref = 0, };
linux-master
lib/fonts/font_7x14.c
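The font_7x14 descriptor above (.width = 7, .height = 14, .charcount = 256, .data = fontdata_7x14.data) fixes the layout of the table: each glyph occupies 14 consecutive bytes, one byte per scanline, with the most significant bit as the leftmost pixel and only the upper 7 bits of each byte in use, exactly as the binary comments show. The standalone C sketch below is not part of either kernel file; it copies one glyph (234, 0xea 'Ω') from the table above and renders it, purely to illustrate the byte-to-pixel mapping. The names demo_glyph and glyph_pixel are invented for the example.

/*
 * Illustrative only: dump one 7x14 glyph as ASCII art, assuming the
 * row-per-byte, MSB-leftmost layout used by fontdata_7x14.
 */
#include <stdint.h>
#include <stdio.h>

#define GLYPH_WIDTH	 7
#define GLYPH_HEIGHT	14

/* The 14 row bytes of glyph 234 (0xea, 'Ω'), copied from fontdata_7x14. */
static const uint8_t demo_glyph[GLYPH_HEIGHT] = {
	0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c,
	0x6c, 0x6c, 0x6c, 0xee, 0x00, 0x00, 0x00,
};

/* Nonzero if pixel column x of row y is set; bit 7 is column 0. */
static int glyph_pixel(const uint8_t *rows, unsigned int x, unsigned int y)
{
	return rows[y] & (0x80 >> x);
}

int main(void)
{
	for (unsigned int y = 0; y < GLYPH_HEIGHT; y++) {
		for (unsigned int x = 0; x < GLYPH_WIDTH; x++)
			putchar(glyph_pixel(demo_glyph, x, y) ? '#' : '.');
		putchar('\n');
	}
	return 0;
}

In-kernel consumers do not normally index such tables by hand; they typically look a font up by name (for example via find_font("7x14") declared in <linux/font.h>) and use the width, height and data fields of the returned struct font_desc, which is what the descriptor initializer above provides.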
// SPDX-License-Identifier: GPL-2.0 /**********************************************/ /* */ /* Font file generated by cpi2fnt */ /* ------------------------------ */ /* Combined with the alphanumeric */ /* portion of Greg Harp's old PEARL */ /* font (from earlier versions of */ /* linux-m86k) by John Shifflett */ /* */ /**********************************************/ #include <linux/font.h> #define FONTDATAMAX 2048 static const struct font_data fontdata_pearl8x8 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 1 0x01 '^A' */ 0x7e, /* 01111110 */ 0x81, /* 10000001 */ 0xa5, /* 10100101 */ 0x81, /* 10000001 */ 0xbd, /* 10111101 */ 0x99, /* 10011001 */ 0x81, /* 10000001 */ 0x7e, /* 01111110 */ /* 2 0x02 '^B' */ 0x7e, /* 01111110 */ 0xff, /* 11111111 */ 0xdb, /* 11011011 */ 0xff, /* 11111111 */ 0xc3, /* 11000011 */ 0xe7, /* 11100111 */ 0xff, /* 11111111 */ 0x7e, /* 01111110 */ /* 3 0x03 '^C' */ 0x6c, /* 01101100 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0x7c, /* 01111100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ /* 4 0x04 '^D' */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x7c, /* 01111100 */ 0xfe, /* 11111110 */ 0x7c, /* 01111100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ /* 5 0x05 '^E' */ 0x38, /* 00111000 */ 0x7c, /* 01111100 */ 0x38, /* 00111000 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xd6, /* 11010110 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ /* 6 0x06 '^F' */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x7c, /* 01111100 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0x7c, /* 01111100 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ /* 7 0x07 '^G' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 8 0x08 '^H' */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xe7, /* 11100111 */ 0xc3, /* 11000011 */ 0xc3, /* 11000011 */ 0xe7, /* 11100111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ /* 9 0x09 '^I' */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x42, /* 01000010 */ 0x42, /* 01000010 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 10 0x0a '^J' */ 0xff, /* 11111111 */ 0xc3, /* 11000011 */ 0x99, /* 10011001 */ 0xbd, /* 10111101 */ 0xbd, /* 10111101 */ 0x99, /* 10011001 */ 0xc3, /* 11000011 */ 0xff, /* 11111111 */ /* 11 0x0b '^K' */ 0x0f, /* 00001111 */ 0x07, /* 00000111 */ 0x0f, /* 00001111 */ 0x7d, /* 01111101 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x78, /* 01111000 */ /* 12 0x0c '^L' */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ /* 13 0x0d '^M' */ 0x3f, /* 00111111 */ 0x33, /* 00110011 */ 0x3f, /* 00111111 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x70, /* 01110000 */ 0xf0, /* 11110000 */ 0xe0, /* 11100000 */ /* 14 0x0e '^N' */ 0x7f, /* 01111111 */ 0x63, /* 01100011 */ 0x7f, /* 01111111 */ 0x63, /* 01100011 */ 0x63, /* 01100011 */ 0x67, /* 01100111 */ 0xe6, /* 11100110 */ 0xc0, /* 11000000 */ /* 15 0x0f '^O' */ 0x18, /* 00011000 */ 0xdb, /* 11011011 */ 0x3c, /* 00111100 */ 0xe7, /* 11100111 */ 0xe7, /* 11100111 */ 0x3c, /* 00111100 */ 0xdb, /* 11011011 */ 0x18, /* 00011000 */ /* 16 0x10 '^P' */ 0x80, /* 10000000 */ 0xe0, /* 11100000 */ 0xf8, /* 
11111000 */ 0xfe, /* 11111110 */ 0xf8, /* 11111000 */ 0xe0, /* 11100000 */ 0x80, /* 10000000 */ 0x00, /* 00000000 */ /* 17 0x11 '^Q' */ 0x02, /* 00000010 */ 0x0e, /* 00001110 */ 0x3e, /* 00111110 */ 0xfe, /* 11111110 */ 0x3e, /* 00111110 */ 0x0e, /* 00001110 */ 0x02, /* 00000010 */ 0x00, /* 00000000 */ /* 18 0x12 '^R' */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ /* 19 0x13 '^S' */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ /* 20 0x14 '^T' */ 0x7f, /* 01111111 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0x7b, /* 01111011 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x00, /* 00000000 */ /* 21 0x15 '^U' */ 0x3e, /* 00111110 */ 0x61, /* 01100001 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x86, /* 10000110 */ 0x7c, /* 01111100 */ /* 22 0x16 '^V' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ /* 23 0x17 '^W' */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ /* 24 0x18 '^X' */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 25 0x19 '^Y' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 26 0x1a '^Z' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0xfe, /* 11111110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 27 0x1b '^[' */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xfe, /* 11111110 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 28 0x1c '^\' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 29 0x1d '^]' */ 0x00, /* 00000000 */ 0x24, /* 00100100 */ 0x66, /* 01100110 */ 0xff, /* 11111111 */ 0x66, /* 01100110 */ 0x24, /* 00100100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 30 0x1e '^^' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 31 0x1f '^_' */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0x7e, /* 01111110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 32 0x20 ' ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 33 0x21 '!' 
*/ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 34 0x22 '"' */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 35 0x23 '#' */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ /* 36 0x24 '$' */ 0x18, /* 00011000 */ 0x3e, /* 00111110 */ 0x60, /* 01100000 */ 0x3c, /* 00111100 */ 0x06, /* 00000110 */ 0x7c, /* 01111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 37 0x25 '%' */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xcc, /* 11001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x66, /* 01100110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 38 0x26 '&' */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x68, /* 01101000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 39 0x27 ''' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 40 0x28 '(' */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x00, /* 00000000 */ /* 41 0x29 ')' */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ /* 42 0x2a '*' */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0xff, /* 11111111 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 43 0x2b '+' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 44 0x2c ',' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ /* 45 0x2d '-' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 46 0x2e '.' 
*/ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 47 0x2f '/' */ 0x03, /* 00000011 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ /* 48 0x30 '0' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xde, /* 11011110 */ 0xfe, /* 11111110 */ 0xf6, /* 11110110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 49 0x31 '1' */ 0x18, /* 00011000 */ 0x78, /* 01111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 50 0x32 '2' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ /* 51 0x33 '3' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0x06, /* 00000110 */ 0x1c, /* 00011100 */ 0x06, /* 00000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 52 0x34 '4' */ 0x1c, /* 00011100 */ 0x3c, /* 00111100 */ 0x6c, /* 01101100 */ 0xcc, /* 11001100 */ 0xfe, /* 11111110 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x00, /* 00000000 */ /* 53 0x35 '5' */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xfc, /* 11111100 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 54 0x36 '6' */ 0x38, /* 00111000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0xfc, /* 11111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 55 0x37 '7' */ 0xfe, /* 11111110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ /* 56 0x38 '8' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 57 0x39 '9' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7e, /* 01111110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ /* 58 0x3a ':' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 59 0x3b ';' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ /* 60 0x3c '<' */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x00, /* 00000000 */ /* 61 0x3d '=' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 62 0x3e '>' */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ /* 63 0x3f '?' 
*/ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 64 0x40 '@' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xde, /* 11011110 */ 0xde, /* 11011110 */ 0xde, /* 11011110 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 65 0x41 'A' */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 66 0x42 'B' */ 0xfc, /* 11111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfc, /* 11111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfc, /* 11111100 */ 0x00, /* 00000000 */ /* 67 0x43 'C' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 68 0x44 'D' */ 0xfc, /* 11111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfc, /* 11111100 */ 0x00, /* 00000000 */ /* 69 0x45 'E' */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xf8, /* 11111000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ /* 70 0x46 'F' */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xf8, /* 11111000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ /* 71 0x47 'G' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xce, /* 11001110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 72 0x48 'H' */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 73 0x49 'I' */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ /* 74 0x4a 'J' */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 75 0x4b 'K' */ 0xc6, /* 11000110 */ 0xcc, /* 11001100 */ 0xd8, /* 11011000 */ 0xf0, /* 11110000 */ 0xd8, /* 11011000 */ 0xcc, /* 11001100 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 76 0x4c 'L' */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ /* 77 0x4d 'M' */ 0x82, /* 10000010 */ 0xc6, /* 11000110 */ 0xee, /* 11101110 */ 0xfe, /* 11111110 */ 0xd6, /* 11010110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 78 0x4e 'N' */ 0xc6, /* 11000110 */ 0xe6, /* 11100110 */ 0xf6, /* 11110110 */ 0xde, /* 11011110 */ 0xce, /* 11001110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 79 0x4f 'O' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 80 0x50 'P' */ 0xfc, /* 11111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfc, /* 11111100 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ /* 81 0x51 'Q' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xf6, /* 11110110 */ 0xde, /* 11011110 */ 0x7c, /* 01111100 */ 0x06, /* 00000110 */ /* 82 0x52 'R' */ 0xfc, /* 11111100 
*/ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfc, /* 11111100 */ 0xd8, /* 11011000 */ 0xcc, /* 11001100 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 83 0x53 'S' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0x60, /* 01100000 */ 0x38, /* 00111000 */ 0x0c, /* 00001100 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 84 0x54 'T' */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 85 0x55 'U' */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 86 0x56 'V' */ 0xc3, /* 11000011 */ 0xc3, /* 11000011 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 87 0x57 'W' */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xd6, /* 11010110 */ 0xfe, /* 11111110 */ 0xee, /* 11101110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 88 0x58 'X' */ 0xc3, /* 11000011 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0xc3, /* 11000011 */ 0x00, /* 00000000 */ /* 89 0x59 'Y' */ 0xc3, /* 11000011 */ 0xc3, /* 11000011 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 90 0x5a 'Z' */ 0xfe, /* 11111110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ /* 91 0x5b '[' */ 0x3c, /* 00111100 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 92 0x5c '\' */ 0xc0, /* 11000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x06, /* 00000110 */ 0x03, /* 00000011 */ 0x00, /* 00000000 */ /* 93 0x5d ']' */ 0x3c, /* 00111100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 94 0x5e '^' */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 95 0x5f '_' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ /* 96 0x60 '`' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 97 0x61 'a' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0x06, /* 00000110 */ 0x7e, /* 01111110 */ 0xc6, /* 11000110 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ /* 98 0x62 'b' */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xfc, /* 11111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfc, /* 11111100 */ 0x00, /* 00000000 */ /* 99 0x63 'c' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 100 0x64 'd' */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x7e, /* 01111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ /* 101 0x65 'e' */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 102 0x66 'f' */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ /* 103 0x67 'g' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7e, /* 01111110 */ 0x06, /* 00000110 */ 0x7c, /* 01111100 */ /* 104 0x68 'h' */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xfc, /* 11111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 105 0x69 'i' */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 106 0x6a 'j' */ 0x06, /* 00000110 */ 0x00, /* 00000000 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ /* 107 0x6b 'k' */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xcc, /* 11001100 */ 0xd8, /* 11011000 */ 0xf0, /* 11110000 */ 0xd8, /* 11011000 */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ /* 108 0x6c 'l' */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 109 0x6d 'm' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xec, /* 11101100 */ 0xfe, /* 11111110 */ 0xd6, /* 11010110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 110 0x6e 'n' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 11111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 111 0x6f 'o' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 112 0x70 'p' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 11111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfc, /* 11111100 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ /* 113 0x71 'q' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7e, /* 01111110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ /* 114 0x72 'r' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xdc, /* 11011100 */ 0xe6, /* 11100110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ /* 115 0x73 's' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x06, /* 00000110 */ 0xfc, /* 11111100 */ 0x00, /* 00000000 */ /* 116 0x74 't' */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x7c, /* 01111100 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x36, /* 00110110 */ 0x1c, /* 00011100 */ 0x00, /* 00000000 */ /* 117 0x75 'u' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 118 0x76 'v' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ /* 119 0x77 'w' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xd6, /* 11010110 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ /* 120 0x78 'x' */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 121 0x79 'y' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc3, /* 11000011 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ /* 122 0x7a 'z' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x0c, /* 00001100 */ 0x38, /* 00111000 */ 0x60, /* 01100000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ /* 123 0x7b '{' */ 0x0e, /* 00001110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x70, /* 01110000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x0e, /* 00001110 */ 0x00, /* 00000000 */ /* 124 0x7c '|' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 125 0x7d '}' */ 0x70, /* 01110000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x0e, /* 00001110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ /* 126 0x7e '~' */ 0x72, /* 01110010 */ 0x9c, /* 10011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 127 0x7f '' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ /* 128 0x80 'Ç' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x0c, /* 00001100 */ 0x78, /* 01111000 */ /* 129 0x81 'ü' */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 130 0x82 'é' */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 131 0x83 'â' */ 0x7c, /* 01111100 */ 0x82, /* 10000010 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 132 0x84 'ä' */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 133 0x85 'à' */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 134 0x86 'å' */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 135 0x87 'ç' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0x7e, /* 01111110 */ 0x0c, /* 00001100 */ 0x38, /* 00111000 */ /* 136 0x88 'ê' */ 0x7c, /* 01111100 */ 0x82, /* 10000010 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 137 0x89 'ë' */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 138 0x8a 'è' */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 139 0x8b 'ï' */ 0x66, /* 01100110 */ 0x00, /* 
00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 140 0x8c 'î' */ 0x7c, /* 01111100 */ 0x82, /* 10000010 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 141 0x8d 'ì' */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 142 0x8e 'Ä' */ 0xc6, /* 11000110 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 143 0x8f 'Å' */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 144 0x90 'É' */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xf8, /* 11111000 */ 0xc0, /* 11000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ /* 145 0x91 'æ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0xd8, /* 11011000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ /* 146 0x92 'Æ' */ 0x3e, /* 00111110 */ 0x6c, /* 01101100 */ 0xcc, /* 11001100 */ 0xfe, /* 11111110 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xce, /* 11001110 */ 0x00, /* 00000000 */ /* 147 0x93 'ô' */ 0x7c, /* 01111100 */ 0x82, /* 10000010 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 148 0x94 'ö' */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 149 0x95 'ò' */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 150 0x96 'û' */ 0x78, /* 01111000 */ 0x84, /* 10000100 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 151 0x97 'ù' */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 152 0x98 'ÿ' */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7e, /* 01111110 */ 0x06, /* 00000110 */ 0xfc, /* 11111100 */ /* 153 0x99 'Ö' */ 0xc6, /* 11000110 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ /* 154 0x9a 'Ü' */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 155 0x9b '¢' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 156 0x9c '£' */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x64, /* 01100100 */ 0xf0, /* 11110000 */ 0x60, /* 01100000 */ 0x66, /* 01100110 */ 0xfc, /* 11111100 */ 0x00, /* 00000000 */ /* 157 0x9d '¥' */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 158 0x9e '₧' */ 0xf8, /* 11111000 */ 0xcc, /* 
11001100 */ 0xcc, /* 11001100 */ 0xfa, /* 11111010 */ 0xc6, /* 11000110 */ 0xcf, /* 11001111 */ 0xc6, /* 11000110 */ 0xc7, /* 11000111 */ /* 159 0x9f 'ƒ' */ 0x0e, /* 00001110 */ 0x1b, /* 00011011 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0xd8, /* 11011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ /* 160 0xa0 'á' */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 161 0xa1 'í' */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 162 0xa2 'ó' */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 163 0xa3 'ú' */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 164 0xa4 'ñ' */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0xdc, /* 11011100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ /* 165 0xa5 'Ñ' */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0xe6, /* 11100110 */ 0xf6, /* 11110110 */ 0xde, /* 11011110 */ 0xce, /* 11001110 */ 0x00, /* 00000000 */ /* 166 0xa6 'ª' */ 0x3c, /* 00111100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x3e, /* 00111110 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 167 0xa7 'º' */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 168 0xa8 '¿' */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x63, /* 01100011 */ 0x3e, /* 00111110 */ 0x00, /* 00000000 */ /* 169 0xa9 '⌐' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 170 0xaa '¬' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 171 0xab '½' */ 0x63, /* 01100011 */ 0xe6, /* 11100110 */ 0x6c, /* 01101100 */ 0x7e, /* 01111110 */ 0x33, /* 00110011 */ 0x66, /* 01100110 */ 0xcc, /* 11001100 */ 0x0f, /* 00001111 */ /* 172 0xac '¼' */ 0x63, /* 01100011 */ 0xe6, /* 11100110 */ 0x6c, /* 01101100 */ 0x7a, /* 01111010 */ 0x36, /* 00110110 */ 0x6a, /* 01101010 */ 0xdf, /* 11011111 */ 0x06, /* 00000110 */ /* 173 0xad '¡' */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 174 0xae '«' */ 0x00, /* 00000000 */ 0x33, /* 00110011 */ 0x66, /* 01100110 */ 0xcc, /* 11001100 */ 0x66, /* 01100110 */ 0x33, /* 00110011 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 175 0xaf '»' */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0x66, /* 01100110 */ 0x33, /* 00110011 */ 0x66, /* 01100110 */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 176 0xb0 '░' */ 0x22, /* 00100010 */ 0x88, /* 10001000 */ 0x22, /* 00100010 */ 0x88, /* 10001000 */ 0x22, /* 00100010 */ 0x88, /* 10001000 */ 0x22, /* 00100010 */ 0x88, /* 10001000 */ /* 177 0xb1 '▒' */ 0x55, /* 01010101 */ 0xaa, /* 
10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ /* 178 0xb2 '▓' */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ /* 179 0xb3 '│' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 180 0xb4 '┤' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 181 0xb5 '╡' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 182 0xb6 '╢' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf6, /* 11110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 183 0xb7 '╖' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 184 0xb8 '╕' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 185 0xb9 '╣' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf6, /* 11110110 */ 0x06, /* 00000110 */ 0xf6, /* 11110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 186 0xba '║' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 187 0xbb '╗' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x06, /* 00000110 */ 0xf6, /* 11110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 188 0xbc '╝' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf6, /* 11110110 */ 0x06, /* 00000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 189 0xbd '╜' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 190 0xbe '╛' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 191 0xbf '┐' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 192 0xc0 '└' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 193 0xc1 '┴' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 194 0xc2 '┬' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 195 0xc3 '├' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 196 0xc4 '─' */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 197 0xc5 '┼' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 198 0xc6 '╞' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 199 0xc7 '╟' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x37, /* 00110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 200 0xc8 '╚' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x37, /* 00110111 */ 0x30, /* 00110000 */ 0x3f, /* 00111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 201 0xc9 '╔' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3f, /* 00111111 */ 0x30, /* 00110000 */ 0x37, /* 00110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 202 0xca '╩' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf7, /* 11110111 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 203 0xcb '╦' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0xf7, /* 11110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 204 0xcc '╠' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x37, /* 00110111 */ 0x30, /* 00110000 */ 0x37, /* 00110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 205 0xcd '═' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 206 0xce '╬' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf7, /* 11110111 */ 0x00, /* 00000000 */ 0xf7, /* 11110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 207 0xcf '╧' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 208 0xd0 '╨' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 209 0xd1 '╤' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 210 0xd2 '╥' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 211 0xd3 '╙' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x3f, /* 00111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 212 0xd4 '╘' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 213 0xd5 '╒' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 214 0xd6 '╓' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3f, /* 00111111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 215 0xd7 '╫' */ 0x36, /* 00110110 */ 0x36, /* 
00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xff, /* 11111111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 216 0xd8 '╪' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 217 0xd9 '┘' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 218 0xda '┌' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 219 0xdb '█' */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ /* 220 0xdc '▄' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ /* 221 0xdd '▌' */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ /* 222 0xde '▐' */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ /* 223 0xdf '▀' */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 224 0xe0 'α' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0xc8, /* 11001000 */ 0xdc, /* 11011100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 225 0xe1 'ß' */ 0x78, /* 01111000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xd8, /* 11011000 */ 0xcc, /* 11001100 */ 0xc6, /* 11000110 */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ /* 226 0xe2 'Γ' */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ /* 227 0xe3 'π' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ /* 228 0xe4 'Σ' */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ /* 229 0xe5 'σ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ /* 230 0xe6 'µ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0xc0, /* 11000000 */ /* 231 0xe7 'τ' */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 232 0xe8 'Φ' */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ /* 233 0xe9 'Θ' */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ /* 234 0xea 'Ω' */ 0x38, /* 00111000 */ 0x6c, /* 
01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0xee, /* 11101110 */ 0x00, /* 00000000 */ /* 235 0xeb 'δ' */ 0x0e, /* 00001110 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x3e, /* 00111110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 236 0xec '∞' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 237 0xed 'φ' */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x7e, /* 01111110 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0x7e, /* 01111110 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ /* 238 0xee 'ε' */ 0x1e, /* 00011110 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x7e, /* 01111110 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x1e, /* 00011110 */ 0x00, /* 00000000 */ /* 239 0xef '∩' */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 240 0xf0 '≡' */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 241 0xf1 '±' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ /* 242 0xf2 '≥' */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ /* 243 0xf3 '≤' */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ /* 244 0xf4 '⌠' */ 0x0e, /* 00001110 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 245 0xf5 '⌡' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0x70, /* 01110000 */ /* 246 0xf6 '÷' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 247 0xf7 '≈' */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 248 0xf8 '°' */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 249 0xf9 '·' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 250 0xfa '•' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 251 0xfb '√' */ 0x0f, /* 00001111 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0xec, /* 11101100 */ 0x6c, /* 01101100 */ 0x3c, /* 00111100 */ 0x1c, /* 00011100 */ /* 252 0xfc 'ⁿ' */ 0x6c, /* 01101100 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 253 0xfd '²' */ 0x78, /* 01111000 */ 0x0c, /* 
00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 254 0xfe '■' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 255 0xff ' ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ } }; const struct font_desc font_pearl_8x8 = { .idx = PEARL8x8_IDX, .name = "PEARL8x8", .width = 8, .height = 8, .charcount = 256, .data = fontdata_pearl8x8.data, .pref = 2, };
linux-master
lib/fonts/font_pearl_8x8.c
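Usage sketch (illustrative, not part of the dumped sources above): assuming the in-kernel find_font() helper declared in <linux/font.h>, a driver could look up the PEARL8x8 descriptor registered by the file above and walk one glyph's bitmap rows. The names dump_glyph() and font_demo_init() are hypothetical; only find_font() and struct font_desc come from the kernel headers.

#include <linux/font.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/printk.h>

/* Hypothetical helper: print the bitmap rows of one glyph of a font
 * that is at most 8 pixels wide (one byte per row, font->height rows). */
static void dump_glyph(const struct font_desc *font, unsigned char ch)
{
	const unsigned char *glyph =
		(const unsigned char *)font->data + (size_t)ch * font->height;
	unsigned int row;

	for (row = 0; row < font->height; row++)
		pr_info("row %u: 0x%02x\n", row, glyph[row]);
}

static int __init font_demo_init(void)
{
	/* Look the font up by the .name field of its font_desc ("PEARL8x8"). */
	const struct font_desc *font = find_font("PEARL8x8");

	if (!font)
		return -ENOENT;
	dump_glyph(font, 'A');	/* dump the 8 rows of the glyph for 'A' */
	return 0;
}
module_init(font_demo_init);
MODULE_LICENSE("GPL");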
// SPDX-License-Identifier: GPL-2.0 #include <linux/font.h> #define FONTDATAMAX 2560 static const struct font_data fontdata_6x10 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 1 0x01 '^A' */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x84, /* 10000100 */ 0xCC, /* 11001100 */ 0x84, /* 10000100 */ 0xCC, /* 11001100 */ 0xB4, /* 10110100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 2 0x02 '^B' */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0xFC, /* 11111100 */ 0xB4, /* 10110100 */ 0xFC, /* 11111100 */ 0xB4, /* 10110100 */ 0xCC, /* 11001100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 3 0x03 '^C' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x28, /* 00101000 */ 0x7C, /* 01111100 */ 0x7C, /* 01111100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 4 0x04 '^D' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x7C, /* 01111100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 5 0x05 '^E' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x6C, /* 01101100 */ 0x6C, /* 01101100 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 6 0x06 '^F' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x7C, /* 01111100 */ 0x7C, /* 01111100 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 7 0x07 '^G' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x78, /* 01111000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 8 0x08 '^H' */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xCC, /* 11001100 */ 0x84, /* 10000100 */ 0xCC, /* 11001100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ /* 9 0x09 '^I' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x48, /* 01001000 */ 0x84, /* 10000100 */ 0x48, /* 01001000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 10 0x0A '^J' */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xCC, /* 11001100 */ 0xB4, /* 10110100 */ 0x78, /* 01111000 */ 0xB4, /* 10110100 */ 0xCC, /* 11001100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ /* 11 0x0B '^K' */ 0x00, /* 00000000 */ 0x3C, /* 00111100 */ 0x14, /* 00010100 */ 0x20, /* 00100000 */ 0x78, /* 01111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 12 0x0C '^L' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 13 0x0D '^M' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x14, /* 00010100 */ 0x14, /* 00010100 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x70, /* 01110000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 14 0x0E '^N' */ 0x00, /* 00000000 */ 0x3C, /* 00111100 */ 0x24, /* 00100100 */ 0x3C, /* 00111100 */ 0x24, /* 00100100 */ 0x24, /* 00100100 */ 0x6C, /* 01101100 */ 0x6C, /* 01101100 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ /* 15 0x0F '^O' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x54, /* 01010100 */ 0x38, /* 00111000 */ 0x6C, /* 01101100 */ 0x38, /* 00111000 */ 0x54, /* 01010100 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 16 0x10 '^P' */ 0x00, /* 00000000 */ 0x40, /* 01000000 */ 0x60, /* 01100000 */ 0x70, /* 01110000 */ 0x78, /* 01111000 */ 0x70, /* 01110000 */ 0x60, /* 01100000 */ 0x40, /* 01000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 17 0x11 '^Q' */ 0x00, /* 00000000 */ 0x04, /* 00000100 */ 0x0C, /* 00001100 */ 0x1C, /* 00011100 */ 0x3C, /* 00111100 */ 0x1C, /* 00011100 */ 0x0C, /* 00001100 */ 0x04, /* 00000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 18 0x12 '^R' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x54, /* 01010100 */ 0x10, /* 00010000 */ 0x54, /* 01010100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 19 0x13 '^S' */ 0x00, /* 00000000 */ 0x48, /* 01001000 */ 0x48, /* 01001000 */ 0x48, /* 01001000 */ 0x48, /* 01001000 */ 0x48, /* 01001000 */ 0x00, /* 00000000 */ 0x48, /* 01001000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 20 0x14 '^T' */ 0x3C, /* 00111100 */ 0x54, /* 01010100 */ 0x54, /* 01010100 */ 0x54, /* 01010100 */ 0x3C, /* 00111100 */ 0x14, /* 00010100 */ 0x14, /* 00010100 */ 0x14, /* 00010100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 21 0x15 '^U' */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x20, /* 00100000 */ 0x50, /* 01010000 */ 0x48, /* 01001000 */ 0x24, /* 00100100 */ 0x14, /* 00010100 */ 0x08, /* 00001000 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ /* 22 0x16 '^V' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xF8, /* 11111000 */ 0xF8, /* 11111000 */ 0xF8, /* 11111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 23 0x17 '^W' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x54, /* 01010100 */ 0x10, /* 00010000 */ 0x54, /* 01010100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x7C, /* 01111100 */ 0x00, /* 00000000 */ /* 24 0x18 '^X' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x54, /* 01010100 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 25 0x19 '^Y' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x54, /* 01010100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 26 0x1A '^Z' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x08, /* 00001000 */ 0x7C, /* 01111100 */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 27 0x1B '^[' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ 0x7C, /* 01111100 */ 0x20, /* 00100000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 28 0x1C '^\' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 29 0x1D '^]' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x48, /* 01001000 */ 0x84, /* 10000100 */ 0xFC, /* 11111100 */ 0x84, /* 10000100 */ 0x48, /* 01001000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 30 0x1E '^^' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 
0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x7C, /* 01111100 */ 0x7C, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 31 0x1F '^_' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x7C, /* 01111100 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 32 0x20 ' ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 33 0x21 '!' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 34 0x22 '"' */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 35 0x23 '#' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x28, /* 00101000 */ 0x7C, /* 01111100 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x7C, /* 01111100 */ 0x28, /* 00101000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 36 0x24 '$' */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x54, /* 01010100 */ 0x50, /* 01010000 */ 0x38, /* 00111000 */ 0x14, /* 00010100 */ 0x54, /* 01010100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ /* 37 0x25 '%' */ 0x00, /* 00000000 */ 0x64, /* 01100100 */ 0x64, /* 01100100 */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ 0x4C, /* 01001100 */ 0x4C, /* 01001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 38 0x26 '&' */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x48, /* 01001000 */ 0x50, /* 01010000 */ 0x20, /* 00100000 */ 0x54, /* 01010100 */ 0x48, /* 01001000 */ 0x34, /* 00110100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 39 0x27 ''' */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 40 0x28 '(' */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ 0x20, /* 00100000 */ 0x20, /* 00100000 */ 0x20, /* 00100000 */ 0x20, /* 00100000 */ 0x10, /* 00010000 */ 0x08, /* 00001000 */ 0x00, /* 00000000 */ /* 41 0x29 ')' */ 0x20, /* 00100000 */ 0x10, /* 00010000 */ 0x08, /* 00001000 */ 0x08, /* 00001000 */ 0x08, /* 00001000 */ 0x08, /* 00001000 */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ 0x00, /* 00000000 */ /* 42 0x2A '*' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x54, /* 01010100 */ 0x38, /* 00111000 */ 0x54, /* 01010100 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 43 0x2B '+' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x7C, /* 01111100 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 44 0x2C ',' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ /* 45 0x2D '-' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, 
/* 00000000 */ /* 46 0x2E '.' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 47 0x2F '/' */ 0x04, /* 00000100 */ 0x04, /* 00000100 */ 0x08, /* 00001000 */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ 0x20, /* 00100000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ /* 48 0x30 '0' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x4C, /* 01001100 */ 0x54, /* 01010100 */ 0x64, /* 01100100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 49 0x31 '1' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x30, /* 00110000 */ 0x50, /* 01010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x7C, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 50 0x32 '2' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x04, /* 00000100 */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ 0x7C, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 51 0x33 '3' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x04, /* 00000100 */ 0x18, /* 00011000 */ 0x04, /* 00000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 52 0x34 '4' */ 0x00, /* 00000000 */ 0x08, /* 00001000 */ 0x18, /* 00011000 */ 0x28, /* 00101000 */ 0x48, /* 01001000 */ 0x7C, /* 01111100 */ 0x08, /* 00001000 */ 0x08, /* 00001000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 53 0x35 '5' */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x40, /* 01000000 */ 0x78, /* 01111000 */ 0x04, /* 00000100 */ 0x04, /* 00000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 54 0x36 '6' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x20, /* 00100000 */ 0x40, /* 01000000 */ 0x78, /* 01111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 55 0x37 '7' */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x04, /* 00000100 */ 0x04, /* 00000100 */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 56 0x38 '8' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 57 0x39 '9' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x3C, /* 00111100 */ 0x04, /* 00000100 */ 0x08, /* 00001000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 58 0x3A ':' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 59 0x3B ';' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ /* 60 0x3C '<' */ 0x00, /* 00000000 */ 0x04, /* 00000100 */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ 0x10, /* 00010000 */ 0x08, /* 00001000 */ 0x04, /* 00000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 61 0x3D '=' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x00, /* 00000000 
*/ 0x7C, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 62 0x3E '>' */ 0x00, /* 00000000 */ 0x20, /* 00100000 */ 0x10, /* 00010000 */ 0x08, /* 00001000 */ 0x04, /* 00000100 */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 63 0x3F '?' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x04, /* 00000100 */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 64 0x40 '@' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x5C, /* 01011100 */ 0x54, /* 01010100 */ 0x5C, /* 01011100 */ 0x40, /* 01000000 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 65 0x41 'A' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x28, /* 00101000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x7C, /* 01111100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 66 0x42 'B' */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x24, /* 00100100 */ 0x24, /* 00100100 */ 0x38, /* 00111000 */ 0x24, /* 00100100 */ 0x24, /* 00100100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 67 0x43 'C' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 68 0x44 'D' */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x24, /* 00100100 */ 0x24, /* 00100100 */ 0x24, /* 00100100 */ 0x24, /* 00100100 */ 0x24, /* 00100100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 69 0x45 'E' */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x78, /* 01111000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x7C, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 70 0x46 'F' */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x78, /* 01111000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 71 0x47 'G' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x40, /* 01000000 */ 0x5C, /* 01011100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 72 0x48 'H' */ 0x00, /* 00000000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x7C, /* 01111100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 73 0x49 'I' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 74 0x4A 'J' */ 0x00, /* 00000000 */ 0x1C, /* 00011100 */ 0x08, /* 00001000 */ 0x08, /* 00001000 */ 0x08, /* 00001000 */ 0x48, /* 01001000 */ 0x48, /* 01001000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 75 0x4B 'K' */ 0x00, /* 00000000 */ 0x44, /* 01000100 */ 0x48, /* 01001000 */ 0x50, /* 01010000 */ 0x60, /* 01100000 */ 0x50, /* 01010000 */ 0x48, /* 01001000 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 76 0x4C 'L' */ 0x00, /* 00000000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x7C, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 77 0x4D 'M' */ 0x00, /* 
00000000 */ 0x44, /* 01000100 */ 0x6C, /* 01101100 */ 0x54, /* 01010100 */ 0x54, /* 01010100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 78 0x4E 'N' */ 0x00, /* 00000000 */ 0x44, /* 01000100 */ 0x64, /* 01100100 */ 0x54, /* 01010100 */ 0x4C, /* 01001100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 79 0x4F 'O' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 80 0x50 'P' */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x78, /* 01111000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 81 0x51 'Q' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x54, /* 01010100 */ 0x48, /* 01001000 */ 0x34, /* 00110100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 82 0x52 'R' */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x78, /* 01111000 */ 0x50, /* 01010000 */ 0x48, /* 01001000 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 83 0x53 'S' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x40, /* 01000000 */ 0x38, /* 00111000 */ 0x04, /* 00000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 84 0x54 'T' */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 85 0x55 'U' */ 0x00, /* 00000000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 86 0x56 'V' */ 0x00, /* 00000000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x28, /* 00101000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 87 0x57 'W' */ 0x00, /* 00000000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x54, /* 01010100 */ 0x54, /* 01010100 */ 0x6C, /* 01101100 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 88 0x58 'X' */ 0x00, /* 00000000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x28, /* 00101000 */ 0x10, /* 00010000 */ 0x28, /* 00101000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 89 0x59 'Y' */ 0x00, /* 00000000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x28, /* 00101000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 90 0x5A 'Z' */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x04, /* 00000100 */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ 0x40, /* 01000000 */ 0x7C, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 91 0x5B '[' */ 0x18, /* 00011000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 92 0x5C '\' */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x20, /* 00100000 */ 0x20, /* 00100000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x08, /* 00001000 
*/ 0x08, /* 00001000 */ 0x04, /* 00000100 */ 0x04, /* 00000100 */ /* 93 0x5D ']' */ 0x30, /* 00110000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ /* 94 0x5E '^' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x28, /* 00101000 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 95 0x5F '_' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x00, /* 00000000 */ /* 96 0x60 '`' */ 0x20, /* 00100000 */ 0x10, /* 00010000 */ 0x08, /* 00001000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 97 0x61 'a' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x04, /* 00000100 */ 0x3C, /* 00111100 */ 0x44, /* 01000100 */ 0x3C, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 98 0x62 'b' */ 0x00, /* 00000000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x58, /* 01011000 */ 0x64, /* 01100100 */ 0x44, /* 01000100 */ 0x64, /* 01100100 */ 0x58, /* 01011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 99 0x63 'c' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x40, /* 01000000 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 100 0x64 'd' */ 0x00, /* 00000000 */ 0x04, /* 00000100 */ 0x04, /* 00000100 */ 0x34, /* 00110100 */ 0x4C, /* 01001100 */ 0x44, /* 01000100 */ 0x4C, /* 01001100 */ 0x34, /* 00110100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 101 0x65 'e' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x7C, /* 01111100 */ 0x40, /* 01000000 */ 0x3C, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 102 0x66 'f' */ 0x00, /* 00000000 */ 0x0C, /* 00001100 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 103 0x67 'g' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x34, /* 00110100 */ 0x4C, /* 01001100 */ 0x44, /* 01000100 */ 0x4C, /* 01001100 */ 0x34, /* 00110100 */ 0x04, /* 00000100 */ 0x38, /* 00111000 */ /* 104 0x68 'h' */ 0x00, /* 00000000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x78, /* 01111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 105 0x69 'i' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 106 0x6A 'j' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x60, /* 01100000 */ /* 107 0x6B 'k' */ 0x00, /* 00000000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x48, /* 01001000 */ 0x50, /* 01010000 */ 0x70, /* 01110000 */ 0x48, /* 01001000 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 108 0x6C 'l' */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 
0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 109 0x6D 'm' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x68, /* 01101000 */ 0x54, /* 01010100 */ 0x54, /* 01010100 */ 0x54, /* 01010100 */ 0x54, /* 01010100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 110 0x6E 'n' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x58, /* 01011000 */ 0x64, /* 01100100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 111 0x6F 'o' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 112 0x70 'p' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x58, /* 01011000 */ 0x64, /* 01100100 */ 0x44, /* 01000100 */ 0x64, /* 01100100 */ 0x58, /* 01011000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ /* 113 0x71 'q' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x34, /* 00110100 */ 0x4C, /* 01001100 */ 0x44, /* 01000100 */ 0x4C, /* 01001100 */ 0x34, /* 00110100 */ 0x04, /* 00000100 */ 0x04, /* 00000100 */ /* 114 0x72 'r' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x58, /* 01011000 */ 0x64, /* 01100100 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 115 0x73 's' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3C, /* 00111100 */ 0x40, /* 01000000 */ 0x38, /* 00111000 */ 0x04, /* 00000100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 116 0x74 't' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x0C, /* 00001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 117 0x75 'u' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x4C, /* 01001100 */ 0x34, /* 00110100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 118 0x76 'v' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x28, /* 00101000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 119 0x77 'w' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x54, /* 01010100 */ 0x54, /* 01010100 */ 0x54, /* 01010100 */ 0x54, /* 01010100 */ 0x28, /* 00101000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 120 0x78 'x' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x44, /* 01000100 */ 0x28, /* 00101000 */ 0x10, /* 00010000 */ 0x28, /* 00101000 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 121 0x79 'y' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x3C, /* 00111100 */ 0x04, /* 00000100 */ 0x38, /* 00111000 */ /* 122 0x7A 'z' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ 0x7C, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 123 0x7B '{' */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 
00010000 */ 0x08, /* 00001000 */ 0x00, /* 00000000 */ /* 124 0x7C '|' */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ /* 125 0x7D '}' */ 0x20, /* 00100000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ 0x00, /* 00000000 */ /* 126 0x7E '~' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x20, /* 00100000 */ 0x54, /* 01010100 */ 0x08, /* 00001000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 127 0x7F '' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x28, /* 00101000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x7C, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 128 0x80 '\200' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ /* 129 0x81 '\201' */ 0x00, /* 00000000 */ 0x28, /* 00101000 */ 0x00, /* 00000000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x4C, /* 01001100 */ 0x34, /* 00110100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 130 0x82 '\202' */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x7C, /* 01111100 */ 0x40, /* 01000000 */ 0x3C, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 131 0x83 '\203' */ 0x10, /* 00010000 */ 0x28, /* 00101000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x04, /* 00000100 */ 0x3C, /* 00111100 */ 0x44, /* 01000100 */ 0x3C, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 132 0x84 '\204' */ 0x00, /* 00000000 */ 0x28, /* 00101000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x04, /* 00000100 */ 0x3C, /* 00111100 */ 0x44, /* 01000100 */ 0x3C, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 133 0x85 '\205' */ 0x10, /* 00010000 */ 0x08, /* 00001000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x04, /* 00000100 */ 0x3C, /* 00111100 */ 0x44, /* 01000100 */ 0x3C, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 134 0x86 '\206' */ 0x18, /* 00011000 */ 0x24, /* 00100100 */ 0x18, /* 00011000 */ 0x38, /* 00111000 */ 0x04, /* 00000100 */ 0x3C, /* 00111100 */ 0x44, /* 01000100 */ 0x3C, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 135 0x87 '\207' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x40, /* 01000000 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ /* 136 0x88 '\210' */ 0x10, /* 00010000 */ 0x28, /* 00101000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x7C, /* 01111100 */ 0x40, /* 01000000 */ 0x3C, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 137 0x89 '\211' */ 0x00, /* 00000000 */ 0x28, /* 00101000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x7C, /* 01111100 */ 0x40, /* 01000000 */ 0x3C, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 138 0x8A '\212' */ 0x20, /* 00100000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x7C, /* 01111100 */ 0x40, /* 01000000 */ 0x3C, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 139 0x8B '\213' */ 0x00, /* 
00000000 */ 0x28, /* 00101000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 140 0x8C '\214' */ 0x10, /* 00010000 */ 0x28, /* 00101000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 141 0x8D '\215' */ 0x20, /* 00100000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 142 0x8E '\216' */ 0x44, /* 01000100 */ 0x10, /* 00010000 */ 0x28, /* 00101000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x7C, /* 01111100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 143 0x8F '\217' */ 0x30, /* 00110000 */ 0x48, /* 01001000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x7C, /* 01111100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 144 0x90 '\220' */ 0x10, /* 00010000 */ 0x7C, /* 01111100 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x78, /* 01111000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x7C, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 145 0x91 '\221' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x14, /* 00010100 */ 0x7C, /* 01111100 */ 0x50, /* 01010000 */ 0x3C, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 146 0x92 '\222' */ 0x00, /* 00000000 */ 0x3C, /* 00111100 */ 0x50, /* 01010000 */ 0x50, /* 01010000 */ 0x78, /* 01111000 */ 0x50, /* 01010000 */ 0x50, /* 01010000 */ 0x5C, /* 01011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 147 0x93 '\223' */ 0x10, /* 00010000 */ 0x28, /* 00101000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 148 0x94 '\224' */ 0x00, /* 00000000 */ 0x28, /* 00101000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 149 0x95 '\225' */ 0x20, /* 00100000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 150 0x96 '\226' */ 0x10, /* 00010000 */ 0x28, /* 00101000 */ 0x00, /* 00000000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x4C, /* 01001100 */ 0x34, /* 00110100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 151 0x97 '\227' */ 0x20, /* 00100000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x4C, /* 01001100 */ 0x34, /* 00110100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 152 0x98 '\230' */ 0x00, /* 00000000 */ 0x28, /* 00101000 */ 0x00, /* 00000000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x3C, /* 00111100 */ 0x04, /* 00000100 */ 0x38, /* 00111000 */ /* 153 0x99 '\231' */ 0x84, /* 10000100 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 154 0x9A '\232' */ 0x88, /* 10001000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 
0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 155 0x9B '\233' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x54, /* 01010100 */ 0x50, /* 01010000 */ 0x54, /* 01010100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ /* 156 0x9C '\234' */ 0x30, /* 00110000 */ 0x48, /* 01001000 */ 0x40, /* 01000000 */ 0x70, /* 01110000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x44, /* 01000100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 157 0x9D '\235' */ 0x00, /* 00000000 */ 0x44, /* 01000100 */ 0x28, /* 00101000 */ 0x7C, /* 01111100 */ 0x10, /* 00010000 */ 0x7C, /* 01111100 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 158 0x9E '\236' */ 0x00, /* 00000000 */ 0x70, /* 01110000 */ 0x48, /* 01001000 */ 0x70, /* 01110000 */ 0x48, /* 01001000 */ 0x5C, /* 01011100 */ 0x48, /* 01001000 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 159 0x9F '\237' */ 0x00, /* 00000000 */ 0x0C, /* 00001100 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 160 0xA0 '\240' */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x04, /* 00000100 */ 0x3C, /* 00111100 */ 0x44, /* 01000100 */ 0x3C, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 161 0xA1 '\241' */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 162 0xA2 '\242' */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 163 0xA3 '\243' */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x4C, /* 01001100 */ 0x34, /* 00110100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 164 0xA4 '\244' */ 0x34, /* 00110100 */ 0x58, /* 01011000 */ 0x00, /* 00000000 */ 0x58, /* 01011000 */ 0x64, /* 01100100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 165 0xA5 '\245' */ 0x58, /* 01011000 */ 0x44, /* 01000100 */ 0x64, /* 01100100 */ 0x54, /* 01010100 */ 0x4C, /* 01001100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 166 0xA6 '\246' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x04, /* 00000100 */ 0x3C, /* 00111100 */ 0x44, /* 01000100 */ 0x3C, /* 00111100 */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 167 0xA7 '\247' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 168 0xA8 '\250' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ 0x40, /* 01000000 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 169 0xA9 '\251' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 170 0xAA '\252' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x04, /* 00000100 */ 0x04, /* 00000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 171 0xAB '\253' */ 0x20, /* 00100000 */ 0x60, /* 01100000 */ 0x24, /* 00100100 */ 0x28, /* 00101000 */ 0x10, /* 00010000 */ 0x28, /* 00101000 */ 0x44, /* 01000100 */ 0x08, /* 00001000 */ 0x1C, /* 00011100 */ 0x00, /* 00000000 */ /* 172 0xAC '\254' */ 0x20, /* 00100000 */ 0x60, /* 01100000 */ 0x24, /* 00100100 */ 0x28, /* 00101000 */ 0x10, /* 00010000 */ 0x28, /* 00101000 */ 0x58, /* 01011000 */ 0x3C, /* 00111100 */ 0x08, /* 00001000 */ 0x00, /* 00000000 */ /* 173 0xAD '\255' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 174 0xAE '\256' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x24, /* 00100100 */ 0x48, /* 01001000 */ 0x90, /* 10010000 */ 0x48, /* 01001000 */ 0x24, /* 00100100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 175 0xAF '\257' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x90, /* 10010000 */ 0x48, /* 01001000 */ 0x24, /* 00100100 */ 0x48, /* 01001000 */ 0x90, /* 10010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 176 0xB0 '\260' */ 0x10, /* 00010000 */ 0x44, /* 01000100 */ 0x10, /* 00010000 */ 0x44, /* 01000100 */ 0x10, /* 00010000 */ 0x44, /* 01000100 */ 0x10, /* 00010000 */ 0x44, /* 01000100 */ 0x10, /* 00010000 */ 0x44, /* 01000100 */ /* 177 0xB1 '\261' */ 0xA8, /* 10101000 */ 0x54, /* 01010100 */ 0xA8, /* 10101000 */ 0x54, /* 01010100 */ 0xA8, /* 10101000 */ 0x54, /* 01010100 */ 0xA8, /* 10101000 */ 0x54, /* 01010100 */ 0xA8, /* 10101000 */ 0x54, /* 01010100 */ /* 178 0xB2 '\262' */ 0xDC, /* 11011100 */ 0x74, /* 01110100 */ 0xDC, /* 11011100 */ 0x74, /* 01110100 */ 0xDC, /* 11011100 */ 0x74, /* 01110100 */ 0xDC, /* 11011100 */ 0x74, /* 01110100 */ 0xDC, /* 11011100 */ 0x74, /* 01110100 */ /* 179 0xB3 '\263' */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ /* 180 0xB4 '\264' */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0xF0, /* 11110000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ /* 181 0xB5 '\265' */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0xF0, /* 11110000 */ 0x10, /* 00010000 */ 0xF0, /* 11110000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ /* 182 0xB6 '\266' */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0xE8, /* 11101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ /* 183 0xB7 '\267' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xF8, /* 11111000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ /* 184 0xB8 '\270' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xF0, /* 11110000 */ 0x10, /* 00010000 */ 0xF0, /* 11110000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ /* 185 0xB9 '\271' 
*/ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0xE8, /* 11101000 */ 0x08, /* 00001000 */ 0xE8, /* 11101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ /* 186 0xBA '\272' */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ /* 187 0xBB '\273' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xF8, /* 11111000 */ 0x08, /* 00001000 */ 0xE8, /* 11101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ /* 188 0xBC '\274' */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0xE8, /* 11101000 */ 0x08, /* 00001000 */ 0xF8, /* 11111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 189 0xBD '\275' */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0xF8, /* 11111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 190 0xBE '\276' */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0xF0, /* 11110000 */ 0x10, /* 00010000 */ 0xF0, /* 11110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 191 0xBF '\277' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xF0, /* 11110000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ /* 192 0xC0 '\300' */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x1C, /* 00011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 193 0xC1 '\301' */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0xFC, /* 11111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 194 0xC2 '\302' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xFC, /* 11111100 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ /* 195 0xC3 '\303' */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x1C, /* 00011100 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ /* 196 0xC4 '\304' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xFC, /* 11111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 197 0xC5 '\305' */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0xFC, /* 11111100 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ /* 198 0xC6 '\306' */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x1C, /* 00011100 */ 0x10, /* 00010000 */ 0x1C, /* 00011100 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ /* 199 0xC7 '\307' */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x2C, /* 00101100 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ /* 200 0xC8 '\310' */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x2C, /* 
00101100 */ 0x20, /* 00100000 */ 0x3C, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 201 0xC9 '\311' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3C, /* 00111100 */ 0x20, /* 00100000 */ 0x2C, /* 00101100 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ /* 202 0xCA '\312' */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0xEC, /* 11101100 */ 0x00, /* 00000000 */ 0xFC, /* 11111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 203 0xCB '\313' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xFC, /* 11111100 */ 0x00, /* 00000000 */ 0xEC, /* 11101100 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ /* 204 0xCC '\314' */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x2C, /* 00101100 */ 0x20, /* 00100000 */ 0x2C, /* 00101100 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ /* 205 0xCD '\315' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xFC, /* 11111100 */ 0x00, /* 00000000 */ 0xFC, /* 11111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 206 0xCE '\316' */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0xEC, /* 11101100 */ 0x00, /* 00000000 */ 0xEC, /* 11101100 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ /* 207 0xCF '\317' */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0xFC, /* 11111100 */ 0x00, /* 00000000 */ 0xFC, /* 11111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 208 0xD0 '\320' */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0xFC, /* 11111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 209 0xD1 '\321' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xFC, /* 11111100 */ 0x00, /* 00000000 */ 0xFC, /* 11111100 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ /* 210 0xD2 '\322' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xFC, /* 11111100 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ /* 211 0xD3 '\323' */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x3C, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 212 0xD4 '\324' */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x1C, /* 00011100 */ 0x10, /* 00010000 */ 0x1C, /* 00011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 213 0xD5 '\325' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1C, /* 00011100 */ 0x10, /* 00010000 */ 0x1C, /* 00011100 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ /* 214 0xD6 '\326' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3C, /* 00111100 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ /* 215 0xD7 '\327' */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0xFC, /* 11111100 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 
0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ /* 216 0xD8 '\330' */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0xFC, /* 11111100 */ 0x10, /* 00010000 */ 0xFC, /* 11111100 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ /* 217 0xD9 '\331' */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0xF0, /* 11110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 218 0xDA '\332' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1C, /* 00011100 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ /* 219 0xDB '\333' */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ /* 220 0xDC '\334' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ /* 221 0xDD '\335' */ 0xE0, /* 11100000 */ 0xE0, /* 11100000 */ 0xE0, /* 11100000 */ 0xE0, /* 11100000 */ 0xE0, /* 11100000 */ 0xE0, /* 11100000 */ 0xE0, /* 11100000 */ 0xE0, /* 11100000 */ 0xE0, /* 11100000 */ 0xE0, /* 11100000 */ /* 222 0xDE '\336' */ 0x1C, /* 00011100 */ 0x1C, /* 00011100 */ 0x1C, /* 00011100 */ 0x1C, /* 00011100 */ 0x1C, /* 00011100 */ 0x1C, /* 00011100 */ 0x1C, /* 00011100 */ 0x1C, /* 00011100 */ 0x1C, /* 00011100 */ 0x1C, /* 00011100 */ /* 223 0xDF '\337' */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0xFC, /* 11111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 224 0xE0 '\340' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x34, /* 00110100 */ 0x48, /* 01001000 */ 0x48, /* 01001000 */ 0x48, /* 01001000 */ 0x34, /* 00110100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 225 0xE1 '\341' */ 0x18, /* 00011000 */ 0x24, /* 00100100 */ 0x44, /* 01000100 */ 0x48, /* 01001000 */ 0x48, /* 01001000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x58, /* 01011000 */ 0x40, /* 01000000 */ 0x00, /* 00000000 */ /* 226 0xE2 '\342' */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 227 0xE3 '\343' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x28, /* 00101000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 228 0xE4 '\344' */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x24, /* 00100100 */ 0x10, /* 00010000 */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x24, /* 00100100 */ 0x7C, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 229 0xE5 '\345' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3C, /* 00111100 */ 0x48, /* 01001000 */ 0x48, /* 01001000 */ 0x48, /* 01001000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 230 0xE6 '\346' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x48, /* 01001000 */ 0x48, /* 01001000 */ 0x48, /* 01001000 */ 0x48, /* 01001000 */ 0x74, /* 01110100 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ /* 231 0xE7 
'\347' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x0C, /* 00001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 232 0xE8 '\350' */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 233 0xE9 '\351' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x7C, /* 01111100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 234 0xEA '\352' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x28, /* 00101000 */ 0x6C, /* 01101100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 235 0xEB '\353' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x20, /* 00100000 */ 0x18, /* 00011000 */ 0x24, /* 00100100 */ 0x24, /* 00100100 */ 0x24, /* 00100100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 236 0xEC '\354' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x54, /* 01010100 */ 0x54, /* 01010100 */ 0x54, /* 01010100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 237 0xED '\355' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x04, /* 00000100 */ 0x38, /* 00111000 */ 0x54, /* 01010100 */ 0x54, /* 01010100 */ 0x38, /* 00111000 */ 0x40, /* 01000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 238 0xEE '\356' */ 0x00, /* 00000000 */ 0x3C, /* 00111100 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x38, /* 00111000 */ 0x40, /* 01000000 */ 0x40, /* 01000000 */ 0x3C, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 239 0xEF '\357' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x44, /* 01000100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 240 0xF0 '\360' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xFC, /* 11111100 */ 0x00, /* 00000000 */ 0xFC, /* 11111100 */ 0x00, /* 00000000 */ 0xFC, /* 11111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 241 0xF1 '\361' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x7C, /* 01111100 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 242 0xF2 '\362' */ 0x00, /* 00000000 */ 0x20, /* 00100000 */ 0x10, /* 00010000 */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 243 0xF3 '\363' */ 0x00, /* 00000000 */ 0x08, /* 00001000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ 0x10, /* 00010000 */ 0x08, /* 00001000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 244 0xF4 '\364' */ 0x00, /* 00000000 */ 0x0C, /* 00001100 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ /* 245 0xF5 '\365' */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x10, /* 00010000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ /* 246 0xF6 '\366' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 
0x00, /* 00000000 */ 0x7C, /* 01111100 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 247 0xF7 '\367' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x20, /* 00100000 */ 0x54, /* 01010100 */ 0x08, /* 00001000 */ 0x20, /* 00100000 */ 0x54, /* 01010100 */ 0x08, /* 00001000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 248 0xF8 '\370' */ 0x30, /* 00110000 */ 0x48, /* 01001000 */ 0x48, /* 01001000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 249 0xF9 '\371' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 250 0xFA '\372' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 251 0xFB '\373' */ 0x00, /* 00000000 */ 0x04, /* 00000100 */ 0x08, /* 00001000 */ 0x08, /* 00001000 */ 0x50, /* 01010000 */ 0x50, /* 01010000 */ 0x20, /* 00100000 */ 0x20, /* 00100000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 252 0xFC '\374' */ 0x60, /* 01100000 */ 0x50, /* 01010000 */ 0x50, /* 01010000 */ 0x50, /* 01010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 253 0xFD '\375' */ 0x60, /* 01100000 */ 0x10, /* 00010000 */ 0x20, /* 00100000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 254 0xFE '\376' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 255 0xFF '\377' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ } }; const struct font_desc font_6x10 = { .idx = FONT6x10_IDX, .name = "6x10", .width = 6, .height = 10, .charcount = 256, .data = fontdata_6x10.data, .pref = 0, };
linux-master
lib/fonts/font_6x10.c
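The font_6x10 table above follows the byte layout used throughout lib/fonts/: 256 glyphs, height (here 10) consecutive bytes per glyph, one byte per pixel row, with the most significant bit of each byte mapping to the leftmost column (only the top `width` bits carry pixels for a 6-pixel-wide font, as the bit-pattern comments show). The standalone sketch below tests a single pixel under exactly that assumption; `glyph_pixel` is a hypothetical helper for illustration, not part of the kernel font API.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Sketch, assuming the layout visible above: `data` holds `charcount` glyphs
 * of `height` bytes each, one byte per row, MSB = leftmost pixel column.
 */
static bool glyph_pixel(const uint8_t *data, unsigned int height,
			unsigned int c, unsigned int row, unsigned int col)
{
	const uint8_t *glyph = data + (size_t)c * height;

	return glyph[row] & (0x80u >> col);
}

/*
 * Example (hypothetical usage): glyph_pixel(fontdata_6x10.data, 10, 'A', 0, 2)
 * tests row 0, column 2 of the 'A' glyph in the table above.
 */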
// SPDX-License-Identifier: GPL-2.0 /* Acorn-like font definition, with PC graphics characters */ #include <linux/font.h> #define FONTDATAMAX 2048 static const struct font_data acorndata_8x8 = { { 0, 0, FONTDATAMAX, 0 }, { /* 00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */ /* 01 */ 0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */ /* 02 */ 0x7e, 0xff, 0xbd, 0xff, 0xc3, 0xe7, 0xff, 0x7e, /* ^B */ /* 03 */ 0x6c, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10, 0x00, /* ^C */ /* 04 */ 0x10, 0x38, 0x7c, 0xfe, 0x7c, 0x38, 0x10, 0x00, /* ^D */ /* 05 */ 0x00, 0x18, 0x3c, 0xe7, 0xe7, 0x3c, 0x18, 0x00, /* ^E */ /* 06 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 07 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 08 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 09 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 0A */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 0B */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 0C */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 0D */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 0E */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 0F */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 10 */ 0x00, 0x60, 0x78, 0x7e, 0x7e, 0x78, 0x60, 0x00, /* |> */ /* 11 */ 0x00, 0x06, 0x1e, 0x7e, 0x7e, 0x1e, 0x06, 0x00, /* <| */ /* 12 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 13 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 14 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 15 */ 0x3c, 0x60, 0x3c, 0x66, 0x3c, 0x06, 0x3c, 0x00, /* 16 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 17 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 18 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 19 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 1A */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 1B */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 1C */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 1D */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 1E */ 0x00, 0x18, 0x18, 0x3c, 0x3c, 0x7e, 0x7e, 0x00, /* /\ */ /* 1F */ 0x00, 0x7e, 0x7e, 0x3c, 0x3c, 0x18, 0x18, 0x00, /* \/ */ /* 20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* */ /* 21 */ 0x18, 0x3c, 0x3c, 0x18, 0x18, 0x00, 0x18, 0x00, /* ! */ /* 22 */ 0x6C, 0x6C, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00, /* " */ /* 23 */ 0x36, 0x36, 0x7F, 0x36, 0x7F, 0x36, 0x36, 0x00, /* # */ /* 24 */ 0x0C, 0x3F, 0x68, 0x3E, 0x0B, 0x7E, 0x18, 0x00, /* $ */ /* 25 */ 0x60, 0x66, 0x0C, 0x18, 0x30, 0x66, 0x06, 0x00, /* % */ /* 26 */ 0x38, 0x6C, 0x6C, 0x38, 0x6D, 0x66, 0x3B, 0x00, /* & */ /* 27 */ 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, /* ' */ /* 28 */ 0x0C, 0x18, 0x30, 0x30, 0x30, 0x18, 0x0C, 0x00, /* ( */ /* 29 */ 0x30, 0x18, 0x0C, 0x0C, 0x0C, 0x18, 0x30, 0x00, /* ) */ /* 2A */ 0x00, 0x18, 0x7E, 0x3C, 0x7E, 0x18, 0x00, 0x00, /* * */ /* 2B */ 0x00, 0x18, 0x18, 0x7E, 0x18, 0x18, 0x00, 0x00, /* + */ /* 2C */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, /* , */ /* 2D */ 0x00, 0x00, 0x00, 0x7E, 0x00, 0x00, 0x00, 0x00, /* - */ /* 2E */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, /* . 
*/ /* 2F */ 0x00, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x00, 0x00, /* / */ /* 30 */ 0x3C, 0x66, 0x6E, 0x7E, 0x76, 0x66, 0x3C, 0x00, /* 0 */ /* 31 */ 0x18, 0x38, 0x18, 0x18, 0x18, 0x18, 0x7E, 0x00, /* 1 */ /* 32 */ 0x3C, 0x66, 0x06, 0x0C, 0x18, 0x30, 0x7E, 0x00, /* 2 */ /* 33 */ 0x3C, 0x66, 0x06, 0x1C, 0x06, 0x66, 0x3C, 0x00, /* 3 */ /* 34 */ 0x0C, 0x1C, 0x3C, 0x6C, 0x7E, 0x0C, 0x0C, 0x00, /* 4 */ /* 35 */ 0x7E, 0x60, 0x7C, 0x06, 0x06, 0x66, 0x3C, 0x00, /* 5 */ /* 36 */ 0x1C, 0x30, 0x60, 0x7C, 0x66, 0x66, 0x3C, 0x00, /* 6 */ /* 37 */ 0x7E, 0x06, 0x0C, 0x18, 0x30, 0x30, 0x30, 0x00, /* 7 */ /* 38 */ 0x3C, 0x66, 0x66, 0x3C, 0x66, 0x66, 0x3C, 0x00, /* 8 */ /* 39 */ 0x3C, 0x66, 0x66, 0x3E, 0x06, 0x0C, 0x38, 0x00, /* 9 */ /* 3A */ 0x00, 0x00, 0x18, 0x18, 0x00, 0x18, 0x18, 0x00, /* : */ /* 3B */ 0x00, 0x00, 0x18, 0x18, 0x00, 0x18, 0x18, 0x30, /* ; */ /* 3C */ 0x0C, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0C, 0x00, /* < */ /* 3D */ 0x00, 0x00, 0x7E, 0x00, 0x7E, 0x00, 0x00, 0x00, /* = */ /* 3E */ 0x30, 0x18, 0x0C, 0x06, 0x0C, 0x18, 0x30, 0x00, /* > */ /* 3F */ 0x3C, 0x66, 0x0C, 0x18, 0x18, 0x00, 0x18, 0x00, /* ? */ /* 40 */ 0x3C, 0x66, 0x6E, 0x6A, 0x6E, 0x60, 0x3C, 0x00, /* @ */ /* 41 */ 0x3C, 0x66, 0x66, 0x7E, 0x66, 0x66, 0x66, 0x00, /* A */ /* 42 */ 0x7C, 0x66, 0x66, 0x7C, 0x66, 0x66, 0x7C, 0x00, /* B */ /* 43 */ 0x3C, 0x66, 0x60, 0x60, 0x60, 0x66, 0x3C, 0x00, /* C */ /* 44 */ 0x78, 0x6C, 0x66, 0x66, 0x66, 0x6C, 0x78, 0x00, /* D */ /* 45 */ 0x7E, 0x60, 0x60, 0x7C, 0x60, 0x60, 0x7E, 0x00, /* E */ /* 46 */ 0x7E, 0x60, 0x60, 0x7C, 0x60, 0x60, 0x60, 0x00, /* F */ /* 47 */ 0x3C, 0x66, 0x60, 0x6E, 0x66, 0x66, 0x3C, 0x00, /* G */ /* 48 */ 0x66, 0x66, 0x66, 0x7E, 0x66, 0x66, 0x66, 0x00, /* H */ /* 49 */ 0x7E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x7E, 0x00, /* I */ /* 4A */ 0x3E, 0x0C, 0x0C, 0x0C, 0x0C, 0x6C, 0x38, 0x00, /* J */ /* 4B */ 0x66, 0x6C, 0x78, 0x70, 0x78, 0x6C, 0x66, 0x00, /* K */ /* 4C */ 0x60, 0x60, 0x60, 0x60, 0x60, 0x60, 0x7E, 0x00, /* L */ /* 4D */ 0x63, 0x77, 0x7F, 0x6B, 0x6B, 0x63, 0x63, 0x00, /* M */ /* 4E */ 0x66, 0x66, 0x76, 0x7E, 0x6E, 0x66, 0x66, 0x00, /* N */ /* 4F */ 0x3C, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3C, 0x00, /* O */ /* 50 */ 0x7C, 0x66, 0x66, 0x7C, 0x60, 0x60, 0x60, 0x00, /* P */ /* 51 */ 0x3C, 0x66, 0x66, 0x66, 0x6A, 0x6C, 0x36, 0x00, /* Q */ /* 52 */ 0x7C, 0x66, 0x66, 0x7C, 0x6C, 0x66, 0x66, 0x00, /* R */ /* 53 */ 0x3C, 0x66, 0x60, 0x3C, 0x06, 0x66, 0x3C, 0x00, /* S */ /* 54 */ 0x7E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, /* T */ /* 55 */ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3C, 0x00, /* U */ /* 56 */ 0x66, 0x66, 0x66, 0x66, 0x66, 0x3C, 0x18, 0x00, /* V */ /* 57 */ 0x63, 0x63, 0x6B, 0x6B, 0x7F, 0x77, 0x63, 0x00, /* W */ /* 58 */ 0x66, 0x66, 0x3C, 0x18, 0x3C, 0x66, 0x66, 0x00, /* X */ /* 59 */ 0x66, 0x66, 0x66, 0x3C, 0x18, 0x18, 0x18, 0x00, /* Y */ /* 5A */ 0x7E, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x7E, 0x00, /* Z */ /* 5B */ 0x7C, 0x60, 0x60, 0x60, 0x60, 0x60, 0x7C, 0x00, /* [ */ /* 5C */ 0x00, 0x60, 0x30, 0x18, 0x0C, 0x06, 0x00, 0x00, /* \ */ /* 5D */ 0x3E, 0x06, 0x06, 0x06, 0x06, 0x06, 0x3E, 0x00, /* ] */ /* 5E */ 0x3C, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^ */ /* 5F */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, /* _ */ /* 60 */ 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ` */ /* 61 */ 0x00, 0x00, 0x3C, 0x06, 0x3E, 0x66, 0x3E, 0x00, /* a */ /* 62 */ 0x60, 0x60, 0x7C, 0x66, 0x66, 0x66, 0x7C, 0x00, /* b */ /* 63 */ 0x00, 0x00, 0x3C, 0x66, 0x60, 0x66, 0x3C, 0x00, /* c */ /* 64 */ 0x06, 0x06, 0x3E, 0x66, 0x66, 0x66, 0x3E, 0x00, /* d */ /* 65 */ 0x00, 0x00, 0x3C, 0x66, 0x7E, 
0x60, 0x3C, 0x00, /* e */ /* 66 */ 0x1C, 0x30, 0x30, 0x7C, 0x30, 0x30, 0x30, 0x00, /* f */ /* 67 */ 0x00, 0x00, 0x3E, 0x66, 0x66, 0x3E, 0x06, 0x3C, /* g */ /* 68 */ 0x60, 0x60, 0x7C, 0x66, 0x66, 0x66, 0x66, 0x00, /* h */ /* 69 */ 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x3C, 0x00, /* i */ /* 6A */ 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x70, /* j */ /* 6B */ 0x60, 0x60, 0x66, 0x6C, 0x78, 0x6C, 0x66, 0x00, /* k */ /* 6C */ 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3C, 0x00, /* l */ /* 6D */ 0x00, 0x00, 0x36, 0x7F, 0x6B, 0x6B, 0x63, 0x00, /* m */ /* 6E */ 0x00, 0x00, 0x7C, 0x66, 0x66, 0x66, 0x66, 0x00, /* n */ /* 6F */ 0x00, 0x00, 0x3C, 0x66, 0x66, 0x66, 0x3C, 0x00, /* o */ /* 70 */ 0x00, 0x00, 0x7C, 0x66, 0x66, 0x7C, 0x60, 0x60, /* p */ /* 71 */ 0x00, 0x00, 0x3E, 0x66, 0x66, 0x3E, 0x06, 0x07, /* q */ /* 72 */ 0x00, 0x00, 0x6C, 0x76, 0x60, 0x60, 0x60, 0x00, /* r */ /* 73 */ 0x00, 0x00, 0x3E, 0x60, 0x3C, 0x06, 0x7C, 0x00, /* s */ /* 74 */ 0x30, 0x30, 0x7C, 0x30, 0x30, 0x30, 0x1C, 0x00, /* t */ /* 75 */ 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x3E, 0x00, /* u */ /* 76 */ 0x00, 0x00, 0x66, 0x66, 0x66, 0x3C, 0x18, 0x00, /* v */ /* 77 */ 0x00, 0x00, 0x63, 0x6B, 0x6B, 0x7F, 0x36, 0x00, /* w */ /* 78 */ 0x00, 0x00, 0x66, 0x3C, 0x18, 0x3C, 0x66, 0x00, /* x */ /* 79 */ 0x00, 0x00, 0x66, 0x66, 0x66, 0x3E, 0x06, 0x3C, /* y */ /* 7A */ 0x00, 0x00, 0x7E, 0x0C, 0x18, 0x30, 0x7E, 0x00, /* z */ /* 7B */ 0x0C, 0x18, 0x18, 0x70, 0x18, 0x18, 0x0C, 0x00, /* { */ /* 7C */ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, /* | */ /* 7D */ 0x30, 0x18, 0x18, 0x0E, 0x18, 0x18, 0x30, 0x00, /* } */ /* 7E */ 0x31, 0x6B, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, /* ~ */ /* 7F */ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /*  */ /* 80 */ 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x30, 0x60, /* 81 */ 0x66, 0x00, 0x66, 0x66, 0x66, 0x66, 0x3e, 0x00, /* 82 */ 0x0c, 0x18, 0x3c, 0x66, 0x7e, 0x60, 0x3c, 0x00, /* 83 */ 0x18, 0x66, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00, /* 84 */ 0x66, 0x00, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00, /* 85 */ 0x30, 0x18, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00, /* 86 */ 0x3c, 0x66, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00, /* 87 */ 0x00, 0x00, 0x3c, 0x66, 0x60, 0x66, 0x3c, 0x60, /* 88 */ 0x3c, 0x66, 0x3c, 0x66, 0x7e, 0x60, 0x3c, 0x00, /* 89 */ 0x66, 0x00, 0x3c, 0x66, 0x7e, 0x60, 0x3c, 0x00, /* 8A */ 0x30, 0x18, 0x3c, 0x66, 0x7e, 0x60, 0x3c, 0x00, /* 8B */ 0x66, 0x00, 0x00, 0x38, 0x18, 0x18, 0x3c, 0x00, /* 8C */ 0x3c, 0x66, 0x00, 0x38, 0x18, 0x18, 0x3c, 0x00, /* 8D */ 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x3c, 0x00, /* 8E */ 0x66, 0x66, 0x00, 0x3c, 0x66, 0x7e, 0x66, 0x00, /* 8F */ 0x18, 0x66, 0x00, 0x3c, 0x66, 0x7e, 0x66, 0x00, /* 90 */ 0x0c, 0x18, 0x7e, 0x60, 0x7c, 0x60, 0x7e, 0x00, /* 91 */ 0x00, 0x00, 0x3f, 0x0d, 0x3f, 0x6c, 0x3f, 0x00, /* 92 */ 0x3f, 0x66, 0x66, 0x7f, 0x66, 0x66, 0x67, 0x00, /* 93 */ 0x3c, 0x66, 0x00, 0x3c, 0x66, 0x66, 0x3c, 0x00, /* 94 */ 0x66, 0x00, 0x00, 0x3c, 0x66, 0x66, 0x3c, 0x00, /* 95 */ 0x30, 0x18, 0x00, 0x3c, 0x66, 0x66, 0x3c, 0x00, /* 96 */ 0x3c, 0x66, 0x00, 0x66, 0x66, 0x66, 0x3e, 0x00, /* 97 */ 0x30, 0x18, 0x00, 0x66, 0x66, 0x66, 0x3e, 0x00, /* 98 */ 0x66, 0x00, 0x66, 0x66, 0x66, 0x3e, 0x06, 0x3c, /* 99 */ 0x66, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x3c, 0x00, /* 9A */ 0x66, 0x00, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x00, /* 9B */ 0x08, 0x3e, 0x6b, 0x68, 0x6b, 0x3e, 0x08, 0x00, /* 9C */ 0x1c, 0x36, 0x30, 0x7c, 0x30, 0x30, 0x7e, 0x00, /* 9D */ 0x66, 0x3c, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x00, /* 9E */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 9F */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* A0 */ 0x0c, 
0x18, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00, /* A1 */ 0x0c, 0x18, 0x00, 0x38, 0x18, 0x18, 0x3c, 0x00, /* A2 */ 0x0c, 0x18, 0x00, 0x3c, 0x66, 0x66, 0x3c, 0x00, /* A3 */ 0x0c, 0x18, 0x00, 0x66, 0x66, 0x66, 0x3e, 0x00, /* A4 */ 0x36, 0x6c, 0x00, 0x7c, 0x66, 0x66, 0x66, 0x00, /* A5 */ 0x36, 0x6c, 0x00, 0x66, 0x76, 0x6e, 0x66, 0x00, /* A6 */ 0x1c, 0x06, 0x1e, 0x36, 0x1e, 0x00, 0x3e, 0x00, /* A7 */ 0x1c, 0x36, 0x36, 0x36, 0x1c, 0x00, 0x3e, 0x00, /* A8 */ 0x18, 0x00, 0x18, 0x18, 0x30, 0x66, 0x3c, 0x00, /* A9 */ 0x7e, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* AA */ 0x7e, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* AB */ 0x40, 0xc0, 0x40, 0x4f, 0x41, 0x0f, 0x08, 0x0f, /* AC */ 0x40, 0xc0, 0x40, 0x48, 0x48, 0x0a, 0x0f, 0x02, /* AD */ 0x18, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, /* AE */ 0x00, 0x33, 0x66, 0xcc, 0xcc, 0x66, 0x33, 0x00, /* AF */ 0x00, 0xcc, 0x66, 0x33, 0x33, 0x66, 0xcc, 0x00, /* B0 */ 0x22, 0x88, 0x22, 0x88, 0x22, 0x88, 0x22, 0x88, /* B1 */ 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, /* B2 */ 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, /* B3 */ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, /* B4 */ 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18, /* B5 */ 0x18, 0x18, 0xf8, 0x18, 0xf8, 0x18, 0x18, 0x18, /* B6 */ 0x66, 0x66, 0x66, 0xe6, 0x66, 0x66, 0x66, 0x66, /* B7 */ 0x00, 0x00, 0x00, 0xfe, 0x66, 0x66, 0x66, 0x66, /* B8 */ 0x00, 0x00, 0xf8, 0x18, 0xf8, 0x18, 0x18, 0x18, /* B9 */ 0x66, 0x66, 0xe6, 0x06, 0xe6, 0x66, 0x66, 0x66, /* BA */ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* BB */ 0x00, 0x00, 0xfe, 0x06, 0xe6, 0x66, 0x66, 0x66, /* BC */ 0x66, 0x66, 0xe6, 0x06, 0xfe, 0x00, 0x00, 0x00, /* BD */ 0x66, 0x66, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, /* BE */ 0x18, 0x18, 0xf8, 0x18, 0xf8, 0x00, 0x00, 0x00, /* BF */ 0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, /* C0 */ 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, /* C1 */ 0x18, 0x18, 0x18, 0xff, 0x00, 0x00, 0x00, 0x00, /* C2 */ 0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, /* C3 */ 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18, /* C4 */ 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, /* C5 */ 0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, /* C6 */ 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, /* C7 */ 0x66, 0x66, 0x66, 0x67, 0x66, 0x66, 0x66, 0x66, /* C8 */ 0x66, 0x66, 0x67, 0x60, 0x7f, 0x00, 0x00, 0x00, /* C9 */ 0x00, 0x00, 0x7f, 0x60, 0x67, 0x66, 0x66, 0x66, /* CA */ 0x66, 0x66, 0xe7, 0x00, 0xff, 0x00, 0x00, 0x00, /* CB */ 0x00, 0x00, 0xff, 0x00, 0xe7, 0x66, 0x66, 0x66, /* CC */ 0x66, 0x66, 0x67, 0x60, 0x67, 0x66, 0x66, 0x66, /* CD */ 0x00, 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, /* CE */ 0x66, 0x66, 0xe7, 0x00, 0xe7, 0x66, 0x66, 0x66, /* CF */ 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, /* D0 */ 0x66, 0x66, 0x66, 0xff, 0x00, 0x00, 0x00, 0x00, /* D1 */ 0x00, 0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, /* D2 */ 0x00, 0x00, 0x00, 0xff, 0x66, 0x66, 0x66, 0x66, /* D3 */ 0x66, 0x66, 0x66, 0x7f, 0x00, 0x00, 0x00, 0x00, /* D4 */ 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, /* D5 */ 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, /* D6 */ 0x00, 0x00, 0x00, 0x7f, 0x66, 0x66, 0x66, 0x66, /* D7 */ 0x66, 0x66, 0x66, 0xff, 0x66, 0x66, 0x66, 0x66, /* D8 */ 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, /* D9 */ 0x18, 0x18, 0x18, 0xf8, 0x00, 0x00, 0x00, 0x00, /* DA */ 0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, /* DB */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* DC */ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, /* DD */ 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, /* DE */ 0x0f, 0x0f, 0x0f, 0x0f, 
0x0f, 0x0f, 0x0f, 0x0f, /* DF */ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, /* E0 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* E1 */ 0x3c, 0x66, 0x66, 0x6c, 0x66, 0x66, 0x6c, 0xc0, /* E2 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* E3 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* E4 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* E5 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* E6 */ 0x00, 0x00, 0x33, 0x33, 0x33, 0x33, 0x3e, 0x60, /* E7 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* E8 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* E9 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* EA */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* EB */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* EC */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* ED */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* EE */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* EF */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* F0 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* F1 */ 0x18, 0x18, 0x7e, 0x18, 0x18, 0x00, 0x7e, 0x00, /* F2 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* F3 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* F4 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* F5 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* F6 */ 0x00, 0x18, 0x00, 0xff, 0x00, 0x18, 0x00, 0x00, /* F7 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* F8 */ 0x3c, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, /* F9 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* FA */ 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, /* FB */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* FC */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* FD */ 0x38, 0x04, 0x18, 0x20, 0x3c, 0x00, 0x00, 0x00, /* FE */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* FF */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }; const struct font_desc font_acorn_8x8 = { .idx = ACORN8x8_IDX, .name = "Acorn8x8", .width = 8, .height = 8, .charcount = 256, .data = acorndata_8x8.data, #ifdef CONFIG_ARCH_ACORN .pref = 20, #else .pref = 0, #endif };
linux-master
lib/fonts/font_acorn_8x8.c
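font_acorn_8x8 registers itself with `.pref = 20` only when CONFIG_ARCH_ACORN is set, which biases the kernel's default-font selection toward it on Acorn hardware; callers can also request it explicitly by the `.name` string it registers. Below is a minimal sketch of such a by-name lookup using `find_font()` declared in <linux/font.h>; `pick_acorn_font` is a hypothetical wrapper and the fallback behaviour is only illustrative, not how any particular driver does it.

#include <linux/font.h>
#include <linux/printk.h>

/* Sketch: look up the Acorn font by the .name it registers above. */
static const struct font_desc *pick_acorn_font(void)
{
	const struct font_desc *font = find_font("Acorn8x8");

	if (!font) {
		/* Not compiled into this kernel; caller should fall back. */
		pr_info("Acorn8x8 font not available\n");
		return NULL;
	}

	/* font->data holds 256 glyphs of 8 rows, one byte per 8-pixel row. */
	return font;
}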
// SPDX-License-Identifier: GPL-2.0 /**********************************************/ /* */ /* Font file generated by cpi2fnt */ /* */ /**********************************************/ #include <linux/font.h> #define FONTDATAMAX 2048 static const struct font_data fontdata_8x8 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 1 0x01 '^A' */ 0x7e, /* 01111110 */ 0x81, /* 10000001 */ 0xa5, /* 10100101 */ 0x81, /* 10000001 */ 0xbd, /* 10111101 */ 0x99, /* 10011001 */ 0x81, /* 10000001 */ 0x7e, /* 01111110 */ /* 2 0x02 '^B' */ 0x7e, /* 01111110 */ 0xff, /* 11111111 */ 0xdb, /* 11011011 */ 0xff, /* 11111111 */ 0xc3, /* 11000011 */ 0xe7, /* 11100111 */ 0xff, /* 11111111 */ 0x7e, /* 01111110 */ /* 3 0x03 '^C' */ 0x6c, /* 01101100 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0x7c, /* 01111100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ /* 4 0x04 '^D' */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x7c, /* 01111100 */ 0xfe, /* 11111110 */ 0x7c, /* 01111100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ /* 5 0x05 '^E' */ 0x38, /* 00111000 */ 0x7c, /* 01111100 */ 0x38, /* 00111000 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xd6, /* 11010110 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ /* 6 0x06 '^F' */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x7c, /* 01111100 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0x7c, /* 01111100 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ /* 7 0x07 '^G' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 8 0x08 '^H' */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xe7, /* 11100111 */ 0xc3, /* 11000011 */ 0xc3, /* 11000011 */ 0xe7, /* 11100111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ /* 9 0x09 '^I' */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x42, /* 01000010 */ 0x42, /* 01000010 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 10 0x0a '^J' */ 0xff, /* 11111111 */ 0xc3, /* 11000011 */ 0x99, /* 10011001 */ 0xbd, /* 10111101 */ 0xbd, /* 10111101 */ 0x99, /* 10011001 */ 0xc3, /* 11000011 */ 0xff, /* 11111111 */ /* 11 0x0b '^K' */ 0x0f, /* 00001111 */ 0x07, /* 00000111 */ 0x0f, /* 00001111 */ 0x7d, /* 01111101 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x78, /* 01111000 */ /* 12 0x0c '^L' */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ /* 13 0x0d '^M' */ 0x3f, /* 00111111 */ 0x33, /* 00110011 */ 0x3f, /* 00111111 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x70, /* 01110000 */ 0xf0, /* 11110000 */ 0xe0, /* 11100000 */ /* 14 0x0e '^N' */ 0x7f, /* 01111111 */ 0x63, /* 01100011 */ 0x7f, /* 01111111 */ 0x63, /* 01100011 */ 0x63, /* 01100011 */ 0x67, /* 01100111 */ 0xe6, /* 11100110 */ 0xc0, /* 11000000 */ /* 15 0x0f '^O' */ 0x18, /* 00011000 */ 0xdb, /* 11011011 */ 0x3c, /* 00111100 */ 0xe7, /* 11100111 */ 0xe7, /* 11100111 */ 0x3c, /* 00111100 */ 0xdb, /* 11011011 */ 0x18, /* 00011000 */ /* 16 0x10 '^P' */ 0x80, /* 10000000 */ 0xe0, /* 11100000 */ 0xf8, /* 11111000 */ 0xfe, /* 11111110 */ 0xf8, /* 11111000 */ 0xe0, /* 11100000 */ 0x80, /* 10000000 */ 0x00, /* 00000000 */ /* 17 0x11 '^Q' */ 0x02, /* 00000010 */ 0x0e, /* 00001110 */ 0x3e, /* 00111110 
*/ 0xfe, /* 11111110 */ 0x3e, /* 00111110 */ 0x0e, /* 00001110 */ 0x02, /* 00000010 */ 0x00, /* 00000000 */ /* 18 0x12 '^R' */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ /* 19 0x13 '^S' */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ /* 20 0x14 '^T' */ 0x7f, /* 01111111 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0x7b, /* 01111011 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x00, /* 00000000 */ /* 21 0x15 '^U' */ 0x3e, /* 00111110 */ 0x61, /* 01100001 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x86, /* 10000110 */ 0x7c, /* 01111100 */ /* 22 0x16 '^V' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ /* 23 0x17 '^W' */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ /* 24 0x18 '^X' */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 25 0x19 '^Y' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 26 0x1a '^Z' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0xfe, /* 11111110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 27 0x1b '^[' */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xfe, /* 11111110 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 28 0x1c '^\' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 29 0x1d '^]' */ 0x00, /* 00000000 */ 0x24, /* 00100100 */ 0x66, /* 01100110 */ 0xff, /* 11111111 */ 0x66, /* 01100110 */ 0x24, /* 00100100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 30 0x1e '^^' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 31 0x1f '^_' */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0x7e, /* 01111110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 32 0x20 ' ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 33 0x21 '!' 
*/ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 34 0x22 '"' */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x24, /* 00100100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 35 0x23 '#' */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ /* 36 0x24 '$' */ 0x18, /* 00011000 */ 0x3e, /* 00111110 */ 0x60, /* 01100000 */ 0x3c, /* 00111100 */ 0x06, /* 00000110 */ 0x7c, /* 01111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 37 0x25 '%' */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xcc, /* 11001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x66, /* 01100110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 38 0x26 '&' */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 39 0x27 ''' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 40 0x28 '(' */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x00, /* 00000000 */ /* 41 0x29 ')' */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ /* 42 0x2a '*' */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0xff, /* 11111111 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 43 0x2b '+' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 44 0x2c ',' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ /* 45 0x2d '-' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 46 0x2e '.' 
*/ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 47 0x2f '/' */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0x80, /* 10000000 */ 0x00, /* 00000000 */ /* 48 0x30 '0' */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xd6, /* 11010110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ /* 49 0x31 '1' */ 0x18, /* 00011000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ /* 50 0x32 '2' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0x06, /* 00000110 */ 0x1c, /* 00011100 */ 0x30, /* 00110000 */ 0x66, /* 01100110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ /* 51 0x33 '3' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0x06, /* 00000110 */ 0x3c, /* 00111100 */ 0x06, /* 00000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 52 0x34 '4' */ 0x1c, /* 00011100 */ 0x3c, /* 00111100 */ 0x6c, /* 01101100 */ 0xcc, /* 11001100 */ 0xfe, /* 11111110 */ 0x0c, /* 00001100 */ 0x1e, /* 00011110 */ 0x00, /* 00000000 */ /* 53 0x35 '5' */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xfc, /* 11111100 */ 0x06, /* 00000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 54 0x36 '6' */ 0x38, /* 00111000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0xfc, /* 11111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 55 0x37 '7' */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ /* 56 0x38 '8' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 57 0x39 '9' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7e, /* 01111110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ /* 58 0x3a ':' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 59 0x3b ';' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ /* 60 0x3c '<' */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x06, /* 00000110 */ 0x00, /* 00000000 */ /* 61 0x3d '=' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 62 0x3e '>' */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ /* 63 0x3f '?' 
*/ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 64 0x40 '@' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xde, /* 11011110 */ 0xde, /* 11011110 */ 0xde, /* 11011110 */ 0xc0, /* 11000000 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ /* 65 0x41 'A' */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 66 0x42 'B' */ 0xfc, /* 11111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0xfc, /* 11111100 */ 0x00, /* 00000000 */ /* 67 0x43 'C' */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 68 0x44 'D' */ 0xf8, /* 11111000 */ 0x6c, /* 01101100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x6c, /* 01101100 */ 0xf8, /* 11111000 */ 0x00, /* 00000000 */ /* 69 0x45 'E' */ 0xfe, /* 11111110 */ 0x62, /* 01100010 */ 0x68, /* 01101000 */ 0x78, /* 01111000 */ 0x68, /* 01101000 */ 0x62, /* 01100010 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ /* 70 0x46 'F' */ 0xfe, /* 11111110 */ 0x62, /* 01100010 */ 0x68, /* 01101000 */ 0x78, /* 01111000 */ 0x68, /* 01101000 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x00, /* 00000000 */ /* 71 0x47 'G' */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xce, /* 11001110 */ 0x66, /* 01100110 */ 0x3a, /* 00111010 */ 0x00, /* 00000000 */ /* 72 0x48 'H' */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 73 0x49 'I' */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 74 0x4a 'J' */ 0x1e, /* 00011110 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ /* 75 0x4b 'K' */ 0xe6, /* 11100110 */ 0x66, /* 01100110 */ 0x6c, /* 01101100 */ 0x78, /* 01111000 */ 0x6c, /* 01101100 */ 0x66, /* 01100110 */ 0xe6, /* 11100110 */ 0x00, /* 00000000 */ /* 76 0x4c 'L' */ 0xf0, /* 11110000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x62, /* 01100010 */ 0x66, /* 01100110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ /* 77 0x4d 'M' */ 0xc6, /* 11000110 */ 0xee, /* 11101110 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xd6, /* 11010110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 78 0x4e 'N' */ 0xc6, /* 11000110 */ 0xe6, /* 11100110 */ 0xf6, /* 11110110 */ 0xde, /* 11011110 */ 0xce, /* 11001110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 79 0x4f 'O' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 80 0x50 'P' */ 0xfc, /* 11111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x00, /* 00000000 */ /* 81 0x51 'Q' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xce, /* 11001110 */ 0x7c, /* 01111100 */ 0x0e, /* 00001110 */ /* 82 0x52 'R' */ 0xfc, /* 11111100 
*/ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x6c, /* 01101100 */ 0x66, /* 01100110 */ 0xe6, /* 11100110 */ 0x00, /* 00000000 */ /* 83 0x53 'S' */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 84 0x54 'T' */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x5a, /* 01011010 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 85 0x55 'U' */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 86 0x56 'V' */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ /* 87 0x57 'W' */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ /* 88 0x58 'X' */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 89 0x59 'Y' */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 90 0x5a 'Z' */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0x8c, /* 10001100 */ 0x18, /* 00011000 */ 0x32, /* 00110010 */ 0x66, /* 01100110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ /* 91 0x5b '[' */ 0x3c, /* 00111100 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 92 0x5c '\' */ 0xc0, /* 11000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x06, /* 00000110 */ 0x02, /* 00000010 */ 0x00, /* 00000000 */ /* 93 0x5d ']' */ 0x3c, /* 00111100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 94 0x5e '^' */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 95 0x5f '_' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ /* 96 0x60 '`' */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 97 0x61 'a' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 98 0x62 'b' */ 0xe0, /* 11100000 */ 0x60, /* 01100000 */ 0x7c, /* 01111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ /* 99 0x63 'c' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 100 0x64 'd' */ 0x1c, /* 00011100 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 101 0x65 'e' */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 102 0x66 'f' */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x60, /* 01100000 */ 0xf8, /* 11111000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x00, /* 00000000 */ /* 103 0x67 'g' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x7c, /* 01111100 */ 0x0c, /* 00001100 */ 0xf8, /* 11111000 */ /* 104 0x68 'h' */ 0xe0, /* 11100000 */ 0x60, /* 01100000 */ 0x6c, /* 01101100 */ 0x76, /* 01110110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0xe6, /* 11100110 */ 0x00, /* 00000000 */ /* 105 0x69 'i' */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 106 0x6a 'j' */ 0x06, /* 00000110 */ 0x00, /* 00000000 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ /* 107 0x6b 'k' */ 0xe0, /* 11100000 */ 0x60, /* 01100000 */ 0x66, /* 01100110 */ 0x6c, /* 01101100 */ 0x78, /* 01111000 */ 0x6c, /* 01101100 */ 0xe6, /* 11100110 */ 0x00, /* 00000000 */ /* 108 0x6c 'l' */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 109 0x6d 'm' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xec, /* 11101100 */ 0xfe, /* 11111110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0x00, /* 00000000 */ /* 110 0x6e 'n' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xdc, /* 11011100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ /* 111 0x6f 'o' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 112 0x70 'p' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xdc, /* 11011100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ /* 113 0x71 'q' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x7c, /* 01111100 */ 0x0c, /* 00001100 */ 0x1e, /* 00011110 */ /* 114 0x72 'r' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xdc, /* 11011100 */ 0x76, /* 01110110 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x00, /* 00000000 */ /* 115 0x73 's' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x06, /* 00000110 */ 0xfc, /* 11111100 */ 0x00, /* 00000000 */ /* 116 0x74 't' */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0xfc, /* 11111100 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x36, /* 00110110 */ 0x1c, /* 00011100 */ 0x00, /* 00000000 */ /* 117 0x75 'u' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 118 0x76 'v' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ /* 119 0x77 'w' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ /* 120 0x78 'x' */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 121 0x79 'y' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7e, /* 01111110 */ 0x06, /* 00000110 */ 0xfc, /* 11111100 */ /* 122 0x7a 'z' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x4c, /* 01001100 */ 0x18, /* 00011000 */ 0x32, /* 00110010 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ /* 123 0x7b '{' */ 0x0e, /* 00001110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x70, /* 01110000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x0e, /* 00001110 */ 0x00, /* 00000000 */ /* 124 0x7c '|' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 125 0x7d '}' */ 0x70, /* 01110000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x0e, /* 00001110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ /* 126 0x7e '~' */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 127 0x7f '' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ /* 128 0x80 'Ç' */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x0c, /* 00001100 */ 0x78, /* 01111000 */ /* 129 0x81 'ü' */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 130 0x82 'é' */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 131 0x83 'â' */ 0x7c, /* 01111100 */ 0x82, /* 10000010 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 132 0x84 'ä' */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 133 0x85 'à' */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 134 0x86 'å' */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 135 0x87 'ç' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0x7e, /* 01111110 */ 0x0c, /* 00001100 */ 0x38, /* 00111000 */ /* 136 0x88 'ê' */ 0x7c, /* 01111100 */ 0x82, /* 10000010 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 137 0x89 'ë' */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 138 0x8a 'è' */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 139 0x8b 'ï' */ 0x66, /* 01100110 */ 0x00, /* 
00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 140 0x8c 'î' */ 0x7c, /* 01111100 */ 0x82, /* 10000010 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 141 0x8d 'ì' */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 142 0x8e 'Ä' */ 0xc6, /* 11000110 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 143 0x8f 'Å' */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 144 0x90 'É' */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xf8, /* 11111000 */ 0xc0, /* 11000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ /* 145 0x91 'æ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0xd8, /* 11011000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ /* 146 0x92 'Æ' */ 0x3e, /* 00111110 */ 0x6c, /* 01101100 */ 0xcc, /* 11001100 */ 0xfe, /* 11111110 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xce, /* 11001110 */ 0x00, /* 00000000 */ /* 147 0x93 'ô' */ 0x7c, /* 01111100 */ 0x82, /* 10000010 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 148 0x94 'ö' */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 149 0x95 'ò' */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 150 0x96 'û' */ 0x78, /* 01111000 */ 0x84, /* 10000100 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 151 0x97 'ù' */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 152 0x98 'ÿ' */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7e, /* 01111110 */ 0x06, /* 00000110 */ 0xfc, /* 11111100 */ /* 153 0x99 'Ö' */ 0xc6, /* 11000110 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ /* 154 0x9a 'Ü' */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 155 0x9b '¢' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 156 0x9c '£' */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x64, /* 01100100 */ 0xf0, /* 11110000 */ 0x60, /* 01100000 */ 0x66, /* 01100110 */ 0xfc, /* 11111100 */ 0x00, /* 00000000 */ /* 157 0x9d '¥' */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 158 0x9e '₧' */ 0xf8, /* 11111000 */ 0xcc, /* 
11001100 */ 0xcc, /* 11001100 */ 0xfa, /* 11111010 */ 0xc6, /* 11000110 */ 0xcf, /* 11001111 */ 0xc6, /* 11000110 */ 0xc7, /* 11000111 */ /* 159 0x9f 'ƒ' */ 0x0e, /* 00001110 */ 0x1b, /* 00011011 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0xd8, /* 11011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ /* 160 0xa0 'á' */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 161 0xa1 'í' */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 162 0xa2 'ó' */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ /* 163 0xa3 'ú' */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 164 0xa4 'ñ' */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0xdc, /* 11011100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ /* 165 0xa5 'Ñ' */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0xe6, /* 11100110 */ 0xf6, /* 11110110 */ 0xde, /* 11011110 */ 0xce, /* 11001110 */ 0x00, /* 00000000 */ /* 166 0xa6 'ª' */ 0x3c, /* 00111100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x3e, /* 00111110 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 167 0xa7 'º' */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 168 0xa8 '¿' */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x63, /* 01100011 */ 0x3e, /* 00111110 */ 0x00, /* 00000000 */ /* 169 0xa9 '⌐' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 170 0xaa '¬' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 171 0xab '½' */ 0x63, /* 01100011 */ 0xe6, /* 11100110 */ 0x6c, /* 01101100 */ 0x7e, /* 01111110 */ 0x33, /* 00110011 */ 0x66, /* 01100110 */ 0xcc, /* 11001100 */ 0x0f, /* 00001111 */ /* 172 0xac '¼' */ 0x63, /* 01100011 */ 0xe6, /* 11100110 */ 0x6c, /* 01101100 */ 0x7a, /* 01111010 */ 0x36, /* 00110110 */ 0x6a, /* 01101010 */ 0xdf, /* 11011111 */ 0x06, /* 00000110 */ /* 173 0xad '¡' */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 174 0xae '«' */ 0x00, /* 00000000 */ 0x33, /* 00110011 */ 0x66, /* 01100110 */ 0xcc, /* 11001100 */ 0x66, /* 01100110 */ 0x33, /* 00110011 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 175 0xaf '»' */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0x66, /* 01100110 */ 0x33, /* 00110011 */ 0x66, /* 01100110 */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 176 0xb0 '░' */ 0x22, /* 00100010 */ 0x88, /* 10001000 */ 0x22, /* 00100010 */ 0x88, /* 10001000 */ 0x22, /* 00100010 */ 0x88, /* 10001000 */ 0x22, /* 00100010 */ 0x88, /* 10001000 */ /* 177 0xb1 '▒' */ 0x55, /* 01010101 */ 0xaa, /* 
10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ /* 178 0xb2 '▓' */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ /* 179 0xb3 '│' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 180 0xb4 '┤' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 181 0xb5 '╡' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 182 0xb6 '╢' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf6, /* 11110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 183 0xb7 '╖' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 184 0xb8 '╕' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 185 0xb9 '╣' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf6, /* 11110110 */ 0x06, /* 00000110 */ 0xf6, /* 11110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 186 0xba '║' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 187 0xbb '╗' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x06, /* 00000110 */ 0xf6, /* 11110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 188 0xbc '╝' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf6, /* 11110110 */ 0x06, /* 00000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 189 0xbd '╜' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 190 0xbe '╛' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 191 0xbf '┐' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 192 0xc0 '└' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 193 0xc1 '┴' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 194 0xc2 '┬' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 195 0xc3 '├' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 196 0xc4 '─' */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 197 0xc5 '┼' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 198 0xc6 '╞' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 199 0xc7 '╟' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x37, /* 00110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 200 0xc8 '╚' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x37, /* 00110111 */ 0x30, /* 00110000 */ 0x3f, /* 00111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 201 0xc9 '╔' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3f, /* 00111111 */ 0x30, /* 00110000 */ 0x37, /* 00110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 202 0xca '╩' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf7, /* 11110111 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 203 0xcb '╦' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0xf7, /* 11110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 204 0xcc '╠' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x37, /* 00110111 */ 0x30, /* 00110000 */ 0x37, /* 00110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 205 0xcd '═' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 206 0xce '╬' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf7, /* 11110111 */ 0x00, /* 00000000 */ 0xf7, /* 11110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 207 0xcf '╧' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 208 0xd0 '╨' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 209 0xd1 '╤' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 210 0xd2 '╥' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 211 0xd3 '╙' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x3f, /* 00111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 212 0xd4 '╘' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 213 0xd5 '╒' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 214 0xd6 '╓' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3f, /* 00111111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 215 0xd7 '╫' */ 0x36, /* 00110110 */ 0x36, /* 
00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xff, /* 11111111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 216 0xd8 '╪' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 217 0xd9 '┘' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 218 0xda '┌' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 219 0xdb '█' */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ /* 220 0xdc '▄' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ /* 221 0xdd '▌' */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ /* 222 0xde '▐' */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ /* 223 0xdf '▀' */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 224 0xe0 'α' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0xc8, /* 11001000 */ 0xdc, /* 11011100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ /* 225 0xe1 'ß' */ 0x78, /* 01111000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xd8, /* 11011000 */ 0xcc, /* 11001100 */ 0xc6, /* 11000110 */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ /* 226 0xe2 'Γ' */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ /* 227 0xe3 'π' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ /* 228 0xe4 'Σ' */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ /* 229 0xe5 'σ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ /* 230 0xe6 'µ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0xc0, /* 11000000 */ /* 231 0xe7 'τ' */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ /* 232 0xe8 'Φ' */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ /* 233 0xe9 'Θ' */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ /* 234 0xea 'Ω' */ 0x38, /* 00111000 */ 0x6c, /* 
01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0xee, /* 11101110 */ 0x00, /* 00000000 */ /* 235 0xeb 'δ' */ 0x0e, /* 00001110 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x3e, /* 00111110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 236 0xec '∞' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 237 0xed 'φ' */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x7e, /* 01111110 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0x7e, /* 01111110 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ /* 238 0xee 'ε' */ 0x1e, /* 00011110 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x7e, /* 01111110 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x1e, /* 00011110 */ 0x00, /* 00000000 */ /* 239 0xef '∩' */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ /* 240 0xf0 '≡' */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 241 0xf1 '±' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ /* 242 0xf2 '≥' */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ /* 243 0xf3 '≤' */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ /* 244 0xf4 '⌠' */ 0x0e, /* 00001110 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 245 0xf5 '⌡' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0x70, /* 01110000 */ /* 246 0xf6 '÷' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 247 0xf7 '≈' */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 248 0xf8 '°' */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 249 0xf9 '·' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 250 0xfa '•' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 251 0xfb '√' */ 0x0f, /* 00001111 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0xec, /* 11101100 */ 0x6c, /* 01101100 */ 0x3c, /* 00111100 */ 0x1c, /* 00011100 */ /* 252 0xfc 'ⁿ' */ 0x6c, /* 01101100 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 253 0xfd '²' */ 0x78, /* 01111000 */ 0x0c, /* 
00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 254 0xfe '■' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 255 0xff ' ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ } }; const struct font_desc font_vga_8x8 = { .idx = VGA8x8_IDX, .name = "VGA8x8", .width = 8, .height = 8, .charcount = 256, .data = fontdata_8x8.data, .pref = 0, };
linux-master
lib/fonts/font_8x8.c
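The 8x8 table in the record above stores one byte per pixel row, eight rows per glyph, with the most significant bit as the leftmost pixel (the inline binary comments make this visible), so glyph c occupies bytes c*8 .. c*8+7 of the data array. The following standalone sketch is illustrative only, not kernel code: it copies the eight bytes of entry 254 (0xfe, '■') from the table and prints them as ASCII art to show how the bitmap maps to pixels; the function and variable names are hypothetical.

/*
 * Minimal userspace sketch (assumption: not part of the kernel sources):
 * render one 8x8 glyph from a bitmap table laid out like fontdata_8x8,
 * i.e. one byte per row, MSB = leftmost pixel.
 */
#include <stdio.h>
#include <stdint.h>

static void render_glyph_8x8(const uint8_t rows[8])
{
	for (int y = 0; y < 8; y++) {
		for (int x = 0; x < 8; x++) {
			/* Bit 7 of each row byte is the leftmost pixel. */
			putchar((rows[y] >> (7 - x)) & 1 ? '#' : '.');
		}
		putchar('\n');
	}
}

int main(void)
{
	/* Bytes copied from entry 254 (0xfe '■') of the table above. */
	const uint8_t glyph[8] = {
		0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00
	};

	render_glyph_8x8(glyph);
	return 0;
}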
// SPDX-License-Identifier: GPL-2.0 #include <linux/font.h> #include <linux/module.h> #define FONTDATAMAX 16384 static const struct font_data fontdata_ter16x32 = { { 0, 0, FONTDATAMAX, 0 }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf8, 0x7f, 0xfc, 0xf0, 0x1e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xee, 0xee, 0xee, 0xee, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xef, 0xee, 0xe7, 0xce, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xf0, 0x1e, 0x7f, 0xfc, 0x3f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 1 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf8, 0x7f, 0xfc, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xe3, 0x8e, 0xe3, 0x8e, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xe0, 0x0e, 0xf0, 0x1e, 0xf8, 0x3e, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0x7f, 0xfc, 0x3f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 2 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x3c, 0xfc, 0x7e, 0xfe, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0x7f, 0xfc, 0x7f, 0xfc, 0x3f, 0xf8, 0x1f, 0xf0, 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 3 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0, 0x0f, 0xe0, 0x1f, 0xf0, 0x3f, 0xf8, 0x7f, 0xfc, 0xff, 0xfe, 0xff, 0xfe, 0x7f, 0xfc, 0x3f, 0xf8, 0x1f, 0xf0, 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 4 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xc0, 0x0f, 0xe0, 0x0f, 0xe0, 0x0f, 0xe0, 0x0f, 0xe0, 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x3b, 0xb8, 0x7f, 0xfc, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0x7f, 0xfc, 0x3b, 0xb8, 0x03, 0x80, 0x03, 0x80, 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 5 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x07, 0xc0, 0x0f, 0xe0, 0x1f, 0xf0, 0x3f, 0xf8, 0x7f, 0xfc, 0x7f, 0xfc, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0x7b, 0xbc, 0x3b, 0xb8, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xc0, 0x07, 0xe0, 0x0f, 0xf0, 0x0f, 0xf0, 0x0f, 0xf0, 0x0f, 0xf0, 0x07, 0xe0, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0x3f, 0xf8, 0x1f, 0xf0, 0x0f, 0xf0, 0x0f, 0xf0, 0x0f, 0xf0, 0x0f, 0xf8, 
0x1f, 0xfc, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 8 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xc0, 0x07, 0xe0, 0x0e, 0x70, 0x0c, 0x30, 0x0c, 0x30, 0x0e, 0x70, 0x07, 0xe0, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 9 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0x3f, 0xf8, 0x1f, 0xf1, 0x8f, 0xf3, 0xcf, 0xf3, 0xcf, 0xf1, 0x8f, 0xf8, 0x1f, 0xfc, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xfe, 0x03, 0xfe, 0x00, 0x1e, 0x00, 0x3e, 0x00, 0x76, 0x00, 0xe6, 0x01, 0xc6, 0x03, 0x86, 0x3f, 0xe0, 0x7f, 0xf0, 0xf0, 0x78, 0xe0, 0x38, 0xe0, 0x38, 0xe0, 0x38, 0xe0, 0x38, 0xe0, 0x38, 0xe0, 0x38, 0xf0, 0x78, 0x7f, 0xf0, 0x3f, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 11 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x7f, 0xfc, 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 12 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xfc, 0x3f, 0xfc, 0x38, 0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x3f, 0xfc, 0x3f, 0xfc, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0xf8, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 13 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfe, 0x7f, 0xfe, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x7f, 0xfe, 0x7f, 0xfe, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x3e, 0xf0, 0x3c, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 14 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x73, 0x9c, 0x73, 0x9c, 0x3b, 0xb8, 0x1f, 0xf0, 0x0f, 0xe0, 0x7c, 0x7c, 0x7c, 0x7c, 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8, 0x73, 0x9c, 0x73, 0x9c, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0xf0, 0x00, 0xfc, 0x00, 0xff, 0x00, 0xff, 0xc0, 0xff, 0xf0, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xf0, 0xff, 0xc0, 0xff, 0x00, 0xfc, 0x00, 0xf0, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 16 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x0f, 0x00, 0x3f, 0x00, 0xff, 0x03, 0xff, 0x0f, 0xff, 0x3f, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0x0f, 0xff, 0x03, 0xff, 
0x00, 0xff, 0x00, 0x3f, 0x00, 0x0f, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0, 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8, 0x73, 0x9c, 0x63, 0x8c, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x63, 0x8c, 0x73, 0x9c, 0x3b, 0xb8, 0x1f, 0xf0, 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 18 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 19 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xfe, 0x3f, 0xfe, 0x79, 0xce, 0x71, 0xce, 0x71, 0xce, 0x71, 0xce, 0x71, 0xce, 0x71, 0xce, 0x79, 0xce, 0x3f, 0xce, 0x1f, 0xce, 0x01, 0xce, 0x01, 0xce, 0x01, 0xce, 0x01, 0xce, 0x01, 0xce, 0x01, 0xce, 0x01, 0xce, 0x01, 0xce, 0x01, 0xce, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xe0, 0x0f, 0xf0, 0x1e, 0x78, 0x1c, 0x38, 0x1c, 0x00, 0x1e, 0x00, 0x0f, 0xc0, 0x0f, 0xe0, 0x1c, 0xf0, 0x1c, 0x78, 0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x38, 0x1e, 0x38, 0x0f, 0x38, 0x07, 0xf0, 0x03, 0xf0, 0x00, 0x78, 0x00, 0x38, 0x1c, 0x38, 0x1e, 0x78, 0x0f, 0xf0, 0x07, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 21 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 22 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0, 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8, 0x73, 0x9c, 0x63, 0x8c, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x63, 0x8c, 0x73, 0x9c, 0x3b, 0xb8, 0x1f, 0xf0, 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0, 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8, 0x73, 0x9c, 0x63, 0x8c, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 24 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x63, 0x8c, 0x73, 0x9c, 0x3b, 0xb8, 0x1f, 0xf0, 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 25 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0xe0, 0x00, 0x70, 0x00, 0x38, 0x00, 0x1c, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00, 0xe0, 0x00, 
0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 26 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x07, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x38, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 28 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x60, 0x0e, 0x70, 0x1c, 0x38, 0x38, 0x1c, 0x70, 0x0e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x70, 0x0e, 0x38, 0x1c, 0x1c, 0x38, 0x0e, 0x70, 0x06, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 29 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x80, 0x01, 0x80, 0x03, 0xc0, 0x03, 0xc0, 0x07, 0xe0, 0x07, 0xe0, 0x0f, 0xf0, 0x0f, 0xf0, 0x1f, 0xf8, 0x1f, 0xf8, 0x3f, 0xfc, 0x3f, 0xfc, 0x7f, 0xfe, 0x7f, 0xfe, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x7f, 0xfe, 0x7f, 0xfe, 0x3f, 0xfc, 0x3f, 0xfc, 0x1f, 0xf8, 0x1f, 0xf8, 0x0f, 0xf0, 0x0f, 0xf0, 0x07, 0xe0, 0x07, 0xe0, 0x03, 0xc0, 0x03, 0xc0, 0x01, 0x80, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 31 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 32 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 33 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 34 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x7f, 0xfc, 0x7f, 0xfc, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x7f, 0xfc, 0x7f, 0xfc, 0x1c, 0x70, 0x1c, 0x70, 
0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 35 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x1f, 0xf0, 0x3f, 0xf8, 0x7b, 0xbc, 0x73, 0x9c, 0x73, 0x80, 0x73, 0x80, 0x73, 0x80, 0x7b, 0x80, 0x3f, 0xf0, 0x1f, 0xf8, 0x03, 0xbc, 0x03, 0x9c, 0x03, 0x9c, 0x03, 0x9c, 0x73, 0x9c, 0x7b, 0xbc, 0x3f, 0xf8, 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 36 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x1c, 0x3f, 0x9c, 0x3b, 0xb8, 0x3b, 0xb8, 0x3f, 0xf0, 0x1f, 0x70, 0x00, 0xe0, 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0, 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x07, 0x00, 0x0e, 0xf8, 0x0f, 0xfc, 0x1d, 0xdc, 0x1d, 0xdc, 0x39, 0xfc, 0x38, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xc0, 0x1f, 0xe0, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x1c, 0xe0, 0x0f, 0xc0, 0x0f, 0x80, 0x1f, 0xce, 0x38, 0xee, 0x70, 0x7c, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x78, 0x7c, 0x3f, 0xee, 0x1f, 0xce, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 38 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 39 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x07, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x07, 0x00, 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x01, 0xc0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 41 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x38, 0x1c, 0x70, 0x0e, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x7f, 0xfc, 0x7f, 0xfc, 0x03, 0x80, 0x07, 0xc0, 0x0e, 0xe0, 0x1c, 0x70, 0x38, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 42 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x7f, 0xfc, 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 43 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 
0x80, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 44 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 45 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 46 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x38, 0x00, 0x70, 0x00, 0x70, 0x00, 0xe0, 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0, 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x07, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c, 0x70, 0x7c, 0x70, 0xfc, 0x71, 0xdc, 0x73, 0x9c, 0x77, 0x1c, 0x7e, 0x1c, 0x7c, 0x1c, 0x78, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 48 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0x80, 0x0f, 0x80, 0x1f, 0x80, 0x1f, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x1f, 0xf0, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 49 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x3c, 0x0f, 0xf8, 0x0f, 0xf8, 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 51 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x3c, 0x00, 0x7c, 0x00, 0xfc, 0x01, 0xdc, 0x03, 0x9c, 0x07, 0x1c, 0x0e, 0x1c, 0x1c, 0x1c, 0x38, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 52 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xf0, 0x7f, 0xf8, 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x3f, 0xf8, 0x1f, 0xf0, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 53 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf8, 0x3f, 0xf8, 0x78, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 54 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x38, 0x00, 0x38, 0x00, 0x70, 0x00, 0x70, 0x00, 0xe0, 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 55 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 56 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x3c, 0x3f, 0xf8, 0x3f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 58 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 59 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x38, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0, 0x00, 0x70, 0x00, 0x38, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 61 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0, 0x00, 0x70, 0x00, 0x38, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 62 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 63 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf8, 0x3f, 0xfc, 0x78, 0x0e, 0x70, 0x06, 0x71, 0xfe, 0x73, 0xfe, 0x77, 0x8e, 0x77, 0x0e, 0x77, 0x0e, 0x77, 0x0e, 0x77, 0x0e, 0x77, 0x0e, 0x77, 0x0e, 0x77, 0x9e, 0x73, 0xfe, 0x71, 0xf6, 0x70, 0x00, 0x78, 0x00, 0x3f, 0xfe, 0x1f, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 64 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 65 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x38, 0x7f, 0xf0, 0x7f, 0xf0, 0x70, 0x38, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c, 0x7f, 0xf8, 0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 66 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xc0, 0x7f, 0xf0, 0x70, 0x78, 0x70, 0x38, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x38, 0x70, 0x78, 0x7f, 0xf0, 0x7f, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 68 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xe0, 0x7f, 0xe0, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 69 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xe0, 0x7f, 0xe0, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x71, 0xfc, 0x71, 0xfc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 71 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 72 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xe0, 0x0f, 0xe0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 73 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0xfe, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x78, 0x78, 0x3f, 0xf0, 0x1f, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 74 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x0c, 0x70, 0x1c, 0x70, 0x38, 0x70, 0x70, 0x70, 0xe0, 0x71, 0xc0, 0x73, 0x80, 0x77, 0x00, 0x7e, 0x00, 0x7c, 0x00, 0x7c, 0x00, 0x7e, 0x00, 0x77, 0x00, 0x73, 0x80, 0x71, 0xc0, 0x70, 0xe0, 0x70, 0x70, 0x70, 0x38, 0x70, 0x1c, 0x70, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 75 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 76 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x0e, 0x70, 0x0e, 0x78, 0x1e, 0x7c, 0x3e, 0x7e, 0x7e, 0x7e, 0x7e, 0x77, 0xee, 0x73, 0xce, 0x73, 0xce, 0x71, 0x8e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x7c, 0x1c, 0x7e, 0x1c, 0x77, 0x1c, 0x73, 0x9c, 0x71, 0xdc, 0x70, 0xfc, 0x70, 0x7c, 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 78 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 79 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c, 0x7f, 0xf8, 0x7f, 0xf0, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, /* 80 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x73, 0x9c, 0x79, 0xfc, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x38, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 81 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c, 0x7f, 0xf8, 0x7f, 0xf0, 0x7e, 0x00, 0x77, 0x00, 0x73, 0x80, 0x71, 0xc0, 0x70, 0xe0, 0x70, 0x70, 0x70, 0x38, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 82 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x00, 0x3f, 0xf0, 0x1f, 0xf8, 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 83 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 84 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 85 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0, 0x07, 0xc0, 0x07, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 86 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x71, 0x8e, 0x73, 0xce, 0x73, 0xce, 0x77, 0xee, 0x7e, 0x7e, 0x7e, 0x7e, 0x7c, 0x3e, 0x78, 0x1e, 0x70, 0x0e, 0x70, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x38, 0x38, 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70, 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0, 0x07, 0xc0, 0x07, 0xc0, 0x07, 0xc0, 0x0e, 0xe0, 0x0e, 0xe0, 0x1c, 0x70, 0x1c, 0x70, 0x38, 0x38, 0x38, 0x38, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 88 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x38, 0x38, 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70, 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0, 0x07, 0xc0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
/* 89 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 90 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xf0, 0x0f, 0xf0, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0f, 0xf0, 0x0f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 91 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x38, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x07, 0x00, 0x07, 0x00, 0x03, 0x80, 0x03, 0x80, 0x01, 0xc0, 0x01, 0xc0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0x70, 0x00, 0x70, 0x00, 0x38, 0x00, 0x38, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 92 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xf0, 0x0f, 0xf0, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x0f, 0xf0, 0x0f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 93 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0, 0x0e, 0xe0, 0x1c, 0x70, 0x38, 0x38, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 94 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 95 */ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 96 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c, 0x7f, 0xf8, 0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 98 */ 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 99 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 100 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 101 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x01, 0xfe, 0x03, 0xc0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x3f, 0xf8, 0x3f, 0xf8, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 102 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x3c, 0x3f, 0xf8, 0x3f, 0xf0, 0x00, 0x00, /* 103 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 104 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 105 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x00, 0xf8, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x38, 0x38, 0x38, 0x38, 0x3c, 0x78, 0x1f, 0xf0, 0x0f, 0xe0, 0x00, 0x00, /* 106 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x1c, 0x38, 0x38, 0x38, 0x70, 0x38, 0xe0, 0x39, 0xc0, 0x3b, 0x80, 0x3f, 0x00, 0x3f, 0x00, 0x3b, 0x80, 0x39, 0xc0, 0x38, 0xe0, 0x38, 0x70, 0x38, 0x38, 0x38, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 107 */ 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 108 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xf0, 0x7f, 0xf8, 0x73, 0xbc, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 109 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 110 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 111 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c, 0x7f, 0xf8, 0x7f, 0xf0, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, /* 112 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x00, /* 113 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0xfc, 0x77, 0xfc, 0x7e, 0x00, 0x7c, 0x00, 0x78, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 114 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x00, 0x70, 0x00, 0x78, 0x00, 0x3f, 0xf0, 0x1f, 0xf8, 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 115 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x7f, 0xf0, 0x7f, 0xf0, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x80, 0x03, 0xfc, 0x01, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 116 */ 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 117 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0, 0x07, 0xc0, 0x07, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 118 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x7b, 0xbc, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 119 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x38, 0x38, 0x1c, 0x70, 0x0e, 0xe0, 0x07, 0xc0, 0x07, 0xc0, 0x0e, 0xe0, 0x1c, 0x70, 0x38, 0x38, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 120 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x3c, 0x3f, 0xf8, 0x3f, 0xf0, 0x00, 0x00, /* 121 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x38, 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 122 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xf0, 0x03, 0xf0, 0x07, 0x80, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x3e, 0x00, 0x3e, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x80, 0x03, 0xf0, 0x01, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 123 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 124 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x3f, 0x00, 0x07, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x01, 0xf0, 0x01, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x07, 0x80, 0x3f, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 125 */ 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x1e, 0x1c, 0x3f, 0x1c, 0x77, 0x9c, 0x73, 0xdc, 0x71, 0xf8, 0x70, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 126 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0, 0x0f, 0xe0, 0x1e, 0xf0, 0x3c, 0x78, 0x78, 0x3c, 0xf0, 0x1e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xff, 0xfe, 0xff, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 127 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x00, 0x00, /* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 129 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 130 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0, 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 131 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 132 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 133 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xc0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0, 0x00, 0x00, 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 134 */ 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x00, 0x00, /* 135 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0, 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 136 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 137 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 138 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 139 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0, 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 140 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 141 */ 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 142 */ 0x00, 0x00, 0x07, 0xc0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 143 */ 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0, 
0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xe0, 0x7f, 0xe0, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 144 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xf8, 0x7f, 0xfc, 0x03, 0x9e, 0x03, 0x8e, 0x03, 0x8e, 0x3f, 0x8e, 0x7f, 0xfe, 0xf3, 0xfe, 0xe3, 0x80, 0xe3, 0x80, 0xe3, 0x80, 0xf3, 0xce, 0x7f, 0xfe, 0x3e, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 145 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xfe, 0x7f, 0xfe, 0xf1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xff, 0xfe, 0xff, 0xfe, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xfe, 0xe1, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 146 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0, 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 147 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 148 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 149 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0, 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 150 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 151 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x3c, 0x3f, 0xf8, 0x3f, 0xf0, 0x00, 0x00, /* 152 */ 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 
0x70, 0x1c, 0x70, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 153 */ 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 154 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x1f, 0xf0, 0x3f, 0xf8, 0x7b, 0xbc, 0x73, 0x9c, 0x73, 0x80, 0x73, 0x80, 0x73, 0x80, 0x73, 0x80, 0x73, 0x80, 0x73, 0x80, 0x73, 0x9c, 0x7b, 0xbc, 0x3f, 0xf8, 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 155 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xe0, 0x0f, 0xf0, 0x1e, 0x78, 0x1c, 0x38, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x7f, 0xe0, 0x7f, 0xe0, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x1c, 0x1c, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 156 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x38, 0x38, 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70, 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0, 0x07, 0xc0, 0x03, 0x80, 0x03, 0x80, 0x3f, 0xf8, 0x3f, 0xf8, 0x03, 0x80, 0x03, 0x80, 0x3f, 0xf8, 0x3f, 0xf8, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 157 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, 0x80, 0xe3, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe3, 0xc0, 0xff, 0xf0, 0xff, 0x70, 0xe0, 0x70, 0xe3, 0xfe, 0xe3, 0xfe, 0xe0, 0x70, 0xe0, 0x70, 0xe0, 0x70, 0xe0, 0x70, 0xe0, 0x7e, 0xe0, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 158 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xf8, 0x03, 0xfc, 0x03, 0x9c, 0x03, 0x9c, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x1f, 0xf0, 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x73, 0x80, 0x73, 0x80, 0x7f, 0x80, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 159 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 160 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 161 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 162 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 163 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x38, 0x3b, 0xb8, 0x3b, 0xb8, 0x39, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 164 */ 0x00, 0x00, 0x1f, 0x38, 0x3b, 0xb8, 0x3b, 0xb8, 0x39, 0xf0, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c, 0x7c, 0x1c, 0x7e, 0x1c, 0x77, 0x1c, 0x73, 0x9c, 0x71, 0xdc, 0x70, 0xfc, 0x70, 0x7c, 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 165 */ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xe0, 0x1f, 0xf0, 0x00, 0x38, 0x00, 0x38, 0x0f, 0xf8, 0x1f, 0xf8, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x1f, 0xf8, 0x0f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf8, 0x3f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 166 */ 0x00, 0x00, 0x00, 0x00, 0x0f, 0xe0, 0x1f, 0xf0, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x1f, 0xf0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf8, 0x3f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 167 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 169 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 170 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x1c, 0x00, 0x3c, 0x00, 0x7c, 0x06, 0x1c, 0x0e, 0x1c, 0x1c, 0x1c, 0x38, 0x1c, 0x70, 0x1c, 0xe0, 0x1d, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x0e, 0xfc, 0x1d, 0xfe, 0x39, 0xce, 0x71, 0xce, 0x60, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00, 0xfe, 0x01, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 171 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x1e, 0x00, 0x3e, 0x00, 0x0e, 0x00, 0x0e, 0x06, 0x0e, 0x0e, 0x0e, 0x1c, 0x0e, 0x38, 0x0e, 0x70, 0x00, 0xe0, 0x01, 0xce, 0x03, 0x9e, 0x07, 0x3e, 0x0e, 0x7e, 0x1c, 0xee, 0x39, 0xce, 0x73, 0xfe, 0x63, 0xfe, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 172 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 173 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xce, 0x03, 0x9c, 0x07, 0x38, 0x0e, 0x70, 0x1c, 0xe0, 0x39, 0xc0, 0x73, 0x80, 0x73, 0x80, 0x39, 0xc0, 0x1c, 0xe0, 0x0e, 0x70, 0x07, 0x38, 0x03, 0x9c, 0x01, 0xce, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 174 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x80, 0x39, 0xc0, 0x1c, 0xe0, 0x0e, 0x70, 0x07, 0x38, 0x03, 0x9c, 0x01, 0xce, 0x01, 0xce, 0x03, 0x9c, 0x07, 0x38, 0x0e, 0x70, 0x1c, 0xe0, 0x39, 0xc0, 0x73, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 175 */ 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, /* 176 */ 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55, /* 177 */ 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa, /* 178 */ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 179 */ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 
0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0xff, 0x80, 0xff, 0x80, 0xff, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 180 */ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0xff, 0x80, 0xff, 0x80, 0xff, 0x80, 0x03, 0x80, 0x03, 0x80, 0xff, 0x80, 0xff, 0x80, 0xff, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 181 */ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 182 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xf0, 0xff, 0xf0, 0xff, 0xf0, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 183 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x80, 0xff, 0x80, 0xff, 0x80, 0x03, 0x80, 0x03, 0x80, 0xff, 0x80, 0xff, 0x80, 0xff, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 184 */ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0x00, 0x70, 0x00, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 185 */ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 186 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xf0, 0xff, 0xf0, 0xff, 0xf0, 0x00, 0x70, 0x00, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 187 */ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0x00, 0x70, 0x00, 0x70, 0xff, 0xf0, 0xff, 0xf0, 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 188 */ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 
0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0xff, 0xf0, 0xff, 0xf0, 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 189 */ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0xff, 0x80, 0xff, 0x80, 0xff, 0x80, 0x03, 0x80, 0x03, 0x80, 0xff, 0x80, 0xff, 0x80, 0xff, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 190 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x80, 0xff, 0x80, 0xff, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 191 */ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0xff, 0x03, 0xff, 0x03, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 192 */ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 193 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 194 */ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0xff, 0x03, 0xff, 0x03, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 195 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 196 */ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 197 */ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 
0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0xff, 0x03, 0xff, 0x03, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0xff, 0x03, 0xff, 0x03, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 198 */ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 199 */ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x00, 0x0e, 0x00, 0x0f, 0xff, 0x0f, 0xff, 0x0f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 200 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x0f, 0xff, 0x0f, 0xff, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 201 */ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 202 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 203 */ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 204 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 205 */ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 206 */ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 
0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 207 */ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 208 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 209 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 210 */ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0f, 0xff, 0x0f, 0xff, 0x0f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 211 */ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0xff, 0x03, 0xff, 0x03, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0xff, 0x03, 0xff, 0x03, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 212 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xff, 0x03, 0xff, 0x03, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0xff, 0x03, 0xff, 0x03, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 213 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x0f, 0xff, 0x0f, 0xff, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 214 */ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 215 */ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 
0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x80, 0x03, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 216 */ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0xff, 0x80, 0xff, 0x80, 0xff, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 217 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xff, 0x03, 0xff, 0x03, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 218 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 219 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 220 */ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, /* 221 */ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, /* 222 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 223 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xee, 0x3f, 0xfe, 0x78, 0x3c, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x78, 0x3c, 0x3f, 0xfe, 0x1f, 0xee, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 224 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xe0, 0x7f, 
0xf0, 0x70, 0x78, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x70, 0x7f, 0xf0, 0x7f, 0xf0, 0x70, 0x38, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c, 0x7f, 0xf8, 0x7f, 0xf0, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, /* 225 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 226 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 227 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x00, 0x38, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 228 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xfe, 0x3f, 0xfe, 0x78, 0xf0, 0x70, 0x78, 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 229 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c, 0x70, 0x7c, 0x70, 0xfc, 0x7f, 0xdc, 0x7f, 0x9c, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, /* 230 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0xc0, 0x01, 0xf8, 0x00, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 231 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x1f, 0xf0, 0x3f, 0xf8, 0x7b, 0xbc, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x7b, 0xbc, 0x3f, 0xf8, 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 232 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x77, 0xdc, 0x77, 0xdc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 233 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8, 
0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 234 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x1f, 0xf0, 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x0f, 0xe0, 0x1f, 0xf0, 0x38, 0x38, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 235 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0xf8, 0x7f, 0xfc, 0xe7, 0xce, 0xe3, 0x8e, 0xe3, 0x8e, 0xe3, 0x8e, 0xe3, 0x8e, 0xe7, 0xce, 0x7f, 0xfc, 0x3e, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 236 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x38, 0x0f, 0xf0, 0x1f, 0xf8, 0x38, 0xfc, 0x38, 0xfc, 0x39, 0xdc, 0x39, 0xdc, 0x3b, 0x9c, 0x3b, 0x9c, 0x3f, 0x1c, 0x3f, 0x1c, 0x1f, 0xf8, 0x0f, 0xf0, 0x1c, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 237 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xfc, 0x1f, 0xfc, 0x3c, 0x00, 0x38, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00, 0x38, 0x00, 0x3c, 0x00, 0x1f, 0xfc, 0x07, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 238 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xc0, 0x1f, 0xf0, 0x3c, 0x78, 0x38, 0x38, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 239 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 240 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x7f, 0xfc, 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 241 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0, 0x00, 0x70, 0x00, 0x38, 0x00, 0x38, 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xfc, 0x3f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 242 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x70, 0x00, 
0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0, 0x00, 0x70, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xfc, 0x3f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 243 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xf8, 0x03, 0xfc, 0x03, 0x9c, 0x03, 0x9c, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 244 */ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x73, 0x80, 0x73, 0x80, 0x7f, 0x80, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 245 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 246 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x1c, 0x7f, 0xbc, 0x7b, 0xfc, 0x70, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x1c, 0x7f, 0xbc, 0x7b, 0xfc, 0x70, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 247 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xe0, 0x1f, 0xf0, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1f, 0xf0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 248 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xc0, 0x07, 0xe0, 0x07, 0xe0, 0x07, 0xe0, 0x07, 0xe0, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 249 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 250 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x3e, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x78, 0x38, 0x3c, 0x38, 0x1e, 0x38, 0x0f, 0x38, 0x07, 0xb8, 0x03, 0xf8, 0x01, 0xf8, 0x00, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 251 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xe0, 0x1f, 0xf0, 0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x38, 
0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 252 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xe0, 0x1f, 0xf0, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x1f, 0xf0, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 253 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 254 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 255 */ } }; const struct font_desc font_ter_16x32 = { .idx = TER16x32_IDX, .name = "TER16x32", .width = 16, .height = 32, .charcount = 256, .data = fontdata_ter16x32.data, #ifdef __sparc__ .pref = 5, #else .pref = -1, #endif };
linux-master
lib/fonts/font_ter16x32.c
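The font_desc initializer that closes font_ter16x32.c above is how each built-in console font is exposed to the rest of the kernel: lib/fonts/fonts.c gathers these descriptors into a table and hands them out by name. Below is a minimal sketch of consuming that descriptor, assuming only the find_font() helper declared in <linux/font.h>; the function name show_ter16x32() and the pr_info() output are illustrative, not part of the font files in this listing, and the field types are assumed to match the font_desc layout used by these sources.

#include <linux/font.h>
#include <linux/printk.h>

/*
 * Illustrative sketch only: fetch the TER16x32 descriptor defined in
 * lib/fonts/font_ter16x32.c by its .name field and report its geometry.
 * find_font() walks the table of built-in fonts and returns NULL when
 * the requested font was not compiled in.
 */
static void show_ter16x32(void)
{
	const struct font_desc *font = find_font("TER16x32");

	if (!font) {
		pr_info("TER16x32 is not built in\n");
		return;
	}

	pr_info("%s: %ux%u pixels, %u glyphs, pref %d\n",
		font->name, font->width, font->height,
		font->charcount, font->pref);
}

The .pref field, raised to 5 under #ifdef __sparc__ in the initializer above and left at -1 elsewhere, is what the font core's default-font selection uses to bias its choice toward this font on sparc.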
// SPDX-License-Identifier: GPL-2.0 /**********************************************/ /* */ /* Font file generated by cpi2fnt */ /* */ /**********************************************/ #include <linux/font.h> #include <linux/module.h> #define FONTDATAMAX 4096 static const struct font_data fontdata_8x16 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 1 0x01 '^A' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x81, /* 10000001 */ 0xa5, /* 10100101 */ 0x81, /* 10000001 */ 0x81, /* 10000001 */ 0xbd, /* 10111101 */ 0x99, /* 10011001 */ 0x81, /* 10000001 */ 0x81, /* 10000001 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 2 0x02 '^B' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xff, /* 11111111 */ 0xdb, /* 11011011 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xc3, /* 11000011 */ 0xe7, /* 11100111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 3 0x03 '^C' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x6c, /* 01101100 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0x7c, /* 01111100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 4 0x04 '^D' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x7c, /* 01111100 */ 0xfe, /* 11111110 */ 0x7c, /* 01111100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 5 0x05 '^E' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0xe7, /* 11100111 */ 0xe7, /* 11100111 */ 0xe7, /* 11100111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 6 0x06 '^F' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 7 0x07 '^G' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 8 0x08 '^H' */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xe7, /* 11100111 */ 0xc3, /* 11000011 */ 0xc3, /* 11000011 */ 0xe7, /* 11100111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ /* 9 0x09 '^I' */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x42, /* 01000010 */ 0x42, /* 01000010 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 10 0x0a '^J' */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xc3, /* 11000011 */ 0x99, /* 10011001 */ 0xbd, /* 10111101 */ 0xbd, /* 10111101 */ 0x99, /* 10011001 */ 0xc3, /* 11000011 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ /* 11 0x0b '^K' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1e, /* 00011110 */ 0x0e, /* 00001110 */ 0x1a, /* 00011010 */ 0x32, /* 00110010 */ 0x78, /* 01111000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 12 0x0c '^L' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 13 0x0d '^M' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3f, /* 00111111 */ 0x33, /* 00110011 */ 0x3f, /* 00111111 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x70, /* 01110000 */ 0xf0, /* 11110000 */ 0xe0, /* 11100000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 14 0x0e '^N' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7f, /* 01111111 */ 0x63, /* 01100011 */ 0x7f, /* 01111111 */ 0x63, /* 01100011 */ 0x63, /* 01100011 */ 0x63, /* 01100011 */ 0x63, /* 01100011 */ 0x67, /* 01100111 */ 0xe7, /* 11100111 */ 0xe6, /* 11100110 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 15 0x0f '^O' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xdb, /* 11011011 */ 0x3c, /* 00111100 */ 0xe7, /* 11100111 */ 0x3c, /* 00111100 */ 0xdb, /* 11011011 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 16 0x10 '^P' */ 0x00, /* 00000000 */ 0x80, /* 10000000 */ 0xc0, /* 11000000 */ 0xe0, /* 11100000 */ 0xf0, /* 11110000 */ 0xf8, /* 11111000 */ 0xfe, /* 11111110 */ 0xf8, /* 11111000 */ 0xf0, /* 11110000 */ 0xe0, /* 11100000 */ 0xc0, /* 11000000 */ 0x80, /* 10000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 17 0x11 '^Q' */ 0x00, /* 00000000 */ 0x02, /* 00000010 */ 0x06, /* 00000110 */ 0x0e, /* 00001110 */ 0x1e, /* 00011110 */ 0x3e, /* 00111110 */ 0xfe, /* 11111110 */ 0x3e, /* 00111110 */ 0x1e, /* 00011110 */ 0x0e, /* 00001110 */ 0x06, /* 00000110 */ 0x02, /* 00000010 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 18 0x12 '^R' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 19 0x13 '^S' */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 20 0x14 '^T' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7f, /* 01111111 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0x7b, /* 01111011 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 21 0x15 '^U' */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0x60, /* 01100000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x0c, /* 00001100 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 22 0x16 '^V' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 23 0x17 '^W' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 24 0x18 '^X' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 25 0x19 '^Y' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 26 0x1a '^Z' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0xfe, /* 11111110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 27 0x1b '^[' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xfe, /* 11111110 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 28 0x1c '^\' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 29 0x1d '^]' */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x28, /* 00101000 */ 0x6c, /* 01101100 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x28, /* 00101000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 30 0x1e '^^' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x7c, /* 01111100 */ 0x7c, /* 01111100 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 31 0x1f '^_' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0x7c, /* 01111100 */ 0x7c, /* 01111100 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 32 0x20 ' ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 33 0x21 '!' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 34 0x22 '"' */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x24, /* 00100100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 35 0x23 '#' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 36 0x24 '$' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc2, /* 11000010 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x86, /* 10000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 37 0x25 '%' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc2, /* 11000010 */ 0xc6, /* 11000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc6, /* 11000110 */ 0x86, /* 10000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 38 0x26 '&' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 39 0x27 ''' */ 0x00, /* 00000000 */ 
0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 40 0x28 '(' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 41 0x29 ')' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 42 0x2a '*' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0xff, /* 11111111 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 43 0x2b '+' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 44 0x2c ',' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 45 0x2d '-' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 46 0x2e '.' 
*/ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 47 0x2f '/' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x02, /* 00000010 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0x80, /* 10000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 48 0x30 '0' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 49 0x31 '1' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x38, /* 00111000 */ 0x78, /* 01111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 50 0x32 '2' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 51 0x33 '3' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x3c, /* 00111100 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 52 0x34 '4' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x0c, /* 00001100 */ 0x1c, /* 00011100 */ 0x3c, /* 00111100 */ 0x6c, /* 01101100 */ 0xcc, /* 11001100 */ 0xfe, /* 11111110 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x1e, /* 00011110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 53 0x35 '5' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xfc, /* 11111100 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 54 0x36 '6' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xfc, /* 11111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 55 0x37 '7' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 56 0x38 '8' */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 57 0x39 '9' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7e, /* 01111110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 58 0x3a ':' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 59 0x3b ';' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 60 0x3c '<' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x06, /* 00000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 61 0x3d '=' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 62 0x3e '>' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 63 0x3f '?' 
*/ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 64 0x40 '@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xde, /* 11011110 */ 0xde, /* 11011110 */ 0xde, /* 11011110 */ 0xdc, /* 11011100 */ 0xc0, /* 11000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 65 0x41 'A' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 66 0x42 'B' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 11111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0xfc, /* 11111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 67 0x43 'C' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0xc2, /* 11000010 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc2, /* 11000010 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 68 0x44 'D' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf8, /* 11111000 */ 0x6c, /* 01101100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x6c, /* 01101100 */ 0xf8, /* 11111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 69 0x45 'E' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x66, /* 01100110 */ 0x62, /* 01100010 */ 0x68, /* 01101000 */ 0x78, /* 01111000 */ 0x68, /* 01101000 */ 0x60, /* 01100000 */ 0x62, /* 01100010 */ 0x66, /* 01100110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 70 0x46 'F' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x66, /* 01100110 */ 0x62, /* 01100010 */ 0x68, /* 01101000 */ 0x78, /* 01111000 */ 0x68, /* 01101000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 71 0x47 'G' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0xc2, /* 11000010 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xde, /* 11011110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x66, /* 01100110 */ 0x3a, /* 00111010 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 72 0x48 'H' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 73 0x49 'I' */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 74 0x4a 'J' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1e, /* 00011110 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 75 0x4b 'K' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xe6, /* 11100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x6c, /* 01101100 */ 0x78, /* 01111000 */ 0x78, /* 01111000 */ 0x6c, /* 01101100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0xe6, /* 11100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 76 0x4c 'L' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf0, /* 11110000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x62, /* 01100010 */ 0x66, /* 01100110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 77 0x4d 'M' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xee, /* 11101110 */ 0xfe, /* 11111110 */ 0xfe, /* 11111110 */ 0xd6, /* 11010110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 78 0x4e 'N' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xe6, /* 11100110 */ 0xf6, /* 11110110 */ 0xfe, /* 11111110 */ 0xde, /* 11011110 */ 0xce, /* 11001110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 79 0x4f 'O' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 80 0x50 'P' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 11111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 81 0x51 'Q' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xd6, /* 11010110 */ 0xde, /* 11011110 */ 0x7c, /* 01111100 */ 0x0c, /* 00001100 */ 0x0e, /* 00001110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 82 0x52 'R' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 11111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x6c, /* 01101100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0xe6, /* 11100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 83 0x53 'S' */ 0x00, /* 00000000 */ 
0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x60, /* 01100000 */ 0x38, /* 00111000 */ 0x0c, /* 00001100 */ 0x06, /* 00000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 84 0x54 'T' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x5a, /* 01011010 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 85 0x55 'U' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 86 0x56 'V' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x10, /* 00010000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 87 0x57 'W' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xfe, /* 11111110 */ 0xee, /* 11101110 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 88 0x58 'X' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x7c, /* 01111100 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x7c, /* 01111100 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 89 0x59 'Y' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 90 0x5a 'Z' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0x86, /* 10000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc2, /* 11000010 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 91 0x5b '[' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 92 0x5c '\' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x80, /* 10000000 */ 0xc0, /* 11000000 */ 0xe0, /* 11100000 */ 0x70, /* 01110000 */ 0x38, /* 00111000 */ 0x1c, /* 00011100 */ 0x0e, /* 00001110 */ 0x06, /* 00000110 */ 0x02, /* 00000010 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 93 0x5d ']' */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x3c, /* 00111100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 94 0x5e '^' */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 95 0x5f '_' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 96 0x60 '`' */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 97 0x61 'a' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 98 0x62 'b' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xe0, /* 11100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x78, /* 01111000 */ 0x6c, /* 01101100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 99 0x63 'c' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 100 0x64 'd' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1c, /* 00011100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x3c, /* 00111100 */ 0x6c, /* 01101100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 101 0x65 'e' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 102 0x66 'f' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1c, /* 00011100 */ 0x36, /* 00110110 */ 0x32, /* 00110010 */ 0x30, /* 00110000 */ 0x78, /* 01111000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 103 0x67 'g' */ 0x00, /* 00000000 */ 0x00, /* 00000000 
*/ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x7c, /* 01111100 */ 0x0c, /* 00001100 */ 0xcc, /* 11001100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ /* 104 0x68 'h' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xe0, /* 11100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x6c, /* 01101100 */ 0x76, /* 01110110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0xe6, /* 11100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 105 0x69 'i' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 106 0x6a 'j' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x00, /* 00000000 */ 0x0e, /* 00001110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ /* 107 0x6b 'k' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xe0, /* 11100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x66, /* 01100110 */ 0x6c, /* 01101100 */ 0x78, /* 01111000 */ 0x78, /* 01111000 */ 0x6c, /* 01101100 */ 0x66, /* 01100110 */ 0xe6, /* 11100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 108 0x6c 'l' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 109 0x6d 'm' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xec, /* 11101100 */ 0xfe, /* 11111110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 110 0x6e 'n' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xdc, /* 11011100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 111 0x6f 'o' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 112 0x70 'p' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xdc, /* 11011100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x00, /* 00000000 */ /* 113 0x71 'q' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 
0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x7c, /* 01111100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x1e, /* 00011110 */ 0x00, /* 00000000 */ /* 114 0x72 'r' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xdc, /* 11011100 */ 0x76, /* 01110110 */ 0x66, /* 01100110 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 115 0x73 's' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0x60, /* 01100000 */ 0x38, /* 00111000 */ 0x0c, /* 00001100 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 116 0x74 't' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0xfc, /* 11111100 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x36, /* 00110110 */ 0x1c, /* 00011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 117 0x75 'u' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 118 0x76 'v' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 119 0x77 'w' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xd6, /* 11010110 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 120 0x78 'x' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 121 0x79 'y' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7e, /* 01111110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0xf8, /* 11111000 */ 0x00, /* 00000000 */ /* 122 0x7a 'z' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xcc, /* 11001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 123 0x7b '{' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 
0x0e, /* 00001110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x70, /* 01110000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x0e, /* 00001110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 124 0x7c '|' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 125 0x7d '}' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x70, /* 01110000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x0e, /* 00001110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 126 0x7e '~' */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 127 0x7f '' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 128 0x80 'Ç' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0xc2, /* 11000010 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc2, /* 11000010 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 129 0x81 'ü' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 130 0x82 'é' */ 0x00, /* 00000000 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 131 0x83 'â' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 132 0x84 'ä' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 133 0x85 'à' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0x30, 
/* 00110000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 134 0x86 'å' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 135 0x87 'ç' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x18, /* 00011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 136 0x88 'ê' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 137 0x89 'ë' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 138 0x8a 'è' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 139 0x8b 'ï' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 140 0x8c 'î' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 141 0x8d 'ì' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 142 0x8e 'Ä' */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 143 0x8f 'Å' */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x38, /* 
00111000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 144 0x90 'É' */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x66, /* 01100110 */ 0x62, /* 01100010 */ 0x68, /* 01101000 */ 0x78, /* 01111000 */ 0x68, /* 01101000 */ 0x62, /* 01100010 */ 0x66, /* 01100110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 145 0x91 'æ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xec, /* 11101100 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x7e, /* 01111110 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0x6e, /* 01101110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 146 0x92 'Æ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3e, /* 00111110 */ 0x6c, /* 01101100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xfe, /* 11111110 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xce, /* 11001110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 147 0x93 'ô' */ 0x00, /* 00000000 */ 0x10, /* 00010000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 148 0x94 'ö' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 149 0x95 'ò' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 150 0x96 'û' */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x78, /* 01111000 */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 151 0x97 'ù' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 152 0x98 'ÿ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7e, /* 01111110 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x78, /* 01111000 */ 0x00, /* 00000000 */ /* 153 0x99 'Ö' */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x00, /* 
00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 154 0x9a 'Ü' */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 155 0x9b '¢' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 156 0x9c '£' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x64, /* 01100100 */ 0x60, /* 01100000 */ 0xf0, /* 11110000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xe6, /* 11100110 */ 0xfc, /* 11111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 157 0x9d '¥' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 158 0x9e '₧' */ 0x00, /* 00000000 */ 0xf8, /* 11111000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xf8, /* 11111000 */ 0xc4, /* 11000100 */ 0xcc, /* 11001100 */ 0xde, /* 11011110 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 159 0x9f 'ƒ' */ 0x00, /* 00000000 */ 0x0e, /* 00001110 */ 0x1b, /* 00011011 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xd8, /* 11011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 160 0xa0 'á' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0x0c, /* 00001100 */ 0x7c, /* 01111100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 161 0xa1 'í' */ 0x00, /* 00000000 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 162 0xa2 'ó' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 163 0xa3 'ú' */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x30, /* 
00110000 */ 0x60, /* 01100000 */ 0x00, /* 00000000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 164 0xa4 'ñ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0xdc, /* 11011100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 165 0xa5 'Ñ' */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0xc6, /* 11000110 */ 0xe6, /* 11100110 */ 0xf6, /* 11110110 */ 0xfe, /* 11111110 */ 0xde, /* 11011110 */ 0xce, /* 11001110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 166 0xa6 'ª' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x3e, /* 00111110 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 167 0xa7 'º' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 168 0xa8 '¿' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x7c, /* 01111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 169 0xa9 '⌐' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 170 0xaa '¬' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 171 0xab '½' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0xe0, /* 11100000 */ 0x62, /* 01100010 */ 0x66, /* 01100110 */ 0x6c, /* 01101100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xdc, /* 11011100 */ 0x86, /* 10000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x3e, /* 00111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 172 0xac '¼' */ 0x00, /* 00000000 */ 0x60, /* 01100000 */ 0xe0, /* 11100000 */ 0x62, /* 01100010 */ 0x66, /* 01100110 */ 0x6c, /* 01101100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x66, /* 01100110 */ 0xce, /* 11001110 */ 0x9a, /* 10011010 */ 0x3f, /* 00111111 */ 0x06, /* 00000110 */ 0x06, /* 00000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 173 0xad '¡' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 
00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 174 0xae '«' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x36, /* 00110110 */ 0x6c, /* 01101100 */ 0xd8, /* 11011000 */ 0x6c, /* 01101100 */ 0x36, /* 00110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 175 0xaf '»' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xd8, /* 11011000 */ 0x6c, /* 01101100 */ 0x36, /* 00110110 */ 0x6c, /* 01101100 */ 0xd8, /* 11011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 176 0xb0 '░' */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ 0x11, /* 00010001 */ 0x44, /* 01000100 */ /* 177 0xb1 '▒' */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ 0x55, /* 01010101 */ 0xaa, /* 10101010 */ /* 178 0xb2 '▓' */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ 0xdd, /* 11011101 */ 0x77, /* 01110111 */ /* 179 0xb3 '│' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 180 0xb4 '┤' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 181 0xb5 '╡' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 182 0xb6 '╢' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf6, /* 11110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 183 0xb7 '╖' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 184 0xb8 '╕' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 185 0xb9 '╣' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf6, /* 11110110 */ 0x06, /* 00000110 */ 0xf6, /* 11110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 186 0xba '║' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 187 0xbb '╗' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x06, /* 00000110 */ 0xf6, /* 11110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 188 0xbc '╝' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf6, /* 11110110 */ 0x06, /* 00000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 189 0xbd '╜' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 190 0xbe '╛' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 191 0xbf '┐' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf8, /* 11111000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 192 0xc0 '└' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 193 0xc1 '┴' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 
00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 194 0xc2 '┬' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 195 0xc3 '├' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 196 0xc4 '─' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 197 0xc5 '┼' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 198 0xc6 '╞' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 199 0xc7 '╟' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x37, /* 00110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 200 0xc8 '╚' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x37, /* 00110111 */ 0x30, /* 00110000 */ 0x3f, /* 00111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 201 0xc9 '╔' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3f, /* 00111111 */ 0x30, /* 00110000 */ 0x37, /* 00110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 202 0xca '╩' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf7, /* 11110111 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 203 0xcb '╦' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0xf7, /* 11110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 204 0xcc '╠' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x37, /* 00110111 */ 0x30, /* 00110000 */ 0x37, /* 00110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 205 0xcd '═' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 206 0xce '╬' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xf7, /* 11110111 */ 0x00, /* 00000000 */ 0xf7, /* 11110111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 207 0xcf '╧' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 208 0xd0 '╨' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 209 0xd1 '╤' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 210 0xd2 '╥' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 211 0xd3 '╙' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x3f, /* 00111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 212 0xd4 '╘' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 213 0xd5 '╒' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 214 0xd6 '╓' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3f, /* 00111111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 215 0xd7 '╫' */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0xff, /* 11111111 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ /* 216 0xd8 '╪' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0xff, /* 11111111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 217 0xd9 '┘' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xf8, /* 11111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 218 0xda '┌' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1f, /* 00011111 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 219 0xdb '█' */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ /* 220 0xdc '▄' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ /* 221 0xdd '▌' */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ 0xf0, /* 11110000 */ /* 222 0xde '▐' */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ 0x0f, /* 00001111 */ /* 223 0xdf '▀' */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 
11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0xff, /* 11111111 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 224 0xe0 'α' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xdc, /* 11011100 */ 0x76, /* 01110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 225 0xe1 'ß' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x78, /* 01111000 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xcc, /* 11001100 */ 0xd8, /* 11011000 */ 0xcc, /* 11001100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xcc, /* 11001100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 226 0xe2 'Γ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 227 0xe3 'π' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 228 0xe4 'Σ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 229 0xe5 'σ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 230 0xe6 'µ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x7c, /* 01111100 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ /* 231 0xe7 'τ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 232 0xe8 'Φ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x3c, /* 00111100 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 233 0xe9 'Θ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 
00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xfe, /* 11111110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 234 0xea 'Ω' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0xee, /* 11101110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 235 0xeb 'δ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1e, /* 00011110 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x3e, /* 00111110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x66, /* 01100110 */ 0x3c, /* 00111100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 236 0xec '∞' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 237 0xed 'φ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x03, /* 00000011 */ 0x06, /* 00000110 */ 0x7e, /* 01111110 */ 0xdb, /* 11011011 */ 0xdb, /* 11011011 */ 0xf3, /* 11110011 */ 0x7e, /* 01111110 */ 0x60, /* 01100000 */ 0xc0, /* 11000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 238 0xee 'ε' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1c, /* 00011100 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x7c, /* 01111100 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x1c, /* 00011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 239 0xef '∩' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 01111100 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0xc6, /* 11000110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 240 0xf0 '≡' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfe, /* 11111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 241 0xf1 '±' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x7e, /* 01111110 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 242 0xf2 '≥' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x06, /* 00000110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 243 0xf3 '≤' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 
00000000 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x30, /* 00110000 */ 0x60, /* 01100000 */ 0x30, /* 00110000 */ 0x18, /* 00011000 */ 0x0c, /* 00001100 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 244 0xf4 '⌠' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x0e, /* 00001110 */ 0x1b, /* 00011011 */ 0x1b, /* 00011011 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ /* 245 0xf5 '⌡' */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0xd8, /* 11011000 */ 0x70, /* 01110000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 246 0xf6 '÷' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 247 0xf7 '≈' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0x76, /* 01110110 */ 0xdc, /* 11011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 248 0xf8 '°' */ 0x00, /* 00000000 */ 0x38, /* 00111000 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x38, /* 00111000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 249 0xf9 '·' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 250 0xfa '•' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 00011000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 251 0xfb '√' */ 0x00, /* 00000000 */ 0x0f, /* 00001111 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0x0c, /* 00001100 */ 0xec, /* 11101100 */ 0x6c, /* 01101100 */ 0x6c, /* 01101100 */ 0x3c, /* 00111100 */ 0x1c, /* 00011100 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 252 0xfc 'ⁿ' */ 0x00, /* 00000000 */ 0x6c, /* 01101100 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x36, /* 00110110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 253 0xfd '²' */ 0x00, /* 00000000 */ 0x3c, /* 00111100 */ 0x66, /* 
01100110 */ 0x0c, /* 00001100 */ 0x18, /* 00011000 */ 0x32, /* 00110010 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 254 0xfe '■' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x7e, /* 01111110 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 255 0xff ' ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ } }; const struct font_desc font_vga_8x16 = { .idx = VGA8x16_IDX, .name = "VGA8x16", .width = 8, .height = 16, .charcount = 256, .data = fontdata_8x16.data, .pref = 0, }; EXPORT_SYMBOL(font_vga_8x16);
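/*
 * Illustrative userland sketch (not part of the kernel source above): it
 * shows how each glyph in fontdata_8x16 is laid out -- 16 consecutive
 * bytes, one byte per pixel row, MSBit = leftmost pixel, as the binary
 * comments in the table indicate.  The sample bytes are the 16 rows of
 * character 254 (0xfe, '■') copied from the table above.
 */
#include <stdio.h>

int main(void)
{
	static const unsigned char glyph[16] = {
		0x00, 0x00, 0x00, 0x00, 0x7e, 0x7e, 0x7e, 0x7e,
		0x7e, 0x7e, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00,
	};
	int row, bit;

	for (row = 0; row < 16; row++) {
		for (bit = 7; bit >= 0; bit--)	/* MSBit first = left to right */
			putchar((glyph[row] >> bit) & 1 ? '*' : ' ');
		putchar('\n');
	}
	return 0;
}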
linux-master
lib/fonts/font_8x16.c
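/*
 * Illustrative userland sketch (not part of the kernel sources): it decodes
 * the 4x6 encoding described in the mini_4x6.c header that follows.  Each
 * glyph is six bytes, one byte per row; the upper nybble holds the four
 * pixels (MSBit = leftmost) and the lower nybble is a duplicate of it.
 * The sample bytes are the six rows of character 65 ('A') from the table
 * below.
 */
#include <stdio.h>

int main(void)
{
	static const unsigned char glyph[6] = {
		0x44, 0xaa, 0xee, 0xaa, 0xaa, 0x00
	};
	int row, bit;

	for (row = 0; row < 6; row++) {
		for (bit = 7; bit >= 4; bit--)	/* upper nybble, left to right */
			putchar((glyph[row] >> bit) & 1 ? '*' : ' ');
		printf("   (0x%02x: lower nybble 0x%x repeats the pattern)\n",
		       glyph[row], glyph[row] & 0x0f);
	}
	return 0;
}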
/* Hand composed "Minuscule" 4x6 font, with binary data generated using * Perl stub. * * Use 'perl -x mini_4x6.c < mini_4x6.c > new_version.c' to regenerate * binary data. * * Created by Kenneth Albanowski. * No rights reserved, released to the public domain. * * Version 1.0 */ /* #!/usr/bin/perl -pn s{((0x)?[0-9a-fA-F]+)(.*\[([\*\ ]{4})\])}{ ($num,$pat,$bits) = ($1,$3,$4); $bits =~ s/([^\s0])|(.)/ defined($1) + 0 /ge; $num = ord(pack("B8", $bits)); $num |= $num >> 4; $num = sprintf("0x%.2x", $num); #print "$num,$pat,$bits\n"; $num . $pat; }ge; __END__; */ /* Note: binary data consists of one byte for each row of each character top to bottom, character 0 to character 255, six bytes per character. Each byte contains the same four character bits in both nybbles. MSBit to LSBit = left to right. */ #include <linux/font.h> #define FONTDATAMAX 1536 static const struct font_data fontdata_mini_4x6 = { { 0, 0, FONTDATAMAX, 0 }, { /*{*/ /* Char 0: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 1: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 2: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 3: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 4: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 5: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 6: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 7: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 8: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 9: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 10: '' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 11: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 12: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 13: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 14: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 15: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 16: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 17: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= 
[*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 18: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 19: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 20: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 21: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 22: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 23: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 24: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 25: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 26: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 27: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 28: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 29: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 30: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 31: ' ' */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 32: ' ' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 33: '!' 
*/ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 34: '"' */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 35: '#' */ 0xaa, /*= [* * ] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 36: '$' */ 0x44, /*= [ * ] */ 0x66, /*= [ ** ] */ 0xee, /*= [*** ] */ 0xcc, /*= [** ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 37: '%' */ 0xaa, /*= [* * ] */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 38: '&' */ 0x66, /*= [ ** ] */ 0x99, /*= [* *] */ 0x66, /*= [ ** ] */ 0xaa, /*= [* * ] */ 0xdd, /*= [** *] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 39: ''' */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 40: '(' */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x22, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 41: ')' */ 0x44, /*= [ * ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 42: '*' */ 0x00, /*= [ ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 43: '+' */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0xee, /*= [*** ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 44: ',' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 45: '-' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 46: '.' 
*/ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 47: '/' */ 0x00, /*= [ ] */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 48: '0' */ 0x44, /*= [ * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 49: '1' */ 0x44, /*= [ * ] */ 0xcc, /*= [** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 50: '2' */ 0xcc, /*= [** ] */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 51: '3' */ 0xee, /*= [*** ] */ 0x22, /*= [ * ] */ 0x66, /*= [ ** ] */ 0x22, /*= [ * ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 52: '4' */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 53: '5' */ 0xee, /*= [*** ] */ 0x88, /*= [* ] */ 0xee, /*= [*** ] */ 0x22, /*= [ * ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 54: '6' */ 0xee, /*= [*** ] */ 0x88, /*= [* ] */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 55: '7' */ 0xee, /*= [*** ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 56: '8' */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 57: '9' */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 58: ':' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 59: ';' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ /*}*/ /*{*/ /* Char 60: '<' */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ 0x44, /*= [ * ] */ 0x22, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 61: '=' */ 0x00, /*= [ ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 62: '>' */ 0x88, /*= [* ] */ 0x44, /*= [ * ] */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 63: '?' 
*/ 0xee, /*= [*** ] */ 0x22, /*= [ * ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 64: '@' */ 0x44, /*= [ * ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x88, /*= [* ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 65: 'A' */ 0x44, /*= [ * ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 66: 'B' */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xcc, /*= [** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 67: 'C' */ 0x66, /*= [ ** ] */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 68: 'D' */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xcc, /*= [** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 69: 'E' */ 0xee, /*= [*** ] */ 0x88, /*= [* ] */ 0xee, /*= [*** ] */ 0x88, /*= [* ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 70: 'F' */ 0xee, /*= [*** ] */ 0x88, /*= [* ] */ 0xee, /*= [*** ] */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 71: 'G' */ 0x66, /*= [ ** ] */ 0x88, /*= [* ] */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 72: 'H' */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 73: 'I' */ 0xee, /*= [*** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 74: 'J' */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0xaa, /*= [* * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 75: 'K' */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 76: 'L' */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 77: 'M' */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 78: 'N' */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 79: 'O' */ 0x44, /*= [ * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 80: 'P' */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xcc, /*= [** ] */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 81: 'Q' */ 0x44, /*= [ * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 82: 'R' */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 83: 'S' */ 0x66, /*= [ ** ] */ 0x88, /*= [* ] */ 0x44, /*= [ * ] */ 0x22, /*= [ * ] */ 0xcc, /*= [** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 84: 'T' */ 0xee, /*= [*** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 85: 'U' */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 86: 'V' */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 87: 'W' */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0xee, /*= 
[*** ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 88: 'X' */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x44, /*= [ * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 89: 'Y' */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 90: 'Z' */ 0xee, /*= [*** ] */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 91: '[' */ 0x66, /*= [ ** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 92: '\' */ 0x00, /*= [ ] */ 0x88, /*= [* ] */ 0x44, /*= [ * ] */ 0x22, /*= [ * ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 93: ']' */ 0x66, /*= [ ** ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 94: '^' */ 0x44, /*= [ * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 95: '_' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xff, /*= [****] */ /*}*/ /*{*/ /* Char 96: '`' */ 0x88, /*= [* ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 97: 'a' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x66, /*= [ ** ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 98: 'b' */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xcc, /*= [** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 99: 'c' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x66, /*= [ ** ] */ 0x88, /*= [* ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 100: 'd' */ 0x22, /*= [ * ] */ 0x22, /*= [ * ] */ 0x66, /*= [ ** ] */ 0xaa, /*= [* * ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 101: 'e' */ 0x00, /*= [ ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x88, /*= [* ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 102: 'f' */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0xee, /*= [*** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 103: 'g' */ 0x00, /*= [ ] */ 0x66, /*= [ ** ] */ 0xaa, /*= [* * ] */ 0x66, /*= [ ** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 104: 'h' */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 105: 'i' */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 106: 'j' */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 107: 'k' */ 0x00, /*= [ ] */ 0x88, /*= [* ] */ 0xaa, /*= [* * ] */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 108: 'l' */ 0x00, /*= [ ] */ 0xcc, /*= [** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 109: 'm' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 110: 'n' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 111: 'o' */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 112: 'p' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xcc, /*= [** ] */ 
0xaa, /*= [* * ] */ 0xcc, /*= [** ] */ 0x88, /*= [* ] */ /*}*/ /*{*/ /* Char 113: 'q' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x66, /*= [ ** ] */ 0xaa, /*= [* * ] */ 0x66, /*= [ ** ] */ 0x22, /*= [ * ] */ /*}*/ /*{*/ /* Char 114: 'r' */ 0x00, /*= [ ] */ 0xcc, /*= [** ] */ 0xaa, /*= [* * ] */ 0x88, /*= [* ] */ 0x88, /*= [* ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 115: 's' */ 0x00, /*= [ ] */ 0x66, /*= [ ** ] */ 0xcc, /*= [** ] */ 0x22, /*= [ * ] */ 0xcc, /*= [** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 116: 't' */ 0x00, /*= [ ] */ 0x44, /*= [ * ] */ 0xee, /*= [*** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 117: 'u' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 118: 'v' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 119: 'w' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 120: 'x' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xaa, /*= [* * ] */ 0x44, /*= [ * ] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 121: 'y' */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0x22, /*= [ * ] */ 0xcc, /*= [** ] */ /*}*/ /*{*/ /* Char 122: 'z' */ 0x00, /*= [ ] */ 0xee, /*= [*** ] */ 0x66, /*= [ ** ] */ 0xcc, /*= [** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 123: '{' */ 0x22, /*= [ * ] */ 0x44, /*= [ * ] */ 0xcc, /*= [** ] */ 0x44, /*= [ * ] */ 0x22, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 124: '|' */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 125: '}' */ 0x88, /*= [* ] */ 0x44, /*= [ * ] */ 0x66, /*= [ ** ] */ 0x44, /*= [ * ] */ 0x88, /*= [* ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 126: '~' */ 0x55, /*= [ * *] */ 0xaa, /*= [* * ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 127: '' */ 0x44, /*= [ * ] */ 0xaa, /*= [* * ] */ 0xaa, /*= [* * ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 128: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 129: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 130: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 131: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 132: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 133: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 134: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 135: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 136: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 137: */ 0xee, /*= [*** ] 
*/ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 138: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 139: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 140: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 141: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 142: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 143: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 144: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 145: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 146: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 147: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 148: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 149: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 150: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 151: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 152: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 153: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 154: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 155: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 156: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 157: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 158: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 159: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 160: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 161: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= 
[*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 162: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 163: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 164: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 165: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 166: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 167: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 168: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 169: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 170: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 171: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 172: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 173: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 174: */ 0x00, /*= [ ] */ 0x66, /*= [ ** ] */ 0xcc, /*= [** ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 175: */ 0x00, /*= [ ] */ 0xcc, /*= [** ] */ 0x66, /*= [ ** ] */ 0xcc, /*= [** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 176: */ 0x88, /*= [* ] */ 0x22, /*= [ * ] */ 0x88, /*= [* ] */ 0x22, /*= [ * ] */ 0x88, /*= [* ] */ 0x22, /*= [ * ] */ /*}*/ /*{*/ /* Char 177: */ 0xaa, /*= [* * ] */ 0x55, /*= [ * *] */ 0xaa, /*= [* * ] */ 0x55, /*= [ * *] */ 0xaa, /*= [* * ] */ 0x55, /*= [ * *] */ /*}*/ /*{*/ /* Char 178: */ 0xdd, /*= [** *] */ 0xbb, /*= [* **] */ 0xdd, /*= [** *] */ 0xbb, /*= [* **] */ 0xdd, /*= [** *] */ 0xbb, /*= [* **] */ /*}*/ /*{*/ /* Char 179: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 180: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xcc, /*= [** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 181: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xcc, /*= [** ] */ 0xcc, /*= [** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 182: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0xee, /*= [*** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 183: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xee, /*= [*** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 184: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xcc, /*= [** ] */ 0xcc, /*= [** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 185: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 186: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, 
/*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 187: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 188: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 189: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 190: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xcc, /*= [** ] */ 0xcc, /*= [** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 191: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xcc, /*= [** ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 192: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x77, /*= [ ***] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 193: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xff, /*= [****] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 194: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xff, /*= [****] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 195: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x77, /*= [ ***] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 196: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xff, /*= [****] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 197: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xff, /*= [****] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 198: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x77, /*= [ ***] */ 0x77, /*= [ ***] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 199: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x77, /*= [ ***] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 200: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x77, /*= [ ***] */ 0x77, /*= [ ***] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 201: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x77, /*= [ ***] */ 0x77, /*= [ ***] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 202: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 203: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 204: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x77, /*= [ ***] */ 0x77, /*= [ ***] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 205: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 206: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 207: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 208: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0xff, /*= [****] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 209: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 210: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xff, /*= [****] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 211: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x77, /*= [ ***] */ 0x00, 
/*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 212: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x77, /*= [ ***] */ 0x77, /*= [ ***] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 213: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x77, /*= [ ***] */ 0x77, /*= [ ***] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 214: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x77, /*= [ ***] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 215: */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0xff, /*= [****] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ /*}*/ /*{*/ /* Char 216: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 217: */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0xcc, /*= [** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 218: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x77, /*= [ ***] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ 0x44, /*= [ * ] */ /*}*/ /*{*/ /* Char 219: */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0xff, /*= [****] */ /*}*/ /*{*/ /* Char 220: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0xff, /*= [****] */ /*}*/ /*{*/ /* Char 221: */ 0xcc, /*= [** ] */ 0xcc, /*= [** ] */ 0xcc, /*= [** ] */ 0xcc, /*= [** ] */ 0xcc, /*= [** ] */ 0xcc, /*= [** ] */ /*}*/ /*{*/ /* Char 222: */ 0x33, /*= [ **] */ 0x33, /*= [ **] */ 0x33, /*= [ **] */ 0x33, /*= [ **] */ 0x33, /*= [ **] */ 0x33, /*= [ **] */ /*}*/ /*{*/ /* Char 223: */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0xff, /*= [****] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 224: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 225: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 226: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 227: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 228: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 229: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 230: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 231: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 232: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 233: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 234: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 235: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 236: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 
0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 237: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 238: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 239: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 240: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 241: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 242: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 243: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 244: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 245: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 246: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 247: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 248: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 249: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 250: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 251: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 252: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 253: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 254: */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ 0x66, /*= [ ** ] */ 0x66, /*= [ ** ] */ 0x00, /*= [ ] */ 0x00, /*= [ ] */ /*}*/ /*{*/ /* Char 255: */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ } }; const struct font_desc font_mini_4x6 = { .idx = MINI4x6_IDX, .name = "MINI4x6", .width = 4, .height = 6, .charcount = 256, .data = fontdata_mini_4x6.data, .pref = 3, };
linux-master
lib/fonts/font_mini_4x6.c
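Each glyph in the fontdata_mini_4x6 table above is stored as six bytes, one per pixel row; since the descriptor declares .width = 4, only the top four bits of each byte are meaningful (in this table the low nibble simply repeats the same pattern). The following standalone sketch is not the kernel's console renderer, just an illustration of that byte layout under the assumptions stated in its comments: it decodes the 'y' glyph (0x00, 0x00, 0xaa, 0xee, 0x22, 0xcc, copied from the entry above) by testing the high nibble of each row byte.

#include <stdio.h>

/* Row bytes for 'y', copied verbatim from the fontdata_mini_4x6 entry above.
 * (Illustrative local copy; the kernel accesses the table through
 * font_mini_4x6.data instead.)
 */
static const unsigned char glyph_y[6] = { 0x00, 0x00, 0xaa, 0xee, 0x22, 0xcc };

int main(void)
{
	/* height = 6 rows, width = 4 pixels: scan the high nibble MSB-first */
	for (int row = 0; row < 6; row++) {
		for (int col = 0; col < 4; col++)
			putchar(glyph_y[row] & (0x80 >> col) ? '*' : ' ');
		putchar('\n');
	}
	return 0;
}

Running this prints two blank rows followed by "* * ", "*** ", "  * ", "**  ", which matches the bracketed bitmap comments for char 121 in the table above; the same reading applies to every per-byte comment in the font data.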
// SPDX-License-Identifier: GPL-2.0 /******************************** * adapted from font_sun12x22.c * * by Jurriaan Kalkman 06-2005 * ********************************/ #include <linux/font.h> #define FONTDATAMAX 9216 static const struct font_data fontdata_10x18 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 1 0x01 '^A' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x3f, 0x80, /* 0011111110 */ 0x40, 0x40, /* 0100000001 */ 0x5b, 0x40, /* 0101101101 */ 0x40, 0x40, /* 0100000001 */ 0x44, 0x40, /* 0100010001 */ 0x44, 0x40, /* 0100010001 */ 0x51, 0x40, /* 0101000101 */ 0x4e, 0x40, /* 0100111001 */ 0x40, 0x40, /* 0100000001 */ 0x3f, 0x80, /* 0011111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 2 0x02 '^B' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x3f, 0x80, /* 0011111110 */ 0x7f, 0xc0, /* 0111111111 */ 0x64, 0xc0, /* 0110010011 */ 0x7f, 0xc0, /* 0111111111 */ 0x7b, 0xc0, /* 0111101111 */ 0x7b, 0xc0, /* 0111101111 */ 0x6e, 0xc0, /* 0110111011 */ 0x71, 0xc0, /* 0111000111 */ 0x7f, 0xc0, /* 0111111111 */ 0x3f, 0x80, /* 0011111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 3 0x03 '^C' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x11, 0x00, /* 0001000100 */ 0x3b, 0x80, /* 0011101110 */ 0x7f, 0xc0, /* 0111111111 */ 0x3f, 0x80, /* 0011111110 */ 0x3f, 0x80, /* 0011111110 */ 0x1f, 0x00, /* 0001111100 */ 0x1f, 0x00, /* 0001111100 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x04, 0x00, /* 0000010000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 4 0x04 '^D' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x1f, 0x00, /* 0001111100 */ 0x1f, 0x00, /* 0001111100 */ 0x3f, 0x80, /* 0011111110 */ 0x1f, 0x00, /* 0001111100 */ 0x1f, 0x00, /* 0001111100 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x04, 0x00, /* 0000010000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 5 0x05 '^E' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x31, 0x80, /* 0011000110 */ 0x7b, 0xc0, /* 0111101111 */ 0x35, 0x80, /* 0011010110 */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x1f, 0x00, /* 0001111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 6 0x06 '^F' */ 
0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x1f, 0x00, /* 0001111100 */ 0x1f, 0x00, /* 0001111100 */ 0x3f, 0x80, /* 0011111110 */ 0x7f, 0xc0, /* 0111111111 */ 0x7f, 0xc0, /* 0111111111 */ 0x35, 0x80, /* 0011010110 */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x1f, 0x00, /* 0001111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 7 0x07 '^G' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x1e, 0x00, /* 0001111000 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x1e, 0x00, /* 0001111000 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 8 0x08 '^H' */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xf3, 0xc0, /* 1111001111 */ 0xe1, 0xc0, /* 1110000111 */ 0xe1, 0xc0, /* 1110000111 */ 0xc0, 0xc0, /* 1100000011 */ 0xc0, 0xc0, /* 1100000011 */ 0xe1, 0xc0, /* 1110000111 */ 0xe1, 0xc0, /* 1110000111 */ 0xf3, 0xc0, /* 1111001111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ /* 9 0x09 '^I' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x12, 0x00, /* 0001001000 */ 0x12, 0x00, /* 0001001000 */ 0x21, 0x00, /* 0010000100 */ 0x21, 0x00, /* 0010000100 */ 0x12, 0x00, /* 0001001000 */ 0x12, 0x00, /* 0001001000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 10 0x0a '^J' */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xf3, 0xc0, /* 1111001111 */ 0xed, 0xc0, /* 1110110111 */ 0xed, 0xc0, /* 1110110111 */ 0xde, 0xc0, /* 1101111011 */ 0xde, 0xc0, /* 1101111011 */ 0xed, 0xc0, /* 1110110111 */ 0xed, 0xc0, /* 1110110111 */ 0xf3, 0xc0, /* 1111001111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ /* 11 0x0b '^K' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x03, 0xc0, /* 0000001111 */ 0x06, 0xc0, /* 0000011011 */ 0x0c, 0xc0, /* 0000110011 */ 0x3c, 0x00, /* 0011110000 */ 0x66, 0x00, /* 0110011000 */ 0xc3, 0x00, /* 1100001100 */ 0xc3, 0x00, /* 1100001100 */ 0xc3, 0x00, /* 1100001100 */ 0x66, 0x00, /* 0110011000 */ 0x3c, 0x00, /* 0011110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 12 0x0c '^L' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x33, 0x00, /* 0011001100 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 
0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 13 0x0d '^M' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0f, 0x80, /* 0000111110 */ 0x08, 0x80, /* 0000100010 */ 0x0f, 0x80, /* 0000111110 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x38, 0x00, /* 0011100000 */ 0x78, 0x00, /* 0111100000 */ 0x30, 0x00, /* 0011000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 14 0x0e '^N' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x80, /* 0001111110 */ 0x10, 0x80, /* 0001000010 */ 0x1f, 0x80, /* 0001111110 */ 0x10, 0x80, /* 0001000010 */ 0x10, 0x80, /* 0001000010 */ 0x10, 0x80, /* 0001000010 */ 0x10, 0x80, /* 0001000010 */ 0x13, 0x80, /* 0001001110 */ 0x17, 0x80, /* 0001011110 */ 0x73, 0x00, /* 0111001100 */ 0xf0, 0x00, /* 1111000000 */ 0x60, 0x00, /* 0110000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 15 0x0f '^O' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x04, 0x00, /* 0000010000 */ 0x24, 0x80, /* 0010010010 */ 0x15, 0x00, /* 0001010100 */ 0x55, 0x40, /* 0101010101 */ 0x3f, 0x80, /* 0011111110 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x0e, 0x00, /* 0000111000 */ 0x3f, 0x80, /* 0011111110 */ 0x55, 0x40, /* 0101010101 */ 0x15, 0x00, /* 0001010100 */ 0x24, 0x80, /* 0010010010 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 16 0x10 '^P' */ 0x00, 0x80, /* 0000000010 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x80, /* 0000001110 */ 0x07, 0x80, /* 0000011110 */ 0x0f, 0x80, /* 0000111110 */ 0x1f, 0x80, /* 0001111110 */ 0x3f, 0x80, /* 0011111110 */ 0x7f, 0x80, /* 0111111110 */ 0xff, 0x80, /* 1111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x3f, 0x80, /* 0011111110 */ 0x1f, 0x80, /* 0001111110 */ 0x0f, 0x80, /* 0000111110 */ 0x07, 0x80, /* 0000011110 */ 0x03, 0x80, /* 0000001110 */ 0x01, 0x80, /* 0000000110 */ 0x00, 0x80, /* 0000000010 */ 0x00, 0x00, /* 0000000000 */ /* 17 0x11 '^Q' */ 0x40, 0x00, /* 0100000000 */ 0x60, 0x00, /* 0110000000 */ 0x70, 0x00, /* 0111000000 */ 0x78, 0x00, /* 0111100000 */ 0x7c, 0x00, /* 0111110000 */ 0x7e, 0x00, /* 0111111000 */ 0x7f, 0x00, /* 0111111100 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0xc0, /* 0111111111 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x00, /* 0111111100 */ 0x7e, 0x00, /* 0111111000 */ 0x7c, 0x00, /* 0111110000 */ 0x78, 0x00, /* 0111100000 */ 0x70, 0x00, /* 0111000000 */ 0x60, 0x00, /* 0110000000 */ 0x40, 0x00, /* 0100000000 */ 0x00, 0x00, /* 0000000000 */ /* 18 0x12 '^R' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x3f, 0x00, /* 0011111100 */ 0x7f, 0x80, /* 0111111110 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x7f, 0x80, /* 0111111110 */ 0x3f, 0x00, /* 0011111100 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 19 0x13 '^S' */ 0x00, 0x00, /* 0000000000 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 
0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 20 0x14 '^T' */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0xc0, /* 0001111111 */ 0x39, 0x80, /* 0011100110 */ 0x79, 0x80, /* 0111100110 */ 0x79, 0x80, /* 0111100110 */ 0x79, 0x80, /* 0111100110 */ 0x39, 0x80, /* 0011100110 */ 0x19, 0x80, /* 0001100110 */ 0x19, 0x80, /* 0001100110 */ 0x19, 0x80, /* 0001100110 */ 0x19, 0x80, /* 0001100110 */ 0x19, 0x80, /* 0001100110 */ 0x19, 0x80, /* 0001100110 */ 0x39, 0xc0, /* 0011100111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 21 0x15 '^U' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x3e, 0x00, /* 0011111000 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x3e, 0x00, /* 0011111000 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x3e, 0x00, /* 0011111000 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x3e, 0x00, /* 0011111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 22 0x16 '^V' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 23 0x17 '^W' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x3f, 0x00, /* 0011111100 */ 0x7f, 0x80, /* 0111111110 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x7f, 0x80, /* 0111111110 */ 0x3f, 0x00, /* 0011111100 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 24 0x18 '^X' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x3f, 0x00, /* 0011111100 */ 0x7f, 0x80, /* 0111111110 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 25 0x19 '^Y' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x7f, 0x80, /* 0111111110 */ 0x3f, 0x00, /* 0011111100 */ 
0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 26 0x1a '^Z' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x04, 0x00, /* 0000010000 */ 0x06, 0x00, /* 0000011000 */ 0x07, 0x00, /* 0000011100 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x07, 0x00, /* 0000011100 */ 0x06, 0x00, /* 0000011000 */ 0x04, 0x00, /* 0000010000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 27 0x1b '^[' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x08, 0x00, /* 0000100000 */ 0x18, 0x00, /* 0001100000 */ 0x38, 0x00, /* 0011100000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x38, 0x00, /* 0011100000 */ 0x18, 0x00, /* 0001100000 */ 0x08, 0x00, /* 0000100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 28 0x1c '^\' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 29 0x1d '^]' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x12, 0x00, /* 0001001000 */ 0x33, 0x00, /* 0011001100 */ 0x73, 0x80, /* 0111001110 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x73, 0x80, /* 0111001110 */ 0x33, 0x00, /* 0011001100 */ 0x12, 0x00, /* 0001001000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 30 0x1e '^^' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x04, 0x00, /* 0000010000 */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x1f, 0x00, /* 0001111100 */ 0x1f, 0x00, /* 0001111100 */ 0x3f, 0x80, /* 0011111110 */ 0x3f, 0x80, /* 0011111110 */ 0x7f, 0xc0, /* 0111111111 */ 0x7f, 0xc0, /* 0111111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 31 0x1f '^_' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0xc0, /* 0111111111 */ 0x7f, 0xc0, /* 0111111111 */ 0x3f, 0x80, /* 0011111110 */ 0x3f, 0x80, /* 0011111110 */ 0x1f, 0x00, /* 0001111100 */ 0x1f, 0x00, /* 0001111100 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x04, 0x00, /* 0000010000 */ 0x04, 0x00, /* 0000010000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 32 0x20 ' ' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 
0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 33 0x21 '!' */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 34 0x22 '"' */ 0x00, 0x00, /* 0000000000 */ 0x63, 0x00, /* 0110001100 */ 0xf7, 0x80, /* 1111011110 */ 0xf7, 0x80, /* 1111011110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x63, 0x00, /* 0110001100 */ 0x42, 0x00, /* 0100001000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 35 0x23 '#' */ 0x00, 0x00, /* 0000000000 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 36 0x24 '$' */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x3e, 0x00, /* 0011111000 */ 0x3f, 0x00, /* 0011111100 */ 0x6f, 0x80, /* 0110111110 */ 0x6d, 0x80, /* 0110110110 */ 0x6c, 0x80, /* 0110110010 */ 0x3c, 0x00, /* 0011110000 */ 0x0f, 0x00, /* 0000111100 */ 0x0d, 0x80, /* 0000110110 */ 0x4d, 0x80, /* 0100110110 */ 0x6d, 0x80, /* 0110110110 */ 0x7f, 0x00, /* 0111111100 */ 0x3e, 0x00, /* 0011111000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 37 0x25 '%' */ 0x00, 0x00, /* 0000000000 */ 0x31, 0x80, /* 0011000110 */ 0x7b, 0x00, /* 0111101100 */ 0x7b, 0x00, /* 0111101100 */ 0x36, 0x00, /* 0011011000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x1b, 0x00, /* 0001101100 */ 0x37, 0x80, /* 0011011110 */ 0x37, 0x80, /* 0011011110 */ 0x63, 0x00, /* 0110001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 38 0x26 '&' */ 0x00, 0x00, /* 0000000000 */ 0x07, 0x00, /* 0000011100 */ 0x0f, 0x80, /* 0000111110 */ 0x19, 0x80, /* 0001100110 */ 0x19, 0x80, /* 0001100110 */ 0x0f, 0x80, /* 0000111110 */ 0x1e, 0x00, /* 0001111000 */ 0x3e, 0x00, /* 0011111000 */ 0x76, 0x00, /* 0111011000 */ 0x66, 0x40, /* 0110011001 */ 0x63, 0xc0, /* 0110001111 */ 0x63, 0x80, /* 0110001110 */ 0x63, 0x00, /* 0110001100 */ 0x3f, 0x80, /* 0011111110 */ 0x1c, 0xc0, /* 0001110011 */ 0x00, 0x00, /* 
0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 39 0x27 ''' */ 0x00, 0x00, /* 0000000000 */ 0x30, 0x00, /* 0011000000 */ 0x78, 0x00, /* 0111100000 */ 0x78, 0x00, /* 0111100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x20, 0x00, /* 0010000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 40 0x28 '(' */ 0x00, 0x00, /* 0000000000 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x06, 0x00, /* 0000011000 */ 0x03, 0x00, /* 0000001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 41 0x29 ')' */ 0x00, 0x00, /* 0000000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 42 0x2a '*' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x4c, 0x80, /* 0100110010 */ 0x6d, 0x80, /* 0110110110 */ 0x3f, 0x00, /* 0011111100 */ 0x7f, 0x80, /* 0111111110 */ 0x3f, 0x00, /* 0011111100 */ 0x6d, 0x80, /* 0110110110 */ 0x4c, 0x80, /* 0100110010 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 43 0x2b '+' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 44 0x2c ',' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x30, 0x00, /* 0011000000 */ 0x78, 0x00, /* 0111100000 */ 0x78, 0x00, /* 0111100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x60, 0x00, /* 0110000000 */ 0x40, 0x00, /* 0100000000 */ /* 45 0x2d '-' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 
0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 46 0x2e '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x18, 0x00, /* 0001100000 */ 0x3c, 0x00, /* 0011110000 */ 0x3c, 0x00, /* 0011110000 */ 0x18, 0x00, /* 0001100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 47 0x2f '/' */ 0x00, 0x00, /* 0000000000 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 48 0x30 '0' */ 0x00, 0x00, /* 0000000000 */ 0x0e, 0x00, /* 0000111000 */ 0x1f, 0x00, /* 0001111100 */ 0x23, 0x00, /* 0010001100 */ 0x61, 0x80, /* 0110000110 */ 0x63, 0x80, /* 0110001110 */ 0x65, 0x80, /* 0110010110 */ 0x65, 0x80, /* 0110010110 */ 0x69, 0x80, /* 0110100110 */ 0x69, 0x80, /* 0110100110 */ 0x71, 0x80, /* 0111000110 */ 0x61, 0x00, /* 0110000100 */ 0x31, 0x00, /* 0011000100 */ 0x3e, 0x00, /* 0011111000 */ 0x1c, 0x00, /* 0001110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 49 0x31 '1' */ 0x00, 0x00, /* 0000000000 */ 0x04, 0x00, /* 0000010000 */ 0x0c, 0x00, /* 0000110000 */ 0x1c, 0x00, /* 0001110000 */ 0x3c, 0x00, /* 0011110000 */ 0x6c, 0x00, /* 0110110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 50 0x32 '2' */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x3f, 0x00, /* 0011111100 */ 0x63, 0x80, /* 0110001110 */ 0x41, 0x80, /* 0100000110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x80, /* 0011000010 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 51 0x33 '3' */ 0x00, 0x00, /* 0000000000 */ 0x1c, 0x00, /* 0001110000 */ 0x3e, 0x00, /* 0011111000 */ 0x47, 0x00, /* 0100011100 */ 0x03, 0x00, /* 0000001100 */ 0x07, 0x00, /* 0000011100 */ 0x06, 0x00, /* 0000011000 */ 0x0e, 0x00, /* 0000111000 */ 0x07, 0x00, /* 0000011100 */ 0x03, 0x00, /* 0000001100 */ 0x01, 0x80, /* 0000000110 */ 0x41, 0x80, /* 0100000110 */ 0x61, 0x80, /* 0110000110 */ 0x3f, 0x00, /* 0011111100 */ 0x1e, 0x00, /* 0001111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 52 0x34 
'4' */ 0x00, 0x00, /* 0000000000 */ 0x06, 0x00, /* 0000011000 */ 0x0e, 0x00, /* 0000111000 */ 0x1e, 0x00, /* 0001111000 */ 0x36, 0x00, /* 0011011000 */ 0x36, 0x00, /* 0011011000 */ 0x66, 0x00, /* 0110011000 */ 0x66, 0x00, /* 0110011000 */ 0xc6, 0x00, /* 1100011000 */ 0xc6, 0x00, /* 1100011000 */ 0xff, 0x80, /* 1111111110 */ 0xff, 0x80, /* 1111111110 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 53 0x35 '5' */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x1f, 0x00, /* 0001111100 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x60, 0x00, /* 0110000000 */ 0x7e, 0x00, /* 0111111000 */ 0x67, 0x00, /* 0110011100 */ 0x03, 0x80, /* 0000001110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x41, 0x80, /* 0100000110 */ 0x63, 0x00, /* 0110001100 */ 0x3e, 0x00, /* 0011111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 54 0x36 '6' */ 0x00, 0x00, /* 0000000000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x60, 0x00, /* 0110000000 */ 0x6e, 0x00, /* 0110111000 */ 0x7f, 0x00, /* 0111111100 */ 0x73, 0x80, /* 0111001110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x71, 0x00, /* 0111000100 */ 0x3e, 0x00, /* 0011111000 */ 0x1c, 0x00, /* 0001110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 55 0x37 '7' */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x80, /* 0001111110 */ 0x3f, 0x80, /* 0011111110 */ 0x61, 0x80, /* 0110000110 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 56 0x38 '8' */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x23, 0x00, /* 0010001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x31, 0x00, /* 0011000100 */ 0x1a, 0x00, /* 0001101000 */ 0x0c, 0x00, /* 0000110000 */ 0x16, 0x00, /* 0001011000 */ 0x23, 0x00, /* 0010001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x31, 0x00, /* 0011000100 */ 0x1e, 0x00, /* 0001111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 57 0x39 '9' */ 0x00, 0x00, /* 0000000000 */ 0x0e, 0x00, /* 0000111000 */ 0x17, 0x00, /* 0001011100 */ 0x23, 0x80, /* 0010001110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x3d, 0x80, /* 0011110110 */ 0x19, 0x80, /* 0001100110 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 58 0x3a ':' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x18, 0x00, /* 0001100000 */ 0x3c, 0x00, /* 0011110000 */ 0x3c, 0x00, /* 0011110000 */ 0x18, 0x00, /* 0001100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x18, 0x00, /* 
0001100000 */ 0x3c, 0x00, /* 0011110000 */ 0x3c, 0x00, /* 0011110000 */ 0x18, 0x00, /* 0001100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 59 0x3b ';' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x18, 0x00, /* 0001100000 */ 0x3c, 0x00, /* 0011110000 */ 0x3c, 0x00, /* 0011110000 */ 0x18, 0x00, /* 0001100000 */ 0x00, 0x00, /* 0000000000 */ 0x18, 0x00, /* 0001100000 */ 0x3c, 0x00, /* 0011110000 */ 0x3c, 0x00, /* 0011110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x20, 0x00, /* 0010000000 */ /* 60 0x3c '<' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x06, 0x00, /* 0000011000 */ 0x03, 0x00, /* 0000001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 61 0x3d '=' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 62 0x3e '>' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x06, 0x00, /* 0000011000 */ 0x03, 0x00, /* 0000001100 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 63 0x3f '?' 
*/ 0x00, 0x00, /* 0000000000 */ 0x0e, 0x00, /* 0000111000 */ 0x1f, 0x00, /* 0001111100 */ 0x3b, 0x80, /* 0011101110 */ 0x21, 0x80, /* 0010000110 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 64 0x40 '@' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0e, 0x00, /* 0000111000 */ 0x3f, 0x00, /* 0011111100 */ 0x31, 0x80, /* 0011000110 */ 0x65, 0x80, /* 0110010110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x6f, 0x80, /* 0110111110 */ 0x60, 0x00, /* 0110000000 */ 0x31, 0x80, /* 0011000110 */ 0x3f, 0x80, /* 0011111110 */ 0x0f, 0x00, /* 0000111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 65 0x41 'A' */ 0x00, 0x00, /* 0000000000 */ 0x04, 0x00, /* 0000010000 */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x19, 0x80, /* 0001100110 */ 0x31, 0x80, /* 0011000110 */ 0x3f, 0x80, /* 0011111110 */ 0x31, 0x80, /* 0011000110 */ 0x61, 0x80, /* 0110000110 */ 0x60, 0xc0, /* 0110000011 */ 0x60, 0xc0, /* 0110000011 */ 0xf1, 0xc0, /* 1111000111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 66 0x42 'B' */ 0x00, 0x00, /* 0000000000 */ 0xfc, 0x00, /* 1111110000 */ 0x62, 0x00, /* 0110001000 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x66, 0x00, /* 0110011000 */ 0x7e, 0x00, /* 0111111000 */ 0x63, 0x00, /* 0110001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x63, 0x00, /* 0110001100 */ 0xfe, 0x00, /* 1111111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 67 0x43 'C' */ 0x00, 0x00, /* 0000000000 */ 0x0f, 0x00, /* 0000111100 */ 0x11, 0x80, /* 0001000110 */ 0x20, 0x80, /* 0010000010 */ 0x20, 0x00, /* 0010000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x20, 0x00, /* 0010000000 */ 0x30, 0x80, /* 0011000010 */ 0x19, 0x00, /* 0001100100 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 68 0x44 'D' */ 0x00, 0x00, /* 0000000000 */ 0xfc, 0x00, /* 1111110000 */ 0x67, 0x00, /* 0110011100 */ 0x63, 0x00, /* 0110001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x00, /* 0110000100 */ 0x66, 0x00, /* 0110011000 */ 0xf8, 0x00, /* 1111100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 69 0x45 'E' */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x30, 0x80, /* 0011000010 */ 0x30, 0x80, /* 0011000010 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x31, 0x00, /* 0011000100 */ 0x3f, 0x00, /* 0011111100 */ 0x31, 0x00, /* 0011000100 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 
0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x80, /* 0011000010 */ 0x30, 0x80, /* 0011000010 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 70 0x46 'F' */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x30, 0x80, /* 0011000010 */ 0x30, 0x80, /* 0011000010 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x31, 0x00, /* 0011000100 */ 0x3f, 0x00, /* 0011111100 */ 0x31, 0x00, /* 0011000100 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x78, 0x00, /* 0111100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 71 0x47 'G' */ 0x00, 0x00, /* 0000000000 */ 0x0f, 0x00, /* 0000111100 */ 0x11, 0x80, /* 0001000110 */ 0x20, 0x80, /* 0010000010 */ 0x20, 0x00, /* 0010000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x67, 0xc0, /* 0110011111 */ 0x61, 0x80, /* 0110000110 */ 0x21, 0x80, /* 0010000110 */ 0x31, 0x80, /* 0011000110 */ 0x19, 0x80, /* 0001100110 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 72 0x48 'H' */ 0x00, 0x00, /* 0000000000 */ 0xf3, 0xc0, /* 1111001111 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x7f, 0x80, /* 0111111110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0xf3, 0xc0, /* 1111001111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 73 0x49 'I' */ 0x00, 0x00, /* 0000000000 */ 0x3f, 0x00, /* 0011111100 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 74 0x4a 'J' */ 0x00, 0x00, /* 0000000000 */ 0x3f, 0x00, /* 0011111100 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x08, 0x00, /* 0000100000 */ 0x70, 0x00, /* 0111000000 */ 0x60, 0x00, /* 0110000000 */ 0x00, 0x00, /* 0000000000 */ /* 75 0x4b 'K' */ 0x00, 0x00, /* 0000000000 */ 0xf1, 0x80, /* 1111000110 */ 0x63, 0x00, /* 0110001100 */ 0x66, 0x00, /* 0110011000 */ 0x6c, 0x00, /* 0110110000 */ 0x78, 0x00, /* 0111100000 */ 0x70, 0x00, /* 0111000000 */ 0x70, 0x00, /* 0111000000 */ 0x78, 0x00, /* 0111100000 */ 0x78, 0x00, /* 0111100000 */ 0x6c, 0x00, /* 0110110000 */ 0x66, 0x00, /* 0110011000 */ 0x63, 0x00, /* 0110001100 */ 0x61, 0x80, /* 0110000110 */ 0xf0, 0xc0, /* 1111000011 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 76 0x4c 'L' */ 0x00, 0x00, /* 0000000000 */ 0x78, 0x00, /* 0111100000 */ 0x30, 0x00, /* 
0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x80, /* 0011000010 */ 0x30, 0x80, /* 0011000010 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 77 0x4d 'M' */ 0x00, 0x00, /* 0000000000 */ 0xe0, 0xc0, /* 1110000011 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x73, 0x80, /* 0111001110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0xf3, 0xc0, /* 1111001111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 78 0x4e 'N' */ 0x00, 0x00, /* 0000000000 */ 0xf3, 0xc0, /* 1111001111 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x71, 0x80, /* 0111000110 */ 0x79, 0x80, /* 0111100110 */ 0x79, 0x80, /* 0111100110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x67, 0x80, /* 0110011110 */ 0x67, 0x80, /* 0110011110 */ 0x63, 0x80, /* 0110001110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0xf3, 0xc0, /* 1111001111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 79 0x4f 'O' */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x17, 0x00, /* 0001011100 */ 0x23, 0x00, /* 0010001100 */ 0x21, 0x80, /* 0010000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x21, 0x00, /* 0010000100 */ 0x31, 0x00, /* 0011000100 */ 0x1a, 0x00, /* 0001101000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 80 0x50 'P' */ 0x00, 0x00, /* 0000000000 */ 0xfe, 0x00, /* 1111111000 */ 0x63, 0x00, /* 0110001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x63, 0x00, /* 0110001100 */ 0x7e, 0x00, /* 0111111000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0xf0, 0x00, /* 1111000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 81 0x51 'Q' */ 0x00, 0x00, /* 0000000000 */ 0x0e, 0x00, /* 0000111000 */ 0x13, 0x00, /* 0001001100 */ 0x23, 0x00, /* 0010001100 */ 0x21, 0x80, /* 0010000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x31, 0x80, /* 0011000110 */ 0x3b, 0x00, /* 0011101100 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x26, 0x00, /* 0010011000 */ 0x03, 0x80, /* 0000001110 */ 0x00, 0x00, /* 0000000000 */ /* 82 0x52 'R' */ 0x00, 0x00, /* 0000000000 */ 0xfe, 0x00, /* 1111111000 */ 0x63, 0x00, /* 0110001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x00, /* 0110000100 */ 0x7e, 0x00, /* 0111111000 */ 0x78, 0x00, /* 0111100000 */ 0x6c, 0x00, /* 0110110000 */ 0x6e, 0x00, /* 0110111000 */ 0x67, 0x00, /* 0110011100 */ 0x63, 0x80, /* 0110001110 */ 0x61, 
0xc0, /* 0110000111 */ 0xf0, 0xc0, /* 1111000011 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 83 0x53 'S' */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x31, 0x80, /* 0011000110 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x06, 0x00, /* 0000011000 */ 0x03, 0x00, /* 0000001100 */ 0x01, 0x80, /* 0000000110 */ 0x41, 0x80, /* 0100000110 */ 0x63, 0x00, /* 0110001100 */ 0x3e, 0x00, /* 0011111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 84 0x54 'T' */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x4c, 0x80, /* 0100110010 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 85 0x55 'U' */ 0x00, 0x00, /* 0000000000 */ 0xf3, 0xc0, /* 1111001111 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x00, /* 0111001100 */ 0x3f, 0x00, /* 0011111100 */ 0x1e, 0x00, /* 0001111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 86 0x56 'V' */ 0x00, 0x00, /* 0000000000 */ 0xe1, 0xc0, /* 1110000111 */ 0xc0, 0xc0, /* 1100000011 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x12, 0x00, /* 0001001000 */ 0x1e, 0x00, /* 0001111000 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 87 0x57 'W' */ 0x00, 0x00, /* 0000000000 */ 0xe1, 0xc0, /* 1110000111 */ 0xc0, 0xc0, /* 1100000011 */ 0xc0, 0xc0, /* 1100000011 */ 0xc0, 0xc0, /* 1100000011 */ 0xe0, 0xc0, /* 1110000011 */ 0x61, 0x80, /* 0110000110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x7f, 0x80, /* 0111111110 */ 0x77, 0x00, /* 0111011100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 88 0x58 'X' */ 0x00, 0x00, /* 0000000000 */ 0xf7, 0x80, /* 1111011110 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x36, 0x00, /* 0011011000 */ 0x36, 0x00, /* 0011011000 */ 0x36, 0x00, /* 0011011000 */ 0x1c, 0x00, /* 0001110000 */ 0x1c, 0x00, /* 0001110000 */ 0x36, 0x00, /* 0011011000 */ 0x36, 0x00, /* 0011011000 */ 0x36, 0x00, /* 0011011000 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0xf7, 0x80, /* 1111011110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 89 0x59 'Y' */ 0x00, 0x00, /* 0000000000 */ 0xf3, 0xc0, /* 1111001111 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, 
/* 0011001100 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 90 0x5a 'Z' */ 0x00, 0x00, /* 0000000000 */ 0x3f, 0x80, /* 0011111110 */ 0x21, 0x80, /* 0010000110 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x80, /* 0011000010 */ 0x3f, 0x80, /* 0011111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 91 0x5b '[' */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x1f, 0x00, /* 0001111100 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x1f, 0x00, /* 0001111100 */ 0x1f, 0x00, /* 0001111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 92 0x5c '\' */ 0x00, 0x00, /* 0000000000 */ 0xc0, 0x00, /* 1100000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x00, 0xc0, /* 0000000011 */ 0x00, 0x00, /* 0000000000 */ /* 93 0x5d ']' */ 0x00, 0x00, /* 0000000000 */ 0x3e, 0x00, /* 0011111000 */ 0x3e, 0x00, /* 0011111000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x3e, 0x00, /* 0011111000 */ 0x3e, 0x00, /* 0011111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 94 0x5e '^' */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 95 0x5f '_' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 
0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ /* 96 0x60 '`' */ 0x04, 0x00, /* 0000010000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x1e, 0x00, /* 0001111000 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 97 0x61 'a' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x31, 0x80, /* 0011000110 */ 0x21, 0x80, /* 0010000110 */ 0x07, 0x80, /* 0000011110 */ 0x39, 0x80, /* 0011100110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x3d, 0xc0, /* 0011110111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 98 0x62 'b' */ 0x20, 0x00, /* 0010000000 */ 0x60, 0x00, /* 0110000000 */ 0xe0, 0x00, /* 1110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x66, 0x00, /* 0110011000 */ 0x6f, 0x00, /* 0110111100 */ 0x73, 0x80, /* 0111001110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x71, 0x80, /* 0111000110 */ 0x7b, 0x00, /* 0111101100 */ 0x4e, 0x00, /* 0100111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 99 0x63 'c' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x37, 0x00, /* 0011011100 */ 0x23, 0x00, /* 0010001100 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x71, 0x00, /* 0111000100 */ 0x33, 0x00, /* 0011001100 */ 0x1e, 0x00, /* 0001111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 100 0x64 'd' */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x80, /* 0000001110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x0d, 0x80, /* 0000110110 */ 0x37, 0x80, /* 0011011110 */ 0x23, 0x80, /* 0010001110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x35, 0x80, /* 0011010110 */ 0x19, 0xc0, /* 0001100111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 101 0x65 'e' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x7f, 0x80, /* 0111111110 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x19, 0x80, /* 0001100110 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 102 0x66 'f' */ 0x07, 0x00, /* 0000011100 */ 0x09, 0x80, /* 0000100110 */ 0x09, 0x80, /* 0000100110 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x7f, 0x00, /* 0111111100 */ 0x18, 
0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x3c, 0x00, /* 0011110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 103 0x67 'g' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1c, 0x80, /* 0001110010 */ 0x37, 0x80, /* 0011011110 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x36, 0x00, /* 0011011000 */ 0x3c, 0x00, /* 0011110000 */ 0x60, 0x00, /* 0110000000 */ 0x7f, 0x00, /* 0111111100 */ 0x3f, 0x80, /* 0011111110 */ 0x21, 0x80, /* 0010000110 */ 0x40, 0x80, /* 0100000010 */ 0x7f, 0x00, /* 0111111100 */ 0x3e, 0x00, /* 0011111000 */ /* 104 0x68 'h' */ 0x10, 0x00, /* 0001000000 */ 0x30, 0x00, /* 0011000000 */ 0x70, 0x00, /* 0111000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x37, 0x00, /* 0011011100 */ 0x3b, 0x80, /* 0011101110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x7b, 0xc0, /* 0111101111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 105 0x69 'i' */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 106 0x6a 'j' */ 0x00, 0x00, /* 0000000000 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x07, 0x80, /* 0000011110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x41, 0x80, /* 0100000110 */ 0x61, 0x80, /* 0110000110 */ 0x71, 0x80, /* 0111000110 */ 0x3f, 0x00, /* 0011111100 */ 0x1c, 0x00, /* 0001110000 */ /* 107 0x6b 'k' */ 0x60, 0x00, /* 0110000000 */ 0xe0, 0x00, /* 1110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x63, 0x80, /* 0110001110 */ 0x66, 0x00, /* 0110011000 */ 0x6c, 0x00, /* 0110110000 */ 0x78, 0x00, /* 0111100000 */ 0x70, 0x00, /* 0111000000 */ 0x78, 0x00, /* 0111100000 */ 0x6c, 0x00, /* 0110110000 */ 0x6e, 0x00, /* 0110111000 */ 0x67, 0x00, /* 0110011100 */ 0xf3, 0x80, /* 1111001110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 108 0x6c 'l' */ 0x3c, 0x00, /* 0011110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 109 0x6d 'm' */ 0x00, 
0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xdb, 0x80, /* 1101101110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0xed, 0xc0, /* 1110110111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 110 0x6e 'n' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x6f, 0x00, /* 0110111100 */ 0x7b, 0x80, /* 0111101110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x7b, 0xc0, /* 0111101111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 111 0x6f 'o' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x66, 0x00, /* 0110011000 */ 0xc3, 0x00, /* 1100001100 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xe1, 0x80, /* 1110000110 */ 0x73, 0x00, /* 0111001100 */ 0x3c, 0x00, /* 0011110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 112 0x70 'p' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xde, 0x00, /* 1101111000 */ 0x76, 0x00, /* 0111011000 */ 0x63, 0x00, /* 0110001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x71, 0x80, /* 0111000110 */ 0x7b, 0x00, /* 0111101100 */ 0x7e, 0x00, /* 0111111000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0xf0, 0x00, /* 1111000000 */ /* 113 0x71 'q' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0e, 0xc0, /* 0000111011 */ 0x1b, 0x80, /* 0001101110 */ 0x33, 0x80, /* 0011001110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x71, 0x80, /* 0111000110 */ 0x3b, 0x80, /* 0011101110 */ 0x1f, 0x80, /* 0001111110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0xc0, /* 0000001111 */ /* 114 0x72 'r' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x73, 0x00, /* 0111001100 */ 0x35, 0x80, /* 0011010110 */ 0x39, 0x80, /* 0011100110 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x78, 0x00, /* 0111100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 115 0x73 's' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x3f, 0x00, /* 0011111100 */ 0x63, 0x00, /* 0110001100 */ 0x61, 0x00, /* 0110000100 */ 0x70, 0x00, /* 0111000000 */ 0x38, 0x00, /* 0011100000 */ 0x0e, 0x00, /* 
0000111000 */ 0x07, 0x00, /* 0000011100 */ 0x43, 0x00, /* 0100001100 */ 0x63, 0x00, /* 0110001100 */ 0x7e, 0x00, /* 0111111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 116 0x74 't' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x18, 0x00, /* 0001100000 */ 0x7f, 0x80, /* 0111111110 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x1c, 0x80, /* 0001110010 */ 0x0f, 0x00, /* 0000111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 117 0x75 'u' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xf7, 0x80, /* 1111011110 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x77, 0x00, /* 0111011100 */ 0x3d, 0x80, /* 0011110110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 118 0x76 'v' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xf1, 0xc0, /* 1111000111 */ 0x60, 0xc0, /* 0110000011 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x19, 0x80, /* 0001100110 */ 0x1b, 0x00, /* 0001101100 */ 0x0f, 0x00, /* 0000111100 */ 0x0f, 0x00, /* 0000111100 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 119 0x77 'w' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xe3, 0xc0, /* 1110001111 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0x6b, 0x00, /* 0110101100 */ 0x6b, 0x00, /* 0110101100 */ 0x7e, 0x00, /* 0111111000 */ 0x36, 0x00, /* 0011011000 */ 0x36, 0x00, /* 0011011000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 120 0x78 'x' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xf7, 0x80, /* 1111011110 */ 0x63, 0x00, /* 0110001100 */ 0x36, 0x00, /* 0011011000 */ 0x36, 0x00, /* 0011011000 */ 0x1c, 0x00, /* 0001110000 */ 0x1c, 0x00, /* 0001110000 */ 0x36, 0x00, /* 0011011000 */ 0x66, 0x00, /* 0110011000 */ 0x63, 0x00, /* 0110001100 */ 0xf7, 0x80, /* 1111011110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 121 0x79 'y' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xf3, 0xc0, /* 1111001111 */ 0x61, 0x80, /* 0110000110 */ 0x33, 0x00, /* 0011001100 */ 0x1b, 0x00, /* 0001101100 */ 0x1e, 0x00, /* 0001111000 */ 0x0e, 0x00, /* 0000111000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x78, 0x00, /* 0111100000 */ 0x70, 0x00, /* 0111000000 */ /* 122 0x7a 'z' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 
0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x61, 0x80, /* 0110000110 */ 0x43, 0x00, /* 0100001100 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x60, 0x80, /* 0110000010 */ 0x61, 0x80, /* 0110000110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 123 0x7b '{' */ 0x07, 0x00, /* 0000011100 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x70, 0x00, /* 0111000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x07, 0x00, /* 0000011100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 124 0x7c '|' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ /* 125 0x7d '}' */ 0x38, 0x00, /* 0011100000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x06, 0x00, /* 0000011000 */ 0x03, 0x80, /* 0000001110 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x38, 0x00, /* 0011100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 126 0x7e '~' */ 0x00, 0x00, /* 0000000000 */ 0x18, 0x80, /* 0001100010 */ 0x3d, 0x80, /* 0011110110 */ 0x6f, 0x00, /* 0110111100 */ 0x46, 0x00, /* 0100011000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 127 0x7f '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x12, 0x00, /* 0001001000 */ 0x21, 0x00, /* 0010000100 */ 0x40, 0x80, /* 0100000010 */ 0x40, 0x80, /* 0100000010 */ 0x40, 0x80, /* 0100000010 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 128 0x80 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x21, 0x80, /* 0010000110 */ 0x40, 0x80, /* 0100000010 */ 0x40, 0x00, /* 0100000000 */ 0x40, 0x00, /* 0100000000 */ 0x40, 0x00, /* 0100000000 */ 0x40, 0x00, /* 0100000000 */ 0x40, 0x00, /* 0100000000 */ 0x40, 0x00, /* 0100000000 */ 0x60, 0x80, /* 0110000010 */ 0x31, 0x00, /* 0011000100 */ 0x1e, 0x00, /* 0001111000 */ 0x08, 0x00, /* 0000100000 */ 0x04, 0x00, /* 0000010000 */ 0x02, 0x00, /* 0000001000 */ 0x02, 0x00, /* 0000001000 */ 0x1c, 0x00, /* 0001110000 */ /* 129 0x81 '.' */ 0x00, 0x00, /* 0000000000 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7b, 0x80, /* 0111101110 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x3b, 0x00, /* 0011101100 */ 0x1c, 0x80, /* 0001110010 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 130 0x82 '.' */ 0x00, 0x00, /* 0000000000 */ 0x01, 0x00, /* 0000000100 */ 0x02, 0x00, /* 0000001000 */ 0x04, 0x00, /* 0000010000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x7f, 0x80, /* 0111111110 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x19, 0x80, /* 0001100110 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 131 0x83 '.' */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x1b, 0x00, /* 0001101100 */ 0x31, 0x80, /* 0011000110 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x31, 0x80, /* 0011000110 */ 0x21, 0x80, /* 0010000110 */ 0x07, 0x80, /* 0000011110 */ 0x39, 0x80, /* 0011100110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x3d, 0xc0, /* 0011110111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 132 0x84 '.' */ 0x00, 0x00, /* 0000000000 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x31, 0x80, /* 0011000110 */ 0x21, 0x80, /* 0010000110 */ 0x07, 0x80, /* 0000011110 */ 0x39, 0x80, /* 0011100110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x3d, 0xc0, /* 0011110111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 133 0x85 '.' */ 0x00, 0x00, /* 0000000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x06, 0x00, /* 0000011000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x31, 0x80, /* 0011000110 */ 0x21, 0x80, /* 0010000110 */ 0x07, 0x80, /* 0000011110 */ 0x39, 0x80, /* 0011100110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x3d, 0xc0, /* 0011110111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 134 0x86 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x0e, 0x00, /* 0000111000 */ 0x1b, 0x00, /* 0001101100 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x31, 0x80, /* 0011000110 */ 0x21, 0x80, /* 0010000110 */ 0x07, 0x80, /* 0000011110 */ 0x39, 0x80, /* 0011100110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x3d, 0xc0, /* 0011110111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 135 0x87 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x31, 0x80, /* 0011000110 */ 0x20, 0x80, /* 0010000010 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x70, 0x80, /* 0111000010 */ 0x30, 0x80, /* 0011000010 */ 0x1f, 0x00, /* 0001111100 */ 0x04, 0x00, /* 0000010000 */ 0x02, 0x00, /* 0000001000 */ 0x01, 0x00, /* 0000000100 */ 0x0e, 0x00, /* 0000111000 */ /* 136 0x88 '.' */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x1b, 0x00, /* 0001101100 */ 0x31, 0x80, /* 0011000110 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x7f, 0x80, /* 0111111110 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x19, 0x80, /* 0001100110 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 137 0x89 '.' */ 0x00, 0x00, /* 0000000000 */ 0x36, 0x00, /* 0011011000 */ 0x36, 0x00, /* 0011011000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x7f, 0x80, /* 0111111110 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x19, 0x80, /* 0001100110 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 138 0x8a '.' */ 0x00, 0x00, /* 0000000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x7f, 0x80, /* 0111111110 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x19, 0x80, /* 0001100110 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 139 0x8b '.' */ 0x00, 0x00, /* 0000000000 */ 0x36, 0x00, /* 0011011000 */ 0x36, 0x00, /* 0011011000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 140 0x8c '.' 
*/ 0x08, 0x00, /* 0000100000 */ 0x1c, 0x00, /* 0001110000 */ 0x36, 0x00, /* 0011011000 */ 0x63, 0x00, /* 0110001100 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 141 0x8d '.' */ 0x00, 0x00, /* 0000000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 142 0x8e '.' */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x04, 0x00, /* 0000010000 */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x19, 0x00, /* 0001100100 */ 0x19, 0x00, /* 0001100100 */ 0x3f, 0x00, /* 0011111100 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x80, /* 0110000010 */ 0xf3, 0xc0, /* 1111001111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 143 0x8f '.' */ 0x04, 0x00, /* 0000010000 */ 0x0a, 0x00, /* 0000101000 */ 0x0a, 0x00, /* 0000101000 */ 0x04, 0x00, /* 0000010000 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x0e, 0x00, /* 0000111000 */ 0x19, 0x00, /* 0001100100 */ 0x19, 0x00, /* 0001100100 */ 0x3f, 0x00, /* 0011111100 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x80, /* 0110000010 */ 0xf3, 0xc0, /* 1111001111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 144 0x90 '.' */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x7f, 0x80, /* 0111111110 */ 0x30, 0x80, /* 0011000010 */ 0x30, 0x80, /* 0011000010 */ 0x30, 0x00, /* 0011000000 */ 0x31, 0x00, /* 0011000100 */ 0x3f, 0x00, /* 0011111100 */ 0x31, 0x00, /* 0011000100 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x80, /* 0011000010 */ 0x30, 0x80, /* 0011000010 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 145 0x91 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x3b, 0x80, /* 0011101110 */ 0x6c, 0xc0, /* 0110110011 */ 0x4c, 0xc0, /* 0100110011 */ 0x0c, 0xc0, /* 0000110011 */ 0x3f, 0xc0, /* 0011111111 */ 0x6c, 0x00, /* 0110110000 */ 0xcc, 0x00, /* 1100110000 */ 0xcc, 0x00, /* 1100110000 */ 0xee, 0xc0, /* 1110111011 */ 0x7b, 0x80, /* 0111101110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 146 0x92 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x07, 0xc0, /* 0000011111 */ 0x0e, 0x40, /* 0000111001 */ 0x0e, 0x40, /* 0000111001 */ 0x0e, 0x00, /* 0000111000 */ 0x16, 0x00, /* 0001011000 */ 0x16, 0x80, /* 0001011010 */ 0x17, 0x80, /* 0001011110 */ 0x16, 0x80, /* 0001011010 */ 0x3e, 0x00, /* 0011111000 */ 0x26, 0x00, /* 0010011000 */ 0x26, 0x00, /* 0010011000 */ 0x46, 0x40, /* 0100011001 */ 0x46, 0x40, /* 0100011001 */ 0xef, 0xc0, /* 1110111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 147 0x93 '.' */ 0x00, 0x00, /* 0000000000 */ 0x08, 0x00, /* 0000100000 */ 0x1c, 0x00, /* 0001110000 */ 0x36, 0x00, /* 0011011000 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x66, 0x00, /* 0110011000 */ 0xc3, 0x00, /* 1100001100 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xe1, 0x80, /* 1110000110 */ 0x73, 0x00, /* 0111001100 */ 0x3c, 0x00, /* 0011110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 148 0x94 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x66, 0x00, /* 0110011000 */ 0xc3, 0x00, /* 1100001100 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xe1, 0x80, /* 1110000110 */ 0x73, 0x00, /* 0111001100 */ 0x3c, 0x00, /* 0011110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 149 0x95 '.' */ 0x00, 0x00, /* 0000000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x06, 0x00, /* 0000011000 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x66, 0x00, /* 0110011000 */ 0xc3, 0x00, /* 1100001100 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xe1, 0x80, /* 1110000110 */ 0x73, 0x00, /* 0111001100 */ 0x3c, 0x00, /* 0011110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 150 0x96 '.' */ 0x08, 0x00, /* 0000100000 */ 0x1c, 0x00, /* 0001110000 */ 0x36, 0x00, /* 0011011000 */ 0x63, 0x00, /* 0110001100 */ 0x00, 0x00, /* 0000000000 */ 0xf7, 0x80, /* 1111011110 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x77, 0x00, /* 0111011100 */ 0x3d, 0x80, /* 0011110110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 151 0x97 '.' */ 0x00, 0x00, /* 0000000000 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x00, 0x00, /* 0000000000 */ 0xf7, 0x80, /* 1111011110 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x77, 0x00, /* 0111011100 */ 0x3d, 0x80, /* 0011110110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 152 0x98 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xf3, 0xc0, /* 1111001111 */ 0x61, 0x80, /* 0110000110 */ 0x33, 0x00, /* 0011001100 */ 0x1b, 0x00, /* 0001101100 */ 0x1e, 0x00, /* 0001111000 */ 0x0e, 0x00, /* 0000111000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x78, 0x00, /* 0111100000 */ 0x70, 0x00, /* 0111000000 */ /* 153 0x99 '.' */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x0c, 0x00, /* 0000110000 */ 0x17, 0x00, /* 0001011100 */ 0x23, 0x00, /* 0010001100 */ 0x21, 0x80, /* 0010000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x21, 0x00, /* 0010000100 */ 0x31, 0x00, /* 0011000100 */ 0x1a, 0x00, /* 0001101000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 154 0x9a '.' */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x00, 0x00, /* 0000000000 */ 0xf1, 0xc0, /* 1111000111 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x80, /* 0110000010 */ 0x60, 0x80, /* 0110000010 */ 0x71, 0x00, /* 0111000100 */ 0x3f, 0x00, /* 0011111100 */ 0x1e, 0x00, /* 0001111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 155 0x9b '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x1f, 0x80, /* 0001111110 */ 0x36, 0x80, /* 0011011010 */ 0x26, 0x00, /* 0010011000 */ 0x66, 0x00, /* 0110011000 */ 0x66, 0x00, /* 0110011000 */ 0x66, 0x00, /* 0110011000 */ 0x66, 0x00, /* 0110011000 */ 0x76, 0x00, /* 0111011000 */ 0x36, 0x80, /* 0011011010 */ 0x1f, 0x80, /* 0001111110 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x00, 0x00, /* 0000000000 */ /* 156 0x9c '.' */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x3b, 0x00, /* 0011101100 */ 0x33, 0x00, /* 0011001100 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x7e, 0x00, /* 0111111000 */ 0x7e, 0x00, /* 0111111000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x7c, 0x80, /* 0111110010 */ 0x7f, 0x80, /* 0111111110 */ 0x43, 0x00, /* 0100001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 157 0x9d '.' */ 0x00, 0x00, /* 0000000000 */ 0x40, 0x80, /* 0100000010 */ 0x40, 0x80, /* 0100000010 */ 0x21, 0x00, /* 0010000100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 158 0x9e '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0xbf, 0x00, /* 1011111100 */ 0x40, 0x80, /* 0100000010 */ 0x40, 0x80, /* 0100000010 */ 0x7f, 0x00, /* 0111111100 */ 0x40, 0x00, /* 0100000000 */ 0x48, 0x00, /* 0100100000 */ 0x48, 0x00, /* 0100100000 */ 0x5e, 0x00, /* 0101111000 */ 0x48, 0x00, /* 0100100000 */ 0x48, 0x00, /* 0100100000 */ 0x48, 0x00, /* 0100100000 */ 0x48, 0x80, /* 0100100010 */ 0x47, 0x00, /* 0100011100 */ 0xe0, 0x00, /* 1110000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 159 0x9f '.' */ 0x00, 0x00, /* 0000000000 */ 0x03, 0x00, /* 0000001100 */ 0x04, 0x80, /* 0000010010 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x09, 0x00, /* 0000100100 */ 0x3e, 0x00, /* 0011111000 */ 0x48, 0x00, /* 0100100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x08, 0x00, /* 0000100000 */ 0x90, 0x00, /* 1001000000 */ 0x60, 0x00, /* 0110000000 */ 0x00, 0x00, /* 0000000000 */ /* 160 0xa0 '.' */ 0x00, 0x00, /* 0000000000 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0x00, /* 0001111100 */ 0x31, 0x80, /* 0011000110 */ 0x21, 0x80, /* 0010000110 */ 0x07, 0x80, /* 0000011110 */ 0x39, 0x80, /* 0011100110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x3d, 0xc0, /* 0011110111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 161 0xa1 '.' */ 0x00, 0x00, /* 0000000000 */ 0x03, 0x00, /* 0000001100 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3f, 0x00, /* 0011111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 162 0xa2 '.' */ 0x00, 0x00, /* 0000000000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x00, 0x00, /* 0000000000 */ 0x3c, 0x00, /* 0011110000 */ 0x66, 0x00, /* 0110011000 */ 0xc3, 0x00, /* 1100001100 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xc1, 0x80, /* 1100000110 */ 0xe1, 0x80, /* 1110000110 */ 0x73, 0x00, /* 0111001100 */ 0x3c, 0x00, /* 0011110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 163 0xa3 '.' */ 0x00, 0x00, /* 0000000000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x00, 0x00, /* 0000000000 */ 0xf7, 0x80, /* 1111011110 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x77, 0x00, /* 0111011100 */ 0x3d, 0x80, /* 0011110110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 164 0xa4 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x38, 0x80, /* 0011100010 */ 0x7f, 0x80, /* 0111111110 */ 0x47, 0x00, /* 0100011100 */ 0x00, 0x00, /* 0000000000 */ 0x6f, 0x00, /* 0110111100 */ 0x7b, 0x80, /* 0111101110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x7b, 0xc0, /* 0111101111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 165 0xa5 '.' */ 0x00, 0x00, /* 0000000000 */ 0x38, 0x80, /* 0011100010 */ 0x7f, 0x80, /* 0111111110 */ 0x47, 0x00, /* 0100011100 */ 0x00, 0x00, /* 0000000000 */ 0xe3, 0xc0, /* 1110001111 */ 0x71, 0x80, /* 0111000110 */ 0x79, 0x80, /* 0111100110 */ 0x79, 0x80, /* 0111100110 */ 0x6d, 0x80, /* 0110110110 */ 0x6d, 0x80, /* 0110110110 */ 0x67, 0x80, /* 0110011110 */ 0x63, 0x80, /* 0110001110 */ 0x61, 0x80, /* 0110000110 */ 0xf0, 0xc0, /* 1111000011 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 166 0xa6 '.' */ 0x00, 0x00, /* 0000000000 */ 0x3e, 0x00, /* 0011111000 */ 0x63, 0x00, /* 0110001100 */ 0x03, 0x00, /* 0000001100 */ 0x0f, 0x00, /* 0000111100 */ 0x33, 0x00, /* 0011001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x67, 0x00, /* 0110011100 */ 0x3b, 0x80, /* 0011101110 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 167 0xa7 '.' */ 0x00, 0x00, /* 0000000000 */ 0x0e, 0x00, /* 0000111000 */ 0x33, 0x00, /* 0011001100 */ 0x21, 0x80, /* 0010000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x00, /* 0110000100 */ 0x33, 0x00, /* 0011001100 */ 0x1c, 0x00, /* 0001110000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 168 0xa8 '.' */ 0x00, 0x00, /* 0000000000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x00, 0x00, /* 0000000000 */ 0x06, 0x00, /* 0000011000 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x80, /* 0110000010 */ 0x73, 0x80, /* 0111001110 */ 0x3f, 0x00, /* 0011111100 */ 0x1e, 0x00, /* 0001111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 169 0xa9 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 170 0xaa '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 171 0xab '.' */ 0x00, 0x00, /* 0000000000 */ 0x20, 0x00, /* 0010000000 */ 0x60, 0x00, /* 0110000000 */ 0x20, 0x00, /* 0010000000 */ 0x20, 0x80, /* 0010000010 */ 0x21, 0x00, /* 0010000100 */ 0x22, 0x00, /* 0010001000 */ 0x74, 0x00, /* 0111010000 */ 0x08, 0x00, /* 0000100000 */ 0x17, 0x00, /* 0001011100 */ 0x28, 0x80, /* 0010100010 */ 0x43, 0x00, /* 0100001100 */ 0x04, 0x00, /* 0000010000 */ 0x08, 0x00, /* 0000100000 */ 0x0f, 0x80, /* 0000111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 172 0xac '.' */ 0x00, 0x00, /* 0000000000 */ 0x20, 0x00, /* 0010000000 */ 0x60, 0x00, /* 0110000000 */ 0x20, 0x00, /* 0010000000 */ 0x20, 0x80, /* 0010000010 */ 0x21, 0x00, /* 0010000100 */ 0x22, 0x00, /* 0010001000 */ 0x74, 0x00, /* 0111010000 */ 0x09, 0x00, /* 0000100100 */ 0x13, 0x00, /* 0001001100 */ 0x25, 0x00, /* 0010010100 */ 0x49, 0x00, /* 0100100100 */ 0x1f, 0x80, /* 0001111110 */ 0x01, 0x00, /* 0000000100 */ 0x01, 0x00, /* 0000000100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 173 0xad '.' */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 174 0xae '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0d, 0x80, /* 0000110110 */ 0x1b, 0x00, /* 0001101100 */ 0x36, 0x00, /* 0011011000 */ 0x6c, 0x00, /* 0110110000 */ 0xd8, 0x00, /* 1101100000 */ 0x6c, 0x00, /* 0110110000 */ 0x36, 0x00, /* 0011011000 */ 0x1b, 0x00, /* 0001101100 */ 0x0d, 0x80, /* 0000110110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 175 0xaf '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x6c, 0x00, /* 0110110000 */ 0x36, 0x00, /* 0011011000 */ 0x1b, 0x00, /* 0001101100 */ 0x0d, 0x80, /* 0000110110 */ 0x06, 0xc0, /* 0000011011 */ 0x0d, 0x80, /* 0000110110 */ 0x1b, 0x00, /* 0001101100 */ 0x36, 0x00, /* 0011011000 */ 0x6c, 0x00, /* 0110110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 176 0xb0 '.' 
*/ 0xc3, 0x00, /* 1100001100 */ 0x41, 0x00, /* 0100000100 */ 0x18, 0x40, /* 0001100001 */ 0x10, 0x40, /* 0001000001 */ 0xc3, 0x00, /* 1100001100 */ 0x41, 0x00, /* 0100000100 */ 0x18, 0x40, /* 0001100001 */ 0x10, 0x40, /* 0001000001 */ 0xc3, 0x00, /* 1100001100 */ 0x41, 0x00, /* 0100000100 */ 0x18, 0x40, /* 0001100001 */ 0x10, 0x40, /* 0001000001 */ 0xc3, 0x00, /* 1100001100 */ 0x41, 0x00, /* 0100000100 */ 0x18, 0x40, /* 0001100001 */ 0x10, 0x40, /* 0001000001 */ 0xc3, 0x00, /* 1100001100 */ 0x41, 0x00, /* 0100000100 */ /* 177 0xb1 '.' */ 0x11, 0x00, /* 0001000100 */ 0xbb, 0x80, /* 1011101110 */ 0x11, 0x00, /* 0001000100 */ 0x44, 0x40, /* 0100010001 */ 0xee, 0xc0, /* 1110111011 */ 0x44, 0x40, /* 0100010001 */ 0x11, 0x00, /* 0001000100 */ 0xbb, 0x80, /* 1011101110 */ 0x11, 0x00, /* 0001000100 */ 0x44, 0x40, /* 0100010001 */ 0xee, 0xc0, /* 1110111011 */ 0x44, 0x40, /* 0100010001 */ 0x11, 0x00, /* 0001000100 */ 0xbb, 0x80, /* 1011101110 */ 0x11, 0x00, /* 0001000100 */ 0x44, 0x40, /* 0100010001 */ 0xee, 0xc0, /* 1110111011 */ 0x44, 0x40, /* 0100010001 */ /* 178 0xb2 '.' */ 0x3c, 0xc0, /* 0011110011 */ 0xbe, 0xc0, /* 1011111011 */ 0xe7, 0x80, /* 1110011110 */ 0xef, 0x80, /* 1110111110 */ 0x3c, 0xc0, /* 0011110011 */ 0xbe, 0xc0, /* 1011111011 */ 0xe7, 0x80, /* 1110011110 */ 0xef, 0x80, /* 1110111110 */ 0x3c, 0xc0, /* 0011110011 */ 0xbe, 0xc0, /* 1011111011 */ 0xe7, 0x80, /* 1110011110 */ 0xef, 0x80, /* 1110111110 */ 0x3c, 0xc0, /* 0011110011 */ 0xbe, 0xc0, /* 1011111011 */ 0xe7, 0x80, /* 1110011110 */ 0xef, 0x80, /* 1110111110 */ 0x3c, 0xc0, /* 0011110011 */ 0xbe, 0xc0, /* 1011111011 */ /* 179 0xb3 '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 180 0xb4 '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0xfc, 0x00, /* 1111110000 */ 0xfc, 0x00, /* 1111110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 181 0xb5 '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0xfc, 0x00, /* 1111110000 */ 0xfc, 0x00, /* 1111110000 */ 0x0c, 0x00, /* 0000110000 */ 0xfc, 0x00, /* 1111110000 */ 0xfc, 0x00, /* 1111110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 182 0xb6 '.' 
*/ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0xfb, 0x00, /* 1111101100 */ 0xfb, 0x00, /* 1111101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 183 0xb7 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0x00, /* 1111111100 */ 0xff, 0x00, /* 1111111100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 184 0xb8 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xfc, 0x00, /* 1111110000 */ 0xfc, 0x00, /* 1111110000 */ 0x0c, 0x00, /* 0000110000 */ 0xfc, 0x00, /* 1111110000 */ 0xfc, 0x00, /* 1111110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 185 0xb9 '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0xfb, 0x00, /* 1111101100 */ 0xfb, 0x00, /* 1111101100 */ 0x03, 0x00, /* 0000001100 */ 0xfb, 0x00, /* 1111101100 */ 0xfb, 0x00, /* 1111101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 186 0xba '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 187 0xbb '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0x00, /* 1111111100 */ 0xff, 0x00, /* 1111111100 */ 0x03, 0x00, /* 0000001100 */ 0xfb, 0x00, /* 1111101100 */ 0xfb, 0x00, /* 1111101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 188 0xbc '.' 
*/ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0xfb, 0x00, /* 1111101100 */ 0xfb, 0x00, /* 1111101100 */ 0x03, 0x00, /* 0000001100 */ 0xff, 0x00, /* 1111111100 */ 0xff, 0x00, /* 1111111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 189 0xbd '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0xff, 0x00, /* 1111111100 */ 0xff, 0x00, /* 1111111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 190 0xbe '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0xfc, 0x00, /* 1111110000 */ 0xfc, 0x00, /* 1111110000 */ 0x0c, 0x00, /* 0000110000 */ 0xfc, 0x00, /* 1111110000 */ 0xfc, 0x00, /* 1111110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 191 0xbf '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xfc, 0x00, /* 1111110000 */ 0xfc, 0x00, /* 1111110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 192 0xc0 '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 193 0xc1 '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 194 0xc2 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 195 0xc3 '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 196 0xc4 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 197 0xc5 '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 198 0xc6 '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 199 0xc7 '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0xc0, /* 0001101111 */ 0x1b, 0xc0, /* 0001101111 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 200 0xc8 '.' 
*/ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0xc0, /* 0001101111 */ 0x1b, 0xc0, /* 0001101111 */ 0x18, 0x00, /* 0001100000 */ 0x1f, 0xc0, /* 0001111111 */ 0x1f, 0xc0, /* 0001111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 201 0xc9 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0xc0, /* 0001111111 */ 0x1f, 0xc0, /* 0001111111 */ 0x18, 0x00, /* 0001100000 */ 0x1b, 0xc0, /* 0001101111 */ 0x1b, 0xc0, /* 0001101111 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 202 0xca '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0xfb, 0xc0, /* 1111101111 */ 0xfb, 0xc0, /* 1111101111 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 203 0xcb '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0xfb, 0xc0, /* 1111101111 */ 0xfb, 0xc0, /* 1111101111 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 204 0xcc '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0xc0, /* 0001101111 */ 0x1b, 0xc0, /* 0001101111 */ 0x18, 0x00, /* 0001100000 */ 0x1b, 0xc0, /* 0001101111 */ 0x1b, 0xc0, /* 0001101111 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 205 0xcd '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 206 0xce '.' 
*/ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0xfb, 0xc0, /* 1111101111 */ 0xfb, 0xc0, /* 1111101111 */ 0x00, 0x00, /* 0000000000 */ 0xfb, 0xc0, /* 1111101111 */ 0xfb, 0xc0, /* 1111101111 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 207 0xcf '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 208 0xd0 '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 209 0xd1 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 210 0xd2 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 211 0xd3 '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1f, 0xc0, /* 0001111111 */ 0x1f, 0xc0, /* 0001111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 212 0xd4 '.' 
*/ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 213 0xd5 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 214 0xd6 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0xc0, /* 0001111111 */ 0x1f, 0xc0, /* 0001111111 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 215 0xd7 '.' */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ 0x1b, 0x00, /* 0001101100 */ /* 216 0xd8 '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x0c, 0x00, /* 0000110000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 217 0xd9 '.' */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0xfc, 0x00, /* 1111110000 */ 0xfc, 0x00, /* 1111110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 218 0xda '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ /* 219 0xdb '.' */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ /* 220 0xdc '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ /* 221 0xdd '.' */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ 0xf8, 0x00, /* 1111100000 */ /* 222 0xde '.' */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ 0x07, 0xc0, /* 0000011111 */ /* 223 0xdf '.' */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0xff, 0xc0, /* 1111111111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 224 0xe0 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1c, 0x80, /* 0001110010 */ 0x35, 0x80, /* 0011010110 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x63, 0x00, /* 0110001100 */ 0x37, 0x80, /* 0011011110 */ 0x1c, 0x80, /* 0001110010 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 225 0xe1 '.' */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x63, 0x00, /* 0110001100 */ 0x6f, 0x00, /* 0110111100 */ 0x63, 0x00, /* 0110001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x63, 0x00, /* 0110001100 */ 0x6e, 0x00, /* 0110111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 226 0xe2 '.' */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 227 0xe3 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 228 0xe4 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0x80, /* 1111111110 */ 0x60, 0x00, /* 0110000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x80, /* 0011000010 */ 0x61, 0x80, /* 0110000110 */ 0xff, 0x80, /* 1111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 229 0xe5 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1f, 0xc0, /* 0001111111 */ 0x36, 0x00, /* 0011011000 */ 0x63, 0x00, /* 0110001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x33, 0x00, /* 0011001100 */ 0x3e, 0x00, /* 0011111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 230 0xe6 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x73, 0x80, /* 0111001110 */ 0x6d, 0x80, /* 0110110110 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0xc0, 0x00, /* 1100000000 */ /* 231 0xe7 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x01, 0x80, /* 0000000110 */ 0x36, 0x40, /* 0011011001 */ 0x5e, 0x00, /* 0101111000 */ 0x8c, 0x00, /* 1000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 232 0xe8 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x33, 0x00, /* 0011001100 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 233 0xe9 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0e, 0x00, /* 0000111000 */ 0x1f, 0x00, /* 0001111100 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x60, 0xc0, /* 0110000011 */ 0x7f, 0xc0, /* 0111111111 */ 0x7f, 0xc0, /* 0111111111 */ 0x60, 0xc0, /* 0110000011 */ 0x31, 0x80, /* 0011000110 */ 0x31, 0x80, /* 0011000110 */ 0x1f, 0x00, /* 0001111100 */ 0x0e, 0x00, /* 0000111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 234 0xea '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0xc0, 0xc0, /* 1100000011 */ 0xc0, 0xc0, /* 1100000011 */ 0xc0, 0xc0, /* 1100000011 */ 0x61, 0x80, /* 0110000110 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0xf3, 0xc0, /* 1111001111 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 235 0xeb '.' */ 0x00, 0x00, /* 0000000000 */ 0x07, 0x00, /* 0000011100 */ 0x1f, 0x80, /* 0001111110 */ 0x30, 0xc0, /* 0011000011 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x3e, 0x00, /* 0011111000 */ 0x66, 0x00, /* 0110011000 */ 0xc3, 0x00, /* 1100001100 */ 0xc3, 0x00, /* 1100001100 */ 0xc3, 0x00, /* 1100001100 */ 0x66, 0x00, /* 0110011000 */ 0x3c, 0x00, /* 0011110000 */ 0x18, 0x00, /* 0001100000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 236 0xec '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x33, 0x00, /* 0011001100 */ 0x6d, 0x80, /* 0110110110 */ 0xcc, 0xc0, /* 1100110011 */ 0xcc, 0xc0, /* 1100110011 */ 0xcc, 0xc0, /* 1100110011 */ 0xcc, 0xc0, /* 1100110011 */ 0x6d, 0x80, /* 0110110110 */ 0x33, 0x00, /* 0011001100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 237 0xed '.' */ 0x00, 0x00, /* 0000000000 */ 0x01, 0x80, /* 0000000110 */ 0x01, 0x80, /* 0000000110 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x37, 0x00, /* 0011011100 */ 0x6d, 0x80, /* 0110110110 */ 0xcc, 0xc0, /* 1100110011 */ 0xcc, 0xc0, /* 1100110011 */ 0xcc, 0xc0, /* 1100110011 */ 0xcc, 0xc0, /* 1100110011 */ 0x6d, 0x80, /* 0110110110 */ 0x3b, 0x00, /* 0011101100 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x60, 0x00, /* 0110000000 */ 0x60, 0x00, /* 0110000000 */ 0x00, 0x00, /* 0000000000 */ /* 238 0xee '.' */ 0x00, 0x00, /* 0000000000 */ 0x03, 0x80, /* 0000001110 */ 0x0e, 0x00, /* 0000111000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x3f, 0x80, /* 0011111110 */ 0x3f, 0x80, /* 0011111110 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x18, 0x00, /* 0001100000 */ 0x18, 0x00, /* 0001100000 */ 0x0e, 0x00, /* 0000111000 */ 0x03, 0x80, /* 0000001110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 239 0xef '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x33, 0x00, /* 0011001100 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x61, 0x80, /* 0110000110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 240 0xf0 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 241 0xf1 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 242 0xf2 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0xe0, 0x00, /* 1110000000 */ 0x38, 0x00, /* 0011100000 */ 0x0e, 0x00, /* 0000111000 */ 0x03, 0x80, /* 0000001110 */ 0x0e, 0x00, /* 0000111000 */ 0x38, 0x00, /* 0011100000 */ 0xe0, 0x00, /* 1110000000 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0x00, /* 1111111100 */ 0xff, 0x00, /* 1111111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 243 0xf3 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x03, 0x80, /* 0000001110 */ 0x0e, 0x00, /* 0000111000 */ 0x38, 0x00, /* 0011100000 */ 0xe0, 0x00, /* 1110000000 */ 0x38, 0x00, /* 0011100000 */ 0x0e, 0x00, /* 0000111000 */ 0x03, 0x80, /* 0000001110 */ 0x00, 0x00, /* 0000000000 */ 0xff, 0x80, /* 1111111110 */ 0xff, 0x80, /* 1111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 244 0xf4 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x30, 0x00, /* 0011000000 */ 0x00, 0x00, /* 0000000000 */ /* 245 0xf5 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x03, 0x00, /* 0000001100 */ 0x00, 0x00, /* 0000000000 */ /* 246 0xf6 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 247 0xf7 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x38, 0x00, /* 0011100000 */ 0x6c, 0x00, /* 0110110000 */ 0x06, 0xc0, /* 0000011011 */ 0x03, 0x80, /* 0000001110 */ 0x38, 0x00, /* 0011100000 */ 0x6c, 0x00, /* 0110110000 */ 0x06, 0xc0, /* 0000011011 */ 0x03, 0x80, /* 0000001110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 248 0xf8 '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x33, 0x00, /* 0011001100 */ 0x33, 0x00, /* 0011001100 */ 0x1e, 0x00, /* 0001111000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 249 0xf9 '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x1e, 0x00, /* 0001111000 */ 0x1e, 0x00, /* 0001111000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 250 0xfa '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 251 0xfb '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x0f, 0xc0, /* 0000111111 */ 0x0f, 0xc0, /* 0000111111 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0x0c, 0x00, /* 0000110000 */ 0xcc, 0x00, /* 1100110000 */ 0x6c, 0x00, /* 0110110000 */ 0x3c, 0x00, /* 0011110000 */ 0x1c, 0x00, /* 0001110000 */ 0x0c, 0x00, /* 0000110000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 252 0xfc '.' */ 0x00, 0x00, /* 0000000000 */ 0x27, 0x00, /* 0010011100 */ 0x7b, 0x00, /* 0111101100 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x31, 0x00, /* 0011000100 */ 0x7b, 0x80, /* 0111101110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 253 0xfd '.' */ 0x00, 0x00, /* 0000000000 */ 0x1e, 0x00, /* 0001111000 */ 0x3f, 0x00, /* 0011111100 */ 0x63, 0x00, /* 0110001100 */ 0x43, 0x00, /* 0100001100 */ 0x06, 0x00, /* 0000011000 */ 0x0c, 0x00, /* 0000110000 */ 0x18, 0x00, /* 0001100000 */ 0x30, 0x80, /* 0011000010 */ 0x7f, 0x80, /* 0111111110 */ 0x7f, 0x80, /* 0111111110 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 254 0xfe '.' 
*/ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x3f, 0x00, /* 0011111100 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ /* 255 0xff '.' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ } }; const struct font_desc font_10x18 = { .idx = FONT10x18_IDX, .name = "10x18", .width = 10, .height = 18, .charcount = 256, .data = fontdata_10x18.data, #ifdef __sparc__ .pref = 5, #else .pref = -1, #endif };
linux-master
lib/fonts/font_10x18.c
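A note on the layout of the table that ends above, before the next font file begins: the inline bit comments imply that each 10x18 glyph is stored as 18 rows, each row packed MSB-first into two bytes (so 36 bytes per glyph), with column 0 in the most significant bit of the first byte and columns 8 and 9 in the top two bits of the second byte. The following is a minimal user-space sketch of reading one pixel back out of such a table, assuming exactly that layout; the helper name and macros are illustrative only, not kernel APIs.

#include <stdbool.h>

#define GLYPH_ROWS   18                      /* rows per 10x18 glyph          */
#define ROW_BYTES    2                       /* 10 columns packed into 2 bytes */
#define GLYPH_BYTES  (GLYPH_ROWS * ROW_BYTES) /* 36 bytes per glyph            */

/* Test whether pixel (x, y) of glyph 'ch' is set, 0 <= x < 10, 0 <= y < 18. */
static bool glyph_pixel_10x18(const unsigned char *bitmap,
                              unsigned int ch, unsigned int x, unsigned int y)
{
	const unsigned char *row = bitmap + ch * GLYPH_BYTES + y * ROW_BYTES;

	/* Column 0 is the MSB of the row's first byte; columns 8 and 9
	 * fall in the top two bits of the second byte. */
	return row[x >> 3] & (0x80 >> (x & 7));
}

Called as, say, glyph_pixel_10x18(fontdata_10x18.data, 0xdb, x, y), it should report every pixel of the solid-block glyph at 0xdb as set, matching the all-ones rows in the table above.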
// SPDX-License-Identifier: GPL-2.0 #include <linux/font.h> #define FONTDATAMAX 2048 static const struct font_data fontdata_6x8 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 1 0x01 '^A' */ 0x78, /* 011110 */ 0x84, /* 100001 */ 0xCC, /* 110011 */ 0x84, /* 100001 */ 0xCC, /* 110011 */ 0xB4, /* 101101 */ 0x78, /* 011110 */ 0x00, /* 000000 */ /* 2 0x02 '^B' */ 0x78, /* 011110 */ 0xFC, /* 111111 */ 0xB4, /* 101101 */ 0xFC, /* 111111 */ 0xB4, /* 101101 */ 0xCC, /* 110011 */ 0x78, /* 011110 */ 0x00, /* 000000 */ /* 3 0x03 '^C' */ 0x00, /* 000000 */ 0x28, /* 001010 */ 0x7C, /* 011111 */ 0x7C, /* 011111 */ 0x38, /* 001110 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 4 0x04 '^D' */ 0x00, /* 000000 */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x7C, /* 011111 */ 0x38, /* 001110 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 5 0x05 '^E' */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x38, /* 001110 */ 0x6C, /* 011011 */ 0x6C, /* 011011 */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 6 0x06 '^F' */ 0x00, /* 000000 */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x7C, /* 011111 */ 0x7C, /* 011111 */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 7 0x07 '^G' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x30, /* 001100 */ 0x78, /* 011110 */ 0x30, /* 001100 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 8 0x08 '^H' */ 0xFC, /* 111111 */ 0xFC, /* 111111 */ 0xCC, /* 110011 */ 0x84, /* 100001 */ 0xCC, /* 110011 */ 0xFC, /* 111111 */ 0xFC, /* 111111 */ 0xFC, /* 111111 */ /* 9 0x09 '^I' */ 0x00, /* 000000 */ 0x30, /* 001100 */ 0x48, /* 010010 */ 0x84, /* 100001 */ 0x48, /* 010010 */ 0x30, /* 001100 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 10 0x0A '^J' */ 0xFC, /* 111111 */ 0xCC, /* 110011 */ 0xB4, /* 101101 */ 0x78, /* 011110 */ 0xB4, /* 101101 */ 0xCC, /* 110011 */ 0xFC, /* 111111 */ 0xFC, /* 111111 */ /* 11 0x0B '^K' */ 0x3C, /* 001111 */ 0x14, /* 000101 */ 0x20, /* 001000 */ 0x78, /* 011110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 12 0x0C '^L' */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x10, /* 000100 */ 0x00, /* 000000 */ /* 13 0x0D '^M' */ 0x18, /* 000110 */ 0x14, /* 000101 */ 0x14, /* 000101 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x70, /* 011100 */ 0x60, /* 011000 */ 0x00, /* 000000 */ /* 14 0x0E '^N' */ 0x3C, /* 001111 */ 0x24, /* 001001 */ 0x3C, /* 001111 */ 0x24, /* 001001 */ 0x24, /* 001001 */ 0x6C, /* 011011 */ 0x6C, /* 011011 */ 0x00, /* 000000 */ /* 15 0x0F '^O' */ 0x10, /* 000100 */ 0x54, /* 010101 */ 0x38, /* 001110 */ 0x6C, /* 011011 */ 0x38, /* 001110 */ 0x54, /* 010101 */ 0x10, /* 000100 */ 0x00, /* 000000 */ /* 16 0x10 '^P' */ 0x40, /* 010000 */ 0x60, /* 011000 */ 0x70, /* 011100 */ 0x78, /* 011110 */ 0x70, /* 011100 */ 0x60, /* 011000 */ 0x40, /* 010000 */ 0x00, /* 000000 */ /* 17 0x11 '^Q' */ 0x04, /* 000001 */ 0x0C, /* 000011 */ 0x1C, /* 000111 */ 0x3C, /* 001111 */ 0x1C, /* 000111 */ 0x0C, /* 000011 */ 0x04, /* 000001 */ 0x00, /* 000000 */ /* 18 0x12 '^R' */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x54, /* 010101 */ 0x10, /* 000100 */ 0x54, /* 010101 */ 0x38, /* 001110 */ 0x10, /* 000100 */ 0x00, /* 000000 */ /* 19 0x13 '^S' */ 0x48, /* 010010 */ 0x48, /* 010010 */ 0x48, /* 010010 */ 0x48, /* 010010 */ 0x48, /* 010010 */ 0x00, /* 000000 */ 0x48, /* 010010 */ 
0x00, /* 000000 */ /* 20 0x14 '^T' */ 0x3C, /* 001111 */ 0x54, /* 010101 */ 0x54, /* 010101 */ 0x3C, /* 001111 */ 0x14, /* 000101 */ 0x14, /* 000101 */ 0x14, /* 000101 */ 0x00, /* 000000 */ /* 21 0x15 '^U' */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x30, /* 001100 */ 0x28, /* 001010 */ 0x14, /* 000101 */ 0x0C, /* 000011 */ 0x44, /* 010001 */ 0x38, /* 001110 */ /* 22 0x16 '^V' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0xF8, /* 111110 */ 0xF8, /* 111110 */ 0xF8, /* 111110 */ 0x00, /* 000000 */ /* 23 0x17 '^W' */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x54, /* 010101 */ 0x10, /* 000100 */ 0x54, /* 010101 */ 0x38, /* 001110 */ 0x10, /* 000100 */ 0x7C, /* 011111 */ /* 24 0x18 '^X' */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x54, /* 010101 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x00, /* 000000 */ /* 25 0x19 '^Y' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x54, /* 010101 */ 0x38, /* 001110 */ 0x10, /* 000100 */ 0x00, /* 000000 */ /* 26 0x1A '^Z' */ 0x00, /* 000000 */ 0x10, /* 000100 */ 0x08, /* 000010 */ 0x7C, /* 011111 */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 27 0x1B '^[' */ 0x00, /* 000000 */ 0x10, /* 000100 */ 0x20, /* 001000 */ 0x7C, /* 011111 */ 0x20, /* 001000 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 28 0x1C '^\' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x78, /* 011110 */ 0x00, /* 000000 */ /* 29 0x1D '^]' */ 0x00, /* 000000 */ 0x48, /* 010010 */ 0x84, /* 100001 */ 0xFC, /* 111111 */ 0x84, /* 100001 */ 0x48, /* 010010 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 30 0x1E '^^' */ 0x00, /* 000000 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x38, /* 001110 */ 0x7C, /* 011111 */ 0x7C, /* 011111 */ 0x00, /* 000000 */ /* 31 0x1F '^_' */ 0x00, /* 000000 */ 0x7C, /* 011111 */ 0x7C, /* 011111 */ 0x38, /* 001110 */ 0x38, /* 001110 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x00, /* 000000 */ /* 32 0x20 ' ' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 33 0x21 '!' 
*/ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x10, /* 000100 */ 0x00, /* 000000 */ /* 34 0x22 '"' */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 35 0x23 '#' */ 0x00, /* 000000 */ 0x28, /* 001010 */ 0x7C, /* 011111 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x7C, /* 011111 */ 0x28, /* 001010 */ 0x00, /* 000000 */ /* 36 0x24 '$' */ 0x10, /* 000000 */ 0x38, /* 001000 */ 0x40, /* 010000 */ 0x30, /* 001000 */ 0x08, /* 000000 */ 0x70, /* 011000 */ 0x20, /* 001000 */ 0x00, /* 000000 */ /* 37 0x25 '%' */ 0x64, /* 011001 */ 0x64, /* 011001 */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x20, /* 001000 */ 0x4C, /* 010011 */ 0x4C, /* 010011 */ 0x00, /* 000000 */ /* 38 0x26 '&' */ 0x30, /* 001100 */ 0x48, /* 010010 */ 0x50, /* 010100 */ 0x20, /* 001000 */ 0x54, /* 010101 */ 0x48, /* 010010 */ 0x34, /* 001101 */ 0x00, /* 000000 */ /* 39 0x27 ''' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 40 0x28 '(' */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x20, /* 001000 */ 0x20, /* 001000 */ 0x20, /* 001000 */ 0x10, /* 000100 */ 0x08, /* 000010 */ 0x00, /* 000000 */ /* 41 0x29 ')' */ 0x20, /* 001000 */ 0x10, /* 000100 */ 0x08, /* 000010 */ 0x08, /* 000010 */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x20, /* 001000 */ 0x00, /* 000000 */ /* 42 0x2A '*' */ 0x10, /* 000100 */ 0x54, /* 010101 */ 0x38, /* 001110 */ 0x54, /* 010101 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 43 0x2B '+' */ 0x00, /* 000000 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x7C, /* 011111 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 44 0x2C ',' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x30, /* 001100 */ 0x30, /* 001100 */ 0x20, /* 001000 */ /* 45 0x2D '-' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x7C, /* 011111 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 46 0x2E '.' 
*/ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x18, /* 000110 */ 0x18, /* 000110 */ 0x00, /* 000000 */ /* 47 0x2F '/' */ 0x04, /* 000001 */ 0x08, /* 000010 */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x20, /* 001000 */ 0x20, /* 001000 */ 0x40, /* 010000 */ /* 48 0x30 '0' */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x4C, /* 010011 */ 0x54, /* 010101 */ 0x64, /* 011001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 49 0x31 '1' */ 0x10, /* 000100 */ 0x30, /* 001100 */ 0x50, /* 010100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x7C, /* 011111 */ 0x00, /* 000000 */ /* 50 0x32 '2' */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x04, /* 000001 */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x20, /* 001000 */ 0x7C, /* 011111 */ 0x00, /* 000000 */ /* 51 0x33 '3' */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x04, /* 000001 */ 0x18, /* 000110 */ 0x04, /* 000001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 52 0x34 '4' */ 0x08, /* 000010 */ 0x18, /* 000110 */ 0x28, /* 001010 */ 0x48, /* 010010 */ 0x7C, /* 011111 */ 0x08, /* 000010 */ 0x08, /* 000010 */ 0x00, /* 000000 */ /* 53 0x35 '5' */ 0x7C, /* 011111 */ 0x40, /* 010000 */ 0x78, /* 011110 */ 0x04, /* 000001 */ 0x04, /* 000001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 54 0x36 '6' */ 0x18, /* 000110 */ 0x20, /* 001000 */ 0x40, /* 010000 */ 0x78, /* 011110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 55 0x37 '7' */ 0x7C, /* 011111 */ 0x04, /* 000001 */ 0x04, /* 000001 */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x00, /* 000000 */ /* 56 0x38 '8' */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 57 0x39 '9' */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x3C, /* 001111 */ 0x04, /* 000001 */ 0x08, /* 000010 */ 0x30, /* 001100 */ 0x00, /* 000000 */ /* 58 0x3A ':' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x18, /* 000110 */ 0x18, /* 000110 */ 0x00, /* 000000 */ 0x18, /* 000110 */ 0x18, /* 000110 */ 0x00, /* 000000 */ /* 59 0x3B ';' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x30, /* 001100 */ 0x30, /* 001100 */ 0x00, /* 000000 */ 0x30, /* 001100 */ 0x30, /* 001100 */ 0x20, /* 001000 */ /* 60 0x3C '<' */ 0x04, /* 000001 */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x20, /* 001000 */ 0x10, /* 000100 */ 0x08, /* 000010 */ 0x04, /* 000001 */ 0x00, /* 000000 */ /* 61 0x3D '=' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x7C, /* 011111 */ 0x00, /* 000000 */ 0x7C, /* 011111 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 62 0x3E '>' */ 0x20, /* 001000 */ 0x10, /* 000100 */ 0x08, /* 000010 */ 0x04, /* 000001 */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x20, /* 001000 */ 0x00, /* 000000 */ /* 63 0x3F '?' 
*/ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x04, /* 000001 */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x10, /* 000100 */ 0x00, /* 000000 */ /* 64 0x40 '@' */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x5C, /* 010111 */ 0x54, /* 010101 */ 0x5C, /* 010111 */ 0x40, /* 010000 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 65 0x41 'A' */ 0x10, /* 000100 */ 0x28, /* 001010 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x7C, /* 011111 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x00, /* 000000 */ /* 66 0x42 'B' */ 0x78, /* 011110 */ 0x24, /* 001001 */ 0x24, /* 001001 */ 0x38, /* 001110 */ 0x24, /* 001001 */ 0x24, /* 001001 */ 0x78, /* 011110 */ 0x00, /* 000000 */ /* 67 0x43 'C' */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 68 0x44 'D' */ 0x78, /* 011110 */ 0x24, /* 001001 */ 0x24, /* 001001 */ 0x24, /* 001001 */ 0x24, /* 001001 */ 0x24, /* 001001 */ 0x78, /* 011110 */ 0x00, /* 000000 */ /* 69 0x45 'E' */ 0x7C, /* 011111 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x78, /* 011110 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x7C, /* 011111 */ 0x00, /* 000000 */ /* 70 0x46 'F' */ 0x7C, /* 011111 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x78, /* 011110 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x00, /* 000000 */ /* 71 0x47 'G' */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x40, /* 010000 */ 0x5C, /* 010111 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 72 0x48 'H' */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x7C, /* 011111 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x00, /* 000000 */ /* 73 0x49 'I' */ 0x38, /* 001110 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 74 0x4A 'J' */ 0x1C, /* 000111 */ 0x08, /* 000010 */ 0x08, /* 000010 */ 0x08, /* 000010 */ 0x48, /* 010010 */ 0x48, /* 010010 */ 0x30, /* 001100 */ 0x00, /* 000000 */ /* 75 0x4B 'K' */ 0x44, /* 010001 */ 0x48, /* 010010 */ 0x50, /* 010100 */ 0x60, /* 011000 */ 0x50, /* 010100 */ 0x48, /* 010010 */ 0x44, /* 010001 */ 0x00, /* 000000 */ /* 76 0x4C 'L' */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x7C, /* 011111 */ 0x00, /* 000000 */ /* 77 0x4D 'M' */ 0x44, /* 010001 */ 0x6C, /* 011011 */ 0x54, /* 010101 */ 0x54, /* 010101 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x00, /* 000000 */ /* 78 0x4E 'N' */ 0x44, /* 010001 */ 0x64, /* 011001 */ 0x54, /* 010101 */ 0x4C, /* 010011 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x00, /* 000000 */ /* 79 0x4F 'O' */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 80 0x50 'P' */ 0x78, /* 011110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x78, /* 011110 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x00, /* 000000 */ /* 81 0x51 'Q' */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x54, /* 010101 */ 0x48, /* 010010 */ 0x34, /* 001101 */ 0x00, /* 000000 */ /* 82 0x52 'R' */ 0x78, /* 011110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x78, /* 011110 */ 0x50, /* 010100 */ 0x48, /* 010010 */ 0x44, /* 010001 */ 0x00, /* 000000 */ /* 83 0x53 'S' */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x40, /* 010000 */ 0x38, /* 001110 */ 0x04, /* 000001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ 
/* 84 0x54 'T' */ 0x7C, /* 011111 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x00, /* 000000 */ /* 85 0x55 'U' */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 86 0x56 'V' */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x28, /* 001010 */ 0x10, /* 000100 */ 0x00, /* 000000 */ /* 87 0x57 'W' */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x54, /* 010101 */ 0x54, /* 010101 */ 0x6C, /* 011011 */ 0x44, /* 010001 */ 0x00, /* 000000 */ /* 88 0x58 'X' */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x28, /* 001010 */ 0x10, /* 000100 */ 0x28, /* 001010 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x00, /* 000000 */ /* 89 0x59 'Y' */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x28, /* 001010 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x00, /* 000000 */ /* 90 0x5A 'Z' */ 0x7C, /* 011111 */ 0x04, /* 000001 */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x20, /* 001000 */ 0x40, /* 010000 */ 0x7C, /* 011111 */ 0x00, /* 000000 */ /* 91 0x5B '[' */ 0x18, /* 000110 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x18, /* 000110 */ 0x00, /* 000000 */ /* 92 0x5C '\' */ 0x40, /* 010000 */ 0x20, /* 001000 */ 0x20, /* 001000 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x08, /* 000010 */ 0x08, /* 000010 */ 0x04, /* 000001 */ /* 93 0x5D ']' */ 0x30, /* 001100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x30, /* 001100 */ 0x00, /* 000000 */ /* 94 0x5E '^' */ 0x10, /* 000100 */ 0x28, /* 001010 */ 0x44, /* 010001 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 95 0x5F '_' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x7C, /* 011111 */ /* 96 0x60 '`' */ 0x20, /* 001000 */ 0x10, /* 000100 */ 0x08, /* 000010 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 97 0x61 'a' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x04, /* 000001 */ 0x3C, /* 001111 */ 0x44, /* 010001 */ 0x3C, /* 001111 */ 0x00, /* 000000 */ /* 98 0x62 'b' */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x58, /* 010110 */ 0x64, /* 011001 */ 0x44, /* 010001 */ 0x64, /* 011001 */ 0x58, /* 010110 */ 0x00, /* 000000 */ /* 99 0x63 'c' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x40, /* 010000 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 100 0x64 'd' */ 0x04, /* 000001 */ 0x04, /* 000001 */ 0x34, /* 001101 */ 0x4C, /* 010011 */ 0x44, /* 010001 */ 0x4C, /* 010011 */ 0x34, /* 001101 */ 0x00, /* 000000 */ /* 101 0x65 'e' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x7C, /* 011111 */ 0x40, /* 010000 */ 0x3C, /* 001111 */ 0x00, /* 000000 */ /* 102 0x66 'f' */ 0x0C, /* 000011 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x00, /* 000000 */ /* 103 0x67 'g' */ 0x00, /* 000000 */ 0x34, /* 001101 */ 0x4C, /* 010011 */ 0x44, /* 010001 */ 0x4C, /* 010011 */ 0x34, /* 001101 */ 0x04, /* 000001 */ 0x38, /* 001110 */ /* 104 0x68 'h' */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x78, /* 011110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 
*/ 0x00, /* 000000 */ /* 105 0x69 'i' */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x30, /* 001100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 106 0x6A 'j' */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x30, /* 001100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x60, /* 011000 */ /* 107 0x6B 'k' */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x48, /* 010010 */ 0x50, /* 010100 */ 0x70, /* 011100 */ 0x48, /* 010010 */ 0x44, /* 010001 */ 0x00, /* 000000 */ /* 108 0x6C 'l' */ 0x30, /* 001100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 109 0x6D 'm' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x68, /* 011010 */ 0x54, /* 010101 */ 0x54, /* 010101 */ 0x54, /* 010101 */ 0x54, /* 010101 */ 0x00, /* 000000 */ /* 110 0x6E 'n' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x58, /* 010110 */ 0x64, /* 011001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x00, /* 000000 */ /* 111 0x6F 'o' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 112 0x70 'p' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x78, /* 011110 */ 0x44, /* 010001 */ 0x64, /* 011001 */ 0x58, /* 010110 */ 0x40, /* 010000 */ 0x40, /* 010000 */ /* 113 0x71 'q' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x3C, /* 001111 */ 0x44, /* 010001 */ 0x4C, /* 010011 */ 0x34, /* 001101 */ 0x04, /* 000001 */ 0x04, /* 000001 */ /* 114 0x72 'r' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x58, /* 010110 */ 0x64, /* 011001 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x00, /* 000000 */ /* 115 0x73 's' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x3C, /* 001111 */ 0x40, /* 010000 */ 0x38, /* 001110 */ 0x04, /* 000001 */ 0x78, /* 011110 */ 0x00, /* 000000 */ /* 116 0x74 't' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x0C, /* 000011 */ 0x00, /* 000000 */ /* 117 0x75 'u' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x4C, /* 010011 */ 0x34, /* 001101 */ 0x00, /* 000000 */ /* 118 0x76 'v' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x28, /* 001010 */ 0x10, /* 000100 */ 0x00, /* 000000 */ /* 119 0x77 'w' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x54, /* 010101 */ 0x54, /* 010101 */ 0x54, /* 010101 */ 0x54, /* 010101 */ 0x28, /* 001010 */ 0x00, /* 000000 */ /* 120 0x78 'x' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x44, /* 010001 */ 0x28, /* 001010 */ 0x10, /* 000100 */ 0x28, /* 001010 */ 0x44, /* 010001 */ 0x00, /* 000000 */ /* 121 0x79 'y' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x3C, /* 001111 */ 0x04, /* 000001 */ 0x38, /* 001110 */ /* 122 0x7A 'z' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x7C, /* 011111 */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x20, /* 001000 */ 0x7C, /* 011111 */ 0x00, /* 000000 */ /* 123 0x7B '{' */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x20, /* 001000 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x08, /* 000010 */ 0x00, /* 000000 */ /* 124 0x7C '|' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x00, /* 000000 */ /* 125 0x7D '}' */ 0x20, /* 001000 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x08, /* 000010 */ 0x10, /* 000100 
*/ 0x10, /* 000100 */ 0x20, /* 001000 */ 0x00, /* 000000 */ /* 126 0x7E '~' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x20, /* 001000 */ 0x54, /* 010101 */ 0x08, /* 000010 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 127 0x7F '' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x10, /* 000100 */ 0x28, /* 001010 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x7C, /* 011111 */ 0x00, /* 000000 */ /* 128 0x80 '\200' */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x40, /* 010000 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x10, /* 000100 */ 0x20, /* 001000 */ /* 129 0x81 '\201' */ 0x00, /* 000000 */ 0x28, /* 001010 */ 0x00, /* 000000 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x4C, /* 010011 */ 0x34, /* 001101 */ 0x00, /* 000000 */ /* 130 0x82 '\202' */ 0x18, /* 000110 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x7C, /* 011111 */ 0x40, /* 010000 */ 0x3C, /* 001111 */ 0x00, /* 000000 */ /* 131 0x83 '\203' */ 0x18, /* 000110 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x04, /* 000001 */ 0x3C, /* 001111 */ 0x44, /* 010001 */ 0x3C, /* 001111 */ 0x00, /* 000000 */ /* 132 0x84 '\204' */ 0x28, /* 001010 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x04, /* 000001 */ 0x3C, /* 001111 */ 0x44, /* 010001 */ 0x3C, /* 001111 */ 0x00, /* 000000 */ /* 133 0x85 '\205' */ 0x18, /* 000110 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x04, /* 000001 */ 0x3C, /* 001111 */ 0x44, /* 010001 */ 0x3C, /* 001111 */ 0x00, /* 000000 */ /* 134 0x86 '\206' */ 0x3C, /* 001111 */ 0x18, /* 000110 */ 0x38, /* 001110 */ 0x04, /* 000001 */ 0x3C, /* 001111 */ 0x44, /* 010001 */ 0x3C, /* 001111 */ 0x00, /* 000000 */ /* 135 0x87 '\207' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x40, /* 010000 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x10, /* 000100 */ /* 136 0x88 '\210' */ 0x18, /* 000110 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x7C, /* 011111 */ 0x40, /* 010000 */ 0x3C, /* 001111 */ 0x00, /* 000000 */ /* 137 0x89 '\211' */ 0x28, /* 001010 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x7C, /* 011111 */ 0x40, /* 010000 */ 0x3C, /* 001111 */ 0x00, /* 000000 */ /* 138 0x8A '\212' */ 0x18, /* 000110 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x7C, /* 011111 */ 0x40, /* 010000 */ 0x3C, /* 001111 */ 0x00, /* 000000 */ /* 139 0x8B '\213' */ 0x28, /* 001010 */ 0x00, /* 000000 */ 0x30, /* 001100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 140 0x8C '\214' */ 0x18, /* 000110 */ 0x00, /* 000000 */ 0x30, /* 001100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 141 0x8D '\215' */ 0x18, /* 000110 */ 0x00, /* 000000 */ 0x30, /* 001100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 142 0x8E '\216' */ 0x44, /* 010001 */ 0x10, /* 000100 */ 0x28, /* 001010 */ 0x44, /* 010001 */ 0x7C, /* 011111 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x00, /* 000000 */ /* 143 0x8F '\217' */ 0x30, /* 001100 */ 0x48, /* 010010 */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x7C, /* 011111 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x00, /* 000000 */ /* 144 0x90 '\220' */ 0x10, /* 000100 */ 0x7C, /* 011111 */ 0x40, /* 010000 */ 0x78, /* 011110 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x7C, /* 011111 */ 0x00, /* 000000 */ /* 145 0x91 '\221' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x78, /* 011110 */ 0x14, /* 000101 */ 0x7C, /* 011111 */ 0x50, /* 010100 */ 0x3C, /* 001111 */ 0x00, /* 000000 */ /* 146 0x92 '\222' */ 
0x3C, /* 001111 */ 0x50, /* 010100 */ 0x50, /* 010100 */ 0x78, /* 011110 */ 0x50, /* 010100 */ 0x50, /* 010100 */ 0x5C, /* 010111 */ 0x00, /* 000000 */ /* 147 0x93 '\223' */ 0x18, /* 000110 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 148 0x94 '\224' */ 0x28, /* 001010 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 149 0x95 '\225' */ 0x18, /* 000110 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 150 0x96 '\226' */ 0x10, /* 000100 */ 0x28, /* 001010 */ 0x00, /* 000000 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x4C, /* 010011 */ 0x34, /* 001101 */ 0x00, /* 000000 */ /* 151 0x97 '\227' */ 0x20, /* 001000 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x4C, /* 010011 */ 0x34, /* 001101 */ 0x00, /* 000000 */ /* 152 0x98 '\230' */ 0x28, /* 001010 */ 0x00, /* 000000 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x3C, /* 001111 */ 0x04, /* 000001 */ 0x38, /* 001110 */ /* 153 0x99 '\231' */ 0x84, /* 100001 */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 154 0x9A '\232' */ 0x88, /* 100010 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 155 0x9B '\233' */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x54, /* 010101 */ 0x50, /* 010100 */ 0x54, /* 010101 */ 0x38, /* 001110 */ 0x10, /* 000100 */ 0x00, /* 000000 */ /* 156 0x9C '\234' */ 0x30, /* 001100 */ 0x48, /* 010010 */ 0x40, /* 010000 */ 0x70, /* 011100 */ 0x40, /* 010000 */ 0x44, /* 010001 */ 0x78, /* 011110 */ 0x00, /* 000000 */ /* 157 0x9D '\235' */ 0x44, /* 010001 */ 0x28, /* 001010 */ 0x7C, /* 011111 */ 0x10, /* 000100 */ 0x7C, /* 011111 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x00, /* 000000 */ /* 158 0x9E '\236' */ 0x70, /* 011100 */ 0x48, /* 010010 */ 0x70, /* 011100 */ 0x48, /* 010010 */ 0x5C, /* 010111 */ 0x48, /* 010010 */ 0x44, /* 010001 */ 0x00, /* 000000 */ /* 159 0x9F '\237' */ 0x0C, /* 000011 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x60, /* 011000 */ 0x00, /* 000000 */ /* 160 0xA0 '\240' */ 0x18, /* 000110 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x04, /* 000001 */ 0x3C, /* 001111 */ 0x44, /* 010001 */ 0x3C, /* 001111 */ 0x00, /* 000000 */ /* 161 0xA1 '\241' */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x30, /* 001100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 162 0xA2 '\242' */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 163 0xA3 '\243' */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x4C, /* 010011 */ 0x34, /* 001101 */ 0x00, /* 000000 */ /* 164 0xA4 '\244' */ 0x34, /* 001101 */ 0x58, /* 010110 */ 0x00, /* 000000 */ 0x58, /* 010110 */ 0x64, /* 011001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x00, /* 000000 */ /* 165 0xA5 '\245' */ 0x58, /* 010110 */ 0x44, /* 010001 */ 0x64, /* 011001 */ 0x54, /* 010101 */ 0x4C, /* 010011 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x00, /* 000000 */ /* 166 0xA6 '\246' */ 0x38, /* 001110 */ 0x04, /* 000001 */ 0x3C, /* 001111 */ 0x44, /* 010001 
*/ 0x3C, /* 001111 */ 0x00, /* 000000 */ 0x7C, /* 011111 */ 0x00, /* 000000 */ /* 167 0xA7 '\247' */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ 0x7C, /* 011111 */ 0x00, /* 000000 */ /* 168 0xA8 '\250' */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x10, /* 000100 */ 0x20, /* 001000 */ 0x40, /* 010000 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 169 0xA9 '\251' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x7C, /* 011111 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 170 0xAA '\252' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x7C, /* 011111 */ 0x04, /* 000001 */ 0x04, /* 000001 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 171 0xAB '\253' */ 0x20, /* 001000 */ 0x24, /* 001001 */ 0x28, /* 001010 */ 0x10, /* 000100 */ 0x28, /* 001010 */ 0x44, /* 010001 */ 0x08, /* 000010 */ 0x1C, /* 000111 */ /* 172 0xAC '\254' */ 0x20, /* 001000 */ 0x24, /* 001001 */ 0x28, /* 001010 */ 0x10, /* 000100 */ 0x28, /* 001010 */ 0x58, /* 010110 */ 0x3C, /* 001111 */ 0x08, /* 000010 */ /* 173 0xAD '\255' */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x00, /* 000000 */ /* 174 0xAE '\256' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x24, /* 001001 */ 0x48, /* 010010 */ 0x90, /* 100100 */ 0x48, /* 010010 */ 0x24, /* 001001 */ 0x00, /* 000000 */ /* 175 0xAF '\257' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x90, /* 100100 */ 0x48, /* 010010 */ 0x24, /* 001001 */ 0x48, /* 010010 */ 0x90, /* 100100 */ 0x00, /* 000000 */ /* 176 0xB0 '\260' */ 0x10, /* 000100 */ 0x44, /* 010001 */ 0x10, /* 000100 */ 0x44, /* 010001 */ 0x10, /* 000100 */ 0x44, /* 010001 */ 0x10, /* 000100 */ 0x44, /* 010001 */ /* 177 0xB1 '\261' */ 0xA8, /* 101010 */ 0x54, /* 010101 */ 0xA8, /* 101010 */ 0x54, /* 010101 */ 0xA8, /* 101010 */ 0x54, /* 010101 */ 0xA8, /* 101010 */ 0x54, /* 010101 */ /* 178 0xB2 '\262' */ 0xDC, /* 110111 */ 0x74, /* 011101 */ 0xDC, /* 110111 */ 0x74, /* 011101 */ 0xDC, /* 110111 */ 0x74, /* 011101 */ 0xDC, /* 110111 */ 0x74, /* 011101 */ /* 179 0xB3 '\263' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ /* 180 0xB4 '\264' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0xF0, /* 111100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ /* 181 0xB5 '\265' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0xF0, /* 111100 */ 0x10, /* 000100 */ 0xF0, /* 111100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ /* 182 0xB6 '\266' */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0xE8, /* 111010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ /* 183 0xB7 '\267' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0xF8, /* 111110 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ /* 184 0xB8 '\270' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0xF0, /* 111100 */ 0x10, /* 000100 */ 0xF0, /* 111100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ /* 185 0xB9 '\271' */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0xE8, /* 111010 */ 0x08, /* 000010 */ 0xE8, /* 111010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ /* 186 0xBA '\272' */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 
001010 */ /* 187 0xBB '\273' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0xF8, /* 111110 */ 0x08, /* 000010 */ 0xE8, /* 111010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ /* 188 0xBC '\274' */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0xE8, /* 111010 */ 0x08, /* 000010 */ 0xF8, /* 111110 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 189 0xBD '\275' */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0xF8, /* 111110 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 190 0xBE '\276' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0xF0, /* 111100 */ 0x10, /* 000100 */ 0xF0, /* 111100 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 191 0xBF '\277' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0xF0, /* 111100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ /* 192 0xC0 '\300' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x1C, /* 000111 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 193 0xC1 '\301' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0xFC, /* 111111 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 194 0xC2 '\302' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0xFC, /* 111111 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ /* 195 0xC3 '\303' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x1C, /* 000111 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ /* 196 0xC4 '\304' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0xFC, /* 111111 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 197 0xC5 '\305' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0xFC, /* 111111 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ /* 198 0xC6 '\306' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x1C, /* 000111 */ 0x10, /* 000100 */ 0x1C, /* 000111 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ /* 199 0xC7 '\307' */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x2C, /* 001011 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ /* 200 0xC8 '\310' */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x2C, /* 001011 */ 0x20, /* 001000 */ 0x3C, /* 001111 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 201 0xC9 '\311' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x3C, /* 001111 */ 0x20, /* 001000 */ 0x2C, /* 001011 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ /* 202 0xCA '\312' */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0xEC, /* 111011 */ 0x00, /* 000000 */ 0xFC, /* 111111 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 203 0xCB '\313' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0xFC, /* 111111 */ 0x00, /* 000000 */ 0xEC, /* 111011 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ /* 204 0xCC '\314' */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x2C, /* 001011 */ 0x20, /* 001000 */ 0x2C, /* 001011 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ /* 205 0xCD '\315' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0xFC, /* 111111 */ 0x00, /* 000000 */ 0xFC, /* 111111 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 206 0xCE '\316' */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0xEC, /* 111011 */ 0x00, /* 000000 */ 0xEC, /* 111011 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ /* 207 0xCF '\317' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 
0xFC, /* 111111 */ 0x00, /* 000000 */ 0xFC, /* 111111 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 208 0xD0 '\320' */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0xFC, /* 111111 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 209 0xD1 '\321' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0xFC, /* 111111 */ 0x00, /* 000000 */ 0xFC, /* 111111 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ /* 210 0xD2 '\322' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0xFC, /* 111111 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ /* 211 0xD3 '\323' */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x3C, /* 001111 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 212 0xD4 '\324' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x1C, /* 000111 */ 0x10, /* 000100 */ 0x1C, /* 000111 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 213 0xD5 '\325' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x1C, /* 000111 */ 0x10, /* 000100 */ 0x1C, /* 000111 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ /* 214 0xD6 '\326' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x3C, /* 001111 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ /* 215 0xD7 '\327' */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0xFC, /* 111111 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 */ /* 216 0xD8 '\330' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0xFC, /* 111111 */ 0x10, /* 000100 */ 0xFC, /* 111111 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ /* 217 0xD9 '\331' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0xF0, /* 111100 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 218 0xDA '\332' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x1C, /* 000111 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ /* 219 0xDB '\333' */ 0xFC, /* 111111 */ 0xFC, /* 111111 */ 0xFC, /* 111111 */ 0xFC, /* 111111 */ 0xFC, /* 111111 */ 0xFC, /* 111111 */ 0xFC, /* 111111 */ 0xFC, /* 111111 */ /* 220 0xDC '\334' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0xFC, /* 111111 */ 0xFC, /* 111111 */ 0xFC, /* 111111 */ 0xFC, /* 111111 */ /* 221 0xDD '\335' */ 0xE0, /* 111000 */ 0xE0, /* 111000 */ 0xE0, /* 111000 */ 0xE0, /* 111000 */ 0xE0, /* 111000 */ 0xE0, /* 111000 */ 0xE0, /* 111000 */ 0xE0, /* 111000 */ /* 222 0xDE '\336' */ 0x1C, /* 000111 */ 0x1C, /* 000111 */ 0x1C, /* 000111 */ 0x1C, /* 000111 */ 0x1C, /* 000111 */ 0x1C, /* 000111 */ 0x1C, /* 000111 */ 0x1C, /* 000111 */ /* 223 0xDF '\337' */ 0xFC, /* 111111 */ 0xFC, /* 111111 */ 0xFC, /* 111111 */ 0xFC, /* 111111 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 224 0xE0 '\340' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x34, /* 001101 */ 0x48, /* 010010 */ 0x48, /* 010010 */ 0x48, /* 010010 */ 0x34, /* 001101 */ 0x00, /* 000000 */ /* 225 0xE1 '\341' */ 0x24, /* 001001 */ 0x44, /* 010001 */ 0x48, /* 010010 */ 0x48, /* 010010 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x58, /* 010110 */ 0x40, /* 010000 */ /* 226 0xE2 '\342' */ 0x7C, /* 011111 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x00, /* 000000 */ /* 227 0xE3 '\343' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x7C, /* 011111 */ 0x28, /* 001010 */ 0x28, /* 001010 */ 0x28, /* 001010 
*/ 0x28, /* 001010 */ 0x00, /* 000000 */ /* 228 0xE4 '\344' */ 0x7C, /* 011111 */ 0x24, /* 001001 */ 0x10, /* 000100 */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x24, /* 001001 */ 0x7C, /* 011111 */ 0x00, /* 000000 */ /* 229 0xE5 '\345' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x3C, /* 001111 */ 0x48, /* 010010 */ 0x48, /* 010010 */ 0x48, /* 010010 */ 0x30, /* 001100 */ 0x00, /* 000000 */ /* 230 0xE6 '\346' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x48, /* 010010 */ 0x48, /* 010010 */ 0x48, /* 010010 */ 0x48, /* 010010 */ 0x74, /* 011101 */ 0x40, /* 010000 */ /* 231 0xE7 '\347' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x7C, /* 011111 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x0C, /* 000011 */ 0x00, /* 000000 */ /* 232 0xE8 '\350' */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 233 0xE9 '\351' */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x7C, /* 011111 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 234 0xEA '\352' */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x28, /* 001010 */ 0x6C, /* 011011 */ 0x00, /* 000000 */ /* 235 0xEB '\353' */ 0x18, /* 000110 */ 0x20, /* 001000 */ 0x18, /* 000110 */ 0x24, /* 001001 */ 0x24, /* 001001 */ 0x24, /* 001001 */ 0x18, /* 000110 */ 0x00, /* 000000 */ /* 236 0xEC '\354' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x54, /* 010101 */ 0x54, /* 010101 */ 0x54, /* 010101 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 237 0xED '\355' */ 0x00, /* 000000 */ 0x04, /* 000001 */ 0x38, /* 001110 */ 0x54, /* 010101 */ 0x54, /* 010101 */ 0x38, /* 001110 */ 0x40, /* 010000 */ 0x00, /* 000000 */ /* 238 0xEE '\356' */ 0x3C, /* 001111 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x38, /* 001110 */ 0x40, /* 010000 */ 0x40, /* 010000 */ 0x3C, /* 001111 */ 0x00, /* 000000 */ /* 239 0xEF '\357' */ 0x38, /* 001110 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x44, /* 010001 */ 0x00, /* 000000 */ /* 240 0xF0 '\360' */ 0x00, /* 000000 */ 0xFC, /* 111111 */ 0x00, /* 000000 */ 0xFC, /* 111111 */ 0x00, /* 000000 */ 0xFC, /* 111111 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 241 0xF1 '\361' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x7C, /* 011111 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x7C, /* 011111 */ 0x00, /* 000000 */ /* 242 0xF2 '\362' */ 0x20, /* 001000 */ 0x10, /* 000100 */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x20, /* 001000 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 243 0xF3 '\363' */ 0x08, /* 000010 */ 0x10, /* 000100 */ 0x20, /* 001000 */ 0x10, /* 000100 */ 0x08, /* 000010 */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 244 0xF4 '\364' */ 0x0C, /* 000011 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ /* 245 0xF5 '\365' */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x10, /* 000100 */ 0x60, /* 011000 */ /* 246 0xF6 '\366' */ 0x00, /* 000000 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x7C, /* 011111 */ 0x00, /* 000000 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 247 0xF7 '\367' */ 0x00, /* 000000 */ 0x20, /* 001000 */ 0x54, /* 010101 */ 0x08, /* 000010 */ 0x20, /* 001000 */ 0x54, /* 010101 */ 0x08, /* 000010 */ 0x00, /* 000000 */ /* 248 0xF8 '\370' */ 0x30, /* 
001100 */ 0x48, /* 010010 */ 0x48, /* 010010 */ 0x30, /* 001100 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 249 0xF9 '\371' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x10, /* 000100 */ 0x38, /* 001110 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 250 0xFA '\372' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x10, /* 000100 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 251 0xFB '\373' */ 0x04, /* 000001 */ 0x08, /* 000010 */ 0x08, /* 000010 */ 0x50, /* 010100 */ 0x50, /* 010100 */ 0x20, /* 001000 */ 0x20, /* 001000 */ 0x00, /* 000000 */ /* 252 0xFC '\374' */ 0x60, /* 011000 */ 0x50, /* 010100 */ 0x50, /* 010100 */ 0x50, /* 010100 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 253 0xFD '\375' */ 0x60, /* 011000 */ 0x10, /* 000100 */ 0x20, /* 001000 */ 0x70, /* 011100 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ /* 254 0xFE '\376' */ 0x00, /* 000000 */ 0x38, /* 001110 */ 0x38, /* 001110 */ 0x38, /* 001110 */ 0x38, /* 001110 */ 0x38, /* 001110 */ 0x38, /* 001110 */ 0x00, /* 000000 */ /* 255 0xFF '\377' */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ 0x00, /* 000000 */ } }; const struct font_desc font_6x8 = { .idx = FONT6x8_IDX, .name = "6x8", .width = 6, .height = 8, .charcount = 256, .data = fontdata_6x8.data, .pref = 0, };
linux-master
lib/fonts/font_6x8.c
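The glyph tables above pack one byte per pixel row, most significant bit leftmost; for a 6-pixel-wide font only the top six bits of each byte are used, which is what the six-column binary comments show. A minimal standalone sketch of decoding a row this way, assuming plain userspace C and a hypothetical helper name (not part of the kernel source):

#include <stdio.h>

/* Print one glyph of a 6x8 font stored as 8 row bytes, MSB = leftmost pixel.
 * Only the top 6 bits of each byte carry pixel data for a 6-wide font. */
static void print_glyph_6x8(const unsigned char rows[8])
{
	int x, y;

	for (y = 0; y < 8; y++) {
		for (x = 0; x < 6; x++)
			putchar(rows[y] & (0x80 >> x) ? '#' : '.');
		putchar('\n');
	}
}

int main(void)
{
	/* 'T' (84, 0x54) as listed above: 0x7C,0x10,0x10,0x10,0x10,0x10,0x10,0x00 */
	const unsigned char T[8] = { 0x7C, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x00 };

	print_glyph_6x8(T);
	return 0;
}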
/* * `Soft' font definitions * * Created 1995 by Geert Uytterhoeven * Rewritten 1998 by Martin Mares <[email protected]> * * 2001 - Documented with DocBook * - Brad Douglas <[email protected]> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #if defined(__mc68000__) #include <asm/setup.h> #endif #include <linux/font.h> static const struct font_desc *fonts[] = { #ifdef CONFIG_FONT_8x8 &font_vga_8x8, #endif #ifdef CONFIG_FONT_8x16 &font_vga_8x16, #endif #ifdef CONFIG_FONT_6x11 &font_vga_6x11, #endif #ifdef CONFIG_FONT_7x14 &font_7x14, #endif #ifdef CONFIG_FONT_SUN8x16 &font_sun_8x16, #endif #ifdef CONFIG_FONT_SUN12x22 &font_sun_12x22, #endif #ifdef CONFIG_FONT_10x18 &font_10x18, #endif #ifdef CONFIG_FONT_ACORN_8x8 &font_acorn_8x8, #endif #ifdef CONFIG_FONT_PEARL_8x8 &font_pearl_8x8, #endif #ifdef CONFIG_FONT_MINI_4x6 &font_mini_4x6, #endif #ifdef CONFIG_FONT_6x10 &font_6x10, #endif #ifdef CONFIG_FONT_TER16x32 &font_ter_16x32, #endif #ifdef CONFIG_FONT_6x8 &font_6x8, #endif }; #define num_fonts ARRAY_SIZE(fonts) #ifdef NO_FONTS #error No fonts configured. #endif /** * find_font - find a font * @name: string name of a font * * Find a specified font with string name @name. * * Returns %NULL if no font found, or a pointer to the * specified font. * */ const struct font_desc *find_font(const char *name) { unsigned int i; BUILD_BUG_ON(!num_fonts); for (i = 0; i < num_fonts; i++) if (!strcmp(fonts[i]->name, name)) return fonts[i]; return NULL; } EXPORT_SYMBOL(find_font); /** * get_default_font - get default font * @xres: screen size of X * @yres: screen size of Y * @font_w: bit array of supported widths (1 - 32) * @font_h: bit array of supported heights (1 - 32) * * Get the default font for a specified screen size. * Dimensions are in pixels. * * Returns %NULL if no font is found, or a pointer to the * chosen font. * */ const struct font_desc *get_default_font(int xres, int yres, u32 font_w, u32 font_h) { int i, c, cc, res; const struct font_desc *f, *g; g = NULL; cc = -10000; for (i = 0; i < num_fonts; i++) { f = fonts[i]; c = f->pref; #if defined(__mc68000__) #ifdef CONFIG_FONT_PEARL_8x8 if (MACH_IS_AMIGA && f->idx == PEARL8x8_IDX) c = 100; #endif #ifdef CONFIG_FONT_6x11 if (MACH_IS_MAC && xres < 640 && f->idx == VGA6x11_IDX) c = 100; #endif #endif if ((yres < 400) == (f->height <= 8)) c += 1000; /* prefer a bigger font for high resolution */ res = (xres / f->width) * (yres / f->height) / 1000; if (res > 20) c += 20 - res; if ((font_w & (1U << (f->width - 1))) && (font_h & (1U << (f->height - 1)))) c += 1000; if (c > cc) { cc = c; g = f; } } return g; } EXPORT_SYMBOL(get_default_font); MODULE_AUTHOR("James Simmons <[email protected]>"); MODULE_DESCRIPTION("Console Fonts"); MODULE_LICENSE("GPL");
linux-master
lib/fonts/fonts.c
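fonts.c above provides the two lookup helpers used by console drivers: find_font() matches a built-in font by its .name string, and get_default_font() scores every compiled-in font against the screen size plus width/height bit masks (bit w-1 set means width w is acceptable, likewise for heights). A hedged sketch of how a caller might use them, with an illustrative resolution, mask range, and function name; the only names taken from the source are the two helpers and the "SUN8x16" font defined in font_sun8x16.c below:

#include <linux/types.h>
#include <linux/font.h>

/* Pick a font for a 1024x768 console that accepts widths 8-16 and
 * heights 8-32, falling back to an explicit name if scoring fails. */
static const struct font_desc *pick_console_font(void)
{
	const struct font_desc *font;
	u32 font_w = 0, font_h = 0;
	int w, h;

	for (w = 8; w <= 16; w++)
		font_w |= 1U << (w - 1);	/* bit (w - 1) => width w supported */
	for (h = 8; h <= 32; h++)
		font_h |= 1U << (h - 1);

	font = get_default_font(1024, 768, font_w, font_h);
	if (!font)
		font = find_font("SUN8x16");	/* must match a font_desc.name */
	return font;
}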
// SPDX-License-Identifier: GPL-2.0 #include <linux/font.h> #define FONTDATAMAX 4096 static const struct font_data fontdata_sun8x16 = { { 0, 0, FONTDATAMAX, 0 }, { /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x7e,0xff,0xdb,0xff,0xff,0xc3,0xe7,0xff,0xff,0x7e,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x6c,0xfe,0xfe,0xfe,0xfe,0x7c,0x38,0x10,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x10,0x38,0x7c,0xfe,0x7c,0x38,0x10,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x18,0x3c,0x3c,0xe7,0xe7,0xe7,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x18,0x3c,0x7e,0xff,0xff,0x7e,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x3c,0x3c,0x18,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0xff,0xff,0xff,0xff,0xff,0xff,0xe7,0xc3,0xc3,0xe7,0xff,0xff,0xff,0xff,0xff,0xff, /* */ 0x00,0x00,0x00,0x00,0x00,0x3c,0x66,0x42,0x42,0x66,0x3c,0x00,0x00,0x00,0x00,0x00, /* */ 0xff,0xff,0xff,0xff,0xff,0xc3,0x99,0xbd,0xbd,0x99,0xc3,0xff,0xff,0xff,0xff,0xff, /* */ 0x00,0x00,0x1e,0x0e,0x1a,0x32,0x78,0xcc,0xcc,0xcc,0xcc,0x78,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x3c,0x66,0x66,0x66,0x66,0x3c,0x18,0x7e,0x18,0x18,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x3f,0x33,0x3f,0x30,0x30,0x30,0x30,0x70,0xf0,0xe0,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x7f,0x63,0x7f,0x63,0x63,0x63,0x63,0x67,0xe7,0xe6,0xc0,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x18,0x18,0xdb,0x3c,0xe7,0x3c,0xdb,0x18,0x18,0x00,0x00,0x00,0x00, /* */ 0x00,0x80,0xc0,0xe0,0xf0,0xf8,0xfe,0xf8,0xf0,0xe0,0xc0,0x80,0x00,0x00,0x00,0x00, /* */ 0x00,0x02,0x06,0x0e,0x1e,0x3e,0xfe,0x3e,0x1e,0x0e,0x06,0x02,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x18,0x3c,0x7e,0x18,0x18,0x18,0x7e,0x3c,0x18,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x66,0x66,0x66,0x66,0x66,0x66,0x66,0x00,0x66,0x66,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x7f,0xdb,0xdb,0xdb,0x7b,0x1b,0x1b,0x1b,0x1b,0x1b,0x00,0x00,0x00,0x00, /* */ 0x00,0x7c,0xc6,0x60,0x38,0x6c,0xc6,0xc6,0x6c,0x38,0x0c,0xc6,0x7c,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0xfe,0xfe,0xfe,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x18,0x3c,0x7e,0x18,0x18,0x18,0x7e,0x3c,0x18,0x7e,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x18,0x3c,0x7e,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x7e,0x3c,0x18,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x18,0x0c,0xfe,0x0c,0x18,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x30,0x60,0xfe,0x60,0x30,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0xc0,0xc0,0xc0,0xfe,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x24,0x66,0xff,0x66,0x24,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x10,0x38,0x38,0x7c,0x7c,0xfe,0xfe,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0xfe,0xfe,0x7c,0x7c,0x38,0x38,0x10,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*!*/ 0x00,0x00,0x18,0x3c,0x3c,0x3c,0x18,0x18,0x18,0x00,0x18,0x18,0x00,0x00,0x00,0x00, /*"*/ 0x00,0x66,0x66,0x66,0x24,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*#*/ 0x00,0x00,0x00,0x6c,0x6c,0xfe,0x6c,0x6c,0x6c,0xfe,0x6c,0x6c,0x00,0x00,0x00,0x00, /*$*/ 0x18,0x18,0x7c,0xc6,0xc2,0xc0,0x7c,0x06,0x06,0x86,0xc6,0x7c,0x18,0x18,0x00,0x00, /*%*/ 0x00,0x00,0x00,0x00,0xc2,0xc6,0x0c,0x18,0x30,0x60,0xc6,0x86,0x00,0x00,0x00,0x00, /*&*/ 
0x00,0x00,0x38,0x6c,0x6c,0x38,0x76,0xdc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /*'*/ 0x00,0x30,0x30,0x30,0x60,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*(*/ 0x00,0x00,0x0c,0x18,0x30,0x30,0x30,0x30,0x30,0x30,0x18,0x0c,0x00,0x00,0x00,0x00, /*)*/ 0x00,0x00,0x30,0x18,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x18,0x30,0x00,0x00,0x00,0x00, /***/ 0x00,0x00,0x00,0x00,0x00,0x66,0x3c,0xff,0x3c,0x66,0x00,0x00,0x00,0x00,0x00,0x00, /*+*/ 0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x7e,0x18,0x18,0x00,0x00,0x00,0x00,0x00,0x00, /*,*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x18,0x30,0x00,0x00,0x00, /*-*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*.*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x02,0x06,0x0c,0x18,0x30,0x60,0xc0,0x80,0x00,0x00,0x00,0x00, /*0*/ 0x00,0x00,0x7c,0xc6,0xc6,0xce,0xde,0xf6,0xe6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /*1*/ 0x00,0x00,0x18,0x38,0x78,0x18,0x18,0x18,0x18,0x18,0x18,0x7e,0x00,0x00,0x00,0x00, /*2*/ 0x00,0x00,0x7c,0xc6,0x06,0x0c,0x18,0x30,0x60,0xc0,0xc6,0xfe,0x00,0x00,0x00,0x00, /*3*/ 0x00,0x00,0x7c,0xc6,0x06,0x06,0x3c,0x06,0x06,0x06,0xc6,0x7c,0x00,0x00,0x00,0x00, /*4*/ 0x00,0x00,0x0c,0x1c,0x3c,0x6c,0xcc,0xfe,0x0c,0x0c,0x0c,0x1e,0x00,0x00,0x00,0x00, /*5*/ 0x00,0x00,0xfe,0xc0,0xc0,0xc0,0xfc,0x06,0x06,0x06,0xc6,0x7c,0x00,0x00,0x00,0x00, /*6*/ 0x00,0x00,0x38,0x60,0xc0,0xc0,0xfc,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /*7*/ 0x00,0x00,0xfe,0xc6,0x06,0x06,0x0c,0x18,0x30,0x30,0x30,0x30,0x00,0x00,0x00,0x00, /*8*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0x7c,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /*9*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0x7e,0x06,0x06,0x06,0x0c,0x78,0x00,0x00,0x00,0x00, /*:*/ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00, /*;*/ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x18,0x18,0x30,0x00,0x00,0x00,0x00, /*<*/ 0x00,0x00,0x00,0x06,0x0c,0x18,0x30,0x60,0x30,0x18,0x0c,0x06,0x00,0x00,0x00,0x00, /*=*/ 0x00,0x00,0x00,0x00,0x00,0x7e,0x00,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*>*/ 0x00,0x00,0x00,0x60,0x30,0x18,0x0c,0x06,0x0c,0x18,0x30,0x60,0x00,0x00,0x00,0x00, /*?*/ 0x00,0x00,0x7c,0xc6,0xc6,0x0c,0x18,0x18,0x18,0x00,0x18,0x18,0x00,0x00,0x00,0x00, /*@*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xde,0xde,0xde,0xdc,0xc0,0x7c,0x00,0x00,0x00,0x00, /*A*/ 0x00,0x00,0x10,0x38,0x6c,0xc6,0xc6,0xfe,0xc6,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00, /*B*/ 0x00,0x00,0xfc,0x66,0x66,0x66,0x7c,0x66,0x66,0x66,0x66,0xfc,0x00,0x00,0x00,0x00, /*C*/ 0x00,0x00,0x3c,0x66,0xc2,0xc0,0xc0,0xc0,0xc0,0xc2,0x66,0x3c,0x00,0x00,0x00,0x00, /*D*/ 0x00,0x00,0xf8,0x6c,0x66,0x66,0x66,0x66,0x66,0x66,0x6c,0xf8,0x00,0x00,0x00,0x00, /*E*/ 0x00,0x00,0xfe,0x66,0x62,0x68,0x78,0x68,0x60,0x62,0x66,0xfe,0x00,0x00,0x00,0x00, /*F*/ 0x00,0x00,0xfe,0x66,0x62,0x68,0x78,0x68,0x60,0x60,0x60,0xf0,0x00,0x00,0x00,0x00, /*G*/ 0x00,0x00,0x3c,0x66,0xc2,0xc0,0xc0,0xde,0xc6,0xc6,0x66,0x3a,0x00,0x00,0x00,0x00, /*H*/ 0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xfe,0xc6,0xc6,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00, /*I*/ 0x00,0x00,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /*J*/ 0x00,0x00,0x1e,0x0c,0x0c,0x0c,0x0c,0x0c,0xcc,0xcc,0xcc,0x78,0x00,0x00,0x00,0x00, /*K*/ 0x00,0x00,0xe6,0x66,0x66,0x6c,0x78,0x78,0x6c,0x66,0x66,0xe6,0x00,0x00,0x00,0x00, /*L*/ 0x00,0x00,0xf0,0x60,0x60,0x60,0x60,0x60,0x60,0x62,0x66,0xfe,0x00,0x00,0x00,0x00, /*M*/ 0x00,0x00,0xc3,0xe7,0xff,0xff,0xdb,0xc3,0xc3,0xc3,0xc3,0xc3,0x00,0x00,0x00,0x00, /*N*/ 
0x00,0x00,0xc6,0xe6,0xf6,0xfe,0xde,0xce,0xc6,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00, /*O*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /*P*/ 0x00,0x00,0xfc,0x66,0x66,0x66,0x7c,0x60,0x60,0x60,0x60,0xf0,0x00,0x00,0x00,0x00, /*Q*/ 0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xd6,0xde,0x7c,0x0c,0x0e,0x00,0x00, /*R*/ 0x00,0x00,0xfc,0x66,0x66,0x66,0x7c,0x6c,0x66,0x66,0x66,0xe6,0x00,0x00,0x00,0x00, /*S*/ 0x00,0x00,0x7c,0xc6,0xc6,0x60,0x38,0x0c,0x06,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /*T*/ 0x00,0x00,0xff,0xdb,0x99,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /*U*/ 0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /*V*/ 0x00,0x00,0xc3,0xc3,0xc3,0xc3,0xc3,0xc3,0xc3,0x66,0x3c,0x18,0x00,0x00,0x00,0x00, /*W*/ 0x00,0x00,0xc3,0xc3,0xc3,0xc3,0xc3,0xdb,0xdb,0xff,0x66,0x66,0x00,0x00,0x00,0x00, /*X*/ 0x00,0x00,0xc3,0xc3,0x66,0x3c,0x18,0x18,0x3c,0x66,0xc3,0xc3,0x00,0x00,0x00,0x00, /*Y*/ 0x00,0x00,0xc3,0xc3,0xc3,0x66,0x3c,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /*Z*/ 0x00,0x00,0xff,0xc3,0x86,0x0c,0x18,0x30,0x60,0xc1,0xc3,0xff,0x00,0x00,0x00,0x00, /*[*/ 0x00,0x00,0x3c,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x3c,0x00,0x00,0x00,0x00, /*\*/ 0x00,0x00,0x00,0x80,0xc0,0xe0,0x70,0x38,0x1c,0x0e,0x06,0x02,0x00,0x00,0x00,0x00, /*]*/ 0x00,0x00,0x3c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x3c,0x00,0x00,0x00,0x00, /*^*/ 0x10,0x38,0x6c,0xc6,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*_*/ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x00,0x00, /*`*/ 0x30,0x30,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /*a*/ 0x00,0x00,0x00,0x00,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /*b*/ 0x00,0x00,0xe0,0x60,0x60,0x78,0x6c,0x66,0x66,0x66,0x66,0x7c,0x00,0x00,0x00,0x00, /*c*/ 0x00,0x00,0x00,0x00,0x00,0x7c,0xc6,0xc0,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00, /*d*/ 0x00,0x00,0x1c,0x0c,0x0c,0x3c,0x6c,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /*e*/ 0x00,0x00,0x00,0x00,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00, /*f*/ 0x00,0x00,0x38,0x6c,0x64,0x60,0xf0,0x60,0x60,0x60,0x60,0xf0,0x00,0x00,0x00,0x00, /*g*/ 0x00,0x00,0x00,0x00,0x00,0x76,0xcc,0xcc,0xcc,0xcc,0xcc,0x7c,0x0c,0xcc,0x78,0x00, /*h*/ 0x00,0x00,0xe0,0x60,0x60,0x6c,0x76,0x66,0x66,0x66,0x66,0xe6,0x00,0x00,0x00,0x00, /*i*/ 0x00,0x00,0x18,0x18,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /*j*/ 0x00,0x00,0x06,0x06,0x00,0x0e,0x06,0x06,0x06,0x06,0x06,0x06,0x66,0x66,0x3c,0x00, /*k*/ 0x00,0x00,0xe0,0x60,0x60,0x66,0x6c,0x78,0x78,0x6c,0x66,0xe6,0x00,0x00,0x00,0x00, /*l*/ 0x00,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /*m*/ 0x00,0x00,0x00,0x00,0x00,0xe6,0xff,0xdb,0xdb,0xdb,0xdb,0xdb,0x00,0x00,0x00,0x00, /*n*/ 0x00,0x00,0x00,0x00,0x00,0xdc,0x66,0x66,0x66,0x66,0x66,0x66,0x00,0x00,0x00,0x00, /*o*/ 0x00,0x00,0x00,0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /*p*/ 0x00,0x00,0x00,0x00,0x00,0xdc,0x66,0x66,0x66,0x66,0x66,0x7c,0x60,0x60,0xf0,0x00, /*q*/ 0x00,0x00,0x00,0x00,0x00,0x76,0xcc,0xcc,0xcc,0xcc,0xcc,0x7c,0x0c,0x0c,0x1e,0x00, /*r*/ 0x00,0x00,0x00,0x00,0x00,0xdc,0x76,0x66,0x60,0x60,0x60,0xf0,0x00,0x00,0x00,0x00, /*s*/ 0x00,0x00,0x00,0x00,0x00,0x7c,0xc6,0x60,0x38,0x0c,0xc6,0x7c,0x00,0x00,0x00,0x00, /*t*/ 0x00,0x00,0x10,0x30,0x30,0xfc,0x30,0x30,0x30,0x30,0x36,0x1c,0x00,0x00,0x00,0x00, /*u*/ 0x00,0x00,0x00,0x00,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /*v*/ 
0x00,0x00,0x00,0x00,0x00,0xc3,0xc3,0xc3,0xc3,0x66,0x3c,0x18,0x00,0x00,0x00,0x00, /*w*/ 0x00,0x00,0x00,0x00,0x00,0xc3,0xc3,0xc3,0xdb,0xdb,0xff,0x66,0x00,0x00,0x00,0x00, /*x*/ 0x00,0x00,0x00,0x00,0x00,0xc3,0x66,0x3c,0x18,0x3c,0x66,0xc3,0x00,0x00,0x00,0x00, /*y*/ 0x00,0x00,0x00,0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7e,0x06,0x0c,0xf8,0x00, /*z*/ 0x00,0x00,0x00,0x00,0x00,0xfe,0xcc,0x18,0x30,0x60,0xc6,0xfe,0x00,0x00,0x00,0x00, /*{*/ 0x00,0x00,0x0e,0x18,0x18,0x18,0x70,0x18,0x18,0x18,0x18,0x0e,0x00,0x00,0x00,0x00, /*|*/ 0x00,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, /*}*/ 0x00,0x00,0x70,0x18,0x18,0x18,0x0e,0x18,0x18,0x18,0x18,0x70,0x00,0x00,0x00,0x00, /*~*/ 0x00,0x00,0x76,0xdc,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x10,0x38,0x6c,0xc6,0xc6,0xc6,0xfe,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x3c,0x66,0xc2,0xc0,0xc0,0xc0,0xc2,0x66,0x3c,0x0c,0x06,0x7c,0x00,0x00, /* */ 0x00,0x00,0xcc,0x00,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /* */ 0x00,0x0c,0x18,0x30,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x10,0x38,0x6c,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0xcc,0x00,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /* */ 0x00,0x60,0x30,0x18,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /* */ 0x00,0x38,0x6c,0x38,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x3c,0x66,0x60,0x60,0x66,0x3c,0x0c,0x06,0x3c,0x00,0x00,0x00, /* */ 0x00,0x10,0x38,0x6c,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0xc6,0x00,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x60,0x30,0x18,0x00,0x7c,0xc6,0xfe,0xc0,0xc0,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x66,0x00,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /* */ 0x00,0x18,0x3c,0x66,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /* */ 0x00,0x60,0x30,0x18,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /* */ 0x00,0xc6,0x00,0x10,0x38,0x6c,0xc6,0xc6,0xfe,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00, /* */ 0x38,0x6c,0x38,0x00,0x38,0x6c,0xc6,0xc6,0xfe,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00, /* */ 0x18,0x30,0x60,0x00,0xfe,0x66,0x60,0x7c,0x60,0x60,0x66,0xfe,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x6e,0x3b,0x1b,0x7e,0xd8,0xdc,0x77,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x3e,0x6c,0xcc,0xcc,0xfe,0xcc,0xcc,0xcc,0xcc,0xce,0x00,0x00,0x00,0x00, /* */ 0x00,0x10,0x38,0x6c,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0xc6,0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x60,0x30,0x18,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x30,0x78,0xcc,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /* */ 0x00,0x60,0x30,0x18,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0xc6,0x00,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7e,0x06,0x0c,0x78,0x00, /* */ 0x00,0xc6,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0xc6,0x00,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x18,0x18,0x7e,0xc3,0xc0,0xc0,0xc0,0xc3,0x7e,0x18,0x18,0x00,0x00,0x00,0x00, /* */ 0x00,0x38,0x6c,0x64,0x60,0xf0,0x60,0x60,0x60,0x60,0xe6,0xfc,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0xc3,0x66,0x3c,0x18,0xff,0x18,0xff,0x18,0x18,0x18,0x00,0x00,0x00,0x00, /* */ 
0x00,0xfc,0x66,0x66,0x7c,0x62,0x66,0x6f,0x66,0x66,0x66,0xf3,0x00,0x00,0x00,0x00, /* */ 0x00,0x0e,0x1b,0x18,0x18,0x18,0x7e,0x18,0x18,0x18,0x18,0x18,0xd8,0x70,0x00,0x00, /* */ 0x00,0x18,0x30,0x60,0x00,0x78,0x0c,0x7c,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /* */ 0x00,0x0c,0x18,0x30,0x00,0x38,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, /* */ 0x00,0x18,0x30,0x60,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x18,0x30,0x60,0x00,0xcc,0xcc,0xcc,0xcc,0xcc,0xcc,0x76,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x76,0xdc,0x00,0xdc,0x66,0x66,0x66,0x66,0x66,0x66,0x00,0x00,0x00,0x00, /* */ 0x76,0xdc,0x00,0xc6,0xe6,0xf6,0xfe,0xde,0xce,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00, /* */ 0x00,0x3c,0x6c,0x6c,0x3e,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x38,0x6c,0x6c,0x38,0x00,0x7c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x30,0x30,0x00,0x30,0x30,0x60,0xc0,0xc6,0xc6,0x7c,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0xc0,0xc0,0xc0,0xc0,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0x06,0x06,0x06,0x06,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0xc0,0xc0,0xc2,0xc6,0xcc,0x18,0x30,0x60,0xce,0x9b,0x06,0x0c,0x1f,0x00,0x00, /* */ 0x00,0xc0,0xc0,0xc2,0xc6,0xcc,0x18,0x30,0x66,0xce,0x96,0x3e,0x06,0x06,0x00,0x00, /* */ 0x00,0x00,0x18,0x18,0x00,0x18,0x18,0x18,0x3c,0x3c,0x3c,0x18,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x36,0x6c,0xd8,0x6c,0x36,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0xd8,0x6c,0x36,0x6c,0xd8,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44,0x11,0x44, /* */ 0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa,0x55,0xaa, /* */ 0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77,0xdd,0x77, /* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xf8,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x18,0x18,0x18,0x18,0x18,0xf8,0x18,0xf8,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0xf6,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xfe,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x00,0x00,0x00,0x00,0x00,0xf8,0x18,0xf8,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x36,0x36,0x36,0x36,0x36,0xf6,0x06,0xf6,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x00,0x00,0x00,0x00,0x00,0xfe,0x06,0xf6,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x36,0x36,0x36,0x36,0x36,0xf6,0x06,0xfe,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0xfe,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x18,0x18,0x18,0x18,0x18,0xf8,0x18,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xf8,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x1f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x1f,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xff,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 
0x18,0x18,0x18,0x18,0x18,0x1f,0x18,0x1f,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x37,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x36,0x36,0x36,0x36,0x36,0x37,0x30,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x3f,0x30,0x37,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x36,0x36,0x36,0x36,0x36,0xf7,0x00,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0xff,0x00,0xf7,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x36,0x36,0x36,0x36,0x36,0x37,0x30,0x37,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x00,0x00,0x00,0x00,0x00,0xff,0x00,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x36,0x36,0x36,0x36,0x36,0xf7,0x00,0xf7,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x18,0x18,0x18,0x18,0x18,0xff,0x00,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0xff,0x00,0xff,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x18,0x18,0x18,0x18,0x18,0x1f,0x18,0x1f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x1f,0x18,0x1f,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x3f,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x36,0x36,0x36,0x36,0x36,0x36,0x36,0xff,0x36,0x36,0x36,0x36,0x36,0x36,0x36,0x36, /* */ 0x18,0x18,0x18,0x18,0x18,0xff,0x18,0xff,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x1f,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, /* */ 0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0,0xf0, /* */ 0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f, /* */ 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x76,0xdc,0xd8,0xd8,0xd8,0xdc,0x76,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x78,0xcc,0xcc,0xcc,0xd8,0xcc,0xc6,0xc6,0xc6,0xcc,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0xfe,0xc6,0xc6,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0xfe,0x6c,0x6c,0x6c,0x6c,0x6c,0x6c,0x6c,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0xfe,0xc6,0x60,0x30,0x18,0x30,0x60,0xc6,0xfe,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x7e,0xd8,0xd8,0xd8,0xd8,0xd8,0x70,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x66,0x66,0x66,0x66,0x66,0x7c,0x60,0x60,0xc0,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x76,0xdc,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x7e,0x18,0x3c,0x66,0x66,0x66,0x3c,0x18,0x7e,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x38,0x6c,0xc6,0xc6,0xfe,0xc6,0xc6,0x6c,0x38,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x38,0x6c,0xc6,0xc6,0xc6,0x6c,0x6c,0x6c,0x6c,0xee,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x1e,0x30,0x18,0x0c,0x3e,0x66,0x66,0x66,0x66,0x3c,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x7e,0xdb,0xdb,0xdb,0x7e,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x03,0x06,0x7e,0xdb,0xdb,0xf3,0x7e,0x60,0xc0,0x00,0x00,0x00,0x00, /* */ 
0x00,0x00,0x1c,0x30,0x60,0x60,0x7c,0x60,0x60,0x60,0x30,0x1c,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x7c,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0xc6,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0xfe,0x00,0x00,0xfe,0x00,0x00,0xfe,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x18,0x18,0x7e,0x18,0x18,0x00,0x00,0xff,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x30,0x18,0x0c,0x06,0x0c,0x18,0x30,0x00,0x7e,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x0c,0x18,0x30,0x60,0x30,0x18,0x0c,0x00,0x7e,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x0e,0x1b,0x1b,0x1b,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18, /* */ 0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0xd8,0xd8,0xd8,0x70,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x7e,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x76,0xdc,0x00,0x76,0xdc,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x38,0x6c,0x6c,0x38,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x0f,0x0c,0x0c,0x0c,0x0c,0x0c,0xec,0x6c,0x6c,0x3c,0x1c,0x00,0x00,0x00,0x00, /* */ 0x00,0xd8,0x6c,0x6c,0x6c,0x6c,0x6c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x70,0xd8,0x30,0x60,0xc8,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, } }; const struct font_desc font_sun_8x16 = { .idx = SUN8x16_IDX, .name = "SUN8x16", .width = 8, .height = 16, .charcount = 256, .data = fontdata_sun8x16.data, #ifdef __sparc__ .pref = 10, #else .pref = -1, #endif };
linux-master
lib/fonts/font_sun8x16.c
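In each of these files the bitmap blob is wrapped in struct font_data with a { 0, 0, FONTDATAMAX, 0 } header, and FONTDATAMAX is charcount * height for these byte-per-row fonts (256 * 16 = 4096 here, 11 * 256 for the 6x11 font that follows). The glyph for character c therefore starts c * height bytes into the data. A small illustrative helper built on that layout, assuming font_desc.data needs a byte-pointer cast; it is not part of the kernel API:

#include <linux/font.h>

/* For fonts up to 8 pixels wide, each glyph is font->height consecutive
 * bytes, one byte per row, MSB = leftmost column. */
static const unsigned char *glyph_rows(const struct font_desc *font,
				       unsigned char c)
{
	return (const unsigned char *)font->data + c * font->height;
}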
// SPDX-License-Identifier: GPL-2.0 /**********************************************/ /* */ /* Font file generated by rthelen */ /* */ /**********************************************/ #include <linux/font.h> #define FONTDATAMAX (11*256) static const struct font_data fontdata_6x11 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 1 0x01 '^A' */ 0x00, /* 00000000 */ 0x78, /* 0 000 */ 0x84, /* 0000 00 */ 0xcc, /* 00 00 */ 0x84, /* 0000 00 */ 0xb4, /* 0 0 00 */ 0x84, /* 0000 00 */ 0x78, /* 0 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 2 0x02 '^B' */ 0x00, /* 00000000 */ 0x78, /* 0 000 */ 0xfc, /* 00 */ 0xb4, /* 0 0 00 */ 0xfc, /* 00 */ 0xcc, /* 00 00 */ 0xfc, /* 00 */ 0x78, /* 0 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 3 0x03 '^C' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x28, /* 00 0 000 */ 0x7c, /* 0 00 */ 0x7c, /* 0 00 */ 0x38, /* 00 000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 4 0x04 '^D' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x7c, /* 0 00 */ 0x38, /* 00 000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 5 0x05 '^E' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x38, /* 00 000 */ 0x6c, /* 0 0 00 */ 0x6c, /* 0 0 00 */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 6 0x06 '^F' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x7c, /* 0 00 */ 0x7c, /* 0 00 */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 7 0x07 '^G' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x30, /* 00 0000 */ 0x78, /* 0 000 */ 0x30, /* 00 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 8 0x08 '^H' */ 0xff, /* */ 0xff, /* */ 0xff, /* */ 0xcf, /* 00 */ 0x87, /* 0000 */ 0xcf, /* 00 */ 0xff, /* */ 0xff, /* */ 0xff, /* */ 0xff, /* */ 0xff, /* */ /* 9 0x09 '^I' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x30, /* 00 0000 */ 0x48, /* 0 00 000 */ 0x84, /* 0000 00 */ 0x48, /* 0 00 000 */ 0x30, /* 00 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 10 0x0a '^J' */ 0xff, /* */ 0xff, /* */ 0xcf, /* 00 */ 0xb7, /* 0 0 */ 0x7b, /* 0 0 */ 0xb7, /* 0 0 */ 0xcf, /* 00 */ 0xff, /* */ 0xff, /* */ 0xff, /* */ 0xff, /* */ /* 11 0x0b '^K' */ 0x00, /* 00000000 */ 0x3c, /* 00 00 */ 0x14, /* 000 0 00 */ 0x20, /* 00 00000 */ 0x78, /* 0 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 12 0x0c '^L' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x10, /* 000 0000 */ 0x7c, /* 0 00 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 13 0x0d '^M' */ 0x00, /* 00000000 */ 0x3c, /* 00 00 */ 0x24, /* 00 00 00 */ 0x3c, /* 00 00 */ 0x20, /* 00 00000 */ 0x20, /* 00 00000 */ 0xe0, /* 00000 */ 0xc0, /* 000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 14 0x0e '^N' */ 0x00, /* 00000000 */ 
0x7c, /* 0 00 */ 0x44, /* 0 000 00 */ 0x7c, /* 0 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0xcc, /* 00 00 */ 0xcc, /* 00 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 15 0x0f '^O' */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x54, /* 0 0 0 00 */ 0x38, /* 00 000 */ 0x6c, /* 0 0 00 */ 0x38, /* 00 000 */ 0x54, /* 0 0 0 00 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 16 0x10 '^P' */ 0x00, /* 00000000 */ 0x40, /* 0 000000 */ 0x60, /* 0 00000 */ 0x70, /* 0 0000 */ 0x7c, /* 0 00 */ 0x70, /* 0 0000 */ 0x60, /* 0 00000 */ 0x40, /* 0 000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 17 0x11 '^Q' */ 0x00, /* 00000000 */ 0x04, /* 00000 00 */ 0x0c, /* 0000 00 */ 0x1c, /* 000 00 */ 0x7c, /* 0 00 */ 0x1c, /* 000 00 */ 0x0c, /* 0000 00 */ 0x04, /* 00000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 18 0x12 '^R' */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x54, /* 0 0 0 00 */ 0x10, /* 000 0000 */ 0x54, /* 0 0 0 00 */ 0x38, /* 00 000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 19 0x13 '^S' */ 0x00, /* 00000000 */ 0x48, /* 0 00 000 */ 0x48, /* 0 00 000 */ 0x48, /* 0 00 000 */ 0x48, /* 0 00 000 */ 0x48, /* 0 00 000 */ 0x00, /* 00000000 */ 0x48, /* 0 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 20 0x14 '^T' */ 0x3c, /* 00 00 */ 0x54, /* 0 0 0 00 */ 0x54, /* 0 0 0 00 */ 0x54, /* 0 0 0 00 */ 0x3c, /* 00 00 */ 0x14, /* 000 0 00 */ 0x14, /* 000 0 00 */ 0x14, /* 000 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 21 0x15 '^U' */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x24, /* 00 00 00 */ 0x50, /* 0 0 0000 */ 0x48, /* 0 00 000 */ 0x24, /* 00 00 00 */ 0x14, /* 000 0 00 */ 0x48, /* 0 00 000 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ /* 22 0x16 '^V' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf8, /* 000 */ 0xf8, /* 000 */ 0xf8, /* 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 23 0x17 '^W' */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x54, /* 0 0 0 00 */ 0x10, /* 000 0000 */ 0x54, /* 0 0 0 00 */ 0x38, /* 00 000 */ 0x10, /* 000 0000 */ 0x7c, /* 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 24 0x18 '^X' */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x54, /* 0 0 0 00 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 25 0x19 '^Y' */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x54, /* 0 0 0 00 */ 0x38, /* 00 000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 26 0x1a '^Z' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x08, /* 0000 000 */ 0x7c, /* 0 00 */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 27 0x1b '^[' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x20, /* 00 00000 */ 0x7c, /* 0 00 */ 0x20, /* 00 00000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 28 0x1c '^\' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x78, /* 0 000 */ 
0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 29 0x1d '^]' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x48, /* 0 00 000 */ 0x84, /* 0000 00 */ 0xfc, /* 00 */ 0x84, /* 0000 00 */ 0x48, /* 0 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 30 0x1e '^^' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x38, /* 00 000 */ 0x7c, /* 0 00 */ 0x7c, /* 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 31 0x1f '^`' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 0 00 */ 0x7c, /* 0 00 */ 0x38, /* 00 000 */ 0x38, /* 00 000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 32 0x20 ' ' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 33 0x21 '!' */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 34 0x22 '"' */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 35 0x23 '#' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x28, /* 00 0 000 */ 0x7c, /* 0 00 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x7c, /* 0 00 */ 0x28, /* 00 0 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 36 0x24 '$' */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x54, /* 0 0 0 00 */ 0x50, /* 0 0 0000 */ 0x38, /* 00 000 */ 0x14, /* 000 0 00 */ 0x54, /* 0 0 0 00 */ 0x38, /* 00 000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 37 0x25 '%' */ 0x00, /* 00000000 */ 0x64, /* 0 00 00 */ 0x64, /* 0 00 00 */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x20, /* 00 00000 */ 0x4c, /* 0 00 00 */ 0x4c, /* 0 00 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 38 0x26 '&' */ 0x00, /* 00000000 */ 0x30, /* 00 0000 */ 0x48, /* 0 00 000 */ 0x50, /* 0 0 0000 */ 0x20, /* 00 00000 */ 0x54, /* 0 0 0 00 */ 0x48, /* 0 00 000 */ 0x34, /* 00 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 39 0x27 ''' */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 40 0x28 '(' */ 0x04, /* 00000 00 */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x08, /* 0000 000 */ 0x04, /* 00000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 41 0x29 ')' */ 0x20, /* 00 00000 */ 0x10, /* 000 0000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x20, /* 00 00000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 42 0x2a '*' */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x54, /* 0 0 0 00 */ 0x38, /* 00 000 */ 0x54, /* 0 0 0 00 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 43 0x2b '+' */ 0x00, /* 00000000 */ 0x00, /* 00000000 
*/ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x7c, /* 0 00 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 44 0x2c ',' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x30, /* 00 0000 */ 0x30, /* 00 0000 */ 0x10, /* 000 0000 */ 0x20, /* 00 00000 */ 0x00, /* 00000000 */ /* 45 0x2d '-' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 46 0x2e '.' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 000 000 */ 0x18, /* 000 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 47 0x2f '/' */ 0x04, /* 00000 00 */ 0x04, /* 00000 00 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x20, /* 00 00000 */ 0x20, /* 00 00000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x00, /* 00000000 */ /* 48 0x30 '0' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x4c, /* 0 00 00 */ 0x54, /* 0 0 0 00 */ 0x64, /* 0 00 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 49 0x31 '1' */ 0x00, /* 00000000 */ 0x08, /* 0000 000 */ 0x18, /* 000 000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x1c, /* 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 50 0x32 '2' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x04, /* 00000 00 */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x20, /* 00 00000 */ 0x7c, /* 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 51 0x33 '3' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x04, /* 00000 00 */ 0x18, /* 000 000 */ 0x04, /* 00000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 52 0x34 '4' */ 0x00, /* 00000000 */ 0x08, /* 0000 000 */ 0x18, /* 000 000 */ 0x28, /* 00 0 000 */ 0x48, /* 0 00 000 */ 0x7c, /* 0 00 */ 0x08, /* 0000 000 */ 0x1c, /* 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 53 0x35 '5' */ 0x00, /* 00000000 */ 0x7c, /* 0 00 */ 0x40, /* 0 000000 */ 0x78, /* 0 000 */ 0x04, /* 00000 00 */ 0x04, /* 00000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 54 0x36 '6' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x40, /* 0 000000 */ 0x78, /* 0 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 55 0x37 '7' */ 0x00, /* 00000000 */ 0x7c, /* 0 00 */ 0x04, /* 00000 00 */ 0x04, /* 00000 00 */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 56 0x38 '8' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 57 0x39 '9' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x3c, /* 00 00 */ 0x04, /* 00000 00 */ 0x38, /* 00 000 */ 0x00, /* 
00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 58 0x3a ':' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x18, /* 000 000 */ 0x18, /* 000 000 */ 0x00, /* 00000000 */ 0x18, /* 000 000 */ 0x18, /* 000 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 59 0x3b ';' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x30, /* 00 0000 */ 0x30, /* 00 0000 */ 0x00, /* 00000000 */ 0x30, /* 00 0000 */ 0x30, /* 00 0000 */ 0x10, /* 000 0000 */ 0x20, /* 00 00000 */ 0x00, /* 00000000 */ /* 60 0x3c '<' */ 0x00, /* 00000000 */ 0x04, /* 00000 00 */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x20, /* 00 00000 */ 0x10, /* 000 0000 */ 0x08, /* 0000 000 */ 0x04, /* 00000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 61 0x3d '=' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 0 00 */ 0x00, /* 00000000 */ 0x7c, /* 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 62 0x3e '>' */ 0x00, /* 00000000 */ 0x20, /* 00 00000 */ 0x10, /* 000 0000 */ 0x08, /* 0000 000 */ 0x04, /* 00000 00 */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x20, /* 00 00000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 63 0x3f '?' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x04, /* 00000 00 */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 64 0x40 '@' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x74, /* 0 0 00 */ 0x54, /* 0 0 0 00 */ 0x78, /* 0 000 */ 0x40, /* 0 000000 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 65 0x41 'A' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x7c, /* 0 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 66 0x42 'B' */ 0x00, /* 00000000 */ 0x78, /* 0 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x78, /* 0 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x78, /* 0 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 67 0x43 'C' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 68 0x44 'D' */ 0x00, /* 00000000 */ 0x78, /* 0 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x78, /* 0 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 69 0x45 'E' */ 0x00, /* 00000000 */ 0x7c, /* 0 00 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x78, /* 0 000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x7c, /* 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 70 0x46 'F' */ 0x00, /* 00000000 */ 0x7c, /* 0 00 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x78, /* 0 000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 71 0x47 'G' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x40, /* 0 000000 */ 0x4c, /* 0 00 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 72 0x48 'H' */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 
0 000 00 */ 0x7c, /* 0 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 73 0x49 'I' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 74 0x4a 'J' */ 0x00, /* 00000000 */ 0x04, /* 00000 00 */ 0x04, /* 00000 00 */ 0x04, /* 00000 00 */ 0x04, /* 00000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 75 0x4b 'K' */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x48, /* 0 00 000 */ 0x50, /* 0 0 0000 */ 0x60, /* 0 00000 */ 0x50, /* 0 0 0000 */ 0x48, /* 0 00 000 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 76 0x4c 'L' */ 0x00, /* 00000000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x7c, /* 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 77 0x4d 'M' */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x6c, /* 0 0 00 */ 0x54, /* 0 0 0 00 */ 0x54, /* 0 0 0 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 78 0x4e 'N' */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x64, /* 0 00 00 */ 0x54, /* 0 0 0 00 */ 0x4c, /* 0 00 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 79 0x4f 'O' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 80 0x50 'P' */ 0x00, /* 00000000 */ 0x78, /* 0 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x78, /* 0 000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 81 0x51 'Q' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x54, /* 0 0 0 00 */ 0x38, /* 00 000 */ 0x04, /* 00000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 82 0x52 'R' */ 0x00, /* 00000000 */ 0x78, /* 0 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x78, /* 0 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 83 0x53 'S' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x40, /* 0 000000 */ 0x38, /* 00 000 */ 0x04, /* 00000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 84 0x54 'T' */ 0x00, /* 00000000 */ 0x7c, /* 0 00 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 85 0x55 'U' */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 86 0x56 'V' */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x28, /* 00 0 000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 
0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 87 0x57 'W' */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x54, /* 0 0 0 00 */ 0x54, /* 0 0 0 00 */ 0x6c, /* 0 0 00 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 88 0x58 'X' */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x28, /* 00 0 000 */ 0x10, /* 000 0000 */ 0x28, /* 00 0 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 89 0x59 'Y' */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x28, /* 00 0 000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 90 0x5a 'Z' */ 0x00, /* 00000000 */ 0x7c, /* 0 00 */ 0x04, /* 00000 00 */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x20, /* 00 00000 */ 0x40, /* 0 000000 */ 0x7c, /* 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 91 0x5b '[' */ 0x0c, /* 0000 00 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x0c, /* 0000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 92 0x5c '\' */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x20, /* 00 00000 */ 0x20, /* 00 00000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x04, /* 00000 00 */ 0x04, /* 00000 00 */ 0x00, /* 00000000 */ /* 93 0x5d ']' */ 0x30, /* 00 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x30, /* 00 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 94 0x5e '^' */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x28, /* 00 0 000 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 95 0x5f '_' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 96 0x60 '`' */ 0x20, /* 00 00000 */ 0x10, /* 000 0000 */ 0x08, /* 0000 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 97 0x61 'a' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x4c, /* 0 00 00 */ 0x34, /* 00 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 98 0x62 'b' */ 0x00, /* 00000000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x78, /* 0 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x78, /* 0 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 99 0x63 'c' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x40, /* 0 000000 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 100 0x64 'd' */ 0x00, /* 00000000 */ 0x04, /* 00000 00 */ 0x04, /* 00000 00 */ 0x3c, /* 00 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x3c, /* 00 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 101 0x65 'e' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, 
/* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x7c, /* 0 00 */ 0x40, /* 0 000000 */ 0x3c, /* 00 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 102 0x66 'f' */ 0x00, /* 00000000 */ 0x0c, /* 0000 00 */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 103 0x67 'g' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x3c, /* 00 00 */ 0x04, /* 00000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ /* 104 0x68 'h' */ 0x00, /* 00000000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x78, /* 0 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 105 0x69 'i' */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x30, /* 00 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 106 0x6a 'j' */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x30, /* 00 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x60, /* 0 00000 */ 0x00, /* 00000000 */ /* 107 0x6b 'k' */ 0x00, /* 00000000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x48, /* 0 00 000 */ 0x50, /* 0 0 0000 */ 0x70, /* 0 0000 */ 0x48, /* 0 00 000 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 108 0x6c 'l' */ 0x00, /* 00000000 */ 0x30, /* 00 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 109 0x6d 'm' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x78, /* 0 000 */ 0x54, /* 0 0 0 00 */ 0x54, /* 0 0 0 00 */ 0x54, /* 0 0 0 00 */ 0x54, /* 0 0 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 110 0x6e 'n' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x58, /* 0 0 000 */ 0x64, /* 0 00 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 111 0x6f 'o' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 112 0x70 'p' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x78, /* 0 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x78, /* 0 000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x00, /* 00000000 */ /* 113 0x71 'q' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x3c, /* 00 00 */ 0x04, /* 00000 00 */ 0x04, /* 00000 00 */ 0x00, /* 00000000 */ /* 114 0x72 'r' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x58, /* 0 0 000 */ 0x64, /* 0 00 00 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 115 0x73 's' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00 00 */ 0x40, /* 0 000000 */ 0x38, /* 00 000 */ 0x04, /* 00000 00 */ 0x78, /* 0 000 
*/ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 116 0x74 't' */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x0c, /* 0000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 117 0x75 'u' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x4c, /* 0 00 00 */ 0x34, /* 00 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 118 0x76 'v' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x28, /* 00 0 000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 119 0x77 'w' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x54, /* 0 0 0 00 */ 0x54, /* 0 0 0 00 */ 0x54, /* 0 0 0 00 */ 0x54, /* 0 0 0 00 */ 0x28, /* 00 0 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 120 0x78 'x' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x28, /* 00 0 000 */ 0x10, /* 000 0000 */ 0x28, /* 00 0 000 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 121 0x79 'y' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x3c, /* 00 00 */ 0x04, /* 00000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ /* 122 0x7a 'z' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 0 00 */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x20, /* 00 00000 */ 0x7c, /* 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 123 0x7b '{' */ 0x04, /* 00000 00 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x04, /* 00000 00 */ /* 124 0x7c '|' */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 125 0x7d '}' */ 0x20, /* 00 00000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x20, /* 00 00000 */ /* 126 0x7e '~' */ 0x00, /* 00000000 */ 0x34, /* 00 0 00 */ 0x58, /* 0 0 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 127 0x7f '^?' 
*/ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x28, /* 00 0 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x7c, /* 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 128 0x80 '\200' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x10, /* 000 0000 */ 0x20, /* 00 00000 */ 0x00, /* 00000000 */ /* 129 0x81 '\201' */ 0x00, /* 00000000 */ 0x28, /* 00 0 000 */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x4c, /* 0 00 00 */ 0x34, /* 00 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 130 0x82 '\202' */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x7c, /* 0 00 */ 0x40, /* 0 000000 */ 0x3c, /* 00 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 131 0x83 '\203' */ 0x10, /* 000 0000 */ 0x28, /* 00 0 000 */ 0x00, /* 00000000 */ 0x3c, /* 00 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x4c, /* 0 00 00 */ 0x34, /* 00 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 132 0x84 '\204' */ 0x00, /* 00000000 */ 0x28, /* 00 0 000 */ 0x00, /* 00000000 */ 0x3c, /* 00 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x4c, /* 0 00 00 */ 0x34, /* 00 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 133 0x85 '\205' */ 0x10, /* 000 0000 */ 0x08, /* 0000 000 */ 0x00, /* 00000000 */ 0x3c, /* 00 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x4c, /* 0 00 00 */ 0x34, /* 00 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 134 0x86 '\206' */ 0x18, /* 000 000 */ 0x24, /* 00 00 00 */ 0x18, /* 000 000 */ 0x3c, /* 00 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x4c, /* 0 00 00 */ 0x34, /* 00 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 135 0x87 '\207' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x3c, /* 00 00 */ 0x10, /* 000 0000 */ 0x20, /* 00 00000 */ 0x00, /* 00000000 */ /* 136 0x88 '\210' */ 0x10, /* 000 0000 */ 0x28, /* 00 0 000 */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x7c, /* 0 00 */ 0x40, /* 0 000000 */ 0x3c, /* 00 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 137 0x89 '\211' */ 0x00, /* 00000000 */ 0x28, /* 00 0 000 */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x7c, /* 0 00 */ 0x40, /* 0 000000 */ 0x3c, /* 00 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 138 0x8a '\212' */ 0x20, /* 00 00000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x7c, /* 0 00 */ 0x40, /* 0 000000 */ 0x3c, /* 00 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 139 0x8b '\213' */ 0x00, /* 00000000 */ 0x28, /* 00 0 000 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 140 0x8c '\214' */ 0x10, /* 000 0000 */ 0x28, /* 00 0 000 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 141 0x8d '\215' */ 0x20, /* 00 00000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 
*/ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 142 0x8e '\216' */ 0x84, /* 0000 00 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x7c, /* 0 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 143 0x8f '\217' */ 0x58, /* 0 0 000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x7c, /* 0 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 144 0x90 '\220' */ 0x10, /* 000 0000 */ 0x7c, /* 0 00 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x78, /* 0 000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x7c, /* 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 145 0x91 '\221' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x54, /* 0 0 0 00 */ 0x5c, /* 0 0 00 */ 0x50, /* 0 0 0000 */ 0x3c, /* 00 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 146 0x92 '\222' */ 0x00, /* 00000000 */ 0x3c, /* 00 00 */ 0x50, /* 0 0 0000 */ 0x50, /* 0 0 0000 */ 0x78, /* 0 000 */ 0x50, /* 0 0 0000 */ 0x50, /* 0 0 0000 */ 0x5c, /* 0 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 147 0x93 '\223' */ 0x10, /* 000 0000 */ 0x28, /* 00 0 000 */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 148 0x94 '\224' */ 0x00, /* 00000000 */ 0x28, /* 00 0 000 */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 149 0x95 '\225' */ 0x20, /* 00 00000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 150 0x96 '\226' */ 0x10, /* 000 0000 */ 0x28, /* 00 0 000 */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x4c, /* 0 00 00 */ 0x34, /* 00 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 151 0x97 '\227' */ 0x20, /* 00 00000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x4c, /* 0 00 00 */ 0x34, /* 00 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 152 0x98 '\230' */ 0x00, /* 00000000 */ 0x28, /* 00 0 000 */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x3c, /* 00 00 */ 0x04, /* 00000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ /* 153 0x99 '\231' */ 0x84, /* 0000 00 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 154 0x9a '\232' */ 0x88, /* 000 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 155 0x9b '\233' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x54, /* 0 0 0 00 */ 0x50, /* 0 0 0000 */ 0x54, /* 0 0 0 00 */ 0x38, /* 00 000 
*/ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 156 0x9c '\234' */ 0x30, /* 00 0000 */ 0x48, /* 0 00 000 */ 0x40, /* 0 000000 */ 0x70, /* 0 0000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x44, /* 0 000 00 */ 0x78, /* 0 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 157 0x9d '\235' */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x28, /* 00 0 000 */ 0x7c, /* 0 00 */ 0x10, /* 000 0000 */ 0x7c, /* 0 00 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 158 0x9e '\236' */ 0x00, /* 00000000 */ 0x70, /* 0 0000 */ 0x48, /* 0 00 000 */ 0x70, /* 0 0000 */ 0x48, /* 0 00 000 */ 0x5c, /* 0 0 00 */ 0x48, /* 0 00 000 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 159 0x9f '\237' */ 0x00, /* 00000000 */ 0x0c, /* 0000 00 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x60, /* 0 00000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 160 0xa0 '\240' */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x3c, /* 00 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x4c, /* 0 00 00 */ 0x34, /* 00 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 161 0xa1 '\241' */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 162 0xa2 '\242' */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 163 0xa3 '\243' */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x4c, /* 0 00 00 */ 0x34, /* 00 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 164 0xa4 '\244' */ 0x34, /* 00 0 00 */ 0x58, /* 0 0 000 */ 0x00, /* 00000000 */ 0x58, /* 0 0 000 */ 0x64, /* 0 00 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 165 0xa5 '\245' */ 0x58, /* 0 0 000 */ 0x44, /* 0 000 00 */ 0x64, /* 0 00 00 */ 0x54, /* 0 0 0 00 */ 0x4c, /* 0 00 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 166 0xa6 '\246' */ 0x00, /* 00000000 */ 0x1c, /* 000 00 */ 0x24, /* 00 00 00 */ 0x24, /* 00 00 00 */ 0x1c, /* 000 00 */ 0x00, /* 00000000 */ 0x3c, /* 00 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 167 0xa7 '\247' */ 0x00, /* 00000000 */ 0x18, /* 000 000 */ 0x24, /* 00 00 00 */ 0x24, /* 00 00 00 */ 0x18, /* 000 000 */ 0x00, /* 00000000 */ 0x3c, /* 00 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 168 0xa8 '\250' */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x20, /* 00 00000 */ 0x40, /* 0 000000 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 169 0xa9 '\251' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 0 00 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 170 
0xaa '\252' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 0 00 */ 0x04, /* 00000 00 */ 0x04, /* 00000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 171 0xab '\253' */ 0x20, /* 00 00000 */ 0x60, /* 0 00000 */ 0x24, /* 00 00 00 */ 0x28, /* 00 0 000 */ 0x10, /* 000 0000 */ 0x28, /* 00 0 000 */ 0x44, /* 0 000 00 */ 0x08, /* 0000 000 */ 0x1c, /* 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 172 0xac '\254' */ 0x20, /* 00 00000 */ 0x60, /* 0 00000 */ 0x24, /* 00 00 00 */ 0x28, /* 00 0 000 */ 0x10, /* 000 0000 */ 0x28, /* 00 0 000 */ 0x58, /* 0 0 000 */ 0x3c, /* 00 00 */ 0x08, /* 0000 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 173 0xad '\255' */ 0x00, /* 00000000 */ 0x08, /* 0000 000 */ 0x00, /* 00000000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x08, /* 0000 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 174 0xae '\256' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x24, /* 00 00 00 */ 0x48, /* 0 00 000 */ 0x48, /* 0 00 000 */ 0x24, /* 00 00 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 175 0xaf '\257' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x48, /* 0 00 000 */ 0x24, /* 00 00 00 */ 0x24, /* 00 00 00 */ 0x48, /* 0 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 176 0xb0 '\260' */ 0x11, /* 000 000 */ 0x44, /* 0 000 00 */ 0x11, /* 000 000 */ 0x44, /* 0 000 00 */ 0x11, /* 000 000 */ 0x44, /* 0 000 00 */ 0x11, /* 000 000 */ 0x44, /* 0 000 00 */ 0x11, /* 000 000 */ 0x44, /* 0 000 00 */ 0x11, /* 000 000 */ /* 177 0xb1 '\261' */ 0x55, /* 0 0 0 0 */ 0xaa, /* 0 0 0 0 */ 0x55, /* 0 0 0 0 */ 0xaa, /* 0 0 0 0 */ 0x55, /* 0 0 0 0 */ 0xaa, /* 0 0 0 0 */ 0x55, /* 0 0 0 0 */ 0xaa, /* 0 0 0 0 */ 0x55, /* 0 0 0 0 */ 0xaa, /* 0 0 0 0 */ 0x55, /* 0 0 0 0 */ /* 178 0xb2 '\262' */ 0xdd, /* 0 0 */ 0x77, /* 0 0 */ 0xdd, /* 0 0 */ 0x77, /* 0 0 */ 0xdd, /* 0 0 */ 0x77, /* 0 0 */ 0xdd, /* 0 0 */ 0x77, /* 0 0 */ 0xdd, /* 0 0 */ 0x77, /* 0 0 */ 0xdd, /* 0 0 */ /* 179 0xb3 '\263' */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ /* 180 0xb4 '\264' */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0xf0, /* 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ /* 181 0xb5 '\265' */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0xf0, /* 0000 */ 0x10, /* 000 0000 */ 0xf0, /* 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ /* 182 0xb6 '\266' */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0xe8, /* 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ /* 183 0xb7 '\267' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf8, /* 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ /* 184 0xb8 '\270' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf0, /* 0000 */ 0x10, /* 000 0000 */ 
0xf0, /* 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ /* 185 0xb9 '\271' */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0xe8, /* 0 000 */ 0x08, /* 0000 000 */ 0xe8, /* 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ /* 186 0xba '\272' */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ /* 187 0xbb '\273' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf8, /* 000 */ 0x08, /* 0000 000 */ 0xe8, /* 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ /* 188 0xbc '\274' */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0xe8, /* 0 000 */ 0x08, /* 0000 000 */ 0xf8, /* 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 189 0xbd '\275' */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0xf8, /* 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 190 0xbe '\276' */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0xf0, /* 0000 */ 0x10, /* 000 0000 */ 0xf0, /* 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 191 0xbf '\277' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xf0, /* 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ /* 192 0xc0 '\300' */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x1c, /* 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 193 0xc1 '\301' */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0xfc, /* 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 194 0xc2 '\302' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 00 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ /* 195 0xc3 '\303' */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x1c, /* 000 00 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ /* 196 0xc4 '\304' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 197 0xc5 '\305' */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0xfc, /* 00 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ /* 198 0xc6 '\306' */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x1c, /* 000 00 */ 0x10, /* 000 0000 */ 0x1c, /* 000 00 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 
0x10, /* 000 0000 */ 0x10, /* 000 0000 */ /* 199 0xc7 '\307' */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x2c, /* 00 0 00 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ /* 200 0xc8 '\310' */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x2c, /* 00 0 00 */ 0x20, /* 00 00000 */ 0x3c, /* 00 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 201 0xc9 '\311' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00 00 */ 0x20, /* 00 00000 */ 0x2c, /* 00 0 00 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ /* 202 0xca '\312' */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0xec, /* 0 00 */ 0x00, /* 00000000 */ 0xfc, /* 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 203 0xcb '\313' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 00 */ 0x00, /* 00000000 */ 0xec, /* 0 00 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ /* 204 0xcc '\314' */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x2c, /* 00 0 00 */ 0x20, /* 00 00000 */ 0x2c, /* 00 0 00 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ /* 205 0xcd '\315' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 00 */ 0x00, /* 00000000 */ 0xfc, /* 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 206 0xce '\316' */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0xec, /* 0 00 */ 0x00, /* 00000000 */ 0xec, /* 0 00 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ /* 207 0xcf '\317' */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0xfc, /* 00 */ 0x00, /* 00000000 */ 0xfc, /* 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 208 0xd0 '\320' */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0xfc, /* 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 209 0xd1 '\321' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 00 */ 0x00, /* 00000000 */ 0xfc, /* 00 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ /* 210 0xd2 '\322' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 00 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ /* 211 0xd3 '\323' */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x3c, /* 00 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 212 0xd4 '\324' */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x1c, /* 000 00 */ 0x10, /* 000 0000 */ 0x1c, /* 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 213 0xd5 '\325' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 
0x00, /* 00000000 */ 0x1c, /* 000 00 */ 0x10, /* 000 0000 */ 0x1c, /* 000 00 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ /* 214 0xd6 '\326' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00 00 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ /* 215 0xd7 '\327' */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0xfc, /* 00 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ /* 216 0xd8 '\330' */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0xfc, /* 00 */ 0x10, /* 000 0000 */ 0xfc, /* 00 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ /* 217 0xd9 '\331' */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0xf0, /* 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 218 0xda '\332' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x1f, /* 000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ /* 219 0xdb '\333' */ 0xfc, /* 00 */ 0xfc, /* 00 */ 0xfc, /* 00 */ 0xfc, /* 00 */ 0xfc, /* 00 */ 0xfc, /* 00 */ 0xfc, /* 00 */ 0xfc, /* 00 */ 0xfc, /* 00 */ 0xfc, /* 00 */ 0xfc, /* 00 */ /* 220 0xdc '\334' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 00 */ 0xfc, /* 00 */ 0xfc, /* 00 */ 0xfc, /* 00 */ 0xfc, /* 00 */ 0xfc, /* 00 */ /* 221 0xdd '\335' */ 0xe0, /* 00000 */ 0xe0, /* 00000 */ 0xe0, /* 00000 */ 0xe0, /* 00000 */ 0xe0, /* 00000 */ 0xe0, /* 00000 */ 0xe0, /* 00000 */ 0xe0, /* 00000 */ 0xe0, /* 00000 */ 0xe0, /* 00000 */ 0xe0, /* 00000 */ /* 222 0xde '\336' */ 0x1c, /* 000 00 */ 0x1c, /* 000 00 */ 0x1c, /* 000 00 */ 0x1c, /* 000 00 */ 0x1c, /* 000 00 */ 0x1c, /* 000 00 */ 0x1c, /* 000 00 */ 0x1c, /* 000 00 */ 0x1c, /* 000 00 */ 0x1c, /* 000 00 */ 0x1c, /* 000 00 */ /* 223 0xdf '\337' */ 0xfc, /* 00 */ 0xfc, /* 00 */ 0xfc, /* 00 */ 0xfc, /* 00 */ 0xfc, /* 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 224 0xe0 '\340' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x24, /* 00 00 00 */ 0x58, /* 0 0 000 */ 0x50, /* 0 0 0000 */ 0x54, /* 0 0 0 00 */ 0x2c, /* 00 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 225 0xe1 '\341' */ 0x18, /* 000 000 */ 0x24, /* 00 00 00 */ 0x44, /* 0 000 00 */ 0x48, /* 0 00 000 */ 0x48, /* 0 00 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x58, /* 0 0 000 */ 0x40, /* 0 000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 226 0xe2 '\342' */ 0x00, /* 00000000 */ 0x7c, /* 0 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 227 0xe3 '\343' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x7c, /* 0 00 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 228 0xe4 '\344' */ 0x00, /* 00000000 */ 0x7c, /* 0 00 
*/ 0x24, /* 00 00 00 */ 0x10, /* 000 0000 */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x24, /* 00 00 00 */ 0x7c, /* 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 229 0xe5 '\345' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x3c, /* 00 00 */ 0x48, /* 0 00 000 */ 0x48, /* 0 00 000 */ 0x48, /* 0 00 000 */ 0x30, /* 00 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 230 0xe6 '\346' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x48, /* 0 00 000 */ 0x48, /* 0 00 000 */ 0x48, /* 0 00 000 */ 0x48, /* 0 00 000 */ 0x74, /* 0 0 00 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x00, /* 00000000 */ /* 231 0xe7 '\347' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x6c, /* 0 0 00 */ 0x98, /* 00 000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 232 0xe8 '\350' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 233 0xe9 '\351' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x4c, /* 0 00 00 */ 0x54, /* 0 0 0 00 */ 0x64, /* 0 00 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 234 0xea '\352' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x28, /* 00 0 000 */ 0x6c, /* 0 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 235 0xeb '\353' */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x08, /* 0000 000 */ 0x0c, /* 0000 00 */ 0x14, /* 000 0 00 */ 0x24, /* 00 00 00 */ 0x24, /* 00 00 00 */ 0x18, /* 000 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 236 0xec '\354' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x54, /* 0 0 0 00 */ 0x54, /* 0 0 0 00 */ 0x54, /* 0 0 0 00 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 237 0xed '\355' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x04, /* 00000 00 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x38, /* 00 000 */ 0x40, /* 0 000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 238 0xee '\356' */ 0x00, /* 00000000 */ 0x3c, /* 00 00 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x78, /* 0 000 */ 0x40, /* 0 000000 */ 0x40, /* 0 000000 */ 0x3c, /* 00 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 239 0xef '\357' */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x44, /* 0 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 240 0xf0 '\360' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0xfc, /* 00 */ 0x00, /* 00000000 */ 0xfc, /* 00 */ 0x00, /* 00000000 */ 0xfc, /* 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 241 0xf1 '\361' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x7c, /* 0 00 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x7c, /* 0 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 242 0xf2 '\362' */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x08, /* 0000 000 */ 0x04, /* 00000 00 */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 
0x00, /* 00000000 */ 0x1c, /* 000 00 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 243 0xf3 '\363' */ 0x00, /* 00000000 */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x20, /* 00 00000 */ 0x10, /* 000 0000 */ 0x08, /* 0000 000 */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 244 0xf4 '\364' */ 0x00, /* 00000000 */ 0x0c, /* 0000 00 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ /* 245 0xf5 '\365' */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x10, /* 000 0000 */ 0x60, /* 0 00000 */ 0x00, /* 00000000 */ /* 246 0xf6 '\366' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x7c, /* 0 00 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 247 0xf7 '\367' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x34, /* 00 0 00 */ 0x48, /* 0 00 000 */ 0x00, /* 00000000 */ 0x34, /* 00 0 00 */ 0x48, /* 0 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 248 0xf8 '\370' */ 0x18, /* 000 000 */ 0x24, /* 00 00 00 */ 0x24, /* 00 00 00 */ 0x18, /* 000 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 249 0xf9 '\371' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x38, /* 00 000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 250 0xfa '\372' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x10, /* 000 0000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 251 0xfb '\373' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x0c, /* 0000 00 */ 0x08, /* 0000 000 */ 0x10, /* 000 0000 */ 0x50, /* 0 0 0000 */ 0x20, /* 00 00000 */ 0x20, /* 00 00000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 252 0xfc '\374' */ 0x00, /* 00000000 */ 0x50, /* 0 0 0000 */ 0x28, /* 00 0 000 */ 0x28, /* 00 0 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 253 0xfd '\375' */ 0x00, /* 00000000 */ 0x70, /* 0 0000 */ 0x08, /* 0000 000 */ 0x20, /* 00 00000 */ 0x78, /* 0 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 254 0xfe '\376' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x38, /* 00 000 */ 0x38, /* 00 000 */ 0x38, /* 00 000 */ 0x38, /* 00 000 */ 0x38, /* 00 000 */ 0x38, /* 00 000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ /* 255 0xff '\377' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ } }; const struct font_desc font_vga_6x11 = { .idx = VGA6x11_IDX, .name = "ProFont6x11", .width = 6, .height = 11, .charcount = 256, .data = fontdata_6x11.data, /* Try avoiding this font if 
possible unless on MAC */ .pref = -2000, };
linux-master
lib/fonts/font_6x11.c
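The font_desc entry above is only a data table; kernel code normally reaches it through the font registry declared in <linux/font.h>, looking it up by its "ProFont6x11" name and then indexing glyph bitmaps by character code. A minimal sketch of that usage, assuming the standard find_font() lookup and the one-byte-per-row glyph layout visible in the table above (11 bytes per character); the helper name dump_glyph_rows is hypothetical, not part of the file:

#include <linux/font.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical helper: print the 11 row bytes of one glyph from the
 * "ProFont6x11" table registered above. Each row fits in a single byte
 * because the font is only 6 pixels wide, so the bitmap of character
 * 'c' is assumed to start at data + c * height.
 */
static void dump_glyph_rows(unsigned char c)
{
	const struct font_desc *font = find_font("ProFont6x11");
	const u8 *glyph;
	unsigned int row;

	if (!font)
		return;	/* font not built into this kernel */

	glyph = (const u8 *)font->data + (size_t)c * font->height;
	for (row = 0; row < font->height; row++)
		pr_info("row %2u: 0x%02x\n", row, glyph[row]);
}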
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2018-2019, Intel Corporation. */ #include <asm/unaligned.h> #include <linux/crc32.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/pldmfw.h> #include <linux/slab.h> #include <linux/uuid.h> #include "pldmfw_private.h" /* Internal structure used to store details about the PLDM image file as it is * being validated and processed. */ struct pldmfw_priv { struct pldmfw *context; const struct firmware *fw; /* current offset of firmware image */ size_t offset; struct list_head records; struct list_head components; /* PLDM Firmware Package Header */ const struct __pldm_header *header; u16 total_header_size; /* length of the component bitmap */ u16 component_bitmap_len; u16 bitmap_size; /* Start of the component image information */ u16 component_count; const u8 *component_start; /* Start pf the firmware device id records */ const u8 *record_start; u8 record_count; /* The CRC at the end of the package header */ u32 header_crc; struct pldmfw_record *matching_record; }; /** * pldm_check_fw_space - Verify that the firmware image has space left * @data: pointer to private data * @offset: offset to start from * @length: length to check for * * Verify that the firmware data can hold a chunk of bytes with the specified * offset and length. * * Returns: zero on success, or -EFAULT if the image does not have enough * space left to fit the expected length. */ static int pldm_check_fw_space(struct pldmfw_priv *data, size_t offset, size_t length) { size_t expected_size = offset + length; struct device *dev = data->context->dev; if (data->fw->size < expected_size) { dev_dbg(dev, "Firmware file size smaller than expected. Got %zu bytes, needed %zu bytes\n", data->fw->size, expected_size); return -EFAULT; } return 0; } /** * pldm_move_fw_offset - Move the current firmware offset forward * @data: pointer to private data * @bytes_to_move: number of bytes to move the offset forward by * * Check that there is enough space past the current offset, and then move the * offset forward by this amount. * * Returns: zero on success, or -EFAULT if the image is too small to fit the * expected length. */ static int pldm_move_fw_offset(struct pldmfw_priv *data, size_t bytes_to_move) { int err; err = pldm_check_fw_space(data, data->offset, bytes_to_move); if (err) return err; data->offset += bytes_to_move; return 0; } /** * pldm_parse_header - Validate and extract details about the PLDM header * @data: pointer to private data * * Performs initial basic verification of the PLDM image, up to the first * firmware record. * * This includes the following checks and extractions * * * Verify that the UUID at the start of the header matches the expected * value as defined in the DSP0267 PLDM specification * * Check that the revision is 0x01 * * Extract the total header_size and verify that the image is large enough * to contain at least the length of this header * * Extract the size of the component bitmap length * * Extract a pointer to the start of the record area * * Returns: zero on success, or a negative error code on failure. 
*/ static int pldm_parse_header(struct pldmfw_priv *data) { const struct __pldmfw_record_area *record_area; struct device *dev = data->context->dev; const struct __pldm_header *header; size_t header_size; int err; err = pldm_move_fw_offset(data, sizeof(*header)); if (err) return err; header = (const struct __pldm_header *)data->fw->data; data->header = header; if (!uuid_equal(&header->id, &pldm_firmware_header_id)) { dev_dbg(dev, "Invalid package header identifier. Expected UUID %pUB, but got %pUB\n", &pldm_firmware_header_id, &header->id); return -EINVAL; } if (header->revision != PACKAGE_HEADER_FORMAT_REVISION) { dev_dbg(dev, "Invalid package header revision. Expected revision %u but got %u\n", PACKAGE_HEADER_FORMAT_REVISION, header->revision); return -EOPNOTSUPP; } data->total_header_size = get_unaligned_le16(&header->size); header_size = data->total_header_size - sizeof(*header); err = pldm_check_fw_space(data, data->offset, header_size); if (err) return err; data->component_bitmap_len = get_unaligned_le16(&header->component_bitmap_len); if (data->component_bitmap_len % 8 != 0) { dev_dbg(dev, "Invalid component bitmap length. The length is %u, which is not a multiple of 8\n", data->component_bitmap_len); return -EINVAL; } data->bitmap_size = data->component_bitmap_len / 8; err = pldm_move_fw_offset(data, header->version_len); if (err) return err; /* extract a pointer to the record area, which just follows the main * PLDM header data. */ record_area = (const struct __pldmfw_record_area *)(data->fw->data + data->offset); err = pldm_move_fw_offset(data, sizeof(*record_area)); if (err) return err; data->record_count = record_area->record_count; data->record_start = record_area->records; return 0; } /** * pldm_check_desc_tlv_len - Check that the length matches expectation * @data: pointer to image details * @type: the descriptor type * @size: the length from the descriptor header * * If the descriptor type is one of the documented descriptor types according * to the standard, verify that the provided length matches. * * If the type is not recognized or is VENDOR_DEFINED, return zero. * * Returns: zero on success, or -EINVAL if the specified size of a standard * TLV does not match the expected value defined for that TLV. */ static int pldm_check_desc_tlv_len(struct pldmfw_priv *data, u16 type, u16 size) { struct device *dev = data->context->dev; u16 expected_size; switch (type) { case PLDM_DESC_ID_PCI_VENDOR_ID: case PLDM_DESC_ID_PCI_DEVICE_ID: case PLDM_DESC_ID_PCI_SUBVENDOR_ID: case PLDM_DESC_ID_PCI_SUBDEV_ID: expected_size = 2; break; case PLDM_DESC_ID_PCI_REVISION_ID: expected_size = 1; break; case PLDM_DESC_ID_PNP_VENDOR_ID: expected_size = 3; break; case PLDM_DESC_ID_IANA_ENTERPRISE_ID: case PLDM_DESC_ID_ACPI_VENDOR_ID: case PLDM_DESC_ID_PNP_PRODUCT_ID: case PLDM_DESC_ID_ACPI_PRODUCT_ID: expected_size = 4; break; case PLDM_DESC_ID_UUID: expected_size = 16; break; case PLDM_DESC_ID_VENDOR_DEFINED: return 0; default: /* Do not report an error on an unexpected TLV */ dev_dbg(dev, "Found unrecognized TLV type 0x%04x\n", type); return 0; } if (size != expected_size) { dev_dbg(dev, "Found TLV type 0x%04x with unexpected length. 
Got %u bytes, but expected %u bytes\n", type, size, expected_size); return -EINVAL; } return 0; } /** * pldm_parse_desc_tlvs - Check and skip past a number of TLVs * @data: pointer to private data * @record: pointer to the record this TLV belongs too * @desc_count: descriptor count * * From the current offset, read and extract the descriptor TLVs, updating the * current offset each time. * * Returns: zero on success, or a negative error code on failure. */ static int pldm_parse_desc_tlvs(struct pldmfw_priv *data, struct pldmfw_record *record, u8 desc_count) { const struct __pldmfw_desc_tlv *__desc; const u8 *desc_start; u8 i; desc_start = data->fw->data + data->offset; pldm_for_each_desc_tlv(i, __desc, desc_start, desc_count) { struct pldmfw_desc_tlv *desc; int err; u16 type, size; err = pldm_move_fw_offset(data, sizeof(*__desc)); if (err) return err; type = get_unaligned_le16(&__desc->type); /* According to DSP0267, this only includes the data field */ size = get_unaligned_le16(&__desc->size); err = pldm_check_desc_tlv_len(data, type, size); if (err) return err; /* check that we have space and move the offset forward */ err = pldm_move_fw_offset(data, size); if (err) return err; desc = kzalloc(sizeof(*desc), GFP_KERNEL); if (!desc) return -ENOMEM; desc->type = type; desc->size = size; desc->data = __desc->data; list_add_tail(&desc->entry, &record->descs); } return 0; } /** * pldm_parse_one_record - Verify size of one PLDM record * @data: pointer to image details * @__record: pointer to the record to check * * This function checks that the record size does not exceed either the size * of the firmware file or the total length specified in the header section. * * It also verifies that the recorded length of the start of the record * matches the size calculated by adding the static structure length, the * component bitmap length, the version string length, the length of all * descriptor TLVs, and the length of the package data. * * Returns: zero on success, or a negative error code on failure. 
*/ static int pldm_parse_one_record(struct pldmfw_priv *data, const struct __pldmfw_record_info *__record) { struct pldmfw_record *record; size_t measured_length; int err; const u8 *bitmap_ptr; u16 record_len; int i; /* Make a copy and insert it into the record list */ record = kzalloc(sizeof(*record), GFP_KERNEL); if (!record) return -ENOMEM; INIT_LIST_HEAD(&record->descs); list_add_tail(&record->entry, &data->records); /* Then check that we have space and move the offset */ err = pldm_move_fw_offset(data, sizeof(*__record)); if (err) return err; record_len = get_unaligned_le16(&__record->record_len); record->package_data_len = get_unaligned_le16(&__record->package_data_len); record->version_len = __record->version_len; record->version_type = __record->version_type; bitmap_ptr = data->fw->data + data->offset; /* check that we have space for the component bitmap length */ err = pldm_move_fw_offset(data, data->bitmap_size); if (err) return err; record->component_bitmap_len = data->component_bitmap_len; record->component_bitmap = bitmap_zalloc(record->component_bitmap_len, GFP_KERNEL); if (!record->component_bitmap) return -ENOMEM; for (i = 0; i < data->bitmap_size; i++) bitmap_set_value8(record->component_bitmap, bitmap_ptr[i], i * 8); record->version_string = data->fw->data + data->offset; err = pldm_move_fw_offset(data, record->version_len); if (err) return err; /* Scan through the descriptor TLVs and find the end */ err = pldm_parse_desc_tlvs(data, record, __record->descriptor_count); if (err) return err; record->package_data = data->fw->data + data->offset; err = pldm_move_fw_offset(data, record->package_data_len); if (err) return err; measured_length = data->offset - ((const u8 *)__record - data->fw->data); if (measured_length != record_len) { dev_dbg(data->context->dev, "Unexpected record length. Measured record length is %zu bytes, expected length is %u bytes\n", measured_length, record_len); return -EFAULT; } return 0; } /** * pldm_parse_records - Locate the start of the component area * @data: pointer to private data * * Extract the record count, and loop through each record, searching for the * component area. * * Returns: zero on success, or a negative error code on failure. */ static int pldm_parse_records(struct pldmfw_priv *data) { const struct __pldmfw_component_area *component_area; const struct __pldmfw_record_info *record; int err; u8 i; pldm_for_each_record(i, record, data->record_start, data->record_count) { err = pldm_parse_one_record(data, record); if (err) return err; } /* Extract a pointer to the component area, which just follows the * PLDM device record data. */ component_area = (const struct __pldmfw_component_area *)(data->fw->data + data->offset); err = pldm_move_fw_offset(data, sizeof(*component_area)); if (err) return err; data->component_count = get_unaligned_le16(&component_area->component_image_count); data->component_start = component_area->components; return 0; } /** * pldm_parse_components - Locate the CRC header checksum * @data: pointer to private data * * Extract the component count, and find the pointer to the component area. * Scan through each component searching for the end, which should point to * the package header checksum. * * Extract the package header CRC and save it for verification. * * Returns: zero on success, or a negative error code on failure. 
*/ static int pldm_parse_components(struct pldmfw_priv *data) { const struct __pldmfw_component_info *__component; struct device *dev = data->context->dev; const u8 *header_crc_ptr; int err; u8 i; pldm_for_each_component(i, __component, data->component_start, data->component_count) { struct pldmfw_component *component; u32 offset, size; err = pldm_move_fw_offset(data, sizeof(*__component)); if (err) return err; err = pldm_move_fw_offset(data, __component->version_len); if (err) return err; offset = get_unaligned_le32(&__component->location_offset); size = get_unaligned_le32(&__component->size); err = pldm_check_fw_space(data, offset, size); if (err) return err; component = kzalloc(sizeof(*component), GFP_KERNEL); if (!component) return -ENOMEM; component->index = i; component->classification = get_unaligned_le16(&__component->classification); component->identifier = get_unaligned_le16(&__component->identifier); component->comparison_stamp = get_unaligned_le32(&__component->comparison_stamp); component->options = get_unaligned_le16(&__component->options); component->activation_method = get_unaligned_le16(&__component->activation_method); component->version_type = __component->version_type; component->version_len = __component->version_len; component->version_string = __component->version_string; component->component_data = data->fw->data + offset; component->component_size = size; list_add_tail(&component->entry, &data->components); } header_crc_ptr = data->fw->data + data->offset; err = pldm_move_fw_offset(data, sizeof(data->header_crc)); if (err) return err; /* Make sure that we reached the expected offset */ if (data->offset != data->total_header_size) { dev_dbg(dev, "Invalid firmware header size. Expected %u but got %zu\n", data->total_header_size, data->offset); return -EFAULT; } data->header_crc = get_unaligned_le32(header_crc_ptr); return 0; } /** * pldm_verify_header_crc - Verify that the CRC in the header matches * @data: pointer to private data * * Calculates the 32-bit CRC using the standard IEEE 802.3 CRC polynomial and * compares it to the value stored in the header. * * Returns: zero on success if the CRC matches, or -EBADMSG on an invalid CRC. */ static int pldm_verify_header_crc(struct pldmfw_priv *data) { struct device *dev = data->context->dev; u32 calculated_crc; size_t length; /* Calculate the 32-bit CRC of the header header contents up to but * not including the checksum. Note that the Linux crc32_le function * does not perform an expected final XOR. */ length = data->offset - sizeof(data->header_crc); calculated_crc = crc32_le(~0, data->fw->data, length) ^ ~0; if (calculated_crc != data->header_crc) { dev_dbg(dev, "Invalid CRC in firmware header. Got 0x%08x but expected 0x%08x\n", calculated_crc, data->header_crc); return -EBADMSG; } return 0; } /** * pldmfw_free_priv - Free memory allocated while parsing the PLDM image * @data: pointer to the PLDM data structure * * Loops through and clears all allocated memory associated with each * allocated descriptor, record, and component. 
*/ static void pldmfw_free_priv(struct pldmfw_priv *data) { struct pldmfw_component *component, *c_safe; struct pldmfw_record *record, *r_safe; struct pldmfw_desc_tlv *desc, *d_safe; list_for_each_entry_safe(component, c_safe, &data->components, entry) { list_del(&component->entry); kfree(component); } list_for_each_entry_safe(record, r_safe, &data->records, entry) { list_for_each_entry_safe(desc, d_safe, &record->descs, entry) { list_del(&desc->entry); kfree(desc); } if (record->component_bitmap) { bitmap_free(record->component_bitmap); record->component_bitmap = NULL; } list_del(&record->entry); kfree(record); } } /** * pldm_parse_image - parse and extract details from PLDM image * @data: pointer to private data * * Verify that the firmware file contains valid data for a PLDM firmware * file. Extract useful pointers and data from the firmware file and store * them in the data structure. * * The PLDM firmware file format is defined in DMTF DSP0267 1.0.0. Care * should be taken to use get_unaligned_le* when accessing data from the * pointers in data. * * Returns: zero on success, or a negative error code on failure. */ static int pldm_parse_image(struct pldmfw_priv *data) { int err; if (WARN_ON(!(data->context->dev && data->fw->data && data->fw->size))) return -EINVAL; err = pldm_parse_header(data); if (err) return err; err = pldm_parse_records(data); if (err) return err; err = pldm_parse_components(data); if (err) return err; return pldm_verify_header_crc(data); } /* these are u32 so that we can store PCI_ANY_ID */ struct pldm_pci_record_id { int vendor; int device; int subsystem_vendor; int subsystem_device; }; /** * pldmfw_op_pci_match_record - Check if a PCI device matches the record * @context: PLDM fw update structure * @record: list of records extracted from the PLDM image * * Determine of the PCI device associated with this device matches the record * data provided. * * Searches the descriptor TLVs and extracts the relevant descriptor data into * a pldm_pci_record_id. This is then compared against the PCI device ID * information. * * Returns: true if the device matches the record, false otherwise. */ bool pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record) { struct pci_dev *pdev = to_pci_dev(context->dev); struct pldm_pci_record_id id = { .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, .subsystem_vendor = PCI_ANY_ID, .subsystem_device = PCI_ANY_ID, }; struct pldmfw_desc_tlv *desc; list_for_each_entry(desc, &record->descs, entry) { u16 value; int *ptr; switch (desc->type) { case PLDM_DESC_ID_PCI_VENDOR_ID: ptr = &id.vendor; break; case PLDM_DESC_ID_PCI_DEVICE_ID: ptr = &id.device; break; case PLDM_DESC_ID_PCI_SUBVENDOR_ID: ptr = &id.subsystem_vendor; break; case PLDM_DESC_ID_PCI_SUBDEV_ID: ptr = &id.subsystem_device; break; default: /* Skip unrelated TLVs */ continue; } value = get_unaligned_le16(desc->data); /* A value of zero for one of the descriptors is sometimes * used when the record should ignore this field when matching * device. For example if the record applies to any subsystem * device or vendor. 
*/ if (value) *ptr = (int)value; else *ptr = PCI_ANY_ID; } if ((id.vendor == PCI_ANY_ID || id.vendor == pdev->vendor) && (id.device == PCI_ANY_ID || id.device == pdev->device) && (id.subsystem_vendor == PCI_ANY_ID || id.subsystem_vendor == pdev->subsystem_vendor) && (id.subsystem_device == PCI_ANY_ID || id.subsystem_device == pdev->subsystem_device)) return true; else return false; } EXPORT_SYMBOL(pldmfw_op_pci_match_record); /** * pldm_find_matching_record - Find the first matching PLDM record * @data: pointer to private data * * Search through PLDM records and find the first matching entry. It is * expected that only one entry matches. * * Store a pointer to the matching record, if found. * * Returns: zero on success, or -ENOENT if no matching record is found. */ static int pldm_find_matching_record(struct pldmfw_priv *data) { struct pldmfw_record *record; list_for_each_entry(record, &data->records, entry) { if (data->context->ops->match_record(data->context, record)) { data->matching_record = record; return 0; } } return -ENOENT; } /** * pldm_send_package_data - Send firmware the package data for the record * @data: pointer to private data * * Send the package data associated with the matching record to the firmware, * using the send_pkg_data operation. * * Returns: zero on success, or a negative error code on failure. */ static int pldm_send_package_data(struct pldmfw_priv *data) { struct pldmfw_record *record = data->matching_record; const struct pldmfw_ops *ops = data->context->ops; return ops->send_package_data(data->context, record->package_data, record->package_data_len); } /** * pldm_send_component_tables - Send component table information to firmware * @data: pointer to private data * * Loop over each component, sending the applicable components to the firmware * via the send_component_table operation. * * Returns: zero on success, or a negative error code on failure. */ static int pldm_send_component_tables(struct pldmfw_priv *data) { unsigned long *bitmap = data->matching_record->component_bitmap; struct pldmfw_component *component; int err; list_for_each_entry(component, &data->components, entry) { u8 index = component->index, transfer_flag = 0; /* Skip components which are not intended for this device */ if (!test_bit(index, bitmap)) continue; /* determine whether this is the start, middle, end, or both * the start and end of the component tables */ if (index == find_first_bit(bitmap, data->component_bitmap_len)) transfer_flag |= PLDM_TRANSFER_FLAG_START; if (index == find_last_bit(bitmap, data->component_bitmap_len)) transfer_flag |= PLDM_TRANSFER_FLAG_END; if (!transfer_flag) transfer_flag = PLDM_TRANSFER_FLAG_MIDDLE; err = data->context->ops->send_component_table(data->context, component, transfer_flag); if (err) return err; } return 0; } /** * pldm_flash_components - Program each component to device flash * @data: pointer to private data * * Loop through each component that is active for the matching device record, * and send it to the device driver for flashing. * * Returns: zero on success, or a negative error code on failure. 
*/ static int pldm_flash_components(struct pldmfw_priv *data) { unsigned long *bitmap = data->matching_record->component_bitmap; struct pldmfw_component *component; int err; list_for_each_entry(component, &data->components, entry) { u8 index = component->index; /* Skip components which are not intended for this device */ if (!test_bit(index, bitmap)) continue; err = data->context->ops->flash_component(data->context, component); if (err) return err; } return 0; } /** * pldm_finalize_update - Finalize the device flash update * @data: pointer to private data * * Tell the device driver to perform any remaining logic to complete the * device update. * * Returns: zero on success, or a PLFM_FWU error indicating the reason for * failure. */ static int pldm_finalize_update(struct pldmfw_priv *data) { if (data->context->ops->finalize_update) return data->context->ops->finalize_update(data->context); return 0; } /** * pldmfw_flash_image - Write a PLDM-formatted firmware image to the device * @context: ops and data for firmware update * @fw: firmware object pointing to the relevant firmware file to program * * Parse the data for a given firmware file, verifying that it is a valid PLDM * formatted image that matches this device. * * Extract the device record Package Data and Component Tables and send them * to the device firmware. Extract and write the flash data for each of the * components indicated in the firmware file. * * Returns: zero on success, or a negative error code on failure. */ int pldmfw_flash_image(struct pldmfw *context, const struct firmware *fw) { struct pldmfw_priv *data; int err; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; INIT_LIST_HEAD(&data->records); INIT_LIST_HEAD(&data->components); data->fw = fw; data->context = context; err = pldm_parse_image(data); if (err) goto out_release_data; err = pldm_find_matching_record(data); if (err) goto out_release_data; err = pldm_send_package_data(data); if (err) goto out_release_data; err = pldm_send_component_tables(data); if (err) goto out_release_data; err = pldm_flash_components(data); if (err) goto out_release_data; err = pldm_finalize_update(data); out_release_data: pldmfw_free_priv(data); kfree(data); return err; } EXPORT_SYMBOL(pldmfw_flash_image); MODULE_AUTHOR("Jacob Keller <[email protected]>"); MODULE_DESCRIPTION("PLDM firmware flash update library");
linux-master
lib/pldmfw/pldmfw.c
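The library above exposes pldmfw_flash_image() as its single entry point: a driver fills in a struct pldmfw with its struct device and a struct pldmfw_ops callback table, then hands over the firmware object. The sketch below shows that wiring for a hypothetical PCI driver; my_send_package_data(), my_send_component_table(), my_flash_component() and my_flash() are illustrative placeholders and not part of the library, while pldmfw_op_pci_match_record() and pldmfw_flash_image() are the exported functions from the file above. Field and callback names follow what the code above dereferences (context->dev, context->ops, ops->match_record, and so on).

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/pldmfw.h>

/* Hypothetical driver callbacks; a real driver talks to its device here. */
static int my_send_package_data(struct pldmfw *ctx, const u8 *data, u16 length)
{
	return 0;
}

static int my_send_component_table(struct pldmfw *ctx,
				   struct pldmfw_component *component,
				   u8 transfer_flag)
{
	return 0;
}

static int my_flash_component(struct pldmfw *ctx,
			      struct pldmfw_component *component)
{
	return 0;
}

static const struct pldmfw_ops my_pldm_ops = {
	.match_record		= pldmfw_op_pci_match_record,	/* exported above */
	.send_package_data	= my_send_package_data,
	.send_component_table	= my_send_component_table,
	.flash_component	= my_flash_component,
	/* .finalize_update is optional; pldm_finalize_update() checks for it */
};

static int my_flash(struct device *dev, const struct firmware *fw)
{
	struct pldmfw context = {
		.ops = &my_pldm_ops,
		.dev = dev,
	};

	/* Parses and validates the image, finds the matching record, then
	 * pushes the package data, component tables and component images
	 * through the callbacks above.
	 */
	return pldmfw_flash_image(&context, fw);
}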
// SPDX-License-Identifier: GPL-2.0-only /* * LZO1X Decompressor from LZO * * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <[email protected]> * * The full LZO package can be found at: * http://www.oberhumer.com/opensource/lzo/ * * Changed for Linux kernel use by: * Nitin Gupta <[email protected]> * Richard Purdie <[email protected]> */ #ifndef STATIC #include <linux/module.h> #include <linux/kernel.h> #endif #include <asm/unaligned.h> #include <linux/lzo.h> #include "lzodefs.h" #define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x)) #define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x)) #define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun #define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun #define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun /* This MAX_255_COUNT is the maximum number of times we can add 255 to a base * count without overflowing an integer. The multiply will overflow when * multiplying 255 by more than MAXINT/255. The sum will overflow earlier * depending on the base count. Since the base count is taken from a u8 * and a few bits, it is safe to assume that it will always be lower than * or equal to 2*255, thus we can always prevent any overflow by accepting * two less 255 steps. See Documentation/staging/lzo.rst for more information. */ #define MAX_255_COUNT ((((size_t)~0) / 255) - 2) int lzo1x_decompress_safe(const unsigned char *in, size_t in_len, unsigned char *out, size_t *out_len) { unsigned char *op; const unsigned char *ip; size_t t, next; size_t state = 0; const unsigned char *m_pos; const unsigned char * const ip_end = in + in_len; unsigned char * const op_end = out + *out_len; unsigned char bitstream_version; op = out; ip = in; if (unlikely(in_len < 3)) goto input_overrun; if (likely(in_len >= 5) && likely(*ip == 17)) { bitstream_version = ip[1]; ip += 2; } else { bitstream_version = 0; } if (*ip > 17) { t = *ip++ - 17; if (t < 4) { next = t; goto match_next; } goto copy_literal_run; } for (;;) { t = *ip++; if (t < 16) { if (likely(state == 0)) { if (unlikely(t == 0)) { size_t offset; const unsigned char *ip_last = ip; while (unlikely(*ip == 0)) { ip++; NEED_IP(1); } offset = ip - ip_last; if (unlikely(offset > MAX_255_COUNT)) return LZO_E_ERROR; offset = (offset << 8) - offset; t += offset + 15 + *ip++; } t += 3; copy_literal_run: #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) { const unsigned char *ie = ip + t; unsigned char *oe = op + t; do { COPY8(op, ip); op += 8; ip += 8; COPY8(op, ip); op += 8; ip += 8; } while (ip < ie); ip = ie; op = oe; } else #endif { NEED_OP(t); NEED_IP(t + 3); do { *op++ = *ip++; } while (--t > 0); } state = 4; continue; } else if (state != 4) { next = t & 3; m_pos = op - 1; m_pos -= t >> 2; m_pos -= *ip++ << 2; TEST_LB(m_pos); NEED_OP(2); op[0] = m_pos[0]; op[1] = m_pos[1]; op += 2; goto match_next; } else { next = t & 3; m_pos = op - (1 + M2_MAX_OFFSET); m_pos -= t >> 2; m_pos -= *ip++ << 2; t = 3; } } else if (t >= 64) { next = t & 3; m_pos = op - 1; m_pos -= (t >> 2) & 7; m_pos -= *ip++ << 3; t = (t >> 5) - 1 + (3 - 1); } else if (t >= 32) { t = (t & 31) + (3 - 1); if (unlikely(t == 2)) { size_t offset; const unsigned char *ip_last = ip; while (unlikely(*ip == 0)) { ip++; NEED_IP(1); } offset = ip - ip_last; if (unlikely(offset > MAX_255_COUNT)) return LZO_E_ERROR; offset = (offset << 8) - offset; t += offset + 31 + *ip++; NEED_IP(2); } m_pos = op - 1; next = get_unaligned_le16(ip); ip += 2; m_pos -= next >> 2; next &= 3; } else { NEED_IP(2); next = 
get_unaligned_le16(ip); if (((next & 0xfffc) == 0xfffc) && ((t & 0xf8) == 0x18) && likely(bitstream_version)) { NEED_IP(3); t &= 7; t |= ip[2] << 3; t += MIN_ZERO_RUN_LENGTH; NEED_OP(t); memset(op, 0, t); op += t; next &= 3; ip += 3; goto match_next; } else { m_pos = op; m_pos -= (t & 8) << 11; t = (t & 7) + (3 - 1); if (unlikely(t == 2)) { size_t offset; const unsigned char *ip_last = ip; while (unlikely(*ip == 0)) { ip++; NEED_IP(1); } offset = ip - ip_last; if (unlikely(offset > MAX_255_COUNT)) return LZO_E_ERROR; offset = (offset << 8) - offset; t += offset + 7 + *ip++; NEED_IP(2); next = get_unaligned_le16(ip); } ip += 2; m_pos -= next >> 2; next &= 3; if (m_pos == op) goto eof_found; m_pos -= 0x4000; } } TEST_LB(m_pos); #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) if (op - m_pos >= 8) { unsigned char *oe = op + t; if (likely(HAVE_OP(t + 15))) { do { COPY8(op, m_pos); op += 8; m_pos += 8; COPY8(op, m_pos); op += 8; m_pos += 8; } while (op < oe); op = oe; if (HAVE_IP(6)) { state = next; COPY4(op, ip); op += next; ip += next; continue; } } else { NEED_OP(t); do { *op++ = *m_pos++; } while (op < oe); } } else #endif { unsigned char *oe = op + t; NEED_OP(t); op[0] = m_pos[0]; op[1] = m_pos[1]; op += 2; m_pos += 2; do { *op++ = *m_pos++; } while (op < oe); } match_next: state = next; t = next; #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) if (likely(HAVE_IP(6) && HAVE_OP(4))) { COPY4(op, ip); op += t; ip += t; } else #endif { NEED_IP(t + 3); NEED_OP(t); while (t > 0) { *op++ = *ip++; t--; } } } eof_found: *out_len = op - out; return (t != 3 ? LZO_E_ERROR : ip == ip_end ? LZO_E_OK : ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN); input_overrun: *out_len = op - out; return LZO_E_INPUT_OVERRUN; output_overrun: *out_len = op - out; return LZO_E_OUTPUT_OVERRUN; lookbehind_overrun: *out_len = op - out; return LZO_E_LOOKBEHIND_OVERRUN; } #ifndef STATIC EXPORT_SYMBOL_GPL(lzo1x_decompress_safe); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LZO1X Decompressor"); #endif
linux-master
lib/lzo/lzo1x_decompress_safe.c
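lzo1x_decompress_safe() takes the compressed buffer plus an in/out length: *out_len must be set to the capacity of the destination buffer before the call and holds the number of bytes actually produced afterwards, which is what lets the NEED_OP() checks above reject output overruns. A minimal call-site sketch, assuming dst_size is a caller-chosen bound on the decompressed size and my_lzo_unpack() is an invented helper name:

#include <linux/lzo.h>
#include <linux/slab.h>

/* Illustrative helper, not part of the library: decompress src into a
 * freshly allocated buffer of at most dst_size bytes.
 */
static void *my_lzo_unpack(const void *src, size_t src_len,
			   size_t dst_size, size_t *dst_len)
{
	unsigned char *dst;
	int ret;

	dst = kmalloc(dst_size, GFP_KERNEL);
	if (!dst)
		return NULL;

	*dst_len = dst_size;		/* capacity in, actual size out */
	ret = lzo1x_decompress_safe(src, src_len, dst, dst_len);
	if (ret != LZO_E_OK) {		/* input/output overrun, bad stream, ... */
		kfree(dst);
		return NULL;
	}
	return dst;
}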
// SPDX-License-Identifier: GPL-2.0-only /* * LZO1X Compressor from LZO * * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <[email protected]> * * The full LZO package can be found at: * http://www.oberhumer.com/opensource/lzo/ * * Changed for Linux kernel use by: * Nitin Gupta <[email protected]> * Richard Purdie <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <asm/unaligned.h> #include <linux/lzo.h> #include "lzodefs.h" static noinline size_t lzo1x_1_do_compress(const unsigned char *in, size_t in_len, unsigned char *out, size_t *out_len, size_t ti, void *wrkmem, signed char *state_offset, const unsigned char bitstream_version) { const unsigned char *ip; unsigned char *op; const unsigned char * const in_end = in + in_len; const unsigned char * const ip_end = in + in_len - 20; const unsigned char *ii; lzo_dict_t * const dict = (lzo_dict_t *) wrkmem; op = out; ip = in; ii = ip; ip += ti < 4 ? 4 - ti : 0; for (;;) { const unsigned char *m_pos = NULL; size_t t, m_len, m_off; u32 dv; u32 run_length = 0; literal: ip += 1 + ((ip - ii) >> 5); next: if (unlikely(ip >= ip_end)) break; dv = get_unaligned_le32(ip); if (dv == 0 && bitstream_version) { const unsigned char *ir = ip + 4; const unsigned char *limit = min(ip_end, ip + MAX_ZERO_RUN_LENGTH + 1); #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \ defined(LZO_FAST_64BIT_MEMORY_ACCESS) u64 dv64; for (; (ir + 32) <= limit; ir += 32) { dv64 = get_unaligned((u64 *)ir); dv64 |= get_unaligned((u64 *)ir + 1); dv64 |= get_unaligned((u64 *)ir + 2); dv64 |= get_unaligned((u64 *)ir + 3); if (dv64) break; } for (; (ir + 8) <= limit; ir += 8) { dv64 = get_unaligned((u64 *)ir); if (dv64) { # if defined(__LITTLE_ENDIAN) ir += __builtin_ctzll(dv64) >> 3; # elif defined(__BIG_ENDIAN) ir += __builtin_clzll(dv64) >> 3; # else # error "missing endian definition" # endif break; } } #else while ((ir < (const unsigned char *) ALIGN((uintptr_t)ir, 4)) && (ir < limit) && (*ir == 0)) ir++; if (IS_ALIGNED((uintptr_t)ir, 4)) { for (; (ir + 4) <= limit; ir += 4) { dv = *((u32 *)ir); if (dv) { # if defined(__LITTLE_ENDIAN) ir += __builtin_ctz(dv) >> 3; # elif defined(__BIG_ENDIAN) ir += __builtin_clz(dv) >> 3; # else # error "missing endian definition" # endif break; } } } #endif while (likely(ir < limit) && unlikely(*ir == 0)) ir++; run_length = ir - ip; if (run_length > MAX_ZERO_RUN_LENGTH) run_length = MAX_ZERO_RUN_LENGTH; } else { t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK; m_pos = in + dict[t]; dict[t] = (lzo_dict_t) (ip - in); if (unlikely(dv != get_unaligned_le32(m_pos))) goto literal; } ii -= ti; ti = 0; t = ip - ii; if (t != 0) { if (t <= 3) { op[*state_offset] |= t; COPY4(op, ii); op += t; } else if (t <= 16) { *op++ = (t - 3); COPY8(op, ii); COPY8(op + 8, ii + 8); op += t; } else { if (t <= 18) { *op++ = (t - 3); } else { size_t tt = t - 18; *op++ = 0; while (unlikely(tt > 255)) { tt -= 255; *op++ = 0; } *op++ = tt; } do { COPY8(op, ii); COPY8(op + 8, ii + 8); op += 16; ii += 16; t -= 16; } while (t >= 16); if (t > 0) do { *op++ = *ii++; } while (--t > 0); } } if (unlikely(run_length)) { ip += run_length; run_length -= MIN_ZERO_RUN_LENGTH; put_unaligned_le32((run_length << 21) | 0xfffc18 | (run_length & 0x7), op); op += 4; run_length = 0; *state_offset = -3; goto finished_writing_instruction; } m_len = 4; { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64) u64 v; v = get_unaligned((const u64 *) (ip + m_len)) ^ get_unaligned((const u64 *) (m_pos + m_len)); if (unlikely(v == 0)) { do { 
m_len += 8; v = get_unaligned((const u64 *) (ip + m_len)) ^ get_unaligned((const u64 *) (m_pos + m_len)); if (unlikely(ip + m_len >= ip_end)) goto m_len_done; } while (v == 0); } # if defined(__LITTLE_ENDIAN) m_len += (unsigned) __builtin_ctzll(v) / 8; # elif defined(__BIG_ENDIAN) m_len += (unsigned) __builtin_clzll(v) / 8; # else # error "missing endian definition" # endif #elif defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ32) u32 v; v = get_unaligned((const u32 *) (ip + m_len)) ^ get_unaligned((const u32 *) (m_pos + m_len)); if (unlikely(v == 0)) { do { m_len += 4; v = get_unaligned((const u32 *) (ip + m_len)) ^ get_unaligned((const u32 *) (m_pos + m_len)); if (v != 0) break; m_len += 4; v = get_unaligned((const u32 *) (ip + m_len)) ^ get_unaligned((const u32 *) (m_pos + m_len)); if (unlikely(ip + m_len >= ip_end)) goto m_len_done; } while (v == 0); } # if defined(__LITTLE_ENDIAN) m_len += (unsigned) __builtin_ctz(v) / 8; # elif defined(__BIG_ENDIAN) m_len += (unsigned) __builtin_clz(v) / 8; # else # error "missing endian definition" # endif #else if (unlikely(ip[m_len] == m_pos[m_len])) { do { m_len += 1; if (ip[m_len] != m_pos[m_len]) break; m_len += 1; if (ip[m_len] != m_pos[m_len]) break; m_len += 1; if (ip[m_len] != m_pos[m_len]) break; m_len += 1; if (ip[m_len] != m_pos[m_len]) break; m_len += 1; if (ip[m_len] != m_pos[m_len]) break; m_len += 1; if (ip[m_len] != m_pos[m_len]) break; m_len += 1; if (ip[m_len] != m_pos[m_len]) break; m_len += 1; if (unlikely(ip + m_len >= ip_end)) goto m_len_done; } while (ip[m_len] == m_pos[m_len]); } #endif } m_len_done: m_off = ip - m_pos; ip += m_len; if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) { m_off -= 1; *op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2)); *op++ = (m_off >> 3); } else if (m_off <= M3_MAX_OFFSET) { m_off -= 1; if (m_len <= M3_MAX_LEN) *op++ = (M3_MARKER | (m_len - 2)); else { m_len -= M3_MAX_LEN; *op++ = M3_MARKER | 0; while (unlikely(m_len > 255)) { m_len -= 255; *op++ = 0; } *op++ = (m_len); } *op++ = (m_off << 2); *op++ = (m_off >> 6); } else { m_off -= 0x4000; if (m_len <= M4_MAX_LEN) *op++ = (M4_MARKER | ((m_off >> 11) & 8) | (m_len - 2)); else { if (unlikely(((m_off & 0x403f) == 0x403f) && (m_len >= 261) && (m_len <= 264)) && likely(bitstream_version)) { // Under lzo-rle, block copies // for 261 <= length <= 264 and // (distance & 0x80f3) == 0x80f3 // can result in ambiguous // output. Adjust length // to 260 to prevent ambiguity. 
ip -= m_len - 260; m_len = 260; } m_len -= M4_MAX_LEN; *op++ = (M4_MARKER | ((m_off >> 11) & 8)); while (unlikely(m_len > 255)) { m_len -= 255; *op++ = 0; } *op++ = (m_len); } *op++ = (m_off << 2); *op++ = (m_off >> 6); } *state_offset = -2; finished_writing_instruction: ii = ip; goto next; } *out_len = op - out; return in_end - (ii - ti); } static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out, size_t *out_len, void *wrkmem, const unsigned char bitstream_version) { const unsigned char *ip = in; unsigned char *op = out; unsigned char *data_start; size_t l = in_len; size_t t = 0; signed char state_offset = -2; unsigned int m4_max_offset; // LZO v0 will never write 17 as first byte (except for zero-length // input), so this is used to version the bitstream if (bitstream_version > 0) { *op++ = 17; *op++ = bitstream_version; m4_max_offset = M4_MAX_OFFSET_V1; } else { m4_max_offset = M4_MAX_OFFSET_V0; } data_start = op; while (l > 20) { size_t ll = min_t(size_t, l, m4_max_offset + 1); uintptr_t ll_end = (uintptr_t) ip + ll; if ((ll_end + ((t + ll) >> 5)) <= ll_end) break; BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS); memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t)); t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem, &state_offset, bitstream_version); ip += ll; op += *out_len; l -= ll; } t += l; if (t > 0) { const unsigned char *ii = in + in_len - t; if (op == data_start && t <= 238) { *op++ = (17 + t); } else if (t <= 3) { op[state_offset] |= t; } else if (t <= 18) { *op++ = (t - 3); } else { size_t tt = t - 18; *op++ = 0; while (tt > 255) { tt -= 255; *op++ = 0; } *op++ = tt; } if (t >= 16) do { COPY8(op, ii); COPY8(op + 8, ii + 8); op += 16; ii += 16; t -= 16; } while (t >= 16); if (t > 0) do { *op++ = *ii++; } while (--t > 0); } *op++ = M4_MARKER | 1; *op++ = 0; *op++ = 0; *out_len = op - out; return LZO_E_OK; } int lzo1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out, size_t *out_len, void *wrkmem) { return lzogeneric1x_1_compress(in, in_len, out, out_len, wrkmem, 0); } int lzorle1x_1_compress(const unsigned char *in, size_t in_len, unsigned char *out, size_t *out_len, void *wrkmem) { return lzogeneric1x_1_compress(in, in_len, out, out_len, wrkmem, LZO_VERSION); } EXPORT_SYMBOL_GPL(lzo1x_1_compress); EXPORT_SYMBOL_GPL(lzorle1x_1_compress); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LZO1X-1 Compressor");
linux-master
lib/lzo/lzo1x_compress.c
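Both compressors need per-call scratch memory of LZO1X_1_MEM_COMPRESS bytes (the BUILD_BUG_ON above checks the dictionary fits in it) and an output buffer large enough for the worst case, which for LZO1X is slightly larger than the input. A hedged sketch follows; lzo1x_worst_compress() is assumed to be the usual worst-case bound macro from <linux/lzo.h>, and my_lzo_pack() is an invented helper name:

#include <linux/errno.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>

/* Illustrative sketch: compress src_len bytes from src into dst, where dst
 * is assumed to hold at least lzo1x_worst_compress(src_len) bytes.
 */
static int my_lzo_pack(const unsigned char *src, size_t src_len,
		       unsigned char *dst, size_t *dst_len, bool rle)
{
	void *wrkmem;
	int ret;

	wrkmem = vmalloc(LZO1X_1_MEM_COMPRESS);	/* per-call dictionary scratch */
	if (!wrkmem)
		return -ENOMEM;

	/* lzorle1x_1_compress() emits the versioned (17, version) prefix and
	 * may use the zero-run-length instructions; lzo1x_1_compress() stays
	 * with the version-0 bitstream.
	 */
	if (rle)
		ret = lzorle1x_1_compress(src, src_len, dst, dst_len, wrkmem);
	else
		ret = lzo1x_1_compress(src, src_len, dst, dst_len, wrkmem);

	vfree(wrkmem);
	return ret == LZO_E_OK ? 0 : -EINVAL;
}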
// SPDX-License-Identifier: GPL-2.0
/*
 * Generic Reed Solomon encoder / decoder library
 *
 * Copyright 2002, Phil Karn, KA9Q
 * May be used under the terms of the GNU General Public License (GPL)
 *
 * Adaption to the kernel by Thomas Gleixner ([email protected])
 *
 * Generic data width independent code which is included by the wrappers.
 */
{
	struct rs_codec *rs = rsc->codec;
	int i, j, pad;
	int nn = rs->nn;
	int nroots = rs->nroots;
	uint16_t *alpha_to = rs->alpha_to;
	uint16_t *index_of = rs->index_of;
	uint16_t *genpoly = rs->genpoly;
	uint16_t fb;
	uint16_t msk = (uint16_t) rs->nn;

	/* Check length parameter for validity */
	pad = nn - nroots - len;
	if (pad < 0 || pad >= nn)
		return -ERANGE;

	for (i = 0; i < len; i++) {
		fb = index_of[((((uint16_t) data[i])^invmsk) & msk) ^ par[0]];
		/* feedback term is non-zero */
		if (fb != nn) {
			for (j = 1; j < nroots; j++) {
				par[j] ^= alpha_to[rs_modnn(rs, fb + genpoly[nroots - j])];
			}
		}
		/* Shift */
		memmove(&par[0], &par[1], sizeof(uint16_t) * (nroots - 1));
		if (fb != nn) {
			par[nroots - 1] = alpha_to[rs_modnn(rs, fb + genpoly[0])];
		} else {
			par[nroots - 1] = 0;
		}
	}
	return 0;
}
linux-master
lib/reed_solomon/encode_rs.c
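encode_rs.c deliberately starts with a bare brace: it is never compiled on its own, but #include'd as the body of a width-specific wrapper that supplies rsc, data, len, par and invmsk in scope, as reed_solomon.c does below for encode_rs8() and encode_rs16(). The loop itself is the LFSR view of systematic RS encoding: each data symbol is folded into par[0], the feedback term (in log/index form) is multiplied by the generator polynomial coefficients, and the parity register is shifted by one. A sketch of the wrapper pattern, mirroring the 8-bit variant defined further down:

/* The included body expects rsc, data, len, par and invmsk in scope. */
int encode_rs8(struct rs_control *rsc, uint8_t *data, int len, uint16_t *par,
	       uint16_t invmsk)
{
#include "encode_rs.c"
}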
// SPDX-License-Identifier: GPL-2.0 /* * Generic Reed Solomon encoder / decoder library * * Copyright 2002, Phil Karn, KA9Q * May be used under the terms of the GNU General Public License (GPL) * * Adaption to the kernel by Thomas Gleixner ([email protected]) * * Generic data width independent code which is included by the wrappers. */ { struct rs_codec *rs = rsc->codec; int deg_lambda, el, deg_omega; int i, j, r, k, pad; int nn = rs->nn; int nroots = rs->nroots; int fcr = rs->fcr; int prim = rs->prim; int iprim = rs->iprim; uint16_t *alpha_to = rs->alpha_to; uint16_t *index_of = rs->index_of; uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error; int count = 0; int num_corrected; uint16_t msk = (uint16_t) rs->nn; /* * The decoder buffers are in the rs control struct. They are * arrays sized [nroots + 1] */ uint16_t *lambda = rsc->buffers + RS_DECODE_LAMBDA * (nroots + 1); uint16_t *syn = rsc->buffers + RS_DECODE_SYN * (nroots + 1); uint16_t *b = rsc->buffers + RS_DECODE_B * (nroots + 1); uint16_t *t = rsc->buffers + RS_DECODE_T * (nroots + 1); uint16_t *omega = rsc->buffers + RS_DECODE_OMEGA * (nroots + 1); uint16_t *root = rsc->buffers + RS_DECODE_ROOT * (nroots + 1); uint16_t *reg = rsc->buffers + RS_DECODE_REG * (nroots + 1); uint16_t *loc = rsc->buffers + RS_DECODE_LOC * (nroots + 1); /* Check length parameter for validity */ pad = nn - nroots - len; BUG_ON(pad < 0 || pad >= nn - nroots); /* Does the caller provide the syndrome ? */ if (s != NULL) { for (i = 0; i < nroots; i++) { /* The syndrome is in index form, * so nn represents zero */ if (s[i] != nn) goto decode; } /* syndrome is zero, no errors to correct */ return 0; } /* form the syndromes; i.e., evaluate data(x) at roots of * g(x) */ for (i = 0; i < nroots; i++) syn[i] = (((uint16_t) data[0]) ^ invmsk) & msk; for (j = 1; j < len; j++) { for (i = 0; i < nroots; i++) { if (syn[i] == 0) { syn[i] = (((uint16_t) data[j]) ^ invmsk) & msk; } else { syn[i] = ((((uint16_t) data[j]) ^ invmsk) & msk) ^ alpha_to[rs_modnn(rs, index_of[syn[i]] + (fcr + i) * prim)]; } } } for (j = 0; j < nroots; j++) { for (i = 0; i < nroots; i++) { if (syn[i] == 0) { syn[i] = ((uint16_t) par[j]) & msk; } else { syn[i] = (((uint16_t) par[j]) & msk) ^ alpha_to[rs_modnn(rs, index_of[syn[i]] + (fcr+i)*prim)]; } } } s = syn; /* Convert syndromes to index form, checking for nonzero condition */ syn_error = 0; for (i = 0; i < nroots; i++) { syn_error |= s[i]; s[i] = index_of[s[i]]; } if (!syn_error) { /* if syndrome is zero, data[] is a codeword and there are no * errors to correct. 
So return data[] unmodified */ return 0; } decode: memset(&lambda[1], 0, nroots * sizeof(lambda[0])); lambda[0] = 1; if (no_eras > 0) { /* Init lambda to be the erasure locator polynomial */ lambda[1] = alpha_to[rs_modnn(rs, prim * (nn - 1 - (eras_pos[0] + pad)))]; for (i = 1; i < no_eras; i++) { u = rs_modnn(rs, prim * (nn - 1 - (eras_pos[i] + pad))); for (j = i + 1; j > 0; j--) { tmp = index_of[lambda[j - 1]]; if (tmp != nn) { lambda[j] ^= alpha_to[rs_modnn(rs, u + tmp)]; } } } } for (i = 0; i < nroots + 1; i++) b[i] = index_of[lambda[i]]; /* * Begin Berlekamp-Massey algorithm to determine error+erasure * locator polynomial */ r = no_eras; el = no_eras; while (++r <= nroots) { /* r is the step number */ /* Compute discrepancy at the r-th step in poly-form */ discr_r = 0; for (i = 0; i < r; i++) { if ((lambda[i] != 0) && (s[r - i - 1] != nn)) { discr_r ^= alpha_to[rs_modnn(rs, index_of[lambda[i]] + s[r - i - 1])]; } } discr_r = index_of[discr_r]; /* Index form */ if (discr_r == nn) { /* 2 lines below: B(x) <-- x*B(x) */ memmove (&b[1], b, nroots * sizeof (b[0])); b[0] = nn; } else { /* 7 lines below: T(x) <-- lambda(x)-discr_r*x*b(x) */ t[0] = lambda[0]; for (i = 0; i < nroots; i++) { if (b[i] != nn) { t[i + 1] = lambda[i + 1] ^ alpha_to[rs_modnn(rs, discr_r + b[i])]; } else t[i + 1] = lambda[i + 1]; } if (2 * el <= r + no_eras - 1) { el = r + no_eras - el; /* * 2 lines below: B(x) <-- inv(discr_r) * * lambda(x) */ for (i = 0; i <= nroots; i++) { b[i] = (lambda[i] == 0) ? nn : rs_modnn(rs, index_of[lambda[i]] - discr_r + nn); } } else { /* 2 lines below: B(x) <-- x*B(x) */ memmove(&b[1], b, nroots * sizeof(b[0])); b[0] = nn; } memcpy(lambda, t, (nroots + 1) * sizeof(t[0])); } } /* Convert lambda to index form and compute deg(lambda(x)) */ deg_lambda = 0; for (i = 0; i < nroots + 1; i++) { lambda[i] = index_of[lambda[i]]; if (lambda[i] != nn) deg_lambda = i; } if (deg_lambda == 0) { /* * deg(lambda) is zero even though the syndrome is non-zero * => uncorrectable error detected */ return -EBADMSG; } /* Find roots of error+erasure locator polynomial by Chien search */ memcpy(&reg[1], &lambda[1], nroots * sizeof(reg[0])); count = 0; /* Number of roots of lambda(x) */ for (i = 1, k = iprim - 1; i <= nn; i++, k = rs_modnn(rs, k + iprim)) { q = 1; /* lambda[0] is always 0 */ for (j = deg_lambda; j > 0; j--) { if (reg[j] != nn) { reg[j] = rs_modnn(rs, reg[j] + j); q ^= alpha_to[reg[j]]; } } if (q != 0) continue; /* Not a root */ if (k < pad) { /* Impossible error location. Uncorrectable error. */ return -EBADMSG; } /* store root (index-form) and error location number */ root[count] = i; loc[count] = k; /* If we've already found max possible roots, * abort the search to save time */ if (++count == deg_lambda) break; } if (deg_lambda != count) { /* * deg(lambda) unequal to number of roots => uncorrectable * error detected */ return -EBADMSG; } /* * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo * x**nroots). in index form. Also find deg(omega). */ deg_omega = deg_lambda - 1; for (i = 0; i <= deg_omega; i++) { tmp = 0; for (j = i; j >= 0; j--) { if ((s[i - j] != nn) && (lambda[j] != nn)) tmp ^= alpha_to[rs_modnn(rs, s[i - j] + lambda[j])]; } omega[i] = index_of[tmp]; } /* * Compute error values in poly-form. 
num1 = omega(inv(X(l))), num2 = * inv(X(l))**(fcr-1) and den = lambda_pr(inv(X(l))) all in poly-form * Note: we reuse the buffer for b to store the correction pattern */ num_corrected = 0; for (j = count - 1; j >= 0; j--) { num1 = 0; for (i = deg_omega; i >= 0; i--) { if (omega[i] != nn) num1 ^= alpha_to[rs_modnn(rs, omega[i] + i * root[j])]; } if (num1 == 0) { /* Nothing to correct at this position */ b[j] = 0; continue; } num2 = alpha_to[rs_modnn(rs, root[j] * (fcr - 1) + nn)]; den = 0; /* lambda[i+1] for i even is the formal derivative * lambda_pr of lambda[i] */ for (i = min(deg_lambda, nroots - 1) & ~1; i >= 0; i -= 2) { if (lambda[i + 1] != nn) { den ^= alpha_to[rs_modnn(rs, lambda[i + 1] + i * root[j])]; } } b[j] = alpha_to[rs_modnn(rs, index_of[num1] + index_of[num2] + nn - index_of[den])]; num_corrected++; } /* * We compute the syndrome of the 'error' and check that it matches * the syndrome of the received word */ for (i = 0; i < nroots; i++) { tmp = 0; for (j = 0; j < count; j++) { if (b[j] == 0) continue; k = (fcr + i) * prim * (nn-loc[j]-1); tmp ^= alpha_to[rs_modnn(rs, index_of[b[j]] + k)]; } if (tmp != alpha_to[s[i]]) return -EBADMSG; } /* * Store the error correction pattern, if a * correction buffer is available */ if (corr && eras_pos) { j = 0; for (i = 0; i < count; i++) { if (b[i]) { corr[j] = b[i]; eras_pos[j++] = loc[i] - pad; } } } else if (data && par) { /* Apply error to data and parity */ for (i = 0; i < count; i++) { if (loc[i] < (nn - nroots)) data[loc[i] - pad] ^= b[i]; else par[loc[i] - pad - len] ^= b[i]; } } return num_corrected; }
linux-master
lib/reed_solomon/decode_rs.c
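Like the encoder, decode_rs.c is an #include'd body rather than a standalone translation unit. One detail worth calling out is the s parameter: when the caller already has the syndrome (many flash controllers compute it in hardware), it can be passed in index form and the data/parity walk at the top is skipped entirely; test_rslib.c below exercises exactly this path in its CALLER_SYNDROME method. A hedged sketch of that style of call, with my_correct_with_hw_syndrome() invented for illustration:

/* Hypothetical wrapper: correct a codeword whose syndrome was already
 * computed elsewhere. syn[] must hold nroots syndromes in index form, as
 * documented for decode_rs16(); the correction pattern is reported via
 * corr[] and eras_pos[] instead of being applied to data in place.
 */
static int my_correct_with_hw_syndrome(struct rs_control *rsc,
					uint16_t *syn, int data_len,
					uint16_t *corr, int *eras_pos)
{
	/* data and par may be NULL because only the correction pattern is
	 * requested; the caller applies corr[i] at position eras_pos[i].
	 */
	return decode_rs16(rsc, NULL, NULL, data_len, syn, 0, eras_pos,
			   0, corr);
}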
// SPDX-License-Identifier: GPL-2.0 /* * Generic Reed Solomon encoder / decoder library * * Copyright (C) 2004 Thomas Gleixner ([email protected]) * * Reed Solomon code lifted from reed solomon library written by Phil Karn * Copyright 2002 Phil Karn, KA9Q * * Description: * * The generic Reed Solomon library provides runtime configurable * encoding / decoding of RS codes. * * Each user must call init_rs to get a pointer to a rs_control structure * for the given rs parameters. The control struct is unique per instance. * It points to a codec which can be shared by multiple control structures. * If a codec is newly allocated then the polynomial arrays for fast * encoding / decoding are built. This can take some time so make sure not * to call this function from a time critical path. Usually a module / * driver should initialize the necessary rs_control structure on module / * driver init and release it on exit. * * The encoding puts the calculated syndrome into a given syndrome buffer. * * The decoding is a two step process. The first step calculates the * syndrome over the received (data + syndrome) and calls the second stage, * which does the decoding / error correction itself. Many hw encoders * provide a syndrome calculation over the received data + syndrome and can * call the second stage directly. */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/rslib.h> #include <linux/slab.h> #include <linux/mutex.h> enum { RS_DECODE_LAMBDA, RS_DECODE_SYN, RS_DECODE_B, RS_DECODE_T, RS_DECODE_OMEGA, RS_DECODE_ROOT, RS_DECODE_REG, RS_DECODE_LOC, RS_DECODE_NUM_BUFFERS }; /* This list holds all currently allocated rs codec structures */ static LIST_HEAD(codec_list); /* Protection for the list */ static DEFINE_MUTEX(rslistlock); /** * codec_init - Initialize a Reed-Solomon codec * @symsize: symbol size, bits (1-8) * @gfpoly: Field generator polynomial coefficients * @gffunc: Field generator function * @fcr: first root of RS code generator polynomial, index form * @prim: primitive element to generate polynomial roots * @nroots: RS code generator polynomial degree (number of roots) * @gfp: GFP_ flags for allocations * * Allocate a codec structure and the polynom arrays for faster * en/decoding. Fill the arrays according to the given parameters. 
*/ static struct rs_codec *codec_init(int symsize, int gfpoly, int (*gffunc)(int), int fcr, int prim, int nroots, gfp_t gfp) { int i, j, sr, root, iprim; struct rs_codec *rs; rs = kzalloc(sizeof(*rs), gfp); if (!rs) return NULL; INIT_LIST_HEAD(&rs->list); rs->mm = symsize; rs->nn = (1 << symsize) - 1; rs->fcr = fcr; rs->prim = prim; rs->nroots = nroots; rs->gfpoly = gfpoly; rs->gffunc = gffunc; /* Allocate the arrays */ rs->alpha_to = kmalloc_array(rs->nn + 1, sizeof(uint16_t), gfp); if (rs->alpha_to == NULL) goto err; rs->index_of = kmalloc_array(rs->nn + 1, sizeof(uint16_t), gfp); if (rs->index_of == NULL) goto err; rs->genpoly = kmalloc_array(rs->nroots + 1, sizeof(uint16_t), gfp); if(rs->genpoly == NULL) goto err; /* Generate Galois field lookup tables */ rs->index_of[0] = rs->nn; /* log(zero) = -inf */ rs->alpha_to[rs->nn] = 0; /* alpha**-inf = 0 */ if (gfpoly) { sr = 1; for (i = 0; i < rs->nn; i++) { rs->index_of[sr] = i; rs->alpha_to[i] = sr; sr <<= 1; if (sr & (1 << symsize)) sr ^= gfpoly; sr &= rs->nn; } } else { sr = gffunc(0); for (i = 0; i < rs->nn; i++) { rs->index_of[sr] = i; rs->alpha_to[i] = sr; sr = gffunc(sr); } } /* If it's not primitive, exit */ if(sr != rs->alpha_to[0]) goto err; /* Find prim-th root of 1, used in decoding */ for(iprim = 1; (iprim % prim) != 0; iprim += rs->nn); /* prim-th root of 1, index form */ rs->iprim = iprim / prim; /* Form RS code generator polynomial from its roots */ rs->genpoly[0] = 1; for (i = 0, root = fcr * prim; i < nroots; i++, root += prim) { rs->genpoly[i + 1] = 1; /* Multiply rs->genpoly[] by @**(root + x) */ for (j = i; j > 0; j--) { if (rs->genpoly[j] != 0) { rs->genpoly[j] = rs->genpoly[j -1] ^ rs->alpha_to[rs_modnn(rs, rs->index_of[rs->genpoly[j]] + root)]; } else rs->genpoly[j] = rs->genpoly[j - 1]; } /* rs->genpoly[0] can never be zero */ rs->genpoly[0] = rs->alpha_to[rs_modnn(rs, rs->index_of[rs->genpoly[0]] + root)]; } /* convert rs->genpoly[] to index form for quicker encoding */ for (i = 0; i <= nroots; i++) rs->genpoly[i] = rs->index_of[rs->genpoly[i]]; rs->users = 1; list_add(&rs->list, &codec_list); return rs; err: kfree(rs->genpoly); kfree(rs->index_of); kfree(rs->alpha_to); kfree(rs); return NULL; } /** * free_rs - Free the rs control structure * @rs: The control structure which is not longer used by the * caller * * Free the control structure. If @rs is the last user of the associated * codec, free the codec as well. */ void free_rs(struct rs_control *rs) { struct rs_codec *cd; if (!rs) return; cd = rs->codec; mutex_lock(&rslistlock); cd->users--; if(!cd->users) { list_del(&cd->list); kfree(cd->alpha_to); kfree(cd->index_of); kfree(cd->genpoly); kfree(cd); } mutex_unlock(&rslistlock); kfree(rs); } EXPORT_SYMBOL_GPL(free_rs); /** * init_rs_internal - Allocate rs control, find a matching codec or allocate a new one * @symsize: the symbol size (number of bits) * @gfpoly: the extended Galois field generator polynomial coefficients, * with the 0th coefficient in the low order bit. The polynomial * must be primitive; * @gffunc: pointer to function to generate the next field element, * or the multiplicative identity element if given 0. 
Used * instead of gfpoly if gfpoly is 0 * @fcr: the first consecutive root of the rs code generator polynomial * in index form * @prim: primitive element to generate polynomial roots * @nroots: RS code generator polynomial degree (number of roots) * @gfp: GFP_ flags for allocations */ static struct rs_control *init_rs_internal(int symsize, int gfpoly, int (*gffunc)(int), int fcr, int prim, int nroots, gfp_t gfp) { struct list_head *tmp; struct rs_control *rs; unsigned int bsize; /* Sanity checks */ if (symsize < 1) return NULL; if (fcr < 0 || fcr >= (1<<symsize)) return NULL; if (prim <= 0 || prim >= (1<<symsize)) return NULL; if (nroots < 0 || nroots >= (1<<symsize)) return NULL; /* * The decoder needs buffers in each control struct instance to * avoid variable size or large fixed size allocations on * stack. Size the buffers to arrays of [nroots + 1]. */ bsize = sizeof(uint16_t) * RS_DECODE_NUM_BUFFERS * (nroots + 1); rs = kzalloc(sizeof(*rs) + bsize, gfp); if (!rs) return NULL; mutex_lock(&rslistlock); /* Walk through the list and look for a matching entry */ list_for_each(tmp, &codec_list) { struct rs_codec *cd = list_entry(tmp, struct rs_codec, list); if (symsize != cd->mm) continue; if (gfpoly != cd->gfpoly) continue; if (gffunc != cd->gffunc) continue; if (fcr != cd->fcr) continue; if (prim != cd->prim) continue; if (nroots != cd->nroots) continue; /* We have a matching one already */ cd->users++; rs->codec = cd; goto out; } /* Create a new one */ rs->codec = codec_init(symsize, gfpoly, gffunc, fcr, prim, nroots, gfp); if (!rs->codec) { kfree(rs); rs = NULL; } out: mutex_unlock(&rslistlock); return rs; } /** * init_rs_gfp - Create a RS control struct and initialize it * @symsize: the symbol size (number of bits) * @gfpoly: the extended Galois field generator polynomial coefficients, * with the 0th coefficient in the low order bit. The polynomial * must be primitive; * @fcr: the first consecutive root of the rs code generator polynomial * in index form * @prim: primitive element to generate polynomial roots * @nroots: RS code generator polynomial degree (number of roots) * @gfp: Memory allocation flags. */ struct rs_control *init_rs_gfp(int symsize, int gfpoly, int fcr, int prim, int nroots, gfp_t gfp) { return init_rs_internal(symsize, gfpoly, NULL, fcr, prim, nroots, gfp); } EXPORT_SYMBOL_GPL(init_rs_gfp); /** * init_rs_non_canonical - Allocate rs control struct for fields with * non-canonical representation * @symsize: the symbol size (number of bits) * @gffunc: pointer to function to generate the next field element, * or the multiplicative identity element if given 0. Used * instead of gfpoly if gfpoly is 0 * @fcr: the first consecutive root of the rs code generator polynomial * in index form * @prim: primitive element to generate polynomial roots * @nroots: RS code generator polynomial degree (number of roots) */ struct rs_control *init_rs_non_canonical(int symsize, int (*gffunc)(int), int fcr, int prim, int nroots) { return init_rs_internal(symsize, 0, gffunc, fcr, prim, nroots, GFP_KERNEL); } EXPORT_SYMBOL_GPL(init_rs_non_canonical); #ifdef CONFIG_REED_SOLOMON_ENC8 /** * encode_rs8 - Calculate the parity for data values (8bit data width) * @rsc: the rs control structure * @data: data field of a given type * @len: data length * @par: parity data, must be initialized by caller (usually all 0) * @invmsk: invert data mask (will be xored on data) * * The parity uses a uint16_t data type to enable * symbol size > 8. 
The calling code must take care of encoding of the * syndrome result for storage itself. */ int encode_rs8(struct rs_control *rsc, uint8_t *data, int len, uint16_t *par, uint16_t invmsk) { #include "encode_rs.c" } EXPORT_SYMBOL_GPL(encode_rs8); #endif #ifdef CONFIG_REED_SOLOMON_DEC8 /** * decode_rs8 - Decode codeword (8bit data width) * @rsc: the rs control structure * @data: data field of a given type * @par: received parity data field * @len: data length * @s: syndrome data field, must be in index form * (if NULL, syndrome is calculated) * @no_eras: number of erasures * @eras_pos: position of erasures, can be NULL * @invmsk: invert data mask (will be xored on data, not on parity!) * @corr: buffer to store correction bitmask on eras_pos * * The syndrome and parity uses a uint16_t data type to enable * symbol size > 8. The calling code must take care of decoding of the * syndrome result and the received parity before calling this code. * * Note: The rs_control struct @rsc contains buffers which are used for * decoding, so the caller has to ensure that decoder invocations are * serialized. * * Returns the number of corrected symbols or -EBADMSG for uncorrectable * errors. The count includes errors in the parity. */ int decode_rs8(struct rs_control *rsc, uint8_t *data, uint16_t *par, int len, uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, uint16_t *corr) { #include "decode_rs.c" } EXPORT_SYMBOL_GPL(decode_rs8); #endif #ifdef CONFIG_REED_SOLOMON_ENC16 /** * encode_rs16 - Calculate the parity for data values (16bit data width) * @rsc: the rs control structure * @data: data field of a given type * @len: data length * @par: parity data, must be initialized by caller (usually all 0) * @invmsk: invert data mask (will be xored on data, not on parity!) * * Each field in the data array contains up to symbol size bits of valid data. */ int encode_rs16(struct rs_control *rsc, uint16_t *data, int len, uint16_t *par, uint16_t invmsk) { #include "encode_rs.c" } EXPORT_SYMBOL_GPL(encode_rs16); #endif #ifdef CONFIG_REED_SOLOMON_DEC16 /** * decode_rs16 - Decode codeword (16bit data width) * @rsc: the rs control structure * @data: data field of a given type * @par: received parity data field * @len: data length * @s: syndrome data field, must be in index form * (if NULL, syndrome is calculated) * @no_eras: number of erasures * @eras_pos: position of erasures, can be NULL * @invmsk: invert data mask (will be xored on data, not on parity!) * @corr: buffer to store correction bitmask on eras_pos * * Each field in the data array contains up to symbol size bits of valid data. * * Note: The rc_control struct @rsc contains buffers which are used for * decoding, so the caller has to ensure that decoder invocations are * serialized. * * Returns the number of corrected symbols or -EBADMSG for uncorrectable * errors. The count includes errors in the parity. */ int decode_rs16(struct rs_control *rsc, uint16_t *data, uint16_t *par, int len, uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, uint16_t *corr) { #include "decode_rs.c" } EXPORT_SYMBOL_GPL(decode_rs16); #endif MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Reed Solomon encoder/decoder"); MODULE_AUTHOR("Phil Karn, Thomas Gleixner");
linux-master
lib/reed_solomon/reed_solomon.c
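Putting the pieces together, a typical user allocates one rs_control per instance, feeds symbols through encode_rs8()/decode_rs8(), and releases the instance with free_rs(). The sketch below is a hedged round-trip example, not taken from any driver; the (symsize 8, gfpoly 0x11d, fcr 1, prim 1, nroots 30) parameter set is borrowed from the test table in test_rslib.c that follows, and my_rs_roundtrip()/MY_NROOTS are invented names.

#include <linux/rslib.h>
#include <linux/slab.h>
#include <linux/string.h>

#define MY_NROOTS	30	/* parity symbols; one of the tested parameter sets */

/* Usage sketch: encode 8-bit data symbols over GF(2^8) with the 0x11d field
 * polynomial, then decode in place. init_rs_gfp()/free_rs() and
 * encode_rs8()/decode_rs8() are the exported entry points documented above.
 */
static int my_rs_roundtrip(uint8_t *data, int len)
{
	uint16_t par[MY_NROOTS];
	struct rs_control *rsc;
	int corrected;

	rsc = init_rs_gfp(8, 0x11d, 1, 1, MY_NROOTS, GFP_KERNEL);
	if (!rsc)
		return -ENOMEM;

	/* Parity must be initialized (usually zeroed) by the caller */
	memset(par, 0, sizeof(par));
	encode_rs8(rsc, data, len, par, 0);

	/* ... store or transfer data[] and par[]; errors may creep in ... */

	/* NULL syndrome: decode_rs8() computes it from data + par itself.
	 * With no erasures and no correction buffer, errors are fixed in
	 * place and the return value counts the corrected symbols.
	 */
	corrected = decode_rs8(rsc, data, par, len, NULL, 0, NULL, 0, NULL);

	free_rs(rsc);
	return corrected;	/* -EBADMSG if uncorrectable */
}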
// SPDX-License-Identifier: GPL-2.0 /* * Tests for Generic Reed Solomon encoder / decoder library * * Written by Ferdinand Blomqvist * Based on previous work by Phil Karn, KA9Q */ #include <linux/rslib.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/random.h> #include <linux/slab.h> enum verbosity { V_SILENT, V_PROGRESS, V_CSUMMARY }; enum method { CORR_BUFFER, CALLER_SYNDROME, IN_PLACE }; #define __param(type, name, init, msg) \ static type name = init; \ module_param(name, type, 0444); \ MODULE_PARM_DESC(name, msg) __param(int, v, V_PROGRESS, "Verbosity level"); __param(int, ewsc, 1, "Erasures without symbol corruption"); __param(int, bc, 1, "Test for correct behaviour beyond error correction capacity"); struct etab { int symsize; int genpoly; int fcs; int prim; int nroots; int ntrials; }; /* List of codes to test */ static struct etab Tab[] = { {2, 0x7, 1, 1, 1, 100000 }, {3, 0xb, 1, 1, 2, 100000 }, {3, 0xb, 1, 1, 3, 100000 }, {3, 0xb, 2, 1, 4, 100000 }, {4, 0x13, 1, 1, 4, 10000 }, {5, 0x25, 1, 1, 6, 1000 }, {6, 0x43, 3, 1, 8, 1000 }, {7, 0x89, 1, 1, 14, 500 }, {8, 0x11d, 1, 1, 30, 100 }, {8, 0x187, 112, 11, 32, 100 }, {9, 0x211, 1, 1, 33, 80 }, {0, 0, 0, 0, 0, 0}, }; struct estat { int dwrong; int irv; int wepos; int nwords; }; struct bcstat { int rfail; int rsuccess; int noncw; int nwords; }; struct wspace { uint16_t *c; /* sent codeword */ uint16_t *r; /* received word */ uint16_t *s; /* syndrome */ uint16_t *corr; /* correction buffer */ int *errlocs; int *derrlocs; }; struct pad { int mult; int shift; }; static struct pad pad_coef[] = { { 0, 0 }, { 1, 2 }, { 1, 1 }, { 3, 2 }, { 1, 0 }, }; static void free_ws(struct wspace *ws) { if (!ws) return; kfree(ws->errlocs); kfree(ws->c); kfree(ws); } static struct wspace *alloc_ws(struct rs_codec *rs) { int nroots = rs->nroots; struct wspace *ws; int nn = rs->nn; ws = kzalloc(sizeof(*ws), GFP_KERNEL); if (!ws) return NULL; ws->c = kmalloc_array(2 * (nn + nroots), sizeof(uint16_t), GFP_KERNEL); if (!ws->c) goto err; ws->r = ws->c + nn; ws->s = ws->r + nn; ws->corr = ws->s + nroots; ws->errlocs = kmalloc_array(nn + nroots, sizeof(int), GFP_KERNEL); if (!ws->errlocs) goto err; ws->derrlocs = ws->errlocs + nn; return ws; err: free_ws(ws); return NULL; } /* * Generates a random codeword and stores it in c. Generates random errors and * erasures, and stores the random word with errors in r. Erasure positions are * stored in derrlocs, while errlocs has one of three values in every position: * * 0 if there is no error in this position; * 1 if there is a symbol error in this position; * 2 if there is an erasure without symbol corruption. * * Returns the number of corrupted symbols. 
*/ static int get_rcw_we(struct rs_control *rs, struct wspace *ws, int len, int errs, int eras) { int nroots = rs->codec->nroots; int *derrlocs = ws->derrlocs; int *errlocs = ws->errlocs; int dlen = len - nroots; int nn = rs->codec->nn; uint16_t *c = ws->c; uint16_t *r = ws->r; int errval; int errloc; int i; /* Load c with random data and encode */ for (i = 0; i < dlen; i++) c[i] = get_random_u32() & nn; memset(c + dlen, 0, nroots * sizeof(*c)); encode_rs16(rs, c, dlen, c + dlen, 0); /* Make copyand add errors and erasures */ memcpy(r, c, len * sizeof(*r)); memset(errlocs, 0, len * sizeof(*errlocs)); memset(derrlocs, 0, nroots * sizeof(*derrlocs)); /* Generating random errors */ for (i = 0; i < errs; i++) { do { /* Error value must be nonzero */ errval = get_random_u32() & nn; } while (errval == 0); do { /* Must not choose the same location twice */ errloc = get_random_u32_below(len); } while (errlocs[errloc] != 0); errlocs[errloc] = 1; r[errloc] ^= errval; } /* Generating random erasures */ for (i = 0; i < eras; i++) { do { /* Must not choose the same location twice */ errloc = get_random_u32_below(len); } while (errlocs[errloc] != 0); derrlocs[i] = errloc; if (ewsc && get_random_u32_below(2)) { /* Erasure with the symbol intact */ errlocs[errloc] = 2; } else { /* Erasure with corrupted symbol */ do { /* Error value must be nonzero */ errval = get_random_u32() & nn; } while (errval == 0); errlocs[errloc] = 1; r[errloc] ^= errval; errs++; } } return errs; } static void fix_err(uint16_t *data, int nerrs, uint16_t *corr, int *errlocs) { int i; for (i = 0; i < nerrs; i++) data[errlocs[i]] ^= corr[i]; } static void compute_syndrome(struct rs_control *rsc, uint16_t *data, int len, uint16_t *syn) { struct rs_codec *rs = rsc->codec; uint16_t *alpha_to = rs->alpha_to; uint16_t *index_of = rs->index_of; int nroots = rs->nroots; int prim = rs->prim; int fcr = rs->fcr; int i, j; /* Calculating syndrome */ for (i = 0; i < nroots; i++) { syn[i] = data[0]; for (j = 1; j < len; j++) { if (syn[i] == 0) { syn[i] = data[j]; } else { syn[i] = data[j] ^ alpha_to[rs_modnn(rs, index_of[syn[i]] + (fcr + i) * prim)]; } } } /* Convert to index form */ for (i = 0; i < nroots; i++) syn[i] = rs->index_of[syn[i]]; } /* Test up to error correction capacity */ static void test_uc(struct rs_control *rs, int len, int errs, int eras, int trials, struct estat *stat, struct wspace *ws, int method) { int dlen = len - rs->codec->nroots; int *derrlocs = ws->derrlocs; int *errlocs = ws->errlocs; uint16_t *corr = ws->corr; uint16_t *c = ws->c; uint16_t *r = ws->r; uint16_t *s = ws->s; int derrs, nerrs; int i, j; for (j = 0; j < trials; j++) { nerrs = get_rcw_we(rs, ws, len, errs, eras); switch (method) { case CORR_BUFFER: derrs = decode_rs16(rs, r, r + dlen, dlen, NULL, eras, derrlocs, 0, corr); fix_err(r, derrs, corr, derrlocs); break; case CALLER_SYNDROME: compute_syndrome(rs, r, len, s); derrs = decode_rs16(rs, NULL, NULL, dlen, s, eras, derrlocs, 0, corr); fix_err(r, derrs, corr, derrlocs); break; case IN_PLACE: derrs = decode_rs16(rs, r, r + dlen, dlen, NULL, eras, derrlocs, 0, NULL); break; default: continue; } if (derrs != nerrs) stat->irv++; if (method != IN_PLACE) { for (i = 0; i < derrs; i++) { if (errlocs[derrlocs[i]] != 1) stat->wepos++; } } if (memcmp(r, c, len * sizeof(*r))) stat->dwrong++; } stat->nwords += trials; } static int ex_rs_helper(struct rs_control *rs, struct wspace *ws, int len, int trials, int method) { static const char * const desc[] = { "Testing correction buffer interface...", "Testing with caller 
provided syndrome...", "Testing in-place interface..." }; struct estat stat = {0, 0, 0, 0}; int nroots = rs->codec->nroots; int errs, eras, retval; if (v >= V_PROGRESS) pr_info(" %s\n", desc[method]); for (errs = 0; errs <= nroots / 2; errs++) for (eras = 0; eras <= nroots - 2 * errs; eras++) test_uc(rs, len, errs, eras, trials, &stat, ws, method); if (v >= V_CSUMMARY) { pr_info(" Decodes wrong: %d / %d\n", stat.dwrong, stat.nwords); pr_info(" Wrong return value: %d / %d\n", stat.irv, stat.nwords); if (method != IN_PLACE) pr_info(" Wrong error position: %d\n", stat.wepos); } retval = stat.dwrong + stat.wepos + stat.irv; if (retval && v >= V_PROGRESS) pr_warn(" FAIL: %d decoding failures!\n", retval); return retval; } static int exercise_rs(struct rs_control *rs, struct wspace *ws, int len, int trials) { int retval = 0; int i; if (v >= V_PROGRESS) pr_info("Testing up to error correction capacity...\n"); for (i = 0; i <= IN_PLACE; i++) retval |= ex_rs_helper(rs, ws, len, trials, i); return retval; } /* Tests for correct behaviour beyond error correction capacity */ static void test_bc(struct rs_control *rs, int len, int errs, int eras, int trials, struct bcstat *stat, struct wspace *ws) { int nroots = rs->codec->nroots; int dlen = len - nroots; int *derrlocs = ws->derrlocs; uint16_t *corr = ws->corr; uint16_t *r = ws->r; int derrs, j; for (j = 0; j < trials; j++) { get_rcw_we(rs, ws, len, errs, eras); derrs = decode_rs16(rs, r, r + dlen, dlen, NULL, eras, derrlocs, 0, corr); fix_err(r, derrs, corr, derrlocs); if (derrs >= 0) { stat->rsuccess++; /* * We check that the returned word is actually a * codeword. The obvious way to do this would be to * compute the syndrome, but we don't want to replicate * that code here. However, all the codes are in * systematic form, and therefore we can encode the * returned word, and see whether the parity changes or * not. */ memset(corr, 0, nroots * sizeof(*corr)); encode_rs16(rs, r, dlen, corr, 0); if (memcmp(r + dlen, corr, nroots * sizeof(*corr))) stat->noncw++; } else { stat->rfail++; } } stat->nwords += trials; } static int exercise_rs_bc(struct rs_control *rs, struct wspace *ws, int len, int trials) { struct bcstat stat = {0, 0, 0, 0}; int nroots = rs->codec->nroots; int errs, eras, cutoff; if (v >= V_PROGRESS) pr_info("Testing beyond error correction capacity...\n"); for (errs = 1; errs <= nroots; errs++) { eras = nroots - 2 * errs + 1; if (eras < 0) eras = 0; cutoff = nroots <= len - errs ? 
nroots : len - errs; for (; eras <= cutoff; eras++) test_bc(rs, len, errs, eras, trials, &stat, ws); } if (v >= V_CSUMMARY) { pr_info(" decoder gives up: %d / %d\n", stat.rfail, stat.nwords); pr_info(" decoder returns success: %d / %d\n", stat.rsuccess, stat.nwords); pr_info(" not a codeword: %d / %d\n", stat.noncw, stat.rsuccess); } if (stat.noncw && v >= V_PROGRESS) pr_warn(" FAIL: %d silent failures!\n", stat.noncw); return stat.noncw; } static int run_exercise(struct etab *e) { int nn = (1 << e->symsize) - 1; int kk = nn - e->nroots; struct rs_control *rsc; int retval = -ENOMEM; int max_pad = kk - 1; int prev_pad = -1; struct wspace *ws; int i; rsc = init_rs(e->symsize, e->genpoly, e->fcs, e->prim, e->nroots); if (!rsc) return retval; ws = alloc_ws(rsc->codec); if (!ws) goto err; retval = 0; for (i = 0; i < ARRAY_SIZE(pad_coef); i++) { int pad = (pad_coef[i].mult * max_pad) >> pad_coef[i].shift; int len = nn - pad; if (pad == prev_pad) continue; prev_pad = pad; if (v >= V_PROGRESS) { pr_info("Testing (%d,%d)_%d code...\n", len, kk - pad, nn + 1); } retval |= exercise_rs(rsc, ws, len, e->ntrials); if (bc) retval |= exercise_rs_bc(rsc, ws, len, e->ntrials); } free_ws(ws); err: free_rs(rsc); return retval; } static int __init test_rslib_init(void) { int i, fail = 0; for (i = 0; Tab[i].symsize != 0 ; i++) { int retval; retval = run_exercise(Tab + i); if (retval < 0) return -ENOMEM; fail |= retval; } if (fail) pr_warn("rslib: test failed\n"); else pr_info("rslib: test ok\n"); return -EAGAIN; /* Fail will directly unload the module */ } static void __exit test_rslib_exit(void) { } module_init(test_rslib_init) module_exit(test_rslib_exit) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ferdinand Blomqvist"); MODULE_DESCRIPTION("Reed-Solomon library test");
linux-master
lib/reed_solomon/test_rslib.c
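A minimal, hypothetical round-trip through the same rslib entry points the test above exercises (init_rs/encode_rs16/decode_rs16/free_rs). The field polynomial 0x11d, the 16-symbol data block and the 6 parity roots are illustrative choices, not values taken from the test's parameter table:

/* Sketch only: encode a block, corrupt one symbol, decode it in place. */
#include <linux/errno.h>
#include <linux/rslib.h>
#include <linux/string.h>
#include <linux/types.h>

static int rs_roundtrip_sketch(void)
{
	struct rs_control *rs;
	uint16_t data[16], par[6];
	int dlen = 16, i, nerr;

	/* GF(2^8), primitive poly 0x11d, fcr = 0, prim = 1, 6 parity symbols */
	rs = init_rs(8, 0x11d, 0, 1, 6);
	if (!rs)
		return -ENOMEM;

	for (i = 0; i < dlen; i++)		/* symbols must fit in 8 bits */
		data[i] = i & 0xff;
	memset(par, 0, sizeof(par));		/* encoder XORs into the parity */
	encode_rs16(rs, data, dlen, par, 0);

	data[3] ^= 0x11;			/* inject a single symbol error */

	/* In-place decode: no erasures, corrections applied to data/par */
	nerr = decode_rs16(rs, data, par, dlen, NULL, 0, NULL, 0, NULL);

	free_rs(rs);
	return nerr < 0 ? nerr : 0;		/* nerr = number of corrected symbols */
}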
#include <linux/kernel.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/ktime.h> #include <linux/debugfs.h> #include <linux/highmem.h> #include "gup_test.h" static void put_back_pages(unsigned int cmd, struct page **pages, unsigned long nr_pages, unsigned int gup_test_flags) { unsigned long i; switch (cmd) { case GUP_FAST_BENCHMARK: case GUP_BASIC_TEST: for (i = 0; i < nr_pages; i++) put_page(pages[i]); break; case PIN_FAST_BENCHMARK: case PIN_BASIC_TEST: case PIN_LONGTERM_BENCHMARK: unpin_user_pages(pages, nr_pages); break; case DUMP_USER_PAGES_TEST: if (gup_test_flags & GUP_TEST_FLAG_DUMP_PAGES_USE_PIN) { unpin_user_pages(pages, nr_pages); } else { for (i = 0; i < nr_pages; i++) put_page(pages[i]); } break; } } static void verify_dma_pinned(unsigned int cmd, struct page **pages, unsigned long nr_pages) { unsigned long i; struct folio *folio; switch (cmd) { case PIN_FAST_BENCHMARK: case PIN_BASIC_TEST: case PIN_LONGTERM_BENCHMARK: for (i = 0; i < nr_pages; i++) { folio = page_folio(pages[i]); if (WARN(!folio_maybe_dma_pinned(folio), "pages[%lu] is NOT dma-pinned\n", i)) { dump_page(&folio->page, "gup_test failure"); break; } else if (cmd == PIN_LONGTERM_BENCHMARK && WARN(!folio_is_longterm_pinnable(folio), "pages[%lu] is NOT pinnable but pinned\n", i)) { dump_page(&folio->page, "gup_test failure"); break; } } break; } } static void dump_pages_test(struct gup_test *gup, struct page **pages, unsigned long nr_pages) { unsigned int index_to_dump; unsigned int i; /* * Zero out any user-supplied page index that is out of range. Remember: * .which_pages[] contains a 1-based set of page indices. */ for (i = 0; i < GUP_TEST_MAX_PAGES_TO_DUMP; i++) { if (gup->which_pages[i] > nr_pages) { pr_warn("ZEROING due to out of range: .which_pages[%u]: %u\n", i, gup->which_pages[i]); gup->which_pages[i] = 0; } } for (i = 0; i < GUP_TEST_MAX_PAGES_TO_DUMP; i++) { index_to_dump = gup->which_pages[i]; if (index_to_dump) { index_to_dump--; // Decode from 1-based, to 0-based pr_info("---- page #%u, starting from user virt addr: 0x%llx\n", index_to_dump, gup->addr); dump_page(pages[index_to_dump], "gup_test: dump_pages() test"); } } } static int __gup_test_ioctl(unsigned int cmd, struct gup_test *gup) { ktime_t start_time, end_time; unsigned long i, nr_pages, addr, next; long nr; struct page **pages; int ret = 0; bool needs_mmap_lock = cmd != GUP_FAST_BENCHMARK && cmd != PIN_FAST_BENCHMARK; if (gup->size > ULONG_MAX) return -EINVAL; nr_pages = gup->size / PAGE_SIZE; pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL); if (!pages) return -ENOMEM; if (needs_mmap_lock && mmap_read_lock_killable(current->mm)) { ret = -EINTR; goto free_pages; } i = 0; nr = gup->nr_pages_per_call; start_time = ktime_get(); for (addr = gup->addr; addr < gup->addr + gup->size; addr = next) { if (nr != gup->nr_pages_per_call) break; next = addr + nr * PAGE_SIZE; if (next > gup->addr + gup->size) { next = gup->addr + gup->size; nr = (next - addr) / PAGE_SIZE; } switch (cmd) { case GUP_FAST_BENCHMARK: nr = get_user_pages_fast(addr, nr, gup->gup_flags, pages + i); break; case GUP_BASIC_TEST: nr = get_user_pages(addr, nr, gup->gup_flags, pages + i); break; case PIN_FAST_BENCHMARK: nr = pin_user_pages_fast(addr, nr, gup->gup_flags, pages + i); break; case PIN_BASIC_TEST: nr = pin_user_pages(addr, nr, gup->gup_flags, pages + i); break; case PIN_LONGTERM_BENCHMARK: nr = pin_user_pages(addr, nr, gup->gup_flags | FOLL_LONGTERM, pages + i); break; case DUMP_USER_PAGES_TEST: if (gup->test_flags & 
GUP_TEST_FLAG_DUMP_PAGES_USE_PIN) nr = pin_user_pages(addr, nr, gup->gup_flags, pages + i); else nr = get_user_pages(addr, nr, gup->gup_flags, pages + i); break; default: ret = -EINVAL; goto unlock; } if (nr <= 0) break; i += nr; } end_time = ktime_get(); /* Shifting the meaning of nr_pages: now it is actual number pinned: */ nr_pages = i; gup->get_delta_usec = ktime_us_delta(end_time, start_time); gup->size = addr - gup->addr; /* * Take an un-benchmark-timed moment to verify DMA pinned * state: print a warning if any non-dma-pinned pages are found: */ verify_dma_pinned(cmd, pages, nr_pages); if (cmd == DUMP_USER_PAGES_TEST) dump_pages_test(gup, pages, nr_pages); start_time = ktime_get(); put_back_pages(cmd, pages, nr_pages, gup->test_flags); end_time = ktime_get(); gup->put_delta_usec = ktime_us_delta(end_time, start_time); unlock: if (needs_mmap_lock) mmap_read_unlock(current->mm); free_pages: kvfree(pages); return ret; } static DEFINE_MUTEX(pin_longterm_test_mutex); static struct page **pin_longterm_test_pages; static unsigned long pin_longterm_test_nr_pages; static inline void pin_longterm_test_stop(void) { if (pin_longterm_test_pages) { if (pin_longterm_test_nr_pages) unpin_user_pages(pin_longterm_test_pages, pin_longterm_test_nr_pages); kvfree(pin_longterm_test_pages); pin_longterm_test_pages = NULL; pin_longterm_test_nr_pages = 0; } } static inline int pin_longterm_test_start(unsigned long arg) { long nr_pages, cur_pages, addr, remaining_pages; int gup_flags = FOLL_LONGTERM; struct pin_longterm_test args; struct page **pages; int ret = 0; bool fast; if (pin_longterm_test_pages) return -EINVAL; if (copy_from_user(&args, (void __user *)arg, sizeof(args))) return -EFAULT; if (args.flags & ~(PIN_LONGTERM_TEST_FLAG_USE_WRITE|PIN_LONGTERM_TEST_FLAG_USE_FAST)) return -EINVAL; if (!IS_ALIGNED(args.addr | args.size, PAGE_SIZE)) return -EINVAL; if (args.size > LONG_MAX) return -EINVAL; nr_pages = args.size / PAGE_SIZE; if (!nr_pages) return -EINVAL; pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL); if (!pages) return -ENOMEM; if (args.flags & PIN_LONGTERM_TEST_FLAG_USE_WRITE) gup_flags |= FOLL_WRITE; fast = !!(args.flags & PIN_LONGTERM_TEST_FLAG_USE_FAST); if (!fast && mmap_read_lock_killable(current->mm)) { kvfree(pages); return -EINTR; } pin_longterm_test_pages = pages; pin_longterm_test_nr_pages = 0; while (nr_pages - pin_longterm_test_nr_pages) { remaining_pages = nr_pages - pin_longterm_test_nr_pages; addr = args.addr + pin_longterm_test_nr_pages * PAGE_SIZE; if (fast) cur_pages = pin_user_pages_fast(addr, remaining_pages, gup_flags, pages); else cur_pages = pin_user_pages(addr, remaining_pages, gup_flags, pages); if (cur_pages < 0) { pin_longterm_test_stop(); ret = cur_pages; break; } pin_longterm_test_nr_pages += cur_pages; pages += cur_pages; } if (!fast) mmap_read_unlock(current->mm); return ret; } static inline int pin_longterm_test_read(unsigned long arg) { __u64 user_addr; unsigned long i; if (!pin_longterm_test_pages) return -EINVAL; if (copy_from_user(&user_addr, (void __user *)arg, sizeof(user_addr))) return -EFAULT; for (i = 0; i < pin_longterm_test_nr_pages; i++) { void *addr = kmap_local_page(pin_longterm_test_pages[i]); unsigned long ret; ret = copy_to_user((void __user *)(unsigned long)user_addr, addr, PAGE_SIZE); kunmap_local(addr); if (ret) return -EFAULT; user_addr += PAGE_SIZE; } return 0; } static long pin_longterm_test_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { int ret = -EINVAL; if (mutex_lock_killable(&pin_longterm_test_mutex)) return 
-EINTR; switch (cmd) { case PIN_LONGTERM_TEST_START: ret = pin_longterm_test_start(arg); break; case PIN_LONGTERM_TEST_STOP: pin_longterm_test_stop(); ret = 0; break; case PIN_LONGTERM_TEST_READ: ret = pin_longterm_test_read(arg); break; } mutex_unlock(&pin_longterm_test_mutex); return ret; } static long gup_test_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { struct gup_test gup; int ret; switch (cmd) { case GUP_FAST_BENCHMARK: case PIN_FAST_BENCHMARK: case PIN_LONGTERM_BENCHMARK: case GUP_BASIC_TEST: case PIN_BASIC_TEST: case DUMP_USER_PAGES_TEST: break; case PIN_LONGTERM_TEST_START: case PIN_LONGTERM_TEST_STOP: case PIN_LONGTERM_TEST_READ: return pin_longterm_test_ioctl(filep, cmd, arg); default: return -EINVAL; } if (copy_from_user(&gup, (void __user *)arg, sizeof(gup))) return -EFAULT; ret = __gup_test_ioctl(cmd, &gup); if (ret) return ret; if (copy_to_user((void __user *)arg, &gup, sizeof(gup))) return -EFAULT; return 0; } static int gup_test_release(struct inode *inode, struct file *file) { pin_longterm_test_stop(); return 0; } static const struct file_operations gup_test_fops = { .open = nonseekable_open, .unlocked_ioctl = gup_test_ioctl, .compat_ioctl = compat_ptr_ioctl, .release = gup_test_release, }; static int __init gup_test_init(void) { debugfs_create_file_unsafe("gup_test", 0600, NULL, NULL, &gup_test_fops); return 0; } late_initcall(gup_test_init);
linux-master
mm/gup_test.c
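A minimal, hypothetical kernel-side sketch of the pin/unpin pattern this debugfs driver benchmarks: pin a page-aligned user range with pin_user_pages_fast(), access the pages through kmap_local_page(), and balance every pin with unpin_user_pages(). The function name and the read-only access are illustrative:

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/slab.h>

static long pin_user_range_sketch(unsigned long uaddr, unsigned long size)
{
	long nr_pages = size / PAGE_SIZE, pinned, i;
	unsigned char sum = 0;
	struct page **pages;

	if (!IS_ALIGNED(uaddr | size, PAGE_SIZE) || !nr_pages)
		return -EINVAL;

	pages = kvcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* gup_flags 0: read-only access to the pinned pages */
	pinned = pin_user_pages_fast(uaddr, nr_pages, 0, pages);
	if (pinned <= 0) {
		kvfree(pages);
		return pinned ? pinned : -EFAULT;
	}

	for (i = 0; i < pinned; i++) {
		void *kaddr = kmap_local_page(pages[i]);

		sum += *(unsigned char *)kaddr;	/* stand-in for real use of the data */
		kunmap_local(kaddr);
	}

	unpin_user_pages(pages, pinned);	/* every pin must be released */
	kvfree(pages);
	return sum;				/* sketch: return something derived from the pages */
}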
// SPDX-License-Identifier: GPL-2.0-only /* * zpool memory storage api * * Copyright (C) 2014 Dan Streetman * * This is a common frontend for memory storage pool implementations. * Typically, this is used to store compressed memory. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/list.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/module.h> #include <linux/zpool.h> struct zpool { struct zpool_driver *driver; void *pool; }; static LIST_HEAD(drivers_head); static DEFINE_SPINLOCK(drivers_lock); /** * zpool_register_driver() - register a zpool implementation. * @driver: driver to register */ void zpool_register_driver(struct zpool_driver *driver) { spin_lock(&drivers_lock); atomic_set(&driver->refcount, 0); list_add(&driver->list, &drivers_head); spin_unlock(&drivers_lock); } EXPORT_SYMBOL(zpool_register_driver); /** * zpool_unregister_driver() - unregister a zpool implementation. * @driver: driver to unregister. * * Module usage counting is used to prevent using a driver * while/after unloading, so if this is called from module * exit function, this should never fail; if called from * other than the module exit function, and this returns * failure, the driver is in use and must remain available. */ int zpool_unregister_driver(struct zpool_driver *driver) { int ret = 0, refcount; spin_lock(&drivers_lock); refcount = atomic_read(&driver->refcount); WARN_ON(refcount < 0); if (refcount > 0) ret = -EBUSY; else list_del(&driver->list); spin_unlock(&drivers_lock); return ret; } EXPORT_SYMBOL(zpool_unregister_driver); /* this assumes @type is null-terminated. */ static struct zpool_driver *zpool_get_driver(const char *type) { struct zpool_driver *driver; spin_lock(&drivers_lock); list_for_each_entry(driver, &drivers_head, list) { if (!strcmp(driver->type, type)) { bool got = try_module_get(driver->owner); if (got) atomic_inc(&driver->refcount); spin_unlock(&drivers_lock); return got ? driver : NULL; } } spin_unlock(&drivers_lock); return NULL; } static void zpool_put_driver(struct zpool_driver *driver) { atomic_dec(&driver->refcount); module_put(driver->owner); } /** * zpool_has_pool() - Check if the pool driver is available * @type: The type of the zpool to check (e.g. zbud, zsmalloc) * * This checks if the @type pool driver is available. This will try to load * the requested module, if needed, but there is no guarantee the module will * still be loaded and available immediately after calling. If this returns * true, the caller should assume the pool is available, but must be prepared * to handle the @zpool_create_pool() returning failure. However if this * returns false, the caller should assume the requested pool type is not * available; either the requested pool type module does not exist, or could * not be loaded, and calling @zpool_create_pool() with the pool type will * fail. * * The @type string must be null-terminated. * * Returns: true if @type pool is available, false if not */ bool zpool_has_pool(char *type) { struct zpool_driver *driver = zpool_get_driver(type); if (!driver) { request_module("zpool-%s", type); driver = zpool_get_driver(type); } if (!driver) return false; zpool_put_driver(driver); return true; } EXPORT_SYMBOL(zpool_has_pool); /** * zpool_create_pool() - Create a new zpool * @type: The type of the zpool to create (e.g. zbud, zsmalloc) * @name: The name of the zpool (e.g. zram0, zswap) * @gfp: The GFP flags to use when allocating the pool. 
* * This creates a new zpool of the specified type. The gfp flags will be * used when allocating memory, if the implementation supports it. If the * ops param is NULL, then the created zpool will not be evictable. * * Implementations must guarantee this to be thread-safe. * * The @type and @name strings must be null-terminated. * * Returns: New zpool on success, NULL on failure. */ struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp) { struct zpool_driver *driver; struct zpool *zpool; pr_debug("creating pool type %s\n", type); driver = zpool_get_driver(type); if (!driver) { request_module("zpool-%s", type); driver = zpool_get_driver(type); } if (!driver) { pr_err("no driver for type %s\n", type); return NULL; } zpool = kmalloc(sizeof(*zpool), gfp); if (!zpool) { pr_err("couldn't create zpool - out of memory\n"); zpool_put_driver(driver); return NULL; } zpool->driver = driver; zpool->pool = driver->create(name, gfp); if (!zpool->pool) { pr_err("couldn't create %s pool\n", type); zpool_put_driver(driver); kfree(zpool); return NULL; } pr_debug("created pool type %s\n", type); return zpool; } /** * zpool_destroy_pool() - Destroy a zpool * @zpool: The zpool to destroy. * * Implementations must guarantee this to be thread-safe, * however only when destroying different pools. The same * pool should only be destroyed once, and should not be used * after it is destroyed. * * This destroys an existing zpool. The zpool should not be in use. */ void zpool_destroy_pool(struct zpool *zpool) { pr_debug("destroying pool type %s\n", zpool->driver->type); zpool->driver->destroy(zpool->pool); zpool_put_driver(zpool->driver); kfree(zpool); } /** * zpool_get_type() - Get the type of the zpool * @zpool: The zpool to check * * This returns the type of the pool. * * Implementations must guarantee this to be thread-safe. * * Returns: The type of zpool. */ const char *zpool_get_type(struct zpool *zpool) { return zpool->driver->type; } /** * zpool_malloc_support_movable() - Check if the zpool supports * allocating movable memory * @zpool: The zpool to check * * This returns if the zpool supports allocating movable memory. * * Implementations must guarantee this to be thread-safe. * * Returns: true if the zpool supports allocating movable memory, false if not */ bool zpool_malloc_support_movable(struct zpool *zpool) { return zpool->driver->malloc_support_movable; } /** * zpool_malloc() - Allocate memory * @zpool: The zpool to allocate from. * @size: The amount of memory to allocate. * @gfp: The GFP flags to use when allocating memory. * @handle: Pointer to the handle to set * * This allocates the requested amount of memory from the pool. * The gfp flags will be used when allocating memory, if the * implementation supports it. The provided @handle will be * set to the allocated object handle. * * Implementations must guarantee this to be thread-safe. * * Returns: 0 on success, negative value on error. */ int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp, unsigned long *handle) { return zpool->driver->malloc(zpool->pool, size, gfp, handle); } /** * zpool_free() - Free previously allocated memory * @zpool: The zpool that allocated the memory. * @handle: The handle to the memory to free. * * This frees previously allocated memory. This does not guarantee * that the pool will actually free memory, only that the memory * in the pool will become available for use by the pool. * * Implementations must guarantee this to be thread-safe, * however only when freeing different handles. 
The same * handle should only be freed once, and should not be used * after freeing. */ void zpool_free(struct zpool *zpool, unsigned long handle) { zpool->driver->free(zpool->pool, handle); } /** * zpool_map_handle() - Map a previously allocated handle into memory * @zpool: The zpool that the handle was allocated from * @handle: The handle to map * @mapmode: How the memory should be mapped * * This maps a previously allocated handle into memory. The @mapmode * param indicates to the implementation how the memory will be * used, i.e. read-only, write-only, read-write. If the * implementation does not support it, the memory will be treated * as read-write. * * This may hold locks, disable interrupts, and/or preemption, * and the zpool_unmap_handle() must be called to undo those * actions. The code that uses the mapped handle should complete * its operations on the mapped handle memory quickly and unmap * as soon as possible. As the implementation may use per-cpu * data, multiple handles should not be mapped concurrently on * any cpu. * * Returns: A pointer to the handle's mapped memory area. */ void *zpool_map_handle(struct zpool *zpool, unsigned long handle, enum zpool_mapmode mapmode) { return zpool->driver->map(zpool->pool, handle, mapmode); } /** * zpool_unmap_handle() - Unmap a previously mapped handle * @zpool: The zpool that the handle was allocated from * @handle: The handle to unmap * * This unmaps a previously mapped handle. Any locks or other * actions that the implementation took in zpool_map_handle() * will be undone here. The memory area returned from * zpool_map_handle() should no longer be used after this. */ void zpool_unmap_handle(struct zpool *zpool, unsigned long handle) { zpool->driver->unmap(zpool->pool, handle); } /** * zpool_get_total_size() - The total size of the pool * @zpool: The zpool to check * * This returns the total size in bytes of the pool. * * Returns: Total size of the zpool in bytes. */ u64 zpool_get_total_size(struct zpool *zpool) { return zpool->driver->total_size(zpool->pool); } /** * zpool_can_sleep_mapped - Test if zpool can sleep when do mapped. * @zpool: The zpool to test * * Some allocators enter non-preemptible context in ->map() callback (e.g. * disable pagefaults) and exit that context in ->unmap(), which limits what * we can do with the mapped object. For instance, we cannot wait for * asynchronous crypto API to decompress such an object or take mutexes * since those will call into the scheduler. This function tells us whether * we use such an allocator. * * Returns: true if zpool can sleep; false otherwise. */ bool zpool_can_sleep_mapped(struct zpool *zpool) { return zpool->driver->sleep_mapped; } MODULE_AUTHOR("Dan Streetman <[email protected]>"); MODULE_DESCRIPTION("Common API for compressed memory storage");
linux-master
mm/zpool.c
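A minimal, hypothetical zpool client following the lifecycle the kernel-doc above describes (roughly what a user such as zswap does): create a pool, allocate a handle, copy data in under a short-lived write-only mapping, then free the handle and destroy the pool. The "zsmalloc" backend and the payload are illustrative:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/zpool.h>

static int zpool_roundtrip_sketch(void)
{
	static char payload[64] = "stand-in for compressed data";
	unsigned long handle;
	struct zpool *pool;
	void *dst;
	int ret;

	if (!zpool_has_pool("zsmalloc"))	/* may load the zpool-zsmalloc module */
		return -ENODEV;

	pool = zpool_create_pool("zsmalloc", "sketch", GFP_KERNEL);
	if (!pool)
		return -ENOMEM;

	ret = zpool_malloc(pool, sizeof(payload), GFP_KERNEL, &handle);
	if (ret)
		goto out_destroy;

	/* Keep the mapping short-lived: the backend may disable preemption */
	dst = zpool_map_handle(pool, handle, ZPOOL_MM_WO);
	memcpy(dst, payload, sizeof(payload));
	zpool_unmap_handle(pool, handle);

	zpool_free(pool, handle);
out_destroy:
	zpool_destroy_pool(pool);
	return ret;
}

The write-only mapping mode (ZPOOL_MM_WO) matches the map/unmap contract spelled out above: do the copy immediately and unmap before doing anything that might sleep.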
// SPDX-License-Identifier: GPL-2.0-only /* * linux/mm/filemap.c * * Copyright (C) 1994-1999 Linus Torvalds */ /* * This file handles the generic file mmap semantics used by * most "normal" filesystems (but you don't /have/ to use this: * the NFS filesystem used to do this differently, for example) */ #include <linux/export.h> #include <linux/compiler.h> #include <linux/dax.h> #include <linux/fs.h> #include <linux/sched/signal.h> #include <linux/uaccess.h> #include <linux/capability.h> #include <linux/kernel_stat.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/syscalls.h> #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/file.h> #include <linux/uio.h> #include <linux/error-injection.h> #include <linux/hash.h> #include <linux/writeback.h> #include <linux/backing-dev.h> #include <linux/pagevec.h> #include <linux/security.h> #include <linux/cpuset.h> #include <linux/hugetlb.h> #include <linux/memcontrol.h> #include <linux/shmem_fs.h> #include <linux/rmap.h> #include <linux/delayacct.h> #include <linux/psi.h> #include <linux/ramfs.h> #include <linux/page_idle.h> #include <linux/migrate.h> #include <linux/pipe_fs_i.h> #include <linux/splice.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include "internal.h" #define CREATE_TRACE_POINTS #include <trace/events/filemap.h> /* * FIXME: remove all knowledge of the buffer layer from the core VM */ #include <linux/buffer_head.h> /* for try_to_free_buffers */ #include <asm/mman.h> #include "swap.h" /* * Shared mappings implemented 30.11.1994. It's not fully working yet, * though. * * Shared mappings now work. 15.8.1995 Bruno. * * finished 'unifying' the page and buffer cache and SMP-threaded the * page-cache, 21.05.1999, Ingo Molnar <[email protected]> * * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <[email protected]> */ /* * Lock ordering: * * ->i_mmap_rwsem (truncate_pagecache) * ->private_lock (__free_pte->block_dirty_folio) * ->swap_lock (exclusive_swap_page, others) * ->i_pages lock * * ->i_rwsem * ->invalidate_lock (acquired by fs in truncate path) * ->i_mmap_rwsem (truncate->unmap_mapping_range) * * ->mmap_lock * ->i_mmap_rwsem * ->page_table_lock or pte_lock (various, mainly in memory.c) * ->i_pages lock (arch-dependent flush_dcache_mmap_lock) * * ->mmap_lock * ->invalidate_lock (filemap_fault) * ->lock_page (filemap_fault, access_process_vm) * * ->i_rwsem (generic_perform_write) * ->mmap_lock (fault_in_readable->do_page_fault) * * bdi->wb.list_lock * sb_lock (fs/fs-writeback.c) * ->i_pages lock (__sync_single_inode) * * ->i_mmap_rwsem * ->anon_vma.lock (vma_merge) * * ->anon_vma.lock * ->page_table_lock or pte_lock (anon_vma_prepare and various) * * ->page_table_lock or pte_lock * ->swap_lock (try_to_unmap_one) * ->private_lock (try_to_unmap_one) * ->i_pages lock (try_to_unmap_one) * ->lruvec->lru_lock (follow_page->mark_page_accessed) * ->lruvec->lru_lock (check_pte_range->isolate_lru_page) * ->private_lock (page_remove_rmap->set_page_dirty) * ->i_pages lock (page_remove_rmap->set_page_dirty) * bdi.wb->list_lock (page_remove_rmap->set_page_dirty) * ->inode->i_lock (page_remove_rmap->set_page_dirty) * ->memcg->move_lock (page_remove_rmap->folio_memcg_lock) * bdi.wb->list_lock (zap_pte_range->set_page_dirty) * ->inode->i_lock (zap_pte_range->set_page_dirty) * ->private_lock (zap_pte_range->block_dirty_folio) */ static void page_cache_delete(struct address_space *mapping, struct folio *folio, void *shadow) { XA_STATE(xas, &mapping->i_pages, 
folio->index); long nr = 1; mapping_set_update(&xas, mapping); /* hugetlb pages are represented by a single entry in the xarray */ if (!folio_test_hugetlb(folio)) { xas_set_order(&xas, folio->index, folio_order(folio)); nr = folio_nr_pages(folio); } VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); xas_store(&xas, shadow); xas_init_marks(&xas); folio->mapping = NULL; /* Leave page->index set: truncation lookup relies upon it */ mapping->nrpages -= nr; } static void filemap_unaccount_folio(struct address_space *mapping, struct folio *folio) { long nr; VM_BUG_ON_FOLIO(folio_mapped(folio), folio); if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) { pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n", current->comm, folio_pfn(folio)); dump_page(&folio->page, "still mapped when deleted"); dump_stack(); add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); if (mapping_exiting(mapping) && !folio_test_large(folio)) { int mapcount = page_mapcount(&folio->page); if (folio_ref_count(folio) >= mapcount + 2) { /* * All vmas have already been torn down, so it's * a good bet that actually the page is unmapped * and we'd rather not leak it: if we're wrong, * another bad page check should catch it later. */ page_mapcount_reset(&folio->page); folio_ref_sub(folio, mapcount); } } } /* hugetlb folios do not participate in page cache accounting. */ if (folio_test_hugetlb(folio)) return; nr = folio_nr_pages(folio); __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr); if (folio_test_swapbacked(folio)) { __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr); if (folio_test_pmd_mappable(folio)) __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr); } else if (folio_test_pmd_mappable(folio)) { __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr); filemap_nr_thps_dec(mapping); } /* * At this point folio must be either written or cleaned by * truncate. Dirty folio here signals a bug and loss of * unwritten data - on ordinary filesystems. * * But it's harmless on in-memory filesystems like tmpfs; and can * occur when a driver which did get_user_pages() sets page dirty * before putting it, while the inode is being finally evicted. * * Below fixes dirty accounting after removing the folio entirely * but leaves the dirty flag set: it has no effect for truncated * folio and anyway will be cleared before returning folio to * buddy allocator. */ if (WARN_ON_ONCE(folio_test_dirty(folio) && mapping_can_writeback(mapping))) folio_account_cleaned(folio, inode_to_wb(mapping->host)); } /* * Delete a page from the page cache and free it. Caller has to make * sure the page is locked and that nobody else uses it - or that usage * is safe. The caller must hold the i_pages lock. */ void __filemap_remove_folio(struct folio *folio, void *shadow) { struct address_space *mapping = folio->mapping; trace_mm_filemap_delete_from_page_cache(folio); filemap_unaccount_folio(mapping, folio); page_cache_delete(mapping, folio, shadow); } void filemap_free_folio(struct address_space *mapping, struct folio *folio) { void (*free_folio)(struct folio *); int refs = 1; free_folio = mapping->a_ops->free_folio; if (free_folio) free_folio(folio); if (folio_test_large(folio) && !folio_test_hugetlb(folio)) refs = folio_nr_pages(folio); folio_put_refs(folio, refs); } /** * filemap_remove_folio - Remove folio from page cache. * @folio: The folio. * * This must be called only on folios that are locked and have been * verified to be in the page cache. It will never put the folio into * the free list because the caller has a reference on the page. 
*/ void filemap_remove_folio(struct folio *folio) { struct address_space *mapping = folio->mapping; BUG_ON(!folio_test_locked(folio)); spin_lock(&mapping->host->i_lock); xa_lock_irq(&mapping->i_pages); __filemap_remove_folio(folio, NULL); xa_unlock_irq(&mapping->i_pages); if (mapping_shrinkable(mapping)) inode_add_lru(mapping->host); spin_unlock(&mapping->host->i_lock); filemap_free_folio(mapping, folio); } /* * page_cache_delete_batch - delete several folios from page cache * @mapping: the mapping to which folios belong * @fbatch: batch of folios to delete * * The function walks over mapping->i_pages and removes folios passed in * @fbatch from the mapping. The function expects @fbatch to be sorted * by page index and is optimised for it to be dense. * It tolerates holes in @fbatch (mapping entries at those indices are not * modified). * * The function expects the i_pages lock to be held. */ static void page_cache_delete_batch(struct address_space *mapping, struct folio_batch *fbatch) { XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index); long total_pages = 0; int i = 0; struct folio *folio; mapping_set_update(&xas, mapping); xas_for_each(&xas, folio, ULONG_MAX) { if (i >= folio_batch_count(fbatch)) break; /* A swap/dax/shadow entry got inserted? Skip it. */ if (xa_is_value(folio)) continue; /* * A page got inserted in our range? Skip it. We have our * pages locked so they are protected from being removed. * If we see a page whose index is higher than ours, it * means our page has been removed, which shouldn't be * possible because we're holding the PageLock. */ if (folio != fbatch->folios[i]) { VM_BUG_ON_FOLIO(folio->index > fbatch->folios[i]->index, folio); continue; } WARN_ON_ONCE(!folio_test_locked(folio)); folio->mapping = NULL; /* Leave folio->index set: truncation lookup relies on it */ i++; xas_store(&xas, NULL); total_pages += folio_nr_pages(folio); } mapping->nrpages -= total_pages; } void delete_from_page_cache_batch(struct address_space *mapping, struct folio_batch *fbatch) { int i; if (!folio_batch_count(fbatch)) return; spin_lock(&mapping->host->i_lock); xa_lock_irq(&mapping->i_pages); for (i = 0; i < folio_batch_count(fbatch); i++) { struct folio *folio = fbatch->folios[i]; trace_mm_filemap_delete_from_page_cache(folio); filemap_unaccount_folio(mapping, folio); } page_cache_delete_batch(mapping, fbatch); xa_unlock_irq(&mapping->i_pages); if (mapping_shrinkable(mapping)) inode_add_lru(mapping->host); spin_unlock(&mapping->host->i_lock); for (i = 0; i < folio_batch_count(fbatch); i++) filemap_free_folio(mapping, fbatch->folios[i]); } int filemap_check_errors(struct address_space *mapping) { int ret = 0; /* Check for outstanding write errors */ if (test_bit(AS_ENOSPC, &mapping->flags) && test_and_clear_bit(AS_ENOSPC, &mapping->flags)) ret = -ENOSPC; if (test_bit(AS_EIO, &mapping->flags) && test_and_clear_bit(AS_EIO, &mapping->flags)) ret = -EIO; return ret; } EXPORT_SYMBOL(filemap_check_errors); static int filemap_check_and_keep_errors(struct address_space *mapping) { /* Check for outstanding write errors */ if (test_bit(AS_EIO, &mapping->flags)) return -EIO; if (test_bit(AS_ENOSPC, &mapping->flags)) return -ENOSPC; return 0; } /** * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range * @mapping: address space structure to write * @wbc: the writeback_control controlling the writeout * * Call writepages on the mapping using the provided wbc to control the * writeout. * * Return: %0 on success, negative error code otherwise. 
*/ int filemap_fdatawrite_wbc(struct address_space *mapping, struct writeback_control *wbc) { int ret; if (!mapping_can_writeback(mapping) || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) return 0; wbc_attach_fdatawrite_inode(wbc, mapping->host); ret = do_writepages(mapping, wbc); wbc_detach_inode(wbc); return ret; } EXPORT_SYMBOL(filemap_fdatawrite_wbc); /** * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range * @mapping: address space structure to write * @start: offset in bytes where the range starts * @end: offset in bytes where the range ends (inclusive) * @sync_mode: enable synchronous operation * * Start writeback against all of a mapping's dirty pages that lie * within the byte offsets <start, end> inclusive. * * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as * opposed to a regular memory cleansing writeback. The difference between * these two operations is that if a dirty page/buffer is encountered, it must * be waited upon, and not just skipped over. * * Return: %0 on success, negative error code otherwise. */ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end, int sync_mode) { struct writeback_control wbc = { .sync_mode = sync_mode, .nr_to_write = LONG_MAX, .range_start = start, .range_end = end, }; return filemap_fdatawrite_wbc(mapping, &wbc); } static inline int __filemap_fdatawrite(struct address_space *mapping, int sync_mode) { return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); } int filemap_fdatawrite(struct address_space *mapping) { return __filemap_fdatawrite(mapping, WB_SYNC_ALL); } EXPORT_SYMBOL(filemap_fdatawrite); int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end) { return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); } EXPORT_SYMBOL(filemap_fdatawrite_range); /** * filemap_flush - mostly a non-blocking flush * @mapping: target address_space * * This is a mostly non-blocking flush. Not suitable for data-integrity * purposes - I/O may not be started against all dirty pages. * * Return: %0 on success, negative error code otherwise. */ int filemap_flush(struct address_space *mapping) { return __filemap_fdatawrite(mapping, WB_SYNC_NONE); } EXPORT_SYMBOL(filemap_flush); /** * filemap_range_has_page - check if a page exists in range. * @mapping: address space within which to check * @start_byte: offset in bytes where the range starts * @end_byte: offset in bytes where the range ends (inclusive) * * Find at least one page in the range supplied, usually used to check if * direct writing in this range will trigger a writeback. * * Return: %true if at least one page exists in the specified range, * %false otherwise. */ bool filemap_range_has_page(struct address_space *mapping, loff_t start_byte, loff_t end_byte) { struct folio *folio; XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); pgoff_t max = end_byte >> PAGE_SHIFT; if (end_byte < start_byte) return false; rcu_read_lock(); for (;;) { folio = xas_find(&xas, max); if (xas_retry(&xas, folio)) continue; /* Shadow entries don't count */ if (xa_is_value(folio)) continue; /* * We don't need to try to pin this page; we're about to * release the RCU lock anyway. It is enough to know that * there was a page here recently. 
*/ break; } rcu_read_unlock(); return folio != NULL; } EXPORT_SYMBOL(filemap_range_has_page); static void __filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, loff_t end_byte) { pgoff_t index = start_byte >> PAGE_SHIFT; pgoff_t end = end_byte >> PAGE_SHIFT; struct folio_batch fbatch; unsigned nr_folios; folio_batch_init(&fbatch); while (index <= end) { unsigned i; nr_folios = filemap_get_folios_tag(mapping, &index, end, PAGECACHE_TAG_WRITEBACK, &fbatch); if (!nr_folios) break; for (i = 0; i < nr_folios; i++) { struct folio *folio = fbatch.folios[i]; folio_wait_writeback(folio); folio_clear_error(folio); } folio_batch_release(&fbatch); cond_resched(); } } /** * filemap_fdatawait_range - wait for writeback to complete * @mapping: address space structure to wait for * @start_byte: offset in bytes where the range starts * @end_byte: offset in bytes where the range ends (inclusive) * * Walk the list of under-writeback pages of the given address space * in the given range and wait for all of them. Check error status of * the address space and return it. * * Since the error status of the address space is cleared by this function, * callers are responsible for checking the return value and handling and/or * reporting the error. * * Return: error status of the address space. */ int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, loff_t end_byte) { __filemap_fdatawait_range(mapping, start_byte, end_byte); return filemap_check_errors(mapping); } EXPORT_SYMBOL(filemap_fdatawait_range); /** * filemap_fdatawait_range_keep_errors - wait for writeback to complete * @mapping: address space structure to wait for * @start_byte: offset in bytes where the range starts * @end_byte: offset in bytes where the range ends (inclusive) * * Walk the list of under-writeback pages of the given address space in the * given range and wait for all of them. Unlike filemap_fdatawait_range(), * this function does not clear error status of the address space. * * Use this function if callers don't handle errors themselves. Expected * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2), * fsfreeze(8) */ int filemap_fdatawait_range_keep_errors(struct address_space *mapping, loff_t start_byte, loff_t end_byte) { __filemap_fdatawait_range(mapping, start_byte, end_byte); return filemap_check_and_keep_errors(mapping); } EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors); /** * file_fdatawait_range - wait for writeback to complete * @file: file pointing to address space structure to wait for * @start_byte: offset in bytes where the range starts * @end_byte: offset in bytes where the range ends (inclusive) * * Walk the list of under-writeback pages of the address space that file * refers to, in the given range and wait for all of them. Check error * status of the address space vs. the file->f_wb_err cursor and return it. * * Since the error status of the file is advanced by this function, * callers are responsible for checking the return value and handling and/or * reporting the error. * * Return: error status of the address space vs. the file->f_wb_err cursor. 
*/ int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte) { struct address_space *mapping = file->f_mapping; __filemap_fdatawait_range(mapping, start_byte, end_byte); return file_check_and_advance_wb_err(file); } EXPORT_SYMBOL(file_fdatawait_range); /** * filemap_fdatawait_keep_errors - wait for writeback without clearing errors * @mapping: address space structure to wait for * * Walk the list of under-writeback pages of the given address space * and wait for all of them. Unlike filemap_fdatawait(), this function * does not clear error status of the address space. * * Use this function if callers don't handle errors themselves. Expected * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2), * fsfreeze(8) * * Return: error status of the address space. */ int filemap_fdatawait_keep_errors(struct address_space *mapping) { __filemap_fdatawait_range(mapping, 0, LLONG_MAX); return filemap_check_and_keep_errors(mapping); } EXPORT_SYMBOL(filemap_fdatawait_keep_errors); /* Returns true if writeback might be needed or already in progress. */ static bool mapping_needs_writeback(struct address_space *mapping) { return mapping->nrpages; } bool filemap_range_has_writeback(struct address_space *mapping, loff_t start_byte, loff_t end_byte) { XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); pgoff_t max = end_byte >> PAGE_SHIFT; struct folio *folio; if (end_byte < start_byte) return false; rcu_read_lock(); xas_for_each(&xas, folio, max) { if (xas_retry(&xas, folio)) continue; if (xa_is_value(folio)) continue; if (folio_test_dirty(folio) || folio_test_locked(folio) || folio_test_writeback(folio)) break; } rcu_read_unlock(); return folio != NULL; } EXPORT_SYMBOL_GPL(filemap_range_has_writeback); /** * filemap_write_and_wait_range - write out & wait on a file range * @mapping: the address_space for the pages * @lstart: offset in bytes where the range starts * @lend: offset in bytes where the range ends (inclusive) * * Write out and wait upon file offsets lstart->lend, inclusive. * * Note that @lend is inclusive (describes the last byte to be written) so * that this function can be used to write to the very end-of-file (end = -1). * * Return: error status of the address space. */ int filemap_write_and_wait_range(struct address_space *mapping, loff_t lstart, loff_t lend) { int err = 0, err2; if (lend < lstart) return 0; if (mapping_needs_writeback(mapping)) { err = __filemap_fdatawrite_range(mapping, lstart, lend, WB_SYNC_ALL); /* * Even if the above returned error, the pages may be * written partially (e.g. -ENOSPC), so we wait for it. * But the -EIO is special case, it may indicate the worst * thing (e.g. bug) happened, so we avoid waiting for it. */ if (err != -EIO) __filemap_fdatawait_range(mapping, lstart, lend); } err2 = filemap_check_errors(mapping); if (!err) err = err2; return err; } EXPORT_SYMBOL(filemap_write_and_wait_range); void __filemap_set_wb_err(struct address_space *mapping, int err) { errseq_t eseq = errseq_set(&mapping->wb_err, err); trace_filemap_set_wb_err(mapping, eseq); } EXPORT_SYMBOL(__filemap_set_wb_err); /** * file_check_and_advance_wb_err - report wb error (if any) that was previously * and advance wb_err to current one * @file: struct file on which the error is being reported * * When userland calls fsync (or something like nfsd does the equivalent), we * want to report any writeback errors that occurred since the last fsync (or * since the file was opened if there haven't been any). * * Grab the wb_err from the mapping. 
If it matches what we have in the file, * then just quickly return 0. The file is all caught up. * * If it doesn't match, then take the mapping value, set the "seen" flag in * it and try to swap it into place. If it works, or another task beat us * to it with the new value, then update the f_wb_err and return the error * portion. The error at this point must be reported via proper channels * (a'la fsync, or NFS COMMIT operation, etc.). * * While we handle mapping->wb_err with atomic operations, the f_wb_err * value is protected by the f_lock since we must ensure that it reflects * the latest value swapped in for this file descriptor. * * Return: %0 on success, negative error code otherwise. */ int file_check_and_advance_wb_err(struct file *file) { int err = 0; errseq_t old = READ_ONCE(file->f_wb_err); struct address_space *mapping = file->f_mapping; /* Locklessly handle the common case where nothing has changed */ if (errseq_check(&mapping->wb_err, old)) { /* Something changed, must use slow path */ spin_lock(&file->f_lock); old = file->f_wb_err; err = errseq_check_and_advance(&mapping->wb_err, &file->f_wb_err); trace_file_check_and_advance_wb_err(file, old); spin_unlock(&file->f_lock); } /* * We're mostly using this function as a drop in replacement for * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect * that the legacy code would have had on these flags. */ clear_bit(AS_EIO, &mapping->flags); clear_bit(AS_ENOSPC, &mapping->flags); return err; } EXPORT_SYMBOL(file_check_and_advance_wb_err); /** * file_write_and_wait_range - write out & wait on a file range * @file: file pointing to address_space with pages * @lstart: offset in bytes where the range starts * @lend: offset in bytes where the range ends (inclusive) * * Write out and wait upon file offsets lstart->lend, inclusive. * * Note that @lend is inclusive (describes the last byte to be written) so * that this function can be used to write to the very end-of-file (end = -1). * * After writing out and waiting on the data, we check and advance the * f_wb_err cursor to the latest value, and return any errors detected there. * * Return: %0 on success, negative error code otherwise. */ int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend) { int err = 0, err2; struct address_space *mapping = file->f_mapping; if (lend < lstart) return 0; if (mapping_needs_writeback(mapping)) { err = __filemap_fdatawrite_range(mapping, lstart, lend, WB_SYNC_ALL); /* See comment of filemap_write_and_wait() */ if (err != -EIO) __filemap_fdatawait_range(mapping, lstart, lend); } err2 = file_check_and_advance_wb_err(file); if (!err) err = err2; return err; } EXPORT_SYMBOL(file_write_and_wait_range); /** * replace_page_cache_folio - replace a pagecache folio with a new one * @old: folio to be replaced * @new: folio to replace with * * This function replaces a folio in the pagecache with a new one. On * success it acquires the pagecache reference for the new folio and * drops it for the old folio. Both the old and new folios must be * locked. This function does not add the new folio to the LRU, the * caller must do that. * * The remove + add is atomic. This function cannot fail. 
*/ void replace_page_cache_folio(struct folio *old, struct folio *new) { struct address_space *mapping = old->mapping; void (*free_folio)(struct folio *) = mapping->a_ops->free_folio; pgoff_t offset = old->index; XA_STATE(xas, &mapping->i_pages, offset); VM_BUG_ON_FOLIO(!folio_test_locked(old), old); VM_BUG_ON_FOLIO(!folio_test_locked(new), new); VM_BUG_ON_FOLIO(new->mapping, new); folio_get(new); new->mapping = mapping; new->index = offset; mem_cgroup_migrate(old, new); xas_lock_irq(&xas); xas_store(&xas, new); old->mapping = NULL; /* hugetlb pages do not participate in page cache accounting. */ if (!folio_test_hugetlb(old)) __lruvec_stat_sub_folio(old, NR_FILE_PAGES); if (!folio_test_hugetlb(new)) __lruvec_stat_add_folio(new, NR_FILE_PAGES); if (folio_test_swapbacked(old)) __lruvec_stat_sub_folio(old, NR_SHMEM); if (folio_test_swapbacked(new)) __lruvec_stat_add_folio(new, NR_SHMEM); xas_unlock_irq(&xas); if (free_folio) free_folio(old); folio_put(old); } EXPORT_SYMBOL_GPL(replace_page_cache_folio); noinline int __filemap_add_folio(struct address_space *mapping, struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp) { XA_STATE(xas, &mapping->i_pages, index); int huge = folio_test_hugetlb(folio); bool charged = false; long nr = 1; VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio); mapping_set_update(&xas, mapping); if (!huge) { int error = mem_cgroup_charge(folio, NULL, gfp); VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio); if (error) return error; charged = true; xas_set_order(&xas, index, folio_order(folio)); nr = folio_nr_pages(folio); } gfp &= GFP_RECLAIM_MASK; folio_ref_add(folio, nr); folio->mapping = mapping; folio->index = xas.xa_index; do { unsigned int order = xa_get_order(xas.xa, xas.xa_index); void *entry, *old = NULL; if (order > folio_order(folio)) xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index), order, gfp); xas_lock_irq(&xas); xas_for_each_conflict(&xas, entry) { old = entry; if (!xa_is_value(entry)) { xas_set_err(&xas, -EEXIST); goto unlock; } } if (old) { if (shadowp) *shadowp = old; /* entry may have been split before we acquired lock */ order = xa_get_order(xas.xa, xas.xa_index); if (order > folio_order(folio)) { /* How to handle large swap entries? */ BUG_ON(shmem_mapping(mapping)); xas_split(&xas, old, order); xas_reset(&xas); } } xas_store(&xas, folio); if (xas_error(&xas)) goto unlock; mapping->nrpages += nr; /* hugetlb pages do not participate in page cache accounting */ if (!huge) { __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr); if (folio_test_pmd_mappable(folio)) __lruvec_stat_mod_folio(folio, NR_FILE_THPS, nr); } unlock: xas_unlock_irq(&xas); } while (xas_nomem(&xas, gfp)); if (xas_error(&xas)) goto error; trace_mm_filemap_add_to_page_cache(folio); return 0; error: if (charged) mem_cgroup_uncharge(folio); folio->mapping = NULL; /* Leave page->index set: truncation relies upon it */ folio_put_refs(folio, nr); return xas_error(&xas); } ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO); int filemap_add_folio(struct address_space *mapping, struct folio *folio, pgoff_t index, gfp_t gfp) { void *shadow = NULL; int ret; __folio_set_locked(folio); ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow); if (unlikely(ret)) __folio_clear_locked(folio); else { /* * The folio might have been evicted from cache only * recently, in which case it should be activated like * any other repeatedly accessed folio. 
* The exception is folios getting rewritten; evicting other * data from the working set, only to cache data that will * get overwritten with something else, is a waste of memory. */ WARN_ON_ONCE(folio_test_active(folio)); if (!(gfp & __GFP_WRITE) && shadow) workingset_refault(folio, shadow); folio_add_lru(folio); } return ret; } EXPORT_SYMBOL_GPL(filemap_add_folio); #ifdef CONFIG_NUMA struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order) { int n; struct folio *folio; if (cpuset_do_page_mem_spread()) { unsigned int cpuset_mems_cookie; do { cpuset_mems_cookie = read_mems_allowed_begin(); n = cpuset_mem_spread_node(); folio = __folio_alloc_node(gfp, order, n); } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie)); return folio; } return folio_alloc(gfp, order); } EXPORT_SYMBOL(filemap_alloc_folio); #endif /* * filemap_invalidate_lock_two - lock invalidate_lock for two mappings * * Lock exclusively invalidate_lock of any passed mapping that is not NULL. * * @mapping1: the first mapping to lock * @mapping2: the second mapping to lock */ void filemap_invalidate_lock_two(struct address_space *mapping1, struct address_space *mapping2) { if (mapping1 > mapping2) swap(mapping1, mapping2); if (mapping1) down_write(&mapping1->invalidate_lock); if (mapping2 && mapping1 != mapping2) down_write_nested(&mapping2->invalidate_lock, 1); } EXPORT_SYMBOL(filemap_invalidate_lock_two); /* * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings * * Unlock exclusive invalidate_lock of any passed mapping that is not NULL. * * @mapping1: the first mapping to unlock * @mapping2: the second mapping to unlock */ void filemap_invalidate_unlock_two(struct address_space *mapping1, struct address_space *mapping2) { if (mapping1) up_write(&mapping1->invalidate_lock); if (mapping2 && mapping1 != mapping2) up_write(&mapping2->invalidate_lock); } EXPORT_SYMBOL(filemap_invalidate_unlock_two); /* * In order to wait for pages to become available there must be * waitqueues associated with pages. By using a hash table of * waitqueues where the bucket discipline is to maintain all * waiters on the same queue and wake all when any of the pages * become available, and for the woken contexts to check to be * sure the appropriate page became available, this saves space * at a cost of "thundering herd" phenomena during rare hash * collisions. */ #define PAGE_WAIT_TABLE_BITS 8 #define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS) static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned; static wait_queue_head_t *folio_waitqueue(struct folio *folio) { return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)]; } void __init pagecache_init(void) { int i; for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++) init_waitqueue_head(&folio_wait_table[i]); page_writeback_init(); } /* * The page wait code treats the "wait->flags" somewhat unusually, because * we have multiple different kinds of waits, not just the usual "exclusive" * one. * * We have: * * (a) no special bits set: * * We're just waiting for the bit to be released, and when a waker * calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up, * and remove it from the wait queue. * * Simple and straightforward. * * (b) WQ_FLAG_EXCLUSIVE: * * The waiter is waiting to get the lock, and only one waiter should * be woken up to avoid any thundering herd behavior. We'll set the * WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue. * * This is the traditional exclusive wait. 
* * (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM: * * The waiter is waiting to get the bit, and additionally wants the * lock to be transferred to it for fair lock behavior. If the lock * cannot be taken, we stop walking the wait queue without waking * the waiter. * * This is the "fair lock handoff" case, and in addition to setting * WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see * that it now has the lock. */ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg) { unsigned int flags; struct wait_page_key *key = arg; struct wait_page_queue *wait_page = container_of(wait, struct wait_page_queue, wait); if (!wake_page_match(wait_page, key)) return 0; /* * If it's a lock handoff wait, we get the bit for it, and * stop walking (and do not wake it up) if we can't. */ flags = wait->flags; if (flags & WQ_FLAG_EXCLUSIVE) { if (test_bit(key->bit_nr, &key->folio->flags)) return -1; if (flags & WQ_FLAG_CUSTOM) { if (test_and_set_bit(key->bit_nr, &key->folio->flags)) return -1; flags |= WQ_FLAG_DONE; } } /* * We are holding the wait-queue lock, but the waiter that * is waiting for this will be checking the flags without * any locking. * * So update the flags atomically, and wake up the waiter * afterwards to avoid any races. This store-release pairs * with the load-acquire in folio_wait_bit_common(). */ smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN); wake_up_state(wait->private, mode); /* * Ok, we have successfully done what we're waiting for, * and we can unconditionally remove the wait entry. * * Note that this pairs with the "finish_wait()" in the * waiter, and has to be the absolute last thing we do. * After this list_del_init(&wait->entry) the wait entry * might be de-allocated and the process might even have * exited. */ list_del_init_careful(&wait->entry); return (flags & WQ_FLAG_EXCLUSIVE) != 0; } static void folio_wake_bit(struct folio *folio, int bit_nr) { wait_queue_head_t *q = folio_waitqueue(folio); struct wait_page_key key; unsigned long flags; wait_queue_entry_t bookmark; key.folio = folio; key.bit_nr = bit_nr; key.page_match = 0; bookmark.flags = 0; bookmark.private = NULL; bookmark.func = NULL; INIT_LIST_HEAD(&bookmark.entry); spin_lock_irqsave(&q->lock, flags); __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark); while (bookmark.flags & WQ_FLAG_BOOKMARK) { /* * Take a breather from holding the lock, * allow pages that finish wake up asynchronously * to acquire the lock and remove themselves * from wait queue */ spin_unlock_irqrestore(&q->lock, flags); cpu_relax(); spin_lock_irqsave(&q->lock, flags); __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark); } /* * It's possible to miss clearing waiters here, when we woke our page * waiters, but the hashed waitqueue has waiters for other pages on it. * That's okay, it's a rare case. The next waker will clear it. * * Note that, depending on the page pool (buddy, hugetlb, ZONE_DEVICE, * other), the flag may be cleared in the course of freeing the page; * but that is not required for correctness. */ if (!waitqueue_active(q) || !key.page_match) folio_clear_waiters(folio); spin_unlock_irqrestore(&q->lock, flags); } static void folio_wake(struct folio *folio, int bit) { if (!folio_test_waiters(folio)) return; folio_wake_bit(folio, bit); } /* * A choice of three behaviors for folio_wait_bit_common(): */ enum behavior { EXCLUSIVE, /* Hold ref to page and take the bit when woken, like * __folio_lock() waiting on then setting PG_locked. 
*/ SHARED, /* Hold ref to page and check the bit when woken, like * folio_wait_writeback() waiting on PG_writeback. */ DROP, /* Drop ref to page before wait, no check when woken, * like folio_put_wait_locked() on PG_locked. */ }; /* * Attempt to check (or get) the folio flag, and mark us done * if successful. */ static inline bool folio_trylock_flag(struct folio *folio, int bit_nr, struct wait_queue_entry *wait) { if (wait->flags & WQ_FLAG_EXCLUSIVE) { if (test_and_set_bit(bit_nr, &folio->flags)) return false; } else if (test_bit(bit_nr, &folio->flags)) return false; wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE; return true; } /* How many times do we accept lock stealing from under a waiter? */ int sysctl_page_lock_unfairness = 5; static inline int folio_wait_bit_common(struct folio *folio, int bit_nr, int state, enum behavior behavior) { wait_queue_head_t *q = folio_waitqueue(folio); int unfairness = sysctl_page_lock_unfairness; struct wait_page_queue wait_page; wait_queue_entry_t *wait = &wait_page.wait; bool thrashing = false; unsigned long pflags; bool in_thrashing; if (bit_nr == PG_locked && !folio_test_uptodate(folio) && folio_test_workingset(folio)) { delayacct_thrashing_start(&in_thrashing); psi_memstall_enter(&pflags); thrashing = true; } init_wait(wait); wait->func = wake_page_function; wait_page.folio = folio; wait_page.bit_nr = bit_nr; repeat: wait->flags = 0; if (behavior == EXCLUSIVE) { wait->flags = WQ_FLAG_EXCLUSIVE; if (--unfairness < 0) wait->flags |= WQ_FLAG_CUSTOM; } /* * Do one last check whether we can get the * page bit synchronously. * * Do the folio_set_waiters() marking before that * to let any waker we _just_ missed know they * need to wake us up (otherwise they'll never * even go to the slow case that looks at the * page queue), and add ourselves to the wait * queue if we need to sleep. * * This part needs to be done under the queue * lock to avoid races. */ spin_lock_irq(&q->lock); folio_set_waiters(folio); if (!folio_trylock_flag(folio, bit_nr, wait)) __add_wait_queue_entry_tail(q, wait); spin_unlock_irq(&q->lock); /* * From now on, all the logic will be based on * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to * see whether the page bit testing has already * been done by the wake function. * * We can drop our reference to the folio. */ if (behavior == DROP) folio_put(folio); /* * Note that until the "finish_wait()", or until * we see the WQ_FLAG_WOKEN flag, we need to * be very careful with the 'wait->flags', because * we may race with a waker that sets them. */ for (;;) { unsigned int flags; set_current_state(state); /* Loop until we've been woken or interrupted */ flags = smp_load_acquire(&wait->flags); if (!(flags & WQ_FLAG_WOKEN)) { if (signal_pending_state(state, current)) break; io_schedule(); continue; } /* If we were non-exclusive, we're done */ if (behavior != EXCLUSIVE) break; /* If the waker got the lock for us, we're done */ if (flags & WQ_FLAG_DONE) break; /* * Otherwise, if we're getting the lock, we need to * try to get it ourselves. * * And if that fails, we'll have to retry this all. */ if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0)))) goto repeat; wait->flags |= WQ_FLAG_DONE; break; } /* * If a signal happened, this 'finish_wait()' may remove the last * waiter from the wait-queues, but the folio waiters bit will remain * set. That's ok. The next wakeup will take care of it, and trying * to do it here would be difficult and prone to races. 
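 *
 * As a worked example of the lock-stealing tolerance above (assuming the
 * default sysctl_page_lock_unfairness value of 5): an EXCLUSIVE waiter
 * that keeps finding the lock stolen after being woken goes back through
 * "repeat" with plain WQ_FLAG_EXCLUSIVE up to five times; once the
 * counter goes negative it also sets WQ_FLAG_CUSTOM, asking
 * wake_page_function() to hand the lock over to it directly.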
*/ finish_wait(q, wait); if (thrashing) { delayacct_thrashing_end(&in_thrashing); psi_memstall_leave(&pflags); } /* * NOTE! The wait->flags weren't stable until we've done the * 'finish_wait()', and we could have exited the loop above due * to a signal, and had a wakeup event happen after the signal * test but before the 'finish_wait()'. * * So only after the finish_wait() can we reliably determine * if we got woken up or not, so we can now figure out the final * return value based on that state without races. * * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive * waiter, but an exclusive one requires WQ_FLAG_DONE. */ if (behavior == EXCLUSIVE) return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR; return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR; } #ifdef CONFIG_MIGRATION /** * migration_entry_wait_on_locked - Wait for a migration entry to be removed * @entry: migration swap entry. * @ptl: already locked ptl. This function will drop the lock. * * Wait for a migration entry referencing the given page to be removed. This is * equivalent to put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE) except * this can be called without taking a reference on the page. Instead this * should be called while holding the ptl for the migration entry referencing * the page. * * Returns after unlocking the ptl. * * This follows the same logic as folio_wait_bit_common() so see the comments * there. */ void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl) __releases(ptl) { struct wait_page_queue wait_page; wait_queue_entry_t *wait = &wait_page.wait; bool thrashing = false; unsigned long pflags; bool in_thrashing; wait_queue_head_t *q; struct folio *folio = page_folio(pfn_swap_entry_to_page(entry)); q = folio_waitqueue(folio); if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) { delayacct_thrashing_start(&in_thrashing); psi_memstall_enter(&pflags); thrashing = true; } init_wait(wait); wait->func = wake_page_function; wait_page.folio = folio; wait_page.bit_nr = PG_locked; wait->flags = 0; spin_lock_irq(&q->lock); folio_set_waiters(folio); if (!folio_trylock_flag(folio, PG_locked, wait)) __add_wait_queue_entry_tail(q, wait); spin_unlock_irq(&q->lock); /* * If a migration entry exists for the page the migration path must hold * a valid reference to the page, and it must take the ptl to remove the * migration entry. So the page is valid until the ptl is dropped. */ spin_unlock(ptl); for (;;) { unsigned int flags; set_current_state(TASK_UNINTERRUPTIBLE); /* Loop until we've been woken or interrupted */ flags = smp_load_acquire(&wait->flags); if (!(flags & WQ_FLAG_WOKEN)) { if (signal_pending_state(TASK_UNINTERRUPTIBLE, current)) break; io_schedule(); continue; } break; } finish_wait(q, wait); if (thrashing) { delayacct_thrashing_end(&in_thrashing); psi_memstall_leave(&pflags); } } #endif void folio_wait_bit(struct folio *folio, int bit_nr) { folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED); } EXPORT_SYMBOL(folio_wait_bit); int folio_wait_bit_killable(struct folio *folio, int bit_nr) { return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED); } EXPORT_SYMBOL(folio_wait_bit_killable); /** * folio_put_wait_locked - Drop a reference and wait for it to be unlocked * @folio: The folio to wait for. * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc). * * The caller should hold a reference on @folio. 
They expect the page to * become unlocked relatively soon, but do not wish to hold up migration * (for example) by holding the reference while waiting for the folio to * come unlocked. After this function returns, the caller should not * dereference @folio. * * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal. */ static int folio_put_wait_locked(struct folio *folio, int state) { return folio_wait_bit_common(folio, PG_locked, state, DROP); } /** * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue * @folio: Folio defining the wait queue of interest * @waiter: Waiter to add to the queue * * Add an arbitrary @waiter to the wait queue for the nominated @folio. */ void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter) { wait_queue_head_t *q = folio_waitqueue(folio); unsigned long flags; spin_lock_irqsave(&q->lock, flags); __add_wait_queue_entry_tail(q, waiter); folio_set_waiters(folio); spin_unlock_irqrestore(&q->lock, flags); } EXPORT_SYMBOL_GPL(folio_add_wait_queue); #ifndef clear_bit_unlock_is_negative_byte /* * PG_waiters is the high bit in the same byte as PG_lock. * * On x86 (and on many other architectures), we can clear PG_lock and * test the sign bit at the same time. But if the architecture does * not support that special operation, we just do this all by hand * instead. * * The read of PG_waiters has to be after (or concurrently with) PG_locked * being cleared, but a memory barrier should be unnecessary since it is * in the same byte as PG_locked. */ static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem) { clear_bit_unlock(nr, mem); /* smp_mb__after_atomic(); */ return test_bit(PG_waiters, mem); } #endif /** * folio_unlock - Unlock a locked folio. * @folio: The folio. * * Unlocks the folio and wakes up any thread sleeping on the page lock. * * Context: May be called from interrupt or process context. May not be * called from NMI context. */ void folio_unlock(struct folio *folio) { /* Bit 7 allows x86 to check the byte's sign bit */ BUILD_BUG_ON(PG_waiters != 7); BUILD_BUG_ON(PG_locked > 7); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio, 0))) folio_wake_bit(folio, PG_locked); } EXPORT_SYMBOL(folio_unlock); /** * folio_end_private_2 - Clear PG_private_2 and wake any waiters. * @folio: The folio. * * Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for * it. The folio reference held for PG_private_2 being set is released. * * This is, for example, used when a netfs folio is being written to a local * disk cache, thereby allowing writes to the cache for the same folio to be * serialised. */ void folio_end_private_2(struct folio *folio) { VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio); clear_bit_unlock(PG_private_2, folio_flags(folio, 0)); folio_wake_bit(folio, PG_private_2); folio_put(folio); } EXPORT_SYMBOL(folio_end_private_2); /** * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio. * @folio: The folio to wait on. * * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio. */ void folio_wait_private_2(struct folio *folio) { while (folio_test_private_2(folio)) folio_wait_bit(folio, PG_private_2); } EXPORT_SYMBOL(folio_wait_private_2); /** * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio. * @folio: The folio to wait on. 
* * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio or until a * fatal signal is received by the calling task. * * Return: * - 0 if successful. * - -EINTR if a fatal signal was encountered. */ int folio_wait_private_2_killable(struct folio *folio) { int ret = 0; while (folio_test_private_2(folio)) { ret = folio_wait_bit_killable(folio, PG_private_2); if (ret < 0) break; } return ret; } EXPORT_SYMBOL(folio_wait_private_2_killable); /** * folio_end_writeback - End writeback against a folio. * @folio: The folio. */ void folio_end_writeback(struct folio *folio) { /* * folio_test_clear_reclaim() could be used here but it is an * atomic operation and overkill in this particular case. Failing * to shuffle a folio marked for immediate reclaim is too mild * a gain to justify taking an atomic operation penalty at the * end of every folio writeback. */ if (folio_test_reclaim(folio)) { folio_clear_reclaim(folio); folio_rotate_reclaimable(folio); } /* * Writeback does not hold a folio reference of its own, relying * on truncation to wait for the clearing of PG_writeback. * But here we must make sure that the folio is not freed and * reused before the folio_wake(). */ folio_get(folio); if (!__folio_end_writeback(folio)) BUG(); smp_mb__after_atomic(); folio_wake(folio, PG_writeback); acct_reclaim_writeback(folio); folio_put(folio); } EXPORT_SYMBOL(folio_end_writeback); /** * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it. * @folio: The folio to lock */ void __folio_lock(struct folio *folio) { folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE, EXCLUSIVE); } EXPORT_SYMBOL(__folio_lock); int __folio_lock_killable(struct folio *folio) { return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE, EXCLUSIVE); } EXPORT_SYMBOL_GPL(__folio_lock_killable); static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait) { struct wait_queue_head *q = folio_waitqueue(folio); int ret = 0; wait->folio = folio; wait->bit_nr = PG_locked; spin_lock_irq(&q->lock); __add_wait_queue_entry_tail(q, &wait->wait); folio_set_waiters(folio); ret = !folio_trylock(folio); /* * If we were successful now, we know we're still on the * waitqueue as we're still under the lock. This means it's * safe to remove and return success, we know the callback * isn't going to trigger. */ if (!ret) __remove_wait_queue(q, &wait->wait); else ret = -EIOCBQUEUED; spin_unlock_irq(&q->lock); return ret; } /* * Return values: * 0 - folio is locked. * non-zero - folio is not locked. * mmap_lock or per-VMA lock has been released (mmap_read_unlock() or * vma_end_read()), unless flags had both FAULT_FLAG_ALLOW_RETRY and * FAULT_FLAG_RETRY_NOWAIT set, in which case the lock is still held. * * If neither ALLOW_RETRY nor KILLABLE are set, will always return 0 * with the folio locked and the mmap_lock/per-VMA lock is left unperturbed. */ vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf) { unsigned int flags = vmf->flags; if (fault_flag_allow_retry_first(flags)) { /* * CAUTION! In this case, mmap_lock/per-VMA lock is not * released even though returning VM_FAULT_RETRY. 
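 *
 * To spell out what a fault handler sees (an illustrative sketch of the
 * calling contract, not a verbatim caller):
 *
 *	ret = __folio_lock_or_retry(folio, vmf);
 *	if (ret) {
 *		return ret;	VM_FAULT_RETRY; except in the
 *				RETRY_NOWAIT case the mmap_lock or
 *				per-VMA lock has already been released
 *	}
 *	... here the folio is locked and the fault lock is still held ...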
*/ if (flags & FAULT_FLAG_RETRY_NOWAIT) return VM_FAULT_RETRY; release_fault_lock(vmf); if (flags & FAULT_FLAG_KILLABLE) folio_wait_locked_killable(folio); else folio_wait_locked(folio); return VM_FAULT_RETRY; } if (flags & FAULT_FLAG_KILLABLE) { bool ret; ret = __folio_lock_killable(folio); if (ret) { release_fault_lock(vmf); return VM_FAULT_RETRY; } } else { __folio_lock(folio); } return 0; } /** * page_cache_next_miss() - Find the next gap in the page cache. * @mapping: Mapping. * @index: Index. * @max_scan: Maximum range to search. * * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the * gap with the lowest index. * * This function may be called under the rcu_read_lock. However, this will * not atomically search a snapshot of the cache at a single point in time. * For example, if a gap is created at index 5, then subsequently a gap is * created at index 10, page_cache_next_miss covering both indices may * return 10 if called under the rcu_read_lock. * * Return: The index of the gap if found, otherwise an index outside the * range specified (in which case 'return - index >= max_scan' will be true). * In the rare case of index wrap-around, 0 will be returned. */ pgoff_t page_cache_next_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan) { XA_STATE(xas, &mapping->i_pages, index); while (max_scan--) { void *entry = xas_next(&xas); if (!entry || xa_is_value(entry)) break; if (xas.xa_index == 0) break; } return xas.xa_index; } EXPORT_SYMBOL(page_cache_next_miss); /** * page_cache_prev_miss() - Find the previous gap in the page cache. * @mapping: Mapping. * @index: Index. * @max_scan: Maximum range to search. * * Search the range [max(index - max_scan + 1, 0), index] for the * gap with the highest index. * * This function may be called under the rcu_read_lock. However, this will * not atomically search a snapshot of the cache at a single point in time. * For example, if a gap is created at index 10, then subsequently a gap is * created at index 5, page_cache_prev_miss() covering both indices may * return 5 if called under the rcu_read_lock. * * Return: The index of the gap if found, otherwise an index outside the * range specified (in which case 'index - return >= max_scan' will be true). * In the rare case of wrap-around, ULONG_MAX will be returned. */ pgoff_t page_cache_prev_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan) { XA_STATE(xas, &mapping->i_pages, index); while (max_scan--) { void *entry = xas_prev(&xas); if (!entry || xa_is_value(entry)) break; if (xas.xa_index == ULONG_MAX) break; } return xas.xa_index; } EXPORT_SYMBOL(page_cache_prev_miss); /* * Lockless page cache protocol: * On the lookup side: * 1. Load the folio from i_pages * 2. Increment the refcount if it's not zero * 3. If the folio is not found by xas_reload(), put the refcount and retry * * On the removal side: * A. Freeze the page (by zeroing the refcount if nobody else has a reference) * B. Remove the page from i_pages * C. Return the page to the page allocator * * This means that any page may have its reference count temporarily * increased by a speculative page cache (or fast GUP) lookup as it can * be allocated by another user before the RCU grace period expires. * Because the refcount temporarily acquired here may end up being the * last refcount on the page, any page allocation must be freeable by * folio_put(). */ /* * filemap_get_entry - Get a page cache entry. * @mapping: the address_space to search * @index: The page cache index. 
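 *
 * (This is the lookup side of the lockless protocol described above:
 * load the entry, take a speculative reference with folio_try_get_rcu(),
 * then use xas_reload() to confirm the folio is still what the tree
 * points at, and retry from scratch if it is not.)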
* * Looks up the page cache entry at @mapping & @index. If it is a folio, * it is returned with an increased refcount. If it is a shadow entry * of a previously evicted folio, or a swap entry from shmem/tmpfs, * it is returned without further action. * * Return: The folio, swap or shadow entry, %NULL if nothing is found. */ void *filemap_get_entry(struct address_space *mapping, pgoff_t index) { XA_STATE(xas, &mapping->i_pages, index); struct folio *folio; rcu_read_lock(); repeat: xas_reset(&xas); folio = xas_load(&xas); if (xas_retry(&xas, folio)) goto repeat; /* * A shadow entry of a recently evicted page, or a swap entry from * shmem/tmpfs. Return it without attempting to raise page count. */ if (!folio || xa_is_value(folio)) goto out; if (!folio_try_get_rcu(folio)) goto repeat; if (unlikely(folio != xas_reload(&xas))) { folio_put(folio); goto repeat; } out: rcu_read_unlock(); return folio; } /** * __filemap_get_folio - Find and get a reference to a folio. * @mapping: The address_space to search. * @index: The page index. * @fgp_flags: %FGP flags modify how the folio is returned. * @gfp: Memory allocation flags to use if %FGP_CREAT is specified. * * Looks up the page cache entry at @mapping & @index. * * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even * if the %GFP flags specified for %FGP_CREAT are atomic. * * If this function returns a folio, it is returned with an increased refcount. * * Return: The found folio or an ERR_PTR() otherwise. */ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, fgf_t fgp_flags, gfp_t gfp) { struct folio *folio; repeat: folio = filemap_get_entry(mapping, index); if (xa_is_value(folio)) folio = NULL; if (!folio) goto no_page; if (fgp_flags & FGP_LOCK) { if (fgp_flags & FGP_NOWAIT) { if (!folio_trylock(folio)) { folio_put(folio); return ERR_PTR(-EAGAIN); } } else { folio_lock(folio); } /* Has the page been truncated? */ if (unlikely(folio->mapping != mapping)) { folio_unlock(folio); folio_put(folio); goto repeat; } VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio); } if (fgp_flags & FGP_ACCESSED) folio_mark_accessed(folio); else if (fgp_flags & FGP_WRITE) { /* Clear idle flag for buffer write */ if (folio_test_idle(folio)) folio_clear_idle(folio); } if (fgp_flags & FGP_STABLE) folio_wait_stable(folio); no_page: if (!folio && (fgp_flags & FGP_CREAT)) { unsigned order = FGF_GET_ORDER(fgp_flags); int err; if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping)) gfp |= __GFP_WRITE; if (fgp_flags & FGP_NOFS) gfp &= ~__GFP_FS; if (fgp_flags & FGP_NOWAIT) { gfp &= ~GFP_KERNEL; gfp |= GFP_NOWAIT | __GFP_NOWARN; } if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP)))) fgp_flags |= FGP_LOCK; if (!mapping_large_folio_support(mapping)) order = 0; if (order > MAX_PAGECACHE_ORDER) order = MAX_PAGECACHE_ORDER; /* If we're not aligned, allocate a smaller folio */ if (index & ((1UL << order) - 1)) order = __ffs(index); do { gfp_t alloc_gfp = gfp; err = -ENOMEM; if (order == 1) order = 0; if (order > 0) alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN; folio = filemap_alloc_folio(alloc_gfp, order); if (!folio) continue; /* Init accessed so avoid atomic mark_page_accessed later */ if (fgp_flags & FGP_ACCESSED) __folio_set_referenced(folio); err = filemap_add_folio(mapping, folio, index, gfp); if (!err) break; folio_put(folio); folio = NULL; } while (order-- > 0); if (err == -EEXIST) goto repeat; if (err) return ERR_PTR(err); /* * filemap_add_folio locks the page, and for mmap * we expect an unlocked page. 
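 *
 * For reference, a minimal caller-side sketch of this interface (an
 * illustrative example, not a caller taken from this file):
 *
 *	folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT,
 *				    mapping_gfp_mask(mapping));
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *	... the folio is locked with an elevated refcount here ...
 *	folio_unlock(folio);
 *	folio_put(folio);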
*/ if (folio && (fgp_flags & FGP_FOR_MMAP)) folio_unlock(folio); } if (!folio) return ERR_PTR(-ENOENT); return folio; } EXPORT_SYMBOL(__filemap_get_folio); static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max, xa_mark_t mark) { struct folio *folio; retry: if (mark == XA_PRESENT) folio = xas_find(xas, max); else folio = xas_find_marked(xas, max, mark); if (xas_retry(xas, folio)) goto retry; /* * A shadow entry of a recently evicted page, a swap * entry from shmem/tmpfs or a DAX entry. Return it * without attempting to raise page count. */ if (!folio || xa_is_value(folio)) return folio; if (!folio_try_get_rcu(folio)) goto reset; if (unlikely(folio != xas_reload(xas))) { folio_put(folio); goto reset; } return folio; reset: xas_reset(xas); goto retry; } /** * find_get_entries - gang pagecache lookup * @mapping: The address_space to search * @start: The starting page cache index * @end: The final page index (inclusive). * @fbatch: Where the resulting entries are placed. * @indices: The cache indices corresponding to the entries in @entries * * find_get_entries() will search for and return a batch of entries in * the mapping. The entries are placed in @fbatch. find_get_entries() * takes a reference on any actual folios it returns. * * The entries have ascending indexes. The indices may not be consecutive * due to not-present entries or large folios. * * Any shadow entries of evicted folios, or swap entries from * shmem/tmpfs, are included in the returned array. * * Return: The number of entries which were found. */ unsigned find_get_entries(struct address_space *mapping, pgoff_t *start, pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices) { XA_STATE(xas, &mapping->i_pages, *start); struct folio *folio; rcu_read_lock(); while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) { indices[fbatch->nr] = xas.xa_index; if (!folio_batch_add(fbatch, folio)) break; } rcu_read_unlock(); if (folio_batch_count(fbatch)) { unsigned long nr = 1; int idx = folio_batch_count(fbatch) - 1; folio = fbatch->folios[idx]; if (!xa_is_value(folio) && !folio_test_hugetlb(folio)) nr = folio_nr_pages(folio); *start = indices[idx] + nr; } return folio_batch_count(fbatch); } /** * find_lock_entries - Find a batch of pagecache entries. * @mapping: The address_space to search. * @start: The starting page cache index. * @end: The final page index (inclusive). * @fbatch: Where the resulting entries are placed. * @indices: The cache indices of the entries in @fbatch. * * find_lock_entries() will return a batch of entries from @mapping. * Swap, shadow and DAX entries are included. Folios are returned * locked and with an incremented refcount. Folios which are locked * by somebody else or under writeback are skipped. Folios which are * partially outside the range are not returned. * * The entries have ascending indexes. The indices may not be consecutive * due to not-present entries, large folios, folios which could not be * locked or folios under writeback. * * Return: The number of entries which were found. 
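 *
 * A minimal sketch of the usual calling pattern (illustrative only and
 * simplified; real callers such as the truncate code must also strip any
 * value entries out of the batch before releasing it):
 *
 *	folio_batch_init(&fbatch);
 *	while (find_lock_entries(mapping, &start, end, &fbatch, indices)) {
 *		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 *			folio = fbatch.folios[i];
 *			if (xa_is_value(folio))
 *				continue;
 *			... work on the locked folio ...
 *			folio_unlock(folio);
 *		}
 *		folio_batch_release(&fbatch);
 *		cond_resched();
 *	}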
*/ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start, pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices) { XA_STATE(xas, &mapping->i_pages, *start); struct folio *folio; rcu_read_lock(); while ((folio = find_get_entry(&xas, end, XA_PRESENT))) { if (!xa_is_value(folio)) { if (folio->index < *start) goto put; if (folio_next_index(folio) - 1 > end) goto put; if (!folio_trylock(folio)) goto put; if (folio->mapping != mapping || folio_test_writeback(folio)) goto unlock; VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index), folio); } indices[fbatch->nr] = xas.xa_index; if (!folio_batch_add(fbatch, folio)) break; continue; unlock: folio_unlock(folio); put: folio_put(folio); } rcu_read_unlock(); if (folio_batch_count(fbatch)) { unsigned long nr = 1; int idx = folio_batch_count(fbatch) - 1; folio = fbatch->folios[idx]; if (!xa_is_value(folio) && !folio_test_hugetlb(folio)) nr = folio_nr_pages(folio); *start = indices[idx] + nr; } return folio_batch_count(fbatch); } /** * filemap_get_folios - Get a batch of folios * @mapping: The address_space to search * @start: The starting page index * @end: The final page index (inclusive) * @fbatch: The batch to fill. * * Search for and return a batch of folios in the mapping starting at * index @start and up to index @end (inclusive). The folios are returned * in @fbatch with an elevated reference count. * * The first folio may start before @start; if it does, it will contain * @start. The final folio may extend beyond @end; if it does, it will * contain @end. The folios have ascending indices. There may be gaps * between the folios if there are indices which have no folio in the * page cache. If folios are added to or removed from the page cache * while this is running, they may or may not be found by this call. * * Return: The number of folios which were found. * We also update @start to index the next folio for the traversal. */ unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start, pgoff_t end, struct folio_batch *fbatch) { XA_STATE(xas, &mapping->i_pages, *start); struct folio *folio; rcu_read_lock(); while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) { /* Skip over shadow, swap and DAX entries */ if (xa_is_value(folio)) continue; if (!folio_batch_add(fbatch, folio)) { unsigned long nr = folio_nr_pages(folio); if (folio_test_hugetlb(folio)) nr = 1; *start = folio->index + nr; goto out; } } /* * We come here when there is no page beyond @end. We take care to not * overflow the index @start as it confuses some of the callers. This * breaks the iteration when there is a page at index -1 but that is * already broken anyway. */ if (end == (pgoff_t)-1) *start = (pgoff_t)-1; else *start = end + 1; out: rcu_read_unlock(); return folio_batch_count(fbatch); } EXPORT_SYMBOL(filemap_get_folios); /** * filemap_get_folios_contig - Get a batch of contiguous folios * @mapping: The address_space to search * @start: The starting page index * @end: The final page index (inclusive) * @fbatch: The batch to fill * * filemap_get_folios_contig() works exactly like filemap_get_folios(), * except the returned folios are guaranteed to be contiguous. This may * not return all contiguous folios if the batch gets filled up. * * Return: The number of folios found. * Also update @start to be positioned for traversal of the next folio. 
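 *
 * Note that, unlike filemap_get_folios(), a hole or a shadow/swap entry
 * ends the batch early, so the batch may stop well short of @end even
 * when it is not full; callers must continue from the updated @start
 * rather than assume the whole range was covered.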
*/ unsigned filemap_get_folios_contig(struct address_space *mapping, pgoff_t *start, pgoff_t end, struct folio_batch *fbatch) { XA_STATE(xas, &mapping->i_pages, *start); unsigned long nr; struct folio *folio; rcu_read_lock(); for (folio = xas_load(&xas); folio && xas.xa_index <= end; folio = xas_next(&xas)) { if (xas_retry(&xas, folio)) continue; /* * If the entry has been swapped out, we can stop looking. * No current caller is looking for DAX entries. */ if (xa_is_value(folio)) goto update_start; if (!folio_try_get_rcu(folio)) goto retry; if (unlikely(folio != xas_reload(&xas))) goto put_folio; if (!folio_batch_add(fbatch, folio)) { nr = folio_nr_pages(folio); if (folio_test_hugetlb(folio)) nr = 1; *start = folio->index + nr; goto out; } continue; put_folio: folio_put(folio); retry: xas_reset(&xas); } update_start: nr = folio_batch_count(fbatch); if (nr) { folio = fbatch->folios[nr - 1]; if (folio_test_hugetlb(folio)) *start = folio->index + 1; else *start = folio_next_index(folio); } out: rcu_read_unlock(); return folio_batch_count(fbatch); } EXPORT_SYMBOL(filemap_get_folios_contig); /** * filemap_get_folios_tag - Get a batch of folios matching @tag * @mapping: The address_space to search * @start: The starting page index * @end: The final page index (inclusive) * @tag: The tag index * @fbatch: The batch to fill * * Same as filemap_get_folios(), but only returning folios tagged with @tag. * * Return: The number of folios found. * Also update @start to index the next folio for traversal. */ unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start, pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch) { XA_STATE(xas, &mapping->i_pages, *start); struct folio *folio; rcu_read_lock(); while ((folio = find_get_entry(&xas, end, tag)) != NULL) { /* * Shadow entries should never be tagged, but this iteration * is lockless so there is a window for page reclaim to evict * a page we saw tagged. Skip over it. */ if (xa_is_value(folio)) continue; if (!folio_batch_add(fbatch, folio)) { unsigned long nr = folio_nr_pages(folio); if (folio_test_hugetlb(folio)) nr = 1; *start = folio->index + nr; goto out; } } /* * We come here when there is no page beyond @end. We take care to not * overflow the index @start as it confuses some of the callers. This * breaks the iteration when there is a page at index -1 but that is * already broke anyway. */ if (end == (pgoff_t)-1) *start = (pgoff_t)-1; else *start = end + 1; out: rcu_read_unlock(); return folio_batch_count(fbatch); } EXPORT_SYMBOL(filemap_get_folios_tag); /* * CD/DVDs are error prone. When a medium error occurs, the driver may fail * a _large_ part of the i/o request. Imagine the worst scenario: * * ---R__________________________________________B__________ * ^ reading here ^ bad block(assume 4k) * * read(R) => miss => readahead(R...B) => media error => frustrating retries * => failing the whole request => read(R) => read(R+1) => * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) => * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) => * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ...... * * It is going insane. Fix it by quickly scaling down the readahead size. */ static void shrink_readahead_size_eio(struct file_ra_state *ra) { ra->ra_pages /= 4; } /* * filemap_get_read_batch - Get a batch of folios for read * * Get a batch of folios which represent a contiguous range of bytes in * the file. No exceptional entries will be returned. 
If @index is in * the middle of a folio, the entire folio will be returned. The last * folio in the batch may have the readahead flag set or the uptodate flag * clear so that the caller can take the appropriate action. */ static void filemap_get_read_batch(struct address_space *mapping, pgoff_t index, pgoff_t max, struct folio_batch *fbatch) { XA_STATE(xas, &mapping->i_pages, index); struct folio *folio; rcu_read_lock(); for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) { if (xas_retry(&xas, folio)) continue; if (xas.xa_index > max || xa_is_value(folio)) break; if (xa_is_sibling(folio)) break; if (!folio_try_get_rcu(folio)) goto retry; if (unlikely(folio != xas_reload(&xas))) goto put_folio; if (!folio_batch_add(fbatch, folio)) break; if (!folio_test_uptodate(folio)) break; if (folio_test_readahead(folio)) break; xas_advance(&xas, folio_next_index(folio) - 1); continue; put_folio: folio_put(folio); retry: xas_reset(&xas); } rcu_read_unlock(); } static int filemap_read_folio(struct file *file, filler_t filler, struct folio *folio) { bool workingset = folio_test_workingset(folio); unsigned long pflags; int error; /* * A previous I/O error may have been due to temporary failures, * eg. multipath errors. PG_error will be set again if read_folio * fails. */ folio_clear_error(folio); /* Start the actual read. The read will unlock the page. */ if (unlikely(workingset)) psi_memstall_enter(&pflags); error = filler(file, folio); if (unlikely(workingset)) psi_memstall_leave(&pflags); if (error) return error; error = folio_wait_locked_killable(folio); if (error) return error; if (folio_test_uptodate(folio)) return 0; if (file) shrink_readahead_size_eio(&file->f_ra); return -EIO; } static bool filemap_range_uptodate(struct address_space *mapping, loff_t pos, size_t count, struct folio *folio, bool need_uptodate) { if (folio_test_uptodate(folio)) return true; /* pipes can't handle partially uptodate pages */ if (need_uptodate) return false; if (!mapping->a_ops->is_partially_uptodate) return false; if (mapping->host->i_blkbits >= folio_shift(folio)) return false; if (folio_pos(folio) > pos) { count -= folio_pos(folio) - pos; pos = 0; } else { pos -= folio_pos(folio); } return mapping->a_ops->is_partially_uptodate(folio, pos, count); } static int filemap_update_page(struct kiocb *iocb, struct address_space *mapping, size_t count, struct folio *folio, bool need_uptodate) { int error; if (iocb->ki_flags & IOCB_NOWAIT) { if (!filemap_invalidate_trylock_shared(mapping)) return -EAGAIN; } else { filemap_invalidate_lock_shared(mapping); } if (!folio_trylock(folio)) { error = -EAGAIN; if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) goto unlock_mapping; if (!(iocb->ki_flags & IOCB_WAITQ)) { filemap_invalidate_unlock_shared(mapping); /* * This is where we usually end up waiting for a * previously submitted readahead to finish. 
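 *
 * folio_put_wait_locked() drops our folio reference before sleeping, so
 * waiting here cannot hold up truncation or migration, and the
 * AOP_TRUNCATED_PAGE return tells filemap_get_pages() to drop this folio
 * from its batch and, if nothing else was found, redo the lookup.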
*/ folio_put_wait_locked(folio, TASK_KILLABLE); return AOP_TRUNCATED_PAGE; } error = __folio_lock_async(folio, iocb->ki_waitq); if (error) goto unlock_mapping; } error = AOP_TRUNCATED_PAGE; if (!folio->mapping) goto unlock; error = 0; if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio, need_uptodate)) goto unlock; error = -EAGAIN; if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ)) goto unlock; error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio, folio); goto unlock_mapping; unlock: folio_unlock(folio); unlock_mapping: filemap_invalidate_unlock_shared(mapping); if (error == AOP_TRUNCATED_PAGE) folio_put(folio); return error; } static int filemap_create_folio(struct file *file, struct address_space *mapping, pgoff_t index, struct folio_batch *fbatch) { struct folio *folio; int error; folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0); if (!folio) return -ENOMEM; /* * Protect against truncate / hole punch. Grabbing invalidate_lock * here assures we cannot instantiate and bring uptodate new * pagecache folios after evicting page cache during truncate * and before actually freeing blocks. Note that we could * release invalidate_lock after inserting the folio into * the page cache as the locked folio would then be enough to * synchronize with hole punching. But there are code paths * such as filemap_update_page() filling in partially uptodate * pages or ->readahead() that need to hold invalidate_lock * while mapping blocks for IO so let's hold the lock here as * well to keep locking rules simple. */ filemap_invalidate_lock_shared(mapping); error = filemap_add_folio(mapping, folio, index, mapping_gfp_constraint(mapping, GFP_KERNEL)); if (error == -EEXIST) error = AOP_TRUNCATED_PAGE; if (error) goto error; error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); if (error) goto error; filemap_invalidate_unlock_shared(mapping); folio_batch_add(fbatch, folio); return 0; error: filemap_invalidate_unlock_shared(mapping); folio_put(folio); return error; } static int filemap_readahead(struct kiocb *iocb, struct file *file, struct address_space *mapping, struct folio *folio, pgoff_t last_index) { DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index); if (iocb->ki_flags & IOCB_NOIO) return -EAGAIN; page_cache_async_ra(&ractl, folio, last_index - folio->index); return 0; } static int filemap_get_pages(struct kiocb *iocb, size_t count, struct folio_batch *fbatch, bool need_uptodate) { struct file *filp = iocb->ki_filp; struct address_space *mapping = filp->f_mapping; struct file_ra_state *ra = &filp->f_ra; pgoff_t index = iocb->ki_pos >> PAGE_SHIFT; pgoff_t last_index; struct folio *folio; int err = 0; /* "last_index" is the index of the page beyond the end of the read */ last_index = DIV_ROUND_UP(iocb->ki_pos + count, PAGE_SIZE); retry: if (fatal_signal_pending(current)) return -EINTR; filemap_get_read_batch(mapping, index, last_index - 1, fbatch); if (!folio_batch_count(fbatch)) { if (iocb->ki_flags & IOCB_NOIO) return -EAGAIN; page_cache_sync_readahead(mapping, ra, filp, index, last_index - index); filemap_get_read_batch(mapping, index, last_index - 1, fbatch); } if (!folio_batch_count(fbatch)) { if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ)) return -EAGAIN; err = filemap_create_folio(filp, mapping, iocb->ki_pos >> PAGE_SHIFT, fbatch); if (err == AOP_TRUNCATED_PAGE) goto retry; return err; } folio = fbatch->folios[folio_batch_count(fbatch) - 1]; if (folio_test_readahead(folio)) { err = filemap_readahead(iocb, filp, mapping, folio, 
last_index); if (err) goto err; } if (!folio_test_uptodate(folio)) { if ((iocb->ki_flags & IOCB_WAITQ) && folio_batch_count(fbatch) > 1) iocb->ki_flags |= IOCB_NOWAIT; err = filemap_update_page(iocb, mapping, count, folio, need_uptodate); if (err) goto err; } return 0; err: if (err < 0) folio_put(folio); if (likely(--fbatch->nr)) return 0; if (err == AOP_TRUNCATED_PAGE) goto retry; return err; } static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio) { unsigned int shift = folio_shift(folio); return (pos1 >> shift == pos2 >> shift); } /** * filemap_read - Read data from the page cache. * @iocb: The iocb to read. * @iter: Destination for the data. * @already_read: Number of bytes already read by the caller. * * Copies data from the page cache. If the data is not currently present, * uses the readahead and read_folio address_space operations to fetch it. * * Return: Total number of bytes copied, including those already read by * the caller. If an error happens before any bytes are copied, returns * a negative error number. */ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter, ssize_t already_read) { struct file *filp = iocb->ki_filp; struct file_ra_state *ra = &filp->f_ra; struct address_space *mapping = filp->f_mapping; struct inode *inode = mapping->host; struct folio_batch fbatch; int i, error = 0; bool writably_mapped; loff_t isize, end_offset; loff_t last_pos = ra->prev_pos; if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes)) return 0; if (unlikely(!iov_iter_count(iter))) return 0; iov_iter_truncate(iter, inode->i_sb->s_maxbytes); folio_batch_init(&fbatch); do { cond_resched(); /* * If we've already successfully copied some data, then we * can no longer safely return -EIOCBQUEUED. Hence mark * an async read NOWAIT at that point. */ if ((iocb->ki_flags & IOCB_WAITQ) && already_read) iocb->ki_flags |= IOCB_NOWAIT; if (unlikely(iocb->ki_pos >= i_size_read(inode))) break; error = filemap_get_pages(iocb, iter->count, &fbatch, false); if (error < 0) break; /* * i_size must be checked after we know the pages are Uptodate. * * Checking i_size after the check allows us to calculate * the correct value for "nr", which means the zero-filled * part of the page is not copied back to userspace (unless * another truncate extends the file - this is desired though). */ isize = i_size_read(inode); if (unlikely(iocb->ki_pos >= isize)) goto put_folios; end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count); /* * Once we start copying data, we don't want to be touching any * cachelines that might be contended: */ writably_mapped = mapping_writably_mapped(mapping); /* * When a read accesses the same folio several times, only * mark it as accessed the first time. */ if (!pos_same_folio(iocb->ki_pos, last_pos - 1, fbatch.folios[0])) folio_mark_accessed(fbatch.folios[0]); for (i = 0; i < folio_batch_count(&fbatch); i++) { struct folio *folio = fbatch.folios[i]; size_t fsize = folio_size(folio); size_t offset = iocb->ki_pos & (fsize - 1); size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos, fsize - offset); size_t copied; if (end_offset < folio_pos(folio)) break; if (i > 0) folio_mark_accessed(folio); /* * If users can be writing to this folio using arbitrary * virtual addresses, take care of potential aliasing * before reading the folio on the kernel side. 
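 *
 * (On architectures whose data caches cannot alias, such as x86,
 * flush_dcache_folio() is a no-op, so this costs nothing there.)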
*/ if (writably_mapped) flush_dcache_folio(folio); copied = copy_folio_to_iter(folio, offset, bytes, iter); already_read += copied; iocb->ki_pos += copied; last_pos = iocb->ki_pos; if (copied < bytes) { error = -EFAULT; break; } } put_folios: for (i = 0; i < folio_batch_count(&fbatch); i++) folio_put(fbatch.folios[i]); folio_batch_init(&fbatch); } while (iov_iter_count(iter) && iocb->ki_pos < isize && !error); file_accessed(filp); ra->prev_pos = last_pos; return already_read ? already_read : error; } EXPORT_SYMBOL_GPL(filemap_read); int kiocb_write_and_wait(struct kiocb *iocb, size_t count) { struct address_space *mapping = iocb->ki_filp->f_mapping; loff_t pos = iocb->ki_pos; loff_t end = pos + count - 1; if (iocb->ki_flags & IOCB_NOWAIT) { if (filemap_range_needs_writeback(mapping, pos, end)) return -EAGAIN; return 0; } return filemap_write_and_wait_range(mapping, pos, end); } int kiocb_invalidate_pages(struct kiocb *iocb, size_t count) { struct address_space *mapping = iocb->ki_filp->f_mapping; loff_t pos = iocb->ki_pos; loff_t end = pos + count - 1; int ret; if (iocb->ki_flags & IOCB_NOWAIT) { /* we could block if there are any pages in the range */ if (filemap_range_has_page(mapping, pos, end)) return -EAGAIN; } else { ret = filemap_write_and_wait_range(mapping, pos, end); if (ret) return ret; } /* * After a write we want buffered reads to be sure to go to disk to get * the new data. We invalidate clean cached page from the region we're * about to write. We do this *before* the write so that we can return * without clobbering -EIOCBQUEUED from ->direct_IO(). */ return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end >> PAGE_SHIFT); } /** * generic_file_read_iter - generic filesystem read routine * @iocb: kernel I/O control block * @iter: destination for the data read * * This is the "read_iter()" routine for all filesystems * that can use the page cache directly. * * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall * be returned when no data can be read without waiting for I/O requests * to complete; it doesn't prevent readahead. * * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O * requests shall be made for the read or for readahead. When no data * can be read, -EAGAIN shall be returned. When readahead would be * triggered, a partial, possibly empty read shall be returned. * * Return: * * number of bytes copied, even for partial reads * * negative error code (or 0 if IOCB_NOIO) if nothing was read */ ssize_t generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) { size_t count = iov_iter_count(iter); ssize_t retval = 0; if (!count) return 0; /* skip atime */ if (iocb->ki_flags & IOCB_DIRECT) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; retval = kiocb_write_and_wait(iocb, count); if (retval < 0) return retval; file_accessed(file); retval = mapping->a_ops->direct_IO(iocb, iter); if (retval >= 0) { iocb->ki_pos += retval; count -= retval; } if (retval != -EIOCBQUEUED) iov_iter_revert(iter, count - iov_iter_count(iter)); /* * Btrfs can have a short DIO read if we encounter * compressed extents, so if there was an error, or if * we've already read everything we wanted to, or if * there was a short read because we hit EOF, go ahead * and return. Otherwise fallthrough to buffered io for * the rest of the read. Buffered reads will not work for * DAX files, so don't bother trying. 
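 *
 * For example, if a 1MiB direct read came back short after 256KiB
 * because it ran into a compressed extent, ki_pos and the iterator have
 * already been advanced by those 256KiB above, and filemap_read() below
 * fetches the remaining 768KiB through the page cache and returns the
 * combined byte count.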
*/ if (retval < 0 || !count || IS_DAX(inode)) return retval; if (iocb->ki_pos >= i_size_read(inode)) return retval; } return filemap_read(iocb, iter, retval); } EXPORT_SYMBOL(generic_file_read_iter); /* * Splice subpages from a folio into a pipe. */ size_t splice_folio_into_pipe(struct pipe_inode_info *pipe, struct folio *folio, loff_t fpos, size_t size) { struct page *page; size_t spliced = 0, offset = offset_in_folio(folio, fpos); page = folio_page(folio, offset / PAGE_SIZE); size = min(size, folio_size(folio) - offset); offset %= PAGE_SIZE; while (spliced < size && !pipe_full(pipe->head, pipe->tail, pipe->max_usage)) { struct pipe_buffer *buf = pipe_head_buf(pipe); size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced); *buf = (struct pipe_buffer) { .ops = &page_cache_pipe_buf_ops, .page = page, .offset = offset, .len = part, }; folio_get(folio); pipe->head++; page++; spliced += part; offset = 0; } return spliced; } /** * filemap_splice_read - Splice data from a file's pagecache into a pipe * @in: The file to read from * @ppos: Pointer to the file position to read from * @pipe: The pipe to splice into * @len: The amount to splice * @flags: The SPLICE_F_* flags * * This function gets folios from a file's pagecache and splices them into the * pipe. Readahead will be called as necessary to fill more folios. This may * be used for blockdevs also. * * Return: On success, the number of bytes read will be returned and *@ppos * will be updated if appropriate; 0 will be returned if there is no more data * to be read; -EAGAIN will be returned if the pipe had no space, and some * other negative error code will be returned on error. A short read may occur * if the pipe has insufficient space, we reach the end of the data or we hit a * hole. */ ssize_t filemap_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct folio_batch fbatch; struct kiocb iocb; size_t total_spliced = 0, used, npages; loff_t isize, end_offset; bool writably_mapped; int i, error = 0; if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes)) return 0; init_sync_kiocb(&iocb, in); iocb.ki_pos = *ppos; /* Work out how much data we can actually add into the pipe */ used = pipe_occupancy(pipe->head, pipe->tail); npages = max_t(ssize_t, pipe->max_usage - used, 0); len = min_t(size_t, len, npages * PAGE_SIZE); folio_batch_init(&fbatch); do { cond_resched(); if (*ppos >= i_size_read(in->f_mapping->host)) break; iocb.ki_pos = *ppos; error = filemap_get_pages(&iocb, len, &fbatch, true); if (error < 0) break; /* * i_size must be checked after we know the pages are Uptodate. * * Checking i_size after the check allows us to calculate * the correct value for "nr", which means the zero-filled * part of the page is not copied back to userspace (unless * another truncate extends the file - this is desired though). */ isize = i_size_read(in->f_mapping->host); if (unlikely(*ppos >= isize)) break; end_offset = min_t(loff_t, isize, *ppos + len); /* * Once we start copying data, we don't want to be touching any * cachelines that might be contended: */ writably_mapped = mapping_writably_mapped(in->f_mapping); for (i = 0; i < folio_batch_count(&fbatch); i++) { struct folio *folio = fbatch.folios[i]; size_t n; if (folio_pos(folio) >= end_offset) goto out; folio_mark_accessed(folio); /* * If users can be writing to this folio using arbitrary * virtual addresses, take care of potential aliasing * before reading the folio on the kernel side. 
*/ if (writably_mapped) flush_dcache_folio(folio); n = min_t(loff_t, len, isize - *ppos); n = splice_folio_into_pipe(pipe, folio, *ppos, n); if (!n) goto out; len -= n; total_spliced += n; *ppos += n; in->f_ra.prev_pos = *ppos; if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) goto out; } folio_batch_release(&fbatch); } while (len); out: folio_batch_release(&fbatch); file_accessed(in); return total_spliced ? total_spliced : error; } EXPORT_SYMBOL(filemap_splice_read); static inline loff_t folio_seek_hole_data(struct xa_state *xas, struct address_space *mapping, struct folio *folio, loff_t start, loff_t end, bool seek_data) { const struct address_space_operations *ops = mapping->a_ops; size_t offset, bsz = i_blocksize(mapping->host); if (xa_is_value(folio) || folio_test_uptodate(folio)) return seek_data ? start : end; if (!ops->is_partially_uptodate) return seek_data ? end : start; xas_pause(xas); rcu_read_unlock(); folio_lock(folio); if (unlikely(folio->mapping != mapping)) goto unlock; offset = offset_in_folio(folio, start) & ~(bsz - 1); do { if (ops->is_partially_uptodate(folio, offset, bsz) == seek_data) break; start = (start + bsz) & ~(bsz - 1); offset += bsz; } while (offset < folio_size(folio)); unlock: folio_unlock(folio); rcu_read_lock(); return start; } static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio) { if (xa_is_value(folio)) return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index); return folio_size(folio); } /** * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache. * @mapping: Address space to search. * @start: First byte to consider. * @end: Limit of search (exclusive). * @whence: Either SEEK_HOLE or SEEK_DATA. * * If the page cache knows which blocks contain holes and which blocks * contain data, your filesystem can use this function to implement * SEEK_HOLE and SEEK_DATA. This is useful for filesystems which are * entirely memory-based such as tmpfs, and filesystems which support * unwritten extents. * * Return: The requested offset on success, or -ENXIO if @whence specifies * SEEK_DATA and there is no data after @start. There is an implicit hole * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start * and @end contain data. */ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start, loff_t end, int whence) { XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT); pgoff_t max = (end - 1) >> PAGE_SHIFT; bool seek_data = (whence == SEEK_DATA); struct folio *folio; if (end <= start) return -ENXIO; rcu_read_lock(); while ((folio = find_get_entry(&xas, max, XA_PRESENT))) { loff_t pos = (u64)xas.xa_index << PAGE_SHIFT; size_t seek_size; if (start < pos) { if (!seek_data) goto unlock; start = pos; } seek_size = seek_folio_size(&xas, folio); pos = round_up((u64)pos + 1, seek_size); start = folio_seek_hole_data(&xas, mapping, folio, start, pos, seek_data); if (start < pos) goto unlock; if (start >= end) break; if (seek_size > PAGE_SIZE) xas_set(&xas, pos >> PAGE_SHIFT); if (!xa_is_value(folio)) folio_put(folio); } if (seek_data) start = -ENXIO; unlock: rcu_read_unlock(); if (folio && !xa_is_value(folio)) folio_put(folio); if (start > end) return end; return start; } #ifdef CONFIG_MMU #define MMAP_LOTSAMISS (100) /* * lock_folio_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock * @vmf - the vm_fault for this fault. * @folio - the folio to lock. * @fpin - the pointer to the file we may pin (or is already pinned). 
* * This works similar to lock_folio_or_retry in that it can drop the * mmap_lock. It differs in that it actually returns the folio locked * if it returns 1 and 0 if it couldn't lock the folio. If we did have * to drop the mmap_lock then fpin will point to the pinned file and * needs to be fput()'ed at a later point. */ static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio, struct file **fpin) { if (folio_trylock(folio)) return 1; /* * NOTE! This will make us return with VM_FAULT_RETRY, but with * the mmap_lock still held. That's how FAULT_FLAG_RETRY_NOWAIT * is supposed to work. We have way too many special cases.. */ if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) return 0; *fpin = maybe_unlock_mmap_for_io(vmf, *fpin); if (vmf->flags & FAULT_FLAG_KILLABLE) { if (__folio_lock_killable(folio)) { /* * We didn't have the right flags to drop the mmap_lock, * but all fault_handlers only check for fatal signals * if we return VM_FAULT_RETRY, so we need to drop the * mmap_lock here and return 0 if we don't have a fpin. */ if (*fpin == NULL) mmap_read_unlock(vmf->vma->vm_mm); return 0; } } else __folio_lock(folio); return 1; } /* * Synchronous readahead happens when we don't even find a page in the page * cache at all. We don't want to perform IO under the mmap sem, so if we have * to drop the mmap sem we return the file that was pinned in order for us to do * that. If we didn't pin a file then we return NULL. The file that is * returned needs to be fput()'ed when we're done with it. */ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) { struct file *file = vmf->vma->vm_file; struct file_ra_state *ra = &file->f_ra; struct address_space *mapping = file->f_mapping; DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff); struct file *fpin = NULL; unsigned long vm_flags = vmf->vma->vm_flags; unsigned int mmap_miss; #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* Use the readahead code, even if readahead is disabled */ if (vm_flags & VM_HUGEPAGE) { fpin = maybe_unlock_mmap_for_io(vmf, fpin); ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1); ra->size = HPAGE_PMD_NR; /* * Fetch two PMD folios, so we get the chance to actually * readahead, unless we've been told not to. */ if (!(vm_flags & VM_RAND_READ)) ra->size *= 2; ra->async_size = HPAGE_PMD_NR; page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER); return fpin; } #endif /* If we don't want any read-ahead, don't bother */ if (vm_flags & VM_RAND_READ) return fpin; if (!ra->ra_pages) return fpin; if (vm_flags & VM_SEQ_READ) { fpin = maybe_unlock_mmap_for_io(vmf, fpin); page_cache_sync_ra(&ractl, ra->ra_pages); return fpin; } /* Avoid banging the cache line if not needed */ mmap_miss = READ_ONCE(ra->mmap_miss); if (mmap_miss < MMAP_LOTSAMISS * 10) WRITE_ONCE(ra->mmap_miss, ++mmap_miss); /* * Do we miss much more than hit in this file? If so, * stop bothering with read-ahead. It will only hurt. */ if (mmap_miss > MMAP_LOTSAMISS) return fpin; /* * mmap read-around */ fpin = maybe_unlock_mmap_for_io(vmf, fpin); ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2); ra->size = ra->ra_pages; ra->async_size = ra->ra_pages / 4; ractl._index = ra->start; page_cache_ra_order(&ractl, ra, 0); return fpin; } /* * Asynchronous readahead happens when we find the page and PG_readahead, * so we want to possibly extend the readahead further. We return the file that * was pinned if we have to drop the mmap_lock in order to do IO. 
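 *
 * Seen from filemap_fault() below, the pinning protocol is roughly (an
 * illustrative sketch, not additional code):
 *
 *	fpin = do_async_mmap_readahead(vmf, folio);
 *	...
 *	if (fpin) {
 *		fput(fpin);
 *		return ret | VM_FAULT_RETRY;	mmap_lock was dropped
 *	}
 *	... otherwise the fault proceeds with the mmap_lock still held ...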
*/ static struct file *do_async_mmap_readahead(struct vm_fault *vmf, struct folio *folio) { struct file *file = vmf->vma->vm_file; struct file_ra_state *ra = &file->f_ra; DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff); struct file *fpin = NULL; unsigned int mmap_miss; /* If we don't want any read-ahead, don't bother */ if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) return fpin; mmap_miss = READ_ONCE(ra->mmap_miss); if (mmap_miss) WRITE_ONCE(ra->mmap_miss, --mmap_miss); if (folio_test_readahead(folio)) { fpin = maybe_unlock_mmap_for_io(vmf, fpin); page_cache_async_ra(&ractl, folio, ra->ra_pages); } return fpin; } /** * filemap_fault - read in file data for page fault handling * @vmf: struct vm_fault containing details of the fault * * filemap_fault() is invoked via the vma operations vector for a * mapped memory region to read in file data during a page fault. * * The goto's are kind of ugly, but this streamlines the normal case of having * it in the page cache, and handles the special cases reasonably without * having a lot of duplicated code. * * vma->vm_mm->mmap_lock must be held on entry. * * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock * may be dropped before doing I/O or by lock_folio_maybe_drop_mmap(). * * If our return value does not have VM_FAULT_RETRY set, the mmap_lock * has not been released. * * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set. * * Return: bitwise-OR of %VM_FAULT_ codes. */ vm_fault_t filemap_fault(struct vm_fault *vmf) { int error; struct file *file = vmf->vma->vm_file; struct file *fpin = NULL; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; pgoff_t max_idx, index = vmf->pgoff; struct folio *folio; vm_fault_t ret = 0; bool mapping_locked = false; max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); if (unlikely(index >= max_idx)) return VM_FAULT_SIGBUS; /* * Do we have something in the page cache already? */ folio = filemap_get_folio(mapping, index); if (likely(!IS_ERR(folio))) { /* * We found the page, so try async readahead before waiting for * the lock. */ if (!(vmf->flags & FAULT_FLAG_TRIED)) fpin = do_async_mmap_readahead(vmf, folio); if (unlikely(!folio_test_uptodate(folio))) { filemap_invalidate_lock_shared(mapping); mapping_locked = true; } } else { /* No page in the page cache at all */ count_vm_event(PGMAJFAULT); count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); ret = VM_FAULT_MAJOR; fpin = do_sync_mmap_readahead(vmf); retry_find: /* * See comment in filemap_create_folio() why we need * invalidate_lock */ if (!mapping_locked) { filemap_invalidate_lock_shared(mapping); mapping_locked = true; } folio = __filemap_get_folio(mapping, index, FGP_CREAT|FGP_FOR_MMAP, vmf->gfp_mask); if (IS_ERR(folio)) { if (fpin) goto out_retry; filemap_invalidate_unlock_shared(mapping); return VM_FAULT_OOM; } } if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin)) goto out_retry; /* Did it get truncated? */ if (unlikely(folio->mapping != mapping)) { folio_unlock(folio); folio_put(folio); goto retry_find; } VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio); /* * We have a locked page in the page cache, now we need to check * that it's up-to-date. If not, it is going to be due to an error. */ if (unlikely(!folio_test_uptodate(folio))) { /* * The page was in cache and uptodate and now it is not. * Strange but possible since we didn't hold the page lock all * the time. Let's drop everything get the invalidate lock and * try again. 
*/ if (!mapping_locked) { folio_unlock(folio); folio_put(folio); goto retry_find; } goto page_not_uptodate; } /* * We've made it this far and we had to drop our mmap_lock, now is the * time to return to the upper layer and have it re-find the vma and * redo the fault. */ if (fpin) { folio_unlock(folio); goto out_retry; } if (mapping_locked) filemap_invalidate_unlock_shared(mapping); /* * Found the page and have a reference on it. * We must recheck i_size under page lock. */ max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); if (unlikely(index >= max_idx)) { folio_unlock(folio); folio_put(folio); return VM_FAULT_SIGBUS; } vmf->page = folio_file_page(folio, index); return ret | VM_FAULT_LOCKED; page_not_uptodate: /* * Umm, take care of errors if the page isn't up-to-date. * Try to re-read it _once_. We do this synchronously, * because there really aren't any performance issues here * and we need to check for errors. */ fpin = maybe_unlock_mmap_for_io(vmf, fpin); error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); if (fpin) goto out_retry; folio_put(folio); if (!error || error == AOP_TRUNCATED_PAGE) goto retry_find; filemap_invalidate_unlock_shared(mapping); return VM_FAULT_SIGBUS; out_retry: /* * We dropped the mmap_lock, we need to return to the fault handler to * re-find the vma and come back and find our hopefully still populated * page. */ if (!IS_ERR(folio)) folio_put(folio); if (mapping_locked) filemap_invalidate_unlock_shared(mapping); if (fpin) fput(fpin); return ret | VM_FAULT_RETRY; } EXPORT_SYMBOL(filemap_fault); static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio, pgoff_t start) { struct mm_struct *mm = vmf->vma->vm_mm; /* Huge page is mapped? No need to proceed. */ if (pmd_trans_huge(*vmf->pmd)) { folio_unlock(folio); folio_put(folio); return true; } if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) { struct page *page = folio_file_page(folio, start); vm_fault_t ret = do_set_pmd(vmf, page); if (!ret) { /* The page is mapped successfully, reference consumed. */ folio_unlock(folio); return true; } } if (pmd_none(*vmf->pmd)) pmd_install(mm, vmf->pmd, &vmf->prealloc_pte); return false; } static struct folio *next_uptodate_folio(struct xa_state *xas, struct address_space *mapping, pgoff_t end_pgoff) { struct folio *folio = xas_next_entry(xas, end_pgoff); unsigned long max_idx; do { if (!folio) return NULL; if (xas_retry(xas, folio)) continue; if (xa_is_value(folio)) continue; if (folio_test_locked(folio)) continue; if (!folio_try_get_rcu(folio)) continue; /* Has the page moved or been split? */ if (unlikely(folio != xas_reload(xas))) goto skip; if (!folio_test_uptodate(folio) || folio_test_readahead(folio)) goto skip; if (!folio_trylock(folio)) goto skip; if (folio->mapping != mapping) goto unlock; if (!folio_test_uptodate(folio)) goto unlock; max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); if (xas->xa_index >= max_idx) goto unlock; return folio; unlock: folio_unlock(folio); skip: folio_put(folio); } while ((folio = xas_next_entry(xas, end_pgoff)) != NULL); return NULL; } /* * Map page range [start_page, start_page + nr_pages) of folio. 
* start_page is gotten from start by folio_page(folio, start) */ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf, struct folio *folio, unsigned long start, unsigned long addr, unsigned int nr_pages, unsigned int *mmap_miss) { vm_fault_t ret = 0; struct page *page = folio_page(folio, start); unsigned int count = 0; pte_t *old_ptep = vmf->pte; do { if (PageHWPoison(page + count)) goto skip; (*mmap_miss)++; /* * NOTE: If there're PTE markers, we'll leave them to be * handled in the specific fault path, and it'll prohibit the * fault-around logic. */ if (!pte_none(vmf->pte[count])) goto skip; count++; continue; skip: if (count) { set_pte_range(vmf, folio, page, count, addr); folio_ref_add(folio, count); if (in_range(vmf->address, addr, count)) ret = VM_FAULT_NOPAGE; } count++; page += count; vmf->pte += count; addr += count * PAGE_SIZE; count = 0; } while (--nr_pages > 0); if (count) { set_pte_range(vmf, folio, page, count, addr); folio_ref_add(folio, count); if (in_range(vmf->address, addr, count)) ret = VM_FAULT_NOPAGE; } vmf->pte = old_ptep; return ret; } static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf, struct folio *folio, unsigned long addr, unsigned int *mmap_miss) { vm_fault_t ret = 0; struct page *page = &folio->page; if (PageHWPoison(page)) return ret; (*mmap_miss)++; /* * NOTE: If there're PTE markers, we'll leave them to be * handled in the specific fault path, and it'll prohibit * the fault-around logic. */ if (!pte_none(ptep_get(vmf->pte))) return ret; if (vmf->address == addr) ret = VM_FAULT_NOPAGE; set_pte_range(vmf, folio, page, 1, addr); folio_ref_inc(folio); return ret; } vm_fault_t filemap_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff) { struct vm_area_struct *vma = vmf->vma; struct file *file = vma->vm_file; struct address_space *mapping = file->f_mapping; pgoff_t last_pgoff = start_pgoff; unsigned long addr; XA_STATE(xas, &mapping->i_pages, start_pgoff); struct folio *folio; vm_fault_t ret = 0; unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved; rcu_read_lock(); folio = next_uptodate_folio(&xas, mapping, end_pgoff); if (!folio) goto out; if (filemap_map_pmd(vmf, folio, start_pgoff)) { ret = VM_FAULT_NOPAGE; goto out; } addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); if (!vmf->pte) { folio_unlock(folio); folio_put(folio); goto out; } do { unsigned long end; addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT; vmf->pte += xas.xa_index - last_pgoff; last_pgoff = xas.xa_index; end = folio->index + folio_nr_pages(folio) - 1; nr_pages = min(end, end_pgoff) - xas.xa_index + 1; if (!folio_test_large(folio)) ret |= filemap_map_order0_folio(vmf, folio, addr, &mmap_miss); else ret |= filemap_map_folio_range(vmf, folio, xas.xa_index - folio->index, addr, nr_pages, &mmap_miss); folio_unlock(folio); folio_put(folio); } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL); pte_unmap_unlock(vmf->pte, vmf->ptl); out: rcu_read_unlock(); mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss); if (mmap_miss >= mmap_miss_saved) WRITE_ONCE(file->f_ra.mmap_miss, 0); else WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss); return ret; } EXPORT_SYMBOL(filemap_map_pages); vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) { struct address_space *mapping = vmf->vma->vm_file->f_mapping; struct folio *folio = page_folio(vmf->page); vm_fault_t ret = VM_FAULT_LOCKED; sb_start_pagefault(mapping->host->i_sb); 
file_update_time(vmf->vma->vm_file); folio_lock(folio); if (folio->mapping != mapping) { folio_unlock(folio); ret = VM_FAULT_NOPAGE; goto out; } /* * We mark the folio dirty already here so that when freeze is in * progress, we are guaranteed that writeback during freezing will * see the dirty folio and writeprotect it again. */ folio_mark_dirty(folio); folio_wait_stable(folio); out: sb_end_pagefault(mapping->host->i_sb); return ret; } const struct vm_operations_struct generic_file_vm_ops = { .fault = filemap_fault, .map_pages = filemap_map_pages, .page_mkwrite = filemap_page_mkwrite, }; /* This is used for a general mmap of a disk file */ int generic_file_mmap(struct file *file, struct vm_area_struct *vma) { struct address_space *mapping = file->f_mapping; if (!mapping->a_ops->read_folio) return -ENOEXEC; file_accessed(file); vma->vm_ops = &generic_file_vm_ops; return 0; } /* * This is for filesystems which do not implement ->writepage. */ int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) { if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) return -EINVAL; return generic_file_mmap(file, vma); } #else vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } int generic_file_mmap(struct file *file, struct vm_area_struct *vma) { return -ENOSYS; } int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) { return -ENOSYS; } #endif /* CONFIG_MMU */ EXPORT_SYMBOL(filemap_page_mkwrite); EXPORT_SYMBOL(generic_file_mmap); EXPORT_SYMBOL(generic_file_readonly_mmap); static struct folio *do_read_cache_folio(struct address_space *mapping, pgoff_t index, filler_t filler, struct file *file, gfp_t gfp) { struct folio *folio; int err; if (!filler) filler = mapping->a_ops->read_folio; repeat: folio = filemap_get_folio(mapping, index); if (IS_ERR(folio)) { folio = filemap_alloc_folio(gfp, 0); if (!folio) return ERR_PTR(-ENOMEM); err = filemap_add_folio(mapping, folio, index, gfp); if (unlikely(err)) { folio_put(folio); if (err == -EEXIST) goto repeat; /* Presumably ENOMEM for xarray node */ return ERR_PTR(err); } goto filler; } if (folio_test_uptodate(folio)) goto out; if (!folio_trylock(folio)) { folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE); goto repeat; } /* Folio was truncated from mapping */ if (!folio->mapping) { folio_unlock(folio); folio_put(folio); goto repeat; } /* Someone else locked and filled the page in a very small window */ if (folio_test_uptodate(folio)) { folio_unlock(folio); goto out; } filler: err = filemap_read_folio(file, filler, folio); if (err) { folio_put(folio); if (err == AOP_TRUNCATED_PAGE) goto repeat; return ERR_PTR(err); } out: folio_mark_accessed(folio); return folio; } /** * read_cache_folio - Read into page cache, fill it if needed. * @mapping: The address_space to read from. * @index: The index to read. * @filler: Function to perform the read, or NULL to use aops->read_folio(). * @file: Passed to filler function, may be NULL if not required. * * Read one page into the page cache. If it succeeds, the folio returned * will contain @index, but it may not be the first page of the folio. * * If the filler function returns an error, it will be returned to the * caller. * * Context: May sleep. Expects mapping->invalidate_lock to be held. * Return: An uptodate folio on success, ERR_PTR() on failure. 
*/ struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index, filler_t filler, struct file *file) { return do_read_cache_folio(mapping, index, filler, file, mapping_gfp_mask(mapping)); } EXPORT_SYMBOL(read_cache_folio); /** * mapping_read_folio_gfp - Read into page cache, using specified allocation flags. * @mapping: The address_space for the folio. * @index: The index that the allocated folio will contain. * @gfp: The page allocator flags to use if allocating. * * This is the same as "read_cache_folio(mapping, index, NULL, NULL)", but with * any new memory allocations done using the specified allocation flags. * * The most likely error from this function is EIO, but ENOMEM is * possible and so is EINTR. If ->read_folio returns another error, * that will be returned to the caller. * * The function expects mapping->invalidate_lock to be already held. * * Return: Uptodate folio on success, ERR_PTR() on failure. */ struct folio *mapping_read_folio_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp) { return do_read_cache_folio(mapping, index, NULL, NULL, gfp); } EXPORT_SYMBOL(mapping_read_folio_gfp); static struct page *do_read_cache_page(struct address_space *mapping, pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp) { struct folio *folio; folio = do_read_cache_folio(mapping, index, filler, file, gfp); if (IS_ERR(folio)) return &folio->page; return folio_file_page(folio, index); } struct page *read_cache_page(struct address_space *mapping, pgoff_t index, filler_t *filler, struct file *file) { return do_read_cache_page(mapping, index, filler, file, mapping_gfp_mask(mapping)); } EXPORT_SYMBOL(read_cache_page); /** * read_cache_page_gfp - read into page cache, using specified page allocation flags. * @mapping: the page's address_space * @index: the page index * @gfp: the page allocator flags to use if allocating * * This is the same as "read_mapping_page(mapping, index, NULL)", but with * any new page allocations done using the specified allocation flags. * * If the page does not get brought uptodate, return -EIO. * * The function expects mapping->invalidate_lock to be already held. * * Return: up to date page on success, ERR_PTR() on failure. */ struct page *read_cache_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp) { return do_read_cache_page(mapping, index, NULL, NULL, gfp); } EXPORT_SYMBOL(read_cache_page_gfp); /* * Warn about a page cache invalidation failure during a direct I/O write. */ static void dio_warn_stale_pagecache(struct file *filp) { static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST); char pathname[128]; char *path; errseq_set(&filp->f_mapping->wb_err, -EIO); if (__ratelimit(&_rs)) { path = file_path(filp, pathname, sizeof(pathname)); if (IS_ERR(path)) path = "(unknown)"; pr_crit("Page cache invalidation failure on direct I/O. 
Possible data corruption due to collision with buffered I/O!\n"); pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid, current->comm); } } void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count) { struct address_space *mapping = iocb->ki_filp->f_mapping; if (mapping->nrpages && invalidate_inode_pages2_range(mapping, iocb->ki_pos >> PAGE_SHIFT, (iocb->ki_pos + count - 1) >> PAGE_SHIFT)) dio_warn_stale_pagecache(iocb->ki_filp); } ssize_t generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) { struct address_space *mapping = iocb->ki_filp->f_mapping; size_t write_len = iov_iter_count(from); ssize_t written; /* * If a page can not be invalidated, return 0 to fall back * to buffered write. */ written = kiocb_invalidate_pages(iocb, write_len); if (written) { if (written == -EBUSY) return 0; return written; } written = mapping->a_ops->direct_IO(iocb, from); /* * Finally, try again to invalidate clean pages which might have been * cached by non-direct readahead, or faulted in by get_user_pages() * if the source of the write was an mmap'ed region of the file * we're writing. Either one is a pretty crazy thing to do, * so we don't support it 100%. If this invalidation * fails, tough, the write still worked... * * Most of the time we do not need this since dio_complete() will do * the invalidation for us. However there are some file systems that * do not end up with dio_complete() being called, so let's not break * them by removing it completely. * * Noticeable example is a blkdev_direct_IO(). * * Skip invalidation for async writes or if mapping has no pages. */ if (written > 0) { struct inode *inode = mapping->host; loff_t pos = iocb->ki_pos; kiocb_invalidate_post_direct_write(iocb, written); pos += written; write_len -= written; if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { i_size_write(inode, pos); mark_inode_dirty(inode); } iocb->ki_pos = pos; } if (written != -EIOCBQUEUED) iov_iter_revert(from, write_len - iov_iter_count(from)); return written; } EXPORT_SYMBOL(generic_file_direct_write); ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i) { struct file *file = iocb->ki_filp; loff_t pos = iocb->ki_pos; struct address_space *mapping = file->f_mapping; const struct address_space_operations *a_ops = mapping->a_ops; long status = 0; ssize_t written = 0; do { struct page *page; unsigned long offset; /* Offset into pagecache page */ unsigned long bytes; /* Bytes to write to page */ size_t copied; /* Bytes copied from user */ void *fsdata = NULL; offset = (pos & (PAGE_SIZE - 1)); bytes = min_t(unsigned long, PAGE_SIZE - offset, iov_iter_count(i)); again: /* * Bring in the user page that we will copy from _first_. * Otherwise there's a nasty deadlock on copying from the * same page as we're writing to, without it being marked * up-to-date. 
*/ if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) { status = -EFAULT; break; } if (fatal_signal_pending(current)) { status = -EINTR; break; } status = a_ops->write_begin(file, mapping, pos, bytes, &page, &fsdata); if (unlikely(status < 0)) break; if (mapping_writably_mapped(mapping)) flush_dcache_page(page); copied = copy_page_from_iter_atomic(page, offset, bytes, i); flush_dcache_page(page); status = a_ops->write_end(file, mapping, pos, bytes, copied, page, fsdata); if (unlikely(status != copied)) { iov_iter_revert(i, copied - max(status, 0L)); if (unlikely(status < 0)) break; } cond_resched(); if (unlikely(status == 0)) { /* * A short copy made ->write_end() reject the * thing entirely. Might be memory poisoning * halfway through, might be a race with munmap, * might be severe memory pressure. */ if (copied) bytes = copied; goto again; } pos += status; written += status; balance_dirty_pages_ratelimited(mapping); } while (iov_iter_count(i)); if (!written) return status; iocb->ki_pos += written; return written; } EXPORT_SYMBOL(generic_perform_write); /** * __generic_file_write_iter - write data to a file * @iocb: IO state structure (file, offset, etc.) * @from: iov_iter with data to write * * This function does all the work needed for actually writing data to a * file. It does all basic checks, removes SUID from the file, updates * modification times and calls proper subroutines depending on whether we * do direct IO or a standard buffered write. * * It expects i_rwsem to be grabbed unless we work on a block device or similar * object which does not need locking at all. * * This function does *not* take care of syncing data in case of O_SYNC write. * A caller has to handle it. This is mainly due to the fact that we want to * avoid syncing under i_rwsem. * * Return: * * number of bytes written, even for truncated writes * * negative error code if no data has been written at all */ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; ssize_t ret; ret = file_remove_privs(file); if (ret) return ret; ret = file_update_time(file); if (ret) return ret; if (iocb->ki_flags & IOCB_DIRECT) { ret = generic_file_direct_write(iocb, from); /* * If the write stopped short of completing, fall back to * buffered writes. Some filesystems do this for writes to * holes, for example. For DAX files, a buffered write will * not succeed (even if it did, DAX does not handle dirty * page-cache pages correctly). */ if (ret < 0 || !iov_iter_count(from) || IS_DAX(inode)) return ret; return direct_write_fallback(iocb, from, ret, generic_perform_write(iocb, from)); } return generic_perform_write(iocb, from); } EXPORT_SYMBOL(__generic_file_write_iter); /** * generic_file_write_iter - write data to a file * @iocb: IO state structure * @from: iov_iter with data to write * * This is a wrapper around __generic_file_write_iter() to be used by most * filesystems. It takes care of syncing the file in case of O_SYNC file * and acquires i_rwsem as needed. 
* Return: * * negative error code if no data has been written at all of * vfs_fsync_range() failed for a synchronous write * * number of bytes written, even for truncated writes */ ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; ssize_t ret; inode_lock(inode); ret = generic_write_checks(iocb, from); if (ret > 0) ret = __generic_file_write_iter(iocb, from); inode_unlock(inode); if (ret > 0) ret = generic_write_sync(iocb, ret); return ret; } EXPORT_SYMBOL(generic_file_write_iter); /** * filemap_release_folio() - Release fs-specific metadata on a folio. * @folio: The folio which the kernel is trying to free. * @gfp: Memory allocation flags (and I/O mode). * * The address_space is trying to release any data attached to a folio * (presumably at folio->private). * * This will also be called if the private_2 flag is set on a page, * indicating that the folio has other metadata associated with it. * * The @gfp argument specifies whether I/O may be performed to release * this page (__GFP_IO), and whether the call may block * (__GFP_RECLAIM & __GFP_FS). * * Return: %true if the release was successful, otherwise %false. */ bool filemap_release_folio(struct folio *folio, gfp_t gfp) { struct address_space * const mapping = folio->mapping; BUG_ON(!folio_test_locked(folio)); if (!folio_needs_release(folio)) return true; if (folio_test_writeback(folio)) return false; if (mapping && mapping->a_ops->release_folio) return mapping->a_ops->release_folio(folio, gfp); return try_to_free_buffers(folio); } EXPORT_SYMBOL(filemap_release_folio); #ifdef CONFIG_CACHESTAT_SYSCALL /** * filemap_cachestat() - compute the page cache statistics of a mapping * @mapping: The mapping to compute the statistics for. * @first_index: The starting page cache index. * @last_index: The final page index (inclusive). * @cs: the cachestat struct to write the result to. * * This will query the page cache statistics of a mapping in the * page range of [first_index, last_index] (inclusive). The statistics * queried include: number of dirty pages, number of pages marked for * writeback, and the number of (recently) evicted pages. 
*/ static void filemap_cachestat(struct address_space *mapping, pgoff_t first_index, pgoff_t last_index, struct cachestat *cs) { XA_STATE(xas, &mapping->i_pages, first_index); struct folio *folio; rcu_read_lock(); xas_for_each(&xas, folio, last_index) { unsigned long nr_pages; pgoff_t folio_first_index, folio_last_index; if (xas_retry(&xas, folio)) continue; if (xa_is_value(folio)) { /* page is evicted */ void *shadow = (void *)folio; bool workingset; /* not used */ int order = xa_get_order(xas.xa, xas.xa_index); nr_pages = 1 << order; folio_first_index = round_down(xas.xa_index, 1 << order); folio_last_index = folio_first_index + nr_pages - 1; /* Folios might straddle the range boundaries, only count covered pages */ if (folio_first_index < first_index) nr_pages -= first_index - folio_first_index; if (folio_last_index > last_index) nr_pages -= folio_last_index - last_index; cs->nr_evicted += nr_pages; #ifdef CONFIG_SWAP /* implies CONFIG_MMU */ if (shmem_mapping(mapping)) { /* shmem file - in swap cache */ swp_entry_t swp = radix_to_swp_entry(folio); shadow = get_shadow_from_swap_cache(swp); } #endif if (workingset_test_recent(shadow, true, &workingset)) cs->nr_recently_evicted += nr_pages; goto resched; } nr_pages = folio_nr_pages(folio); folio_first_index = folio_pgoff(folio); folio_last_index = folio_first_index + nr_pages - 1; /* Folios might straddle the range boundaries, only count covered pages */ if (folio_first_index < first_index) nr_pages -= first_index - folio_first_index; if (folio_last_index > last_index) nr_pages -= folio_last_index - last_index; /* page is in cache */ cs->nr_cache += nr_pages; if (folio_test_dirty(folio)) cs->nr_dirty += nr_pages; if (folio_test_writeback(folio)) cs->nr_writeback += nr_pages; resched: if (need_resched()) { xas_pause(&xas); cond_resched_rcu(); } } rcu_read_unlock(); } /* * The cachestat(2) system call. * * cachestat() returns the page cache statistics of a file in the * bytes range specified by `off` and `len`: number of cached pages, * number of dirty pages, number of pages marked for writeback, * number of evicted pages, and number of recently evicted pages. * * An evicted page is a page that is previously in the page cache * but has been evicted since. A page is recently evicted if its last * eviction was recent enough that its reentry to the cache would * indicate that it is actively being used by the system, and that * there is memory pressure on the system. * * `off` and `len` must be non-negative integers. If `len` > 0, * the queried range is [`off`, `off` + `len`]. If `len` == 0, * we will query in the range from `off` to the end of the file. * * The `flags` argument is unused for now, but is included for future * extensibility. User should pass 0 (i.e no flag specified). * * Currently, hugetlbfs is not supported. * * Because the status of a page can change after cachestat() checks it * but before it returns to the application, the returned values may * contain stale information. 
* * return values: * zero - success * -EFAULT - cstat or cstat_range points to an illegal address * -EINVAL - invalid flags * -EBADF - invalid file descriptor * -EOPNOTSUPP - file descriptor is of a hugetlbfs file */ SYSCALL_DEFINE4(cachestat, unsigned int, fd, struct cachestat_range __user *, cstat_range, struct cachestat __user *, cstat, unsigned int, flags) { struct fd f = fdget(fd); struct address_space *mapping; struct cachestat_range csr; struct cachestat cs; pgoff_t first_index, last_index; if (!f.file) return -EBADF; if (copy_from_user(&csr, cstat_range, sizeof(struct cachestat_range))) { fdput(f); return -EFAULT; } /* hugetlbfs is not supported */ if (is_file_hugepages(f.file)) { fdput(f); return -EOPNOTSUPP; } if (flags != 0) { fdput(f); return -EINVAL; } first_index = csr.off >> PAGE_SHIFT; last_index = csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT; memset(&cs, 0, sizeof(struct cachestat)); mapping = f.file->f_mapping; filemap_cachestat(mapping, first_index, last_index, &cs); fdput(f); if (copy_to_user(cstat, &cs, sizeof(struct cachestat))) return -EFAULT; return 0; } #endif /* CONFIG_CACHESTAT_SYSCALL */
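/*
 * Illustrative userspace sketch -- NOT part of mm/filemap.c.  It shows one
 * way the cachestat(2) syscall implemented above might be driven from user
 * space.  Hedged assumptions: the installed kernel headers are recent
 * enough to provide __NR_cachestat and the uapi struct cachestat /
 * struct cachestat_range definitions via <linux/mman.h>; the file name
 * "data.bin" is purely hypothetical.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mman.h>

int main(void)
{
	struct cachestat_range range = { .off = 0, .len = 0 };	/* len == 0: query to EOF */
	struct cachestat cs;
	int fd = open("data.bin", O_RDONLY);

	if (fd < 0)
		return 1;

	/* flags must be 0 for now, matching the in-kernel check above */
	if (syscall(__NR_cachestat, fd, &range, &cs, 0) == 0)
		printf("cached %llu dirty %llu writeback %llu evicted %llu recently %llu\n",
		       (unsigned long long)cs.nr_cache,
		       (unsigned long long)cs.nr_dirty,
		       (unsigned long long)cs.nr_writeback,
		       (unsigned long long)cs.nr_evicted,
		       (unsigned long long)cs.nr_recently_evicted);

	close(fd);
	return 0;
}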
linux-master
mm/filemap.c
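For context on the fault path in the mm/filemap.c record above (filemap_fault() and filemap_map_pages() wired up through generic_file_vm_ops), here is a minimal userspace sketch of what exercises it: mapping a regular file and touching a page that is not yet in the page cache. This is an illustrative example, not part of the kernel sources; the file name "data.bin" is hypothetical and error handling is kept minimal.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;
	int fd = open("data.bin", O_RDONLY);	/* hypothetical file */

	if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0)
		return 1;

	char *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/* First touch of an uncached page faults through filemap_fault(). */
	printf("first byte: %u\n", (unsigned char)p[0]);

	munmap(p, st.st_size);
	close(fd);
	return 0;
}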
// SPDX-License-Identifier: GPL-2.0 #include <linux/idr.h> #include <linux/slab.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/shrinker.h> #include <linux/memcontrol.h> /* defined in vmscan.c */ extern struct rw_semaphore shrinker_rwsem; extern struct list_head shrinker_list; static DEFINE_IDA(shrinker_debugfs_ida); static struct dentry *shrinker_debugfs_root; static unsigned long shrinker_count_objects(struct shrinker *shrinker, struct mem_cgroup *memcg, unsigned long *count_per_node) { unsigned long nr, total = 0; int nid; for_each_node(nid) { if (nid == 0 || (shrinker->flags & SHRINKER_NUMA_AWARE)) { struct shrink_control sc = { .gfp_mask = GFP_KERNEL, .nid = nid, .memcg = memcg, }; nr = shrinker->count_objects(shrinker, &sc); if (nr == SHRINK_EMPTY) nr = 0; } else { nr = 0; } count_per_node[nid] = nr; total += nr; } return total; } static int shrinker_debugfs_count_show(struct seq_file *m, void *v) { struct shrinker *shrinker = m->private; unsigned long *count_per_node; struct mem_cgroup *memcg; unsigned long total; bool memcg_aware; int ret, nid; count_per_node = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL); if (!count_per_node) return -ENOMEM; ret = down_read_killable(&shrinker_rwsem); if (ret) { kfree(count_per_node); return ret; } rcu_read_lock(); memcg_aware = shrinker->flags & SHRINKER_MEMCG_AWARE; memcg = mem_cgroup_iter(NULL, NULL, NULL); do { if (memcg && !mem_cgroup_online(memcg)) continue; total = shrinker_count_objects(shrinker, memcg_aware ? memcg : NULL, count_per_node); if (total) { seq_printf(m, "%lu", mem_cgroup_ino(memcg)); for_each_node(nid) seq_printf(m, " %lu", count_per_node[nid]); seq_putc(m, '\n'); } if (!memcg_aware) { mem_cgroup_iter_break(NULL, memcg); break; } if (signal_pending(current)) { mem_cgroup_iter_break(NULL, memcg); ret = -EINTR; break; } } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); rcu_read_unlock(); up_read(&shrinker_rwsem); kfree(count_per_node); return ret; } DEFINE_SHOW_ATTRIBUTE(shrinker_debugfs_count); static int shrinker_debugfs_scan_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return nonseekable_open(inode, file); } static ssize_t shrinker_debugfs_scan_write(struct file *file, const char __user *buf, size_t size, loff_t *pos) { struct shrinker *shrinker = file->private_data; unsigned long nr_to_scan = 0, ino, read_len; struct shrink_control sc = { .gfp_mask = GFP_KERNEL, }; struct mem_cgroup *memcg = NULL; int nid; char kbuf[72]; ssize_t ret; read_len = size < (sizeof(kbuf) - 1) ? 
size : (sizeof(kbuf) - 1); if (copy_from_user(kbuf, buf, read_len)) return -EFAULT; kbuf[read_len] = '\0'; if (sscanf(kbuf, "%lu %d %lu", &ino, &nid, &nr_to_scan) != 3) return -EINVAL; if (nid < 0 || nid >= nr_node_ids) return -EINVAL; if (nr_to_scan == 0) return size; if (shrinker->flags & SHRINKER_MEMCG_AWARE) { memcg = mem_cgroup_get_from_ino(ino); if (!memcg || IS_ERR(memcg)) return -ENOENT; if (!mem_cgroup_online(memcg)) { mem_cgroup_put(memcg); return -ENOENT; } } else if (ino != 0) { return -EINVAL; } ret = down_read_killable(&shrinker_rwsem); if (ret) { mem_cgroup_put(memcg); return ret; } sc.nid = nid; sc.memcg = memcg; sc.nr_to_scan = nr_to_scan; sc.nr_scanned = nr_to_scan; shrinker->scan_objects(shrinker, &sc); up_read(&shrinker_rwsem); mem_cgroup_put(memcg); return size; } static const struct file_operations shrinker_debugfs_scan_fops = { .owner = THIS_MODULE, .open = shrinker_debugfs_scan_open, .write = shrinker_debugfs_scan_write, }; int shrinker_debugfs_add(struct shrinker *shrinker) { struct dentry *entry; char buf[128]; int id; lockdep_assert_held(&shrinker_rwsem); /* debugfs isn't initialized yet, add debugfs entries later. */ if (!shrinker_debugfs_root) return 0; id = ida_alloc(&shrinker_debugfs_ida, GFP_KERNEL); if (id < 0) return id; shrinker->debugfs_id = id; snprintf(buf, sizeof(buf), "%s-%d", shrinker->name, id); /* create debugfs entry */ entry = debugfs_create_dir(buf, shrinker_debugfs_root); if (IS_ERR(entry)) { ida_free(&shrinker_debugfs_ida, id); return PTR_ERR(entry); } shrinker->debugfs_entry = entry; debugfs_create_file("count", 0440, entry, shrinker, &shrinker_debugfs_count_fops); debugfs_create_file("scan", 0220, entry, shrinker, &shrinker_debugfs_scan_fops); return 0; } int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...) { struct dentry *entry; char buf[128]; const char *new, *old; va_list ap; int ret = 0; va_start(ap, fmt); new = kvasprintf_const(GFP_KERNEL, fmt, ap); va_end(ap); if (!new) return -ENOMEM; down_write(&shrinker_rwsem); old = shrinker->name; shrinker->name = new; if (shrinker->debugfs_entry) { snprintf(buf, sizeof(buf), "%s-%d", shrinker->name, shrinker->debugfs_id); entry = debugfs_rename(shrinker_debugfs_root, shrinker->debugfs_entry, shrinker_debugfs_root, buf); if (IS_ERR(entry)) ret = PTR_ERR(entry); else shrinker->debugfs_entry = entry; } up_write(&shrinker_rwsem); kfree_const(old); return ret; } EXPORT_SYMBOL(shrinker_debugfs_rename); struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker, int *debugfs_id) { struct dentry *entry = shrinker->debugfs_entry; lockdep_assert_held(&shrinker_rwsem); kfree_const(shrinker->name); shrinker->name = NULL; *debugfs_id = entry ? shrinker->debugfs_id : -1; shrinker->debugfs_entry = NULL; return entry; } void shrinker_debugfs_remove(struct dentry *debugfs_entry, int debugfs_id) { debugfs_remove_recursive(debugfs_entry); ida_free(&shrinker_debugfs_ida, debugfs_id); } static int __init shrinker_debugfs_init(void) { struct shrinker *shrinker; struct dentry *dentry; int ret = 0; dentry = debugfs_create_dir("shrinker", NULL); if (IS_ERR(dentry)) return PTR_ERR(dentry); shrinker_debugfs_root = dentry; /* Create debugfs entries for shrinkers registered at boot */ down_write(&shrinker_rwsem); list_for_each_entry(shrinker, &shrinker_list, list) if (!shrinker->debugfs_entry) { ret = shrinker_debugfs_add(shrinker); if (ret) break; } up_write(&shrinker_rwsem); return ret; } late_initcall(shrinker_debugfs_init);
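/*
 * Illustrative sketch -- NOT part of mm/shrinker_debug.c.  A minimal
 * shrinker whose callbacks would become reachable through the debugfs
 * "count" and "scan" files created above.  Hedged assumptions: the
 * register_shrinker(shrinker, fmt, ...) naming API of kernels from this
 * era; the object counter and the name "demo" are hypothetical.
 */
#include <linux/module.h>
#include <linux/shrinker.h>
#include <linux/atomic.h>
#include <linux/minmax.h>

static atomic_long_t demo_objects = ATOMIC_LONG_INIT(128);

static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long nr = atomic_long_read(&demo_objects);

	/* An empty cache reports SHRINK_EMPTY, which the "count" file shows as 0. */
	return nr ? nr : SHRINK_EMPTY;
}

static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long nr = atomic_long_read(&demo_objects);
	unsigned long freed = min(nr, sc->nr_to_scan);

	atomic_long_sub(freed, &demo_objects);	/* pretend we reclaimed them */
	return freed;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};

static int __init demo_init(void)
{
	/* Appears as /sys/kernel/debug/shrinker/demo-<id>/{count,scan} */
	return register_shrinker(&demo_shrinker, "demo");
}

static void __exit demo_exit(void)
{
	unregister_shrinker(&demo_shrinker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");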
linux-master
mm/shrinker_debug.c
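To complement the scan interface in the mm/shrinker_debug.c record above: its write handler parses "<memcg ino> <nid> <nr_to_scan>" with sscanf(), and a shrinker that is not memcg-aware only accepts an inode of 0. The sketch below is illustrative only; the debugfs directory name "demo-0" is hypothetical, since the real "<name>-<id>" directory depends on which shrinker was registered.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/shrinker/demo-0/scan";
	const char *cmd = "0 0 128\n";	/* "<memcg ino> <nid> <nr_to_scan>" */
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}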
// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu-km.c - kernel memory based chunk allocation
 *
 * Copyright (C) 2010 SUSE Linux Products GmbH
 * Copyright (C) 2010 Tejun Heo <[email protected]>
 *
 * Chunks are allocated as a contiguous kernel memory using gfp
 * allocation. This is to be used on nommu architectures.
 *
 * To use percpu-km,
 *
 * - define CONFIG_NEED_PER_CPU_KM from the arch Kconfig.
 *
 * - CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK must not be defined. It's
 *   not compatible with PER_CPU_KM. EMBED_FIRST_CHUNK should work
 *   fine.
 *
 * - NUMA is not supported. When setting up the first chunk,
 *   @cpu_distance_fn should be NULL or report all CPUs to be nearer
 *   than or at LOCAL_DISTANCE.
 *
 * - It's best if the chunk size is power of two multiple of
 *   PAGE_SIZE. Because each chunk is allocated as a contiguous
 *   kernel memory block using alloc_pages(), memory will be wasted if
 *   chunk size is not aligned. percpu-km code will whine about it.
 */

#if defined(CONFIG_SMP) && defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#error "contiguous percpu allocation is incompatible with paged first chunk"
#endif

#include <linux/log2.h>

static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, int page_start, int page_end)
{
	/* nothing */
}

static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end, gfp_t gfp)
{
	return 0;
}

static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end)
{
	/* nada */
}

static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
	struct pcpu_chunk *chunk;
	struct page *pages;
	unsigned long flags;
	int i;

	chunk = pcpu_alloc_chunk(gfp);
	if (!chunk)
		return NULL;

	pages = alloc_pages(gfp, order_base_2(nr_pages));
	if (!pages) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	for (i = 0; i < nr_pages; i++)
		pcpu_set_page_chunk(nth_page(pages, i), chunk);

	chunk->data = pages;
	chunk->base_addr = page_address(pages);

	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_chunk_populated(chunk, 0, nr_pages);
	spin_unlock_irqrestore(&pcpu_lock, flags);

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(chunk->base_addr);

	return chunk;
}

static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;

	if (!chunk)
		return;

	pcpu_stats_chunk_dealloc();
	trace_percpu_destroy_chunk(chunk->base_addr);

	if (chunk->data)
		__free_pages(chunk->data, order_base_2(nr_pages));
	pcpu_free_chunk(chunk);
}

static struct page *pcpu_addr_to_page(void *addr)
{
	return virt_to_page(addr);
}

static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
	size_t nr_pages, alloc_pages;

	/* all units must be in a single group */
	if (ai->nr_groups != 1) {
		pr_crit("can't handle more than one group\n");
		return -EINVAL;
	}

	nr_pages = (ai->groups[0].nr_units * ai->unit_size) >> PAGE_SHIFT;
	alloc_pages = roundup_pow_of_two(nr_pages);

	if (alloc_pages > nr_pages)
		pr_warn("wasting %zu pages per chunk\n", alloc_pages - nr_pages);

	return 0;
}

static bool pcpu_should_reclaim_chunk(struct pcpu_chunk *chunk)
{
	return false;
}
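/*
 * Illustrative sketch -- NOT part of mm/percpu-km.c.  It shows the generic
 * percpu allocator API that the kernel-memory chunk backend above ends up
 * serving on !MMU configurations.  The counter and the function name are
 * hypothetical; the calls (alloc_percpu, this_cpu_inc, per_cpu_ptr,
 * free_percpu) are the standard ones.
 */
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/compiler.h>

static void __maybe_unused percpu_km_demo(void)
{
	unsigned long __percpu *hits;
	unsigned long total = 0;
	int cpu;

	hits = alloc_percpu(unsigned long);	/* carved out of a pcpu_chunk */
	if (!hits)
		return;

	this_cpu_inc(*hits);			/* cheap update on the local CPU */

	for_each_online_cpu(cpu)
		total += *per_cpu_ptr(hits, cpu);

	pr_info("percpu-km demo: %lu hits\n", total);
	free_percpu(hits);
}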
linux-master
mm/percpu-km.c
// SPDX-License-Identifier: GPL-2.0 /* * Slab allocator functions that are independent of the allocator strategy * * (C) 2012 Christoph Lameter <[email protected]> */ #include <linux/slab.h> #include <linux/mm.h> #include <linux/poison.h> #include <linux/interrupt.h> #include <linux/memory.h> #include <linux/cache.h> #include <linux/compiler.h> #include <linux/kfence.h> #include <linux/module.h> #include <linux/cpu.h> #include <linux/uaccess.h> #include <linux/seq_file.h> #include <linux/dma-mapping.h> #include <linux/swiotlb.h> #include <linux/proc_fs.h> #include <linux/debugfs.h> #include <linux/kasan.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/page.h> #include <linux/memcontrol.h> #include <linux/stackdepot.h> #include "internal.h" #include "slab.h" #define CREATE_TRACE_POINTS #include <trace/events/kmem.h> enum slab_state slab_state; LIST_HEAD(slab_caches); DEFINE_MUTEX(slab_mutex); struct kmem_cache *kmem_cache; static LIST_HEAD(slab_caches_to_rcu_destroy); static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work); static DECLARE_WORK(slab_caches_to_rcu_destroy_work, slab_caches_to_rcu_destroy_workfn); /* * Set of flags that will prevent slab merging */ #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \ SLAB_FAILSLAB | SLAB_NO_MERGE | kasan_never_merge()) #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \ SLAB_CACHE_DMA32 | SLAB_ACCOUNT) /* * Merge control. If this is set then no merging of slab caches will occur. */ static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT); static int __init setup_slab_nomerge(char *str) { slab_nomerge = true; return 1; } static int __init setup_slab_merge(char *str) { slab_nomerge = false; return 1; } #ifdef CONFIG_SLUB __setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0); __setup_param("slub_merge", slub_merge, setup_slab_merge, 0); #endif __setup("slab_nomerge", setup_slab_nomerge); __setup("slab_merge", setup_slab_merge); /* * Determine the size of a slab object */ unsigned int kmem_cache_size(struct kmem_cache *s) { return s->object_size; } EXPORT_SYMBOL(kmem_cache_size); #ifdef CONFIG_DEBUG_VM static int kmem_cache_sanity_check(const char *name, unsigned int size) { if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) { pr_err("kmem_cache_create(%s) integrity check failed\n", name); return -EINVAL; } WARN_ON(strchr(name, ' ')); /* It confuses parsers */ return 0; } #else static inline int kmem_cache_sanity_check(const char *name, unsigned int size) { return 0; } #endif /* * Figure out what the alignment of the objects will be given a set of * flags, a user specified alignment and the size of the objects. */ static unsigned int calculate_alignment(slab_flags_t flags, unsigned int align, unsigned int size) { /* * If the user wants hardware cache aligned objects then follow that * suggestion if the object is sufficiently large. * * The hardware cache alignment cannot override the specified * alignment though. If that is greater then use it. 
*/ if (flags & SLAB_HWCACHE_ALIGN) { unsigned int ralign; ralign = cache_line_size(); while (size <= ralign / 2) ralign /= 2; align = max(align, ralign); } align = max(align, arch_slab_minalign()); return ALIGN(align, sizeof(void *)); } /* * Find a mergeable slab cache */ int slab_unmergeable(struct kmem_cache *s) { if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE)) return 1; if (s->ctor) return 1; #ifdef CONFIG_HARDENED_USERCOPY if (s->usersize) return 1; #endif /* * We may have set a slab to be unmergeable during bootstrap. */ if (s->refcount < 0) return 1; return 0; } struct kmem_cache *find_mergeable(unsigned int size, unsigned int align, slab_flags_t flags, const char *name, void (*ctor)(void *)) { struct kmem_cache *s; if (slab_nomerge) return NULL; if (ctor) return NULL; size = ALIGN(size, sizeof(void *)); align = calculate_alignment(flags, align, size); size = ALIGN(size, align); flags = kmem_cache_flags(size, flags, name); if (flags & SLAB_NEVER_MERGE) return NULL; list_for_each_entry_reverse(s, &slab_caches, list) { if (slab_unmergeable(s)) continue; if (size > s->size) continue; if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME)) continue; /* * Check if alignment is compatible. * Courtesy of Adrian Drzewiecki */ if ((s->size & ~(align - 1)) != s->size) continue; if (s->size - size >= sizeof(void *)) continue; if (IS_ENABLED(CONFIG_SLAB) && align && (align > s->align || s->align % align)) continue; return s; } return NULL; } static struct kmem_cache *create_cache(const char *name, unsigned int object_size, unsigned int align, slab_flags_t flags, unsigned int useroffset, unsigned int usersize, void (*ctor)(void *), struct kmem_cache *root_cache) { struct kmem_cache *s; int err; if (WARN_ON(useroffset + usersize > object_size)) useroffset = usersize = 0; err = -ENOMEM; s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); if (!s) goto out; s->name = name; s->size = s->object_size = object_size; s->align = align; s->ctor = ctor; #ifdef CONFIG_HARDENED_USERCOPY s->useroffset = useroffset; s->usersize = usersize; #endif err = __kmem_cache_create(s, flags); if (err) goto out_free_cache; s->refcount = 1; list_add(&s->list, &slab_caches); return s; out_free_cache: kmem_cache_free(kmem_cache, s); out: return ERR_PTR(err); } /** * kmem_cache_create_usercopy - Create a cache with a region suitable * for copying to userspace * @name: A string which is used in /proc/slabinfo to identify this cache. * @size: The size of objects to be created in this cache. * @align: The required alignment for the objects. * @flags: SLAB flags * @useroffset: Usercopy region offset * @usersize: Usercopy region size * @ctor: A constructor for the objects. * * Cannot be called within a interrupt, but can be interrupted. * The @ctor is run when new pages are allocated by the cache. * * The flags are * * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) * to catch references to uninitialised memory. * * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check * for buffer overruns. * * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware * cacheline. This can be beneficial if you're counting cycles as closely * as davem. * * Return: a pointer to the cache on success, NULL on failure. 
*/ struct kmem_cache * kmem_cache_create_usercopy(const char *name, unsigned int size, unsigned int align, slab_flags_t flags, unsigned int useroffset, unsigned int usersize, void (*ctor)(void *)) { struct kmem_cache *s = NULL; const char *cache_name; int err; #ifdef CONFIG_SLUB_DEBUG /* * If no slub_debug was enabled globally, the static key is not yet * enabled by setup_slub_debug(). Enable it if the cache is being * created with any of the debugging flags passed explicitly. * It's also possible that this is the first cache created with * SLAB_STORE_USER and we should init stack_depot for it. */ if (flags & SLAB_DEBUG_FLAGS) static_branch_enable(&slub_debug_enabled); if (flags & SLAB_STORE_USER) stack_depot_init(); #endif mutex_lock(&slab_mutex); err = kmem_cache_sanity_check(name, size); if (err) { goto out_unlock; } /* Refuse requests with allocator specific flags */ if (flags & ~SLAB_FLAGS_PERMITTED) { err = -EINVAL; goto out_unlock; } /* * Some allocators will constraint the set of valid flags to a subset * of all flags. We expect them to define CACHE_CREATE_MASK in this * case, and we'll just provide them with a sanitized version of the * passed flags. */ flags &= CACHE_CREATE_MASK; /* Fail closed on bad usersize of useroffset values. */ if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) || WARN_ON(!usersize && useroffset) || WARN_ON(size < usersize || size - usersize < useroffset)) usersize = useroffset = 0; if (!usersize) s = __kmem_cache_alias(name, size, align, flags, ctor); if (s) goto out_unlock; cache_name = kstrdup_const(name, GFP_KERNEL); if (!cache_name) { err = -ENOMEM; goto out_unlock; } s = create_cache(cache_name, size, calculate_alignment(flags, align, size), flags, useroffset, usersize, ctor, NULL); if (IS_ERR(s)) { err = PTR_ERR(s); kfree_const(cache_name); } out_unlock: mutex_unlock(&slab_mutex); if (err) { if (flags & SLAB_PANIC) panic("%s: Failed to create slab '%s'. Error %d\n", __func__, name, err); else { pr_warn("%s(%s) failed with error %d\n", __func__, name, err); dump_stack(); } return NULL; } return s; } EXPORT_SYMBOL(kmem_cache_create_usercopy); /** * kmem_cache_create - Create a cache. * @name: A string which is used in /proc/slabinfo to identify this cache. * @size: The size of objects to be created in this cache. * @align: The required alignment for the objects. * @flags: SLAB flags * @ctor: A constructor for the objects. * * Cannot be called within a interrupt, but can be interrupted. * The @ctor is run when new pages are allocated by the cache. * * The flags are * * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) * to catch references to uninitialised memory. * * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check * for buffer overruns. * * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware * cacheline. This can be beneficial if you're counting cycles as closely * as davem. * * Return: a pointer to the cache on success, NULL on failure. */ struct kmem_cache * kmem_cache_create(const char *name, unsigned int size, unsigned int align, slab_flags_t flags, void (*ctor)(void *)) { return kmem_cache_create_usercopy(name, size, align, flags, 0, 0, ctor); } EXPORT_SYMBOL(kmem_cache_create); #ifdef SLAB_SUPPORTS_SYSFS /* * For a given kmem_cache, kmem_cache_destroy() should only be called * once or there will be a use-after-free problem. The actual deletion * and release of the kobject does not need slab_mutex or cpu_hotplug_lock * protection. So they are now done without holding those locks. 
* * Note that there will be a slight delay in the deletion of sysfs files * if kmem_cache_release() is called indrectly from a work function. */ static void kmem_cache_release(struct kmem_cache *s) { sysfs_slab_unlink(s); sysfs_slab_release(s); } #else static void kmem_cache_release(struct kmem_cache *s) { slab_kmem_cache_release(s); } #endif static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work) { LIST_HEAD(to_destroy); struct kmem_cache *s, *s2; /* * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the * @slab_caches_to_rcu_destroy list. The slab pages are freed * through RCU and the associated kmem_cache are dereferenced * while freeing the pages, so the kmem_caches should be freed only * after the pending RCU operations are finished. As rcu_barrier() * is a pretty slow operation, we batch all pending destructions * asynchronously. */ mutex_lock(&slab_mutex); list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy); mutex_unlock(&slab_mutex); if (list_empty(&to_destroy)) return; rcu_barrier(); list_for_each_entry_safe(s, s2, &to_destroy, list) { debugfs_slab_release(s); kfence_shutdown_cache(s); kmem_cache_release(s); } } static int shutdown_cache(struct kmem_cache *s) { /* free asan quarantined objects */ kasan_cache_shutdown(s); if (__kmem_cache_shutdown(s) != 0) return -EBUSY; list_del(&s->list); if (s->flags & SLAB_TYPESAFE_BY_RCU) { list_add_tail(&s->list, &slab_caches_to_rcu_destroy); schedule_work(&slab_caches_to_rcu_destroy_work); } else { kfence_shutdown_cache(s); debugfs_slab_release(s); } return 0; } void slab_kmem_cache_release(struct kmem_cache *s) { __kmem_cache_release(s); kfree_const(s->name); kmem_cache_free(kmem_cache, s); } void kmem_cache_destroy(struct kmem_cache *s) { int refcnt; bool rcu_set; if (unlikely(!s) || !kasan_check_byte(s)) return; cpus_read_lock(); mutex_lock(&slab_mutex); rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU; refcnt = --s->refcount; if (refcnt) goto out_unlock; WARN(shutdown_cache(s), "%s %s: Slab cache still has objects when called from %pS", __func__, s->name, (void *)_RET_IP_); out_unlock: mutex_unlock(&slab_mutex); cpus_read_unlock(); if (!refcnt && !rcu_set) kmem_cache_release(s); } EXPORT_SYMBOL(kmem_cache_destroy); /** * kmem_cache_shrink - Shrink a cache. * @cachep: The cache to shrink. * * Releases as many slabs as possible for a cache. * To help debugging, a zero exit status indicates all slabs were released. * * Return: %0 if all slabs were released, non-zero otherwise */ int kmem_cache_shrink(struct kmem_cache *cachep) { kasan_cache_shrink(cachep); return __kmem_cache_shrink(cachep); } EXPORT_SYMBOL(kmem_cache_shrink); bool slab_is_available(void) { return slab_state >= UP; } #ifdef CONFIG_PRINTK /** * kmem_valid_obj - does the pointer reference a valid slab object? * @object: pointer to query. * * Return: %true if the pointer is to a not-yet-freed object from * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer * is to an already-freed object, and %false otherwise. */ bool kmem_valid_obj(void *object) { struct folio *folio; /* Some arches consider ZERO_SIZE_PTR to be a valid address. 
*/ if (object < (void *)PAGE_SIZE || !virt_addr_valid(object)) return false; folio = virt_to_folio(object); return folio_test_slab(folio); } EXPORT_SYMBOL_GPL(kmem_valid_obj); static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) { if (__kfence_obj_info(kpp, object, slab)) return; __kmem_obj_info(kpp, object, slab); } /** * kmem_dump_obj - Print available slab provenance information * @object: slab object for which to find provenance information. * * This function uses pr_cont(), so that the caller is expected to have * printed out whatever preamble is appropriate. The provenance information * depends on the type of object and on how much debugging is enabled. * For a slab-cache object, the fact that it is a slab object is printed, * and, if available, the slab name, return address, and stack trace from * the allocation and last free path of that object. * * This function will splat if passed a pointer to a non-slab object. * If you are not sure what type of object you have, you should instead * use mem_dump_obj(). */ void kmem_dump_obj(void *object) { char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc"; int i; struct slab *slab; unsigned long ptroffset; struct kmem_obj_info kp = { }; if (WARN_ON_ONCE(!virt_addr_valid(object))) return; slab = virt_to_slab(object); if (WARN_ON_ONCE(!slab)) { pr_cont(" non-slab memory.\n"); return; } kmem_obj_info(&kp, object, slab); if (kp.kp_slab_cache) pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name); else pr_cont(" slab%s", cp); if (is_kfence_address(object)) pr_cont(" (kfence)"); if (kp.kp_objp) pr_cont(" start %px", kp.kp_objp); if (kp.kp_data_offset) pr_cont(" data offset %lu", kp.kp_data_offset); if (kp.kp_objp) { ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset; pr_cont(" pointer offset %lu", ptroffset); } if (kp.kp_slab_cache && kp.kp_slab_cache->object_size) pr_cont(" size %u", kp.kp_slab_cache->object_size); if (kp.kp_ret) pr_cont(" allocated at %pS\n", kp.kp_ret); else pr_cont("\n"); for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) { if (!kp.kp_stack[i]) break; pr_info(" %pS\n", kp.kp_stack[i]); } if (kp.kp_free_stack[0]) pr_cont(" Free path:\n"); for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) { if (!kp.kp_free_stack[i]) break; pr_info(" %pS\n", kp.kp_free_stack[i]); } } EXPORT_SYMBOL_GPL(kmem_dump_obj); #endif /* Create a cache during boot when no slab services are available yet */ void __init create_boot_cache(struct kmem_cache *s, const char *name, unsigned int size, slab_flags_t flags, unsigned int useroffset, unsigned int usersize) { int err; unsigned int align = ARCH_KMALLOC_MINALIGN; s->name = name; s->size = s->object_size = size; /* * For power of two sizes, guarantee natural alignment for kmalloc * caches, regardless of SL*B debugging options. */ if (is_power_of_2(size)) align = max(align, size); s->align = calculate_alignment(flags, align, size); #ifdef CONFIG_HARDENED_USERCOPY s->useroffset = useroffset; s->usersize = usersize; #endif err = __kmem_cache_create(s, flags); if (err) panic("Creation of kmalloc slab %s size=%u failed. 
Reason %d\n", name, size, err); s->refcount = -1; /* Exempt from merging for now */ } static struct kmem_cache *__init create_kmalloc_cache(const char *name, unsigned int size, slab_flags_t flags) { struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); if (!s) panic("Out of memory when creating slab %s\n", name); create_boot_cache(s, name, size, flags | SLAB_KMALLOC, 0, size); list_add(&s->list, &slab_caches); s->refcount = 1; return s; } struct kmem_cache * kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init = { /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ }; EXPORT_SYMBOL(kmalloc_caches); #ifdef CONFIG_RANDOM_KMALLOC_CACHES unsigned long random_kmalloc_seed __ro_after_init; EXPORT_SYMBOL(random_kmalloc_seed); #endif /* * Conversion table for small slabs sizes / 8 to the index in the * kmalloc array. This is necessary for slabs < 192 since we have non power * of two cache sizes there. The size of larger slabs can be determined using * fls. */ static u8 size_index[24] __ro_after_init = { 3, /* 8 */ 4, /* 16 */ 5, /* 24 */ 5, /* 32 */ 6, /* 40 */ 6, /* 48 */ 6, /* 56 */ 6, /* 64 */ 1, /* 72 */ 1, /* 80 */ 1, /* 88 */ 1, /* 96 */ 7, /* 104 */ 7, /* 112 */ 7, /* 120 */ 7, /* 128 */ 2, /* 136 */ 2, /* 144 */ 2, /* 152 */ 2, /* 160 */ 2, /* 168 */ 2, /* 176 */ 2, /* 184 */ 2 /* 192 */ }; static inline unsigned int size_index_elem(unsigned int bytes) { return (bytes - 1) / 8; } /* * Find the kmem_cache structure that serves a given size of * allocation */ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller) { unsigned int index; if (size <= 192) { if (!size) return ZERO_SIZE_PTR; index = size_index[size_index_elem(size)]; } else { if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE)) return NULL; index = fls(size - 1); } return kmalloc_caches[kmalloc_type(flags, caller)][index]; } size_t kmalloc_size_roundup(size_t size) { struct kmem_cache *c; /* Short-circuit the 0 size case. */ if (unlikely(size == 0)) return 0; /* Short-circuit saturated "too-large" case. */ if (unlikely(size == SIZE_MAX)) return SIZE_MAX; /* Above the smaller buckets, size is a multiple of page size. */ if (size > KMALLOC_MAX_CACHE_SIZE) return PAGE_SIZE << get_order(size); /* * The flags don't matter since size_index is common to all. * Neither does the caller for just getting ->object_size. */ c = kmalloc_slab(size, GFP_KERNEL, 0); return c ? 
c->object_size : 0; } EXPORT_SYMBOL(kmalloc_size_roundup); #ifdef CONFIG_ZONE_DMA #define KMALLOC_DMA_NAME(sz) .name[KMALLOC_DMA] = "dma-kmalloc-" #sz, #else #define KMALLOC_DMA_NAME(sz) #endif #ifdef CONFIG_MEMCG_KMEM #define KMALLOC_CGROUP_NAME(sz) .name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz, #else #define KMALLOC_CGROUP_NAME(sz) #endif #ifndef CONFIG_SLUB_TINY #define KMALLOC_RCL_NAME(sz) .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #sz, #else #define KMALLOC_RCL_NAME(sz) #endif #ifdef CONFIG_RANDOM_KMALLOC_CACHES #define __KMALLOC_RANDOM_CONCAT(a, b) a ## b #define KMALLOC_RANDOM_NAME(N, sz) __KMALLOC_RANDOM_CONCAT(KMA_RAND_, N)(sz) #define KMA_RAND_1(sz) .name[KMALLOC_RANDOM_START + 1] = "kmalloc-rnd-01-" #sz, #define KMA_RAND_2(sz) KMA_RAND_1(sz) .name[KMALLOC_RANDOM_START + 2] = "kmalloc-rnd-02-" #sz, #define KMA_RAND_3(sz) KMA_RAND_2(sz) .name[KMALLOC_RANDOM_START + 3] = "kmalloc-rnd-03-" #sz, #define KMA_RAND_4(sz) KMA_RAND_3(sz) .name[KMALLOC_RANDOM_START + 4] = "kmalloc-rnd-04-" #sz, #define KMA_RAND_5(sz) KMA_RAND_4(sz) .name[KMALLOC_RANDOM_START + 5] = "kmalloc-rnd-05-" #sz, #define KMA_RAND_6(sz) KMA_RAND_5(sz) .name[KMALLOC_RANDOM_START + 6] = "kmalloc-rnd-06-" #sz, #define KMA_RAND_7(sz) KMA_RAND_6(sz) .name[KMALLOC_RANDOM_START + 7] = "kmalloc-rnd-07-" #sz, #define KMA_RAND_8(sz) KMA_RAND_7(sz) .name[KMALLOC_RANDOM_START + 8] = "kmalloc-rnd-08-" #sz, #define KMA_RAND_9(sz) KMA_RAND_8(sz) .name[KMALLOC_RANDOM_START + 9] = "kmalloc-rnd-09-" #sz, #define KMA_RAND_10(sz) KMA_RAND_9(sz) .name[KMALLOC_RANDOM_START + 10] = "kmalloc-rnd-10-" #sz, #define KMA_RAND_11(sz) KMA_RAND_10(sz) .name[KMALLOC_RANDOM_START + 11] = "kmalloc-rnd-11-" #sz, #define KMA_RAND_12(sz) KMA_RAND_11(sz) .name[KMALLOC_RANDOM_START + 12] = "kmalloc-rnd-12-" #sz, #define KMA_RAND_13(sz) KMA_RAND_12(sz) .name[KMALLOC_RANDOM_START + 13] = "kmalloc-rnd-13-" #sz, #define KMA_RAND_14(sz) KMA_RAND_13(sz) .name[KMALLOC_RANDOM_START + 14] = "kmalloc-rnd-14-" #sz, #define KMA_RAND_15(sz) KMA_RAND_14(sz) .name[KMALLOC_RANDOM_START + 15] = "kmalloc-rnd-15-" #sz, #else // CONFIG_RANDOM_KMALLOC_CACHES #define KMALLOC_RANDOM_NAME(N, sz) #endif #define INIT_KMALLOC_INFO(__size, __short_size) \ { \ .name[KMALLOC_NORMAL] = "kmalloc-" #__short_size, \ KMALLOC_RCL_NAME(__short_size) \ KMALLOC_CGROUP_NAME(__short_size) \ KMALLOC_DMA_NAME(__short_size) \ KMALLOC_RANDOM_NAME(RANDOM_KMALLOC_CACHES_NR, __short_size) \ .size = __size, \ } /* * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time. * kmalloc_index() supports up to 2^21=2MB, so the final entry of the table is * kmalloc-2M. */ const struct kmalloc_info_struct kmalloc_info[] __initconst = { INIT_KMALLOC_INFO(0, 0), INIT_KMALLOC_INFO(96, 96), INIT_KMALLOC_INFO(192, 192), INIT_KMALLOC_INFO(8, 8), INIT_KMALLOC_INFO(16, 16), INIT_KMALLOC_INFO(32, 32), INIT_KMALLOC_INFO(64, 64), INIT_KMALLOC_INFO(128, 128), INIT_KMALLOC_INFO(256, 256), INIT_KMALLOC_INFO(512, 512), INIT_KMALLOC_INFO(1024, 1k), INIT_KMALLOC_INFO(2048, 2k), INIT_KMALLOC_INFO(4096, 4k), INIT_KMALLOC_INFO(8192, 8k), INIT_KMALLOC_INFO(16384, 16k), INIT_KMALLOC_INFO(32768, 32k), INIT_KMALLOC_INFO(65536, 64k), INIT_KMALLOC_INFO(131072, 128k), INIT_KMALLOC_INFO(262144, 256k), INIT_KMALLOC_INFO(524288, 512k), INIT_KMALLOC_INFO(1048576, 1M), INIT_KMALLOC_INFO(2097152, 2M) }; /* * Patch up the size_index table if we have strange large alignment * requirements for the kmalloc array. This is only the case for * MIPS it seems. The standard arches will not generate any code here. 
* * Largest permitted alignment is 256 bytes due to the way we * handle the index determination for the smaller caches. * * Make sure that nothing crazy happens if someone starts tinkering * around with ARCH_KMALLOC_MINALIGN */ void __init setup_kmalloc_cache_index_table(void) { unsigned int i; BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 || !is_power_of_2(KMALLOC_MIN_SIZE)); for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) { unsigned int elem = size_index_elem(i); if (elem >= ARRAY_SIZE(size_index)) break; size_index[elem] = KMALLOC_SHIFT_LOW; } if (KMALLOC_MIN_SIZE >= 64) { /* * The 96 byte sized cache is not used if the alignment * is 64 byte. */ for (i = 64 + 8; i <= 96; i += 8) size_index[size_index_elem(i)] = 7; } if (KMALLOC_MIN_SIZE >= 128) { /* * The 192 byte sized cache is not used if the alignment * is 128 byte. Redirect kmalloc to use the 256 byte cache * instead. */ for (i = 128 + 8; i <= 192; i += 8) size_index[size_index_elem(i)] = 8; } } static unsigned int __kmalloc_minalign(void) { if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) && is_swiotlb_allocated()) return ARCH_KMALLOC_MINALIGN; return dma_get_cache_alignment(); } void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags) { unsigned int minalign = __kmalloc_minalign(); unsigned int aligned_size = kmalloc_info[idx].size; int aligned_idx = idx; if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) { flags |= SLAB_RECLAIM_ACCOUNT; } else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) { if (mem_cgroup_kmem_disabled()) { kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx]; return; } flags |= SLAB_ACCOUNT; } else if (IS_ENABLED(CONFIG_ZONE_DMA) && (type == KMALLOC_DMA)) { flags |= SLAB_CACHE_DMA; } #ifdef CONFIG_RANDOM_KMALLOC_CACHES if (type >= KMALLOC_RANDOM_START && type <= KMALLOC_RANDOM_END) flags |= SLAB_NO_MERGE; #endif /* * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for * KMALLOC_NORMAL caches. */ if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL)) flags |= SLAB_NO_MERGE; if (minalign > ARCH_KMALLOC_MINALIGN) { aligned_size = ALIGN(aligned_size, minalign); aligned_idx = __kmalloc_index(aligned_size, false); } if (!kmalloc_caches[type][aligned_idx]) kmalloc_caches[type][aligned_idx] = create_kmalloc_cache( kmalloc_info[aligned_idx].name[type], aligned_size, flags); if (idx != aligned_idx) kmalloc_caches[type][idx] = kmalloc_caches[type][aligned_idx]; } /* * Create the kmalloc array. Some of the regular kmalloc arrays * may already have been created because they were needed to * enable allocations for slab creation. */ void __init create_kmalloc_caches(slab_flags_t flags) { int i; enum kmalloc_cache_type type; /* * Including KMALLOC_CGROUP if CONFIG_MEMCG_KMEM defined */ for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++) { for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) { if (!kmalloc_caches[type][i]) new_kmalloc_cache(i, type, flags); /* * Caches that are not of the two-to-the-power-of size. 
* These have to be created immediately after the * earlier power of two caches */ if (KMALLOC_MIN_SIZE <= 32 && i == 6 && !kmalloc_caches[type][1]) new_kmalloc_cache(1, type, flags); if (KMALLOC_MIN_SIZE <= 64 && i == 7 && !kmalloc_caches[type][2]) new_kmalloc_cache(2, type, flags); } } #ifdef CONFIG_RANDOM_KMALLOC_CACHES random_kmalloc_seed = get_random_u64(); #endif /* Kmalloc array is now usable */ slab_state = UP; } void free_large_kmalloc(struct folio *folio, void *object) { unsigned int order = folio_order(folio); if (WARN_ON_ONCE(order == 0)) pr_warn_once("object pointer: 0x%p\n", object); kmemleak_free(object); kasan_kfree_large(object); kmsan_kfree_large(object); mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order)); __free_pages(folio_page(folio, 0), order); } static void *__kmalloc_large_node(size_t size, gfp_t flags, int node); static __always_inline void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) { struct kmem_cache *s; void *ret; if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { ret = __kmalloc_large_node(size, flags, node); trace_kmalloc(caller, ret, size, PAGE_SIZE << get_order(size), flags, node); return ret; } s = kmalloc_slab(size, flags, caller); if (unlikely(ZERO_OR_NULL_PTR(s))) return s; ret = __kmem_cache_alloc_node(s, flags, node, size, caller); ret = kasan_kmalloc(s, ret, size, flags); trace_kmalloc(caller, ret, size, s->size, flags, node); return ret; } void *__kmalloc_node(size_t size, gfp_t flags, int node) { return __do_kmalloc_node(size, flags, node, _RET_IP_); } EXPORT_SYMBOL(__kmalloc_node); void *__kmalloc(size_t size, gfp_t flags) { return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_); } EXPORT_SYMBOL(__kmalloc); void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node, unsigned long caller) { return __do_kmalloc_node(size, flags, node, caller); } EXPORT_SYMBOL(__kmalloc_node_track_caller); /** * kfree - free previously allocated memory * @object: pointer returned by kmalloc() or kmem_cache_alloc() * * If @object is NULL, no operation is performed. */ void kfree(const void *object) { struct folio *folio; struct slab *slab; struct kmem_cache *s; trace_kfree(_RET_IP_, object); if (unlikely(ZERO_OR_NULL_PTR(object))) return; folio = virt_to_folio(object); if (unlikely(!folio_test_slab(folio))) { free_large_kmalloc(folio, (void *)object); return; } slab = folio_slab(folio); s = slab->slab_cache; __kmem_cache_free(s, (void *)object, _RET_IP_); } EXPORT_SYMBOL(kfree); /** * __ksize -- Report full size of underlying allocation * @object: pointer to the object * * This should only be used internally to query the true size of allocations. * It is not meant to be a way to discover the usable size of an allocation * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS, * and/or FORTIFY_SOURCE. 
* * Return: size of the actual memory used by @object in bytes */ size_t __ksize(const void *object) { struct folio *folio; if (unlikely(object == ZERO_SIZE_PTR)) return 0; folio = virt_to_folio(object); if (unlikely(!folio_test_slab(folio))) { if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE)) return 0; if (WARN_ON(object != folio_address(folio))) return 0; return folio_size(folio); } #ifdef CONFIG_SLUB_DEBUG skip_orig_size_check(folio_slab(folio)->slab_cache, object); #endif return slab_ksize(folio_slab(folio)->slab_cache); } void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) { void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE, size, _RET_IP_); trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE); ret = kasan_kmalloc(s, ret, size, gfpflags); return ret; } EXPORT_SYMBOL(kmalloc_trace); void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, int node, size_t size) { void *ret = __kmem_cache_alloc_node(s, gfpflags, node, size, _RET_IP_); trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node); ret = kasan_kmalloc(s, ret, size, gfpflags); return ret; } EXPORT_SYMBOL(kmalloc_node_trace); gfp_t kmalloc_fix_flags(gfp_t flags) { gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK; flags &= ~GFP_SLAB_BUG_MASK; pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n", invalid_mask, &invalid_mask, flags, &flags); dump_stack(); return flags; } /* * To avoid unnecessary overhead, we pass through large allocation requests * directly to the page allocator. We use __GFP_COMP, because we will need to * know the allocation order to free the pages properly in kfree. */ static void *__kmalloc_large_node(size_t size, gfp_t flags, int node) { struct page *page; void *ptr = NULL; unsigned int order = get_order(size); if (unlikely(flags & GFP_SLAB_BUG_MASK)) flags = kmalloc_fix_flags(flags); flags |= __GFP_COMP; page = alloc_pages_node(node, flags, order); if (page) { ptr = page_address(page); mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, PAGE_SIZE << order); } ptr = kasan_kmalloc_large(ptr, size, flags); /* As ptr might get tagged, call kmemleak hook after KASAN. 
*/ kmemleak_alloc(ptr, size, 1, flags); kmsan_kmalloc_large(ptr, size, flags); return ptr; } void *kmalloc_large(size_t size, gfp_t flags) { void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE); trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), flags, NUMA_NO_NODE); return ret; } EXPORT_SYMBOL(kmalloc_large); void *kmalloc_large_node(size_t size, gfp_t flags, int node) { void *ret = __kmalloc_large_node(size, flags, node); trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), flags, node); return ret; } EXPORT_SYMBOL(kmalloc_large_node); #ifdef CONFIG_SLAB_FREELIST_RANDOM /* Randomize a generic freelist */ static void freelist_randomize(unsigned int *list, unsigned int count) { unsigned int rand; unsigned int i; for (i = 0; i < count; i++) list[i] = i; /* Fisher-Yates shuffle */ for (i = count - 1; i > 0; i--) { rand = get_random_u32_below(i + 1); swap(list[i], list[rand]); } } /* Create a random sequence per cache */ int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count, gfp_t gfp) { if (count < 2 || cachep->random_seq) return 0; cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp); if (!cachep->random_seq) return -ENOMEM; freelist_randomize(cachep->random_seq, count); return 0; } /* Destroy the per-cache random freelist sequence */ void cache_random_seq_destroy(struct kmem_cache *cachep) { kfree(cachep->random_seq); cachep->random_seq = NULL; } #endif /* CONFIG_SLAB_FREELIST_RANDOM */ #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG) #ifdef CONFIG_SLAB #define SLABINFO_RIGHTS (0600) #else #define SLABINFO_RIGHTS (0400) #endif static void print_slabinfo_header(struct seq_file *m) { /* * Output format version, so at least we can change it * without _too_ many complaints. */ #ifdef CONFIG_DEBUG_SLAB seq_puts(m, "slabinfo - version: 2.1 (statistics)\n"); #else seq_puts(m, "slabinfo - version: 2.1\n"); #endif seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>"); seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); #ifdef CONFIG_DEBUG_SLAB seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>"); seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); #endif seq_putc(m, '\n'); } static void *slab_start(struct seq_file *m, loff_t *pos) { mutex_lock(&slab_mutex); return seq_list_start(&slab_caches, *pos); } static void *slab_next(struct seq_file *m, void *p, loff_t *pos) { return seq_list_next(p, &slab_caches, pos); } static void slab_stop(struct seq_file *m, void *p) { mutex_unlock(&slab_mutex); } static void cache_show(struct kmem_cache *s, struct seq_file *m) { struct slabinfo sinfo; memset(&sinfo, 0, sizeof(sinfo)); get_slabinfo(s, &sinfo); seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, sinfo.active_objs, sinfo.num_objs, s->size, sinfo.objects_per_slab, (1 << sinfo.cache_order)); seq_printf(m, " : tunables %4u %4u %4u", sinfo.limit, sinfo.batchcount, sinfo.shared); seq_printf(m, " : slabdata %6lu %6lu %6lu", sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail); slabinfo_show_stats(m, s); seq_putc(m, '\n'); } static int slab_show(struct seq_file *m, void *p) { struct kmem_cache *s = list_entry(p, struct kmem_cache, list); if (p == slab_caches.next) print_slabinfo_header(m); cache_show(s, m); return 0; } void dump_unreclaimable_slab(void) { struct kmem_cache *s; struct slabinfo sinfo; /* * Here acquiring slab_mutex 
is risky since we don't prefer to get * sleep in oom path. But, without mutex hold, it may introduce a * risk of crash. * Use mutex_trylock to protect the list traverse, dump nothing * without acquiring the mutex. */ if (!mutex_trylock(&slab_mutex)) { pr_warn("excessive unreclaimable slab but cannot dump stats\n"); return; } pr_info("Unreclaimable slab info:\n"); pr_info("Name Used Total\n"); list_for_each_entry(s, &slab_caches, list) { if (s->flags & SLAB_RECLAIM_ACCOUNT) continue; get_slabinfo(s, &sinfo); if (sinfo.num_objs > 0) pr_info("%-17s %10luKB %10luKB\n", s->name, (sinfo.active_objs * s->size) / 1024, (sinfo.num_objs * s->size) / 1024); } mutex_unlock(&slab_mutex); } /* * slabinfo_op - iterator that generates /proc/slabinfo * * Output layout: * cache-name * num-active-objs * total-objs * object size * num-active-slabs * total-slabs * num-pages-per-slab * + further values on SMP and with statistics enabled */ static const struct seq_operations slabinfo_op = { .start = slab_start, .next = slab_next, .stop = slab_stop, .show = slab_show, }; static int slabinfo_open(struct inode *inode, struct file *file) { return seq_open(file, &slabinfo_op); } static const struct proc_ops slabinfo_proc_ops = { .proc_flags = PROC_ENTRY_PERMANENT, .proc_open = slabinfo_open, .proc_read = seq_read, .proc_write = slabinfo_write, .proc_lseek = seq_lseek, .proc_release = seq_release, }; static int __init slab_proc_init(void) { proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops); return 0; } module_init(slab_proc_init); #endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */ static __always_inline __realloc_size(2) void * __do_krealloc(const void *p, size_t new_size, gfp_t flags) { void *ret; size_t ks; /* Check for double-free before calling ksize. */ if (likely(!ZERO_OR_NULL_PTR(p))) { if (!kasan_check_byte(p)) return NULL; ks = ksize(p); } else ks = 0; /* If the object still fits, repoison it precisely. */ if (ks >= new_size) { p = kasan_krealloc((void *)p, new_size, flags); return (void *)p; } ret = kmalloc_track_caller(new_size, flags); if (ret && p) { /* Disable KASAN checks as the object's redzone is accessed. */ kasan_disable_current(); memcpy(ret, kasan_reset_tag(p), ks); kasan_enable_current(); } return ret; } /** * krealloc - reallocate memory. The contents will remain unchanged. * @p: object to reallocate memory for. * @new_size: how many bytes of memory are required. * @flags: the type of memory to allocate. * * The contents of the object pointed to are preserved up to the * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored). * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size * is 0 and @p is not a %NULL pointer, the object pointed to is freed. * * Return: pointer to the allocated memory or %NULL in case of error */ void *krealloc(const void *p, size_t new_size, gfp_t flags) { void *ret; if (unlikely(!new_size)) { kfree(p); return ZERO_SIZE_PTR; } ret = __do_krealloc(p, new_size, flags); if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret)) kfree(p); return ret; } EXPORT_SYMBOL(krealloc); /** * kfree_sensitive - Clear sensitive information in memory before freeing * @p: object to free memory of * * The memory of the object @p points to is zeroed before freed. * If @p is %NULL, kfree_sensitive() does nothing. * * Note: this function zeroes the whole allocated buffer which can be a good * deal bigger than the requested buffer size passed to kmalloc(). So be * careful when using this function in performance sensitive code. 
*/ void kfree_sensitive(const void *p) { size_t ks; void *mem = (void *)p; ks = ksize(mem); if (ks) { kasan_unpoison_range(mem, ks); memzero_explicit(mem, ks); } kfree(mem); } EXPORT_SYMBOL(kfree_sensitive); size_t ksize(const void *objp) { /* * We need to first check that the pointer to the object is valid. * The KASAN report printed from ksize() is more useful, then when * it's printed later when the behaviour could be undefined due to * a potential use-after-free or double-free. * * We use kasan_check_byte(), which is supported for the hardware * tag-based KASAN mode, unlike kasan_check_read/write(). * * If the pointed to memory is invalid, we return 0 to avoid users of * ksize() writing to and potentially corrupting the memory region. * * We want to perform the check before __ksize(), to avoid potentially * crashing in __ksize() due to accessing invalid metadata. */ if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp)) return 0; return kfence_ksize(objp) ?: __ksize(objp); } EXPORT_SYMBOL(ksize); /* Tracepoints definitions. */ EXPORT_TRACEPOINT_SYMBOL(kmalloc); EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc); EXPORT_TRACEPOINT_SYMBOL(kfree); EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free); int should_failslab(struct kmem_cache *s, gfp_t gfpflags) { if (__should_failslab(s, gfpflags)) return -ENOMEM; return 0; } ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
linux-master
mm/slab_common.c
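The kmalloc_info[] table and setup_kmalloc_cache_index_table() above encode one rule: requests are served from a fixed set of size classes, with dedicated 96- and 192-byte caches sitting between the powers of two. The standalone userspace sketch below illustrates how a request would round up to such a class; it is an approximation written for this note, not kernel code — MODEL_MIN_SIZE, MODEL_MAX_CACHE and model_size_roundup() are invented names, and the DMA/cgroup/reclaim/random cache variants are deliberately ignored.

/*
 * Illustrative userspace model of kmalloc-style size-class rounding.
 * Assumes an 8-byte minimum class and a 2 MB largest cached class,
 * mirroring the kmalloc_info[] entries shown above.
 */
#include <stdio.h>
#include <stddef.h>

#define MODEL_MIN_SIZE  8u          /* assumed KMALLOC_MIN_SIZE            */
#define MODEL_MAX_CACHE (2u << 20)  /* assumed largest cached size (2 MB)  */

static size_t model_size_roundup(size_t size)
{
        size_t class_size;

        if (size == 0 || size > MODEL_MAX_CACHE)
                return 0;           /* outside the cached range in this toy model */

        if (size > 64 && size <= 96)
                return 96;          /* dedicated kmalloc-96 style cache    */
        if (size > 128 && size <= 192)
                return 192;         /* dedicated kmalloc-192 style cache   */

        for (class_size = MODEL_MIN_SIZE; class_size < size; class_size <<= 1)
                ;                   /* next power of two >= size           */
        return class_size;
}

int main(void)
{
        size_t probes[] = { 1, 24, 70, 100, 192, 200, 4097 };

        for (size_t i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
                printf("request %5zu -> class %5zu\n",
                       probes[i], model_size_roundup(probes[i]));
        return 0;
}

Running the model prints, for example, that a 70-byte request lands in the 96-byte class and a 100-byte request in the 128-byte class, which is the behaviour the size_index patching above preserves even when ARCH_KMALLOC_MINALIGN disables the 96/192 caches.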
// SPDX-License-Identifier: GPL-2.0 /* * mm/pgtable-generic.c * * Generic pgtable methods declared in linux/pgtable.h * * Copyright (C) 2010 Linus Torvalds */ #include <linux/pagemap.h> #include <linux/hugetlb.h> #include <linux/pgtable.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/mm_inline.h> #include <asm/pgalloc.h> #include <asm/tlb.h> /* * If a p?d_bad entry is found while walking page tables, report * the error, before resetting entry to p?d_none. Usually (but * very seldom) called out from the p?d_none_or_clear_bad macros. */ void pgd_clear_bad(pgd_t *pgd) { pgd_ERROR(*pgd); pgd_clear(pgd); } #ifndef __PAGETABLE_P4D_FOLDED void p4d_clear_bad(p4d_t *p4d) { p4d_ERROR(*p4d); p4d_clear(p4d); } #endif #ifndef __PAGETABLE_PUD_FOLDED void pud_clear_bad(pud_t *pud) { pud_ERROR(*pud); pud_clear(pud); } #endif /* * Note that the pmd variant below can't be stub'ed out just as for p4d/pud * above. pmd folding is special and typically pmd_* macros refer to upper * level even when folded */ void pmd_clear_bad(pmd_t *pmd) { pmd_ERROR(*pmd); pmd_clear(pmd); } #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS /* * Only sets the access flags (dirty, accessed), as well as write * permission. Furthermore, we know it always gets set to a "more * permissive" setting, which allows most architectures to optimize * this. We return whether the PTE actually changed, which in turn * instructs the caller to do things like update__mmu_cache. This * used to be done in the caller, but sparc needs minor faults to * force that call on sun4c so we changed this macro slightly */ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty) { int changed = !pte_same(ptep_get(ptep), entry); if (changed) { set_pte_at(vma->vm_mm, address, ptep, entry); flush_tlb_fix_spurious_fault(vma, address, ptep); } return changed; } #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { int young; young = ptep_test_and_clear_young(vma, address, ptep); if (young) flush_tlb_page(vma, address); return young; } #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { struct mm_struct *mm = (vma)->vm_mm; pte_t pte; pte = ptep_get_and_clear(mm, address, ptep); if (pte_accessible(mm, pte)) flush_tlb_page(vma, address); return pte; } #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty) { int changed = !pmd_same(*pmdp, entry); VM_BUG_ON(address & ~HPAGE_PMD_MASK); if (changed) { set_pmd_at(vma->vm_mm, address, pmdp, entry); flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); } return changed; } #endif #ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { int young; VM_BUG_ON(address & ~HPAGE_PMD_MASK); young = pmdp_test_and_clear_young(vma, address, pmdp); if (young) flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); return young; } #endif #ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { pmd_t pmd; VM_BUG_ON(address & ~HPAGE_PMD_MASK); VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp)); pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); 
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); return pmd; } #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address, pud_t *pudp) { pud_t pud; VM_BUG_ON(address & ~HPAGE_PUD_MASK); VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp)); pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp); flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE); return pud; } #endif #endif #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, pgtable_t pgtable) { assert_spin_locked(pmd_lockptr(mm, pmdp)); /* FIFO */ if (!pmd_huge_pte(mm, pmdp)) INIT_LIST_HEAD(&pgtable->lru); else list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru); pmd_huge_pte(mm, pmdp) = pgtable; } #endif #ifndef __HAVE_ARCH_PGTABLE_WITHDRAW /* no "address" argument so destroys page coloring of some arch */ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) { pgtable_t pgtable; assert_spin_locked(pmd_lockptr(mm, pmdp)); /* FIFO */ pgtable = pmd_huge_pte(mm, pmdp); pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru, struct page, lru); if (pmd_huge_pte(mm, pmdp)) list_del(&pgtable->lru); return pgtable; } #endif #ifndef __HAVE_ARCH_PMDP_INVALIDATE pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp)); flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); return old; } #endif #ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { return pmdp_invalidate(vma, address, pmdp); } #endif #ifndef pmdp_collapse_flush pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { /* * pmd and hugepage pte format are same. So we could * use the same function. */ pmd_t pmd; VM_BUG_ON(address & ~HPAGE_PMD_MASK); VM_BUG_ON(pmd_trans_huge(*pmdp)); pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); /* collapse entails shooting down ptes not pmd */ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); return pmd; } #endif /* arch define pte_free_defer in asm/pgalloc.h for its own implementation */ #ifndef pte_free_defer static void pte_free_now(struct rcu_head *head) { struct page *page; page = container_of(head, struct page, rcu_head); pte_free(NULL /* mm not passed and not used */, (pgtable_t)page); } void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable) { struct page *page; page = pgtable; call_rcu(&page->rcu_head, pte_free_now); } #endif /* pte_free_defer */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #if defined(CONFIG_GUP_GET_PXX_LOW_HIGH) && \ (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RCU)) /* * See the comment above ptep_get_lockless() in include/linux/pgtable.h: * the barriers in pmdp_get_lockless() cannot guarantee that the value in * pmd_high actually belongs with the value in pmd_low; but holding interrupts * off blocks the TLB flush between present updates, which guarantees that a * successful __pte_offset_map() points to a page from matched halves. 
*/ static unsigned long pmdp_get_lockless_start(void) { unsigned long irqflags; local_irq_save(irqflags); return irqflags; } static void pmdp_get_lockless_end(unsigned long irqflags) { local_irq_restore(irqflags); } #else static unsigned long pmdp_get_lockless_start(void) { return 0; } static void pmdp_get_lockless_end(unsigned long irqflags) { } #endif pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp) { unsigned long irqflags; pmd_t pmdval; rcu_read_lock(); irqflags = pmdp_get_lockless_start(); pmdval = pmdp_get_lockless(pmd); pmdp_get_lockless_end(irqflags); if (pmdvalp) *pmdvalp = pmdval; if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval))) goto nomap; if (unlikely(pmd_trans_huge(pmdval) || pmd_devmap(pmdval))) goto nomap; if (unlikely(pmd_bad(pmdval))) { pmd_clear_bad(pmd); goto nomap; } return __pte_map(&pmdval, addr); nomap: rcu_read_unlock(); return NULL; } pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, spinlock_t **ptlp) { pmd_t pmdval; pte_t *pte; pte = __pte_offset_map(pmd, addr, &pmdval); if (likely(pte)) *ptlp = pte_lockptr(mm, &pmdval); return pte; } /* * pte_offset_map_lock(mm, pmd, addr, ptlp), and its internal implementation * __pte_offset_map_lock() below, is usually called with the pmd pointer for * addr, reached by walking down the mm's pgd, p4d, pud for addr: either while * holding mmap_lock or vma lock for read or for write; or in truncate or rmap * context, while holding file's i_mmap_lock or anon_vma lock for read (or for * write). In a few cases, it may be used with pmd pointing to a pmd_t already * copied to or constructed on the stack. * * When successful, it returns the pte pointer for addr, with its page table * kmapped if necessary (when CONFIG_HIGHPTE), and locked against concurrent * modification by software, with a pointer to that spinlock in ptlp (in some * configs mm->page_table_lock, in SPLIT_PTLOCK configs a spinlock in table's * struct page). pte_unmap_unlock(pte, ptl) to unlock and unmap afterwards. * * But it is unsuccessful, returning NULL with *ptlp unchanged, if there is no * page table at *pmd: if, for example, the page table has just been removed, * or replaced by the huge pmd of a THP. (When successful, *pmd is rechecked * after acquiring the ptlock, and retried internally if it changed: so that a * page table can be safely removed or replaced by THP while holding its lock.) * * pte_offset_map(pmd, addr), and its internal helper __pte_offset_map() above, * just returns the pte pointer for addr, its page table kmapped if necessary; * or NULL if there is no page table at *pmd. It does not attempt to lock the * page table, so cannot normally be used when the page table is to be updated, * or when entries read must be stable. But it does take rcu_read_lock(): so * that even when page table is racily removed, it remains a valid though empty * and disconnected table. Until pte_unmap(pte) unmaps and rcu_read_unlock()s * afterwards. * * pte_offset_map_nolock(mm, pmd, addr, ptlp), above, is like pte_offset_map(); * but when successful, it also outputs a pointer to the spinlock in ptlp - as * pte_offset_map_lock() does, but in this case without locking it. This helps * the caller to avoid a later pte_lockptr(mm, *pmd), which might by that time * act on a changed *pmd: pte_offset_map_nolock() provides the correct spinlock * pointer for the page table that it returns. 
In principle, the caller should * recheck *pmd once the lock is taken; in practice, no callsite needs that - * either the mmap_lock for write, or pte_same() check on contents, is enough. * * Note that free_pgtables(), used after unmapping detached vmas, or when * exiting the whole mm, does not take page table lock before freeing a page * table, and may not use RCU at all: "outsiders" like khugepaged should avoid * pte_offset_map() and co once the vma is detached from mm or mm_users is zero. */ pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, spinlock_t **ptlp) { spinlock_t *ptl; pmd_t pmdval; pte_t *pte; again: pte = __pte_offset_map(pmd, addr, &pmdval); if (unlikely(!pte)) return pte; ptl = pte_lockptr(mm, &pmdval); spin_lock(ptl); if (likely(pmd_same(pmdval, pmdp_get_lockless(pmd)))) { *ptlp = ptl; return pte; } pte_unmap_unlock(pte, ptl); goto again; }
linux-master
mm/pgtable-generic.c
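__pte_offset_map_lock() above follows a snapshot, lock, revalidate pattern: read the pmd locklessly, take the spinlock that belongs to that snapshot, then confirm with pmd_same() that the entry has not changed underneath, retrying if it has. The userspace sketch below models only that retry loop under stated assumptions: it uses pthreads and C11 atomics, struct table / current_table / table_lock_current() are names made up for the example, and object lifetime is handled by a static table rather than the RCU protection the kernel relies on.

/*
 * Minimal sketch of the lock-then-revalidate retry loop used by
 * __pte_offset_map_lock(): lock whatever table is current, but only
 * keep the lock if the table is still current once it is held.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct table {
        pthread_mutex_t lock;
        int entries[16];
};

static _Atomic(struct table *) current_table;

static struct table *table_lock_current(void)
{
        struct table *t;

        for (;;) {
                t = atomic_load(&current_table);      /* lockless snapshot      */
                if (!t)
                        return NULL;                  /* table has been removed */
                pthread_mutex_lock(&t->lock);
                if (t == atomic_load(&current_table)) /* analogue of pmd_same() */
                        return t;                     /* still valid, locked    */
                pthread_mutex_unlock(&t->lock);       /* lost a race: retry     */
        }
}

int main(void)
{
        static struct table t = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct table *locked;

        atomic_store(&current_table, &t);
        locked = table_lock_current();
        if (locked) {
                locked->entries[0] = 42;              /* safe under the lock    */
                pthread_mutex_unlock(&locked->lock);
        }
        printf("entry[0] = %d\n", t.entries[0]);
        return 0;
}

The design point being modelled is that the lock pointer is derived from the snapshot, so a successful recheck proves both that the right lock is held and that the table was not replaced (for example by a THP collapse) in the unlocked window.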
// SPDX-License-Identifier: GPL-2.0 /* * CMA DebugFS Interface * * Copyright (c) 2015 Sasha Levin <[email protected]> */ #include <linux/debugfs.h> #include <linux/cma.h> #include <linux/list.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mm_types.h> #include "cma.h" struct cma_mem { struct hlist_node node; struct page *p; unsigned long n; }; static int cma_debugfs_get(void *data, u64 *val) { unsigned long *p = data; *val = *p; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n"); static int cma_used_get(void *data, u64 *val) { struct cma *cma = data; unsigned long used; spin_lock_irq(&cma->lock); /* pages counter is smaller than sizeof(int) */ used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma)); spin_unlock_irq(&cma->lock); *val = (u64)used << cma->order_per_bit; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n"); static int cma_maxchunk_get(void *data, u64 *val) { struct cma *cma = data; unsigned long maxchunk = 0; unsigned long start, end = 0; unsigned long bitmap_maxno = cma_bitmap_maxno(cma); spin_lock_irq(&cma->lock); for (;;) { start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end); if (start >= bitmap_maxno) break; end = find_next_bit(cma->bitmap, bitmap_maxno, start); maxchunk = max(end - start, maxchunk); } spin_unlock_irq(&cma->lock); *val = (u64)maxchunk << cma->order_per_bit; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n"); static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem) { spin_lock(&cma->mem_head_lock); hlist_add_head(&mem->node, &cma->mem_head); spin_unlock(&cma->mem_head_lock); } static struct cma_mem *cma_get_entry_from_list(struct cma *cma) { struct cma_mem *mem = NULL; spin_lock(&cma->mem_head_lock); if (!hlist_empty(&cma->mem_head)) { mem = hlist_entry(cma->mem_head.first, struct cma_mem, node); hlist_del_init(&mem->node); } spin_unlock(&cma->mem_head_lock); return mem; } static int cma_free_mem(struct cma *cma, int count) { struct cma_mem *mem = NULL; while (count) { mem = cma_get_entry_from_list(cma); if (mem == NULL) return 0; if (mem->n <= count) { cma_release(cma, mem->p, mem->n); count -= mem->n; kfree(mem); } else if (cma->order_per_bit == 0) { cma_release(cma, mem->p, count); mem->p += count; mem->n -= count; count = 0; cma_add_to_cma_mem_list(cma, mem); } else { pr_debug("cma: cannot release partial block when order_per_bit != 0\n"); cma_add_to_cma_mem_list(cma, mem); break; } } return 0; } static int cma_free_write(void *data, u64 val) { int pages = val; struct cma *cma = data; return cma_free_mem(cma, pages); } DEFINE_DEBUGFS_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n"); static int cma_alloc_mem(struct cma *cma, int count) { struct cma_mem *mem; struct page *p; mem = kzalloc(sizeof(*mem), GFP_KERNEL); if (!mem) return -ENOMEM; p = cma_alloc(cma, count, 0, false); if (!p) { kfree(mem); return -ENOMEM; } mem->p = p; mem->n = count; cma_add_to_cma_mem_list(cma, mem); return 0; } static int cma_alloc_write(void *data, u64 val) { int pages = val; struct cma *cma = data; return cma_alloc_mem(cma, pages); } DEFINE_DEBUGFS_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n"); static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry) { struct dentry *tmp; tmp = debugfs_create_dir(cma->name, root_dentry); debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops); debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops); debugfs_create_file("base_pfn", 0444, 
tmp, &cma->base_pfn, &cma_debugfs_fops); debugfs_create_file("count", 0444, tmp, &cma->count, &cma_debugfs_fops); debugfs_create_file("order_per_bit", 0444, tmp, &cma->order_per_bit, &cma_debugfs_fops); debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops); debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops); cma->dfs_bitmap.array = (u32 *)cma->bitmap; cma->dfs_bitmap.n_elements = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32)); debugfs_create_u32_array("bitmap", 0444, tmp, &cma->dfs_bitmap); } static int __init cma_debugfs_init(void) { struct dentry *cma_debugfs_root; int i; cma_debugfs_root = debugfs_create_dir("cma", NULL); for (i = 0; i < cma_area_count; i++) cma_debugfs_add_one(&cma_areas[i], cma_debugfs_root); return 0; } late_initcall(cma_debugfs_init);
linux-master
mm/cma_debug.c
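cma_maxchunk_get() above reports the largest contiguous free region by walking the CMA allocation bitmap with find_next_zero_bit()/find_next_bit() and remembering the longest run of clear bits. A self-contained userspace approximation follows; test_bit(), find_next_with_value() and max_free_chunk() are local stand-ins written for this sketch, and the result would still need the "<< cma->order_per_bit" scaling from the real code to become a page count.

/*
 * Illustrative scan for the largest run of clear bits in a bitmap,
 * mirroring the loop structure of cma_maxchunk_get() above.
 */
#include <stdio.h>
#include <stdbool.h>

static bool test_bit(const unsigned char *bitmap, unsigned long nr)
{
        return bitmap[nr / 8] & (1u << (nr % 8));
}

/* Return the first bit index >= start whose value matches, or max. */
static unsigned long find_next_with_value(const unsigned char *bitmap,
                                          unsigned long max,
                                          unsigned long start, bool value)
{
        while (start < max && test_bit(bitmap, start) != value)
                start++;
        return start;
}

static unsigned long max_free_chunk(const unsigned char *bitmap,
                                    unsigned long maxno)
{
        unsigned long start, end = 0, maxchunk = 0;

        for (;;) {
                start = find_next_with_value(bitmap, maxno, end, false);
                if (start >= maxno)
                        break;
                end = find_next_with_value(bitmap, maxno, start, true);
                if (end - start > maxchunk)
                        maxchunk = end - start;
        }
        return maxchunk;
}

int main(void)
{
        /* set bit = allocated; bits 0-2 and 8-9 are in use here */
        unsigned char bitmap[2] = { 0x07, 0x03 };

        printf("largest free chunk: %lu bits\n", max_free_chunk(bitmap, 16));
        return 0;
}

With the sample bitmap the free runs are bits 3-7 and 10-15, so the sketch prints 6 — the same answer the debugfs "maxchunk" file would expose, before shifting by order_per_bit.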
// SPDX-License-Identifier: GPL-2.0 /* * Memory Migration functionality - linux/mm/migrate.c * * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter * * Page migration was first developed in the context of the memory hotplug * project. The main authors of the migration code are: * * IWAMOTO Toshihiro <[email protected]> * Hirokazu Takahashi <[email protected]> * Dave Hansen <[email protected]> * Christoph Lameter */ #include <linux/migrate.h> #include <linux/export.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/mm_inline.h> #include <linux/nsproxy.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/topology.h> #include <linux/cpu.h> #include <linux/cpuset.h> #include <linux/writeback.h> #include <linux/mempolicy.h> #include <linux/vmalloc.h> #include <linux/security.h> #include <linux/backing-dev.h> #include <linux/compaction.h> #include <linux/syscalls.h> #include <linux/compat.h> #include <linux/hugetlb.h> #include <linux/hugetlb_cgroup.h> #include <linux/gfp.h> #include <linux/pfn_t.h> #include <linux/memremap.h> #include <linux/userfaultfd_k.h> #include <linux/balloon_compaction.h> #include <linux/page_idle.h> #include <linux/page_owner.h> #include <linux/sched/mm.h> #include <linux/ptrace.h> #include <linux/oom.h> #include <linux/memory.h> #include <linux/random.h> #include <linux/sched/sysctl.h> #include <linux/memory-tiers.h> #include <asm/tlbflush.h> #include <trace/events/migrate.h> #include "internal.h" bool isolate_movable_page(struct page *page, isolate_mode_t mode) { struct folio *folio = folio_get_nontail_page(page); const struct movable_operations *mops; /* * Avoid burning cycles with pages that are yet under __free_pages(), * or just got freed under us. * * In case we 'win' a race for a movable page being freed under us and * raise its refcount preventing __free_pages() from doing its job * the put_page() at the end of this block will take care of * release this page, thus avoiding a nasty leakage. */ if (!folio) goto out; if (unlikely(folio_test_slab(folio))) goto out_putfolio; /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */ smp_rmb(); /* * Check movable flag before taking the page lock because * we use non-atomic bitops on newly allocated page flags so * unconditionally grabbing the lock ruins page's owner side. */ if (unlikely(!__folio_test_movable(folio))) goto out_putfolio; /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */ smp_rmb(); if (unlikely(folio_test_slab(folio))) goto out_putfolio; /* * As movable pages are not isolated from LRU lists, concurrent * compaction threads can race against page migration functions * as well as race against the releasing a page. * * In order to avoid having an already isolated movable page * being (wrongly) re-isolated while it is under migration, * or to avoid attempting to isolate pages being released, * lets be sure we have the page lock * before proceeding with the movable page isolation steps. 
*/ if (unlikely(!folio_trylock(folio))) goto out_putfolio; if (!folio_test_movable(folio) || folio_test_isolated(folio)) goto out_no_isolated; mops = folio_movable_ops(folio); VM_BUG_ON_FOLIO(!mops, folio); if (!mops->isolate_page(&folio->page, mode)) goto out_no_isolated; /* Driver shouldn't use PG_isolated bit of page->flags */ WARN_ON_ONCE(folio_test_isolated(folio)); folio_set_isolated(folio); folio_unlock(folio); return true; out_no_isolated: folio_unlock(folio); out_putfolio: folio_put(folio); out: return false; } static void putback_movable_folio(struct folio *folio) { const struct movable_operations *mops = folio_movable_ops(folio); mops->putback_page(&folio->page); folio_clear_isolated(folio); } /* * Put previously isolated pages back onto the appropriate lists * from where they were once taken off for compaction/migration. * * This function shall be used whenever the isolated pageset has been * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range() * and isolate_hugetlb(). */ void putback_movable_pages(struct list_head *l) { struct folio *folio; struct folio *folio2; list_for_each_entry_safe(folio, folio2, l, lru) { if (unlikely(folio_test_hugetlb(folio))) { folio_putback_active_hugetlb(folio); continue; } list_del(&folio->lru); /* * We isolated non-lru movable folio so here we can use * __PageMovable because LRU folio's mapping cannot have * PAGE_MAPPING_MOVABLE. */ if (unlikely(__folio_test_movable(folio))) { VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio); folio_lock(folio); if (folio_test_movable(folio)) putback_movable_folio(folio); else folio_clear_isolated(folio); folio_unlock(folio); folio_put(folio); } else { node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio), -folio_nr_pages(folio)); folio_putback_lru(folio); } } } /* * Restore a potential migration pte to a working pte entry */ static bool remove_migration_pte(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, void *old) { DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION); while (page_vma_mapped_walk(&pvmw)) { rmap_t rmap_flags = RMAP_NONE; pte_t old_pte; pte_t pte; swp_entry_t entry; struct page *new; unsigned long idx = 0; /* pgoff is invalid for ksm pages, but they are never large */ if (folio_test_large(folio) && !folio_test_hugetlb(folio)) idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff; new = folio_page(folio, idx); #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION /* PMD-mapped THP migration entry */ if (!pvmw.pte) { VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || !folio_test_pmd_mappable(folio), folio); remove_migration_pmd(&pvmw, new); continue; } #endif folio_get(folio); pte = mk_pte(new, READ_ONCE(vma->vm_page_prot)); old_pte = ptep_get(pvmw.pte); if (pte_swp_soft_dirty(old_pte)) pte = pte_mksoft_dirty(pte); entry = pte_to_swp_entry(old_pte); if (!is_migration_entry_young(entry)) pte = pte_mkold(pte); if (folio_test_dirty(folio) && is_migration_entry_dirty(entry)) pte = pte_mkdirty(pte); if (is_writable_migration_entry(entry)) pte = pte_mkwrite(pte, vma); else if (pte_swp_uffd_wp(old_pte)) pte = pte_mkuffd_wp(pte); if (folio_test_anon(folio) && !is_readable_migration_entry(entry)) rmap_flags |= RMAP_EXCLUSIVE; if (unlikely(is_device_private_page(new))) { if (pte_write(pte)) entry = make_writable_device_private_entry( page_to_pfn(new)); else entry = make_readable_device_private_entry( page_to_pfn(new)); pte = swp_entry_to_pte(entry); if (pte_swp_soft_dirty(old_pte)) pte = pte_swp_mksoft_dirty(pte); if (pte_swp_uffd_wp(old_pte)) pte 
= pte_swp_mkuffd_wp(pte); } #ifdef CONFIG_HUGETLB_PAGE if (folio_test_hugetlb(folio)) { unsigned int shift = huge_page_shift(hstate_vma(vma)); pte = arch_make_huge_pte(pte, shift, vma->vm_flags); if (folio_test_anon(folio)) hugepage_add_anon_rmap(new, vma, pvmw.address, rmap_flags); else page_dup_file_rmap(new, true); set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); } else #endif { if (folio_test_anon(folio)) page_add_anon_rmap(new, vma, pvmw.address, rmap_flags); else page_add_file_rmap(new, vma, false); set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); } if (vma->vm_flags & VM_LOCKED) mlock_drain_local(); trace_remove_migration_pte(pvmw.address, pte_val(pte), compound_order(new)); /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, pvmw.address, pvmw.pte); } return true; } /* * Get rid of all migration entries and replace them by * references to the indicated page. */ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked) { struct rmap_walk_control rwc = { .rmap_one = remove_migration_pte, .arg = src, }; if (locked) rmap_walk_locked(dst, &rwc); else rmap_walk(dst, &rwc); } /* * Something used the pte of a page under migration. We need to * get to the page and wait until migration is finished. * When we return from this function the fault will be retried. */ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address) { spinlock_t *ptl; pte_t *ptep; pte_t pte; swp_entry_t entry; ptep = pte_offset_map_lock(mm, pmd, address, &ptl); if (!ptep) return; pte = ptep_get(ptep); pte_unmap(ptep); if (!is_swap_pte(pte)) goto out; entry = pte_to_swp_entry(pte); if (!is_migration_entry(entry)) goto out; migration_entry_wait_on_locked(entry, ptl); return; out: spin_unlock(ptl); } #ifdef CONFIG_HUGETLB_PAGE /* * The vma read lock must be held upon entry. Holding that lock prevents either * the pte or the ptl from being freed. * * This function will release the vma lock before returning. */ void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep) { spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep); pte_t pte; hugetlb_vma_assert_locked(vma); spin_lock(ptl); pte = huge_ptep_get(ptep); if (unlikely(!is_hugetlb_entry_migration(pte))) { spin_unlock(ptl); hugetlb_vma_unlock_read(vma); } else { /* * If migration entry existed, safe to release vma lock * here because the pgtable page won't be freed without the * pgtable lock released. See comment right above pgtable * lock release in migration_entry_wait_on_locked(). */ hugetlb_vma_unlock_read(vma); migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl); } } #endif #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd) { spinlock_t *ptl; ptl = pmd_lock(mm, pmd); if (!is_pmd_migration_entry(*pmd)) goto unlock; migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl); return; unlock: spin_unlock(ptl); } #endif static int folio_expected_refs(struct address_space *mapping, struct folio *folio) { int refs = 1; if (!mapping) return refs; refs += folio_nr_pages(folio); if (folio_test_private(folio)) refs++; return refs; } /* * Replace the page in the mapping. * * The number of remaining references must be: * 1 for anonymous pages without a mapping * 2 for pages with a mapping * 3 for pages with a mapping and PagePrivate/PagePrivate2 set. 
*/ int folio_migrate_mapping(struct address_space *mapping, struct folio *newfolio, struct folio *folio, int extra_count) { XA_STATE(xas, &mapping->i_pages, folio_index(folio)); struct zone *oldzone, *newzone; int dirty; int expected_count = folio_expected_refs(mapping, folio) + extra_count; long nr = folio_nr_pages(folio); if (!mapping) { /* Anonymous page without mapping */ if (folio_ref_count(folio) != expected_count) return -EAGAIN; /* No turning back from here */ newfolio->index = folio->index; newfolio->mapping = folio->mapping; if (folio_test_swapbacked(folio)) __folio_set_swapbacked(newfolio); return MIGRATEPAGE_SUCCESS; } oldzone = folio_zone(folio); newzone = folio_zone(newfolio); xas_lock_irq(&xas); if (!folio_ref_freeze(folio, expected_count)) { xas_unlock_irq(&xas); return -EAGAIN; } /* * Now we know that no one else is looking at the folio: * no turning back from here. */ newfolio->index = folio->index; newfolio->mapping = folio->mapping; folio_ref_add(newfolio, nr); /* add cache reference */ if (folio_test_swapbacked(folio)) { __folio_set_swapbacked(newfolio); if (folio_test_swapcache(folio)) { folio_set_swapcache(newfolio); newfolio->private = folio_get_private(folio); } } else { VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio); } /* Move dirty while page refs frozen and newpage not yet exposed */ dirty = folio_test_dirty(folio); if (dirty) { folio_clear_dirty(folio); folio_set_dirty(newfolio); } xas_store(&xas, newfolio); /* * Drop cache reference from old page by unfreezing * to one less reference. * We know this isn't the last reference. */ folio_ref_unfreeze(folio, expected_count - nr); xas_unlock(&xas); /* Leave irq disabled to prevent preemption while updating stats */ /* * If moved to a different zone then also account * the page for that zone. Other VM counters will be * taken care of when we establish references to the * new page and drop references to the old page. * * Note that anonymous pages are accounted for * via NR_FILE_PAGES and NR_ANON_MAPPED if they * are mapped to swap space. */ if (newzone != oldzone) { struct lruvec *old_lruvec, *new_lruvec; struct mem_cgroup *memcg; memcg = folio_memcg(folio); old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat); new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat); __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr); __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr); if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) { __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr); __mod_lruvec_state(new_lruvec, NR_SHMEM, nr); if (folio_test_pmd_mappable(folio)) { __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr); __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr); } } #ifdef CONFIG_SWAP if (folio_test_swapcache(folio)) { __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr); __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr); } #endif if (dirty && mapping_can_writeback(mapping)) { __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr); __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr); __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr); __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr); } } local_irq_enable(); return MIGRATEPAGE_SUCCESS; } EXPORT_SYMBOL(folio_migrate_mapping); /* * The expected number of remaining references is the same as that * of folio_migrate_mapping(). 
*/ int migrate_huge_page_move_mapping(struct address_space *mapping, struct folio *dst, struct folio *src) { XA_STATE(xas, &mapping->i_pages, folio_index(src)); int expected_count; xas_lock_irq(&xas); expected_count = 2 + folio_has_private(src); if (!folio_ref_freeze(src, expected_count)) { xas_unlock_irq(&xas); return -EAGAIN; } dst->index = src->index; dst->mapping = src->mapping; folio_get(dst); xas_store(&xas, dst); folio_ref_unfreeze(src, expected_count - 1); xas_unlock_irq(&xas); return MIGRATEPAGE_SUCCESS; } /* * Copy the flags and some other ancillary information */ void folio_migrate_flags(struct folio *newfolio, struct folio *folio) { int cpupid; if (folio_test_error(folio)) folio_set_error(newfolio); if (folio_test_referenced(folio)) folio_set_referenced(newfolio); if (folio_test_uptodate(folio)) folio_mark_uptodate(newfolio); if (folio_test_clear_active(folio)) { VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio); folio_set_active(newfolio); } else if (folio_test_clear_unevictable(folio)) folio_set_unevictable(newfolio); if (folio_test_workingset(folio)) folio_set_workingset(newfolio); if (folio_test_checked(folio)) folio_set_checked(newfolio); /* * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via * migration entries. We can still have PG_anon_exclusive set on an * effectively unmapped and unreferenced first sub-pages of an * anonymous THP: we can simply copy it here via PG_mappedtodisk. */ if (folio_test_mappedtodisk(folio)) folio_set_mappedtodisk(newfolio); /* Move dirty on pages not done by folio_migrate_mapping() */ if (folio_test_dirty(folio)) folio_set_dirty(newfolio); if (folio_test_young(folio)) folio_set_young(newfolio); if (folio_test_idle(folio)) folio_set_idle(newfolio); /* * Copy NUMA information to the new page, to prevent over-eager * future migrations of this same page. */ cpupid = page_cpupid_xchg_last(&folio->page, -1); /* * For memory tiering mode, when migrate between slow and fast * memory node, reset cpupid, because that is used to record * page access time in slow memory node. */ if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) { bool f_toptier = node_is_toptier(page_to_nid(&folio->page)); bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page)); if (f_toptier != t_toptier) cpupid = -1; } page_cpupid_xchg_last(&newfolio->page, cpupid); folio_migrate_ksm(newfolio, folio); /* * Please do not reorder this without considering how mm/ksm.c's * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache(). */ if (folio_test_swapcache(folio)) folio_clear_swapcache(folio); folio_clear_private(folio); /* page->private contains hugetlb specific flags */ if (!folio_test_hugetlb(folio)) folio->private = NULL; /* * If any waiters have accumulated on the new page then * wake them up. */ if (folio_test_writeback(newfolio)) folio_end_writeback(newfolio); /* * PG_readahead shares the same bit with PG_reclaim. The above * end_page_writeback() may clear PG_readahead mistakenly, so set the * bit after that. 
*/ if (folio_test_readahead(folio)) folio_set_readahead(newfolio); folio_copy_owner(newfolio, folio); if (!folio_test_hugetlb(folio)) mem_cgroup_migrate(folio, newfolio); } EXPORT_SYMBOL(folio_migrate_flags); void folio_migrate_copy(struct folio *newfolio, struct folio *folio) { folio_copy(newfolio, folio); folio_migrate_flags(newfolio, folio); } EXPORT_SYMBOL(folio_migrate_copy); /************************************************************ * Migration functions ***********************************************************/ int migrate_folio_extra(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode, int extra_count) { int rc; BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */ rc = folio_migrate_mapping(mapping, dst, src, extra_count); if (rc != MIGRATEPAGE_SUCCESS) return rc; if (mode != MIGRATE_SYNC_NO_COPY) folio_migrate_copy(dst, src); else folio_migrate_flags(dst, src); return MIGRATEPAGE_SUCCESS; } /** * migrate_folio() - Simple folio migration. * @mapping: The address_space containing the folio. * @dst: The folio to migrate the data to. * @src: The folio containing the current data. * @mode: How to migrate the page. * * Common logic to directly migrate a single LRU folio suitable for * folios that do not use PagePrivate/PagePrivate2. * * Folios are locked upon entry and exit. */ int migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode) { return migrate_folio_extra(mapping, dst, src, mode, 0); } EXPORT_SYMBOL(migrate_folio); #ifdef CONFIG_BUFFER_HEAD /* Returns true if all buffers are successfully locked */ static bool buffer_migrate_lock_buffers(struct buffer_head *head, enum migrate_mode mode) { struct buffer_head *bh = head; struct buffer_head *failed_bh; do { if (!trylock_buffer(bh)) { if (mode == MIGRATE_ASYNC) goto unlock; if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh)) goto unlock; lock_buffer(bh); } bh = bh->b_this_page; } while (bh != head); return true; unlock: /* We failed to lock the buffer and cannot stall. 
*/ failed_bh = bh; bh = head; while (bh != failed_bh) { unlock_buffer(bh); bh = bh->b_this_page; } return false; } static int __buffer_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode, bool check_refs) { struct buffer_head *bh, *head; int rc; int expected_count; head = folio_buffers(src); if (!head) return migrate_folio(mapping, dst, src, mode); /* Check whether page does not have extra refs before we do more work */ expected_count = folio_expected_refs(mapping, src); if (folio_ref_count(src) != expected_count) return -EAGAIN; if (!buffer_migrate_lock_buffers(head, mode)) return -EAGAIN; if (check_refs) { bool busy; bool invalidated = false; recheck_buffers: busy = false; spin_lock(&mapping->private_lock); bh = head; do { if (atomic_read(&bh->b_count)) { busy = true; break; } bh = bh->b_this_page; } while (bh != head); if (busy) { if (invalidated) { rc = -EAGAIN; goto unlock_buffers; } spin_unlock(&mapping->private_lock); invalidate_bh_lrus(); invalidated = true; goto recheck_buffers; } } rc = folio_migrate_mapping(mapping, dst, src, 0); if (rc != MIGRATEPAGE_SUCCESS) goto unlock_buffers; folio_attach_private(dst, folio_detach_private(src)); bh = head; do { folio_set_bh(bh, dst, bh_offset(bh)); bh = bh->b_this_page; } while (bh != head); if (mode != MIGRATE_SYNC_NO_COPY) folio_migrate_copy(dst, src); else folio_migrate_flags(dst, src); rc = MIGRATEPAGE_SUCCESS; unlock_buffers: if (check_refs) spin_unlock(&mapping->private_lock); bh = head; do { unlock_buffer(bh); bh = bh->b_this_page; } while (bh != head); return rc; } /** * buffer_migrate_folio() - Migration function for folios with buffers. * @mapping: The address space containing @src. * @dst: The folio to migrate to. * @src: The folio to migrate from. * @mode: How to migrate the folio. * * This function can only be used if the underlying filesystem guarantees * that no other references to @src exist. For example attached buffer * heads are accessed only under the folio lock. If your filesystem cannot * provide this guarantee, buffer_migrate_folio_norefs() may be more * appropriate. * * Return: 0 on success or a negative errno on failure. */ int buffer_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode) { return __buffer_migrate_folio(mapping, dst, src, mode, false); } EXPORT_SYMBOL(buffer_migrate_folio); /** * buffer_migrate_folio_norefs() - Migration function for folios with buffers. * @mapping: The address space containing @src. * @dst: The folio to migrate to. * @src: The folio to migrate from. * @mode: How to migrate the folio. * * Like buffer_migrate_folio() except that this variant is more careful * and checks that there are also no buffer head references. This function * is the right one for mappings where buffer heads are directly looked * up and referenced (such as block device mappings). * * Return: 0 on success or a negative errno on failure. 
*/ int buffer_migrate_folio_norefs(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode) { return __buffer_migrate_folio(mapping, dst, src, mode, true); } EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs); #endif /* CONFIG_BUFFER_HEAD */ int filemap_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode) { int ret; ret = folio_migrate_mapping(mapping, dst, src, 0); if (ret != MIGRATEPAGE_SUCCESS) return ret; if (folio_get_private(src)) folio_attach_private(dst, folio_detach_private(src)); if (mode != MIGRATE_SYNC_NO_COPY) folio_migrate_copy(dst, src); else folio_migrate_flags(dst, src); return MIGRATEPAGE_SUCCESS; } EXPORT_SYMBOL_GPL(filemap_migrate_folio); /* * Writeback a folio to clean the dirty state */ static int writeout(struct address_space *mapping, struct folio *folio) { struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, .nr_to_write = 1, .range_start = 0, .range_end = LLONG_MAX, .for_reclaim = 1 }; int rc; if (!mapping->a_ops->writepage) /* No write method for the address space */ return -EINVAL; if (!folio_clear_dirty_for_io(folio)) /* Someone else already triggered a write */ return -EAGAIN; /* * A dirty folio may imply that the underlying filesystem has * the folio on some queue. So the folio must be clean for * migration. Writeout may mean we lose the lock and the * folio state is no longer what we checked for earlier. * At this point we know that the migration attempt cannot * be successful. */ remove_migration_ptes(folio, folio, false); rc = mapping->a_ops->writepage(&folio->page, &wbc); if (rc != AOP_WRITEPAGE_ACTIVATE) /* unlocked. Relock */ folio_lock(folio); return (rc < 0) ? -EIO : -EAGAIN; } /* * Default handling if a filesystem does not provide a migration function. */ static int fallback_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode) { if (folio_test_dirty(src)) { /* Only writeback folios in full synchronous migration */ switch (mode) { case MIGRATE_SYNC: case MIGRATE_SYNC_NO_COPY: break; default: return -EBUSY; } return writeout(mapping, src); } /* * Buffers may be managed in a filesystem specific way. * We must have no buffers or drop them. */ if (!filemap_release_folio(src, GFP_KERNEL)) return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY; return migrate_folio(mapping, dst, src, mode); } /* * Move a page to a newly allocated page * The page is locked and all ptes have been successfully removed. * * The new page will have replaced the old page if this function * is successful. * * Return value: * < 0 - error code * MIGRATEPAGE_SUCCESS - success */ static int move_to_new_folio(struct folio *dst, struct folio *src, enum migrate_mode mode) { int rc = -EAGAIN; bool is_lru = !__PageMovable(&src->page); VM_BUG_ON_FOLIO(!folio_test_locked(src), src); VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst); if (likely(is_lru)) { struct address_space *mapping = folio_mapping(src); if (!mapping) rc = migrate_folio(mapping, dst, src, mode); else if (mapping->a_ops->migrate_folio) /* * Most folios have a mapping and most filesystems * provide a migrate_folio callback. Anonymous folios * are part of swap space which also has its own * migrate_folio callback. This is the most common path * for page migration. 
*/ rc = mapping->a_ops->migrate_folio(mapping, dst, src, mode); else rc = fallback_migrate_folio(mapping, dst, src, mode); } else { const struct movable_operations *mops; /* * In case of non-lru page, it could be released after * isolation step. In that case, we shouldn't try migration. */ VM_BUG_ON_FOLIO(!folio_test_isolated(src), src); if (!folio_test_movable(src)) { rc = MIGRATEPAGE_SUCCESS; folio_clear_isolated(src); goto out; } mops = folio_movable_ops(src); rc = mops->migrate_page(&dst->page, &src->page, mode); WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS && !folio_test_isolated(src)); } /* * When successful, old pagecache src->mapping must be cleared before * src is freed; but stats require that PageAnon be left as PageAnon. */ if (rc == MIGRATEPAGE_SUCCESS) { if (__PageMovable(&src->page)) { VM_BUG_ON_FOLIO(!folio_test_isolated(src), src); /* * We clear PG_movable under page_lock so any compactor * cannot try to migrate this page. */ folio_clear_isolated(src); } /* * Anonymous and movable src->mapping will be cleared by * free_pages_prepare so don't reset it here for keeping * the type to work PageAnon, for example. */ if (!folio_mapping_flags(src)) src->mapping = NULL; if (likely(!folio_is_zone_device(dst))) flush_dcache_folio(dst); } out: return rc; } /* * To record some information during migration, we use some unused * fields (mapping and private) of struct folio of the newly allocated * destination folio. This is safe because nobody is using them * except us. */ union migration_ptr { struct anon_vma *anon_vma; struct address_space *mapping; }; static void __migrate_folio_record(struct folio *dst, unsigned long page_was_mapped, struct anon_vma *anon_vma) { union migration_ptr ptr = { .anon_vma = anon_vma }; dst->mapping = ptr.mapping; dst->private = (void *)page_was_mapped; } static void __migrate_folio_extract(struct folio *dst, int *page_was_mappedp, struct anon_vma **anon_vmap) { union migration_ptr ptr = { .mapping = dst->mapping }; *anon_vmap = ptr.anon_vma; *page_was_mappedp = (unsigned long)dst->private; dst->mapping = NULL; dst->private = NULL; } /* Restore the source folio to the original state upon failure */ static void migrate_folio_undo_src(struct folio *src, int page_was_mapped, struct anon_vma *anon_vma, bool locked, struct list_head *ret) { if (page_was_mapped) remove_migration_ptes(src, src, false); /* Drop an anon_vma reference if we took one */ if (anon_vma) put_anon_vma(anon_vma); if (locked) folio_unlock(src); if (ret) list_move_tail(&src->lru, ret); } /* Restore the destination folio to the original state upon failure */ static void migrate_folio_undo_dst(struct folio *dst, bool locked, free_folio_t put_new_folio, unsigned long private) { if (locked) folio_unlock(dst); if (put_new_folio) put_new_folio(dst, private); else folio_put(dst); } /* Cleanup src folio upon migration success */ static void migrate_folio_done(struct folio *src, enum migrate_reason reason) { /* * Compaction can migrate also non-LRU pages which are * not accounted to NR_ISOLATED_*. They can be recognized * as __PageMovable */ if (likely(!__folio_test_movable(src))) mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON + folio_is_file_lru(src), -folio_nr_pages(src)); if (reason != MR_MEMORY_FAILURE) /* We release the page in page_handle_poison. */ folio_put(src); } /* Obtain the lock on page, remove all ptes. 
*/ static int migrate_folio_unmap(new_folio_t get_new_folio, free_folio_t put_new_folio, unsigned long private, struct folio *src, struct folio **dstp, enum migrate_mode mode, enum migrate_reason reason, struct list_head *ret) { struct folio *dst; int rc = -EAGAIN; int page_was_mapped = 0; struct anon_vma *anon_vma = NULL; bool is_lru = !__PageMovable(&src->page); bool locked = false; bool dst_locked = false; if (folio_ref_count(src) == 1) { /* Folio was freed from under us. So we are done. */ folio_clear_active(src); folio_clear_unevictable(src); /* free_pages_prepare() will clear PG_isolated. */ list_del(&src->lru); migrate_folio_done(src, reason); return MIGRATEPAGE_SUCCESS; } dst = get_new_folio(src, private); if (!dst) return -ENOMEM; *dstp = dst; dst->private = NULL; if (!folio_trylock(src)) { if (mode == MIGRATE_ASYNC) goto out; /* * It's not safe for direct compaction to call lock_page. * For example, during page readahead pages are added locked * to the LRU. Later, when the IO completes the pages are * marked uptodate and unlocked. However, the queueing * could be merging multiple pages for one bio (e.g. * mpage_readahead). If an allocation happens for the * second or third page, the process can end up locking * the same page twice and deadlocking. Rather than * trying to be clever about what pages can be locked, * avoid the use of lock_page for direct compaction * altogether. */ if (current->flags & PF_MEMALLOC) goto out; /* * In "light" mode, we can wait for transient locks (eg * inserting a page into the page table), but it's not * worth waiting for I/O. */ if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src)) goto out; folio_lock(src); } locked = true; if (folio_test_writeback(src)) { /* * Only in the case of a full synchronous migration is it * necessary to wait for PageWriteback. In the async case, * the retry loop is too short and in the sync-light case, * the overhead of stalling is too much */ switch (mode) { case MIGRATE_SYNC: case MIGRATE_SYNC_NO_COPY: break; default: rc = -EBUSY; goto out; } folio_wait_writeback(src); } /* * By try_to_migrate(), src->mapcount goes down to 0 here. In this case, * we cannot notice that anon_vma is freed while we migrate a page. * This get_anon_vma() delays freeing anon_vma pointer until the end * of migration. File cache pages are no problem because of page_lock() * File Caches may use write_page() or lock_page() in migration, then, * just care Anon page here. * * Only folio_get_anon_vma() understands the subtleties of * getting a hold on an anon_vma from outside one of its mms. * But if we cannot get anon_vma, then we won't need it anyway, * because that implies that the anon page is no longer mapped * (and cannot be remapped so long as we hold the page lock). */ if (folio_test_anon(src) && !folio_test_ksm(src)) anon_vma = folio_get_anon_vma(src); /* * Block others from accessing the new page when we get around to * establishing additional references. We are usually the only one * holding a reference to dst at this point. We used to have a BUG * here if folio_trylock(dst) fails, but would like to allow for * cases where there might be a race with the previous use of dst. * This is much like races on refcount of oldpage: just don't BUG(). */ if (unlikely(!folio_trylock(dst))) goto out; dst_locked = true; if (unlikely(!is_lru)) { __migrate_folio_record(dst, page_was_mapped, anon_vma); return MIGRATEPAGE_UNMAP; } /* * Corner case handling: * 1. 
When a new swap-cache page is read into, it is added to the LRU * and treated as swapcache but it has no rmap yet. * Calling try_to_unmap() against a src->mapping==NULL page will * trigger a BUG. So handle it here. * 2. An orphaned page (see truncate_cleanup_page) might have * fs-private metadata. The page can be picked up due to memory * offlining. Everywhere else except page reclaim, the page is * invisible to the vm, so the page can not be migrated. So try to * free the metadata, so the page can be freed. */ if (!src->mapping) { if (folio_test_private(src)) { try_to_free_buffers(src); goto out; } } else if (folio_mapped(src)) { /* Establish migration ptes */ VM_BUG_ON_FOLIO(folio_test_anon(src) && !folio_test_ksm(src) && !anon_vma, src); try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0); page_was_mapped = 1; } if (!folio_mapped(src)) { __migrate_folio_record(dst, page_was_mapped, anon_vma); return MIGRATEPAGE_UNMAP; } out: /* * A folio that has not been unmapped will be restored to * right list unless we want to retry. */ if (rc == -EAGAIN) ret = NULL; migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret); migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private); return rc; } /* Migrate the folio to the newly allocated folio in dst. */ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private, struct folio *src, struct folio *dst, enum migrate_mode mode, enum migrate_reason reason, struct list_head *ret) { int rc; int page_was_mapped = 0; struct anon_vma *anon_vma = NULL; bool is_lru = !__PageMovable(&src->page); struct list_head *prev; __migrate_folio_extract(dst, &page_was_mapped, &anon_vma); prev = dst->lru.prev; list_del(&dst->lru); rc = move_to_new_folio(dst, src, mode); if (rc) goto out; if (unlikely(!is_lru)) goto out_unlock_both; /* * When successful, push dst to LRU immediately: so that if it * turns out to be an mlocked page, remove_migration_ptes() will * automatically build up the correct dst->mlock_count for it. * * We would like to do something similar for the old page, when * unsuccessful, and other cases when a page has been temporarily * isolated from the unevictable LRU: but this case is the easiest. */ folio_add_lru(dst); if (page_was_mapped) lru_add_drain(); if (page_was_mapped) remove_migration_ptes(src, dst, false); out_unlock_both: folio_unlock(dst); set_page_owner_migrate_reason(&dst->page, reason); /* * If migration is successful, decrease refcount of dst, * which will not free the page because new page owner increased * refcounter. */ folio_put(dst); /* * A folio that has been migrated has all references removed * and will be freed. */ list_del(&src->lru); /* Drop an anon_vma reference if we took one */ if (anon_vma) put_anon_vma(anon_vma); folio_unlock(src); migrate_folio_done(src, reason); return rc; out: /* * A folio that has not been migrated will be restored to * right list unless we want to retry. */ if (rc == -EAGAIN) { list_add(&dst->lru, prev); __migrate_folio_record(dst, page_was_mapped, anon_vma); return rc; } migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret); migrate_folio_undo_dst(dst, true, put_new_folio, private); return rc; } /* * Counterpart of unmap_and_move_page() for hugepage migration. * * This function doesn't wait the completion of hugepage I/O * because there is no race between I/O and migration for hugepage. 
* Note that currently hugepage I/O occurs only in direct I/O * where no lock is held and PG_writeback is irrelevant, * and writeback status of all subpages are counted in the reference * count of the head page (i.e. if all subpages of a 2MB hugepage are * under direct I/O, the reference of the head page is 512 and a bit more.) * This means that when we try to migrate hugepage whose subpages are * doing direct I/O, some references remain after try_to_unmap() and * hugepage migration fails without data corruption. * * There is also no race when direct I/O is issued on the page under migration, * because then pte is replaced with migration swap entry and direct I/O code * will wait in the page fault for migration to complete. */ static int unmap_and_move_huge_page(new_folio_t get_new_folio, free_folio_t put_new_folio, unsigned long private, struct folio *src, int force, enum migrate_mode mode, int reason, struct list_head *ret) { struct folio *dst; int rc = -EAGAIN; int page_was_mapped = 0; struct anon_vma *anon_vma = NULL; struct address_space *mapping = NULL; if (folio_ref_count(src) == 1) { /* page was freed from under us. So we are done. */ folio_putback_active_hugetlb(src); return MIGRATEPAGE_SUCCESS; } dst = get_new_folio(src, private); if (!dst) return -ENOMEM; if (!folio_trylock(src)) { if (!force) goto out; switch (mode) { case MIGRATE_SYNC: case MIGRATE_SYNC_NO_COPY: break; default: goto out; } folio_lock(src); } /* * Check for pages which are in the process of being freed. Without * folio_mapping() set, hugetlbfs specific move page routine will not * be called and we could leak usage counts for subpools. */ if (hugetlb_folio_subpool(src) && !folio_mapping(src)) { rc = -EBUSY; goto out_unlock; } if (folio_test_anon(src)) anon_vma = folio_get_anon_vma(src); if (unlikely(!folio_trylock(dst))) goto put_anon; if (folio_mapped(src)) { enum ttu_flags ttu = 0; if (!folio_test_anon(src)) { /* * In shared mappings, try_to_unmap could potentially * call huge_pmd_unshare. Because of this, take * semaphore in write mode here and set TTU_RMAP_LOCKED * to let lower levels know we have taken the lock. */ mapping = hugetlb_page_mapping_lock_write(&src->page); if (unlikely(!mapping)) goto unlock_put_anon; ttu = TTU_RMAP_LOCKED; } try_to_migrate(src, ttu); page_was_mapped = 1; if (ttu & TTU_RMAP_LOCKED) i_mmap_unlock_write(mapping); } if (!folio_mapped(src)) rc = move_to_new_folio(dst, src, mode); if (page_was_mapped) remove_migration_ptes(src, rc == MIGRATEPAGE_SUCCESS ? dst : src, false); unlock_put_anon: folio_unlock(dst); put_anon: if (anon_vma) put_anon_vma(anon_vma); if (rc == MIGRATEPAGE_SUCCESS) { move_hugetlb_state(src, dst, reason); put_new_folio = NULL; } out_unlock: folio_unlock(src); out: if (rc == MIGRATEPAGE_SUCCESS) folio_putback_active_hugetlb(src); else if (rc != -EAGAIN) list_move_tail(&src->lru, ret); /* * If migration was not successful and there's a freeing callback, use * it. Otherwise, put_page() will drop the reference grabbed during * isolation. 
*/ if (put_new_folio) put_new_folio(dst, private); else folio_putback_active_hugetlb(dst); return rc; } static inline int try_split_folio(struct folio *folio, struct list_head *split_folios) { int rc; folio_lock(folio); rc = split_folio_to_list(folio, split_folios); folio_unlock(folio); if (!rc) list_move_tail(&folio->lru, split_folios); return rc; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define NR_MAX_BATCHED_MIGRATION HPAGE_PMD_NR #else #define NR_MAX_BATCHED_MIGRATION 512 #endif #define NR_MAX_MIGRATE_PAGES_RETRY 10 #define NR_MAX_MIGRATE_ASYNC_RETRY 3 #define NR_MAX_MIGRATE_SYNC_RETRY \ (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY) struct migrate_pages_stats { int nr_succeeded; /* Normal and large folios migrated successfully, in units of base pages */ int nr_failed_pages; /* Normal and large folios failed to be migrated, in units of base pages. Untried folios aren't counted */ int nr_thp_succeeded; /* THP migrated successfully */ int nr_thp_failed; /* THP failed to be migrated */ int nr_thp_split; /* THP split before migrating */ }; /* * Returns the number of hugetlb folios that were not migrated, or an error code * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable * any more because the list has become empty or no retryable hugetlb folios * exist any more. It is caller's responsibility to call putback_movable_pages() * only if ret != 0. */ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio, free_folio_t put_new_folio, unsigned long private, enum migrate_mode mode, int reason, struct migrate_pages_stats *stats, struct list_head *ret_folios) { int retry = 1; int nr_failed = 0; int nr_retry_pages = 0; int pass = 0; struct folio *folio, *folio2; int rc, nr_pages; for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) { retry = 0; nr_retry_pages = 0; list_for_each_entry_safe(folio, folio2, from, lru) { if (!folio_test_hugetlb(folio)) continue; nr_pages = folio_nr_pages(folio); cond_resched(); /* * Migratability of hugepages depends on architectures and * their size. This check is necessary because some callers * of hugepage migration like soft offline and memory * hotremove don't walk through page tables or check whether * the hugepage is pmd-based or not before kicking migration. */ if (!hugepage_migration_supported(folio_hstate(folio))) { nr_failed++; stats->nr_failed_pages += nr_pages; list_move_tail(&folio->lru, ret_folios); continue; } rc = unmap_and_move_huge_page(get_new_folio, put_new_folio, private, folio, pass > 2, mode, reason, ret_folios); /* * The rules are: * Success: hugetlb folio will be put back * -EAGAIN: stay on the from list * -ENOMEM: stay on the from list * Other errno: put on ret_folios list */ switch(rc) { case -ENOMEM: /* * When memory is low, don't bother to try to migrate * other folios, just exit. */ stats->nr_failed_pages += nr_pages + nr_retry_pages; return -ENOMEM; case -EAGAIN: retry++; nr_retry_pages += nr_pages; break; case MIGRATEPAGE_SUCCESS: stats->nr_succeeded += nr_pages; break; default: /* * Permanent failure (-EBUSY, etc.): * unlike -EAGAIN case, the failed folio is * removed from migration folio list and not * retried in the next outer loop. */ nr_failed++; stats->nr_failed_pages += nr_pages; break; } } } /* * nr_failed is number of hugetlb folios failed to be migrated. After * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb * folios as failed. 
*/ nr_failed += retry; stats->nr_failed_pages += nr_retry_pages; return nr_failed; } /* * migrate_pages_batch() first unmaps folios in the from list as many as * possible, then move the unmapped folios. * * We only batch migration if mode == MIGRATE_ASYNC to avoid to wait a * lock or bit when we have locked more than one folio. Which may cause * deadlock (e.g., for loop device). So, if mode != MIGRATE_ASYNC, the * length of the from list must be <= 1. */ static int migrate_pages_batch(struct list_head *from, new_folio_t get_new_folio, free_folio_t put_new_folio, unsigned long private, enum migrate_mode mode, int reason, struct list_head *ret_folios, struct list_head *split_folios, struct migrate_pages_stats *stats, int nr_pass) { int retry = 1; int thp_retry = 1; int nr_failed = 0; int nr_retry_pages = 0; int pass = 0; bool is_thp = false; struct folio *folio, *folio2, *dst = NULL, *dst2; int rc, rc_saved = 0, nr_pages; LIST_HEAD(unmap_folios); LIST_HEAD(dst_folios); bool nosplit = (reason == MR_NUMA_MISPLACED); VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC && !list_empty(from) && !list_is_singular(from)); for (pass = 0; pass < nr_pass && retry; pass++) { retry = 0; thp_retry = 0; nr_retry_pages = 0; list_for_each_entry_safe(folio, folio2, from, lru) { is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio); nr_pages = folio_nr_pages(folio); cond_resched(); /* * Large folio migration might be unsupported or * the allocation might be failed so we should retry * on the same folio with the large folio split * to normal folios. * * Split folios are put in split_folios, and * we will migrate them after the rest of the * list is processed. */ if (!thp_migration_supported() && is_thp) { nr_failed++; stats->nr_thp_failed++; if (!try_split_folio(folio, split_folios)) { stats->nr_thp_split++; continue; } stats->nr_failed_pages += nr_pages; list_move_tail(&folio->lru, ret_folios); continue; } rc = migrate_folio_unmap(get_new_folio, put_new_folio, private, folio, &dst, mode, reason, ret_folios); /* * The rules are: * Success: folio will be freed * Unmap: folio will be put on unmap_folios list, * dst folio put on dst_folios list * -EAGAIN: stay on the from list * -ENOMEM: stay on the from list * Other errno: put on ret_folios list */ switch(rc) { case -ENOMEM: /* * When memory is low, don't bother to try to migrate * other folios, move unmapped folios, then exit. */ nr_failed++; stats->nr_thp_failed += is_thp; /* Large folio NUMA faulting doesn't split to retry. */ if (folio_test_large(folio) && !nosplit) { int ret = try_split_folio(folio, split_folios); if (!ret) { stats->nr_thp_split += is_thp; break; } else if (reason == MR_LONGTERM_PIN && ret == -EAGAIN) { /* * Try again to split large folio to * mitigate the failure of longterm pinning. */ retry++; thp_retry += is_thp; nr_retry_pages += nr_pages; /* Undo duplicated failure counting. 
*/ nr_failed--; stats->nr_thp_failed -= is_thp; break; } } stats->nr_failed_pages += nr_pages + nr_retry_pages; /* nr_failed isn't updated for not used */ stats->nr_thp_failed += thp_retry; rc_saved = rc; if (list_empty(&unmap_folios)) goto out; else goto move; case -EAGAIN: retry++; thp_retry += is_thp; nr_retry_pages += nr_pages; break; case MIGRATEPAGE_SUCCESS: stats->nr_succeeded += nr_pages; stats->nr_thp_succeeded += is_thp; break; case MIGRATEPAGE_UNMAP: list_move_tail(&folio->lru, &unmap_folios); list_add_tail(&dst->lru, &dst_folios); break; default: /* * Permanent failure (-EBUSY, etc.): * unlike -EAGAIN case, the failed folio is * removed from migration folio list and not * retried in the next outer loop. */ nr_failed++; stats->nr_thp_failed += is_thp; stats->nr_failed_pages += nr_pages; break; } } } nr_failed += retry; stats->nr_thp_failed += thp_retry; stats->nr_failed_pages += nr_retry_pages; move: /* Flush TLBs for all unmapped folios */ try_to_unmap_flush(); retry = 1; for (pass = 0; pass < nr_pass && retry; pass++) { retry = 0; thp_retry = 0; nr_retry_pages = 0; dst = list_first_entry(&dst_folios, struct folio, lru); dst2 = list_next_entry(dst, lru); list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) { is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio); nr_pages = folio_nr_pages(folio); cond_resched(); rc = migrate_folio_move(put_new_folio, private, folio, dst, mode, reason, ret_folios); /* * The rules are: * Success: folio will be freed * -EAGAIN: stay on the unmap_folios list * Other errno: put on ret_folios list */ switch(rc) { case -EAGAIN: retry++; thp_retry += is_thp; nr_retry_pages += nr_pages; break; case MIGRATEPAGE_SUCCESS: stats->nr_succeeded += nr_pages; stats->nr_thp_succeeded += is_thp; break; default: nr_failed++; stats->nr_thp_failed += is_thp; stats->nr_failed_pages += nr_pages; break; } dst = dst2; dst2 = list_next_entry(dst, lru); } } nr_failed += retry; stats->nr_thp_failed += thp_retry; stats->nr_failed_pages += nr_retry_pages; rc = rc_saved ? 
: nr_failed; out: /* Cleanup remaining folios */ dst = list_first_entry(&dst_folios, struct folio, lru); dst2 = list_next_entry(dst, lru); list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) { int page_was_mapped = 0; struct anon_vma *anon_vma = NULL; __migrate_folio_extract(dst, &page_was_mapped, &anon_vma); migrate_folio_undo_src(folio, page_was_mapped, anon_vma, true, ret_folios); list_del(&dst->lru); migrate_folio_undo_dst(dst, true, put_new_folio, private); dst = dst2; dst2 = list_next_entry(dst, lru); } return rc; } static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio, free_folio_t put_new_folio, unsigned long private, enum migrate_mode mode, int reason, struct list_head *ret_folios, struct list_head *split_folios, struct migrate_pages_stats *stats) { int rc, nr_failed = 0; LIST_HEAD(folios); struct migrate_pages_stats astats; memset(&astats, 0, sizeof(astats)); /* Try to migrate in batch with MIGRATE_ASYNC mode firstly */ rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC, reason, &folios, split_folios, &astats, NR_MAX_MIGRATE_ASYNC_RETRY); stats->nr_succeeded += astats.nr_succeeded; stats->nr_thp_succeeded += astats.nr_thp_succeeded; stats->nr_thp_split += astats.nr_thp_split; if (rc < 0) { stats->nr_failed_pages += astats.nr_failed_pages; stats->nr_thp_failed += astats.nr_thp_failed; list_splice_tail(&folios, ret_folios); return rc; } stats->nr_thp_failed += astats.nr_thp_split; nr_failed += astats.nr_thp_split; /* * Fall back to migrate all failed folios one by one synchronously. All * failed folios except split THPs will be retried, so their failure * isn't counted */ list_splice_tail_init(&folios, from); while (!list_empty(from)) { list_move(from->next, &folios); rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio, private, mode, reason, ret_folios, split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY); list_splice_tail_init(&folios, ret_folios); if (rc < 0) return rc; nr_failed += rc; } return nr_failed; } /* * migrate_pages - migrate the folios specified in a list, to the free folios * supplied as the target for the page migration * * @from: The list of folios to be migrated. * @get_new_folio: The function used to allocate free folios to be used * as the target of the folio migration. * @put_new_folio: The function used to free target folios if migration * fails, or NULL if no special handling is necessary. * @private: Private data to be passed on to get_new_folio() * @mode: The migration mode that specifies the constraints for * folio migration, if any. * @reason: The reason for folio migration. * @ret_succeeded: Set to the number of folios migrated successfully if * the caller passes a non-NULL pointer. * * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios * are movable any more because the list has become empty or no retryable folios * exist any more. It is caller's responsibility to call putback_movable_pages() * only if ret != 0. * * Returns the number of {normal folio, large folio, hugetlb} that were not * migrated, or an error code. The number of large folio splits will be * considered as the number of non-migrated large folio, no matter how many * split folios of the large folio are migrated successfully. 
*/ int migrate_pages(struct list_head *from, new_folio_t get_new_folio, free_folio_t put_new_folio, unsigned long private, enum migrate_mode mode, int reason, unsigned int *ret_succeeded) { int rc, rc_gather; int nr_pages; struct folio *folio, *folio2; LIST_HEAD(folios); LIST_HEAD(ret_folios); LIST_HEAD(split_folios); struct migrate_pages_stats stats; trace_mm_migrate_pages_start(mode, reason); memset(&stats, 0, sizeof(stats)); rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private, mode, reason, &stats, &ret_folios); if (rc_gather < 0) goto out; again: nr_pages = 0; list_for_each_entry_safe(folio, folio2, from, lru) { /* Retried hugetlb folios will be kept in list */ if (folio_test_hugetlb(folio)) { list_move_tail(&folio->lru, &ret_folios); continue; } nr_pages += folio_nr_pages(folio); if (nr_pages >= NR_MAX_BATCHED_MIGRATION) break; } if (nr_pages >= NR_MAX_BATCHED_MIGRATION) list_cut_before(&folios, from, &folio2->lru); else list_splice_init(from, &folios); if (mode == MIGRATE_ASYNC) rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio, private, mode, reason, &ret_folios, &split_folios, &stats, NR_MAX_MIGRATE_PAGES_RETRY); else rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio, private, mode, reason, &ret_folios, &split_folios, &stats); list_splice_tail_init(&folios, &ret_folios); if (rc < 0) { rc_gather = rc; list_splice_tail(&split_folios, &ret_folios); goto out; } if (!list_empty(&split_folios)) { /* * Failure isn't counted since all split folios of a large folio * is counted as 1 failure already. And, we only try to migrate * with minimal effort, force MIGRATE_ASYNC mode and retry once. */ migrate_pages_batch(&split_folios, get_new_folio, put_new_folio, private, MIGRATE_ASYNC, reason, &ret_folios, NULL, &stats, 1); list_splice_tail_init(&split_folios, &ret_folios); } rc_gather += rc; if (!list_empty(from)) goto again; out: /* * Put the permanent failure folio back to migration list, they * will be put back to the right list by the caller. */ list_splice(&ret_folios, from); /* * Return 0 in case all split folios of fail-to-migrate large folios * are migrated successfully. */ if (list_empty(from)) rc_gather = 0; count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded); count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages); count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded); count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed); count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split); trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages, stats.nr_thp_succeeded, stats.nr_thp_failed, stats.nr_thp_split, mode, reason); if (ret_succeeded) *ret_succeeded = stats.nr_succeeded; return rc_gather; } struct folio *alloc_migration_target(struct folio *src, unsigned long private) { struct migration_target_control *mtc; gfp_t gfp_mask; unsigned int order = 0; int nid; int zidx; mtc = (struct migration_target_control *)private; gfp_mask = mtc->gfp_mask; nid = mtc->nid; if (nid == NUMA_NO_NODE) nid = folio_nid(src); if (folio_test_hugetlb(src)) { struct hstate *h = folio_hstate(src); gfp_mask = htlb_modify_alloc_mask(h, gfp_mask); return alloc_hugetlb_folio_nodemask(h, nid, mtc->nmask, gfp_mask); } if (folio_test_large(src)) { /* * clear __GFP_RECLAIM to make the migration callback * consistent with regular THP allocations. 
*/ gfp_mask &= ~__GFP_RECLAIM; gfp_mask |= GFP_TRANSHUGE; order = folio_order(src); } zidx = zone_idx(folio_zone(src)); if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE) gfp_mask |= __GFP_HIGHMEM; return __folio_alloc(gfp_mask, order, nid, mtc->nmask); } #ifdef CONFIG_NUMA static int store_status(int __user *status, int start, int value, int nr) { while (nr-- > 0) { if (put_user(value, status + start)) return -EFAULT; start++; } return 0; } static int do_move_pages_to_node(struct mm_struct *mm, struct list_head *pagelist, int node) { int err; struct migration_target_control mtc = { .nid = node, .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, }; err = migrate_pages(pagelist, alloc_migration_target, NULL, (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL); if (err) putback_movable_pages(pagelist); return err; } /* * Resolves the given address to a struct page, isolates it from the LRU and * puts it to the given pagelist. * Returns: * errno - if the page cannot be found/isolated * 0 - when it doesn't have to be migrated because it is already on the * target node * 1 - when it has been queued */ static int add_page_for_migration(struct mm_struct *mm, const void __user *p, int node, struct list_head *pagelist, bool migrate_all) { struct vm_area_struct *vma; unsigned long addr; struct page *page; int err; bool isolated; mmap_read_lock(mm); addr = (unsigned long)untagged_addr_remote(mm, p); err = -EFAULT; vma = vma_lookup(mm, addr); if (!vma || !vma_migratable(vma)) goto out; /* FOLL_DUMP to ignore special (like zero) pages */ page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); err = PTR_ERR(page); if (IS_ERR(page)) goto out; err = -ENOENT; if (!page) goto out; if (is_zone_device_page(page)) goto out_putpage; err = 0; if (page_to_nid(page) == node) goto out_putpage; err = -EACCES; if (page_mapcount(page) > 1 && !migrate_all) goto out_putpage; if (PageHuge(page)) { if (PageHead(page)) { isolated = isolate_hugetlb(page_folio(page), pagelist); err = isolated ? 1 : -EBUSY; } } else { struct page *head; head = compound_head(page); isolated = isolate_lru_page(head); if (!isolated) { err = -EBUSY; goto out_putpage; } err = 1; list_add_tail(&head->lru, pagelist); mod_node_page_state(page_pgdat(head), NR_ISOLATED_ANON + page_is_file_lru(head), thp_nr_pages(head)); } out_putpage: /* * Either remove the duplicate refcount from * isolate_lru_page() or drop the page ref if it was * not isolated. */ put_page(page); out: mmap_read_unlock(mm); return err; } static int move_pages_and_store_status(struct mm_struct *mm, int node, struct list_head *pagelist, int __user *status, int start, int i, unsigned long nr_pages) { int err; if (list_empty(pagelist)) return 0; err = do_move_pages_to_node(mm, pagelist, node); if (err) { /* * Positive err means the number of failed * pages to migrate. Since we are going to * abort and return the number of non-migrated * pages, so need to include the rest of the * nr_pages that have not been attempted as * well. */ if (err > 0) err += nr_pages - i; return err; } return store_status(status, start, node, i - start); } /* * Migrate an array of page address onto an array of nodes and fill * the corresponding array of status. 
*/ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, unsigned long nr_pages, const void __user * __user *pages, const int __user *nodes, int __user *status, int flags) { int current_node = NUMA_NO_NODE; LIST_HEAD(pagelist); int start, i; int err = 0, err1; lru_cache_disable(); for (i = start = 0; i < nr_pages; i++) { const void __user *p; int node; err = -EFAULT; if (get_user(p, pages + i)) goto out_flush; if (get_user(node, nodes + i)) goto out_flush; err = -ENODEV; if (node < 0 || node >= MAX_NUMNODES) goto out_flush; if (!node_state(node, N_MEMORY)) goto out_flush; err = -EACCES; if (!node_isset(node, task_nodes)) goto out_flush; if (current_node == NUMA_NO_NODE) { current_node = node; start = i; } else if (node != current_node) { err = move_pages_and_store_status(mm, current_node, &pagelist, status, start, i, nr_pages); if (err) goto out; start = i; current_node = node; } /* * Errors in the page lookup or isolation are not fatal and we simply * report them via status */ err = add_page_for_migration(mm, p, current_node, &pagelist, flags & MPOL_MF_MOVE_ALL); if (err > 0) { /* The page is successfully queued for migration */ continue; } /* * The move_pages() man page does not have an -EEXIST choice, so * use -EFAULT instead. */ if (err == -EEXIST) err = -EFAULT; /* * If the page is already on the target node (!err), store the * node, otherwise, store the err. */ err = store_status(status, i, err ? : current_node, 1); if (err) goto out_flush; err = move_pages_and_store_status(mm, current_node, &pagelist, status, start, i, nr_pages); if (err) { /* We have accounted for page i */ if (err > 0) err--; goto out; } current_node = NUMA_NO_NODE; } out_flush: /* Make sure we do not overwrite the existing error */ err1 = move_pages_and_store_status(mm, current_node, &pagelist, status, start, i, nr_pages); if (err >= 0) err = err1; out: lru_cache_enable(); return err; } /* * Determine the nodes of an array of pages and store it in an array of status. */ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, const void __user **pages, int *status) { unsigned long i; mmap_read_lock(mm); for (i = 0; i < nr_pages; i++) { unsigned long addr = (unsigned long)(*pages); struct vm_area_struct *vma; struct page *page; int err = -EFAULT; vma = vma_lookup(mm, addr); if (!vma) goto set_status; /* FOLL_DUMP to ignore special (like zero) pages */ page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); err = PTR_ERR(page); if (IS_ERR(page)) goto set_status; err = -ENOENT; if (!page) goto set_status; if (!is_zone_device_page(page)) err = page_to_nid(page); put_page(page); set_status: *status = err; pages++; status++; } mmap_read_unlock(mm); } static int get_compat_pages_array(const void __user *chunk_pages[], const void __user * __user *pages, unsigned long chunk_nr) { compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages; compat_uptr_t p; int i; for (i = 0; i < chunk_nr; i++) { if (get_user(p, pages32 + i)) return -EFAULT; chunk_pages[i] = compat_ptr(p); } return 0; } /* * Determine the nodes of a user array of pages and store it in * a user array of status. 
*/ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages, const void __user * __user *pages, int __user *status) { #define DO_PAGES_STAT_CHUNK_NR 16UL const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR]; int chunk_status[DO_PAGES_STAT_CHUNK_NR]; while (nr_pages) { unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR); if (in_compat_syscall()) { if (get_compat_pages_array(chunk_pages, pages, chunk_nr)) break; } else { if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages))) break; } do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status); if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status))) break; pages += chunk_nr; status += chunk_nr; nr_pages -= chunk_nr; } return nr_pages ? -EFAULT : 0; } static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes) { struct task_struct *task; struct mm_struct *mm; /* * There is no need to check if current process has the right to modify * the specified process when they are same. */ if (!pid) { mmget(current->mm); *mem_nodes = cpuset_mems_allowed(current); return current->mm; } /* Find the mm_struct */ rcu_read_lock(); task = find_task_by_vpid(pid); if (!task) { rcu_read_unlock(); return ERR_PTR(-ESRCH); } get_task_struct(task); /* * Check if this process has the right to modify the specified * process. Use the regular "ptrace_may_access()" checks. */ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { rcu_read_unlock(); mm = ERR_PTR(-EPERM); goto out; } rcu_read_unlock(); mm = ERR_PTR(security_task_movememory(task)); if (IS_ERR(mm)) goto out; *mem_nodes = cpuset_mems_allowed(task); mm = get_task_mm(task); out: put_task_struct(task); if (!mm) mm = ERR_PTR(-EINVAL); return mm; } /* * Move a list of pages in the address space of the currently executing * process. */ static int kernel_move_pages(pid_t pid, unsigned long nr_pages, const void __user * __user *pages, const int __user *nodes, int __user *status, int flags) { struct mm_struct *mm; int err; nodemask_t task_nodes; /* Check flags */ if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL)) return -EINVAL; if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) return -EPERM; mm = find_mm_struct(pid, &task_nodes); if (IS_ERR(mm)) return PTR_ERR(mm); if (nodes) err = do_pages_move(mm, task_nodes, nr_pages, pages, nodes, status, flags); else err = do_pages_stat(mm, nr_pages, pages, status); mmput(mm); return err; } SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, const void __user * __user *, pages, const int __user *, nodes, int __user *, status, int, flags) { return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags); } #ifdef CONFIG_NUMA_BALANCING /* * Returns true if this is a safe migration target node for misplaced NUMA * pages. Currently it only checks the watermarks which is crude. */ static bool migrate_balanced_pgdat(struct pglist_data *pgdat, unsigned long nr_migrate_pages) { int z; for (z = pgdat->nr_zones - 1; z >= 0; z--) { struct zone *zone = pgdat->node_zones + z; if (!managed_zone(zone)) continue; /* Avoid waking kswapd by allocating pages_to_migrate pages. 
*/ if (!zone_watermark_ok(zone, 0, high_wmark_pages(zone) + nr_migrate_pages, ZONE_MOVABLE, 0)) continue; return true; } return false; } static struct folio *alloc_misplaced_dst_folio(struct folio *src, unsigned long data) { int nid = (int) data; int order = folio_order(src); gfp_t gfp = __GFP_THISNODE; if (order > 0) gfp |= GFP_TRANSHUGE_LIGHT; else { gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; gfp &= ~__GFP_RECLAIM; } return __folio_alloc_node(gfp, order, nid); } static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) { int nr_pages = thp_nr_pages(page); int order = compound_order(page); VM_BUG_ON_PAGE(order && !PageTransHuge(page), page); /* Do not migrate THP mapped by multiple processes */ if (PageTransHuge(page) && total_mapcount(page) > 1) return 0; /* Avoid migrating to a node that is nearly full */ if (!migrate_balanced_pgdat(pgdat, nr_pages)) { int z; if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)) return 0; for (z = pgdat->nr_zones - 1; z >= 0; z--) { if (managed_zone(pgdat->node_zones + z)) break; } wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE); return 0; } if (!isolate_lru_page(page)) return 0; mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page), nr_pages); /* * Isolating the page has taken another reference, so the * caller's reference can be safely dropped without the page * disappearing underneath us during migration. */ put_page(page); return 1; } /* * Attempt to migrate a misplaced page to the specified destination * node. Caller is expected to have an elevated reference count on * the page that will be dropped by this function before returning. */ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node) { pg_data_t *pgdat = NODE_DATA(node); int isolated; int nr_remaining; unsigned int nr_succeeded; LIST_HEAD(migratepages); int nr_pages = thp_nr_pages(page); /* * Don't migrate file pages that are mapped in multiple processes * with execute permissions as they are probably shared libraries. */ if (page_mapcount(page) != 1 && page_is_file_lru(page) && (vma->vm_flags & VM_EXEC)) goto out; /* * Also do not migrate dirty pages as not all filesystems can move * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles. */ if (page_is_file_lru(page) && PageDirty(page)) goto out; isolated = numamigrate_isolate_page(pgdat, page); if (!isolated) goto out; list_add(&page->lru, &migratepages); nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio, NULL, node, MIGRATE_ASYNC, MR_NUMA_MISPLACED, &nr_succeeded); if (nr_remaining) { if (!list_empty(&migratepages)) { list_del(&page->lru); mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page), -nr_pages); putback_lru_page(page); } isolated = 0; } if (nr_succeeded) { count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded); if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node)) mod_node_page_state(pgdat, PGPROMOTE_SUCCESS, nr_succeeded); } BUG_ON(!list_empty(&migratepages)); return isolated; out: put_page(page); return 0; } #endif /* CONFIG_NUMA_BALANCING */ #endif /* CONFIG_NUMA */
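For reference, the move_pages() system call wired up above is normally reached through the libnuma wrapper of the same name. The standalone userspace sketch below (an illustration only, assuming a NUMA-capable kernel, node 0 online and allowed by the cpuset, and linking with -lnuma) asks the kernel to move one of the caller's own pages and then reads back the per-page status the syscall fills in; the buffer name and target node are made up for the example.

#define _GNU_SOURCE
#include <numaif.h>      /* move_pages(), MPOL_MF_MOVE; link with -lnuma */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *buf;
	void *pages[1];
	int nodes[1] = { 0 };	/* assumed target node for the example */
	int status[1] = { -1 };
	long rc;

	if (posix_memalign(&buf, page_size, page_size))
		return 1;
	memset(buf, 0, page_size);	/* touch the page so it is actually allocated */

	pages[0] = buf;
	/* pid 0 means "the calling process"; status[] receives a node id or -errno per page */
	rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
	if (rc < 0) {
		perror("move_pages");
		return 1;
	}
	printf("page now on node %d (rc=%ld)\n", status[0], rc);
	free(buf);
	return 0;
}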
linux-master
mm/migrate.c
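Inside the kernel, callers of migrate_pages() follow the same pattern do_move_pages_to_node() uses in the file above: isolate folios onto a private list, hand migrate_pages() an allocation callback plus its private cookie, and put back whatever could not be migrated. The helper below is a hypothetical, condensed sketch of that calling convention, not a supported external API; example_migrate_list_to_node() is not a real kernel function, and struct migration_target_control is an mm-internal type (mm/internal.h), so this only makes sense for core-mm code.

/* Hypothetical sketch: migrate an already-isolated list of folios to @nid. */
static int example_migrate_list_to_node(struct list_head *folios, int nid)
{
	unsigned int nr_succeeded = 0;
	struct migration_target_control mtc = {
		.nid = nid,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};
	int err;

	err = migrate_pages(folios, alloc_migration_target, NULL,
			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL,
			    &nr_succeeded);
	if (err)		/* > 0: folios left unmigrated, < 0: hard error */
		putback_movable_pages(folios);

	pr_debug("migrated %u pages, rc=%d\n", nr_succeeded, err);
	return err;
}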
// SPDX-License-Identifier: GPL-2.0 /* * linux/mm/mmzone.c * * management codes for pgdats, zones and page flags */ #include <linux/stddef.h> #include <linux/mm.h> #include <linux/mmzone.h> struct pglist_data *first_online_pgdat(void) { return NODE_DATA(first_online_node); } struct pglist_data *next_online_pgdat(struct pglist_data *pgdat) { int nid = next_online_node(pgdat->node_id); if (nid == MAX_NUMNODES) return NULL; return NODE_DATA(nid); } /* * next_zone - helper magic for for_each_zone() */ struct zone *next_zone(struct zone *zone) { pg_data_t *pgdat = zone->zone_pgdat; if (zone < pgdat->node_zones + MAX_NR_ZONES - 1) zone++; else { pgdat = next_online_pgdat(pgdat); if (pgdat) zone = pgdat->node_zones; else zone = NULL; } return zone; } static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes) { #ifdef CONFIG_NUMA return node_isset(zonelist_node_idx(zref), *nodes); #else return 1; #endif /* CONFIG_NUMA */ } /* Returns the next zone at or below highest_zoneidx in a zonelist */ struct zoneref *__next_zones_zonelist(struct zoneref *z, enum zone_type highest_zoneidx, nodemask_t *nodes) { /* * Find the next suitable zone to use for the allocation. * Only filter based on nodemask if it's set */ if (unlikely(nodes == NULL)) while (zonelist_zone_idx(z) > highest_zoneidx) z++; else while (zonelist_zone_idx(z) > highest_zoneidx || (z->zone && !zref_in_nodemask(z, nodes))) z++; return z; } void lruvec_init(struct lruvec *lruvec) { enum lru_list lru; memset(lruvec, 0, sizeof(struct lruvec)); spin_lock_init(&lruvec->lru_lock); for_each_lru(lru) INIT_LIST_HEAD(&lruvec->lists[lru]); /* * The "Unevictable LRU" is imaginary: though its size is maintained, * it is never scanned, and unevictable pages are not threaded on it * (so that their lru fields can be reused to hold mlock_count). * Poison its list head, so that any operations on it would crash. */ list_del(&lruvec->lists[LRU_UNEVICTABLE]); lru_gen_init_lruvec(lruvec); } #if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) int page_cpupid_xchg_last(struct page *page, int cpupid) { unsigned long old_flags, flags; int last_cpupid; old_flags = READ_ONCE(page->flags); do { flags = old_flags; last_cpupid = (flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT); flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT; } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags))); return last_cpupid; } #endif
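first_online_pgdat(), next_online_pgdat() and next_zone() above exist mainly to back the for_each_zone() iterator in mmzone.h. The hypothetical helper below spells that walk out by hand, assuming it runs in core-mm context where managed_zone() and zone_managed_pages() are visible; it is a sketch of the iteration pattern, not code from this file.

/* Hypothetical: sum managed pages across every zone of every online node. */
static unsigned long example_count_managed_pages(void)
{
	struct zone *zone;
	unsigned long total = 0;

	/* Same walk that the for_each_zone() helper performs. */
	for (zone = (first_online_pgdat())->node_zones; zone;
	     zone = next_zone(zone)) {
		if (managed_zone(zone))
			total += zone_managed_pages(zone);
	}
	return total;
}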
linux-master
mm/mmzone.c
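The page_cpupid_xchg_last() loop at the end of mmzone.c is a standard lock-free read-modify-write: read the flags word, splice the new field in, and retry the compare-and-swap until no other writer has intervened. The userspace sketch below reproduces the same pattern with C11 atomics; the shift and mask values are invented for illustration and do not match the kernel's page-flags layout.

#include <stdatomic.h>
#include <stdio.h>

#define LAST_CPUPID_SHIFT 8		/* illustrative values only */
#define LAST_CPUPID_MASK  0xffUL

static _Atomic unsigned long flags_word;

/* Replace the cpupid bitfield and return its previous value, using the same
 * compare-exchange retry loop as page_cpupid_xchg_last(). */
static unsigned int xchg_last_cpupid(unsigned int cpupid)
{
	unsigned long old = atomic_load_explicit(&flags_word,
						 memory_order_relaxed);
	unsigned long new;
	unsigned int last;

	do {
		last = (old >> LAST_CPUPID_SHIFT) & LAST_CPUPID_MASK;
		new = old & ~(LAST_CPUPID_MASK << LAST_CPUPID_SHIFT);
		new |= ((unsigned long)cpupid & LAST_CPUPID_MASK)
						<< LAST_CPUPID_SHIFT;
		/* on failure, 'old' is refreshed and the loop recomputes */
	} while (!atomic_compare_exchange_weak(&flags_word, &old, new));

	return last;
}

int main(void)
{
	printf("previous cpupid field: %u\n", xchg_last_cpupid(42));	/* 0 */
	printf("previous cpupid field: %u\n", xchg_last_cpupid(7));	/* 42 */
	return 0;
}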
/* * mm/rmap.c - physical to virtual reverse mappings * * Copyright 2001, Rik van Riel <[email protected]> * Released under the General Public License (GPL). * * Simple, low overhead reverse mapping scheme. * Please try to keep this thing as modular as possible. * * Provides methods for unmapping each kind of mapped page: * the anon methods track anonymous pages, and * the file methods track pages belonging to an inode. * * Original design by Rik van Riel <[email protected]> 2001 * File methods by Dave McCracken <[email protected]> 2003, 2004 * Anonymous methods by Andrea Arcangeli <[email protected]> 2004 * Contributions by Hugh Dickins 2003, 2004 */ /* * Lock ordering in mm: * * inode->i_rwsem (while writing or truncating, not reading or faulting) * mm->mmap_lock * mapping->invalidate_lock (in filemap_fault) * page->flags PG_locked (lock_page) * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below) * vma_start_write * mapping->i_mmap_rwsem * anon_vma->rwsem * mm->page_table_lock or pte_lock * swap_lock (in swap_duplicate, swap_info_get) * mmlist_lock (in mmput, drain_mmlist and others) * mapping->private_lock (in block_dirty_folio) * folio_lock_memcg move_lock (in block_dirty_folio) * i_pages lock (widely used) * lruvec->lru_lock (in folio_lruvec_lock_irq) * inode->i_lock (in set_page_dirty's __mark_inode_dirty) * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty) * sb_lock (within inode_lock in fs/fs-writeback.c) * i_pages lock (widely used, in set_page_dirty, * in arch-dependent flush_dcache_mmap_lock, * within bdi.wb->list_lock in __sync_single_inode) * * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon) * ->tasklist_lock * pte map lock * * hugetlbfs PageHuge() take locks in this order: * hugetlb_fault_mutex (hugetlbfs specific page fault mutex) * vma_lock (hugetlb specific lock for pmd_sharing) * mapping->i_mmap_rwsem (also used for hugetlb pmd sharing) * page->flags PG_locked (lock_page) */ #include <linux/mm.h> #include <linux/sched/mm.h> #include <linux/sched/task.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/rcupdate.h> #include <linux/export.h> #include <linux/memcontrol.h> #include <linux/mmu_notifier.h> #include <linux/migrate.h> #include <linux/hugetlb.h> #include <linux/huge_mm.h> #include <linux/backing-dev.h> #include <linux/page_idle.h> #include <linux/memremap.h> #include <linux/userfaultfd_k.h> #include <linux/mm_inline.h> #include <asm/tlbflush.h> #define CREATE_TRACE_POINTS #include <trace/events/tlb.h> #include <trace/events/migrate.h> #include "internal.h" static struct kmem_cache *anon_vma_cachep; static struct kmem_cache *anon_vma_chain_cachep; static inline struct anon_vma *anon_vma_alloc(void) { struct anon_vma *anon_vma; anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); if (anon_vma) { atomic_set(&anon_vma->refcount, 1); anon_vma->num_children = 0; anon_vma->num_active_vmas = 0; anon_vma->parent = anon_vma; /* * Initialise the anon_vma root to point to itself. If called * from fork, the root will be reset to the parents anon_vma. */ anon_vma->root = anon_vma; } return anon_vma; } static inline void anon_vma_free(struct anon_vma *anon_vma) { VM_BUG_ON(atomic_read(&anon_vma->refcount)); /* * Synchronize against folio_lock_anon_vma_read() such that * we can safely hold the lock without the anon_vma getting * freed. 
* * Relies on the full mb implied by the atomic_dec_and_test() from * put_anon_vma() against the acquire barrier implied by * down_read_trylock() from folio_lock_anon_vma_read(). This orders: * * folio_lock_anon_vma_read() VS put_anon_vma() * down_read_trylock() atomic_dec_and_test() * LOCK MB * atomic_read() rwsem_is_locked() * * LOCK should suffice since the actual taking of the lock must * happen _before_ what follows. */ might_sleep(); if (rwsem_is_locked(&anon_vma->root->rwsem)) { anon_vma_lock_write(anon_vma); anon_vma_unlock_write(anon_vma); } kmem_cache_free(anon_vma_cachep, anon_vma); } static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp) { return kmem_cache_alloc(anon_vma_chain_cachep, gfp); } static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) { kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); } static void anon_vma_chain_link(struct vm_area_struct *vma, struct anon_vma_chain *avc, struct anon_vma *anon_vma) { avc->vma = vma; avc->anon_vma = anon_vma; list_add(&avc->same_vma, &vma->anon_vma_chain); anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); } /** * __anon_vma_prepare - attach an anon_vma to a memory region * @vma: the memory region in question * * This makes sure the memory mapping described by 'vma' has * an 'anon_vma' attached to it, so that we can associate the * anonymous pages mapped into it with that anon_vma. * * The common case will be that we already have one, which * is handled inline by anon_vma_prepare(). But if * not we either need to find an adjacent mapping that we * can re-use the anon_vma from (very common when the only * reason for splitting a vma has been mprotect()), or we * allocate a new one. * * Anon-vma allocations are very subtle, because we may have * optimistically looked up an anon_vma in folio_lock_anon_vma_read() * and that may actually touch the rwsem even in the newly * allocated vma (it depends on RCU to make sure that the * anon_vma isn't actually destroyed). * * As a result, we need to do proper anon_vma locking even * for the new allocation. At the same time, we do not want * to do any locking for the common case of already having * an anon_vma. * * This must be called with the mmap_lock held for reading. */ int __anon_vma_prepare(struct vm_area_struct *vma) { struct mm_struct *mm = vma->vm_mm; struct anon_vma *anon_vma, *allocated; struct anon_vma_chain *avc; might_sleep(); avc = anon_vma_chain_alloc(GFP_KERNEL); if (!avc) goto out_enomem; anon_vma = find_mergeable_anon_vma(vma); allocated = NULL; if (!anon_vma) { anon_vma = anon_vma_alloc(); if (unlikely(!anon_vma)) goto out_enomem_free_avc; anon_vma->num_children++; /* self-parent link for new root */ allocated = anon_vma; } anon_vma_lock_write(anon_vma); /* page_table_lock to protect against threads */ spin_lock(&mm->page_table_lock); if (likely(!vma->anon_vma)) { vma->anon_vma = anon_vma; anon_vma_chain_link(vma, avc, anon_vma); anon_vma->num_active_vmas++; allocated = NULL; avc = NULL; } spin_unlock(&mm->page_table_lock); anon_vma_unlock_write(anon_vma); if (unlikely(allocated)) put_anon_vma(allocated); if (unlikely(avc)) anon_vma_chain_free(avc); return 0; out_enomem_free_avc: anon_vma_chain_free(avc); out_enomem: return -ENOMEM; } /* * This is a useful helper function for locking the anon_vma root as * we traverse the vma->anon_vma_chain, looping over anon_vma's that * have the same vma. * * Such anon_vma's should have the same root, so you'd expect to see * just a single mutex_lock for the whole traversal. 
*/ static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma) { struct anon_vma *new_root = anon_vma->root; if (new_root != root) { if (WARN_ON_ONCE(root)) up_write(&root->rwsem); root = new_root; down_write(&root->rwsem); } return root; } static inline void unlock_anon_vma_root(struct anon_vma *root) { if (root) up_write(&root->rwsem); } /* * Attach the anon_vmas from src to dst. * Returns 0 on success, -ENOMEM on failure. * * anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(), * copy_vma() and anon_vma_fork(). The first four want an exact copy of src, * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to * prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL before * call, we can identify this case by checking (!dst->anon_vma && * src->anon_vma). * * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find * and reuse existing anon_vma which has no vmas and only one child anon_vma. * This prevents degradation of anon_vma hierarchy to endless linear chain in * case of constantly forking task. On the other hand, an anon_vma with more * than one child isn't reused even if there was no alive vma, thus rmap * walker has a good chance of avoiding scanning the whole hierarchy when it * searches where page is mapped. */ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) { struct anon_vma_chain *avc, *pavc; struct anon_vma *root = NULL; list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { struct anon_vma *anon_vma; avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN); if (unlikely(!avc)) { unlock_anon_vma_root(root); root = NULL; avc = anon_vma_chain_alloc(GFP_KERNEL); if (!avc) goto enomem_failure; } anon_vma = pavc->anon_vma; root = lock_anon_vma_root(root, anon_vma); anon_vma_chain_link(dst, avc, anon_vma); /* * Reuse existing anon_vma if it has no vma and only one * anon_vma child. * * Root anon_vma is never reused: * it has self-parent reference and at least one child. */ if (!dst->anon_vma && src->anon_vma && anon_vma->num_children < 2 && anon_vma->num_active_vmas == 0) dst->anon_vma = anon_vma; } if (dst->anon_vma) dst->anon_vma->num_active_vmas++; unlock_anon_vma_root(root); return 0; enomem_failure: /* * dst->anon_vma is dropped here otherwise its num_active_vmas can * be incorrectly decremented in unlink_anon_vmas(). * We can safely do this because callers of anon_vma_clone() don't care * about dst->anon_vma if anon_vma_clone() failed. */ dst->anon_vma = NULL; unlink_anon_vmas(dst); return -ENOMEM; } /* * Attach vma to its own anon_vma, as well as to the anon_vmas that * the corresponding VMA in the parent process is attached to. * Returns 0 on success, non-zero on failure. */ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) { struct anon_vma_chain *avc; struct anon_vma *anon_vma; int error; /* Don't bother if the parent process has no anon_vma here. */ if (!pvma->anon_vma) return 0; /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ vma->anon_vma = NULL; /* * First, attach the new VMA to the parent VMA's anon_vmas, * so rmap can find non-COWed pages in child processes. */ error = anon_vma_clone(vma, pvma); if (error) return error; /* An existing anon_vma has been reused, all done then. */ if (vma->anon_vma) return 0; /* Then add our own anon_vma. 
*/ anon_vma = anon_vma_alloc(); if (!anon_vma) goto out_error; anon_vma->num_active_vmas++; avc = anon_vma_chain_alloc(GFP_KERNEL); if (!avc) goto out_error_free_anon_vma; /* * The root anon_vma's rwsem is the lock actually used when we * lock any of the anon_vmas in this anon_vma tree. */ anon_vma->root = pvma->anon_vma->root; anon_vma->parent = pvma->anon_vma; /* * With refcounts, an anon_vma can stay around longer than the * process it belongs to. The root anon_vma needs to be pinned until * this anon_vma is freed, because the lock lives in the root. */ get_anon_vma(anon_vma->root); /* Mark this anon_vma as the one where our new (COWed) pages go. */ vma->anon_vma = anon_vma; anon_vma_lock_write(anon_vma); anon_vma_chain_link(vma, avc, anon_vma); anon_vma->parent->num_children++; anon_vma_unlock_write(anon_vma); return 0; out_error_free_anon_vma: put_anon_vma(anon_vma); out_error: unlink_anon_vmas(vma); return -ENOMEM; } void unlink_anon_vmas(struct vm_area_struct *vma) { struct anon_vma_chain *avc, *next; struct anon_vma *root = NULL; /* * Unlink each anon_vma chained to the VMA. This list is ordered * from newest to oldest, ensuring the root anon_vma gets freed last. */ list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { struct anon_vma *anon_vma = avc->anon_vma; root = lock_anon_vma_root(root, anon_vma); anon_vma_interval_tree_remove(avc, &anon_vma->rb_root); /* * Leave empty anon_vmas on the list - we'll need * to free them outside the lock. */ if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) { anon_vma->parent->num_children--; continue; } list_del(&avc->same_vma); anon_vma_chain_free(avc); } if (vma->anon_vma) { vma->anon_vma->num_active_vmas--; /* * vma would still be needed after unlink, and anon_vma will be prepared * when handle fault. */ vma->anon_vma = NULL; } unlock_anon_vma_root(root); /* * Iterate the list once more, it now only contains empty and unlinked * anon_vmas, destroy them. Could not do before due to __put_anon_vma() * needing to write-acquire the anon_vma->root->rwsem. */ list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { struct anon_vma *anon_vma = avc->anon_vma; VM_WARN_ON(anon_vma->num_children); VM_WARN_ON(anon_vma->num_active_vmas); put_anon_vma(anon_vma); list_del(&avc->same_vma); anon_vma_chain_free(avc); } } static void anon_vma_ctor(void *data) { struct anon_vma *anon_vma = data; init_rwsem(&anon_vma->rwsem); atomic_set(&anon_vma->refcount, 0); anon_vma->rb_root = RB_ROOT_CACHED; } void __init anon_vma_init(void) { anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT, anon_vma_ctor); anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC|SLAB_ACCOUNT); } /* * Getting a lock on a stable anon_vma from a page off the LRU is tricky! * * Since there is no serialization what so ever against page_remove_rmap() * the best this function can do is return a refcount increased anon_vma * that might have been relevant to this page. * * The page might have been remapped to a different anon_vma or the anon_vma * returned may already be freed (and even reused). * * In case it was remapped to a different anon_vma, the new anon_vma will be a * child of the old anon_vma, and the anon_vma lifetime rules will therefore * ensure that any anon_vma obtained from the page will still be valid for as * long as we observe page_mapped() [ hence all those page_mapped() tests ]. 
* * All users of this function must be very careful when walking the anon_vma * chain and verify that the page in question is indeed mapped in it * [ something equivalent to page_mapped_in_vma() ]. * * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from * page_remove_rmap() that the anon_vma pointer from page->mapping is valid * if there is a mapcount, we can dereference the anon_vma after observing * those. */ struct anon_vma *folio_get_anon_vma(struct folio *folio) { struct anon_vma *anon_vma = NULL; unsigned long anon_mapping; rcu_read_lock(); anon_mapping = (unsigned long)READ_ONCE(folio->mapping); if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) goto out; if (!folio_mapped(folio)) goto out; anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); if (!atomic_inc_not_zero(&anon_vma->refcount)) { anon_vma = NULL; goto out; } /* * If this folio is still mapped, then its anon_vma cannot have been * freed. But if it has been unmapped, we have no security against the * anon_vma structure being freed and reused (for another anon_vma: * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero() * above cannot corrupt). */ if (!folio_mapped(folio)) { rcu_read_unlock(); put_anon_vma(anon_vma); return NULL; } out: rcu_read_unlock(); return anon_vma; } /* * Similar to folio_get_anon_vma() except it locks the anon_vma. * * Its a little more complex as it tries to keep the fast path to a single * atomic op -- the trylock. If we fail the trylock, we fall back to getting a * reference like with folio_get_anon_vma() and then block on the mutex * on !rwc->try_lock case. */ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio, struct rmap_walk_control *rwc) { struct anon_vma *anon_vma = NULL; struct anon_vma *root_anon_vma; unsigned long anon_mapping; rcu_read_lock(); anon_mapping = (unsigned long)READ_ONCE(folio->mapping); if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) goto out; if (!folio_mapped(folio)) goto out; anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); root_anon_vma = READ_ONCE(anon_vma->root); if (down_read_trylock(&root_anon_vma->rwsem)) { /* * If the folio is still mapped, then this anon_vma is still * its anon_vma, and holding the mutex ensures that it will * not go away, see anon_vma_free(). */ if (!folio_mapped(folio)) { up_read(&root_anon_vma->rwsem); anon_vma = NULL; } goto out; } if (rwc && rwc->try_lock) { anon_vma = NULL; rwc->contended = true; goto out; } /* trylock failed, we got to sleep */ if (!atomic_inc_not_zero(&anon_vma->refcount)) { anon_vma = NULL; goto out; } if (!folio_mapped(folio)) { rcu_read_unlock(); put_anon_vma(anon_vma); return NULL; } /* we pinned the anon_vma, its safe to sleep */ rcu_read_unlock(); anon_vma_lock_read(anon_vma); if (atomic_dec_and_test(&anon_vma->refcount)) { /* * Oops, we held the last refcount, release the lock * and bail -- can't simply use put_anon_vma() because * we'll deadlock on the anon_vma_lock_write() recursion. */ anon_vma_unlock_read(anon_vma); __put_anon_vma(anon_vma); anon_vma = NULL; } return anon_vma; out: rcu_read_unlock(); return anon_vma; } #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH /* * Flush TLB entries for recently unmapped pages from remote CPUs. It is * important if a PTE was dirty when it was unmapped that it's flushed * before any IO is initiated on the page to prevent lost writes. Similarly, * it must be flushed before freeing to prevent data leakage. 
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

/*
 * Bits 0-14 of mm->tlb_flush_batched record pending generations.
 * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
 */
#define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
#define TLB_FLUSH_BATCH_PENDING_MASK	\
	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
#define TLB_FLUSH_BATCH_PENDING_LARGE	\
	(TLB_FLUSH_BATCH_PENDING_MASK / 2)

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
				      unsigned long uaddr)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
	int batch;
	bool writable = pte_dirty(pteval);

	if (!pte_accessible(mm, pteval))
		return;

	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure compiler does not re-order the setting of tlb_flush_batched
	 * before the PTE is cleared.
	 */
	barrier();
	batch = atomic_read(&mm->tlb_flush_batched);
retry:
	if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
		/*
		 * Prevent `pending' from catching up with `flushed' because of
		 * overflow.  Reset `pending' and `flushed' to be 1 and 0 if
		 * `pending' becomes large.
		 */
		if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1))
			goto retry;
	} else {
		atomic_inc(&mm->tlb_flush_batched);
	}

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	return arch_tlbbatch_should_defer(mm);
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	int batch = atomic_read(&mm->tlb_flush_batched);
	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;

	if (pending != flushed) {
		arch_flush_tlb_batched_pending(mm);
		/*
		 * If the new TLB flushing is pending during flushing, leave
		 * mm->tlb_flush_batched as is, to avoid losing flushing.
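		 *
		 * Worked example under the encoding above (an illustration,
		 * not an additional guarantee): if batch was read with
		 * pending == 5 and flushed == 3, the flush runs and the
		 * cmpxchg below tries to store
		 * 5 | (5 << TLB_FLUSH_BATCH_FLUSHED_SHIFT), i.e. both fields
		 * equal to 5.  If another unmap raced in and bumped pending
		 * to 6 first, the cmpxchg fails and the unequal state is
		 * left for a later flush_tlb_batched_pending().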
*/ atomic_cmpxchg(&mm->tlb_flush_batched, batch, pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT)); } } #else static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, unsigned long uaddr) { } static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) { return false; } #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ /* * At what user virtual address is page expected in vma? * Caller should check the page is actually part of the vma. */ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) { struct folio *folio = page_folio(page); if (folio_test_anon(folio)) { struct anon_vma *page__anon_vma = folio_anon_vma(folio); /* * Note: swapoff's unuse_vma() is more efficient with this * check, and needs it to match anon_vma when KSM is active. */ if (!vma->anon_vma || !page__anon_vma || vma->anon_vma->root != page__anon_vma->root) return -EFAULT; } else if (!vma->vm_file) { return -EFAULT; } else if (vma->vm_file->f_mapping != folio->mapping) { return -EFAULT; } return vma_address(page, vma); } /* * Returns the actual pmd_t* where we expect 'address' to be mapped from, or * NULL if it doesn't exist. No guarantees / checks on what the pmd_t* * represents. */ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd = NULL; pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) goto out; p4d = p4d_offset(pgd, address); if (!p4d_present(*p4d)) goto out; pud = pud_offset(p4d, address); if (!pud_present(*pud)) goto out; pmd = pmd_offset(pud, address); out: return pmd; } struct folio_referenced_arg { int mapcount; int referenced; unsigned long vm_flags; struct mem_cgroup *memcg; }; /* * arg: folio_referenced_arg will be passed */ static bool folio_referenced_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *arg) { struct folio_referenced_arg *pra = arg; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); int referenced = 0; while (page_vma_mapped_walk(&pvmw)) { address = pvmw.address; if ((vma->vm_flags & VM_LOCKED) && (!folio_test_large(folio) || !pvmw.pte)) { /* Restore the mlock which got missed */ mlock_vma_folio(folio, vma, !pvmw.pte); page_vma_mapped_walk_done(&pvmw); pra->vm_flags |= VM_LOCKED; return false; /* To break the loop */ } if (pvmw.pte) { if (lru_gen_enabled() && pte_young(ptep_get(pvmw.pte))) { lru_gen_look_around(&pvmw); referenced++; } if (ptep_clear_flush_young_notify(vma, address, pvmw.pte)) referenced++; } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { if (pmdp_clear_flush_young_notify(vma, address, pvmw.pmd)) referenced++; } else { /* unexpected pmd-mapped folio? */ WARN_ON_ONCE(1); } pra->mapcount--; } if (referenced) folio_clear_idle(folio); if (folio_test_clear_young(folio)) referenced++; if (referenced) { pra->referenced++; pra->vm_flags |= vma->vm_flags & ~VM_LOCKED; } if (!pra->mapcount) return false; /* To break the loop */ return true; } static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg) { struct folio_referenced_arg *pra = arg; struct mem_cgroup *memcg = pra->memcg; /* * Ignore references from this mapping if it has no recency. If the * folio has been used in another mapping, we will catch it; if this * other mapping is already gone, the unmap path will have set the * referenced flag or activated the folio in zap_pte_range(). */ if (!vma_has_recency(vma)) return true; /* * If we are reclaiming on behalf of a cgroup, skip counting on behalf * of references from different cgroups. 
*/ if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) return true; return false; } /** * folio_referenced() - Test if the folio was referenced. * @folio: The folio to test. * @is_locked: Caller holds lock on the folio. * @memcg: target memory cgroup * @vm_flags: A combination of all the vma->vm_flags which referenced the folio. * * Quick test_and_clear_referenced for all mappings of a folio, * * Return: The number of mappings which referenced the folio. Return -1 if * the function bailed out due to rmap lock contention. */ int folio_referenced(struct folio *folio, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags) { int we_locked = 0; struct folio_referenced_arg pra = { .mapcount = folio_mapcount(folio), .memcg = memcg, }; struct rmap_walk_control rwc = { .rmap_one = folio_referenced_one, .arg = (void *)&pra, .anon_lock = folio_lock_anon_vma_read, .try_lock = true, .invalid_vma = invalid_folio_referenced_vma, }; *vm_flags = 0; if (!pra.mapcount) return 0; if (!folio_raw_mapping(folio)) return 0; if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) { we_locked = folio_trylock(folio); if (!we_locked) return 1; } rmap_walk(folio, &rwc); *vm_flags = pra.vm_flags; if (we_locked) folio_unlock(folio); return rwc.contended ? -1 : pra.referenced; } static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw) { int cleaned = 0; struct vm_area_struct *vma = pvmw->vma; struct mmu_notifier_range range; unsigned long address = pvmw->address; /* * We have to assume the worse case ie pmd for invalidation. Note that * the folio can not be freed from this function. */ mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0, vma->vm_mm, address, vma_address_end(pvmw)); mmu_notifier_invalidate_range_start(&range); while (page_vma_mapped_walk(pvmw)) { int ret = 0; address = pvmw->address; if (pvmw->pte) { pte_t *pte = pvmw->pte; pte_t entry = ptep_get(pte); if (!pte_dirty(entry) && !pte_write(entry)) continue; flush_cache_page(vma, address, pte_pfn(entry)); entry = ptep_clear_flush(vma, address, pte); entry = pte_wrprotect(entry); entry = pte_mkclean(entry); set_pte_at(vma->vm_mm, address, pte, entry); ret = 1; } else { #ifdef CONFIG_TRANSPARENT_HUGEPAGE pmd_t *pmd = pvmw->pmd; pmd_t entry; if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) continue; flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); entry = pmdp_invalidate(vma, address, pmd); entry = pmd_wrprotect(entry); entry = pmd_mkclean(entry); set_pmd_at(vma->vm_mm, address, pmd, entry); ret = 1; #else /* unexpected pmd-mapped folio? 
*/ WARN_ON_ONCE(1); #endif } if (ret) cleaned++; } mmu_notifier_invalidate_range_end(&range); return cleaned; } static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *arg) { DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); int *cleaned = arg; *cleaned += page_vma_mkclean_one(&pvmw); return true; } static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) { if (vma->vm_flags & VM_SHARED) return false; return true; } int folio_mkclean(struct folio *folio) { int cleaned = 0; struct address_space *mapping; struct rmap_walk_control rwc = { .arg = (void *)&cleaned, .rmap_one = page_mkclean_one, .invalid_vma = invalid_mkclean_vma, }; BUG_ON(!folio_test_locked(folio)); if (!folio_mapped(folio)) return 0; mapping = folio_mapping(folio); if (!mapping) return 0; rmap_walk(folio, &rwc); return cleaned; } EXPORT_SYMBOL_GPL(folio_mkclean); /** * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of * [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff) * within the @vma of shared mappings. And since clean PTEs * should also be readonly, write protects them too. * @pfn: start pfn. * @nr_pages: number of physically contiguous pages srarting with @pfn. * @pgoff: page offset that the @pfn mapped with. * @vma: vma that @pfn mapped within. * * Returns the number of cleaned PTEs (including PMDs). */ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, struct vm_area_struct *vma) { struct page_vma_mapped_walk pvmw = { .pfn = pfn, .nr_pages = nr_pages, .pgoff = pgoff, .vma = vma, .flags = PVMW_SYNC, }; if (invalid_mkclean_vma(vma, NULL)) return 0; pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma); VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma); return page_vma_mkclean_one(&pvmw); } int folio_total_mapcount(struct folio *folio) { int mapcount = folio_entire_mapcount(folio); int nr_pages; int i; /* In the common case, avoid the loop when no pages mapped by PTE */ if (folio_nr_pages_mapped(folio) == 0) return mapcount; /* * Add all the PTE mappings of those pages mapped by PTE. * Limit the loop to folio_nr_pages_mapped()? * Perhaps: given all the raciness, that may be a good or a bad idea. */ nr_pages = folio_nr_pages(folio); for (i = 0; i < nr_pages; i++) mapcount += atomic_read(&folio_page(folio, i)->_mapcount); /* But each of those _mapcounts was based on -1 */ mapcount += nr_pages; return mapcount; } /** * page_move_anon_rmap - move a page to our anon_vma * @page: the page to move to our anon_vma * @vma: the vma the page belongs to * * When a page belongs exclusively to one process after a COW event, * that page can be moved into the anon_vma that belongs to just that * process, so the rmap code will not search the parent or sibling * processes. */ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) { void *anon_vma = vma->anon_vma; struct folio *folio = page_folio(page); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_VMA(!anon_vma, vma); anon_vma += PAGE_MAPPING_ANON; /* * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written * simultaneously, so a concurrent reader (eg folio_referenced()'s * folio_test_anon()) will not see one without the other. */ WRITE_ONCE(folio->mapping, anon_vma); SetPageAnonExclusive(page); } /** * __page_set_anon_rmap - set up new anonymous rmap * @folio: Folio which contains page. * @page: Page to add to rmap. * @vma: VM area to add page to. 
* @address: User virtual address of the mapping * @exclusive: the page is exclusively owned by the current process */ static void __page_set_anon_rmap(struct folio *folio, struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive) { struct anon_vma *anon_vma = vma->anon_vma; BUG_ON(!anon_vma); if (folio_test_anon(folio)) goto out; /* * If the page isn't exclusively mapped into this vma, * we must use the _oldest_ possible anon_vma for the * page mapping! */ if (!exclusive) anon_vma = anon_vma->root; /* * page_idle does a lockless/optimistic rmap scan on folio->mapping. * Make sure the compiler doesn't split the stores of anon_vma and * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code * could mistake the mapping for a struct address_space and crash. */ anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); folio->index = linear_page_index(vma, address); out: if (exclusive) SetPageAnonExclusive(page); } /** * __page_check_anon_rmap - sanity check anonymous rmap addition * @folio: The folio containing @page. * @page: the page to check the mapping of * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped */ static void __page_check_anon_rmap(struct folio *folio, struct page *page, struct vm_area_struct *vma, unsigned long address) { /* * The page's anon-rmap details (mapping and index) are guaranteed to * be set up correctly at this point. * * We have exclusion against page_add_anon_rmap because the caller * always holds the page locked. * * We have exclusion against page_add_new_anon_rmap because those pages * are initially only visible via the pagetables, and the pte is locked * over the call to page_add_new_anon_rmap. */ VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, folio); VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), page); } /** * page_add_anon_rmap - add pte mapping to an anonymous page * @page: the page to add the mapping to * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped * @flags: the rmap flags * * The caller needs to hold the pte lock, and the page must be locked in * the anon_vma case: to serialize mapping,index checking after setting, * and to ensure that PageAnon is not being upgraded racily to PageKsm * (but PageKsm is never downgraded to PageAnon). */ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, rmap_t flags) { struct folio *folio = page_folio(page); atomic_t *mapped = &folio->_nr_pages_mapped; int nr = 0, nr_pmdmapped = 0; bool compound = flags & RMAP_COMPOUND; bool first = true; /* Is page being mapped by PTE? Is this its first map to be added? */ if (likely(!compound)) { first = atomic_inc_and_test(&page->_mapcount); nr = first; if (first && folio_test_large(folio)) { nr = atomic_inc_return_relaxed(mapped); nr = (nr < COMPOUND_MAPPED); } } else if (folio_test_pmd_mappable(folio)) { /* That test is redundant: it's for safety or to optimize out */ first = atomic_inc_and_test(&folio->_entire_mapcount); if (first) { nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) { nr_pmdmapped = folio_nr_pages(folio); nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); /* Raced ahead of a remove and another add? 
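			 * (Reading of the counters, offered only as an aid:
			 * the low FOLIO_PAGES_MAPPED bits of _nr_pages_mapped
			 * count pages currently mapped by PTE, which were
			 * already accounted in the mapped stats, so only the
			 * remainder of the folio is added below; a transient
			 * race like the one asked about here can make that
			 * remainder negative, hence the clamp to 0.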
*/ if (unlikely(nr < 0)) nr = 0; } else { /* Raced ahead of a remove of COMPOUND_MAPPED */ nr = 0; } } } VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); if (nr_pmdmapped) __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped); if (nr) __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); if (likely(!folio_test_ksm(folio))) { /* address might be in next vma when migration races vma_merge */ if (first) __page_set_anon_rmap(folio, page, vma, address, !!(flags & RMAP_EXCLUSIVE)); else __page_check_anon_rmap(folio, page, vma, address); } mlock_vma_folio(folio, vma, compound); } /** * folio_add_new_anon_rmap - Add mapping to a new anonymous folio. * @folio: The folio to add the mapping to. * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped * * Like page_add_anon_rmap() but must only be called on *new* folios. * This means the inc-and-test can be bypassed. * The folio does not have to be locked. * * If the folio is large, it is accounted as a THP. As the folio * is new, it's assumed to be mapped exclusively by a single process. */ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, unsigned long address) { int nr; VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); __folio_set_swapbacked(folio); if (likely(!folio_test_pmd_mappable(folio))) { /* increment count (starts at -1) */ atomic_set(&folio->_mapcount, 0); nr = 1; } else { /* increment count (starts at -1) */ atomic_set(&folio->_entire_mapcount, 0); atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED); nr = folio_nr_pages(folio); __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr); } __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); __page_set_anon_rmap(folio, &folio->page, vma, address, 1); } /** * folio_add_file_rmap_range - add pte mapping to page range of a folio * @folio: The folio to add the mapping to * @page: The first page to add * @nr_pages: The number of pages which will be mapped * @vma: the vm area in which the mapping is added * @compound: charge the page as compound or small page * * The page range of folio is defined by [first_page, first_page + nr_pages) * * The caller needs to hold the pte lock. */ void folio_add_file_rmap_range(struct folio *folio, struct page *page, unsigned int nr_pages, struct vm_area_struct *vma, bool compound) { atomic_t *mapped = &folio->_nr_pages_mapped; unsigned int nr_pmdmapped = 0, first; int nr = 0; VM_WARN_ON_FOLIO(compound && !folio_test_pmd_mappable(folio), folio); /* Is page being mapped by PTE? Is this its first map to be added? */ if (likely(!compound)) { do { first = atomic_inc_and_test(&page->_mapcount); if (first && folio_test_large(folio)) { first = atomic_inc_return_relaxed(mapped); first = (first < COMPOUND_MAPPED); } if (first) nr++; } while (page++, --nr_pages > 0); } else if (folio_test_pmd_mappable(folio)) { /* That test is redundant: it's for safety or to optimize out */ first = atomic_inc_and_test(&folio->_entire_mapcount); if (first) { nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) { nr_pmdmapped = folio_nr_pages(folio); nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); /* Raced ahead of a remove and another add? */ if (unlikely(nr < 0)) nr = 0; } else { /* Raced ahead of a remove of COMPOUND_MAPPED */ nr = 0; } } } if (nr_pmdmapped) __lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ? 
NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped); if (nr) __lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr); mlock_vma_folio(folio, vma, compound); } /** * page_add_file_rmap - add pte mapping to a file page * @page: the page to add the mapping to * @vma: the vm area in which the mapping is added * @compound: charge the page as compound or small page * * The caller needs to hold the pte lock. */ void page_add_file_rmap(struct page *page, struct vm_area_struct *vma, bool compound) { struct folio *folio = page_folio(page); unsigned int nr_pages; VM_WARN_ON_ONCE_PAGE(compound && !PageTransHuge(page), page); if (likely(!compound)) nr_pages = 1; else nr_pages = folio_nr_pages(folio); folio_add_file_rmap_range(folio, page, nr_pages, vma, compound); } /** * page_remove_rmap - take down pte mapping from a page * @page: page to remove mapping from * @vma: the vm area from which the mapping is removed * @compound: uncharge the page as compound or small page * * The caller needs to hold the pte lock. */ void page_remove_rmap(struct page *page, struct vm_area_struct *vma, bool compound) { struct folio *folio = page_folio(page); atomic_t *mapped = &folio->_nr_pages_mapped; int nr = 0, nr_pmdmapped = 0; bool last; enum node_stat_item idx; VM_BUG_ON_PAGE(compound && !PageHead(page), page); /* Hugetlb pages are not counted in NR_*MAPPED */ if (unlikely(folio_test_hugetlb(folio))) { /* hugetlb pages are always mapped with pmds */ atomic_dec(&folio->_entire_mapcount); return; } /* Is page being unmapped by PTE? Is this its last map to be removed? */ if (likely(!compound)) { last = atomic_add_negative(-1, &page->_mapcount); nr = last; if (last && folio_test_large(folio)) { nr = atomic_dec_return_relaxed(mapped); nr = (nr < COMPOUND_MAPPED); } } else if (folio_test_pmd_mappable(folio)) { /* That test is redundant: it's for safety or to optimize out */ last = atomic_add_negative(-1, &folio->_entire_mapcount); if (last) { nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped); if (likely(nr < COMPOUND_MAPPED)) { nr_pmdmapped = folio_nr_pages(folio); nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); /* Raced ahead of another remove and an add? */ if (unlikely(nr < 0)) nr = 0; } else { /* An add of COMPOUND_MAPPED raced ahead */ nr = 0; } } } if (nr_pmdmapped) { if (folio_test_anon(folio)) idx = NR_ANON_THPS; else if (folio_test_swapbacked(folio)) idx = NR_SHMEM_PMDMAPPED; else idx = NR_FILE_PMDMAPPED; __lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped); } if (nr) { idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED; __lruvec_stat_mod_folio(folio, idx, -nr); /* * Queue anon THP for deferred split if at least one * page of the folio is unmapped and at least one page * is still mapped. */ if (folio_test_pmd_mappable(folio) && folio_test_anon(folio)) if (!compound || nr < nr_pmdmapped) deferred_split_folio(folio); } /* * It would be tidy to reset folio_test_anon mapping when fully * unmapped, but that might overwrite a racing page_add_anon_rmap * which increments mapcount after us but sets mapping before us: * so leave the reset to free_pages_prepare, and remember that * it's only reliable while mapped. 
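	 *
	 * Spelling out the interleaving described above, purely as an
	 * illustration (A = this remove, B = a racing page_add_anon_rmap):
	 *
	 *	B: stores folio->mapping ("sets mapping before us")
	 *	A: the mapcount drops to zero, folio looks fully unmapped
	 *	A: a reset of ->mapping at this point would wipe out B's store
	 *	B: raises the mapcount again ("increments mapcount after us"),
	 *	   leaving a mapped folio whose ->mapping was just clobbered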
*/ munlock_vma_folio(folio, vma, compound); } /* * @arg: enum ttu_flags will be passed to this argument */ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *arg) { struct mm_struct *mm = vma->vm_mm; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); pte_t pteval; struct page *subpage; bool anon_exclusive, ret = true; struct mmu_notifier_range range; enum ttu_flags flags = (enum ttu_flags)(long)arg; unsigned long pfn; /* * When racing against e.g. zap_pte_range() on another cpu, * in between its ptep_get_and_clear_full() and page_remove_rmap(), * try_to_unmap() may return before page_mapped() has become false, * if page table locking is skipped: use TTU_SYNC to wait for that. */ if (flags & TTU_SYNC) pvmw.flags = PVMW_SYNC; if (flags & TTU_SPLIT_HUGE_PMD) split_huge_pmd_address(vma, address, false, folio); /* * For THP, we have to assume the worse case ie pmd for invalidation. * For hugetlb, it could be much worse if we need to do pud * invalidation in the case of pmd sharing. * * Note that the folio can not be freed in this function as call of * try_to_unmap() must hold a reference on the folio. */ range.end = vma_address_end(&pvmw); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, address, range.end); if (folio_test_hugetlb(folio)) { /* * If sharing is possible, start and end will be adjusted * accordingly. */ adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); } mmu_notifier_invalidate_range_start(&range); while (page_vma_mapped_walk(&pvmw)) { /* Unexpected PMD-mapped THP? */ VM_BUG_ON_FOLIO(!pvmw.pte, folio); /* * If the folio is in an mlock()d vma, we must not swap it out. */ if (!(flags & TTU_IGNORE_MLOCK) && (vma->vm_flags & VM_LOCKED)) { /* Restore the mlock which got missed */ mlock_vma_folio(folio, vma, false); page_vma_mapped_walk_done(&pvmw); ret = false; break; } pfn = pte_pfn(ptep_get(pvmw.pte)); subpage = folio_page(folio, pfn - folio_pfn(folio)); address = pvmw.address; anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(subpage); if (folio_test_hugetlb(folio)) { bool anon = folio_test_anon(folio); /* * The try_to_unmap() is only passed a hugetlb page * in the case where the hugetlb page is poisoned. */ VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage); /* * huge_pmd_unshare may unmap an entire PMD page. * There is no way of knowing exactly which PMDs may * be cached for this mm, so we must flush them all. * start/end were already adjusted above to cover this * range. */ flush_cache_range(vma, range.start, range.end); /* * To call huge_pmd_unshare, i_mmap_rwsem must be * held in write mode. Caller needs to explicitly * do this outside rmap routines. * * We also must hold hugetlb vma_lock in write mode. * Lock order dictates acquiring vma_lock BEFORE * i_mmap_rwsem. We can only try lock here and fail * if unsuccessful. */ if (!anon) { VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); if (!hugetlb_vma_trylock_write(vma)) { page_vma_mapped_walk_done(&pvmw); ret = false; break; } if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { hugetlb_vma_unlock_write(vma); flush_tlb_range(vma, range.start, range.end); /* * The ref count of the PMD page was * dropped which is part of the way map * counting is done for shared PMDs. * Return 'true' here. When there is * no other sharing, huge_pmd_unshare * returns false and we will unmap the * actual page and drop map count * to zero. 
*/ page_vma_mapped_walk_done(&pvmw); break; } hugetlb_vma_unlock_write(vma); } pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); } else { flush_cache_page(vma, address, pfn); /* Nuke the page table entry. */ if (should_defer_flush(mm, flags)) { /* * We clear the PTE but do not flush so potentially * a remote CPU could still be writing to the folio. * If the entry was previously clean then the * architecture must guarantee that a clear->dirty * transition on a cached TLB entry is written through * and traps if the PTE is unmapped. */ pteval = ptep_get_and_clear(mm, address, pvmw.pte); set_tlb_ubc_flush_pending(mm, pteval, address); } else { pteval = ptep_clear_flush(vma, address, pvmw.pte); } } /* * Now the pte is cleared. If this pte was uffd-wp armed, * we may want to replace a none pte with a marker pte if * it's file-backed, so we don't lose the tracking info. */ pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); /* Set the dirty flag on the folio now the pte is gone. */ if (pte_dirty(pteval)) folio_mark_dirty(folio); /* Update high watermark before we lower rss */ update_hiwater_rss(mm); if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) { pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); if (folio_test_hugetlb(folio)) { hugetlb_count_sub(folio_nr_pages(folio), mm); set_huge_pte_at(mm, address, pvmw.pte, pteval); } else { dec_mm_counter(mm, mm_counter(&folio->page)); set_pte_at(mm, address, pvmw.pte, pteval); } } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { /* * The guest indicated that the page content is of no * interest anymore. Simply discard the pte, vmscan * will take care of the rest. * A future reference will then fault in a new zero * page. When userfaultfd is active, we must not drop * this page though, as its main user (postcopy * migration) will not expect userfaults on already * copied pages. */ dec_mm_counter(mm, mm_counter(&folio->page)); } else if (folio_test_anon(folio)) { swp_entry_t entry = page_swap_entry(subpage); pte_t swp_pte; /* * Store the swap location in the pte. * See handle_pte_fault() ... */ if (unlikely(folio_test_swapbacked(folio) != folio_test_swapcache(folio))) { WARN_ON_ONCE(1); ret = false; page_vma_mapped_walk_done(&pvmw); break; } /* MADV_FREE page check */ if (!folio_test_swapbacked(folio)) { int ref_count, map_count; /* * Synchronize with gup_pte_range(): * - clear PTE; barrier; read refcount * - inc refcount; barrier; read PTE */ smp_mb(); ref_count = folio_ref_count(folio); map_count = folio_mapcount(folio); /* * Order reads for page refcount and dirty flag * (see comments in __remove_mapping()). */ smp_rmb(); /* * The only page refs must be one from isolation * plus the rmap(s) (dropped by discard:). */ if (ref_count == 1 + map_count && !folio_test_dirty(folio)) { dec_mm_counter(mm, MM_ANONPAGES); goto discard; } /* * If the folio was redirtied, it cannot be * discarded. Remap the page to page table. */ set_pte_at(mm, address, pvmw.pte, pteval); folio_set_swapbacked(folio); ret = false; page_vma_mapped_walk_done(&pvmw); break; } if (swap_duplicate(entry) < 0) { set_pte_at(mm, address, pvmw.pte, pteval); ret = false; page_vma_mapped_walk_done(&pvmw); break; } if (arch_unmap_one(mm, vma, address, pteval) < 0) { swap_free(entry); set_pte_at(mm, address, pvmw.pte, pteval); ret = false; page_vma_mapped_walk_done(&pvmw); break; } /* See page_try_share_anon_rmap(): clear PTE first. 
*/ if (anon_exclusive && page_try_share_anon_rmap(subpage)) { swap_free(entry); set_pte_at(mm, address, pvmw.pte, pteval); ret = false; page_vma_mapped_walk_done(&pvmw); break; } if (list_empty(&mm->mmlist)) { spin_lock(&mmlist_lock); if (list_empty(&mm->mmlist)) list_add(&mm->mmlist, &init_mm.mmlist); spin_unlock(&mmlist_lock); } dec_mm_counter(mm, MM_ANONPAGES); inc_mm_counter(mm, MM_SWAPENTS); swp_pte = swp_entry_to_pte(entry); if (anon_exclusive) swp_pte = pte_swp_mkexclusive(swp_pte); if (pte_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); if (pte_uffd_wp(pteval)) swp_pte = pte_swp_mkuffd_wp(swp_pte); set_pte_at(mm, address, pvmw.pte, swp_pte); } else { /* * This is a locked file-backed folio, * so it cannot be removed from the page * cache and replaced by a new folio before * mmu_notifier_invalidate_range_end, so no * concurrent thread might update its page table * to point at a new folio while a device is * still using this folio. * * See Documentation/mm/mmu_notifier.rst */ dec_mm_counter(mm, mm_counter_file(&folio->page)); } discard: page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); if (vma->vm_flags & VM_LOCKED) mlock_drain_local(); folio_put(folio); } mmu_notifier_invalidate_range_end(&range); return ret; } static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) { return vma_is_temporary_stack(vma); } static int folio_not_mapped(struct folio *folio) { return !folio_mapped(folio); } /** * try_to_unmap - Try to remove all page table mappings to a folio. * @folio: The folio to unmap. * @flags: action and flags * * Tries to remove all the page table entries which are mapping this * folio. It is the caller's responsibility to check if the folio is * still mapped if needed (use TTU_SYNC to prevent accounting races). * * Context: Caller must hold the folio lock. */ void try_to_unmap(struct folio *folio, enum ttu_flags flags) { struct rmap_walk_control rwc = { .rmap_one = try_to_unmap_one, .arg = (void *)flags, .done = folio_not_mapped, .anon_lock = folio_lock_anon_vma_read, }; if (flags & TTU_RMAP_LOCKED) rmap_walk_locked(folio, &rwc); else rmap_walk(folio, &rwc); } /* * @arg: enum ttu_flags will be passed to this argument. * * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs * containing migration entries. */ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *arg) { struct mm_struct *mm = vma->vm_mm; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); pte_t pteval; struct page *subpage; bool anon_exclusive, ret = true; struct mmu_notifier_range range; enum ttu_flags flags = (enum ttu_flags)(long)arg; unsigned long pfn; /* * When racing against e.g. zap_pte_range() on another cpu, * in between its ptep_get_and_clear_full() and page_remove_rmap(), * try_to_migrate() may return before page_mapped() has become false, * if page table locking is skipped: use TTU_SYNC to wait for that. */ if (flags & TTU_SYNC) pvmw.flags = PVMW_SYNC; /* * unmap_page() in mm/huge_memory.c is the only user of migration with * TTU_SPLIT_HUGE_PMD and it wants to freeze. */ if (flags & TTU_SPLIT_HUGE_PMD) split_huge_pmd_address(vma, address, true, folio); /* * For THP, we have to assume the worse case ie pmd for invalidation. * For hugetlb, it could be much worse if we need to do pud * invalidation in the case of pmd sharing. * * Note that the page can not be free in this function as call of * try_to_unmap() must hold a reference on the page. 
*/ range.end = vma_address_end(&pvmw); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, address, range.end); if (folio_test_hugetlb(folio)) { /* * If sharing is possible, start and end will be adjusted * accordingly. */ adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); } mmu_notifier_invalidate_range_start(&range); while (page_vma_mapped_walk(&pvmw)) { #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION /* PMD-mapped THP migration entry */ if (!pvmw.pte) { subpage = folio_page(folio, pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || !folio_test_pmd_mappable(folio), folio); if (set_pmd_migration_entry(&pvmw, subpage)) { ret = false; page_vma_mapped_walk_done(&pvmw); break; } continue; } #endif /* Unexpected PMD-mapped THP? */ VM_BUG_ON_FOLIO(!pvmw.pte, folio); pfn = pte_pfn(ptep_get(pvmw.pte)); if (folio_is_zone_device(folio)) { /* * Our PTE is a non-present device exclusive entry and * calculating the subpage as for the common case would * result in an invalid pointer. * * Since only PAGE_SIZE pages can currently be * migrated, just set it to page. This will need to be * changed when hugepage migrations to device private * memory are supported. */ VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio); subpage = &folio->page; } else { subpage = folio_page(folio, pfn - folio_pfn(folio)); } address = pvmw.address; anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(subpage); if (folio_test_hugetlb(folio)) { bool anon = folio_test_anon(folio); /* * huge_pmd_unshare may unmap an entire PMD page. * There is no way of knowing exactly which PMDs may * be cached for this mm, so we must flush them all. * start/end were already adjusted above to cover this * range. */ flush_cache_range(vma, range.start, range.end); /* * To call huge_pmd_unshare, i_mmap_rwsem must be * held in write mode. Caller needs to explicitly * do this outside rmap routines. * * We also must hold hugetlb vma_lock in write mode. * Lock order dictates acquiring vma_lock BEFORE * i_mmap_rwsem. We can only try lock here and * fail if unsuccessful. */ if (!anon) { VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); if (!hugetlb_vma_trylock_write(vma)) { page_vma_mapped_walk_done(&pvmw); ret = false; break; } if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { hugetlb_vma_unlock_write(vma); flush_tlb_range(vma, range.start, range.end); /* * The ref count of the PMD page was * dropped which is part of the way map * counting is done for shared PMDs. * Return 'true' here. When there is * no other sharing, huge_pmd_unshare * returns false and we will unmap the * actual page and drop map count * to zero. */ page_vma_mapped_walk_done(&pvmw); break; } hugetlb_vma_unlock_write(vma); } /* Nuke the hugetlb page table entry */ pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); } else { flush_cache_page(vma, address, pfn); /* Nuke the page table entry. */ if (should_defer_flush(mm, flags)) { /* * We clear the PTE but do not flush so potentially * a remote CPU could still be writing to the folio. * If the entry was previously clean then the * architecture must guarantee that a clear->dirty * transition on a cached TLB entry is written through * and traps if the PTE is unmapped. */ pteval = ptep_get_and_clear(mm, address, pvmw.pte); set_tlb_ubc_flush_pending(mm, pteval, address); } else { pteval = ptep_clear_flush(vma, address, pvmw.pte); } } /* Set the dirty flag on the folio now the pte is gone. 
*/ if (pte_dirty(pteval)) folio_mark_dirty(folio); /* Update high watermark before we lower rss */ update_hiwater_rss(mm); if (folio_is_device_private(folio)) { unsigned long pfn = folio_pfn(folio); swp_entry_t entry; pte_t swp_pte; if (anon_exclusive) BUG_ON(page_try_share_anon_rmap(subpage)); /* * Store the pfn of the page in a special migration * pte. do_swap_page() will wait until the migration * pte is removed and then restart fault handling. */ entry = pte_to_swp_entry(pteval); if (is_writable_device_private_entry(entry)) entry = make_writable_migration_entry(pfn); else if (anon_exclusive) entry = make_readable_exclusive_migration_entry(pfn); else entry = make_readable_migration_entry(pfn); swp_pte = swp_entry_to_pte(entry); /* * pteval maps a zone device page and is therefore * a swap pte. */ if (pte_swp_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); if (pte_swp_uffd_wp(pteval)) swp_pte = pte_swp_mkuffd_wp(swp_pte); set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); trace_set_migration_pte(pvmw.address, pte_val(swp_pte), compound_order(&folio->page)); /* * No need to invalidate here it will synchronize on * against the special swap migration pte. */ } else if (PageHWPoison(subpage)) { pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); if (folio_test_hugetlb(folio)) { hugetlb_count_sub(folio_nr_pages(folio), mm); set_huge_pte_at(mm, address, pvmw.pte, pteval); } else { dec_mm_counter(mm, mm_counter(&folio->page)); set_pte_at(mm, address, pvmw.pte, pteval); } } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { /* * The guest indicated that the page content is of no * interest anymore. Simply discard the pte, vmscan * will take care of the rest. * A future reference will then fault in a new zero * page. When userfaultfd is active, we must not drop * this page though, as its main user (postcopy * migration) will not expect userfaults on already * copied pages. */ dec_mm_counter(mm, mm_counter(&folio->page)); } else { swp_entry_t entry; pte_t swp_pte; if (arch_unmap_one(mm, vma, address, pteval) < 0) { if (folio_test_hugetlb(folio)) set_huge_pte_at(mm, address, pvmw.pte, pteval); else set_pte_at(mm, address, pvmw.pte, pteval); ret = false; page_vma_mapped_walk_done(&pvmw); break; } VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && !anon_exclusive, subpage); /* See page_try_share_anon_rmap(): clear PTE first. */ if (anon_exclusive && page_try_share_anon_rmap(subpage)) { if (folio_test_hugetlb(folio)) set_huge_pte_at(mm, address, pvmw.pte, pteval); else set_pte_at(mm, address, pvmw.pte, pteval); ret = false; page_vma_mapped_walk_done(&pvmw); break; } /* * Store the pfn of the page in a special migration * pte. do_swap_page() will wait until the migration * pte is removed and then restart fault handling. 
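			 *
			 * (The choice made just below, summarised as a hint:
			 * a writable pte becomes a writable migration entry;
			 * otherwise an anon-exclusive page gets a readable
			 * exclusive entry so exclusivity survives migration;
			 * anything else gets a plain readable entry, with
			 * young/dirty carried over from the old pte.)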
*/ if (pte_write(pteval)) entry = make_writable_migration_entry( page_to_pfn(subpage)); else if (anon_exclusive) entry = make_readable_exclusive_migration_entry( page_to_pfn(subpage)); else entry = make_readable_migration_entry( page_to_pfn(subpage)); if (pte_young(pteval)) entry = make_migration_entry_young(entry); if (pte_dirty(pteval)) entry = make_migration_entry_dirty(entry); swp_pte = swp_entry_to_pte(entry); if (pte_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); if (pte_uffd_wp(pteval)) swp_pte = pte_swp_mkuffd_wp(swp_pte); if (folio_test_hugetlb(folio)) set_huge_pte_at(mm, address, pvmw.pte, swp_pte); else set_pte_at(mm, address, pvmw.pte, swp_pte); trace_set_migration_pte(address, pte_val(swp_pte), compound_order(&folio->page)); /* * No need to invalidate here it will synchronize on * against the special swap migration pte. */ } page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); if (vma->vm_flags & VM_LOCKED) mlock_drain_local(); folio_put(folio); } mmu_notifier_invalidate_range_end(&range); return ret; } /** * try_to_migrate - try to replace all page table mappings with swap entries * @folio: the folio to replace page table entries for * @flags: action and flags * * Tries to remove all the page table entries which are mapping this folio and * replace them with special swap entries. Caller must hold the folio lock. */ void try_to_migrate(struct folio *folio, enum ttu_flags flags) { struct rmap_walk_control rwc = { .rmap_one = try_to_migrate_one, .arg = (void *)flags, .done = folio_not_mapped, .anon_lock = folio_lock_anon_vma_read, }; /* * Migration always ignores mlock and only supports TTU_RMAP_LOCKED and * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags. */ if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | TTU_SYNC | TTU_BATCH_FLUSH))) return; if (folio_is_zone_device(folio) && (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) return; /* * During exec, a temporary VMA is setup and later moved. * The VMA is moved under the anon_vma lock but not the * page tables leading to a race where migration cannot * find the migration ptes. Rather than increasing the * locking requirements of exec(), migration skips * temporary VMAs until after exec() completes. */ if (!folio_test_ksm(folio) && folio_test_anon(folio)) rwc.invalid_vma = invalid_migration_vma; if (flags & TTU_RMAP_LOCKED) rmap_walk_locked(folio, &rwc); else rmap_walk(folio, &rwc); } #ifdef CONFIG_DEVICE_PRIVATE struct make_exclusive_args { struct mm_struct *mm; unsigned long address; void *owner; bool valid; }; static bool page_make_device_exclusive_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *priv) { struct mm_struct *mm = vma->vm_mm; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); struct make_exclusive_args *args = priv; pte_t pteval; struct page *subpage; bool ret = true; struct mmu_notifier_range range; swp_entry_t entry; pte_t swp_pte; pte_t ptent; mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma->vm_mm, address, min(vma->vm_end, address + folio_size(folio)), args->owner); mmu_notifier_invalidate_range_start(&range); while (page_vma_mapped_walk(&pvmw)) { /* Unexpected PMD-mapped THP? */ VM_BUG_ON_FOLIO(!pvmw.pte, folio); ptent = ptep_get(pvmw.pte); if (!pte_present(ptent)) { ret = false; page_vma_mapped_walk_done(&pvmw); break; } subpage = folio_page(folio, pte_pfn(ptent) - folio_pfn(folio)); address = pvmw.address; /* Nuke the page table entry. 
*/ flush_cache_page(vma, address, pte_pfn(ptent)); pteval = ptep_clear_flush(vma, address, pvmw.pte); /* Set the dirty flag on the folio now the pte is gone. */ if (pte_dirty(pteval)) folio_mark_dirty(folio); /* * Check that our target page is still mapped at the expected * address. */ if (args->mm == mm && args->address == address && pte_write(pteval)) args->valid = true; /* * Store the pfn of the page in a special migration * pte. do_swap_page() will wait until the migration * pte is removed and then restart fault handling. */ if (pte_write(pteval)) entry = make_writable_device_exclusive_entry( page_to_pfn(subpage)); else entry = make_readable_device_exclusive_entry( page_to_pfn(subpage)); swp_pte = swp_entry_to_pte(entry); if (pte_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); if (pte_uffd_wp(pteval)) swp_pte = pte_swp_mkuffd_wp(swp_pte); set_pte_at(mm, address, pvmw.pte, swp_pte); /* * There is a reference on the page for the swap entry which has * been removed, so shouldn't take another. */ page_remove_rmap(subpage, vma, false); } mmu_notifier_invalidate_range_end(&range); return ret; } /** * folio_make_device_exclusive - Mark the folio exclusively owned by a device. * @folio: The folio to replace page table entries for. * @mm: The mm_struct where the folio is expected to be mapped. * @address: Address where the folio is expected to be mapped. * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks * * Tries to remove all the page table entries which are mapping this * folio and replace them with special device exclusive swap entries to * grant a device exclusive access to the folio. * * Context: Caller must hold the folio lock. * Return: false if the page is still mapped, or if it could not be unmapped * from the expected address. Otherwise returns true (success). */ static bool folio_make_device_exclusive(struct folio *folio, struct mm_struct *mm, unsigned long address, void *owner) { struct make_exclusive_args args = { .mm = mm, .address = address, .owner = owner, .valid = false, }; struct rmap_walk_control rwc = { .rmap_one = page_make_device_exclusive_one, .done = folio_not_mapped, .anon_lock = folio_lock_anon_vma_read, .arg = &args, }; /* * Restrict to anonymous folios for now to avoid potential writeback * issues. */ if (!folio_test_anon(folio)) return false; rmap_walk(folio, &rwc); return args.valid && !folio_mapcount(folio); } /** * make_device_exclusive_range() - Mark a range for exclusive use by a device * @mm: mm_struct of associated target process * @start: start of the region to mark for exclusive device access * @end: end address of region * @pages: returns the pages which were successfully marked for exclusive access * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering * * Returns: number of pages found in the range by GUP. A page is marked for * exclusive access only if the page pointer is non-NULL. * * This function finds ptes mapping page(s) to the given address range, locks * them and replaces mappings with special swap entries preventing userspace CPU * access. On fault these entries are replaced with the original mapping after * calling MMU notifiers. * * A driver using this to program access from a device must use a mmu notifier * critical section to hold a device specific lock during programming. Once * programming is complete it should drop the page lock and reference after * which point CPU access to the page will revoke the exclusive access. 
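 *
 * Illustrative driver-side sketch (hypothetical caller; error handling and
 * the device-side programming are elided, only the calls shown are real):
 *
 *	struct page *pages[1];
 *
 *	if (make_device_exclusive_range(mm, start, start + PAGE_SIZE,
 *					pages, drv) == 1 && pages[0]) {
 *		... program the device mapping under the driver lock,
 *		    paired with its MMU_NOTIFY_EXCLUSIVE notifier ...
 *		unlock_page(pages[0]);
 *		put_page(pages[0]);
 *	}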
*/ int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct page **pages, void *owner) { long npages = (end - start) >> PAGE_SHIFT; long i; npages = get_user_pages_remote(mm, start, npages, FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, pages, NULL); if (npages < 0) return npages; for (i = 0; i < npages; i++, start += PAGE_SIZE) { struct folio *folio = page_folio(pages[i]); if (PageTail(pages[i]) || !folio_trylock(folio)) { folio_put(folio); pages[i] = NULL; continue; } if (!folio_make_device_exclusive(folio, mm, start, owner)) { folio_unlock(folio); folio_put(folio); pages[i] = NULL; } } return npages; } EXPORT_SYMBOL_GPL(make_device_exclusive_range); #endif void __put_anon_vma(struct anon_vma *anon_vma) { struct anon_vma *root = anon_vma->root; anon_vma_free(anon_vma); if (root != anon_vma && atomic_dec_and_test(&root->refcount)) anon_vma_free(root); } static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, struct rmap_walk_control *rwc) { struct anon_vma *anon_vma; if (rwc->anon_lock) return rwc->anon_lock(folio, rwc); /* * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() * because that depends on page_mapped(); but not all its usages * are holding mmap_lock. Users without mmap_lock are required to * take a reference count to prevent the anon_vma disappearing */ anon_vma = folio_anon_vma(folio); if (!anon_vma) return NULL; if (anon_vma_trylock_read(anon_vma)) goto out; if (rwc->try_lock) { anon_vma = NULL; rwc->contended = true; goto out; } anon_vma_lock_read(anon_vma); out: return anon_vma; } /* * rmap_walk_anon - do something to anonymous page using the object-based * rmap method * @folio: the folio to be handled * @rwc: control variable according to each walk type * @locked: caller holds relevant rmap lock * * Find all the mappings of a folio using the mapping pointer and the vma * chains contained in the anon_vma struct it points to. */ static void rmap_walk_anon(struct folio *folio, struct rmap_walk_control *rwc, bool locked) { struct anon_vma *anon_vma; pgoff_t pgoff_start, pgoff_end; struct anon_vma_chain *avc; if (locked) { anon_vma = folio_anon_vma(folio); /* anon_vma disappear under us? */ VM_BUG_ON_FOLIO(!anon_vma, folio); } else { anon_vma = rmap_walk_anon_lock(folio, rwc); } if (!anon_vma) return; pgoff_start = folio_pgoff(folio); pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff_start, pgoff_end) { struct vm_area_struct *vma = avc->vma; unsigned long address = vma_address(&folio->page, vma); VM_BUG_ON_VMA(address == -EFAULT, vma); cond_resched(); if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) continue; if (!rwc->rmap_one(folio, vma, address, rwc->arg)) break; if (rwc->done && rwc->done(folio)) break; } if (!locked) anon_vma_unlock_read(anon_vma); } /* * rmap_walk_file - do something to file page using the object-based rmap method * @folio: the folio to be handled * @rwc: control variable according to each walk type * @locked: caller holds relevant rmap lock * * Find all the mappings of a folio using the mapping pointer and the vma chains * contained in the address_space struct it points to. 
*/ static void rmap_walk_file(struct folio *folio, struct rmap_walk_control *rwc, bool locked) { struct address_space *mapping = folio_mapping(folio); pgoff_t pgoff_start, pgoff_end; struct vm_area_struct *vma; /* * The page lock not only makes sure that page->mapping cannot * suddenly be NULLified by truncation, it makes sure that the * structure at mapping cannot be freed and reused yet, * so we can safely take mapping->i_mmap_rwsem. */ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); if (!mapping) return; pgoff_start = folio_pgoff(folio); pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; if (!locked) { if (i_mmap_trylock_read(mapping)) goto lookup; if (rwc->try_lock) { rwc->contended = true; return; } i_mmap_lock_read(mapping); } lookup: vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff_start, pgoff_end) { unsigned long address = vma_address(&folio->page, vma); VM_BUG_ON_VMA(address == -EFAULT, vma); cond_resched(); if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) continue; if (!rwc->rmap_one(folio, vma, address, rwc->arg)) goto done; if (rwc->done && rwc->done(folio)) goto done; } done: if (!locked) i_mmap_unlock_read(mapping); } void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) { if (unlikely(folio_test_ksm(folio))) rmap_walk_ksm(folio, rwc); else if (folio_test_anon(folio)) rmap_walk_anon(folio, rwc, false); else rmap_walk_file(folio, rwc, false); } /* Like rmap_walk, but caller holds relevant rmap lock */ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) { /* no ksm support for now */ VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); if (folio_test_anon(folio)) rmap_walk_anon(folio, rwc, true); else rmap_walk_file(folio, rwc, true); } #ifdef CONFIG_HUGETLB_PAGE /* * The following two functions are for anonymous (private mapped) hugepages. * Unlike common anonymous pages, anonymous hugepages have no accounting code * and no lru code, because we handle hugepages differently from common pages. * * RMAP_COMPOUND is ignored. */ void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, rmap_t flags) { struct folio *folio = page_folio(page); struct anon_vma *anon_vma = vma->anon_vma; int first; BUG_ON(!folio_test_locked(folio)); BUG_ON(!anon_vma); /* address might be in next vma when migration races vma_merge */ first = atomic_inc_and_test(&folio->_entire_mapcount); VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); if (first) __page_set_anon_rmap(folio, page, vma, address, !!(flags & RMAP_EXCLUSIVE)); } void hugepage_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, unsigned long address) { BUG_ON(address < vma->vm_start || address >= vma->vm_end); /* increment count (starts at -1) */ atomic_set(&folio->_entire_mapcount, 0); folio_clear_hugetlb_restore_reserve(folio); __page_set_anon_rmap(folio, &folio->page, vma, address, 1); } #endif /* CONFIG_HUGETLB_PAGE */
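/*
 * A minimal sketch of driving the rmap walk machinery above from a new
 * caller, kept under "#if 0" because it is illustrative only: the helper
 * name (folio_count_mapping_vmas) and its callback are made up, but the
 * rmap_walk_control wiring mirrors real users such as folio_mkclean() and
 * folio_referenced().  The caller is assumed to hold the folio lock.
 */
#if 0
static bool count_one_vma(struct folio *folio, struct vm_area_struct *vma,
			  unsigned long address, void *arg)
{
	(*(int *)arg)++;	/* count this mapping and keep walking */
	return true;
}

static int folio_count_mapping_vmas(struct folio *folio)
{
	int nr = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = count_one_vma,
		.arg = &nr,
		.anon_lock = folio_lock_anon_vma_read,
	};

	if (folio_mapped(folio))
		rmap_walk(folio, &rwc);
	return nr;
}
#endif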
linux-master
mm/rmap.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/pagewalk.h> #include <linux/highmem.h> #include <linux/sched.h> #include <linux/hugetlb.h> /* * We want to know the real level where a entry is located ignoring any * folding of levels which may be happening. For example if p4d is folded then * a missing entry found at level 1 (p4d) is actually at level 0 (pgd). */ static int real_depth(int depth) { if (depth == 3 && PTRS_PER_PMD == 1) depth = 2; if (depth == 2 && PTRS_PER_PUD == 1) depth = 1; if (depth == 1 && PTRS_PER_P4D == 1) depth = 0; return depth; } static int walk_pte_range_inner(pte_t *pte, unsigned long addr, unsigned long end, struct mm_walk *walk) { const struct mm_walk_ops *ops = walk->ops; int err = 0; for (;;) { err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk); if (err) break; if (addr >= end - PAGE_SIZE) break; addr += PAGE_SIZE; pte++; } return err; } static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { pte_t *pte; int err = 0; spinlock_t *ptl; if (walk->no_vma) { /* * pte_offset_map() might apply user-specific validation. * Indeed, on x86_64 the pmd entries set up by init_espfix_ap() * fit its pmd_bad() check (_PAGE_NX set and _PAGE_RW clear), * and CONFIG_EFI_PGT_DUMP efi_mm goes so far as to walk them. */ if (walk->mm == &init_mm || addr >= TASK_SIZE) pte = pte_offset_kernel(pmd, addr); else pte = pte_offset_map(pmd, addr); if (pte) { err = walk_pte_range_inner(pte, addr, end, walk); if (walk->mm != &init_mm && addr < TASK_SIZE) pte_unmap(pte); } } else { pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); if (pte) { err = walk_pte_range_inner(pte, addr, end, walk); pte_unmap_unlock(pte, ptl); } } if (!pte) walk->action = ACTION_AGAIN; return err; } #ifdef CONFIG_ARCH_HAS_HUGEPD static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr, unsigned long end, struct mm_walk *walk, int pdshift) { int err = 0; const struct mm_walk_ops *ops = walk->ops; int shift = hugepd_shift(*phpd); int page_size = 1 << shift; if (!ops->pte_entry) return 0; if (addr & (page_size - 1)) return 0; for (;;) { pte_t *pte; spin_lock(&walk->mm->page_table_lock); pte = hugepte_offset(*phpd, addr, pdshift); err = ops->pte_entry(pte, addr, addr + page_size, walk); spin_unlock(&walk->mm->page_table_lock); if (err) break; if (addr >= end - page_size) break; addr += page_size; } return err; } #else static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr, unsigned long end, struct mm_walk *walk, int pdshift) { return 0; } #endif static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, struct mm_walk *walk) { pmd_t *pmd; unsigned long next; const struct mm_walk_ops *ops = walk->ops; int err = 0; int depth = real_depth(3); pmd = pmd_offset(pud, addr); do { again: next = pmd_addr_end(addr, end); if (pmd_none(*pmd)) { if (ops->pte_hole) err = ops->pte_hole(addr, next, depth, walk); if (err) break; continue; } walk->action = ACTION_SUBTREE; /* * This implies that each ->pmd_entry() handler * needs to know about pmd_trans_huge() pmds */ if (ops->pmd_entry) err = ops->pmd_entry(pmd, addr, next, walk); if (err) break; if (walk->action == ACTION_AGAIN) goto again; /* * Check this here so we only break down trans_huge * pages when we _need_ to */ if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) || walk->action == ACTION_CONTINUE || !(ops->pte_entry)) continue; if (walk->vma) split_huge_pmd(walk->vma, pmd, addr); if (is_hugepd(__hugepd(pmd_val(*pmd)))) err = walk_hugepd_range((hugepd_t *)pmd, addr, next, walk, 
PMD_SHIFT); else err = walk_pte_range(pmd, addr, next, walk); if (err) break; if (walk->action == ACTION_AGAIN) goto again; } while (pmd++, addr = next, addr != end); return err; } static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, struct mm_walk *walk) { pud_t *pud; unsigned long next; const struct mm_walk_ops *ops = walk->ops; int err = 0; int depth = real_depth(2); pud = pud_offset(p4d, addr); do { again: next = pud_addr_end(addr, end); if (pud_none(*pud)) { if (ops->pte_hole) err = ops->pte_hole(addr, next, depth, walk); if (err) break; continue; } walk->action = ACTION_SUBTREE; if (ops->pud_entry) err = ops->pud_entry(pud, addr, next, walk); if (err) break; if (walk->action == ACTION_AGAIN) goto again; if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) || walk->action == ACTION_CONTINUE || !(ops->pmd_entry || ops->pte_entry)) continue; if (walk->vma) split_huge_pud(walk->vma, pud, addr); if (pud_none(*pud)) goto again; if (is_hugepd(__hugepd(pud_val(*pud)))) err = walk_hugepd_range((hugepd_t *)pud, addr, next, walk, PUD_SHIFT); else err = walk_pmd_range(pud, addr, next, walk); if (err) break; } while (pud++, addr = next, addr != end); return err; } static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, struct mm_walk *walk) { p4d_t *p4d; unsigned long next; const struct mm_walk_ops *ops = walk->ops; int err = 0; int depth = real_depth(1); p4d = p4d_offset(pgd, addr); do { next = p4d_addr_end(addr, end); if (p4d_none_or_clear_bad(p4d)) { if (ops->pte_hole) err = ops->pte_hole(addr, next, depth, walk); if (err) break; continue; } if (ops->p4d_entry) { err = ops->p4d_entry(p4d, addr, next, walk); if (err) break; } if (is_hugepd(__hugepd(p4d_val(*p4d)))) err = walk_hugepd_range((hugepd_t *)p4d, addr, next, walk, P4D_SHIFT); else if (ops->pud_entry || ops->pmd_entry || ops->pte_entry) err = walk_pud_range(p4d, addr, next, walk); if (err) break; } while (p4d++, addr = next, addr != end); return err; } static int walk_pgd_range(unsigned long addr, unsigned long end, struct mm_walk *walk) { pgd_t *pgd; unsigned long next; const struct mm_walk_ops *ops = walk->ops; int err = 0; if (walk->pgd) pgd = walk->pgd + pgd_index(addr); else pgd = pgd_offset(walk->mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) { if (ops->pte_hole) err = ops->pte_hole(addr, next, 0, walk); if (err) break; continue; } if (ops->pgd_entry) { err = ops->pgd_entry(pgd, addr, next, walk); if (err) break; } if (is_hugepd(__hugepd(pgd_val(*pgd)))) err = walk_hugepd_range((hugepd_t *)pgd, addr, next, walk, PGDIR_SHIFT); else if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry || ops->pte_entry) err = walk_p4d_range(pgd, addr, next, walk); if (err) break; } while (pgd++, addr = next, addr != end); return err; } #ifdef CONFIG_HUGETLB_PAGE static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr, unsigned long end) { unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h); return boundary < end ? 
boundary : end; } static int walk_hugetlb_range(unsigned long addr, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->vma; struct hstate *h = hstate_vma(vma); unsigned long next; unsigned long hmask = huge_page_mask(h); unsigned long sz = huge_page_size(h); pte_t *pte; const struct mm_walk_ops *ops = walk->ops; int err = 0; hugetlb_vma_lock_read(vma); do { next = hugetlb_entry_end(h, addr, end); pte = hugetlb_walk(vma, addr & hmask, sz); if (pte) err = ops->hugetlb_entry(pte, hmask, addr, next, walk); else if (ops->pte_hole) err = ops->pte_hole(addr, next, -1, walk); if (err) break; } while (addr = next, addr != end); hugetlb_vma_unlock_read(vma); return err; } #else /* CONFIG_HUGETLB_PAGE */ static int walk_hugetlb_range(unsigned long addr, unsigned long end, struct mm_walk *walk) { return 0; } #endif /* CONFIG_HUGETLB_PAGE */ /* * Decide whether we really walk over the current vma on [@start, @end) * or skip it via the returned value. Return 0 if we do walk over the * current vma, and return 1 if we skip the vma. Negative values means * error, where we abort the current walk. */ static int walk_page_test(unsigned long start, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->vma; const struct mm_walk_ops *ops = walk->ops; if (ops->test_walk) return ops->test_walk(start, end, walk); /* * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP * range, so we don't walk over it as we do for normal vmas. However, * Some callers are interested in handling hole range and they don't * want to just ignore any single address range. Such users certainly * define their ->pte_hole() callbacks, so let's delegate them to handle * vma(VM_PFNMAP). */ if (vma->vm_flags & VM_PFNMAP) { int err = 1; if (ops->pte_hole) err = ops->pte_hole(start, end, -1, walk); return err ? err : 1; } return 0; } static int __walk_page_range(unsigned long start, unsigned long end, struct mm_walk *walk) { int err = 0; struct vm_area_struct *vma = walk->vma; const struct mm_walk_ops *ops = walk->ops; if (ops->pre_vma) { err = ops->pre_vma(start, end, walk); if (err) return err; } if (is_vm_hugetlb_page(vma)) { if (ops->hugetlb_entry) err = walk_hugetlb_range(start, end, walk); } else err = walk_pgd_range(start, end, walk); if (ops->post_vma) ops->post_vma(walk); return err; } static inline void process_mm_walk_lock(struct mm_struct *mm, enum page_walk_lock walk_lock) { if (walk_lock == PGWALK_RDLOCK) mmap_assert_locked(mm); else mmap_assert_write_locked(mm); } static inline void process_vma_walk_lock(struct vm_area_struct *vma, enum page_walk_lock walk_lock) { #ifdef CONFIG_PER_VMA_LOCK switch (walk_lock) { case PGWALK_WRLOCK: vma_start_write(vma); break; case PGWALK_WRLOCK_VERIFY: vma_assert_write_locked(vma); break; case PGWALK_RDLOCK: /* PGWALK_RDLOCK is handled by process_mm_walk_lock */ break; } #endif } /** * walk_page_range - walk page table with caller specific callbacks * @mm: mm_struct representing the target process of page table walk * @start: start address of the virtual address range * @end: end address of the virtual address range * @ops: operation to call during the walk * @private: private data for callbacks' usage * * Recursively walk the page table tree of the process represented by @mm * within the virtual address range [@start, @end). During walking, we can do * some caller-specific works for each entry, by setting up pmd_entry(), * pte_entry(), and/or hugetlb_entry(). 
If you don't set up for some of these * callbacks, the associated entries/pages are just ignored. * The return values of these callbacks are commonly defined like below: * * - 0 : succeeded to handle the current entry, and if you don't reach the * end address yet, continue to walk. * - >0 : succeeded to handle the current entry, and return to the caller * with caller specific value. * - <0 : failed to handle the current entry, and return to the caller * with error code. * * Before starting to walk page table, some callers want to check whether * they really want to walk over the current vma, typically by checking * its vm_flags. walk_page_test() and @ops->test_walk() are used for this * purpose. * * If operations need to be staged before and committed after a vma is walked, * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(), * since it is intended to handle commit-type operations, can't return any * errors. * * struct mm_walk keeps current values of some common data like vma and pmd, * which are useful for the access from callbacks. If you want to pass some * caller-specific data to callbacks, @private should be helpful. * * Locking: * Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_lock, * because these function traverse vma list and/or access to vma's data. */ int walk_page_range(struct mm_struct *mm, unsigned long start, unsigned long end, const struct mm_walk_ops *ops, void *private) { int err = 0; unsigned long next; struct vm_area_struct *vma; struct mm_walk walk = { .ops = ops, .mm = mm, .private = private, }; if (start >= end) return -EINVAL; if (!walk.mm) return -EINVAL; process_mm_walk_lock(walk.mm, ops->walk_lock); vma = find_vma(walk.mm, start); do { if (!vma) { /* after the last vma */ walk.vma = NULL; next = end; if (ops->pte_hole) err = ops->pte_hole(start, next, -1, &walk); } else if (start < vma->vm_start) { /* outside vma */ walk.vma = NULL; next = min(end, vma->vm_start); if (ops->pte_hole) err = ops->pte_hole(start, next, -1, &walk); } else { /* inside vma */ process_vma_walk_lock(vma, ops->walk_lock); walk.vma = vma; next = min(end, vma->vm_end); vma = find_vma(mm, vma->vm_end); err = walk_page_test(start, next, &walk); if (err > 0) { /* * positive return values are purely for * controlling the pagewalk, so should never * be passed to the callers. */ err = 0; continue; } if (err < 0) break; err = __walk_page_range(start, next, &walk); } if (err) break; } while (start = next, start < end); return err; } /** * walk_page_range_novma - walk a range of pagetables not backed by a vma * @mm: mm_struct representing the target process of page table walk * @start: start address of the virtual address range * @end: end address of the virtual address range * @ops: operation to call during the walk * @pgd: pgd to walk if different from mm->pgd * @private: private data for callbacks' usage * * Similar to walk_page_range() but can walk any page tables even if they are * not backed by VMAs. Because 'unusual' entries may be walked this function * will also not lock the PTEs for the pte_entry() callback. This is useful for * walking the kernel pages tables or page tables for firmware. 
*/ int walk_page_range_novma(struct mm_struct *mm, unsigned long start, unsigned long end, const struct mm_walk_ops *ops, pgd_t *pgd, void *private) { struct mm_walk walk = { .ops = ops, .mm = mm, .pgd = pgd, .private = private, .no_vma = true }; if (start >= end || !walk.mm) return -EINVAL; mmap_assert_write_locked(walk.mm); return walk_pgd_range(start, end, &walk); } int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start, unsigned long end, const struct mm_walk_ops *ops, void *private) { struct mm_walk walk = { .ops = ops, .mm = vma->vm_mm, .vma = vma, .private = private, }; if (start >= end || !walk.mm) return -EINVAL; if (start < vma->vm_start || end > vma->vm_end) return -EINVAL; process_mm_walk_lock(walk.mm, ops->walk_lock); process_vma_walk_lock(vma, ops->walk_lock); return __walk_page_range(start, end, &walk); } int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops, void *private) { struct mm_walk walk = { .ops = ops, .mm = vma->vm_mm, .vma = vma, .private = private, }; if (!walk.mm) return -EINVAL; process_mm_walk_lock(walk.mm, ops->walk_lock); process_vma_walk_lock(vma, ops->walk_lock); return __walk_page_range(vma->vm_start, vma->vm_end, &walk); } /** * walk_page_mapping - walk all memory areas mapped into a struct address_space. * @mapping: Pointer to the struct address_space * @first_index: First page offset in the address_space * @nr: Number of incremental page offsets to cover * @ops: operation to call during the walk * @private: private data for callbacks' usage * * This function walks all memory areas mapped into a struct address_space. * The walk is limited to only the given page-size index range, but if * the index boundaries cross a huge page-table entry, that entry will be * included. * * Also see walk_page_range() for additional information. * * Locking: * This function can't require that the struct mm_struct::mmap_lock is held, * since @mapping may be mapped by multiple processes. Instead * @mapping->i_mmap_rwsem must be held. This might have implications in the * callbacks, and it's up tho the caller to ensure that the * struct mm_struct::mmap_lock is not needed. * * Also this means that a caller can't rely on the struct * vm_area_struct::vm_flags to be constant across a call, * except for immutable flags. Callers requiring this shouldn't use * this function. * * Return: 0 on success, negative error code on failure, positive number on * caller defined premature termination. */ int walk_page_mapping(struct address_space *mapping, pgoff_t first_index, pgoff_t nr, const struct mm_walk_ops *ops, void *private) { struct mm_walk walk = { .ops = ops, .private = private, }; struct vm_area_struct *vma; pgoff_t vba, vea, cba, cea; unsigned long start_addr, end_addr; int err = 0; lockdep_assert_held(&mapping->i_mmap_rwsem); vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index, first_index + nr - 1) { /* Clip to the vma */ vba = vma->vm_pgoff; vea = vba + vma_pages(vma); cba = first_index; cba = max(cba, vba); cea = first_index + nr; cea = min(cea, vea); start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start; end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start; if (start_addr >= end_addr) continue; walk.vma = vma; walk.mm = vma->vm_mm; err = walk_page_test(vma->vm_start, vma->vm_end, &walk); if (err > 0) { err = 0; break; } else if (err < 0) break; err = __walk_page_range(start_addr, end_addr, &walk); if (err) break; } return err; }
linux-master
mm/pagewalk.c
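The mm/pagewalk.c entry above exposes walk_page_range() together with the mm_walk_ops callback table. Below is a minimal sketch of how a caller might drive that API; it is illustrative only, not taken from the kernel tree, and the helper name count_present_ptes() and struct pte_counter are hypothetical.

/*
 * Illustrative sketch (assumption: not part of pagewalk.c): count the
 * populated PTEs in a user address range using the pagewalk API above.
 */
#include <linux/pagewalk.h>
#include <linux/mm.h>

struct pte_counter {
	unsigned long present;		/* PTEs found present so far */
};

static int count_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	struct pte_counter *pc = walk->private;

	/* Called once per PTE; the PTE table is mapped and locked for us. */
	if (pte_present(ptep_get(pte)))
		pc->present++;

	return 0;			/* zero keeps the walk going */
}

static const struct mm_walk_ops pte_counter_ops = {
	.pte_entry	= count_pte_entry,
	.walk_lock	= PGWALK_RDLOCK,	/* caller only holds mmap_lock for read */
};

static unsigned long count_present_ptes(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	struct pte_counter pc = { .present = 0 };

	mmap_read_lock(mm);
	walk_page_range(mm, start, end, &pte_counter_ops, &pc);
	mmap_read_unlock(mm);

	return pc.present;
}

Using PGWALK_RDLOCK here matches the mmap_read_lock() taken around the walk, which is what process_mm_walk_lock() asserts; a callback that modified page tables would instead take the write lock and select PGWALK_WRLOCK.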
// SPDX-License-Identifier: GPL-2.0-only /* * Generic hugetlb support. * (C) Nadia Yvette Chambers, April 2004 */ #include <linux/list.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/seq_file.h> #include <linux/sysctl.h> #include <linux/highmem.h> #include <linux/mmu_notifier.h> #include <linux/nodemask.h> #include <linux/pagemap.h> #include <linux/mempolicy.h> #include <linux/compiler.h> #include <linux/cpuset.h> #include <linux/mutex.h> #include <linux/memblock.h> #include <linux/sysfs.h> #include <linux/slab.h> #include <linux/sched/mm.h> #include <linux/mmdebug.h> #include <linux/sched/signal.h> #include <linux/rmap.h> #include <linux/string_helpers.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/jhash.h> #include <linux/numa.h> #include <linux/llist.h> #include <linux/cma.h> #include <linux/migrate.h> #include <linux/nospec.h> #include <linux/delayacct.h> #include <linux/memory.h> #include <linux/mm_inline.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/tlb.h> #include <linux/io.h> #include <linux/hugetlb.h> #include <linux/hugetlb_cgroup.h> #include <linux/node.h> #include <linux/page_owner.h> #include "internal.h" #include "hugetlb_vmemmap.h" int hugetlb_max_hstate __read_mostly; unsigned int default_hstate_idx; struct hstate hstates[HUGE_MAX_HSTATE]; #ifdef CONFIG_CMA static struct cma *hugetlb_cma[MAX_NUMNODES]; static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata; static bool hugetlb_cma_folio(struct folio *folio, unsigned int order) { return cma_pages_valid(hugetlb_cma[folio_nid(folio)], &folio->page, 1 << order); } #else static bool hugetlb_cma_folio(struct folio *folio, unsigned int order) { return false; } #endif static unsigned long hugetlb_cma_size __initdata; __initdata LIST_HEAD(huge_boot_pages); /* for command line parsing */ static struct hstate * __initdata parsed_hstate; static unsigned long __initdata default_hstate_max_huge_pages; static bool __initdata parsed_valid_hugepagesz = true; static bool __initdata parsed_default_hugepagesz; static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata; /* * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages, * free_huge_pages, and surplus_huge_pages. */ DEFINE_SPINLOCK(hugetlb_lock); /* * Serializes faults on the same logical page. This is used to * prevent spurious OOMs when the hugepage pool is fully utilized. 
*/ static int num_fault_mutexes; struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp; /* Forward declaration */ static int hugetlb_acct_memory(struct hstate *h, long delta); static void hugetlb_vma_lock_free(struct vm_area_struct *vma); static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma); static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma); static void hugetlb_unshare_pmds(struct vm_area_struct *vma, unsigned long start, unsigned long end); static inline bool subpool_is_free(struct hugepage_subpool *spool) { if (spool->count) return false; if (spool->max_hpages != -1) return spool->used_hpages == 0; if (spool->min_hpages != -1) return spool->rsv_hpages == spool->min_hpages; return true; } static inline void unlock_or_release_subpool(struct hugepage_subpool *spool, unsigned long irq_flags) { spin_unlock_irqrestore(&spool->lock, irq_flags); /* If no pages are used, and no other handles to the subpool * remain, give up any reservations based on minimum size and * free the subpool */ if (subpool_is_free(spool)) { if (spool->min_hpages != -1) hugetlb_acct_memory(spool->hstate, -spool->min_hpages); kfree(spool); } } struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, long min_hpages) { struct hugepage_subpool *spool; spool = kzalloc(sizeof(*spool), GFP_KERNEL); if (!spool) return NULL; spin_lock_init(&spool->lock); spool->count = 1; spool->max_hpages = max_hpages; spool->hstate = h; spool->min_hpages = min_hpages; if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) { kfree(spool); return NULL; } spool->rsv_hpages = min_hpages; return spool; } void hugepage_put_subpool(struct hugepage_subpool *spool) { unsigned long flags; spin_lock_irqsave(&spool->lock, flags); BUG_ON(!spool->count); spool->count--; unlock_or_release_subpool(spool, flags); } /* * Subpool accounting for allocating and reserving pages. * Return -ENOMEM if there are not enough resources to satisfy the * request. Otherwise, return the number of pages by which the * global pools must be adjusted (upward). The returned value may * only be different than the passed value (delta) in the case where * a subpool minimum size must be maintained. */ static long hugepage_subpool_get_pages(struct hugepage_subpool *spool, long delta) { long ret = delta; if (!spool) return ret; spin_lock_irq(&spool->lock); if (spool->max_hpages != -1) { /* maximum size accounting */ if ((spool->used_hpages + delta) <= spool->max_hpages) spool->used_hpages += delta; else { ret = -ENOMEM; goto unlock_ret; } } /* minimum size accounting */ if (spool->min_hpages != -1 && spool->rsv_hpages) { if (delta > spool->rsv_hpages) { /* * Asking for more reserves than those already taken on * behalf of subpool. Return difference. */ ret = delta - spool->rsv_hpages; spool->rsv_hpages = 0; } else { ret = 0; /* reserves already accounted for */ spool->rsv_hpages -= delta; } } unlock_ret: spin_unlock_irq(&spool->lock); return ret; } /* * Subpool accounting for freeing and unreserving pages. * Return the number of global page reservations that must be dropped. * The return value may only be different than the passed value (delta) * in the case where a subpool minimum size must be maintained. 
*/ static long hugepage_subpool_put_pages(struct hugepage_subpool *spool, long delta) { long ret = delta; unsigned long flags; if (!spool) return delta; spin_lock_irqsave(&spool->lock, flags); if (spool->max_hpages != -1) /* maximum size accounting */ spool->used_hpages -= delta; /* minimum size accounting */ if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) { if (spool->rsv_hpages + delta <= spool->min_hpages) ret = 0; else ret = spool->rsv_hpages + delta - spool->min_hpages; spool->rsv_hpages += delta; if (spool->rsv_hpages > spool->min_hpages) spool->rsv_hpages = spool->min_hpages; } /* * If hugetlbfs_put_super couldn't free spool due to an outstanding * quota reference, free it now. */ unlock_or_release_subpool(spool, flags); return ret; } static inline struct hugepage_subpool *subpool_inode(struct inode *inode) { return HUGETLBFS_SB(inode->i_sb)->spool; } static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) { return subpool_inode(file_inode(vma->vm_file)); } /* * hugetlb vma_lock helper routines */ void hugetlb_vma_lock_read(struct vm_area_struct *vma) { if (__vma_shareable_lock(vma)) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; down_read(&vma_lock->rw_sema); } } void hugetlb_vma_unlock_read(struct vm_area_struct *vma) { if (__vma_shareable_lock(vma)) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; up_read(&vma_lock->rw_sema); } } void hugetlb_vma_lock_write(struct vm_area_struct *vma) { if (__vma_shareable_lock(vma)) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; down_write(&vma_lock->rw_sema); } } void hugetlb_vma_unlock_write(struct vm_area_struct *vma) { if (__vma_shareable_lock(vma)) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; up_write(&vma_lock->rw_sema); } } int hugetlb_vma_trylock_write(struct vm_area_struct *vma) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; if (!__vma_shareable_lock(vma)) return 1; return down_write_trylock(&vma_lock->rw_sema); } void hugetlb_vma_assert_locked(struct vm_area_struct *vma) { if (__vma_shareable_lock(vma)) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; lockdep_assert_held(&vma_lock->rw_sema); } } void hugetlb_vma_lock_release(struct kref *kref) { struct hugetlb_vma_lock *vma_lock = container_of(kref, struct hugetlb_vma_lock, refs); kfree(vma_lock); } static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock) { struct vm_area_struct *vma = vma_lock->vma; /* * vma_lock structure may or not be released as a result of put, * it certainly will no longer be attached to vma so clear pointer. * Semaphore synchronizes access to vma_lock->vma field. */ vma_lock->vma = NULL; vma->vm_private_data = NULL; up_write(&vma_lock->rw_sema); kref_put(&vma_lock->refs, hugetlb_vma_lock_release); } static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma) { if (__vma_shareable_lock(vma)) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; __hugetlb_vma_unlock_write_put(vma_lock); } } static void hugetlb_vma_lock_free(struct vm_area_struct *vma) { /* * Only present in sharable vmas. 
*/ if (!vma || !__vma_shareable_lock(vma)) return; if (vma->vm_private_data) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; down_write(&vma_lock->rw_sema); __hugetlb_vma_unlock_write_put(vma_lock); } } static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma) { struct hugetlb_vma_lock *vma_lock; /* Only establish in (flags) sharable vmas */ if (!vma || !(vma->vm_flags & VM_MAYSHARE)) return; /* Should never get here with non-NULL vm_private_data */ if (vma->vm_private_data) return; vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL); if (!vma_lock) { /* * If we can not allocate structure, then vma can not * participate in pmd sharing. This is only a possible * performance enhancement and memory saving issue. * However, the lock is also used to synchronize page * faults with truncation. If the lock is not present, * unlikely races could leave pages in a file past i_size * until the file is removed. Warn in the unlikely case of * allocation failure. */ pr_warn_once("HugeTLB: unable to allocate vma specific lock\n"); return; } kref_init(&vma_lock->refs); init_rwsem(&vma_lock->rw_sema); vma_lock->vma = vma; vma->vm_private_data = vma_lock; } /* Helper that removes a struct file_region from the resv_map cache and returns * it for use. */ static struct file_region * get_file_region_entry_from_cache(struct resv_map *resv, long from, long to) { struct file_region *nrg; VM_BUG_ON(resv->region_cache_count <= 0); resv->region_cache_count--; nrg = list_first_entry(&resv->region_cache, struct file_region, link); list_del(&nrg->link); nrg->from = from; nrg->to = to; return nrg; } static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg, struct file_region *rg) { #ifdef CONFIG_CGROUP_HUGETLB nrg->reservation_counter = rg->reservation_counter; nrg->css = rg->css; if (rg->css) css_get(rg->css); #endif } /* Helper that records hugetlb_cgroup uncharge info. */ static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg, struct hstate *h, struct resv_map *resv, struct file_region *nrg) { #ifdef CONFIG_CGROUP_HUGETLB if (h_cg) { nrg->reservation_counter = &h_cg->rsvd_hugepage[hstate_index(h)]; nrg->css = &h_cg->css; /* * The caller will hold exactly one h_cg->css reference for the * whole contiguous reservation region. But this area might be * scattered when there are already some file_regions reside in * it. As a result, many file_regions may share only one css * reference. In order to ensure that one file_region must hold * exactly one h_cg->css reference, we should do css_get for * each file_region and leave the reference held by caller * untouched. */ css_get(&h_cg->css); if (!resv->pages_per_hpage) resv->pages_per_hpage = pages_per_huge_page(h); /* pages_per_hpage should be the same for all entries in * a resv_map. 
*/ VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h)); } else { nrg->reservation_counter = NULL; nrg->css = NULL; } #endif } static void put_uncharge_info(struct file_region *rg) { #ifdef CONFIG_CGROUP_HUGETLB if (rg->css) css_put(rg->css); #endif } static bool has_same_uncharge_info(struct file_region *rg, struct file_region *org) { #ifdef CONFIG_CGROUP_HUGETLB return rg->reservation_counter == org->reservation_counter && rg->css == org->css; #else return true; #endif } static void coalesce_file_region(struct resv_map *resv, struct file_region *rg) { struct file_region *nrg, *prg; prg = list_prev_entry(rg, link); if (&prg->link != &resv->regions && prg->to == rg->from && has_same_uncharge_info(prg, rg)) { prg->to = rg->to; list_del(&rg->link); put_uncharge_info(rg); kfree(rg); rg = prg; } nrg = list_next_entry(rg, link); if (&nrg->link != &resv->regions && nrg->from == rg->to && has_same_uncharge_info(nrg, rg)) { nrg->from = rg->from; list_del(&rg->link); put_uncharge_info(rg); kfree(rg); } } static inline long hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from, long to, struct hstate *h, struct hugetlb_cgroup *cg, long *regions_needed) { struct file_region *nrg; if (!regions_needed) { nrg = get_file_region_entry_from_cache(map, from, to); record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg); list_add(&nrg->link, rg); coalesce_file_region(map, nrg); } else *regions_needed += 1; return to - from; } /* * Must be called with resv->lock held. * * Calling this with regions_needed != NULL will count the number of pages * to be added but will not modify the linked list. And regions_needed will * indicate the number of file_regions needed in the cache to carry out to add * the regions for this range. */ static long add_reservation_in_range(struct resv_map *resv, long f, long t, struct hugetlb_cgroup *h_cg, struct hstate *h, long *regions_needed) { long add = 0; struct list_head *head = &resv->regions; long last_accounted_offset = f; struct file_region *iter, *trg = NULL; struct list_head *rg = NULL; if (regions_needed) *regions_needed = 0; /* In this loop, we essentially handle an entry for the range * [last_accounted_offset, iter->from), at every iteration, with some * bounds checking. */ list_for_each_entry_safe(iter, trg, head, link) { /* Skip irrelevant regions that start before our range. */ if (iter->from < f) { /* If this region ends after the last accounted offset, * then we need to update last_accounted_offset. */ if (iter->to > last_accounted_offset) last_accounted_offset = iter->to; continue; } /* When we find a region that starts beyond our range, we've * finished. */ if (iter->from >= t) { rg = iter->link.prev; break; } /* Add an entry for last_accounted_offset -> iter->from, and * update last_accounted_offset. */ if (iter->from > last_accounted_offset) add += hugetlb_resv_map_add(resv, iter->link.prev, last_accounted_offset, iter->from, h, h_cg, regions_needed); last_accounted_offset = iter->to; } /* Handle the case where our range extends beyond * last_accounted_offset. */ if (!rg) rg = head->prev; if (last_accounted_offset < t) add += hugetlb_resv_map_add(resv, rg, last_accounted_offset, t, h, h_cg, regions_needed); return add; } /* Must be called with resv->lock acquired. Will drop lock to allocate entries. 
*/ static int allocate_file_region_entries(struct resv_map *resv, int regions_needed) __must_hold(&resv->lock) { LIST_HEAD(allocated_regions); int to_allocate = 0, i = 0; struct file_region *trg = NULL, *rg = NULL; VM_BUG_ON(regions_needed < 0); /* * Check for sufficient descriptors in the cache to accommodate * the number of in progress add operations plus regions_needed. * * This is a while loop because when we drop the lock, some other call * to region_add or region_del may have consumed some region_entries, * so we keep looping here until we finally have enough entries for * (adds_in_progress + regions_needed). */ while (resv->region_cache_count < (resv->adds_in_progress + regions_needed)) { to_allocate = resv->adds_in_progress + regions_needed - resv->region_cache_count; /* At this point, we should have enough entries in the cache * for all the existing adds_in_progress. We should only be * needing to allocate for regions_needed. */ VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress); spin_unlock(&resv->lock); for (i = 0; i < to_allocate; i++) { trg = kmalloc(sizeof(*trg), GFP_KERNEL); if (!trg) goto out_of_memory; list_add(&trg->link, &allocated_regions); } spin_lock(&resv->lock); list_splice(&allocated_regions, &resv->region_cache); resv->region_cache_count += to_allocate; } return 0; out_of_memory: list_for_each_entry_safe(rg, trg, &allocated_regions, link) { list_del(&rg->link); kfree(rg); } return -ENOMEM; } /* * Add the huge page range represented by [f, t) to the reserve * map. Regions will be taken from the cache to fill in this range. * Sufficient regions should exist in the cache due to the previous * call to region_chg with the same range, but in some cases the cache will not * have sufficient entries due to races with other code doing region_add or * region_del. The extra needed entries will be allocated. * * regions_needed is the out value provided by a previous call to region_chg. * * Return the number of new huge pages added to the map. This number is greater * than or equal to zero. If file_region entries needed to be allocated for * this operation and we were not able to allocate, it returns -ENOMEM. * region_add of regions of length 1 never allocate file_regions and cannot * fail; region_chg will always allocate at least 1 entry and a region_add for * 1 page will only require at most 1 entry. */ static long region_add(struct resv_map *resv, long f, long t, long in_regions_needed, struct hstate *h, struct hugetlb_cgroup *h_cg) { long add = 0, actual_regions_needed = 0; spin_lock(&resv->lock); retry: /* Count how many regions are actually needed to execute this add. */ add_reservation_in_range(resv, f, t, NULL, NULL, &actual_regions_needed); /* * Check for sufficient descriptors in the cache to accommodate * this add operation. Note that actual_regions_needed may be greater * than in_regions_needed, as the resv_map may have been modified since * the region_chg call. In this case, we need to make sure that we * allocate extra entries, such that we have enough for all the * existing adds_in_progress, plus the excess needed for this * operation. */ if (actual_regions_needed > in_regions_needed && resv->region_cache_count < resv->adds_in_progress + (actual_regions_needed - in_regions_needed)) { /* region_add operation of range 1 should never need to * allocate file_region entries. 
*/ VM_BUG_ON(t - f <= 1); if (allocate_file_region_entries( resv, actual_regions_needed - in_regions_needed)) { return -ENOMEM; } goto retry; } add = add_reservation_in_range(resv, f, t, h_cg, h, NULL); resv->adds_in_progress -= in_regions_needed; spin_unlock(&resv->lock); return add; } /* * Examine the existing reserve map and determine how many * huge pages in the specified range [f, t) are NOT currently * represented. This routine is called before a subsequent * call to region_add that will actually modify the reserve * map to add the specified range [f, t). region_chg does * not change the number of huge pages represented by the * map. A number of new file_region structures is added to the cache as a * placeholder, for the subsequent region_add call to use. At least 1 * file_region structure is added. * * out_regions_needed is the number of regions added to the * resv->adds_in_progress. This value needs to be provided to a follow up call * to region_add or region_abort for proper accounting. * * Returns the number of huge pages that need to be added to the existing * reservation map for the range [f, t). This number is greater or equal to * zero. -ENOMEM is returned if a new file_region structure or cache entry * is needed and can not be allocated. */ static long region_chg(struct resv_map *resv, long f, long t, long *out_regions_needed) { long chg = 0; spin_lock(&resv->lock); /* Count how many hugepages in this range are NOT represented. */ chg = add_reservation_in_range(resv, f, t, NULL, NULL, out_regions_needed); if (*out_regions_needed == 0) *out_regions_needed = 1; if (allocate_file_region_entries(resv, *out_regions_needed)) return -ENOMEM; resv->adds_in_progress += *out_regions_needed; spin_unlock(&resv->lock); return chg; } /* * Abort the in progress add operation. The adds_in_progress field * of the resv_map keeps track of the operations in progress between * calls to region_chg and region_add. Operations are sometimes * aborted after the call to region_chg. In such cases, region_abort * is called to decrement the adds_in_progress counter. regions_needed * is the value returned by the region_chg call, it is used to decrement * the adds_in_progress counter. * * NOTE: The range arguments [f, t) are not needed or used in this * routine. They are kept to make reading the calling code easier as * arguments will match the associated region_chg call. */ static void region_abort(struct resv_map *resv, long f, long t, long regions_needed) { spin_lock(&resv->lock); VM_BUG_ON(!resv->region_cache_count); resv->adds_in_progress -= regions_needed; spin_unlock(&resv->lock); } /* * Delete the specified range [f, t) from the reserve map. If the * t parameter is LONG_MAX, this indicates that ALL regions after f * should be deleted. Locate the regions which intersect [f, t) * and either trim, delete or split the existing regions. * * Returns the number of huge pages deleted from the reserve map. * In the normal case, the return value is zero or more. In the * case where a region must be split, a new region descriptor must * be allocated. If the allocation fails, -ENOMEM will be returned. * NOTE: If the parameter t == LONG_MAX, then we will never split * a region and possibly return -ENOMEM. Callers specifying * t == LONG_MAX do not need to check for -ENOMEM error. 
*/ static long region_del(struct resv_map *resv, long f, long t) { struct list_head *head = &resv->regions; struct file_region *rg, *trg; struct file_region *nrg = NULL; long del = 0; retry: spin_lock(&resv->lock); list_for_each_entry_safe(rg, trg, head, link) { /* * Skip regions before the range to be deleted. file_region * ranges are normally of the form [from, to). However, there * may be a "placeholder" entry in the map which is of the form * (from, to) with from == to. Check for placeholder entries * at the beginning of the range to be deleted. */ if (rg->to <= f && (rg->to != rg->from || rg->to != f)) continue; if (rg->from >= t) break; if (f > rg->from && t < rg->to) { /* Must split region */ /* * Check for an entry in the cache before dropping * lock and attempting allocation. */ if (!nrg && resv->region_cache_count > resv->adds_in_progress) { nrg = list_first_entry(&resv->region_cache, struct file_region, link); list_del(&nrg->link); resv->region_cache_count--; } if (!nrg) { spin_unlock(&resv->lock); nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); if (!nrg) return -ENOMEM; goto retry; } del += t - f; hugetlb_cgroup_uncharge_file_region( resv, rg, t - f, false); /* New entry for end of split region */ nrg->from = t; nrg->to = rg->to; copy_hugetlb_cgroup_uncharge_info(nrg, rg); INIT_LIST_HEAD(&nrg->link); /* Original entry is trimmed */ rg->to = f; list_add(&nrg->link, &rg->link); nrg = NULL; break; } if (f <= rg->from && t >= rg->to) { /* Remove entire region */ del += rg->to - rg->from; hugetlb_cgroup_uncharge_file_region(resv, rg, rg->to - rg->from, true); list_del(&rg->link); kfree(rg); continue; } if (f <= rg->from) { /* Trim beginning of region */ hugetlb_cgroup_uncharge_file_region(resv, rg, t - rg->from, false); del += t - rg->from; rg->from = t; } else { /* Trim end of region */ hugetlb_cgroup_uncharge_file_region(resv, rg, rg->to - f, false); del += rg->to - f; rg->to = f; } } spin_unlock(&resv->lock); kfree(nrg); return del; } /* * A rare out of memory error was encountered which prevented removal of * the reserve map region for a page. The huge page itself was free'ed * and removed from the page cache. This routine will adjust the subpool * usage count, and the global reserve count if needed. By incrementing * these counts, the reserve map entry which could not be deleted will * appear as a "reserved" entry instead of simply dangling with incorrect * counts. */ void hugetlb_fix_reserve_counts(struct inode *inode) { struct hugepage_subpool *spool = subpool_inode(inode); long rsv_adjust; bool reserved = false; rsv_adjust = hugepage_subpool_get_pages(spool, 1); if (rsv_adjust > 0) { struct hstate *h = hstate_inode(inode); if (!hugetlb_acct_memory(h, 1)) reserved = true; } else if (!rsv_adjust) { reserved = true; } if (!reserved) pr_warn("hugetlb: Huge Page Reserved count may go negative.\n"); } /* * Count and return the number of huge pages in the reserve map * that intersect with the range [f, t). */ static long region_count(struct resv_map *resv, long f, long t) { struct list_head *head = &resv->regions; struct file_region *rg; long chg = 0; spin_lock(&resv->lock); /* Locate each segment we overlap with, and count that overlap. 
*/ list_for_each_entry(rg, head, link) { long seg_from; long seg_to; if (rg->to <= f) continue; if (rg->from >= t) break; seg_from = max(rg->from, f); seg_to = min(rg->to, t); chg += seg_to - seg_from; } spin_unlock(&resv->lock); return chg; } /* * Convert the address within this vma to the page offset within * the mapping, in pagecache page units; huge pages here. */ static pgoff_t vma_hugecache_offset(struct hstate *h, struct vm_area_struct *vma, unsigned long address) { return ((address - vma->vm_start) >> huge_page_shift(h)) + (vma->vm_pgoff >> huge_page_order(h)); } pgoff_t linear_hugepage_index(struct vm_area_struct *vma, unsigned long address) { return vma_hugecache_offset(hstate_vma(vma), vma, address); } EXPORT_SYMBOL_GPL(linear_hugepage_index); /** * vma_kernel_pagesize - Page size granularity for this VMA. * @vma: The user mapping. * * Folios in this VMA will be aligned to, and at least the size of the * number of bytes returned by this function. * * Return: The default size of the folios allocated when backing a VMA. */ unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) { if (vma->vm_ops && vma->vm_ops->pagesize) return vma->vm_ops->pagesize(vma); return PAGE_SIZE; } EXPORT_SYMBOL_GPL(vma_kernel_pagesize); /* * Return the page size being used by the MMU to back a VMA. In the majority * of cases, the page size used by the kernel matches the MMU size. On * architectures where it differs, an architecture-specific 'strong' * version of this symbol is required. */ __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) { return vma_kernel_pagesize(vma); } /* * Flags for MAP_PRIVATE reservations. These are stored in the bottom * bits of the reservation map pointer, which are always clear due to * alignment. */ #define HPAGE_RESV_OWNER (1UL << 0) #define HPAGE_RESV_UNMAPPED (1UL << 1) #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED) /* * These helpers are used to track how many pages are reserved for * faults in a MAP_PRIVATE mapping. Only the process that called mmap() * is guaranteed to have their future faults succeed. * * With the exception of hugetlb_dup_vma_private() which is called at fork(), * the reserve counters are updated with the hugetlb_lock held. It is safe * to reset the VMA at fork() time as it is not in use yet and there is no * chance of the global counters getting corrupted as a result of the values. * * The private mapping reservation is represented in a subtly different * manner to a shared mapping. A shared mapping has a region map associated * with the underlying file, this region map represents the backing file * pages which have ever had a reservation assigned which this persists even * after the page is instantiated. A private mapping has a region map * associated with the original mmap which is attached to all VMAs which * reference it, this region map represents those offsets which have consumed * reservation ie. where pages have been instantiated. 
*/ static unsigned long get_vma_private_data(struct vm_area_struct *vma) { return (unsigned long)vma->vm_private_data; } static void set_vma_private_data(struct vm_area_struct *vma, unsigned long value) { vma->vm_private_data = (void *)value; } static void resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map, struct hugetlb_cgroup *h_cg, struct hstate *h) { #ifdef CONFIG_CGROUP_HUGETLB if (!h_cg || !h) { resv_map->reservation_counter = NULL; resv_map->pages_per_hpage = 0; resv_map->css = NULL; } else { resv_map->reservation_counter = &h_cg->rsvd_hugepage[hstate_index(h)]; resv_map->pages_per_hpage = pages_per_huge_page(h); resv_map->css = &h_cg->css; } #endif } struct resv_map *resv_map_alloc(void) { struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL); if (!resv_map || !rg) { kfree(resv_map); kfree(rg); return NULL; } kref_init(&resv_map->refs); spin_lock_init(&resv_map->lock); INIT_LIST_HEAD(&resv_map->regions); resv_map->adds_in_progress = 0; /* * Initialize these to 0. On shared mappings, 0's here indicate these * fields don't do cgroup accounting. On private mappings, these will be * re-initialized to the proper values, to indicate that hugetlb cgroup * reservations are to be un-charged from here. */ resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL); INIT_LIST_HEAD(&resv_map->region_cache); list_add(&rg->link, &resv_map->region_cache); resv_map->region_cache_count = 1; return resv_map; } void resv_map_release(struct kref *ref) { struct resv_map *resv_map = container_of(ref, struct resv_map, refs); struct list_head *head = &resv_map->region_cache; struct file_region *rg, *trg; /* Clear out any active regions before we release the map. */ region_del(resv_map, 0, LONG_MAX); /* ... and any entries left in the cache */ list_for_each_entry_safe(rg, trg, head, link) { list_del(&rg->link); kfree(rg); } VM_BUG_ON(resv_map->adds_in_progress); kfree(resv_map); } static inline struct resv_map *inode_resv_map(struct inode *inode) { /* * At inode evict time, i_mapping may not point to the original * address space within the inode. This original address space * contains the pointer to the resv_map. So, always use the * address space embedded within the inode. * The VERY common case is inode->mapping == &inode->i_data but, * this may not be true for device special inodes. 
*/ return (struct resv_map *)(&inode->i_data)->private_data; } static struct resv_map *vma_resv_map(struct vm_area_struct *vma) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); if (vma->vm_flags & VM_MAYSHARE) { struct address_space *mapping = vma->vm_file->f_mapping; struct inode *inode = mapping->host; return inode_resv_map(inode); } else { return (struct resv_map *)(get_vma_private_data(vma) & ~HPAGE_RESV_MASK); } } static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); set_vma_private_data(vma, (get_vma_private_data(vma) & HPAGE_RESV_MASK) | (unsigned long)map); } static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); set_vma_private_data(vma, get_vma_private_data(vma) | flags); } static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); return (get_vma_private_data(vma) & flag) != 0; } void hugetlb_dup_vma_private(struct vm_area_struct *vma) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); /* * Clear vm_private_data * - For shared mappings this is a per-vma semaphore that may be * allocated in a subsequent call to hugetlb_vm_op_open. * Before clearing, make sure pointer is not associated with vma * as this will leak the structure. This is the case when called * via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already * been called to allocate a new structure. * - For MAP_PRIVATE mappings, this is the reserve map which does * not apply to children. Faults generated by the children are * not guaranteed to succeed, even if read-only. */ if (vma->vm_flags & VM_MAYSHARE) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; if (vma_lock && vma_lock->vma != vma) vma->vm_private_data = NULL; } else vma->vm_private_data = NULL; } /* * Reset and decrement one ref on hugepage private reservation. * Called with mm->mmap_lock writer semaphore held. * This function should be only used by move_vma() and operate on * same sized vma. It should never come here with last ref on the * reservation. */ void clear_vma_resv_huge_pages(struct vm_area_struct *vma) { /* * Clear the old hugetlb private page reservation. * It has already been transferred to new_vma. * * During a mremap() operation of a hugetlb vma we call move_vma() * which copies vma into new_vma and unmaps vma. After the copy * operation both new_vma and vma share a reference to the resv_map * struct, and at that point vma is about to be unmapped. We don't * want to return the reservation to the pool at unmap of vma because * the reservation still lives on in new_vma, so simply decrement the * ref here and remove the resv_map reference from this vma. */ struct resv_map *reservations = vma_resv_map(vma); if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { resv_map_put_hugetlb_cgroup_uncharge_info(reservations); kref_put(&reservations->refs, resv_map_release); } hugetlb_dup_vma_private(vma); } /* Returns true if the VMA has associated reserve pages */ static bool vma_has_reserves(struct vm_area_struct *vma, long chg) { if (vma->vm_flags & VM_NORESERVE) { /* * This address is already reserved by other process(chg == 0), * so, we should decrement reserved count. 
Without decrementing, * reserve count remains after releasing inode, because this * allocated page will go into page cache and is regarded as * coming from reserved pool in releasing step. Currently, we * don't have any other solution to deal with this situation * properly, so add work-around here. */ if (vma->vm_flags & VM_MAYSHARE && chg == 0) return true; else return false; } /* Shared mappings always use reserves */ if (vma->vm_flags & VM_MAYSHARE) { /* * We know VM_NORESERVE is not set. Therefore, there SHOULD * be a region map for all pages. The only situation where * there is no region map is if a hole was punched via * fallocate. In this case, there really are no reserves to * use. This situation is indicated if chg != 0. */ if (chg) return false; else return true; } /* * Only the process that called mmap() has reserves for * private mappings. */ if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { /* * Like the shared case above, a hole punch or truncate * could have been performed on the private mapping. * Examine the value of chg to determine if reserves * actually exist or were previously consumed. * Very Subtle - The value of chg comes from a previous * call to vma_needs_reserves(). The reserve map for * private mappings has different (opposite) semantics * than that of shared mappings. vma_needs_reserves() * has already taken this difference in semantics into * account. Therefore, the meaning of chg is the same * as in the shared case above. Code could easily be * combined, but keeping it separate draws attention to * subtle differences. */ if (chg) return false; else return true; } return false; } static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio) { int nid = folio_nid(folio); lockdep_assert_held(&hugetlb_lock); VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); list_move(&folio->lru, &h->hugepage_freelists[nid]); h->free_huge_pages++; h->free_huge_pages_node[nid]++; folio_set_hugetlb_freed(folio); } static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h, int nid) { struct folio *folio; bool pin = !!(current->flags & PF_MEMALLOC_PIN); lockdep_assert_held(&hugetlb_lock); list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) { if (pin && !folio_is_longterm_pinnable(folio)) continue; if (folio_test_hwpoison(folio)) continue; list_move(&folio->lru, &h->hugepage_activelist); folio_ref_unfreeze(folio, 1); folio_clear_hugetlb_freed(folio); h->free_huge_pages--; h->free_huge_pages_node[nid]--; return folio; } return NULL; } static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask) { unsigned int cpuset_mems_cookie; struct zonelist *zonelist; struct zone *zone; struct zoneref *z; int node = NUMA_NO_NODE; zonelist = node_zonelist(nid, gfp_mask); retry_cpuset: cpuset_mems_cookie = read_mems_allowed_begin(); for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) { struct folio *folio; if (!cpuset_zone_allowed(zone, gfp_mask)) continue; /* * no need to ask again on the same node. 
Pool is node rather than * zone aware */ if (zone_to_nid(zone) == node) continue; node = zone_to_nid(zone); folio = dequeue_hugetlb_folio_node_exact(h, node); if (folio) return folio; } if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie))) goto retry_cpuset; return NULL; } static unsigned long available_huge_pages(struct hstate *h) { return h->free_huge_pages - h->resv_huge_pages; } static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address, int avoid_reserve, long chg) { struct folio *folio = NULL; struct mempolicy *mpol; gfp_t gfp_mask; nodemask_t *nodemask; int nid; /* * A child process with MAP_PRIVATE mappings created by their parent * have no page reserves. This check ensures that reservations are * not "stolen". The child may still get SIGKILLed */ if (!vma_has_reserves(vma, chg) && !available_huge_pages(h)) goto err; /* If reserves cannot be used, ensure enough pages are in the pool */ if (avoid_reserve && !available_huge_pages(h)) goto err; gfp_mask = htlb_alloc_mask(h); nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); if (mpol_is_preferred_many(mpol)) { folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, nid, nodemask); /* Fallback to all nodes if page==NULL */ nodemask = NULL; } if (!folio) folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, nid, nodemask); if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) { folio_set_hugetlb_restore_reserve(folio); h->resv_huge_pages--; } mpol_cond_put(mpol); return folio; err: return NULL; } /* * common helper functions for hstate_next_node_to_{alloc|free}. * We may have allocated or freed a huge page based on a different * nodes_allowed previously, so h->next_node_to_{alloc|free} might * be outside of *nodes_allowed. Ensure that we use an allowed * node for alloc or free. */ static int next_node_allowed(int nid, nodemask_t *nodes_allowed) { nid = next_node_in(nid, *nodes_allowed); VM_BUG_ON(nid >= MAX_NUMNODES); return nid; } static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed) { if (!node_isset(nid, *nodes_allowed)) nid = next_node_allowed(nid, nodes_allowed); return nid; } /* * returns the previously saved node ["this node"] from which to * allocate a persistent huge page for the pool and advance the * next node from which to allocate, handling wrap at end of node * mask. */ static int hstate_next_node_to_alloc(struct hstate *h, nodemask_t *nodes_allowed) { int nid; VM_BUG_ON(!nodes_allowed); nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed); h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed); return nid; } /* * helper for remove_pool_huge_page() - return the previously saved * node ["this node"] from which to free a huge page. Advance the * next node id whether or not we find a free huge page to free so * that the next attempt to free addresses the next node. 
*/ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) { int nid; VM_BUG_ON(!nodes_allowed); nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); return nid; } #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \ for (nr_nodes = nodes_weight(*mask); \ nr_nodes > 0 && \ ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \ nr_nodes--) #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \ for (nr_nodes = nodes_weight(*mask); \ nr_nodes > 0 && \ ((node = hstate_next_node_to_free(hs, mask)) || 1); \ nr_nodes--) /* used to demote non-gigantic_huge pages as well */ static void __destroy_compound_gigantic_folio(struct folio *folio, unsigned int order, bool demote) { int i; int nr_pages = 1 << order; struct page *p; atomic_set(&folio->_entire_mapcount, 0); atomic_set(&folio->_nr_pages_mapped, 0); atomic_set(&folio->_pincount, 0); for (i = 1; i < nr_pages; i++) { p = folio_page(folio, i); p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE; p->mapping = NULL; clear_compound_head(p); if (!demote) set_page_refcounted(p); } __folio_clear_head(folio); } static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio, unsigned int order) { __destroy_compound_gigantic_folio(folio, order, true); } #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE static void destroy_compound_gigantic_folio(struct folio *folio, unsigned int order) { __destroy_compound_gigantic_folio(folio, order, false); } static void free_gigantic_folio(struct folio *folio, unsigned int order) { /* * If the page isn't allocated using the cma allocator, * cma_release() returns false. */ #ifdef CONFIG_CMA int nid = folio_nid(folio); if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order)) return; #endif free_contig_range(folio_pfn(folio), 1 << order); } #ifdef CONFIG_CONTIG_ALLOC static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nodemask) { struct page *page; unsigned long nr_pages = pages_per_huge_page(h); if (nid == NUMA_NO_NODE) nid = numa_mem_id(); #ifdef CONFIG_CMA { int node; if (hugetlb_cma[nid]) { page = cma_alloc(hugetlb_cma[nid], nr_pages, huge_page_order(h), true); if (page) return page_folio(page); } if (!(gfp_mask & __GFP_THISNODE)) { for_each_node_mask(node, *nodemask) { if (node == nid || !hugetlb_cma[node]) continue; page = cma_alloc(hugetlb_cma[node], nr_pages, huge_page_order(h), true); if (page) return page_folio(page); } } } #endif page = alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask); return page ? page_folio(page) : NULL; } #else /* !CONFIG_CONTIG_ALLOC */ static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nodemask) { return NULL; } #endif /* CONFIG_CONTIG_ALLOC */ #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */ static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nodemask) { return NULL; } static inline void free_gigantic_folio(struct folio *folio, unsigned int order) { } static inline void destroy_compound_gigantic_folio(struct folio *folio, unsigned int order) { } #endif static inline void __clear_hugetlb_destructor(struct hstate *h, struct folio *folio) { lockdep_assert_held(&hugetlb_lock); folio_clear_hugetlb(folio); } /* * Remove hugetlb folio from lists. * If vmemmap exists for the folio, update dtor so that the folio appears * as just a compound page. Otherwise, wait until after allocating vmemmap * to update dtor. 
* * A reference is held on the folio, except in the case of demote. * * Must be called with hugetlb lock held. */ static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio, bool adjust_surplus, bool demote) { int nid = folio_nid(folio); VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio); VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio); lockdep_assert_held(&hugetlb_lock); if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) return; list_del(&folio->lru); if (folio_test_hugetlb_freed(folio)) { h->free_huge_pages--; h->free_huge_pages_node[nid]--; } if (adjust_surplus) { h->surplus_huge_pages--; h->surplus_huge_pages_node[nid]--; } /* * We can only clear the hugetlb destructor after allocating vmemmap * pages. Otherwise, someone (memory error handling) may try to write * to tail struct pages. */ if (!folio_test_hugetlb_vmemmap_optimized(folio)) __clear_hugetlb_destructor(h, folio); /* * In the case of demote we do not ref count the page as it will soon * be turned into a page of smaller size. */ if (!demote) folio_ref_unfreeze(folio, 1); h->nr_huge_pages--; h->nr_huge_pages_node[nid]--; } static void remove_hugetlb_folio(struct hstate *h, struct folio *folio, bool adjust_surplus) { __remove_hugetlb_folio(h, folio, adjust_surplus, false); } static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio, bool adjust_surplus) { __remove_hugetlb_folio(h, folio, adjust_surplus, true); } static void add_hugetlb_folio(struct hstate *h, struct folio *folio, bool adjust_surplus) { int zeroed; int nid = folio_nid(folio); VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio); lockdep_assert_held(&hugetlb_lock); INIT_LIST_HEAD(&folio->lru); h->nr_huge_pages++; h->nr_huge_pages_node[nid]++; if (adjust_surplus) { h->surplus_huge_pages++; h->surplus_huge_pages_node[nid]++; } folio_set_hugetlb(folio); folio_change_private(folio, NULL); /* * We have to set hugetlb_vmemmap_optimized again as above * folio_change_private(folio, NULL) cleared it. */ folio_set_hugetlb_vmemmap_optimized(folio); /* * This folio is about to be managed by the hugetlb allocator and * should have no users. Drop our reference, and check for others * just in case. */ zeroed = folio_put_testzero(folio); if (unlikely(!zeroed)) /* * It is VERY unlikely soneone else has taken a ref * on the folio. In this case, we simply return as * free_huge_folio() will be called when this other ref * is dropped. */ return; arch_clear_hugepage_flags(&folio->page); enqueue_hugetlb_folio(h, folio); } static void __update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio) { bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio); if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) return; /* * If we don't know which subpages are hwpoisoned, we can't free * the hugepage, so it's leaked intentionally. */ if (folio_test_hugetlb_raw_hwp_unreliable(folio)) return; if (hugetlb_vmemmap_restore(h, &folio->page)) { spin_lock_irq(&hugetlb_lock); /* * If we cannot allocate vmemmap pages, just refuse to free the * page and put the page back on the hugetlb free list and treat * as a surplus page. */ add_hugetlb_folio(h, folio, true); spin_unlock_irq(&hugetlb_lock); return; } /* * Move PageHWPoison flag from head page to the raw error pages, * which makes any healthy subpages reusable. 
*/ if (unlikely(folio_test_hwpoison(folio))) folio_clear_hugetlb_hwpoison(folio); /* * If vmemmap pages were allocated above, then we need to clear the * hugetlb destructor under the hugetlb lock. */ if (clear_dtor) { spin_lock_irq(&hugetlb_lock); __clear_hugetlb_destructor(h, folio); spin_unlock_irq(&hugetlb_lock); } /* * Non-gigantic pages demoted from CMA allocated gigantic pages * need to be given back to CMA in free_gigantic_folio. */ if (hstate_is_gigantic(h) || hugetlb_cma_folio(folio, huge_page_order(h))) { destroy_compound_gigantic_folio(folio, huge_page_order(h)); free_gigantic_folio(folio, huge_page_order(h)); } else { __free_pages(&folio->page, huge_page_order(h)); } } /* * As update_and_free_hugetlb_folio() can be called under any context, so we cannot * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the * actual freeing in a workqueue to prevent from using GFP_ATOMIC to allocate * the vmemmap pages. * * free_hpage_workfn() locklessly retrieves the linked list of pages to be * freed and frees them one-by-one. As the page->mapping pointer is going * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node * structure of a lockless linked list of huge pages to be freed. */ static LLIST_HEAD(hpage_freelist); static void free_hpage_workfn(struct work_struct *work) { struct llist_node *node; node = llist_del_all(&hpage_freelist); while (node) { struct page *page; struct hstate *h; page = container_of((struct address_space **)node, struct page, mapping); node = node->next; page->mapping = NULL; /* * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in * folio_hstate() is going to trigger because a previous call to * remove_hugetlb_folio() will clear the hugetlb bit, so do * not use folio_hstate() directly. */ h = size_to_hstate(page_size(page)); __update_and_free_hugetlb_folio(h, page_folio(page)); cond_resched(); } } static DECLARE_WORK(free_hpage_work, free_hpage_workfn); static inline void flush_free_hpage_work(struct hstate *h) { if (hugetlb_vmemmap_optimizable(h)) flush_work(&free_hpage_work); } static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio, bool atomic) { if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) { __update_and_free_hugetlb_folio(h, folio); return; } /* * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages. * * Only call schedule_work() if hpage_freelist is previously * empty. Otherwise, schedule_work() had been called but the workfn * hasn't retrieved the list yet. */ if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist)) schedule_work(&free_hpage_work); } static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list) { struct page *page, *t_page; struct folio *folio; list_for_each_entry_safe(page, t_page, list, lru) { folio = page_folio(page); update_and_free_hugetlb_folio(h, folio, false); cond_resched(); } } struct hstate *size_to_hstate(unsigned long size) { struct hstate *h; for_each_hstate(h) { if (huge_page_size(h) == size) return h; } return NULL; } void free_huge_folio(struct folio *folio) { /* * Can't pass hstate in here because it is called from the * compound page destructor. 
*/ struct hstate *h = folio_hstate(folio); int nid = folio_nid(folio); struct hugepage_subpool *spool = hugetlb_folio_subpool(folio); bool restore_reserve; unsigned long flags; VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); VM_BUG_ON_FOLIO(folio_mapcount(folio), folio); hugetlb_set_folio_subpool(folio, NULL); if (folio_test_anon(folio)) __ClearPageAnonExclusive(&folio->page); folio->mapping = NULL; restore_reserve = folio_test_hugetlb_restore_reserve(folio); folio_clear_hugetlb_restore_reserve(folio); /* * If HPageRestoreReserve was set on page, page allocation consumed a * reservation. If the page was associated with a subpool, there * would have been a page reserved in the subpool before allocation * via hugepage_subpool_get_pages(). Since we are 'restoring' the * reservation, do not call hugepage_subpool_put_pages() as this will * remove the reserved page from the subpool. */ if (!restore_reserve) { /* * A return code of zero implies that the subpool will be * under its minimum size if the reservation is not restored * after page is free. Therefore, force restore_reserve * operation. */ if (hugepage_subpool_put_pages(spool, 1) == 0) restore_reserve = true; } spin_lock_irqsave(&hugetlb_lock, flags); folio_clear_hugetlb_migratable(folio); hugetlb_cgroup_uncharge_folio(hstate_index(h), pages_per_huge_page(h), folio); hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), pages_per_huge_page(h), folio); if (restore_reserve) h->resv_huge_pages++; if (folio_test_hugetlb_temporary(folio)) { remove_hugetlb_folio(h, folio, false); spin_unlock_irqrestore(&hugetlb_lock, flags); update_and_free_hugetlb_folio(h, folio, true); } else if (h->surplus_huge_pages_node[nid]) { /* remove the page from active list */ remove_hugetlb_folio(h, folio, true); spin_unlock_irqrestore(&hugetlb_lock, flags); update_and_free_hugetlb_folio(h, folio, true); } else { arch_clear_hugepage_flags(&folio->page); enqueue_hugetlb_folio(h, folio); spin_unlock_irqrestore(&hugetlb_lock, flags); } } /* * Must be called with the hugetlb lock held */ static void __prep_account_new_huge_page(struct hstate *h, int nid) { lockdep_assert_held(&hugetlb_lock); h->nr_huge_pages++; h->nr_huge_pages_node[nid]++; } static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio) { hugetlb_vmemmap_optimize(h, &folio->page); INIT_LIST_HEAD(&folio->lru); folio_set_hugetlb(folio); hugetlb_set_folio_subpool(folio, NULL); set_hugetlb_cgroup(folio, NULL); set_hugetlb_cgroup_rsvd(folio, NULL); } static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid) { __prep_new_hugetlb_folio(h, folio); spin_lock_irq(&hugetlb_lock); __prep_account_new_huge_page(h, nid); spin_unlock_irq(&hugetlb_lock); } static bool __prep_compound_gigantic_folio(struct folio *folio, unsigned int order, bool demote) { int i, j; int nr_pages = 1 << order; struct page *p; __folio_clear_reserved(folio); for (i = 0; i < nr_pages; i++) { p = folio_page(folio, i); /* * For gigantic hugepages allocated through bootmem at * boot, it's safer to be consistent with the not-gigantic * hugepages and clear the PG_reserved bit from all tail pages * too. Otherwise drivers using get_user_pages() to access tail * pages may get the reference counting wrong if they see * PG_reserved set on a tail page (despite the head page not * having PG_reserved set). Enforcing this consistency between * head and tail pages allows drivers to optimize away a check * on the head page when they need know if put_page() is needed * after get_user_pages(). 
*/ if (i != 0) /* head page cleared above */ __ClearPageReserved(p); /* * Subtle and very unlikely * * Gigantic 'page allocators' such as memblock or cma will * return a set of pages with each page ref counted. We need * to turn this set of pages into a compound page with tail * page ref counts set to zero. Code such as speculative page * cache adding could take a ref on a 'to be' tail page. * We need to respect any increased ref count, and only set * the ref count to zero if count is currently 1. If count * is not 1, we return an error. An error return indicates * the set of pages can not be converted to a gigantic page. * The caller who allocated the pages should then discard the * pages using the appropriate free interface. * * In the case of demote, the ref count will be zero. */ if (!demote) { if (!page_ref_freeze(p, 1)) { pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n"); goto out_error; } } else { VM_BUG_ON_PAGE(page_count(p), p); } if (i != 0) set_compound_head(p, &folio->page); } __folio_set_head(folio); /* we rely on prep_new_hugetlb_folio to set the destructor */ folio_set_order(folio, order); atomic_set(&folio->_entire_mapcount, -1); atomic_set(&folio->_nr_pages_mapped, 0); atomic_set(&folio->_pincount, 0); return true; out_error: /* undo page modifications made above */ for (j = 0; j < i; j++) { p = folio_page(folio, j); if (j != 0) clear_compound_head(p); set_page_refcounted(p); } /* need to clear PG_reserved on remaining tail pages */ for (; j < nr_pages; j++) { p = folio_page(folio, j); __ClearPageReserved(p); } return false; } static bool prep_compound_gigantic_folio(struct folio *folio, unsigned int order) { return __prep_compound_gigantic_folio(folio, order, false); } static bool prep_compound_gigantic_folio_for_demote(struct folio *folio, unsigned int order) { return __prep_compound_gigantic_folio(folio, order, true); } /* * PageHuge() only returns true for hugetlbfs pages, but not for normal or * transparent huge pages. See the PageTransHuge() documentation for more * details. */ int PageHuge(struct page *page) { struct folio *folio; if (!PageCompound(page)) return 0; folio = page_folio(page); return folio_test_hugetlb(folio); } EXPORT_SYMBOL_GPL(PageHuge); /* * Find and lock address space (mapping) in write mode. * * Upon entry, the page is locked which means that page_mapping() is * stable. Due to locking order, we can only trylock_write. If we can * not get the lock, simply return NULL to caller. */ struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage) { struct address_space *mapping = page_mapping(hpage); if (!mapping) return mapping; if (i_mmap_trylock_write(mapping)) return mapping; return NULL; } pgoff_t hugetlb_basepage_index(struct page *page) { struct page *page_head = compound_head(page); pgoff_t index = page_index(page_head); unsigned long compound_idx; if (compound_order(page_head) > MAX_ORDER) compound_idx = page_to_pfn(page) - page_to_pfn(page_head); else compound_idx = page - page_head; return (index << compound_order(page_head)) + compound_idx; } static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry) { int order = huge_page_order(h); struct page *page; bool alloc_try_hard = true; bool retry = true; /* * By default we always try hard to allocate the page with * __GFP_RETRY_MAYFAIL flag. 
However, if we are allocating pages in * a loop (to adjust global huge page counts) and previous allocation * failed, do not continue to try hard on the same node. Use the * node_alloc_noretry bitmap to manage this state information. */ if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry)) alloc_try_hard = false; gfp_mask |= __GFP_COMP|__GFP_NOWARN; if (alloc_try_hard) gfp_mask |= __GFP_RETRY_MAYFAIL; if (nid == NUMA_NO_NODE) nid = numa_mem_id(); retry: page = __alloc_pages(gfp_mask, order, nid, nmask); /* Freeze head page */ if (page && !page_ref_freeze(page, 1)) { __free_pages(page, order); if (retry) { /* retry once */ retry = false; goto retry; } /* WOW! twice in a row. */ pr_warn("HugeTLB head page unexpected inflated ref count\n"); page = NULL; } /* * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this * indicates an overall state change. Clear bit so that we resume * normal 'try hard' allocations. */ if (node_alloc_noretry && page && !alloc_try_hard) node_clear(nid, *node_alloc_noretry); /* * If we tried hard to get a page but failed, set bit so that * subsequent attempts will not try as hard until there is an * overall state change. */ if (node_alloc_noretry && !page && alloc_try_hard) node_set(nid, *node_alloc_noretry); if (!page) { __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); return NULL; } __count_vm_event(HTLB_BUDDY_PGALLOC); return page_folio(page); } /* * Common helper to allocate a fresh hugetlb page. All specific allocators * should use this function to get new hugetlb pages * * Note that returned page is 'frozen': ref count of head page and all tail * pages is zero. */ static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry) { struct folio *folio; bool retry = false; retry: if (hstate_is_gigantic(h)) folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask); else folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, nmask, node_alloc_noretry); if (!folio) return NULL; if (hstate_is_gigantic(h)) { if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) { /* * Rare failure to convert pages to compound page. * Free pages and try again - ONCE! */ free_gigantic_folio(folio, huge_page_order(h)); if (!retry) { retry = true; goto retry; } return NULL; } } prep_new_hugetlb_folio(h, folio, folio_nid(folio)); return folio; } /* * Allocates a fresh page to the hugetlb allocator pool in the node interleaved * manner. */ static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, nodemask_t *node_alloc_noretry) { struct folio *folio; int nr_nodes, node; gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node, nodes_allowed, node_alloc_noretry); if (folio) { free_huge_folio(folio); /* free it into the hugepage allocator */ return 1; } } return 0; } /* * Remove huge page from pool from next node to free. Attempt to keep * persistent huge pages more or less balanced over allowed nodes. * This routine only 'removes' the hugetlb page. The caller must make * an additional call to free the page to low level allocators. * Called with hugetlb_lock locked. 
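 *
 * A minimal caller sketch (an illustration of the pattern used by the
 * callers below, e.g. return_unused_surplus_pages(); the shrink
 * condition is a stand-in, not a real helper):
 *
 *	struct page *page;
 *	LIST_HEAD(page_list);
 *
 *	spin_lock_irq(&hugetlb_lock);
 *	while (pool_should_shrink) {
 *		page = remove_pool_huge_page(h, &node_states[N_MEMORY], false);
 *		if (!page)
 *			break;
 *		list_add(&page->lru, &page_list);
 *	}
 *	spin_unlock_irq(&hugetlb_lock);
 *	update_and_free_pages_bulk(h, &page_list);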
 */
static struct page *remove_pool_huge_page(struct hstate *h,
						nodemask_t *nodes_allowed,
						bool acct_surplus)
{
	int nr_nodes, node;
	struct page *page = NULL;
	struct folio *folio;

	lockdep_assert_held(&hugetlb_lock);
	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			page = list_entry(h->hugepage_freelists[node].next,
					  struct page, lru);
			folio = page_folio(page);
			remove_hugetlb_folio(h, folio, acct_surplus);
			break;
		}
	}

	return page;
}

/*
 * Dissolve a given free hugepage into free buddy pages. This function does
 * nothing for in-use hugepages and non-hugepages.
 * This function returns values like below:
 *
 *  -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
 *           when the system is under memory pressure and the feature of
 *           freeing unused vmemmap pages associated with each hugetlb page
 *           is enabled.
 *  -EBUSY:  failed to dissolve free hugepages or the hugepage is in-use
 *           (allocated or reserved).
 *       0:  successfully dissolved free hugepages or the page is not a
 *           hugepage (considered as already dissolved)
 */
int dissolve_free_huge_page(struct page *page)
{
	int rc = -EBUSY;
	struct folio *folio = page_folio(page);

retry:
	/* Not to disrupt normal path by vainly holding hugetlb_lock */
	if (!folio_test_hugetlb(folio))
		return 0;

	spin_lock_irq(&hugetlb_lock);
	if (!folio_test_hugetlb(folio)) {
		rc = 0;
		goto out;
	}

	if (!folio_ref_count(folio)) {
		struct hstate *h = folio_hstate(folio);

		if (!available_huge_pages(h))
			goto out;

		/*
		 * We should make sure that the page is already on the free list
		 * when it is dissolved.
		 */
		if (unlikely(!folio_test_hugetlb_freed(folio))) {
			spin_unlock_irq(&hugetlb_lock);
			cond_resched();

			/*
			 * Theoretically, we should return -EBUSY when we
			 * encounter this race. In fact, we have a chance
			 * to successfully dissolve the page if we do a
			 * retry, because the race window is quite small.
			 * If we seize this opportunity, it is an optimization
			 * for increasing the success rate of dissolving the page.
			 */
			goto retry;
		}

		remove_hugetlb_folio(h, folio, false);
		h->max_huge_pages--;
		spin_unlock_irq(&hugetlb_lock);

		/*
		 * Normally update_and_free_hugetlb_folio will allocate the required
		 * vmemmap before freeing the page. update_and_free_hugetlb_folio
		 * will fail to free the page if it can not allocate the required
		 * vmemmap. We need to adjust max_huge_pages if the page is not
		 * freed. Attempt to allocate vmemmap here so that we can take
		 * appropriate action on failure.
		 */
		rc = hugetlb_vmemmap_restore(h, &folio->page);
		if (!rc) {
			update_and_free_hugetlb_folio(h, folio, false);
		} else {
			spin_lock_irq(&hugetlb_lock);
			add_hugetlb_folio(h, folio, false);
			h->max_huge_pages++;
			spin_unlock_irq(&hugetlb_lock);
		}

		return rc;
	}
out:
	spin_unlock_irq(&hugetlb_lock);
	return rc;
}

/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_huge_page() returns with an error, all
 * free hugepages that were dissolved before that error are lost.
 */
int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int rc = 0;
	unsigned int order;
	struct hstate *h;

	if (!hugepages_supported())
		return rc;

	order = huge_page_order(&default_hstate);
	for_each_hstate(h)
		order = min(order, huge_page_order(h));

	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
		page = pfn_to_page(pfn);
		rc = dissolve_free_huge_page(page);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Allocates a fresh surplus page from the page allocator.
 */
static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
				gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	struct folio *folio = NULL;

	if (hstate_is_gigantic(h))
		return NULL;

	spin_lock_irq(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
		goto out_unlock;
	spin_unlock_irq(&hugetlb_lock);

	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
	if (!folio)
		return NULL;

	spin_lock_irq(&hugetlb_lock);
	/*
	 * We could have raced with the pool size change.
	 * Double check that and simply deallocate the new page
	 * if we would end up overcommitting the surpluses. Abuse
	 * temporary page to workaround the nasty free_huge_folio
	 * codeflow.
	 */
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		folio_set_hugetlb_temporary(folio);
		spin_unlock_irq(&hugetlb_lock);
		free_huge_folio(folio);
		return NULL;
	}

	h->surplus_huge_pages++;
	h->surplus_huge_pages_node[folio_nid(folio)]++;

out_unlock:
	spin_unlock_irq(&hugetlb_lock);

	return folio;
}

static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask)
{
	struct folio *folio;

	if (hstate_is_gigantic(h))
		return NULL;

	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
	if (!folio)
		return NULL;

	/* fresh huge pages are frozen */
	folio_ref_unfreeze(folio, 1);
	/*
	 * We do not account these pages as surplus because they are only
	 * temporary and will be released properly on the last reference.
	 */
	folio_set_hugetlb_temporary(folio);

	return folio;
}

/*
 * Use the VMA's mpolicy to allocate a huge page from the buddy.
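 * For MPOL_PREFERRED_MANY the code below makes two attempts: the first
 * is restricted to the preferred nodes with direct reclaim disabled,
 * and only if that fails is the allocation retried against the full
 * node mask.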
*/ static struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { struct folio *folio = NULL; struct mempolicy *mpol; gfp_t gfp_mask = htlb_alloc_mask(h); int nid; nodemask_t *nodemask; nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); if (mpol_is_preferred_many(mpol)) { gfp_t gfp = gfp_mask | __GFP_NOWARN; gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask); /* Fallback to all nodes if page==NULL */ nodemask = NULL; } if (!folio) folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask); mpol_cond_put(mpol); return folio; } /* folio migration callback function */ struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask, gfp_t gfp_mask) { spin_lock_irq(&hugetlb_lock); if (available_huge_pages(h)) { struct folio *folio; folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid, nmask); if (folio) { spin_unlock_irq(&hugetlb_lock); return folio; } } spin_unlock_irq(&hugetlb_lock); return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask); } /* mempolicy aware migration callback */ struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address) { struct mempolicy *mpol; nodemask_t *nodemask; struct folio *folio; gfp_t gfp_mask; int node; gfp_mask = htlb_alloc_mask(h); node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask); mpol_cond_put(mpol); return folio; } /* * Increase the hugetlb pool such that it can accommodate a reservation * of size 'delta'. */ static int gather_surplus_pages(struct hstate *h, long delta) __must_hold(&hugetlb_lock) { LIST_HEAD(surplus_list); struct folio *folio, *tmp; int ret; long i; long needed, allocated; bool alloc_ok = true; lockdep_assert_held(&hugetlb_lock); needed = (h->resv_huge_pages + delta) - h->free_huge_pages; if (needed <= 0) { h->resv_huge_pages += delta; return 0; } allocated = 0; ret = -ENOMEM; retry: spin_unlock_irq(&hugetlb_lock); for (i = 0; i < needed; i++) { folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), NUMA_NO_NODE, NULL); if (!folio) { alloc_ok = false; break; } list_add(&folio->lru, &surplus_list); cond_resched(); } allocated += i; /* * After retaking hugetlb_lock, we need to recalculate 'needed' * because either resv_huge_pages or free_huge_pages may have changed. */ spin_lock_irq(&hugetlb_lock); needed = (h->resv_huge_pages + delta) - (h->free_huge_pages + allocated); if (needed > 0) { if (alloc_ok) goto retry; /* * We were not able to allocate enough pages to * satisfy the entire reservation so we free what * we've allocated so far. */ goto free; } /* * The surplus_list now contains _at_least_ the number of extra pages * needed to accommodate the reservation. Add the appropriate number * of pages to the hugetlb pool and free the extras back to the buddy * allocator. Commit the entire reservation here to prevent another * process from stealing the pages as they are added to the pool but * before they are reserved. */ needed += allocated; h->resv_huge_pages += delta; ret = 0; /* Free the needed pages to the hugetlb pool */ list_for_each_entry_safe(folio, tmp, &surplus_list, lru) { if ((--needed) < 0) break; /* Add the page to the hugetlb allocator */ enqueue_hugetlb_folio(h, folio); } free: spin_unlock_irq(&hugetlb_lock); /* * Free unnecessary surplus pages to the buddy allocator. 
* Pages have no ref count, call free_huge_folio directly. */ list_for_each_entry_safe(folio, tmp, &surplus_list, lru) free_huge_folio(folio); spin_lock_irq(&hugetlb_lock); return ret; } /* * This routine has two main purposes: * 1) Decrement the reservation count (resv_huge_pages) by the value passed * in unused_resv_pages. This corresponds to the prior adjustments made * to the associated reservation map. * 2) Free any unused surplus pages that may have been allocated to satisfy * the reservation. As many as unused_resv_pages may be freed. */ static void return_unused_surplus_pages(struct hstate *h, unsigned long unused_resv_pages) { unsigned long nr_pages; struct page *page; LIST_HEAD(page_list); lockdep_assert_held(&hugetlb_lock); /* Uncommit the reservation */ h->resv_huge_pages -= unused_resv_pages; if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) goto out; /* * Part (or even all) of the reservation could have been backed * by pre-allocated pages. Only free surplus pages. */ nr_pages = min(unused_resv_pages, h->surplus_huge_pages); /* * We want to release as many surplus pages as possible, spread * evenly across all nodes with memory. Iterate across these nodes * until we can no longer free unreserved surplus pages. This occurs * when the nodes with surplus pages have no free pages. * remove_pool_huge_page() will balance the freed pages across the * on-line nodes with memory and will handle the hstate accounting. */ while (nr_pages--) { page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1); if (!page) goto out; list_add(&page->lru, &page_list); } out: spin_unlock_irq(&hugetlb_lock); update_and_free_pages_bulk(h, &page_list); spin_lock_irq(&hugetlb_lock); } /* * vma_needs_reservation, vma_commit_reservation and vma_end_reservation * are used by the huge page allocation routines to manage reservations. * * vma_needs_reservation is called to determine if the huge page at addr * within the vma has an associated reservation. If a reservation is * needed, the value 1 is returned. The caller is then responsible for * managing the global reservation and subpool usage counts. After * the huge page has been allocated, vma_commit_reservation is called * to add the page to the reservation map. If the page allocation fails, * the reservation must be ended instead of committed. vma_end_reservation * is called in such cases. * * In the normal case, vma_commit_reservation returns the same value * as the preceding vma_needs_reservation call. The only time this * is not the case is if a reserve map was changed between calls. It * is the responsibility of the caller to notice the difference and * take appropriate action. * * vma_add_reservation is used in error paths where a reservation must * be restored when a newly allocated huge page must be freed. It is * to be called after calling vma_needs_reservation to determine if a * reservation exists. * * vma_del_reservation is used in error paths where an entry in the reserve * map was created during huge page allocation and must be removed. It is to * be called after calling vma_needs_reservation to determine if a reservation * exists. 
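 *
 * A minimal usage sketch of the common path described above (error
 * handling and locking elided; allocate_the_folio() is a stand-in for
 * the real allocation, not a helper in this file):
 *
 *	if (vma_needs_reservation(h, vma, addr) < 0)
 *		return ERR_PTR(-ENOMEM);
 *	folio = allocate_the_folio();
 *	if (folio)
 *		vma_commit_reservation(h, vma, addr);
 *	else
 *		vma_end_reservation(h, vma, addr);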
*/ enum vma_resv_mode { VMA_NEEDS_RESV, VMA_COMMIT_RESV, VMA_END_RESV, VMA_ADD_RESV, VMA_DEL_RESV, }; static long __vma_reservation_common(struct hstate *h, struct vm_area_struct *vma, unsigned long addr, enum vma_resv_mode mode) { struct resv_map *resv; pgoff_t idx; long ret; long dummy_out_regions_needed; resv = vma_resv_map(vma); if (!resv) return 1; idx = vma_hugecache_offset(h, vma, addr); switch (mode) { case VMA_NEEDS_RESV: ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed); /* We assume that vma_reservation_* routines always operate on * 1 page, and that adding to resv map a 1 page entry can only * ever require 1 region. */ VM_BUG_ON(dummy_out_regions_needed != 1); break; case VMA_COMMIT_RESV: ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); /* region_add calls of range 1 should never fail. */ VM_BUG_ON(ret < 0); break; case VMA_END_RESV: region_abort(resv, idx, idx + 1, 1); ret = 0; break; case VMA_ADD_RESV: if (vma->vm_flags & VM_MAYSHARE) { ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); /* region_add calls of range 1 should never fail. */ VM_BUG_ON(ret < 0); } else { region_abort(resv, idx, idx + 1, 1); ret = region_del(resv, idx, idx + 1); } break; case VMA_DEL_RESV: if (vma->vm_flags & VM_MAYSHARE) { region_abort(resv, idx, idx + 1, 1); ret = region_del(resv, idx, idx + 1); } else { ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); /* region_add calls of range 1 should never fail. */ VM_BUG_ON(ret < 0); } break; default: BUG(); } if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV) return ret; /* * We know private mapping must have HPAGE_RESV_OWNER set. * * In most cases, reserves always exist for private mappings. * However, a file associated with mapping could have been * hole punched or truncated after reserves were consumed. * As subsequent fault on such a range will not use reserves. * Subtle - The reserve map for private mappings has the * opposite meaning than that of shared mappings. If NO * entry is in the reserve map, it means a reservation exists. * If an entry exists in the reserve map, it means the * reservation has already been consumed. As a result, the * return value of this routine is the opposite of the * value returned from reserve map manipulation routines above. */ if (ret > 0) return 0; if (ret == 0) return 1; return ret; } static long vma_needs_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); } static long vma_commit_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); } static void vma_end_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); } static long vma_add_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); } static long vma_del_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); } /* * This routine is called to restore reservation information on error paths. * It should ONLY be called for folios allocated via alloc_hugetlb_folio(), * and the hugetlb mutex should remain held when calling this routine. * * It handles two specific cases: * 1) A reservation was in place and the folio consumed the reservation. * hugetlb_restore_reserve is set in the folio. 
* 2) No reservation was in place for the page, so hugetlb_restore_reserve is * not set. However, alloc_hugetlb_folio always updates the reserve map. * * In case 1, free_huge_folio later in the error path will increment the * global reserve count. But, free_huge_folio does not have enough context * to adjust the reservation map. This case deals primarily with private * mappings. Adjust the reserve map here to be consistent with global * reserve count adjustments to be made by free_huge_folio. Make sure the * reserve map indicates there is a reservation present. * * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio. */ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, unsigned long address, struct folio *folio) { long rc = vma_needs_reservation(h, vma, address); if (folio_test_hugetlb_restore_reserve(folio)) { if (unlikely(rc < 0)) /* * Rare out of memory condition in reserve map * manipulation. Clear hugetlb_restore_reserve so * that global reserve count will not be incremented * by free_huge_folio. This will make it appear * as though the reservation for this folio was * consumed. This may prevent the task from * faulting in the folio at a later time. This * is better than inconsistent global huge page * accounting of reserve counts. */ folio_clear_hugetlb_restore_reserve(folio); else if (rc) (void)vma_add_reservation(h, vma, address); else vma_end_reservation(h, vma, address); } else { if (!rc) { /* * This indicates there is an entry in the reserve map * not added by alloc_hugetlb_folio. We know it was added * before the alloc_hugetlb_folio call, otherwise * hugetlb_restore_reserve would be set on the folio. * Remove the entry so that a subsequent allocation * does not consume a reservation. */ rc = vma_del_reservation(h, vma, address); if (rc < 0) /* * VERY rare out of memory condition. Since * we can not delete the entry, set * hugetlb_restore_reserve so that the reserve * count will be incremented when the folio * is freed. This reserve will be consumed * on a subsequent allocation. */ folio_set_hugetlb_restore_reserve(folio); } else if (rc < 0) { /* * Rare out of memory condition from * vma_needs_reservation call. Memory allocation is * only attempted if a new entry is needed. Therefore, * this implies there is not an entry in the * reserve map. * * For shared mappings, no entry in the map indicates * no reservation. We are done. */ if (!(vma->vm_flags & VM_MAYSHARE)) /* * For private mappings, no entry indicates * a reservation is present. Since we can * not add an entry, set hugetlb_restore_reserve * on the folio so reserve count will be * incremented when freed. This reserve will * be consumed on a subsequent allocation. */ folio_set_hugetlb_restore_reserve(folio); } else /* * No reservation present, do nothing */ vma_end_reservation(h, vma, address); } } /* * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve * the old one * @h: struct hstate old page belongs to * @old_folio: Old folio to dissolve * @list: List to isolate the page in case we need to * Returns 0 on success, otherwise negated error. */ static int alloc_and_dissolve_hugetlb_folio(struct hstate *h, struct folio *old_folio, struct list_head *list) { gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; int nid = folio_nid(old_folio); struct folio *new_folio; int ret = 0; /* * Before dissolving the folio, we need to allocate a new one for the * pool to remain stable. 
Here, we allocate the folio and 'prep' it * by doing everything but actually updating counters and adding to * the pool. This simplifies and let us do most of the processing * under the lock. */ new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, NULL, NULL); if (!new_folio) return -ENOMEM; __prep_new_hugetlb_folio(h, new_folio); retry: spin_lock_irq(&hugetlb_lock); if (!folio_test_hugetlb(old_folio)) { /* * Freed from under us. Drop new_folio too. */ goto free_new; } else if (folio_ref_count(old_folio)) { bool isolated; /* * Someone has grabbed the folio, try to isolate it here. * Fail with -EBUSY if not possible. */ spin_unlock_irq(&hugetlb_lock); isolated = isolate_hugetlb(old_folio, list); ret = isolated ? 0 : -EBUSY; spin_lock_irq(&hugetlb_lock); goto free_new; } else if (!folio_test_hugetlb_freed(old_folio)) { /* * Folio's refcount is 0 but it has not been enqueued in the * freelist yet. Race window is small, so we can succeed here if * we retry. */ spin_unlock_irq(&hugetlb_lock); cond_resched(); goto retry; } else { /* * Ok, old_folio is still a genuine free hugepage. Remove it from * the freelist and decrease the counters. These will be * incremented again when calling __prep_account_new_huge_page() * and enqueue_hugetlb_folio() for new_folio. The counters will * remain stable since this happens under the lock. */ remove_hugetlb_folio(h, old_folio, false); /* * Ref count on new_folio is already zero as it was dropped * earlier. It can be directly added to the pool free list. */ __prep_account_new_huge_page(h, nid); enqueue_hugetlb_folio(h, new_folio); /* * Folio has been replaced, we can safely free the old one. */ spin_unlock_irq(&hugetlb_lock); update_and_free_hugetlb_folio(h, old_folio, false); } return ret; free_new: spin_unlock_irq(&hugetlb_lock); /* Folio has a zero ref count, but needs a ref to be freed */ folio_ref_unfreeze(new_folio, 1); update_and_free_hugetlb_folio(h, new_folio, false); return ret; } int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) { struct hstate *h; struct folio *folio = page_folio(page); int ret = -EBUSY; /* * The page might have been dissolved from under our feet, so make sure * to carefully check the state under the lock. * Return success when racing as if we dissolved the page ourselves. */ spin_lock_irq(&hugetlb_lock); if (folio_test_hugetlb(folio)) { h = folio_hstate(folio); } else { spin_unlock_irq(&hugetlb_lock); return 0; } spin_unlock_irq(&hugetlb_lock); /* * Fence off gigantic pages as there is a cyclic dependency between * alloc_contig_range and them. Return -ENOMEM as this has the effect * of bailing out right away without further retrying. */ if (hstate_is_gigantic(h)) return -ENOMEM; if (folio_ref_count(folio) && isolate_hugetlb(folio, list)) ret = 0; else if (!folio_ref_count(folio)) ret = alloc_and_dissolve_hugetlb_folio(h, folio, list); return ret; } struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) { struct hugepage_subpool *spool = subpool_vma(vma); struct hstate *h = hstate_vma(vma); struct folio *folio; long map_chg, map_commit; long gbl_chg; int ret, idx; struct hugetlb_cgroup *h_cg = NULL; bool deferred_reserve; idx = hstate_index(h); /* * Examine the region/reserve map to determine if the process * has a reservation for the page to be allocated. A return * code of zero indicates a reservation exists (no change). 
*/ map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); if (map_chg < 0) return ERR_PTR(-ENOMEM); /* * Processes that did not create the mapping will have no * reserves as indicated by the region/reserve map. Check * that the allocation will not exceed the subpool limit. * Allocations for MAP_NORESERVE mappings also need to be * checked against any subpool limit. */ if (map_chg || avoid_reserve) { gbl_chg = hugepage_subpool_get_pages(spool, 1); if (gbl_chg < 0) { vma_end_reservation(h, vma, addr); return ERR_PTR(-ENOSPC); } /* * Even though there was no reservation in the region/reserve * map, there could be reservations associated with the * subpool that can be used. This would be indicated if the * return value of hugepage_subpool_get_pages() is zero. * However, if avoid_reserve is specified we still avoid even * the subpool reservations. */ if (avoid_reserve) gbl_chg = 1; } /* If this allocation is not consuming a reservation, charge it now. */ deferred_reserve = map_chg || avoid_reserve; if (deferred_reserve) { ret = hugetlb_cgroup_charge_cgroup_rsvd( idx, pages_per_huge_page(h), &h_cg); if (ret) goto out_subpool_put; } ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); if (ret) goto out_uncharge_cgroup_reservation; spin_lock_irq(&hugetlb_lock); /* * glb_chg is passed to indicate whether or not a page must be taken * from the global free pool (global change). gbl_chg == 0 indicates * a reservation exists for the allocation. */ folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg); if (!folio) { spin_unlock_irq(&hugetlb_lock); folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr); if (!folio) goto out_uncharge_cgroup; spin_lock_irq(&hugetlb_lock); if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { folio_set_hugetlb_restore_reserve(folio); h->resv_huge_pages--; } list_add(&folio->lru, &h->hugepage_activelist); folio_ref_unfreeze(folio, 1); /* Fall through */ } hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio); /* If allocation is not consuming a reservation, also store the * hugetlb_cgroup pointer on the page. */ if (deferred_reserve) { hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h), h_cg, folio); } spin_unlock_irq(&hugetlb_lock); hugetlb_set_folio_subpool(folio, spool); map_commit = vma_commit_reservation(h, vma, addr); if (unlikely(map_chg > map_commit)) { /* * The page was added to the reservation map between * vma_needs_reservation and vma_commit_reservation. * This indicates a race with hugetlb_reserve_pages. * Adjust for the subpool count incremented above AND * in hugetlb_reserve_pages for the same page. Also, * the reservation count added in hugetlb_reserve_pages * no longer applies. 
*/ long rsv_adjust; rsv_adjust = hugepage_subpool_put_pages(spool, 1); hugetlb_acct_memory(h, -rsv_adjust); if (deferred_reserve) hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), pages_per_huge_page(h), folio); } return folio; out_uncharge_cgroup: hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); out_uncharge_cgroup_reservation: if (deferred_reserve) hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), h_cg); out_subpool_put: if (map_chg || avoid_reserve) hugepage_subpool_put_pages(spool, 1); vma_end_reservation(h, vma, addr); return ERR_PTR(-ENOSPC); } int alloc_bootmem_huge_page(struct hstate *h, int nid) __attribute__ ((weak, alias("__alloc_bootmem_huge_page"))); int __alloc_bootmem_huge_page(struct hstate *h, int nid) { struct huge_bootmem_page *m = NULL; /* initialize for clang */ int nr_nodes, node; /* do node specific alloc */ if (nid != NUMA_NO_NODE) { m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h), 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid); if (!m) return 0; goto found; } /* allocate from next node when distributing huge pages */ for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { m = memblock_alloc_try_nid_raw( huge_page_size(h), huge_page_size(h), 0, MEMBLOCK_ALLOC_ACCESSIBLE, node); /* * Use the beginning of the huge page to store the * huge_bootmem_page struct (until gather_bootmem * puts them into the mem_map). */ if (!m) return 0; goto found; } found: /* Put them into a private list first because mem_map is not up yet */ INIT_LIST_HEAD(&m->list); list_add(&m->list, &huge_boot_pages); m->hstate = h; return 1; } /* * Put bootmem huge pages into the standard lists after mem_map is up. * Note: This only applies to gigantic (order > MAX_ORDER) pages. */ static void __init gather_bootmem_prealloc(void) { struct huge_bootmem_page *m; list_for_each_entry(m, &huge_boot_pages, list) { struct page *page = virt_to_page(m); struct folio *folio = page_folio(page); struct hstate *h = m->hstate; VM_BUG_ON(!hstate_is_gigantic(h)); WARN_ON(folio_ref_count(folio) != 1); if (prep_compound_gigantic_folio(folio, huge_page_order(h))) { WARN_ON(folio_test_reserved(folio)); prep_new_hugetlb_folio(h, folio, folio_nid(folio)); free_huge_folio(folio); /* add to the hugepage allocator */ } else { /* VERY unlikely inflated ref count on a tail page */ free_gigantic_folio(folio, huge_page_order(h)); } /* * We need to restore the 'stolen' pages to totalram_pages * in order to fix confusing memory reports from free(1) and * other side-effects, like CommitLimit going negative. */ adjust_managed_page_count(page, pages_per_huge_page(h)); cond_resched(); } } static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid) { unsigned long i; char buf[32]; for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { if (hstate_is_gigantic(h)) { if (!alloc_bootmem_huge_page(h, nid)) break; } else { struct folio *folio; gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, &node_states[N_MEMORY], NULL); if (!folio) break; free_huge_folio(folio); /* free it into the hugepage allocator */ } cond_resched(); } if (i == h->max_huge_pages_node[nid]) return; string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); pr_warn("HugeTLB: allocating %u of page size %s failed node%d. 
Only allocated %lu hugepages.\n", h->max_huge_pages_node[nid], buf, nid, i); h->max_huge_pages -= (h->max_huge_pages_node[nid] - i); h->max_huge_pages_node[nid] = i; } static void __init hugetlb_hstate_alloc_pages(struct hstate *h) { unsigned long i; nodemask_t *node_alloc_noretry; bool node_specific_alloc = false; /* skip gigantic hugepages allocation if hugetlb_cma enabled */ if (hstate_is_gigantic(h) && hugetlb_cma_size) { pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); return; } /* do node specific alloc */ for_each_online_node(i) { if (h->max_huge_pages_node[i] > 0) { hugetlb_hstate_alloc_pages_onenode(h, i); node_specific_alloc = true; } } if (node_specific_alloc) return; /* below will do all node balanced alloc */ if (!hstate_is_gigantic(h)) { /* * Bit mask controlling how hard we retry per-node allocations. * Ignore errors as lower level routines can deal with * node_alloc_noretry == NULL. If this kmalloc fails at boot * time, we are likely in bigger trouble. */ node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry), GFP_KERNEL); } else { /* allocations done at boot time */ node_alloc_noretry = NULL; } /* bit mask controlling how hard we retry per-node allocations */ if (node_alloc_noretry) nodes_clear(*node_alloc_noretry); for (i = 0; i < h->max_huge_pages; ++i) { if (hstate_is_gigantic(h)) { if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE)) break; } else if (!alloc_pool_huge_page(h, &node_states[N_MEMORY], node_alloc_noretry)) break; cond_resched(); } if (i < h->max_huge_pages) { char buf[32]; string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n", h->max_huge_pages, buf, i); h->max_huge_pages = i; } kfree(node_alloc_noretry); } static void __init hugetlb_init_hstates(void) { struct hstate *h, *h2; for_each_hstate(h) { /* oversize hugepages were init'ed in early boot */ if (!hstate_is_gigantic(h)) hugetlb_hstate_alloc_pages(h); /* * Set demote order for each hstate. Note that * h->demote_order is initially 0. * - We can not demote gigantic pages if runtime freeing * is not supported, so skip this. * - If CMA allocation is possible, we can not demote * HUGETLB_PAGE_ORDER or smaller size pages. 
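 * For example, with the usual x86_64 hstates and no CMA restriction,
 * the loop below leaves the 1 GiB hstate with a demote_order equal to
 * the 2 MiB order (HUGETLB_PAGE_ORDER), while the 2 MiB hstate keeps a
 * demote_order of 0.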
*/ if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) continue; if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER) continue; for_each_hstate(h2) { if (h2 == h) continue; if (h2->order < h->order && h2->order > h->demote_order) h->demote_order = h2->order; } } } static void __init report_hugepages(void) { struct hstate *h; for_each_hstate(h) { char buf[32]; string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n", buf, h->free_huge_pages); pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n", hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf); } } #ifdef CONFIG_HIGHMEM static void try_to_free_low(struct hstate *h, unsigned long count, nodemask_t *nodes_allowed) { int i; LIST_HEAD(page_list); lockdep_assert_held(&hugetlb_lock); if (hstate_is_gigantic(h)) return; /* * Collect pages to be freed on a list, and free after dropping lock */ for_each_node_mask(i, *nodes_allowed) { struct page *page, *next; struct list_head *freel = &h->hugepage_freelists[i]; list_for_each_entry_safe(page, next, freel, lru) { if (count >= h->nr_huge_pages) goto out; if (PageHighMem(page)) continue; remove_hugetlb_folio(h, page_folio(page), false); list_add(&page->lru, &page_list); } } out: spin_unlock_irq(&hugetlb_lock); update_and_free_pages_bulk(h, &page_list); spin_lock_irq(&hugetlb_lock); } #else static inline void try_to_free_low(struct hstate *h, unsigned long count, nodemask_t *nodes_allowed) { } #endif /* * Increment or decrement surplus_huge_pages. Keep node-specific counters * balanced by operating on them in a round-robin fashion. * Returns 1 if an adjustment was made. */ static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, int delta) { int nr_nodes, node; lockdep_assert_held(&hugetlb_lock); VM_BUG_ON(delta != -1 && delta != 1); if (delta < 0) { for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { if (h->surplus_huge_pages_node[node]) goto found; } } else { for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { if (h->surplus_huge_pages_node[node] < h->nr_huge_pages_node[node]) goto found; } } return 0; found: h->surplus_huge_pages += delta; h->surplus_huge_pages_node[node] += delta; return 1; } #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, nodemask_t *nodes_allowed) { unsigned long min_count, ret; struct page *page; LIST_HEAD(page_list); NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL); /* * Bit mask controlling how hard we retry per-node allocations. * If we can not allocate the bit mask, do not attempt to allocate * the requested huge pages. */ if (node_alloc_noretry) nodes_clear(*node_alloc_noretry); else return -ENOMEM; /* * resize_lock mutex prevents concurrent adjustments to number of * pages in hstate via the proc/sysfs interfaces. */ mutex_lock(&h->resize_lock); flush_free_hpage_work(h); spin_lock_irq(&hugetlb_lock); /* * Check for a node specific request. * Changing node specific huge page count may require a corresponding * change to the global count. In any case, the passed node mask * (nodes_allowed) will restrict alloc/free to the specified node. */ if (nid != NUMA_NO_NODE) { unsigned long old_count = count; count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; /* * User may have specified a large count value which caused the * above calculation to overflow. In this case, they wanted * to allocate as many huge pages as possible. 
Set count to * largest possible value to align with their intention. */ if (count < old_count) count = ULONG_MAX; } /* * Gigantic pages runtime allocation depend on the capability for large * page range allocation. * If the system does not provide this feature, return an error when * the user tries to allocate gigantic pages but let the user free the * boottime allocated gigantic pages. */ if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { if (count > persistent_huge_pages(h)) { spin_unlock_irq(&hugetlb_lock); mutex_unlock(&h->resize_lock); NODEMASK_FREE(node_alloc_noretry); return -EINVAL; } /* Fall through to decrease pool */ } /* * Increase the pool size * First take pages out of surplus state. Then make up the * remaining difference by allocating fresh huge pages. * * We might race with alloc_surplus_hugetlb_folio() here and be unable * to convert a surplus huge page to a normal huge page. That is * not critical, though, it just means the overall size of the * pool might be one hugepage larger than it needs to be, but * within all the constraints specified by the sysctls. */ while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { if (!adjust_pool_surplus(h, nodes_allowed, -1)) break; } while (count > persistent_huge_pages(h)) { /* * If this allocation races such that we no longer need the * page, free_huge_folio will handle it by freeing the page * and reducing the surplus. */ spin_unlock_irq(&hugetlb_lock); /* yield cpu to avoid soft lockup */ cond_resched(); ret = alloc_pool_huge_page(h, nodes_allowed, node_alloc_noretry); spin_lock_irq(&hugetlb_lock); if (!ret) goto out; /* Bail for signals. Probably ctrl-c from user */ if (signal_pending(current)) goto out; } /* * Decrease the pool size * First return free pages to the buddy allocator (being careful * to keep enough around to satisfy reservations). Then place * pages into surplus state as needed so the pool will shrink * to the desired size as pages become free. * * By placing pages into the surplus state independent of the * overcommit value, we are allowing the surplus pool size to * exceed overcommit. There are few sane options here. Since * alloc_surplus_hugetlb_folio() is checking the global counter, * though, we'll note that we're not allowed to exceed surplus * and won't grow the pool anywhere else. Not until one of the * sysctls are changed, or the surplus pages go out of use. 
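 *
 * As a worked example of the shrink path (illustrative numbers only):
 * with nr_huge_pages = 10, free_huge_pages = 4, resv_huge_pages = 3 and
 * a requested count of 2, min_count below becomes max(2, 3 + 10 - 4) = 9,
 * so only one free, unreserved page can be handed back to the buddy
 * allocator immediately; the remaining excess is converted to surplus
 * and freed later as the in-use pages are released.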
*/ min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; min_count = max(count, min_count); try_to_free_low(h, min_count, nodes_allowed); /* * Collect pages to be removed on list without dropping lock */ while (min_count < persistent_huge_pages(h)) { page = remove_pool_huge_page(h, nodes_allowed, 0); if (!page) break; list_add(&page->lru, &page_list); } /* free the pages after dropping lock */ spin_unlock_irq(&hugetlb_lock); update_and_free_pages_bulk(h, &page_list); flush_free_hpage_work(h); spin_lock_irq(&hugetlb_lock); while (count < persistent_huge_pages(h)) { if (!adjust_pool_surplus(h, nodes_allowed, 1)) break; } out: h->max_huge_pages = persistent_huge_pages(h); spin_unlock_irq(&hugetlb_lock); mutex_unlock(&h->resize_lock); NODEMASK_FREE(node_alloc_noretry); return 0; } static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio) { int i, nid = folio_nid(folio); struct hstate *target_hstate; struct page *subpage; struct folio *inner_folio; int rc = 0; target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order); remove_hugetlb_folio_for_demote(h, folio, false); spin_unlock_irq(&hugetlb_lock); rc = hugetlb_vmemmap_restore(h, &folio->page); if (rc) { /* Allocation of vmemmmap failed, we can not demote folio */ spin_lock_irq(&hugetlb_lock); folio_ref_unfreeze(folio, 1); add_hugetlb_folio(h, folio, false); return rc; } /* * Use destroy_compound_hugetlb_folio_for_demote for all huge page * sizes as it will not ref count folios. */ destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h)); /* * Taking target hstate mutex synchronizes with set_max_huge_pages. * Without the mutex, pages added to target hstate could be marked * as surplus. * * Note that we already hold h->resize_lock. To prevent deadlock, * use the convention of always taking larger size hstate mutex first. */ mutex_lock(&target_hstate->resize_lock); for (i = 0; i < pages_per_huge_page(h); i += pages_per_huge_page(target_hstate)) { subpage = folio_page(folio, i); inner_folio = page_folio(subpage); if (hstate_is_gigantic(target_hstate)) prep_compound_gigantic_folio_for_demote(inner_folio, target_hstate->order); else prep_compound_page(subpage, target_hstate->order); folio_change_private(inner_folio, NULL); prep_new_hugetlb_folio(target_hstate, inner_folio, nid); free_huge_folio(inner_folio); } mutex_unlock(&target_hstate->resize_lock); spin_lock_irq(&hugetlb_lock); /* * Not absolutely necessary, but for consistency update max_huge_pages * based on pool changes for the demoted page. */ h->max_huge_pages--; target_hstate->max_huge_pages += pages_per_huge_page(h) / pages_per_huge_page(target_hstate); return rc; } static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed) __must_hold(&hugetlb_lock) { int nr_nodes, node; struct folio *folio; lockdep_assert_held(&hugetlb_lock); /* We should never get here if no demote order */ if (!h->demote_order) { pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n"); return -EINVAL; /* internal error */ } for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { list_for_each_entry(folio, &h->hugepage_freelists[node], lru) { if (folio_test_hwpoison(folio)) continue; return demote_free_hugetlb_folio(h, folio); } } /* * Only way to get here is if all pages on free lists are poisoned. * Return -EBUSY so that caller will not retry. 
*/ return -EBUSY; } #define HSTATE_ATTR_RO(_name) \ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) #define HSTATE_ATTR_WO(_name) \ static struct kobj_attribute _name##_attr = __ATTR_WO(_name) #define HSTATE_ATTR(_name) \ static struct kobj_attribute _name##_attr = __ATTR_RW(_name) static struct kobject *hugepages_kobj; static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) { int i; for (i = 0; i < HUGE_MAX_HSTATE; i++) if (hstate_kobjs[i] == kobj) { if (nidp) *nidp = NUMA_NO_NODE; return &hstates[i]; } return kobj_to_node_hstate(kobj, nidp); } static ssize_t nr_hugepages_show_common(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hstate *h; unsigned long nr_huge_pages; int nid; h = kobj_to_hstate(kobj, &nid); if (nid == NUMA_NO_NODE) nr_huge_pages = h->nr_huge_pages; else nr_huge_pages = h->nr_huge_pages_node[nid]; return sysfs_emit(buf, "%lu\n", nr_huge_pages); } static ssize_t __nr_hugepages_store_common(bool obey_mempolicy, struct hstate *h, int nid, unsigned long count, size_t len) { int err; nodemask_t nodes_allowed, *n_mask; if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) return -EINVAL; if (nid == NUMA_NO_NODE) { /* * global hstate attribute */ if (!(obey_mempolicy && init_nodemask_of_mempolicy(&nodes_allowed))) n_mask = &node_states[N_MEMORY]; else n_mask = &nodes_allowed; } else { /* * Node specific request. count adjustment happens in * set_max_huge_pages() after acquiring hugetlb_lock. */ init_nodemask_of_node(&nodes_allowed, nid); n_mask = &nodes_allowed; } err = set_max_huge_pages(h, count, nid, n_mask); return err ? err : len; } static ssize_t nr_hugepages_store_common(bool obey_mempolicy, struct kobject *kobj, const char *buf, size_t len) { struct hstate *h; unsigned long count; int nid; int err; err = kstrtoul(buf, 10, &count); if (err) return err; h = kobj_to_hstate(kobj, &nid); return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); } static ssize_t nr_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return nr_hugepages_show_common(kobj, attr, buf); } static ssize_t nr_hugepages_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t len) { return nr_hugepages_store_common(false, kobj, buf, len); } HSTATE_ATTR(nr_hugepages); #ifdef CONFIG_NUMA /* * hstate attribute for optionally mempolicy-based constraint on persistent * huge page alloc/free. 
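 *
 * Written from user space this behaves like nr_hugepages, except that
 * the allocation/free is constrained by the writing task's mempolicy.
 * A small sketch (the hugepages-2048kB directory name assumes a 2 MiB
 * hstate; the usual fcntl.h and unistd.h declarations are assumed):
 *
 *	int fd = open("/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "8", 1);
 *		close(fd);
 *	}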
*/ static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return nr_hugepages_show_common(kobj, attr, buf); } static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t len) { return nr_hugepages_store_common(true, kobj, buf, len); } HSTATE_ATTR(nr_hugepages_mempolicy); #endif static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hstate *h = kobj_to_hstate(kobj, NULL); return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages); } static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long input; struct hstate *h = kobj_to_hstate(kobj, NULL); if (hstate_is_gigantic(h)) return -EINVAL; err = kstrtoul(buf, 10, &input); if (err) return err; spin_lock_irq(&hugetlb_lock); h->nr_overcommit_huge_pages = input; spin_unlock_irq(&hugetlb_lock); return count; } HSTATE_ATTR(nr_overcommit_hugepages); static ssize_t free_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hstate *h; unsigned long free_huge_pages; int nid; h = kobj_to_hstate(kobj, &nid); if (nid == NUMA_NO_NODE) free_huge_pages = h->free_huge_pages; else free_huge_pages = h->free_huge_pages_node[nid]; return sysfs_emit(buf, "%lu\n", free_huge_pages); } HSTATE_ATTR_RO(free_hugepages); static ssize_t resv_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hstate *h = kobj_to_hstate(kobj, NULL); return sysfs_emit(buf, "%lu\n", h->resv_huge_pages); } HSTATE_ATTR_RO(resv_hugepages); static ssize_t surplus_hugepages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hstate *h; unsigned long surplus_huge_pages; int nid; h = kobj_to_hstate(kobj, &nid); if (nid == NUMA_NO_NODE) surplus_huge_pages = h->surplus_huge_pages; else surplus_huge_pages = h->surplus_huge_pages_node[nid]; return sysfs_emit(buf, "%lu\n", surplus_huge_pages); } HSTATE_ATTR_RO(surplus_hugepages); static ssize_t demote_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t len) { unsigned long nr_demote; unsigned long nr_available; nodemask_t nodes_allowed, *n_mask; struct hstate *h; int err; int nid; err = kstrtoul(buf, 10, &nr_demote); if (err) return err; h = kobj_to_hstate(kobj, &nid); if (nid != NUMA_NO_NODE) { init_nodemask_of_node(&nodes_allowed, nid); n_mask = &nodes_allowed; } else { n_mask = &node_states[N_MEMORY]; } /* Synchronize with other sysfs operations modifying huge pages */ mutex_lock(&h->resize_lock); spin_lock_irq(&hugetlb_lock); while (nr_demote) { /* * Check for available pages to demote each time thorough the * loop as demote_pool_huge_page will drop hugetlb_lock. 
*/ if (nid != NUMA_NO_NODE) nr_available = h->free_huge_pages_node[nid]; else nr_available = h->free_huge_pages; nr_available -= h->resv_huge_pages; if (!nr_available) break; err = demote_pool_huge_page(h, n_mask); if (err) break; nr_demote--; } spin_unlock_irq(&hugetlb_lock); mutex_unlock(&h->resize_lock); if (err) return err; return len; } HSTATE_ATTR_WO(demote); static ssize_t demote_size_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hstate *h = kobj_to_hstate(kobj, NULL); unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K; return sysfs_emit(buf, "%lukB\n", demote_size); } static ssize_t demote_size_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { struct hstate *h, *demote_hstate; unsigned long demote_size; unsigned int demote_order; demote_size = (unsigned long)memparse(buf, NULL); demote_hstate = size_to_hstate(demote_size); if (!demote_hstate) return -EINVAL; demote_order = demote_hstate->order; if (demote_order < HUGETLB_PAGE_ORDER) return -EINVAL; /* demote order must be smaller than hstate order */ h = kobj_to_hstate(kobj, NULL); if (demote_order >= h->order) return -EINVAL; /* resize_lock synchronizes access to demote size and writes */ mutex_lock(&h->resize_lock); h->demote_order = demote_order; mutex_unlock(&h->resize_lock); return count; } HSTATE_ATTR(demote_size); static struct attribute *hstate_attrs[] = { &nr_hugepages_attr.attr, &nr_overcommit_hugepages_attr.attr, &free_hugepages_attr.attr, &resv_hugepages_attr.attr, &surplus_hugepages_attr.attr, #ifdef CONFIG_NUMA &nr_hugepages_mempolicy_attr.attr, #endif NULL, }; static const struct attribute_group hstate_attr_group = { .attrs = hstate_attrs, }; static struct attribute *hstate_demote_attrs[] = { &demote_size_attr.attr, &demote_attr.attr, NULL, }; static const struct attribute_group hstate_demote_attr_group = { .attrs = hstate_demote_attrs, }; static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, struct kobject **hstate_kobjs, const struct attribute_group *hstate_attr_group) { int retval; int hi = hstate_index(h); hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); if (!hstate_kobjs[hi]) return -ENOMEM; retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); if (retval) { kobject_put(hstate_kobjs[hi]); hstate_kobjs[hi] = NULL; return retval; } if (h->demote_order) { retval = sysfs_create_group(hstate_kobjs[hi], &hstate_demote_attr_group); if (retval) { pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name); sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group); kobject_put(hstate_kobjs[hi]); hstate_kobjs[hi] = NULL; return retval; } } return 0; } #ifdef CONFIG_NUMA static bool hugetlb_sysfs_initialized __ro_after_init; /* * node_hstate/s - associate per node hstate attributes, via their kobjects, * with node devices in node_devices[] using a parallel array. The array * index of a node device or _hstate == node id. * This is here to avoid any static dependency of the node device driver, in * the base kernel, on the hugetlb module. 
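 *
 * The per node attributes surface under the node devices, e.g.
 * /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 * (again with the directory name depending on the hstate size), and are
 * the subset listed in per_node_hstate_attrs below.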
*/ struct node_hstate { struct kobject *hugepages_kobj; struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; }; static struct node_hstate node_hstates[MAX_NUMNODES]; /* * A subset of global hstate attributes for node devices */ static struct attribute *per_node_hstate_attrs[] = { &nr_hugepages_attr.attr, &free_hugepages_attr.attr, &surplus_hugepages_attr.attr, NULL, }; static const struct attribute_group per_node_hstate_attr_group = { .attrs = per_node_hstate_attrs, }; /* * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. * Returns node id via non-NULL nidp. */ static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) { int nid; for (nid = 0; nid < nr_node_ids; nid++) { struct node_hstate *nhs = &node_hstates[nid]; int i; for (i = 0; i < HUGE_MAX_HSTATE; i++) if (nhs->hstate_kobjs[i] == kobj) { if (nidp) *nidp = nid; return &hstates[i]; } } BUG(); return NULL; } /* * Unregister hstate attributes from a single node device. * No-op if no hstate attributes attached. */ void hugetlb_unregister_node(struct node *node) { struct hstate *h; struct node_hstate *nhs = &node_hstates[node->dev.id]; if (!nhs->hugepages_kobj) return; /* no hstate attributes */ for_each_hstate(h) { int idx = hstate_index(h); struct kobject *hstate_kobj = nhs->hstate_kobjs[idx]; if (!hstate_kobj) continue; if (h->demote_order) sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group); sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group); kobject_put(hstate_kobj); nhs->hstate_kobjs[idx] = NULL; } kobject_put(nhs->hugepages_kobj); nhs->hugepages_kobj = NULL; } /* * Register hstate attributes for a single node device. * No-op if attributes already registered. */ void hugetlb_register_node(struct node *node) { struct hstate *h; struct node_hstate *nhs = &node_hstates[node->dev.id]; int err; if (!hugetlb_sysfs_initialized) return; if (nhs->hugepages_kobj) return; /* already allocated */ nhs->hugepages_kobj = kobject_create_and_add("hugepages", &node->dev.kobj); if (!nhs->hugepages_kobj) return; for_each_hstate(h) { err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, nhs->hstate_kobjs, &per_node_hstate_attr_group); if (err) { pr_err("HugeTLB: Unable to add hstate %s for node %d\n", h->name, node->dev.id); hugetlb_unregister_node(node); break; } } } /* * hugetlb init time: register hstate attributes for all registered node * devices of nodes that have memory. All on-line nodes should have * registered their associated device by this time. 
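 * hugetlb_sysfs_init() sets hugetlb_sysfs_initialized before calling this,
 * so the hugetlb_register_node() calls below will not bail out early.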
*/ static void __init hugetlb_register_all_nodes(void) { int nid; for_each_online_node(nid) hugetlb_register_node(node_devices[nid]); } #else /* !CONFIG_NUMA */ static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) { BUG(); if (nidp) *nidp = -1; return NULL; } static void hugetlb_register_all_nodes(void) { } #endif #ifdef CONFIG_CMA static void __init hugetlb_cma_check(void); #else static inline __init void hugetlb_cma_check(void) { } #endif static void __init hugetlb_sysfs_init(void) { struct hstate *h; int err; hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); if (!hugepages_kobj) return; for_each_hstate(h) { err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, hstate_kobjs, &hstate_attr_group); if (err) pr_err("HugeTLB: Unable to add hstate %s", h->name); } #ifdef CONFIG_NUMA hugetlb_sysfs_initialized = true; #endif hugetlb_register_all_nodes(); } #ifdef CONFIG_SYSCTL static void hugetlb_sysctl_init(void); #else static inline void hugetlb_sysctl_init(void) { } #endif static int __init hugetlb_init(void) { int i; BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE < __NR_HPAGEFLAGS); if (!hugepages_supported()) { if (hugetlb_max_hstate || default_hstate_max_huge_pages) pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n"); return 0; } /* * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some * architectures depend on setup being done here. */ hugetlb_add_hstate(HUGETLB_PAGE_ORDER); if (!parsed_default_hugepagesz) { /* * If we did not parse a default huge page size, set * default_hstate_idx to HPAGE_SIZE hstate. And, if the * number of huge pages for this default size was implicitly * specified, set that here as well. * Note that the implicit setting will overwrite an explicit * setting. A warning will be printed in this case. 
*/ default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE)); if (default_hstate_max_huge_pages) { if (default_hstate.max_huge_pages) { char buf[32]; string_get_size(huge_page_size(&default_hstate), 1, STRING_UNITS_2, buf, 32); pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n", default_hstate.max_huge_pages, buf); pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n", default_hstate_max_huge_pages); } default_hstate.max_huge_pages = default_hstate_max_huge_pages; for_each_online_node(i) default_hstate.max_huge_pages_node[i] = default_hugepages_in_node[i]; } } hugetlb_cma_check(); hugetlb_init_hstates(); gather_bootmem_prealloc(); report_hugepages(); hugetlb_sysfs_init(); hugetlb_cgroup_file_init(); hugetlb_sysctl_init(); #ifdef CONFIG_SMP num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus()); #else num_fault_mutexes = 1; #endif hugetlb_fault_mutex_table = kmalloc_array(num_fault_mutexes, sizeof(struct mutex), GFP_KERNEL); BUG_ON(!hugetlb_fault_mutex_table); for (i = 0; i < num_fault_mutexes; i++) mutex_init(&hugetlb_fault_mutex_table[i]); return 0; } subsys_initcall(hugetlb_init); /* Overwritten by architectures with more huge page sizes */ bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size) { return size == HPAGE_SIZE; } void __init hugetlb_add_hstate(unsigned int order) { struct hstate *h; unsigned long i; if (size_to_hstate(PAGE_SIZE << order)) { return; } BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); BUG_ON(order == 0); h = &hstates[hugetlb_max_hstate++]; mutex_init(&h->resize_lock); h->order = order; h->mask = ~(huge_page_size(h) - 1); for (i = 0; i < MAX_NUMNODES; ++i) INIT_LIST_HEAD(&h->hugepage_freelists[i]); INIT_LIST_HEAD(&h->hugepage_activelist); h->next_nid_to_alloc = first_memory_node; h->next_nid_to_free = first_memory_node; snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", huge_page_size(h)/SZ_1K); parsed_hstate = h; } bool __init __weak hugetlb_node_alloc_supported(void) { return true; } static void __init hugepages_clear_pages_in_node(void) { if (!hugetlb_max_hstate) { default_hstate_max_huge_pages = 0; memset(default_hugepages_in_node, 0, sizeof(default_hugepages_in_node)); } else { parsed_hstate->max_huge_pages = 0; memset(parsed_hstate->max_huge_pages_node, 0, sizeof(parsed_hstate->max_huge_pages_node)); } } /* * hugepages command line processing * hugepages normally follows a valid hugepagsz or default_hugepagsz * specification. If not, ignore the hugepages value. hugepages can also * be the first huge page command line option in which case it implicitly * specifies the number of huge pages for the default size. */ static int __init hugepages_setup(char *s) { unsigned long *mhp; static unsigned long *last_mhp; int node = NUMA_NO_NODE; int count; unsigned long tmp; char *p = s; if (!parsed_valid_hugepagesz) { pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s); parsed_valid_hugepagesz = true; return 1; } /* * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter * yet, so this hugepages= parameter goes to the "default hstate". * Otherwise, it goes with the previously parsed hugepagesz or * default_hugepagesz. 
*/ else if (!hugetlb_max_hstate) mhp = &default_hstate_max_huge_pages; else mhp = &parsed_hstate->max_huge_pages; if (mhp == last_mhp) { pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s); return 1; } while (*p) { count = 0; if (sscanf(p, "%lu%n", &tmp, &count) != 1) goto invalid; /* Parameter is node format */ if (p[count] == ':') { if (!hugetlb_node_alloc_supported()) { pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n"); return 1; } if (tmp >= MAX_NUMNODES || !node_online(tmp)) goto invalid; node = array_index_nospec(tmp, MAX_NUMNODES); p += count + 1; /* Parse hugepages */ if (sscanf(p, "%lu%n", &tmp, &count) != 1) goto invalid; if (!hugetlb_max_hstate) default_hugepages_in_node[node] = tmp; else parsed_hstate->max_huge_pages_node[node] = tmp; *mhp += tmp; /* Go to parse next node*/ if (p[count] == ',') p += count + 1; else break; } else { if (p != s) goto invalid; *mhp = tmp; break; } } /* * Global state is always initialized later in hugetlb_init. * But we need to allocate gigantic hstates here early to still * use the bootmem allocator. */ if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate)) hugetlb_hstate_alloc_pages(parsed_hstate); last_mhp = mhp; return 1; invalid: pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p); hugepages_clear_pages_in_node(); return 1; } __setup("hugepages=", hugepages_setup); /* * hugepagesz command line processing * A specific huge page size can only be specified once with hugepagesz. * hugepagesz is followed by hugepages on the command line. The global * variable 'parsed_valid_hugepagesz' is used to determine if prior * hugepagesz argument was valid. */ static int __init hugepagesz_setup(char *s) { unsigned long size; struct hstate *h; parsed_valid_hugepagesz = false; size = (unsigned long)memparse(s, NULL); if (!arch_hugetlb_valid_size(size)) { pr_err("HugeTLB: unsupported hugepagesz=%s\n", s); return 1; } h = size_to_hstate(size); if (h) { /* * hstate for this size already exists. This is normally * an error, but is allowed if the existing hstate is the * default hstate. More specifically, it is only allowed if * the number of huge pages for the default hstate was not * previously specified. */ if (!parsed_default_hugepagesz || h != &default_hstate || default_hstate.max_huge_pages) { pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s); return 1; } /* * No need to call hugetlb_add_hstate() as hstate already * exists. But, do set parsed_hstate so that a following * hugepages= parameter will be applied to this hstate. */ parsed_hstate = h; parsed_valid_hugepagesz = true; return 1; } hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); parsed_valid_hugepagesz = true; return 1; } __setup("hugepagesz=", hugepagesz_setup); /* * default_hugepagesz command line input * Only one instance of default_hugepagesz allowed on command line. 
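 * For example, on an architecture that supports 1GB huge pages, booting with
 * "default_hugepagesz=1G hugepages=16" makes 1GB the default size and
 * reserves sixteen such pages at boot (the values here are illustrative).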
*/ static int __init default_hugepagesz_setup(char *s) { unsigned long size; int i; parsed_valid_hugepagesz = false; if (parsed_default_hugepagesz) { pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s); return 1; } size = (unsigned long)memparse(s, NULL); if (!arch_hugetlb_valid_size(size)) { pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s); return 1; } hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); parsed_valid_hugepagesz = true; parsed_default_hugepagesz = true; default_hstate_idx = hstate_index(size_to_hstate(size)); /* * The number of default huge pages (for this size) could have been * specified as the first hugetlb parameter: hugepages=X. If so, * then default_hstate_max_huge_pages is set. If the default huge * page size is gigantic (> MAX_ORDER), then the pages must be * allocated here from bootmem allocator. */ if (default_hstate_max_huge_pages) { default_hstate.max_huge_pages = default_hstate_max_huge_pages; for_each_online_node(i) default_hstate.max_huge_pages_node[i] = default_hugepages_in_node[i]; if (hstate_is_gigantic(&default_hstate)) hugetlb_hstate_alloc_pages(&default_hstate); default_hstate_max_huge_pages = 0; } return 1; } __setup("default_hugepagesz=", default_hugepagesz_setup); static nodemask_t *policy_mbind_nodemask(gfp_t gfp) { #ifdef CONFIG_NUMA struct mempolicy *mpol = get_task_policy(current); /* * Only enforce MPOL_BIND policy which overlaps with cpuset policy * (from policy_nodemask) specifically for hugetlb case */ if (mpol->mode == MPOL_BIND && (apply_policy_zone(mpol, gfp_zone(gfp)) && cpuset_nodemask_valid_mems_allowed(&mpol->nodes))) return &mpol->nodes; #endif return NULL; } static unsigned int allowed_mems_nr(struct hstate *h) { int node; unsigned int nr = 0; nodemask_t *mbind_nodemask; unsigned int *array = h->free_huge_pages_node; gfp_t gfp_mask = htlb_alloc_mask(h); mbind_nodemask = policy_mbind_nodemask(gfp_mask); for_each_node_mask(node, cpuset_current_mems_allowed) { if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) nr += array[node]; } return nr; } #ifdef CONFIG_SYSCTL static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos, unsigned long *out) { struct ctl_table dup_table; /* * In order to avoid races with __do_proc_doulongvec_minmax(), we * can duplicate the @table and alter the duplicate of it. 
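	 * Only ->data of the on-stack copy is redirected to the
	 * caller-provided @out; all other fields are inherited from @table.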
*/ dup_table = *table; dup_table.data = out; return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); } static int hugetlb_sysctl_handler_common(bool obey_mempolicy, struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { struct hstate *h = &default_hstate; unsigned long tmp = h->max_huge_pages; int ret; if (!hugepages_supported()) return -EOPNOTSUPP; ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, &tmp); if (ret) goto out; if (write) ret = __nr_hugepages_store_common(obey_mempolicy, h, NUMA_NO_NODE, tmp, *length); out: return ret; } static int hugetlb_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { return hugetlb_sysctl_handler_common(false, table, write, buffer, length, ppos); } #ifdef CONFIG_NUMA static int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { return hugetlb_sysctl_handler_common(true, table, write, buffer, length, ppos); } #endif /* CONFIG_NUMA */ static int hugetlb_overcommit_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { struct hstate *h = &default_hstate; unsigned long tmp; int ret; if (!hugepages_supported()) return -EOPNOTSUPP; tmp = h->nr_overcommit_huge_pages; if (write && hstate_is_gigantic(h)) return -EINVAL; ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, &tmp); if (ret) goto out; if (write) { spin_lock_irq(&hugetlb_lock); h->nr_overcommit_huge_pages = tmp; spin_unlock_irq(&hugetlb_lock); } out: return ret; } static struct ctl_table hugetlb_table[] = { { .procname = "nr_hugepages", .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = hugetlb_sysctl_handler, }, #ifdef CONFIG_NUMA { .procname = "nr_hugepages_mempolicy", .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = &hugetlb_mempolicy_sysctl_handler, }, #endif { .procname = "hugetlb_shm_group", .data = &sysctl_hugetlb_shm_group, .maxlen = sizeof(gid_t), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "nr_overcommit_hugepages", .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = hugetlb_overcommit_handler, }, { } }; static void hugetlb_sysctl_init(void) { register_sysctl_init("vm", hugetlb_table); } #endif /* CONFIG_SYSCTL */ void hugetlb_report_meminfo(struct seq_file *m) { struct hstate *h; unsigned long total = 0; if (!hugepages_supported()) return; for_each_hstate(h) { unsigned long count = h->nr_huge_pages; total += huge_page_size(h) * count; if (h == &default_hstate) seq_printf(m, "HugePages_Total: %5lu\n" "HugePages_Free: %5lu\n" "HugePages_Rsvd: %5lu\n" "HugePages_Surp: %5lu\n" "Hugepagesize: %8lu kB\n", count, h->free_huge_pages, h->resv_huge_pages, h->surplus_huge_pages, huge_page_size(h) / SZ_1K); } seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K); } int hugetlb_report_node_meminfo(char *buf, int len, int nid) { struct hstate *h = &default_hstate; if (!hugepages_supported()) return 0; return sysfs_emit_at(buf, len, "Node %d HugePages_Total: %5u\n" "Node %d HugePages_Free: %5u\n" "Node %d HugePages_Surp: %5u\n", nid, h->nr_huge_pages_node[nid], nid, h->free_huge_pages_node[nid], nid, h->surplus_huge_pages_node[nid]); } void hugetlb_show_meminfo_node(int nid) { struct hstate *h; if (!hugepages_supported()) return; for_each_hstate(h) printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", nid, h->nr_huge_pages_node[nid], 
		       h->free_huge_pages_node[nid],
		       h->surplus_huge_pages_node[nid],
		       huge_page_size(h) / SZ_1K);
}

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
{
	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
		   K(atomic_long_read(&mm->hugetlb_usage)));
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	struct hstate *h;
	unsigned long nr_total_pages = 0;

	for_each_hstate(h)
		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
	return nr_total_pages;
}

static int hugetlb_acct_memory(struct hstate *h, long delta)
{
	int ret = -ENOMEM;

	if (!delta)
		return 0;

	spin_lock_irq(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * a reservation is completely rubbish in the presence of cpuset
	 * because the reservation is not checked against page availability
	 * for the current cpuset. The application can still be OOM'ed by
	 * the kernel for lack of free htlb pages in the cpuset that the
	 * task is in. Attempting to enforce strict accounting with cpuset
	 * is almost impossible (or too ugly) because cpusets are so fluid
	 * that tasks and memory nodes can be dynamically moved between them.
	 *
	 * The change of semantics for shared hugetlb mappings with cpuset
	 * is undesirable. However, in order to preserve some of the
	 * semantics, we fall back to checking against current free page
	 * availability as a best attempt, hopefully minimizing the impact
	 * of the changed semantics that cpuset brings.
	 *
	 * Apart from cpuset, the memory policy mechanism also determines
	 * from which node the kernel will allocate memory in a NUMA system,
	 * so, similar to cpuset, the memory policy of the current task has
	 * to be considered as well, for the same reasons as above.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(h, delta) < 0)
			goto out;

		if (delta > allowed_mems_nr(h)) {
			return_unused_surplus_pages(h, delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages(h, (unsigned long) -delta);

out:
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
	struct resv_map *resv = vma_resv_map(vma);

	/*
	 * HPAGE_RESV_OWNER indicates a private mapping.
	 * This new VMA should share its sibling's reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA. As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes. It is therefore safe to take a
	 * new reference here without additional locking.
	 */
	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
		kref_get(&resv->refs);
	}

	/*
	 * vma_lock structure for sharable mappings is vma specific.
	 * Clear old pointer (if copied via vm_area_dup) and allocate
	 * new structure. Before clearing, make sure vma_lock is not
	 * for this vma.
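	 * If the old pointer already refers to this vma, it was not copied
	 * from another vma; that case is unexpected here and only triggers
	 * a warning below.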
*/ if (vma->vm_flags & VM_MAYSHARE) { struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; if (vma_lock) { if (vma_lock->vma != vma) { vma->vm_private_data = NULL; hugetlb_vma_lock_alloc(vma); } else pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__); } else hugetlb_vma_lock_alloc(vma); } } static void hugetlb_vm_op_close(struct vm_area_struct *vma) { struct hstate *h = hstate_vma(vma); struct resv_map *resv; struct hugepage_subpool *spool = subpool_vma(vma); unsigned long reserve, start, end; long gbl_reserve; hugetlb_vma_lock_free(vma); resv = vma_resv_map(vma); if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) return; start = vma_hugecache_offset(h, vma, vma->vm_start); end = vma_hugecache_offset(h, vma, vma->vm_end); reserve = (end - start) - region_count(resv, start, end); hugetlb_cgroup_uncharge_counter(resv, start, end); if (reserve) { /* * Decrement reserve counts. The global reserve count may be * adjusted if the subpool has a minimum size. */ gbl_reserve = hugepage_subpool_put_pages(spool, reserve); hugetlb_acct_memory(h, -gbl_reserve); } kref_put(&resv->refs, resv_map_release); } static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) { if (addr & ~(huge_page_mask(hstate_vma(vma)))) return -EINVAL; /* * PMD sharing is only possible for PUD_SIZE-aligned address ranges * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this * split, unshare PMDs in the PUD_SIZE interval surrounding addr now. */ if (addr & ~PUD_MASK) { /* * hugetlb_vm_op_split is called right before we attempt to * split the VMA. We will need to unshare PMDs in the old and * new VMAs, so let's unshare before we split. */ unsigned long floor = addr & PUD_MASK; unsigned long ceil = floor + PUD_SIZE; if (floor >= vma->vm_start && ceil <= vma->vm_end) hugetlb_unshare_pmds(vma, floor, ceil); } return 0; } static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma) { return huge_page_size(hstate_vma(vma)); } /* * We cannot handle pagefaults against hugetlb pages at all. They cause * handle_mm_fault() to try to instantiate regular-sized pages in the * hugepage VMA. do_page_fault() is supposed to trap this, so BUG is we get * this far. */ static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf) { BUG(); return 0; } /* * When a new function is introduced to vm_operations_struct and added * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops. * This is because under System V memory model, mappings created via * shmget/shmat with "huge page" specified are backed by hugetlbfs files, * their original vm_ops are overwritten with shm_vm_ops. 
*/ const struct vm_operations_struct hugetlb_vm_ops = { .fault = hugetlb_vm_op_fault, .open = hugetlb_vm_op_open, .close = hugetlb_vm_op_close, .may_split = hugetlb_vm_op_split, .pagesize = hugetlb_vm_op_pagesize, }; static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, int writable) { pte_t entry; unsigned int shift = huge_page_shift(hstate_vma(vma)); if (writable) { entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, vma->vm_page_prot))); } else { entry = huge_pte_wrprotect(mk_huge_pte(page, vma->vm_page_prot)); } entry = pte_mkyoung(entry); entry = arch_make_huge_pte(entry, shift, vma->vm_flags); return entry; } static void set_huge_ptep_writable(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { pte_t entry; entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep))); if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) update_mmu_cache(vma, address, ptep); } bool is_hugetlb_entry_migration(pte_t pte) { swp_entry_t swp; if (huge_pte_none(pte) || pte_present(pte)) return false; swp = pte_to_swp_entry(pte); if (is_migration_entry(swp)) return true; else return false; } static bool is_hugetlb_entry_hwpoisoned(pte_t pte) { swp_entry_t swp; if (huge_pte_none(pte) || pte_present(pte)) return false; swp = pte_to_swp_entry(pte); if (is_hwpoison_entry(swp)) return true; else return false; } static void hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, struct folio *new_folio, pte_t old) { pte_t newpte = make_huge_pte(vma, &new_folio->page, 1); __folio_mark_uptodate(new_folio); hugepage_add_new_anon_rmap(new_folio, vma, addr); if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old)) newpte = huge_pte_mkuffd_wp(newpte); set_huge_pte_at(vma->vm_mm, addr, ptep, newpte); hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); folio_set_hugetlb_migratable(new_folio); } int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) { pte_t *src_pte, *dst_pte, entry; struct folio *pte_folio; unsigned long addr; bool cow = is_cow_mapping(src_vma->vm_flags); struct hstate *h = hstate_vma(src_vma); unsigned long sz = huge_page_size(h); unsigned long npages = pages_per_huge_page(h); struct mmu_notifier_range range; unsigned long last_addr_mask; int ret = 0; if (cow) { mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src, src_vma->vm_start, src_vma->vm_end); mmu_notifier_invalidate_range_start(&range); vma_assert_write_locked(src_vma); raw_write_seqcount_begin(&src->write_protect_seq); } else { /* * For shared mappings the vma lock must be held before * calling hugetlb_walk() in the src vma. Otherwise, the * returned ptep could go away if part of a shared pmd and * another thread calls huge_pmd_unshare. */ hugetlb_vma_lock_read(src_vma); } last_addr_mask = hugetlb_mask_last_page(h); for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { spinlock_t *src_ptl, *dst_ptl; src_pte = hugetlb_walk(src_vma, addr, sz); if (!src_pte) { addr |= last_addr_mask; continue; } dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz); if (!dst_pte) { ret = -ENOMEM; break; } /* * If the pagetables are shared don't copy or take references. * * dst_pte == src_pte is the common case of src/dest sharing. * However, src could have 'unshared' and dst shares with * another vma. So page_count of ptep page is checked instead * to reliably determine whether pte is shared. 
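		 * A count greater than one means the destination PMD page is
		 * shared; ORing last_addr_mask into addr makes the loop skip
		 * the rest of the range mapped by that shared PMD.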
*/ if (page_count(virt_to_page(dst_pte)) > 1) { addr |= last_addr_mask; continue; } dst_ptl = huge_pte_lock(h, dst, dst_pte); src_ptl = huge_pte_lockptr(h, src, src_pte); spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); entry = huge_ptep_get(src_pte); again: if (huge_pte_none(entry)) { /* * Skip if src entry none. */ ; } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) { if (!userfaultfd_wp(dst_vma)) entry = huge_pte_clear_uffd_wp(entry); set_huge_pte_at(dst, addr, dst_pte, entry); } else if (unlikely(is_hugetlb_entry_migration(entry))) { swp_entry_t swp_entry = pte_to_swp_entry(entry); bool uffd_wp = pte_swp_uffd_wp(entry); if (!is_readable_migration_entry(swp_entry) && cow) { /* * COW mappings require pages in both * parent and child to be set to read. */ swp_entry = make_readable_migration_entry( swp_offset(swp_entry)); entry = swp_entry_to_pte(swp_entry); if (userfaultfd_wp(src_vma) && uffd_wp) entry = pte_swp_mkuffd_wp(entry); set_huge_pte_at(src, addr, src_pte, entry); } if (!userfaultfd_wp(dst_vma)) entry = huge_pte_clear_uffd_wp(entry); set_huge_pte_at(dst, addr, dst_pte, entry); } else if (unlikely(is_pte_marker(entry))) { pte_marker marker = copy_pte_marker( pte_to_swp_entry(entry), dst_vma); if (marker) set_huge_pte_at(dst, addr, dst_pte, make_pte_marker(marker)); } else { entry = huge_ptep_get(src_pte); pte_folio = page_folio(pte_page(entry)); folio_get(pte_folio); /* * Failing to duplicate the anon rmap is a rare case * where we see pinned hugetlb pages while they're * prone to COW. We need to do the COW earlier during * fork. * * When pre-allocating the page or copying data, we * need to be without the pgtable locks since we could * sleep during the process. */ if (!folio_test_anon(pte_folio)) { page_dup_file_rmap(&pte_folio->page, true); } else if (page_try_dup_anon_rmap(&pte_folio->page, true, src_vma)) { pte_t src_pte_old = entry; struct folio *new_folio; spin_unlock(src_ptl); spin_unlock(dst_ptl); /* Do not use reserve as it's private owned */ new_folio = alloc_hugetlb_folio(dst_vma, addr, 1); if (IS_ERR(new_folio)) { folio_put(pte_folio); ret = PTR_ERR(new_folio); break; } ret = copy_user_large_folio(new_folio, pte_folio, addr, dst_vma); folio_put(pte_folio); if (ret) { folio_put(new_folio); break; } /* Install the new hugetlb folio if src pte stable */ dst_ptl = huge_pte_lock(h, dst, dst_pte); src_ptl = huge_pte_lockptr(h, src, src_pte); spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); entry = huge_ptep_get(src_pte); if (!pte_same(src_pte_old, entry)) { restore_reserve_on_error(h, dst_vma, addr, new_folio); folio_put(new_folio); /* huge_ptep of dst_pte won't change as in child */ goto again; } hugetlb_install_folio(dst_vma, dst_pte, addr, new_folio, src_pte_old); spin_unlock(src_ptl); spin_unlock(dst_ptl); continue; } if (cow) { /* * No need to notify as we are downgrading page * table protection not changing it to point * to a new page. 
* * See Documentation/mm/mmu_notifier.rst */ huge_ptep_set_wrprotect(src, addr, src_pte); entry = huge_pte_wrprotect(entry); } if (!userfaultfd_wp(dst_vma)) entry = huge_pte_clear_uffd_wp(entry); set_huge_pte_at(dst, addr, dst_pte, entry); hugetlb_count_add(npages, dst); } spin_unlock(src_ptl); spin_unlock(dst_ptl); } if (cow) { raw_write_seqcount_end(&src->write_protect_seq); mmu_notifier_invalidate_range_end(&range); } else { hugetlb_vma_unlock_read(src_vma); } return ret; } static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte) { struct hstate *h = hstate_vma(vma); struct mm_struct *mm = vma->vm_mm; spinlock_t *src_ptl, *dst_ptl; pte_t pte; dst_ptl = huge_pte_lock(h, mm, dst_pte); src_ptl = huge_pte_lockptr(h, mm, src_pte); /* * We don't have to worry about the ordering of src and dst ptlocks * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock. */ if (src_ptl != dst_ptl) spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); pte = huge_ptep_get_and_clear(mm, old_addr, src_pte); set_huge_pte_at(mm, new_addr, dst_pte, pte); if (src_ptl != dst_ptl) spin_unlock(src_ptl); spin_unlock(dst_ptl); } int move_hugetlb_page_tables(struct vm_area_struct *vma, struct vm_area_struct *new_vma, unsigned long old_addr, unsigned long new_addr, unsigned long len) { struct hstate *h = hstate_vma(vma); struct address_space *mapping = vma->vm_file->f_mapping; unsigned long sz = huge_page_size(h); struct mm_struct *mm = vma->vm_mm; unsigned long old_end = old_addr + len; unsigned long last_addr_mask; pte_t *src_pte, *dst_pte; struct mmu_notifier_range range; bool shared_pmd = false; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr, old_end); adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); /* * In case of shared PMDs, we should cover the maximum possible * range. */ flush_cache_range(vma, range.start, range.end); mmu_notifier_invalidate_range_start(&range); last_addr_mask = hugetlb_mask_last_page(h); /* Prevent race with file truncation */ hugetlb_vma_lock_write(vma); i_mmap_lock_write(mapping); for (; old_addr < old_end; old_addr += sz, new_addr += sz) { src_pte = hugetlb_walk(vma, old_addr, sz); if (!src_pte) { old_addr |= last_addr_mask; new_addr |= last_addr_mask; continue; } if (huge_pte_none(huge_ptep_get(src_pte))) continue; if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) { shared_pmd = true; old_addr |= last_addr_mask; new_addr |= last_addr_mask; continue; } dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz); if (!dst_pte) break; move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte); } if (shared_pmd) flush_hugetlb_tlb_range(vma, range.start, range.end); else flush_hugetlb_tlb_range(vma, old_end - len, old_end); mmu_notifier_invalidate_range_end(&range); i_mmap_unlock_write(mapping); hugetlb_vma_unlock_write(vma); return len + old_addr - old_end; } static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page, zap_flags_t zap_flags) { struct mm_struct *mm = vma->vm_mm; unsigned long address; pte_t *ptep; pte_t pte; spinlock_t *ptl; struct page *page; struct hstate *h = hstate_vma(vma); unsigned long sz = huge_page_size(h); unsigned long last_addr_mask; bool force_flush = false; WARN_ON(!is_vm_hugetlb_page(vma)); BUG_ON(start & ~huge_page_mask(h)); BUG_ON(end & ~huge_page_mask(h)); /* * This is a hugetlb vma, all the pte entries should point * to huge page. 
 */
	tlb_change_page_size(tlb, sz);
	tlb_start_vma(tlb, vma);

	last_addr_mask = hugetlb_mask_last_page(h);
	address = start;
	for (; address < end; address += sz) {
		ptep = hugetlb_walk(vma, address, sz);
		if (!ptep) {
			address |= last_addr_mask;
			continue;
		}

		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, vma, address, ptep)) {
			spin_unlock(ptl);
			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
			force_flush = true;
			address |= last_addr_mask;
			continue;
		}

		pte = huge_ptep_get(ptep);
		if (huge_pte_none(pte)) {
			spin_unlock(ptl);
			continue;
		}

		/*
		 * Migrating hugepage or HWPoisoned hugepage is already
		 * unmapped and its refcount is dropped, so just clear pte here.
		 */
		if (unlikely(!pte_present(pte))) {
			/*
			 * If the pte was wr-protected by uffd-wp in any of the
			 * swap forms, meanwhile the caller does not want to
			 * drop the uffd-wp bit in this zap, then replace the
			 * pte with a marker.
			 */
			if (pte_swp_uffd_wp_any(pte) &&
			    !(zap_flags & ZAP_FLAG_DROP_MARKER))
				set_huge_pte_at(mm, address, ptep,
						make_pte_marker(PTE_MARKER_UFFD_WP));
			else
				huge_pte_clear(mm, address, ptep, sz);
			spin_unlock(ptl);
			continue;
		}

		page = pte_page(pte);
		/*
		 * If a reference page is supplied, it is because a specific
		 * page is being unmapped, not a range. Ensure the page we
		 * are about to unmap is the actual page of interest.
		 */
		if (ref_page) {
			if (page != ref_page) {
				spin_unlock(ptl);
				continue;
			}
			/*
			 * Mark the VMA as having unmapped its page so that
			 * future faults in this VMA will fail rather than
			 * looking like data was lost.
			 */
			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
		}

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
		if (huge_pte_dirty(pte))
			set_page_dirty(page);
		/* Leave a uffd-wp pte marker if needed */
		if (huge_pte_uffd_wp(pte) &&
		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
			set_huge_pte_at(mm, address, ptep,
					make_pte_marker(PTE_MARKER_UFFD_WP));
		hugetlb_count_sub(pages_per_huge_page(h), mm);
		page_remove_rmap(page, vma, true);

		spin_unlock(ptl);
		tlb_remove_page_size(tlb, page, huge_page_size(h));
		/*
		 * Bail out after unmapping reference page if supplied
		 */
		if (ref_page)
			break;
	}
	tlb_end_vma(tlb, vma);

	/*
	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather.
	 * We could defer the flush until now, since by holding i_mmap_rwsem
	 * we guaranteed that the last reference would not be dropped. But we
	 * must do the flushing before we return, as otherwise i_mmap_rwsem
	 * will be dropped and the last reference to the shared PMDs page
	 * might be dropped as well.
	 *
	 * In theory we could defer the freeing of the PMD pages as well, but
	 * huge_pmd_unshare() relies on the exact page_count for the PMD page
	 * to detect sharing, so we cannot defer the release of the page
	 * either. Instead, do flush now.
	 */
	if (force_flush)
		tlb_flush_mmu_tlbonly(tlb);
}

void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page,
			  zap_flags_t zap_flags)
{
	hugetlb_vma_lock_write(vma);
	i_mmap_lock_write(vma->vm_file->f_mapping);

	/* mmu notification performed in caller */
	__unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);

	if (zap_flags & ZAP_FLAG_UNMAP) {	/* final unmap */
		/*
		 * Unlock and free the vma lock before releasing i_mmap_rwsem.
		 * When the vma_lock is freed, this makes the vma ineligible
		 * for pmd sharing. And, i_mmap_rwsem is required to set up
		 * pmd sharing. This is important as page tables for this
		 * unmapped range will be asynchronously deleted.
If the page * tables are shared, there will be issues when accessed by * someone else. */ __hugetlb_vma_unlock_write_free(vma); i_mmap_unlock_write(vma->vm_file->f_mapping); } else { i_mmap_unlock_write(vma->vm_file->f_mapping); hugetlb_vma_unlock_write(vma); } } void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page, zap_flags_t zap_flags) { struct mmu_notifier_range range; struct mmu_gather tlb; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, start, end); adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); mmu_notifier_invalidate_range_start(&range); tlb_gather_mmu(&tlb, vma->vm_mm); __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags); mmu_notifier_invalidate_range_end(&range); tlb_finish_mmu(&tlb); } /* * This is called when the original mapper is failing to COW a MAP_PRIVATE * mapping it owns the reserve page for. The intention is to unmap the page * from other VMAs and let the children be SIGKILLed if they are faulting the * same region. */ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, unsigned long address) { struct hstate *h = hstate_vma(vma); struct vm_area_struct *iter_vma; struct address_space *mapping; pgoff_t pgoff; /* * vm_pgoff is in PAGE_SIZE units, hence the different calculation * from page cache lookup which is in HPAGE_SIZE units. */ address = address & huge_page_mask(h); pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; mapping = vma->vm_file->f_mapping; /* * Take the mapping lock for the duration of the table walk. As * this mapping should be shared between all the VMAs, * __unmap_hugepage_range() is called as the lock is already held */ i_mmap_lock_write(mapping); vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { /* Do not unmap the current VMA */ if (iter_vma == vma) continue; /* * Shared VMAs have their own reserves and do not affect * MAP_PRIVATE accounting but it is possible that a shared * VMA is using the same page so check and skip such VMAs. */ if (iter_vma->vm_flags & VM_MAYSHARE) continue; /* * Unmap the page from other VMAs without their own reserves. * They get marked to be SIGKILLed if they fault in these * areas. This is because a future no-page fault on this VMA * could insert a zeroed page instead of the data existing * from the time of fork. This would look like data corruption */ if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) unmap_hugepage_range(iter_vma, address, address + huge_page_size(h), page, 0); } i_mmap_unlock_write(mapping); } /* * hugetlb_wp() should be called with page lock of the original hugepage held. * Called with hugetlb_fault_mutex_table held and pte_page locked so we * cannot race with other handlers or page migration. * Keep the pte_same checks anyway to make transition from the mutex easier. */ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, unsigned int flags, struct folio *pagecache_folio, spinlock_t *ptl) { const bool unshare = flags & FAULT_FLAG_UNSHARE; pte_t pte = huge_ptep_get(ptep); struct hstate *h = hstate_vma(vma); struct folio *old_folio; struct folio *new_folio; int outside_reserve = 0; vm_fault_t ret = 0; unsigned long haddr = address & huge_page_mask(h); struct mmu_notifier_range range; /* * Never handle CoW for uffd-wp protected pages. It should be only * handled when the uffd-wp protection is removed. 
* * Note that only the CoW optimization path (in hugetlb_no_page()) * can trigger this, because hugetlb_fault() will always resolve * uffd-wp bit first. */ if (!unshare && huge_pte_uffd_wp(pte)) return 0; /* * hugetlb does not support FOLL_FORCE-style write faults that keep the * PTE mapped R/O such as maybe_mkwrite() would do. */ if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE))) return VM_FAULT_SIGSEGV; /* Let's take out MAP_SHARED mappings first. */ if (vma->vm_flags & VM_MAYSHARE) { set_huge_ptep_writable(vma, haddr, ptep); return 0; } old_folio = page_folio(pte_page(pte)); delayacct_wpcopy_start(); retry_avoidcopy: /* * If no-one else is actually using this page, we're the exclusive * owner and can reuse this page. */ if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) { if (!PageAnonExclusive(&old_folio->page)) page_move_anon_rmap(&old_folio->page, vma); if (likely(!unshare)) set_huge_ptep_writable(vma, haddr, ptep); delayacct_wpcopy_end(); return 0; } VM_BUG_ON_PAGE(folio_test_anon(old_folio) && PageAnonExclusive(&old_folio->page), &old_folio->page); /* * If the process that created a MAP_PRIVATE mapping is about to * perform a COW due to a shared page count, attempt to satisfy * the allocation without using the existing reserves. The pagecache * page is used to determine if the reserve at this address was * consumed or not. If reserves were used, a partial faulted mapping * at the time of fork() could consume its reserves on COW instead * of the full address range. */ if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && old_folio != pagecache_folio) outside_reserve = 1; folio_get(old_folio); /* * Drop page table lock as buddy allocator may be called. It will * be acquired again before returning to the caller, as expected. */ spin_unlock(ptl); new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve); if (IS_ERR(new_folio)) { /* * If a process owning a MAP_PRIVATE mapping fails to COW, * it is due to references held by a child and an insufficient * huge page pool. To guarantee the original mappers * reliability, unmap the page from child processes. The child * may get SIGKILLed if it later faults. */ if (outside_reserve) { struct address_space *mapping = vma->vm_file->f_mapping; pgoff_t idx; u32 hash; folio_put(old_folio); /* * Drop hugetlb_fault_mutex and vma_lock before * unmapping. unmapping needs to hold vma_lock * in write mode. Dropping vma_lock in read mode * here is OK as COW mappings do not interact with * PMD sharing. * * Reacquire both after unmap operation. */ idx = vma_hugecache_offset(h, vma, haddr); hash = hugetlb_fault_mutex_hash(mapping, idx); hugetlb_vma_unlock_read(vma); mutex_unlock(&hugetlb_fault_mutex_table[hash]); unmap_ref_private(mm, vma, &old_folio->page, haddr); mutex_lock(&hugetlb_fault_mutex_table[hash]); hugetlb_vma_lock_read(vma); spin_lock(ptl); ptep = hugetlb_walk(vma, haddr, huge_page_size(h)); if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) goto retry_avoidcopy; /* * race occurs while re-acquiring page table * lock, and our job is done. */ delayacct_wpcopy_end(); return 0; } ret = vmf_error(PTR_ERR(new_folio)); goto out_release_old; } /* * When the original hugepage is shared one, it does not have * anon_vma prepared. 
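	 * anon_vma_prepare() may therefore have to allocate one here and can
	 * fail, which is reported as VM_FAULT_OOM below.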
*/ if (unlikely(anon_vma_prepare(vma))) { ret = VM_FAULT_OOM; goto out_release_all; } if (copy_user_large_folio(new_folio, old_folio, address, vma)) { ret = VM_FAULT_HWPOISON_LARGE; goto out_release_all; } __folio_mark_uptodate(new_folio); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr, haddr + huge_page_size(h)); mmu_notifier_invalidate_range_start(&range); /* * Retake the page table lock to check for racing updates * before the page tables are altered */ spin_lock(ptl); ptep = hugetlb_walk(vma, haddr, huge_page_size(h)); if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) { pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare); /* Break COW or unshare */ huge_ptep_clear_flush(vma, haddr, ptep); page_remove_rmap(&old_folio->page, vma, true); hugepage_add_new_anon_rmap(new_folio, vma, haddr); if (huge_pte_uffd_wp(pte)) newpte = huge_pte_mkuffd_wp(newpte); set_huge_pte_at(mm, haddr, ptep, newpte); folio_set_hugetlb_migratable(new_folio); /* Make the old page be freed below */ new_folio = old_folio; } spin_unlock(ptl); mmu_notifier_invalidate_range_end(&range); out_release_all: /* * No restore in case of successful pagetable update (Break COW or * unshare) */ if (new_folio != old_folio) restore_reserve_on_error(h, vma, haddr, new_folio); folio_put(new_folio); out_release_old: folio_put(old_folio); spin_lock(ptl); /* Caller expects lock to be held */ delayacct_wpcopy_end(); return ret; } /* * Return whether there is a pagecache page to back given address within VMA. */ static bool hugetlbfs_pagecache_present(struct hstate *h, struct vm_area_struct *vma, unsigned long address) { struct address_space *mapping = vma->vm_file->f_mapping; pgoff_t idx = vma_hugecache_offset(h, vma, address); struct folio *folio; folio = filemap_get_folio(mapping, idx); if (IS_ERR(folio)) return false; folio_put(folio); return true; } int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, pgoff_t idx) { struct inode *inode = mapping->host; struct hstate *h = hstate_inode(inode); int err; __folio_set_locked(folio); err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL); if (unlikely(err)) { __folio_clear_locked(folio); return err; } folio_clear_hugetlb_restore_reserve(folio); /* * mark folio dirty so that it will not be removed from cache/file * by non-hugetlbfs specific code paths. */ folio_mark_dirty(folio); spin_lock(&inode->i_lock); inode->i_blocks += blocks_per_huge_page(h); spin_unlock(&inode->i_lock); return 0; } static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned int flags, unsigned long haddr, unsigned long addr, unsigned long reason) { u32 hash; struct vm_fault vmf = { .vma = vma, .address = haddr, .real_address = addr, .flags = flags, /* * Hard to debug if it ends up being * used by a callee that assumes * something about the other * uninitialized fields... same as in * memory.c */ }; /* * vma_lock and hugetlb_fault_mutex must be dropped before handling * userfault. Also mmap_lock could be dropped due to handling * userfault, any vma operation should be careful from here. */ hugetlb_vma_unlock_read(vma); hash = hugetlb_fault_mutex_hash(mapping, idx); mutex_unlock(&hugetlb_fault_mutex_table[hash]); return handle_userfault(&vmf, reason); } /* * Recheck pte with pgtable lock. Returns true if pte didn't change, or * false if pte changed or is changing. 
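 * Callers read old_pte without the page table lock and use this helper to
 * detect a racing update before acting on it, e.g. before handing a
 * missing-page fault to userfaultfd in hugetlb_no_page().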
*/ static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, pte_t *ptep, pte_t old_pte) { spinlock_t *ptl; bool same; ptl = huge_pte_lock(h, mm, ptep); same = pte_same(huge_ptep_get(ptep), old_pte); spin_unlock(ptl); return same; } static vm_fault_t hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address, pte_t *ptep, pte_t old_pte, unsigned int flags) { struct hstate *h = hstate_vma(vma); vm_fault_t ret = VM_FAULT_SIGBUS; int anon_rmap = 0; unsigned long size; struct folio *folio; pte_t new_pte; spinlock_t *ptl; unsigned long haddr = address & huge_page_mask(h); bool new_folio, new_pagecache_folio = false; u32 hash = hugetlb_fault_mutex_hash(mapping, idx); /* * Currently, we are forced to kill the process in the event the * original mapper has unmapped pages from the child due to a failed * COW/unsharing. Warn that such a situation has occurred as it may not * be obvious. */ if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n", current->pid); goto out; } /* * Use page lock to guard against racing truncation * before we get page_table_lock. */ new_folio = false; folio = filemap_lock_folio(mapping, idx); if (IS_ERR(folio)) { size = i_size_read(mapping->host) >> huge_page_shift(h); if (idx >= size) goto out; /* Check for page in userfault range */ if (userfaultfd_missing(vma)) { /* * Since hugetlb_no_page() was examining pte * without pgtable lock, we need to re-test under * lock because the pte may not be stable and could * have changed from under us. Try to detect * either changed or during-changing ptes and retry * properly when needed. * * Note that userfaultfd is actually fine with * false positives (e.g. caused by pte changed), * but not wrong logical events (e.g. caused by * reading a pte during changing). The latter can * confuse the userspace, so the strictness is very * much preferred. E.g., MISSING event should * never happen on the page after UFFDIO_COPY has * correctly installed the page and returned. */ if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) { ret = 0; goto out; } return hugetlb_handle_userfault(vma, mapping, idx, flags, haddr, address, VM_UFFD_MISSING); } folio = alloc_hugetlb_folio(vma, haddr, 0); if (IS_ERR(folio)) { /* * Returning error will result in faulting task being * sent SIGBUS. The hugetlb fault mutex prevents two * tasks from racing to fault in the same page which * could result in false unable to allocate errors. * Page migration does not take the fault mutex, but * does a clear then write of pte's under page table * lock. Page fault code could race with migration, * notice the clear pte and try to allocate a page * here. Before returning error, get ptl and make * sure there really is no pte entry. */ if (hugetlb_pte_stable(h, mm, ptep, old_pte)) ret = vmf_error(PTR_ERR(folio)); else ret = 0; goto out; } clear_huge_page(&folio->page, address, pages_per_huge_page(h)); __folio_mark_uptodate(folio); new_folio = true; if (vma->vm_flags & VM_MAYSHARE) { int err = hugetlb_add_to_page_cache(folio, mapping, idx); if (err) { /* * err can't be -EEXIST which implies someone * else consumed the reservation since hugetlb * fault mutex is held when add a hugetlb page * to the page cache. So it's safe to call * restore_reserve_on_error() here. 
*/ restore_reserve_on_error(h, vma, haddr, folio); folio_put(folio); goto out; } new_pagecache_folio = true; } else { folio_lock(folio); if (unlikely(anon_vma_prepare(vma))) { ret = VM_FAULT_OOM; goto backout_unlocked; } anon_rmap = 1; } } else { /* * If memory error occurs between mmap() and fault, some process * don't have hwpoisoned swap entry for errored virtual address. * So we need to block hugepage fault by PG_hwpoison bit check. */ if (unlikely(folio_test_hwpoison(folio))) { ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h)); goto backout_unlocked; } /* Check for page in userfault range. */ if (userfaultfd_minor(vma)) { folio_unlock(folio); folio_put(folio); /* See comment in userfaultfd_missing() block above */ if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) { ret = 0; goto out; } return hugetlb_handle_userfault(vma, mapping, idx, flags, haddr, address, VM_UFFD_MINOR); } } /* * If we are going to COW a private mapping later, we examine the * pending reservations for this page now. This will ensure that * any allocations necessary to record that reservation occur outside * the spinlock. */ if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { if (vma_needs_reservation(h, vma, haddr) < 0) { ret = VM_FAULT_OOM; goto backout_unlocked; } /* Just decrements count, does not deallocate */ vma_end_reservation(h, vma, haddr); } ptl = huge_pte_lock(h, mm, ptep); ret = 0; /* If pte changed from under us, retry */ if (!pte_same(huge_ptep_get(ptep), old_pte)) goto backout; if (anon_rmap) hugepage_add_new_anon_rmap(folio, vma, haddr); else page_dup_file_rmap(&folio->page, true); new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_SHARED))); /* * If this pte was previously wr-protected, keep it wr-protected even * if populated. */ if (unlikely(pte_marker_uffd_wp(old_pte))) new_pte = huge_pte_mkuffd_wp(new_pte); set_huge_pte_at(mm, haddr, ptep, new_pte); hugetlb_count_add(pages_per_huge_page(h), mm); if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { /* Optimization, do the COW without a second fault */ ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl); } spin_unlock(ptl); /* * Only set hugetlb_migratable in newly allocated pages. Existing pages * found in the pagecache may not have hugetlb_migratable if they have * been isolated for migration. */ if (new_folio) folio_set_hugetlb_migratable(folio); folio_unlock(folio); out: hugetlb_vma_unlock_read(vma); mutex_unlock(&hugetlb_fault_mutex_table[hash]); return ret; backout: spin_unlock(ptl); backout_unlocked: if (new_folio && !new_pagecache_folio) restore_reserve_on_error(h, vma, haddr, folio); folio_unlock(folio); folio_put(folio); goto out; } #ifdef CONFIG_SMP u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) { unsigned long key[2]; u32 hash; key[0] = (unsigned long) mapping; key[1] = idx; hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0); return hash & (num_fault_mutexes - 1); } #else /* * For uniprocessor systems we always use a single mutex, so just * return 0 and avoid the hashing overhead. 
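 * num_fault_mutexes is 1 in this configuration (see hugetlb_init()), so
 * slot 0 is the only valid index.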
*/ u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) { return 0; } #endif vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags) { pte_t *ptep, entry; spinlock_t *ptl; vm_fault_t ret; u32 hash; pgoff_t idx; struct folio *folio = NULL; struct folio *pagecache_folio = NULL; struct hstate *h = hstate_vma(vma); struct address_space *mapping; int need_wait_lock = 0; unsigned long haddr = address & huge_page_mask(h); /* TODO: Handle faults under the VMA lock */ if (flags & FAULT_FLAG_VMA_LOCK) { vma_end_read(vma); return VM_FAULT_RETRY; } /* * Serialize hugepage allocation and instantiation, so that we don't * get spurious allocation failures if two CPUs race to instantiate * the same page in the page cache. */ mapping = vma->vm_file->f_mapping; idx = vma_hugecache_offset(h, vma, haddr); hash = hugetlb_fault_mutex_hash(mapping, idx); mutex_lock(&hugetlb_fault_mutex_table[hash]); /* * Acquire vma lock before calling huge_pte_alloc and hold * until finished with ptep. This prevents huge_pmd_unshare from * being called elsewhere and making the ptep no longer valid. */ hugetlb_vma_lock_read(vma); ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h)); if (!ptep) { hugetlb_vma_unlock_read(vma); mutex_unlock(&hugetlb_fault_mutex_table[hash]); return VM_FAULT_OOM; } entry = huge_ptep_get(ptep); if (huge_pte_none_mostly(entry)) { if (is_pte_marker(entry)) { pte_marker marker = pte_marker_get(pte_to_swp_entry(entry)); if (marker & PTE_MARKER_POISONED) { ret = VM_FAULT_HWPOISON_LARGE; goto out_mutex; } } /* * Other PTE markers should be handled the same way as none PTE. * * hugetlb_no_page will drop vma lock and hugetlb fault * mutex internally, which make us return immediately. */ return hugetlb_no_page(mm, vma, mapping, idx, address, ptep, entry, flags); } ret = 0; /* * entry could be a migration/hwpoison entry at this point, so this * check prevents the kernel from going below assuming that we have * an active hugepage in pagecache. This goto expects the 2nd page * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will * properly handle it. */ if (!pte_present(entry)) { if (unlikely(is_hugetlb_entry_migration(entry))) { /* * Release the hugetlb fault lock now, but retain * the vma lock, because it is needed to guard the * huge_pte_lockptr() later in * migration_entry_wait_huge(). The vma lock will * be released there. */ mutex_unlock(&hugetlb_fault_mutex_table[hash]); migration_entry_wait_huge(vma, ptep); return 0; } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h)); goto out_mutex; } /* * If we are going to COW/unshare the mapping later, we examine the * pending reservations for this page now. This will ensure that any * allocations necessary to record that reservation occur outside the * spinlock. Also lookup the pagecache page now as it is used to * determine if a reservation has been consumed. 
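	 * Only private mappings that cannot already write the pte take this
	 * path; MAP_SHARED faults are resolved by simply making the pte
	 * writable in hugetlb_wp().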
*/ if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) && !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) { if (vma_needs_reservation(h, vma, haddr) < 0) { ret = VM_FAULT_OOM; goto out_mutex; } /* Just decrements count, does not deallocate */ vma_end_reservation(h, vma, haddr); pagecache_folio = filemap_lock_folio(mapping, idx); if (IS_ERR(pagecache_folio)) pagecache_folio = NULL; } ptl = huge_pte_lock(h, mm, ptep); /* Check for a racing update before calling hugetlb_wp() */ if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) goto out_ptl; /* Handle userfault-wp first, before trying to lock more pages */ if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) && (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) { struct vm_fault vmf = { .vma = vma, .address = haddr, .real_address = address, .flags = flags, }; spin_unlock(ptl); if (pagecache_folio) { folio_unlock(pagecache_folio); folio_put(pagecache_folio); } hugetlb_vma_unlock_read(vma); mutex_unlock(&hugetlb_fault_mutex_table[hash]); return handle_userfault(&vmf, VM_UFFD_WP); } /* * hugetlb_wp() requires page locks of pte_page(entry) and * pagecache_folio, so here we need take the former one * when folio != pagecache_folio or !pagecache_folio. */ folio = page_folio(pte_page(entry)); if (folio != pagecache_folio) if (!folio_trylock(folio)) { need_wait_lock = 1; goto out_ptl; } folio_get(folio); if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { if (!huge_pte_write(entry)) { ret = hugetlb_wp(mm, vma, address, ptep, flags, pagecache_folio, ptl); goto out_put_page; } else if (likely(flags & FAULT_FLAG_WRITE)) { entry = huge_pte_mkdirty(entry); } } entry = pte_mkyoung(entry); if (huge_ptep_set_access_flags(vma, haddr, ptep, entry, flags & FAULT_FLAG_WRITE)) update_mmu_cache(vma, haddr, ptep); out_put_page: if (folio != pagecache_folio) folio_unlock(folio); folio_put(folio); out_ptl: spin_unlock(ptl); if (pagecache_folio) { folio_unlock(pagecache_folio); folio_put(pagecache_folio); } out_mutex: hugetlb_vma_unlock_read(vma); mutex_unlock(&hugetlb_fault_mutex_table[hash]); /* * Generally it's safe to hold refcount during waiting page lock. But * here we just wait to defer the next page fault to avoid busy loop and * the page is not used after unlocked before returning from the current * page fault. So we are safe from accessing freed page, even if we wait * here without taking refcount. */ if (need_wait_lock) folio_wait_locked(folio); return ret; } #ifdef CONFIG_USERFAULTFD /* * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte * with modifications for hugetlb pages. 
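 * If copying from user space fails here, a temporary folio is returned
 * through @foliop together with -ENOENT, so the caller can perform the copy
 * outside mmap_lock and retry.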
*/ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, uffd_flags_t flags, struct folio **foliop) { struct mm_struct *dst_mm = dst_vma->vm_mm; bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE); bool wp_enabled = (flags & MFILL_ATOMIC_WP); struct hstate *h = hstate_vma(dst_vma); struct address_space *mapping = dst_vma->vm_file->f_mapping; pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr); unsigned long size; int vm_shared = dst_vma->vm_flags & VM_SHARED; pte_t _dst_pte; spinlock_t *ptl; int ret = -ENOMEM; struct folio *folio; int writable; bool folio_in_pagecache = false; if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) { ptl = huge_pte_lock(h, dst_mm, dst_pte); /* Don't overwrite any existing PTEs (even markers) */ if (!huge_pte_none(huge_ptep_get(dst_pte))) { spin_unlock(ptl); return -EEXIST; } _dst_pte = make_pte_marker(PTE_MARKER_POISONED); set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); /* No need to invalidate - it was non-present before */ update_mmu_cache(dst_vma, dst_addr, dst_pte); spin_unlock(ptl); return 0; } if (is_continue) { ret = -EFAULT; folio = filemap_lock_folio(mapping, idx); if (IS_ERR(folio)) goto out; folio_in_pagecache = true; } else if (!*foliop) { /* If a folio already exists, then it's UFFDIO_COPY for * a non-missing case. Return -EEXIST. */ if (vm_shared && hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { ret = -EEXIST; goto out; } folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0); if (IS_ERR(folio)) { ret = -ENOMEM; goto out; } ret = copy_folio_from_user(folio, (const void __user *) src_addr, false); /* fallback to copy_from_user outside mmap_lock */ if (unlikely(ret)) { ret = -ENOENT; /* Free the allocated folio which may have * consumed a reservation. */ restore_reserve_on_error(h, dst_vma, dst_addr, folio); folio_put(folio); /* Allocate a temporary folio to hold the copied * contents. */ folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr); if (!folio) { ret = -ENOMEM; goto out; } *foliop = folio; /* Set the outparam foliop and return to the caller to * copy the contents outside the lock. Don't free the * folio. */ goto out; } } else { if (vm_shared && hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { folio_put(*foliop); ret = -EEXIST; *foliop = NULL; goto out; } folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0); if (IS_ERR(folio)) { folio_put(*foliop); ret = -ENOMEM; *foliop = NULL; goto out; } ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma); folio_put(*foliop); *foliop = NULL; if (ret) { folio_put(folio); goto out; } } /* * The memory barrier inside __folio_mark_uptodate makes sure that * preceding stores to the page contents become visible before * the set_pte_at() write. */ __folio_mark_uptodate(folio); /* Add shared, newly allocated pages to the page cache. */ if (vm_shared && !is_continue) { size = i_size_read(mapping->host) >> huge_page_shift(h); ret = -EFAULT; if (idx >= size) goto out_release_nounlock; /* * Serialization between remove_inode_hugepages() and * hugetlb_add_to_page_cache() below happens through the * hugetlb_fault_mutex_table that here must be hold by * the caller. 
*/ ret = hugetlb_add_to_page_cache(folio, mapping, idx); if (ret) goto out_release_nounlock; folio_in_pagecache = true; } ptl = huge_pte_lock(h, dst_mm, dst_pte); ret = -EIO; if (folio_test_hwpoison(folio)) goto out_release_unlock; /* * We allow to overwrite a pte marker: consider when both MISSING|WP * registered, we firstly wr-protect a none pte which has no page cache * page backing it, then access the page. */ ret = -EEXIST; if (!huge_pte_none_mostly(huge_ptep_get(dst_pte))) goto out_release_unlock; if (folio_in_pagecache) page_dup_file_rmap(&folio->page, true); else hugepage_add_new_anon_rmap(folio, dst_vma, dst_addr); /* * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY * with wp flag set, don't set pte write bit. */ if (wp_enabled || (is_continue && !vm_shared)) writable = 0; else writable = dst_vma->vm_flags & VM_WRITE; _dst_pte = make_huge_pte(dst_vma, &folio->page, writable); /* * Always mark UFFDIO_COPY page dirty; note that this may not be * extremely important for hugetlbfs for now since swapping is not * supported, but we should still be clear in that this page cannot be * thrown away at will, even if write bit not set. */ _dst_pte = huge_pte_mkdirty(_dst_pte); _dst_pte = pte_mkyoung(_dst_pte); if (wp_enabled) _dst_pte = huge_pte_mkuffd_wp(_dst_pte); set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); hugetlb_count_add(pages_per_huge_page(h), dst_mm); /* No need to invalidate - it was non-present before */ update_mmu_cache(dst_vma, dst_addr, dst_pte); spin_unlock(ptl); if (!is_continue) folio_set_hugetlb_migratable(folio); if (vm_shared || is_continue) folio_unlock(folio); ret = 0; out: return ret; out_release_unlock: spin_unlock(ptl); if (vm_shared || is_continue) folio_unlock(folio); out_release_nounlock: if (!folio_in_pagecache) restore_reserve_on_error(h, dst_vma, dst_addr, folio); folio_put(folio); goto out; } #endif /* CONFIG_USERFAULTFD */ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned int *page_mask) { struct hstate *h = hstate_vma(vma); struct mm_struct *mm = vma->vm_mm; unsigned long haddr = address & huge_page_mask(h); struct page *page = NULL; spinlock_t *ptl; pte_t *pte, entry; int ret; hugetlb_vma_lock_read(vma); pte = hugetlb_walk(vma, haddr, huge_page_size(h)); if (!pte) goto out_unlock; ptl = huge_pte_lock(h, mm, pte); entry = huge_ptep_get(pte); if (pte_present(entry)) { page = pte_page(entry); if (!huge_pte_write(entry)) { if (flags & FOLL_WRITE) { page = NULL; goto out; } if (gup_must_unshare(vma, flags, page)) { /* Tell the caller to do unsharing */ page = ERR_PTR(-EMLINK); goto out; } } page += ((address & ~huge_page_mask(h)) >> PAGE_SHIFT); /* * Note that page may be a sub-page, and with vmemmap * optimizations the page struct may be read only. * try_grab_page() will increase the ref count on the * head page, so this will be OK. * * try_grab_page() should always be able to get the page here, * because we hold the ptl lock and have verified pte_present(). */ ret = try_grab_page(page, flags); if (WARN_ON_ONCE(ret)) { page = ERR_PTR(ret); goto out; } *page_mask = (1U << huge_page_order(h)) - 1; } out: spin_unlock(ptl); out_unlock: hugetlb_vma_unlock_read(vma); /* * Fixup retval for dump requests: if pagecache doesn't exist, * don't try to allocate a new page but just skip it. 
*/ if (!page && (flags & FOLL_DUMP) && !hugetlbfs_pagecache_present(h, vma, address)) page = ERR_PTR(-EFAULT); return page; } long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot, unsigned long cp_flags) { struct mm_struct *mm = vma->vm_mm; unsigned long start = address; pte_t *ptep; pte_t pte; struct hstate *h = hstate_vma(vma); long pages = 0, psize = huge_page_size(h); bool shared_pmd = false; struct mmu_notifier_range range; unsigned long last_addr_mask; bool uffd_wp = cp_flags & MM_CP_UFFD_WP; bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; /* * In the case of shared PMDs, the area to flush could be beyond * start/end. Set range.start/range.end to cover the maximum possible * range if PMD sharing is possible. */ mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0, mm, start, end); adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); BUG_ON(address >= end); flush_cache_range(vma, range.start, range.end); mmu_notifier_invalidate_range_start(&range); hugetlb_vma_lock_write(vma); i_mmap_lock_write(vma->vm_file->f_mapping); last_addr_mask = hugetlb_mask_last_page(h); for (; address < end; address += psize) { spinlock_t *ptl; ptep = hugetlb_walk(vma, address, psize); if (!ptep) { if (!uffd_wp) { address |= last_addr_mask; continue; } /* * Userfaultfd wr-protect requires pgtable * pre-allocations to install pte markers. */ ptep = huge_pte_alloc(mm, vma, address, psize); if (!ptep) { pages = -ENOMEM; break; } } ptl = huge_pte_lock(h, mm, ptep); if (huge_pmd_unshare(mm, vma, address, ptep)) { /* * When uffd-wp is enabled on the vma, unshare * shouldn't happen at all. Warn about it if it * happened due to some reason. */ WARN_ON_ONCE(uffd_wp || uffd_wp_resolve); pages++; spin_unlock(ptl); shared_pmd = true; address |= last_addr_mask; continue; } pte = huge_ptep_get(ptep); if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { /* Nothing to do. */ } else if (unlikely(is_hugetlb_entry_migration(pte))) { swp_entry_t entry = pte_to_swp_entry(pte); struct page *page = pfn_swap_entry_to_page(entry); pte_t newpte = pte; if (is_writable_migration_entry(entry)) { if (PageAnon(page)) entry = make_readable_exclusive_migration_entry( swp_offset(entry)); else entry = make_readable_migration_entry( swp_offset(entry)); newpte = swp_entry_to_pte(entry); pages++; } if (uffd_wp) newpte = pte_swp_mkuffd_wp(newpte); else if (uffd_wp_resolve) newpte = pte_swp_clear_uffd_wp(newpte); if (!pte_same(pte, newpte)) set_huge_pte_at(mm, address, ptep, newpte); } else if (unlikely(is_pte_marker(pte))) { /* No other markers apply for now. */ WARN_ON_ONCE(!pte_marker_uffd_wp(pte)); if (uffd_wp_resolve) /* Safe to modify directly (non-present->none). */ huge_pte_clear(mm, address, ptep, psize); } else if (!huge_pte_none(pte)) { pte_t old_pte; unsigned int shift = huge_page_shift(hstate_vma(vma)); old_pte = huge_ptep_modify_prot_start(vma, address, ptep); pte = huge_pte_modify(old_pte, newprot); pte = arch_make_huge_pte(pte, shift, vma->vm_flags); if (uffd_wp) pte = huge_pte_mkuffd_wp(pte); else if (uffd_wp_resolve) pte = huge_pte_clear_uffd_wp(pte); huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte); pages++; } else { /* None pte */ if (unlikely(uffd_wp)) /* Safe to modify directly (none->non-present). 
*/ set_huge_pte_at(mm, address, ptep, make_pte_marker(PTE_MARKER_UFFD_WP)); } spin_unlock(ptl); } /* * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare * may have cleared our pud entry and done put_page on the page table: * once we release i_mmap_rwsem, another task can do the final put_page * and that page table be reused and filled with junk. If we actually * did unshare a page of pmds, flush the range corresponding to the pud. */ if (shared_pmd) flush_hugetlb_tlb_range(vma, range.start, range.end); else flush_hugetlb_tlb_range(vma, start, end); /* * No need to call mmu_notifier_arch_invalidate_secondary_tlbs() we are * downgrading page table protection not changing it to point to a new * page. * * See Documentation/mm/mmu_notifier.rst */ i_mmap_unlock_write(vma->vm_file->f_mapping); hugetlb_vma_unlock_write(vma); mmu_notifier_invalidate_range_end(&range); return pages > 0 ? (pages << h->order) : pages; } /* Return true if reservation was successful, false otherwise. */ bool hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags) { long chg = -1, add = -1; struct hstate *h = hstate_inode(inode); struct hugepage_subpool *spool = subpool_inode(inode); struct resv_map *resv_map; struct hugetlb_cgroup *h_cg = NULL; long gbl_reserve, regions_needed = 0; /* This should never happen */ if (from > to) { VM_WARN(1, "%s called with a negative range\n", __func__); return false; } /* * vma specific semaphore used for pmd sharing and fault/truncation * synchronization */ hugetlb_vma_lock_alloc(vma); /* * Only apply hugepage reservation if asked. At fault time, an * attempt will be made for VM_NORESERVE to allocate a page * without using reserves */ if (vm_flags & VM_NORESERVE) return true; /* * Shared mappings base their reservation on the number of pages that * are already allocated on behalf of the file. Private mappings need * to reserve the full area even if read-only as mprotect() may be * called to make the mapping read-write. Assume !vma is a shm mapping */ if (!vma || vma->vm_flags & VM_MAYSHARE) { /* * resv_map can not be NULL as hugetlb_reserve_pages is only * called for inodes for which resv_maps were created (see * hugetlbfs_get_inode). */ resv_map = inode_resv_map(inode); chg = region_chg(resv_map, from, to, &regions_needed); } else { /* Private mapping. */ resv_map = resv_map_alloc(); if (!resv_map) goto out_err; chg = to - from; set_vma_resv_map(vma, resv_map); set_vma_resv_flags(vma, HPAGE_RESV_OWNER); } if (chg < 0) goto out_err; if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h), chg * pages_per_huge_page(h), &h_cg) < 0) goto out_err; if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) { /* For private mappings, the hugetlb_cgroup uncharge info hangs * of the resv_map. */ resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h); } /* * There must be enough pages in the subpool for the mapping. If * the subpool has a minimum size, there may be some global * reservations already in place (gbl_reserve). */ gbl_reserve = hugepage_subpool_get_pages(spool, chg); if (gbl_reserve < 0) goto out_uncharge_cgroup; /* * Check enough hugepages are available for the reservation. * Hand the pages back to the subpool if there are not */ if (hugetlb_acct_memory(h, gbl_reserve) < 0) goto out_put_pages; /* * Account for the reservations made. Shared mappings record regions * that have reservations as they are shared by multiple VMAs. 
* When the last VMA disappears, the region map says how much * the reservation was and the page cache tells how much of * the reservation was consumed. Private mappings are per-VMA and * only the consumed reservations are tracked. When the VMA * disappears, the original reservation is the VMA size and the * consumed reservations are stored in the map. Hence, nothing * else has to be done for private mappings here */ if (!vma || vma->vm_flags & VM_MAYSHARE) { add = region_add(resv_map, from, to, regions_needed, h, h_cg); if (unlikely(add < 0)) { hugetlb_acct_memory(h, -gbl_reserve); goto out_put_pages; } else if (unlikely(chg > add)) { /* * pages in this range were added to the reserve * map between region_chg and region_add. This * indicates a race with alloc_hugetlb_folio. Adjust * the subpool and reserve counts modified above * based on the difference. */ long rsv_adjust; /* * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the * reference to h_cg->css. See comment below for detail. */ hugetlb_cgroup_uncharge_cgroup_rsvd( hstate_index(h), (chg - add) * pages_per_huge_page(h), h_cg); rsv_adjust = hugepage_subpool_put_pages(spool, chg - add); hugetlb_acct_memory(h, -rsv_adjust); } else if (h_cg) { /* * The file_regions will hold their own reference to * h_cg->css. So we should release the reference held * via hugetlb_cgroup_charge_cgroup_rsvd() when we are * done. */ hugetlb_cgroup_put_rsvd_cgroup(h_cg); } } return true; out_put_pages: /* put back original number of pages, chg */ (void)hugepage_subpool_put_pages(spool, chg); out_uncharge_cgroup: hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), chg * pages_per_huge_page(h), h_cg); out_err: hugetlb_vma_lock_free(vma); if (!vma || vma->vm_flags & VM_MAYSHARE) /* Only call region_abort if the region_chg succeeded but the * region_add failed or didn't run. */ if (chg >= 0 && add < 0) region_abort(resv_map, from, to, regions_needed); if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) kref_put(&resv_map->refs, resv_map_release); return false; } long hugetlb_unreserve_pages(struct inode *inode, long start, long end, long freed) { struct hstate *h = hstate_inode(inode); struct resv_map *resv_map = inode_resv_map(inode); long chg = 0; struct hugepage_subpool *spool = subpool_inode(inode); long gbl_reserve; /* * Since this routine can be called in the evict inode path for all * hugetlbfs inodes, resv_map could be NULL. */ if (resv_map) { chg = region_del(resv_map, start, end); /* * region_del() can fail in the rare case where a region * must be split and another region descriptor can not be * allocated. If end == LONG_MAX, it will not fail. */ if (chg < 0) return chg; } spin_lock(&inode->i_lock); inode->i_blocks -= (blocks_per_huge_page(h) * freed); spin_unlock(&inode->i_lock); /* * If the subpool has a minimum size, the number of global * reservations to be released may be adjusted. * * Note that !resv_map implies freed == 0. So (chg - freed) * won't go negative. 
*/ gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); hugetlb_acct_memory(h, -gbl_reserve); return 0; } #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE static unsigned long page_table_shareable(struct vm_area_struct *svma, struct vm_area_struct *vma, unsigned long addr, pgoff_t idx) { unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + svma->vm_start; unsigned long sbase = saddr & PUD_MASK; unsigned long s_end = sbase + PUD_SIZE; /* Allow segments to share if only one is marked locked */ unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK; unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK; /* * match the virtual addresses, permission and the alignment of the * page table page. * * Also, vma_lock (vm_private_data) is required for sharing. */ if (pmd_index(addr) != pmd_index(saddr) || vm_flags != svm_flags || !range_in_vma(svma, sbase, s_end) || !svma->vm_private_data) return 0; return saddr; } bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) { unsigned long start = addr & PUD_MASK; unsigned long end = start + PUD_SIZE; #ifdef CONFIG_USERFAULTFD if (uffd_disable_huge_pmd_share(vma)) return false; #endif /* * check on proper vm_flags and page table alignment */ if (!(vma->vm_flags & VM_MAYSHARE)) return false; if (!vma->vm_private_data) /* vma lock required for sharing */ return false; if (!range_in_vma(vma, start, end)) return false; return true; } /* * Determine if start,end range within vma could be mapped by shared pmd. * If yes, adjust start and end to cover range associated with possible * shared pmd mappings. */ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) { unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE), v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); /* * vma needs to span at least one aligned PUD size, and the range * must be at least partially within in. */ if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) || (*end <= v_start) || (*start >= v_end)) return; /* Extend the range to be PUD aligned for a worst case scenario */ if (*start > v_start) *start = ALIGN_DOWN(*start, PUD_SIZE); if (*end < v_end) *end = ALIGN(*end, PUD_SIZE); } /* * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() * and returns the corresponding pte. While this is not necessary for the * !shared pmd case because we can allocate the pmd later as well, it makes the * code much cleaner. pmd allocation is essential for the shared case because * pud has to be populated inside the same i_mmap_rwsem section - otherwise * racing tasks could either miss the sharing (see huge_pte_offset) or select a * bad pmd for sharing. 
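 * As an illustrative example: two processes mapping the same hugetlbfs file
 * over a PUD_SIZE-aligned range can end up with both of their PUD entries
 * pointing at the same PMD page, which huge_pmd_share() below finds through
 * the i_mmap interval tree and pins with an extra page reference.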
*/ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pud_t *pud) { struct address_space *mapping = vma->vm_file->f_mapping; pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; struct vm_area_struct *svma; unsigned long saddr; pte_t *spte = NULL; pte_t *pte; i_mmap_lock_read(mapping); vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { if (svma == vma) continue; saddr = page_table_shareable(svma, vma, addr, idx); if (saddr) { spte = hugetlb_walk(svma, saddr, vma_mmu_pagesize(svma)); if (spte) { get_page(virt_to_page(spte)); break; } } } if (!spte) goto out; spin_lock(&mm->page_table_lock); if (pud_none(*pud)) { pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK)); mm_inc_nr_pmds(mm); } else { put_page(virt_to_page(spte)); } spin_unlock(&mm->page_table_lock); out: pte = (pte_t *)pmd_alloc(mm, pud, addr); i_mmap_unlock_read(mapping); return pte; } /* * unmap huge page backed by shared pte. * * Hugetlb pte page is ref counted at the time of mapping. If pte is shared * indicated by page_count > 1, unmap is achieved by clearing pud and * decrementing the ref count. If count == 1, the pte page is not shared. * * Called with page table lock held. * * returns: 1 successfully unmapped a shared pte page * 0 the underlying pte page is not shared, or it is the last user */ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pgd_t *pgd = pgd_offset(mm, addr); p4d_t *p4d = p4d_offset(pgd, addr); pud_t *pud = pud_offset(p4d, addr); i_mmap_assert_write_locked(vma->vm_file->f_mapping); hugetlb_vma_assert_locked(vma); BUG_ON(page_count(virt_to_page(ptep)) == 0); if (page_count(virt_to_page(ptep)) == 1) return 0; pud_clear(pud); put_page(virt_to_page(ptep)); mm_dec_nr_pmds(mm); return 1; } #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pud_t *pud) { return NULL; } int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { return 0; } void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) { } bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) { return false; } #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pte_t *pte = NULL; pgd = pgd_offset(mm, addr); p4d = p4d_alloc(mm, pgd, addr); if (!p4d) return NULL; pud = pud_alloc(mm, p4d, addr); if (pud) { if (sz == PUD_SIZE) { pte = (pte_t *)pud; } else { BUG_ON(sz != PMD_SIZE); if (want_pmd_share(vma, addr) && pud_none(*pud)) pte = huge_pmd_share(mm, vma, addr, pud); else pte = (pte_t *)pmd_alloc(mm, pud, addr); } } if (pte) { pte_t pteval = ptep_get_lockless(pte); BUG_ON(pte_present(pteval) && !pte_huge(pteval)); } return pte; } /* * huge_pte_offset() - Walk the page table to resolve the hugepage * entry at address @addr * * Return: Pointer to page table entry (PUD or PMD) for * address @addr, or NULL if a !p*d_present() entry is encountered and the * size @sz doesn't match the hugepage size at this level of the page * table. 
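 * Note: the walk below is pgd -> p4d -> pud (returned directly when @sz is
 * PUD_SIZE) -> pmd (returned otherwise), with NULL returned when a required
 * upper-level entry is not present.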
*/ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pgd = pgd_offset(mm, addr); if (!pgd_present(*pgd)) return NULL; p4d = p4d_offset(pgd, addr); if (!p4d_present(*p4d)) return NULL; pud = pud_offset(p4d, addr); if (sz == PUD_SIZE) /* must be pud huge, non-present or none */ return (pte_t *)pud; if (!pud_present(*pud)) return NULL; /* must have a valid entry and size to go further */ pmd = pmd_offset(pud, addr); /* must be pmd huge, non-present or none */ return (pte_t *)pmd; } /* * Return a mask that can be used to update an address to the last huge * page in a page table page mapping size. Used to skip non-present * page table entries when linearly scanning address ranges. Architectures * with unique huge page to page table relationships can define their own * version of this routine. */ unsigned long hugetlb_mask_last_page(struct hstate *h) { unsigned long hp_size = huge_page_size(h); if (hp_size == PUD_SIZE) return P4D_SIZE - PUD_SIZE; else if (hp_size == PMD_SIZE) return PUD_SIZE - PMD_SIZE; else return 0UL; } #else /* See description above. Architectures can provide their own version. */ __weak unsigned long hugetlb_mask_last_page(struct hstate *h) { #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE if (huge_page_size(h) == PMD_SIZE) return PUD_SIZE - PMD_SIZE; #endif return 0UL; } #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ /* * These functions are overwritable if your architecture needs its own * behavior. */ bool isolate_hugetlb(struct folio *folio, struct list_head *list) { bool ret = true; spin_lock_irq(&hugetlb_lock); if (!folio_test_hugetlb(folio) || !folio_test_hugetlb_migratable(folio) || !folio_try_get(folio)) { ret = false; goto unlock; } folio_clear_hugetlb_migratable(folio); list_move_tail(&folio->lru, list); unlock: spin_unlock_irq(&hugetlb_lock); return ret; } int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison) { int ret = 0; *hugetlb = false; spin_lock_irq(&hugetlb_lock); if (folio_test_hugetlb(folio)) { *hugetlb = true; if (folio_test_hugetlb_freed(folio)) ret = 0; else if (folio_test_hugetlb_migratable(folio) || unpoison) ret = folio_try_get(folio); else ret = -EBUSY; } spin_unlock_irq(&hugetlb_lock); return ret; } int get_huge_page_for_hwpoison(unsigned long pfn, int flags, bool *migratable_cleared) { int ret; spin_lock_irq(&hugetlb_lock); ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared); spin_unlock_irq(&hugetlb_lock); return ret; } void folio_putback_active_hugetlb(struct folio *folio) { spin_lock_irq(&hugetlb_lock); folio_set_hugetlb_migratable(folio); list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist); spin_unlock_irq(&hugetlb_lock); folio_put(folio); } void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason) { struct hstate *h = folio_hstate(old_folio); hugetlb_cgroup_migrate(old_folio, new_folio); set_page_owner_migrate_reason(&new_folio->page, reason); /* * transfer temporary state of the new hugetlb folio. This is * reverse to other transitions because the newpage is going to * be final while the old one will be freed so it takes over * the temporary status. * * Also note that we have to transfer the per-node surplus state * here as well otherwise the global surplus count will not match * the per-node's. 
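 * For example: if a hugetlb folio is migrated from node 0 to node 1, any
 * surplus accounting for node 0 is moved over to node 1 under hugetlb_lock
 * below, so the per-node counters keep adding up to the global surplus count.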
*/ if (folio_test_hugetlb_temporary(new_folio)) { int old_nid = folio_nid(old_folio); int new_nid = folio_nid(new_folio); folio_set_hugetlb_temporary(old_folio); folio_clear_hugetlb_temporary(new_folio); /* * There is no need to transfer the per-node surplus state * when we do not cross the node. */ if (new_nid == old_nid) return; spin_lock_irq(&hugetlb_lock); if (h->surplus_huge_pages_node[old_nid]) { h->surplus_huge_pages_node[old_nid]--; h->surplus_huge_pages_node[new_nid]++; } spin_unlock_irq(&hugetlb_lock); } } static void hugetlb_unshare_pmds(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct hstate *h = hstate_vma(vma); unsigned long sz = huge_page_size(h); struct mm_struct *mm = vma->vm_mm; struct mmu_notifier_range range; unsigned long address; spinlock_t *ptl; pte_t *ptep; if (!(vma->vm_flags & VM_MAYSHARE)) return; if (start >= end) return; flush_cache_range(vma, start, end); /* * No need to call adjust_range_if_pmd_sharing_possible(), because * we have already done the PUD_SIZE alignment. */ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, start, end); mmu_notifier_invalidate_range_start(&range); hugetlb_vma_lock_write(vma); i_mmap_lock_write(vma->vm_file->f_mapping); for (address = start; address < end; address += PUD_SIZE) { ptep = hugetlb_walk(vma, address, sz); if (!ptep) continue; ptl = huge_pte_lock(h, mm, ptep); huge_pmd_unshare(mm, vma, address, ptep); spin_unlock(ptl); } flush_hugetlb_tlb_range(vma, start, end); i_mmap_unlock_write(vma->vm_file->f_mapping); hugetlb_vma_unlock_write(vma); /* * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see * Documentation/mm/mmu_notifier.rst. */ mmu_notifier_invalidate_range_end(&range); } /* * This function will unconditionally remove all the shared pmd pgtable entries * within the specific vma for a hugetlbfs memory range. */ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE), ALIGN_DOWN(vma->vm_end, PUD_SIZE)); } #ifdef CONFIG_CMA static bool cma_reserve_called __initdata; static int __init cmdline_parse_hugetlb_cma(char *p) { int nid, count = 0; unsigned long tmp; char *s = p; while (*s) { if (sscanf(s, "%lu%n", &tmp, &count) != 1) break; if (s[count] == ':') { if (tmp >= MAX_NUMNODES) break; nid = array_index_nospec(tmp, MAX_NUMNODES); s += count + 1; tmp = memparse(s, &s); hugetlb_cma_size_in_node[nid] = tmp; hugetlb_cma_size += tmp; /* * Skip the separator if have one, otherwise * break the parsing. */ if (*s == ',') s++; else break; } else { hugetlb_cma_size = memparse(p, &p); break; } } return 0; } early_param("hugetlb_cma", cmdline_parse_hugetlb_cma); void __init hugetlb_cma_reserve(int order) { unsigned long size, reserved, per_node; bool node_specific_cma_alloc = false; int nid; cma_reserve_called = true; if (!hugetlb_cma_size) return; for (nid = 0; nid < MAX_NUMNODES; nid++) { if (hugetlb_cma_size_in_node[nid] == 0) continue; if (!node_online(nid)) { pr_warn("hugetlb_cma: invalid node %d specified\n", nid); hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; hugetlb_cma_size_in_node[nid] = 0; continue; } if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) { pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n", nid, (PAGE_SIZE << order) / SZ_1M); hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; hugetlb_cma_size_in_node[nid] = 0; } else { node_specific_cma_alloc = true; } } /* Validate the CMA size again in case some invalid nodes specified. 
*/ if (!hugetlb_cma_size) return; if (hugetlb_cma_size < (PAGE_SIZE << order)) { pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n", (PAGE_SIZE << order) / SZ_1M); hugetlb_cma_size = 0; return; } if (!node_specific_cma_alloc) { /* * If 3 GB area is requested on a machine with 4 numa nodes, * let's allocate 1 GB on first three nodes and ignore the last one. */ per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes); pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n", hugetlb_cma_size / SZ_1M, per_node / SZ_1M); } reserved = 0; for_each_online_node(nid) { int res; char name[CMA_MAX_NAME]; if (node_specific_cma_alloc) { if (hugetlb_cma_size_in_node[nid] == 0) continue; size = hugetlb_cma_size_in_node[nid]; } else { size = min(per_node, hugetlb_cma_size - reserved); } size = round_up(size, PAGE_SIZE << order); snprintf(name, sizeof(name), "hugetlb%d", nid); /* * Note that 'order per bit' is based on smallest size that * may be returned to CMA allocator in the case of * huge page demotion. */ res = cma_declare_contiguous_nid(0, size, 0, PAGE_SIZE << HUGETLB_PAGE_ORDER, 0, false, name, &hugetlb_cma[nid], nid); if (res) { pr_warn("hugetlb_cma: reservation failed: err %d, node %d", res, nid); continue; } reserved += size; pr_info("hugetlb_cma: reserved %lu MiB on node %d\n", size / SZ_1M, nid); if (reserved >= hugetlb_cma_size) break; } if (!reserved) /* * hugetlb_cma_size is used to determine if allocations from * cma are possible. Set to zero if no cma regions are set up. */ hugetlb_cma_size = 0; } static void __init hugetlb_cma_check(void) { if (!hugetlb_cma_size || cma_reserve_called) return; pr_warn("hugetlb_cma: the option isn't supported by current arch\n"); } #endif /* CONFIG_CMA */
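/*
 * Illustrative userspace sketch (not part of mm/hugetlb.c): touching an
 * anonymous MAP_HUGETLB mapping is one way to reach hugetlb_fault() above.
 * Assumes the default hugepage size is 2 MiB and that hugepages have been
 * reserved beforehand (e.g. via /proc/sys/vm/nr_hugepages).
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL << 20;		/* one default-sized hugepage */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	memset(p, 0x5a, len);		/* first write faults in a huge page */
	munmap(p, len);
	return 0;
}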
linux-master
mm/hugetlb.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/mm/swapfile.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * Swap reorganised 29.12.95, Stephen Tweedie */ #include <linux/blkdev.h> #include <linux/mm.h> #include <linux/sched/mm.h> #include <linux/sched/task.h> #include <linux/hugetlb.h> #include <linux/mman.h> #include <linux/slab.h> #include <linux/kernel_stat.h> #include <linux/swap.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/namei.h> #include <linux/shmem_fs.h> #include <linux/blk-cgroup.h> #include <linux/random.h> #include <linux/writeback.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/security.h> #include <linux/backing-dev.h> #include <linux/mutex.h> #include <linux/capability.h> #include <linux/syscalls.h> #include <linux/memcontrol.h> #include <linux/poll.h> #include <linux/oom.h> #include <linux/swapfile.h> #include <linux/export.h> #include <linux/swap_slots.h> #include <linux/sort.h> #include <linux/completion.h> #include <linux/suspend.h> #include <linux/zswap.h> #include <asm/tlbflush.h> #include <linux/swapops.h> #include <linux/swap_cgroup.h> #include "internal.h" #include "swap.h" static bool swap_count_continued(struct swap_info_struct *, pgoff_t, unsigned char); static void free_swap_count_continuations(struct swap_info_struct *); static DEFINE_SPINLOCK(swap_lock); static unsigned int nr_swapfiles; atomic_long_t nr_swap_pages; /* * Some modules use swappable objects and may try to swap them out under * memory pressure (via the shrinker). Before doing so, they may wish to * check to see if any swap space is available. */ EXPORT_SYMBOL_GPL(nr_swap_pages); /* protected with swap_lock. reading in vm_swap_full() doesn't need lock */ long total_swap_pages; static int least_priority = -1; unsigned long swapfile_maximum_size; #ifdef CONFIG_MIGRATION bool swap_migration_ad_supported; #endif /* CONFIG_MIGRATION */ static const char Bad_file[] = "Bad swap file entry "; static const char Unused_file[] = "Unused swap file entry "; static const char Bad_offset[] = "Bad swap offset entry "; static const char Unused_offset[] = "Unused swap offset entry "; /* * all active swap_info_structs * protected with swap_lock, and ordered by priority. */ static PLIST_HEAD(swap_active_head); /* * all available (active, not full) swap_info_structs * protected with swap_avail_lock, ordered by priority. * This is used by folio_alloc_swap() instead of swap_active_head * because swap_active_head includes all swap_info_structs, * but folio_alloc_swap() doesn't need to look at full ones. * This uses its own lock instead of swap_lock because when a * swap_info_struct changes between not-full/full, it needs to * add/remove itself to/from this list, but the swap_info_struct->lock * is held and the locking order requires swap_lock to be taken * before any swap_info_struct->lock. 
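 * The resulting ordering is swap_lock -> swap_info_struct->lock ->
 * swap_avail_lock: del_from_avail_list() and add_to_avail_list() below take
 * swap_avail_lock while the device's lock is already held.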
*/ static struct plist_head *swap_avail_heads; static DEFINE_SPINLOCK(swap_avail_lock); static struct swap_info_struct *swap_info[MAX_SWAPFILES]; static DEFINE_MUTEX(swapon_mutex); static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait); /* Activity counter to indicate that a swapon or swapoff has occurred */ static atomic_t proc_poll_event = ATOMIC_INIT(0); atomic_t nr_rotate_swap = ATOMIC_INIT(0); static struct swap_info_struct *swap_type_to_swap_info(int type) { if (type >= MAX_SWAPFILES) return NULL; return READ_ONCE(swap_info[type]); /* rcu_dereference() */ } static inline unsigned char swap_count(unsigned char ent) { return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */ } /* Reclaim the swap entry anyway if possible */ #define TTRS_ANYWAY 0x1 /* * Reclaim the swap entry if there are no more mappings of the * corresponding page */ #define TTRS_UNMAPPED 0x2 /* Reclaim the swap entry if swap is getting full*/ #define TTRS_FULL 0x4 /* returns 1 if swap entry is freed */ static int __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset, unsigned long flags) { swp_entry_t entry = swp_entry(si->type, offset); struct folio *folio; int ret = 0; folio = filemap_get_folio(swap_address_space(entry), offset); if (IS_ERR(folio)) return 0; /* * When this function is called from scan_swap_map_slots() and it's * called by vmscan.c at reclaiming folios. So we hold a folio lock * here. We have to use trylock for avoiding deadlock. This is a special * case and you should use folio_free_swap() with explicit folio_lock() * in usual operations. */ if (folio_trylock(folio)) { if ((flags & TTRS_ANYWAY) || ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) || ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio))) ret = folio_free_swap(folio); folio_unlock(folio); } folio_put(folio); return ret; } static inline struct swap_extent *first_se(struct swap_info_struct *sis) { struct rb_node *rb = rb_first(&sis->swap_extent_root); return rb_entry(rb, struct swap_extent, rb_node); } static inline struct swap_extent *next_se(struct swap_extent *se) { struct rb_node *rb = rb_next(&se->rb_node); return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL; } /* * swapon tell device that all the old swap contents can be discarded, * to allow the swap device to optimize its wear-levelling. */ static int discard_swap(struct swap_info_struct *si) { struct swap_extent *se; sector_t start_block; sector_t nr_blocks; int err = 0; /* Do not discard the swap header page! 
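 * (the first page of the first extent holds the swap signature, which is why
 * the first discard below starts at se->start_block + 1 and covers one page
 * less than the extent)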
*/ se = first_se(si); start_block = (se->start_block + 1) << (PAGE_SHIFT - 9); nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); if (nr_blocks) { err = blkdev_issue_discard(si->bdev, start_block, nr_blocks, GFP_KERNEL); if (err) return err; cond_resched(); } for (se = next_se(se); se; se = next_se(se)) { start_block = se->start_block << (PAGE_SHIFT - 9); nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); err = blkdev_issue_discard(si->bdev, start_block, nr_blocks, GFP_KERNEL); if (err) break; cond_resched(); } return err; /* That will often be -EOPNOTSUPP */ } static struct swap_extent * offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset) { struct swap_extent *se; struct rb_node *rb; rb = sis->swap_extent_root.rb_node; while (rb) { se = rb_entry(rb, struct swap_extent, rb_node); if (offset < se->start_page) rb = rb->rb_left; else if (offset >= se->start_page + se->nr_pages) rb = rb->rb_right; else return se; } /* It *must* be present */ BUG(); } sector_t swap_page_sector(struct page *page) { struct swap_info_struct *sis = page_swap_info(page); struct swap_extent *se; sector_t sector; pgoff_t offset; offset = __page_file_index(page); se = offset_to_swap_extent(sis, offset); sector = se->start_block + (offset - se->start_page); return sector << (PAGE_SHIFT - 9); } /* * swap allocation tell device that a cluster of swap can now be discarded, * to allow the swap device to optimize its wear-levelling. */ static void discard_swap_cluster(struct swap_info_struct *si, pgoff_t start_page, pgoff_t nr_pages) { struct swap_extent *se = offset_to_swap_extent(si, start_page); while (nr_pages) { pgoff_t offset = start_page - se->start_page; sector_t start_block = se->start_block + offset; sector_t nr_blocks = se->nr_pages - offset; if (nr_blocks > nr_pages) nr_blocks = nr_pages; start_page += nr_blocks; nr_pages -= nr_blocks; start_block <<= PAGE_SHIFT - 9; nr_blocks <<= PAGE_SHIFT - 9; if (blkdev_issue_discard(si->bdev, start_block, nr_blocks, GFP_NOIO)) break; se = next_se(se); } } #ifdef CONFIG_THP_SWAP #define SWAPFILE_CLUSTER HPAGE_PMD_NR #define swap_entry_size(size) (size) #else #define SWAPFILE_CLUSTER 256 /* * Define swap_entry_size() as constant to let compiler to optimize * out some code if !CONFIG_THP_SWAP */ #define swap_entry_size(size) 1 #endif #define LATENCY_LIMIT 256 static inline void cluster_set_flag(struct swap_cluster_info *info, unsigned int flag) { info->flags = flag; } static inline unsigned int cluster_count(struct swap_cluster_info *info) { return info->data; } static inline void cluster_set_count(struct swap_cluster_info *info, unsigned int c) { info->data = c; } static inline void cluster_set_count_flag(struct swap_cluster_info *info, unsigned int c, unsigned int f) { info->flags = f; info->data = c; } static inline unsigned int cluster_next(struct swap_cluster_info *info) { return info->data; } static inline void cluster_set_next(struct swap_cluster_info *info, unsigned int n) { info->data = n; } static inline void cluster_set_next_flag(struct swap_cluster_info *info, unsigned int n, unsigned int f) { info->flags = f; info->data = n; } static inline bool cluster_is_free(struct swap_cluster_info *info) { return info->flags & CLUSTER_FLAG_FREE; } static inline bool cluster_is_null(struct swap_cluster_info *info) { return info->flags & CLUSTER_FLAG_NEXT_NULL; } static inline void cluster_set_null(struct swap_cluster_info *info) { info->flags = CLUSTER_FLAG_NEXT_NULL; info->data = 0; } static inline bool cluster_is_huge(struct 
swap_cluster_info *info) { if (IS_ENABLED(CONFIG_THP_SWAP)) return info->flags & CLUSTER_FLAG_HUGE; return false; } static inline void cluster_clear_huge(struct swap_cluster_info *info) { info->flags &= ~CLUSTER_FLAG_HUGE; } static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si, unsigned long offset) { struct swap_cluster_info *ci; ci = si->cluster_info; if (ci) { ci += offset / SWAPFILE_CLUSTER; spin_lock(&ci->lock); } return ci; } static inline void unlock_cluster(struct swap_cluster_info *ci) { if (ci) spin_unlock(&ci->lock); } /* * Determine the locking method in use for this device. Return * swap_cluster_info if SSD-style cluster-based locking is in place. */ static inline struct swap_cluster_info *lock_cluster_or_swap_info( struct swap_info_struct *si, unsigned long offset) { struct swap_cluster_info *ci; /* Try to use fine-grained SSD-style locking if available: */ ci = lock_cluster(si, offset); /* Otherwise, fall back to traditional, coarse locking: */ if (!ci) spin_lock(&si->lock); return ci; } static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si, struct swap_cluster_info *ci) { if (ci) unlock_cluster(ci); else spin_unlock(&si->lock); } static inline bool cluster_list_empty(struct swap_cluster_list *list) { return cluster_is_null(&list->head); } static inline unsigned int cluster_list_first(struct swap_cluster_list *list) { return cluster_next(&list->head); } static void cluster_list_init(struct swap_cluster_list *list) { cluster_set_null(&list->head); cluster_set_null(&list->tail); } static void cluster_list_add_tail(struct swap_cluster_list *list, struct swap_cluster_info *ci, unsigned int idx) { if (cluster_list_empty(list)) { cluster_set_next_flag(&list->head, idx, 0); cluster_set_next_flag(&list->tail, idx, 0); } else { struct swap_cluster_info *ci_tail; unsigned int tail = cluster_next(&list->tail); /* * Nested cluster lock, but both cluster locks are * only acquired when we held swap_info_struct->lock */ ci_tail = ci + tail; spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING); cluster_set_next(ci_tail, idx); spin_unlock(&ci_tail->lock); cluster_set_next_flag(&list->tail, idx, 0); } } static unsigned int cluster_list_del_first(struct swap_cluster_list *list, struct swap_cluster_info *ci) { unsigned int idx; idx = cluster_next(&list->head); if (cluster_next(&list->tail) == idx) { cluster_set_null(&list->head); cluster_set_null(&list->tail); } else cluster_set_next_flag(&list->head, cluster_next(&ci[idx]), 0); return idx; } /* Add a cluster to discard list and schedule it to do discard */ static void swap_cluster_schedule_discard(struct swap_info_struct *si, unsigned int idx) { /* * If scan_swap_map_slots() can't find a free cluster, it will check * si->swap_map directly. To make sure the discarding cluster isn't * taken by scan_swap_map_slots(), mark the swap entries bad (occupied). * It will be cleared after discard */ memset(si->swap_map + idx * SWAPFILE_CLUSTER, SWAP_MAP_BAD, SWAPFILE_CLUSTER); cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx); schedule_work(&si->discard_work); } static void __free_cluster(struct swap_info_struct *si, unsigned long idx) { struct swap_cluster_info *ci = si->cluster_info; cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE); cluster_list_add_tail(&si->free_clusters, ci, idx); } /* * Doing discard actually. After a cluster discard is finished, the cluster * will be added to free cluster list. caller should hold si->lock. 
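 * (si->lock is dropped around the blocking blkdev_issue_discard() call and
 * retaken before each discarded cluster is put back on the free list)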
*/ static void swap_do_scheduled_discard(struct swap_info_struct *si) { struct swap_cluster_info *info, *ci; unsigned int idx; info = si->cluster_info; while (!cluster_list_empty(&si->discard_clusters)) { idx = cluster_list_del_first(&si->discard_clusters, info); spin_unlock(&si->lock); discard_swap_cluster(si, idx * SWAPFILE_CLUSTER, SWAPFILE_CLUSTER); spin_lock(&si->lock); ci = lock_cluster(si, idx * SWAPFILE_CLUSTER); __free_cluster(si, idx); memset(si->swap_map + idx * SWAPFILE_CLUSTER, 0, SWAPFILE_CLUSTER); unlock_cluster(ci); } } static void swap_discard_work(struct work_struct *work) { struct swap_info_struct *si; si = container_of(work, struct swap_info_struct, discard_work); spin_lock(&si->lock); swap_do_scheduled_discard(si); spin_unlock(&si->lock); } static void swap_users_ref_free(struct percpu_ref *ref) { struct swap_info_struct *si; si = container_of(ref, struct swap_info_struct, users); complete(&si->comp); } static void alloc_cluster(struct swap_info_struct *si, unsigned long idx) { struct swap_cluster_info *ci = si->cluster_info; VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx); cluster_list_del_first(&si->free_clusters, ci); cluster_set_count_flag(ci + idx, 0, 0); } static void free_cluster(struct swap_info_struct *si, unsigned long idx) { struct swap_cluster_info *ci = si->cluster_info + idx; VM_BUG_ON(cluster_count(ci) != 0); /* * If the swap is discardable, prepare discard the cluster * instead of free it immediately. The cluster will be freed * after discard. */ if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) == (SWP_WRITEOK | SWP_PAGE_DISCARD)) { swap_cluster_schedule_discard(si, idx); return; } __free_cluster(si, idx); } /* * The cluster corresponding to page_nr will be used. The cluster will be * removed from free cluster list and its usage counter will be increased. */ static void inc_cluster_info_page(struct swap_info_struct *p, struct swap_cluster_info *cluster_info, unsigned long page_nr) { unsigned long idx = page_nr / SWAPFILE_CLUSTER; if (!cluster_info) return; if (cluster_is_free(&cluster_info[idx])) alloc_cluster(p, idx); VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER); cluster_set_count(&cluster_info[idx], cluster_count(&cluster_info[idx]) + 1); } /* * The cluster corresponding to page_nr decreases one usage. If the usage * counter becomes 0, which means no page in the cluster is in using, we can * optionally discard the cluster and add it to free cluster list. */ static void dec_cluster_info_page(struct swap_info_struct *p, struct swap_cluster_info *cluster_info, unsigned long page_nr) { unsigned long idx = page_nr / SWAPFILE_CLUSTER; if (!cluster_info) return; VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0); cluster_set_count(&cluster_info[idx], cluster_count(&cluster_info[idx]) - 1); if (cluster_count(&cluster_info[idx]) == 0) free_cluster(p, idx); } /* * It's possible scan_swap_map_slots() uses a free cluster in the middle of free * cluster list. Avoiding such abuse to avoid list corruption. 
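 * (scan_swap_map_ssd_cluster_conflict() below detects that case and resets
 * the per-cpu cluster index so allocation restarts from the free list head)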
*/ static bool scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si, unsigned long offset) { struct percpu_cluster *percpu_cluster; bool conflict; offset /= SWAPFILE_CLUSTER; conflict = !cluster_list_empty(&si->free_clusters) && offset != cluster_list_first(&si->free_clusters) && cluster_is_free(&si->cluster_info[offset]); if (!conflict) return false; percpu_cluster = this_cpu_ptr(si->percpu_cluster); cluster_set_null(&percpu_cluster->index); return true; } /* * Try to get a swap entry from current cpu's swap entry pool (a cluster). This * might involve allocating a new cluster for current CPU too. */ static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si, unsigned long *offset, unsigned long *scan_base) { struct percpu_cluster *cluster; struct swap_cluster_info *ci; unsigned long tmp, max; new_cluster: cluster = this_cpu_ptr(si->percpu_cluster); if (cluster_is_null(&cluster->index)) { if (!cluster_list_empty(&si->free_clusters)) { cluster->index = si->free_clusters.head; cluster->next = cluster_next(&cluster->index) * SWAPFILE_CLUSTER; } else if (!cluster_list_empty(&si->discard_clusters)) { /* * we don't have free cluster but have some clusters in * discarding, do discard now and reclaim them, then * reread cluster_next_cpu since we dropped si->lock */ swap_do_scheduled_discard(si); *scan_base = this_cpu_read(*si->cluster_next_cpu); *offset = *scan_base; goto new_cluster; } else return false; } /* * Other CPUs can use our cluster if they can't find a free cluster, * check if there is still free entry in the cluster */ tmp = cluster->next; max = min_t(unsigned long, si->max, (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER); if (tmp < max) { ci = lock_cluster(si, tmp); while (tmp < max) { if (!si->swap_map[tmp]) break; tmp++; } unlock_cluster(ci); } if (tmp >= max) { cluster_set_null(&cluster->index); goto new_cluster; } cluster->next = tmp + 1; *offset = tmp; *scan_base = tmp; return true; } static void __del_from_avail_list(struct swap_info_struct *p) { int nid; assert_spin_locked(&p->lock); for_each_node(nid) plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]); } static void del_from_avail_list(struct swap_info_struct *p) { spin_lock(&swap_avail_lock); __del_from_avail_list(p); spin_unlock(&swap_avail_lock); } static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset, unsigned int nr_entries) { unsigned int end = offset + nr_entries - 1; if (offset == si->lowest_bit) si->lowest_bit += nr_entries; if (end == si->highest_bit) WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries); WRITE_ONCE(si->inuse_pages, si->inuse_pages + nr_entries); if (si->inuse_pages == si->pages) { si->lowest_bit = si->max; si->highest_bit = 0; del_from_avail_list(si); } } static void add_to_avail_list(struct swap_info_struct *p) { int nid; spin_lock(&swap_avail_lock); for_each_node(nid) plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]); spin_unlock(&swap_avail_lock); } static void swap_range_free(struct swap_info_struct *si, unsigned long offset, unsigned int nr_entries) { unsigned long begin = offset; unsigned long end = offset + nr_entries - 1; void (*swap_slot_free_notify)(struct block_device *, unsigned long); if (offset < si->lowest_bit) si->lowest_bit = offset; if (end > si->highest_bit) { bool was_full = !si->highest_bit; WRITE_ONCE(si->highest_bit, end); if (was_full && (si->flags & SWP_WRITEOK)) add_to_avail_list(si); } atomic_long_add(nr_entries, &nr_swap_pages); WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries); if (si->flags & 
SWP_BLKDEV) swap_slot_free_notify = si->bdev->bd_disk->fops->swap_slot_free_notify; else swap_slot_free_notify = NULL; while (offset <= end) { arch_swap_invalidate_page(si->type, offset); zswap_invalidate(si->type, offset); if (swap_slot_free_notify) swap_slot_free_notify(si->bdev, offset); offset++; } clear_shadow_from_swap_cache(si->type, begin, end); } static void set_cluster_next(struct swap_info_struct *si, unsigned long next) { unsigned long prev; if (!(si->flags & SWP_SOLIDSTATE)) { si->cluster_next = next; return; } prev = this_cpu_read(*si->cluster_next_cpu); /* * Cross the swap address space size aligned trunk, choose * another trunk randomly to avoid lock contention on swap * address space if possible. */ if ((prev >> SWAP_ADDRESS_SPACE_SHIFT) != (next >> SWAP_ADDRESS_SPACE_SHIFT)) { /* No free swap slots available */ if (si->highest_bit <= si->lowest_bit) return; next = get_random_u32_inclusive(si->lowest_bit, si->highest_bit); next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES); next = max_t(unsigned int, next, si->lowest_bit); } this_cpu_write(*si->cluster_next_cpu, next); } static bool swap_offset_available_and_locked(struct swap_info_struct *si, unsigned long offset) { if (data_race(!si->swap_map[offset])) { spin_lock(&si->lock); return true; } if (vm_swap_full() && READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) { spin_lock(&si->lock); return true; } return false; } static int scan_swap_map_slots(struct swap_info_struct *si, unsigned char usage, int nr, swp_entry_t slots[]) { struct swap_cluster_info *ci; unsigned long offset; unsigned long scan_base; unsigned long last_in_cluster = 0; int latency_ration = LATENCY_LIMIT; int n_ret = 0; bool scanned_many = false; /* * We try to cluster swap pages by allocating them sequentially * in swap. Once we've allocated SWAPFILE_CLUSTER pages this * way, however, we resort to first-free allocation, starting * a new cluster. This prevents us from scattering swap pages * all over the entire swap partition, so that we reduce * overall disk seek times between swap pages. -- sct * But we do now try to find an empty cluster. -Andrea * And we let swap pages go all over an SSD partition. Hugh */ si->flags += SWP_SCANNING; /* * Use percpu scan base for SSD to reduce lock contention on * cluster and swap cache. For HDD, sequential access is more * important. */ if (si->flags & SWP_SOLIDSTATE) scan_base = this_cpu_read(*si->cluster_next_cpu); else scan_base = si->cluster_next; offset = scan_base; /* SSD algorithm */ if (si->cluster_info) { if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base)) goto scan; } else if (unlikely(!si->cluster_nr--)) { if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) { si->cluster_nr = SWAPFILE_CLUSTER - 1; goto checks; } spin_unlock(&si->lock); /* * If seek is expensive, start searching for new cluster from * start of partition, to minimize the span of allocated swap. * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info * case, just handled by scan_swap_map_try_ssd_cluster() above. 
*/ scan_base = offset = si->lowest_bit; last_in_cluster = offset + SWAPFILE_CLUSTER - 1; /* Locate the first empty (unaligned) cluster */ for (; last_in_cluster <= si->highest_bit; offset++) { if (si->swap_map[offset]) last_in_cluster = offset + SWAPFILE_CLUSTER; else if (offset == last_in_cluster) { spin_lock(&si->lock); offset -= SWAPFILE_CLUSTER - 1; si->cluster_next = offset; si->cluster_nr = SWAPFILE_CLUSTER - 1; goto checks; } if (unlikely(--latency_ration < 0)) { cond_resched(); latency_ration = LATENCY_LIMIT; } } offset = scan_base; spin_lock(&si->lock); si->cluster_nr = SWAPFILE_CLUSTER - 1; } checks: if (si->cluster_info) { while (scan_swap_map_ssd_cluster_conflict(si, offset)) { /* take a break if we already got some slots */ if (n_ret) goto done; if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base)) goto scan; } } if (!(si->flags & SWP_WRITEOK)) goto no_page; if (!si->highest_bit) goto no_page; if (offset > si->highest_bit) scan_base = offset = si->lowest_bit; ci = lock_cluster(si, offset); /* reuse swap entry of cache-only swap if not busy. */ if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { int swap_was_freed; unlock_cluster(ci); spin_unlock(&si->lock); swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); spin_lock(&si->lock); /* entry was freed successfully, try to use this again */ if (swap_was_freed) goto checks; goto scan; /* check next one */ } if (si->swap_map[offset]) { unlock_cluster(ci); if (!n_ret) goto scan; else goto done; } WRITE_ONCE(si->swap_map[offset], usage); inc_cluster_info_page(si, si->cluster_info, offset); unlock_cluster(ci); swap_range_alloc(si, offset, 1); slots[n_ret++] = swp_entry(si->type, offset); /* got enough slots or reach max slots? */ if ((n_ret == nr) || (offset >= si->highest_bit)) goto done; /* search for next available slot */ /* time to take a break? */ if (unlikely(--latency_ration < 0)) { if (n_ret) goto done; spin_unlock(&si->lock); cond_resched(); spin_lock(&si->lock); latency_ration = LATENCY_LIMIT; } /* try to get more slots in cluster */ if (si->cluster_info) { if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base)) goto checks; } else if (si->cluster_nr && !si->swap_map[++offset]) { /* non-ssd case, still more slots in cluster? */ --si->cluster_nr; goto checks; } /* * Even if there's no free clusters available (fragmented), * try to scan a little more quickly with lock held unless we * have scanned too many slots already. 
*/ if (!scanned_many) { unsigned long scan_limit; if (offset < scan_base) scan_limit = scan_base; else scan_limit = si->highest_bit; for (; offset <= scan_limit && --latency_ration > 0; offset++) { if (!si->swap_map[offset]) goto checks; } } done: set_cluster_next(si, offset + 1); si->flags -= SWP_SCANNING; return n_ret; scan: spin_unlock(&si->lock); while (++offset <= READ_ONCE(si->highest_bit)) { if (unlikely(--latency_ration < 0)) { cond_resched(); latency_ration = LATENCY_LIMIT; scanned_many = true; } if (swap_offset_available_and_locked(si, offset)) goto checks; } offset = si->lowest_bit; while (offset < scan_base) { if (unlikely(--latency_ration < 0)) { cond_resched(); latency_ration = LATENCY_LIMIT; scanned_many = true; } if (swap_offset_available_and_locked(si, offset)) goto checks; offset++; } spin_lock(&si->lock); no_page: si->flags -= SWP_SCANNING; return n_ret; } static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot) { unsigned long idx; struct swap_cluster_info *ci; unsigned long offset; /* * Should not even be attempting cluster allocations when huge * page swap is disabled. Warn and fail the allocation. */ if (!IS_ENABLED(CONFIG_THP_SWAP)) { VM_WARN_ON_ONCE(1); return 0; } if (cluster_list_empty(&si->free_clusters)) return 0; idx = cluster_list_first(&si->free_clusters); offset = idx * SWAPFILE_CLUSTER; ci = lock_cluster(si, offset); alloc_cluster(si, idx); cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE); memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER); unlock_cluster(ci); swap_range_alloc(si, offset, SWAPFILE_CLUSTER); *slot = swp_entry(si->type, offset); return 1; } static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx) { unsigned long offset = idx * SWAPFILE_CLUSTER; struct swap_cluster_info *ci; ci = lock_cluster(si, offset); memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER); cluster_set_count_flag(ci, 0, 0); free_cluster(si, idx); unlock_cluster(ci); swap_range_free(si, offset, SWAPFILE_CLUSTER); } int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size) { unsigned long size = swap_entry_size(entry_size); struct swap_info_struct *si, *next; long avail_pgs; int n_ret = 0; int node; /* Only single cluster request supported */ WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER); spin_lock(&swap_avail_lock); avail_pgs = atomic_long_read(&nr_swap_pages) / size; if (avail_pgs <= 0) { spin_unlock(&swap_avail_lock); goto noswap; } n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs); atomic_long_sub(n_goal * size, &nr_swap_pages); start_over: node = numa_node_id(); plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) { /* requeue si to after same-priority siblings */ plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]); spin_unlock(&swap_avail_lock); spin_lock(&si->lock); if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) { spin_lock(&swap_avail_lock); if (plist_node_empty(&si->avail_lists[node])) { spin_unlock(&si->lock); goto nextsi; } WARN(!si->highest_bit, "swap_info %d in list but !highest_bit\n", si->type); WARN(!(si->flags & SWP_WRITEOK), "swap_info %d in list but !SWP_WRITEOK\n", si->type); __del_from_avail_list(si); spin_unlock(&si->lock); goto nextsi; } if (size == SWAPFILE_CLUSTER) { if (si->flags & SWP_BLKDEV) n_ret = swap_alloc_cluster(si, swp_entries); } else n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, n_goal, swp_entries); spin_unlock(&si->lock); if (n_ret || size == SWAPFILE_CLUSTER) goto check_out; cond_resched(); 
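/* Reacquire swap_avail_lock before moving on to the next swap device. */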
spin_lock(&swap_avail_lock); nextsi: /* * if we got here, it's likely that si was almost full before, * and since scan_swap_map_slots() can drop the si->lock, * multiple callers probably all tried to get a page from the * same si and it filled up before we could get one; or, the si * filled up between us dropping swap_avail_lock and taking * si->lock. Since we dropped the swap_avail_lock, the * swap_avail_head list may have been modified; so if next is * still in the swap_avail_head list then try it, otherwise * start over if we have not gotten any slots. */ if (plist_node_empty(&next->avail_lists[node])) goto start_over; } spin_unlock(&swap_avail_lock); check_out: if (n_ret < n_goal) atomic_long_add((long)(n_goal - n_ret) * size, &nr_swap_pages); noswap: return n_ret; } static struct swap_info_struct *_swap_info_get(swp_entry_t entry) { struct swap_info_struct *p; unsigned long offset; if (!entry.val) goto out; p = swp_swap_info(entry); if (!p) goto bad_nofile; if (data_race(!(p->flags & SWP_USED))) goto bad_device; offset = swp_offset(entry); if (offset >= p->max) goto bad_offset; if (data_race(!p->swap_map[swp_offset(entry)])) goto bad_free; return p; bad_free: pr_err("%s: %s%08lx\n", __func__, Unused_offset, entry.val); goto out; bad_offset: pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val); goto out; bad_device: pr_err("%s: %s%08lx\n", __func__, Unused_file, entry.val); goto out; bad_nofile: pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val); out: return NULL; } static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry, struct swap_info_struct *q) { struct swap_info_struct *p; p = _swap_info_get(entry); if (p != q) { if (q != NULL) spin_unlock(&q->lock); if (p != NULL) spin_lock(&p->lock); } return p; } static unsigned char __swap_entry_free_locked(struct swap_info_struct *p, unsigned long offset, unsigned char usage) { unsigned char count; unsigned char has_cache; count = p->swap_map[offset]; has_cache = count & SWAP_HAS_CACHE; count &= ~SWAP_HAS_CACHE; if (usage == SWAP_HAS_CACHE) { VM_BUG_ON(!has_cache); has_cache = 0; } else if (count == SWAP_MAP_SHMEM) { /* * Or we could insist on shmem.c using a special * swap_shmem_free() and free_shmem_swap_and_cache()... */ count = 0; } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) { if (count == COUNT_CONTINUED) { if (swap_count_continued(p, offset, count)) count = SWAP_MAP_MAX | COUNT_CONTINUED; else count = SWAP_MAP_MAX; } else count--; } usage = count | has_cache; if (usage) WRITE_ONCE(p->swap_map[offset], usage); else WRITE_ONCE(p->swap_map[offset], SWAP_HAS_CACHE); return usage; } /* * When we get a swap entry, if there aren't some other ways to * prevent swapoff, such as the folio in swap cache is locked, page * table lock is held, etc., the swap entry may become invalid because * of swapoff. Then, we need to enclose all swap related functions * with get_swap_device() and put_swap_device(), unless the swap * functions call get/put_swap_device() by themselves. * * Check whether swap entry is valid in the swap device. If so, * return pointer to swap_info_struct, and keep the swap entry valid * via preventing the swap device from being swapoff, until * put_swap_device() is called. Otherwise return NULL. * * Notice that swapoff or swapoff+swapon can still happen before the * percpu_ref_tryget_live() in get_swap_device() or after the * percpu_ref_put() in put_swap_device() if there isn't any other way * to prevent swapoff. The caller must be prepared for that. For * example, the following situation is possible. 
* * CPU1 CPU2 * do_swap_page() * ... swapoff+swapon * __read_swap_cache_async() * swapcache_prepare() * __swap_duplicate() * // check swap_map * // verify PTE not changed * * In __swap_duplicate(), the swap_map need to be checked before * changing partly because the specified swap entry may be for another * swap device which has been swapoff. And in do_swap_page(), after * the page is read from the swap device, the PTE is verified not * changed with the page table locked to check whether the swap device * has been swapoff or swapoff+swapon. */ struct swap_info_struct *get_swap_device(swp_entry_t entry) { struct swap_info_struct *si; unsigned long offset; if (!entry.val) goto out; si = swp_swap_info(entry); if (!si) goto bad_nofile; if (!percpu_ref_tryget_live(&si->users)) goto out; /* * Guarantee the si->users are checked before accessing other * fields of swap_info_struct. * * Paired with the spin_unlock() after setup_swap_info() in * enable_swap_info(). */ smp_rmb(); offset = swp_offset(entry); if (offset >= si->max) goto put_out; return si; bad_nofile: pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val); out: return NULL; put_out: pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val); percpu_ref_put(&si->users); return NULL; } static unsigned char __swap_entry_free(struct swap_info_struct *p, swp_entry_t entry) { struct swap_cluster_info *ci; unsigned long offset = swp_offset(entry); unsigned char usage; ci = lock_cluster_or_swap_info(p, offset); usage = __swap_entry_free_locked(p, offset, 1); unlock_cluster_or_swap_info(p, ci); if (!usage) free_swap_slot(entry); return usage; } static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry) { struct swap_cluster_info *ci; unsigned long offset = swp_offset(entry); unsigned char count; ci = lock_cluster(p, offset); count = p->swap_map[offset]; VM_BUG_ON(count != SWAP_HAS_CACHE); p->swap_map[offset] = 0; dec_cluster_info_page(p, p->cluster_info, offset); unlock_cluster(ci); mem_cgroup_uncharge_swap(entry, 1); swap_range_free(p, offset, 1); } /* * Caller has made sure that the swap device corresponding to entry * is still around or has not been recycled. */ void swap_free(swp_entry_t entry) { struct swap_info_struct *p; p = _swap_info_get(entry); if (p) __swap_entry_free(p, entry); } /* * Called after dropping swapcache to decrease refcnt to swap entries. 
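* For a THP-sized folio (size == SWAPFILE_CLUSTER) the whole cluster is released in one go when every entry in it was cache-only; otherwise the entries are dropped one at a time via __swap_entry_free_locked().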
*/ void put_swap_folio(struct folio *folio, swp_entry_t entry) { unsigned long offset = swp_offset(entry); unsigned long idx = offset / SWAPFILE_CLUSTER; struct swap_cluster_info *ci; struct swap_info_struct *si; unsigned char *map; unsigned int i, free_entries = 0; unsigned char val; int size = swap_entry_size(folio_nr_pages(folio)); si = _swap_info_get(entry); if (!si) return; ci = lock_cluster_or_swap_info(si, offset); if (size == SWAPFILE_CLUSTER) { VM_BUG_ON(!cluster_is_huge(ci)); map = si->swap_map + offset; for (i = 0; i < SWAPFILE_CLUSTER; i++) { val = map[i]; VM_BUG_ON(!(val & SWAP_HAS_CACHE)); if (val == SWAP_HAS_CACHE) free_entries++; } cluster_clear_huge(ci); if (free_entries == SWAPFILE_CLUSTER) { unlock_cluster_or_swap_info(si, ci); spin_lock(&si->lock); mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER); swap_free_cluster(si, idx); spin_unlock(&si->lock); return; } } for (i = 0; i < size; i++, entry.val++) { if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) { unlock_cluster_or_swap_info(si, ci); free_swap_slot(entry); if (i == size - 1) return; lock_cluster_or_swap_info(si, offset); } } unlock_cluster_or_swap_info(si, ci); } #ifdef CONFIG_THP_SWAP int split_swap_cluster(swp_entry_t entry) { struct swap_info_struct *si; struct swap_cluster_info *ci; unsigned long offset = swp_offset(entry); si = _swap_info_get(entry); if (!si) return -EBUSY; ci = lock_cluster(si, offset); cluster_clear_huge(ci); unlock_cluster(ci); return 0; } #endif static int swp_entry_cmp(const void *ent1, const void *ent2) { const swp_entry_t *e1 = ent1, *e2 = ent2; return (int)swp_type(*e1) - (int)swp_type(*e2); } void swapcache_free_entries(swp_entry_t *entries, int n) { struct swap_info_struct *p, *prev; int i; if (n <= 0) return; prev = NULL; p = NULL; /* * Sort swap entries by swap device, so each lock is only taken once. * nr_swapfiles isn't absolutely correct, but the overhead of sort() is * so low that it isn't necessary to optimize further. */ if (nr_swapfiles > 1) sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL); for (i = 0; i < n; ++i) { p = swap_info_get_cont(entries[i], prev); if (p) swap_entry_free(p, entries[i]); prev = p; } if (p) spin_unlock(&p->lock); } int __swap_count(swp_entry_t entry) { struct swap_info_struct *si = swp_swap_info(entry); pgoff_t offset = swp_offset(entry); return swap_count(si->swap_map[offset]); } /* * How many references to @entry are currently swapped out? * This does not give an exact answer when swap count is continued, * but does include the high COUNT_CONTINUED flag to allow for that. */ int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) { pgoff_t offset = swp_offset(entry); struct swap_cluster_info *ci; int count; ci = lock_cluster_or_swap_info(si, offset); count = swap_count(si->swap_map[offset]); unlock_cluster_or_swap_info(si, ci); return count; } /* * How many references to @entry are currently swapped out? * This considers COUNT_CONTINUED so it returns exact answer. 
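* Continuation counts are accumulated by walking the continuation pages linked off the vmalloc'ed swap_map page, so the value returned may exceed SWAP_MAP_MAX.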
*/ int swp_swapcount(swp_entry_t entry) { int count, tmp_count, n; struct swap_info_struct *p; struct swap_cluster_info *ci; struct page *page; pgoff_t offset; unsigned char *map; p = _swap_info_get(entry); if (!p) return 0; offset = swp_offset(entry); ci = lock_cluster_or_swap_info(p, offset); count = swap_count(p->swap_map[offset]); if (!(count & COUNT_CONTINUED)) goto out; count &= ~COUNT_CONTINUED; n = SWAP_MAP_MAX + 1; page = vmalloc_to_page(p->swap_map + offset); offset &= ~PAGE_MASK; VM_BUG_ON(page_private(page) != SWP_CONTINUED); do { page = list_next_entry(page, lru); map = kmap_atomic(page); tmp_count = map[offset]; kunmap_atomic(map); count += (tmp_count & ~COUNT_CONTINUED) * n; n *= (SWAP_CONT_MAX + 1); } while (tmp_count & COUNT_CONTINUED); out: unlock_cluster_or_swap_info(p, ci); return count; } static bool swap_page_trans_huge_swapped(struct swap_info_struct *si, swp_entry_t entry) { struct swap_cluster_info *ci; unsigned char *map = si->swap_map; unsigned long roffset = swp_offset(entry); unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER); int i; bool ret = false; ci = lock_cluster_or_swap_info(si, offset); if (!ci || !cluster_is_huge(ci)) { if (swap_count(map[roffset])) ret = true; goto unlock_out; } for (i = 0; i < SWAPFILE_CLUSTER; i++) { if (swap_count(map[offset + i])) { ret = true; break; } } unlock_out: unlock_cluster_or_swap_info(si, ci); return ret; } static bool folio_swapped(struct folio *folio) { swp_entry_t entry = folio->swap; struct swap_info_struct *si = _swap_info_get(entry); if (!si) return false; if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio))) return swap_swapcount(si, entry) != 0; return swap_page_trans_huge_swapped(si, entry); } /** * folio_free_swap() - Free the swap space used for this folio. * @folio: The folio to remove. * * If swap is getting full, or if there are no more mappings of this folio, * then call folio_free_swap to free its swap space. * * Return: true if we were able to release the swap space. */ bool folio_free_swap(struct folio *folio) { VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); if (!folio_test_swapcache(folio)) return false; if (folio_test_writeback(folio)) return false; if (folio_swapped(folio)) return false; /* * Once hibernation has begun to create its image of memory, * there's a danger that one of the calls to folio_free_swap() * - most probably a call from __try_to_reclaim_swap() while * hibernation is allocating its own swap pages for the image, * but conceivably even a call from memory reclaim - will free * the swap from a folio which has already been recorded in the * image as a clean swapcache folio, and then reuse its swap for * another page of the image. On waking from hibernation, the * original folio might be freed under memory pressure, then * later read back in from swap, now with the wrong data. * * Hibernation suspends storage while it is writing the image * to disk so check that here. */ if (pm_suspended_storage()) return false; delete_from_swap_cache(folio); folio_set_dirty(folio); return true; } /* * Free the swap entry like above, but also try to * free the page cache entry if it is the last user. 
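* The swap cache page is only reclaimed (via __try_to_reclaim_swap()) once the cache holds the last remaining reference and, for a huge cluster, no other entry in that cluster is still swapped out.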
*/ int free_swap_and_cache(swp_entry_t entry) { struct swap_info_struct *p; unsigned char count; if (non_swap_entry(entry)) return 1; p = _swap_info_get(entry); if (p) { count = __swap_entry_free(p, entry); if (count == SWAP_HAS_CACHE && !swap_page_trans_huge_swapped(p, entry)) __try_to_reclaim_swap(p, swp_offset(entry), TTRS_UNMAPPED | TTRS_FULL); } return p != NULL; } #ifdef CONFIG_HIBERNATION swp_entry_t get_swap_page_of_type(int type) { struct swap_info_struct *si = swap_type_to_swap_info(type); swp_entry_t entry = {0}; if (!si) goto fail; /* This is called for allocating swap entry, not cache */ spin_lock(&si->lock); if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry)) atomic_long_dec(&nr_swap_pages); spin_unlock(&si->lock); fail: return entry; } /* * Find the swap type that corresponds to given device (if any). * * @offset - number of the PAGE_SIZE-sized block of the device, starting * from 0, in which the swap header is expected to be located. * * This is needed for the suspend to disk (aka swsusp). */ int swap_type_of(dev_t device, sector_t offset) { int type; if (!device) return -1; spin_lock(&swap_lock); for (type = 0; type < nr_swapfiles; type++) { struct swap_info_struct *sis = swap_info[type]; if (!(sis->flags & SWP_WRITEOK)) continue; if (device == sis->bdev->bd_dev) { struct swap_extent *se = first_se(sis); if (se->start_block == offset) { spin_unlock(&swap_lock); return type; } } } spin_unlock(&swap_lock); return -ENODEV; } int find_first_swap(dev_t *device) { int type; spin_lock(&swap_lock); for (type = 0; type < nr_swapfiles; type++) { struct swap_info_struct *sis = swap_info[type]; if (!(sis->flags & SWP_WRITEOK)) continue; *device = sis->bdev->bd_dev; spin_unlock(&swap_lock); return type; } spin_unlock(&swap_lock); return -ENODEV; } /* * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev * corresponding to given index in swap_info (swap type). */ sector_t swapdev_block(int type, pgoff_t offset) { struct swap_info_struct *si = swap_type_to_swap_info(type); struct swap_extent *se; if (!si || !(si->flags & SWP_WRITEOK)) return 0; se = offset_to_swap_extent(si, offset); return se->start_block + (offset - se->start_page); } /* * Return either the total number of swap pages of given type, or the number * of free pages of that type (depending on @free) * * This is needed for software suspend */ unsigned int count_swap_pages(int type, int free) { unsigned int n = 0; spin_lock(&swap_lock); if ((unsigned int)type < nr_swapfiles) { struct swap_info_struct *sis = swap_info[type]; spin_lock(&sis->lock); if (sis->flags & SWP_WRITEOK) { n = sis->pages; if (free) n -= sis->inuse_pages; } spin_unlock(&sis->lock); } spin_unlock(&swap_lock); return n; } #endif /* CONFIG_HIBERNATION */ static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte) { return pte_same(pte_swp_clear_flags(pte), swp_pte); } /* * No need to decide whether this PTE shares the swap entry with others, * just let do_wp_page work it out if a write is requested later - to * force COW, vm_page_prot omits write permission from any private vma. 
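* unuse_pte() returns 1 when the swap entry was successfully replaced by the page, 0 when the PTE no longer matches (or the page is poisoned / not uptodate), and -ENOMEM when KSM needed to copy the page but the copy could not be allocated.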
*/ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, swp_entry_t entry, struct folio *folio) { struct page *page = folio_file_page(folio, swp_offset(entry)); struct page *swapcache; spinlock_t *ptl; pte_t *pte, new_pte, old_pte; bool hwpoisoned = PageHWPoison(page); int ret = 1; swapcache = page; page = ksm_might_need_to_copy(page, vma, addr); if (unlikely(!page)) return -ENOMEM; else if (unlikely(PTR_ERR(page) == -EHWPOISON)) hwpoisoned = true; pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte), swp_entry_to_pte(entry)))) { ret = 0; goto out; } old_pte = ptep_get(pte); if (unlikely(hwpoisoned || !PageUptodate(page))) { swp_entry_t swp_entry; dec_mm_counter(vma->vm_mm, MM_SWAPENTS); if (hwpoisoned) { swp_entry = make_hwpoison_entry(swapcache); page = swapcache; } else { swp_entry = make_poisoned_swp_entry(); } new_pte = swp_entry_to_pte(swp_entry); ret = 0; goto setpte; } /* * Some architectures may have to restore extra metadata to the page * when reading from swap. This metadata may be indexed by swap entry * so this must be called before swap_free(). */ arch_swap_restore(entry, page_folio(page)); /* See do_swap_page() */ BUG_ON(!PageAnon(page) && PageMappedToDisk(page)); BUG_ON(PageAnon(page) && PageAnonExclusive(page)); dec_mm_counter(vma->vm_mm, MM_SWAPENTS); inc_mm_counter(vma->vm_mm, MM_ANONPAGES); get_page(page); if (page == swapcache) { rmap_t rmap_flags = RMAP_NONE; /* * See do_swap_page(): PageWriteback() would be problematic. * However, we do a wait_on_page_writeback() just before this * call and have the page locked. */ VM_BUG_ON_PAGE(PageWriteback(page), page); if (pte_swp_exclusive(old_pte)) rmap_flags |= RMAP_EXCLUSIVE; page_add_anon_rmap(page, vma, addr, rmap_flags); } else { /* ksm created a completely new copy */ page_add_new_anon_rmap(page, vma, addr); lru_cache_add_inactive_or_unevictable(page, vma); } new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot)); if (pte_swp_soft_dirty(old_pte)) new_pte = pte_mksoft_dirty(new_pte); if (pte_swp_uffd_wp(old_pte)) new_pte = pte_mkuffd_wp(new_pte); setpte: set_pte_at(vma->vm_mm, addr, pte, new_pte); swap_free(entry); out: if (pte) pte_unmap_unlock(pte, ptl); if (page != swapcache) { unlock_page(page); put_page(page); } return ret; } static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned int type) { pte_t *pte = NULL; struct swap_info_struct *si; si = swap_info[type]; do { struct folio *folio; unsigned long offset; unsigned char swp_count; swp_entry_t entry; int ret; pte_t ptent; if (!pte++) { pte = pte_offset_map(pmd, addr); if (!pte) break; } ptent = ptep_get_lockless(pte); if (!is_swap_pte(ptent)) continue; entry = pte_to_swp_entry(ptent); if (swp_type(entry) != type) continue; offset = swp_offset(entry); pte_unmap(pte); pte = NULL; folio = swap_cache_get_folio(entry, vma, addr); if (!folio) { struct page *page; struct vm_fault vmf = { .vma = vma, .address = addr, .real_address = addr, .pmd = pmd, }; page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, &vmf); if (page) folio = page_folio(page); } if (!folio) { swp_count = READ_ONCE(si->swap_map[offset]); if (swp_count == 0 || swp_count == SWAP_MAP_BAD) continue; return -ENOMEM; } folio_lock(folio); folio_wait_writeback(folio); ret = unuse_pte(vma, pmd, addr, entry, folio); if (ret < 0) { folio_unlock(folio); folio_put(folio); return ret; } folio_free_swap(folio); folio_unlock(folio); folio_put(folio); } while (addr += PAGE_SIZE, addr 
!= end); if (pte) pte_unmap(pte); return 0; } static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, unsigned int type) { pmd_t *pmd; unsigned long next; int ret; pmd = pmd_offset(pud, addr); do { cond_resched(); next = pmd_addr_end(addr, end); ret = unuse_pte_range(vma, pmd, addr, next, type); if (ret) return ret; } while (pmd++, addr = next, addr != end); return 0; } static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr, unsigned long end, unsigned int type) { pud_t *pud; unsigned long next; int ret; pud = pud_offset(p4d, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; ret = unuse_pmd_range(vma, pud, addr, next, type); if (ret) return ret; } while (pud++, addr = next, addr != end); return 0; } static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned int type) { p4d_t *p4d; unsigned long next; int ret; p4d = p4d_offset(pgd, addr); do { next = p4d_addr_end(addr, end); if (p4d_none_or_clear_bad(p4d)) continue; ret = unuse_pud_range(vma, p4d, addr, next, type); if (ret) return ret; } while (p4d++, addr = next, addr != end); return 0; } static int unuse_vma(struct vm_area_struct *vma, unsigned int type) { pgd_t *pgd; unsigned long addr, end, next; int ret; addr = vma->vm_start; end = vma->vm_end; pgd = pgd_offset(vma->vm_mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; ret = unuse_p4d_range(vma, pgd, addr, next, type); if (ret) return ret; } while (pgd++, addr = next, addr != end); return 0; } static int unuse_mm(struct mm_struct *mm, unsigned int type) { struct vm_area_struct *vma; int ret = 0; VMA_ITERATOR(vmi, mm, 0); mmap_read_lock(mm); for_each_vma(vmi, vma) { if (vma->anon_vma) { ret = unuse_vma(vma, type); if (ret) break; } cond_resched(); } mmap_read_unlock(mm); return ret; } /* * Scan swap_map from current position to next entry still in use. * Return 0 if there are no inuse entries after prev till end of * the map. */ static unsigned int find_next_to_unuse(struct swap_info_struct *si, unsigned int prev) { unsigned int i; unsigned char count; /* * No need for swap_lock here: we're just looking * for whether an entry is in use, not modifying it; false * hits are okay, and sys_swapoff() has already prevented new * allocations from this area (while holding swap_lock). */ for (i = prev + 1; i < si->max; i++) { count = READ_ONCE(si->swap_map[i]); if (count && swap_count(count) != SWAP_MAP_BAD) break; if ((i % LATENCY_LIMIT) == 0) cond_resched(); } if (i == si->max) i = 0; return i; } static int try_to_unuse(unsigned int type) { struct mm_struct *prev_mm; struct mm_struct *mm; struct list_head *p; int retval = 0; struct swap_info_struct *si = swap_info[type]; struct folio *folio; swp_entry_t entry; unsigned int i; if (!READ_ONCE(si->inuse_pages)) return 0; retry: retval = shmem_unuse(type); if (retval) return retval; prev_mm = &init_mm; mmget(prev_mm); spin_lock(&mmlist_lock); p = &init_mm.mmlist; while (READ_ONCE(si->inuse_pages) && !signal_pending(current) && (p = p->next) != &init_mm.mmlist) { mm = list_entry(p, struct mm_struct, mmlist); if (!mmget_not_zero(mm)) continue; spin_unlock(&mmlist_lock); mmput(prev_mm); prev_mm = mm; retval = unuse_mm(mm, type); if (retval) { mmput(prev_mm); return retval; } /* * Make sure that we aren't completely killing * interactive performance. 
*/ cond_resched(); spin_lock(&mmlist_lock); } spin_unlock(&mmlist_lock); mmput(prev_mm); i = 0; while (READ_ONCE(si->inuse_pages) && !signal_pending(current) && (i = find_next_to_unuse(si, i)) != 0) { entry = swp_entry(type, i); folio = filemap_get_folio(swap_address_space(entry), i); if (IS_ERR(folio)) continue; /* * It is conceivable that a racing task removed this folio from * swap cache just before we acquired the page lock. The folio * might even be back in swap cache on another swap area. But * that is okay, folio_free_swap() only removes stale folios. */ folio_lock(folio); folio_wait_writeback(folio); folio_free_swap(folio); folio_unlock(folio); folio_put(folio); } /* * Lets check again to see if there are still swap entries in the map. * If yes, we would need to do retry the unuse logic again. * Under global memory pressure, swap entries can be reinserted back * into process space after the mmlist loop above passes over them. * * Limit the number of retries? No: when mmget_not_zero() * above fails, that mm is likely to be freeing swap from * exit_mmap(), which proceeds at its own independent pace; * and even shmem_writepage() could have been preempted after * folio_alloc_swap(), temporarily hiding that swap. It's easy * and robust (though cpu-intensive) just to keep retrying. */ if (READ_ONCE(si->inuse_pages)) { if (!signal_pending(current)) goto retry; return -EINTR; } return 0; } /* * After a successful try_to_unuse, if no swap is now in use, we know * we can empty the mmlist. swap_lock must be held on entry and exit. * Note that mmlist_lock nests inside swap_lock, and an mm must be * added to the mmlist just after page_duplicate - before would be racy. */ static void drain_mmlist(void) { struct list_head *p, *next; unsigned int type; for (type = 0; type < nr_swapfiles; type++) if (swap_info[type]->inuse_pages) return; spin_lock(&mmlist_lock); list_for_each_safe(p, next, &init_mm.mmlist) list_del_init(p); spin_unlock(&mmlist_lock); } /* * Free all of a swapdev's extent information */ static void destroy_swap_extents(struct swap_info_struct *sis) { while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) { struct rb_node *rb = sis->swap_extent_root.rb_node; struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node); rb_erase(rb, &sis->swap_extent_root); kfree(se); } if (sis->flags & SWP_ACTIVATED) { struct file *swap_file = sis->swap_file; struct address_space *mapping = swap_file->f_mapping; sis->flags &= ~SWP_ACTIVATED; if (mapping->a_ops->swap_deactivate) mapping->a_ops->swap_deactivate(swap_file); } } /* * Add a block range (and the corresponding page range) into this swapdev's * extent tree. * * This function rather assumes that it is called in ascending page order. */ int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, unsigned long nr_pages, sector_t start_block) { struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL; struct swap_extent *se; struct swap_extent *new_se; /* * place the new node at the right most since the * function is called in ascending page order. */ while (*link) { parent = *link; link = &parent->rb_right; } if (parent) { se = rb_entry(parent, struct swap_extent, rb_node); BUG_ON(se->start_page + se->nr_pages != start_page); if (se->start_block + se->nr_pages == start_block) { /* Merge it */ se->nr_pages += nr_pages; return 0; } } /* No merge, insert a new extent. 
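* A new extent is only needed when the incoming block range is not physically contiguous with the rightmost existing extent; contiguous ranges are merged above instead.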
*/ new_se = kmalloc(sizeof(*se), GFP_KERNEL); if (new_se == NULL) return -ENOMEM; new_se->start_page = start_page; new_se->nr_pages = nr_pages; new_se->start_block = start_block; rb_link_node(&new_se->rb_node, parent, link); rb_insert_color(&new_se->rb_node, &sis->swap_extent_root); return 1; } EXPORT_SYMBOL_GPL(add_swap_extent); /* * A `swap extent' is a simple thing which maps a contiguous range of pages * onto a contiguous range of disk blocks. A rbtree of swap extents is * built at swapon time and is then used at swap_writepage/swap_readpage * time for locating where on disk a page belongs. * * If the swapfile is an S_ISBLK block device, a single extent is installed. * This is done so that the main operating code can treat S_ISBLK and S_ISREG * swap files identically. * * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap * extent rbtree operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK * swapfiles are handled *identically* after swapon time. * * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks * and will parse them into a rbtree, in PAGE_SIZE chunks. If some stray * blocks are found which do not fall within the PAGE_SIZE alignment * requirements, they are simply tossed out - we will never use those blocks * for swapping. * * For all swap devices we set S_SWAPFILE across the life of the swapon. This * prevents users from writing to the swap device, which will corrupt memory. * * The amount of disk space which a single swap extent represents varies. * Typically it is in the 1-4 megabyte range. So we can have hundreds of * extents in the rbtree. - akpm. */ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span) { struct file *swap_file = sis->swap_file; struct address_space *mapping = swap_file->f_mapping; struct inode *inode = mapping->host; int ret; if (S_ISBLK(inode->i_mode)) { ret = add_swap_extent(sis, 0, sis->max, 0); *span = sis->pages; return ret; } if (mapping->a_ops->swap_activate) { ret = mapping->a_ops->swap_activate(sis, swap_file, span); if (ret < 0) return ret; sis->flags |= SWP_ACTIVATED; if ((sis->flags & SWP_FS_OPS) && sio_pool_init() != 0) { destroy_swap_extents(sis); return -ENOMEM; } return ret; } return generic_swapfile_activate(sis, swap_file, span); } static int swap_node(struct swap_info_struct *p) { struct block_device *bdev; if (p->bdev) bdev = p->bdev; else bdev = p->swap_file->f_inode->i_sb->s_bdev; return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE; } static void setup_swap_info(struct swap_info_struct *p, int prio, unsigned char *swap_map, struct swap_cluster_info *cluster_info) { int i; if (prio >= 0) p->prio = prio; else p->prio = --least_priority; /* * the plist prio is negated because plist ordering is * low-to-high, while swap ordering is high-to-low */ p->list.prio = -p->prio; for_each_node(i) { if (p->prio >= 0) p->avail_lists[i].prio = -p->prio; else { if (swap_node(p) == i) p->avail_lists[i].prio = 1; else p->avail_lists[i].prio = -p->prio; } } p->swap_map = swap_map; p->cluster_info = cluster_info; } static void _enable_swap_info(struct swap_info_struct *p) { p->flags |= SWP_WRITEOK; atomic_long_add(p->pages, &nr_swap_pages); total_swap_pages += p->pages; assert_spin_locked(&swap_lock); /* * both lists are plists, and thus priority ordered. * swap_active_head needs to be priority ordered for swapoff(), * which on removal of any swap_info_struct with an auto-assigned * (i.e. 
negative) priority increments the auto-assigned priority * of any lower-priority swap_info_structs. * swap_avail_head needs to be priority ordered for folio_alloc_swap(), * which allocates swap pages from the highest available priority * swap_info_struct. */ plist_add(&p->list, &swap_active_head); /* add to available list iff swap device is not full */ if (p->highest_bit) add_to_avail_list(p); } static void enable_swap_info(struct swap_info_struct *p, int prio, unsigned char *swap_map, struct swap_cluster_info *cluster_info) { zswap_swapon(p->type); spin_lock(&swap_lock); spin_lock(&p->lock); setup_swap_info(p, prio, swap_map, cluster_info); spin_unlock(&p->lock); spin_unlock(&swap_lock); /* * Finished initializing swap device, now it's safe to reference it. */ percpu_ref_resurrect(&p->users); spin_lock(&swap_lock); spin_lock(&p->lock); _enable_swap_info(p); spin_unlock(&p->lock); spin_unlock(&swap_lock); } static void reinsert_swap_info(struct swap_info_struct *p) { spin_lock(&swap_lock); spin_lock(&p->lock); setup_swap_info(p, p->prio, p->swap_map, p->cluster_info); _enable_swap_info(p); spin_unlock(&p->lock); spin_unlock(&swap_lock); } bool has_usable_swap(void) { bool ret = true; spin_lock(&swap_lock); if (plist_head_empty(&swap_active_head)) ret = false; spin_unlock(&swap_lock); return ret; } SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) { struct swap_info_struct *p = NULL; unsigned char *swap_map; struct swap_cluster_info *cluster_info; struct file *swap_file, *victim; struct address_space *mapping; struct inode *inode; struct filename *pathname; int err, found = 0; unsigned int old_block_size; if (!capable(CAP_SYS_ADMIN)) return -EPERM; BUG_ON(!current->mm); pathname = getname(specialfile); if (IS_ERR(pathname)) return PTR_ERR(pathname); victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0); err = PTR_ERR(victim); if (IS_ERR(victim)) goto out; mapping = victim->f_mapping; spin_lock(&swap_lock); plist_for_each_entry(p, &swap_active_head, list) { if (p->flags & SWP_WRITEOK) { if (p->swap_file->f_mapping == mapping) { found = 1; break; } } } if (!found) { err = -EINVAL; spin_unlock(&swap_lock); goto out_dput; } if (!security_vm_enough_memory_mm(current->mm, p->pages)) vm_unacct_memory(p->pages); else { err = -ENOMEM; spin_unlock(&swap_lock); goto out_dput; } spin_lock(&p->lock); del_from_avail_list(p); if (p->prio < 0) { struct swap_info_struct *si = p; int nid; plist_for_each_entry_continue(si, &swap_active_head, list) { si->prio++; si->list.prio--; for_each_node(nid) { if (si->avail_lists[nid].prio != 1) si->avail_lists[nid].prio--; } } least_priority++; } plist_del(&p->list, &swap_active_head); atomic_long_sub(p->pages, &nr_swap_pages); total_swap_pages -= p->pages; p->flags &= ~SWP_WRITEOK; spin_unlock(&p->lock); spin_unlock(&swap_lock); disable_swap_slots_cache_lock(); set_current_oom_origin(); err = try_to_unuse(p->type); clear_current_oom_origin(); if (err) { /* re-insert swap space back into swap_list */ reinsert_swap_info(p); reenable_swap_slots_cache_unlock(); goto out_dput; } reenable_swap_slots_cache_unlock(); /* * Wait for swap operations protected by get/put_swap_device() * to complete. * * We need synchronize_rcu() here to protect the accessing to * the swap cache data structure. 
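* percpu_ref_kill() below prevents new get_swap_device() callers from taking a reference, and wait_for_completion(&p->comp) then waits for users that already hold one to drop it before the swap resources are torn down.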
*/ percpu_ref_kill(&p->users); synchronize_rcu(); wait_for_completion(&p->comp); flush_work(&p->discard_work); destroy_swap_extents(p); if (p->flags & SWP_CONTINUED) free_swap_count_continuations(p); if (!p->bdev || !bdev_nonrot(p->bdev)) atomic_dec(&nr_rotate_swap); mutex_lock(&swapon_mutex); spin_lock(&swap_lock); spin_lock(&p->lock); drain_mmlist(); /* wait for anyone still in scan_swap_map_slots */ p->highest_bit = 0; /* cuts scans short */ while (p->flags >= SWP_SCANNING) { spin_unlock(&p->lock); spin_unlock(&swap_lock); schedule_timeout_uninterruptible(1); spin_lock(&swap_lock); spin_lock(&p->lock); } swap_file = p->swap_file; old_block_size = p->old_block_size; p->swap_file = NULL; p->max = 0; swap_map = p->swap_map; p->swap_map = NULL; cluster_info = p->cluster_info; p->cluster_info = NULL; spin_unlock(&p->lock); spin_unlock(&swap_lock); arch_swap_invalidate_area(p->type); zswap_swapoff(p->type); mutex_unlock(&swapon_mutex); free_percpu(p->percpu_cluster); p->percpu_cluster = NULL; free_percpu(p->cluster_next_cpu); p->cluster_next_cpu = NULL; vfree(swap_map); kvfree(cluster_info); /* Destroy swap account information */ swap_cgroup_swapoff(p->type); exit_swap_address_space(p->type); inode = mapping->host; if (S_ISBLK(inode->i_mode)) { struct block_device *bdev = I_BDEV(inode); set_blocksize(bdev, old_block_size); blkdev_put(bdev, p); } inode_lock(inode); inode->i_flags &= ~S_SWAPFILE; inode_unlock(inode); filp_close(swap_file, NULL); /* * Clear the SWP_USED flag after all resources are freed so that swapon * can reuse this swap_info in alloc_swap_info() safely. It is ok to * not hold p->lock after we cleared its SWP_WRITEOK. */ spin_lock(&swap_lock); p->flags = 0; spin_unlock(&swap_lock); err = 0; atomic_inc(&proc_poll_event); wake_up_interruptible(&proc_poll_wait); out_dput: filp_close(victim, NULL); out: putname(pathname); return err; } #ifdef CONFIG_PROC_FS static __poll_t swaps_poll(struct file *file, poll_table *wait) { struct seq_file *seq = file->private_data; poll_wait(file, &proc_poll_wait, wait); if (seq->poll_event != atomic_read(&proc_poll_event)) { seq->poll_event = atomic_read(&proc_poll_event); return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI; } return EPOLLIN | EPOLLRDNORM; } /* iterator */ static void *swap_start(struct seq_file *swap, loff_t *pos) { struct swap_info_struct *si; int type; loff_t l = *pos; mutex_lock(&swapon_mutex); if (!l) return SEQ_START_TOKEN; for (type = 0; (si = swap_type_to_swap_info(type)); type++) { if (!(si->flags & SWP_USED) || !si->swap_map) continue; if (!--l) return si; } return NULL; } static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) { struct swap_info_struct *si = v; int type; if (v == SEQ_START_TOKEN) type = 0; else type = si->type + 1; ++(*pos); for (; (si = swap_type_to_swap_info(type)); type++) { if (!(si->flags & SWP_USED) || !si->swap_map) continue; return si; } return NULL; } static void swap_stop(struct seq_file *swap, void *v) { mutex_unlock(&swapon_mutex); } static int swap_show(struct seq_file *swap, void *v) { struct swap_info_struct *si = v; struct file *file; int len; unsigned long bytes, inuse; if (si == SEQ_START_TOKEN) { seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n"); return 0; } bytes = K(si->pages); inuse = K(READ_ONCE(si->inuse_pages)); file = si->swap_file; len = seq_file_path(swap, file, " \t\n\\"); seq_printf(swap, "%*s%s\t%lu\t%s%lu\t%s%d\n", len < 40 ? 40 - len : 1, " ", S_ISBLK(file_inode(file)->i_mode) ? "partition" : "file\t", bytes, bytes < 10000000 ? 
"\t" : "", inuse, inuse < 10000000 ? "\t" : "", si->prio); return 0; } static const struct seq_operations swaps_op = { .start = swap_start, .next = swap_next, .stop = swap_stop, .show = swap_show }; static int swaps_open(struct inode *inode, struct file *file) { struct seq_file *seq; int ret; ret = seq_open(file, &swaps_op); if (ret) return ret; seq = file->private_data; seq->poll_event = atomic_read(&proc_poll_event); return 0; } static const struct proc_ops swaps_proc_ops = { .proc_flags = PROC_ENTRY_PERMANENT, .proc_open = swaps_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = seq_release, .proc_poll = swaps_poll, }; static int __init procswaps_init(void) { proc_create("swaps", 0, NULL, &swaps_proc_ops); return 0; } __initcall(procswaps_init); #endif /* CONFIG_PROC_FS */ #ifdef MAX_SWAPFILES_CHECK static int __init max_swapfiles_check(void) { MAX_SWAPFILES_CHECK(); return 0; } late_initcall(max_swapfiles_check); #endif static struct swap_info_struct *alloc_swap_info(void) { struct swap_info_struct *p; struct swap_info_struct *defer = NULL; unsigned int type; int i; p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL); if (!p) return ERR_PTR(-ENOMEM); if (percpu_ref_init(&p->users, swap_users_ref_free, PERCPU_REF_INIT_DEAD, GFP_KERNEL)) { kvfree(p); return ERR_PTR(-ENOMEM); } spin_lock(&swap_lock); for (type = 0; type < nr_swapfiles; type++) { if (!(swap_info[type]->flags & SWP_USED)) break; } if (type >= MAX_SWAPFILES) { spin_unlock(&swap_lock); percpu_ref_exit(&p->users); kvfree(p); return ERR_PTR(-EPERM); } if (type >= nr_swapfiles) { p->type = type; /* * Publish the swap_info_struct after initializing it. * Note that kvzalloc() above zeroes all its fields. */ smp_store_release(&swap_info[type], p); /* rcu_assign_pointer() */ nr_swapfiles++; } else { defer = p; p = swap_info[type]; /* * Do not memset this entry: a racing procfs swap_next() * would be relying on p->type to remain valid. */ } p->swap_extent_root = RB_ROOT; plist_node_init(&p->list, 0); for_each_node(i) plist_node_init(&p->avail_lists[i], 0); p->flags = SWP_USED; spin_unlock(&swap_lock); if (defer) { percpu_ref_exit(&defer->users); kvfree(defer); } spin_lock_init(&p->lock); spin_lock_init(&p->cont_lock); init_completion(&p->comp); return p; } static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) { int error; if (S_ISBLK(inode->i_mode)) { p->bdev = blkdev_get_by_dev(inode->i_rdev, BLK_OPEN_READ | BLK_OPEN_WRITE, p, NULL); if (IS_ERR(p->bdev)) { error = PTR_ERR(p->bdev); p->bdev = NULL; return error; } p->old_block_size = block_size(p->bdev); error = set_blocksize(p->bdev, PAGE_SIZE); if (error < 0) return error; /* * Zoned block devices contain zones that have a sequential * write only restriction. Hence zoned block devices are not * suitable for swapping. Disallow them here. */ if (bdev_is_zoned(p->bdev)) return -EINVAL; p->flags |= SWP_BLKDEV; } else if (S_ISREG(inode->i_mode)) { p->bdev = inode->i_sb->s_bdev; } return 0; } /* * Find out how many pages are allowed for a single swap device. There * are two limiting factors: * 1) the number of bits for the swap offset in the swp_entry_t type, and * 2) the number of bits in the swap pte, as defined by the different * architectures. * * In order to find the largest possible bit mask, a swap entry with * swap type 0 and swap offset ~0UL is created, encoded to a swap pte, * decoded to a swp_entry_t again, and finally the swap offset is * extracted. 
* * This will mask all the bits from the initial ~0UL mask that can't * be encoded in either the swp_entry_t or the architecture definition * of a swap pte. */ unsigned long generic_max_swapfile_size(void) { return swp_offset(pte_to_swp_entry( swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; } /* Can be overridden by an architecture for additional checks. */ __weak unsigned long arch_max_swapfile_size(void) { return generic_max_swapfile_size(); } static unsigned long read_swap_header(struct swap_info_struct *p, union swap_header *swap_header, struct inode *inode) { int i; unsigned long maxpages; unsigned long swapfilepages; unsigned long last_page; if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) { pr_err("Unable to find swap-space signature\n"); return 0; } /* swap partition endianness hack... */ if (swab32(swap_header->info.version) == 1) { swab32s(&swap_header->info.version); swab32s(&swap_header->info.last_page); swab32s(&swap_header->info.nr_badpages); if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) return 0; for (i = 0; i < swap_header->info.nr_badpages; i++) swab32s(&swap_header->info.badpages[i]); } /* Check the swap header's sub-version */ if (swap_header->info.version != 1) { pr_warn("Unable to handle swap header version %d\n", swap_header->info.version); return 0; } p->lowest_bit = 1; p->cluster_next = 1; p->cluster_nr = 0; maxpages = swapfile_maximum_size; last_page = swap_header->info.last_page; if (!last_page) { pr_warn("Empty swap-file\n"); return 0; } if (last_page > maxpages) { pr_warn("Truncating oversized swap area, only using %luk out of %luk\n", K(maxpages), K(last_page)); } if (maxpages > last_page) { maxpages = last_page + 1; /* p->max is an unsigned int: don't overflow it */ if ((unsigned int)maxpages == 0) maxpages = UINT_MAX; } p->highest_bit = maxpages - 1; if (!maxpages) return 0; swapfilepages = i_size_read(inode) >> PAGE_SHIFT; if (swapfilepages && maxpages > swapfilepages) { pr_warn("Swap area shorter than signature indicates\n"); return 0; } if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode)) return 0; if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) return 0; return maxpages; } #define SWAP_CLUSTER_INFO_COLS \ DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info)) #define SWAP_CLUSTER_SPACE_COLS \ DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER) #define SWAP_CLUSTER_COLS \ max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS) static int setup_swap_map_and_extents(struct swap_info_struct *p, union swap_header *swap_header, unsigned char *swap_map, struct swap_cluster_info *cluster_info, unsigned long maxpages, sector_t *span) { unsigned int j, k; unsigned int nr_good_pages; int nr_extents; unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER); unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS; unsigned long i, idx; nr_good_pages = maxpages - 1; /* omit header page */ cluster_list_init(&p->free_clusters); cluster_list_init(&p->discard_clusters); for (i = 0; i < swap_header->info.nr_badpages; i++) { unsigned int page_nr = swap_header->info.badpages[i]; if (page_nr == 0 || page_nr > swap_header->info.last_page) return -EINVAL; if (page_nr < maxpages) { swap_map[page_nr] = SWAP_MAP_BAD; nr_good_pages--; /* * Haven't marked the cluster free yet, no list * operation involved */ inc_cluster_info_page(p, cluster_info, page_nr); } } /* Haven't marked the cluster free yet, no list operation involved */ for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++) 
inc_cluster_info_page(p, cluster_info, i); if (nr_good_pages) { swap_map[0] = SWAP_MAP_BAD; /* * Not mark the cluster free yet, no list * operation involved */ inc_cluster_info_page(p, cluster_info, 0); p->max = maxpages; p->pages = nr_good_pages; nr_extents = setup_swap_extents(p, span); if (nr_extents < 0) return nr_extents; nr_good_pages = p->pages; } if (!nr_good_pages) { pr_warn("Empty swap-file\n"); return -EINVAL; } if (!cluster_info) return nr_extents; /* * Reduce false cache line sharing between cluster_info and * sharing same address space. */ for (k = 0; k < SWAP_CLUSTER_COLS; k++) { j = (k + col) % SWAP_CLUSTER_COLS; for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) { idx = i * SWAP_CLUSTER_COLS + j; if (idx >= nr_clusters) continue; if (cluster_count(&cluster_info[idx])) continue; cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE); cluster_list_add_tail(&p->free_clusters, cluster_info, idx); } } return nr_extents; } SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) { struct swap_info_struct *p; struct filename *name; struct file *swap_file = NULL; struct address_space *mapping; struct dentry *dentry; int prio; int error; union swap_header *swap_header; int nr_extents; sector_t span; unsigned long maxpages; unsigned char *swap_map = NULL; struct swap_cluster_info *cluster_info = NULL; struct page *page = NULL; struct inode *inode = NULL; bool inced_nr_rotate_swap = false; if (swap_flags & ~SWAP_FLAGS_VALID) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!swap_avail_heads) return -ENOMEM; p = alloc_swap_info(); if (IS_ERR(p)) return PTR_ERR(p); INIT_WORK(&p->discard_work, swap_discard_work); name = getname(specialfile); if (IS_ERR(name)) { error = PTR_ERR(name); name = NULL; goto bad_swap; } swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0); if (IS_ERR(swap_file)) { error = PTR_ERR(swap_file); swap_file = NULL; goto bad_swap; } p->swap_file = swap_file; mapping = swap_file->f_mapping; dentry = swap_file->f_path.dentry; inode = mapping->host; error = claim_swapfile(p, inode); if (unlikely(error)) goto bad_swap; inode_lock(inode); if (d_unlinked(dentry) || cant_mount(dentry)) { error = -ENOENT; goto bad_swap_unlock_inode; } if (IS_SWAPFILE(inode)) { error = -EBUSY; goto bad_swap_unlock_inode; } /* * Read the swap header. 
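* The header occupies page 0 of the swap file; it is read in through the mapping's read_folio (via read_mapping_page()) and parsed by read_swap_header() above.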
*/ if (!mapping->a_ops->read_folio) { error = -EINVAL; goto bad_swap_unlock_inode; } page = read_mapping_page(mapping, 0, swap_file); if (IS_ERR(page)) { error = PTR_ERR(page); goto bad_swap_unlock_inode; } swap_header = kmap(page); maxpages = read_swap_header(p, swap_header, inode); if (unlikely(!maxpages)) { error = -EINVAL; goto bad_swap_unlock_inode; } /* OK, set up the swap map and apply the bad block list */ swap_map = vzalloc(maxpages); if (!swap_map) { error = -ENOMEM; goto bad_swap_unlock_inode; } if (p->bdev && bdev_stable_writes(p->bdev)) p->flags |= SWP_STABLE_WRITES; if (p->bdev && bdev_synchronous(p->bdev)) p->flags |= SWP_SYNCHRONOUS_IO; if (p->bdev && bdev_nonrot(p->bdev)) { int cpu; unsigned long ci, nr_cluster; p->flags |= SWP_SOLIDSTATE; p->cluster_next_cpu = alloc_percpu(unsigned int); if (!p->cluster_next_cpu) { error = -ENOMEM; goto bad_swap_unlock_inode; } /* * select a random position to start with to help wear leveling * SSD */ for_each_possible_cpu(cpu) { per_cpu(*p->cluster_next_cpu, cpu) = get_random_u32_inclusive(1, p->highest_bit); } nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER); cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info), GFP_KERNEL); if (!cluster_info) { error = -ENOMEM; goto bad_swap_unlock_inode; } for (ci = 0; ci < nr_cluster; ci++) spin_lock_init(&((cluster_info + ci)->lock)); p->percpu_cluster = alloc_percpu(struct percpu_cluster); if (!p->percpu_cluster) { error = -ENOMEM; goto bad_swap_unlock_inode; } for_each_possible_cpu(cpu) { struct percpu_cluster *cluster; cluster = per_cpu_ptr(p->percpu_cluster, cpu); cluster_set_null(&cluster->index); } } else { atomic_inc(&nr_rotate_swap); inced_nr_rotate_swap = true; } error = swap_cgroup_swapon(p->type, maxpages); if (error) goto bad_swap_unlock_inode; nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map, cluster_info, maxpages, &span); if (unlikely(nr_extents < 0)) { error = nr_extents; goto bad_swap_unlock_inode; } if ((swap_flags & SWAP_FLAG_DISCARD) && p->bdev && bdev_max_discard_sectors(p->bdev)) { /* * When discard is enabled for swap with no particular * policy flagged, we set all swap discard flags here in * order to sustain backward compatibility with older * swapon(8) releases. */ p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD | SWP_PAGE_DISCARD); /* * By flagging sys_swapon, a sysadmin can tell us to * either do single-time area discards only, or to just * perform discards for released swap page-clusters. * Now it's time to adjust the p->flags accordingly. */ if (swap_flags & SWAP_FLAG_DISCARD_ONCE) p->flags &= ~SWP_PAGE_DISCARD; else if (swap_flags & SWAP_FLAG_DISCARD_PAGES) p->flags &= ~SWP_AREA_DISCARD; /* issue a swapon-time discard if it's still required */ if (p->flags & SWP_AREA_DISCARD) { int err = discard_swap(p); if (unlikely(err)) pr_err("swapon: discard_swap(%p): %d\n", p, err); } } error = init_swap_address_space(p->type, maxpages); if (error) goto bad_swap_unlock_inode; /* * Flush any pending IO and dirty mappings before we start using this * swap device. */ inode->i_flags |= S_SWAPFILE; error = inode_drain_writes(inode); if (error) { inode->i_flags &= ~S_SWAPFILE; goto free_swap_address_space; } mutex_lock(&swapon_mutex); prio = -1; if (swap_flags & SWAP_FLAG_PREFER) prio = (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; enable_swap_info(p, prio, swap_map, cluster_info); pr_info("Adding %uk swap on %s. 
Priority:%d extents:%d across:%lluk %s%s%s%s\n", K(p->pages), name->name, p->prio, nr_extents, K((unsigned long long)span), (p->flags & SWP_SOLIDSTATE) ? "SS" : "", (p->flags & SWP_DISCARDABLE) ? "D" : "", (p->flags & SWP_AREA_DISCARD) ? "s" : "", (p->flags & SWP_PAGE_DISCARD) ? "c" : ""); mutex_unlock(&swapon_mutex); atomic_inc(&proc_poll_event); wake_up_interruptible(&proc_poll_wait); error = 0; goto out; free_swap_address_space: exit_swap_address_space(p->type); bad_swap_unlock_inode: inode_unlock(inode); bad_swap: free_percpu(p->percpu_cluster); p->percpu_cluster = NULL; free_percpu(p->cluster_next_cpu); p->cluster_next_cpu = NULL; if (inode && S_ISBLK(inode->i_mode) && p->bdev) { set_blocksize(p->bdev, p->old_block_size); blkdev_put(p->bdev, p); } inode = NULL; destroy_swap_extents(p); swap_cgroup_swapoff(p->type); spin_lock(&swap_lock); p->swap_file = NULL; p->flags = 0; spin_unlock(&swap_lock); vfree(swap_map); kvfree(cluster_info); if (inced_nr_rotate_swap) atomic_dec(&nr_rotate_swap); if (swap_file) filp_close(swap_file, NULL); out: if (page && !IS_ERR(page)) { kunmap(page); put_page(page); } if (name) putname(name); if (inode) inode_unlock(inode); if (!error) enable_swap_slots_cache(); return error; } void si_swapinfo(struct sysinfo *val) { unsigned int type; unsigned long nr_to_be_unused = 0; spin_lock(&swap_lock); for (type = 0; type < nr_swapfiles; type++) { struct swap_info_struct *si = swap_info[type]; if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) nr_to_be_unused += READ_ONCE(si->inuse_pages); } val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused; val->totalswap = total_swap_pages + nr_to_be_unused; spin_unlock(&swap_lock); } /* * Verify that a swap entry is valid and increment its swap map count. * * Returns error code in following case. * - success -> 0 * - swp_entry is invalid -> EINVAL * - swp_entry is migration entry -> EINVAL * - swap-cache reference is requested but there is already one. -> EEXIST * - swap-cache reference is requested but the entry is not used. -> ENOENT * - swap-mapped reference requested but needs continued swap count. -> ENOMEM */ static int __swap_duplicate(swp_entry_t entry, unsigned char usage) { struct swap_info_struct *p; struct swap_cluster_info *ci; unsigned long offset; unsigned char count; unsigned char has_cache; int err; p = swp_swap_info(entry); offset = swp_offset(entry); ci = lock_cluster_or_swap_info(p, offset); count = p->swap_map[offset]; /* * swapin_readahead() doesn't check if a swap entry is valid, so the * swap entry could be SWAP_MAP_BAD. Check here with lock held. 
*/ if (unlikely(swap_count(count) == SWAP_MAP_BAD)) { err = -ENOENT; goto unlock_out; } has_cache = count & SWAP_HAS_CACHE; count &= ~SWAP_HAS_CACHE; err = 0; if (usage == SWAP_HAS_CACHE) { /* set SWAP_HAS_CACHE if there is no cache and entry is used */ if (!has_cache && count) has_cache = SWAP_HAS_CACHE; else if (has_cache) /* someone else added cache */ err = -EEXIST; else /* no users remaining */ err = -ENOENT; } else if (count || has_cache) { if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX) count += usage; else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) err = -EINVAL; else if (swap_count_continued(p, offset, count)) count = COUNT_CONTINUED; else err = -ENOMEM; } else err = -ENOENT; /* unused swap entry */ WRITE_ONCE(p->swap_map[offset], count | has_cache); unlock_out: unlock_cluster_or_swap_info(p, ci); return err; } /* * Help swapoff by noting that swap entry belongs to shmem/tmpfs * (in which case its reference count is never incremented). */ void swap_shmem_alloc(swp_entry_t entry) { __swap_duplicate(entry, SWAP_MAP_SHMEM); } /* * Increase reference count of swap entry by 1. * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required * but could not be atomically allocated. Returns 0, just as if it succeeded, * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which * might occur if a page table entry has got corrupted. */ int swap_duplicate(swp_entry_t entry) { int err = 0; while (!err && __swap_duplicate(entry, 1) == -ENOMEM) err = add_swap_count_continuation(entry, GFP_ATOMIC); return err; } /* * @entry: swap entry for which we allocate swap cache. * * Called when allocating swap cache for existing swap entry, * This can return error codes. Returns 0 at success. * -EEXIST means there is a swap cache. * Note: return code is different from swap_duplicate(). */ int swapcache_prepare(swp_entry_t entry) { return __swap_duplicate(entry, SWAP_HAS_CACHE); } struct swap_info_struct *swp_swap_info(swp_entry_t entry) { return swap_type_to_swap_info(swp_type(entry)); } struct swap_info_struct *page_swap_info(struct page *page) { swp_entry_t entry = page_swap_entry(page); return swp_swap_info(entry); } /* * out-of-line methods to avoid include hell. */ struct address_space *swapcache_mapping(struct folio *folio) { return page_swap_info(&folio->page)->swap_file->f_mapping; } EXPORT_SYMBOL_GPL(swapcache_mapping); pgoff_t __page_file_index(struct page *page) { swp_entry_t swap = page_swap_entry(page); return swp_offset(swap); } EXPORT_SYMBOL_GPL(__page_file_index); /* * add_swap_count_continuation - called when a swap count is duplicated * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's * page of the original vmalloc'ed swap_map, to hold the continuation count * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc. * * These continuation pages are seldom referenced: the common paths all work * on the original swap_map, only referring to a continuation page when the * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX. * * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL) * can be called after dropping locks. 
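* Returns 0 on success (or when the continuation turned out not to be needed, e.g. because the count dropped or a racing swapoff occurred) and -ENOMEM when the extra page could not be allocated.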
*/ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) { struct swap_info_struct *si; struct swap_cluster_info *ci; struct page *head; struct page *page; struct page *list_page; pgoff_t offset; unsigned char count; int ret = 0; /* * When debugging, it's easier to use __GFP_ZERO here; but it's better * for latency not to zero a page while GFP_ATOMIC and holding locks. */ page = alloc_page(gfp_mask | __GFP_HIGHMEM); si = get_swap_device(entry); if (!si) { /* * An acceptable race has occurred since the failing * __swap_duplicate(): the swap device may be swapoff */ goto outer; } spin_lock(&si->lock); offset = swp_offset(entry); ci = lock_cluster(si, offset); count = swap_count(si->swap_map[offset]); if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) { /* * The higher the swap count, the more likely it is that tasks * will race to add swap count continuation: we need to avoid * over-provisioning. */ goto out; } if (!page) { ret = -ENOMEM; goto out; } head = vmalloc_to_page(si->swap_map + offset); offset &= ~PAGE_MASK; spin_lock(&si->cont_lock); /* * Page allocation does not initialize the page's lru field, * but it does always reset its private field. */ if (!page_private(head)) { BUG_ON(count & COUNT_CONTINUED); INIT_LIST_HEAD(&head->lru); set_page_private(head, SWP_CONTINUED); si->flags |= SWP_CONTINUED; } list_for_each_entry(list_page, &head->lru, lru) { unsigned char *map; /* * If the previous map said no continuation, but we've found * a continuation page, free our allocation and use this one. */ if (!(count & COUNT_CONTINUED)) goto out_unlock_cont; map = kmap_atomic(list_page) + offset; count = *map; kunmap_atomic(map); /* * If this continuation count now has some space in it, * free our allocation and use this one. */ if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX) goto out_unlock_cont; } list_add_tail(&page->lru, &head->lru); page = NULL; /* now it's attached, don't free it */ out_unlock_cont: spin_unlock(&si->cont_lock); out: unlock_cluster(ci); spin_unlock(&si->lock); put_swap_device(si); outer: if (page) __free_page(page); return ret; } /* * swap_count_continued - when the original swap_map count is incremented * from SWAP_MAP_MAX, check if there is already a continuation page to carry * into, carry if so, or else fail until a new continuation page is allocated; * when the original swap_map count is decremented from 0 with continuation, * borrow from the continuation and report whether it still holds more. * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster * lock. 
*/ static bool swap_count_continued(struct swap_info_struct *si, pgoff_t offset, unsigned char count) { struct page *head; struct page *page; unsigned char *map; bool ret; head = vmalloc_to_page(si->swap_map + offset); if (page_private(head) != SWP_CONTINUED) { BUG_ON(count & COUNT_CONTINUED); return false; /* need to add count continuation */ } spin_lock(&si->cont_lock); offset &= ~PAGE_MASK; page = list_next_entry(head, lru); map = kmap_atomic(page) + offset; if (count == SWAP_MAP_MAX) /* initial increment from swap_map */ goto init_map; /* jump over SWAP_CONT_MAX checks */ if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */ /* * Think of how you add 1 to 999 */ while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) { kunmap_atomic(map); page = list_next_entry(page, lru); BUG_ON(page == head); map = kmap_atomic(page) + offset; } if (*map == SWAP_CONT_MAX) { kunmap_atomic(map); page = list_next_entry(page, lru); if (page == head) { ret = false; /* add count continuation */ goto out; } map = kmap_atomic(page) + offset; init_map: *map = 0; /* we didn't zero the page */ } *map += 1; kunmap_atomic(map); while ((page = list_prev_entry(page, lru)) != head) { map = kmap_atomic(page) + offset; *map = COUNT_CONTINUED; kunmap_atomic(map); } ret = true; /* incremented */ } else { /* decrementing */ /* * Think of how you subtract 1 from 1000 */ BUG_ON(count != COUNT_CONTINUED); while (*map == COUNT_CONTINUED) { kunmap_atomic(map); page = list_next_entry(page, lru); BUG_ON(page == head); map = kmap_atomic(page) + offset; } BUG_ON(*map == 0); *map -= 1; if (*map == 0) count = 0; kunmap_atomic(map); while ((page = list_prev_entry(page, lru)) != head) { map = kmap_atomic(page) + offset; *map = SWAP_CONT_MAX | count; count = COUNT_CONTINUED; kunmap_atomic(map); } ret = count == COUNT_CONTINUED; } out: spin_unlock(&si->cont_lock); return ret; } /* * free_swap_count_continuations - swapoff free all the continuation pages * appended to the swap_map, after swap_map is quiesced, before vfree'ing it. */ static void free_swap_count_continuations(struct swap_info_struct *si) { pgoff_t offset; for (offset = 0; offset < si->max; offset += PAGE_SIZE) { struct page *head; head = vmalloc_to_page(si->swap_map + offset); if (page_private(head)) { struct page *page, *next; list_for_each_entry_safe(page, next, &head->lru, lru) { list_del(&page->lru); __free_page(page); } } } } #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp) { struct swap_info_struct *si, *next; int nid = folio_nid(folio); if (!(gfp & __GFP_IO)) return; if (!blk_cgroup_congested()) return; /* * We've already scheduled a throttle, avoid taking the global swap * lock. */ if (current->throttle_disk) return; spin_lock(&swap_avail_lock); plist_for_each_entry_safe(si, next, &swap_avail_heads[nid], avail_lists[nid]) { if (si->bdev) { blkcg_schedule_throttle(si->bdev->bd_disk, true); break; } } spin_unlock(&swap_avail_lock); } #endif static int __init swapfile_init(void) { int nid; swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head), GFP_KERNEL); if (!swap_avail_heads) { pr_emerg("Not enough memory for swap heads, swap is disabled\n"); return -ENOMEM; } for_each_node(nid) plist_head_init(&swap_avail_heads[nid]); swapfile_maximum_size = arch_max_swapfile_size(); #ifdef CONFIG_MIGRATION if (swapfile_maximum_size >= (1UL << SWP_MIG_TOTAL_BITS)) swap_migration_ad_supported = true; #endif /* CONFIG_MIGRATION */ return 0; } subsys_initcall(swapfile_init);
linux-master
mm/swapfile.c
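The continuation comments in the mm/swapfile.c entry above describe a carry/borrow scheme: the byte in swap_map holds the low digit of a swap entry's reference count and each continuation page holds one higher digit, which is what the "add 1 to 999 / subtract 1 from 1000" remarks in swap_count_continued() allude to. The standalone C sketch below models only that arithmetic; MAP_MAX, CONT_MAX and NDIGITS are stand-in values invented for the demo, not the kernel's constants, and none of this code is part of mm/swapfile.c.

/*
 * Illustrative sketch only: digit-style carry/borrow over a primary
 * count byte plus a few "continuation" digits.  Constants and the
 * representation are assumptions made for the demo.
 */
#include <stdio.h>

#define MAP_MAX   0x3f   /* assumed stand-in for the primary byte's limit */
#define CONT_MAX  0x7f   /* assumed stand-in for a continuation digit's limit */
#define NDIGITS   4      /* primary byte + 3 continuation "pages" */

struct swap_count {
	unsigned char digit[NDIGITS];	/* digit[0] models the swap_map byte */
};

/* Add one reference, carrying into higher digits, like "999 + 1". */
static int count_inc(struct swap_count *c)
{
	for (int i = 0; i < NDIGITS; i++) {
		unsigned char max = i ? CONT_MAX : MAP_MAX;

		if (c->digit[i] < max) {
			c->digit[i]++;
			return 0;
		}
		c->digit[i] = 0;	/* carry into the next digit */
	}
	return -1;			/* would need another continuation digit */
}

/* Drop one reference, borrowing from higher digits, like "1000 - 1". */
static int count_dec(struct swap_count *c)
{
	for (int i = 0; i < NDIGITS; i++) {
		unsigned char max = i ? CONT_MAX : MAP_MAX;

		if (c->digit[i] > 0) {
			c->digit[i]--;
			return 0;
		}
		c->digit[i] = max;	/* borrow from the next digit */
	}
	return -1;			/* count was already zero */
}

int main(void)
{
	struct swap_count c = { .digit = { MAP_MAX, 0, 0, 0 } };

	count_inc(&c);	/* low digit overflows, carry sets digit[1] to 1 */
	printf("after inc: digit0=%d digit1=%d\n", c.digit[0], c.digit[1]);
	count_dec(&c);	/* borrow restores digit[0] to MAP_MAX */
	printf("after dec: digit0=%d digit1=%d\n", c.digit[0], c.digit[1]);
	return 0;
}

Running it shows the carry when the low digit is incremented past its limit, and the matching borrow on the following decrement.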
// SPDX-License-Identifier: GPL-2.0 /* * Provide common bits of early_ioremap() support for architectures needing * temporary mappings during boot before ioremap() is available. * * This is mostly a direct copy of the x86 early_ioremap implementation. * * (C) Copyright 1995 1996, 2014 Linus Torvalds * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <asm/fixmap.h> #include <asm/early_ioremap.h> #include "internal.h" #ifdef CONFIG_MMU static int early_ioremap_debug __initdata; static int __init early_ioremap_debug_setup(char *str) { early_ioremap_debug = 1; return 0; } early_param("early_ioremap_debug", early_ioremap_debug_setup); static int after_paging_init __initdata; pgprot_t __init __weak early_memremap_pgprot_adjust(resource_size_t phys_addr, unsigned long size, pgprot_t prot) { return prot; } void __init early_ioremap_reset(void) { after_paging_init = 1; } /* * Generally, ioremap() is available after paging_init() has been called. * Architectures wanting to allow early_ioremap after paging_init() can * define __late_set_fixmap and __late_clear_fixmap to do the right thing. */ #ifndef __late_set_fixmap static inline void __init __late_set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) { BUG(); } #endif #ifndef __late_clear_fixmap static inline void __init __late_clear_fixmap(enum fixed_addresses idx) { BUG(); } #endif static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata; static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata; static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata; void __init early_ioremap_setup(void) { int i; for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { WARN_ON_ONCE(prev_map[i]); slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i); } } static int __init check_early_ioremap_leak(void) { int count = 0; int i; for (i = 0; i < FIX_BTMAPS_SLOTS; i++) if (prev_map[i]) count++; if (WARN(count, KERN_WARNING "Debug warning: early ioremap leak of %d areas detected.\n" "please boot with early_ioremap_debug and report the dmesg.\n", count)) return 1; return 0; } late_initcall(check_early_ioremap_leak); static void __init __iomem * __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot) { unsigned long offset; resource_size_t last_addr; unsigned int nrpages; enum fixed_addresses idx; int i, slot; WARN_ON(system_state >= SYSTEM_RUNNING); slot = -1; for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { if (!prev_map[i]) { slot = i; break; } } if (WARN(slot < 0, "%s(%pa, %08lx) not found slot\n", __func__, &phys_addr, size)) return NULL; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; if (WARN_ON(!size || last_addr < phys_addr)) return NULL; prev_size[slot] = size; /* * Mappings have to be page-aligned */ offset = offset_in_page(phys_addr); phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr + 1) - phys_addr; /* * Mappings have to fit in the FIX_BTMAP area. */ nrpages = size >> PAGE_SHIFT; if (WARN_ON(nrpages > NR_FIX_BTMAPS)) return NULL; /* * Ok, go for it.. 
*/ idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot; while (nrpages > 0) { if (after_paging_init) __late_set_fixmap(idx, phys_addr, prot); else __early_set_fixmap(idx, phys_addr, prot); phys_addr += PAGE_SIZE; --idx; --nrpages; } WARN(early_ioremap_debug, "%s(%pa, %08lx) [%d] => %08lx + %08lx\n", __func__, &phys_addr, size, slot, offset, slot_virt[slot]); prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]); return prev_map[slot]; } void __init early_iounmap(void __iomem *addr, unsigned long size) { unsigned long virt_addr; unsigned long offset; unsigned int nrpages; enum fixed_addresses idx; int i, slot; slot = -1; for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { if (prev_map[i] == addr) { slot = i; break; } } if (WARN(slot < 0, "%s(%p, %08lx) not found slot\n", __func__, addr, size)) return; if (WARN(prev_size[slot] != size, "%s(%p, %08lx) [%d] size not consistent %08lx\n", __func__, addr, size, slot, prev_size[slot])) return; WARN(early_ioremap_debug, "%s(%p, %08lx) [%d]\n", __func__, addr, size, slot); virt_addr = (unsigned long)addr; if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))) return; offset = offset_in_page(virt_addr); nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT; idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot; while (nrpages > 0) { if (after_paging_init) __late_clear_fixmap(idx); else __early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR); --idx; --nrpages; } prev_map[slot] = NULL; } /* Remap an IO device */ void __init __iomem * early_ioremap(resource_size_t phys_addr, unsigned long size) { return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO); } /* Remap memory */ void __init * early_memremap(resource_size_t phys_addr, unsigned long size) { pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size, FIXMAP_PAGE_NORMAL); return (__force void *)__early_ioremap(phys_addr, size, prot); } #ifdef FIXMAP_PAGE_RO void __init * early_memremap_ro(resource_size_t phys_addr, unsigned long size) { pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size, FIXMAP_PAGE_RO); return (__force void *)__early_ioremap(phys_addr, size, prot); } #endif #ifdef CONFIG_ARCH_USE_MEMREMAP_PROT void __init * early_memremap_prot(resource_size_t phys_addr, unsigned long size, unsigned long prot_val) { return (__force void *)__early_ioremap(phys_addr, size, __pgprot(prot_val)); } #endif #define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size) { unsigned long slop, clen; char *p; while (size) { slop = offset_in_page(src); clen = size; if (clen > MAX_MAP_CHUNK - slop) clen = MAX_MAP_CHUNK - slop; p = early_memremap(src & PAGE_MASK, clen + slop); memcpy(dest, p + slop, clen); early_memunmap(p, clen + slop); dest += clen; src += clen; size -= clen; } } #else /* CONFIG_MMU */ void __init __iomem * early_ioremap(resource_size_t phys_addr, unsigned long size) { return (__force void __iomem *)phys_addr; } /* Remap memory */ void __init * early_memremap(resource_size_t phys_addr, unsigned long size) { return (void *)phys_addr; } void __init * early_memremap_ro(resource_size_t phys_addr, unsigned long size) { return (void *)phys_addr; } void __init early_iounmap(void __iomem *addr, unsigned long size) { } #endif /* CONFIG_MMU */ void __init early_memunmap(void *addr, unsigned long size) { early_iounmap((__force void __iomem *)addr, size); }
linux-master
mm/early_ioremap.c
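Before touching any fixmap slot, __early_ioremap() in the mm/early_ioremap.c entry above reduces a (phys_addr, size) request to a page-aligned base, an in-page offset and a whole number of pages, and rejects zero-sized or wrapping requests and anything larger than one slot. The sketch below redoes just that arithmetic in userspace; PAGE_SHIFT and NR_FIX_BTMAPS are assumed demo values and split_request() is a made-up helper, not a kernel function.

/*
 * Illustrative sketch only: the page-rounding done before an early
 * fixmap mapping is set up.  Constants are assumptions for the demo.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define NR_FIX_BTMAPS	64		/* assumed slot size in pages */

struct btmap_req {
	unsigned long base;	/* page-aligned physical base        */
	unsigned long offset;	/* offset of the request in page 0   */
	unsigned long nrpages;	/* pages the slot must map           */
};

static int split_request(unsigned long phys, unsigned long size,
			 struct btmap_req *req)
{
	unsigned long last = phys + size - 1;

	if (!size || last < phys)		/* zero size or wrap-around */
		return -1;

	req->offset  = phys & ~PAGE_MASK;
	req->base    = phys & PAGE_MASK;
	req->nrpages = (((last + 1) + PAGE_SIZE - 1) & PAGE_MASK) - req->base;
	req->nrpages >>= PAGE_SHIFT;

	return req->nrpages <= NR_FIX_BTMAPS ? 0 : -1;
}

int main(void)
{
	struct btmap_req req;

	/* 5000 bytes starting 100 bytes into a page spans two pages. */
	if (!split_request(0x1000064, 5000, &req))
		printf("base=%#lx offset=%lu nrpages=%lu\n",
		       req.base, req.offset, req.nrpages);
	return 0;
}

For the example request it reports a two-page mapping at base 0x1000000 with offset 100, mirroring the offset_in_page()/PAGE_ALIGN rounding in the kernel code.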
// SPDX-License-Identifier: GPL-2.0 /* * Manage cache of swap slots to be used for and returned from * swap. * * Copyright(c) 2016 Intel Corporation. * * Author: Tim Chen <[email protected]> * * We allocate the swap slots from the global pool and put * it into local per cpu caches. This has the advantage * of no needing to acquire the swap_info lock every time * we need a new slot. * * There is also opportunity to simply return the slot * to local caches without needing to acquire swap_info * lock. We do not reuse the returned slots directly but * move them back to the global pool in a batch. This * allows the slots to coalesce and reduce fragmentation. * * The swap entry allocated is marked with SWAP_HAS_CACHE * flag in map_count that prevents it from being allocated * again from the global pool. * * The swap slots cache is protected by a mutex instead of * a spin lock as when we search for slots with scan_swap_map, * we can possibly sleep. */ #include <linux/swap_slots.h> #include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mutex.h> #include <linux/mm.h> static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots); static bool swap_slot_cache_active; bool swap_slot_cache_enabled; static bool swap_slot_cache_initialized; static DEFINE_MUTEX(swap_slots_cache_mutex); /* Serialize swap slots cache enable/disable operations */ static DEFINE_MUTEX(swap_slots_cache_enable_mutex); static void __drain_swap_slots_cache(unsigned int type); #define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled) #define SLOTS_CACHE 0x1 #define SLOTS_CACHE_RET 0x2 static void deactivate_swap_slots_cache(void) { mutex_lock(&swap_slots_cache_mutex); swap_slot_cache_active = false; __drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET); mutex_unlock(&swap_slots_cache_mutex); } static void reactivate_swap_slots_cache(void) { mutex_lock(&swap_slots_cache_mutex); swap_slot_cache_active = true; mutex_unlock(&swap_slots_cache_mutex); } /* Must not be called with cpu hot plug lock */ void disable_swap_slots_cache_lock(void) { mutex_lock(&swap_slots_cache_enable_mutex); swap_slot_cache_enabled = false; if (swap_slot_cache_initialized) { /* serialize with cpu hotplug operations */ cpus_read_lock(); __drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET); cpus_read_unlock(); } } static void __reenable_swap_slots_cache(void) { swap_slot_cache_enabled = has_usable_swap(); } void reenable_swap_slots_cache_unlock(void) { __reenable_swap_slots_cache(); mutex_unlock(&swap_slots_cache_enable_mutex); } static bool check_cache_active(void) { long pages; if (!swap_slot_cache_enabled) return false; pages = get_nr_swap_pages(); if (!swap_slot_cache_active) { if (pages > num_online_cpus() * THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE) reactivate_swap_slots_cache(); goto out; } /* if global pool of slot caches too low, deactivate cache */ if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE) deactivate_swap_slots_cache(); out: return swap_slot_cache_active; } static int alloc_swap_slot_cache(unsigned int cpu) { struct swap_slots_cache *cache; swp_entry_t *slots, *slots_ret; /* * Do allocation outside swap_slots_cache_mutex * as kvzalloc could trigger reclaim and folio_alloc_swap, * which can lock swap_slots_cache_mutex. 
*/ slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t), GFP_KERNEL); if (!slots) return -ENOMEM; slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t), GFP_KERNEL); if (!slots_ret) { kvfree(slots); return -ENOMEM; } mutex_lock(&swap_slots_cache_mutex); cache = &per_cpu(swp_slots, cpu); if (cache->slots || cache->slots_ret) { /* cache already allocated */ mutex_unlock(&swap_slots_cache_mutex); kvfree(slots); kvfree(slots_ret); return 0; } if (!cache->lock_initialized) { mutex_init(&cache->alloc_lock); spin_lock_init(&cache->free_lock); cache->lock_initialized = true; } cache->nr = 0; cache->cur = 0; cache->n_ret = 0; /* * We initialized alloc_lock and free_lock earlier. We use * !cache->slots or !cache->slots_ret to know if it is safe to acquire * the corresponding lock and use the cache. Memory barrier below * ensures the assumption. */ mb(); cache->slots = slots; cache->slots_ret = slots_ret; mutex_unlock(&swap_slots_cache_mutex); return 0; } static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type, bool free_slots) { struct swap_slots_cache *cache; swp_entry_t *slots = NULL; cache = &per_cpu(swp_slots, cpu); if ((type & SLOTS_CACHE) && cache->slots) { mutex_lock(&cache->alloc_lock); swapcache_free_entries(cache->slots + cache->cur, cache->nr); cache->cur = 0; cache->nr = 0; if (free_slots && cache->slots) { kvfree(cache->slots); cache->slots = NULL; } mutex_unlock(&cache->alloc_lock); } if ((type & SLOTS_CACHE_RET) && cache->slots_ret) { spin_lock_irq(&cache->free_lock); swapcache_free_entries(cache->slots_ret, cache->n_ret); cache->n_ret = 0; if (free_slots && cache->slots_ret) { slots = cache->slots_ret; cache->slots_ret = NULL; } spin_unlock_irq(&cache->free_lock); kvfree(slots); } } static void __drain_swap_slots_cache(unsigned int type) { unsigned int cpu; /* * This function is called during * 1) swapoff, when we have to make sure no * left over slots are in cache when we remove * a swap device; * 2) disabling of swap slot cache, when we run low * on swap slots when allocating memory and need * to return swap slots to global pool. * * We cannot acquire cpu hot plug lock here as * this function can be invoked in the cpu * hot plug path: * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback * -> memory allocation -> direct reclaim -> folio_alloc_swap * -> drain_swap_slots_cache * * Hence the loop over current online cpu below could miss cpu that * is being brought online but not yet marked as online. * That is okay as we do not schedule and run anything on a * cpu before it has been marked online. Hence, we will not * fill any swap slots in slots cache of such cpu. * There are no slots on such cpu that need to be drained. 
*/ for_each_online_cpu(cpu) drain_slots_cache_cpu(cpu, type, false); } static int free_slot_cache(unsigned int cpu) { mutex_lock(&swap_slots_cache_mutex); drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true); mutex_unlock(&swap_slots_cache_mutex); return 0; } void enable_swap_slots_cache(void) { mutex_lock(&swap_slots_cache_enable_mutex); if (!swap_slot_cache_initialized) { int ret; ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache", alloc_swap_slot_cache, free_slot_cache); if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating " "without swap slots cache.\n", __func__)) goto out_unlock; swap_slot_cache_initialized = true; } __reenable_swap_slots_cache(); out_unlock: mutex_unlock(&swap_slots_cache_enable_mutex); } /* called with swap slot cache's alloc lock held */ static int refill_swap_slots_cache(struct swap_slots_cache *cache) { if (!use_swap_slot_cache) return 0; cache->cur = 0; if (swap_slot_cache_active) cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, cache->slots, 1); return cache->nr; } void free_swap_slot(swp_entry_t entry) { struct swap_slots_cache *cache; cache = raw_cpu_ptr(&swp_slots); if (likely(use_swap_slot_cache && cache->slots_ret)) { spin_lock_irq(&cache->free_lock); /* Swap slots cache may be deactivated before acquiring lock */ if (!use_swap_slot_cache || !cache->slots_ret) { spin_unlock_irq(&cache->free_lock); goto direct_free; } if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) { /* * Return slots to global pool. * The current swap_map value is SWAP_HAS_CACHE. * Set it to 0 to indicate it is available for * allocation in global pool */ swapcache_free_entries(cache->slots_ret, cache->n_ret); cache->n_ret = 0; } cache->slots_ret[cache->n_ret++] = entry; spin_unlock_irq(&cache->free_lock); } else { direct_free: swapcache_free_entries(&entry, 1); } } swp_entry_t folio_alloc_swap(struct folio *folio) { swp_entry_t entry; struct swap_slots_cache *cache; entry.val = 0; if (folio_test_large(folio)) { if (IS_ENABLED(CONFIG_THP_SWAP) && arch_thp_swp_supported()) get_swap_pages(1, &entry, folio_nr_pages(folio)); goto out; } /* * Preemption is allowed here, because we may sleep * in refill_swap_slots_cache(). But it is safe, because * accesses to the per-CPU data structure are protected by the * mutex cache->alloc_lock. * * The alloc path here does not touch cache->slots_ret * so cache->free_lock is not taken. */ cache = raw_cpu_ptr(&swp_slots); if (likely(check_cache_active() && cache->slots)) { mutex_lock(&cache->alloc_lock); if (cache->slots) { repeat: if (cache->nr) { entry = cache->slots[cache->cur]; cache->slots[cache->cur++].val = 0; cache->nr--; } else if (refill_swap_slots_cache(cache)) { goto repeat; } } mutex_unlock(&cache->alloc_lock); if (entry.val) goto out; } get_swap_pages(1, &entry, 1); out: if (mem_cgroup_try_charge_swap(folio, entry)) { put_swap_folio(folio, entry); entry.val = 0; } return entry; }
linux-master
mm/swap_slots.c
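folio_alloc_swap() in the mm/swap_slots.c entry above hands out entries from a per-CPU array and refills that array from the global pool a batch at a time, so the expensive global path is taken once per batch rather than once per allocation. The minimal sketch below models only that consume/refill pattern: a plain counter stands in for the global pool, the struct, sizes and function names are invented for illustration, and locking is ignored entirely.

/*
 * Illustrative sketch only: batch refill of a small slot cache from a
 * fake "global pool".  Names and sizes are assumptions for the demo.
 */
#include <stdio.h>

#define CACHE_SIZE 8

struct slot_cache {
	unsigned long slots[CACHE_SIZE];
	int cur;		/* next slot to hand out     */
	int nr;			/* slots still in the cache  */
};

static unsigned long next_global_slot = 1;	/* stands in for the global pool */

/* Batch-refill the cache from the global pool; returns slots obtained. */
static int refill_cache(struct slot_cache *cache)
{
	cache->cur = 0;
	cache->nr = 0;
	/* a single "global lock" acquisition would sit here */
	for (int i = 0; i < CACHE_SIZE; i++)
		cache->slots[cache->nr++] = next_global_slot++;
	return cache->nr;
}

/* Hand out one slot, refilling the cache when it runs dry. */
static unsigned long alloc_slot(struct slot_cache *cache)
{
	if (!cache->nr && !refill_cache(cache))
		return 0;	/* pool exhausted */
	cache->nr--;
	return cache->slots[cache->cur++];
}

int main(void)
{
	struct slot_cache cache = { .cur = 0, .nr = 0 };

	for (int i = 0; i < 10; i++)	/* crosses one refill boundary */
		printf("slot %lu\n", alloc_slot(&cache));
	return 0;
}

The batching effect is visible in the loop: ten allocations cross one refill boundary, so the "pool" is consulted twice instead of ten times.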
// SPDX-License-Identifier: GPL-2.0 /* * linux/mm/slab.c * Written by Mark Hemment, 1996/97. * ([email protected]) * * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli * * Major cleanup, different bufctl logic, per-cpu arrays * (c) 2000 Manfred Spraul * * Cleanup, make the head arrays unconditional, preparation for NUMA * (c) 2002 Manfred Spraul * * An implementation of the Slab Allocator as described in outline in; * UNIX Internals: The New Frontiers by Uresh Vahalia * Pub: Prentice Hall ISBN 0-13-101908-2 * or with a little more detail in; * The Slab Allocator: An Object-Caching Kernel Memory Allocator * Jeff Bonwick (Sun Microsystems). * Presented at: USENIX Summer 1994 Technical Conference * * The memory is organized in caches, one cache for each object type. * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct) * Each cache consists out of many slabs (they are small (usually one * page long) and always contiguous), and each slab contains multiple * initialized objects. * * This means, that your constructor is used only for newly allocated * slabs and you must pass objects with the same initializations to * kmem_cache_free. * * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM, * normal). If you need a special memory type, then must create a new * cache for that memory type. * * In order to reduce fragmentation, the slabs are sorted in 3 groups: * full slabs with 0 free objects * partial slabs * empty slabs with no allocated objects * * If partial slabs exist, then new allocations come from these slabs, * otherwise from empty slabs or new slabs are allocated. * * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache * during kmem_cache_destroy(). The caller must prevent concurrent allocs. * * Each cache has a short per-cpu head array, most allocs * and frees go into that array, and if that array overflows, then 1/2 * of the entries in the array are given back into the global cache. * The head array is strictly LIFO and should improve the cache hit rates. * On SMP, it additionally reduces the spinlock operations. * * The c_cpuarray may not be read with enabled local interrupts - * it's changed with a smp_call_function(). * * SMP synchronization: * constructors and destructors are called without any locking. * Several members in struct kmem_cache and struct slab never change, they * are accessed without any locking. * The per-cpu arrays are never accessed from the wrong cpu, no locking, * and local interrupts are disabled so slab code is preempt-safe. * The non-constant members are protected with a per-cache irq spinlock. * * Many thanks to Mark Hemment, who wrote another per-cpu slab patch * in 2000 - many ideas in the current implementation are derived from * his patch. * * Further notes from the original documentation: * * 11 April '97. Started multi-threading - markhe * The global cache-chain is protected by the mutex 'slab_mutex'. * The sem is only needed when accessing/extending the cache-chain, which * can never happen inside an interrupt (kmem_cache_create(), * kmem_cache_shrink() and kmem_cache_reap()). * * At present, each engine can be growing a cache. This should be blocked. * * 15 March 2005. NUMA slab allocator. * Shai Fultheim <[email protected]>. * Shobhit Dayal <[email protected]> * Alok N Kataria <[email protected]> * Christoph Lameter <[email protected]> * * Modified the slab allocator to be node aware on NUMA systems. * Each node has its own list of partial, free and full slabs. 
* All object allocations for a node occur from node specific slab lists. */ #include <linux/slab.h> #include <linux/mm.h> #include <linux/poison.h> #include <linux/swap.h> #include <linux/cache.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/compiler.h> #include <linux/cpuset.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/notifier.h> #include <linux/kallsyms.h> #include <linux/kfence.h> #include <linux/cpu.h> #include <linux/sysctl.h> #include <linux/module.h> #include <linux/rcupdate.h> #include <linux/string.h> #include <linux/uaccess.h> #include <linux/nodemask.h> #include <linux/kmemleak.h> #include <linux/mempolicy.h> #include <linux/mutex.h> #include <linux/fault-inject.h> #include <linux/rtmutex.h> #include <linux/reciprocal_div.h> #include <linux/debugobjects.h> #include <linux/memory.h> #include <linux/prefetch.h> #include <linux/sched/task_stack.h> #include <net/sock.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/page.h> #include <trace/events/kmem.h> #include "internal.h" #include "slab.h" /* * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON. * 0 for faster, smaller code (especially in the critical paths). * * STATS - 1 to collect stats for /proc/slabinfo. * 0 for faster, smaller code (especially in the critical paths). * * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible) */ #ifdef CONFIG_DEBUG_SLAB #define DEBUG 1 #define STATS 1 #define FORCED_DEBUG 1 #else #define DEBUG 0 #define STATS 0 #define FORCED_DEBUG 0 #endif /* Shouldn't this be in a header file somewhere? */ #define BYTES_PER_WORD sizeof(void *) #define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long)) #ifndef ARCH_KMALLOC_FLAGS #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN #endif #define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \ <= SLAB_OBJ_MIN_SIZE) ? 1 : 0) #if FREELIST_BYTE_INDEX typedef unsigned char freelist_idx_t; #else typedef unsigned short freelist_idx_t; #endif #define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1) /* * struct array_cache * * Purpose: * - LIFO ordering, to hand out cache-warm objects from _alloc * - reduce the number of linked list operations * - reduce spinlock operations * * The limit is stored in the per-cpu structure to reduce the data cache * footprint. * */ struct array_cache { unsigned int avail; unsigned int limit; unsigned int batchcount; unsigned int touched; void *entry[]; /* * Must have this definition in here for the proper * alignment of array_cache. Also simplifies accessing * the entries. */ }; struct alien_cache { spinlock_t lock; struct array_cache ac; }; /* * Need this for bootstrapping a per node allocator. 
*/ #define NUM_INIT_LISTS (2 * MAX_NUMNODES) static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS]; #define CACHE_CACHE 0 #define SIZE_NODE (MAX_NUMNODES) static int drain_freelist(struct kmem_cache *cache, struct kmem_cache_node *n, int tofree); static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node, struct list_head *list); static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list); static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp); static void cache_reap(struct work_struct *unused); static inline void fixup_objfreelist_debug(struct kmem_cache *cachep, void **list); static inline void fixup_slab_list(struct kmem_cache *cachep, struct kmem_cache_node *n, struct slab *slab, void **list); #define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node)) static void kmem_cache_node_init(struct kmem_cache_node *parent) { INIT_LIST_HEAD(&parent->slabs_full); INIT_LIST_HEAD(&parent->slabs_partial); INIT_LIST_HEAD(&parent->slabs_free); parent->total_slabs = 0; parent->free_slabs = 0; parent->shared = NULL; parent->alien = NULL; parent->colour_next = 0; raw_spin_lock_init(&parent->list_lock); parent->free_objects = 0; parent->free_touched = 0; } #define MAKE_LIST(cachep, listp, slab, nodeid) \ do { \ INIT_LIST_HEAD(listp); \ list_splice(&get_node(cachep, nodeid)->slab, listp); \ } while (0) #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ do { \ MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \ MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \ MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \ } while (0) #define CFLGS_OBJFREELIST_SLAB ((slab_flags_t __force)0x40000000U) #define CFLGS_OFF_SLAB ((slab_flags_t __force)0x80000000U) #define OBJFREELIST_SLAB(x) ((x)->flags & CFLGS_OBJFREELIST_SLAB) #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) #define BATCHREFILL_LIMIT 16 /* * Optimization question: fewer reaps means less probability for unnecessary * cpucache drain/refill cycles. * * OTOH the cpuarrays can contain lots of objects, * which could lock up otherwise freeable slabs. 
*/ #define REAPTIMEOUT_AC (2*HZ) #define REAPTIMEOUT_NODE (4*HZ) #if STATS #define STATS_INC_ACTIVE(x) ((x)->num_active++) #define STATS_DEC_ACTIVE(x) ((x)->num_active--) #define STATS_INC_ALLOCED(x) ((x)->num_allocations++) #define STATS_INC_GROWN(x) ((x)->grown++) #define STATS_ADD_REAPED(x, y) ((x)->reaped += (y)) #define STATS_SET_HIGH(x) \ do { \ if ((x)->num_active > (x)->high_mark) \ (x)->high_mark = (x)->num_active; \ } while (0) #define STATS_INC_ERR(x) ((x)->errors++) #define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++) #define STATS_INC_NODEFREES(x) ((x)->node_frees++) #define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++) #define STATS_SET_FREEABLE(x, i) \ do { \ if ((x)->max_freeable < i) \ (x)->max_freeable = i; \ } while (0) #define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) #define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) #define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) #define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss) #else #define STATS_INC_ACTIVE(x) do { } while (0) #define STATS_DEC_ACTIVE(x) do { } while (0) #define STATS_INC_ALLOCED(x) do { } while (0) #define STATS_INC_GROWN(x) do { } while (0) #define STATS_ADD_REAPED(x, y) do { (void)(y); } while (0) #define STATS_SET_HIGH(x) do { } while (0) #define STATS_INC_ERR(x) do { } while (0) #define STATS_INC_NODEALLOCS(x) do { } while (0) #define STATS_INC_NODEFREES(x) do { } while (0) #define STATS_INC_ACOVERFLOW(x) do { } while (0) #define STATS_SET_FREEABLE(x, i) do { } while (0) #define STATS_INC_ALLOCHIT(x) do { } while (0) #define STATS_INC_ALLOCMISS(x) do { } while (0) #define STATS_INC_FREEHIT(x) do { } while (0) #define STATS_INC_FREEMISS(x) do { } while (0) #endif #if DEBUG /* * memory layout of objects: * 0 : objp * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that * the end of an object is aligned with the end of the real * allocation. Catches writes behind the end of the allocation. * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1: * redzone word. * cachep->obj_offset: The real object. * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] * cachep->size - 1* BYTES_PER_WORD: last caller address * [BYTES_PER_WORD long] */ static int obj_offset(struct kmem_cache *cachep) { return cachep->obj_offset; } static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp) { BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); return (unsigned long long *) (objp + obj_offset(cachep) - sizeof(unsigned long long)); } static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp) { BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); if (cachep->flags & SLAB_STORE_USER) return (unsigned long long *)(objp + cachep->size - sizeof(unsigned long long) - REDZONE_ALIGN); return (unsigned long long *) (objp + cachep->size - sizeof(unsigned long long)); } static void **dbg_userword(struct kmem_cache *cachep, void *objp) { BUG_ON(!(cachep->flags & SLAB_STORE_USER)); return (void **)(objp + cachep->size - BYTES_PER_WORD); } #else #define obj_offset(x) 0 #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) #endif /* * Do not go above this order unless 0 objects fit into the slab or * overridden on the command line. 
*/ #define SLAB_MAX_ORDER_HI 1 #define SLAB_MAX_ORDER_LO 0 static int slab_max_order = SLAB_MAX_ORDER_LO; static bool slab_max_order_set __initdata; static inline void *index_to_obj(struct kmem_cache *cache, const struct slab *slab, unsigned int idx) { return slab->s_mem + cache->size * idx; } #define BOOT_CPUCACHE_ENTRIES 1 /* internal cache of cache description objs */ static struct kmem_cache kmem_cache_boot = { .batchcount = 1, .limit = BOOT_CPUCACHE_ENTRIES, .shared = 1, .size = sizeof(struct kmem_cache), .name = "kmem_cache", }; static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) { return this_cpu_ptr(cachep->cpu_cache); } /* * Calculate the number of objects and left-over bytes for a given buffer size. */ static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size, slab_flags_t flags, size_t *left_over) { unsigned int num; size_t slab_size = PAGE_SIZE << gfporder; /* * The slab management structure can be either off the slab or * on it. For the latter case, the memory allocated for a * slab is used for: * * - @buffer_size bytes for each object * - One freelist_idx_t for each object * * We don't need to consider alignment of freelist because * freelist will be at the end of slab page. The objects will be * at the correct alignment. * * If the slab management structure is off the slab, then the * alignment will already be calculated into the size. Because * the slabs are all pages aligned, the objects will be at the * correct alignment when allocated. */ if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) { num = slab_size / buffer_size; *left_over = slab_size % buffer_size; } else { num = slab_size / (buffer_size + sizeof(freelist_idx_t)); *left_over = slab_size % (buffer_size + sizeof(freelist_idx_t)); } return num; } #if DEBUG #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg) static void __slab_error(const char *function, struct kmem_cache *cachep, char *msg) { pr_err("slab error in %s(): cache `%s': %s\n", function, cachep->name, msg); dump_stack(); add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); } #endif /* * By default on NUMA we use alien caches to stage the freeing of * objects allocated from other nodes. This causes massive memory * inefficiencies when using fake NUMA setup to split memory into a * large number of small nodes, so it can be disabled on the command * line */ static int use_alien_caches __read_mostly = 1; static int __init noaliencache_setup(char *s) { use_alien_caches = 0; return 1; } __setup("noaliencache", noaliencache_setup); static int __init slab_max_order_setup(char *str) { get_option(&str, &slab_max_order); slab_max_order = slab_max_order < 0 ? 0 : min(slab_max_order, MAX_ORDER); slab_max_order_set = true; return 1; } __setup("slab_max_order=", slab_max_order_setup); #ifdef CONFIG_NUMA /* * Special reaping functions for NUMA systems called from cache_reap(). * These take care of doing round robin flushing of alien caches (containing * objects freed on different nodes from which they were allocated) and the * flushing of remote pcps by calling drain_node_pages. 
*/ static DEFINE_PER_CPU(unsigned long, slab_reap_node); static void init_reap_node(int cpu) { per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu), node_online_map); } static void next_reap_node(void) { int node = __this_cpu_read(slab_reap_node); node = next_node_in(node, node_online_map); __this_cpu_write(slab_reap_node, node); } #else #define init_reap_node(cpu) do { } while (0) #define next_reap_node(void) do { } while (0) #endif /* * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz * via the workqueue/eventd. * Add the CPU number into the expiration time to minimize the possibility of * the CPUs getting into lockstep and contending for the global cache chain * lock. */ static void start_cpu_timer(int cpu) { struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu); if (reap_work->work.func == NULL) { init_reap_node(cpu); INIT_DEFERRABLE_WORK(reap_work, cache_reap); schedule_delayed_work_on(cpu, reap_work, __round_jiffies_relative(HZ, cpu)); } } static void init_arraycache(struct array_cache *ac, int limit, int batch) { if (ac) { ac->avail = 0; ac->limit = limit; ac->batchcount = batch; ac->touched = 0; } } static struct array_cache *alloc_arraycache(int node, int entries, int batchcount, gfp_t gfp) { size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache); struct array_cache *ac = NULL; ac = kmalloc_node(memsize, gfp, node); /* * The array_cache structures contain pointers to free object. * However, when such objects are allocated or transferred to another * cache the pointers are not cleared and they could be counted as * valid references during a kmemleak scan. Therefore, kmemleak must * not scan such objects. */ kmemleak_no_scan(ac); init_arraycache(ac, entries, batchcount); return ac; } static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep, struct slab *slab, void *objp) { struct kmem_cache_node *n; int slab_node; LIST_HEAD(list); slab_node = slab_nid(slab); n = get_node(cachep, slab_node); raw_spin_lock(&n->list_lock); free_block(cachep, &objp, 1, slab_node, &list); raw_spin_unlock(&n->list_lock); slabs_destroy(cachep, &list); } /* * Transfer objects in one arraycache to another. * Locking must be handled by the caller. * * Return the number of entries transferred. */ static int transfer_objects(struct array_cache *to, struct array_cache *from, unsigned int max) { /* Figure out how many entries to transfer */ int nr = min3(from->avail, max, to->limit - to->avail); if (!nr) return 0; memcpy(to->entry + to->avail, from->entry + from->avail - nr, sizeof(void *) *nr); from->avail -= nr; to->avail += nr; return nr; } /* &alien->lock must be held by alien callers. */ static __always_inline void __free_one(struct array_cache *ac, void *objp) { /* Avoid trivial double-free. 
*/ if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) && WARN_ON_ONCE(ac->avail > 0 && ac->entry[ac->avail - 1] == objp)) return; ac->entry[ac->avail++] = objp; } #ifndef CONFIG_NUMA #define drain_alien_cache(cachep, alien) do { } while (0) #define reap_alien(cachep, n) do { } while (0) static inline struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) { return NULL; } static inline void free_alien_cache(struct alien_cache **ac_ptr) { } static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) { return 0; } static inline gfp_t gfp_exact_node(gfp_t flags) { return flags & ~__GFP_NOFAIL; } #else /* CONFIG_NUMA */ static struct alien_cache *__alloc_alien_cache(int node, int entries, int batch, gfp_t gfp) { size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache); struct alien_cache *alc = NULL; alc = kmalloc_node(memsize, gfp, node); if (alc) { kmemleak_no_scan(alc); init_arraycache(&alc->ac, entries, batch); spin_lock_init(&alc->lock); } return alc; } static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) { struct alien_cache **alc_ptr; int i; if (limit > 1) limit = 12; alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node); if (!alc_ptr) return NULL; for_each_node(i) { if (i == node || !node_online(i)) continue; alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp); if (!alc_ptr[i]) { for (i--; i >= 0; i--) kfree(alc_ptr[i]); kfree(alc_ptr); return NULL; } } return alc_ptr; } static void free_alien_cache(struct alien_cache **alc_ptr) { int i; if (!alc_ptr) return; for_each_node(i) kfree(alc_ptr[i]); kfree(alc_ptr); } static void __drain_alien_cache(struct kmem_cache *cachep, struct array_cache *ac, int node, struct list_head *list) { struct kmem_cache_node *n = get_node(cachep, node); if (ac->avail) { raw_spin_lock(&n->list_lock); /* * Stuff objects into the remote nodes shared array first. * That way we could avoid the overhead of putting the objects * into the free lists and getting them back later. */ if (n->shared) transfer_objects(n->shared, ac, ac->limit); free_block(cachep, ac->entry, ac->avail, node, list); ac->avail = 0; raw_spin_unlock(&n->list_lock); } } /* * Called from cache_reap() to regularly drain alien caches round robin. 
*/ static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n) { int node = __this_cpu_read(slab_reap_node); if (n->alien) { struct alien_cache *alc = n->alien[node]; struct array_cache *ac; if (alc) { ac = &alc->ac; if (ac->avail && spin_trylock_irq(&alc->lock)) { LIST_HEAD(list); __drain_alien_cache(cachep, ac, node, &list); spin_unlock_irq(&alc->lock); slabs_destroy(cachep, &list); } } } } static void drain_alien_cache(struct kmem_cache *cachep, struct alien_cache **alien) { int i = 0; struct alien_cache *alc; struct array_cache *ac; unsigned long flags; for_each_online_node(i) { alc = alien[i]; if (alc) { LIST_HEAD(list); ac = &alc->ac; spin_lock_irqsave(&alc->lock, flags); __drain_alien_cache(cachep, ac, i, &list); spin_unlock_irqrestore(&alc->lock, flags); slabs_destroy(cachep, &list); } } } static int __cache_free_alien(struct kmem_cache *cachep, void *objp, int node, int slab_node) { struct kmem_cache_node *n; struct alien_cache *alien = NULL; struct array_cache *ac; LIST_HEAD(list); n = get_node(cachep, node); STATS_INC_NODEFREES(cachep); if (n->alien && n->alien[slab_node]) { alien = n->alien[slab_node]; ac = &alien->ac; spin_lock(&alien->lock); if (unlikely(ac->avail == ac->limit)) { STATS_INC_ACOVERFLOW(cachep); __drain_alien_cache(cachep, ac, slab_node, &list); } __free_one(ac, objp); spin_unlock(&alien->lock); slabs_destroy(cachep, &list); } else { n = get_node(cachep, slab_node); raw_spin_lock(&n->list_lock); free_block(cachep, &objp, 1, slab_node, &list); raw_spin_unlock(&n->list_lock); slabs_destroy(cachep, &list); } return 1; } static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) { int slab_node = slab_nid(virt_to_slab(objp)); int node = numa_mem_id(); /* * Make sure we are not freeing an object from another node to the array * cache on this cpu. */ if (likely(node == slab_node)) return 0; return __cache_free_alien(cachep, objp, node, slab_node); } /* * Construct gfp mask to allocate from a specific node but do not reclaim or * warn about failures. */ static inline gfp_t gfp_exact_node(gfp_t flags) { return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~(__GFP_RECLAIM|__GFP_NOFAIL); } #endif static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp) { struct kmem_cache_node *n; /* * Set up the kmem_cache_node for cpu before we can * begin anything. Make sure some other cpu on this * node has not already allocated this */ n = get_node(cachep, node); if (n) { raw_spin_lock_irq(&n->list_lock); n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; raw_spin_unlock_irq(&n->list_lock); return 0; } n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node); if (!n) return -ENOMEM; kmem_cache_node_init(n); n->next_reap = jiffies + REAPTIMEOUT_NODE + ((unsigned long)cachep) % REAPTIMEOUT_NODE; n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; /* * The kmem_cache_nodes don't come and go as CPUs * come and go. slab_mutex provides sufficient * protection here. */ cachep->node[node] = n; return 0; } #if defined(CONFIG_NUMA) || defined(CONFIG_SMP) /* * Allocates and initializes node for a node on each slab cache, used for * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node * will be allocated off-node since memory is not yet online for the new node. * When hotplugging memory or a cpu, existing nodes are not replaced if * already in use. * * Must hold slab_mutex. 
*/ static int init_cache_node_node(int node) { int ret; struct kmem_cache *cachep; list_for_each_entry(cachep, &slab_caches, list) { ret = init_cache_node(cachep, node, GFP_KERNEL); if (ret) return ret; } return 0; } #endif static int setup_kmem_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp, bool force_change) { int ret = -ENOMEM; struct kmem_cache_node *n; struct array_cache *old_shared = NULL; struct array_cache *new_shared = NULL; struct alien_cache **new_alien = NULL; LIST_HEAD(list); if (use_alien_caches) { new_alien = alloc_alien_cache(node, cachep->limit, gfp); if (!new_alien) goto fail; } if (cachep->shared) { new_shared = alloc_arraycache(node, cachep->shared * cachep->batchcount, 0xbaadf00d, gfp); if (!new_shared) goto fail; } ret = init_cache_node(cachep, node, gfp); if (ret) goto fail; n = get_node(cachep, node); raw_spin_lock_irq(&n->list_lock); if (n->shared && force_change) { free_block(cachep, n->shared->entry, n->shared->avail, node, &list); n->shared->avail = 0; } if (!n->shared || force_change) { old_shared = n->shared; n->shared = new_shared; new_shared = NULL; } if (!n->alien) { n->alien = new_alien; new_alien = NULL; } raw_spin_unlock_irq(&n->list_lock); slabs_destroy(cachep, &list); /* * To protect lockless access to n->shared during irq disabled context. * If n->shared isn't NULL in irq disabled context, accessing to it is * guaranteed to be valid until irq is re-enabled, because it will be * freed after synchronize_rcu(). */ if (old_shared && force_change) synchronize_rcu(); fail: kfree(old_shared); kfree(new_shared); free_alien_cache(new_alien); return ret; } #ifdef CONFIG_SMP static void cpuup_canceled(long cpu) { struct kmem_cache *cachep; struct kmem_cache_node *n = NULL; int node = cpu_to_mem(cpu); const struct cpumask *mask = cpumask_of_node(node); list_for_each_entry(cachep, &slab_caches, list) { struct array_cache *nc; struct array_cache *shared; struct alien_cache **alien; LIST_HEAD(list); n = get_node(cachep, node); if (!n) continue; raw_spin_lock_irq(&n->list_lock); /* Free limit for this kmem_cache_node */ n->free_limit -= cachep->batchcount; /* cpu is dead; no one can alloc from it. */ nc = per_cpu_ptr(cachep->cpu_cache, cpu); free_block(cachep, nc->entry, nc->avail, node, &list); nc->avail = 0; if (!cpumask_empty(mask)) { raw_spin_unlock_irq(&n->list_lock); goto free_slab; } shared = n->shared; if (shared) { free_block(cachep, shared->entry, shared->avail, node, &list); n->shared = NULL; } alien = n->alien; n->alien = NULL; raw_spin_unlock_irq(&n->list_lock); kfree(shared); if (alien) { drain_alien_cache(cachep, alien); free_alien_cache(alien); } free_slab: slabs_destroy(cachep, &list); } /* * In the previous loop, all the objects were freed to * the respective cache's slabs, now we can go ahead and * shrink each nodelist to its limit. */ list_for_each_entry(cachep, &slab_caches, list) { n = get_node(cachep, node); if (!n) continue; drain_freelist(cachep, n, INT_MAX); } } static int cpuup_prepare(long cpu) { struct kmem_cache *cachep; int node = cpu_to_mem(cpu); int err; /* * We need to do this right in the beginning since * alloc_arraycache's are going to use this list. 
* kmalloc_node allows us to add the slab to the right * kmem_cache_node and not this cpu's kmem_cache_node */ err = init_cache_node_node(node); if (err < 0) goto bad; /* * Now we can go ahead with allocating the shared arrays and * array caches */ list_for_each_entry(cachep, &slab_caches, list) { err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false); if (err) goto bad; } return 0; bad: cpuup_canceled(cpu); return -ENOMEM; } int slab_prepare_cpu(unsigned int cpu) { int err; mutex_lock(&slab_mutex); err = cpuup_prepare(cpu); mutex_unlock(&slab_mutex); return err; } /* * This is called for a failed online attempt and for a successful * offline. * * Even if all the cpus of a node are down, we don't free the * kmem_cache_node of any cache. This is to avoid a race between cpu_down, and * a kmalloc allocation from another cpu for memory from the node of * the cpu going down. The kmem_cache_node structure is usually allocated from * kmem_cache_create() and gets destroyed at kmem_cache_destroy(). */ int slab_dead_cpu(unsigned int cpu) { mutex_lock(&slab_mutex); cpuup_canceled(cpu); mutex_unlock(&slab_mutex); return 0; } #endif static int slab_online_cpu(unsigned int cpu) { start_cpu_timer(cpu); return 0; } static int slab_offline_cpu(unsigned int cpu) { /* * Shutdown cache reaper. Note that the slab_mutex is held so * that if cache_reap() is invoked it cannot do anything * expensive but will only modify reap_work and reschedule the * timer. */ cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu)); /* Now the cache_reaper is guaranteed to be not running. */ per_cpu(slab_reap_work, cpu).work.func = NULL; return 0; } #if defined(CONFIG_NUMA) /* * Drains freelist for a node on each slab cache, used for memory hot-remove. * Returns -EBUSY if all objects cannot be drained so that the node is not * removed. * * Must hold slab_mutex. */ static int __meminit drain_cache_node_node(int node) { struct kmem_cache *cachep; int ret = 0; list_for_each_entry(cachep, &slab_caches, list) { struct kmem_cache_node *n; n = get_node(cachep, node); if (!n) continue; drain_freelist(cachep, n, INT_MAX); if (!list_empty(&n->slabs_full) || !list_empty(&n->slabs_partial)) { ret = -EBUSY; break; } } return ret; } static int __meminit slab_memory_callback(struct notifier_block *self, unsigned long action, void *arg) { struct memory_notify *mnb = arg; int ret = 0; int nid; nid = mnb->status_change_nid; if (nid < 0) goto out; switch (action) { case MEM_GOING_ONLINE: mutex_lock(&slab_mutex); ret = init_cache_node_node(nid); mutex_unlock(&slab_mutex); break; case MEM_GOING_OFFLINE: mutex_lock(&slab_mutex); ret = drain_cache_node_node(nid); mutex_unlock(&slab_mutex); break; case MEM_ONLINE: case MEM_OFFLINE: case MEM_CANCEL_ONLINE: case MEM_CANCEL_OFFLINE: break; } out: return notifier_from_errno(ret); } #endif /* CONFIG_NUMA */ /* * swap the static kmem_cache_node with kmalloced memory */ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list, int nodeid) { struct kmem_cache_node *ptr; ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid); BUG_ON(!ptr); memcpy(ptr, list, sizeof(struct kmem_cache_node)); /* * Do not assume that spinlocks can be initialized via memcpy: */ raw_spin_lock_init(&ptr->list_lock); MAKE_ALL_LISTS(cachep, ptr, nodeid); cachep->node[nodeid] = ptr; } /* * For setting up all the kmem_cache_node for cache whose buffer_size is same as * size of kmem_cache_node. 
*/ static void __init set_up_node(struct kmem_cache *cachep, int index) { int node; for_each_online_node(node) { cachep->node[node] = &init_kmem_cache_node[index + node]; cachep->node[node]->next_reap = jiffies + REAPTIMEOUT_NODE + ((unsigned long)cachep) % REAPTIMEOUT_NODE; } } /* * Initialisation. Called after the page allocator have been initialised and * before smp_init(). */ void __init kmem_cache_init(void) { int i; kmem_cache = &kmem_cache_boot; if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1) use_alien_caches = 0; for (i = 0; i < NUM_INIT_LISTS; i++) kmem_cache_node_init(&init_kmem_cache_node[i]); /* * Fragmentation resistance on low memory - only use bigger * page orders on machines with more than 32MB of memory if * not overridden on the command line. */ if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT) slab_max_order = SLAB_MAX_ORDER_HI; /* Bootstrap is tricky, because several objects are allocated * from caches that do not exist yet: * 1) initialize the kmem_cache cache: it contains the struct * kmem_cache structures of all caches, except kmem_cache itself: * kmem_cache is statically allocated. * Initially an __init data area is used for the head array and the * kmem_cache_node structures, it's replaced with a kmalloc allocated * array at the end of the bootstrap. * 2) Create the first kmalloc cache. * The struct kmem_cache for the new cache is allocated normally. * An __init data area is used for the head array. * 3) Create the remaining kmalloc caches, with minimally sized * head arrays. * 4) Replace the __init data head arrays for kmem_cache and the first * kmalloc cache with kmalloc allocated arrays. * 5) Replace the __init data for kmem_cache_node for kmem_cache and * the other cache's with kmalloc allocated memory. * 6) Resize the head arrays of the kmalloc caches to their final sizes. */ /* 1) create the kmem_cache */ /* * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids */ create_boot_cache(kmem_cache, "kmem_cache", offsetof(struct kmem_cache, node) + nr_node_ids * sizeof(struct kmem_cache_node *), SLAB_HWCACHE_ALIGN, 0, 0); list_add(&kmem_cache->list, &slab_caches); slab_state = PARTIAL; /* * Initialize the caches that provide memory for the kmem_cache_node * structures first. Without this, further allocations will bug. */ new_kmalloc_cache(INDEX_NODE, KMALLOC_NORMAL, ARCH_KMALLOC_FLAGS); slab_state = PARTIAL_NODE; setup_kmalloc_cache_index_table(); /* 5) Replace the bootstrap kmem_cache_node */ { int nid; for_each_online_node(nid) { init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid); init_list(kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE], &init_kmem_cache_node[SIZE_NODE + nid], nid); } } create_kmalloc_caches(ARCH_KMALLOC_FLAGS); } void __init kmem_cache_init_late(void) { struct kmem_cache *cachep; /* 6) resize the head arrays to their final sizes */ mutex_lock(&slab_mutex); list_for_each_entry(cachep, &slab_caches, list) if (enable_cpucache(cachep, GFP_NOWAIT)) BUG(); mutex_unlock(&slab_mutex); /* Done! */ slab_state = FULL; #ifdef CONFIG_NUMA /* * Register a memory hotplug callback that initializes and frees * node. */ hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); #endif /* * The reap timers are started later, with a module init call: That part * of the kernel is not yet operational. 
*/ } static int __init cpucache_init(void) { int ret; /* * Register the timers that return unneeded pages to the page allocator */ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online", slab_online_cpu, slab_offline_cpu); WARN_ON(ret < 0); return 0; } __initcall(cpucache_init); static noinline void slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) { #if DEBUG struct kmem_cache_node *n; unsigned long flags; int node; static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs)) return; pr_warn("SLAB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n", nodeid, gfpflags, &gfpflags); pr_warn(" cache: %s, object size: %d, order: %d\n", cachep->name, cachep->size, cachep->gfporder); for_each_kmem_cache_node(cachep, node, n) { unsigned long total_slabs, free_slabs, free_objs; raw_spin_lock_irqsave(&n->list_lock, flags); total_slabs = n->total_slabs; free_slabs = n->free_slabs; free_objs = n->free_objects; raw_spin_unlock_irqrestore(&n->list_lock, flags); pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n", node, total_slabs - free_slabs, total_slabs, (total_slabs * cachep->num) - free_objs, total_slabs * cachep->num); } #endif } /* * Interface to system's page allocator. No need to hold the * kmem_cache_node ->list_lock. * * If we requested dmaable memory, we will get it. Even if we * did not request dmaable memory, we might get it, but that * would be relatively rare and ignorable. */ static struct slab *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) { struct folio *folio; struct slab *slab; flags |= cachep->allocflags; folio = (struct folio *) __alloc_pages_node(nodeid, flags, cachep->gfporder); if (!folio) { slab_out_of_memory(cachep, flags, nodeid); return NULL; } slab = folio_slab(folio); account_slab(slab, cachep->gfporder, cachep, flags); __folio_set_slab(folio); /* Make the flag visible before any changes to folio->mapping */ smp_wmb(); /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */ if (sk_memalloc_socks() && folio_is_pfmemalloc(folio)) slab_set_pfmemalloc(slab); return slab; } /* * Interface to system's page release. 
*/ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab) { int order = cachep->gfporder; struct folio *folio = slab_folio(slab); BUG_ON(!folio_test_slab(folio)); __slab_clear_pfmemalloc(slab); page_mapcount_reset(&folio->page); folio->mapping = NULL; /* Make the mapping reset visible before clearing the flag */ smp_wmb(); __folio_clear_slab(folio); mm_account_reclaimed_pages(1 << order); unaccount_slab(slab, order, cachep); __free_pages(&folio->page, order); } static void kmem_rcu_free(struct rcu_head *head) { struct kmem_cache *cachep; struct slab *slab; slab = container_of(head, struct slab, rcu_head); cachep = slab->slab_cache; kmem_freepages(cachep, slab); } #if DEBUG static inline bool is_debug_pagealloc_cache(struct kmem_cache *cachep) { return debug_pagealloc_enabled_static() && OFF_SLAB(cachep) && ((cachep->size % PAGE_SIZE) == 0); } #ifdef CONFIG_DEBUG_PAGEALLOC static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map) { if (!is_debug_pagealloc_cache(cachep)) return; __kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map); } #else static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map) {} #endif static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) { int size = cachep->object_size; addr = &((char *)addr)[obj_offset(cachep)]; memset(addr, val, size); *(unsigned char *)(addr + size - 1) = POISON_END; } static void dump_line(char *data, int offset, int limit) { int i; unsigned char error = 0; int bad_count = 0; pr_err("%03x: ", offset); for (i = 0; i < limit; i++) { if (data[offset + i] != POISON_FREE) { error = data[offset + i]; bad_count++; } } print_hex_dump(KERN_CONT, "", 0, 16, 1, &data[offset], limit, 1); if (bad_count == 1) { error ^= POISON_FREE; if (!(error & (error - 1))) { pr_err("Single bit error detected. Probably bad RAM.\n"); #ifdef CONFIG_X86 pr_err("Run memtest86+ or a similar memory test tool.\n"); #else pr_err("Run a memory test tool.\n"); #endif } } } #endif #if DEBUG static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) { int i, size; char *realobj; if (cachep->flags & SLAB_RED_ZONE) { pr_err("Redzone: 0x%llx/0x%llx\n", *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp)); } if (cachep->flags & SLAB_STORE_USER) pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp)); realobj = (char *)objp + obj_offset(cachep); size = cachep->object_size; for (i = 0; i < size && lines; i += 16, lines--) { int limit; limit = 16; if (i + limit > size) limit = size - i; dump_line(realobj, i, limit); } } static void check_poison_obj(struct kmem_cache *cachep, void *objp) { char *realobj; int size, i; int lines = 0; if (is_debug_pagealloc_cache(cachep)) return; realobj = (char *)objp + obj_offset(cachep); size = cachep->object_size; for (i = 0; i < size; i++) { char exp = POISON_FREE; if (i == size - 1) exp = POISON_END; if (realobj[i] != exp) { int limit; /* Mismatch ! 
*/ /* Print header */ if (lines == 0) { pr_err("Slab corruption (%s): %s start=%px, len=%d\n", print_tainted(), cachep->name, realobj, size); print_objinfo(cachep, objp, 0); } /* Hexdump the affected line */ i = (i / 16) * 16; limit = 16; if (i + limit > size) limit = size - i; dump_line(realobj, i, limit); i += 16; lines++; /* Limit to 5 lines */ if (lines > 5) break; } } if (lines != 0) { /* Print some data about the neighboring objects, if they * exist: */ struct slab *slab = virt_to_slab(objp); unsigned int objnr; objnr = obj_to_index(cachep, slab, objp); if (objnr) { objp = index_to_obj(cachep, slab, objnr - 1); realobj = (char *)objp + obj_offset(cachep); pr_err("Prev obj: start=%px, len=%d\n", realobj, size); print_objinfo(cachep, objp, 2); } if (objnr + 1 < cachep->num) { objp = index_to_obj(cachep, slab, objnr + 1); realobj = (char *)objp + obj_offset(cachep); pr_err("Next obj: start=%px, len=%d\n", realobj, size); print_objinfo(cachep, objp, 2); } } } #endif #if DEBUG static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab) { int i; if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) { poison_obj(cachep, slab->freelist - obj_offset(cachep), POISON_FREE); } for (i = 0; i < cachep->num; i++) { void *objp = index_to_obj(cachep, slab, i); if (cachep->flags & SLAB_POISON) { check_poison_obj(cachep, objp); slab_kernel_map(cachep, objp, 1); } if (cachep->flags & SLAB_RED_ZONE) { if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) slab_error(cachep, "start of a freed object was overwritten"); if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) slab_error(cachep, "end of a freed object was overwritten"); } } } #else static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab) { } #endif /** * slab_destroy - destroy and release all objects in a slab * @cachep: cache pointer being destroyed * @slab: slab being destroyed * * Destroy all the objs in a slab, and release the mem back to the system. * Before calling the slab must have been unlinked from the cache. The * kmem_cache_node ->list_lock is not held/needed. */ static void slab_destroy(struct kmem_cache *cachep, struct slab *slab) { void *freelist; freelist = slab->freelist; slab_destroy_debugcheck(cachep, slab); if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU)) call_rcu(&slab->rcu_head, kmem_rcu_free); else kmem_freepages(cachep, slab); /* * From now on, we don't use freelist * although actual page can be freed in rcu context */ if (OFF_SLAB(cachep)) kfree(freelist); } /* * Update the size of the caches before calling slabs_destroy as it may * recursively call kfree. */ static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list) { struct slab *slab, *n; list_for_each_entry_safe(slab, n, list, slab_list) { list_del(&slab->slab_list); slab_destroy(cachep, slab); } } /** * calculate_slab_order - calculate size (page order) of slabs * @cachep: pointer to the cache that is being created * @size: size of objects to be created in this cache. * @flags: slab allocation flags * * Also calculates the number of objects per slab. * * This could be made much more intelligent. For now, try to avoid using * high order pages for slabs. When the gfp() functions are more friendly * towards high-order requests, this should be changed. 
* * Return: number of left-over bytes in a slab */ static size_t calculate_slab_order(struct kmem_cache *cachep, size_t size, slab_flags_t flags) { size_t left_over = 0; int gfporder; for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) { unsigned int num; size_t remainder; num = cache_estimate(gfporder, size, flags, &remainder); if (!num) continue; /* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */ if (num > SLAB_OBJ_MAX_NUM) break; if (flags & CFLGS_OFF_SLAB) { struct kmem_cache *freelist_cache; size_t freelist_size; size_t freelist_cache_size; freelist_size = num * sizeof(freelist_idx_t); if (freelist_size > KMALLOC_MAX_CACHE_SIZE) { freelist_cache_size = PAGE_SIZE << get_order(freelist_size); } else { freelist_cache = kmalloc_slab(freelist_size, 0u, _RET_IP_); if (!freelist_cache) continue; freelist_cache_size = freelist_cache->size; /* * Needed to avoid possible looping condition * in cache_grow_begin() */ if (OFF_SLAB(freelist_cache)) continue; } /* check if off slab has enough benefit */ if (freelist_cache_size > cachep->size / 2) continue; } /* Found something acceptable - save it away */ cachep->num = num; cachep->gfporder = gfporder; left_over = remainder; /* * A VFS-reclaimable slab tends to have most allocations * as GFP_NOFS and we really don't want to have to be allocating * higher-order pages when we are unable to shrink dcache. */ if (flags & SLAB_RECLAIM_ACCOUNT) break; /* * Large number of objects is good, but very large slabs are * currently bad for the gfp()s. */ if (gfporder >= slab_max_order) break; /* * Acceptable internal fragmentation? */ if (left_over * 8 <= (PAGE_SIZE << gfporder)) break; } return left_over; } static struct array_cache __percpu *alloc_kmem_cache_cpus( struct kmem_cache *cachep, int entries, int batchcount) { int cpu; size_t size; struct array_cache __percpu *cpu_cache; size = sizeof(void *) * entries + sizeof(struct array_cache); cpu_cache = __alloc_percpu(size, sizeof(void *)); if (!cpu_cache) return NULL; for_each_possible_cpu(cpu) { init_arraycache(per_cpu_ptr(cpu_cache, cpu), entries, batchcount); } return cpu_cache; } static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) { if (slab_state >= FULL) return enable_cpucache(cachep, gfp); cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1); if (!cachep->cpu_cache) return 1; if (slab_state == DOWN) { /* Creation of first cache (kmem_cache). 
*/ set_up_node(kmem_cache, CACHE_CACHE); } else if (slab_state == PARTIAL) { /* For kmem_cache_node */ set_up_node(cachep, SIZE_NODE); } else { int node; for_each_online_node(node) { cachep->node[node] = kmalloc_node( sizeof(struct kmem_cache_node), gfp, node); BUG_ON(!cachep->node[node]); kmem_cache_node_init(cachep->node[node]); } } cachep->node[numa_mem_id()]->next_reap = jiffies + REAPTIMEOUT_NODE + ((unsigned long)cachep) % REAPTIMEOUT_NODE; cpu_cache_get(cachep)->avail = 0; cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; cpu_cache_get(cachep)->batchcount = 1; cpu_cache_get(cachep)->touched = 0; cachep->batchcount = 1; cachep->limit = BOOT_CPUCACHE_ENTRIES; return 0; } slab_flags_t kmem_cache_flags(unsigned int object_size, slab_flags_t flags, const char *name) { return flags; } struct kmem_cache * __kmem_cache_alias(const char *name, unsigned int size, unsigned int align, slab_flags_t flags, void (*ctor)(void *)) { struct kmem_cache *cachep; cachep = find_mergeable(size, align, flags, name, ctor); if (cachep) { cachep->refcount++; /* * Adjust the object sizes so that we clear * the complete object on kzalloc. */ cachep->object_size = max_t(int, cachep->object_size, size); } return cachep; } static bool set_objfreelist_slab_cache(struct kmem_cache *cachep, size_t size, slab_flags_t flags) { size_t left; cachep->num = 0; /* * If slab auto-initialization on free is enabled, store the freelist * off-slab, so that its contents don't end up in one of the allocated * objects. */ if (unlikely(slab_want_init_on_free(cachep))) return false; if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU) return false; left = calculate_slab_order(cachep, size, flags | CFLGS_OBJFREELIST_SLAB); if (!cachep->num) return false; if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size) return false; cachep->colour = left / cachep->colour_off; return true; } static bool set_off_slab_cache(struct kmem_cache *cachep, size_t size, slab_flags_t flags) { size_t left; cachep->num = 0; /* * Always use on-slab management when SLAB_NOLEAKTRACE * to avoid recursive calls into kmemleak. */ if (flags & SLAB_NOLEAKTRACE) return false; /* * Size is large, assume best to place the slab management obj * off-slab (should allow better packing of objs). */ left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB); if (!cachep->num) return false; /* * If the slab has been placed off-slab, and we have enough space then * move it on-slab. This is at the expense of any extra colouring. */ if (left >= cachep->num * sizeof(freelist_idx_t)) return false; cachep->colour = left / cachep->colour_off; return true; } static bool set_on_slab_cache(struct kmem_cache *cachep, size_t size, slab_flags_t flags) { size_t left; cachep->num = 0; left = calculate_slab_order(cachep, size, flags); if (!cachep->num) return false; cachep->colour = left / cachep->colour_off; return true; } /* * __kmem_cache_create - Create a cache. * @cachep: cache management descriptor * @flags: SLAB flags * * Returns zero on success, nonzero on failure. * * The flags are * * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) * to catch references to uninitialised memory. * * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check * for buffer overruns. * * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware * cacheline. This can be beneficial if you're counting cycles as closely * as davem. 
*/ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags) { size_t ralign = BYTES_PER_WORD; gfp_t gfp; int err; unsigned int size = cachep->size; #if DEBUG #if FORCED_DEBUG /* * Enable redzoning and last user accounting, except for caches with * large objects, if the increased size would increase the object size * above the next power of two: caches with object sizes just above a * power of two have a significant amount of internal fragmentation. */ if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN + 2 * sizeof(unsigned long long))) flags |= SLAB_RED_ZONE | SLAB_STORE_USER; if (!(flags & SLAB_TYPESAFE_BY_RCU)) flags |= SLAB_POISON; #endif #endif /* * Check that size is in terms of words. This is needed to avoid * unaligned accesses for some archs when redzoning is used, and makes * sure any on-slab bufctl's are also correctly aligned. */ size = ALIGN(size, BYTES_PER_WORD); if (flags & SLAB_RED_ZONE) { ralign = REDZONE_ALIGN; /* If redzoning, ensure that the second redzone is suitably * aligned, by adjusting the object size accordingly. */ size = ALIGN(size, REDZONE_ALIGN); } /* 3) caller mandated alignment */ if (ralign < cachep->align) { ralign = cachep->align; } /* disable debug if necessary */ if (ralign > __alignof__(unsigned long long)) flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); /* * 4) Store it. */ cachep->align = ralign; cachep->colour_off = cache_line_size(); /* Offset must be a multiple of the alignment. */ if (cachep->colour_off < cachep->align) cachep->colour_off = cachep->align; if (slab_is_available()) gfp = GFP_KERNEL; else gfp = GFP_NOWAIT; #if DEBUG /* * Both debugging options require word-alignment which is calculated * into align above. */ if (flags & SLAB_RED_ZONE) { /* add space for red zone words */ cachep->obj_offset += sizeof(unsigned long long); size += 2 * sizeof(unsigned long long); } if (flags & SLAB_STORE_USER) { /* user store requires one word storage behind the end of * the real object. But if the second red zone needs to be * aligned to 64 bits, we must allow that much space. */ if (flags & SLAB_RED_ZONE) size += REDZONE_ALIGN; else size += BYTES_PER_WORD; } #endif kasan_cache_create(cachep, &size, &flags); size = ALIGN(size, cachep->align); /* * We should restrict the number of objects in a slab to implement * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition. */ if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE) size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align); #if DEBUG /* * To activate debug pagealloc, off-slab management is necessary * requirement. In early phase of initialization, small sized slab * doesn't get initialized so it would not be possible. So, we need * to check size >= 256. It guarantees that all necessary small * sized slab is initialized in current slab initialization sequence. 
*/ if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) && size >= 256 && cachep->object_size > cache_line_size()) { if (size < PAGE_SIZE || size % PAGE_SIZE == 0) { size_t tmp_size = ALIGN(size, PAGE_SIZE); if (set_off_slab_cache(cachep, tmp_size, flags)) { flags |= CFLGS_OFF_SLAB; cachep->obj_offset += tmp_size - size; size = tmp_size; goto done; } } } #endif if (set_objfreelist_slab_cache(cachep, size, flags)) { flags |= CFLGS_OBJFREELIST_SLAB; goto done; } if (set_off_slab_cache(cachep, size, flags)) { flags |= CFLGS_OFF_SLAB; goto done; } if (set_on_slab_cache(cachep, size, flags)) goto done; return -E2BIG; done: cachep->freelist_size = cachep->num * sizeof(freelist_idx_t); cachep->flags = flags; cachep->allocflags = __GFP_COMP; if (flags & SLAB_CACHE_DMA) cachep->allocflags |= GFP_DMA; if (flags & SLAB_CACHE_DMA32) cachep->allocflags |= GFP_DMA32; if (flags & SLAB_RECLAIM_ACCOUNT) cachep->allocflags |= __GFP_RECLAIMABLE; cachep->size = size; cachep->reciprocal_buffer_size = reciprocal_value(size); #if DEBUG /* * If we're going to use the generic kernel_map_pages() * poisoning, then it's going to smash the contents of * the redzone and userword anyhow, so switch them off. */ if (IS_ENABLED(CONFIG_PAGE_POISONING) && (cachep->flags & SLAB_POISON) && is_debug_pagealloc_cache(cachep)) cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); #endif err = setup_cpu_cache(cachep, gfp); if (err) { __kmem_cache_release(cachep); return err; } return 0; } #if DEBUG static void check_irq_off(void) { BUG_ON(!irqs_disabled()); } static void check_irq_on(void) { BUG_ON(irqs_disabled()); } static void check_mutex_acquired(void) { BUG_ON(!mutex_is_locked(&slab_mutex)); } static void check_spinlock_acquired(struct kmem_cache *cachep) { #ifdef CONFIG_SMP check_irq_off(); assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); #endif } static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) { #ifdef CONFIG_SMP check_irq_off(); assert_raw_spin_locked(&get_node(cachep, node)->list_lock); #endif } #else #define check_irq_off() do { } while(0) #define check_irq_on() do { } while(0) #define check_mutex_acquired() do { } while(0) #define check_spinlock_acquired(x) do { } while(0) #define check_spinlock_acquired_node(x, y) do { } while(0) #endif static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac, int node, bool free_all, struct list_head *list) { int tofree; if (!ac || !ac->avail) return; tofree = free_all ? 
ac->avail : (ac->limit + 4) / 5; if (tofree > ac->avail) tofree = (ac->avail + 1) / 2; free_block(cachep, ac->entry, tofree, node, list); ac->avail -= tofree; memmove(ac->entry, &(ac->entry[tofree]), sizeof(void *) * ac->avail); } static void do_drain(void *arg) { struct kmem_cache *cachep = arg; struct array_cache *ac; int node = numa_mem_id(); struct kmem_cache_node *n; LIST_HEAD(list); check_irq_off(); ac = cpu_cache_get(cachep); n = get_node(cachep, node); raw_spin_lock(&n->list_lock); free_block(cachep, ac->entry, ac->avail, node, &list); raw_spin_unlock(&n->list_lock); ac->avail = 0; slabs_destroy(cachep, &list); } static void drain_cpu_caches(struct kmem_cache *cachep) { struct kmem_cache_node *n; int node; LIST_HEAD(list); on_each_cpu(do_drain, cachep, 1); check_irq_on(); for_each_kmem_cache_node(cachep, node, n) if (n->alien) drain_alien_cache(cachep, n->alien); for_each_kmem_cache_node(cachep, node, n) { raw_spin_lock_irq(&n->list_lock); drain_array_locked(cachep, n->shared, node, true, &list); raw_spin_unlock_irq(&n->list_lock); slabs_destroy(cachep, &list); } } /* * Remove slabs from the list of free slabs. * Specify the number of slabs to drain in tofree. * * Returns the actual number of slabs released. */ static int drain_freelist(struct kmem_cache *cache, struct kmem_cache_node *n, int tofree) { struct list_head *p; int nr_freed; struct slab *slab; nr_freed = 0; while (nr_freed < tofree && !list_empty(&n->slabs_free)) { raw_spin_lock_irq(&n->list_lock); p = n->slabs_free.prev; if (p == &n->slabs_free) { raw_spin_unlock_irq(&n->list_lock); goto out; } slab = list_entry(p, struct slab, slab_list); list_del(&slab->slab_list); n->free_slabs--; n->total_slabs--; /* * Safe to drop the lock. The slab is no longer linked * to the cache. */ n->free_objects -= cache->num; raw_spin_unlock_irq(&n->list_lock); slab_destroy(cache, slab); nr_freed++; cond_resched(); } out: return nr_freed; } bool __kmem_cache_empty(struct kmem_cache *s) { int node; struct kmem_cache_node *n; for_each_kmem_cache_node(s, node, n) if (!list_empty(&n->slabs_full) || !list_empty(&n->slabs_partial)) return false; return true; } int __kmem_cache_shrink(struct kmem_cache *cachep) { int ret = 0; int node; struct kmem_cache_node *n; drain_cpu_caches(cachep); check_irq_on(); for_each_kmem_cache_node(cachep, node, n) { drain_freelist(cachep, n, INT_MAX); ret += !list_empty(&n->slabs_full) || !list_empty(&n->slabs_partial); } return (ret ? 1 : 0); } int __kmem_cache_shutdown(struct kmem_cache *cachep) { return __kmem_cache_shrink(cachep); } void __kmem_cache_release(struct kmem_cache *cachep) { int i; struct kmem_cache_node *n; cache_random_seq_destroy(cachep); free_percpu(cachep->cpu_cache); /* NUMA: free the node structures */ for_each_kmem_cache_node(cachep, i, n) { kfree(n->shared); free_alien_cache(n->alien); kfree(n); cachep->node[i] = NULL; } } /* * Get the memory for a slab management obj. * * For a slab cache when the slab descriptor is off-slab, the * slab descriptor can't come from the same cache which is being created, * Because if it is the case, that means we defer the creation of * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point. * And we eventually call down to __kmem_cache_create(), which * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one. * This is a "chicken-and-egg" problem. * * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches, * which are all initialized during kmem_cache_init(). 
*/ static void *alloc_slabmgmt(struct kmem_cache *cachep, struct slab *slab, int colour_off, gfp_t local_flags, int nodeid) { void *freelist; void *addr = slab_address(slab); slab->s_mem = addr + colour_off; slab->active = 0; if (OBJFREELIST_SLAB(cachep)) freelist = NULL; else if (OFF_SLAB(cachep)) { /* Slab management obj is off-slab. */ freelist = kmalloc_node(cachep->freelist_size, local_flags, nodeid); } else { /* We will use last bytes at the slab for freelist */ freelist = addr + (PAGE_SIZE << cachep->gfporder) - cachep->freelist_size; } return freelist; } static inline freelist_idx_t get_free_obj(struct slab *slab, unsigned int idx) { return ((freelist_idx_t *) slab->freelist)[idx]; } static inline void set_free_obj(struct slab *slab, unsigned int idx, freelist_idx_t val) { ((freelist_idx_t *)(slab->freelist))[idx] = val; } static void cache_init_objs_debug(struct kmem_cache *cachep, struct slab *slab) { #if DEBUG int i; for (i = 0; i < cachep->num; i++) { void *objp = index_to_obj(cachep, slab, i); if (cachep->flags & SLAB_STORE_USER) *dbg_userword(cachep, objp) = NULL; if (cachep->flags & SLAB_RED_ZONE) { *dbg_redzone1(cachep, objp) = RED_INACTIVE; *dbg_redzone2(cachep, objp) = RED_INACTIVE; } /* * Constructors are not allowed to allocate memory from the same * cache which they are a constructor for. Otherwise, deadlock. * They must also be threaded. */ if (cachep->ctor && !(cachep->flags & SLAB_POISON)) { kasan_unpoison_object_data(cachep, objp + obj_offset(cachep)); cachep->ctor(objp + obj_offset(cachep)); kasan_poison_object_data( cachep, objp + obj_offset(cachep)); } if (cachep->flags & SLAB_RED_ZONE) { if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) slab_error(cachep, "constructor overwrote the end of an object"); if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) slab_error(cachep, "constructor overwrote the start of an object"); } /* need to poison the objs? */ if (cachep->flags & SLAB_POISON) { poison_obj(cachep, objp, POISON_FREE); slab_kernel_map(cachep, objp, 0); } } #endif } #ifdef CONFIG_SLAB_FREELIST_RANDOM /* Hold information during a freelist initialization */ struct freelist_init_state { unsigned int pos; unsigned int *list; unsigned int count; }; /* * Initialize the state based on the randomization method available. * return true if the pre-computed list is available, false otherwise. */ static bool freelist_state_initialize(struct freelist_init_state *state, struct kmem_cache *cachep, unsigned int count) { bool ret; if (!cachep->random_seq) { ret = false; } else { state->list = cachep->random_seq; state->count = count; state->pos = get_random_u32_below(count); ret = true; } return ret; } /* Get the next entry on the list and randomize it using a random shift */ static freelist_idx_t next_random_slot(struct freelist_init_state *state) { if (state->pos >= state->count) state->pos = 0; return state->list[state->pos++]; } /* Swap two freelist entries */ static void swap_free_obj(struct slab *slab, unsigned int a, unsigned int b) { swap(((freelist_idx_t *) slab->freelist)[a], ((freelist_idx_t *) slab->freelist)[b]); } /* * Shuffle the freelist initialization state based on pre-computed lists. * return true if the list was successfully shuffled, false otherwise. 
*/ static bool shuffle_freelist(struct kmem_cache *cachep, struct slab *slab) { unsigned int objfreelist = 0, i, rand, count = cachep->num; struct freelist_init_state state; bool precomputed; if (count < 2) return false; precomputed = freelist_state_initialize(&state, cachep, count); /* Take a random entry as the objfreelist */ if (OBJFREELIST_SLAB(cachep)) { if (!precomputed) objfreelist = count - 1; else objfreelist = next_random_slot(&state); slab->freelist = index_to_obj(cachep, slab, objfreelist) + obj_offset(cachep); count--; } /* * On early boot, generate the list dynamically. * Later use a pre-computed list for speed. */ if (!precomputed) { for (i = 0; i < count; i++) set_free_obj(slab, i, i); /* Fisher-Yates shuffle */ for (i = count - 1; i > 0; i--) { rand = get_random_u32_below(i + 1); swap_free_obj(slab, i, rand); } } else { for (i = 0; i < count; i++) set_free_obj(slab, i, next_random_slot(&state)); } if (OBJFREELIST_SLAB(cachep)) set_free_obj(slab, cachep->num - 1, objfreelist); return true; } #else static inline bool shuffle_freelist(struct kmem_cache *cachep, struct slab *slab) { return false; } #endif /* CONFIG_SLAB_FREELIST_RANDOM */ static void cache_init_objs(struct kmem_cache *cachep, struct slab *slab) { int i; void *objp; bool shuffled; cache_init_objs_debug(cachep, slab); /* Try to randomize the freelist if enabled */ shuffled = shuffle_freelist(cachep, slab); if (!shuffled && OBJFREELIST_SLAB(cachep)) { slab->freelist = index_to_obj(cachep, slab, cachep->num - 1) + obj_offset(cachep); } for (i = 0; i < cachep->num; i++) { objp = index_to_obj(cachep, slab, i); objp = kasan_init_slab_obj(cachep, objp); /* constructor could break poison info */ if (DEBUG == 0 && cachep->ctor) { kasan_unpoison_object_data(cachep, objp); cachep->ctor(objp); kasan_poison_object_data(cachep, objp); } if (!shuffled) set_free_obj(slab, i, i); } } static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slab) { void *objp; objp = index_to_obj(cachep, slab, get_free_obj(slab, slab->active)); slab->active++; return objp; } static void slab_put_obj(struct kmem_cache *cachep, struct slab *slab, void *objp) { unsigned int objnr = obj_to_index(cachep, slab, objp); #if DEBUG unsigned int i; /* Verify double free bug */ for (i = slab->active; i < cachep->num; i++) { if (get_free_obj(slab, i) == objnr) { pr_err("slab: double free detected in cache '%s', objp %px\n", cachep->name, objp); BUG(); } } #endif slab->active--; if (!slab->freelist) slab->freelist = objp + obj_offset(cachep); set_free_obj(slab, slab->active, objnr); } /* * Grow (by 1) the number of slabs within a cache. This is called by * kmem_cache_alloc() when there are no active objs left in a cache. */ static struct slab *cache_grow_begin(struct kmem_cache *cachep, gfp_t flags, int nodeid) { void *freelist; size_t offset; gfp_t local_flags; int slab_node; struct kmem_cache_node *n; struct slab *slab; /* * Be lazy and only check for valid flags here, keeping it out of the * critical path in kmem_cache_alloc(). */ if (unlikely(flags & GFP_SLAB_BUG_MASK)) flags = kmalloc_fix_flags(flags); WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO)); local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); check_irq_off(); if (gfpflags_allow_blocking(local_flags)) local_irq_enable(); /* * Get mem for the objs. Attempt to allocate a physical page from * 'nodeid'. 
*/ slab = kmem_getpages(cachep, local_flags, nodeid); if (!slab) goto failed; slab_node = slab_nid(slab); n = get_node(cachep, slab_node); /* Get colour for the slab, and cal the next value. */ n->colour_next++; if (n->colour_next >= cachep->colour) n->colour_next = 0; offset = n->colour_next; if (offset >= cachep->colour) offset = 0; offset *= cachep->colour_off; /* * Call kasan_poison_slab() before calling alloc_slabmgmt(), so * page_address() in the latter returns a non-tagged pointer, * as it should be for slab pages. */ kasan_poison_slab(slab); /* Get slab management. */ freelist = alloc_slabmgmt(cachep, slab, offset, local_flags & ~GFP_CONSTRAINT_MASK, slab_node); if (OFF_SLAB(cachep) && !freelist) goto opps1; slab->slab_cache = cachep; slab->freelist = freelist; cache_init_objs(cachep, slab); if (gfpflags_allow_blocking(local_flags)) local_irq_disable(); return slab; opps1: kmem_freepages(cachep, slab); failed: if (gfpflags_allow_blocking(local_flags)) local_irq_disable(); return NULL; } static void cache_grow_end(struct kmem_cache *cachep, struct slab *slab) { struct kmem_cache_node *n; void *list = NULL; check_irq_off(); if (!slab) return; INIT_LIST_HEAD(&slab->slab_list); n = get_node(cachep, slab_nid(slab)); raw_spin_lock(&n->list_lock); n->total_slabs++; if (!slab->active) { list_add_tail(&slab->slab_list, &n->slabs_free); n->free_slabs++; } else fixup_slab_list(cachep, n, slab, &list); STATS_INC_GROWN(cachep); n->free_objects += cachep->num - slab->active; raw_spin_unlock(&n->list_lock); fixup_objfreelist_debug(cachep, &list); } #if DEBUG /* * Perform extra freeing checks: * - detect bad pointers. * - POISON/RED_ZONE checking */ static void kfree_debugcheck(const void *objp) { if (!virt_addr_valid(objp)) { pr_err("kfree_debugcheck: out of range ptr %lxh\n", (unsigned long)objp); BUG(); } } static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) { unsigned long long redzone1, redzone2; redzone1 = *dbg_redzone1(cache, obj); redzone2 = *dbg_redzone2(cache, obj); /* * Redzone is ok. 
*/ if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE) return; if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE) slab_error(cache, "double free detected"); else slab_error(cache, "memory outside object was overwritten"); pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n", obj, redzone1, redzone2); } static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, unsigned long caller) { unsigned int objnr; struct slab *slab; BUG_ON(virt_to_cache(objp) != cachep); objp -= obj_offset(cachep); kfree_debugcheck(objp); slab = virt_to_slab(objp); if (cachep->flags & SLAB_RED_ZONE) { verify_redzone_free(cachep, objp); *dbg_redzone1(cachep, objp) = RED_INACTIVE; *dbg_redzone2(cachep, objp) = RED_INACTIVE; } if (cachep->flags & SLAB_STORE_USER) *dbg_userword(cachep, objp) = (void *)caller; objnr = obj_to_index(cachep, slab, objp); BUG_ON(objnr >= cachep->num); BUG_ON(objp != index_to_obj(cachep, slab, objnr)); if (cachep->flags & SLAB_POISON) { poison_obj(cachep, objp, POISON_FREE); slab_kernel_map(cachep, objp, 0); } return objp; } #else #define kfree_debugcheck(x) do { } while(0) #define cache_free_debugcheck(x, objp, z) (objp) #endif static inline void fixup_objfreelist_debug(struct kmem_cache *cachep, void **list) { #if DEBUG void *next = *list; void *objp; while (next) { objp = next - obj_offset(cachep); next = *(void **)next; poison_obj(cachep, objp, POISON_FREE); } #endif } static inline void fixup_slab_list(struct kmem_cache *cachep, struct kmem_cache_node *n, struct slab *slab, void **list) { /* move slabp to correct slabp list: */ list_del(&slab->slab_list); if (slab->active == cachep->num) { list_add(&slab->slab_list, &n->slabs_full); if (OBJFREELIST_SLAB(cachep)) { #if DEBUG /* Poisoning will be done without holding the lock */ if (cachep->flags & SLAB_POISON) { void **objp = slab->freelist; *objp = *list; *list = objp; } #endif slab->freelist = NULL; } } else list_add(&slab->slab_list, &n->slabs_partial); } /* Try to find non-pfmemalloc slab if needed */ static noinline struct slab *get_valid_first_slab(struct kmem_cache_node *n, struct slab *slab, bool pfmemalloc) { if (!slab) return NULL; if (pfmemalloc) return slab; if (!slab_test_pfmemalloc(slab)) return slab; /* No need to keep pfmemalloc slab if we have enough free objects */ if (n->free_objects > n->free_limit) { slab_clear_pfmemalloc(slab); return slab; } /* Move pfmemalloc slab to the end of list to speed up next search */ list_del(&slab->slab_list); if (!slab->active) { list_add_tail(&slab->slab_list, &n->slabs_free); n->free_slabs++; } else list_add_tail(&slab->slab_list, &n->slabs_partial); list_for_each_entry(slab, &n->slabs_partial, slab_list) { if (!slab_test_pfmemalloc(slab)) return slab; } n->free_touched = 1; list_for_each_entry(slab, &n->slabs_free, slab_list) { if (!slab_test_pfmemalloc(slab)) { n->free_slabs--; return slab; } } return NULL; } static struct slab *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc) { struct slab *slab; assert_raw_spin_locked(&n->list_lock); slab = list_first_entry_or_null(&n->slabs_partial, struct slab, slab_list); if (!slab) { n->free_touched = 1; slab = list_first_entry_or_null(&n->slabs_free, struct slab, slab_list); if (slab) n->free_slabs--; } if (sk_memalloc_socks()) slab = get_valid_first_slab(n, slab, pfmemalloc); return slab; } static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep, struct kmem_cache_node *n, gfp_t flags) { struct slab *slab; void *obj; void *list = NULL; if (!gfp_pfmemalloc_allowed(flags)) return NULL; 
raw_spin_lock(&n->list_lock); slab = get_first_slab(n, true); if (!slab) { raw_spin_unlock(&n->list_lock); return NULL; } obj = slab_get_obj(cachep, slab); n->free_objects--; fixup_slab_list(cachep, n, slab, &list); raw_spin_unlock(&n->list_lock); fixup_objfreelist_debug(cachep, &list); return obj; } /* * Slab list should be fixed up by fixup_slab_list() for existing slab * or cache_grow_end() for new slab */ static __always_inline int alloc_block(struct kmem_cache *cachep, struct array_cache *ac, struct slab *slab, int batchcount) { /* * There must be at least one object available for * allocation. */ BUG_ON(slab->active >= cachep->num); while (slab->active < cachep->num && batchcount--) { STATS_INC_ALLOCED(cachep); STATS_INC_ACTIVE(cachep); STATS_SET_HIGH(cachep); ac->entry[ac->avail++] = slab_get_obj(cachep, slab); } return batchcount; } static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) { int batchcount; struct kmem_cache_node *n; struct array_cache *ac, *shared; int node; void *list = NULL; struct slab *slab; check_irq_off(); node = numa_mem_id(); ac = cpu_cache_get(cachep); batchcount = ac->batchcount; if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { /* * If there was little recent activity on this cache, then * perform only a partial refill. Otherwise we could generate * refill bouncing. */ batchcount = BATCHREFILL_LIMIT; } n = get_node(cachep, node); BUG_ON(ac->avail > 0 || !n); shared = READ_ONCE(n->shared); if (!n->free_objects && (!shared || !shared->avail)) goto direct_grow; raw_spin_lock(&n->list_lock); shared = READ_ONCE(n->shared); /* See if we can refill from the shared array */ if (shared && transfer_objects(ac, shared, batchcount)) { shared->touched = 1; goto alloc_done; } while (batchcount > 0) { /* Get slab alloc is to come from. */ slab = get_first_slab(n, false); if (!slab) goto must_grow; check_spinlock_acquired(cachep); batchcount = alloc_block(cachep, ac, slab, batchcount); fixup_slab_list(cachep, n, slab, &list); } must_grow: n->free_objects -= ac->avail; alloc_done: raw_spin_unlock(&n->list_lock); fixup_objfreelist_debug(cachep, &list); direct_grow: if (unlikely(!ac->avail)) { /* Check if we can use obj in pfmemalloc slab */ if (sk_memalloc_socks()) { void *obj = cache_alloc_pfmemalloc(cachep, n, flags); if (obj) return obj; } slab = cache_grow_begin(cachep, gfp_exact_node(flags), node); /* * cache_grow_begin() can reenable interrupts, * then ac could change. 
*/ ac = cpu_cache_get(cachep); if (!ac->avail && slab) alloc_block(cachep, ac, slab, batchcount); cache_grow_end(cachep, slab); if (!ac->avail) return NULL; } ac->touched = 1; return ac->entry[--ac->avail]; } #if DEBUG static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, gfp_t flags, void *objp, unsigned long caller) { WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO)); if (!objp || is_kfence_address(objp)) return objp; if (cachep->flags & SLAB_POISON) { check_poison_obj(cachep, objp); slab_kernel_map(cachep, objp, 1); poison_obj(cachep, objp, POISON_INUSE); } if (cachep->flags & SLAB_STORE_USER) *dbg_userword(cachep, objp) = (void *)caller; if (cachep->flags & SLAB_RED_ZONE) { if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || *dbg_redzone2(cachep, objp) != RED_INACTIVE) { slab_error(cachep, "double free, or memory outside object was overwritten"); pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n", objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp)); } *dbg_redzone1(cachep, objp) = RED_ACTIVE; *dbg_redzone2(cachep, objp) = RED_ACTIVE; } objp += obj_offset(cachep); if (cachep->ctor && cachep->flags & SLAB_POISON) cachep->ctor(objp); if ((unsigned long)objp & (arch_slab_minalign() - 1)) { pr_err("0x%px: not aligned to arch_slab_minalign()=%u\n", objp, arch_slab_minalign()); } return objp; } #else #define cache_alloc_debugcheck_after(a, b, objp, d) (objp) #endif static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) { void *objp; struct array_cache *ac; check_irq_off(); ac = cpu_cache_get(cachep); if (likely(ac->avail)) { ac->touched = 1; objp = ac->entry[--ac->avail]; STATS_INC_ALLOCHIT(cachep); goto out; } STATS_INC_ALLOCMISS(cachep); objp = cache_alloc_refill(cachep, flags); /* * the 'ac' may be updated by cache_alloc_refill(), * and kmemleak_erase() requires its correct value. */ ac = cpu_cache_get(cachep); out: /* * To avoid a false negative, if an object that is in one of the * per-CPU caches is leaked, we need to make sure kmemleak doesn't * treat the array pointers as a reference to the object. */ if (objp) kmemleak_erase(&ac->entry[ac->avail]); return objp; } #ifdef CONFIG_NUMA static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int); /* * Try allocating on another node if PFA_SPREAD_SLAB is a mempolicy is set. * * If we are in_interrupt, then process context, including cpusets and * mempolicy, may not apply and should not be used for allocation policy. */ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) { int nid_alloc, nid_here; if (in_interrupt() || (flags & __GFP_THISNODE)) return NULL; nid_alloc = nid_here = numa_mem_id(); if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) nid_alloc = cpuset_slab_spread_node(); else if (current->mempolicy) nid_alloc = mempolicy_slab_node(); if (nid_alloc != nid_here) return ____cache_alloc_node(cachep, flags, nid_alloc); return NULL; } /* * Fallback function if there was no memory available and no objects on a * certain node and fall back is permitted. First we scan all the * available node for available objects. If that fails then we * perform an allocation without specifying a node. This allows the page * allocator to do its reclaim / fallback magic. We then insert the * slab into the proper nodelist and then allocate from it. 
*/ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) { struct zonelist *zonelist; struct zoneref *z; struct zone *zone; enum zone_type highest_zoneidx = gfp_zone(flags); void *obj = NULL; struct slab *slab; int nid; unsigned int cpuset_mems_cookie; if (flags & __GFP_THISNODE) return NULL; retry_cpuset: cpuset_mems_cookie = read_mems_allowed_begin(); zonelist = node_zonelist(mempolicy_slab_node(), flags); retry: /* * Look through allowed nodes for objects available * from existing per node queues. */ for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) { nid = zone_to_nid(zone); if (cpuset_zone_allowed(zone, flags) && get_node(cache, nid) && get_node(cache, nid)->free_objects) { obj = ____cache_alloc_node(cache, gfp_exact_node(flags), nid); if (obj) break; } } if (!obj) { /* * This allocation will be performed within the constraints * of the current cpuset / memory policy requirements. * We may trigger various forms of reclaim on the allowed * set and go into memory reserves if necessary. */ slab = cache_grow_begin(cache, flags, numa_mem_id()); cache_grow_end(cache, slab); if (slab) { nid = slab_nid(slab); obj = ____cache_alloc_node(cache, gfp_exact_node(flags), nid); /* * Another processor may allocate the objects in * the slab since we are not holding any locks. */ if (!obj) goto retry; } } if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie))) goto retry_cpuset; return obj; } /* * An interface to enable slab creation on nodeid */ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) { struct slab *slab; struct kmem_cache_node *n; void *obj = NULL; void *list = NULL; VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES); n = get_node(cachep, nodeid); BUG_ON(!n); check_irq_off(); raw_spin_lock(&n->list_lock); slab = get_first_slab(n, false); if (!slab) goto must_grow; check_spinlock_acquired_node(cachep, nodeid); STATS_INC_NODEALLOCS(cachep); STATS_INC_ACTIVE(cachep); STATS_SET_HIGH(cachep); BUG_ON(slab->active == cachep->num); obj = slab_get_obj(cachep, slab); n->free_objects--; fixup_slab_list(cachep, n, slab, &list); raw_spin_unlock(&n->list_lock); fixup_objfreelist_debug(cachep, &list); return obj; must_grow: raw_spin_unlock(&n->list_lock); slab = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid); if (slab) { /* This slab isn't counted yet so don't update free_objects */ obj = slab_get_obj(cachep, slab); } cache_grow_end(cachep, slab); return obj ? obj : fallback_alloc(cachep, flags); } static __always_inline void * __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid) { void *objp = NULL; int slab_node = numa_mem_id(); if (nodeid == NUMA_NO_NODE) { if (current->mempolicy || cpuset_do_slab_mem_spread()) { objp = alternate_node_alloc(cachep, flags); if (objp) goto out; } /* * Use the locally cached objects if possible. * However ____cache_alloc does not allow fallback * to other nodes. It may fail while we still have * objects on other nodes available. */ objp = ____cache_alloc(cachep, flags); nodeid = slab_node; } else if (nodeid == slab_node) { objp = ____cache_alloc(cachep, flags); } else if (!get_node(cachep, nodeid)) { /* Node not bootstrapped yet */ objp = fallback_alloc(cachep, flags); goto out; } /* * We may just have run out of memory on the local node. 
* ____cache_alloc_node() knows how to locate memory on other nodes */ if (!objp) objp = ____cache_alloc_node(cachep, flags, nodeid); out: return objp; } #else static __always_inline void * __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid __maybe_unused) { return ____cache_alloc(cachep, flags); } #endif /* CONFIG_NUMA */ static __always_inline void * slab_alloc_node(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags, int nodeid, size_t orig_size, unsigned long caller) { unsigned long save_flags; void *objp; struct obj_cgroup *objcg = NULL; bool init = false; flags &= gfp_allowed_mask; cachep = slab_pre_alloc_hook(cachep, lru, &objcg, 1, flags); if (unlikely(!cachep)) return NULL; objp = kfence_alloc(cachep, orig_size, flags); if (unlikely(objp)) goto out; local_irq_save(save_flags); objp = __do_cache_alloc(cachep, flags, nodeid); local_irq_restore(save_flags); objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); prefetchw(objp); init = slab_want_init_on_alloc(flags, cachep); out: slab_post_alloc_hook(cachep, objcg, flags, 1, &objp, init, cachep->object_size); return objp; } static __always_inline void * slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags, size_t orig_size, unsigned long caller) { return slab_alloc_node(cachep, lru, flags, NUMA_NO_NODE, orig_size, caller); } /* * Caller needs to acquire correct kmem_cache_node's list_lock * @list: List of detached free slabs should be freed by caller */ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, int node, struct list_head *list) { int i; struct kmem_cache_node *n = get_node(cachep, node); struct slab *slab; n->free_objects += nr_objects; for (i = 0; i < nr_objects; i++) { void *objp; struct slab *slab; objp = objpp[i]; slab = virt_to_slab(objp); list_del(&slab->slab_list); check_spinlock_acquired_node(cachep, node); slab_put_obj(cachep, slab, objp); STATS_DEC_ACTIVE(cachep); /* fixup slab chains */ if (slab->active == 0) { list_add(&slab->slab_list, &n->slabs_free); n->free_slabs++; } else { /* Unconditionally move a slab to the end of the * partial list on free - maximum time for the * other objects to be freed, too. */ list_add_tail(&slab->slab_list, &n->slabs_partial); } } while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) { n->free_objects -= cachep->num; slab = list_last_entry(&n->slabs_free, struct slab, slab_list); list_move(&slab->slab_list, list); n->free_slabs--; n->total_slabs--; } } static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) { int batchcount; struct kmem_cache_node *n; int node = numa_mem_id(); LIST_HEAD(list); batchcount = ac->batchcount; check_irq_off(); n = get_node(cachep, node); raw_spin_lock(&n->list_lock); if (n->shared) { struct array_cache *shared_array = n->shared; int max = shared_array->limit - shared_array->avail; if (max) { if (batchcount > max) batchcount = max; memcpy(&(shared_array->entry[shared_array->avail]), ac->entry, sizeof(void *) * batchcount); shared_array->avail += batchcount; goto free_done; } } free_block(cachep, ac->entry, batchcount, node, &list); free_done: #if STATS { int i = 0; struct slab *slab; list_for_each_entry(slab, &n->slabs_free, slab_list) { BUG_ON(slab->active); i++; } STATS_SET_FREEABLE(cachep, i); } #endif raw_spin_unlock(&n->list_lock); ac->avail -= batchcount; memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); slabs_destroy(cachep, &list); } /* * Release an obj back to its cache. 
If the obj has a constructed state, it must * be in this state _before_ it is released. Called with disabled ints. */ static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp, unsigned long caller) { bool init; memcg_slab_free_hook(cachep, virt_to_slab(objp), &objp, 1); if (is_kfence_address(objp)) { kmemleak_free_recursive(objp, cachep->flags); __kfence_free(objp); return; } /* * As memory initialization might be integrated into KASAN, * kasan_slab_free and initialization memset must be * kept together to avoid discrepancies in behavior. */ init = slab_want_init_on_free(cachep); if (init && !kasan_has_integrated_init()) memset(objp, 0, cachep->object_size); /* KASAN might put objp into memory quarantine, delaying its reuse. */ if (kasan_slab_free(cachep, objp, init)) return; /* Use KCSAN to help debug racy use-after-free. */ if (!(cachep->flags & SLAB_TYPESAFE_BY_RCU)) __kcsan_check_access(objp, cachep->object_size, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT); ___cache_free(cachep, objp, caller); } void ___cache_free(struct kmem_cache *cachep, void *objp, unsigned long caller) { struct array_cache *ac = cpu_cache_get(cachep); check_irq_off(); kmemleak_free_recursive(objp, cachep->flags); objp = cache_free_debugcheck(cachep, objp, caller); /* * Skip calling cache_free_alien() when the platform is not numa. * This will avoid cache misses that happen while accessing slabp (which * is per page memory reference) to get nodeid. Instead use a global * variable to skip the call, which is mostly likely to be present in * the cache. */ if (nr_online_nodes > 1 && cache_free_alien(cachep, objp)) return; if (ac->avail < ac->limit) { STATS_INC_FREEHIT(cachep); } else { STATS_INC_FREEMISS(cachep); cache_flusharray(cachep, ac); } if (sk_memalloc_socks()) { struct slab *slab = virt_to_slab(objp); if (unlikely(slab_test_pfmemalloc(slab))) { cache_free_pfmemalloc(cachep, slab, objp); return; } } __free_one(ac, objp); } static __always_inline void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags) { void *ret = slab_alloc(cachep, lru, flags, cachep->object_size, _RET_IP_); trace_kmem_cache_alloc(_RET_IP_, ret, cachep, flags, NUMA_NO_NODE); return ret; } void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) { return __kmem_cache_alloc_lru(cachep, NULL, flags); } EXPORT_SYMBOL(kmem_cache_alloc); void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags) { return __kmem_cache_alloc_lru(cachep, lru, flags); } EXPORT_SYMBOL(kmem_cache_alloc_lru); static __always_inline void cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p, unsigned long caller) { size_t i; for (i = 0; i < size; i++) p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller); } int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p) { struct obj_cgroup *objcg = NULL; unsigned long irqflags; size_t i; s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags); if (!s) return 0; local_irq_save(irqflags); for (i = 0; i < size; i++) { void *objp = kfence_alloc(s, s->object_size, flags) ?: __do_cache_alloc(s, flags, NUMA_NO_NODE); if (unlikely(!objp)) goto error; p[i] = objp; } local_irq_restore(irqflags); cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_); /* * memcg and kmem_cache debug support and memory initialization. * Done outside of the IRQ disabled section. 
*/ slab_post_alloc_hook(s, objcg, flags, size, p, slab_want_init_on_alloc(flags, s), s->object_size); /* FIXME: Trace call missing. Christoph would like a bulk variant */ return size; error: local_irq_restore(irqflags); cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_); slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size); kmem_cache_free_bulk(s, i, p); return 0; } EXPORT_SYMBOL(kmem_cache_alloc_bulk); /** * kmem_cache_alloc_node - Allocate an object on the specified node * @cachep: The cache to allocate from. * @flags: See kmalloc(). * @nodeid: node number of the target node. * * Identical to kmem_cache_alloc but it will allocate memory on the given * node, which can improve the performance for cpu bound structures. * * Fallback to other node is possible if __GFP_THISNODE is not set. * * Return: pointer to the new object or %NULL in case of error */ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) { void *ret = slab_alloc_node(cachep, NULL, flags, nodeid, cachep->object_size, _RET_IP_); trace_kmem_cache_alloc(_RET_IP_, ret, cachep, flags, nodeid); return ret; } EXPORT_SYMBOL(kmem_cache_alloc_node); void *__kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_size, unsigned long caller) { return slab_alloc_node(cachep, NULL, flags, nodeid, orig_size, caller); } #ifdef CONFIG_PRINTK void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) { struct kmem_cache *cachep; unsigned int objnr; void *objp; kpp->kp_ptr = object; kpp->kp_slab = slab; cachep = slab->slab_cache; kpp->kp_slab_cache = cachep; objp = object - obj_offset(cachep); kpp->kp_data_offset = obj_offset(cachep); slab = virt_to_slab(objp); objnr = obj_to_index(cachep, slab, objp); objp = index_to_obj(cachep, slab, objnr); kpp->kp_objp = objp; if (DEBUG && cachep->flags & SLAB_STORE_USER) kpp->kp_ret = *dbg_userword(cachep, objp); } #endif static __always_inline void __do_kmem_cache_free(struct kmem_cache *cachep, void *objp, unsigned long caller) { unsigned long flags; local_irq_save(flags); debug_check_no_locks_freed(objp, cachep->object_size); if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) debug_check_no_obj_freed(objp, cachep->object_size); __cache_free(cachep, objp, caller); local_irq_restore(flags); } void __kmem_cache_free(struct kmem_cache *cachep, void *objp, unsigned long caller) { __do_kmem_cache_free(cachep, objp, caller); } /** * kmem_cache_free - Deallocate an object * @cachep: The cache the allocation was from. * @objp: The previously allocated object. * * Free an object which was previously allocated from this * cache. 
*/ void kmem_cache_free(struct kmem_cache *cachep, void *objp) { cachep = cache_from_obj(cachep, objp); if (!cachep) return; trace_kmem_cache_free(_RET_IP_, objp, cachep); __do_kmem_cache_free(cachep, objp, _RET_IP_); } EXPORT_SYMBOL(kmem_cache_free); void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p) { unsigned long flags; local_irq_save(flags); for (int i = 0; i < size; i++) { void *objp = p[i]; struct kmem_cache *s; if (!orig_s) { struct folio *folio = virt_to_folio(objp); /* called via kfree_bulk */ if (!folio_test_slab(folio)) { local_irq_restore(flags); free_large_kmalloc(folio, objp); local_irq_save(flags); continue; } s = folio_slab(folio)->slab_cache; } else { s = cache_from_obj(orig_s, objp); } if (!s) continue; debug_check_no_locks_freed(objp, s->object_size); if (!(s->flags & SLAB_DEBUG_OBJECTS)) debug_check_no_obj_freed(objp, s->object_size); __cache_free(s, objp, _RET_IP_); } local_irq_restore(flags); /* FIXME: add tracing */ } EXPORT_SYMBOL(kmem_cache_free_bulk); /* * This initializes kmem_cache_node or resizes various caches for all nodes. */ static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp) { int ret; int node; struct kmem_cache_node *n; for_each_online_node(node) { ret = setup_kmem_cache_node(cachep, node, gfp, true); if (ret) goto fail; } return 0; fail: if (!cachep->list.next) { /* Cache is not active yet. Roll back what we did */ node--; while (node >= 0) { n = get_node(cachep, node); if (n) { kfree(n->shared); free_alien_cache(n->alien); kfree(n); cachep->node[node] = NULL; } node--; } } return -ENOMEM; } /* Always called with the slab_mutex held */ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount, int shared, gfp_t gfp) { struct array_cache __percpu *cpu_cache, *prev; int cpu; cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount); if (!cpu_cache) return -ENOMEM; prev = cachep->cpu_cache; cachep->cpu_cache = cpu_cache; /* * Without a previous cpu_cache there's no need to synchronize remote * cpus, so skip the IPIs. */ if (prev) kick_all_cpus_sync(); check_irq_on(); cachep->batchcount = batchcount; cachep->limit = limit; cachep->shared = shared; if (!prev) goto setup_node; for_each_online_cpu(cpu) { LIST_HEAD(list); int node; struct kmem_cache_node *n; struct array_cache *ac = per_cpu_ptr(prev, cpu); node = cpu_to_mem(cpu); n = get_node(cachep, node); raw_spin_lock_irq(&n->list_lock); free_block(cachep, ac->entry, ac->avail, node, &list); raw_spin_unlock_irq(&n->list_lock); slabs_destroy(cachep, &list); } free_percpu(prev); setup_node: return setup_kmem_cache_nodes(cachep, gfp); } /* Called with slab_mutex held always */ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) { int err; int limit = 0; int shared = 0; int batchcount = 0; err = cache_random_seq_create(cachep, cachep->num, gfp); if (err) goto end; /* * The head array serves three purposes: * - create a LIFO ordering, i.e. return objects that are cache-warm * - reduce the number of spinlock operations. * - reduce the number of linked list operations on the slab and * bufctl chains: array operations are cheaper. * The numbers are guessed, we should auto-tune as described by * Bonwick. */ if (cachep->size > 131072) limit = 1; else if (cachep->size > PAGE_SIZE) limit = 8; else if (cachep->size > 1024) limit = 24; else if (cachep->size > 256) limit = 54; else limit = 120; /* * CPU bound tasks (e.g. 
network routing) can exhibit cpu bound * allocation behaviour: Most allocs on one cpu, most free operations * on another cpu. For these cases, an efficient object passing between * cpus is necessary. This is provided by a shared array. The array * replaces Bonwick's magazine layer. * On uniprocessor, it's functionally equivalent (but less efficient) * to a larger limit. Thus disabled by default. */ shared = 0; if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1) shared = 8; #if DEBUG /* * With debugging enabled, large batchcount lead to excessively long * periods with disabled local interrupts. Limit the batchcount */ if (limit > 32) limit = 32; #endif batchcount = (limit + 1) / 2; err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp); end: if (err) pr_err("enable_cpucache failed for %s, error %d\n", cachep->name, -err); return err; } /* * Drain an array if it contains any elements taking the node lock only if * necessary. Note that the node listlock also protects the array_cache * if drain_array() is used on the shared array. */ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, struct array_cache *ac, int node) { LIST_HEAD(list); /* ac from n->shared can be freed if we don't hold the slab_mutex. */ check_mutex_acquired(); if (!ac || !ac->avail) return; if (ac->touched) { ac->touched = 0; return; } raw_spin_lock_irq(&n->list_lock); drain_array_locked(cachep, ac, node, false, &list); raw_spin_unlock_irq(&n->list_lock); slabs_destroy(cachep, &list); } /** * cache_reap - Reclaim memory from caches. * @w: work descriptor * * Called from workqueue/eventd every few seconds. * Purpose: * - clear the per-cpu caches for this CPU. * - return freeable pages to the main free memory pool. * * If we cannot acquire the cache chain mutex then just give up - we'll try * again on the next iteration. */ static void cache_reap(struct work_struct *w) { struct kmem_cache *searchp; struct kmem_cache_node *n; int node = numa_mem_id(); struct delayed_work *work = to_delayed_work(w); if (!mutex_trylock(&slab_mutex)) /* Give up. Setup the next iteration. */ goto out; list_for_each_entry(searchp, &slab_caches, list) { check_irq_on(); /* * We only take the node lock if absolutely necessary and we * have established with reasonable certainty that * we can do some work if the lock was obtained. */ n = get_node(searchp, node); reap_alien(searchp, n); drain_array(searchp, n, cpu_cache_get(searchp), node); /* * These are racy checks but it does not matter * if we skip one check or scan twice. 
*/ if (time_after(n->next_reap, jiffies)) goto next; n->next_reap = jiffies + REAPTIMEOUT_NODE; drain_array(searchp, n, n->shared, node); if (n->free_touched) n->free_touched = 0; else { int freed; freed = drain_freelist(searchp, n, (n->free_limit + 5 * searchp->num - 1) / (5 * searchp->num)); STATS_ADD_REAPED(searchp, freed); } next: cond_resched(); } check_irq_on(); mutex_unlock(&slab_mutex); next_reap_node(); out: /* Set up the next iteration */ schedule_delayed_work_on(smp_processor_id(), work, round_jiffies_relative(REAPTIMEOUT_AC)); } void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) { unsigned long active_objs, num_objs, active_slabs; unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0; unsigned long free_slabs = 0; int node; struct kmem_cache_node *n; for_each_kmem_cache_node(cachep, node, n) { check_irq_on(); raw_spin_lock_irq(&n->list_lock); total_slabs += n->total_slabs; free_slabs += n->free_slabs; free_objs += n->free_objects; if (n->shared) shared_avail += n->shared->avail; raw_spin_unlock_irq(&n->list_lock); } num_objs = total_slabs * cachep->num; active_slabs = total_slabs - free_slabs; active_objs = num_objs - free_objs; sinfo->active_objs = active_objs; sinfo->num_objs = num_objs; sinfo->active_slabs = active_slabs; sinfo->num_slabs = total_slabs; sinfo->shared_avail = shared_avail; sinfo->limit = cachep->limit; sinfo->batchcount = cachep->batchcount; sinfo->shared = cachep->shared; sinfo->objects_per_slab = cachep->num; sinfo->cache_order = cachep->gfporder; } void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) { #if STATS { /* node stats */ unsigned long high = cachep->high_mark; unsigned long allocs = cachep->num_allocations; unsigned long grown = cachep->grown; unsigned long reaped = cachep->reaped; unsigned long errors = cachep->errors; unsigned long max_freeable = cachep->max_freeable; unsigned long node_allocs = cachep->node_allocs; unsigned long node_frees = cachep->node_frees; unsigned long overflows = cachep->node_overflow; seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu", allocs, high, grown, reaped, errors, max_freeable, node_allocs, node_frees, overflows); } /* cpu stats */ { unsigned long allochit = atomic_read(&cachep->allochit); unsigned long allocmiss = atomic_read(&cachep->allocmiss); unsigned long freehit = atomic_read(&cachep->freehit); unsigned long freemiss = atomic_read(&cachep->freemiss); seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", allochit, allocmiss, freehit, freemiss); } #endif } #define MAX_SLABINFO_WRITE 128 /** * slabinfo_write - Tuning for the slab allocator * @file: unused * @buffer: user buffer * @count: data length * @ppos: unused * * Return: %0 on success, negative error code otherwise. */ ssize_t slabinfo_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { char kbuf[MAX_SLABINFO_WRITE + 1], *tmp; int limit, batchcount, shared, res; struct kmem_cache *cachep; if (count > MAX_SLABINFO_WRITE) return -EINVAL; if (copy_from_user(&kbuf, buffer, count)) return -EFAULT; kbuf[MAX_SLABINFO_WRITE] = '\0'; tmp = strchr(kbuf, ' '); if (!tmp) return -EINVAL; *tmp = '\0'; tmp++; if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3) return -EINVAL; /* Find the cache in the chain of caches. 
*/ mutex_lock(&slab_mutex); res = -EINVAL; list_for_each_entry(cachep, &slab_caches, list) { if (!strcmp(cachep->name, kbuf)) { if (limit < 1 || batchcount < 1 || batchcount > limit || shared < 0) { res = 0; } else { res = do_tune_cpucache(cachep, limit, batchcount, shared, GFP_KERNEL); } break; } } mutex_unlock(&slab_mutex); if (res >= 0) res = count; return res; } #ifdef CONFIG_HARDENED_USERCOPY /* * Rejects incorrectly sized objects and objects that are to be copied * to/from userspace but do not fall entirely within the containing slab * cache's usercopy region. * * Returns NULL if check passes, otherwise const char * to name of cache * to indicate an error. */ void __check_heap_object(const void *ptr, unsigned long n, const struct slab *slab, bool to_user) { struct kmem_cache *cachep; unsigned int objnr; unsigned long offset; ptr = kasan_reset_tag(ptr); /* Find and validate object. */ cachep = slab->slab_cache; objnr = obj_to_index(cachep, slab, (void *)ptr); BUG_ON(objnr >= cachep->num); /* Find offset within object. */ if (is_kfence_address(ptr)) offset = ptr - kfence_object_start(ptr); else offset = ptr - index_to_obj(cachep, slab, objnr) - obj_offset(cachep); /* Allow address range falling entirely within usercopy region. */ if (offset >= cachep->useroffset && offset - cachep->useroffset <= cachep->usersize && n <= cachep->useroffset - offset + cachep->usersize) return; usercopy_abort("SLAB object", cachep->name, to_user, offset, n); } #endif /* CONFIG_HARDENED_USERCOPY */
linux-master
mm/slab.c
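The slabinfo_write() handler in the slab.c record above accepts a single line of the form "<cache-name> <limit> <batchcount> <shared>" and hands the values to do_tune_cpucache(), subject to limit >= 1, 1 <= batchcount <= limit, shared >= 0 and a total write of at most 128 bytes. The userspace sketch below exercises that format; the /proc/slabinfo path, the cache name "dentry" and the tuning numbers are illustrative assumptions (writing requires root on a CONFIG_SLAB kernel), not values taken from the source.

/*
 * Hedged sketch: push one tuning line through the slabinfo_write() format
 * parsed above. Cache name and numbers are placeholders for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* "<name> <limit> <batchcount> <shared>", total length < 128 bytes */
	const char *cmd = "dentry 120 60 8\n";
	int fd = open("/proc/slabinfo", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/slabinfo");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}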
// SPDX-License-Identifier: GPL-2.0 /* * linux/mm/mincore.c * * Copyright (C) 1994-2006 Linus Torvalds */ /* * The mincore() system call. */ #include <linux/pagemap.h> #include <linux/gfp.h> #include <linux/pagewalk.h> #include <linux/mman.h> #include <linux/syscalls.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/shmem_fs.h> #include <linux/hugetlb.h> #include <linux/pgtable.h> #include <linux/uaccess.h> #include "swap.h" static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { #ifdef CONFIG_HUGETLB_PAGE unsigned char present; unsigned char *vec = walk->private; /* * Hugepages under user process are always in RAM and never * swapped out, but theoretically it needs to be checked. */ present = pte && !huge_pte_none_mostly(huge_ptep_get(pte)); for (; addr != end; vec++, addr += PAGE_SIZE) *vec = present; walk->private = vec; #else BUG(); #endif return 0; } /* * Later we can get more picky about what "in core" means precisely. * For now, simply check to see if the page is in the page cache, * and is up to date; i.e. that no page-in operation would be required * at this time if an application were to map and access this page. */ static unsigned char mincore_page(struct address_space *mapping, pgoff_t index) { unsigned char present = 0; struct folio *folio; /* * When tmpfs swaps out a page from a file, any process mapping that * file will not get a swp_entry_t in its pte, but rather it is like * any other file mapping (ie. marked !present and faulted in with * tmpfs's .fault). So swapped out tmpfs mappings are tested here. */ folio = filemap_get_incore_folio(mapping, index); if (!IS_ERR(folio)) { present = folio_test_uptodate(folio); folio_put(folio); } return present; } static int __mincore_unmapped_range(unsigned long addr, unsigned long end, struct vm_area_struct *vma, unsigned char *vec) { unsigned long nr = (end - addr) >> PAGE_SHIFT; int i; if (vma->vm_file) { pgoff_t pgoff; pgoff = linear_page_index(vma, addr); for (i = 0; i < nr; i++, pgoff++) vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff); } else { for (i = 0; i < nr; i++) vec[i] = 0; } return nr; } static int mincore_unmapped_range(unsigned long addr, unsigned long end, __always_unused int depth, struct mm_walk *walk) { walk->private += __mincore_unmapped_range(addr, end, walk->vma, walk->private); return 0; } static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { spinlock_t *ptl; struct vm_area_struct *vma = walk->vma; pte_t *ptep; unsigned char *vec = walk->private; int nr = (end - addr) >> PAGE_SHIFT; ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { memset(vec, 1, nr); spin_unlock(ptl); goto out; } ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); if (!ptep) { walk->action = ACTION_AGAIN; return 0; } for (; addr != end; ptep++, addr += PAGE_SIZE) { pte_t pte = ptep_get(ptep); /* We need to do cache lookup too for pte markers */ if (pte_none_mostly(pte)) __mincore_unmapped_range(addr, addr + PAGE_SIZE, vma, vec); else if (pte_present(pte)) *vec = 1; else { /* pte is a swap entry */ swp_entry_t entry = pte_to_swp_entry(pte); if (non_swap_entry(entry)) { /* * migration or hwpoison entries are always * uptodate */ *vec = 1; } else { #ifdef CONFIG_SWAP *vec = mincore_page(swap_address_space(entry), swp_offset(entry)); #else WARN_ON(1); *vec = 1; #endif } } vec++; } pte_unmap_unlock(ptep - 1, ptl); out: walk->private += nr; cond_resched(); return 0; } static inline bool can_do_mincore(struct 
vm_area_struct *vma) { if (vma_is_anonymous(vma)) return true; if (!vma->vm_file) return false; /* * Reveal pagecache information only for non-anonymous mappings that * correspond to the files the calling process could (if tried) open * for writing; otherwise we'd be including shared non-exclusive * mappings, which opens a side channel. */ return inode_owner_or_capable(&nop_mnt_idmap, file_inode(vma->vm_file)) || file_permission(vma->vm_file, MAY_WRITE) == 0; } static const struct mm_walk_ops mincore_walk_ops = { .pmd_entry = mincore_pte_range, .pte_hole = mincore_unmapped_range, .hugetlb_entry = mincore_hugetlb, .walk_lock = PGWALK_RDLOCK, }; /* * Do a chunk of "sys_mincore()". We've already checked * all the arguments, we hold the mmap semaphore: we should * just return the amount of info we're asked for. */ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec) { struct vm_area_struct *vma; unsigned long end; int err; vma = vma_lookup(current->mm, addr); if (!vma) return -ENOMEM; end = min(vma->vm_end, addr + (pages << PAGE_SHIFT)); if (!can_do_mincore(vma)) { unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE); memset(vec, 1, pages); return pages; } err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec); if (err < 0) return err; return (end - addr) >> PAGE_SHIFT; } /* * The mincore(2) system call. * * mincore() returns the memory residency status of the pages in the * current process's address space specified by [addr, addr + len). * The status is returned in a vector of bytes. The least significant * bit of each byte is 1 if the referenced page is in memory, otherwise * it is zero. * * Because the status of a page can change after mincore() checks it * but before it returns to the application, the returned vector may * contain stale information. Only locked pages are guaranteed to * remain in memory. * * return values: * zero - success * -EFAULT - vec points to an illegal address * -EINVAL - addr is not a multiple of PAGE_SIZE * -ENOMEM - Addresses in the range [addr, addr + len] are * invalid for the address space of this process, or * specify one or more pages which are not currently * mapped * -EAGAIN - A kernel resource was temporarily unavailable. */ SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len, unsigned char __user *, vec) { long retval; unsigned long pages; unsigned char *tmp; start = untagged_addr(start); /* Check the start address: needs to be page-aligned.. */ if (start & ~PAGE_MASK) return -EINVAL; /* ..and we need to be passed a valid user-space range */ if (!access_ok((void __user *) start, len)) return -ENOMEM; /* This also avoids any overflows on PAGE_ALIGN */ pages = len >> PAGE_SHIFT; pages += (offset_in_page(len)) != 0; if (!access_ok(vec, pages)) return -EFAULT; tmp = (void *) __get_free_page(GFP_USER); if (!tmp) return -EAGAIN; retval = 0; while (pages) { /* * Do at most PAGE_SIZE entries per iteration, due to * the temporary buffer size. */ mmap_read_lock(current->mm); retval = do_mincore(start, min(pages, PAGE_SIZE), tmp); mmap_read_unlock(current->mm); if (retval <= 0) break; if (copy_to_user(vec, tmp, retval)) { retval = -EFAULT; break; } pages -= retval; vec += retval; start += retval << PAGE_SHIFT; retval = 0; } free_page((unsigned long) tmp); return retval; }
linux-master
mm/mincore.c
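The mincore.c record above documents the syscall contract: a page-aligned start address, one status byte per page, and bit 0 of each byte set when the page is resident. The sketch below drives mincore(2) from userspace against an anonymous mapping and faults in only part of it so both states appear in the vector; the mapping size and the number of pages touched are arbitrary choices for illustration.

/*
 * Hedged sketch of the mincore(2) interface described above: map eight
 * pages, touch the first four, then ask the kernel which are resident.
 */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 8 * page;
	unsigned char *vec;
	void *addr;
	size_t i;

	addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memset(addr, 0, 4 * page);	/* fault in the first four pages */

	vec = malloc(len / page);	/* one status byte per page */
	if (!vec)
		return 1;
	if (mincore(addr, len, vec) != 0) {
		perror("mincore");
		return 1;
	}
	for (i = 0; i < len / page; i++)
		printf("page %zu: %s\n", i,
		       (vec[i] & 1) ? "resident" : "not resident");

	free(vec);
	munmap(addr, len);
	return 0;
}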
// SPDX-License-Identifier: GPL-2.0-only /* * mm/percpu.c - percpu memory allocator * * Copyright (C) 2009 SUSE Linux Products GmbH * Copyright (C) 2009 Tejun Heo <[email protected]> * * Copyright (C) 2017 Facebook Inc. * Copyright (C) 2017 Dennis Zhou <[email protected]> * * The percpu allocator handles both static and dynamic areas. Percpu * areas are allocated in chunks which are divided into units. There is * a 1-to-1 mapping for units to possible cpus. These units are grouped * based on NUMA properties of the machine. * * c0 c1 c2 * ------------------- ------------------- ------------ * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u * ------------------- ...... ------------------- .... ------------ * * Allocation is done by offsets into a unit's address space. Ie., an * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0, * c1:u1, c1:u2, etc. On NUMA machines, the mapping may be non-linear * and even sparse. Access is handled by configuring percpu base * registers according to the cpu to unit mappings and offsetting the * base address using pcpu_unit_size. * * There is special consideration for the first chunk which must handle * the static percpu variables in the kernel image as allocation services * are not online yet. In short, the first chunk is structured like so: * * <Static | [Reserved] | Dynamic> * * The static data is copied from the original section managed by the * linker. The reserved section, if non-zero, primarily manages static * percpu variables from kernel modules. Finally, the dynamic section * takes care of normal allocations. * * The allocator organizes chunks into lists according to free size and * memcg-awareness. To make a percpu allocation memcg-aware the __GFP_ACCOUNT * flag should be passed. All memcg-aware allocations are sharing one set * of chunks and all unaccounted allocations and allocations performed * by processes belonging to the root memory cgroup are using the second set. * * The allocator tries to allocate from the fullest chunk first. Each chunk * is managed by a bitmap with metadata blocks. The allocation map is updated * on every allocation and free to reflect the current state while the boundary * map is only updated on allocation. Each metadata block contains * information to help mitigate the need to iterate over large portions * of the bitmap. The reverse mapping from page to chunk is stored in * the page's index. Lastly, units are lazily backed and grow in unison. * * There is a unique conversion that goes on here between bytes and bits. * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE. The chunk * tracks the number of pages it is responsible for in nr_pages. Helper * functions are used to convert from between the bytes, bits, and blocks. * All hints are managed in bits unless explicitly stated. 
* * To use this allocator, arch code should do the following: * * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate * regular address to percpu pointer and back if they need to be * different from the default * * - use pcpu_setup_first_chunk() during percpu area initialization to * setup the first chunk containing the kernel static percpu area */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/bitmap.h> #include <linux/cpumask.h> #include <linux/memblock.h> #include <linux/err.h> #include <linux/list.h> #include <linux/log2.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/percpu.h> #include <linux/pfn.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/vmalloc.h> #include <linux/workqueue.h> #include <linux/kmemleak.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/memcontrol.h> #include <asm/cacheflush.h> #include <asm/sections.h> #include <asm/tlbflush.h> #include <asm/io.h> #define CREATE_TRACE_POINTS #include <trace/events/percpu.h> #include "percpu-internal.h" /* * The slots are sorted by the size of the biggest continuous free area. * 1-31 bytes share the same slot. */ #define PCPU_SLOT_BASE_SHIFT 5 /* chunks in slots below this are subject to being sidelined on failed alloc */ #define PCPU_SLOT_FAIL_THRESHOLD 3 #define PCPU_EMPTY_POP_PAGES_LOW 2 #define PCPU_EMPTY_POP_PAGES_HIGH 4 #ifdef CONFIG_SMP /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ #ifndef __addr_to_pcpu_ptr #define __addr_to_pcpu_ptr(addr) \ (void __percpu *)((unsigned long)(addr) - \ (unsigned long)pcpu_base_addr + \ (unsigned long)__per_cpu_start) #endif #ifndef __pcpu_ptr_to_addr #define __pcpu_ptr_to_addr(ptr) \ (void __force *)((unsigned long)(ptr) + \ (unsigned long)pcpu_base_addr - \ (unsigned long)__per_cpu_start) #endif #else /* CONFIG_SMP */ /* on UP, it's always identity mapped */ #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr) #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr) #endif /* CONFIG_SMP */ static int pcpu_unit_pages __ro_after_init; static int pcpu_unit_size __ro_after_init; static int pcpu_nr_units __ro_after_init; static int pcpu_atom_size __ro_after_init; int pcpu_nr_slots __ro_after_init; static int pcpu_free_slot __ro_after_init; int pcpu_sidelined_slot __ro_after_init; int pcpu_to_depopulate_slot __ro_after_init; static size_t pcpu_chunk_struct_size __ro_after_init; /* cpus with the lowest and highest unit addresses */ static unsigned int pcpu_low_unit_cpu __ro_after_init; static unsigned int pcpu_high_unit_cpu __ro_after_init; /* the address of the first chunk which starts with the kernel static area */ void *pcpu_base_addr __ro_after_init; static const int *pcpu_unit_map __ro_after_init; /* cpu -> unit */ const unsigned long *pcpu_unit_offsets __ro_after_init; /* cpu -> unit offset */ /* group information, used for vm allocation */ static int pcpu_nr_groups __ro_after_init; static const unsigned long *pcpu_group_offsets __ro_after_init; static const size_t *pcpu_group_sizes __ro_after_init; /* * The first chunk which always exists. Note that unlike other * chunks, this one can be allocated and mapped in several different * ways and thus often doesn't live in the vmalloc area. */ struct pcpu_chunk *pcpu_first_chunk __ro_after_init; /* * Optional reserved chunk. This chunk reserves part of the first * chunk and serves it for reserved allocations. When the reserved * region doesn't exist, the following variable is NULL. 
*/ struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init; DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */ static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */ struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */ /* * The number of empty populated pages, protected by pcpu_lock. * The reserved chunk doesn't contribute to the count. */ int pcpu_nr_empty_pop_pages; /* * The number of populated pages in use by the allocator, protected by * pcpu_lock. This number is kept per a unit per chunk (i.e. when a page gets * allocated/deallocated, it is allocated/deallocated in all units of a chunk * and increments/decrements this count by 1). */ static unsigned long pcpu_nr_populated; /* * Balance work is used to populate or destroy chunks asynchronously. We * try to keep the number of populated free pages between * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one * empty chunk. */ static void pcpu_balance_workfn(struct work_struct *work); static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn); static bool pcpu_async_enabled __read_mostly; static bool pcpu_atomic_alloc_failed; static void pcpu_schedule_balance_work(void) { if (pcpu_async_enabled) schedule_work(&pcpu_balance_work); } /** * pcpu_addr_in_chunk - check if the address is served from this chunk * @chunk: chunk of interest * @addr: percpu address * * RETURNS: * True if the address is served from this chunk. */ static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr) { void *start_addr, *end_addr; if (!chunk) return false; start_addr = chunk->base_addr + chunk->start_offset; end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE - chunk->end_offset; return addr >= start_addr && addr < end_addr; } static int __pcpu_size_to_slot(int size) { int highbit = fls(size); /* size is in bytes */ return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1); } static int pcpu_size_to_slot(int size) { if (size == pcpu_unit_size) return pcpu_free_slot; return __pcpu_size_to_slot(size); } static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) { const struct pcpu_block_md *chunk_md = &chunk->chunk_md; if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE || chunk_md->contig_hint == 0) return 0; return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE); } /* set the pointer to a chunk in a page struct */ static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu) { page->index = (unsigned long)pcpu; } /* obtain pointer to a chunk from a page struct */ static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page) { return (struct pcpu_chunk *)page->index; } static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx) { return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx; } static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx) { return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT); } static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, unsigned int cpu, int page_idx) { return (unsigned long)chunk->base_addr + pcpu_unit_page_offset(cpu, page_idx); } /* * The following are helper functions to help access bitmaps and convert * between bitmap offsets to address offsets. 
*/ static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index) { return chunk->alloc_map + (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG); } static unsigned long pcpu_off_to_block_index(int off) { return off / PCPU_BITMAP_BLOCK_BITS; } static unsigned long pcpu_off_to_block_off(int off) { return off & (PCPU_BITMAP_BLOCK_BITS - 1); } static unsigned long pcpu_block_off_to_off(int index, int off) { return index * PCPU_BITMAP_BLOCK_BITS + off; } /** * pcpu_check_block_hint - check against the contig hint * @block: block of interest * @bits: size of allocation * @align: alignment of area (max PAGE_SIZE) * * Check to see if the allocation can fit in the block's contig hint. * Note, a chunk uses the same hints as a block so this can also check against * the chunk's contig hint. */ static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits, size_t align) { int bit_off = ALIGN(block->contig_hint_start, align) - block->contig_hint_start; return bit_off + bits <= block->contig_hint; } /* * pcpu_next_hint - determine which hint to use * @block: block of interest * @alloc_bits: size of allocation * * This determines if we should scan based on the scan_hint or first_free. * In general, we want to scan from first_free to fulfill allocations by * first fit. However, if we know a scan_hint at position scan_hint_start * cannot fulfill an allocation, we can begin scanning from there knowing * the contig_hint will be our fallback. */ static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits) { /* * The three conditions below determine if we can skip past the * scan_hint. First, does the scan hint exist. Second, is the * contig_hint after the scan_hint (possibly not true iff * contig_hint == scan_hint). Third, is the allocation request * larger than the scan_hint. */ if (block->scan_hint && block->contig_hint_start > block->scan_hint_start && alloc_bits > block->scan_hint) return block->scan_hint_start + block->scan_hint; return block->first_free; } /** * pcpu_next_md_free_region - finds the next hint free area * @chunk: chunk of interest * @bit_off: chunk offset * @bits: size of free area * * Helper function for pcpu_for_each_md_free_region. It checks * block->contig_hint and performs aggregation across blocks to find the * next hint. It modifies bit_off and bits in-place to be consumed in the * loop. */ static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off, int *bits) { int i = pcpu_off_to_block_index(*bit_off); int block_off = pcpu_off_to_block_off(*bit_off); struct pcpu_block_md *block; *bits = 0; for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk); block++, i++) { /* handles contig area across blocks */ if (*bits) { *bits += block->left_free; if (block->left_free == PCPU_BITMAP_BLOCK_BITS) continue; return; } /* * This checks three things. First is there a contig_hint to * check. Second, have we checked this hint before by * comparing the block_off. Third, is this the same as the * right contig hint. In the last case, it spills over into * the next block and should be handled by the contig area * across blocks code. 
*/ *bits = block->contig_hint; if (*bits && block->contig_hint_start >= block_off && *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) { *bit_off = pcpu_block_off_to_off(i, block->contig_hint_start); return; } /* reset to satisfy the second predicate above */ block_off = 0; *bits = block->right_free; *bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free; } } /** * pcpu_next_fit_region - finds fit areas for a given allocation request * @chunk: chunk of interest * @alloc_bits: size of allocation * @align: alignment of area (max PAGE_SIZE) * @bit_off: chunk offset * @bits: size of free area * * Finds the next free region that is viable for use with a given size and * alignment. This only returns if there is a valid area to be used for this * allocation. block->first_free is returned if the allocation request fits * within the block to see if the request can be fulfilled prior to the contig * hint. */ static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits, int align, int *bit_off, int *bits) { int i = pcpu_off_to_block_index(*bit_off); int block_off = pcpu_off_to_block_off(*bit_off); struct pcpu_block_md *block; *bits = 0; for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk); block++, i++) { /* handles contig area across blocks */ if (*bits) { *bits += block->left_free; if (*bits >= alloc_bits) return; if (block->left_free == PCPU_BITMAP_BLOCK_BITS) continue; } /* check block->contig_hint */ *bits = ALIGN(block->contig_hint_start, align) - block->contig_hint_start; /* * This uses the block offset to determine if this has been * checked in the prior iteration. */ if (block->contig_hint && block->contig_hint_start >= block_off && block->contig_hint >= *bits + alloc_bits) { int start = pcpu_next_hint(block, alloc_bits); *bits += alloc_bits + block->contig_hint_start - start; *bit_off = pcpu_block_off_to_off(i, start); return; } /* reset to satisfy the second predicate above */ block_off = 0; *bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free, align); *bits = PCPU_BITMAP_BLOCK_BITS - *bit_off; *bit_off = pcpu_block_off_to_off(i, *bit_off); if (*bits >= alloc_bits) return; } /* no valid offsets were found - fail condition */ *bit_off = pcpu_chunk_map_bits(chunk); } /* * Metadata free area iterators. These perform aggregation of free areas * based on the metadata blocks and return the offset @bit_off and size in * bits of the free area @bits. pcpu_for_each_fit_region only returns when * a fit is found for the allocation request. */ #define pcpu_for_each_md_free_region(chunk, bit_off, bits) \ for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits)); \ (bit_off) < pcpu_chunk_map_bits((chunk)); \ (bit_off) += (bits) + 1, \ pcpu_next_md_free_region((chunk), &(bit_off), &(bits))) #define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) \ for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \ &(bits)); \ (bit_off) < pcpu_chunk_map_bits((chunk)); \ (bit_off) += (bits), \ pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \ &(bits))) /** * pcpu_mem_zalloc - allocate memory * @size: bytes to allocate * @gfp: allocation flags * * Allocate @size bytes. If @size is smaller than PAGE_SIZE, * kzalloc() is used; otherwise, the equivalent of vzalloc() is used. * This is to facilitate passing through whitelisted flags. The * returned memory is always zeroed. * * RETURNS: * Pointer to the allocated area on success, NULL on failure. 
*/ static void *pcpu_mem_zalloc(size_t size, gfp_t gfp) { if (WARN_ON_ONCE(!slab_is_available())) return NULL; if (size <= PAGE_SIZE) return kzalloc(size, gfp); else return __vmalloc(size, gfp | __GFP_ZERO); } /** * pcpu_mem_free - free memory * @ptr: memory to free * * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc(). */ static void pcpu_mem_free(void *ptr) { kvfree(ptr); } static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot, bool move_front) { if (chunk != pcpu_reserved_chunk) { if (move_front) list_move(&chunk->list, &pcpu_chunk_lists[slot]); else list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]); } } static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot) { __pcpu_chunk_move(chunk, slot, true); } /** * pcpu_chunk_relocate - put chunk in the appropriate chunk slot * @chunk: chunk of interest * @oslot: the previous slot it was on * * This function is called after an allocation or free changed @chunk. * New slot according to the changed state is determined and @chunk is * moved to the slot. Note that the reserved chunk is never put on * chunk slots. * * CONTEXT: * pcpu_lock. */ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot) { int nslot = pcpu_chunk_slot(chunk); /* leave isolated chunks in-place */ if (chunk->isolated) return; if (oslot != nslot) __pcpu_chunk_move(chunk, nslot, oslot < nslot); } static void pcpu_isolate_chunk(struct pcpu_chunk *chunk) { lockdep_assert_held(&pcpu_lock); if (!chunk->isolated) { chunk->isolated = true; pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages; } list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]); } static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk) { lockdep_assert_held(&pcpu_lock); if (chunk->isolated) { chunk->isolated = false; pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages; pcpu_chunk_relocate(chunk, -1); } } /* * pcpu_update_empty_pages - update empty page counters * @chunk: chunk of interest * @nr: nr of empty pages * * This is used to keep track of the empty pages now based on the premise * a md_block covers a page. The hint update functions recognize if a block * is made full or broken to calculate deltas for keeping track of free pages. */ static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr) { chunk->nr_empty_pop_pages += nr; if (chunk != pcpu_reserved_chunk && !chunk->isolated) pcpu_nr_empty_pop_pages += nr; } /* * pcpu_region_overlap - determines if two regions overlap * @a: start of first region, inclusive * @b: end of first region, exclusive * @x: start of second region, inclusive * @y: end of second region, exclusive * * This is used to determine if the hint region [a, b) overlaps with the * allocated region [x, y). */ static inline bool pcpu_region_overlap(int a, int b, int x, int y) { return (a < y) && (x < b); } /** * pcpu_block_update - updates a block given a free area * @block: block of interest * @start: start offset in block * @end: end offset in block * * Updates a block given a known free area. The region [start, end) is * expected to be the entirety of the free area within a block. Chooses * the best starting offset if the contig hints are equal. 
*/ static void pcpu_block_update(struct pcpu_block_md *block, int start, int end) { int contig = end - start; block->first_free = min(block->first_free, start); if (start == 0) block->left_free = contig; if (end == block->nr_bits) block->right_free = contig; if (contig > block->contig_hint) { /* promote the old contig_hint to be the new scan_hint */ if (start > block->contig_hint_start) { if (block->contig_hint > block->scan_hint) { block->scan_hint_start = block->contig_hint_start; block->scan_hint = block->contig_hint; } else if (start < block->scan_hint_start) { /* * The old contig_hint == scan_hint. But, the * new contig is larger so hold the invariant * scan_hint_start < contig_hint_start. */ block->scan_hint = 0; } } else { block->scan_hint = 0; } block->contig_hint_start = start; block->contig_hint = contig; } else if (contig == block->contig_hint) { if (block->contig_hint_start && (!start || __ffs(start) > __ffs(block->contig_hint_start))) { /* start has a better alignment so use it */ block->contig_hint_start = start; if (start < block->scan_hint_start && block->contig_hint > block->scan_hint) block->scan_hint = 0; } else if (start > block->scan_hint_start || block->contig_hint > block->scan_hint) { /* * Knowing contig == contig_hint, update the scan_hint * if it is farther than or larger than the current * scan_hint. */ block->scan_hint_start = start; block->scan_hint = contig; } } else { /* * The region is smaller than the contig_hint. So only update * the scan_hint if it is larger than or equal and farther than * the current scan_hint. */ if ((start < block->contig_hint_start && (contig > block->scan_hint || (contig == block->scan_hint && start > block->scan_hint_start)))) { block->scan_hint_start = start; block->scan_hint = contig; } } } /* * pcpu_block_update_scan - update a block given a free area from a scan * @chunk: chunk of interest * @bit_off: chunk offset * @bits: size of free area * * Finding the final allocation spot first goes through pcpu_find_block_fit() * to find a block that can hold the allocation and then pcpu_alloc_area() * where a scan is used. When allocations require specific alignments, * we can inadvertently create holes which will not be seen in the alloc * or free paths. * * This takes a given free area hole and updates a block as it may change the * scan_hint. We need to scan backwards to ensure we don't miss free bits * from alignment. */ static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off, int bits) { int s_off = pcpu_off_to_block_off(bit_off); int e_off = s_off + bits; int s_index, l_bit; struct pcpu_block_md *block; if (e_off > PCPU_BITMAP_BLOCK_BITS) return; s_index = pcpu_off_to_block_index(bit_off); block = chunk->md_blocks + s_index; /* scan backwards in case of alignment skipping free bits */ l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off); s_off = (s_off == l_bit) ? 0 : l_bit + 1; pcpu_block_update(block, s_off, e_off); } /** * pcpu_chunk_refresh_hint - updates metadata about a chunk * @chunk: chunk of interest * @full_scan: if we should scan from the beginning * * Iterates over the metadata blocks to find the largest contig area. * A full scan can be avoided on the allocation path as this is triggered * if we broke the contig_hint. In doing so, the scan_hint will be before * the contig_hint or after if the scan_hint == contig_hint. This cannot * be prevented on freeing as we want to find the largest area possibly * spanning blocks. 
*/ static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan) { struct pcpu_block_md *chunk_md = &chunk->chunk_md; int bit_off, bits; /* promote scan_hint to contig_hint */ if (!full_scan && chunk_md->scan_hint) { bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint; chunk_md->contig_hint_start = chunk_md->scan_hint_start; chunk_md->contig_hint = chunk_md->scan_hint; chunk_md->scan_hint = 0; } else { bit_off = chunk_md->first_free; chunk_md->contig_hint = 0; } bits = 0; pcpu_for_each_md_free_region(chunk, bit_off, bits) pcpu_block_update(chunk_md, bit_off, bit_off + bits); } /** * pcpu_block_refresh_hint * @chunk: chunk of interest * @index: index of the metadata block * * Scans over the block beginning at first_free and updates the block * metadata accordingly. */ static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index) { struct pcpu_block_md *block = chunk->md_blocks + index; unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index); unsigned int start, end; /* region start, region end */ /* promote scan_hint to contig_hint */ if (block->scan_hint) { start = block->scan_hint_start + block->scan_hint; block->contig_hint_start = block->scan_hint_start; block->contig_hint = block->scan_hint; block->scan_hint = 0; } else { start = block->first_free; block->contig_hint = 0; } block->right_free = 0; /* iterate over free areas and update the contig hints */ for_each_clear_bitrange_from(start, end, alloc_map, PCPU_BITMAP_BLOCK_BITS) pcpu_block_update(block, start, end); } /** * pcpu_block_update_hint_alloc - update hint on allocation path * @chunk: chunk of interest * @bit_off: chunk offset * @bits: size of request * * Updates metadata for the allocation path. The metadata only has to be * refreshed by a full scan iff the chunk's contig hint is broken. Block level * scans are required if the block's contig hint is broken. */ static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off, int bits) { struct pcpu_block_md *chunk_md = &chunk->chunk_md; int nr_empty_pages = 0; struct pcpu_block_md *s_block, *e_block, *block; int s_index, e_index; /* block indexes of the freed allocation */ int s_off, e_off; /* block offsets of the freed allocation */ /* * Calculate per block offsets. * The calculation uses an inclusive range, but the resulting offsets * are [start, end). e_index always points to the last block in the * range. */ s_index = pcpu_off_to_block_index(bit_off); e_index = pcpu_off_to_block_index(bit_off + bits - 1); s_off = pcpu_off_to_block_off(bit_off); e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1; s_block = chunk->md_blocks + s_index; e_block = chunk->md_blocks + e_index; /* * Update s_block. */ if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS) nr_empty_pages++; /* * block->first_free must be updated if the allocation takes its place. * If the allocation breaks the contig_hint, a scan is required to * restore this hint. 
*/ if (s_off == s_block->first_free) s_block->first_free = find_next_zero_bit( pcpu_index_alloc_map(chunk, s_index), PCPU_BITMAP_BLOCK_BITS, s_off + bits); if (pcpu_region_overlap(s_block->scan_hint_start, s_block->scan_hint_start + s_block->scan_hint, s_off, s_off + bits)) s_block->scan_hint = 0; if (pcpu_region_overlap(s_block->contig_hint_start, s_block->contig_hint_start + s_block->contig_hint, s_off, s_off + bits)) { /* block contig hint is broken - scan to fix it */ if (!s_off) s_block->left_free = 0; pcpu_block_refresh_hint(chunk, s_index); } else { /* update left and right contig manually */ s_block->left_free = min(s_block->left_free, s_off); if (s_index == e_index) s_block->right_free = min_t(int, s_block->right_free, PCPU_BITMAP_BLOCK_BITS - e_off); else s_block->right_free = 0; } /* * Update e_block. */ if (s_index != e_index) { if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS) nr_empty_pages++; /* * When the allocation is across blocks, the end is along * the left part of the e_block. */ e_block->first_free = find_next_zero_bit( pcpu_index_alloc_map(chunk, e_index), PCPU_BITMAP_BLOCK_BITS, e_off); if (e_off == PCPU_BITMAP_BLOCK_BITS) { /* reset the block */ e_block++; } else { if (e_off > e_block->scan_hint_start) e_block->scan_hint = 0; e_block->left_free = 0; if (e_off > e_block->contig_hint_start) { /* contig hint is broken - scan to fix it */ pcpu_block_refresh_hint(chunk, e_index); } else { e_block->right_free = min_t(int, e_block->right_free, PCPU_BITMAP_BLOCK_BITS - e_off); } } /* update in-between md_blocks */ nr_empty_pages += (e_index - s_index - 1); for (block = s_block + 1; block < e_block; block++) { block->scan_hint = 0; block->contig_hint = 0; block->left_free = 0; block->right_free = 0; } } /* * If the allocation is not atomic, some blocks may not be * populated with pages, while we account it here. The number * of pages will be added back with pcpu_chunk_populated() * when populating pages. */ if (nr_empty_pages) pcpu_update_empty_pages(chunk, -nr_empty_pages); if (pcpu_region_overlap(chunk_md->scan_hint_start, chunk_md->scan_hint_start + chunk_md->scan_hint, bit_off, bit_off + bits)) chunk_md->scan_hint = 0; /* * The only time a full chunk scan is required is if the chunk * contig hint is broken. Otherwise, it means a smaller space * was used and therefore the chunk contig hint is still correct. */ if (pcpu_region_overlap(chunk_md->contig_hint_start, chunk_md->contig_hint_start + chunk_md->contig_hint, bit_off, bit_off + bits)) pcpu_chunk_refresh_hint(chunk, false); } /** * pcpu_block_update_hint_free - updates the block hints on the free path * @chunk: chunk of interest * @bit_off: chunk offset * @bits: size of request * * Updates metadata for the allocation path. This avoids a blind block * refresh by making use of the block contig hints. If this fails, it scans * forward and backward to determine the extent of the free area. This is * capped at the boundary of blocks. * * A chunk update is triggered if a page becomes free, a block becomes free, * or the free spans across blocks. This tradeoff is to minimize iterating * over the block metadata to update chunk_md->contig_hint. * chunk_md->contig_hint may be off by up to a page, but it will never be more * than the available space. If the contig hint is contained in one block, it * will be accurate. 
*/ static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off, int bits) { int nr_empty_pages = 0; struct pcpu_block_md *s_block, *e_block, *block; int s_index, e_index; /* block indexes of the freed allocation */ int s_off, e_off; /* block offsets of the freed allocation */ int start, end; /* start and end of the whole free area */ /* * Calculate per block offsets. * The calculation uses an inclusive range, but the resulting offsets * are [start, end). e_index always points to the last block in the * range. */ s_index = pcpu_off_to_block_index(bit_off); e_index = pcpu_off_to_block_index(bit_off + bits - 1); s_off = pcpu_off_to_block_off(bit_off); e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1; s_block = chunk->md_blocks + s_index; e_block = chunk->md_blocks + e_index; /* * Check if the freed area aligns with the block->contig_hint. * If it does, then the scan to find the beginning/end of the * larger free area can be avoided. * * start and end refer to beginning and end of the free area * within each their respective blocks. This is not necessarily * the entire free area as it may span blocks past the beginning * or end of the block. */ start = s_off; if (s_off == s_block->contig_hint + s_block->contig_hint_start) { start = s_block->contig_hint_start; } else { /* * Scan backwards to find the extent of the free area. * find_last_bit returns the starting bit, so if the start bit * is returned, that means there was no last bit and the * remainder of the chunk is free. */ int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), start); start = (start == l_bit) ? 0 : l_bit + 1; } end = e_off; if (e_off == e_block->contig_hint_start) end = e_block->contig_hint_start + e_block->contig_hint; else end = find_next_bit(pcpu_index_alloc_map(chunk, e_index), PCPU_BITMAP_BLOCK_BITS, end); /* update s_block */ e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS; if (!start && e_off == PCPU_BITMAP_BLOCK_BITS) nr_empty_pages++; pcpu_block_update(s_block, start, e_off); /* freeing in the same block */ if (s_index != e_index) { /* update e_block */ if (end == PCPU_BITMAP_BLOCK_BITS) nr_empty_pages++; pcpu_block_update(e_block, 0, end); /* reset md_blocks in the middle */ nr_empty_pages += (e_index - s_index - 1); for (block = s_block + 1; block < e_block; block++) { block->first_free = 0; block->scan_hint = 0; block->contig_hint_start = 0; block->contig_hint = PCPU_BITMAP_BLOCK_BITS; block->left_free = PCPU_BITMAP_BLOCK_BITS; block->right_free = PCPU_BITMAP_BLOCK_BITS; } } if (nr_empty_pages) pcpu_update_empty_pages(chunk, nr_empty_pages); /* * Refresh chunk metadata when the free makes a block free or spans * across blocks. The contig_hint may be off by up to a page, but if * the contig_hint is contained in a block, it will be accurate with * the else condition below. */ if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index) pcpu_chunk_refresh_hint(chunk, true); else pcpu_block_update(&chunk->chunk_md, pcpu_block_off_to_off(s_index, start), end); } /** * pcpu_is_populated - determines if the region is populated * @chunk: chunk of interest * @bit_off: chunk offset * @bits: size of area * @next_off: return value for the next offset to start searching * * For atomic allocations, check if the backing pages are populated. * * RETURNS: * Bool if the backing pages are populated. * next_index is to skip over unpopulated blocks in pcpu_find_block_fit. 
*/ static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits, int *next_off) { unsigned int start, end; start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE); end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE); start = find_next_zero_bit(chunk->populated, end, start); if (start >= end) return true; end = find_next_bit(chunk->populated, end, start + 1); *next_off = end * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE; return false; } /** * pcpu_find_block_fit - finds the block index to start searching * @chunk: chunk of interest * @alloc_bits: size of request in allocation units * @align: alignment of area (max PAGE_SIZE bytes) * @pop_only: use populated regions only * * Given a chunk and an allocation spec, find the offset to begin searching * for a free region. This iterates over the bitmap metadata blocks to * find an offset that will be guaranteed to fit the requirements. It is * not quite first fit as if the allocation does not fit in the contig hint * of a block or chunk, it is skipped. This errs on the side of caution * to prevent excess iteration. Poor alignment can cause the allocator to * skip over blocks and chunks that have valid free areas. * * RETURNS: * The offset in the bitmap to begin searching. * -1 if no offset is found. */ static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits, size_t align, bool pop_only) { struct pcpu_block_md *chunk_md = &chunk->chunk_md; int bit_off, bits, next_off; /* * This is an optimization to prevent scanning by assuming if the * allocation cannot fit in the global hint, there is memory pressure * and creating a new chunk would happen soon. */ if (!pcpu_check_block_hint(chunk_md, alloc_bits, align)) return -1; bit_off = pcpu_next_hint(chunk_md, alloc_bits); bits = 0; pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) { if (!pop_only || pcpu_is_populated(chunk, bit_off, bits, &next_off)) break; bit_off = next_off; bits = 0; } if (bit_off == pcpu_chunk_map_bits(chunk)) return -1; return bit_off; } /* * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off() * @map: the address to base the search on * @size: the bitmap size in bits * @start: the bitnumber to start searching at * @nr: the number of zeroed bits we're looking for * @align_mask: alignment mask for zero area * @largest_off: offset of the largest area skipped * @largest_bits: size of the largest area skipped * * The @align_mask should be one less than a power of 2. * * This is a modified version of bitmap_find_next_zero_area_off() to remember * the largest area that was skipped. This is imperfect, but in general is * good enough. The largest remembered region is the largest failed region * seen. This does not include anything we possibly skipped due to alignment. * pcpu_block_update_scan() does scan backwards to try and recover what was * lost to alignment. While this can cause scanning to miss earlier possible * free areas, smaller allocations will eventually fill those holes. 
*/ static unsigned long pcpu_find_zero_area(unsigned long *map, unsigned long size, unsigned long start, unsigned long nr, unsigned long align_mask, unsigned long *largest_off, unsigned long *largest_bits) { unsigned long index, end, i, area_off, area_bits; again: index = find_next_zero_bit(map, size, start); /* Align allocation */ index = __ALIGN_MASK(index, align_mask); area_off = index; end = index + nr; if (end > size) return end; i = find_next_bit(map, end, index); if (i < end) { area_bits = i - area_off; /* remember largest unused area with best alignment */ if (area_bits > *largest_bits || (area_bits == *largest_bits && *largest_off && (!area_off || __ffs(area_off) > __ffs(*largest_off)))) { *largest_off = area_off; *largest_bits = area_bits; } start = i + 1; goto again; } return index; } /** * pcpu_alloc_area - allocates an area from a pcpu_chunk * @chunk: chunk of interest * @alloc_bits: size of request in allocation units * @align: alignment of area (max PAGE_SIZE) * @start: bit_off to start searching * * This function takes in a @start offset to begin searching to fit an * allocation of @alloc_bits with alignment @align. It needs to scan * the allocation map because if it fits within the block's contig hint, * @start will be block->first_free. This is an attempt to fill the * allocation prior to breaking the contig hint. The allocation and * boundary maps are updated accordingly if it confirms a valid * free area. * * RETURNS: * Allocated addr offset in @chunk on success. * -1 if no matching area is found. */ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits, size_t align, int start) { struct pcpu_block_md *chunk_md = &chunk->chunk_md; size_t align_mask = (align) ? (align - 1) : 0; unsigned long area_off = 0, area_bits = 0; int bit_off, end, oslot; lockdep_assert_held(&pcpu_lock); oslot = pcpu_chunk_slot(chunk); /* * Search to find a fit. */ end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS, pcpu_chunk_map_bits(chunk)); bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits, align_mask, &area_off, &area_bits); if (bit_off >= end) return -1; if (area_bits) pcpu_block_update_scan(chunk, area_off, area_bits); /* update alloc map */ bitmap_set(chunk->alloc_map, bit_off, alloc_bits); /* update boundary map */ set_bit(bit_off, chunk->bound_map); bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1); set_bit(bit_off + alloc_bits, chunk->bound_map); chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE; /* update first free bit */ if (bit_off == chunk_md->first_free) chunk_md->first_free = find_next_zero_bit( chunk->alloc_map, pcpu_chunk_map_bits(chunk), bit_off + alloc_bits); pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits); pcpu_chunk_relocate(chunk, oslot); return bit_off * PCPU_MIN_ALLOC_SIZE; } /** * pcpu_free_area - frees the corresponding offset * @chunk: chunk of interest * @off: addr offset into chunk * * This function determines the size of an allocation to free using * the boundary bitmap and clears the allocation map. * * RETURNS: * Number of freed bytes. 
*/ static int pcpu_free_area(struct pcpu_chunk *chunk, int off) { struct pcpu_block_md *chunk_md = &chunk->chunk_md; int bit_off, bits, end, oslot, freed; lockdep_assert_held(&pcpu_lock); pcpu_stats_area_dealloc(chunk); oslot = pcpu_chunk_slot(chunk); bit_off = off / PCPU_MIN_ALLOC_SIZE; /* find end index */ end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk), bit_off + 1); bits = end - bit_off; bitmap_clear(chunk->alloc_map, bit_off, bits); freed = bits * PCPU_MIN_ALLOC_SIZE; /* update metadata */ chunk->free_bytes += freed; /* update first free bit */ chunk_md->first_free = min(chunk_md->first_free, bit_off); pcpu_block_update_hint_free(chunk, bit_off, bits); pcpu_chunk_relocate(chunk, oslot); return freed; } static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits) { block->scan_hint = 0; block->contig_hint = nr_bits; block->left_free = nr_bits; block->right_free = nr_bits; block->first_free = 0; block->nr_bits = nr_bits; } static void pcpu_init_md_blocks(struct pcpu_chunk *chunk) { struct pcpu_block_md *md_block; /* init the chunk's block */ pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk)); for (md_block = chunk->md_blocks; md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk); md_block++) pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS); } /** * pcpu_alloc_first_chunk - creates chunks that serve the first chunk * @tmp_addr: the start of the region served * @map_size: size of the region served * * This is responsible for creating the chunks that serve the first chunk. The * base_addr is page aligned down of @tmp_addr while the region end is page * aligned up. Offsets are kept track of to determine the region served. All * this is done to appease the bitmap allocator in avoiding partial blocks. * * RETURNS: * Chunk serving the region at @tmp_addr of @map_size. 
*/ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, int map_size) { struct pcpu_chunk *chunk; unsigned long aligned_addr; int start_offset, offset_bits, region_size, region_bits; size_t alloc_size; /* region calculations */ aligned_addr = tmp_addr & PAGE_MASK; start_offset = tmp_addr - aligned_addr; region_size = ALIGN(start_offset + map_size, PAGE_SIZE); /* allocate chunk */ alloc_size = struct_size(chunk, populated, BITS_TO_LONGS(region_size >> PAGE_SHIFT)); chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES); if (!chunk) panic("%s: Failed to allocate %zu bytes\n", __func__, alloc_size); INIT_LIST_HEAD(&chunk->list); chunk->base_addr = (void *)aligned_addr; chunk->start_offset = start_offset; chunk->end_offset = region_size - chunk->start_offset - map_size; chunk->nr_pages = region_size >> PAGE_SHIFT; region_bits = pcpu_chunk_map_bits(chunk); alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]); chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); if (!chunk->alloc_map) panic("%s: Failed to allocate %zu bytes\n", __func__, alloc_size); alloc_size = BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]); chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); if (!chunk->bound_map) panic("%s: Failed to allocate %zu bytes\n", __func__, alloc_size); alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]); chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES); if (!chunk->md_blocks) panic("%s: Failed to allocate %zu bytes\n", __func__, alloc_size); #ifdef CONFIG_MEMCG_KMEM /* first chunk is free to use */ chunk->obj_cgroups = NULL; #endif pcpu_init_md_blocks(chunk); /* manage populated page bitmap */ chunk->immutable = true; bitmap_fill(chunk->populated, chunk->nr_pages); chunk->nr_populated = chunk->nr_pages; chunk->nr_empty_pop_pages = chunk->nr_pages; chunk->free_bytes = map_size; if (chunk->start_offset) { /* hide the beginning of the bitmap */ offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE; bitmap_set(chunk->alloc_map, 0, offset_bits); set_bit(0, chunk->bound_map); set_bit(offset_bits, chunk->bound_map); chunk->chunk_md.first_free = offset_bits; pcpu_block_update_hint_alloc(chunk, 0, offset_bits); } if (chunk->end_offset) { /* hide the end of the bitmap */ offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE; bitmap_set(chunk->alloc_map, pcpu_chunk_map_bits(chunk) - offset_bits, offset_bits); set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE, chunk->bound_map); set_bit(region_bits, chunk->bound_map); pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk) - offset_bits, offset_bits); } return chunk; } static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp) { struct pcpu_chunk *chunk; int region_bits; chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp); if (!chunk) return NULL; INIT_LIST_HEAD(&chunk->list); chunk->nr_pages = pcpu_unit_pages; region_bits = pcpu_chunk_map_bits(chunk); chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]), gfp); if (!chunk->alloc_map) goto alloc_map_fail; chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]), gfp); if (!chunk->bound_map) goto bound_map_fail; chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]), gfp); if (!chunk->md_blocks) goto md_blocks_fail; #ifdef CONFIG_MEMCG_KMEM if (!mem_cgroup_kmem_disabled()) { chunk->obj_cgroups = pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) * sizeof(struct obj_cgroup *), gfp); if 
(!chunk->obj_cgroups) goto objcg_fail; } #endif pcpu_init_md_blocks(chunk); /* init metadata */ chunk->free_bytes = chunk->nr_pages * PAGE_SIZE; return chunk; #ifdef CONFIG_MEMCG_KMEM objcg_fail: pcpu_mem_free(chunk->md_blocks); #endif md_blocks_fail: pcpu_mem_free(chunk->bound_map); bound_map_fail: pcpu_mem_free(chunk->alloc_map); alloc_map_fail: pcpu_mem_free(chunk); return NULL; } static void pcpu_free_chunk(struct pcpu_chunk *chunk) { if (!chunk) return; #ifdef CONFIG_MEMCG_KMEM pcpu_mem_free(chunk->obj_cgroups); #endif pcpu_mem_free(chunk->md_blocks); pcpu_mem_free(chunk->bound_map); pcpu_mem_free(chunk->alloc_map); pcpu_mem_free(chunk); } /** * pcpu_chunk_populated - post-population bookkeeping * @chunk: pcpu_chunk which got populated * @page_start: the start page * @page_end: the end page * * Pages in [@page_start,@page_end) have been populated to @chunk. Update * the bookkeeping information accordingly. Must be called after each * successful population. */ static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start, int page_end) { int nr = page_end - page_start; lockdep_assert_held(&pcpu_lock); bitmap_set(chunk->populated, page_start, nr); chunk->nr_populated += nr; pcpu_nr_populated += nr; pcpu_update_empty_pages(chunk, nr); } /** * pcpu_chunk_depopulated - post-depopulation bookkeeping * @chunk: pcpu_chunk which got depopulated * @page_start: the start page * @page_end: the end page * * Pages in [@page_start,@page_end) have been depopulated from @chunk. * Update the bookkeeping information accordingly. Must be called after * each successful depopulation. */ static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk, int page_start, int page_end) { int nr = page_end - page_start; lockdep_assert_held(&pcpu_lock); bitmap_clear(chunk->populated, page_start, nr); chunk->nr_populated -= nr; pcpu_nr_populated -= nr; pcpu_update_empty_pages(chunk, -nr); } /* * Chunk management implementation. * * To allow different implementations, chunk alloc/free and * [de]population are implemented in a separate file which is pulled * into this file and compiled together. The following functions * should be implemented. * * pcpu_populate_chunk - populate the specified range of a chunk * pcpu_depopulate_chunk - depopulate the specified range of a chunk * pcpu_post_unmap_tlb_flush - flush tlb for the specified range of a chunk * pcpu_create_chunk - create a new chunk * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop * pcpu_addr_to_page - translate address to physical address * pcpu_verify_alloc_info - check alloc_info is acceptable during init */ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end, gfp_t gfp); static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end); static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, int page_start, int page_end); static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp); static void pcpu_destroy_chunk(struct pcpu_chunk *chunk); static struct page *pcpu_addr_to_page(void *addr); static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai); #ifdef CONFIG_NEED_PER_CPU_KM #include "percpu-km.c" #else #include "percpu-vm.c" #endif /** * pcpu_chunk_addr_search - determine chunk containing specified address * @addr: address for which the chunk needs to be determined. * * This is an internal function that handles all but static allocations. * Static percpu address values should never be passed into the allocator. 
* * RETURNS: * The address of the found chunk. */ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) { /* is it in the dynamic region (first chunk)? */ if (pcpu_addr_in_chunk(pcpu_first_chunk, addr)) return pcpu_first_chunk; /* is it in the reserved region? */ if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr)) return pcpu_reserved_chunk; /* * The address is relative to unit0 which might be unused and * thus unmapped. Offset the address to the unit space of the * current processor before looking it up in the vmalloc * space. Note that any possible cpu id can be used here, so * there's no need to worry about preemption or cpu hotplug. */ addr += pcpu_unit_offsets[raw_smp_processor_id()]; return pcpu_get_page_chunk(pcpu_addr_to_page(addr)); } #ifdef CONFIG_MEMCG_KMEM static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp) { struct obj_cgroup *objcg; if (!memcg_kmem_online() || !(gfp & __GFP_ACCOUNT)) return true; objcg = get_obj_cgroup_from_current(); if (!objcg) return true; if (obj_cgroup_charge(objcg, gfp, pcpu_obj_full_size(size))) { obj_cgroup_put(objcg); return false; } *objcgp = objcg; return true; } static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, struct pcpu_chunk *chunk, int off, size_t size) { if (!objcg) return; if (likely(chunk && chunk->obj_cgroups)) { chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg; rcu_read_lock(); mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, pcpu_obj_full_size(size)); rcu_read_unlock(); } else { obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size)); obj_cgroup_put(objcg); } } static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) { struct obj_cgroup *objcg; if (unlikely(!chunk->obj_cgroups)) return; objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT]; if (!objcg) return; chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL; obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size)); rcu_read_lock(); mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, -pcpu_obj_full_size(size)); rcu_read_unlock(); obj_cgroup_put(objcg); } #else /* CONFIG_MEMCG_KMEM */ static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp) { return true; } static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, struct pcpu_chunk *chunk, int off, size_t size) { } static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) { } #endif /* CONFIG_MEMCG_KMEM */ /** * pcpu_alloc - the percpu allocator * @size: size of area to allocate in bytes * @align: alignment of area (max PAGE_SIZE) * @reserved: allocate from the reserved chunk if available * @gfp: allocation flags * * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN * then no warning will be triggered on invalid or failed allocation * requests. * * RETURNS: * Percpu pointer to the allocated area on success, NULL on failure. 
*/ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, gfp_t gfp) { gfp_t pcpu_gfp; bool is_atomic; bool do_warn; struct obj_cgroup *objcg = NULL; static int warn_limit = 10; struct pcpu_chunk *chunk, *next; const char *err; int slot, off, cpu, ret; unsigned long flags; void __percpu *ptr; size_t bits, bit_align; gfp = current_gfp_context(gfp); /* whitelisted flags that can be passed to the backing allocators */ pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; do_warn = !(gfp & __GFP_NOWARN); /* * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE, * therefore alignment must be a minimum of that many bytes. * An allocation may have internal fragmentation from rounding up * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes. */ if (unlikely(align < PCPU_MIN_ALLOC_SIZE)) align = PCPU_MIN_ALLOC_SIZE; size = ALIGN(size, PCPU_MIN_ALLOC_SIZE); bits = size >> PCPU_MIN_ALLOC_SHIFT; bit_align = align >> PCPU_MIN_ALLOC_SHIFT; if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || !is_power_of_2(align))) { WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n", size, align); return NULL; } if (unlikely(!pcpu_memcg_pre_alloc_hook(size, gfp, &objcg))) return NULL; if (!is_atomic) { /* * pcpu_balance_workfn() allocates memory under this mutex, * and it may wait for memory reclaim. Allow current task * to become OOM victim, in case of memory pressure. */ if (gfp & __GFP_NOFAIL) { mutex_lock(&pcpu_alloc_mutex); } else if (mutex_lock_killable(&pcpu_alloc_mutex)) { pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size); return NULL; } } spin_lock_irqsave(&pcpu_lock, flags); /* serve reserved allocations from the reserved chunk if available */ if (reserved && pcpu_reserved_chunk) { chunk = pcpu_reserved_chunk; off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic); if (off < 0) { err = "alloc from reserved chunk failed"; goto fail_unlock; } off = pcpu_alloc_area(chunk, bits, bit_align, off); if (off >= 0) goto area_found; err = "alloc from reserved chunk failed"; goto fail_unlock; } restart: /* search through normal chunks */ for (slot = pcpu_size_to_slot(size); slot <= pcpu_free_slot; slot++) { list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot], list) { off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic); if (off < 0) { if (slot < PCPU_SLOT_FAIL_THRESHOLD) pcpu_chunk_move(chunk, 0); continue; } off = pcpu_alloc_area(chunk, bits, bit_align, off); if (off >= 0) { pcpu_reintegrate_chunk(chunk); goto area_found; } } } spin_unlock_irqrestore(&pcpu_lock, flags); if (is_atomic) { err = "atomic alloc failed, no space left"; goto fail; } /* No space left. Create a new chunk. 
*/ if (list_empty(&pcpu_chunk_lists[pcpu_free_slot])) { chunk = pcpu_create_chunk(pcpu_gfp); if (!chunk) { err = "failed to allocate new chunk"; goto fail; } spin_lock_irqsave(&pcpu_lock, flags); pcpu_chunk_relocate(chunk, -1); } else { spin_lock_irqsave(&pcpu_lock, flags); } goto restart; area_found: pcpu_stats_area_alloc(chunk, size); spin_unlock_irqrestore(&pcpu_lock, flags); /* populate if not all pages are already there */ if (!is_atomic) { unsigned int page_end, rs, re; rs = PFN_DOWN(off); page_end = PFN_UP(off + size); for_each_clear_bitrange_from(rs, re, chunk->populated, page_end) { WARN_ON(chunk->immutable); ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp); spin_lock_irqsave(&pcpu_lock, flags); if (ret) { pcpu_free_area(chunk, off); err = "failed to populate"; goto fail_unlock; } pcpu_chunk_populated(chunk, rs, re); spin_unlock_irqrestore(&pcpu_lock, flags); } mutex_unlock(&pcpu_alloc_mutex); } if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) pcpu_schedule_balance_work(); /* clear the areas and return address relative to base address */ for_each_possible_cpu(cpu) memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); kmemleak_alloc_percpu(ptr, size, gfp); trace_percpu_alloc_percpu(_RET_IP_, reserved, is_atomic, size, align, chunk->base_addr, off, ptr, pcpu_obj_full_size(size), gfp); pcpu_memcg_post_alloc_hook(objcg, chunk, off, size); return ptr; fail_unlock: spin_unlock_irqrestore(&pcpu_lock, flags); fail: trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align); if (do_warn && warn_limit) { pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", size, align, is_atomic, err); if (!is_atomic) dump_stack(); if (!--warn_limit) pr_info("limit reached, disable warning\n"); } if (is_atomic) { /* see the flag handling in pcpu_balance_workfn() */ pcpu_atomic_alloc_failed = true; pcpu_schedule_balance_work(); } else { mutex_unlock(&pcpu_alloc_mutex); } pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size); return NULL; } /** * __alloc_percpu_gfp - allocate dynamic percpu area * @size: size of area to allocate in bytes * @align: alignment of area (max PAGE_SIZE) * @gfp: allocation flags * * Allocate zero-filled percpu area of @size bytes aligned at @align. If * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can * be called from any context but is a lot more likely to fail. If @gfp * has __GFP_NOWARN then no warning will be triggered on invalid or failed * allocation requests. * * RETURNS: * Percpu pointer to the allocated area on success, NULL on failure. */ void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) { return pcpu_alloc(size, align, false, gfp); } EXPORT_SYMBOL_GPL(__alloc_percpu_gfp); /** * __alloc_percpu - allocate dynamic percpu area * @size: size of area to allocate in bytes * @align: alignment of area (max PAGE_SIZE) * * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL). */ void __percpu *__alloc_percpu(size_t size, size_t align) { return pcpu_alloc(size, align, false, GFP_KERNEL); } EXPORT_SYMBOL_GPL(__alloc_percpu); /** * __alloc_reserved_percpu - allocate reserved percpu area * @size: size of area to allocate in bytes * @align: alignment of area (max PAGE_SIZE) * * Allocate zero-filled percpu area of @size bytes aligned at @align * from reserved percpu area if arch has set it up; otherwise, * allocation is served from the same dynamic area. Might sleep. * Might trigger writeouts. * * CONTEXT: * Does GFP_KERNEL allocation. 
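 *
 * The module loader is the typical caller of this interface: a module's
 * static percpu section is allocated with a call along the lines of
 *
 *	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
 *
 * (see percpu_modalloc()), so that module percpu symbols stay within the
 * relocation offset range described at pcpu_setup_first_chunk().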
* * RETURNS: * Percpu pointer to the allocated area on success, NULL on failure. */ void __percpu *__alloc_reserved_percpu(size_t size, size_t align) { return pcpu_alloc(size, align, true, GFP_KERNEL); } /** * pcpu_balance_free - manage the amount of free chunks * @empty_only: free chunks only if there are no populated pages * * If empty_only is %false, reclaim all fully free chunks regardless of the * number of populated pages. Otherwise, only reclaim chunks that have no * populated pages. * * CONTEXT: * pcpu_lock (can be dropped temporarily) */ static void pcpu_balance_free(bool empty_only) { LIST_HEAD(to_free); struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot]; struct pcpu_chunk *chunk, *next; lockdep_assert_held(&pcpu_lock); /* * There's no reason to keep around multiple unused chunks and VM * areas can be scarce. Destroy all free chunks except for one. */ list_for_each_entry_safe(chunk, next, free_head, list) { WARN_ON(chunk->immutable); /* spare the first one */ if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) continue; if (!empty_only || chunk->nr_empty_pop_pages == 0) list_move(&chunk->list, &to_free); } if (list_empty(&to_free)) return; spin_unlock_irq(&pcpu_lock); list_for_each_entry_safe(chunk, next, &to_free, list) { unsigned int rs, re; for_each_set_bitrange(rs, re, chunk->populated, chunk->nr_pages) { pcpu_depopulate_chunk(chunk, rs, re); spin_lock_irq(&pcpu_lock); pcpu_chunk_depopulated(chunk, rs, re); spin_unlock_irq(&pcpu_lock); } pcpu_destroy_chunk(chunk); cond_resched(); } spin_lock_irq(&pcpu_lock); } /** * pcpu_balance_populated - manage the amount of populated pages * * Maintain a certain amount of populated pages to satisfy atomic allocations. * It is possible that this is called when physical memory is scarce causing * OOM killer to be triggered. We should avoid doing so until an actual * allocation causes the failure as it is possible that requests can be * serviced from already backed regions. * * CONTEXT: * pcpu_lock (can be dropped temporarily) */ static void pcpu_balance_populated(void) { /* gfp flags passed to underlying allocators */ const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; struct pcpu_chunk *chunk; int slot, nr_to_pop, ret; lockdep_assert_held(&pcpu_lock); /* * Ensure there are certain number of free populated pages for * atomic allocs. Fill up from the most packed so that atomic * allocs don't increase fragmentation. If atomic allocation * failed previously, always populate the maximum amount. This * should prevent atomic allocs larger than PAGE_SIZE from keeping * failing indefinitely; however, large atomic allocs are not * something we support properly and can be highly unreliable and * inefficient. 
*/ retry_pop: if (pcpu_atomic_alloc_failed) { nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH; /* best effort anyway, don't worry about synchronization */ pcpu_atomic_alloc_failed = false; } else { nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH - pcpu_nr_empty_pop_pages, 0, PCPU_EMPTY_POP_PAGES_HIGH); } for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) { unsigned int nr_unpop = 0, rs, re; if (!nr_to_pop) break; list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) { nr_unpop = chunk->nr_pages - chunk->nr_populated; if (nr_unpop) break; } if (!nr_unpop) continue; /* @chunk can't go away while pcpu_alloc_mutex is held */ for_each_clear_bitrange(rs, re, chunk->populated, chunk->nr_pages) { int nr = min_t(int, re - rs, nr_to_pop); spin_unlock_irq(&pcpu_lock); ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp); cond_resched(); spin_lock_irq(&pcpu_lock); if (!ret) { nr_to_pop -= nr; pcpu_chunk_populated(chunk, rs, rs + nr); } else { nr_to_pop = 0; } if (!nr_to_pop) break; } } if (nr_to_pop) { /* ran out of chunks to populate, create a new one and retry */ spin_unlock_irq(&pcpu_lock); chunk = pcpu_create_chunk(gfp); cond_resched(); spin_lock_irq(&pcpu_lock); if (chunk) { pcpu_chunk_relocate(chunk, -1); goto retry_pop; } } } /** * pcpu_reclaim_populated - scan over to_depopulate chunks and free empty pages * * Scan over chunks in the depopulate list and try to release unused populated * pages back to the system. Depopulated chunks are sidelined to prevent * repopulating these pages unless required. Fully free chunks are reintegrated * and freed accordingly (1 is kept around). If we drop below the empty * populated pages threshold, reintegrate the chunk if it has empty free pages. * Each chunk is scanned in the reverse order to keep populated pages close to * the beginning of the chunk. * * CONTEXT: * pcpu_lock (can be dropped temporarily) * */ static void pcpu_reclaim_populated(void) { struct pcpu_chunk *chunk; struct pcpu_block_md *block; int freed_page_start, freed_page_end; int i, end; bool reintegrate; lockdep_assert_held(&pcpu_lock); /* * Once a chunk is isolated to the to_depopulate list, the chunk is no * longer discoverable to allocations whom may populate pages. The only * other accessor is the free path which only returns area back to the * allocator not touching the populated bitmap. */ while ((chunk = list_first_entry_or_null( &pcpu_chunk_lists[pcpu_to_depopulate_slot], struct pcpu_chunk, list))) { WARN_ON(chunk->immutable); /* * Scan chunk's pages in the reverse order to keep populated * pages close to the beginning of the chunk. */ freed_page_start = chunk->nr_pages; freed_page_end = 0; reintegrate = false; for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) { /* no more work to do */ if (chunk->nr_empty_pop_pages == 0) break; /* reintegrate chunk to prevent atomic alloc failures */ if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) { reintegrate = true; break; } /* * If the page is empty and populated, start or * extend the (i, end) range. If i == 0, decrease * i and perform the depopulation to cover the last * (first) page in the chunk. 
*/ block = chunk->md_blocks + i; if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS && test_bit(i, chunk->populated)) { if (end == -1) end = i; if (i > 0) continue; i--; } /* depopulate if there is an active range */ if (end == -1) continue; spin_unlock_irq(&pcpu_lock); pcpu_depopulate_chunk(chunk, i + 1, end + 1); cond_resched(); spin_lock_irq(&pcpu_lock); pcpu_chunk_depopulated(chunk, i + 1, end + 1); freed_page_start = min(freed_page_start, i + 1); freed_page_end = max(freed_page_end, end + 1); /* reset the range and continue */ end = -1; } /* batch tlb flush per chunk to amortize cost */ if (freed_page_start < freed_page_end) { spin_unlock_irq(&pcpu_lock); pcpu_post_unmap_tlb_flush(chunk, freed_page_start, freed_page_end); cond_resched(); spin_lock_irq(&pcpu_lock); } if (reintegrate || chunk->free_bytes == pcpu_unit_size) pcpu_reintegrate_chunk(chunk); else list_move_tail(&chunk->list, &pcpu_chunk_lists[pcpu_sidelined_slot]); } } /** * pcpu_balance_workfn - manage the amount of free chunks and populated pages * @work: unused * * For each chunk type, manage the number of fully free chunks and the number of * populated pages. An important thing to consider is when pages are freed and * how they contribute to the global counts. */ static void pcpu_balance_workfn(struct work_struct *work) { /* * pcpu_balance_free() is called twice because the first time we may * trim pages in the active pcpu_nr_empty_pop_pages which may cause us * to grow other chunks. This then gives pcpu_reclaim_populated() time * to move fully free chunks to the active list to be freed if * appropriate. */ mutex_lock(&pcpu_alloc_mutex); spin_lock_irq(&pcpu_lock); pcpu_balance_free(false); pcpu_reclaim_populated(); pcpu_balance_populated(); pcpu_balance_free(true); spin_unlock_irq(&pcpu_lock); mutex_unlock(&pcpu_alloc_mutex); } /** * free_percpu - free percpu area * @ptr: pointer to area to free * * Free percpu area @ptr. * * CONTEXT: * Can be called from atomic context. */ void free_percpu(void __percpu *ptr) { void *addr; struct pcpu_chunk *chunk; unsigned long flags; int size, off; bool need_balance = false; if (!ptr) return; kmemleak_free_percpu(ptr); addr = __pcpu_ptr_to_addr(ptr); spin_lock_irqsave(&pcpu_lock, flags); chunk = pcpu_chunk_addr_search(addr); off = addr - chunk->base_addr; size = pcpu_free_area(chunk, off); pcpu_memcg_free_hook(chunk, off, size); /* * If there are more than one fully free chunks, wake up grim reaper. * If the chunk is isolated, it may be in the process of being * reclaimed. Let reclaim manage cleaning up of that chunk. 
*/ if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) { struct pcpu_chunk *pos; list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list) if (pos != chunk) { need_balance = true; break; } } else if (pcpu_should_reclaim_chunk(chunk)) { pcpu_isolate_chunk(chunk); need_balance = true; } trace_percpu_free_percpu(chunk->base_addr, off, ptr); spin_unlock_irqrestore(&pcpu_lock, flags); if (need_balance) pcpu_schedule_balance_work(); } EXPORT_SYMBOL_GPL(free_percpu); bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr) { #ifdef CONFIG_SMP const size_t static_size = __per_cpu_end - __per_cpu_start; void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); unsigned int cpu; for_each_possible_cpu(cpu) { void *start = per_cpu_ptr(base, cpu); void *va = (void *)addr; if (va >= start && va < start + static_size) { if (can_addr) { *can_addr = (unsigned long) (va - start); *can_addr += (unsigned long) per_cpu_ptr(base, get_boot_cpu_id()); } return true; } } #endif /* on UP, can't distinguish from other static vars, always false */ return false; } /** * is_kernel_percpu_address - test whether address is from static percpu area * @addr: address to test * * Test whether @addr belongs to in-kernel static percpu area. Module * static percpu areas are not considered. For those, use * is_module_percpu_address(). * * RETURNS: * %true if @addr is from in-kernel static percpu area, %false otherwise. */ bool is_kernel_percpu_address(unsigned long addr) { return __is_kernel_percpu_address(addr, NULL); } /** * per_cpu_ptr_to_phys - convert translated percpu address to physical address * @addr: the address to be converted to physical address * * Given @addr which is dereferenceable address obtained via one of * percpu access macros, this function translates it into its physical * address. The caller is responsible for ensuring @addr stays valid * until this function finishes. * * percpu allocator has special setup for the first chunk, which currently * supports either embedding in linear address space or vmalloc mapping, * and, from the second one, the backing allocator (currently either vm or * km) provides translation. * * The addr can be translated simply without checking if it falls into the * first chunk. But the current code reflects better how percpu allocator * actually works, and the verification can discover both bugs in percpu * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current * code. * * RETURNS: * The physical address for @addr. */ phys_addr_t per_cpu_ptr_to_phys(void *addr) { void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); bool in_first_chunk = false; unsigned long first_low, first_high; unsigned int cpu; /* * The following test on unit_low/high isn't strictly * necessary but will speed up lookups of addresses which * aren't in the first chunk. * * The address check is against full chunk sizes. pcpu_base_addr * points to the beginning of the first chunk including the * static region. Assumes good intent as the first chunk may * not be full (ie. < pcpu_unit_pages in size). 
*/ first_low = (unsigned long)pcpu_base_addr + pcpu_unit_page_offset(pcpu_low_unit_cpu, 0); first_high = (unsigned long)pcpu_base_addr + pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages); if ((unsigned long)addr >= first_low && (unsigned long)addr < first_high) { for_each_possible_cpu(cpu) { void *start = per_cpu_ptr(base, cpu); if (addr >= start && addr < start + pcpu_unit_size) { in_first_chunk = true; break; } } } if (in_first_chunk) { if (!is_vmalloc_addr(addr)) return __pa(addr); else return page_to_phys(vmalloc_to_page(addr)) + offset_in_page(addr); } else return page_to_phys(pcpu_addr_to_page(addr)) + offset_in_page(addr); } /** * pcpu_alloc_alloc_info - allocate percpu allocation info * @nr_groups: the number of groups * @nr_units: the number of units * * Allocate ai which is large enough for @nr_groups groups containing * @nr_units units. The returned ai's groups[0].cpu_map points to the * cpu_map array which is long enough for @nr_units and filled with * NR_CPUS. It's the caller's responsibility to initialize cpu_map * pointer of other groups. * * RETURNS: * Pointer to the allocated pcpu_alloc_info on success, NULL on * failure. */ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, int nr_units) { struct pcpu_alloc_info *ai; size_t base_size, ai_size; void *ptr; int unit; base_size = ALIGN(struct_size(ai, groups, nr_groups), __alignof__(ai->groups[0].cpu_map[0])); ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE); if (!ptr) return NULL; ai = ptr; ptr += base_size; ai->groups[0].cpu_map = ptr; for (unit = 0; unit < nr_units; unit++) ai->groups[0].cpu_map[unit] = NR_CPUS; ai->nr_groups = nr_groups; ai->__ai_size = PFN_ALIGN(ai_size); return ai; } /** * pcpu_free_alloc_info - free percpu allocation info * @ai: pcpu_alloc_info to free * * Free @ai which was allocated by pcpu_alloc_alloc_info(). */ void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) { memblock_free(ai, ai->__ai_size); } /** * pcpu_dump_alloc_info - print out information about pcpu_alloc_info * @lvl: loglevel * @ai: allocation info to dump * * Print out information about @ai using loglevel @lvl. 
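 *
 * With illustrative values for an 8-cpu machine in a single group, the
 * output produced by the format strings below looks roughly like:
 *
 *	pcpu-alloc: s196608 r8192 d28672 u262144 alloc=1*2097152
 *	pcpu-alloc: [0] 0 1 2 3 4 5 6 7
 *
 * i.e. the static/reserved/dynamic/unit sizes, then each allocation's
 * group number in brackets followed by the cpus mapped into it ("-" for
 * unused units).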
*/ static void pcpu_dump_alloc_info(const char *lvl, const struct pcpu_alloc_info *ai) { int group_width = 1, cpu_width = 1, width; char empty_str[] = "--------"; int alloc = 0, alloc_end = 0; int group, v; int upa, apl; /* units per alloc, allocs per line */ v = ai->nr_groups; while (v /= 10) group_width++; v = num_possible_cpus(); while (v /= 10) cpu_width++; empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; upa = ai->alloc_size / ai->unit_size; width = upa * (cpu_width + 1) + group_width + 3; apl = rounddown_pow_of_two(max(60 / width, 1)); printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", lvl, ai->static_size, ai->reserved_size, ai->dyn_size, ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); for (group = 0; group < ai->nr_groups; group++) { const struct pcpu_group_info *gi = &ai->groups[group]; int unit = 0, unit_end = 0; BUG_ON(gi->nr_units % upa); for (alloc_end += gi->nr_units / upa; alloc < alloc_end; alloc++) { if (!(alloc % apl)) { pr_cont("\n"); printk("%spcpu-alloc: ", lvl); } pr_cont("[%0*d] ", group_width, group); for (unit_end += upa; unit < unit_end; unit++) if (gi->cpu_map[unit] != NR_CPUS) pr_cont("%0*d ", cpu_width, gi->cpu_map[unit]); else pr_cont("%s ", empty_str); } } pr_cont("\n"); } /** * pcpu_setup_first_chunk - initialize the first percpu chunk * @ai: pcpu_alloc_info describing how to percpu area is shaped * @base_addr: mapped address * * Initialize the first percpu chunk which contains the kernel static * percpu area. This function is to be called from arch percpu area * setup path. * * @ai contains all information necessary to initialize the first * chunk and prime the dynamic percpu allocator. * * @ai->static_size is the size of static percpu area. * * @ai->reserved_size, if non-zero, specifies the amount of bytes to * reserve after the static area in the first chunk. This reserves * the first chunk such that it's available only through reserved * percpu allocation. This is primarily used to serve module percpu * static areas on architectures where the addressing model has * limited offset range for symbol relocations to guarantee module * percpu symbols fall inside the relocatable range. * * @ai->dyn_size determines the number of bytes available for dynamic * allocation in the first chunk. The area between @ai->static_size + * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. * * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE * and equal to or larger than @ai->static_size + @ai->reserved_size + * @ai->dyn_size. * * @ai->atom_size is the allocation atom size and used as alignment * for vm areas. * * @ai->alloc_size is the allocation size and always multiple of * @ai->atom_size. This is larger than @ai->atom_size if * @ai->unit_size is larger than @ai->atom_size. * * @ai->nr_groups and @ai->groups describe virtual memory layout of * percpu areas. Units which should be colocated are put into the * same group. Dynamic VM areas will be allocated according to these * groupings. If @ai->nr_groups is zero, a single group containing * all units is assumed. * * The caller should have mapped the first chunk at @base_addr and * copied static data to each unit. * * The first chunk will always contain a static and a dynamic region. * However, the static region is not managed by any chunk. If the first * chunk also contains a reserved region, it is served by two chunks - * one for the reserved region and one for the dynamic region. 
They * share the same vm, but use offset regions in the area allocation map. * The chunk serving the dynamic region is circulated in the chunk slots * and available for dynamic allocation like any other chunk. */ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, void *base_addr) { size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; size_t static_size, dyn_size; unsigned long *group_offsets; size_t *group_sizes; unsigned long *unit_off; unsigned int cpu; int *unit_map; int group, unit, i; unsigned long tmp_addr; size_t alloc_size; #define PCPU_SETUP_BUG_ON(cond) do { \ if (unlikely(cond)) { \ pr_emerg("failed to initialize, %s\n", #cond); \ pr_emerg("cpu_possible_mask=%*pb\n", \ cpumask_pr_args(cpu_possible_mask)); \ pcpu_dump_alloc_info(KERN_EMERG, ai); \ BUG(); \ } \ } while (0) /* sanity checks */ PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); #ifdef CONFIG_SMP PCPU_SETUP_BUG_ON(!ai->static_size); PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start)); #endif PCPU_SETUP_BUG_ON(!base_addr); PCPU_SETUP_BUG_ON(offset_in_page(base_addr)); PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size)); PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE)); PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE)); PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) || IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE))); PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); /* process group information and build config tables accordingly */ alloc_size = ai->nr_groups * sizeof(group_offsets[0]); group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES); if (!group_offsets) panic("%s: Failed to allocate %zu bytes\n", __func__, alloc_size); alloc_size = ai->nr_groups * sizeof(group_sizes[0]); group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES); if (!group_sizes) panic("%s: Failed to allocate %zu bytes\n", __func__, alloc_size); alloc_size = nr_cpu_ids * sizeof(unit_map[0]); unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); if (!unit_map) panic("%s: Failed to allocate %zu bytes\n", __func__, alloc_size); alloc_size = nr_cpu_ids * sizeof(unit_off[0]); unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES); if (!unit_off) panic("%s: Failed to allocate %zu bytes\n", __func__, alloc_size); for (cpu = 0; cpu < nr_cpu_ids; cpu++) unit_map[cpu] = UINT_MAX; pcpu_low_unit_cpu = NR_CPUS; pcpu_high_unit_cpu = NR_CPUS; for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { const struct pcpu_group_info *gi = &ai->groups[group]; group_offsets[group] = gi->base_offset; group_sizes[group] = gi->nr_units * ai->unit_size; for (i = 0; i < gi->nr_units; i++) { cpu = gi->cpu_map[i]; if (cpu == NR_CPUS) continue; PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); unit_map[cpu] = unit + i; unit_off[cpu] = gi->base_offset + i * ai->unit_size; /* determine low/high unit_cpu */ if (pcpu_low_unit_cpu == NR_CPUS || unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) pcpu_low_unit_cpu = cpu; if (pcpu_high_unit_cpu == NR_CPUS || unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) pcpu_high_unit_cpu = cpu; } } pcpu_nr_units = unit; for_each_possible_cpu(cpu) PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); /* we're done parsing the input, undefine BUG macro and dump config */ #undef PCPU_SETUP_BUG_ON pcpu_dump_alloc_info(KERN_DEBUG, ai); 
pcpu_nr_groups = ai->nr_groups; pcpu_group_offsets = group_offsets; pcpu_group_sizes = group_sizes; pcpu_unit_map = unit_map; pcpu_unit_offsets = unit_off; /* determine basic parameters */ pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; pcpu_atom_size = ai->atom_size; pcpu_chunk_struct_size = struct_size((struct pcpu_chunk *)0, populated, BITS_TO_LONGS(pcpu_unit_pages)); pcpu_stats_save_ai(ai); /* * Allocate chunk slots. The slots after the active slots are: * sidelined_slot - isolated, depopulated chunks * free_slot - fully free chunks * to_depopulate_slot - isolated, chunks to depopulate */ pcpu_sidelined_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1; pcpu_free_slot = pcpu_sidelined_slot + 1; pcpu_to_depopulate_slot = pcpu_free_slot + 1; pcpu_nr_slots = pcpu_to_depopulate_slot + 1; pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]), SMP_CACHE_BYTES); if (!pcpu_chunk_lists) panic("%s: Failed to allocate %zu bytes\n", __func__, pcpu_nr_slots * sizeof(pcpu_chunk_lists[0])); for (i = 0; i < pcpu_nr_slots; i++) INIT_LIST_HEAD(&pcpu_chunk_lists[i]); /* * The end of the static region needs to be aligned with the * minimum allocation size as this offsets the reserved and * dynamic region. The first chunk ends page aligned by * expanding the dynamic region, therefore the dynamic region * can be shrunk to compensate while still staying above the * configured sizes. */ static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE); dyn_size = ai->dyn_size - (static_size - ai->static_size); /* * Initialize first chunk: * This chunk is broken up into 3 parts: * < static | [reserved] | dynamic > * - static - there is no backing chunk because these allocations can * never be freed. * - reserved (pcpu_reserved_chunk) - exists primarily to serve * allocations from module load. * - dynamic (pcpu_first_chunk) - serves the dynamic part of the first * chunk. */ tmp_addr = (unsigned long)base_addr + static_size; if (ai->reserved_size) pcpu_reserved_chunk = pcpu_alloc_first_chunk(tmp_addr, ai->reserved_size); tmp_addr = (unsigned long)base_addr + static_size + ai->reserved_size; pcpu_first_chunk = pcpu_alloc_first_chunk(tmp_addr, dyn_size); pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages; pcpu_chunk_relocate(pcpu_first_chunk, -1); /* include all regions of the first chunk */ pcpu_nr_populated += PFN_DOWN(size_sum); pcpu_stats_chunk_alloc(); trace_percpu_create_chunk(base_addr); /* we're done */ pcpu_base_addr = base_addr; } #ifdef CONFIG_SMP const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { [PCPU_FC_AUTO] = "auto", [PCPU_FC_EMBED] = "embed", [PCPU_FC_PAGE] = "page", }; enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; static int __init percpu_alloc_setup(char *str) { if (!str) return -EINVAL; if (0) /* nada */; #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK else if (!strcmp(str, "embed")) pcpu_chosen_fc = PCPU_FC_EMBED; #endif #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK else if (!strcmp(str, "page")) pcpu_chosen_fc = PCPU_FC_PAGE; #endif else pr_warn("unknown allocator %s specified\n", str); return 0; } early_param("percpu_alloc", percpu_alloc_setup); /* * pcpu_embed_first_chunk() is used by the generic percpu setup. * Build it if needed by the arch config or the generic setup is going * to be used. 
*/ #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) #define BUILD_EMBED_FIRST_CHUNK #endif /* build pcpu_page_first_chunk() iff needed by the arch config */ #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) #define BUILD_PAGE_FIRST_CHUNK #endif /* pcpu_build_alloc_info() is used by both embed and page first chunk */ #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) /** * pcpu_build_alloc_info - build alloc_info considering distances between CPUs * @reserved_size: the size of reserved percpu area in bytes * @dyn_size: minimum free size for dynamic allocation in bytes * @atom_size: allocation atom size * @cpu_distance_fn: callback to determine distance between cpus, optional * * This function determines grouping of units, their mappings to cpus * and other parameters considering needed percpu size, allocation * atom size and distances between CPUs. * * Groups are always multiples of atom size and CPUs which are of * LOCAL_DISTANCE both ways are grouped together and share space for * units in the same group. The returned configuration is guaranteed * to have CPUs on different nodes on different groups and >=75% usage * of allocated virtual address space. * * RETURNS: * On success, pointer to the new allocation_info is returned. On * failure, ERR_PTR value is returned. */ static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info( size_t reserved_size, size_t dyn_size, size_t atom_size, pcpu_fc_cpu_distance_fn_t cpu_distance_fn) { static int group_map[NR_CPUS] __initdata; static int group_cnt[NR_CPUS] __initdata; static struct cpumask mask __initdata; const size_t static_size = __per_cpu_end - __per_cpu_start; int nr_groups = 1, nr_units = 0; size_t size_sum, min_unit_size, alloc_size; int upa, max_upa, best_upa; /* units_per_alloc */ int last_allocs, group, unit; unsigned int cpu, tcpu; struct pcpu_alloc_info *ai; unsigned int *cpu_map; /* this function may be called multiple times */ memset(group_map, 0, sizeof(group_map)); memset(group_cnt, 0, sizeof(group_cnt)); cpumask_clear(&mask); /* calculate size_sum and ensure dyn_size is enough for early alloc */ size_sum = PFN_ALIGN(static_size + reserved_size + max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); dyn_size = size_sum - static_size - reserved_size; /* * Determine min_unit_size, alloc_size and max_upa such that * alloc_size is multiple of atom_size and is the smallest * which can accommodate 4k aligned segments which are equal to * or larger than min_unit_size. */ min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); /* determine the maximum # of units that can fit in an allocation */ alloc_size = roundup(min_unit_size, atom_size); upa = alloc_size / min_unit_size; while (alloc_size % upa || (offset_in_page(alloc_size / upa))) upa--; max_upa = upa; cpumask_copy(&mask, cpu_possible_mask); /* group cpus according to their proximity */ for (group = 0; !cpumask_empty(&mask); group++) { /* pop the group's first cpu */ cpu = cpumask_first(&mask); group_map[cpu] = group; group_cnt[group]++; cpumask_clear_cpu(cpu, &mask); for_each_cpu(tcpu, &mask) { if (!cpu_distance_fn || (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE && cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) { group_map[tcpu] = group; group_cnt[group]++; cpumask_clear_cpu(tcpu, &mask); } } } nr_groups = group; /* * Wasted space is caused by a ratio imbalance of upa to group_cnt. * Expand the unit_size until we use >= 75% of the units allocated. 
* Related to atom_size, which could be much larger than the unit_size. */ last_allocs = INT_MAX; best_upa = 0; for (upa = max_upa; upa; upa--) { int allocs = 0, wasted = 0; if (alloc_size % upa || (offset_in_page(alloc_size / upa))) continue; for (group = 0; group < nr_groups; group++) { int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); allocs += this_allocs; wasted += this_allocs * upa - group_cnt[group]; } /* * Don't accept if wastage is over 1/3. The * greater-than comparison ensures upa==1 always * passes the following check. */ if (wasted > num_possible_cpus() / 3) continue; /* and then don't consume more memory */ if (allocs > last_allocs) break; last_allocs = allocs; best_upa = upa; } BUG_ON(!best_upa); upa = best_upa; /* allocate and fill alloc_info */ for (group = 0; group < nr_groups; group++) nr_units += roundup(group_cnt[group], upa); ai = pcpu_alloc_alloc_info(nr_groups, nr_units); if (!ai) return ERR_PTR(-ENOMEM); cpu_map = ai->groups[0].cpu_map; for (group = 0; group < nr_groups; group++) { ai->groups[group].cpu_map = cpu_map; cpu_map += roundup(group_cnt[group], upa); } ai->static_size = static_size; ai->reserved_size = reserved_size; ai->dyn_size = dyn_size; ai->unit_size = alloc_size / upa; ai->atom_size = atom_size; ai->alloc_size = alloc_size; for (group = 0, unit = 0; group < nr_groups; group++) { struct pcpu_group_info *gi = &ai->groups[group]; /* * Initialize base_offset as if all groups are located * back-to-back. The caller should update this to * reflect actual allocation. */ gi->base_offset = unit * ai->unit_size; for_each_possible_cpu(cpu) if (group_map[cpu] == group) gi->cpu_map[gi->nr_units++] = cpu; gi->nr_units = roundup(gi->nr_units, upa); unit += gi->nr_units; } BUG_ON(unit != nr_units); return ai; } static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align, pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn) { const unsigned long goal = __pa(MAX_DMA_ADDRESS); #ifdef CONFIG_NUMA int node = NUMA_NO_NODE; void *ptr; if (cpu_to_nd_fn) node = cpu_to_nd_fn(cpu); if (node == NUMA_NO_NODE || !node_online(node) || !NODE_DATA(node)) { ptr = memblock_alloc_from(size, align, goal); pr_info("cpu %d has no node %d or node-local memory\n", cpu, node); pr_debug("per cpu data for cpu%d %zu bytes at 0x%llx\n", cpu, size, (u64)__pa(ptr)); } else { ptr = memblock_alloc_try_nid(size, align, goal, MEMBLOCK_ALLOC_ACCESSIBLE, node); pr_debug("per cpu data for cpu%d %zu bytes on node%d at 0x%llx\n", cpu, size, node, (u64)__pa(ptr)); } return ptr; #else return memblock_alloc_from(size, align, goal); #endif } static void __init pcpu_fc_free(void *ptr, size_t size) { memblock_free(ptr, size); } #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ #if defined(BUILD_EMBED_FIRST_CHUNK) /** * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem * @reserved_size: the size of reserved percpu area in bytes * @dyn_size: minimum free size for dynamic allocation in bytes * @atom_size: allocation atom size * @cpu_distance_fn: callback to determine distance between cpus, optional * @cpu_to_nd_fn: callback to convert cpu to it's node, optional * * This is a helper to ease setting up embedded first percpu chunk and * can be called where pcpu_setup_first_chunk() is expected. * * If this function is used to setup the first chunk, it is allocated * by calling pcpu_fc_alloc and used as-is without being mapped into * vmalloc area. Allocations are always whole multiples of @atom_size * aligned to @atom_size. 
* * This enables the first chunk to piggy back on the linear physical * mapping which often uses larger page size. Please note that this * can result in very sparse cpu->unit mapping on NUMA machines thus * requiring large vmalloc address space. Don't use this allocator if * vmalloc space is not orders of magnitude larger than distances * between node memory addresses (ie. 32bit NUMA machines). * * @dyn_size specifies the minimum dynamic area size. * * If the needed size is smaller than the minimum or specified unit * size, the leftover is returned using pcpu_fc_free. * * RETURNS: * 0 on success, -errno on failure. */ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, size_t atom_size, pcpu_fc_cpu_distance_fn_t cpu_distance_fn, pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn) { void *base = (void *)ULONG_MAX; void **areas = NULL; struct pcpu_alloc_info *ai; size_t size_sum, areas_size; unsigned long max_distance; int group, i, highest_group, rc = 0; ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, cpu_distance_fn); if (IS_ERR(ai)) return PTR_ERR(ai); size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); areas = memblock_alloc(areas_size, SMP_CACHE_BYTES); if (!areas) { rc = -ENOMEM; goto out_free; } /* allocate, copy and determine base address & max_distance */ highest_group = 0; for (group = 0; group < ai->nr_groups; group++) { struct pcpu_group_info *gi = &ai->groups[group]; unsigned int cpu = NR_CPUS; void *ptr; for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) cpu = gi->cpu_map[i]; BUG_ON(cpu == NR_CPUS); /* allocate space for the whole group */ ptr = pcpu_fc_alloc(cpu, gi->nr_units * ai->unit_size, atom_size, cpu_to_nd_fn); if (!ptr) { rc = -ENOMEM; goto out_free_areas; } /* kmemleak tracks the percpu allocations separately */ kmemleak_ignore_phys(__pa(ptr)); areas[group] = ptr; base = min(ptr, base); if (ptr > areas[highest_group]) highest_group = group; } max_distance = areas[highest_group] - base; max_distance += ai->unit_size * ai->groups[highest_group].nr_units; /* warn if maximum distance is further than 75% of vmalloc space */ if (max_distance > VMALLOC_TOTAL * 3 / 4) { pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n", max_distance, VMALLOC_TOTAL); #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK /* and fail if we have fallback */ rc = -EINVAL; goto out_free_areas; #endif } /* * Copy data and free unused parts. This should happen after all * allocations are complete; otherwise, we may end up with * overlapping groups. 
*/ for (group = 0; group < ai->nr_groups; group++) { struct pcpu_group_info *gi = &ai->groups[group]; void *ptr = areas[group]; for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { if (gi->cpu_map[i] == NR_CPUS) { /* unused unit, free whole */ pcpu_fc_free(ptr, ai->unit_size); continue; } /* copy and return the unused part */ memcpy(ptr, __per_cpu_load, ai->static_size); pcpu_fc_free(ptr + size_sum, ai->unit_size - size_sum); } } /* base address is now known, determine group base offsets */ for (group = 0; group < ai->nr_groups; group++) { ai->groups[group].base_offset = areas[group] - base; } pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n", PFN_DOWN(size_sum), ai->static_size, ai->reserved_size, ai->dyn_size, ai->unit_size); pcpu_setup_first_chunk(ai, base); goto out_free; out_free_areas: for (group = 0; group < ai->nr_groups; group++) if (areas[group]) pcpu_fc_free(areas[group], ai->groups[group].nr_units * ai->unit_size); out_free: pcpu_free_alloc_info(ai); if (areas) memblock_free(areas, areas_size); return rc; } #endif /* BUILD_EMBED_FIRST_CHUNK */ #ifdef BUILD_PAGE_FIRST_CHUNK #include <asm/pgalloc.h> #ifndef P4D_TABLE_SIZE #define P4D_TABLE_SIZE PAGE_SIZE #endif #ifndef PUD_TABLE_SIZE #define PUD_TABLE_SIZE PAGE_SIZE #endif #ifndef PMD_TABLE_SIZE #define PMD_TABLE_SIZE PAGE_SIZE #endif #ifndef PTE_TABLE_SIZE #define PTE_TABLE_SIZE PAGE_SIZE #endif void __init __weak pcpu_populate_pte(unsigned long addr) { pgd_t *pgd = pgd_offset_k(addr); p4d_t *p4d; pud_t *pud; pmd_t *pmd; if (pgd_none(*pgd)) { p4d = memblock_alloc(P4D_TABLE_SIZE, P4D_TABLE_SIZE); if (!p4d) goto err_alloc; pgd_populate(&init_mm, pgd, p4d); } p4d = p4d_offset(pgd, addr); if (p4d_none(*p4d)) { pud = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE); if (!pud) goto err_alloc; p4d_populate(&init_mm, p4d, pud); } pud = pud_offset(p4d, addr); if (pud_none(*pud)) { pmd = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE); if (!pmd) goto err_alloc; pud_populate(&init_mm, pud, pmd); } pmd = pmd_offset(pud, addr); if (!pmd_present(*pmd)) { pte_t *new; new = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE); if (!new) goto err_alloc; pmd_populate_kernel(&init_mm, pmd, new); } return; err_alloc: panic("%s: Failed to allocate memory\n", __func__); } /** * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages * @reserved_size: the size of reserved percpu area in bytes * @cpu_to_nd_fn: callback to convert cpu to it's node, optional * * This is a helper to ease setting up page-remapped first percpu * chunk and can be called where pcpu_setup_first_chunk() is expected. * * This is the basic allocator. Static percpu area is allocated * page-by-page into vmalloc area. * * RETURNS: * 0 on success, -errno on failure. 
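 *
 * Architectures that provide this path typically select it from their
 * setup_per_cpu_areas() when booted with "percpu_alloc=page" (handled by
 * percpu_alloc_setup() above), roughly:
 *
 *	if (pcpu_chosen_fc == PCPU_FC_PAGE)
 *		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *					   pcpu_cpu_to_node);
 *
 * The reserved size and cpu-to-node callback shown here are illustrative;
 * each arch passes its own values.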
*/ int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn) { static struct vm_struct vm; struct pcpu_alloc_info *ai; char psize_str[16]; int unit_pages; size_t pages_size; struct page **pages; int unit, i, j, rc = 0; int upa; int nr_g0_units; snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); if (IS_ERR(ai)) return PTR_ERR(ai); BUG_ON(ai->nr_groups != 1); upa = ai->alloc_size/ai->unit_size; nr_g0_units = roundup(num_possible_cpus(), upa); if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) { pcpu_free_alloc_info(ai); return -EINVAL; } unit_pages = ai->unit_size >> PAGE_SHIFT; /* unaligned allocations can't be freed, round up to page size */ pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * sizeof(pages[0])); pages = memblock_alloc(pages_size, SMP_CACHE_BYTES); if (!pages) panic("%s: Failed to allocate %zu bytes\n", __func__, pages_size); /* allocate pages */ j = 0; for (unit = 0; unit < num_possible_cpus(); unit++) { unsigned int cpu = ai->groups[0].cpu_map[unit]; for (i = 0; i < unit_pages; i++) { void *ptr; ptr = pcpu_fc_alloc(cpu, PAGE_SIZE, PAGE_SIZE, cpu_to_nd_fn); if (!ptr) { pr_warn("failed to allocate %s page for cpu%u\n", psize_str, cpu); goto enomem; } /* kmemleak tracks the percpu allocations separately */ kmemleak_ignore_phys(__pa(ptr)); pages[j++] = virt_to_page(ptr); } } /* allocate vm area, map the pages and copy static data */ vm.flags = VM_ALLOC; vm.size = num_possible_cpus() * ai->unit_size; vm_area_register_early(&vm, PAGE_SIZE); for (unit = 0; unit < num_possible_cpus(); unit++) { unsigned long unit_addr = (unsigned long)vm.addr + unit * ai->unit_size; for (i = 0; i < unit_pages; i++) pcpu_populate_pte(unit_addr + (i << PAGE_SHIFT)); /* pte already populated, the following shouldn't fail */ rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], unit_pages); if (rc < 0) panic("failed to map percpu area, err=%d\n", rc); /* * FIXME: Archs with virtual cache should flush local * cache for the linear mapping here - something * equivalent to flush_cache_vmap() on the local cpu. * flush_cache_vmap() can't be used as most supporting * data structures are not set up yet. */ /* copy static data */ memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); } /* we're ready, commit */ pr_info("%d %s pages/cpu s%zu r%zu d%zu\n", unit_pages, psize_str, ai->static_size, ai->reserved_size, ai->dyn_size); pcpu_setup_first_chunk(ai, vm.addr); goto out_free_ar; enomem: while (--j >= 0) pcpu_fc_free(page_address(pages[j]), PAGE_SIZE); rc = -ENOMEM; out_free_ar: memblock_free(pages, pages_size); pcpu_free_alloc_info(ai); return rc; } #endif /* BUILD_PAGE_FIRST_CHUNK */ #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA /* * Generic SMP percpu area setup. * * The embedding helper is used because its behavior closely resembles * the original non-dynamic generic percpu area setup. This is * important because many archs have addressing restrictions and might * fail if the percpu area is located far away from the previous * location. As an added bonus, in non-NUMA cases, embedding is * generally a good idea TLB-wise because percpu area can piggy back * on the physical linear memory mapping which uses large page * mappings on applicable archs. */ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; EXPORT_SYMBOL(__per_cpu_offset); void __init setup_per_cpu_areas(void) { unsigned long delta; unsigned int cpu; int rc; /* * Always reserve area for module percpu variables. 
That's * what the legacy allocator did. */ rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, NULL); if (rc < 0) panic("Failed to initialize percpu areas."); delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; for_each_possible_cpu(cpu) __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; } #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ #else /* CONFIG_SMP */ /* * UP percpu area setup. * * UP always uses km-based percpu allocator with identity mapping. * Static percpu variables are indistinguishable from the usual static * variables and don't require any special preparation. */ void __init setup_per_cpu_areas(void) { const size_t unit_size = roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, PERCPU_DYNAMIC_RESERVE)); struct pcpu_alloc_info *ai; void *fc; ai = pcpu_alloc_alloc_info(1, 1); fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); if (!ai || !fc) panic("Failed to allocate memory for percpu areas."); /* kmemleak tracks the percpu allocations separately */ kmemleak_ignore_phys(__pa(fc)); ai->dyn_size = unit_size; ai->unit_size = unit_size; ai->atom_size = unit_size; ai->alloc_size = unit_size; ai->groups[0].nr_units = 1; ai->groups[0].cpu_map[0] = 0; pcpu_setup_first_chunk(ai, fc); pcpu_free_alloc_info(ai); } #endif /* CONFIG_SMP */ /* * pcpu_nr_pages - calculate total number of populated backing pages * * This reflects the number of pages populated to back chunks. Metadata is * excluded in the number exposed in meminfo as the number of backing pages * scales with the number of cpus and can quickly outweigh the memory used for * metadata. It also keeps this calculation nice and simple. * * RETURNS: * Total number of populated backing pages in use by the allocator. */ unsigned long pcpu_nr_pages(void) { return pcpu_nr_populated * pcpu_nr_units; } /* * Percpu allocator is initialized early during boot when neither slab or * workqueue is available. Plug async management until everything is up * and running. */ static int __init percpu_enable_async(void) { pcpu_async_enabled = true; return 0; } subsys_initcall(percpu_enable_async);
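/*
 * Illustrative sketch (not part of the original mm/percpu.c): a typical
 * consumer of the allocator above. The "demo_*" names are made up for this
 * example; the interfaces used (alloc_percpu(), this_cpu_inc(),
 * per_cpu_ptr(), for_each_possible_cpu(), free_percpu()) are the real ones
 * provided by this file together with <linux/percpu.h> and <linux/cpumask.h>.
 *
 *	struct demo_stats {
 *		u64 events;
 *	};
 *
 *	static struct demo_stats __percpu *demo_stats;
 *
 *	static int demo_init(void)
 *	{
 *		demo_stats = alloc_percpu(struct demo_stats);
 *		return demo_stats ? 0 : -ENOMEM;
 *	}
 *
 * The fast path bumps only the local cpu's copy, with no locking:
 *
 *	static void demo_record_event(void)
 *	{
 *		this_cpu_inc(demo_stats->events);
 *	}
 *
 * The slow path folds every cpu's copy into one total:
 *
 *	static u64 demo_total_events(void)
 *	{
 *		u64 sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += per_cpu_ptr(demo_stats, cpu)->events;
 *		return sum;
 *	}
 *
 *	static void demo_exit(void)
 *	{
 *		free_percpu(demo_stats);
 *	}
 *
 * A real user that cares about 64-bit tearing on 32-bit machines would
 * additionally wrap the counter in u64_stats_sync; this sketch leaves
 * that out.
 */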
linux-master
mm/percpu.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/mm/memory.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds */ /* * demand-loading started 01.12.91 - seems it is high on the list of * things wanted, and it should be easy to implement. - Linus */ /* * Ok, demand-loading was easy, shared pages a little bit tricker. Shared * pages started 02.12.91, seems to work. - Linus. * * Tested sharing by executing about 30 /bin/sh: under the old kernel it * would have taken more than the 6M I have free, but it worked well as * far as I could see. * * Also corrected some "invalidate()"s - I wasn't doing enough of them. */ /* * Real VM (paging to/from disk) started 18.12.91. Much more work and * thought has to go into this. Oh, well.. * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why. * Found it. Everything seems to work now. * 20.12.91 - Ok, making the swap-device changeable like the root. */ /* * 05.04.94 - Multi-page memory management added for v1.1. * Idea by Alex Bligh ([email protected]) * * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG * ([email protected]) * * Aug/Sep 2004 Changed to four level page tables (Andi Kleen) */ #include <linux/kernel_stat.h> #include <linux/mm.h> #include <linux/mm_inline.h> #include <linux/sched/mm.h> #include <linux/sched/coredump.h> #include <linux/sched/numa_balancing.h> #include <linux/sched/task.h> #include <linux/hugetlb.h> #include <linux/mman.h> #include <linux/swap.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/memremap.h> #include <linux/kmsan.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/export.h> #include <linux/delayacct.h> #include <linux/init.h> #include <linux/pfn_t.h> #include <linux/writeback.h> #include <linux/memcontrol.h> #include <linux/mmu_notifier.h> #include <linux/swapops.h> #include <linux/elf.h> #include <linux/gfp.h> #include <linux/migrate.h> #include <linux/string.h> #include <linux/memory-tiers.h> #include <linux/debugfs.h> #include <linux/userfaultfd_k.h> #include <linux/dax.h> #include <linux/oom.h> #include <linux/numa.h> #include <linux/perf_event.h> #include <linux/ptrace.h> #include <linux/vmalloc.h> #include <linux/sched/sysctl.h> #include <trace/events/kmem.h> #include <asm/io.h> #include <asm/mmu_context.h> #include <asm/pgalloc.h> #include <linux/uaccess.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include "pgalloc-track.h" #include "internal.h" #include "swap.h" #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST) #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid. #endif #ifndef CONFIG_NUMA unsigned long max_mapnr; EXPORT_SYMBOL(max_mapnr); struct page *mem_map; EXPORT_SYMBOL(mem_map); #endif static vm_fault_t do_fault(struct vm_fault *vmf); static vm_fault_t do_anonymous_page(struct vm_fault *vmf); static bool vmf_pte_changed(struct vm_fault *vmf); /* * Return true if the original pte was a uffd-wp pte marker (so the pte was * wr-protected). */ static bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf) { if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)) return false; return pte_marker_uffd_wp(vmf->orig_pte); } /* * A number of key systems in x86 including ioremap() rely on the assumption * that high_memory defines the upper bound on direct map memory, then end * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL * and ZONE_HIGHMEM. 
*/ void *high_memory; EXPORT_SYMBOL(high_memory); /* * Randomize the address space (stacks, mmaps, brk, etc.). * * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization, * as ancient (libc5 based) binaries can segfault. ) */ int randomize_va_space __read_mostly = #ifdef CONFIG_COMPAT_BRK 1; #else 2; #endif #ifndef arch_wants_old_prefaulted_pte static inline bool arch_wants_old_prefaulted_pte(void) { /* * Transitioning a PTE from 'old' to 'young' can be expensive on * some architectures, even if it's performed in hardware. By * default, "false" means prefaulted entries will be 'young'. */ return false; } #endif static int __init disable_randmaps(char *s) { randomize_va_space = 0; return 1; } __setup("norandmaps", disable_randmaps); unsigned long zero_pfn __read_mostly; EXPORT_SYMBOL(zero_pfn); unsigned long highest_memmap_pfn __read_mostly; /* * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init() */ static int __init init_zero_pfn(void) { zero_pfn = page_to_pfn(ZERO_PAGE(0)); return 0; } early_initcall(init_zero_pfn); void mm_trace_rss_stat(struct mm_struct *mm, int member) { trace_rss_stat(mm, member); } /* * Note: this doesn't free the actual pages themselves. That * has been handled earlier when unmapping all the memory regions. */ static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr) { pgtable_t token = pmd_pgtable(*pmd); pmd_clear(pmd); pte_free_tlb(tlb, token, addr); mm_dec_nr_ptes(tlb->mm); } static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pmd_t *pmd; unsigned long next; unsigned long start; start = addr; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); if (pmd_none_or_clear_bad(pmd)) continue; free_pte_range(tlb, pmd, addr); } while (pmd++, addr = next, addr != end); start &= PUD_MASK; if (start < floor) return; if (ceiling) { ceiling &= PUD_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) return; pmd = pmd_offset(pud, start); pud_clear(pud); pmd_free_tlb(tlb, pmd, start); mm_dec_nr_pmds(tlb->mm); } static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pud_t *pud; unsigned long next; unsigned long start; start = addr; pud = pud_offset(p4d, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; free_pmd_range(tlb, pud, addr, next, floor, ceiling); } while (pud++, addr = next, addr != end); start &= P4D_MASK; if (start < floor) return; if (ceiling) { ceiling &= P4D_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) return; pud = pud_offset(p4d, start); p4d_clear(p4d); pud_free_tlb(tlb, pud, start); mm_dec_nr_puds(tlb->mm); } static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { p4d_t *p4d; unsigned long next; unsigned long start; start = addr; p4d = p4d_offset(pgd, addr); do { next = p4d_addr_end(addr, end); if (p4d_none_or_clear_bad(p4d)) continue; free_pud_range(tlb, p4d, addr, next, floor, ceiling); } while (p4d++, addr = next, addr != end); start &= PGDIR_MASK; if (start < floor) return; if (ceiling) { ceiling &= PGDIR_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) return; p4d = p4d_offset(pgd, start); pgd_clear(pgd); p4d_free_tlb(tlb, p4d, start); } /* * This function frees user-level page tables of a process. 
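 * Its main caller is free_pgtables() below, which runs it only after the
 * vmas covering the range have been unlinked from rmap and their file
 * mappings, so those paths can no longer reach the tables being freed.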
*/ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pgd_t *pgd; unsigned long next; /* * The next few lines have given us lots of grief... * * Why are we testing PMD* at this top level? Because often * there will be no work to do at all, and we'd prefer not to * go all the way down to the bottom just to discover that. * * Why all these "- 1"s? Because 0 represents both the bottom * of the address space and the top of it (using -1 for the * top wouldn't help much: the masks would do the wrong thing). * The rule is that addr 0 and floor 0 refer to the bottom of * the address space, but end 0 and ceiling 0 refer to the top * Comparisons need to use "end - 1" and "ceiling - 1" (though * that end 0 case should be mythical). * * Wherever addr is brought up or ceiling brought down, we must * be careful to reject "the opposite 0" before it confuses the * subsequent tests. But what about where end is brought down * by PMD_SIZE below? no, end can't go down to 0 there. * * Whereas we round start (addr) and ceiling down, by different * masks at different levels, in order to test whether a table * now has no other vmas using it, so can be freed, we don't * bother to round floor or end up - the tests don't need that. */ addr &= PMD_MASK; if (addr < floor) { addr += PMD_SIZE; if (!addr) return; } if (ceiling) { ceiling &= PMD_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) end -= PMD_SIZE; if (addr > end - 1) return; /* * We add page table cache pages with PAGE_SIZE, * (see pte_free_tlb()), flush the tlb if we need */ tlb_change_page_size(tlb, PAGE_SIZE); pgd = pgd_offset(tlb->mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; free_p4d_range(tlb, pgd, addr, next, floor, ceiling); } while (pgd++, addr = next, addr != end); } void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas, struct vm_area_struct *vma, unsigned long floor, unsigned long ceiling, bool mm_wr_locked) { do { unsigned long addr = vma->vm_start; struct vm_area_struct *next; /* * Note: USER_PGTABLES_CEILING may be passed as ceiling and may * be 0. This will underflow and is okay. */ next = mas_find(mas, ceiling - 1); /* * Hide vma from rmap and truncate_pagecache before freeing * pgtables */ if (mm_wr_locked) vma_start_write(vma); unlink_anon_vmas(vma); unlink_file_vma(vma); if (is_vm_hugetlb_page(vma)) { hugetlb_free_pgd_range(tlb, addr, vma->vm_end, floor, next ? next->vm_start : ceiling); } else { /* * Optimization: gather nearby vmas into one call down */ while (next && next->vm_start <= vma->vm_end + PMD_SIZE && !is_vm_hugetlb_page(next)) { vma = next; next = mas_find(mas, ceiling - 1); if (mm_wr_locked) vma_start_write(vma); unlink_anon_vmas(vma); unlink_file_vma(vma); } free_pgd_range(tlb, addr, vma->vm_end, floor, next ? next->vm_start : ceiling); } vma = next; } while (vma); } void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte) { spinlock_t *ptl = pmd_lock(mm, pmd); if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ mm_inc_nr_ptes(mm); /* * Ensure all pte setup (eg. pte page lock and page clearing) are * visible before the pte is made visible to other CPUs by being * put into page tables. * * The other side of the story is the pointer chasing in the page * table walking code (when walking the page table without locking; * ie. most of the time). 
Fortunately, these data accesses consist * of a chain of data-dependent loads, meaning most CPUs (alpha * being the notable exception) will already guarantee loads are * seen in-order. See the alpha page table accessors for the * smp_rmb() barriers in page table walking code. */ smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */ pmd_populate(mm, pmd, *pte); *pte = NULL; } spin_unlock(ptl); } int __pte_alloc(struct mm_struct *mm, pmd_t *pmd) { pgtable_t new = pte_alloc_one(mm); if (!new) return -ENOMEM; pmd_install(mm, pmd, &new); if (new) pte_free(mm, new); return 0; } int __pte_alloc_kernel(pmd_t *pmd) { pte_t *new = pte_alloc_one_kernel(&init_mm); if (!new) return -ENOMEM; spin_lock(&init_mm.page_table_lock); if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ smp_wmb(); /* See comment in pmd_install() */ pmd_populate_kernel(&init_mm, pmd, new); new = NULL; } spin_unlock(&init_mm.page_table_lock); if (new) pte_free_kernel(&init_mm, new); return 0; } static inline void init_rss_vec(int *rss) { memset(rss, 0, sizeof(int) * NR_MM_COUNTERS); } static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss) { int i; if (current->mm == mm) sync_mm_rss(mm); for (i = 0; i < NR_MM_COUNTERS; i++) if (rss[i]) add_mm_counter(mm, i, rss[i]); } /* * This function is called to print an error when a bad pte * is found. For example, we might have a PFN-mapped pte in * a region that doesn't allow it. * * The calling function must still handle the error. */ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, pte_t pte, struct page *page) { pgd_t *pgd = pgd_offset(vma->vm_mm, addr); p4d_t *p4d = p4d_offset(pgd, addr); pud_t *pud = pud_offset(p4d, addr); pmd_t *pmd = pmd_offset(pud, addr); struct address_space *mapping; pgoff_t index; static unsigned long resume; static unsigned long nr_shown; static unsigned long nr_unshown; /* * Allow a burst of 60 reports, then keep quiet for that minute; * or allow a steady drip of one report per second. */ if (nr_shown == 60) { if (time_before(jiffies, resume)) { nr_unshown++; return; } if (nr_unshown) { pr_alert("BUG: Bad page map: %lu messages suppressed\n", nr_unshown); nr_unshown = 0; } nr_shown = 0; } if (nr_shown++ == 0) resume = jiffies + 60 * HZ; mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; index = linear_page_index(vma, addr); pr_alert("BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n", current->comm, (long long)pte_val(pte), (long long)pmd_val(*pmd)); if (page) dump_page(page, "bad pte"); pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n", (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n", vma->vm_file, vma->vm_ops ? vma->vm_ops->fault : NULL, vma->vm_file ? vma->vm_file->f_op->mmap : NULL, mapping ? mapping->a_ops->read_folio : NULL); dump_stack(); add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); } /* * vm_normal_page -- This function gets the "struct page" associated with a pte. * * "Special" mappings do not wish to be associated with a "struct page" (either * it doesn't exist, or it exists but they don't want to touch it). In this * case, NULL is returned here. "Normal" mappings do have a struct page. * * There are 2 broad cases. Firstly, an architecture may define a pte_special() * pte bit, in which case this function is trivial. Secondly, an architecture * may not have a spare pte bit, which requires a more complicated scheme, * described below. * * A raw VM_PFNMAP mapping (ie. 
one that is not COWed) is always considered a * special mapping (even if there are underlying and valid "struct pages"). * COWed pages of a VM_PFNMAP are always normal. * * The way we recognize COWed pages within VM_PFNMAP mappings is through the * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit * set, and the vm_pgoff will point to the first PFN mapped: thus every special * mapping will always honor the rule * * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT) * * And for normal mappings this is false. * * This restricts such mappings to be a linear translation from virtual address * to pfn. To get around this restriction, we allow arbitrary mappings so long * as the vma is not a COW mapping; in that case, we know that all ptes are * special (because none can have been COWed). * * * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP. * * VM_MIXEDMAP mappings can likewise contain memory with or without "struct * page" backing, however the difference is that _all_ pages with a struct * page (that is, those where pfn_valid is true) are refcounted and considered * normal pages by the VM. The disadvantage is that pages are refcounted * (which can be slower and simply not an option for some PFNMAP users). The * advantage is that we don't have to follow the strict linearity rule of * PFNMAP mappings in order to support COWable mappings. * */ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte) { unsigned long pfn = pte_pfn(pte); if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) { if (likely(!pte_special(pte))) goto check_pfn; if (vma->vm_ops && vma->vm_ops->find_special_page) return vma->vm_ops->find_special_page(vma, addr); if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) return NULL; if (is_zero_pfn(pfn)) return NULL; if (pte_devmap(pte)) /* * NOTE: New users of ZONE_DEVICE will not set pte_devmap() * and will have refcounts incremented on their struct pages * when they are inserted into PTEs, thus they are safe to * return here. Legacy ZONE_DEVICE pages that set pte_devmap() * do not have refcounts. Example of legacy ZONE_DEVICE is * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers. */ return NULL; print_bad_pte(vma, addr, pte, NULL); return NULL; } /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { if (vma->vm_flags & VM_MIXEDMAP) { if (!pfn_valid(pfn)) return NULL; goto out; } else { unsigned long off; off = (addr - vma->vm_start) >> PAGE_SHIFT; if (pfn == vma->vm_pgoff + off) return NULL; if (!is_cow_mapping(vma->vm_flags)) return NULL; } } if (is_zero_pfn(pfn)) return NULL; check_pfn: if (unlikely(pfn > highest_memmap_pfn)) { print_bad_pte(vma, addr, pte, NULL); return NULL; } /* * NOTE! We still have PageReserved() pages in the page tables. * eg. VDSO mappings can cause them to exist. */ out: return pfn_to_page(pfn); } struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr, pte_t pte) { struct page *page = vm_normal_page(vma, addr, pte); if (page) return page_folio(page); return NULL; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd) { unsigned long pfn = pmd_pfn(pmd); /* * There is no pmd_special() but there may be special pmds, e.g. * in a direct-access (dax) mapping, so let's just replicate the * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here. 
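 *
 * Schematically (just a summary of the check that follows, not extra
 * logic): a pfn that still obeys the remap_pfn_range() linearity rule,
 *
 *	pmd_pfn(pmd) == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * is treated as special and no struct page is returned for it.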
*/ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { if (vma->vm_flags & VM_MIXEDMAP) { if (!pfn_valid(pfn)) return NULL; goto out; } else { unsigned long off; off = (addr - vma->vm_start) >> PAGE_SHIFT; if (pfn == vma->vm_pgoff + off) return NULL; if (!is_cow_mapping(vma->vm_flags)) return NULL; } } if (pmd_devmap(pmd)) return NULL; if (is_huge_zero_pmd(pmd)) return NULL; if (unlikely(pfn > highest_memmap_pfn)) return NULL; /* * NOTE! We still have PageReserved() pages in the page tables. * eg. VDSO mappings can cause them to exist. */ out: return pfn_to_page(pfn); } #endif static void restore_exclusive_pte(struct vm_area_struct *vma, struct page *page, unsigned long address, pte_t *ptep) { pte_t orig_pte; pte_t pte; swp_entry_t entry; orig_pte = ptep_get(ptep); pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot))); if (pte_swp_soft_dirty(orig_pte)) pte = pte_mksoft_dirty(pte); entry = pte_to_swp_entry(orig_pte); if (pte_swp_uffd_wp(orig_pte)) pte = pte_mkuffd_wp(pte); else if (is_writable_device_exclusive_entry(entry)) pte = maybe_mkwrite(pte_mkdirty(pte), vma); VM_BUG_ON(pte_write(pte) && !(PageAnon(page) && PageAnonExclusive(page))); /* * No need to take a page reference as one was already * created when the swap entry was made. */ if (PageAnon(page)) page_add_anon_rmap(page, vma, address, RMAP_NONE); else /* * Currently device exclusive access only supports anonymous * memory so the entry shouldn't point to a filebacked page. */ WARN_ON_ONCE(1); set_pte_at(vma->vm_mm, address, ptep, pte); /* * No need to invalidate - it was non-present before. However * secondary CPUs may have mappings that need invalidating. */ update_mmu_cache(vma, address, ptep); } /* * Tries to restore an exclusive pte if the page lock can be acquired without * sleeping. */ static int try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma, unsigned long addr) { swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte)); struct page *page = pfn_swap_entry_to_page(entry); if (trylock_page(page)) { restore_exclusive_pte(vma, page, addr, src_pte); unlock_page(page); return 0; } return -EBUSY; } /* * copy one vm_area from one task to the other. Assumes the page tables * already present in the new task to be cleared in the whole range * covered by this vma. */ static unsigned long copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, unsigned long addr, int *rss) { unsigned long vm_flags = dst_vma->vm_flags; pte_t orig_pte = ptep_get(src_pte); pte_t pte = orig_pte; struct page *page; swp_entry_t entry = pte_to_swp_entry(orig_pte); if (likely(!non_swap_entry(entry))) { if (swap_duplicate(entry) < 0) return -EIO; /* make sure dst_mm is on swapoff's mmlist. */ if (unlikely(list_empty(&dst_mm->mmlist))) { spin_lock(&mmlist_lock); if (list_empty(&dst_mm->mmlist)) list_add(&dst_mm->mmlist, &src_mm->mmlist); spin_unlock(&mmlist_lock); } /* Mark the swap entry as shared. */ if (pte_swp_exclusive(orig_pte)) { pte = pte_swp_clear_exclusive(orig_pte); set_pte_at(src_mm, addr, src_pte, pte); } rss[MM_SWAPENTS]++; } else if (is_migration_entry(entry)) { page = pfn_swap_entry_to_page(entry); rss[mm_counter(page)]++; if (!is_readable_migration_entry(entry) && is_cow_mapping(vm_flags)) { /* * COW mappings require pages in both parent and child * to be set to read. A previously exclusive entry is * now shared. 
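 *
 * In other words (a sketch of the conversion done just below): the
 * writable entry is rebuilt as a readable one from its swp_offset(),
 * the soft-dirty and uffd-wp swap pte bits are carried over, and the
 * downgraded entry is written back into the parent (src) page table.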
*/ entry = make_readable_migration_entry( swp_offset(entry)); pte = swp_entry_to_pte(entry); if (pte_swp_soft_dirty(orig_pte)) pte = pte_swp_mksoft_dirty(pte); if (pte_swp_uffd_wp(orig_pte)) pte = pte_swp_mkuffd_wp(pte); set_pte_at(src_mm, addr, src_pte, pte); } } else if (is_device_private_entry(entry)) { page = pfn_swap_entry_to_page(entry); /* * Update rss count even for unaddressable pages, as * they should treated just like normal pages in this * respect. * * We will likely want to have some new rss counters * for unaddressable pages, at some point. But for now * keep things as they are. */ get_page(page); rss[mm_counter(page)]++; /* Cannot fail as these pages cannot get pinned. */ BUG_ON(page_try_dup_anon_rmap(page, false, src_vma)); /* * We do not preserve soft-dirty information, because so * far, checkpoint/restore is the only feature that * requires that. And checkpoint/restore does not work * when a device driver is involved (you cannot easily * save and restore device driver state). */ if (is_writable_device_private_entry(entry) && is_cow_mapping(vm_flags)) { entry = make_readable_device_private_entry( swp_offset(entry)); pte = swp_entry_to_pte(entry); if (pte_swp_uffd_wp(orig_pte)) pte = pte_swp_mkuffd_wp(pte); set_pte_at(src_mm, addr, src_pte, pte); } } else if (is_device_exclusive_entry(entry)) { /* * Make device exclusive entries present by restoring the * original entry then copying as for a present pte. Device * exclusive entries currently only support private writable * (ie. COW) mappings. */ VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags)); if (try_restore_exclusive_pte(src_pte, src_vma, addr)) return -EBUSY; return -ENOENT; } else if (is_pte_marker_entry(entry)) { pte_marker marker = copy_pte_marker(entry, dst_vma); if (marker) set_pte_at(dst_mm, addr, dst_pte, make_pte_marker(marker)); return 0; } if (!userfaultfd_wp(dst_vma)) pte = pte_swp_clear_uffd_wp(pte); set_pte_at(dst_mm, addr, dst_pte, pte); return 0; } /* * Copy a present and normal page. * * NOTE! The usual case is that this isn't required; * instead, the caller can just increase the page refcount * and re-use the pte the traditional way. * * And if we need a pre-allocated page but don't yet have * one, return a negative error to let the preallocation * code know so that it can do so outside the page table * lock. */ static inline int copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss, struct folio **prealloc, struct page *page) { struct folio *new_folio; pte_t pte; new_folio = *prealloc; if (!new_folio) return -EAGAIN; /* * We have a prealloc page, all good! Take it * over and copy the page & arm it. */ *prealloc = NULL; copy_user_highpage(&new_folio->page, page, addr, src_vma); __folio_mark_uptodate(new_folio); folio_add_new_anon_rmap(new_folio, dst_vma, addr); folio_add_lru_vma(new_folio, dst_vma); rss[MM_ANONPAGES]++; /* All done, just insert the new page copy in the child */ pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot); pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma); if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte))) /* Uffd-wp needs to be delivered to dest pte as well */ pte = pte_mkuffd_wp(pte); set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); return 0; } /* * Copy one pte. Returns 0 if succeeded, or -EAGAIN if one preallocated page * is required to copy this pte. 
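 *
 * A rough sketch of the caller side of that contract (see
 * copy_pte_range() below): on -EAGAIN the caller drops both pte locks,
 * allocates a folio with page_copy_prealloc(), and retries the loop,
 * handing the folio back in via @prealloc so copy_present_page() can
 * consume it.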
*/ static inline int copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss, struct folio **prealloc) { struct mm_struct *src_mm = src_vma->vm_mm; unsigned long vm_flags = src_vma->vm_flags; pte_t pte = ptep_get(src_pte); struct page *page; struct folio *folio; page = vm_normal_page(src_vma, addr, pte); if (page) folio = page_folio(page); if (page && folio_test_anon(folio)) { /* * If this page may have been pinned by the parent process, * copy the page immediately for the child so that we'll always * guarantee the pinned page won't be randomly replaced in the * future. */ folio_get(folio); if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) { /* Page may be pinned, we have to copy. */ folio_put(folio); return copy_present_page(dst_vma, src_vma, dst_pte, src_pte, addr, rss, prealloc, page); } rss[MM_ANONPAGES]++; } else if (page) { folio_get(folio); page_dup_file_rmap(page, false); rss[mm_counter_file(page)]++; } /* * If it's a COW mapping, write protect it both * in the parent and the child */ if (is_cow_mapping(vm_flags) && pte_write(pte)) { ptep_set_wrprotect(src_mm, addr, src_pte); pte = pte_wrprotect(pte); } VM_BUG_ON(page && folio_test_anon(folio) && PageAnonExclusive(page)); /* * If it's a shared mapping, mark it clean in * the child */ if (vm_flags & VM_SHARED) pte = pte_mkclean(pte); pte = pte_mkold(pte); if (!userfaultfd_wp(dst_vma)) pte = pte_clear_uffd_wp(pte); set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); return 0; } static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma, unsigned long addr) { struct folio *new_folio; new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false); if (!new_folio) return NULL; if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) { folio_put(new_folio); return NULL; } folio_throttle_swaprate(new_folio, GFP_KERNEL); return new_folio; } static int copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, unsigned long end) { struct mm_struct *dst_mm = dst_vma->vm_mm; struct mm_struct *src_mm = src_vma->vm_mm; pte_t *orig_src_pte, *orig_dst_pte; pte_t *src_pte, *dst_pte; pte_t ptent; spinlock_t *src_ptl, *dst_ptl; int progress, ret = 0; int rss[NR_MM_COUNTERS]; swp_entry_t entry = (swp_entry_t){0}; struct folio *prealloc = NULL; again: progress = 0; init_rss_vec(rss); /* * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the * error handling here, assume that exclusive mmap_lock on dst and src * protects anon from unexpected THP transitions; with shmem and file * protected by mmap_lock-less collapse skipping areas with anon_vma * (whereas vma_needs_copy() skips areas without anon_vma). A rework * can remove such assumptions later, but this is good enough for now. */ dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); if (!dst_pte) { ret = -ENOMEM; goto out; } src_pte = pte_offset_map_nolock(src_mm, src_pmd, addr, &src_ptl); if (!src_pte) { pte_unmap_unlock(dst_pte, dst_ptl); /* ret == 0 */ goto out; } spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); orig_src_pte = src_pte; orig_dst_pte = dst_pte; arch_enter_lazy_mmu_mode(); do { /* * We are holding two locks at this point - either of them * could generate latencies in another task on another CPU. 
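 *
 * To keep the hold times bounded, the loop below backs off roughly
 * every 32 entries, along the lines of
 *
 *	if (need_resched() ||
 *	    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
 *		break;
 *
 * after which both locks are dropped, cond_resched() runs and the copy
 * restarts where it left off.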
*/ if (progress >= 32) { progress = 0; if (need_resched() || spin_needbreak(src_ptl) || spin_needbreak(dst_ptl)) break; } ptent = ptep_get(src_pte); if (pte_none(ptent)) { progress++; continue; } if (unlikely(!pte_present(ptent))) { ret = copy_nonpresent_pte(dst_mm, src_mm, dst_pte, src_pte, dst_vma, src_vma, addr, rss); if (ret == -EIO) { entry = pte_to_swp_entry(ptep_get(src_pte)); break; } else if (ret == -EBUSY) { break; } else if (!ret) { progress += 8; continue; } /* * Device exclusive entry restored, continue by copying * the now present pte. */ WARN_ON_ONCE(ret != -ENOENT); } /* copy_present_pte() will clear `*prealloc' if consumed */ ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte, addr, rss, &prealloc); /* * If we need a pre-allocated page for this pte, drop the * locks, allocate, and try again. */ if (unlikely(ret == -EAGAIN)) break; if (unlikely(prealloc)) { /* * pre-alloc page cannot be reused by next time so as * to strictly follow mempolicy (e.g., alloc_page_vma() * will allocate page according to address). This * could only happen if one pinned pte changed. */ folio_put(prealloc); prealloc = NULL; } progress += 8; } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(orig_src_pte, src_ptl); add_mm_rss_vec(dst_mm, rss); pte_unmap_unlock(orig_dst_pte, dst_ptl); cond_resched(); if (ret == -EIO) { VM_WARN_ON_ONCE(!entry.val); if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) { ret = -ENOMEM; goto out; } entry.val = 0; } else if (ret == -EBUSY) { goto out; } else if (ret == -EAGAIN) { prealloc = page_copy_prealloc(src_mm, src_vma, addr); if (!prealloc) return -ENOMEM; } else if (ret) { VM_WARN_ON_ONCE(1); } /* We've captured and resolved the error. Reset, try again. */ ret = 0; if (addr != end) goto again; out: if (unlikely(prealloc)) folio_put(prealloc); return ret; } static inline int copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pud_t *dst_pud, pud_t *src_pud, unsigned long addr, unsigned long end) { struct mm_struct *dst_mm = dst_vma->vm_mm; struct mm_struct *src_mm = src_vma->vm_mm; pmd_t *src_pmd, *dst_pmd; unsigned long next; dst_pmd = pmd_alloc(dst_mm, dst_pud, addr); if (!dst_pmd) return -ENOMEM; src_pmd = pmd_offset(src_pud, addr); do { next = pmd_addr_end(addr, end); if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd) || pmd_devmap(*src_pmd)) { int err; VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma); err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd, addr, dst_vma, src_vma); if (err == -ENOMEM) return -ENOMEM; if (!err) continue; /* fall through */ } if (pmd_none_or_clear_bad(src_pmd)) continue; if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd, addr, next)) return -ENOMEM; } while (dst_pmd++, src_pmd++, addr = next, addr != end); return 0; } static inline int copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr, unsigned long end) { struct mm_struct *dst_mm = dst_vma->vm_mm; struct mm_struct *src_mm = src_vma->vm_mm; pud_t *src_pud, *dst_pud; unsigned long next; dst_pud = pud_alloc(dst_mm, dst_p4d, addr); if (!dst_pud) return -ENOMEM; src_pud = pud_offset(src_p4d, addr); do { next = pud_addr_end(addr, end); if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) { int err; VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma); err = copy_huge_pud(dst_mm, src_mm, dst_pud, src_pud, addr, src_vma); if (err == -ENOMEM) return -ENOMEM; if (!err) continue; /* fall through */ } if 
(pud_none_or_clear_bad(src_pud)) continue; if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud, addr, next)) return -ENOMEM; } while (dst_pud++, src_pud++, addr = next, addr != end); return 0; } static inline int copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr, unsigned long end) { struct mm_struct *dst_mm = dst_vma->vm_mm; p4d_t *src_p4d, *dst_p4d; unsigned long next; dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr); if (!dst_p4d) return -ENOMEM; src_p4d = p4d_offset(src_pgd, addr); do { next = p4d_addr_end(addr, end); if (p4d_none_or_clear_bad(src_p4d)) continue; if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d, addr, next)) return -ENOMEM; } while (dst_p4d++, src_p4d++, addr = next, addr != end); return 0; } /* * Return true if the vma needs to copy the pgtable during this fork(). Return * false when we can speed up fork() by allowing lazy page faults later until * when the child accesses the memory range. */ static bool vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) { /* * Always copy pgtables when dst_vma has uffd-wp enabled even if it's * file-backed (e.g. shmem). Because when uffd-wp is enabled, pgtable * contains uffd-wp protection information, that's something we can't * retrieve from page cache, and skip copying will lose those info. */ if (userfaultfd_wp(dst_vma)) return true; if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) return true; if (src_vma->anon_vma) return true; /* * Don't copy ptes where a page fault will fill them correctly. Fork * becomes much lighter when there are big shared or private readonly * mappings. The tradeoff is that copy_page_range is more efficient * than faulting. */ return false; } int copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) { pgd_t *src_pgd, *dst_pgd; unsigned long next; unsigned long addr = src_vma->vm_start; unsigned long end = src_vma->vm_end; struct mm_struct *dst_mm = dst_vma->vm_mm; struct mm_struct *src_mm = src_vma->vm_mm; struct mmu_notifier_range range; bool is_cow; int ret; if (!vma_needs_copy(dst_vma, src_vma)) return 0; if (is_vm_hugetlb_page(src_vma)) return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma); if (unlikely(src_vma->vm_flags & VM_PFNMAP)) { /* * We do not free on error cases below as remove_vma * gets called on error from higher level routine */ ret = track_pfn_copy(src_vma); if (ret) return ret; } /* * We need to invalidate the secondary MMU mappings only when * there could be a permission downgrade on the ptes of the * parent mm. And a permission downgrade will only happen if * is_cow_mapping() returns true. */ is_cow = is_cow_mapping(src_vma->vm_flags); if (is_cow) { mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0, src_mm, addr, end); mmu_notifier_invalidate_range_start(&range); /* * Disabling preemption is not needed for the write side, as * the read side doesn't spin, but goes to the mmap_lock. * * Use the raw variant of the seqcount_t write API to avoid * lockdep complaining about preemptibility. 
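 *
 * Sketch of the resulting pairing (the real calls open just below and
 * close after the pgd loop):
 *
 *	raw_write_seqcount_begin(&src_mm->write_protect_seq);
 *	... copy_p4d_range() over the whole vma ...
 *	raw_write_seqcount_end(&src_mm->write_protect_seq);
 *	mmu_notifier_invalidate_range_end(&range);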
*/ vma_assert_write_locked(src_vma); raw_write_seqcount_begin(&src_mm->write_protect_seq); } ret = 0; dst_pgd = pgd_offset(dst_mm, addr); src_pgd = pgd_offset(src_mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(src_pgd)) continue; if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd, addr, next))) { untrack_pfn_clear(dst_vma); ret = -ENOMEM; break; } } while (dst_pgd++, src_pgd++, addr = next, addr != end); if (is_cow) { raw_write_seqcount_end(&src_mm->write_protect_seq); mmu_notifier_invalidate_range_end(&range); } return ret; } /* Whether we should zap all COWed (private) pages too */ static inline bool should_zap_cows(struct zap_details *details) { /* By default, zap all pages */ if (!details) return true; /* Or, we zap COWed pages only if the caller wants to */ return details->even_cows; } /* Decides whether we should zap this page with the page pointer specified */ static inline bool should_zap_page(struct zap_details *details, struct page *page) { /* If we can make a decision without *page.. */ if (should_zap_cows(details)) return true; /* E.g. the caller passes NULL for the case of a zero page */ if (!page) return true; /* Otherwise we should only zap non-anon pages */ return !PageAnon(page); } static inline bool zap_drop_file_uffd_wp(struct zap_details *details) { if (!details) return false; return details->zap_flags & ZAP_FLAG_DROP_MARKER; } /* * This function makes sure that we'll replace the none pte with an uffd-wp * swap special pte marker when necessary. Must be with the pgtable lock held. */ static inline void zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr, pte_t *pte, struct zap_details *details, pte_t pteval) { /* Zap on anonymous always means dropping everything */ if (vma_is_anonymous(vma)) return; if (zap_drop_file_uffd_wp(details)) return; pte_install_uffd_wp_if_needed(vma, addr, pte, pteval); } static unsigned long zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, struct zap_details *details) { struct mm_struct *mm = tlb->mm; int force_flush = 0; int rss[NR_MM_COUNTERS]; spinlock_t *ptl; pte_t *start_pte; pte_t *pte; swp_entry_t entry; tlb_change_page_size(tlb, PAGE_SIZE); init_rss_vec(rss); start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); if (!pte) return addr; flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); do { pte_t ptent = ptep_get(pte); struct page *page; if (pte_none(ptent)) continue; if (need_resched()) break; if (pte_present(ptent)) { unsigned int delay_rmap; page = vm_normal_page(vma, addr, ptent); if (unlikely(!should_zap_page(details, page))) continue; ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); arch_check_zapped_pte(vma, ptent); tlb_remove_tlb_entry(tlb, pte, addr); zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent); if (unlikely(!page)) { ksm_might_unmap_zero_page(mm, ptent); continue; } delay_rmap = 0; if (!PageAnon(page)) { if (pte_dirty(ptent)) { set_page_dirty(page); if (tlb_delay_rmap(tlb)) { delay_rmap = 1; force_flush = 1; } } if (pte_young(ptent) && likely(vma_has_recency(vma))) mark_page_accessed(page); } rss[mm_counter(page)]--; if (!delay_rmap) { page_remove_rmap(page, vma, false); if (unlikely(page_mapcount(page) < 0)) print_bad_pte(vma, addr, ptent, page); } if (unlikely(__tlb_remove_page(tlb, page, delay_rmap))) { force_flush = 1; addr += PAGE_SIZE; break; } continue; } entry = pte_to_swp_entry(ptent); if (is_device_private_entry(entry) || 
is_device_exclusive_entry(entry)) { page = pfn_swap_entry_to_page(entry); if (unlikely(!should_zap_page(details, page))) continue; /* * Both device private/exclusive mappings should only * work with anonymous page so far, so we don't need to * consider uffd-wp bit when zap. For more information, * see zap_install_uffd_wp_if_needed(). */ WARN_ON_ONCE(!vma_is_anonymous(vma)); rss[mm_counter(page)]--; if (is_device_private_entry(entry)) page_remove_rmap(page, vma, false); put_page(page); } else if (!non_swap_entry(entry)) { /* Genuine swap entry, hence a private anon page */ if (!should_zap_cows(details)) continue; rss[MM_SWAPENTS]--; if (unlikely(!free_swap_and_cache(entry))) print_bad_pte(vma, addr, ptent, NULL); } else if (is_migration_entry(entry)) { page = pfn_swap_entry_to_page(entry); if (!should_zap_page(details, page)) continue; rss[mm_counter(page)]--; } else if (pte_marker_entry_uffd_wp(entry)) { /* * For anon: always drop the marker; for file: only * drop the marker if explicitly requested. */ if (!vma_is_anonymous(vma) && !zap_drop_file_uffd_wp(details)) continue; } else if (is_hwpoison_entry(entry) || is_poisoned_swp_entry(entry)) { if (!should_zap_cows(details)) continue; } else { /* We should have covered all the swap entry types */ WARN_ON_ONCE(1); } pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent); } while (pte++, addr += PAGE_SIZE, addr != end); add_mm_rss_vec(mm, rss); arch_leave_lazy_mmu_mode(); /* Do the actual TLB flush before dropping ptl */ if (force_flush) { tlb_flush_mmu_tlbonly(tlb); tlb_flush_rmaps(tlb, vma); } pte_unmap_unlock(start_pte, ptl); /* * If we forced a TLB flush (either due to running out of * batch buffers or because we needed to flush dirty TLB * entries before releasing the ptl), free the batched * memory too. Come back again if we didn't do everything. */ if (force_flush) tlb_flush_mmu(tlb); return addr; } static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, struct zap_details *details) { pmd_t *pmd; unsigned long next; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { if (next - addr != HPAGE_PMD_SIZE) __split_huge_pmd(vma, pmd, addr, false, NULL); else if (zap_huge_pmd(tlb, vma, pmd, addr)) { addr = next; continue; } /* fall through */ } else if (details && details->single_folio && folio_test_pmd_mappable(details->single_folio) && next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) { spinlock_t *ptl = pmd_lock(tlb->mm, pmd); /* * Take and drop THP pmd lock so that we cannot return * prematurely, while zap_huge_pmd() has cleared *pmd, * but not yet decremented compound_mapcount(). 
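 *
 * In other words, the pmd_lock()/spin_unlock() pair here only waits for
 * any concurrent holder of the pmd lock (such as zap_huge_pmd()) to
 * finish; no page table state is modified in between.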
*/ spin_unlock(ptl); } if (pmd_none(*pmd)) { addr = next; continue; } addr = zap_pte_range(tlb, vma, pmd, addr, next, details); if (addr != next) pmd--; } while (pmd++, cond_resched(), addr != end); return addr; } static inline unsigned long zap_pud_range(struct mmu_gather *tlb, struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr, unsigned long end, struct zap_details *details) { pud_t *pud; unsigned long next; pud = pud_offset(p4d, addr); do { next = pud_addr_end(addr, end); if (pud_trans_huge(*pud) || pud_devmap(*pud)) { if (next - addr != HPAGE_PUD_SIZE) { mmap_assert_locked(tlb->mm); split_huge_pud(vma, pud, addr); } else if (zap_huge_pud(tlb, vma, pud, addr)) goto next; /* fall through */ } if (pud_none_or_clear_bad(pud)) continue; next = zap_pmd_range(tlb, vma, pud, addr, next, details); next: cond_resched(); } while (pud++, addr = next, addr != end); return addr; } static inline unsigned long zap_p4d_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, struct zap_details *details) { p4d_t *p4d; unsigned long next; p4d = p4d_offset(pgd, addr); do { next = p4d_addr_end(addr, end); if (p4d_none_or_clear_bad(p4d)) continue; next = zap_pud_range(tlb, vma, p4d, addr, next, details); } while (p4d++, addr = next, addr != end); return addr; } void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct zap_details *details) { pgd_t *pgd; unsigned long next; BUG_ON(addr >= end); tlb_start_vma(tlb, vma); pgd = pgd_offset(vma->vm_mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; next = zap_p4d_range(tlb, vma, pgd, addr, next, details); } while (pgd++, addr = next, addr != end); tlb_end_vma(tlb, vma); } static void unmap_single_vma(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr, struct zap_details *details, bool mm_wr_locked) { unsigned long start = max(vma->vm_start, start_addr); unsigned long end; if (start >= vma->vm_end) return; end = min(vma->vm_end, end_addr); if (end <= vma->vm_start) return; if (vma->vm_file) uprobe_munmap(vma, start, end); if (unlikely(vma->vm_flags & VM_PFNMAP)) untrack_pfn(vma, 0, 0, mm_wr_locked); if (start != end) { if (unlikely(is_vm_hugetlb_page(vma))) { /* * It is undesirable to test vma->vm_file as it * should be non-null for valid hugetlb area. * However, vm_file will be NULL in the error * cleanup path of mmap_region. When * hugetlbfs ->mmap method fails, * mmap_region() nullifies vma->vm_file * before calling this function to clean up. * Since no pte has actually been setup, it is * safe to do nothing in this case. */ if (vma->vm_file) { zap_flags_t zap_flags = details ? details->zap_flags : 0; __unmap_hugepage_range_final(tlb, vma, start, end, NULL, zap_flags); } } else unmap_page_range(tlb, vma, start, end, details); } } /** * unmap_vmas - unmap a range of memory covered by a list of vma's * @tlb: address of the caller's struct mmu_gather * @mas: the maple state * @vma: the starting vma * @start_addr: virtual address at which to start unmapping * @end_addr: virtual address at which to end unmapping * @tree_end: The maximum index to check * @mm_wr_locked: lock flag * * Unmap all pages in the vma list. * * Only addresses between `start' and `end' will be unmapped. * * The VMA list must be sorted in ascending virtual address order. * * unmap_vmas() assumes that the caller will flush the whole unmapped address * range after unmap_vmas() returns. 
So the only responsibility here is to * ensure that any thus-far unmapped pages are flushed before unmap_vmas() * drops the lock and schedules. */ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas, struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr, unsigned long tree_end, bool mm_wr_locked) { struct mmu_notifier_range range; struct zap_details details = { .zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP, /* Careful - we need to zap private pages too! */ .even_cows = true, }; mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, start_addr, end_addr); mmu_notifier_invalidate_range_start(&range); do { unmap_single_vma(tlb, vma, start_addr, end_addr, &details, mm_wr_locked); } while ((vma = mas_find(mas, tree_end - 1)) != NULL); mmu_notifier_invalidate_range_end(&range); } /** * zap_page_range_single - remove user pages in a given range * @vma: vm_area_struct holding the applicable pages * @address: starting address of pages to zap * @size: number of bytes to zap * @details: details of shared cache invalidation * * The range must fit into one VMA. */ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *details) { const unsigned long end = address + size; struct mmu_notifier_range range; struct mmu_gather tlb; lru_add_drain(); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, address, end); if (is_vm_hugetlb_page(vma)) adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); tlb_gather_mmu(&tlb, vma->vm_mm); update_hiwater_rss(vma->vm_mm); mmu_notifier_invalidate_range_start(&range); /* * unmap 'address-end' not 'range.start-range.end' as range * could have been expanded for hugetlb pmd sharing. */ unmap_single_vma(&tlb, vma, address, end, details, false); mmu_notifier_invalidate_range_end(&range); tlb_finish_mmu(&tlb); } /** * zap_vma_ptes - remove ptes mapping the vma * @vma: vm_area_struct holding ptes to be zapped * @address: starting address of pages to zap * @size: number of bytes to zap * * This function only unmaps ptes assigned to VM_PFNMAP vmas. * * The entire address range must be fully contained within the vma. * */ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size) { if (!range_in_vma(vma, address, address + size) || !(vma->vm_flags & VM_PFNMAP)) return; zap_page_range_single(vma, address, size, NULL); } EXPORT_SYMBOL_GPL(zap_vma_ptes); static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pgd = pgd_offset(mm, addr); p4d = p4d_alloc(mm, pgd, addr); if (!p4d) return NULL; pud = pud_alloc(mm, p4d, addr); if (!pud) return NULL; pmd = pmd_alloc(mm, pud, addr); if (!pmd) return NULL; VM_BUG_ON(pmd_trans_huge(*pmd)); return pmd; } pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl) { pmd_t *pmd = walk_to_pmd(mm, addr); if (!pmd) return NULL; return pte_alloc_map_lock(mm, pmd, addr, ptl); } static int validate_page_before_insert(struct page *page) { if (PageAnon(page) || PageSlab(page) || page_has_type(page)) return -EINVAL; flush_dcache_page(page); return 0; } static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, unsigned long addr, struct page *page, pgprot_t prot) { if (!pte_none(ptep_get(pte))) return -EBUSY; /* Ok, finally just insert the thing.. 
*/ get_page(page); inc_mm_counter(vma->vm_mm, mm_counter_file(page)); page_add_file_rmap(page, vma, false); set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot)); return 0; } /* * This is the old fallback for page remapping. * * For historical reasons, it only allows reserved pages. Only * old drivers should use this, and they needed to mark their * pages reserved for the old functions anyway. */ static int insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot) { int retval; pte_t *pte; spinlock_t *ptl; retval = validate_page_before_insert(page); if (retval) goto out; retval = -ENOMEM; pte = get_locked_pte(vma->vm_mm, addr, &ptl); if (!pte) goto out; retval = insert_page_into_pte_locked(vma, pte, addr, page, prot); pte_unmap_unlock(pte, ptl); out: return retval; } static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte, unsigned long addr, struct page *page, pgprot_t prot) { int err; if (!page_count(page)) return -EINVAL; err = validate_page_before_insert(page); if (err) return err; return insert_page_into_pte_locked(vma, pte, addr, page, prot); } /* insert_pages() amortizes the cost of spinlock operations * when inserting pages in a loop. */ static int insert_pages(struct vm_area_struct *vma, unsigned long addr, struct page **pages, unsigned long *num, pgprot_t prot) { pmd_t *pmd = NULL; pte_t *start_pte, *pte; spinlock_t *pte_lock; struct mm_struct *const mm = vma->vm_mm; unsigned long curr_page_idx = 0; unsigned long remaining_pages_total = *num; unsigned long pages_to_write_in_pmd; int ret; more: ret = -EFAULT; pmd = walk_to_pmd(mm, addr); if (!pmd) goto out; pages_to_write_in_pmd = min_t(unsigned long, remaining_pages_total, PTRS_PER_PTE - pte_index(addr)); /* Allocate the PTE if necessary; takes PMD lock once only. */ ret = -ENOMEM; if (pte_alloc(mm, pmd)) goto out; while (pages_to_write_in_pmd) { int pte_idx = 0; const int batch_size = min_t(int, pages_to_write_in_pmd, 8); start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); if (!start_pte) { ret = -EFAULT; goto out; } for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) { int err = insert_page_in_batch_locked(vma, pte, addr, pages[curr_page_idx], prot); if (unlikely(err)) { pte_unmap_unlock(start_pte, pte_lock); ret = err; remaining_pages_total -= pte_idx; goto out; } addr += PAGE_SIZE; ++curr_page_idx; } pte_unmap_unlock(start_pte, pte_lock); pages_to_write_in_pmd -= batch_size; remaining_pages_total -= batch_size; } if (remaining_pages_total) goto more; ret = 0; out: *num = remaining_pages_total; return ret; } /** * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock. * @vma: user vma to map to * @addr: target start user address of these pages * @pages: source kernel pages * @num: in: number of pages to map. out: number of pages that were *not* * mapped. (0 means all pages were successfully mapped). * * Preferred over vm_insert_page() when inserting multiple pages. * * In case of error, we may have mapped a subset of the provided * pages. It is the caller's responsibility to account for this case. * * The same restrictions apply as in vm_insert_page(). 
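 *
 * A rough usage sketch from a hypothetical driver ->mmap() handler
 * (variable names and error handling are illustrative only):
 *
 *	unsigned long num = npages;
 *	int err = vm_insert_pages(vma, vma->vm_start, pages, &num);
 *
 * On return @num holds how many pages were not mapped (0 on full
 * success); the caller must account for any partially mapped range
 * when err is nonzero.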
*/ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, struct page **pages, unsigned long *num) { const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1; if (addr < vma->vm_start || end_addr >= vma->vm_end) return -EFAULT; if (!(vma->vm_flags & VM_MIXEDMAP)) { BUG_ON(mmap_read_trylock(vma->vm_mm)); BUG_ON(vma->vm_flags & VM_PFNMAP); vm_flags_set(vma, VM_MIXEDMAP); } /* Defer page refcount checking till we're about to map that page. */ return insert_pages(vma, addr, pages, num, vma->vm_page_prot); } EXPORT_SYMBOL(vm_insert_pages); /** * vm_insert_page - insert single page into user vma * @vma: user vma to map to * @addr: target user address of this page * @page: source kernel page * * This allows drivers to insert individual pages they've allocated * into a user vma. * * The page has to be a nice clean _individual_ kernel allocation. * If you allocate a compound page, you need to have marked it as * such (__GFP_COMP), or manually just split the page up yourself * (see split_page()). * * NOTE! Traditionally this was done with "remap_pfn_range()" which * took an arbitrary page protection parameter. This doesn't allow * that. Your vma protection will have to be set up correctly, which * means that if you want a shared writable mapping, you'd better * ask for a shared writable mapping! * * The page does not need to be reserved. * * Usually this function is called from f_op->mmap() handler * under mm->mmap_lock write-lock, so it can change vma->vm_flags. * Caller must set VM_MIXEDMAP on vma if it wants to call this * function from other places, for example from page-fault handler. * * Return: %0 on success, negative error code otherwise. */ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page) { if (addr < vma->vm_start || addr >= vma->vm_end) return -EFAULT; if (!page_count(page)) return -EINVAL; if (!(vma->vm_flags & VM_MIXEDMAP)) { BUG_ON(mmap_read_trylock(vma->vm_mm)); BUG_ON(vma->vm_flags & VM_PFNMAP); vm_flags_set(vma, VM_MIXEDMAP); } return insert_page(vma, addr, page, vma->vm_page_prot); } EXPORT_SYMBOL(vm_insert_page); /* * __vm_map_pages - maps range of kernel pages into user vma * @vma: user vma to map to * @pages: pointer to array of source kernel pages * @num: number of pages in page array * @offset: user's requested vm_pgoff * * This allows drivers to map range of kernel pages into a user vma. * * Return: 0 on success and error code otherwise. */ static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages, unsigned long num, unsigned long offset) { unsigned long count = vma_pages(vma); unsigned long uaddr = vma->vm_start; int ret, i; /* Fail if the user requested offset is beyond the end of the object */ if (offset >= num) return -ENXIO; /* Fail if the user requested size exceeds available object size */ if (count > num - offset) return -ENXIO; for (i = 0; i < count; i++) { ret = vm_insert_page(vma, uaddr, pages[offset + i]); if (ret < 0) return ret; uaddr += PAGE_SIZE; } return 0; } /** * vm_map_pages - maps range of kernel pages starts with non zero offset * @vma: user vma to map to * @pages: pointer to array of source kernel pages * @num: number of pages in page array * * Maps an object consisting of @num pages, catering for the user's * requested vm_pgoff * * If we fail to insert any page into the vma, the function will return * immediately leaving any previously inserted pages present. 
Callers * from the mmap handler may immediately return the error as their caller * will destroy the vma, removing any successfully inserted pages. Other * callers should make their own arrangements for calling unmap_region(). * * Context: Process context. Called by mmap handlers. * Return: 0 on success and error code otherwise. */ int vm_map_pages(struct vm_area_struct *vma, struct page **pages, unsigned long num) { return __vm_map_pages(vma, pages, num, vma->vm_pgoff); } EXPORT_SYMBOL(vm_map_pages); /** * vm_map_pages_zero - map range of kernel pages starts with zero offset * @vma: user vma to map to * @pages: pointer to array of source kernel pages * @num: number of pages in page array * * Similar to vm_map_pages(), except that it explicitly sets the offset * to 0. This function is intended for the drivers that did not consider * vm_pgoff. * * Context: Process context. Called by mmap handlers. * Return: 0 on success and error code otherwise. */ int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, unsigned long num) { return __vm_map_pages(vma, pages, num, 0); } EXPORT_SYMBOL(vm_map_pages_zero); static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn, pgprot_t prot, bool mkwrite) { struct mm_struct *mm = vma->vm_mm; pte_t *pte, entry; spinlock_t *ptl; pte = get_locked_pte(mm, addr, &ptl); if (!pte) return VM_FAULT_OOM; entry = ptep_get(pte); if (!pte_none(entry)) { if (mkwrite) { /* * For read faults on private mappings the PFN passed * in may not match the PFN we have mapped if the * mapped PFN is a writeable COW page. In the mkwrite * case we are creating a writable PTE for a shared * mapping and we expect the PFNs to match. If they * don't match, we are likely racing with block * allocation and mapping invalidation so just skip the * update. */ if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) { WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry))); goto out_unlock; } entry = pte_mkyoung(entry); entry = maybe_mkwrite(pte_mkdirty(entry), vma); if (ptep_set_access_flags(vma, addr, pte, entry, 1)) update_mmu_cache(vma, addr, pte); } goto out_unlock; } /* Ok, finally just insert the thing.. */ if (pfn_t_devmap(pfn)) entry = pte_mkdevmap(pfn_t_pte(pfn, prot)); else entry = pte_mkspecial(pfn_t_pte(pfn, prot)); if (mkwrite) { entry = pte_mkyoung(entry); entry = maybe_mkwrite(pte_mkdirty(entry), vma); } set_pte_at(mm, addr, pte, entry); update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ out_unlock: pte_unmap_unlock(pte, ptl); return VM_FAULT_NOPAGE; } /** * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot * @vma: user vma to map to * @addr: target user address of this page * @pfn: source kernel pfn * @pgprot: pgprot flags for the inserted page * * This is exactly like vmf_insert_pfn(), except that it allows drivers * to override pgprot on a per-page basis. * * This only makes sense for IO mappings, and it makes no sense for * COW mappings. In general, using multiple vmas is preferable; * vmf_insert_pfn_prot should only be used if using multiple VMAs is * impractical. * * pgprot typically only differs from @vma->vm_page_prot when drivers set * caching- and encryption bits different than those of @vma->vm_page_prot, * because the caching- or encryption mode may not be known at mmap() time. * * This is ok as long as @vma->vm_page_prot is not used by the core vm * to set caching and encryption bits for those vmas (except for COW pages). 
* This is ensured by core vm only modifying these page table entries using * functions that don't touch caching- or encryption bits, using pte_modify() * if needed. (See for example mprotect()). * * Also when new page-table entries are created, this is only done using the * fault() callback, and never using the value of vma->vm_page_prot, * except for page-table entries that point to anonymous pages as the result * of COW. * * Context: Process context. May allocate using %GFP_KERNEL. * Return: vm_fault_t value. */ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, pgprot_t pgprot) { /* * Technically, architectures with pte_special can avoid all these * restrictions (same for remap_pfn_range). However we would like * consistency in testing and feature parity among all, so we should * try to keep these invariants in place for everybody. */ BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == (VM_PFNMAP|VM_MIXEDMAP)); BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); if (addr < vma->vm_start || addr >= vma->vm_end) return VM_FAULT_SIGBUS; if (!pfn_modify_allowed(pfn, pgprot)) return VM_FAULT_SIGBUS; track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)); return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, false); } EXPORT_SYMBOL(vmf_insert_pfn_prot); /** * vmf_insert_pfn - insert single pfn into user vma * @vma: user vma to map to * @addr: target user address of this page * @pfn: source kernel pfn * * Similar to vm_insert_page, this allows drivers to insert individual pages * they've allocated into a user vma. Same comments apply. * * This function should only be called from a vm_ops->fault handler, and * in that case the handler should return the result of this function. * * vma cannot be a COW mapping. * * As this is called only for pages that do not currently exist, we * do not need to flush old virtual caches or the TLB. * * Context: Process context. May allocate using %GFP_KERNEL. * Return: vm_fault_t value. */ vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) { return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); } EXPORT_SYMBOL(vmf_insert_pfn); static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn) { /* these checks mirror the abort conditions in vm_normal_page */ if (vma->vm_flags & VM_MIXEDMAP) return true; if (pfn_t_devmap(pfn)) return true; if (pfn_t_special(pfn)) return true; if (is_zero_pfn(pfn_t_to_pfn(pfn))) return true; return false; } static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn, bool mkwrite) { pgprot_t pgprot = vma->vm_page_prot; int err; BUG_ON(!vm_mixed_ok(vma, pfn)); if (addr < vma->vm_start || addr >= vma->vm_end) return VM_FAULT_SIGBUS; track_pfn_insert(vma, &pgprot, pfn); if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot)) return VM_FAULT_SIGBUS; /* * If we don't have pte special, then we have to use the pfn_valid() * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must* * refcount the page if pfn_valid is true (hence insert_page rather * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP * without pte special, it would there be refcounted as a normal page. 
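 *
 * So, schematically (a summary of the branch below, not extra logic):
 * without ARCH_HAS_PTE_SPECIAL, a non-devmap pfn that has a valid
 * struct page goes through insert_page() and is refcounted; everything
 * else goes through insert_pfn() as a special or devmap pte.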
*/ if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) { struct page *page; /* * At this point we are committed to insert_page() * regardless of whether the caller specified flags that * result in pfn_t_has_page() == false. */ page = pfn_to_page(pfn_t_to_pfn(pfn)); err = insert_page(vma, addr, page, pgprot); } else { return insert_pfn(vma, addr, pfn, pgprot, mkwrite); } if (err == -ENOMEM) return VM_FAULT_OOM; if (err < 0 && err != -EBUSY) return VM_FAULT_SIGBUS; return VM_FAULT_NOPAGE; } vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn) { return __vm_insert_mixed(vma, addr, pfn, false); } EXPORT_SYMBOL(vmf_insert_mixed); /* * If the insertion of PTE failed because someone else already added a * different entry in the mean time, we treat that as success as we assume * the same entry was actually inserted. */ vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn) { return __vm_insert_mixed(vma, addr, pfn, true); } EXPORT_SYMBOL(vmf_insert_mixed_mkwrite); /* * maps a range of physical memory into the requested pages. the old * mappings are removed. any references to nonexistent pages results * in null mappings (currently treated as "copy-on-access") */ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) { pte_t *pte, *mapped_pte; spinlock_t *ptl; int err = 0; mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); if (!pte) return -ENOMEM; arch_enter_lazy_mmu_mode(); do { BUG_ON(!pte_none(ptep_get(pte))); if (!pfn_modify_allowed(pfn, prot)) { err = -EACCES; break; } set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); pfn++; } while (pte++, addr += PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(mapped_pte, ptl); return err; } static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) { pmd_t *pmd; unsigned long next; int err; pfn -= addr >> PAGE_SHIFT; pmd = pmd_alloc(mm, pud, addr); if (!pmd) return -ENOMEM; VM_BUG_ON(pmd_trans_huge(*pmd)); do { next = pmd_addr_end(addr, end); err = remap_pte_range(mm, pmd, addr, next, pfn + (addr >> PAGE_SHIFT), prot); if (err) return err; } while (pmd++, addr = next, addr != end); return 0; } static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) { pud_t *pud; unsigned long next; int err; pfn -= addr >> PAGE_SHIFT; pud = pud_alloc(mm, p4d, addr); if (!pud) return -ENOMEM; do { next = pud_addr_end(addr, end); err = remap_pmd_range(mm, pud, addr, next, pfn + (addr >> PAGE_SHIFT), prot); if (err) return err; } while (pud++, addr = next, addr != end); return 0; } static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) { p4d_t *p4d; unsigned long next; int err; pfn -= addr >> PAGE_SHIFT; p4d = p4d_alloc(mm, pgd, addr); if (!p4d) return -ENOMEM; do { next = p4d_addr_end(addr, end); err = remap_pud_range(mm, p4d, addr, next, pfn + (addr >> PAGE_SHIFT), prot); if (err) return err; } while (p4d++, addr = next, addr != end); return 0; } /* * Variant of remap_pfn_range that does not call track_pfn_remap. The caller * must have pre-validated the caching bits of the pgprot_t. 
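 *
 * For context, drivers normally use the tracked wrapper
 * remap_pfn_range() below from their ->mmap() handler, along the lines
 * of (illustrative only, 'phys_addr' is a made-up variable):
 *
 *	return remap_pfn_range(vma, vma->vm_start,
 *			       phys_addr >> PAGE_SHIFT,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);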
*/ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) { pgd_t *pgd; unsigned long next; unsigned long end = addr + PAGE_ALIGN(size); struct mm_struct *mm = vma->vm_mm; int err; if (WARN_ON_ONCE(!PAGE_ALIGNED(addr))) return -EINVAL; /* * Physically remapped pages are special. Tell the * rest of the world about it: * VM_IO tells people not to look at these pages * (accesses can have side effects). * VM_PFNMAP tells the core MM that the base pages are just * raw PFN mappings, and do not have a "struct page" associated * with them. * VM_DONTEXPAND * Disable vma merging and expanding with mremap(). * VM_DONTDUMP * Omit vma from core dump, even when VM_IO turned off. * * There's a horrible special case to handle copy-on-write * behaviour that some programs depend on. We mark the "original" * un-COW'ed pages by matching them up with "vma->vm_pgoff". * See vm_normal_page() for details. */ if (is_cow_mapping(vma->vm_flags)) { if (addr != vma->vm_start || end != vma->vm_end) return -EINVAL; vma->vm_pgoff = pfn; } vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); BUG_ON(addr >= end); pfn -= addr >> PAGE_SHIFT; pgd = pgd_offset(mm, addr); flush_cache_range(vma, addr, end); do { next = pgd_addr_end(addr, end); err = remap_p4d_range(mm, pgd, addr, next, pfn + (addr >> PAGE_SHIFT), prot); if (err) return err; } while (pgd++, addr = next, addr != end); return 0; } /** * remap_pfn_range - remap kernel memory to userspace * @vma: user vma to map to * @addr: target page aligned user address to start at * @pfn: page frame number of kernel physical memory address * @size: size of mapping area * @prot: page protection flags for this mapping * * Note: this is only safe if the mm semaphore is held when called. * * Return: %0 on success, negative error code otherwise. */ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) { int err; err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); if (err) return -EINVAL; err = remap_pfn_range_notrack(vma, addr, pfn, size, prot); if (err) untrack_pfn(vma, pfn, PAGE_ALIGN(size), true); return err; } EXPORT_SYMBOL(remap_pfn_range); /** * vm_iomap_memory - remap memory to userspace * @vma: user vma to map to * @start: start of the physical memory to be mapped * @len: size of area * * This is a simplified io_remap_pfn_range() for common driver use. The * driver just needs to give us the physical memory range to be mapped, * we'll figure out the rest from the vma information. * * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get * whatever write-combining details or similar. * * Return: %0 on success, negative error code otherwise. */ int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) { unsigned long vm_len, pfn, pages; /* Check that the physical memory area passed in looks valid */ if (start + len < start) return -EINVAL; /* * You *really* shouldn't map things that aren't page-aligned, * but we've historically allowed it because IO memory might * just have smaller alignment. */ len += start & ~PAGE_MASK; pfn = start >> PAGE_SHIFT; pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; if (pfn + pages < pfn) return -EINVAL; /* We start the mapping 'vm_pgoff' pages into the area */ if (vma->vm_pgoff > pages) return -EINVAL; pfn += vma->vm_pgoff; pages -= vma->vm_pgoff; /* Can we fit all of the mapping? 
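	 * (illustratively: the vma length in pages, vm_len >> PAGE_SHIFT,
	 * must not exceed the 'pages' left after the vm_pgoff adjustment)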
*/ vm_len = vma->vm_end - vma->vm_start; if (vm_len >> PAGE_SHIFT > pages) return -EINVAL; /* Ok, let it rip */ return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); } EXPORT_SYMBOL(vm_iomap_memory); static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data, bool create, pgtbl_mod_mask *mask) { pte_t *pte, *mapped_pte; int err = 0; spinlock_t *ptl; if (create) { mapped_pte = pte = (mm == &init_mm) ? pte_alloc_kernel_track(pmd, addr, mask) : pte_alloc_map_lock(mm, pmd, addr, &ptl); if (!pte) return -ENOMEM; } else { mapped_pte = pte = (mm == &init_mm) ? pte_offset_kernel(pmd, addr) : pte_offset_map_lock(mm, pmd, addr, &ptl); if (!pte) return -EINVAL; } arch_enter_lazy_mmu_mode(); if (fn) { do { if (create || !pte_none(ptep_get(pte))) { err = fn(pte++, addr, data); if (err) break; } } while (addr += PAGE_SIZE, addr != end); } *mask |= PGTBL_PTE_MODIFIED; arch_leave_lazy_mmu_mode(); if (mm != &init_mm) pte_unmap_unlock(mapped_pte, ptl); return err; } static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, unsigned long addr, unsigned long end, pte_fn_t fn, void *data, bool create, pgtbl_mod_mask *mask) { pmd_t *pmd; unsigned long next; int err = 0; BUG_ON(pud_huge(*pud)); if (create) { pmd = pmd_alloc_track(mm, pud, addr, mask); if (!pmd) return -ENOMEM; } else { pmd = pmd_offset(pud, addr); } do { next = pmd_addr_end(addr, end); if (pmd_none(*pmd) && !create) continue; if (WARN_ON_ONCE(pmd_leaf(*pmd))) return -EINVAL; if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) { if (!create) continue; pmd_clear_bad(pmd); } err = apply_to_pte_range(mm, pmd, addr, next, fn, data, create, mask); if (err) break; } while (pmd++, addr = next, addr != end); return err; } static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, unsigned long addr, unsigned long end, pte_fn_t fn, void *data, bool create, pgtbl_mod_mask *mask) { pud_t *pud; unsigned long next; int err = 0; if (create) { pud = pud_alloc_track(mm, p4d, addr, mask); if (!pud) return -ENOMEM; } else { pud = pud_offset(p4d, addr); } do { next = pud_addr_end(addr, end); if (pud_none(*pud) && !create) continue; if (WARN_ON_ONCE(pud_leaf(*pud))) return -EINVAL; if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) { if (!create) continue; pud_clear_bad(pud); } err = apply_to_pmd_range(mm, pud, addr, next, fn, data, create, mask); if (err) break; } while (pud++, addr = next, addr != end); return err; } static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data, bool create, pgtbl_mod_mask *mask) { p4d_t *p4d; unsigned long next; int err = 0; if (create) { p4d = p4d_alloc_track(mm, pgd, addr, mask); if (!p4d) return -ENOMEM; } else { p4d = p4d_offset(pgd, addr); } do { next = p4d_addr_end(addr, end); if (p4d_none(*p4d) && !create) continue; if (WARN_ON_ONCE(p4d_leaf(*p4d))) return -EINVAL; if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) { if (!create) continue; p4d_clear_bad(p4d); } err = apply_to_pud_range(mm, p4d, addr, next, fn, data, create, mask); if (err) break; } while (p4d++, addr = next, addr != end); return err; } static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, unsigned long size, pte_fn_t fn, void *data, bool create) { pgd_t *pgd; unsigned long start = addr, next; unsigned long end = addr + size; pgtbl_mod_mask mask = 0; int err = 0; if (WARN_ON(addr >= end)) return -EINVAL; pgd = pgd_offset(mm, addr); do { next = pgd_addr_end(addr, end); 
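		/*
		 * When "create" is false, empty top-level entries are simply
		 * skipped; when it is true, the lower levels allocate any
		 * missing page tables as we descend.
		 */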
if (pgd_none(*pgd) && !create) continue; if (WARN_ON_ONCE(pgd_leaf(*pgd))) return -EINVAL; if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) { if (!create) continue; pgd_clear_bad(pgd); } err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask); if (err) break; } while (pgd++, addr = next, addr != end); if (mask & ARCH_PAGE_TABLE_SYNC_MASK) arch_sync_kernel_mappings(start, start + size); return err; } /* * Scan a region of virtual memory, filling in page tables as necessary * and calling a provided function on each leaf page table. */ int apply_to_page_range(struct mm_struct *mm, unsigned long addr, unsigned long size, pte_fn_t fn, void *data) { return __apply_to_page_range(mm, addr, size, fn, data, true); } EXPORT_SYMBOL_GPL(apply_to_page_range); /* * Scan a region of virtual memory, calling a provided function on * each leaf page table where it exists. * * Unlike apply_to_page_range, this does _not_ fill in page tables * where they are absent. */ int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr, unsigned long size, pte_fn_t fn, void *data) { return __apply_to_page_range(mm, addr, size, fn, data, false); } EXPORT_SYMBOL_GPL(apply_to_existing_page_range); /* * handle_pte_fault chooses page fault handler according to an entry which was * read non-atomically. Before making any commitment, on those architectures * or configurations (e.g. i386 with PAE) which might give a mix of unmatched * parts, do_swap_page must check under lock before unmapping the pte and * proceeding (but do_wp_page is only called after already making such a check; * and do_anonymous_page can safely check later on). */ static inline int pte_unmap_same(struct vm_fault *vmf) { int same = 1; #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION) if (sizeof(pte_t) > sizeof(unsigned long)) { spin_lock(vmf->ptl); same = pte_same(ptep_get(vmf->pte), vmf->orig_pte); spin_unlock(vmf->ptl); } #endif pte_unmap(vmf->pte); vmf->pte = NULL; return same; } /* * Return: * 0: copied succeeded * -EHWPOISON: copy failed due to hwpoison in source page * -EAGAIN: copied failed (some other reason) */ static inline int __wp_page_copy_user(struct page *dst, struct page *src, struct vm_fault *vmf) { int ret; void *kaddr; void __user *uaddr; struct vm_area_struct *vma = vmf->vma; struct mm_struct *mm = vma->vm_mm; unsigned long addr = vmf->address; if (likely(src)) { if (copy_mc_user_highpage(dst, src, addr, vma)) { memory_failure_queue(page_to_pfn(src), 0); return -EHWPOISON; } return 0; } /* * If the source page was a PFN mapping, we don't have * a "struct page" for it. We do a best-effort copy by * just copying from the original user address. If that * fails, we just zero-fill it. Live with it. */ kaddr = kmap_atomic(dst); uaddr = (void __user *)(addr & PAGE_MASK); /* * On architectures with software "accessed" bits, we would * take a double page fault, so mark it accessed here. */ vmf->pte = NULL; if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) { pte_t entry; vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { /* * Other thread has already handled the fault * and update local tlb only */ if (vmf->pte) update_mmu_tlb(vma, addr, vmf->pte); ret = -EAGAIN; goto pte_unlock; } entry = pte_mkyoung(vmf->orig_pte); if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1); } /* * This really shouldn't fail, because the page is there * in the page tables. 
But it might just be unreadable, * in which case we just give up and fill the result with * zeroes. */ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { if (vmf->pte) goto warn; /* Re-validate under PTL if the page is still mapped */ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { /* The PTE changed under us, update local tlb */ if (vmf->pte) update_mmu_tlb(vma, addr, vmf->pte); ret = -EAGAIN; goto pte_unlock; } /* * The same page can be mapped back since last copy attempt. * Try to copy again under PTL. */ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { /* * Give a warn in case there can be some obscure * use-case */ warn: WARN_ON_ONCE(1); clear_page(kaddr); } } ret = 0; pte_unlock: if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); kunmap_atomic(kaddr); flush_dcache_page(dst); return ret; } static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) { struct file *vm_file = vma->vm_file; if (vm_file) return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO; /* * Special mappings (e.g. VDSO) do not have any file so fake * a default GFP_KERNEL for them. */ return GFP_KERNEL; } /* * Notify the address space that the page is about to become writable so that * it can prohibit this or wait for the page to get into an appropriate state. * * We do this without the lock held, so that it can sleep if it needs to. */ static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio) { vm_fault_t ret; unsigned int old_flags = vmf->flags; vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; if (vmf->vma->vm_file && IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) return VM_FAULT_SIGBUS; ret = vmf->vma->vm_ops->page_mkwrite(vmf); /* Restore original flags so that caller is not surprised */ vmf->flags = old_flags; if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) return ret; if (unlikely(!(ret & VM_FAULT_LOCKED))) { folio_lock(folio); if (!folio->mapping) { folio_unlock(folio); return 0; /* retry */ } ret |= VM_FAULT_LOCKED; } else VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); return ret; } /* * Handle dirtying of a page in shared file mapping on a write fault. * * The function expects the page to be locked and unlocks it. */ static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct address_space *mapping; struct folio *folio = page_folio(vmf->page); bool dirtied; bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; dirtied = folio_mark_dirty(folio); VM_BUG_ON_FOLIO(folio_test_anon(folio), folio); /* * Take a local copy of the address_space - folio.mapping may be zeroed * by truncate after folio_unlock(). The address_space itself remains * pinned by vma->vm_file's reference. We rely on folio_unlock()'s * release semantics to prevent the compiler from undoing this copying. */ mapping = folio_raw_mapping(folio); folio_unlock(folio); if (!page_mkwrite) file_update_time(vma->vm_file); /* * Throttle page dirtying rate down to writeback speed. * * mapping may be NULL here because some device drivers do not * set page.mapping but still dirty their pages * * Drop the mmap_lock before waiting on IO, if we can. The file * is pinning the mapping, as per above. 
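 *
 * maybe_unlock_mmap_for_io() hands back a pinned struct file only when
 * it actually dropped mmap_lock; in that case we return
 * VM_FAULT_COMPLETED so the arch fault handler knows the lock has
 * already been released.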
*/ if ((dirtied || page_mkwrite) && mapping) { struct file *fpin; fpin = maybe_unlock_mmap_for_io(vmf, NULL); balance_dirty_pages_ratelimited(mapping); if (fpin) { fput(fpin); return VM_FAULT_COMPLETED; } } return 0; } /* * Handle write page faults for pages that can be reused in the current vma * * This can happen either due to the mapping being with the VM_SHARED flag, * or due to us being the last reference standing to the page. In either * case, all we need to do here is to mark the page as writable and update * any related book-keeping. */ static inline void wp_page_reuse(struct vm_fault *vmf) __releases(vmf->ptl) { struct vm_area_struct *vma = vmf->vma; struct page *page = vmf->page; pte_t entry; VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE)); VM_BUG_ON(page && PageAnon(page) && !PageAnonExclusive(page)); /* * Clear the pages cpupid information as the existing * information potentially belongs to a now completely * unrelated process. */ if (page) page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1); flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); entry = pte_mkyoung(vmf->orig_pte); entry = maybe_mkwrite(pte_mkdirty(entry), vma); if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); pte_unmap_unlock(vmf->pte, vmf->ptl); count_vm_event(PGREUSE); } /* * Handle the case of a page which we actually need to copy to a new page, * either due to COW or unsharing. * * Called with mmap_lock locked and the old page referenced, but * without the ptl held. * * High level logic flow: * * - Allocate a page, copy the content of the old page to the new one. * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc. * - Take the PTL. If the pte changed, bail out and release the allocated page * - If the pte is still the way we remember it, update the page table and all * relevant references. This includes dropping the reference the page-table * held to the old page, as well as updating the rmap. * - In any case, unlock the PTL and drop the reference we took to the old page. */ static vm_fault_t wp_page_copy(struct vm_fault *vmf) { const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; struct vm_area_struct *vma = vmf->vma; struct mm_struct *mm = vma->vm_mm; struct folio *old_folio = NULL; struct folio *new_folio = NULL; pte_t entry; int page_copied = 0; struct mmu_notifier_range range; int ret; delayacct_wpcopy_start(); if (vmf->page) old_folio = page_folio(vmf->page); if (unlikely(anon_vma_prepare(vma))) goto oom; if (is_zero_pfn(pte_pfn(vmf->orig_pte))) { new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address); if (!new_folio) goto oom; } else { new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address, false); if (!new_folio) goto oom; ret = __wp_page_copy_user(&new_folio->page, vmf->page, vmf); if (ret) { /* * COW failed, if the fault was solved by other, * it's fine. If not, userspace would re-fault on * the same address and we will handle the fault * from the second attempt. * The -EHWPOISON case will not be retried. */ folio_put(new_folio); if (old_folio) folio_put(old_folio); delayacct_wpcopy_end(); return ret == -EHWPOISON ? 
VM_FAULT_HWPOISON : 0; } kmsan_copy_page_meta(&new_folio->page, vmf->page); } if (mem_cgroup_charge(new_folio, mm, GFP_KERNEL)) goto oom_free_new; folio_throttle_swaprate(new_folio, GFP_KERNEL); __folio_mark_uptodate(new_folio); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address & PAGE_MASK, (vmf->address & PAGE_MASK) + PAGE_SIZE); mmu_notifier_invalidate_range_start(&range); /* * Re-check the pte - we dropped the lock */ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { if (old_folio) { if (!folio_test_anon(old_folio)) { dec_mm_counter(mm, mm_counter_file(&old_folio->page)); inc_mm_counter(mm, MM_ANONPAGES); } } else { ksm_might_unmap_zero_page(mm, vmf->orig_pte); inc_mm_counter(mm, MM_ANONPAGES); } flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); entry = mk_pte(&new_folio->page, vma->vm_page_prot); entry = pte_sw_mkyoung(entry); if (unlikely(unshare)) { if (pte_soft_dirty(vmf->orig_pte)) entry = pte_mksoft_dirty(entry); if (pte_uffd_wp(vmf->orig_pte)) entry = pte_mkuffd_wp(entry); } else { entry = maybe_mkwrite(pte_mkdirty(entry), vma); } /* * Clear the pte entry and flush it first, before updating the * pte with the new entry, to keep TLBs on different CPUs in * sync. This code used to set the new PTE then flush TLBs, but * that left a window where the new PTE could be loaded into * some TLBs while the old PTE remains in others. */ ptep_clear_flush(vma, vmf->address, vmf->pte); folio_add_new_anon_rmap(new_folio, vma, vmf->address); folio_add_lru_vma(new_folio, vma); /* * We call the notify macro here because, when using secondary * mmu page tables (such as kvm shadow page tables), we want the * new page to be mapped directly into the secondary page table. */ BUG_ON(unshare && pte_write(entry)); set_pte_at_notify(mm, vmf->address, vmf->pte, entry); update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); if (old_folio) { /* * Only after switching the pte to the new page may * we remove the mapcount here. Otherwise another * process may come and find the rmap count decremented * before the pte is switched to the new page, and * "reuse" the old page writing into it while our pte * here still points into it and can be read by other * threads. * * The critical issue is to order this * page_remove_rmap with the ptp_clear_flush above. * Those stores are ordered by (if nothing else,) * the barrier present in the atomic_add_negative * in page_remove_rmap. * * Then the TLB flush in ptep_clear_flush ensures that * no process can access the old page before the * decremented mapcount is visible. And the old page * cannot be reused until after the decremented * mapcount is visible. So transitively, TLBs to * old page will be flushed before it can be reused. */ page_remove_rmap(vmf->page, vma, false); } /* Free the old page.. 
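 * Aliasing new_folio to the old folio makes the folio_put(new_folio)
 * below drop the reference that the page table held on the old page,
 * while the later folio_put(old_folio) drops the temporary reference
 * our caller took before copying.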
*/ new_folio = old_folio; page_copied = 1; pte_unmap_unlock(vmf->pte, vmf->ptl); } else if (vmf->pte) { update_mmu_tlb(vma, vmf->address, vmf->pte); pte_unmap_unlock(vmf->pte, vmf->ptl); } mmu_notifier_invalidate_range_end(&range); if (new_folio) folio_put(new_folio); if (old_folio) { if (page_copied) free_swap_cache(&old_folio->page); folio_put(old_folio); } delayacct_wpcopy_end(); return 0; oom_free_new: folio_put(new_folio); oom: if (old_folio) folio_put(old_folio); delayacct_wpcopy_end(); return VM_FAULT_OOM; } /** * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE * writeable once the page is prepared * * @vmf: structure describing the fault * * This function handles all that is needed to finish a write page fault in a * shared mapping due to PTE being read-only once the mapped page is prepared. * It handles locking of PTE and modifying it. * * The function expects the page to be locked or other protection against * concurrent faults / writeback (such as DAX radix tree locks). * * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before * we acquired PTE lock. */ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) { WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (!vmf->pte) return VM_FAULT_NOPAGE; /* * We might have raced with another page fault while we released the * pte_offset_map_lock. */ if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) { update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); pte_unmap_unlock(vmf->pte, vmf->ptl); return VM_FAULT_NOPAGE; } wp_page_reuse(vmf); return 0; } /* * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED * mapping */ static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { vm_fault_t ret; pte_unmap_unlock(vmf->pte, vmf->ptl); if (vmf->flags & FAULT_FLAG_VMA_LOCK) { vma_end_read(vmf->vma); return VM_FAULT_RETRY; } vmf->flags |= FAULT_FLAG_MKWRITE; ret = vma->vm_ops->pfn_mkwrite(vmf); if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)) return ret; return finish_mkwrite_fault(vmf); } wp_page_reuse(vmf); return 0; } static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio) __releases(vmf->ptl) { struct vm_area_struct *vma = vmf->vma; vm_fault_t ret = 0; folio_get(folio); if (vma->vm_ops && vma->vm_ops->page_mkwrite) { vm_fault_t tmp; pte_unmap_unlock(vmf->pte, vmf->ptl); if (vmf->flags & FAULT_FLAG_VMA_LOCK) { folio_put(folio); vma_end_read(vmf->vma); return VM_FAULT_RETRY; } tmp = do_page_mkwrite(vmf, folio); if (unlikely(!tmp || (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { folio_put(folio); return tmp; } tmp = finish_mkwrite_fault(vmf); if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { folio_unlock(folio); folio_put(folio); return tmp; } } else { wp_page_reuse(vmf); folio_lock(folio); } ret |= fault_dirty_shared_page(vmf); folio_put(folio); return ret; } /* * This routine handles present pages, when * * users try to write to a shared page (FAULT_FLAG_WRITE) * * GUP wants to take a R/O pin on a possibly shared anonymous page * (FAULT_FLAG_UNSHARE) * * It is done by copying the page to a new address and decrementing the * shared-page counter for the old page. * * Note that this routine assumes that the protection checks have been * done by the caller (the low-level page fault routine in most cases). * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've * done any necessary COW. 
* * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even * though the page will change only once the write actually happens. This * avoids a few races, and potentially makes it more efficient. * * We enter with non-exclusive mmap_lock (to exclude vma changes, * but allow concurrent faults), with pte both mapped and locked. * We return with mmap_lock still held, but pte unmapped and unlocked. */ static vm_fault_t do_wp_page(struct vm_fault *vmf) __releases(vmf->ptl) { const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; struct vm_area_struct *vma = vmf->vma; struct folio *folio = NULL; if (likely(!unshare)) { if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); return handle_userfault(vmf, VM_UFFD_WP); } /* * Userfaultfd write-protect can defer flushes. Ensure the TLB * is flushed in this case before copying. */ if (unlikely(userfaultfd_wp(vmf->vma) && mm_tlb_flush_pending(vmf->vma->vm_mm))) flush_tlb_page(vmf->vma, vmf->address); } vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); if (vmf->page) folio = page_folio(vmf->page); /* * Shared mapping: we are guaranteed to have VM_WRITE and * FAULT_FLAG_WRITE set at this point. */ if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { /* * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a * VM_PFNMAP VMA. * * We should not cow pages in a shared writeable mapping. * Just mark the pages writable and/or call ops->pfn_mkwrite. */ if (!vmf->page) return wp_pfn_shared(vmf); return wp_page_shared(vmf, folio); } /* * Private mapping: create an exclusive anonymous page copy if reuse * is impossible. We might miss VM_WRITE for FOLL_FORCE handling. */ if (folio && folio_test_anon(folio)) { /* * If the page is exclusive to this process we must reuse the * page without further checks. */ if (PageAnonExclusive(vmf->page)) goto reuse; /* * We have to verify under folio lock: these early checks are * just an optimization to avoid locking the folio and freeing * the swapcache if there is little hope that we can reuse. * * KSM doesn't necessarily raise the folio refcount. */ if (folio_test_ksm(folio) || folio_ref_count(folio) > 3) goto copy; if (!folio_test_lru(folio)) /* * We cannot easily detect+handle references from * remote LRU caches or references to LRU folios. */ lru_add_drain(); if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio)) goto copy; if (!folio_trylock(folio)) goto copy; if (folio_test_swapcache(folio)) folio_free_swap(folio); if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) { folio_unlock(folio); goto copy; } /* * Ok, we've got the only folio reference from our mapping * and the folio is locked, it's dark out, and we're wearing * sunglasses. Hit it. */ page_move_anon_rmap(vmf->page, vma); folio_unlock(folio); reuse: if (unlikely(unshare)) { pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } wp_page_reuse(vmf); return 0; } copy: if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma->anon_vma) { pte_unmap_unlock(vmf->pte, vmf->ptl); vma_end_read(vmf->vma); return VM_FAULT_RETRY; } /* * Ok, we need to copy. Oh, well.. 
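 * Grab our own reference on the source folio before dropping the PTL,
 * so it cannot be freed while wp_page_copy() copies from it.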
*/ if (folio) folio_get(folio); pte_unmap_unlock(vmf->pte, vmf->ptl); #ifdef CONFIG_KSM if (folio && folio_test_ksm(folio)) count_vm_event(COW_KSM); #endif return wp_page_copy(vmf); } static void unmap_mapping_range_vma(struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr, struct zap_details *details) { zap_page_range_single(vma, start_addr, end_addr - start_addr, details); } static inline void unmap_mapping_range_tree(struct rb_root_cached *root, pgoff_t first_index, pgoff_t last_index, struct zap_details *details) { struct vm_area_struct *vma; pgoff_t vba, vea, zba, zea; vma_interval_tree_foreach(vma, root, first_index, last_index) { vba = vma->vm_pgoff; vea = vba + vma_pages(vma) - 1; zba = max(first_index, vba); zea = min(last_index, vea); unmap_mapping_range_vma(vma, ((zba - vba) << PAGE_SHIFT) + vma->vm_start, ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, details); } } /** * unmap_mapping_folio() - Unmap single folio from processes. * @folio: The locked folio to be unmapped. * * Unmap this folio from any userspace process which still has it mmaped. * Typically, for efficiency, the range of nearby pages has already been * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once * truncation or invalidation holds the lock on a folio, it may find that * the page has been remapped again: and then uses unmap_mapping_folio() * to unmap it finally. */ void unmap_mapping_folio(struct folio *folio) { struct address_space *mapping = folio->mapping; struct zap_details details = { }; pgoff_t first_index; pgoff_t last_index; VM_BUG_ON(!folio_test_locked(folio)); first_index = folio->index; last_index = folio_next_index(folio) - 1; details.even_cows = false; details.single_folio = folio; details.zap_flags = ZAP_FLAG_DROP_MARKER; i_mmap_lock_read(mapping); if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) unmap_mapping_range_tree(&mapping->i_mmap, first_index, last_index, &details); i_mmap_unlock_read(mapping); } /** * unmap_mapping_pages() - Unmap pages from processes. * @mapping: The address space containing pages to be unmapped. * @start: Index of first page to be unmapped. * @nr: Number of pages to be unmapped. 0 to unmap to end of file. * @even_cows: Whether to unmap even private COWed pages. * * Unmap the pages in this address space from any userspace process which * has them mmaped. Generally, you want to remove COWed pages as well when * a file is being truncated, but not when invalidating pages from the page * cache. */ void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t nr, bool even_cows) { struct zap_details details = { }; pgoff_t first_index = start; pgoff_t last_index = start + nr - 1; details.even_cows = even_cows; if (last_index < first_index) last_index = ULONG_MAX; i_mmap_lock_read(mapping); if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) unmap_mapping_range_tree(&mapping->i_mmap, first_index, last_index, &details); i_mmap_unlock_read(mapping); } EXPORT_SYMBOL_GPL(unmap_mapping_pages); /** * unmap_mapping_range - unmap the portion of all mmaps in the specified * address_space corresponding to the specified byte range in the underlying * file. * * @mapping: the address space containing mmaps to be unmapped. * @holebegin: byte in first page to unmap, relative to the start of * the underlying file. This will be rounded down to a PAGE_SIZE * boundary. Note that this is different from truncate_pagecache(), which * must keep the partial page. In contrast, we must get rid of * partial pages. 
* @holelen: size of prospective hole in bytes. This will be rounded * up to a PAGE_SIZE boundary. A holelen of zero truncates to the * end of the file. * @even_cows: 1 when truncating a file, unmap even private COWed pages; * but 0 when invalidating pagecache, don't throw away private data. */ void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows) { pgoff_t hba = holebegin >> PAGE_SHIFT; pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; /* Check for overflow. */ if (sizeof(holelen) > sizeof(hlen)) { long long holeend = (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; if (holeend & ~(long long)ULONG_MAX) hlen = ULONG_MAX - hba + 1; } unmap_mapping_pages(mapping, hba, hlen, even_cows); } EXPORT_SYMBOL(unmap_mapping_range); /* * Restore a potential device exclusive pte to a working pte entry */ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) { struct folio *folio = page_folio(vmf->page); struct vm_area_struct *vma = vmf->vma; struct mmu_notifier_range range; vm_fault_t ret; /* * We need a reference to lock the folio because we don't hold * the PTL so a racing thread can remove the device-exclusive * entry and unmap it. If the folio is free the entry must * have been removed already. If it happens to have already * been re-allocated after being freed all we do is lock and * unlock it. */ if (!folio_try_get(folio)) return 0; ret = folio_lock_or_retry(folio, vmf); if (ret) { folio_put(folio); return ret; } mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma->vm_mm, vmf->address & PAGE_MASK, (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL); mmu_notifier_invalidate_range_start(&range); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte); if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); folio_unlock(folio); folio_put(folio); mmu_notifier_invalidate_range_end(&range); return 0; } static inline bool should_try_to_free_swap(struct folio *folio, struct vm_area_struct *vma, unsigned int fault_flags) { if (!folio_test_swapcache(folio)) return false; if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) || folio_test_mlocked(folio)) return true; /* * If we want to map a page that's in the swapcache writable, we * have to detect via the refcount if we're really the exclusive * user. Try freeing the swapcache to get rid of the swapcache * reference only in case it's likely that we'll be the exlusive user. */ return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) && folio_ref_count(folio) == 2; } static vm_fault_t pte_marker_clear(struct vm_fault *vmf) { vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (!vmf->pte) return 0; /* * Be careful so that we will only recover a special uffd-wp pte into a * none pte. Otherwise it means the pte could have changed, so retry. * * This should also cover the case where e.g. the pte changed * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED. * So is_pte_marker() check is not enough to safely drop the pte. 
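 * Hence the pte_same() check below: the marker is only cleared when the
 * current pte is still bit-for-bit identical to the one we sampled in
 * orig_pte.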
*/ if (pte_same(vmf->orig_pte, ptep_get(vmf->pte))) pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } static vm_fault_t do_pte_missing(struct vm_fault *vmf) { if (vma_is_anonymous(vmf->vma)) return do_anonymous_page(vmf); else return do_fault(vmf); } /* * This is actually a page-missing access, but with uffd-wp special pte * installed. It means this pte was wr-protected before being unmapped. */ static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf) { /* * Just in case there're leftover special ptes even after the region * got unregistered - we can simply clear them. */ if (unlikely(!userfaultfd_wp(vmf->vma))) return pte_marker_clear(vmf); return do_pte_missing(vmf); } static vm_fault_t handle_pte_marker(struct vm_fault *vmf) { swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte); unsigned long marker = pte_marker_get(entry); /* * PTE markers should never be empty. If anything weird happened, * the best thing to do is to kill the process along with its mm. */ if (WARN_ON_ONCE(!marker)) return VM_FAULT_SIGBUS; /* Higher priority than uffd-wp when data corrupted */ if (marker & PTE_MARKER_POISONED) return VM_FAULT_HWPOISON; if (pte_marker_entry_uffd_wp(entry)) return pte_marker_handle_uffd_wp(vmf); /* This is an unknown pte marker */ return VM_FAULT_SIGBUS; } /* * We enter with non-exclusive mmap_lock (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. * We return with pte unmapped and unlocked. * * We return with the mmap_lock locked or unlocked in the same cases * as does filemap_fault(). */ vm_fault_t do_swap_page(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct folio *swapcache, *folio = NULL; struct page *page; struct swap_info_struct *si = NULL; rmap_t rmap_flags = RMAP_NONE; bool exclusive = false; swp_entry_t entry; pte_t pte; vm_fault_t ret = 0; void *shadow = NULL; if (!pte_unmap_same(vmf)) goto out; entry = pte_to_swp_entry(vmf->orig_pte); if (unlikely(non_swap_entry(entry))) { if (is_migration_entry(entry)) { migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address); } else if (is_device_exclusive_entry(entry)) { vmf->page = pfn_swap_entry_to_page(entry); ret = remove_device_exclusive_entry(vmf); } else if (is_device_private_entry(entry)) { if (vmf->flags & FAULT_FLAG_VMA_LOCK) { /* * migrate_to_ram is not yet ready to operate * under VMA lock. */ vma_end_read(vma); ret = VM_FAULT_RETRY; goto out; } vmf->page = pfn_swap_entry_to_page(entry); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) goto unlock; /* * Get a page reference while we know the page can't be * freed. */ get_page(vmf->page); pte_unmap_unlock(vmf->pte, vmf->ptl); ret = vmf->page->pgmap->ops->migrate_to_ram(vmf); put_page(vmf->page); } else if (is_hwpoison_entry(entry)) { ret = VM_FAULT_HWPOISON; } else if (is_pte_marker_entry(entry)) { ret = handle_pte_marker(vmf); } else { print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); ret = VM_FAULT_SIGBUS; } goto out; } /* Prevent swapoff from happening to us. 
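 * get_swap_device() pins the swap device behind this entry; once si is
 * set, every return path below pairs it with put_swap_device().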
*/ si = get_swap_device(entry); if (unlikely(!si)) goto out; folio = swap_cache_get_folio(entry, vma, vmf->address); if (folio) page = folio_file_page(folio, swp_offset(entry)); swapcache = folio; if (!folio) { if (data_race(si->flags & SWP_SYNCHRONOUS_IO) && __swap_count(entry) == 1) { /* skip swapcache */ folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address, false); page = &folio->page; if (folio) { __folio_set_locked(folio); __folio_set_swapbacked(folio); if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, GFP_KERNEL, entry)) { ret = VM_FAULT_OOM; goto out_page; } mem_cgroup_swapin_uncharge_swap(entry); shadow = get_shadow_from_swap_cache(entry); if (shadow) workingset_refault(folio, shadow); folio_add_lru(folio); /* To provide entry to swap_readpage() */ folio->swap = entry; swap_readpage(page, true, NULL); folio->private = NULL; } } else { page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf); if (page) folio = page_folio(page); swapcache = folio; } if (!folio) { /* * Back out if somebody else faulted in this pte * while we released the pte lock. */ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) ret = VM_FAULT_OOM; goto unlock; } /* Had to read the page from swap area: Major fault */ ret = VM_FAULT_MAJOR; count_vm_event(PGMAJFAULT); count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); } else if (PageHWPoison(page)) { /* * hwpoisoned dirty swapcache pages are kept for killing * owner processes (which may be unknown at hwpoison time) */ ret = VM_FAULT_HWPOISON; goto out_release; } ret |= folio_lock_or_retry(folio, vmf); if (ret & VM_FAULT_RETRY) goto out_release; if (swapcache) { /* * Make sure folio_free_swap() or swapoff did not release the * swapcache from under us. The page pin, and pte_same test * below, are not enough to exclude that. Even if it is still * swapcache, we need to check that the page's swap has not * changed. */ if (unlikely(!folio_test_swapcache(folio) || page_swap_entry(page).val != entry.val)) goto out_page; /* * KSM sometimes has to copy on read faults, for example, if * page->index of !PageKSM() pages would be nonlinear inside the * anon VMA -- PageKSM() is lost on actual swapout. */ page = ksm_might_need_to_copy(page, vma, vmf->address); if (unlikely(!page)) { ret = VM_FAULT_OOM; goto out_page; } else if (unlikely(PTR_ERR(page) == -EHWPOISON)) { ret = VM_FAULT_HWPOISON; goto out_page; } folio = page_folio(page); /* * If we want to map a page that's in the swapcache writable, we * have to detect via the refcount if we're really the exclusive * owner. Try removing the extra reference from the local LRU * caches if required. */ if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache && !folio_test_ksm(folio) && !folio_test_lru(folio)) lru_add_drain(); } folio_throttle_swaprate(folio, GFP_KERNEL); /* * Back out if somebody else already faulted in this pte. */ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) goto out_nomap; if (unlikely(!folio_test_uptodate(folio))) { ret = VM_FAULT_SIGBUS; goto out_nomap; } /* * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte * must never point at an anonymous page in the swapcache that is * PG_anon_exclusive. Sanity check that this holds and especially, that * no filesystem set PG_mappedtodisk on a page in the swapcache. 
Sanity * check after taking the PT lock and making sure that nobody * concurrently faulted in this page and set PG_anon_exclusive. */ BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio)); BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page)); /* * Check under PT lock (to protect against concurrent fork() sharing * the swap entry concurrently) for certainly exclusive pages. */ if (!folio_test_ksm(folio)) { exclusive = pte_swp_exclusive(vmf->orig_pte); if (folio != swapcache) { /* * We have a fresh page that is not exposed to the * swapcache -> certainly exclusive. */ exclusive = true; } else if (exclusive && folio_test_writeback(folio) && data_race(si->flags & SWP_STABLE_WRITES)) { /* * This is tricky: not all swap backends support * concurrent page modifications while under writeback. * * So if we stumble over such a page in the swapcache * we must not set the page exclusive, otherwise we can * map it writable without further checks and modify it * while still under writeback. * * For these problematic swap backends, simply drop the * exclusive marker: this is perfectly fine as we start * writeback only if we fully unmapped the page and * there are no unexpected references on the page after * unmapping succeeded. After fully unmapped, no * further GUP references (FOLL_GET and FOLL_PIN) can * appear, so dropping the exclusive marker and mapping * it only R/O is fine. */ exclusive = false; } } /* * Some architectures may have to restore extra metadata to the page * when reading from swap. This metadata may be indexed by swap entry * so this must be called before swap_free(). */ arch_swap_restore(entry, folio); /* * Remove the swap entry and conditionally try to free up the swapcache. * We're already holding a reference on the page but haven't mapped it * yet. */ swap_free(entry); if (should_try_to_free_swap(folio, vma, vmf->flags)) folio_free_swap(folio); inc_mm_counter(vma->vm_mm, MM_ANONPAGES); dec_mm_counter(vma->vm_mm, MM_SWAPENTS); pte = mk_pte(page, vma->vm_page_prot); /* * Same logic as in do_wp_page(); however, optimize for pages that are * certainly not shared either because we just allocated them without * exposing them to the swapcache or because the swap entry indicates * exclusivity. */ if (!folio_test_ksm(folio) && (exclusive || folio_ref_count(folio) == 1)) { if (vmf->flags & FAULT_FLAG_WRITE) { pte = maybe_mkwrite(pte_mkdirty(pte), vma); vmf->flags &= ~FAULT_FLAG_WRITE; } rmap_flags |= RMAP_EXCLUSIVE; } flush_icache_page(vma, page); if (pte_swp_soft_dirty(vmf->orig_pte)) pte = pte_mksoft_dirty(pte); if (pte_swp_uffd_wp(vmf->orig_pte)) pte = pte_mkuffd_wp(pte); vmf->orig_pte = pte; /* ksm created a completely new copy */ if (unlikely(folio != swapcache && swapcache)) { page_add_new_anon_rmap(page, vma, vmf->address); folio_add_lru_vma(folio, vma); } else { page_add_anon_rmap(page, vma, vmf->address, rmap_flags); } VM_BUG_ON(!folio_test_anon(folio) || (pte_write(pte) && !PageAnonExclusive(page))); set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); folio_unlock(folio); if (folio != swapcache && swapcache) { /* * Hold the lock to avoid the swap entry to be reused * until we take the PT lock for the pte_same() check * (to avoid false positives from pte_same). For * further safety release the lock after the swap_free * so that the swap count won't change under a * parallel locked swapcache. 
*/ folio_unlock(swapcache); folio_put(swapcache); } if (vmf->flags & FAULT_FLAG_WRITE) { ret |= do_wp_page(vmf); if (ret & VM_FAULT_ERROR) ret &= VM_FAULT_ERROR; goto out; } /* No need to invalidate - it was non-present before */ update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); unlock: if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); out: if (si) put_swap_device(si); return ret; out_nomap: if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); out_page: folio_unlock(folio); out_release: folio_put(folio); if (folio != swapcache && swapcache) { folio_unlock(swapcache); folio_put(swapcache); } if (si) put_swap_device(si); return ret; } /* * We enter with non-exclusive mmap_lock (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. * We return with mmap_lock still held, but pte unmapped and unlocked. */ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) { bool uffd_wp = vmf_orig_pte_uffd_wp(vmf); struct vm_area_struct *vma = vmf->vma; struct folio *folio; vm_fault_t ret = 0; pte_t entry; /* File mapping without ->vm_ops ? */ if (vma->vm_flags & VM_SHARED) return VM_FAULT_SIGBUS; /* * Use pte_alloc() instead of pte_alloc_map(), so that OOM can * be distinguished from a transient failure of pte_offset_map(). */ if (pte_alloc(vma->vm_mm, vmf->pmd)) return VM_FAULT_OOM; /* Use the zero-page for reads */ if (!(vmf->flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(vma->vm_mm)) { entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), vma->vm_page_prot)); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (!vmf->pte) goto unlock; if (vmf_pte_changed(vmf)) { update_mmu_tlb(vma, vmf->address, vmf->pte); goto unlock; } ret = check_stable_address_space(vma->vm_mm); if (ret) goto unlock; /* Deliver the page fault to userland, check inside PT lock */ if (userfaultfd_missing(vma)) { pte_unmap_unlock(vmf->pte, vmf->ptl); return handle_userfault(vmf, VM_UFFD_MISSING); } goto setpte; } /* Allocate our own private page. */ if (unlikely(anon_vma_prepare(vma))) goto oom; folio = vma_alloc_zeroed_movable_folio(vma, vmf->address); if (!folio) goto oom; if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) goto oom_free_page; folio_throttle_swaprate(folio, GFP_KERNEL); /* * The memory barrier inside __folio_mark_uptodate makes sure that * preceding stores to the page contents become visible before * the set_pte_at() write. 
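 * In other words, any thread that observes the new pte and reads the
 * page must see its zeroed contents rather than stale data.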
*/ __folio_mark_uptodate(folio); entry = mk_pte(&folio->page, vma->vm_page_prot); entry = pte_sw_mkyoung(entry); if (vma->vm_flags & VM_WRITE) entry = pte_mkwrite(pte_mkdirty(entry), vma); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (!vmf->pte) goto release; if (vmf_pte_changed(vmf)) { update_mmu_tlb(vma, vmf->address, vmf->pte); goto release; } ret = check_stable_address_space(vma->vm_mm); if (ret) goto release; /* Deliver the page fault to userland, check inside PT lock */ if (userfaultfd_missing(vma)) { pte_unmap_unlock(vmf->pte, vmf->ptl); folio_put(folio); return handle_userfault(vmf, VM_UFFD_MISSING); } inc_mm_counter(vma->vm_mm, MM_ANONPAGES); folio_add_new_anon_rmap(folio, vma, vmf->address); folio_add_lru_vma(folio, vma); setpte: if (uffd_wp) entry = pte_mkuffd_wp(entry); set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); /* No need to invalidate - it was non-present before */ update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); unlock: if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); return ret; release: folio_put(folio); goto unlock; oom_free_page: folio_put(folio); oom: return VM_FAULT_OOM; } /* * The mmap_lock must have been held on entry, and may have been * released depending on flags and vma->vm_ops->fault() return value. * See filemap_fault() and __lock_page_retry(). */ static vm_fault_t __do_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; vm_fault_t ret; /* * Preallocate pte before we take page_lock because this might lead to * deadlocks for memcg reclaim which waits for pages under writeback: * lock_page(A) * SetPageWriteback(A) * unlock_page(A) * lock_page(B) * lock_page(B) * pte_alloc_one * shrink_page_list * wait_on_page_writeback(A) * SetPageWriteback(B) * unlock_page(B) * # flush A, B to clear the writeback */ if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); if (!vmf->prealloc_pte) return VM_FAULT_OOM; } ret = vma->vm_ops->fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | VM_FAULT_DONE_COW))) return ret; if (unlikely(PageHWPoison(vmf->page))) { struct page *page = vmf->page; vm_fault_t poisonret = VM_FAULT_HWPOISON; if (ret & VM_FAULT_LOCKED) { if (page_mapped(page)) unmap_mapping_pages(page_mapping(page), page->index, 1, false); /* Retry if a clean page was removed from the cache. */ if (invalidate_inode_page(page)) poisonret = VM_FAULT_NOPAGE; unlock_page(page); } put_page(page); vmf->page = NULL; return poisonret; } if (unlikely(!(ret & VM_FAULT_LOCKED))) lock_page(vmf->page); else VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page); return ret; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE static void deposit_prealloc_pte(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); /* * We are going to consume the prealloc table, * count that as nr_ptes. */ mm_inc_nr_ptes(vma->vm_mm); vmf->prealloc_pte = NULL; } vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) { struct vm_area_struct *vma = vmf->vma; bool write = vmf->flags & FAULT_FLAG_WRITE; unsigned long haddr = vmf->address & HPAGE_PMD_MASK; pmd_t entry; vm_fault_t ret = VM_FAULT_FALLBACK; if (!transhuge_vma_suitable(vma, haddr)) return ret; page = compound_head(page); if (compound_order(page) != HPAGE_PMD_ORDER) return ret; /* * Just backoff if any subpage of a THP is corrupted otherwise * the corrupted page may mapped by PMD silently to escape the * check. 
This kind of THP just can be PTE mapped. Access to * the corrupted subpage should trigger SIGBUS as expected. */ if (unlikely(PageHasHWPoisoned(page))) return ret; /* * Archs like ppc64 need additional space to store information * related to pte entry. Use the preallocated table for that. */ if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); if (!vmf->prealloc_pte) return VM_FAULT_OOM; } vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); if (unlikely(!pmd_none(*vmf->pmd))) goto out; flush_icache_pages(vma, page, HPAGE_PMD_NR); entry = mk_huge_pmd(page, vma->vm_page_prot); if (write) entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR); page_add_file_rmap(page, vma, true); /* * deposit and withdraw with pmd lock held */ if (arch_needs_pgtable_deposit()) deposit_prealloc_pte(vmf); set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); update_mmu_cache_pmd(vma, haddr, vmf->pmd); /* fault is handled */ ret = 0; count_vm_event(THP_FILE_MAPPED); out: spin_unlock(vmf->ptl); return ret; } #else vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) { return VM_FAULT_FALLBACK; } #endif /** * set_pte_range - Set a range of PTEs to point to pages in a folio. * @vmf: Fault decription. * @folio: The folio that contains @page. * @page: The first page to create a PTE for. * @nr: The number of PTEs to create. * @addr: The first address to create a PTE for. */ void set_pte_range(struct vm_fault *vmf, struct folio *folio, struct page *page, unsigned int nr, unsigned long addr) { struct vm_area_struct *vma = vmf->vma; bool uffd_wp = vmf_orig_pte_uffd_wp(vmf); bool write = vmf->flags & FAULT_FLAG_WRITE; bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE); pte_t entry; flush_icache_pages(vma, page, nr); entry = mk_pte(page, vma->vm_page_prot); if (prefault && arch_wants_old_prefaulted_pte()) entry = pte_mkold(entry); else entry = pte_sw_mkyoung(entry); if (write) entry = maybe_mkwrite(pte_mkdirty(entry), vma); if (unlikely(uffd_wp)) entry = pte_mkuffd_wp(entry); /* copy-on-write page */ if (write && !(vma->vm_flags & VM_SHARED)) { add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr); VM_BUG_ON_FOLIO(nr != 1, folio); folio_add_new_anon_rmap(folio, vma, addr); folio_add_lru_vma(folio, vma); } else { add_mm_counter(vma->vm_mm, mm_counter_file(page), nr); folio_add_file_rmap_range(folio, page, nr, vma, false); } set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr); /* no need to invalidate: a not-present page won't be cached */ update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr); } static bool vmf_pte_changed(struct vm_fault *vmf) { if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID) return !pte_same(ptep_get(vmf->pte), vmf->orig_pte); return !pte_none(ptep_get(vmf->pte)); } /** * finish_fault - finish page fault once we have prepared the page to fault * * @vmf: structure describing the fault * * This function handles all that is needed to finish a page fault once the * page to fault in is prepared. It handles locking of PTEs, inserts PTE for * given page, adds reverse page mapping, handles memcg charges and LRU * addition. * * The function expects the page to be locked and on success it consumes a * reference of a page being mapped (for the PTE which maps it). * * Return: %0 on success, %VM_FAULT_ code in case of error. */ vm_fault_t finish_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct page *page; vm_fault_t ret; /* Did we COW the page? 
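 * A write fault on a private mapping installs the pre-copied
 * vmf->cow_page; every other case installs the page that ->fault()
 * returned in vmf->page.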
*/ if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) page = vmf->cow_page; else page = vmf->page; /* * check even for read faults because we might have lost our CoWed * page */ if (!(vma->vm_flags & VM_SHARED)) { ret = check_stable_address_space(vma->vm_mm); if (ret) return ret; } if (pmd_none(*vmf->pmd)) { if (PageTransCompound(page)) { ret = do_set_pmd(vmf, page); if (ret != VM_FAULT_FALLBACK) return ret; } if (vmf->prealloc_pte) pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) return VM_FAULT_OOM; } vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (!vmf->pte) return VM_FAULT_NOPAGE; /* Re-check under ptl */ if (likely(!vmf_pte_changed(vmf))) { struct folio *folio = page_folio(page); set_pte_range(vmf, folio, page, 1, vmf->address); ret = 0; } else { update_mmu_tlb(vma, vmf->address, vmf->pte); ret = VM_FAULT_NOPAGE; } pte_unmap_unlock(vmf->pte, vmf->ptl); return ret; } static unsigned long fault_around_pages __read_mostly = 65536 >> PAGE_SHIFT; #ifdef CONFIG_DEBUG_FS static int fault_around_bytes_get(void *data, u64 *val) { *val = fault_around_pages << PAGE_SHIFT; return 0; } /* * fault_around_bytes must be rounded down to the nearest page order as it's * what do_fault_around() expects to see. */ static int fault_around_bytes_set(void *data, u64 val) { if (val / PAGE_SIZE > PTRS_PER_PTE) return -EINVAL; /* * The minimum value is 1 page, however this results in no fault-around * at all. See should_fault_around(). */ fault_around_pages = max(rounddown_pow_of_two(val) >> PAGE_SHIFT, 1UL); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops, fault_around_bytes_get, fault_around_bytes_set, "%llu\n"); static int __init fault_around_debugfs(void) { debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL, &fault_around_bytes_fops); return 0; } late_initcall(fault_around_debugfs); #endif /* * do_fault_around() tries to map few pages around the fault address. The hope * is that the pages will be needed soon and this will lower the number of * faults to handle. * * It uses vm_ops->map_pages() to map the pages, which skips the page if it's * not ready to be mapped: not up-to-date, locked, etc. * * This function doesn't cross VMA or page table boundaries, in order to call * map_pages() and acquire a PTE lock only once. * * fault_around_pages defines how many pages we'll try to map. * do_fault_around() expects it to be set to a power of two less than or equal * to PTRS_PER_PTE. * * The virtual address of the area that we map is naturally aligned to * fault_around_pages * PAGE_SIZE rounded down to the machine page size * (and therefore to page order). This way it's easier to guarantee * that we don't cross page table boundaries. */ static vm_fault_t do_fault_around(struct vm_fault *vmf) { pgoff_t nr_pages = READ_ONCE(fault_around_pages); pgoff_t pte_off = pte_index(vmf->address); /* The page offset of vmf->address within the VMA. */ pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; pgoff_t from_pte, to_pte; vm_fault_t ret; /* The PTE offset of the start address, clamped to the VMA. */ from_pte = max(ALIGN_DOWN(pte_off, nr_pages), pte_off - min(pte_off, vma_off)); /* The PTE offset of the end address, clamped to the VMA and PTE. 
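 * As an illustrative example with 4 KiB pages and the default of 16
 * fault-around pages: a fault at pte_off == 35 well inside a large VMA
 * gives from_pte == 32 and to_pte == 47, i.e. one aligned 16-page
 * window around the faulting address.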
*/ to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE, pte_off + vma_pages(vmf->vma) - vma_off) - 1; if (pmd_none(*vmf->pmd)) { vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); if (!vmf->prealloc_pte) return VM_FAULT_OOM; } rcu_read_lock(); ret = vmf->vma->vm_ops->map_pages(vmf, vmf->pgoff + from_pte - pte_off, vmf->pgoff + to_pte - pte_off); rcu_read_unlock(); return ret; } /* Return true if we should do read fault-around, false otherwise */ static inline bool should_fault_around(struct vm_fault *vmf) { /* No ->map_pages? No way to fault around... */ if (!vmf->vma->vm_ops->map_pages) return false; if (uffd_disable_fault_around(vmf->vma)) return false; /* A single page implies no faulting 'around' at all. */ return fault_around_pages > 1; } static vm_fault_t do_read_fault(struct vm_fault *vmf) { vm_fault_t ret = 0; struct folio *folio; /* * Let's call ->map_pages() first and use ->fault() as fallback * if page by the offset is not ready to be mapped (cold cache or * something). */ if (should_fault_around(vmf)) { ret = do_fault_around(vmf); if (ret) return ret; } if (vmf->flags & FAULT_FLAG_VMA_LOCK) { vma_end_read(vmf->vma); return VM_FAULT_RETRY; } ret = __do_fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; ret |= finish_fault(vmf); folio = page_folio(vmf->page); folio_unlock(folio); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) folio_put(folio); return ret; } static vm_fault_t do_cow_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; vm_fault_t ret; if (vmf->flags & FAULT_FLAG_VMA_LOCK) { vma_end_read(vma); return VM_FAULT_RETRY; } if (unlikely(anon_vma_prepare(vma))) return VM_FAULT_OOM; vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); if (!vmf->cow_page) return VM_FAULT_OOM; if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm, GFP_KERNEL)) { put_page(vmf->cow_page); return VM_FAULT_OOM; } folio_throttle_swaprate(page_folio(vmf->cow_page), GFP_KERNEL); ret = __do_fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) goto uncharge_out; if (ret & VM_FAULT_DONE_COW) return ret; copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); __SetPageUptodate(vmf->cow_page); ret |= finish_fault(vmf); unlock_page(vmf->page); put_page(vmf->page); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) goto uncharge_out; return ret; uncharge_out: put_page(vmf->cow_page); return ret; } static vm_fault_t do_shared_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; vm_fault_t ret, tmp; struct folio *folio; if (vmf->flags & FAULT_FLAG_VMA_LOCK) { vma_end_read(vma); return VM_FAULT_RETRY; } ret = __do_fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; folio = page_folio(vmf->page); /* * Check if the backing address space wants to know that the page is * about to become writable */ if (vma->vm_ops->page_mkwrite) { folio_unlock(folio); tmp = do_page_mkwrite(vmf, folio); if (unlikely(!tmp || (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { folio_put(folio); return tmp; } } ret |= finish_fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) { folio_unlock(folio); folio_put(folio); return ret; } ret |= fault_dirty_shared_page(vmf); return ret; } /* * We enter with non-exclusive mmap_lock (to exclude vma changes, * but allow concurrent faults). * The mmap_lock may have been released depending on flags and our * return value. 
See filemap_fault() and __folio_lock_or_retry(). * If mmap_lock is released, vma may become invalid (for example * by other thread calling munmap()). */ static vm_fault_t do_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct mm_struct *vm_mm = vma->vm_mm; vm_fault_t ret; /* * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */ if (!vma->vm_ops->fault) { vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte)) ret = VM_FAULT_SIGBUS; else { /* * Make sure this is not a temporary clearing of pte * by holding ptl and checking again. A R/M/W update * of pte involves: take ptl, clearing the pte so that * we don't have concurrent modification by hardware * followed by an update. */ if (unlikely(pte_none(ptep_get(vmf->pte)))) ret = VM_FAULT_SIGBUS; else ret = VM_FAULT_NOPAGE; pte_unmap_unlock(vmf->pte, vmf->ptl); } } else if (!(vmf->flags & FAULT_FLAG_WRITE)) ret = do_read_fault(vmf); else if (!(vma->vm_flags & VM_SHARED)) ret = do_cow_fault(vmf); else ret = do_shared_fault(vmf); /* preallocated pagetable is unused: free it */ if (vmf->prealloc_pte) { pte_free(vm_mm, vmf->prealloc_pte); vmf->prealloc_pte = NULL; } return ret; } int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, unsigned long addr, int page_nid, int *flags) { get_page(page); /* Record the current PID acceesing VMA */ vma_set_access_pid_bit(vma); count_vm_numa_event(NUMA_HINT_FAULTS); if (page_nid == numa_node_id()) { count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); *flags |= TNF_FAULT_LOCAL; } return mpol_misplaced(page, vma, addr); } static vm_fault_t do_numa_page(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct page *page = NULL; int page_nid = NUMA_NO_NODE; bool writable = false; int last_cpupid; int target_nid; pte_t pte, old_pte; int flags = 0; /* * The "pte" at this point cannot be used safely without * validation through pte_unmap_same(). It's of NUMA type but * the pfn may be screwed if the read is non atomic. */ spin_lock(vmf->ptl); if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); goto out; } /* Get the normal PTE */ old_pte = ptep_get(vmf->pte); pte = pte_modify(old_pte, vma->vm_page_prot); /* * Detect now whether the PTE could be writable; this information * is only valid while holding the PT lock. */ writable = pte_write(pte); if (!writable && vma_wants_manual_pte_write_upgrade(vma) && can_change_pte_writable(vma, vmf->address, pte)) writable = true; page = vm_normal_page(vma, vmf->address, pte); if (!page || is_zone_device_page(page)) goto out_map; /* TODO: handle PTE-mapped THP */ if (PageCompound(page)) goto out_map; /* * Avoid grouping on RO pages in general. RO pages shouldn't hurt as * much anyway since they can be in shared cache state. This misses * the case where a mapping is writable but the process never writes * to it but pte_write gets cleared during protection updates and * pte_dirty has unpredictable behaviour between PTE scan updates, * background writeback, dirty balancing and application behaviour. */ if (!writable) flags |= TNF_NO_GROUP; /* * Flag if the page is shared between multiple address spaces. This * is later used when determining whether to group tasks together */ if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED)) flags |= TNF_SHARED; page_nid = page_to_nid(page); /* * For memory tiering mode, cpupid of slow memory page is used * to record page access time. So use default value. 
*/ if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) && !node_is_toptier(page_nid)) last_cpupid = (-1 & LAST_CPUPID_MASK); else last_cpupid = page_cpupid_last(page); target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, &flags); if (target_nid == NUMA_NO_NODE) { put_page(page); goto out_map; } pte_unmap_unlock(vmf->pte, vmf->ptl); writable = false; /* Migrate to the requested node */ if (migrate_misplaced_page(page, vma, target_nid)) { page_nid = target_nid; flags |= TNF_MIGRATED; } else { flags |= TNF_MIGRATE_FAIL; vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte)) goto out; if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); goto out; } goto out_map; } out: if (page_nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, page_nid, 1, flags); return 0; out_map: /* * Make it present again, depending on how arch implements * non-accessible ptes, some can allow access by kernel mode. */ old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); pte = pte_modify(old_pte, vma->vm_page_prot); pte = pte_mkyoung(pte); if (writable) pte = pte_mkwrite(pte, vma); ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); pte_unmap_unlock(vmf->pte, vmf->ptl); goto out; } static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; if (vma_is_anonymous(vma)) return do_huge_pmd_anonymous_page(vmf); if (vma->vm_ops->huge_fault) return vma->vm_ops->huge_fault(vmf, PMD_ORDER); return VM_FAULT_FALLBACK; } /* `inline' is required to avoid gcc 4.1.2 build error */ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; vm_fault_t ret; if (vma_is_anonymous(vma)) { if (likely(!unshare) && userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) return handle_userfault(vmf, VM_UFFD_WP); return do_huge_pmd_wp_page(vmf); } if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { if (vma->vm_ops->huge_fault) { ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER); if (!(ret & VM_FAULT_FALLBACK)) return ret; } } /* COW or write-notify handled on pte level: split pmd. 
*/ __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); return VM_FAULT_FALLBACK; } static vm_fault_t create_huge_pud(struct vm_fault *vmf) { #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) struct vm_area_struct *vma = vmf->vma; /* No support for anonymous transparent PUD pages yet */ if (vma_is_anonymous(vma)) return VM_FAULT_FALLBACK; if (vma->vm_ops->huge_fault) return vma->vm_ops->huge_fault(vmf, PUD_ORDER); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ return VM_FAULT_FALLBACK; } static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) { #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) struct vm_area_struct *vma = vmf->vma; vm_fault_t ret; /* No support for anonymous transparent PUD pages yet */ if (vma_is_anonymous(vma)) goto split; if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { if (vma->vm_ops->huge_fault) { ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER); if (!(ret & VM_FAULT_FALLBACK)) return ret; } } split: /* COW or write-notify not handled on PUD level: split pud.*/ __split_huge_pud(vma, vmf->pud, vmf->address); #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ return VM_FAULT_FALLBACK; } /* * These routines also need to handle stuff like marking pages dirty * and/or accessed for architectures that don't do it in hardware (most * RISC architectures). The early dirtying is also good on the i386. * * There is also a hook called "update_mmu_cache()" that architectures * with external mmu caches can use to update those (ie the Sparc or * PowerPC hashed page tables that act as extended TLBs). * * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow * concurrent faults). * * The mmap_lock may have been released depending on flags and our return value. * See filemap_fault() and __folio_lock_or_retry(). */ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) { pte_t entry; if (unlikely(pmd_none(*vmf->pmd))) { /* * Leave __pte_alloc() until later: because vm_ops->fault may * want to allocate huge page, and if we expose page table * for an instant, it will be difficult to retract from * concurrent faults and from rmap lookups. */ vmf->pte = NULL; vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID; } else { /* * A regular pmd is established and it can't morph into a huge * pmd by anon khugepaged, since that takes mmap_lock in write * mode; but shmem or file collapse to THP could still morph * it into a huge pmd: just retry later if so. 
*/ vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte)) return 0; vmf->orig_pte = ptep_get_lockless(vmf->pte); vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID; if (pte_none(vmf->orig_pte)) { pte_unmap(vmf->pte); vmf->pte = NULL; } } if (!vmf->pte) return do_pte_missing(vmf); if (!pte_present(vmf->orig_pte)) return do_swap_page(vmf); if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) return do_numa_page(vmf); spin_lock(vmf->ptl); entry = vmf->orig_pte; if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) { update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); goto unlock; } if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { if (!pte_write(entry)) return do_wp_page(vmf); else if (likely(vmf->flags & FAULT_FLAG_WRITE)) entry = pte_mkdirty(entry); } entry = pte_mkyoung(entry); if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, vmf->flags & FAULT_FLAG_WRITE)) { update_mmu_cache_range(vmf, vmf->vma, vmf->address, vmf->pte, 1); } else { /* Skip spurious TLB flush for retried page fault */ if (vmf->flags & FAULT_FLAG_TRIED) goto unlock; /* * This is needed only for protection faults but the arch code * is not yet telling us if this is a protection fault or not. * This still avoids useless tlb flushes for .text page faults * with threads. */ if (vmf->flags & FAULT_FLAG_WRITE) flush_tlb_fix_spurious_fault(vmf->vma, vmf->address, vmf->pte); } unlock: pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } /* * On entry, we hold either the VMA lock or the mmap_lock * (FAULT_FLAG_VMA_LOCK tells you which). If VM_FAULT_RETRY is set in * the result, the mmap_lock is not held on exit. See filemap_fault() * and __folio_lock_or_retry(). */ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags) { struct vm_fault vmf = { .vma = vma, .address = address & PAGE_MASK, .real_address = address, .flags = flags, .pgoff = linear_page_index(vma, address), .gfp_mask = __get_fault_gfp_mask(vma), }; struct mm_struct *mm = vma->vm_mm; unsigned long vm_flags = vma->vm_flags; pgd_t *pgd; p4d_t *p4d; vm_fault_t ret; pgd = pgd_offset(mm, address); p4d = p4d_alloc(mm, pgd, address); if (!p4d) return VM_FAULT_OOM; vmf.pud = pud_alloc(mm, p4d, address); if (!vmf.pud) return VM_FAULT_OOM; retry_pud: if (pud_none(*vmf.pud) && hugepage_vma_check(vma, vm_flags, false, true, true)) { ret = create_huge_pud(&vmf); if (!(ret & VM_FAULT_FALLBACK)) return ret; } else { pud_t orig_pud = *vmf.pud; barrier(); if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) { /* * TODO once we support anonymous PUDs: NUMA case and * FAULT_FLAG_UNSHARE handling. */ if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) { ret = wp_huge_pud(&vmf, orig_pud); if (!(ret & VM_FAULT_FALLBACK)) return ret; } else { huge_pud_set_accessed(&vmf, orig_pud); return 0; } } } vmf.pmd = pmd_alloc(mm, vmf.pud, address); if (!vmf.pmd) return VM_FAULT_OOM; /* Huge pud page fault raced with pmd_alloc? 
*/ if (pud_trans_unstable(vmf.pud)) goto retry_pud; if (pmd_none(*vmf.pmd) && hugepage_vma_check(vma, vm_flags, false, true, true)) { ret = create_huge_pmd(&vmf); if (!(ret & VM_FAULT_FALLBACK)) return ret; } else { vmf.orig_pmd = pmdp_get_lockless(vmf.pmd); if (unlikely(is_swap_pmd(vmf.orig_pmd))) { VM_BUG_ON(thp_migration_supported() && !is_pmd_migration_entry(vmf.orig_pmd)); if (is_pmd_migration_entry(vmf.orig_pmd)) pmd_migration_entry_wait(mm, vmf.pmd); return 0; } if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) { if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma)) return do_huge_pmd_numa_page(&vmf); if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) && !pmd_write(vmf.orig_pmd)) { ret = wp_huge_pmd(&vmf); if (!(ret & VM_FAULT_FALLBACK)) return ret; } else { huge_pmd_set_accessed(&vmf); return 0; } } } return handle_pte_fault(&vmf); } /** * mm_account_fault - Do page fault accounting * @mm: mm from which memcg should be extracted. It can be NULL. * @regs: the pt_regs struct pointer. When set to NULL, will skip accounting * of perf event counters, but we'll still do the per-task accounting to * the task who triggered this page fault. * @address: the faulted address. * @flags: the fault flags. * @ret: the fault retcode. * * This will take care of most of the page fault accounting. Meanwhile, it * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should * still be in per-arch page fault handlers at the entry of page fault. */ static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs, unsigned long address, unsigned int flags, vm_fault_t ret) { bool major; /* Incomplete faults will be accounted upon completion. */ if (ret & VM_FAULT_RETRY) return; /* * To preserve the behavior of older kernels, PGFAULT counters record * both successful and failed faults, as opposed to perf counters, * which ignore failed cases. */ count_vm_event(PGFAULT); count_memcg_event_mm(mm, PGFAULT); /* * Do not account for unsuccessful faults (e.g. when the address wasn't * valid). That includes arch_vma_access_permitted() failing before * reaching here. So this is not a "this many hardware page faults" * counter. We should use the hw profiling for that. */ if (ret & VM_FAULT_ERROR) return; /* * We define the fault as a major fault when the final successful fault * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't * handle it immediately previously). */ major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED); if (major) current->maj_flt++; else current->min_flt++; /* * If the fault is done for GUP, regs will be NULL. We only do the * accounting for the per thread fault counters who triggered the * fault, and we skip the perf event updates. 
*/ if (!regs) return; if (major) perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); else perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } #ifdef CONFIG_LRU_GEN static void lru_gen_enter_fault(struct vm_area_struct *vma) { /* the LRU algorithm only applies to accesses with recency */ current->in_lru_fault = vma_has_recency(vma); } static void lru_gen_exit_fault(void) { current->in_lru_fault = false; } #else static void lru_gen_enter_fault(struct vm_area_struct *vma) { } static void lru_gen_exit_fault(void) { } #endif /* CONFIG_LRU_GEN */ static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma, unsigned int *flags) { if (unlikely(*flags & FAULT_FLAG_UNSHARE)) { if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE)) return VM_FAULT_SIGSEGV; /* * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's * just treat it like an ordinary read-fault otherwise. */ if (!is_cow_mapping(vma->vm_flags)) *flags &= ~FAULT_FLAG_UNSHARE; } else if (*flags & FAULT_FLAG_WRITE) { /* Write faults on read-only mappings are impossible ... */ if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE))) return VM_FAULT_SIGSEGV; /* ... and FOLL_FORCE only applies to COW mappings. */ if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) && !is_cow_mapping(vma->vm_flags))) return VM_FAULT_SIGSEGV; } #ifdef CONFIG_PER_VMA_LOCK /* * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of * the assumption that lock is dropped on VM_FAULT_RETRY. */ if (WARN_ON_ONCE((*flags & (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) == (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT))) return VM_FAULT_SIGSEGV; #endif return 0; } /* * By the time we get here, we already hold the mm semaphore * * The mmap_lock may have been released depending on flags and our * return value. See filemap_fault() and __folio_lock_or_retry(). */ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct pt_regs *regs) { /* If the fault handler drops the mmap_lock, vma may be freed */ struct mm_struct *mm = vma->vm_mm; vm_fault_t ret; __set_current_state(TASK_RUNNING); ret = sanitize_fault_flags(vma, &flags); if (ret) goto out; if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, flags & FAULT_FLAG_INSTRUCTION, flags & FAULT_FLAG_REMOTE)) { ret = VM_FAULT_SIGSEGV; goto out; } /* * Enable the memcg OOM handling for faults triggered in user * space. Kernel faults are handled more gracefully. */ if (flags & FAULT_FLAG_USER) mem_cgroup_enter_user_fault(); lru_gen_enter_fault(vma); if (unlikely(is_vm_hugetlb_page(vma))) ret = hugetlb_fault(vma->vm_mm, vma, address, flags); else ret = __handle_mm_fault(vma, address, flags); lru_gen_exit_fault(); if (flags & FAULT_FLAG_USER) { mem_cgroup_exit_user_fault(); /* * The task may have entered a memcg OOM situation but * if the allocation error was handled gracefully (no * VM_FAULT_OOM), there is no need to kill anything. * Just clean up the OOM state peacefully. 
*/ if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)) mem_cgroup_oom_synchronize(false); } out: mm_account_fault(mm, regs, address, flags, ret); return ret; } EXPORT_SYMBOL_GPL(handle_mm_fault); #ifdef CONFIG_LOCK_MM_AND_FIND_VMA #include <linux/extable.h> static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs) { if (likely(mmap_read_trylock(mm))) return true; if (regs && !user_mode(regs)) { unsigned long ip = instruction_pointer(regs); if (!search_exception_tables(ip)) return false; } return !mmap_read_lock_killable(mm); } static inline bool mmap_upgrade_trylock(struct mm_struct *mm) { /* * We don't have this operation yet. * * It should be easy enough to do: it's basically a * atomic_long_try_cmpxchg_acquire() * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but * it also needs the proper lockdep magic etc. */ return false; } static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs) { mmap_read_unlock(mm); if (regs && !user_mode(regs)) { unsigned long ip = instruction_pointer(regs); if (!search_exception_tables(ip)) return false; } return !mmap_write_lock_killable(mm); } /* * Helper for page fault handling. * * This is kind of equivalend to "mmap_read_lock()" followed * by "find_extend_vma()", except it's a lot more careful about * the locking (and will drop the lock on failure). * * For example, if we have a kernel bug that causes a page * fault, we don't want to just use mmap_read_lock() to get * the mm lock, because that would deadlock if the bug were * to happen while we're holding the mm lock for writing. * * So this checks the exception tables on kernel faults in * order to only do this all for instructions that are actually * expected to fault. * * We can also actually take the mm lock for writing if we * need to extend the vma, which helps the VM layer a lot. */ struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, unsigned long addr, struct pt_regs *regs) { struct vm_area_struct *vma; if (!get_mmap_lock_carefully(mm, regs)) return NULL; vma = find_vma(mm, addr); if (likely(vma && (vma->vm_start <= addr))) return vma; /* * Well, dang. We might still be successful, but only * if we can extend a vma to do so. */ if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) { mmap_read_unlock(mm); return NULL; } /* * We can try to upgrade the mmap lock atomically, * in which case we can continue to use the vma * we already looked up. * * Otherwise we'll have to drop the mmap lock and * re-take it, and also look up the vma again, * re-checking it. */ if (!mmap_upgrade_trylock(mm)) { if (!upgrade_mmap_lock_carefully(mm, regs)) return NULL; vma = find_vma(mm, addr); if (!vma) goto fail; if (vma->vm_start <= addr) goto success; if (!(vma->vm_flags & VM_GROWSDOWN)) goto fail; } if (expand_stack_locked(vma, addr)) goto fail; success: mmap_write_downgrade(mm); return vma; fail: mmap_write_unlock(mm); return NULL; } #endif #ifdef CONFIG_PER_VMA_LOCK /* * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be * stable and not isolated. If the VMA is not found or is being modified the * function returns NULL. */ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, unsigned long address) { MA_STATE(mas, &mm->mm_mt, address, address); struct vm_area_struct *vma; rcu_read_lock(); retry: vma = mas_walk(&mas); if (!vma) goto inval; if (!vma_start_read(vma)) goto inval; /* * find_mergeable_anon_vma uses adjacent vmas which are not locked. 
* This check must happen after vma_start_read(); otherwise, a * concurrent mremap() with MREMAP_DONTUNMAP could dissociate the VMA * from its anon_vma. */ if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) goto inval_end_read; /* Check since vm_start/vm_end might change before we lock the VMA */ if (unlikely(address < vma->vm_start || address >= vma->vm_end)) goto inval_end_read; /* Check if the VMA got isolated after we found it */ if (vma->detached) { vma_end_read(vma); count_vm_vma_lock_event(VMA_LOCK_MISS); /* The area was replaced with another one */ goto retry; } rcu_read_unlock(); return vma; inval_end_read: vma_end_read(vma); inval: rcu_read_unlock(); count_vm_vma_lock_event(VMA_LOCK_ABORT); return NULL; } #endif /* CONFIG_PER_VMA_LOCK */ #ifndef __PAGETABLE_P4D_FOLDED /* * Allocate p4d page table. * We've already handled the fast-path in-line. */ int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) { p4d_t *new = p4d_alloc_one(mm, address); if (!new) return -ENOMEM; spin_lock(&mm->page_table_lock); if (pgd_present(*pgd)) { /* Another has populated it */ p4d_free(mm, new); } else { smp_wmb(); /* See comment in pmd_install() */ pgd_populate(mm, pgd, new); } spin_unlock(&mm->page_table_lock); return 0; } #endif /* __PAGETABLE_P4D_FOLDED */ #ifndef __PAGETABLE_PUD_FOLDED /* * Allocate page upper directory. * We've already handled the fast-path in-line. */ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address) { pud_t *new = pud_alloc_one(mm, address); if (!new) return -ENOMEM; spin_lock(&mm->page_table_lock); if (!p4d_present(*p4d)) { mm_inc_nr_puds(mm); smp_wmb(); /* See comment in pmd_install() */ p4d_populate(mm, p4d, new); } else /* Another has populated it */ pud_free(mm, new); spin_unlock(&mm->page_table_lock); return 0; } #endif /* __PAGETABLE_PUD_FOLDED */ #ifndef __PAGETABLE_PMD_FOLDED /* * Allocate page middle directory. * We've already handled the fast-path in-line. */ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) { spinlock_t *ptl; pmd_t *new = pmd_alloc_one(mm, address); if (!new) return -ENOMEM; ptl = pud_lock(mm, pud); if (!pud_present(*pud)) { mm_inc_nr_pmds(mm); smp_wmb(); /* See comment in pmd_install() */ pud_populate(mm, pud, new); } else { /* Another has populated it */ pmd_free(mm, new); } spin_unlock(ptl); return 0; } #endif /* __PAGETABLE_PMD_FOLDED */ /** * follow_pte - look up PTE at a user virtual address * @mm: the mm_struct of the target address space * @address: user virtual address * @ptepp: location to store found PTE * @ptlp: location to store the lock for the PTE * * On a successful return, the pointer to the PTE is stored in @ptepp; * the corresponding lock is taken and its location is stored in @ptlp. * The contents of the PTE are only stable until @ptlp is released; * any further use, if any, must be protected against invalidation * with MMU notifiers. * * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore * should be taken for read. * * KVM uses this function. While it is arguably less bad than ``follow_pfn``, * it is not a good general-purpose API. * * Return: zero on success, -ve otherwise. 
*/ int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, spinlock_t **ptlp) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *ptep; pgd = pgd_offset(mm, address); if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) goto out; p4d = p4d_offset(pgd, address); if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) goto out; pud = pud_offset(p4d, address); if (pud_none(*pud) || unlikely(pud_bad(*pud))) goto out; pmd = pmd_offset(pud, address); VM_BUG_ON(pmd_trans_huge(*pmd)); ptep = pte_offset_map_lock(mm, pmd, address, ptlp); if (!ptep) goto out; if (!pte_present(ptep_get(ptep))) goto unlock; *ptepp = ptep; return 0; unlock: pte_unmap_unlock(ptep, *ptlp); out: return -EINVAL; } EXPORT_SYMBOL_GPL(follow_pte); /** * follow_pfn - look up PFN at a user virtual address * @vma: memory mapping * @address: user virtual address * @pfn: location to store found PFN * * Only IO mappings and raw PFN mappings are allowed. * * This function does not allow the caller to read the permissions * of the PTE. Do not use it. * * Return: zero and the pfn at @pfn on success, -ve otherwise. */ int follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn) { int ret = -EINVAL; spinlock_t *ptl; pte_t *ptep; if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) return ret; ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); if (ret) return ret; *pfn = pte_pfn(ptep_get(ptep)); pte_unmap_unlock(ptep, ptl); return 0; } EXPORT_SYMBOL(follow_pfn); #ifdef CONFIG_HAVE_IOREMAP_PROT int follow_phys(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned long *prot, resource_size_t *phys) { int ret = -EINVAL; pte_t *ptep, pte; spinlock_t *ptl; if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) goto out; if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) goto out; pte = ptep_get(ptep); if ((flags & FOLL_WRITE) && !pte_write(pte)) goto unlock; *prot = pgprot_val(pte_pgprot(pte)); *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; ret = 0; unlock: pte_unmap_unlock(ptep, ptl); out: return ret; } /** * generic_access_phys - generic implementation for iomem mmap access * @vma: the vma to access * @addr: userspace address, not relative offset within @vma * @buf: buffer to read/write * @len: length of transfer * @write: set to FOLL_WRITE when writing, otherwise reading * * This is a generic implementation for &vm_operations_struct.access for an * iomem mapping. This callback is used by access_process_vm() when the @vma is * not page based. 
*/ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) { resource_size_t phys_addr; unsigned long prot = 0; void __iomem *maddr; pte_t *ptep, pte; spinlock_t *ptl; int offset = offset_in_page(addr); int ret = -EINVAL; if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) return -EINVAL; retry: if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) return -EINVAL; pte = ptep_get(ptep); pte_unmap_unlock(ptep, ptl); prot = pgprot_val(pte_pgprot(pte)); phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; if ((write & FOLL_WRITE) && !pte_write(pte)) return -EINVAL; maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); if (!maddr) return -ENOMEM; if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) goto out_unmap; if (!pte_same(pte, ptep_get(ptep))) { pte_unmap_unlock(ptep, ptl); iounmap(maddr); goto retry; } if (write) memcpy_toio(maddr + offset, buf, len); else memcpy_fromio(buf, maddr + offset, len); ret = len; pte_unmap_unlock(ptep, ptl); out_unmap: iounmap(maddr); return ret; } EXPORT_SYMBOL_GPL(generic_access_phys); #endif /* * Access another process' address space as given in mm. */ int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags) { void *old_buf = buf; int write = gup_flags & FOLL_WRITE; if (mmap_read_lock_killable(mm)) return 0; /* Untag the address before looking up the VMA */ addr = untagged_addr_remote(mm, addr); /* Avoid triggering the temporary warning in __get_user_pages */ if (!vma_lookup(mm, addr) && !expand_stack(mm, addr)) return 0; /* ignore errors, just check how much was successfully transferred */ while (len) { int bytes, offset; void *maddr; struct vm_area_struct *vma = NULL; struct page *page = get_user_page_vma_remote(mm, addr, gup_flags, &vma); if (IS_ERR_OR_NULL(page)) { /* We might need to expand the stack to access it */ vma = vma_lookup(mm, addr); if (!vma) { vma = expand_stack(mm, addr); /* mmap_lock was dropped on failure */ if (!vma) return buf - old_buf; /* Try again if stack expansion worked */ continue; } /* * Check if this is a VM_IO | VM_PFNMAP VMA, which * we can access using slightly different code. */ bytes = 0; #ifdef CONFIG_HAVE_IOREMAP_PROT if (vma->vm_ops && vma->vm_ops->access) bytes = vma->vm_ops->access(vma, addr, buf, len, write); #endif if (bytes <= 0) break; } else { bytes = len; offset = addr & (PAGE_SIZE-1); if (bytes > PAGE_SIZE-offset) bytes = PAGE_SIZE-offset; maddr = kmap(page); if (write) { copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes); set_page_dirty_lock(page); } else { copy_from_user_page(vma, page, addr, buf, maddr + offset, bytes); } kunmap(page); put_page(page); } len -= bytes; buf += bytes; addr += bytes; } mmap_read_unlock(mm); return buf - old_buf; } /** * access_remote_vm - access another process' address space * @mm: the mm_struct of the target address space * @addr: start address to access * @buf: source or destination buffer * @len: number of bytes to transfer * @gup_flags: flags modifying lookup behaviour * * The caller must hold a reference on @mm. * * Return: number of bytes copied from source to destination. */ int access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags) { return __access_remote_vm(mm, addr, buf, len, gup_flags); } /* * Access another process' address space. 
* Source/target buffer must be kernel space, * Do not walk the page table directly, use get_user_pages */ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, unsigned int gup_flags) { struct mm_struct *mm; int ret; mm = get_task_mm(tsk); if (!mm) return 0; ret = __access_remote_vm(mm, addr, buf, len, gup_flags); mmput(mm); return ret; } EXPORT_SYMBOL_GPL(access_process_vm); /* * Print the name of a VMA. */ void print_vma_addr(char *prefix, unsigned long ip) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; /* * we might be running from an atomic context so we cannot sleep */ if (!mmap_read_trylock(mm)) return; vma = find_vma(mm, ip); if (vma && vma->vm_file) { struct file *f = vma->vm_file; char *buf = (char *)__get_free_page(GFP_NOWAIT); if (buf) { char *p; p = file_path(f, buf, PAGE_SIZE); if (IS_ERR(p)) p = "?"; printk("%s%s[%lx+%lx]", prefix, kbasename(p), vma->vm_start, vma->vm_end - vma->vm_start); free_page((unsigned long)buf); } } mmap_read_unlock(mm); } #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP) void __might_fault(const char *file, int line) { if (pagefault_disabled()) return; __might_sleep(file, line); #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) if (current->mm) might_lock_read(&current->mm->mmap_lock); #endif } EXPORT_SYMBOL(__might_fault); #endif #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) /* * Process all subpages of the specified huge page with the specified * operation. The target subpage will be processed last to keep its * cache lines hot. */ static inline int process_huge_page( unsigned long addr_hint, unsigned int pages_per_huge_page, int (*process_subpage)(unsigned long addr, int idx, void *arg), void *arg) { int i, n, base, l, ret; unsigned long addr = addr_hint & ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1); /* Process target subpage last to keep its cache lines hot */ might_sleep(); n = (addr_hint - addr) / PAGE_SIZE; if (2 * n <= pages_per_huge_page) { /* If target subpage in first half of huge page */ base = 0; l = n; /* Process subpages at the end of huge page */ for (i = pages_per_huge_page - 1; i >= 2 * n; i--) { cond_resched(); ret = process_subpage(addr + i * PAGE_SIZE, i, arg); if (ret) return ret; } } else { /* If target subpage in second half of huge page */ base = pages_per_huge_page - 2 * (pages_per_huge_page - n); l = pages_per_huge_page - n; /* Process subpages at the begin of huge page */ for (i = 0; i < base; i++) { cond_resched(); ret = process_subpage(addr + i * PAGE_SIZE, i, arg); if (ret) return ret; } } /* * Process remaining subpages in left-right-left-right pattern * towards the target subpage */ for (i = 0; i < l; i++) { int left_idx = base + i; int right_idx = base + 2 * l - 1 - i; cond_resched(); ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg); if (ret) return ret; cond_resched(); ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg); if (ret) return ret; } return 0; } static void clear_gigantic_page(struct page *page, unsigned long addr, unsigned int pages_per_huge_page) { int i; struct page *p; might_sleep(); for (i = 0; i < pages_per_huge_page; i++) { p = nth_page(page, i); cond_resched(); clear_user_highpage(p, addr + i * PAGE_SIZE); } } static int clear_subpage(unsigned long addr, int idx, void *arg) { struct page *page = arg; clear_user_highpage(page + idx, addr); return 0; } void clear_huge_page(struct page *page, unsigned long addr_hint, unsigned int pages_per_huge_page) { unsigned long 
addr = addr_hint & ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1); if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) { clear_gigantic_page(page, addr, pages_per_huge_page); return; } process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page); } static int copy_user_gigantic_page(struct folio *dst, struct folio *src, unsigned long addr, struct vm_area_struct *vma, unsigned int pages_per_huge_page) { int i; struct page *dst_page; struct page *src_page; for (i = 0; i < pages_per_huge_page; i++) { dst_page = folio_page(dst, i); src_page = folio_page(src, i); cond_resched(); if (copy_mc_user_highpage(dst_page, src_page, addr + i*PAGE_SIZE, vma)) { memory_failure_queue(page_to_pfn(src_page), 0); return -EHWPOISON; } } return 0; } struct copy_subpage_arg { struct page *dst; struct page *src; struct vm_area_struct *vma; }; static int copy_subpage(unsigned long addr, int idx, void *arg) { struct copy_subpage_arg *copy_arg = arg; if (copy_mc_user_highpage(copy_arg->dst + idx, copy_arg->src + idx, addr, copy_arg->vma)) { memory_failure_queue(page_to_pfn(copy_arg->src + idx), 0); return -EHWPOISON; } return 0; } int copy_user_large_folio(struct folio *dst, struct folio *src, unsigned long addr_hint, struct vm_area_struct *vma) { unsigned int pages_per_huge_page = folio_nr_pages(dst); unsigned long addr = addr_hint & ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1); struct copy_subpage_arg arg = { .dst = &dst->page, .src = &src->page, .vma = vma, }; if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) return copy_user_gigantic_page(dst, src, addr, vma, pages_per_huge_page); return process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg); } long copy_folio_from_user(struct folio *dst_folio, const void __user *usr_src, bool allow_pagefault) { void *kaddr; unsigned long i, rc = 0; unsigned int nr_pages = folio_nr_pages(dst_folio); unsigned long ret_val = nr_pages * PAGE_SIZE; struct page *subpage; for (i = 0; i < nr_pages; i++) { subpage = folio_page(dst_folio, i); kaddr = kmap_local_page(subpage); if (!allow_pagefault) pagefault_disable(); rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE); if (!allow_pagefault) pagefault_enable(); kunmap_local(kaddr); ret_val -= (PAGE_SIZE - rc); if (rc) break; flush_dcache_page(subpage); cond_resched(); } return ret_val; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS static struct kmem_cache *page_ptl_cachep; void __init ptlock_cache_init(void) { page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0, SLAB_PANIC, NULL); } bool ptlock_alloc(struct ptdesc *ptdesc) { spinlock_t *ptl; ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL); if (!ptl) return false; ptdesc->ptl = ptl; return true; } void ptlock_free(struct ptdesc *ptdesc) { kmem_cache_free(page_ptl_cachep, ptdesc->ptl); } #endif
linux-master
mm/memory.c
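The mm/memory.c record above ends with the fault-accounting path (mm_account_fault()), which bumps current->min_flt or current->maj_flt for each completed fault. Those per-task counters are what userspace sees through getrusage(2). A minimal userspace sketch, not part of the kernel source above, that watches the minor-fault counter grow as freshly mapped anonymous memory is touched for the first time; the mapping size and fill pattern are arbitrary example choices:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage before, after;
	size_t len = 64 * 1024 * 1024;	/* 64 MiB, example size */
	char *p;

	getrusage(RUSAGE_SELF, &before);

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* The first write to each page goes through handle_mm_fault() and
	 * is accounted as a minor fault by mm_account_fault(). */
	memset(p, 0xa5, len);

	getrusage(RUSAGE_SELF, &after);
	printf("minor faults: %ld -> %ld, major faults: %ld -> %ld\n",
	       before.ru_minflt, after.ru_minflt,
	       before.ru_majflt, after.ru_majflt);

	munmap(p, len);
	return 0;
}

Expect the minor-fault delta to be roughly len / page-size here; major faults stay flat because nothing is read back from storage.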
// SPDX-License-Identifier: GPL-2.0-only /* * mm/percpu-vm.c - vmalloc area based chunk allocation * * Copyright (C) 2010 SUSE Linux Products GmbH * Copyright (C) 2010 Tejun Heo <[email protected]> * * Chunks are mapped into vmalloc areas and populated page by page. * This is the default chunk allocator. */ #include "internal.h" static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk, unsigned int cpu, int page_idx) { /* must not be used on pre-mapped chunk */ WARN_ON(chunk->immutable); return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx)); } /** * pcpu_get_pages - get temp pages array * * Returns pointer to array of pointers to struct page which can be indexed * with pcpu_page_idx(). Note that there is only one array and accesses * should be serialized by pcpu_alloc_mutex. * * RETURNS: * Pointer to temp pages array on success. */ static struct page **pcpu_get_pages(void) { static struct page **pages; size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); lockdep_assert_held(&pcpu_alloc_mutex); if (!pages) pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL); return pages; } /** * pcpu_free_pages - free pages which were allocated for @chunk * @chunk: chunk pages were allocated for * @pages: array of pages to be freed, indexed by pcpu_page_idx() * @page_start: page index of the first page to be freed * @page_end: page index of the last page to be freed + 1 * * Free pages [@page_start and @page_end) in @pages for all units. * The pages were allocated for @chunk. */ static void pcpu_free_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) { unsigned int cpu; int i; for_each_possible_cpu(cpu) { for (i = page_start; i < page_end; i++) { struct page *page = pages[pcpu_page_idx(cpu, i)]; if (page) __free_page(page); } } } /** * pcpu_alloc_pages - allocates pages for @chunk * @chunk: target chunk * @pages: array to put the allocated pages into, indexed by pcpu_page_idx() * @page_start: page index of the first page to be allocated * @page_end: page index of the last page to be allocated + 1 * @gfp: allocation flags passed to the underlying allocator * * Allocate pages [@page_start,@page_end) into @pages for all units. * The allocation is for @chunk. Percpu core doesn't care about the * content of @pages and will pass it verbatim to pcpu_map_pages(). */ static int pcpu_alloc_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end, gfp_t gfp) { unsigned int cpu, tcpu; int i; gfp |= __GFP_HIGHMEM; for_each_possible_cpu(cpu) { for (i = page_start; i < page_end; i++) { struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); if (!*pagep) goto err; } } return 0; err: while (--i >= page_start) __free_page(pages[pcpu_page_idx(cpu, i)]); for_each_possible_cpu(tcpu) { if (tcpu == cpu) break; for (i = page_start; i < page_end; i++) __free_page(pages[pcpu_page_idx(tcpu, i)]); } return -ENOMEM; } /** * pcpu_pre_unmap_flush - flush cache prior to unmapping * @chunk: chunk the regions to be flushed belongs to * @page_start: page index of the first page to be flushed * @page_end: page index of the last page to be flushed + 1 * * Pages in [@page_start,@page_end) of @chunk are about to be * unmapped. Flush cache. As each flushing trial can be very * expensive, issue flush on the whole region at once rather than * doing it for each cpu. This could be an overkill but is more * scalable. 
*/ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk, int page_start, int page_end) { flush_cache_vunmap( pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); } static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) { vunmap_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT)); } /** * pcpu_unmap_pages - unmap pages out of a pcpu_chunk * @chunk: chunk of interest * @pages: pages array which can be used to pass information to free * @page_start: page index of the first page to unmap * @page_end: page index of the last page to unmap + 1 * * For each cpu, unmap pages [@page_start,@page_end) out of @chunk. * Corresponding elements in @pages were cleared by the caller and can * be used to carry information to pcpu_free_pages() which will be * called after all unmaps are finished. The caller should call * proper pre/post flush functions. */ static void pcpu_unmap_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) { unsigned int cpu; int i; for_each_possible_cpu(cpu) { for (i = page_start; i < page_end; i++) { struct page *page; page = pcpu_chunk_page(chunk, cpu, i); WARN_ON(!page); pages[pcpu_page_idx(cpu, i)] = page; } __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start), page_end - page_start); } } /** * pcpu_post_unmap_tlb_flush - flush TLB after unmapping * @chunk: pcpu_chunk the regions to be flushed belong to * @page_start: page index of the first page to be flushed * @page_end: page index of the last page to be flushed + 1 * * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush * TLB for the regions. This can be skipped if the area is to be * returned to vmalloc as vmalloc will handle TLB flushing lazily. * * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once * for the whole region. */ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, int page_start, int page_end) { flush_tlb_kernel_range( pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); } static int __pcpu_map_pages(unsigned long addr, struct page **pages, int nr_pages) { return vmap_pages_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT), PAGE_KERNEL, pages, PAGE_SHIFT); } /** * pcpu_map_pages - map pages into a pcpu_chunk * @chunk: chunk of interest * @pages: pages array containing pages to be mapped * @page_start: page index of the first page to map * @page_end: page index of the last page to map + 1 * * For each cpu, map pages [@page_start,@page_end) into @chunk. The * caller is responsible for calling pcpu_post_map_flush() after all * mappings are complete. * * This function is responsible for setting up whatever is necessary for * reverse lookup (addr -> chunk). 
*/ static int pcpu_map_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) { unsigned int cpu, tcpu; int i, err; for_each_possible_cpu(cpu) { err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start), &pages[pcpu_page_idx(cpu, page_start)], page_end - page_start); if (err < 0) goto err; for (i = page_start; i < page_end; i++) pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)], chunk); } return 0; err: for_each_possible_cpu(tcpu) { if (tcpu == cpu) break; __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start), page_end - page_start); } pcpu_post_unmap_tlb_flush(chunk, page_start, page_end); return err; } /** * pcpu_post_map_flush - flush cache after mapping * @chunk: pcpu_chunk the regions to be flushed belong to * @page_start: page index of the first page to be flushed * @page_end: page index of the last page to be flushed + 1 * * Pages [@page_start,@page_end) of @chunk have been mapped. Flush * cache. * * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once * for the whole region. */ static void pcpu_post_map_flush(struct pcpu_chunk *chunk, int page_start, int page_end) { flush_cache_vmap( pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); } /** * pcpu_populate_chunk - populate and map an area of a pcpu_chunk * @chunk: chunk of interest * @page_start: the start page * @page_end: the end page * @gfp: allocation flags passed to the underlying memory allocator * * For each cpu, populate and map pages [@page_start,@page_end) into * @chunk. * * CONTEXT: * pcpu_alloc_mutex, does GFP_KERNEL allocation. */ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end, gfp_t gfp) { struct page **pages; pages = pcpu_get_pages(); if (!pages) return -ENOMEM; if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp)) return -ENOMEM; if (pcpu_map_pages(chunk, pages, page_start, page_end)) { pcpu_free_pages(chunk, pages, page_start, page_end); return -ENOMEM; } pcpu_post_map_flush(chunk, page_start, page_end); return 0; } /** * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk * @chunk: chunk to depopulate * @page_start: the start page * @page_end: the end page * * For each cpu, depopulate and unmap pages [@page_start,@page_end) * from @chunk. * * Caller is required to call pcpu_post_unmap_tlb_flush() if not returning the * region back to vmalloc() which will lazily flush the tlb. * * CONTEXT: * pcpu_alloc_mutex. */ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end) { struct page **pages; /* * If control reaches here, there must have been at least one * successful population attempt so the temp pages array must * be available now. 
*/ pages = pcpu_get_pages(); BUG_ON(!pages); /* unmap and free */ pcpu_pre_unmap_flush(chunk, page_start, page_end); pcpu_unmap_pages(chunk, pages, page_start, page_end); pcpu_free_pages(chunk, pages, page_start, page_end); } static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp) { struct pcpu_chunk *chunk; struct vm_struct **vms; chunk = pcpu_alloc_chunk(gfp); if (!chunk) return NULL; vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes, pcpu_nr_groups, pcpu_atom_size); if (!vms) { pcpu_free_chunk(chunk); return NULL; } chunk->data = vms; chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0]; pcpu_stats_chunk_alloc(); trace_percpu_create_chunk(chunk->base_addr); return chunk; } static void pcpu_destroy_chunk(struct pcpu_chunk *chunk) { if (!chunk) return; pcpu_stats_chunk_dealloc(); trace_percpu_destroy_chunk(chunk->base_addr); if (chunk->data) pcpu_free_vm_areas(chunk->data, pcpu_nr_groups); pcpu_free_chunk(chunk); } static struct page *pcpu_addr_to_page(void *addr) { return vmalloc_to_page(addr); } static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai) { /* no extra restriction */ return 0; } /** * pcpu_should_reclaim_chunk - determine if a chunk should go into reclaim * @chunk: chunk of interest * * This is the entry point for percpu reclaim. If a chunk qualifies, it is then * isolated and managed in separate lists at the back of pcpu_slot: sidelined * and to_depopulate respectively. The to_depopulate list holds chunks slated * for depopulation. They no longer contribute to pcpu_nr_empty_pop_pages once * they are on this list. Once depopulated, they are moved onto the sidelined * list which enables them to be pulled back in for allocation if no other chunk * can suffice the allocation. */ static bool pcpu_should_reclaim_chunk(struct pcpu_chunk *chunk) { /* do not reclaim either the first chunk or reserved chunk */ if (chunk == pcpu_first_chunk || chunk == pcpu_reserved_chunk) return false; /* * If it is isolated, it may be on the sidelined list so move it back to * the to_depopulate list. If we hit at least 1/4 pages empty pages AND * there is no system-wide shortage of empty pages aside from this * chunk, move it to the to_depopulate list. */ return ((chunk->isolated && chunk->nr_empty_pop_pages) || (pcpu_nr_empty_pop_pages > (PCPU_EMPTY_POP_PAGES_HIGH + chunk->nr_empty_pop_pages) && chunk->nr_empty_pop_pages >= chunk->nr_pages / 4)); }
linux-master
mm/percpu-vm.c
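The pcpu_populate_chunk()/pcpu_depopulate_chunk() helpers above sit behind the generic percpu allocator; from a consumer's point of view the entry points are alloc_percpu(), per_cpu_ptr() and free_percpu(). A brief in-kernel sketch of that consumer side follows; the structure and symbol names (demo_stats, demo_percpu_*) are invented for illustration and this is not code from the file above:

#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

struct demo_stats {
	unsigned long hits;
};

static struct demo_stats __percpu *demo_stats;

static int __init demo_percpu_init(void)
{
	int cpu;

	/* Backed, on demand, by chunks populated via pcpu_populate_chunk(). */
	demo_stats = alloc_percpu(struct demo_stats);
	if (!demo_stats)
		return -ENOMEM;

	/* Each possible CPU gets its own copy of the structure. */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(demo_stats, cpu)->hits = 0;

	return 0;
}

static void __exit demo_percpu_exit(void)
{
	free_percpu(demo_stats);
}

module_init(demo_percpu_init);
module_exit(demo_percpu_exit);
MODULE_LICENSE("GPL");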
// SPDX-License-Identifier: GPL-2.0-only /* * mm/balloon_compaction.c * * Common interface for making balloon pages movable by compaction. * * Copyright (C) 2012, Red Hat, Inc. Rafael Aquini <[email protected]> */ #include <linux/mm.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/balloon_compaction.h> static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info, struct page *page) { /* * Block others from accessing the 'page' when we get around to * establishing additional references. We should be the only one * holding a reference to the 'page' at this point. If we are not, then * memory corruption is possible and we should stop execution. */ BUG_ON(!trylock_page(page)); balloon_page_insert(b_dev_info, page); unlock_page(page); __count_vm_event(BALLOON_INFLATE); } /** * balloon_page_list_enqueue() - inserts a list of pages into the balloon page * list. * @b_dev_info: balloon device descriptor where we will insert a new page to * @pages: pages to enqueue - allocated using balloon_page_alloc. * * Driver must call this function to properly enqueue balloon pages before * definitively removing them from the guest system. * * Return: number of pages that were enqueued. */ size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info, struct list_head *pages) { struct page *page, *tmp; unsigned long flags; size_t n_pages = 0; spin_lock_irqsave(&b_dev_info->pages_lock, flags); list_for_each_entry_safe(page, tmp, pages, lru) { list_del(&page->lru); balloon_page_enqueue_one(b_dev_info, page); n_pages++; } spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); return n_pages; } EXPORT_SYMBOL_GPL(balloon_page_list_enqueue); /** * balloon_page_list_dequeue() - removes pages from balloon's page list and * returns a list of the pages. * @b_dev_info: balloon device descriptor where we will grab a page from. * @pages: pointer to the list of pages that would be returned to the caller. * @n_req_pages: number of requested pages. * * Driver must call this function to properly de-allocate a previous enlisted * balloon pages before definitively releasing it back to the guest system. * This function tries to remove @n_req_pages from the ballooned pages and * return them to the caller in the @pages list. * * Note that this function may fail to dequeue some pages even if the balloon * isn't empty - since the page list can be temporarily empty due to compaction * of isolated pages. * * Return: number of pages that were added to the @pages list. */ size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info, struct list_head *pages, size_t n_req_pages) { struct page *page, *tmp; unsigned long flags; size_t n_pages = 0; spin_lock_irqsave(&b_dev_info->pages_lock, flags); list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) { if (n_pages == n_req_pages) break; /* * Block others from accessing the 'page' while we get around to * establishing additional references and preparing the 'page' * to be released by the balloon driver. */ if (!trylock_page(page)) continue; if (IS_ENABLED(CONFIG_BALLOON_COMPACTION) && PageIsolated(page)) { /* raced with isolation */ unlock_page(page); continue; } balloon_page_delete(page); __count_vm_event(BALLOON_DEFLATE); list_add(&page->lru, pages); unlock_page(page); n_pages++; } spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); return n_pages; } EXPORT_SYMBOL_GPL(balloon_page_list_dequeue); /* * balloon_page_alloc - allocates a new page for insertion into the balloon * page list. 
* * Driver must call this function to properly allocate a new balloon page. * Driver must call balloon_page_enqueue before definitively removing the page * from the guest system. * * Return: struct page for the allocated page or NULL on allocation failure. */ struct page *balloon_page_alloc(void) { struct page *page = alloc_page(balloon_mapping_gfp_mask() | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN); return page; } EXPORT_SYMBOL_GPL(balloon_page_alloc); /* * balloon_page_enqueue - inserts a new page into the balloon page list. * * @b_dev_info: balloon device descriptor where we will insert a new page * @page: new page to enqueue - allocated using balloon_page_alloc. * * Drivers must call this function to properly enqueue a new allocated balloon * page before definitively removing the page from the guest system. * * Drivers must not call balloon_page_enqueue on pages that have been pushed to * a list with balloon_page_push before removing them with balloon_page_pop. To * enqueue a list of pages, use balloon_page_list_enqueue instead. */ void balloon_page_enqueue(struct balloon_dev_info *b_dev_info, struct page *page) { unsigned long flags; spin_lock_irqsave(&b_dev_info->pages_lock, flags); balloon_page_enqueue_one(b_dev_info, page); spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); } EXPORT_SYMBOL_GPL(balloon_page_enqueue); /* * balloon_page_dequeue - removes a page from balloon's page list and returns * its address to allow the driver to release the page. * @b_dev_info: balloon device descriptor where we will grab a page from. * * Driver must call this function to properly dequeue a previously enqueued page * before definitively releasing it back to the guest system. * * Caller must perform its own accounting to ensure that this * function is called only if some pages are actually enqueued. * * Note that this function may fail to dequeue some pages even if there are * some enqueued pages - since the page list can be temporarily empty due to * the compaction of isolated pages. * * TODO: remove the caller accounting requirements, and allow caller to wait * until all pages can be dequeued. * * Return: struct page for the dequeued page, or NULL if no page was dequeued. */ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) { unsigned long flags; LIST_HEAD(pages); int n_pages; n_pages = balloon_page_list_dequeue(b_dev_info, &pages, 1); if (n_pages != 1) { /* * If we are unable to dequeue a balloon page because the page * list is empty and there are no isolated pages, then something * went out of track and some balloon pages are lost. * BUG() here, otherwise the balloon driver may get stuck in * an infinite loop while attempting to release all its pages. 
*/ spin_lock_irqsave(&b_dev_info->pages_lock, flags); if (unlikely(list_empty(&b_dev_info->pages) && !b_dev_info->isolated_pages)) BUG(); spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); return NULL; } return list_first_entry(&pages, struct page, lru); } EXPORT_SYMBOL_GPL(balloon_page_dequeue); #ifdef CONFIG_BALLOON_COMPACTION static bool balloon_page_isolate(struct page *page, isolate_mode_t mode) { struct balloon_dev_info *b_dev_info = balloon_page_device(page); unsigned long flags; spin_lock_irqsave(&b_dev_info->pages_lock, flags); list_del(&page->lru); b_dev_info->isolated_pages++; spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); return true; } static void balloon_page_putback(struct page *page) { struct balloon_dev_info *b_dev_info = balloon_page_device(page); unsigned long flags; spin_lock_irqsave(&b_dev_info->pages_lock, flags); list_add(&page->lru, &b_dev_info->pages); b_dev_info->isolated_pages--; spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); } /* move_to_new_page() counterpart for a ballooned page */ static int balloon_page_migrate(struct page *newpage, struct page *page, enum migrate_mode mode) { struct balloon_dev_info *balloon = balloon_page_device(page); /* * We can not easily support the no copy case here so ignore it as it * is unlikely to be used with balloon pages. See include/linux/hmm.h * for a user of the MIGRATE_SYNC_NO_COPY mode. */ if (mode == MIGRATE_SYNC_NO_COPY) return -EINVAL; VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); return balloon->migratepage(balloon, newpage, page, mode); } const struct movable_operations balloon_mops = { .migrate_page = balloon_page_migrate, .isolate_page = balloon_page_isolate, .putback_page = balloon_page_putback, }; EXPORT_SYMBOL_GPL(balloon_mops); #endif /* CONFIG_BALLOON_COMPACTION */
linux-master
mm/balloon_compaction.c
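The kernel-doc comments above already prescribe the driver-side calling sequence (allocate with balloon_page_alloc(), then balloon_page_enqueue(); later balloon_page_dequeue() and release). A condensed sketch of that pattern is shown below; it assumes balloon_devinfo_init() from <linux/balloon_compaction.h>, the demo_* names are placeholders, and the host-notification step a real balloon driver performs between dequeue and free is omitted:

#include <linux/mm.h>
#include <linux/balloon_compaction.h>

static struct balloon_dev_info demo_balloon;

static void demo_balloon_setup(void)
{
	balloon_devinfo_init(&demo_balloon);
}

/* Inflate by one page: allocate, then enqueue onto the balloon list. */
static int demo_inflate_one(void)
{
	struct page *page = balloon_page_alloc();

	if (!page)
		return -ENOMEM;
	balloon_page_enqueue(&demo_balloon, page);
	return 0;
}

/* Deflate by one page: dequeue and give the page back to the system. */
static void demo_deflate_one(void)
{
	struct page *page = balloon_page_dequeue(&demo_balloon);

	if (page)
		__free_page(page);
}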
// SPDX-License-Identifier: GPL-2.0 #include <linux/init.h> #include <linux/memblock.h> #include <linux/fs.h> #include <linux/sysfs.h> #include <linux/kobject.h> #include <linux/memory_hotplug.h> #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/mmu_notifier.h> #include <linux/page_ext.h> #include <linux/page_idle.h> #include "internal.h" #define BITMAP_CHUNK_SIZE sizeof(u64) #define BITMAP_CHUNK_BITS (BITMAP_CHUNK_SIZE * BITS_PER_BYTE) /* * Idle page tracking only considers user memory pages, for other types of * pages the idle flag is always unset and an attempt to set it is silently * ignored. * * We treat a page as a user memory page if it is on an LRU list, because it is * always safe to pass such a page to rmap_walk(), which is essential for idle * page tracking. With such an indicator of user pages we can skip isolated * pages, but since there are not usually many of them, it will hardly affect * the overall result. * * This function tries to get a user memory page by pfn as described above. */ static struct folio *page_idle_get_folio(unsigned long pfn) { struct page *page = pfn_to_online_page(pfn); struct folio *folio; if (!page || PageTail(page)) return NULL; folio = page_folio(page); if (!folio_test_lru(folio) || !folio_try_get(folio)) return NULL; if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) { folio_put(folio); folio = NULL; } return folio; } static bool page_idle_clear_pte_refs_one(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, void *arg) { DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0); bool referenced = false; while (page_vma_mapped_walk(&pvmw)) { addr = pvmw.address; if (pvmw.pte) { /* * For PTE-mapped THP, one sub page is referenced, * the whole THP is referenced. */ if (ptep_clear_young_notify(vma, addr, pvmw.pte)) referenced = true; } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { if (pmdp_clear_young_notify(vma, addr, pvmw.pmd)) referenced = true; } else { /* unexpected pmd-mapped page? */ WARN_ON_ONCE(1); } } if (referenced) { folio_clear_idle(folio); /* * We cleared the referenced bit in a mapping to this page. To * avoid interference with page reclaim, mark it young so that * folio_referenced() will return > 0. */ folio_set_young(folio); } return true; } static void page_idle_clear_pte_refs(struct folio *folio) { /* * Since rwc.try_lock is unused, rwc is effectively immutable, so we * can make it static to save some cycles and stack. 
*/ static struct rmap_walk_control rwc = { .rmap_one = page_idle_clear_pte_refs_one, .anon_lock = folio_lock_anon_vma_read, }; bool need_lock; if (!folio_mapped(folio) || !folio_raw_mapping(folio)) return; need_lock = !folio_test_anon(folio) || folio_test_ksm(folio); if (need_lock && !folio_trylock(folio)) return; rmap_walk(folio, &rwc); if (need_lock) folio_unlock(folio); } static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t pos, size_t count) { u64 *out = (u64 *)buf; struct folio *folio; unsigned long pfn, end_pfn; int bit; if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE) return -EINVAL; pfn = pos * BITS_PER_BYTE; if (pfn >= max_pfn) return 0; end_pfn = pfn + count * BITS_PER_BYTE; if (end_pfn > max_pfn) end_pfn = max_pfn; for (; pfn < end_pfn; pfn++) { bit = pfn % BITMAP_CHUNK_BITS; if (!bit) *out = 0ULL; folio = page_idle_get_folio(pfn); if (folio) { if (folio_test_idle(folio)) { /* * The page might have been referenced via a * pte, in which case it is not idle. Clear * refs and recheck. */ page_idle_clear_pte_refs(folio); if (folio_test_idle(folio)) *out |= 1ULL << bit; } folio_put(folio); } if (bit == BITMAP_CHUNK_BITS - 1) out++; cond_resched(); } return (char *)out - buf; } static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t pos, size_t count) { const u64 *in = (u64 *)buf; struct folio *folio; unsigned long pfn, end_pfn; int bit; if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE) return -EINVAL; pfn = pos * BITS_PER_BYTE; if (pfn >= max_pfn) return -ENXIO; end_pfn = pfn + count * BITS_PER_BYTE; if (end_pfn > max_pfn) end_pfn = max_pfn; for (; pfn < end_pfn; pfn++) { bit = pfn % BITMAP_CHUNK_BITS; if ((*in >> bit) & 1) { folio = page_idle_get_folio(pfn); if (folio) { page_idle_clear_pte_refs(folio); folio_set_idle(folio); folio_put(folio); } } if (bit == BITMAP_CHUNK_BITS - 1) in++; cond_resched(); } return (char *)in - buf; } static struct bin_attribute page_idle_bitmap_attr = __BIN_ATTR(bitmap, 0600, page_idle_bitmap_read, page_idle_bitmap_write, 0); static struct bin_attribute *page_idle_bin_attrs[] = { &page_idle_bitmap_attr, NULL, }; static const struct attribute_group page_idle_attr_group = { .bin_attrs = page_idle_bin_attrs, .name = "page_idle", }; static int __init page_idle_init(void) { int err; err = sysfs_create_group(mm_kobj, &page_idle_attr_group); if (err) { pr_err("page_idle: register sysfs failed\n"); return err; } return 0; } subsys_initcall(page_idle_init);
linux-master
mm/page_idle.c
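The file above exports the idle-page-tracking interface as a bitmap at /sys/kernel/mm/page_idle/bitmap: one bit per page frame, read and written in 8-byte chunks, so bit i of the chunk at byte offset 8*k corresponds to PFN 64*k + i. Writing a set bit marks that page idle (clearing its referenced state); a bit that still reads back as 1 later means the page was not accessed in the meantime. The following userspace sketch is not part of the kernel tree; it assumes CONFIG_IDLE_PAGE_TRACKING=y and root privileges, and takes the PFN to probe on the command line (in practice obtained from /proc/<pid>/pagemap).

/*
 * Hypothetical example, not from the kernel sources: probe one PFN through
 * /sys/kernel/mm/page_idle/bitmap.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

#define BITMAP_PATH "/sys/kernel/mm/page_idle/bitmap"

int main(int argc, char **argv)
{
	unsigned long pfn;
	uint64_t chunk;
	off_t off;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <pfn>\n", argv[0]);
		return 1;
	}
	pfn = strtoul(argv[1], NULL, 0);
	off = (off_t)(pfn / 64) * sizeof(uint64_t);	/* 8-byte chunk holding this pfn */
	chunk = 1ULL << (pfn % 64);			/* bit for this pfn inside the chunk */

	fd = open(BITMAP_PATH, O_RDWR);
	if (fd < 0) {
		perror(BITMAP_PATH);
		return 1;
	}

	/* Writing a set bit marks the page idle and clears its referenced state. */
	if (pwrite(fd, &chunk, sizeof(chunk), off) != (ssize_t)sizeof(chunk)) {
		perror("pwrite");
		return 1;
	}

	sleep(10);	/* let the workload of interest run */

	/* A bit that reads back as 1 means the page was not referenced meanwhile. */
	if (pread(fd, &chunk, sizeof(chunk), off) != (ssize_t)sizeof(chunk)) {
		perror("pread");
		return 1;
	}
	printf("pfn %lu: %s\n", pfn,
	       (chunk >> (pfn % 64)) & 1 ? "still idle" : "accessed");

	close(fd);
	return 0;
}

Both the pread() offset and length here are multiples of 8 bytes, which is what page_idle_bitmap_read()/page_idle_bitmap_write() above require before they will accept a transfer.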
/* * Resizable virtual memory filesystem for Linux. * * Copyright (C) 2000 Linus Torvalds. * 2000 Transmeta Corp. * 2000-2001 Christoph Rohland * 2000-2001 SAP AG * 2002 Red Hat Inc. * Copyright (C) 2002-2011 Hugh Dickins. * Copyright (C) 2011 Google Inc. * Copyright (C) 2002-2005 VERITAS Software Corporation. * Copyright (C) 2004 Andi Kleen, SuSE Labs * * Extended attribute support for tmpfs: * Copyright (c) 2004, Luke Kenneth Casson Leighton <[email protected]> * Copyright (c) 2004 Red Hat, Inc., James Morris <[email protected]> * * tiny-shmem: * Copyright (c) 2004, 2008 Matt Mackall <[email protected]> * * This file is released under the GPL. */ #include <linux/fs.h> #include <linux/init.h> #include <linux/vfs.h> #include <linux/mount.h> #include <linux/ramfs.h> #include <linux/pagemap.h> #include <linux/file.h> #include <linux/fileattr.h> #include <linux/mm.h> #include <linux/random.h> #include <linux/sched/signal.h> #include <linux/export.h> #include <linux/shmem_fs.h> #include <linux/swap.h> #include <linux/uio.h> #include <linux/hugetlb.h> #include <linux/fs_parser.h> #include <linux/swapfile.h> #include <linux/iversion.h> #include "swap.h" static struct vfsmount *shm_mnt; #ifdef CONFIG_SHMEM /* * This virtual memory filesystem is heavily based on the ramfs. It * extends ramfs by the ability to use swap and honor resource limits * which makes it a completely usable filesystem. */ #include <linux/xattr.h> #include <linux/exportfs.h> #include <linux/posix_acl.h> #include <linux/posix_acl_xattr.h> #include <linux/mman.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/backing-dev.h> #include <linux/writeback.h> #include <linux/pagevec.h> #include <linux/percpu_counter.h> #include <linux/falloc.h> #include <linux/splice.h> #include <linux/security.h> #include <linux/swapops.h> #include <linux/mempolicy.h> #include <linux/namei.h> #include <linux/ctype.h> #include <linux/migrate.h> #include <linux/highmem.h> #include <linux/seq_file.h> #include <linux/magic.h> #include <linux/syscalls.h> #include <linux/fcntl.h> #include <uapi/linux/memfd.h> #include <linux/rmap.h> #include <linux/uuid.h> #include <linux/quotaops.h> #include <linux/uaccess.h> #include "internal.h" #define BLOCKS_PER_PAGE (PAGE_SIZE/512) #define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT) /* Pretend that each entry is of this size in directory's i_size */ #define BOGO_DIRENT_SIZE 20 /* Pretend that one inode + its dentry occupy this much memory */ #define BOGO_INODE_SIZE 1024 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */ #define SHORT_SYMLINK_LEN 128 /* * shmem_fallocate communicates with shmem_fault or shmem_writepage via * inode->i_private (with i_rwsem making sure that it has only one user at * a time): we would prefer not to enlarge the shmem inode just for that. 
*/ struct shmem_falloc { wait_queue_head_t *waitq; /* faults into hole wait for punch to end */ pgoff_t start; /* start of range currently being fallocated */ pgoff_t next; /* the next page offset to be fallocated */ pgoff_t nr_falloced; /* how many new pages have been fallocated */ pgoff_t nr_unswapped; /* how often writepage refused to swap out */ }; struct shmem_options { unsigned long long blocks; unsigned long long inodes; struct mempolicy *mpol; kuid_t uid; kgid_t gid; umode_t mode; bool full_inums; int huge; int seen; bool noswap; unsigned short quota_types; struct shmem_quota_limits qlimits; #define SHMEM_SEEN_BLOCKS 1 #define SHMEM_SEEN_INODES 2 #define SHMEM_SEEN_HUGE 4 #define SHMEM_SEEN_INUMS 8 #define SHMEM_SEEN_NOSWAP 16 #define SHMEM_SEEN_QUOTA 32 }; #ifdef CONFIG_TMPFS static unsigned long shmem_default_max_blocks(void) { return totalram_pages() / 2; } static unsigned long shmem_default_max_inodes(void) { unsigned long nr_pages = totalram_pages(); return min3(nr_pages - totalhigh_pages(), nr_pages / 2, ULONG_MAX / BOGO_INODE_SIZE); } #endif static int shmem_swapin_folio(struct inode *inode, pgoff_t index, struct folio **foliop, enum sgp_type sgp, gfp_t gfp, struct vm_area_struct *vma, vm_fault_t *fault_type); static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb) { return sb->s_fs_info; } /* * shmem_file_setup pre-accounts the whole fixed size of a VM object, * for shared memory and for shared anonymous (/dev/zero) mappings * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1), * consistent with the pre-accounting of private mappings ... */ static inline int shmem_acct_size(unsigned long flags, loff_t size) { return (flags & VM_NORESERVE) ? 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size)); } static inline void shmem_unacct_size(unsigned long flags, loff_t size) { if (!(flags & VM_NORESERVE)) vm_unacct_memory(VM_ACCT(size)); } static inline int shmem_reacct_size(unsigned long flags, loff_t oldsize, loff_t newsize) { if (!(flags & VM_NORESERVE)) { if (VM_ACCT(newsize) > VM_ACCT(oldsize)) return security_vm_enough_memory_mm(current->mm, VM_ACCT(newsize) - VM_ACCT(oldsize)); else if (VM_ACCT(newsize) < VM_ACCT(oldsize)) vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize)); } return 0; } /* * ... whereas tmpfs objects are accounted incrementally as * pages are allocated, in order to allow large sparse files. * shmem_get_folio reports shmem_acct_block failure as -ENOSPC not -ENOMEM, * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM. 
*/ static inline int shmem_acct_block(unsigned long flags, long pages) { if (!(flags & VM_NORESERVE)) return 0; return security_vm_enough_memory_mm(current->mm, pages * VM_ACCT(PAGE_SIZE)); } static inline void shmem_unacct_blocks(unsigned long flags, long pages) { if (flags & VM_NORESERVE) vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE)); } static int shmem_inode_acct_block(struct inode *inode, long pages) { struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); int err = -ENOSPC; if (shmem_acct_block(info->flags, pages)) return err; might_sleep(); /* when quotas */ if (sbinfo->max_blocks) { if (percpu_counter_compare(&sbinfo->used_blocks, sbinfo->max_blocks - pages) > 0) goto unacct; err = dquot_alloc_block_nodirty(inode, pages); if (err) goto unacct; percpu_counter_add(&sbinfo->used_blocks, pages); } else { err = dquot_alloc_block_nodirty(inode, pages); if (err) goto unacct; } return 0; unacct: shmem_unacct_blocks(info->flags, pages); return err; } static void shmem_inode_unacct_blocks(struct inode *inode, long pages) { struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); might_sleep(); /* when quotas */ dquot_free_block_nodirty(inode, pages); if (sbinfo->max_blocks) percpu_counter_sub(&sbinfo->used_blocks, pages); shmem_unacct_blocks(info->flags, pages); } static const struct super_operations shmem_ops; const struct address_space_operations shmem_aops; static const struct file_operations shmem_file_operations; static const struct inode_operations shmem_inode_operations; static const struct inode_operations shmem_dir_inode_operations; static const struct inode_operations shmem_special_inode_operations; static const struct vm_operations_struct shmem_vm_ops; static const struct vm_operations_struct shmem_anon_vm_ops; static struct file_system_type shmem_fs_type; bool vma_is_anon_shmem(struct vm_area_struct *vma) { return vma->vm_ops == &shmem_anon_vm_ops; } bool vma_is_shmem(struct vm_area_struct *vma) { return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops; } static LIST_HEAD(shmem_swaplist); static DEFINE_MUTEX(shmem_swaplist_mutex); #ifdef CONFIG_TMPFS_QUOTA static int shmem_enable_quotas(struct super_block *sb, unsigned short quota_types) { int type, err = 0; sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY; for (type = 0; type < SHMEM_MAXQUOTAS; type++) { if (!(quota_types & (1 << type))) continue; err = dquot_load_quota_sb(sb, type, QFMT_SHMEM, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); if (err) goto out_err; } return 0; out_err: pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n", type, err); for (type--; type >= 0; type--) dquot_quota_off(sb, type); return err; } static void shmem_disable_quotas(struct super_block *sb) { int type; for (type = 0; type < SHMEM_MAXQUOTAS; type++) dquot_quota_off(sb, type); } static struct dquot **shmem_get_dquots(struct inode *inode) { return SHMEM_I(inode)->i_dquot; } #endif /* CONFIG_TMPFS_QUOTA */ /* * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and * produces a novel ino for the newly allocated inode. * * It may also be called when making a hard link to permit the space needed by * each dentry. However, in that case, no new inode number is needed since that * internally draws from another pool of inode numbers (currently global * get_next_ino()). This case is indicated by passing NULL as inop. 
*/ #define SHMEM_INO_BATCH 1024 static int shmem_reserve_inode(struct super_block *sb, ino_t *inop) { struct shmem_sb_info *sbinfo = SHMEM_SB(sb); ino_t ino; if (!(sb->s_flags & SB_KERNMOUNT)) { raw_spin_lock(&sbinfo->stat_lock); if (sbinfo->max_inodes) { if (sbinfo->free_ispace < BOGO_INODE_SIZE) { raw_spin_unlock(&sbinfo->stat_lock); return -ENOSPC; } sbinfo->free_ispace -= BOGO_INODE_SIZE; } if (inop) { ino = sbinfo->next_ino++; if (unlikely(is_zero_ino(ino))) ino = sbinfo->next_ino++; if (unlikely(!sbinfo->full_inums && ino > UINT_MAX)) { /* * Emulate get_next_ino uint wraparound for * compatibility */ if (IS_ENABLED(CONFIG_64BIT)) pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n", __func__, MINOR(sb->s_dev)); sbinfo->next_ino = 1; ino = sbinfo->next_ino++; } *inop = ino; } raw_spin_unlock(&sbinfo->stat_lock); } else if (inop) { /* * __shmem_file_setup, one of our callers, is lock-free: it * doesn't hold stat_lock in shmem_reserve_inode since * max_inodes is always 0, and is called from potentially * unknown contexts. As such, use a per-cpu batched allocator * which doesn't require the per-sb stat_lock unless we are at * the batch boundary. * * We don't need to worry about inode{32,64} since SB_KERNMOUNT * shmem mounts are not exposed to userspace, so we don't need * to worry about things like glibc compatibility. */ ino_t *next_ino; next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu()); ino = *next_ino; if (unlikely(ino % SHMEM_INO_BATCH == 0)) { raw_spin_lock(&sbinfo->stat_lock); ino = sbinfo->next_ino; sbinfo->next_ino += SHMEM_INO_BATCH; raw_spin_unlock(&sbinfo->stat_lock); if (unlikely(is_zero_ino(ino))) ino++; } *inop = ino; *next_ino = ++ino; put_cpu(); } return 0; } static void shmem_free_inode(struct super_block *sb, size_t freed_ispace) { struct shmem_sb_info *sbinfo = SHMEM_SB(sb); if (sbinfo->max_inodes) { raw_spin_lock(&sbinfo->stat_lock); sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace; raw_spin_unlock(&sbinfo->stat_lock); } } /** * shmem_recalc_inode - recalculate the block usage of an inode * @inode: inode to recalc * @alloced: the change in number of pages allocated to inode * @swapped: the change in number of pages swapped from inode * * We have to calculate the free blocks since the mm can drop * undirtied hole pages behind our back. * * But normally info->alloced == inode->i_mapping->nrpages + info->swapped * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped) */ static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped) { struct shmem_inode_info *info = SHMEM_I(inode); long freed; spin_lock(&info->lock); info->alloced += alloced; info->swapped += swapped; freed = info->alloced - info->swapped - READ_ONCE(inode->i_mapping->nrpages); /* * Special case: whereas normally shmem_recalc_inode() is called * after i_mapping->nrpages has already been adjusted (up or down), * shmem_writepage() has to raise swapped before nrpages is lowered - * to stop a racing shmem_recalc_inode() from thinking that a page has * been freed. Compensate here, to avoid the need for a followup call. 
*/ if (swapped > 0) freed += swapped; if (freed > 0) info->alloced -= freed; spin_unlock(&info->lock); /* The quota case may block */ if (freed > 0) shmem_inode_unacct_blocks(inode, freed); } bool shmem_charge(struct inode *inode, long pages) { struct address_space *mapping = inode->i_mapping; if (shmem_inode_acct_block(inode, pages)) return false; /* nrpages adjustment first, then shmem_recalc_inode() when balanced */ xa_lock_irq(&mapping->i_pages); mapping->nrpages += pages; xa_unlock_irq(&mapping->i_pages); shmem_recalc_inode(inode, pages, 0); return true; } void shmem_uncharge(struct inode *inode, long pages) { /* pages argument is currently unused: keep it to help debugging */ /* nrpages adjustment done by __filemap_remove_folio() or caller */ shmem_recalc_inode(inode, 0, 0); } /* * Replace item expected in xarray by a new item, while holding xa_lock. */ static int shmem_replace_entry(struct address_space *mapping, pgoff_t index, void *expected, void *replacement) { XA_STATE(xas, &mapping->i_pages, index); void *item; VM_BUG_ON(!expected); VM_BUG_ON(!replacement); item = xas_load(&xas); if (item != expected) return -ENOENT; xas_store(&xas, replacement); return 0; } /* * Sometimes, before we decide whether to proceed or to fail, we must check * that an entry was not already brought back from swap by a racing thread. * * Checking page is not enough: by the time a SwapCache page is locked, it * might be reused, and again be SwapCache, using the same swap as before. */ static bool shmem_confirm_swap(struct address_space *mapping, pgoff_t index, swp_entry_t swap) { return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap); } /* * Definitions for "huge tmpfs": tmpfs mounted with the huge= option * * SHMEM_HUGE_NEVER: * disables huge pages for the mount; * SHMEM_HUGE_ALWAYS: * enables huge pages for the mount; * SHMEM_HUGE_WITHIN_SIZE: * only allocate huge pages if the page will be fully within i_size, * also respect fadvise()/madvise() hints; * SHMEM_HUGE_ADVISE: * only allocate huge pages if requested with fadvise()/madvise(); */ #define SHMEM_HUGE_NEVER 0 #define SHMEM_HUGE_ALWAYS 1 #define SHMEM_HUGE_WITHIN_SIZE 2 #define SHMEM_HUGE_ADVISE 3 /* * Special values. 
* Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled: * * SHMEM_HUGE_DENY: * disables huge on shm_mnt and all mounts, for emergency use; * SHMEM_HUGE_FORCE: * enables huge on shm_mnt and all mounts, w/o needing option, for testing; * */ #define SHMEM_HUGE_DENY (-1) #define SHMEM_HUGE_FORCE (-2) #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* ifdef here to avoid bloating shmem.o when not necessary */ static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER; bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force, struct mm_struct *mm, unsigned long vm_flags) { loff_t i_size; if (!S_ISREG(inode->i_mode)) return false; if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags))) return false; if (shmem_huge == SHMEM_HUGE_DENY) return false; if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE) return true; switch (SHMEM_SB(inode->i_sb)->huge) { case SHMEM_HUGE_ALWAYS: return true; case SHMEM_HUGE_WITHIN_SIZE: index = round_up(index + 1, HPAGE_PMD_NR); i_size = round_up(i_size_read(inode), PAGE_SIZE); if (i_size >> PAGE_SHIFT >= index) return true; fallthrough; case SHMEM_HUGE_ADVISE: if (mm && (vm_flags & VM_HUGEPAGE)) return true; fallthrough; default: return false; } } #if defined(CONFIG_SYSFS) static int shmem_parse_huge(const char *str) { if (!strcmp(str, "never")) return SHMEM_HUGE_NEVER; if (!strcmp(str, "always")) return SHMEM_HUGE_ALWAYS; if (!strcmp(str, "within_size")) return SHMEM_HUGE_WITHIN_SIZE; if (!strcmp(str, "advise")) return SHMEM_HUGE_ADVISE; if (!strcmp(str, "deny")) return SHMEM_HUGE_DENY; if (!strcmp(str, "force")) return SHMEM_HUGE_FORCE; return -EINVAL; } #endif #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS) static const char *shmem_format_huge(int huge) { switch (huge) { case SHMEM_HUGE_NEVER: return "never"; case SHMEM_HUGE_ALWAYS: return "always"; case SHMEM_HUGE_WITHIN_SIZE: return "within_size"; case SHMEM_HUGE_ADVISE: return "advise"; case SHMEM_HUGE_DENY: return "deny"; case SHMEM_HUGE_FORCE: return "force"; default: VM_BUG_ON(1); return "bad_val"; } } #endif static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, struct shrink_control *sc, unsigned long nr_to_split) { LIST_HEAD(list), *pos, *next; LIST_HEAD(to_remove); struct inode *inode; struct shmem_inode_info *info; struct folio *folio; unsigned long batch = sc ? 
sc->nr_to_scan : 128; int split = 0; if (list_empty(&sbinfo->shrinklist)) return SHRINK_STOP; spin_lock(&sbinfo->shrinklist_lock); list_for_each_safe(pos, next, &sbinfo->shrinklist) { info = list_entry(pos, struct shmem_inode_info, shrinklist); /* pin the inode */ inode = igrab(&info->vfs_inode); /* inode is about to be evicted */ if (!inode) { list_del_init(&info->shrinklist); goto next; } /* Check if there's anything to gain */ if (round_up(inode->i_size, PAGE_SIZE) == round_up(inode->i_size, HPAGE_PMD_SIZE)) { list_move(&info->shrinklist, &to_remove); goto next; } list_move(&info->shrinklist, &list); next: sbinfo->shrinklist_len--; if (!--batch) break; } spin_unlock(&sbinfo->shrinklist_lock); list_for_each_safe(pos, next, &to_remove) { info = list_entry(pos, struct shmem_inode_info, shrinklist); inode = &info->vfs_inode; list_del_init(&info->shrinklist); iput(inode); } list_for_each_safe(pos, next, &list) { int ret; pgoff_t index; info = list_entry(pos, struct shmem_inode_info, shrinklist); inode = &info->vfs_inode; if (nr_to_split && split >= nr_to_split) goto move_back; index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT; folio = filemap_get_folio(inode->i_mapping, index); if (IS_ERR(folio)) goto drop; /* No huge page at the end of the file: nothing to split */ if (!folio_test_large(folio)) { folio_put(folio); goto drop; } /* * Move the inode on the list back to shrinklist if we failed * to lock the page at this time. * * Waiting for the lock may lead to deadlock in the * reclaim path. */ if (!folio_trylock(folio)) { folio_put(folio); goto move_back; } ret = split_folio(folio); folio_unlock(folio); folio_put(folio); /* If split failed move the inode on the list back to shrinklist */ if (ret) goto move_back; split++; drop: list_del_init(&info->shrinklist); goto put; move_back: /* * Make sure the inode is either on the global list or deleted * from any local list before iput() since it could be deleted * in another thread once we put the inode (then the local list * is corrupted). */ spin_lock(&sbinfo->shrinklist_lock); list_move(&info->shrinklist, &sbinfo->shrinklist); sbinfo->shrinklist_len++; spin_unlock(&sbinfo->shrinklist_lock); put: iput(inode); } return split; } static long shmem_unused_huge_scan(struct super_block *sb, struct shrink_control *sc) { struct shmem_sb_info *sbinfo = SHMEM_SB(sb); if (!READ_ONCE(sbinfo->shrinklist_len)) return SHRINK_STOP; return shmem_unused_huge_shrink(sbinfo, sc, 0); } static long shmem_unused_huge_count(struct super_block *sb, struct shrink_control *sc) { struct shmem_sb_info *sbinfo = SHMEM_SB(sb); return READ_ONCE(sbinfo->shrinklist_len); } #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ #define shmem_huge SHMEM_HUGE_DENY bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force, struct mm_struct *mm, unsigned long vm_flags) { return false; } static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, struct shrink_control *sc, unsigned long nr_to_split) { return 0; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* * Like filemap_add_folio, but error if expected item has gone. 
*/ static int shmem_add_to_page_cache(struct folio *folio, struct address_space *mapping, pgoff_t index, void *expected, gfp_t gfp, struct mm_struct *charge_mm) { XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio)); long nr = folio_nr_pages(folio); int error; VM_BUG_ON_FOLIO(index != round_down(index, nr), folio); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio); VM_BUG_ON(expected && folio_test_large(folio)); folio_ref_add(folio, nr); folio->mapping = mapping; folio->index = index; if (!folio_test_swapcache(folio)) { error = mem_cgroup_charge(folio, charge_mm, gfp); if (error) { if (folio_test_pmd_mappable(folio)) { count_vm_event(THP_FILE_FALLBACK); count_vm_event(THP_FILE_FALLBACK_CHARGE); } goto error; } } folio_throttle_swaprate(folio, gfp); do { xas_lock_irq(&xas); if (expected != xas_find_conflict(&xas)) { xas_set_err(&xas, -EEXIST); goto unlock; } if (expected && xas_find_conflict(&xas)) { xas_set_err(&xas, -EEXIST); goto unlock; } xas_store(&xas, folio); if (xas_error(&xas)) goto unlock; if (folio_test_pmd_mappable(folio)) { count_vm_event(THP_FILE_ALLOC); __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr); } mapping->nrpages += nr; __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr); __lruvec_stat_mod_folio(folio, NR_SHMEM, nr); unlock: xas_unlock_irq(&xas); } while (xas_nomem(&xas, gfp)); if (xas_error(&xas)) { error = xas_error(&xas); goto error; } return 0; error: folio->mapping = NULL; folio_ref_sub(folio, nr); return error; } /* * Like delete_from_page_cache, but substitutes swap for @folio. */ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap) { struct address_space *mapping = folio->mapping; long nr = folio_nr_pages(folio); int error; xa_lock_irq(&mapping->i_pages); error = shmem_replace_entry(mapping, folio->index, folio, radswap); folio->mapping = NULL; mapping->nrpages -= nr; __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr); __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr); xa_unlock_irq(&mapping->i_pages); folio_put(folio); BUG_ON(error); } /* * Remove swap entry from page cache, free the swap and its page cache. */ static int shmem_free_swap(struct address_space *mapping, pgoff_t index, void *radswap) { void *old; old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); if (old != radswap) return -ENOENT; free_swap_and_cache(radix_to_swp_entry(radswap)); return 0; } /* * Determine (in bytes) how many of the shmem object's pages mapped by the * given offsets are swapped out. * * This is safe to call without i_rwsem or the i_pages lock thanks to RCU, * as long as the inode doesn't go away and racy results are not a problem. */ unsigned long shmem_partial_swap_usage(struct address_space *mapping, pgoff_t start, pgoff_t end) { XA_STATE(xas, &mapping->i_pages, start); struct page *page; unsigned long swapped = 0; unsigned long max = end - 1; rcu_read_lock(); xas_for_each(&xas, page, max) { if (xas_retry(&xas, page)) continue; if (xa_is_value(page)) swapped++; if (xas.xa_index == max) break; if (need_resched()) { xas_pause(&xas); cond_resched_rcu(); } } rcu_read_unlock(); return swapped << PAGE_SHIFT; } /* * Determine (in bytes) how many of the shmem object's pages mapped by the * given vma is swapped out. * * This is safe to call without i_rwsem or the i_pages lock thanks to RCU, * as long as the inode doesn't go away and racy results are not a problem. 
*/ unsigned long shmem_swap_usage(struct vm_area_struct *vma) { struct inode *inode = file_inode(vma->vm_file); struct shmem_inode_info *info = SHMEM_I(inode); struct address_space *mapping = inode->i_mapping; unsigned long swapped; /* Be careful as we don't hold info->lock */ swapped = READ_ONCE(info->swapped); /* * The easier cases are when the shmem object has nothing in swap, or * the vma maps it whole. Then we can simply use the stats that we * already track. */ if (!swapped) return 0; if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size) return swapped << PAGE_SHIFT; /* Here comes the more involved part */ return shmem_partial_swap_usage(mapping, vma->vm_pgoff, vma->vm_pgoff + vma_pages(vma)); } /* * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists. */ void shmem_unlock_mapping(struct address_space *mapping) { struct folio_batch fbatch; pgoff_t index = 0; folio_batch_init(&fbatch); /* * Minor point, but we might as well stop if someone else SHM_LOCKs it. */ while (!mapping_unevictable(mapping) && filemap_get_folios(mapping, &index, ~0UL, &fbatch)) { check_move_unevictable_folios(&fbatch); folio_batch_release(&fbatch); cond_resched(); } } static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index) { struct folio *folio; /* * At first avoid shmem_get_folio(,,,SGP_READ): that fails * beyond i_size, and reports fallocated folios as holes. */ folio = filemap_get_entry(inode->i_mapping, index); if (!folio) return folio; if (!xa_is_value(folio)) { folio_lock(folio); if (folio->mapping == inode->i_mapping) return folio; /* The folio has been swapped out */ folio_unlock(folio); folio_put(folio); } /* * But read a folio back from swap if any of it is within i_size * (although in some cases this is just a waste of time). */ folio = NULL; shmem_get_folio(inode, index, &folio, SGP_READ); return folio; } /* * Remove range of pages and swap entries from page cache, and free them. * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate. */ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, bool unfalloc) { struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; pgoff_t end = (lend + 1) >> PAGE_SHIFT; struct folio_batch fbatch; pgoff_t indices[PAGEVEC_SIZE]; struct folio *folio; bool same_folio; long nr_swaps_freed = 0; pgoff_t index; int i; if (lend == -1) end = -1; /* unsigned, so actually very big */ if (info->fallocend > start && info->fallocend <= end && !unfalloc) info->fallocend = start; folio_batch_init(&fbatch); index = start; while (index < end && find_lock_entries(mapping, &index, end - 1, &fbatch, indices)) { for (i = 0; i < folio_batch_count(&fbatch); i++) { folio = fbatch.folios[i]; if (xa_is_value(folio)) { if (unfalloc) continue; nr_swaps_freed += !shmem_free_swap(mapping, indices[i], folio); continue; } if (!unfalloc || !folio_test_uptodate(folio)) truncate_inode_folio(mapping, folio); folio_unlock(folio); } folio_batch_remove_exceptionals(&fbatch); folio_batch_release(&fbatch); cond_resched(); } /* * When undoing a failed fallocate, we want none of the partial folio * zeroing and splitting below, but shall want to truncate the whole * folio when !uptodate indicates that it was added by this fallocate, * even when [lstart, lend] covers only a part of the folio. 
*/ if (unfalloc) goto whole_folios; same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT); folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT); if (folio) { same_folio = lend < folio_pos(folio) + folio_size(folio); folio_mark_dirty(folio); if (!truncate_inode_partial_folio(folio, lstart, lend)) { start = folio_next_index(folio); if (same_folio) end = folio->index; } folio_unlock(folio); folio_put(folio); folio = NULL; } if (!same_folio) folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT); if (folio) { folio_mark_dirty(folio); if (!truncate_inode_partial_folio(folio, lstart, lend)) end = folio->index; folio_unlock(folio); folio_put(folio); } whole_folios: index = start; while (index < end) { cond_resched(); if (!find_get_entries(mapping, &index, end - 1, &fbatch, indices)) { /* If all gone or hole-punch or unfalloc, we're done */ if (index == start || end != -1) break; /* But if truncating, restart to make sure all gone */ index = start; continue; } for (i = 0; i < folio_batch_count(&fbatch); i++) { folio = fbatch.folios[i]; if (xa_is_value(folio)) { if (unfalloc) continue; if (shmem_free_swap(mapping, indices[i], folio)) { /* Swap was replaced by page: retry */ index = indices[i]; break; } nr_swaps_freed++; continue; } folio_lock(folio); if (!unfalloc || !folio_test_uptodate(folio)) { if (folio_mapping(folio) != mapping) { /* Page was replaced by swap: retry */ folio_unlock(folio); index = indices[i]; break; } VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio); truncate_inode_folio(mapping, folio); } folio_unlock(folio); } folio_batch_remove_exceptionals(&fbatch); folio_batch_release(&fbatch); } shmem_recalc_inode(inode, 0, -nr_swaps_freed); } void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) { shmem_undo_range(inode, lstart, lend, false); inode->i_mtime = inode_set_ctime_current(inode); inode_inc_iversion(inode); } EXPORT_SYMBOL_GPL(shmem_truncate_range); static int shmem_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { struct inode *inode = path->dentry->d_inode; struct shmem_inode_info *info = SHMEM_I(inode); if (info->alloced - info->swapped != inode->i_mapping->nrpages) shmem_recalc_inode(inode, 0, 0); if (info->fsflags & FS_APPEND_FL) stat->attributes |= STATX_ATTR_APPEND; if (info->fsflags & FS_IMMUTABLE_FL) stat->attributes |= STATX_ATTR_IMMUTABLE; if (info->fsflags & FS_NODUMP_FL) stat->attributes |= STATX_ATTR_NODUMP; stat->attributes_mask |= (STATX_ATTR_APPEND | STATX_ATTR_IMMUTABLE | STATX_ATTR_NODUMP); generic_fillattr(idmap, request_mask, inode, stat); if (shmem_is_huge(inode, 0, false, NULL, 0)) stat->blksize = HPAGE_PMD_SIZE; if (request_mask & STATX_BTIME) { stat->result_mask |= STATX_BTIME; stat->btime.tv_sec = info->i_crtime.tv_sec; stat->btime.tv_nsec = info->i_crtime.tv_nsec; } return 0; } static int shmem_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); struct shmem_inode_info *info = SHMEM_I(inode); int error; bool update_mtime = false; bool update_ctime = true; error = setattr_prepare(idmap, dentry, attr); if (error) return error; if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) { if ((inode->i_mode ^ attr->ia_mode) & 0111) { return -EPERM; } } if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { loff_t oldsize = inode->i_size; loff_t newsize = attr->ia_size; /* protected by i_rwsem */ if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || (newsize 
> oldsize && (info->seals & F_SEAL_GROW))) return -EPERM; if (newsize != oldsize) { error = shmem_reacct_size(SHMEM_I(inode)->flags, oldsize, newsize); if (error) return error; i_size_write(inode, newsize); update_mtime = true; } else { update_ctime = false; } if (newsize <= oldsize) { loff_t holebegin = round_up(newsize, PAGE_SIZE); if (oldsize > holebegin) unmap_mapping_range(inode->i_mapping, holebegin, 0, 1); if (info->alloced) shmem_truncate_range(inode, newsize, (loff_t)-1); /* unmap again to remove racily COWed private pages */ if (oldsize > holebegin) unmap_mapping_range(inode->i_mapping, holebegin, 0, 1); } } if (is_quota_modification(idmap, inode, attr)) { error = dquot_initialize(inode); if (error) return error; } /* Transfer quota accounting */ if (i_uid_needs_update(idmap, attr, inode) || i_gid_needs_update(idmap, attr, inode)) { error = dquot_transfer(idmap, inode, attr); if (error) return error; } setattr_copy(idmap, inode, attr); if (attr->ia_valid & ATTR_MODE) error = posix_acl_chmod(idmap, dentry, inode->i_mode); if (!error && update_ctime) { inode_set_ctime_current(inode); if (update_mtime) inode->i_mtime = inode_get_ctime(inode); inode_inc_iversion(inode); } return error; } static void shmem_evict_inode(struct inode *inode) { struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); size_t freed = 0; if (shmem_mapping(inode->i_mapping)) { shmem_unacct_size(info->flags, inode->i_size); inode->i_size = 0; mapping_set_exiting(inode->i_mapping); shmem_truncate_range(inode, 0, (loff_t)-1); if (!list_empty(&info->shrinklist)) { spin_lock(&sbinfo->shrinklist_lock); if (!list_empty(&info->shrinklist)) { list_del_init(&info->shrinklist); sbinfo->shrinklist_len--; } spin_unlock(&sbinfo->shrinklist_lock); } while (!list_empty(&info->swaplist)) { /* Wait while shmem_unuse() is scanning this inode... */ wait_var_event(&info->stop_eviction, !atomic_read(&info->stop_eviction)); mutex_lock(&shmem_swaplist_mutex); /* ...but beware of the race if we peeked too early */ if (!atomic_read(&info->stop_eviction)) list_del_init(&info->swaplist); mutex_unlock(&shmem_swaplist_mutex); } } simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL); shmem_free_inode(inode->i_sb, freed); WARN_ON(inode->i_blocks); clear_inode(inode); #ifdef CONFIG_TMPFS_QUOTA dquot_free_inode(inode); dquot_drop(inode); #endif } static int shmem_find_swap_entries(struct address_space *mapping, pgoff_t start, struct folio_batch *fbatch, pgoff_t *indices, unsigned int type) { XA_STATE(xas, &mapping->i_pages, start); struct folio *folio; swp_entry_t entry; rcu_read_lock(); xas_for_each(&xas, folio, ULONG_MAX) { if (xas_retry(&xas, folio)) continue; if (!xa_is_value(folio)) continue; entry = radix_to_swp_entry(folio); /* * swapin error entries can be found in the mapping. But they're * deliberately ignored here as we've done everything we can do. */ if (swp_type(entry) != type) continue; indices[folio_batch_count(fbatch)] = xas.xa_index; if (!folio_batch_add(fbatch, folio)) break; if (need_resched()) { xas_pause(&xas); cond_resched_rcu(); } } rcu_read_unlock(); return xas.xa_index; } /* * Move the swapped pages for an inode to page cache. Returns the count * of pages swapped in, or the error in case of failure. 
*/ static int shmem_unuse_swap_entries(struct inode *inode, struct folio_batch *fbatch, pgoff_t *indices) { int i = 0; int ret = 0; int error = 0; struct address_space *mapping = inode->i_mapping; for (i = 0; i < folio_batch_count(fbatch); i++) { struct folio *folio = fbatch->folios[i]; if (!xa_is_value(folio)) continue; error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE, mapping_gfp_mask(mapping), NULL, NULL); if (error == 0) { folio_unlock(folio); folio_put(folio); ret++; } if (error == -ENOMEM) break; error = 0; } return error ? error : ret; } /* * If swap found in inode, free it and move page from swapcache to filecache. */ static int shmem_unuse_inode(struct inode *inode, unsigned int type) { struct address_space *mapping = inode->i_mapping; pgoff_t start = 0; struct folio_batch fbatch; pgoff_t indices[PAGEVEC_SIZE]; int ret = 0; do { folio_batch_init(&fbatch); shmem_find_swap_entries(mapping, start, &fbatch, indices, type); if (folio_batch_count(&fbatch) == 0) { ret = 0; break; } ret = shmem_unuse_swap_entries(inode, &fbatch, indices); if (ret < 0) break; start = indices[folio_batch_count(&fbatch) - 1]; } while (true); return ret; } /* * Read all the shared memory data that resides in the swap * device 'type' back into memory, so the swap device can be * unused. */ int shmem_unuse(unsigned int type) { struct shmem_inode_info *info, *next; int error = 0; if (list_empty(&shmem_swaplist)) return 0; mutex_lock(&shmem_swaplist_mutex); list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) { if (!info->swapped) { list_del_init(&info->swaplist); continue; } /* * Drop the swaplist mutex while searching the inode for swap; * but before doing so, make sure shmem_evict_inode() will not * remove placeholder inode from swaplist, nor let it be freed * (igrab() would protect from unlink, but not from unmount). */ atomic_inc(&info->stop_eviction); mutex_unlock(&shmem_swaplist_mutex); error = shmem_unuse_inode(&info->vfs_inode, type); cond_resched(); mutex_lock(&shmem_swaplist_mutex); next = list_next_entry(info, swaplist); if (!info->swapped) list_del_init(&info->swaplist); if (atomic_dec_and_test(&info->stop_eviction)) wake_up_var(&info->stop_eviction); if (error) break; } mutex_unlock(&shmem_swaplist_mutex); return error; } /* * Move the page from the page cache to the swap cache. */ static int shmem_writepage(struct page *page, struct writeback_control *wbc) { struct folio *folio = page_folio(page); struct address_space *mapping = folio->mapping; struct inode *inode = mapping->host; struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); swp_entry_t swap; pgoff_t index; /* * Our capabilities prevent regular writeback or sync from ever calling * shmem_writepage; but a stacking filesystem might use ->writepage of * its underlying filesystem, in which case tmpfs should write out to * swap only in response to memory pressure, and not for the writeback * threads or sync. */ if (WARN_ON_ONCE(!wbc->for_reclaim)) goto redirty; if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap)) goto redirty; if (!total_swap_pages) goto redirty; /* * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages, * and its shmem_writeback() needs them to be split when swapping. 
*/ if (folio_test_large(folio)) { /* Ensure the subpages are still dirty */ folio_test_set_dirty(folio); if (split_huge_page(page) < 0) goto redirty; folio = page_folio(page); folio_clear_dirty(folio); } index = folio->index; /* * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC * value into swapfile.c, the only way we can correctly account for a * fallocated folio arriving here is now to initialize it and write it. * * That's okay for a folio already fallocated earlier, but if we have * not yet completed the fallocation, then (a) we want to keep track * of this folio in case we have to undo it, and (b) it may not be a * good idea to continue anyway, once we're pushing into swap. So * reactivate the folio, and let shmem_fallocate() quit when too many. */ if (!folio_test_uptodate(folio)) { if (inode->i_private) { struct shmem_falloc *shmem_falloc; spin_lock(&inode->i_lock); shmem_falloc = inode->i_private; if (shmem_falloc && !shmem_falloc->waitq && index >= shmem_falloc->start && index < shmem_falloc->next) shmem_falloc->nr_unswapped++; else shmem_falloc = NULL; spin_unlock(&inode->i_lock); if (shmem_falloc) goto redirty; } folio_zero_range(folio, 0, folio_size(folio)); flush_dcache_folio(folio); folio_mark_uptodate(folio); } swap = folio_alloc_swap(folio); if (!swap.val) goto redirty; /* * Add inode to shmem_unuse()'s list of swapped-out inodes, * if it's not already there. Do it now before the folio is * moved to swap cache, when its pagelock no longer protects * the inode from eviction. But don't unlock the mutex until * we've incremented swapped, because shmem_unuse_inode() will * prune a !swapped inode from the swaplist under this mutex. */ mutex_lock(&shmem_swaplist_mutex); if (list_empty(&info->swaplist)) list_add(&info->swaplist, &shmem_swaplist); if (add_to_swap_cache(folio, swap, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, NULL) == 0) { shmem_recalc_inode(inode, 0, 1); swap_shmem_alloc(swap); shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap)); mutex_unlock(&shmem_swaplist_mutex); BUG_ON(folio_mapped(folio)); swap_writepage(&folio->page, wbc); return 0; } mutex_unlock(&shmem_swaplist_mutex); put_swap_folio(folio, swap); redirty: folio_mark_dirty(folio); if (wbc->for_reclaim) return AOP_WRITEPAGE_ACTIVATE; /* Return with folio locked */ folio_unlock(folio); return 0; } #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) { char buffer[64]; if (!mpol || mpol->mode == MPOL_DEFAULT) return; /* show nothing */ mpol_to_str(buffer, sizeof(buffer), mpol); seq_printf(seq, ",mpol=%s", buffer); } static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) { struct mempolicy *mpol = NULL; if (sbinfo->mpol) { raw_spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ mpol = sbinfo->mpol; mpol_get(mpol); raw_spin_unlock(&sbinfo->stat_lock); } return mpol; } #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) { } static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) { return NULL; } #endif /* CONFIG_NUMA && CONFIG_TMPFS */ #ifndef CONFIG_NUMA #define vm_policy vm_private_data #endif static void shmem_pseudo_vma_init(struct vm_area_struct *vma, struct shmem_inode_info *info, pgoff_t index) { /* Create a pseudo vma that just contains the policy */ vma_init(vma, NULL); /* Bias interleave by inode number to distribute better across nodes */ vma->vm_pgoff = index + 
info->vfs_inode.i_ino; vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); } static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma) { /* Drop reference taken by mpol_shared_policy_lookup() */ mpol_cond_put(vma->vm_policy); } static struct folio *shmem_swapin(swp_entry_t swap, gfp_t gfp, struct shmem_inode_info *info, pgoff_t index) { struct vm_area_struct pvma; struct page *page; struct vm_fault vmf = { .vma = &pvma, }; shmem_pseudo_vma_init(&pvma, info, index); page = swap_cluster_readahead(swap, gfp, &vmf); shmem_pseudo_vma_destroy(&pvma); if (!page) return NULL; return page_folio(page); } /* * Make sure huge_gfp is always more limited than limit_gfp. * Some of the flags set permissions, while others set limitations. */ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp) { gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM; gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY; gfp_t zoneflags = limit_gfp & GFP_ZONEMASK; gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK); /* Allow allocations only from the originally specified zones. */ result |= zoneflags; /* * Minimize the result gfp by taking the union with the deny flags, * and the intersection of the allow flags. */ result |= (limit_gfp & denyflags); result |= (huge_gfp & limit_gfp) & allowflags; return result; } static struct folio *shmem_alloc_hugefolio(gfp_t gfp, struct shmem_inode_info *info, pgoff_t index) { struct vm_area_struct pvma; struct address_space *mapping = info->vfs_inode.i_mapping; pgoff_t hindex; struct folio *folio; hindex = round_down(index, HPAGE_PMD_NR); if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1, XA_PRESENT)) return NULL; shmem_pseudo_vma_init(&pvma, info, hindex); folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, &pvma, 0, true); shmem_pseudo_vma_destroy(&pvma); if (!folio) count_vm_event(THP_FILE_FALLBACK); return folio; } static struct folio *shmem_alloc_folio(gfp_t gfp, struct shmem_inode_info *info, pgoff_t index) { struct vm_area_struct pvma; struct folio *folio; shmem_pseudo_vma_init(&pvma, info, index); folio = vma_alloc_folio(gfp, 0, &pvma, 0, false); shmem_pseudo_vma_destroy(&pvma); return folio; } static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode, pgoff_t index, bool huge) { struct shmem_inode_info *info = SHMEM_I(inode); struct folio *folio; int nr; int err; if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) huge = false; nr = huge ? HPAGE_PMD_NR : 1; err = shmem_inode_acct_block(inode, nr); if (err) goto failed; if (huge) folio = shmem_alloc_hugefolio(gfp, info, index); else folio = shmem_alloc_folio(gfp, info, index); if (folio) { __folio_set_locked(folio); __folio_set_swapbacked(folio); return folio; } err = -ENOMEM; shmem_inode_unacct_blocks(inode, nr); failed: return ERR_PTR(err); } /* * When a page is moved from swapcache to shmem filecache (either by the * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of * shmem_unuse_inode()), it may have been read in earlier from swap, in * ignorance of the mapping it belongs to. If that mapping has special * constraints (like the gma500 GEM driver, which requires RAM below 4GB), * we may need to copy to a suitable page before moving to filecache. * * In a future release, this may well be extended to respect cpuset and * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); * but for now it is a simple matter of zone. 
*/ static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp) { return folio_zonenum(folio) > gfp_zone(gfp); } static int shmem_replace_folio(struct folio **foliop, gfp_t gfp, struct shmem_inode_info *info, pgoff_t index) { struct folio *old, *new; struct address_space *swap_mapping; swp_entry_t entry; pgoff_t swap_index; int error; old = *foliop; entry = old->swap; swap_index = swp_offset(entry); swap_mapping = swap_address_space(entry); /* * We have arrived here because our zones are constrained, so don't * limit chance of success by further cpuset and node constraints. */ gfp &= ~GFP_CONSTRAINT_MASK; VM_BUG_ON_FOLIO(folio_test_large(old), old); new = shmem_alloc_folio(gfp, info, index); if (!new) return -ENOMEM; folio_get(new); folio_copy(new, old); flush_dcache_folio(new); __folio_set_locked(new); __folio_set_swapbacked(new); folio_mark_uptodate(new); new->swap = entry; folio_set_swapcache(new); /* * Our caller will very soon move newpage out of swapcache, but it's * a nice clean interface for us to replace oldpage by newpage there. */ xa_lock_irq(&swap_mapping->i_pages); error = shmem_replace_entry(swap_mapping, swap_index, old, new); if (!error) { mem_cgroup_migrate(old, new); __lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1); __lruvec_stat_mod_folio(new, NR_SHMEM, 1); __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1); __lruvec_stat_mod_folio(old, NR_SHMEM, -1); } xa_unlock_irq(&swap_mapping->i_pages); if (unlikely(error)) { /* * Is this possible? I think not, now that our callers check * both PageSwapCache and page_private after getting page lock; * but be defensive. Reverse old to newpage for clear and free. */ old = new; } else { folio_add_lru(new); *foliop = new; } folio_clear_swapcache(old); old->private = NULL; folio_unlock(old); folio_put_refs(old, 2); return error; } static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, struct folio *folio, swp_entry_t swap) { struct address_space *mapping = inode->i_mapping; swp_entry_t swapin_error; void *old; swapin_error = make_poisoned_swp_entry(); old = xa_cmpxchg_irq(&mapping->i_pages, index, swp_to_radix_entry(swap), swp_to_radix_entry(swapin_error), 0); if (old != swp_to_radix_entry(swap)) return; folio_wait_writeback(folio); delete_from_swap_cache(folio); /* * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks) * in shmem_evict_inode(). */ shmem_recalc_inode(inode, -1, -1); swap_free(swap); } /* * Swap in the folio pointed to by *foliop. * Caller has to make sure that *foliop contains a valid swapped folio. * Returns 0 and the folio in foliop if success. On failure, returns the * error code and NULL in *foliop. */ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, struct folio **foliop, enum sgp_type sgp, gfp_t gfp, struct vm_area_struct *vma, vm_fault_t *fault_type) { struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL; struct swap_info_struct *si; struct folio *folio = NULL; swp_entry_t swap; int error; VM_BUG_ON(!*foliop || !xa_is_value(*foliop)); swap = radix_to_swp_entry(*foliop); *foliop = NULL; if (is_poisoned_swp_entry(swap)) return -EIO; si = get_swap_device(swap); if (!si) { if (!shmem_confirm_swap(mapping, index, swap)) return -EEXIST; else return -EINVAL; } /* Look it up and read it in.. 
*/ folio = swap_cache_get_folio(swap, NULL, 0); if (!folio) { /* Or update major stats only when swapin succeeds?? */ if (fault_type) { *fault_type |= VM_FAULT_MAJOR; count_vm_event(PGMAJFAULT); count_memcg_event_mm(charge_mm, PGMAJFAULT); } /* Here we actually start the io */ folio = shmem_swapin(swap, gfp, info, index); if (!folio) { error = -ENOMEM; goto failed; } } /* We have to do this with folio locked to prevent races */ folio_lock(folio); if (!folio_test_swapcache(folio) || folio->swap.val != swap.val || !shmem_confirm_swap(mapping, index, swap)) { error = -EEXIST; goto unlock; } if (!folio_test_uptodate(folio)) { error = -EIO; goto failed; } folio_wait_writeback(folio); /* * Some architectures may have to restore extra metadata to the * folio after reading from swap. */ arch_swap_restore(swap, folio); if (shmem_should_replace_folio(folio, gfp)) { error = shmem_replace_folio(&folio, gfp, info, index); if (error) goto failed; } error = shmem_add_to_page_cache(folio, mapping, index, swp_to_radix_entry(swap), gfp, charge_mm); if (error) goto failed; shmem_recalc_inode(inode, 0, -1); if (sgp == SGP_WRITE) folio_mark_accessed(folio); delete_from_swap_cache(folio); folio_mark_dirty(folio); swap_free(swap); put_swap_device(si); *foliop = folio; return 0; failed: if (!shmem_confirm_swap(mapping, index, swap)) error = -EEXIST; if (error == -EIO) shmem_set_folio_swapin_error(inode, index, folio, swap); unlock: if (folio) { folio_unlock(folio); folio_put(folio); } put_swap_device(si); return error; } /* * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate * * If we allocate a new one we do not mark it dirty. That's up to the * vm. If we swap it in we mark it dirty since we also free the swap * entry since a page cannot live in both the swap and page cache. * * vma, vmf, and fault_type are only supplied by shmem_fault: * otherwise they are NULL. */ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, struct folio **foliop, enum sgp_type sgp, gfp_t gfp, struct vm_area_struct *vma, struct vm_fault *vmf, vm_fault_t *fault_type) { struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo; struct mm_struct *charge_mm; struct folio *folio; pgoff_t hindex; gfp_t huge_gfp; int error; int once = 0; int alloced = 0; if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) return -EFBIG; repeat: if (sgp <= SGP_CACHE && ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { return -EINVAL; } sbinfo = SHMEM_SB(inode->i_sb); charge_mm = vma ? vma->vm_mm : NULL; folio = filemap_get_entry(mapping, index); if (folio && vma && userfaultfd_minor(vma)) { if (!xa_is_value(folio)) folio_put(folio); *fault_type = handle_userfault(vmf, VM_UFFD_MINOR); return 0; } if (xa_is_value(folio)) { error = shmem_swapin_folio(inode, index, &folio, sgp, gfp, vma, fault_type); if (error == -EEXIST) goto repeat; *foliop = folio; return error; } if (folio) { folio_lock(folio); /* Has the folio been truncated or swapped out? */ if (unlikely(folio->mapping != mapping)) { folio_unlock(folio); folio_put(folio); goto repeat; } if (sgp == SGP_WRITE) folio_mark_accessed(folio); if (folio_test_uptodate(folio)) goto out; /* fallocated folio */ if (sgp != SGP_READ) goto clear; folio_unlock(folio); folio_put(folio); } /* * SGP_READ: succeed on hole, with NULL folio, letting caller zero. * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail. 
*/ *foliop = NULL; if (sgp == SGP_READ) return 0; if (sgp == SGP_NOALLOC) return -ENOENT; /* * Fast cache lookup and swap lookup did not find it: allocate. */ if (vma && userfaultfd_missing(vma)) { *fault_type = handle_userfault(vmf, VM_UFFD_MISSING); return 0; } if (!shmem_is_huge(inode, index, false, vma ? vma->vm_mm : NULL, vma ? vma->vm_flags : 0)) goto alloc_nohuge; huge_gfp = vma_thp_gfp_mask(vma); huge_gfp = limit_gfp_mask(huge_gfp, gfp); folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true); if (IS_ERR(folio)) { alloc_nohuge: folio = shmem_alloc_and_acct_folio(gfp, inode, index, false); } if (IS_ERR(folio)) { int retry = 5; error = PTR_ERR(folio); folio = NULL; if (error != -ENOSPC) goto unlock; /* * Try to reclaim some space by splitting a large folio * beyond i_size on the filesystem. */ while (retry--) { int ret; ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); if (ret == SHRINK_STOP) break; if (ret) goto alloc_nohuge; } goto unlock; } hindex = round_down(index, folio_nr_pages(folio)); if (sgp == SGP_WRITE) __folio_set_referenced(folio); error = shmem_add_to_page_cache(folio, mapping, hindex, NULL, gfp & GFP_RECLAIM_MASK, charge_mm); if (error) goto unacct; folio_add_lru(folio); shmem_recalc_inode(inode, folio_nr_pages(folio), 0); alloced = true; if (folio_test_pmd_mappable(folio) && DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < folio_next_index(folio) - 1) { /* * Part of the large folio is beyond i_size: subject * to shrink under memory pressure. */ spin_lock(&sbinfo->shrinklist_lock); /* * _careful to defend against unlocked access to * ->shrink_list in shmem_unused_huge_shrink() */ if (list_empty_careful(&info->shrinklist)) { list_add_tail(&info->shrinklist, &sbinfo->shrinklist); sbinfo->shrinklist_len++; } spin_unlock(&sbinfo->shrinklist_lock); } /* * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio. */ if (sgp == SGP_FALLOC) sgp = SGP_WRITE; clear: /* * Let SGP_WRITE caller clear ends if write does not fill folio; * but SGP_FALLOC on a folio fallocated earlier must initialize * it now, lest undo on failure cancel our earlier guarantee. */ if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) { long i, n = folio_nr_pages(folio); for (i = 0; i < n; i++) clear_highpage(folio_page(folio, i)); flush_dcache_folio(folio); folio_mark_uptodate(folio); } /* Perhaps the file has been truncated since we checked */ if (sgp <= SGP_CACHE && ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { if (alloced) { folio_clear_dirty(folio); filemap_remove_folio(folio); shmem_recalc_inode(inode, 0, 0); } error = -EINVAL; goto unlock; } out: *foliop = folio; return 0; /* * Error recovery. */ unacct: shmem_inode_unacct_blocks(inode, folio_nr_pages(folio)); if (folio_test_large(folio)) { folio_unlock(folio); folio_put(folio); goto alloc_nohuge; } unlock: if (folio) { folio_unlock(folio); folio_put(folio); } if (error == -ENOSPC && !once++) { shmem_recalc_inode(inode, 0, 0); goto repeat; } if (error == -EEXIST) goto repeat; return error; } int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop, enum sgp_type sgp) { return shmem_get_folio_gfp(inode, index, foliop, sgp, mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL); } /* * This is like autoremove_wake_function, but it removes the wait queue * entry unconditionally - even if something else had already woken the * target. 
*/ static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) { int ret = default_wake_function(wait, mode, sync, key); list_del_init(&wait->entry); return ret; } static vm_fault_t shmem_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct inode *inode = file_inode(vma->vm_file); gfp_t gfp = mapping_gfp_mask(inode->i_mapping); struct folio *folio = NULL; int err; vm_fault_t ret = VM_FAULT_LOCKED; /* * Trinity finds that probing a hole which tmpfs is punching can * prevent the hole-punch from ever completing: which in turn * locks writers out with its hold on i_rwsem. So refrain from * faulting pages into the hole while it's being punched. Although * shmem_undo_range() does remove the additions, it may be unable to * keep up, as each new page needs its own unmap_mapping_range() call, * and the i_mmap tree grows ever slower to scan if new vmas are added. * * It does not matter if we sometimes reach this check just before the * hole-punch begins, so that one fault then races with the punch: * we just need to make racing faults a rare case. * * The implementation below would be much simpler if we just used a * standard mutex or completion: but we cannot take i_rwsem in fault, * and bloating every shmem inode for this unlikely case would be sad. */ if (unlikely(inode->i_private)) { struct shmem_falloc *shmem_falloc; spin_lock(&inode->i_lock); shmem_falloc = inode->i_private; if (shmem_falloc && shmem_falloc->waitq && vmf->pgoff >= shmem_falloc->start && vmf->pgoff < shmem_falloc->next) { struct file *fpin; wait_queue_head_t *shmem_falloc_waitq; DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); ret = VM_FAULT_NOPAGE; fpin = maybe_unlock_mmap_for_io(vmf, NULL); if (fpin) ret = VM_FAULT_RETRY; shmem_falloc_waitq = shmem_falloc->waitq; prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, TASK_UNINTERRUPTIBLE); spin_unlock(&inode->i_lock); schedule(); /* * shmem_falloc_waitq points into the shmem_fallocate() * stack of the hole-punching task: shmem_falloc_waitq * is usually invalid by the time we reach here, but * finish_wait() does not dereference it in that case; * though i_lock needed lest racing with wake_up_all(). */ spin_lock(&inode->i_lock); finish_wait(shmem_falloc_waitq, &shmem_fault_wait); spin_unlock(&inode->i_lock); if (fpin) fput(fpin); return ret; } spin_unlock(&inode->i_lock); } err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE, gfp, vma, vmf, &ret); if (err) return vmf_error(err); if (folio) vmf->page = folio_file_page(folio, vmf->pgoff); return ret; } unsigned long shmem_get_unmapped_area(struct file *file, unsigned long uaddr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long addr; unsigned long offset; unsigned long inflated_len; unsigned long inflated_addr; unsigned long inflated_offset; if (len > TASK_SIZE) return -ENOMEM; get_area = current->mm->get_unmapped_area; addr = get_area(file, uaddr, len, pgoff, flags); if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return addr; if (IS_ERR_VALUE(addr)) return addr; if (addr & ~PAGE_MASK) return addr; if (addr > TASK_SIZE - len) return addr; if (shmem_huge == SHMEM_HUGE_DENY) return addr; if (len < HPAGE_PMD_SIZE) return addr; if (flags & MAP_FIXED) return addr; /* * Our priority is to support MAP_SHARED mapped hugely; * and support MAP_PRIVATE mapped hugely too, until it is COWed. 
* But if caller specified an address hint and we allocated area there * successfully, respect that as before. */ if (uaddr == addr) return addr; if (shmem_huge != SHMEM_HUGE_FORCE) { struct super_block *sb; if (file) { VM_BUG_ON(file->f_op != &shmem_file_operations); sb = file_inode(file)->i_sb; } else { /* * Called directly from mm/mmap.c, or drivers/char/mem.c * for "/dev/zero", to create a shared anonymous object. */ if (IS_ERR(shm_mnt)) return addr; sb = shm_mnt->mnt_sb; } if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) return addr; } offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); if (offset && offset + len < 2 * HPAGE_PMD_SIZE) return addr; if ((addr & (HPAGE_PMD_SIZE-1)) == offset) return addr; inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; if (inflated_len > TASK_SIZE) return addr; if (inflated_len < len) return addr; inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags); if (IS_ERR_VALUE(inflated_addr)) return addr; if (inflated_addr & ~PAGE_MASK) return addr; inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); inflated_addr += offset - inflated_offset; if (inflated_offset > offset) inflated_addr += HPAGE_PMD_SIZE; if (inflated_addr > TASK_SIZE - len) return addr; return inflated_addr; } #ifdef CONFIG_NUMA static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) { struct inode *inode = file_inode(vma->vm_file); return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); } static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, unsigned long addr) { struct inode *inode = file_inode(vma->vm_file); pgoff_t index; index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); } #endif int shmem_lock(struct file *file, int lock, struct ucounts *ucounts) { struct inode *inode = file_inode(file); struct shmem_inode_info *info = SHMEM_I(inode); int retval = -ENOMEM; /* * What serializes the accesses to info->flags? * ipc_lock_object() when called from shmctl_do_lock(), * no serialization needed when called from shm_destroy(). */ if (lock && !(info->flags & VM_LOCKED)) { if (!user_shm_lock(inode->i_size, ucounts)) goto out_nomem; info->flags |= VM_LOCKED; mapping_set_unevictable(file->f_mapping); } if (!lock && (info->flags & VM_LOCKED) && ucounts) { user_shm_unlock(inode->i_size, ucounts); info->flags &= ~VM_LOCKED; mapping_clear_unevictable(file->f_mapping); } retval = 0; out_nomem: return retval; } static int shmem_mmap(struct file *file, struct vm_area_struct *vma) { struct inode *inode = file_inode(file); struct shmem_inode_info *info = SHMEM_I(inode); int ret; ret = seal_check_future_write(info->seals, vma); if (ret) return ret; /* arm64 - allow memory tagging on RAM-based files */ vm_flags_set(vma, VM_MTE_ALLOWED); file_accessed(file); /* This is anonymous shared memory if it is unlinked at the time of mmap */ if (inode->i_nlink) vma->vm_ops = &shmem_vm_ops; else vma->vm_ops = &shmem_anon_vm_ops; return 0; } static int shmem_file_open(struct inode *inode, struct file *file) { file->f_mode |= FMODE_CAN_ODIRECT; return generic_file_open(inode, file); } #ifdef CONFIG_TMPFS_XATTR static int shmem_initxattrs(struct inode *, const struct xattr *, void *); /* * chattr's fsflags are unrelated to extended attributes, * but tmpfs has chosen to enable them under the same config option. 
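 *
 * For illustration only (an assumption, not taken from this file): chattr(1)
 * reaches shmem_fileattr_set()/shmem_set_inode_flags() below through the
 * FS_IOC_SETFLAGS ioctl, roughly:
 *
 *	int attr;
 *	ioctl(fd, FS_IOC_GETFLAGS, &attr);
 *	attr |= FS_NOATIME_FL;			// chattr +A
 *	ioctl(fd, FS_IOC_SETFLAGS, &attr);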
 */
static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
{
	unsigned int i_flags = 0;

	if (fsflags & FS_NOATIME_FL)
		i_flags |= S_NOATIME;
	if (fsflags & FS_APPEND_FL)
		i_flags |= S_APPEND;
	if (fsflags & FS_IMMUTABLE_FL)
		i_flags |= S_IMMUTABLE;
	/*
	 * But FS_NODUMP_FL does not require any action in i_flags.
	 */
	inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE);
}
#else
static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
{
}
#define shmem_initxattrs NULL
#endif

static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode)
{
	return &SHMEM_I(inode)->dir_offsets;
}

static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
				       struct super_block *sb,
				       struct inode *dir, umode_t mode,
				       dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;
	int err;

	err = shmem_reserve_inode(sb, &ino);
	if (err)
		return ERR_PTR(err);

	inode = new_inode(sb);
	if (!inode) {
		shmem_free_inode(sb, 0);
		return ERR_PTR(-ENOSPC);
	}

	inode->i_ino = ino;
	inode_init_owner(idmap, inode, dir, mode);
	inode->i_blocks = 0;
	inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
	inode->i_generation = get_random_u32();
	info = SHMEM_I(inode);
	memset(info, 0, (char *)inode - (char *)info);
	spin_lock_init(&info->lock);
	atomic_set(&info->stop_eviction, 0);
	info->seals = F_SEAL_SEAL;
	info->flags = flags & VM_NORESERVE;
	info->i_crtime = inode->i_mtime;
	info->fsflags = (dir == NULL) ? 0 :
		SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
	if (info->fsflags)
		shmem_set_inode_flags(inode, info->fsflags);
	INIT_LIST_HEAD(&info->shrinklist);
	INIT_LIST_HEAD(&info->swaplist);
	if (sbinfo->noswap)
		mapping_set_unevictable(inode->i_mapping);
	simple_xattrs_init(&info->xattrs);
	cache_no_acl(inode);
	mapping_set_large_folios(inode->i_mapping);

	switch (mode & S_IFMT) {
	default:
		inode->i_op = &shmem_special_inode_operations;
		init_special_inode(inode, mode, dev);
		break;
	case S_IFREG:
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_inode_operations;
		inode->i_fop = &shmem_file_operations;
		mpol_shared_policy_init(&info->policy,
					shmem_get_sbmpol(sbinfo));
		break;
	case S_IFDIR:
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * BOGO_DIRENT_SIZE;
		inode->i_op = &shmem_dir_inode_operations;
		inode->i_fop = &simple_offset_dir_operations;
		simple_offset_init(shmem_get_offset_ctx(inode));
		break;
	case S_IFLNK:
		/*
		 * Must not load anything in the rbtree,
		 * mpol_free_shared_policy will not be called.
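		 * (shmem_destroy_inode() frees the shared policy only for
		 * S_ISREG inodes, so it must be left empty here.)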
*/ mpol_shared_policy_init(&info->policy, NULL); break; } lockdep_annotate_inode_mutex_key(inode); return inode; } #ifdef CONFIG_TMPFS_QUOTA static struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block *sb, struct inode *dir, umode_t mode, dev_t dev, unsigned long flags) { int err; struct inode *inode; inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags); if (IS_ERR(inode)) return inode; err = dquot_initialize(inode); if (err) goto errout; err = dquot_alloc_inode(inode); if (err) { dquot_drop(inode); goto errout; } return inode; errout: inode->i_flags |= S_NOQUOTA; iput(inode); return ERR_PTR(err); } #else static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block *sb, struct inode *dir, umode_t mode, dev_t dev, unsigned long flags) { return __shmem_get_inode(idmap, sb, dir, mode, dev, flags); } #endif /* CONFIG_TMPFS_QUOTA */ #ifdef CONFIG_USERFAULTFD int shmem_mfill_atomic_pte(pmd_t *dst_pmd, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, uffd_flags_t flags, struct folio **foliop) { struct inode *inode = file_inode(dst_vma->vm_file); struct shmem_inode_info *info = SHMEM_I(inode); struct address_space *mapping = inode->i_mapping; gfp_t gfp = mapping_gfp_mask(mapping); pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); void *page_kaddr; struct folio *folio; int ret; pgoff_t max_off; if (shmem_inode_acct_block(inode, 1)) { /* * We may have got a page, returned -ENOENT triggering a retry, * and now we find ourselves with -ENOMEM. Release the page, to * avoid a BUG_ON in our caller. */ if (unlikely(*foliop)) { folio_put(*foliop); *foliop = NULL; } return -ENOMEM; } if (!*foliop) { ret = -ENOMEM; folio = shmem_alloc_folio(gfp, info, pgoff); if (!folio) goto out_unacct_blocks; if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) { page_kaddr = kmap_local_folio(folio, 0); /* * The read mmap_lock is held here. Despite the * mmap_lock being read recursive a deadlock is still * possible if a writer has taken a lock. For example: * * process A thread 1 takes read lock on own mmap_lock * process A thread 2 calls mmap, blocks taking write lock * process B thread 1 takes page fault, read lock on own mmap lock * process B thread 2 calls mmap, blocks taking write lock * process A thread 1 blocks taking read lock on process B * process B thread 1 blocks taking read lock on process A * * Disable page faults to prevent potential deadlock * and retry the copy outside the mmap_lock. 
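			 *
			 * The same scenario, condensed into lock operations:
			 *
			 *	A1: mmap_read_lock(A)    held
			 *	A2: mmap_write_lock(A)   waits behind A1
			 *	B1: mmap_read_lock(B)    held
			 *	B2: mmap_write_lock(B)   waits behind B1
			 *	A1: mmap_read_lock(B)    queued behind writer B2
			 *	B1: mmap_read_lock(A)    queued behind writer A2
			 *
			 * New readers queue behind already-waiting writers,
			 * so neither A1 nor B1 can make progress.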
*/ pagefault_disable(); ret = copy_from_user(page_kaddr, (const void __user *)src_addr, PAGE_SIZE); pagefault_enable(); kunmap_local(page_kaddr); /* fallback to copy_from_user outside mmap_lock */ if (unlikely(ret)) { *foliop = folio; ret = -ENOENT; /* don't free the page */ goto out_unacct_blocks; } flush_dcache_folio(folio); } else { /* ZEROPAGE */ clear_user_highpage(&folio->page, dst_addr); } } else { folio = *foliop; VM_BUG_ON_FOLIO(folio_test_large(folio), folio); *foliop = NULL; } VM_BUG_ON(folio_test_locked(folio)); VM_BUG_ON(folio_test_swapbacked(folio)); __folio_set_locked(folio); __folio_set_swapbacked(folio); __folio_mark_uptodate(folio); ret = -EFAULT; max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); if (unlikely(pgoff >= max_off)) goto out_release; ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp & GFP_RECLAIM_MASK, dst_vma->vm_mm); if (ret) goto out_release; ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr, &folio->page, true, flags); if (ret) goto out_delete_from_cache; shmem_recalc_inode(inode, 1, 0); folio_unlock(folio); return 0; out_delete_from_cache: filemap_remove_folio(folio); out_release: folio_unlock(folio); folio_put(folio); out_unacct_blocks: shmem_inode_unacct_blocks(inode, 1); return ret; } #endif /* CONFIG_USERFAULTFD */ #ifdef CONFIG_TMPFS static const struct inode_operations shmem_symlink_inode_operations; static const struct inode_operations shmem_short_symlink_operations; static int shmem_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct page **pagep, void **fsdata) { struct inode *inode = mapping->host; struct shmem_inode_info *info = SHMEM_I(inode); pgoff_t index = pos >> PAGE_SHIFT; struct folio *folio; int ret = 0; /* i_rwsem is held by caller */ if (unlikely(info->seals & (F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) { if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) return -EPERM; if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) return -EPERM; } ret = shmem_get_folio(inode, index, &folio, SGP_WRITE); if (ret) return ret; *pagep = folio_file_page(folio, index); if (PageHWPoison(*pagep)) { folio_unlock(folio); folio_put(folio); *pagep = NULL; return -EIO; } return 0; } static int shmem_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct folio *folio = page_folio(page); struct inode *inode = mapping->host; if (pos + copied > inode->i_size) i_size_write(inode, pos + copied); if (!folio_test_uptodate(folio)) { if (copied < folio_size(folio)) { size_t from = offset_in_folio(folio, pos); folio_zero_segments(folio, 0, from, from + copied, folio_size(folio)); } folio_mark_uptodate(folio); } folio_mark_dirty(folio); folio_unlock(folio); folio_put(folio); return copied; } static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct address_space *mapping = inode->i_mapping; pgoff_t index; unsigned long offset; int error = 0; ssize_t retval = 0; loff_t *ppos = &iocb->ki_pos; index = *ppos >> PAGE_SHIFT; offset = *ppos & ~PAGE_MASK; for (;;) { struct folio *folio = NULL; struct page *page = NULL; pgoff_t end_index; unsigned long nr, ret; loff_t i_size = i_size_read(inode); end_index = i_size >> PAGE_SHIFT; if (index > end_index) break; if (index == end_index) { nr = i_size & ~PAGE_MASK; if (nr <= offset) break; } error = shmem_get_folio(inode, index, &folio, SGP_READ); if 
(error) { if (error == -EINVAL) error = 0; break; } if (folio) { folio_unlock(folio); page = folio_file_page(folio, index); if (PageHWPoison(page)) { folio_put(folio); error = -EIO; break; } } /* * We must evaluate after, since reads (unlike writes) * are called without i_rwsem protection against truncate */ nr = PAGE_SIZE; i_size = i_size_read(inode); end_index = i_size >> PAGE_SHIFT; if (index == end_index) { nr = i_size & ~PAGE_MASK; if (nr <= offset) { if (folio) folio_put(folio); break; } } nr -= offset; if (folio) { /* * If users can be writing to this page using arbitrary * virtual addresses, take care about potential aliasing * before reading the page on the kernel side. */ if (mapping_writably_mapped(mapping)) flush_dcache_page(page); /* * Mark the page accessed if we read the beginning. */ if (!offset) folio_mark_accessed(folio); /* * Ok, we have the page, and it's up-to-date, so * now we can copy it to user space... */ ret = copy_page_to_iter(page, offset, nr, to); folio_put(folio); } else if (user_backed_iter(to)) { /* * Copy to user tends to be so well optimized, but * clear_user() not so much, that it is noticeably * faster to copy the zero page instead of clearing. */ ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to); } else { /* * But submitting the same page twice in a row to * splice() - or others? - can result in confusion: * so don't attempt that optimization on pipes etc. */ ret = iov_iter_zero(nr, to); } retval += ret; offset += ret; index += offset >> PAGE_SHIFT; offset &= ~PAGE_MASK; if (!iov_iter_count(to)) break; if (ret < nr) { error = -EFAULT; break; } cond_resched(); } *ppos = ((loff_t) index << PAGE_SHIFT) + offset; file_accessed(file); return retval ? retval : error; } static ssize_t shmem_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; ssize_t ret; inode_lock(inode); ret = generic_write_checks(iocb, from); if (ret <= 0) goto unlock; ret = file_remove_privs(file); if (ret) goto unlock; ret = file_update_time(file); if (ret) goto unlock; ret = generic_perform_write(iocb, from); unlock: inode_unlock(inode); return ret; } static bool zero_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { return true; } static void zero_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { } static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { return false; } static const struct pipe_buf_operations zero_pipe_buf_ops = { .release = zero_pipe_buf_release, .try_steal = zero_pipe_buf_try_steal, .get = zero_pipe_buf_get, }; static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe, loff_t fpos, size_t size) { size_t offset = fpos & ~PAGE_MASK; size = min_t(size_t, size, PAGE_SIZE - offset); if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) { struct pipe_buffer *buf = pipe_head_buf(pipe); *buf = (struct pipe_buffer) { .ops = &zero_pipe_buf_ops, .page = ZERO_PAGE(0), .offset = offset, .len = size, }; pipe->head++; } return size; } static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct inode *inode = file_inode(in); struct address_space *mapping = inode->i_mapping; struct folio *folio = NULL; size_t total_spliced = 0, used, npages, n, part; loff_t isize; int error = 0; /* Work out how much data we can actually add into the pipe */ used = pipe_occupancy(pipe->head, pipe->tail); npages = max_t(ssize_t, 
pipe->max_usage - used, 0); len = min_t(size_t, len, npages * PAGE_SIZE); do { if (*ppos >= i_size_read(inode)) break; error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio, SGP_READ); if (error) { if (error == -EINVAL) error = 0; break; } if (folio) { folio_unlock(folio); if (folio_test_hwpoison(folio) || (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) { error = -EIO; break; } } /* * i_size must be checked after we know the pages are Uptodate. * * Checking i_size after the check allows us to calculate * the correct value for "nr", which means the zero-filled * part of the page is not copied back to userspace (unless * another truncate extends the file - this is desired though). */ isize = i_size_read(inode); if (unlikely(*ppos >= isize)) break; part = min_t(loff_t, isize - *ppos, len); if (folio) { /* * If users can be writing to this page using arbitrary * virtual addresses, take care about potential aliasing * before reading the page on the kernel side. */ if (mapping_writably_mapped(mapping)) flush_dcache_folio(folio); folio_mark_accessed(folio); /* * Ok, we have the page, and it's up-to-date, so we can * now splice it into the pipe. */ n = splice_folio_into_pipe(pipe, folio, *ppos, part); folio_put(folio); folio = NULL; } else { n = splice_zeropage_into_pipe(pipe, *ppos, part); } if (!n) break; len -= n; total_spliced += n; *ppos += n; in->f_ra.prev_pos = *ppos; if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) break; cond_resched(); } while (len); if (folio) folio_put(folio); file_accessed(in); return total_spliced ? total_spliced : error; } static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) { struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; if (whence != SEEK_DATA && whence != SEEK_HOLE) return generic_file_llseek_size(file, offset, whence, MAX_LFS_FILESIZE, i_size_read(inode)); if (offset < 0) return -ENXIO; inode_lock(inode); /* We're holding i_rwsem so we can access i_size directly */ offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence); if (offset >= 0) offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); inode_unlock(inode); return offset; } static long shmem_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct inode *inode = file_inode(file); struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_falloc shmem_falloc; pgoff_t start, index, end, undo_fallocend; int error; if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) return -EOPNOTSUPP; inode_lock(inode); if (mode & FALLOC_FL_PUNCH_HOLE) { struct address_space *mapping = file->f_mapping; loff_t unmap_start = round_up(offset, PAGE_SIZE); loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); /* protected by i_rwsem */ if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { error = -EPERM; goto out; } shmem_falloc.waitq = &shmem_falloc_waitq; shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT; shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; spin_lock(&inode->i_lock); inode->i_private = &shmem_falloc; spin_unlock(&inode->i_lock); if ((u64)unmap_end > (u64)unmap_start) unmap_mapping_range(mapping, unmap_start, 1 + unmap_end - unmap_start, 0); shmem_truncate_range(inode, offset, offset + len - 1); /* No need to unmap again: hole-punching leaves COWed pages */ spin_lock(&inode->i_lock); inode->i_private = NULL; wake_up_all(&shmem_falloc_waitq); 
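		/*
		 * Each waiter was parked by shmem_fault() with
		 * synchronous_wake_function(), which takes its entry off the
		 * list as it is woken, so the list should be empty by now.
		 */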
WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head)); spin_unlock(&inode->i_lock); error = 0; goto out; } /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ error = inode_newsize_ok(inode, offset + len); if (error) goto out; if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { error = -EPERM; goto out; } start = offset >> PAGE_SHIFT; end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; /* Try to avoid a swapstorm if len is impossible to satisfy */ if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { error = -ENOSPC; goto out; } shmem_falloc.waitq = NULL; shmem_falloc.start = start; shmem_falloc.next = start; shmem_falloc.nr_falloced = 0; shmem_falloc.nr_unswapped = 0; spin_lock(&inode->i_lock); inode->i_private = &shmem_falloc; spin_unlock(&inode->i_lock); /* * info->fallocend is only relevant when huge pages might be * involved: to prevent split_huge_page() freeing fallocated * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size. */ undo_fallocend = info->fallocend; if (info->fallocend < end) info->fallocend = end; for (index = start; index < end; ) { struct folio *folio; /* * Good, the fallocate(2) manpage permits EINTR: we may have * been interrupted because we are using up too much memory. */ if (signal_pending(current)) error = -EINTR; else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) error = -ENOMEM; else error = shmem_get_folio(inode, index, &folio, SGP_FALLOC); if (error) { info->fallocend = undo_fallocend; /* Remove the !uptodate folios we added */ if (index > start) { shmem_undo_range(inode, (loff_t)start << PAGE_SHIFT, ((loff_t)index << PAGE_SHIFT) - 1, true); } goto undone; } /* * Here is a more important optimization than it appears: * a second SGP_FALLOC on the same large folio will clear it, * making it uptodate and un-undoable if we fail later. */ index = folio_next_index(folio); /* Beware 32-bit wraparound */ if (!index) index--; /* * Inform shmem_writepage() how far we have reached. * No need for lock or barrier: we have the page lock. */ if (!folio_test_uptodate(folio)) shmem_falloc.nr_falloced += index - shmem_falloc.next; shmem_falloc.next = index; /* * If !uptodate, leave it that way so that freeable folios * can be recognized if we need to rollback on error later. * But mark it dirty so that memory pressure will swap rather * than free the folios we are allocating (and SGP_CACHE folios * might still be clean: we now need to mark those dirty too). */ folio_mark_dirty(folio); folio_unlock(folio); folio_put(folio); cond_resched(); } if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) i_size_write(inode, offset + len); undone: spin_lock(&inode->i_lock); inode->i_private = NULL; spin_unlock(&inode->i_lock); out: if (!error) file_modified(file); inode_unlock(inode); return error; } static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) { struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); buf->f_type = TMPFS_MAGIC; buf->f_bsize = PAGE_SIZE; buf->f_namelen = NAME_MAX; if (sbinfo->max_blocks) { buf->f_blocks = sbinfo->max_blocks; buf->f_bavail = buf->f_bfree = sbinfo->max_blocks - percpu_counter_sum(&sbinfo->used_blocks); } if (sbinfo->max_inodes) { buf->f_files = sbinfo->max_inodes; buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE; } /* else leave those fields 0 like simple_statfs */ buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b); return 0; } /* * File creation. Allocate an inode, and we're done.. 
*/ static int shmem_mknod(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { struct inode *inode; int error; inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE); if (IS_ERR(inode)) return PTR_ERR(inode); error = simple_acl_create(dir, inode); if (error) goto out_iput; error = security_inode_init_security(inode, dir, &dentry->d_name, shmem_initxattrs, NULL); if (error && error != -EOPNOTSUPP) goto out_iput; error = simple_offset_add(shmem_get_offset_ctx(dir), dentry); if (error) goto out_iput; dir->i_size += BOGO_DIRENT_SIZE; dir->i_mtime = inode_set_ctime_current(dir); inode_inc_iversion(dir); d_instantiate(dentry, inode); dget(dentry); /* Extra count - pin the dentry in core */ return error; out_iput: iput(inode); return error; } static int shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir, struct file *file, umode_t mode) { struct inode *inode; int error; inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE); if (IS_ERR(inode)) { error = PTR_ERR(inode); goto err_out; } error = security_inode_init_security(inode, dir, NULL, shmem_initxattrs, NULL); if (error && error != -EOPNOTSUPP) goto out_iput; error = simple_acl_create(dir, inode); if (error) goto out_iput; d_tmpfile(file, inode); err_out: return finish_open_simple(file, error); out_iput: iput(inode); return error; } static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { int error; error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0); if (error) return error; inc_nlink(dir); return 0; } static int shmem_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0); } /* * Link a file.. */ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(old_dentry); int ret = 0; /* * No ordinary (disk based) filesystem counts links as inodes; * but each new link needs a new dentry, pinning lowmem, and * tmpfs dentries cannot be pruned until they are unlinked. * But if an O_TMPFILE file is linked into the tmpfs, the * first link must skip that, to get the accounting right. 
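	 *
	 * Illustrative userspace sequence for that O_TMPFILE case (an
	 * illustration only, assuming /dev/shm is a tmpfs mount):
	 *
	 *	fd = open("/dev/shm", O_TMPFILE | O_RDWR, 0600);
	 *	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
	 *	linkat(AT_FDCWD, path, AT_FDCWD, "/dev/shm/named",
	 *	       AT_SYMLINK_FOLLOW);
	 *
	 * At that linkat(), i_nlink is still 0: the check below then skips
	 * shmem_reserve_inode().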
*/ if (inode->i_nlink) { ret = shmem_reserve_inode(inode->i_sb, NULL); if (ret) goto out; } ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry); if (ret) { if (inode->i_nlink) shmem_free_inode(inode->i_sb, 0); goto out; } dir->i_size += BOGO_DIRENT_SIZE; dir->i_mtime = inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)); inode_inc_iversion(dir); inc_nlink(inode); ihold(inode); /* New dentry reference */ dget(dentry); /* Extra pinning count for the created dentry */ d_instantiate(dentry, inode); out: return ret; } static int shmem_unlink(struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(dentry); if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) shmem_free_inode(inode->i_sb, 0); simple_offset_remove(shmem_get_offset_ctx(dir), dentry); dir->i_size -= BOGO_DIRENT_SIZE; dir->i_mtime = inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)); inode_inc_iversion(dir); drop_nlink(inode); dput(dentry); /* Undo the count from "create" - this does all the work */ return 0; } static int shmem_rmdir(struct inode *dir, struct dentry *dentry) { if (!simple_empty(dentry)) return -ENOTEMPTY; drop_nlink(d_inode(dentry)); drop_nlink(dir); return shmem_unlink(dir, dentry); } static int shmem_whiteout(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry) { struct dentry *whiteout; int error; whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); if (!whiteout) return -ENOMEM; error = shmem_mknod(idmap, old_dir, whiteout, S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); dput(whiteout); if (error) return error; /* * Cheat and hash the whiteout while the old dentry is still in * place, instead of playing games with FS_RENAME_DOES_D_MOVE. * * d_lookup() will consistently find one of them at this point, * not sure which one, but that isn't even important. */ d_rehash(whiteout); return 0; } /* * The VFS layer already does all the dentry stuff for rename, * we just have to decrement the usage count for the target if * it exists so that the VFS layer correctly free's it when it * gets overwritten. 
*/ static int shmem_rename2(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { struct inode *inode = d_inode(old_dentry); int they_are_dirs = S_ISDIR(inode->i_mode); int error; if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) return -EINVAL; if (flags & RENAME_EXCHANGE) return simple_offset_rename_exchange(old_dir, old_dentry, new_dir, new_dentry); if (!simple_empty(new_dentry)) return -ENOTEMPTY; if (flags & RENAME_WHITEOUT) { error = shmem_whiteout(idmap, old_dir, old_dentry); if (error) return error; } simple_offset_remove(shmem_get_offset_ctx(old_dir), old_dentry); error = simple_offset_add(shmem_get_offset_ctx(new_dir), old_dentry); if (error) return error; if (d_really_is_positive(new_dentry)) { (void) shmem_unlink(new_dir, new_dentry); if (they_are_dirs) { drop_nlink(d_inode(new_dentry)); drop_nlink(old_dir); } } else if (they_are_dirs) { drop_nlink(old_dir); inc_nlink(new_dir); } old_dir->i_size -= BOGO_DIRENT_SIZE; new_dir->i_size += BOGO_DIRENT_SIZE; simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry); inode_inc_iversion(old_dir); inode_inc_iversion(new_dir); return 0; } static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *symname) { int error; int len; struct inode *inode; struct folio *folio; len = strlen(symname) + 1; if (len > PAGE_SIZE) return -ENAMETOOLONG; inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0, VM_NORESERVE); if (IS_ERR(inode)) return PTR_ERR(inode); error = security_inode_init_security(inode, dir, &dentry->d_name, shmem_initxattrs, NULL); if (error && error != -EOPNOTSUPP) goto out_iput; error = simple_offset_add(shmem_get_offset_ctx(dir), dentry); if (error) goto out_iput; inode->i_size = len-1; if (len <= SHORT_SYMLINK_LEN) { inode->i_link = kmemdup(symname, len, GFP_KERNEL); if (!inode->i_link) { error = -ENOMEM; goto out_remove_offset; } inode->i_op = &shmem_short_symlink_operations; } else { inode_nohighmem(inode); error = shmem_get_folio(inode, 0, &folio, SGP_WRITE); if (error) goto out_remove_offset; inode->i_mapping->a_ops = &shmem_aops; inode->i_op = &shmem_symlink_inode_operations; memcpy(folio_address(folio), symname, len); folio_mark_uptodate(folio); folio_mark_dirty(folio); folio_unlock(folio); folio_put(folio); } dir->i_size += BOGO_DIRENT_SIZE; dir->i_mtime = inode_set_ctime_current(dir); inode_inc_iversion(dir); d_instantiate(dentry, inode); dget(dentry); return 0; out_remove_offset: simple_offset_remove(shmem_get_offset_ctx(dir), dentry); out_iput: iput(inode); return error; } static void shmem_put_link(void *arg) { folio_mark_accessed(arg); folio_put(arg); } static const char *shmem_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done) { struct folio *folio = NULL; int error; if (!dentry) { folio = filemap_get_folio(inode->i_mapping, 0); if (IS_ERR(folio)) return ERR_PTR(-ECHILD); if (PageHWPoison(folio_page(folio, 0)) || !folio_test_uptodate(folio)) { folio_put(folio); return ERR_PTR(-ECHILD); } } else { error = shmem_get_folio(inode, 0, &folio, SGP_READ); if (error) return ERR_PTR(error); if (!folio) return ERR_PTR(-ECHILD); if (PageHWPoison(folio_page(folio, 0))) { folio_unlock(folio); folio_put(folio); return ERR_PTR(-ECHILD); } folio_unlock(folio); } set_delayed_call(done, shmem_put_link, folio); return folio_address(folio); } #ifdef CONFIG_TMPFS_XATTR static int shmem_fileattr_get(struct dentry *dentry, struct fileattr 
*fa) { struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE); return 0; } static int shmem_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, struct fileattr *fa) { struct inode *inode = d_inode(dentry); struct shmem_inode_info *info = SHMEM_I(inode); if (fileattr_has_fsx(fa)) return -EOPNOTSUPP; if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE) return -EOPNOTSUPP; info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) | (fa->flags & SHMEM_FL_USER_MODIFIABLE); shmem_set_inode_flags(inode, info->fsflags); inode_set_ctime_current(inode); inode_inc_iversion(inode); return 0; } /* * Superblocks without xattr inode operations may get some security.* xattr * support from the LSM "for free". As soon as we have any other xattrs * like ACLs, we also need to implement the security.* handlers at * filesystem level, though. */ /* * Callback for security_inode_init_security() for acquiring xattrs. */ static int shmem_initxattrs(struct inode *inode, const struct xattr *xattr_array, void *fs_info) { struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); const struct xattr *xattr; struct simple_xattr *new_xattr; size_t ispace = 0; size_t len; if (sbinfo->max_inodes) { for (xattr = xattr_array; xattr->name != NULL; xattr++) { ispace += simple_xattr_space(xattr->name, xattr->value_len + XATTR_SECURITY_PREFIX_LEN); } if (ispace) { raw_spin_lock(&sbinfo->stat_lock); if (sbinfo->free_ispace < ispace) ispace = 0; else sbinfo->free_ispace -= ispace; raw_spin_unlock(&sbinfo->stat_lock); if (!ispace) return -ENOSPC; } } for (xattr = xattr_array; xattr->name != NULL; xattr++) { new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); if (!new_xattr) break; len = strlen(xattr->name) + 1; new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, GFP_KERNEL_ACCOUNT); if (!new_xattr->name) { kvfree(new_xattr); break; } memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN); memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, xattr->name, len); simple_xattr_add(&info->xattrs, new_xattr); } if (xattr->name != NULL) { if (ispace) { raw_spin_lock(&sbinfo->stat_lock); sbinfo->free_ispace += ispace; raw_spin_unlock(&sbinfo->stat_lock); } simple_xattrs_free(&info->xattrs, NULL); return -ENOMEM; } return 0; } static int shmem_xattr_handler_get(const struct xattr_handler *handler, struct dentry *unused, struct inode *inode, const char *name, void *buffer, size_t size) { struct shmem_inode_info *info = SHMEM_I(inode); name = xattr_full_name(handler, name); return simple_xattr_get(&info->xattrs, name, buffer, size); } static int shmem_xattr_handler_set(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *unused, struct inode *inode, const char *name, const void *value, size_t size, int flags) { struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); struct simple_xattr *old_xattr; size_t ispace = 0; name = xattr_full_name(handler, name); if (value && sbinfo->max_inodes) { ispace = simple_xattr_space(name, size); raw_spin_lock(&sbinfo->stat_lock); if (sbinfo->free_ispace < ispace) ispace = 0; else sbinfo->free_ispace -= ispace; raw_spin_unlock(&sbinfo->stat_lock); if (!ispace) return -ENOSPC; } old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags); if (!IS_ERR(old_xattr)) { ispace = 0; if (old_xattr && sbinfo->max_inodes) ispace = simple_xattr_space(old_xattr->name, old_xattr->size); 
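		/* space held by the replaced xattr is handed back to free_ispace below */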
simple_xattr_free(old_xattr); old_xattr = NULL; inode_set_ctime_current(inode); inode_inc_iversion(inode); } if (ispace) { raw_spin_lock(&sbinfo->stat_lock); sbinfo->free_ispace += ispace; raw_spin_unlock(&sbinfo->stat_lock); } return PTR_ERR(old_xattr); } static const struct xattr_handler shmem_security_xattr_handler = { .prefix = XATTR_SECURITY_PREFIX, .get = shmem_xattr_handler_get, .set = shmem_xattr_handler_set, }; static const struct xattr_handler shmem_trusted_xattr_handler = { .prefix = XATTR_TRUSTED_PREFIX, .get = shmem_xattr_handler_get, .set = shmem_xattr_handler_set, }; static const struct xattr_handler shmem_user_xattr_handler = { .prefix = XATTR_USER_PREFIX, .get = shmem_xattr_handler_get, .set = shmem_xattr_handler_set, }; static const struct xattr_handler *shmem_xattr_handlers[] = { &shmem_security_xattr_handler, &shmem_trusted_xattr_handler, &shmem_user_xattr_handler, NULL }; static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) { struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size); } #endif /* CONFIG_TMPFS_XATTR */ static const struct inode_operations shmem_short_symlink_operations = { .getattr = shmem_getattr, .setattr = shmem_setattr, .get_link = simple_get_link, #ifdef CONFIG_TMPFS_XATTR .listxattr = shmem_listxattr, #endif }; static const struct inode_operations shmem_symlink_inode_operations = { .getattr = shmem_getattr, .setattr = shmem_setattr, .get_link = shmem_get_link, #ifdef CONFIG_TMPFS_XATTR .listxattr = shmem_listxattr, #endif }; static struct dentry *shmem_get_parent(struct dentry *child) { return ERR_PTR(-ESTALE); } static int shmem_match(struct inode *ino, void *vfh) { __u32 *fh = vfh; __u64 inum = fh[2]; inum = (inum << 32) | fh[1]; return ino->i_ino == inum && fh[0] == ino->i_generation; } /* Find any alias of inode, but prefer a hashed alias */ static struct dentry *shmem_find_alias(struct inode *inode) { struct dentry *alias = d_find_alias(inode); return alias ?: d_find_any_alias(inode); } static struct dentry *shmem_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct inode *inode; struct dentry *dentry = NULL; u64 inum; if (fh_len < 3) return NULL; inum = fid->raw[2]; inum = (inum << 32) | fid->raw[1]; inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), shmem_match, fid->raw); if (inode) { dentry = shmem_find_alias(inode); iput(inode); } return dentry; } static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, struct inode *parent) { if (*len < 3) { *len = 3; return FILEID_INVALID; } if (inode_unhashed(inode)) { /* Unfortunately insert_inode_hash is not idempotent, * so as we hash inodes here rather than at creation * time, we need a lock to ensure we only try * to do it once */ static DEFINE_SPINLOCK(lock); spin_lock(&lock); if (inode_unhashed(inode)) __insert_inode_hash(inode, inode->i_ino + inode->i_generation); spin_unlock(&lock); } fh[0] = inode->i_generation; fh[1] = inode->i_ino; fh[2] = ((__u64)inode->i_ino) >> 32; *len = 3; return 1; } static const struct export_operations shmem_export_ops = { .get_parent = shmem_get_parent, .encode_fh = shmem_encode_fh, .fh_to_dentry = shmem_fh_to_dentry, }; enum shmem_param { Opt_gid, Opt_huge, Opt_mode, Opt_mpol, Opt_nr_blocks, Opt_nr_inodes, Opt_size, Opt_uid, Opt_inode32, Opt_inode64, Opt_noswap, Opt_quota, Opt_usrquota, Opt_grpquota, Opt_usrquota_block_hardlimit, Opt_usrquota_inode_hardlimit, Opt_grpquota_block_hardlimit, 
Opt_grpquota_inode_hardlimit, }; static const struct constant_table shmem_param_enums_huge[] = { {"never", SHMEM_HUGE_NEVER }, {"always", SHMEM_HUGE_ALWAYS }, {"within_size", SHMEM_HUGE_WITHIN_SIZE }, {"advise", SHMEM_HUGE_ADVISE }, {} }; const struct fs_parameter_spec shmem_fs_parameters[] = { fsparam_u32 ("gid", Opt_gid), fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge), fsparam_u32oct("mode", Opt_mode), fsparam_string("mpol", Opt_mpol), fsparam_string("nr_blocks", Opt_nr_blocks), fsparam_string("nr_inodes", Opt_nr_inodes), fsparam_string("size", Opt_size), fsparam_u32 ("uid", Opt_uid), fsparam_flag ("inode32", Opt_inode32), fsparam_flag ("inode64", Opt_inode64), fsparam_flag ("noswap", Opt_noswap), #ifdef CONFIG_TMPFS_QUOTA fsparam_flag ("quota", Opt_quota), fsparam_flag ("usrquota", Opt_usrquota), fsparam_flag ("grpquota", Opt_grpquota), fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit), fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit), fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit), fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit), #endif {} }; static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param) { struct shmem_options *ctx = fc->fs_private; struct fs_parse_result result; unsigned long long size; char *rest; int opt; kuid_t kuid; kgid_t kgid; opt = fs_parse(fc, shmem_fs_parameters, param, &result); if (opt < 0) return opt; switch (opt) { case Opt_size: size = memparse(param->string, &rest); if (*rest == '%') { size <<= PAGE_SHIFT; size *= totalram_pages(); do_div(size, 100); rest++; } if (*rest) goto bad_value; ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE); ctx->seen |= SHMEM_SEEN_BLOCKS; break; case Opt_nr_blocks: ctx->blocks = memparse(param->string, &rest); if (*rest || ctx->blocks > LONG_MAX) goto bad_value; ctx->seen |= SHMEM_SEEN_BLOCKS; break; case Opt_nr_inodes: ctx->inodes = memparse(param->string, &rest); if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE) goto bad_value; ctx->seen |= SHMEM_SEEN_INODES; break; case Opt_mode: ctx->mode = result.uint_32 & 07777; break; case Opt_uid: kuid = make_kuid(current_user_ns(), result.uint_32); if (!uid_valid(kuid)) goto bad_value; /* * The requested uid must be representable in the * filesystem's idmapping. */ if (!kuid_has_mapping(fc->user_ns, kuid)) goto bad_value; ctx->uid = kuid; break; case Opt_gid: kgid = make_kgid(current_user_ns(), result.uint_32); if (!gid_valid(kgid)) goto bad_value; /* * The requested gid must be representable in the * filesystem's idmapping. 
*/ if (!kgid_has_mapping(fc->user_ns, kgid)) goto bad_value; ctx->gid = kgid; break; case Opt_huge: ctx->huge = result.uint_32; if (ctx->huge != SHMEM_HUGE_NEVER && !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && has_transparent_hugepage())) goto unsupported_parameter; ctx->seen |= SHMEM_SEEN_HUGE; break; case Opt_mpol: if (IS_ENABLED(CONFIG_NUMA)) { mpol_put(ctx->mpol); ctx->mpol = NULL; if (mpol_parse_str(param->string, &ctx->mpol)) goto bad_value; break; } goto unsupported_parameter; case Opt_inode32: ctx->full_inums = false; ctx->seen |= SHMEM_SEEN_INUMS; break; case Opt_inode64: if (sizeof(ino_t) < 8) { return invalfc(fc, "Cannot use inode64 with <64bit inums in kernel\n"); } ctx->full_inums = true; ctx->seen |= SHMEM_SEEN_INUMS; break; case Opt_noswap: if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) { return invalfc(fc, "Turning off swap in unprivileged tmpfs mounts unsupported"); } ctx->noswap = true; ctx->seen |= SHMEM_SEEN_NOSWAP; break; case Opt_quota: if (fc->user_ns != &init_user_ns) return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported"); ctx->seen |= SHMEM_SEEN_QUOTA; ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP); break; case Opt_usrquota: if (fc->user_ns != &init_user_ns) return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported"); ctx->seen |= SHMEM_SEEN_QUOTA; ctx->quota_types |= QTYPE_MASK_USR; break; case Opt_grpquota: if (fc->user_ns != &init_user_ns) return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported"); ctx->seen |= SHMEM_SEEN_QUOTA; ctx->quota_types |= QTYPE_MASK_GRP; break; case Opt_usrquota_block_hardlimit: size = memparse(param->string, &rest); if (*rest || !size) goto bad_value; if (size > SHMEM_QUOTA_MAX_SPC_LIMIT) return invalfc(fc, "User quota block hardlimit too large."); ctx->qlimits.usrquota_bhardlimit = size; break; case Opt_grpquota_block_hardlimit: size = memparse(param->string, &rest); if (*rest || !size) goto bad_value; if (size > SHMEM_QUOTA_MAX_SPC_LIMIT) return invalfc(fc, "Group quota block hardlimit too large."); ctx->qlimits.grpquota_bhardlimit = size; break; case Opt_usrquota_inode_hardlimit: size = memparse(param->string, &rest); if (*rest || !size) goto bad_value; if (size > SHMEM_QUOTA_MAX_INO_LIMIT) return invalfc(fc, "User quota inode hardlimit too large."); ctx->qlimits.usrquota_ihardlimit = size; break; case Opt_grpquota_inode_hardlimit: size = memparse(param->string, &rest); if (*rest || !size) goto bad_value; if (size > SHMEM_QUOTA_MAX_INO_LIMIT) return invalfc(fc, "Group quota inode hardlimit too large."); ctx->qlimits.grpquota_ihardlimit = size; break; } return 0; unsupported_parameter: return invalfc(fc, "Unsupported parameter '%s'", param->key); bad_value: return invalfc(fc, "Bad value for '%s'", param->key); } static int shmem_parse_options(struct fs_context *fc, void *data) { char *options = data; if (options) { int err = security_sb_eat_lsm_opts(options, &fc->security); if (err) return err; } while (options != NULL) { char *this_char = options; for (;;) { /* * NUL-terminate this option: unfortunately, * mount options form a comma-separated list, * but mpol's nodelist may also contain commas. 
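			 *
			 * For example (illustrative): in "mpol=bind:0,2,size=1g"
			 * the comma before "2" is followed by a digit, so it is
			 * treated as part of the nodelist, while the comma
			 * before "size" terminates the mpol= option.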
*/ options = strchr(options, ','); if (options == NULL) break; options++; if (!isdigit(*options)) { options[-1] = '\0'; break; } } if (*this_char) { char *value = strchr(this_char, '='); size_t len = 0; int err; if (value) { *value++ = '\0'; len = strlen(value); } err = vfs_parse_fs_string(fc, this_char, value, len); if (err < 0) return err; } } return 0; } /* * Reconfigure a shmem filesystem. */ static int shmem_reconfigure(struct fs_context *fc) { struct shmem_options *ctx = fc->fs_private; struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); unsigned long used_isp; struct mempolicy *mpol = NULL; const char *err; raw_spin_lock(&sbinfo->stat_lock); used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace; if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) { if (!sbinfo->max_blocks) { err = "Cannot retroactively limit size"; goto out; } if (percpu_counter_compare(&sbinfo->used_blocks, ctx->blocks) > 0) { err = "Too small a size for current use"; goto out; } } if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) { if (!sbinfo->max_inodes) { err = "Cannot retroactively limit inodes"; goto out; } if (ctx->inodes * BOGO_INODE_SIZE < used_isp) { err = "Too few inodes for current use"; goto out; } } if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums && sbinfo->next_ino > UINT_MAX) { err = "Current inum too high to switch to 32-bit inums"; goto out; } if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) { err = "Cannot disable swap on remount"; goto out; } if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) { err = "Cannot enable swap on remount if it was disabled on first mount"; goto out; } if (ctx->seen & SHMEM_SEEN_QUOTA && !sb_any_quota_loaded(fc->root->d_sb)) { err = "Cannot enable quota on remount"; goto out; } #ifdef CONFIG_TMPFS_QUOTA #define CHANGED_LIMIT(name) \ (ctx->qlimits.name## hardlimit && \ (ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit)) if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) || CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) { err = "Cannot change global quota limit on remount"; goto out; } #endif /* CONFIG_TMPFS_QUOTA */ if (ctx->seen & SHMEM_SEEN_HUGE) sbinfo->huge = ctx->huge; if (ctx->seen & SHMEM_SEEN_INUMS) sbinfo->full_inums = ctx->full_inums; if (ctx->seen & SHMEM_SEEN_BLOCKS) sbinfo->max_blocks = ctx->blocks; if (ctx->seen & SHMEM_SEEN_INODES) { sbinfo->max_inodes = ctx->inodes; sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp; } /* * Preserve previous mempolicy unless mpol remount option was specified. 
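	 *
	 * For illustration only (an assumption, not taken from this file), a
	 * remount that both resizes the mount and replaces the policy:
	 *
	 *	mount("tmpfs", "/dev/shm", "tmpfs", MS_REMOUNT,
	 *	      "size=2g,mpol=interleave");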
*/ if (ctx->mpol) { mpol = sbinfo->mpol; sbinfo->mpol = ctx->mpol; /* transfers initial ref */ ctx->mpol = NULL; } if (ctx->noswap) sbinfo->noswap = true; raw_spin_unlock(&sbinfo->stat_lock); mpol_put(mpol); return 0; out: raw_spin_unlock(&sbinfo->stat_lock); return invalfc(fc, "%s", err); } static int shmem_show_options(struct seq_file *seq, struct dentry *root) { struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); struct mempolicy *mpol; if (sbinfo->max_blocks != shmem_default_max_blocks()) seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks)); if (sbinfo->max_inodes != shmem_default_max_inodes()) seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); if (sbinfo->mode != (0777 | S_ISVTX)) seq_printf(seq, ",mode=%03ho", sbinfo->mode); if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, sbinfo->uid)); if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbinfo->gid)); /* * Showing inode{64,32} might be useful even if it's the system default, * since then people don't have to resort to checking both here and * /proc/config.gz to confirm 64-bit inums were successfully applied * (which may not even exist if IKCONFIG_PROC isn't enabled). * * We hide it when inode64 isn't the default and we are using 32-bit * inodes, since that probably just means the feature isn't even under * consideration. * * As such: * * +-----------------+-----------------+ * | TMPFS_INODE64=y | TMPFS_INODE64=n | * +------------------+-----------------+-----------------+ * | full_inums=true | show | show | * | full_inums=false | show | hide | * +------------------+-----------------+-----------------+ * */ if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums) seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32)); #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */ if (sbinfo->huge) seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); #endif mpol = shmem_get_sbmpol(sbinfo); shmem_show_mpol(seq, mpol); mpol_put(mpol); if (sbinfo->noswap) seq_printf(seq, ",noswap"); return 0; } #endif /* CONFIG_TMPFS */ static void shmem_put_super(struct super_block *sb) { struct shmem_sb_info *sbinfo = SHMEM_SB(sb); #ifdef CONFIG_TMPFS_QUOTA shmem_disable_quotas(sb); #endif free_percpu(sbinfo->ino_batch); percpu_counter_destroy(&sbinfo->used_blocks); mpol_put(sbinfo->mpol); kfree(sbinfo); sb->s_fs_info = NULL; } static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) { struct shmem_options *ctx = fc->fs_private; struct inode *inode; struct shmem_sb_info *sbinfo; int error = -ENOMEM; /* Round up to L1_CACHE_BYTES to resist false sharing */ sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL); if (!sbinfo) return error; sb->s_fs_info = sbinfo; #ifdef CONFIG_TMPFS /* * Per default we only allow half of the physical ram per * tmpfs instance, limiting inodes to one per page of lowmem; * but the internal instance is left unlimited. 
*/ if (!(sb->s_flags & SB_KERNMOUNT)) { if (!(ctx->seen & SHMEM_SEEN_BLOCKS)) ctx->blocks = shmem_default_max_blocks(); if (!(ctx->seen & SHMEM_SEEN_INODES)) ctx->inodes = shmem_default_max_inodes(); if (!(ctx->seen & SHMEM_SEEN_INUMS)) ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64); sbinfo->noswap = ctx->noswap; } else { sb->s_flags |= SB_NOUSER; } sb->s_export_op = &shmem_export_ops; sb->s_flags |= SB_NOSEC | SB_I_VERSION; #else sb->s_flags |= SB_NOUSER; #endif sbinfo->max_blocks = ctx->blocks; sbinfo->max_inodes = ctx->inodes; sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE; if (sb->s_flags & SB_KERNMOUNT) { sbinfo->ino_batch = alloc_percpu(ino_t); if (!sbinfo->ino_batch) goto failed; } sbinfo->uid = ctx->uid; sbinfo->gid = ctx->gid; sbinfo->full_inums = ctx->full_inums; sbinfo->mode = ctx->mode; sbinfo->huge = ctx->huge; sbinfo->mpol = ctx->mpol; ctx->mpol = NULL; raw_spin_lock_init(&sbinfo->stat_lock); if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) goto failed; spin_lock_init(&sbinfo->shrinklist_lock); INIT_LIST_HEAD(&sbinfo->shrinklist); sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = TMPFS_MAGIC; sb->s_op = &shmem_ops; sb->s_time_gran = 1; #ifdef CONFIG_TMPFS_XATTR sb->s_xattr = shmem_xattr_handlers; #endif #ifdef CONFIG_TMPFS_POSIX_ACL sb->s_flags |= SB_POSIXACL; #endif uuid_gen(&sb->s_uuid); #ifdef CONFIG_TMPFS_QUOTA if (ctx->seen & SHMEM_SEEN_QUOTA) { sb->dq_op = &shmem_quota_operations; sb->s_qcop = &dquot_quotactl_sysfile_ops; sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP; /* Copy the default limits from ctx into sbinfo */ memcpy(&sbinfo->qlimits, &ctx->qlimits, sizeof(struct shmem_quota_limits)); if (shmem_enable_quotas(sb, ctx->quota_types)) goto failed; } #endif /* CONFIG_TMPFS_QUOTA */ inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); if (IS_ERR(inode)) { error = PTR_ERR(inode); goto failed; } inode->i_uid = sbinfo->uid; inode->i_gid = sbinfo->gid; sb->s_root = d_make_root(inode); if (!sb->s_root) goto failed; return 0; failed: shmem_put_super(sb); return error; } static int shmem_get_tree(struct fs_context *fc) { return get_tree_nodev(fc, shmem_fill_super); } static void shmem_free_fc(struct fs_context *fc) { struct shmem_options *ctx = fc->fs_private; if (ctx) { mpol_put(ctx->mpol); kfree(ctx); } } static const struct fs_context_operations shmem_fs_context_ops = { .free = shmem_free_fc, .get_tree = shmem_get_tree, #ifdef CONFIG_TMPFS .parse_monolithic = shmem_parse_options, .parse_param = shmem_parse_one, .reconfigure = shmem_reconfigure, #endif }; static struct kmem_cache *shmem_inode_cachep; static struct inode *shmem_alloc_inode(struct super_block *sb) { struct shmem_inode_info *info; info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL); if (!info) return NULL; return &info->vfs_inode; } static void shmem_free_in_core_inode(struct inode *inode) { if (S_ISLNK(inode->i_mode)) kfree(inode->i_link); kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); } static void shmem_destroy_inode(struct inode *inode) { if (S_ISREG(inode->i_mode)) mpol_free_shared_policy(&SHMEM_I(inode)->policy); if (S_ISDIR(inode->i_mode)) simple_offset_destroy(shmem_get_offset_ctx(inode)); } static void shmem_init_inode(void *foo) { struct shmem_inode_info *info = foo; inode_init_once(&info->vfs_inode); } static void shmem_init_inodecache(void) { shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", sizeof(struct shmem_inode_info), 0, 
SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode); } static void shmem_destroy_inodecache(void) { kmem_cache_destroy(shmem_inode_cachep); } /* Keep the page in page cache instead of truncating it */ static int shmem_error_remove_page(struct address_space *mapping, struct page *page) { return 0; } const struct address_space_operations shmem_aops = { .writepage = shmem_writepage, .dirty_folio = noop_dirty_folio, #ifdef CONFIG_TMPFS .write_begin = shmem_write_begin, .write_end = shmem_write_end, #endif #ifdef CONFIG_MIGRATION .migrate_folio = migrate_folio, #endif .error_remove_page = shmem_error_remove_page, }; EXPORT_SYMBOL(shmem_aops); static const struct file_operations shmem_file_operations = { .mmap = shmem_mmap, .open = shmem_file_open, .get_unmapped_area = shmem_get_unmapped_area, #ifdef CONFIG_TMPFS .llseek = shmem_file_llseek, .read_iter = shmem_file_read_iter, .write_iter = shmem_file_write_iter, .fsync = noop_fsync, .splice_read = shmem_file_splice_read, .splice_write = iter_file_splice_write, .fallocate = shmem_fallocate, #endif }; static const struct inode_operations shmem_inode_operations = { .getattr = shmem_getattr, .setattr = shmem_setattr, #ifdef CONFIG_TMPFS_XATTR .listxattr = shmem_listxattr, .set_acl = simple_set_acl, .fileattr_get = shmem_fileattr_get, .fileattr_set = shmem_fileattr_set, #endif }; static const struct inode_operations shmem_dir_inode_operations = { #ifdef CONFIG_TMPFS .getattr = shmem_getattr, .create = shmem_create, .lookup = simple_lookup, .link = shmem_link, .unlink = shmem_unlink, .symlink = shmem_symlink, .mkdir = shmem_mkdir, .rmdir = shmem_rmdir, .mknod = shmem_mknod, .rename = shmem_rename2, .tmpfile = shmem_tmpfile, .get_offset_ctx = shmem_get_offset_ctx, #endif #ifdef CONFIG_TMPFS_XATTR .listxattr = shmem_listxattr, .fileattr_get = shmem_fileattr_get, .fileattr_set = shmem_fileattr_set, #endif #ifdef CONFIG_TMPFS_POSIX_ACL .setattr = shmem_setattr, .set_acl = simple_set_acl, #endif }; static const struct inode_operations shmem_special_inode_operations = { .getattr = shmem_getattr, #ifdef CONFIG_TMPFS_XATTR .listxattr = shmem_listxattr, #endif #ifdef CONFIG_TMPFS_POSIX_ACL .setattr = shmem_setattr, .set_acl = simple_set_acl, #endif }; static const struct super_operations shmem_ops = { .alloc_inode = shmem_alloc_inode, .free_inode = shmem_free_in_core_inode, .destroy_inode = shmem_destroy_inode, #ifdef CONFIG_TMPFS .statfs = shmem_statfs, .show_options = shmem_show_options, #endif #ifdef CONFIG_TMPFS_QUOTA .get_dquots = shmem_get_dquots, #endif .evict_inode = shmem_evict_inode, .drop_inode = generic_delete_inode, .put_super = shmem_put_super, #ifdef CONFIG_TRANSPARENT_HUGEPAGE .nr_cached_objects = shmem_unused_huge_count, .free_cached_objects = shmem_unused_huge_scan, #endif }; static const struct vm_operations_struct shmem_vm_ops = { .fault = shmem_fault, .map_pages = filemap_map_pages, #ifdef CONFIG_NUMA .set_policy = shmem_set_policy, .get_policy = shmem_get_policy, #endif }; static const struct vm_operations_struct shmem_anon_vm_ops = { .fault = shmem_fault, .map_pages = filemap_map_pages, #ifdef CONFIG_NUMA .set_policy = shmem_set_policy, .get_policy = shmem_get_policy, #endif }; int shmem_init_fs_context(struct fs_context *fc) { struct shmem_options *ctx; ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->mode = 0777 | S_ISVTX; ctx->uid = current_fsuid(); ctx->gid = current_fsgid(); fc->fs_private = ctx; fc->ops = &shmem_fs_context_ops; return 0; } static struct file_system_type shmem_fs_type = { .owner = 
THIS_MODULE, .name = "tmpfs", .init_fs_context = shmem_init_fs_context, #ifdef CONFIG_TMPFS .parameters = shmem_fs_parameters, #endif .kill_sb = kill_litter_super, #ifdef CONFIG_SHMEM .fs_flags = FS_USERNS_MOUNT | FS_ALLOW_IDMAP, #else .fs_flags = FS_USERNS_MOUNT, #endif }; void __init shmem_init(void) { int error; shmem_init_inodecache(); #ifdef CONFIG_TMPFS_QUOTA error = register_quota_format(&shmem_quota_format); if (error < 0) { pr_err("Could not register quota format\n"); goto out3; } #endif error = register_filesystem(&shmem_fs_type); if (error) { pr_err("Could not register tmpfs\n"); goto out2; } shm_mnt = kern_mount(&shmem_fs_type); if (IS_ERR(shm_mnt)) { error = PTR_ERR(shm_mnt); pr_err("Could not kern_mount tmpfs\n"); goto out1; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY) SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; else shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */ #endif return; out1: unregister_filesystem(&shmem_fs_type); out2: #ifdef CONFIG_TMPFS_QUOTA unregister_quota_format(&shmem_quota_format); out3: #endif shmem_destroy_inodecache(); shm_mnt = ERR_PTR(error); } #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS) static ssize_t shmem_enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { static const int values[] = { SHMEM_HUGE_ALWAYS, SHMEM_HUGE_WITHIN_SIZE, SHMEM_HUGE_ADVISE, SHMEM_HUGE_NEVER, SHMEM_HUGE_DENY, SHMEM_HUGE_FORCE, }; int len = 0; int i; for (i = 0; i < ARRAY_SIZE(values); i++) { len += sysfs_emit_at(buf, len, shmem_huge == values[i] ? "%s[%s]" : "%s%s", i ? " " : "", shmem_format_huge(values[i])); } len += sysfs_emit_at(buf, len, "\n"); return len; } static ssize_t shmem_enabled_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { char tmp[16]; int huge; if (count + 1 > sizeof(tmp)) return -EINVAL; memcpy(tmp, buf, count); tmp[count] = '\0'; if (count && tmp[count - 1] == '\n') tmp[count - 1] = '\0'; huge = shmem_parse_huge(tmp); if (huge == -EINVAL) return -EINVAL; if (!has_transparent_hugepage() && huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) return -EINVAL; shmem_huge = huge; if (shmem_huge > SHMEM_HUGE_DENY) SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; return count; } struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled); #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */ #else /* !CONFIG_SHMEM */ /* * tiny-shmem: simple shmemfs and tmpfs using ramfs code * * This is intended for small system where the benefits of the full * shmem code (swap-backed and resource-limited) are outweighed by * their complexity. On systems without swap this code should be * effectively equivalent, but much lighter weight. 
*/ static struct file_system_type shmem_fs_type = { .name = "tmpfs", .init_fs_context = ramfs_init_fs_context, .parameters = ramfs_fs_parameters, .kill_sb = ramfs_kill_sb, .fs_flags = FS_USERNS_MOUNT, }; void __init shmem_init(void) { BUG_ON(register_filesystem(&shmem_fs_type) != 0); shm_mnt = kern_mount(&shmem_fs_type); BUG_ON(IS_ERR(shm_mnt)); } int shmem_unuse(unsigned int type) { return 0; } int shmem_lock(struct file *file, int lock, struct ucounts *ucounts) { return 0; } void shmem_unlock_mapping(struct address_space *mapping) { } #ifdef CONFIG_MMU unsigned long shmem_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); } #endif void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) { truncate_inode_pages_range(inode->i_mapping, lstart, lend); } EXPORT_SYMBOL_GPL(shmem_truncate_range); #define shmem_vm_ops generic_file_vm_ops #define shmem_anon_vm_ops generic_file_vm_ops #define shmem_file_operations ramfs_file_operations #define shmem_acct_size(flags, size) 0 #define shmem_unacct_size(flags, size) do {} while (0) static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block *sb, struct inode *dir, umode_t mode, dev_t dev, unsigned long flags) { struct inode *inode = ramfs_get_inode(sb, dir, mode, dev); return inode ? inode : ERR_PTR(-ENOSPC); } #endif /* CONFIG_SHMEM */ /* common code */ static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size, unsigned long flags, unsigned int i_flags) { struct inode *inode; struct file *res; if (IS_ERR(mnt)) return ERR_CAST(mnt); if (size < 0 || size > MAX_LFS_FILESIZE) return ERR_PTR(-EINVAL); if (shmem_acct_size(flags, size)) return ERR_PTR(-ENOMEM); if (is_idmapped_mnt(mnt)) return ERR_PTR(-EINVAL); inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags); if (IS_ERR(inode)) { shmem_unacct_size(flags, size); return ERR_CAST(inode); } inode->i_flags |= i_flags; inode->i_size = size; clear_nlink(inode); /* It is unlinked */ res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size)); if (!IS_ERR(res)) res = alloc_file_pseudo(inode, mnt, name, O_RDWR, &shmem_file_operations); if (IS_ERR(res)) iput(inode); return res; } /** * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be * kernel internal. There will be NO LSM permission checks against the * underlying inode. So users of this interface must do LSM checks at a * higher layer. The users are the big_key and shm implementations. LSM * checks are provided at the key or shm level rather than the inode. 
* @name: name for dentry (to be seen in /proc/<pid>/maps * @size: size to be set for the file * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size */ struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags) { return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE); } /** * shmem_file_setup - get an unlinked file living in tmpfs * @name: name for dentry (to be seen in /proc/<pid>/maps * @size: size to be set for the file * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size */ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) { return __shmem_file_setup(shm_mnt, name, size, flags, 0); } EXPORT_SYMBOL_GPL(shmem_file_setup); /** * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs * @mnt: the tmpfs mount where the file will be created * @name: name for dentry (to be seen in /proc/<pid>/maps * @size: size to be set for the file * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size */ struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name, loff_t size, unsigned long flags) { return __shmem_file_setup(mnt, name, size, flags, 0); } EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt); /** * shmem_zero_setup - setup a shared anonymous mapping * @vma: the vma to be mmapped is prepared by do_mmap */ int shmem_zero_setup(struct vm_area_struct *vma) { struct file *file; loff_t size = vma->vm_end - vma->vm_start; /* * Cloning a new file under mmap_lock leads to a lock ordering conflict * between XFS directory reading and selinux: since this file is only * accessible to the user through its mapping, use S_PRIVATE flag to * bypass file security, in the same way as shmem_kernel_file_setup(). */ file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags); if (IS_ERR(file)) return PTR_ERR(file); if (vma->vm_file) fput(vma->vm_file); vma->vm_file = file; vma->vm_ops = &shmem_anon_vm_ops; return 0; } /** * shmem_read_folio_gfp - read into page cache, using specified page allocation flags. * @mapping: the folio's address_space * @index: the folio index * @gfp: the page allocator flags to use if allocating * * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)", * with any new page allocations done using the specified allocation flags. * But read_cache_page_gfp() uses the ->read_folio() method: which does not * suit tmpfs, since it may have pages in swapcache, and needs to find those * for itself; although drivers/gpu/drm i915 and ttm rely upon this support. * * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily. 
*/ struct folio *shmem_read_folio_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp) { #ifdef CONFIG_SHMEM struct inode *inode = mapping->host; struct folio *folio; int error; BUG_ON(!shmem_mapping(mapping)); error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE, gfp, NULL, NULL, NULL); if (error) return ERR_PTR(error); folio_unlock(folio); return folio; #else /* * The tiny !SHMEM case uses ramfs without swap */ return mapping_read_folio_gfp(mapping, index, gfp); #endif } EXPORT_SYMBOL_GPL(shmem_read_folio_gfp); struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp) { struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp); struct page *page; if (IS_ERR(folio)) return &folio->page; page = folio_file_page(folio, index); if (PageHWPoison(page)) { folio_put(folio); return ERR_PTR(-EIO); } return page; } EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
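/*
 * Illustrative usage sketch (not part of mm/shmem.c itself): how a kernel
 * caller might obtain an unlinked tmpfs file with shmem_file_setup() and
 * read a page from it with shmem_read_mapping_page_gfp(), per the
 * kernel-doc above. The function name example_shmem_user() and the object
 * size are hypothetical; only APIs shown or declared by this file are used.
 */
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/sizes.h>
#include <linux/err.h>
#include <linux/mm.h>

static int example_shmem_user(void)
{
	struct file *filp;
	struct page *page;

	/* Unlinked file living on the internal tmpfs mount (shm_mnt). */
	filp = shmem_file_setup("example-shmem", SZ_1M, 0);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	/* Allocate (or find in swapcache) the first page of the object. */
	page = shmem_read_mapping_page_gfp(filp->f_mapping, 0,
					   mapping_gfp_mask(filp->f_mapping));
	if (IS_ERR(page)) {
		fput(filp);
		return PTR_ERR(page);
	}

	/* ... use the page contents ... */
	put_page(page);
	fput(filp);
	return 0;
}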
linux-master
mm/shmem.c
// SPDX-License-Identifier: GPL-2.0-only /* * Linux VM pressure * * Copyright 2012 Linaro Ltd. * Anton Vorontsov <[email protected]> * * Based on ideas from Andrew Morton, David Rientjes, KOSAKI Motohiro, * Leonid Moiseichuk, Mel Gorman, Minchan Kim and Pekka Enberg. */ #include <linux/cgroup.h> #include <linux/fs.h> #include <linux/log2.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/vmstat.h> #include <linux/eventfd.h> #include <linux/slab.h> #include <linux/swap.h> #include <linux/printk.h> #include <linux/vmpressure.h> /* * The window size (vmpressure_win) is the number of scanned pages before * we try to analyze scanned/reclaimed ratio. So the window is used as a * rate-limit tunable for the "low" level notification, and also for * averaging the ratio for medium/critical levels. Using small window * sizes can cause lot of false positives, but too big window size will * delay the notifications. * * As the vmscan reclaimer logic works with chunks which are multiple of * SWAP_CLUSTER_MAX, it makes sense to use it for the window size as well. * * TODO: Make the window size depend on machine size, as we do for vmstat * thresholds. Currently we set it to 512 pages (2MB for 4KB pages). */ static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16; /* * These thresholds are used when we account memory pressure through * scanned/reclaimed ratio. The current values were chosen empirically. In * essence, they are percents: the higher the value, the more number * unsuccessful reclaims there were. */ static const unsigned int vmpressure_level_med = 60; static const unsigned int vmpressure_level_critical = 95; /* * When there are too little pages left to scan, vmpressure() may miss the * critical pressure as number of pages will be less than "window size". * However, in that case the vmscan priority will raise fast as the * reclaimer will try to scan LRUs more deeply. * * The vmscan logic considers these special priorities: * * prio == DEF_PRIORITY (12): reclaimer starts with that value * prio <= DEF_PRIORITY - 2 : kswapd becomes somewhat overwhelmed * prio == 0 : close to OOM, kernel scans every page in an lru * * Any value in this range is acceptable for this tunable (i.e. from 12 to * 0). Current value for the vmpressure_level_critical_prio is chosen * empirically, but the number, in essence, means that we consider * critical level when scanning depth is ~10% of the lru size (vmscan * scans 'lru_size >> prio' pages, so it is actually 12.5%, or one * eights). 
*/ static const unsigned int vmpressure_level_critical_prio = ilog2(100 / 10); static struct vmpressure *work_to_vmpressure(struct work_struct *work) { return container_of(work, struct vmpressure, work); } static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr) { struct mem_cgroup *memcg = vmpressure_to_memcg(vmpr); memcg = parent_mem_cgroup(memcg); if (!memcg) return NULL; return memcg_to_vmpressure(memcg); } enum vmpressure_levels { VMPRESSURE_LOW = 0, VMPRESSURE_MEDIUM, VMPRESSURE_CRITICAL, VMPRESSURE_NUM_LEVELS, }; enum vmpressure_modes { VMPRESSURE_NO_PASSTHROUGH = 0, VMPRESSURE_HIERARCHY, VMPRESSURE_LOCAL, VMPRESSURE_NUM_MODES, }; static const char * const vmpressure_str_levels[] = { [VMPRESSURE_LOW] = "low", [VMPRESSURE_MEDIUM] = "medium", [VMPRESSURE_CRITICAL] = "critical", }; static const char * const vmpressure_str_modes[] = { [VMPRESSURE_NO_PASSTHROUGH] = "default", [VMPRESSURE_HIERARCHY] = "hierarchy", [VMPRESSURE_LOCAL] = "local", }; static enum vmpressure_levels vmpressure_level(unsigned long pressure) { if (pressure >= vmpressure_level_critical) return VMPRESSURE_CRITICAL; else if (pressure >= vmpressure_level_med) return VMPRESSURE_MEDIUM; return VMPRESSURE_LOW; } static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned, unsigned long reclaimed) { unsigned long scale = scanned + reclaimed; unsigned long pressure = 0; /* * reclaimed can be greater than scanned for things such as reclaimed * slab pages. shrink_node() just adds reclaimed pages without a * related increment to scanned pages. */ if (reclaimed >= scanned) goto out; /* * We calculate the ratio (in percents) of how many pages were * scanned vs. reclaimed in a given time frame (window). Note that * time is in VM reclaimer's "ticks", i.e. number of pages * scanned. This makes it possible to set desired reaction time * and serves as a ratelimit. */ pressure = scale - (reclaimed * scale / scanned); pressure = pressure * 100 / scale; out: pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure, scanned, reclaimed); return vmpressure_level(pressure); } struct vmpressure_event { struct eventfd_ctx *efd; enum vmpressure_levels level; enum vmpressure_modes mode; struct list_head node; }; static bool vmpressure_event(struct vmpressure *vmpr, const enum vmpressure_levels level, bool ancestor, bool signalled) { struct vmpressure_event *ev; bool ret = false; mutex_lock(&vmpr->events_lock); list_for_each_entry(ev, &vmpr->events, node) { if (ancestor && ev->mode == VMPRESSURE_LOCAL) continue; if (signalled && ev->mode == VMPRESSURE_NO_PASSTHROUGH) continue; if (level < ev->level) continue; eventfd_signal(ev->efd, 1); ret = true; } mutex_unlock(&vmpr->events_lock); return ret; } static void vmpressure_work_fn(struct work_struct *work) { struct vmpressure *vmpr = work_to_vmpressure(work); unsigned long scanned; unsigned long reclaimed; enum vmpressure_levels level; bool ancestor = false; bool signalled = false; spin_lock(&vmpr->sr_lock); /* * Several contexts might be calling vmpressure(), so it is * possible that the work was rescheduled again before the old * work context cleared the counters. In that case we will run * just after the old work returns, but then scanned might be zero * here. No need for any locks here since we don't care if * vmpr->reclaimed is in sync. 
*/ scanned = vmpr->tree_scanned; if (!scanned) { spin_unlock(&vmpr->sr_lock); return; } reclaimed = vmpr->tree_reclaimed; vmpr->tree_scanned = 0; vmpr->tree_reclaimed = 0; spin_unlock(&vmpr->sr_lock); level = vmpressure_calc_level(scanned, reclaimed); do { if (vmpressure_event(vmpr, level, ancestor, signalled)) signalled = true; ancestor = true; } while ((vmpr = vmpressure_parent(vmpr))); } /** * vmpressure() - Account memory pressure through scanned/reclaimed ratio * @gfp: reclaimer's gfp mask * @memcg: cgroup memory controller handle * @tree: legacy subtree mode * @scanned: number of pages scanned * @reclaimed: number of pages reclaimed * * This function should be called from the vmscan reclaim path to account * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw * pressure index is then further refined and averaged over time. * * If @tree is set, vmpressure is in traditional userspace reporting * mode: @memcg is considered the pressure root and userspace is * notified of the entire subtree's reclaim efficiency. * * If @tree is not set, reclaim efficiency is recorded for @memcg, and * only in-kernel users are notified. * * This function does not return any value. */ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, unsigned long scanned, unsigned long reclaimed) { struct vmpressure *vmpr; if (mem_cgroup_disabled()) return; /* * The in-kernel users only care about the reclaim efficiency * for this @memcg rather than the whole subtree, and there * isn't and won't be any in-kernel user in a legacy cgroup. */ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !tree) return; vmpr = memcg_to_vmpressure(memcg); /* * Here we only want to account pressure that userland is able to * help us with. For example, suppose that DMA zone is under * pressure; if we notify userland about that kind of pressure, * then it will be mostly a waste as it will trigger unnecessary * freeing of memory by userland (since userland is more likely to * have HIGHMEM/MOVABLE pages instead of the DMA fallback). That * is why we include only movable, highmem and FS/IO pages. * Indirect reclaim (kswapd) sets sc->gfp_mask to GFP_KERNEL, so * we account it too. */ if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) return; /* * If we got here with no pages scanned, then that is an indicator * that reclaimer was unable to find any shrinkable LRUs at the * current scanning depth. But it does not mean that we should * report the critical pressure, yet. If the scanning priority * (scanning depth) goes too high (deep), we will be notified * through vmpressure_prio(). But so far, keep calm. */ if (!scanned) return; if (tree) { spin_lock(&vmpr->sr_lock); scanned = vmpr->tree_scanned += scanned; vmpr->tree_reclaimed += reclaimed; spin_unlock(&vmpr->sr_lock); if (scanned < vmpressure_win) return; schedule_work(&vmpr->work); } else { enum vmpressure_levels level; /* For now, no users for root-level efficiency */ if (!memcg || mem_cgroup_is_root(memcg)) return; spin_lock(&vmpr->sr_lock); scanned = vmpr->scanned += scanned; reclaimed = vmpr->reclaimed += reclaimed; if (scanned < vmpressure_win) { spin_unlock(&vmpr->sr_lock); return; } vmpr->scanned = vmpr->reclaimed = 0; spin_unlock(&vmpr->sr_lock); level = vmpressure_calc_level(scanned, reclaimed); if (level > VMPRESSURE_LOW) { /* * Let the socket buffer allocator know that * we are having trouble reclaiming LRU pages. * * For hysteresis keep the pressure state * asserted for a second in which subsequent * pressure events can occur. 
*/ WRITE_ONCE(memcg->socket_pressure, jiffies + HZ); } } } /** * vmpressure_prio() - Account memory pressure through reclaimer priority level * @gfp: reclaimer's gfp mask * @memcg: cgroup memory controller handle * @prio: reclaimer's priority * * This function should be called from the reclaim path every time when * the vmscan's reclaiming priority (scanning depth) changes. * * This function does not return any value. */ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio) { /* * We only use prio for accounting critical level. For more info * see comment for vmpressure_level_critical_prio variable above. */ if (prio > vmpressure_level_critical_prio) return; /* * OK, the prio is below the threshold, updating vmpressure * information before shrinker dives into long shrinking of long * range vmscan. Passing scanned = vmpressure_win, reclaimed = 0 * to the vmpressure() basically means that we signal 'critical' * level. */ vmpressure(gfp, memcg, true, vmpressure_win, 0); } #define MAX_VMPRESSURE_ARGS_LEN (strlen("critical") + strlen("hierarchy") + 2) /** * vmpressure_register_event() - Bind vmpressure notifications to an eventfd * @memcg: memcg that is interested in vmpressure notifications * @eventfd: eventfd context to link notifications with * @args: event arguments (pressure level threshold, optional mode) * * This function associates eventfd context with the vmpressure * infrastructure, so that the notifications will be delivered to the * @eventfd. The @args parameter is a comma-delimited string that denotes a * pressure level threshold (one of vmpressure_str_levels, i.e. "low", "medium", * or "critical") and an optional mode (one of vmpressure_str_modes, i.e. * "hierarchy" or "local"). * * To be used as memcg event method. * * Return: 0 on success, -ENOMEM on memory failure or -EINVAL if @args could * not be parsed. */ int vmpressure_register_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd, const char *args) { struct vmpressure *vmpr = memcg_to_vmpressure(memcg); struct vmpressure_event *ev; enum vmpressure_modes mode = VMPRESSURE_NO_PASSTHROUGH; enum vmpressure_levels level; char *spec, *spec_orig; char *token; int ret = 0; spec_orig = spec = kstrndup(args, MAX_VMPRESSURE_ARGS_LEN, GFP_KERNEL); if (!spec) return -ENOMEM; /* Find required level */ token = strsep(&spec, ","); ret = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token); if (ret < 0) goto out; level = ret; /* Find optional mode */ token = strsep(&spec, ","); if (token) { ret = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token); if (ret < 0) goto out; mode = ret; } ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) { ret = -ENOMEM; goto out; } ev->efd = eventfd; ev->level = level; ev->mode = mode; mutex_lock(&vmpr->events_lock); list_add(&ev->node, &vmpr->events); mutex_unlock(&vmpr->events_lock); ret = 0; out: kfree(spec_orig); return ret; } /** * vmpressure_unregister_event() - Unbind eventfd from vmpressure * @memcg: memcg handle * @eventfd: eventfd context that was used to link vmpressure with the @cg * * This function does internal manipulations to detach the @eventfd from * the vmpressure notifications, and then frees internal resources * associated with the @eventfd (but the @eventfd itself is not freed). * * To be used as memcg event method. 
*/ void vmpressure_unregister_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd) { struct vmpressure *vmpr = memcg_to_vmpressure(memcg); struct vmpressure_event *ev; mutex_lock(&vmpr->events_lock); list_for_each_entry(ev, &vmpr->events, node) { if (ev->efd != eventfd) continue; list_del(&ev->node); kfree(ev); break; } mutex_unlock(&vmpr->events_lock); } /** * vmpressure_init() - Initialize vmpressure control structure * @vmpr: Structure to be initialized * * This function should be called on every allocated vmpressure structure * before any usage. */ void vmpressure_init(struct vmpressure *vmpr) { spin_lock_init(&vmpr->sr_lock); mutex_init(&vmpr->events_lock); INIT_LIST_HEAD(&vmpr->events); INIT_WORK(&vmpr->work, vmpressure_work_fn); } /** * vmpressure_cleanup() - shuts down vmpressure control structure * @vmpr: Structure to be cleaned up * * This function should be called before the structure in which it is * embedded is cleaned up. */ void vmpressure_cleanup(struct vmpressure *vmpr) { /* * Make sure there is no pending work before eventfd infrastructure * goes away. */ flush_work(&vmpr->work); }
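/*
 * Illustrative userspace sketch (not part of mm/vmpressure.c itself): how a
 * monitor registers for the notifications delivered by vmpressure_event()
 * above, using the cgroup-v1 memory.pressure_level and cgroup.event_control
 * files that feed vmpressure_register_event(). The cgroup path below is
 * hypothetical and error handling is kept minimal.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	const char *cg = "/sys/fs/cgroup/memory/example";	/* hypothetical */
	char path[256], ctl[64];
	uint64_t nr_events;
	int efd, pfd, cfd;

	efd = eventfd(0, 0);
	snprintf(path, sizeof(path), "%s/memory.pressure_level", cg);
	pfd = open(path, O_RDONLY);
	snprintf(path, sizeof(path), "%s/cgroup.event_control", cg);
	cfd = open(path, O_WRONLY);
	if (efd < 0 || pfd < 0 || cfd < 0)
		return 1;

	/* "<eventfd> <fd of memory.pressure_level> <level>[,<mode>]" */
	snprintf(ctl, sizeof(ctl), "%d %d medium,hierarchy", efd, pfd);
	if (write(cfd, ctl, strlen(ctl)) < 0)
		return 1;

	/* Blocks until "medium" or higher pressure is signalled. */
	if (read(efd, &nr_events, sizeof(nr_events)) == sizeof(nr_events))
		printf("vmpressure fired %llu time(s)\n",
		       (unsigned long long)nr_events);
	return 0;
}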
linux-master
mm/vmpressure.c
// SPDX-License-Identifier: GPL-2.0-only #include <linux/mm.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/compiler.h> #include <linux/export.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/signal.h> #include <linux/sched/task_stack.h> #include <linux/security.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/mman.h> #include <linux/hugetlb.h> #include <linux/vmalloc.h> #include <linux/userfaultfd_k.h> #include <linux/elf.h> #include <linux/elf-randomize.h> #include <linux/personality.h> #include <linux/random.h> #include <linux/processor.h> #include <linux/sizes.h> #include <linux/compat.h> #include <linux/uaccess.h> #include "internal.h" #include "swap.h" /** * kfree_const - conditionally free memory * @x: pointer to the memory * * Function calls kfree only if @x is not in .rodata section. */ void kfree_const(const void *x) { if (!is_kernel_rodata((unsigned long)x)) kfree(x); } EXPORT_SYMBOL(kfree_const); /** * kstrdup - allocate space for and copy an existing string * @s: the string to duplicate * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Return: newly allocated copy of @s or %NULL in case of error */ noinline char *kstrdup(const char *s, gfp_t gfp) { size_t len; char *buf; if (!s) return NULL; len = strlen(s) + 1; buf = kmalloc_track_caller(len, gfp); if (buf) memcpy(buf, s, len); return buf; } EXPORT_SYMBOL(kstrdup); /** * kstrdup_const - conditionally duplicate an existing const string * @s: the string to duplicate * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Note: Strings allocated by kstrdup_const should be freed by kfree_const and * must not be passed to krealloc(). * * Return: source string if it is in .rodata section otherwise * fallback to kstrdup. */ const char *kstrdup_const(const char *s, gfp_t gfp) { if (is_kernel_rodata((unsigned long)s)) return s; return kstrdup(s, gfp); } EXPORT_SYMBOL(kstrdup_const); /** * kstrndup - allocate space for and copy an existing string * @s: the string to duplicate * @max: read at most @max chars from @s * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Note: Use kmemdup_nul() instead if the size is known exactly. * * Return: newly allocated copy of @s or %NULL in case of error */ char *kstrndup(const char *s, size_t max, gfp_t gfp) { size_t len; char *buf; if (!s) return NULL; len = strnlen(s, max); buf = kmalloc_track_caller(len+1, gfp); if (buf) { memcpy(buf, s, len); buf[len] = '\0'; } return buf; } EXPORT_SYMBOL(kstrndup); /** * kmemdup - duplicate region of memory * * @src: memory region to duplicate * @len: memory region length * @gfp: GFP mask to use * * Return: newly allocated copy of @src or %NULL in case of error, * result is physically contiguous. Use kfree() to free. */ void *kmemdup(const void *src, size_t len, gfp_t gfp) { void *p; p = kmalloc_track_caller(len, gfp); if (p) memcpy(p, src, len); return p; } EXPORT_SYMBOL(kmemdup); /** * kvmemdup - duplicate region of memory * * @src: memory region to duplicate * @len: memory region length * @gfp: GFP mask to use * * Return: newly allocated copy of @src or %NULL in case of error, * result may be not physically contiguous. Use kvfree() to free. 
*/ void *kvmemdup(const void *src, size_t len, gfp_t gfp) { void *p; p = kvmalloc(len, gfp); if (p) memcpy(p, src, len); return p; } EXPORT_SYMBOL(kvmemdup); /** * kmemdup_nul - Create a NUL-terminated string from unterminated data * @s: The data to stringify * @len: The size of the data * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Return: newly allocated copy of @s with NUL-termination or %NULL in * case of error */ char *kmemdup_nul(const char *s, size_t len, gfp_t gfp) { char *buf; if (!s) return NULL; buf = kmalloc_track_caller(len + 1, gfp); if (buf) { memcpy(buf, s, len); buf[len] = '\0'; } return buf; } EXPORT_SYMBOL(kmemdup_nul); /** * memdup_user - duplicate memory region from user space * * @src: source address in user space * @len: number of bytes to copy * * Return: an ERR_PTR() on failure. Result is physically * contiguous, to be freed by kfree(). */ void *memdup_user(const void __user *src, size_t len) { void *p; p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN); if (!p) return ERR_PTR(-ENOMEM); if (copy_from_user(p, src, len)) { kfree(p); return ERR_PTR(-EFAULT); } return p; } EXPORT_SYMBOL(memdup_user); /** * vmemdup_user - duplicate memory region from user space * * @src: source address in user space * @len: number of bytes to copy * * Return: an ERR_PTR() on failure. Result may be not * physically contiguous. Use kvfree() to free. */ void *vmemdup_user(const void __user *src, size_t len) { void *p; p = kvmalloc(len, GFP_USER); if (!p) return ERR_PTR(-ENOMEM); if (copy_from_user(p, src, len)) { kvfree(p); return ERR_PTR(-EFAULT); } return p; } EXPORT_SYMBOL(vmemdup_user); /** * strndup_user - duplicate an existing string from user space * @s: The string to duplicate * @n: Maximum number of bytes to copy, including the trailing NUL. * * Return: newly allocated copy of @s or an ERR_PTR() in case of error */ char *strndup_user(const char __user *s, long n) { char *p; long length; length = strnlen_user(s, n); if (!length) return ERR_PTR(-EFAULT); if (length > n) return ERR_PTR(-EINVAL); p = memdup_user(s, length); if (IS_ERR(p)) return p; p[length - 1] = '\0'; return p; } EXPORT_SYMBOL(strndup_user); /** * memdup_user_nul - duplicate memory region from user space and NUL-terminate * * @src: source address in user space * @len: number of bytes to copy * * Return: an ERR_PTR() on failure. */ void *memdup_user_nul(const void __user *src, size_t len) { char *p; /* * Always use GFP_KERNEL, since copy_from_user() can sleep and * cause pagefault, which makes it pointless to use GFP_NOFS * or GFP_ATOMIC. */ p = kmalloc_track_caller(len + 1, GFP_KERNEL); if (!p) return ERR_PTR(-ENOMEM); if (copy_from_user(p, src, len)) { kfree(p); return ERR_PTR(-EFAULT); } p[len] = '\0'; return p; } EXPORT_SYMBOL(memdup_user_nul); /* Check if the vma is being used as a stack by this task */ int vma_is_stack_for_current(struct vm_area_struct *vma) { struct task_struct * __maybe_unused t = current; return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); } /* * Change backing file, only valid to use during initial VMA setup. 
*/ void vma_set_file(struct vm_area_struct *vma, struct file *file) { /* Changing an anonymous vma with this is illegal */ get_file(file); swap(vma->vm_file, file); fput(file); } EXPORT_SYMBOL(vma_set_file); #ifndef STACK_RND_MASK #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */ #endif unsigned long randomize_stack_top(unsigned long stack_top) { unsigned long random_variable = 0; if (current->flags & PF_RANDOMIZE) { random_variable = get_random_long(); random_variable &= STACK_RND_MASK; random_variable <<= PAGE_SHIFT; } #ifdef CONFIG_STACK_GROWSUP return PAGE_ALIGN(stack_top) + random_variable; #else return PAGE_ALIGN(stack_top) - random_variable; #endif } /** * randomize_page - Generate a random, page aligned address * @start: The smallest acceptable address the caller will take. * @range: The size of the area, starting at @start, within which the * random address must fall. * * If @start + @range would overflow, @range is capped. * * NOTE: Historical use of randomize_range, which this replaces, presumed that * @start was already page aligned. We now align it regardless. * * Return: A page aligned address within [start, start + range). On error, * @start is returned. */ unsigned long randomize_page(unsigned long start, unsigned long range) { if (!PAGE_ALIGNED(start)) { range -= PAGE_ALIGN(start) - start; start = PAGE_ALIGN(start); } if (start > ULONG_MAX - range) range = ULONG_MAX - start; range >>= PAGE_SHIFT; if (range == 0) return start; return start + (get_random_long() % range << PAGE_SHIFT); } #ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT unsigned long __weak arch_randomize_brk(struct mm_struct *mm) { /* Is the current task 32bit ? */ if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task()) return randomize_page(mm->brk, SZ_32M); return randomize_page(mm->brk, SZ_1G); } unsigned long arch_mmap_rnd(void) { unsigned long rnd; #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS if (is_compat_task()) rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1); else #endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */ rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1); return rnd << PAGE_SHIFT; } static int mmap_is_legacy(struct rlimit *rlim_stack) { if (current->personality & ADDR_COMPAT_LAYOUT) return 1; /* On parisc the stack always grows up - so a unlimited stack should * not be an indicator to use the legacy memory layout. */ if (rlim_stack->rlim_cur == RLIM_INFINITY && !IS_ENABLED(CONFIG_STACK_GROWSUP)) return 1; return sysctl_legacy_va_layout; } /* * Leave enough space between the mmap area and the stack to honour ulimit in * the face of randomisation. */ #define MIN_GAP (SZ_128M) #define MAX_GAP (STACK_TOP / 6 * 5) static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack) { unsigned long gap = rlim_stack->rlim_cur; unsigned long pad = stack_guard_gap; /* Account for stack randomization if necessary */ if (current->flags & PF_RANDOMIZE) pad += (STACK_RND_MASK << PAGE_SHIFT); /* Values close to RLIM_INFINITY can overflow. 
*/ if (gap + pad > gap) gap += pad; if (gap < MIN_GAP) gap = MIN_GAP; else if (gap > MAX_GAP) gap = MAX_GAP; return PAGE_ALIGN(STACK_TOP - gap - rnd); } void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) { unsigned long random_factor = 0UL; if (current->flags & PF_RANDOMIZE) random_factor = arch_mmap_rnd(); if (mmap_is_legacy(rlim_stack)) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; mm->get_unmapped_area = arch_get_unmapped_area; } else { mm->mmap_base = mmap_base(random_factor, rlim_stack); mm->get_unmapped_area = arch_get_unmapped_area_topdown; } } #elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT) void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; } #endif /** * __account_locked_vm - account locked pages to an mm's locked_vm * @mm: mm to account against * @pages: number of pages to account * @inc: %true if @pages should be considered positive, %false if not * @task: task used to check RLIMIT_MEMLOCK * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped * * Assumes @task and @mm are valid (i.e. at least one reference on each), and * that mmap_lock is held as writer. * * Return: * * 0 on success * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded. */ int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, struct task_struct *task, bool bypass_rlim) { unsigned long locked_vm, limit; int ret = 0; mmap_assert_write_locked(mm); locked_vm = mm->locked_vm; if (inc) { if (!bypass_rlim) { limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT; if (locked_vm + pages > limit) ret = -ENOMEM; } if (!ret) mm->locked_vm = locked_vm + pages; } else { WARN_ON_ONCE(pages > locked_vm); mm->locked_vm = locked_vm - pages; } pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid, (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT, locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK), ret ? " - exceeded" : ""); return ret; } EXPORT_SYMBOL_GPL(__account_locked_vm); /** * account_locked_vm - account locked pages to an mm's locked_vm * @mm: mm to account against, may be NULL * @pages: number of pages to account * @inc: %true if @pages should be considered positive, %false if not * * Assumes a non-NULL @mm is valid (i.e. at least one reference on it). * * Return: * * 0 on success, or if mm is NULL * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded. 
*/ int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc) { int ret; if (pages == 0 || !mm) return 0; mmap_write_lock(mm); ret = __account_locked_vm(mm, pages, inc, current, capable(CAP_IPC_LOCK)); mmap_write_unlock(mm); return ret; } EXPORT_SYMBOL_GPL(account_locked_vm); unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long pgoff) { unsigned long ret; struct mm_struct *mm = current->mm; unsigned long populate; LIST_HEAD(uf); ret = security_mmap_file(file, prot, flag); if (!ret) { if (mmap_write_lock_killable(mm)) return -EINTR; ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate, &uf); mmap_write_unlock(mm); userfaultfd_unmap_complete(mm, &uf); if (populate) mm_populate(ret, populate); } return ret; } unsigned long vm_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long offset) { if (unlikely(offset + PAGE_ALIGN(len) < offset)) return -EINVAL; if (unlikely(offset_in_page(offset))) return -EINVAL; return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT); } EXPORT_SYMBOL(vm_mmap); /** * kvmalloc_node - attempt to allocate physically contiguous memory, but upon * failure, fall back to non-contiguous (vmalloc) allocation. * @size: size of the request. * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL. * @node: numa node to allocate from * * Uses kmalloc to get the memory but if the allocation fails then falls back * to the vmalloc allocator. Use kvfree for freeing the memory. * * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier. * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is * preferable to the vmalloc fallback, due to visible performance drawbacks. * * Return: pointer to the allocated memory of %NULL in case of failure */ void *kvmalloc_node(size_t size, gfp_t flags, int node) { gfp_t kmalloc_flags = flags; void *ret; /* * We want to attempt a large physically contiguous block first because * it is less likely to fragment multiple larger blocks and therefore * contribute to a long term fragmentation less than vmalloc fallback. * However make sure that larger requests are not too disruptive - no * OOM killer and no allocation failure warnings as we have a fallback. */ if (size > PAGE_SIZE) { kmalloc_flags |= __GFP_NOWARN; if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL)) kmalloc_flags |= __GFP_NORETRY; /* nofail semantic is implemented by the vmalloc fallback */ kmalloc_flags &= ~__GFP_NOFAIL; } ret = kmalloc_node(size, kmalloc_flags, node); /* * It doesn't really make sense to fallback to vmalloc for sub page * requests */ if (ret || size <= PAGE_SIZE) return ret; /* non-sleeping allocations are not supported by vmalloc */ if (!gfpflags_allow_blocking(flags)) return NULL; /* Don't even allow crazy sizes */ if (unlikely(size > INT_MAX)) { WARN_ON_ONCE(!(flags & __GFP_NOWARN)); return NULL; } /* * kvmalloc() can always use VM_ALLOW_HUGE_VMAP, * since the callers already cannot assume anything * about the resulting pointer, and cannot play * protection games. */ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, node, __builtin_return_address(0)); } EXPORT_SYMBOL(kvmalloc_node); /** * kvfree() - Free memory. * @addr: Pointer to allocated memory. * * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc(). 
* It is slightly more efficient to use kfree() or vfree() if you are certain * that you know which one to use. * * Context: Either preemptible task context or not-NMI interrupt. */ void kvfree(const void *addr) { if (is_vmalloc_addr(addr)) vfree(addr); else kfree(addr); } EXPORT_SYMBOL(kvfree); /** * kvfree_sensitive - Free a data object containing sensitive information. * @addr: address of the data object to be freed. * @len: length of the data object. * * Use the special memzero_explicit() function to clear the content of a * kvmalloc'ed object containing sensitive data to make sure that the * compiler won't optimize out the data clearing. */ void kvfree_sensitive(const void *addr, size_t len) { if (likely(!ZERO_OR_NULL_PTR(addr))) { memzero_explicit((void *)addr, len); kvfree(addr); } } EXPORT_SYMBOL(kvfree_sensitive); void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags) { void *newp; if (oldsize >= newsize) return (void *)p; newp = kvmalloc(newsize, flags); if (!newp) return NULL; memcpy(newp, p, oldsize); kvfree(p); return newp; } EXPORT_SYMBOL(kvrealloc); /** * __vmalloc_array - allocate memory for a virtually contiguous array. * @n: number of elements. * @size: element size. * @flags: the type of memory to allocate (see kmalloc). */ void *__vmalloc_array(size_t n, size_t size, gfp_t flags) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; return __vmalloc(bytes, flags); } EXPORT_SYMBOL(__vmalloc_array); /** * vmalloc_array - allocate memory for a virtually contiguous array. * @n: number of elements. * @size: element size. */ void *vmalloc_array(size_t n, size_t size) { return __vmalloc_array(n, size, GFP_KERNEL); } EXPORT_SYMBOL(vmalloc_array); /** * __vcalloc - allocate and zero memory for a virtually contiguous array. * @n: number of elements. * @size: element size. * @flags: the type of memory to allocate (see kmalloc). */ void *__vcalloc(size_t n, size_t size, gfp_t flags) { return __vmalloc_array(n, size, flags | __GFP_ZERO); } EXPORT_SYMBOL(__vcalloc); /** * vcalloc - allocate and zero memory for a virtually contiguous array. * @n: number of elements. * @size: element size. */ void *vcalloc(size_t n, size_t size) { return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO); } EXPORT_SYMBOL(vcalloc); struct anon_vma *folio_anon_vma(struct folio *folio) { unsigned long mapping = (unsigned long)folio->mapping; if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) return NULL; return (void *)(mapping - PAGE_MAPPING_ANON); } /** * folio_mapping - Find the mapping where this folio is stored. * @folio: The folio. * * For folios which are in the page cache, return the mapping that this * page belongs to. Folios in the swap cache return the swap mapping * this page is stored in (which is different from the mapping for the * swap file or swap device where the data is stored). * * You can call this for folios which aren't in the swap cache or page * cache and it will return NULL. */ struct address_space *folio_mapping(struct folio *folio) { struct address_space *mapping; /* This happens if someone calls flush_dcache_page on slab page */ if (unlikely(folio_test_slab(folio))) return NULL; if (unlikely(folio_test_swapcache(folio))) return swap_address_space(folio->swap); mapping = folio->mapping; if ((unsigned long)mapping & PAGE_MAPPING_FLAGS) return NULL; return mapping; } EXPORT_SYMBOL(folio_mapping); /** * folio_copy - Copy the contents of one folio to another. * @dst: Folio to copy to. * @src: Folio to copy from. 
* * The bytes in the folio represented by @src are copied to @dst. * Assumes the caller has validated that @dst is at least as large as @src. * Can be called in atomic context for order-0 folios, but if the folio is * larger, it may sleep. */ void folio_copy(struct folio *dst, struct folio *src) { long i = 0; long nr = folio_nr_pages(src); for (;;) { copy_highpage(folio_page(dst, i), folio_page(src, i)); if (++i == nr) break; cond_resched(); } } int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; int sysctl_overcommit_ratio __read_mostly = 50; unsigned long sysctl_overcommit_kbytes __read_mostly; int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */ unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */ int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_dointvec(table, write, buffer, lenp, ppos); if (ret == 0 && write) sysctl_overcommit_kbytes = 0; return ret; } static void sync_overcommit_as(struct work_struct *dummy) { percpu_counter_sync(&vm_committed_as); } int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table t; int new_policy = -1; int ret; /* * The deviation of sync_overcommit_as could be big with loose policy * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply * with the strict "NEVER", and to avoid possible race condition (even * though user usually won't too frequently do the switching to policy * OVERCOMMIT_NEVER), the switch is done in the following order: * 1. changing the batch * 2. sync percpu count on each CPU * 3. switch the policy */ if (write) { t = *table; t.data = &new_policy; ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); if (ret || new_policy == -1) return ret; mm_compute_batch(new_policy); if (new_policy == OVERCOMMIT_NEVER) schedule_on_each_cpu(sync_overcommit_as); sysctl_overcommit_memory = new_policy; } else { ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); } return ret; } int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write) sysctl_overcommit_ratio = 0; return ret; } /* * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used */ unsigned long vm_commit_limit(void) { unsigned long allowed; if (sysctl_overcommit_kbytes) allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10); else allowed = ((totalram_pages() - hugetlb_total_pages()) * sysctl_overcommit_ratio / 100); allowed += total_swap_pages; return allowed; } /* * Make sure vm_committed_as in one cacheline and not cacheline shared with * other variables. It can be updated by several CPUs frequently. */ struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp; /* * The global memory commitment made in the system can be a metric * that can be used to drive ballooning decisions when Linux is hosted * as a guest. On Hyper-V, the host implements a policy engine for dynamically * balancing memory across competing virtual machines that are hosted. * Several metrics drive this policy engine including the guest reported * memory commitment. 
* * The time cost of this is very low for small platforms, and for big * platform like a 2S/36C/72T Skylake server, in worst case where * vm_committed_as's spinlock is under severe contention, the time cost * could be about 30~40 microseconds. */ unsigned long vm_memory_committed(void) { return percpu_counter_sum_positive(&vm_committed_as); } EXPORT_SYMBOL_GPL(vm_memory_committed); /* * Check that a process has enough memory to allocate a new virtual * mapping. 0 means there is enough memory for the allocation to * succeed and -ENOMEM implies there is not. * * We currently support three overcommit policies, which are set via the * vm.overcommit_memory sysctl. See Documentation/mm/overcommit-accounting.rst * * Strict overcommit modes added 2002 Feb 26 by Alan Cox. * Additional code 2002 Jul 20 by Robert Love. * * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise. * * Note this is a helper function intended to be used by LSMs which * wish to use this logic. */ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) { long allowed; vm_acct_memory(pages); /* * Sometimes we want to use more memory than we have */ if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS) return 0; if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { if (pages > totalram_pages() + total_swap_pages) goto error; return 0; } allowed = vm_commit_limit(); /* * Reserve some for root */ if (!cap_sys_admin) allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10); /* * Don't let a single process grow so big a user can't recover */ if (mm) { long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10); allowed -= min_t(long, mm->total_vm / 32, reserve); } if (percpu_counter_read_positive(&vm_committed_as) < allowed) return 0; error: pr_warn_ratelimited("%s: pid: %d, comm: %s, not enough memory for the allocation\n", __func__, current->pid, current->comm); vm_unacct_memory(pages); return -ENOMEM; } /** * get_cmdline() - copy the cmdline value to a buffer. * @task: the task whose cmdline value to copy. * @buffer: the buffer to copy to. * @buflen: the length of the buffer. Larger cmdline values are truncated * to this length. * * Return: the size of the cmdline field copied. Note that the copy does * not guarantee an ending NULL byte. */ int get_cmdline(struct task_struct *task, char *buffer, int buflen) { int res = 0; unsigned int len; struct mm_struct *mm = get_task_mm(task); unsigned long arg_start, arg_end, env_start, env_end; if (!mm) goto out; if (!mm->arg_end) goto out_mm; /* Shh! No looking before we're done */ spin_lock(&mm->arg_lock); arg_start = mm->arg_start; arg_end = mm->arg_end; env_start = mm->env_start; env_end = mm->env_end; spin_unlock(&mm->arg_lock); len = arg_end - arg_start; if (len > buflen) len = buflen; res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE); /* * If the nul at the end of args has been overwritten, then * assume application is using setproctitle(3). 
*/ if (res > 0 && buffer[res-1] != '\0' && len < buflen) { len = strnlen(buffer, res); if (len < res) { res = len; } else { len = env_end - env_start; if (len > buflen - res) len = buflen - res; res += access_process_vm(task, env_start, buffer+res, len, FOLL_FORCE); res = strnlen(buffer, res); } } out_mm: mmput(mm); out: return res; } int __weak memcmp_pages(struct page *page1, struct page *page2) { char *addr1, *addr2; int ret; addr1 = kmap_atomic(page1); addr2 = kmap_atomic(page2); ret = memcmp(addr1, addr2, PAGE_SIZE); kunmap_atomic(addr2); kunmap_atomic(addr1); return ret; } #ifdef CONFIG_PRINTK /** * mem_dump_obj - Print available provenance information * @object: object for which to find provenance information. * * This function uses pr_cont(), so that the caller is expected to have * printed out whatever preamble is appropriate. The provenance information * depends on the type of object and on how much debugging is enabled. * For example, for a slab-cache object, the slab name is printed, and, * if available, the return address and stack trace from the allocation * and last free path of that object. */ void mem_dump_obj(void *object) { const char *type; if (kmem_valid_obj(object)) { kmem_dump_obj(object); return; } if (vmalloc_dump_obj(object)) return; if (is_vmalloc_addr(object)) type = "vmalloc memory"; else if (virt_addr_valid(object)) type = "non-slab/vmalloc memory"; else if (object == NULL) type = "NULL pointer"; else if (object == ZERO_SIZE_PTR) type = "zero-size pointer"; else type = "non-paged memory"; pr_cont(" %s\n", type); } EXPORT_SYMBOL_GPL(mem_dump_obj); #endif /* * A driver might set a page logically offline -- PageOffline() -- and * turn the page inaccessible in the hypervisor; after that, access to page * content can be fatal. * * Some special PFN walkers -- i.e., /proc/kcore -- read content of random * pages after checking PageOffline(); however, these PFN walkers can race * with drivers that set PageOffline(). * * page_offline_freeze()/page_offline_thaw() allows for a subsystem to * synchronize with such drivers, achieving that a page cannot be set * PageOffline() while frozen. * * page_offline_begin()/page_offline_end() is used by drivers that care about * such races when setting a page PageOffline(). */ static DECLARE_RWSEM(page_offline_rwsem); void page_offline_freeze(void) { down_read(&page_offline_rwsem); } void page_offline_thaw(void) { up_read(&page_offline_rwsem); } void page_offline_begin(void) { down_write(&page_offline_rwsem); } EXPORT_SYMBOL(page_offline_begin); void page_offline_end(void) { up_write(&page_offline_rwsem); } EXPORT_SYMBOL(page_offline_end); #ifndef flush_dcache_folio void flush_dcache_folio(struct folio *folio) { long i, nr = folio_nr_pages(folio); for (i = 0; i < nr; i++) flush_dcache_page(folio_page(folio, i)); } EXPORT_SYMBOL(flush_dcache_folio); #endif
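/*
 * Illustrative sketch (not part of mm/util.c itself): typical use of the
 * kvmalloc family implemented above. A possibly-large, zeroed table is
 * allocated with kvmalloc_array(); small requests come from kmalloc, large
 * ones quietly fall back to vmalloc, and kvfree() handles either case.
 * The example_table type and helper names are hypothetical.
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/mm.h>

struct example_table {
	size_t nr;
	u64 *slots;
};

static int example_table_alloc(struct example_table *t, size_t nr)
{
	/*
	 * For large sizes the kmalloc attempt is made with
	 * __GFP_NORETRY | __GFP_NOWARN, so the vmalloc fallback (rather than
	 * the OOM killer) absorbs the failure, as kvmalloc_node() above
	 * describes.
	 */
	t->slots = kvmalloc_array(nr, sizeof(*t->slots),
				  GFP_KERNEL | __GFP_ZERO);
	if (!t->slots)
		return -ENOMEM;
	t->nr = nr;
	return 0;
}

static void example_table_free(struct example_table *t)
{
	kvfree(t->slots);	/* correct for both kmalloc and vmalloc memory */
	t->slots = NULL;
	t->nr = 0;
}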
linux-master
mm/util.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/memblock.h> #include <linux/seq_file.h> static bool early_memtest_done; static phys_addr_t early_memtest_bad_size; static u64 patterns[] __initdata = { /* The first entry has to be 0 to leave memtest with zeroed memory */ 0, 0xffffffffffffffffULL, 0x5555555555555555ULL, 0xaaaaaaaaaaaaaaaaULL, 0x1111111111111111ULL, 0x2222222222222222ULL, 0x4444444444444444ULL, 0x8888888888888888ULL, 0x3333333333333333ULL, 0x6666666666666666ULL, 0x9999999999999999ULL, 0xccccccccccccccccULL, 0x7777777777777777ULL, 0xbbbbbbbbbbbbbbbbULL, 0xddddddddddddddddULL, 0xeeeeeeeeeeeeeeeeULL, 0x7a6c7258554e494cULL, /* yeah ;-) */ }; static void __init reserve_bad_mem(u64 pattern, phys_addr_t start_bad, phys_addr_t end_bad) { pr_info(" %016llx bad mem addr %pa - %pa reserved\n", cpu_to_be64(pattern), &start_bad, &end_bad); memblock_reserve(start_bad, end_bad - start_bad); early_memtest_bad_size += (end_bad - start_bad); } static void __init memtest(u64 pattern, phys_addr_t start_phys, phys_addr_t size) { u64 *p, *start, *end; phys_addr_t start_bad, last_bad; phys_addr_t start_phys_aligned; const size_t incr = sizeof(pattern); start_phys_aligned = ALIGN(start_phys, incr); start = __va(start_phys_aligned); end = start + (size - (start_phys_aligned - start_phys)) / incr; start_bad = 0; last_bad = 0; for (p = start; p < end; p++) *p = pattern; for (p = start; p < end; p++, start_phys_aligned += incr) { if (*p == pattern) continue; if (start_phys_aligned == last_bad + incr) { last_bad += incr; continue; } if (start_bad) reserve_bad_mem(pattern, start_bad, last_bad + incr); start_bad = last_bad = start_phys_aligned; } if (start_bad) reserve_bad_mem(pattern, start_bad, last_bad + incr); early_memtest_done = true; } static void __init do_one_pass(u64 pattern, phys_addr_t start, phys_addr_t end) { u64 i; phys_addr_t this_start, this_end; for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &this_start, &this_end, NULL) { this_start = clamp(this_start, start, end); this_end = clamp(this_end, start, end); if (this_start < this_end) { pr_info(" %pa - %pa pattern %016llx\n", &this_start, &this_end, cpu_to_be64(pattern)); memtest(pattern, this_start, this_end - this_start); } } } /* default is disabled */ static unsigned int memtest_pattern __initdata; static int __init parse_memtest(char *arg) { int ret = 0; if (arg) ret = kstrtouint(arg, 0, &memtest_pattern); else memtest_pattern = ARRAY_SIZE(patterns); return ret; } early_param("memtest", parse_memtest); void __init early_memtest(phys_addr_t start, phys_addr_t end) { unsigned int i; unsigned int idx = 0; if (!memtest_pattern) return; pr_info("early_memtest: # of tests: %u\n", memtest_pattern); for (i = memtest_pattern-1; i < UINT_MAX; --i) { idx = i % ARRAY_SIZE(patterns); do_one_pass(patterns[idx], start, end); } } void memtest_report_meminfo(struct seq_file *m) { unsigned long early_memtest_bad_size_kb; if (!IS_ENABLED(CONFIG_PROC_FS)) return; if (!early_memtest_done) return; early_memtest_bad_size_kb = early_memtest_bad_size >> 10; if (early_memtest_bad_size && !early_memtest_bad_size_kb) early_memtest_bad_size_kb = 1; /* When 0 is reported, it means there actually was a successful test */ seq_printf(m, "EarlyMemtestBad: %5lu kB\n", early_memtest_bad_size_kb); }
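/*
 * Illustrative userspace sketch (not part of mm/memtest.c itself): the same
 * write-then-verify pass that memtest() above runs over free memblock ranges
 * (booting with e.g. "memtest=4" runs four such passes, cycling through the
 * patterns[] table), applied here to an ordinary buffer. Adjacent failing
 * words are coalesced into one reported range, mirroring reserve_bad_mem();
 * on healthy memory nothing is reported. Function and variable names are
 * hypothetical.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void pattern_pass(volatile uint64_t *buf, size_t words, uint64_t pattern)
{
	size_t i, bad_start = 0, bad_end = 0;
	int in_bad = 0;

	/* Fill every word with the pattern, then read it all back. */
	for (i = 0; i < words; i++)
		buf[i] = pattern;

	for (i = 0; i < words; i++) {
		if (buf[i] == pattern) {
			if (in_bad) {
				printf("pattern %016llx: bad words [%zu, %zu)\n",
				       (unsigned long long)pattern,
				       bad_start, bad_end);
				in_bad = 0;
			}
			continue;
		}
		/* Start a new bad range or extend the current one. */
		if (!in_bad) {
			bad_start = i;
			in_bad = 1;
		}
		bad_end = i + 1;
	}
	if (in_bad)
		printf("pattern %016llx: bad words [%zu, %zu)\n",
		       (unsigned long long)pattern, bad_start, bad_end);
}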
linux-master
mm/memtest.c
/* * * Copyright IBM Corporation, 2012 * Author Aneesh Kumar K.V <[email protected]> * * Cgroup v2 * Copyright (C) 2019 Red Hat, Inc. * Author: Giuseppe Scrivano <[email protected]> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2.1 of the GNU Lesser General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * */ #include <linux/cgroup.h> #include <linux/page_counter.h> #include <linux/slab.h> #include <linux/hugetlb.h> #include <linux/hugetlb_cgroup.h> #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val)) #define MEMFILE_IDX(val) (((val) >> 16) & 0xffff) #define MEMFILE_ATTR(val) ((val) & 0xffff) static struct hugetlb_cgroup *root_h_cgroup __read_mostly; static inline struct page_counter * __hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx, bool rsvd) { if (rsvd) return &h_cg->rsvd_hugepage[idx]; return &h_cg->hugepage[idx]; } static inline struct page_counter * hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx) { return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, false); } static inline struct page_counter * hugetlb_cgroup_counter_from_cgroup_rsvd(struct hugetlb_cgroup *h_cg, int idx) { return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, true); } static inline struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s) { return s ? container_of(s, struct hugetlb_cgroup, css) : NULL; } static inline struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task) { return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id)); } static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg) { return (h_cg == root_h_cgroup); } static inline struct hugetlb_cgroup * parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg) { return hugetlb_cgroup_from_css(h_cg->css.parent); } static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg) { struct hstate *h; for_each_hstate(h) { if (page_counter_read( hugetlb_cgroup_counter_from_cgroup(h_cg, hstate_index(h)))) return true; } return false; } static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup, struct hugetlb_cgroup *parent_h_cgroup) { int idx; for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) { struct page_counter *fault_parent = NULL; struct page_counter *rsvd_parent = NULL; unsigned long limit; int ret; if (parent_h_cgroup) { fault_parent = hugetlb_cgroup_counter_from_cgroup( parent_h_cgroup, idx); rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd( parent_h_cgroup, idx); } page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx), fault_parent); page_counter_init( hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx), rsvd_parent); limit = round_down(PAGE_COUNTER_MAX, pages_per_huge_page(&hstates[idx])); ret = page_counter_set_max( hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx), limit); VM_BUG_ON(ret); ret = page_counter_set_max( hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx), limit); VM_BUG_ON(ret); } } static void hugetlb_cgroup_free(struct hugetlb_cgroup *h_cgroup) { int node; for_each_node(node) kfree(h_cgroup->nodeinfo[node]); kfree(h_cgroup); } static struct cgroup_subsys_state * hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) { struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css); struct hugetlb_cgroup *h_cgroup; int 
node; h_cgroup = kzalloc(struct_size(h_cgroup, nodeinfo, nr_node_ids), GFP_KERNEL); if (!h_cgroup) return ERR_PTR(-ENOMEM); if (!parent_h_cgroup) root_h_cgroup = h_cgroup; /* * TODO: this routine can waste much memory for nodes which will * never be onlined. It's better to use memory hotplug callback * function. */ for_each_node(node) { /* Set node_to_alloc to NUMA_NO_NODE for offline nodes. */ int node_to_alloc = node_state(node, N_NORMAL_MEMORY) ? node : NUMA_NO_NODE; h_cgroup->nodeinfo[node] = kzalloc_node(sizeof(struct hugetlb_cgroup_per_node), GFP_KERNEL, node_to_alloc); if (!h_cgroup->nodeinfo[node]) goto fail_alloc_nodeinfo; } hugetlb_cgroup_init(h_cgroup, parent_h_cgroup); return &h_cgroup->css; fail_alloc_nodeinfo: hugetlb_cgroup_free(h_cgroup); return ERR_PTR(-ENOMEM); } static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css) { hugetlb_cgroup_free(hugetlb_cgroup_from_css(css)); } /* * Should be called with hugetlb_lock held. * Since we are holding hugetlb_lock, pages cannot get moved from * active list or uncharged from the cgroup, So no need to get * page reference and test for page active here. This function * cannot fail. */ static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg, struct page *page) { unsigned int nr_pages; struct page_counter *counter; struct hugetlb_cgroup *page_hcg; struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg); struct folio *folio = page_folio(page); page_hcg = hugetlb_cgroup_from_folio(folio); /* * We can have pages in active list without any cgroup * ie, hugepage with less than 3 pages. We can safely * ignore those pages. */ if (!page_hcg || page_hcg != h_cg) goto out; nr_pages = compound_nr(page); if (!parent) { parent = root_h_cgroup; /* root has no limit */ page_counter_charge(&parent->hugepage[idx], nr_pages); } counter = &h_cg->hugepage[idx]; /* Take the pages off the local counter */ page_counter_cancel(counter, nr_pages); set_hugetlb_cgroup(folio, parent); out: return; } /* * Force the hugetlb cgroup to empty the hugetlb resources by moving them to * the parent cgroup. */ static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css) { struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css); struct hstate *h; struct page *page; do { for_each_hstate(h) { spin_lock_irq(&hugetlb_lock); list_for_each_entry(page, &h->hugepage_activelist, lru) hugetlb_cgroup_move_parent(hstate_index(h), h_cg, page); spin_unlock_irq(&hugetlb_lock); } cond_resched(); } while (hugetlb_cgroup_have_usage(h_cg)); } static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx, enum hugetlb_memory_event event) { atomic_long_inc(&hugetlb->events_local[idx][event]); cgroup_file_notify(&hugetlb->events_local_file[idx]); do { atomic_long_inc(&hugetlb->events[idx][event]); cgroup_file_notify(&hugetlb->events_file[idx]); } while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) && !hugetlb_cgroup_is_root(hugetlb)); } static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup **ptr, bool rsvd) { int ret = 0; struct page_counter *counter; struct hugetlb_cgroup *h_cg = NULL; if (hugetlb_cgroup_disabled()) goto done; /* * We don't charge any cgroup if the compound page have less * than 3 pages. 
*/ if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER) goto done; again: rcu_read_lock(); h_cg = hugetlb_cgroup_from_task(current); if (!css_tryget(&h_cg->css)) { rcu_read_unlock(); goto again; } rcu_read_unlock(); if (!page_counter_try_charge( __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd), nr_pages, &counter)) { ret = -ENOMEM; hugetlb_event(h_cg, idx, HUGETLB_MAX); css_put(&h_cg->css); goto done; } /* Reservations take a reference to the css because they do not get * reparented. */ if (!rsvd) css_put(&h_cg->css); done: *ptr = h_cg; return ret; } int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup **ptr) { return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false); } int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages, struct hugetlb_cgroup **ptr) { return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true); } /* Should be called with hugetlb_lock held */ static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg, struct folio *folio, bool rsvd) { if (hugetlb_cgroup_disabled() || !h_cg) return; __set_hugetlb_cgroup(folio, h_cg, rsvd); if (!rsvd) { unsigned long usage = h_cg->nodeinfo[folio_nid(folio)]->usage[idx]; /* * This write is not atomic due to fetching usage and writing * to it, but that's fine because we call this with * hugetlb_lock held anyway. */ WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx], usage + nr_pages); } } void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg, struct folio *folio) { __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, false); } void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg, struct folio *folio) { __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, true); } /* * Should be called with hugetlb_lock held */ static void __hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages, struct folio *folio, bool rsvd) { struct hugetlb_cgroup *h_cg; if (hugetlb_cgroup_disabled()) return; lockdep_assert_held(&hugetlb_lock); h_cg = __hugetlb_cgroup_from_folio(folio, rsvd); if (unlikely(!h_cg)) return; __set_hugetlb_cgroup(folio, NULL, rsvd); page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd), nr_pages); if (rsvd) css_put(&h_cg->css); else { unsigned long usage = h_cg->nodeinfo[folio_nid(folio)]->usage[idx]; /* * This write is not atomic due to fetching usage and writing * to it, but that's fine because we call this with * hugetlb_lock held anyway. 
*/ WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx], usage - nr_pages); } } void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages, struct folio *folio) { __hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, false); } void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages, struct folio *folio) { __hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, true); } static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg, bool rsvd) { if (hugetlb_cgroup_disabled() || !h_cg) return; if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER) return; page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd), nr_pages); if (rsvd) css_put(&h_cg->css); } void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg) { __hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, false); } void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg) { __hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, true); } void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start, unsigned long end) { if (hugetlb_cgroup_disabled() || !resv || !resv->reservation_counter || !resv->css) return; page_counter_uncharge(resv->reservation_counter, (end - start) * resv->pages_per_hpage); css_put(resv->css); } void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv, struct file_region *rg, unsigned long nr_pages, bool region_del) { if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages) return; if (rg->reservation_counter && resv->pages_per_hpage && !resv->reservation_counter) { page_counter_uncharge(rg->reservation_counter, nr_pages * resv->pages_per_hpage); /* * Only do css_put(rg->css) when we delete the entire region * because one file_region must hold exactly one css reference. */ if (region_del) css_put(rg->css); } } enum { RES_USAGE, RES_RSVD_USAGE, RES_LIMIT, RES_RSVD_LIMIT, RES_MAX_USAGE, RES_RSVD_MAX_USAGE, RES_FAILCNT, RES_RSVD_FAILCNT, }; static int hugetlb_cgroup_read_numa_stat(struct seq_file *seq, void *dummy) { int nid; struct cftype *cft = seq_cft(seq); int idx = MEMFILE_IDX(cft->private); bool legacy = MEMFILE_ATTR(cft->private); struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq)); struct cgroup_subsys_state *css; unsigned long usage; if (legacy) { /* Add up usage across all nodes for the non-hierarchical total. */ usage = 0; for_each_node_state(nid, N_MEMORY) usage += READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]); seq_printf(seq, "total=%lu", usage * PAGE_SIZE); /* Simply print the per-node usage for the non-hierarchical total. */ for_each_node_state(nid, N_MEMORY) seq_printf(seq, " N%d=%lu", nid, READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]) * PAGE_SIZE); seq_putc(seq, '\n'); } /* * The hierarchical total is pretty much the value recorded by the * counter, so use that. */ seq_printf(seq, "%stotal=%lu", legacy ? "hierarchical_" : "", page_counter_read(&h_cg->hugepage[idx]) * PAGE_SIZE); /* * For each node, transverse the css tree to obtain the hierarchical * node usage. 
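	 *
	 * As an illustrative example (not part of the original source), a read
	 * of hugetlb.2MB.numa_stat on cgroup v1 with two memory nodes might
	 * print:
	 *
	 *	total=6291456 N0=4194304 N1=2097152
	 *	hierarchical_total=12582912 N0=8388608 N1=4194304
	 *
	 * while on cgroup v2 only the hierarchical line is printed and the
	 * "hierarchical_" prefix is dropped.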
*/ for_each_node_state(nid, N_MEMORY) { usage = 0; rcu_read_lock(); css_for_each_descendant_pre(css, &h_cg->css) { usage += READ_ONCE(hugetlb_cgroup_from_css(css) ->nodeinfo[nid] ->usage[idx]); } rcu_read_unlock(); seq_printf(seq, " N%d=%lu", nid, usage * PAGE_SIZE); } seq_putc(seq, '\n'); return 0; } static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) { struct page_counter *counter; struct page_counter *rsvd_counter; struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css); counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)]; rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(cft->private)]; switch (MEMFILE_ATTR(cft->private)) { case RES_USAGE: return (u64)page_counter_read(counter) * PAGE_SIZE; case RES_RSVD_USAGE: return (u64)page_counter_read(rsvd_counter) * PAGE_SIZE; case RES_LIMIT: return (u64)counter->max * PAGE_SIZE; case RES_RSVD_LIMIT: return (u64)rsvd_counter->max * PAGE_SIZE; case RES_MAX_USAGE: return (u64)counter->watermark * PAGE_SIZE; case RES_RSVD_MAX_USAGE: return (u64)rsvd_counter->watermark * PAGE_SIZE; case RES_FAILCNT: return counter->failcnt; case RES_RSVD_FAILCNT: return rsvd_counter->failcnt; default: BUG(); } } static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v) { int idx; u64 val; struct cftype *cft = seq_cft(seq); unsigned long limit; struct page_counter *counter; struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq)); idx = MEMFILE_IDX(cft->private); counter = &h_cg->hugepage[idx]; limit = round_down(PAGE_COUNTER_MAX, pages_per_huge_page(&hstates[idx])); switch (MEMFILE_ATTR(cft->private)) { case RES_RSVD_USAGE: counter = &h_cg->rsvd_hugepage[idx]; fallthrough; case RES_USAGE: val = (u64)page_counter_read(counter); seq_printf(seq, "%llu\n", val * PAGE_SIZE); break; case RES_RSVD_LIMIT: counter = &h_cg->rsvd_hugepage[idx]; fallthrough; case RES_LIMIT: val = (u64)counter->max; if (val == limit) seq_puts(seq, "max\n"); else seq_printf(seq, "%llu\n", val * PAGE_SIZE); break; default: BUG(); } return 0; } static DEFINE_MUTEX(hugetlb_limit_mutex); static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off, const char *max) { int ret, idx; unsigned long nr_pages; struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of)); bool rsvd = false; if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */ return -EINVAL; buf = strstrip(buf); ret = page_counter_memparse(buf, max, &nr_pages); if (ret) return ret; idx = MEMFILE_IDX(of_cft(of)->private); nr_pages = round_down(nr_pages, pages_per_huge_page(&hstates[idx])); switch (MEMFILE_ATTR(of_cft(of)->private)) { case RES_RSVD_LIMIT: rsvd = true; fallthrough; case RES_LIMIT: mutex_lock(&hugetlb_limit_mutex); ret = page_counter_set_max( __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd), nr_pages); mutex_unlock(&hugetlb_limit_mutex); break; default: ret = -EINVAL; break; } return ret ?: nbytes; } static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { return hugetlb_cgroup_write(of, buf, nbytes, off, "-1"); } static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { return hugetlb_cgroup_write(of, buf, nbytes, off, "max"); } static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { int ret = 0; struct page_counter *counter, *rsvd_counter; struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of)); counter = 
&h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)]; rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(of_cft(of)->private)]; switch (MEMFILE_ATTR(of_cft(of)->private)) { case RES_MAX_USAGE: page_counter_reset_watermark(counter); break; case RES_RSVD_MAX_USAGE: page_counter_reset_watermark(rsvd_counter); break; case RES_FAILCNT: counter->failcnt = 0; break; case RES_RSVD_FAILCNT: rsvd_counter->failcnt = 0; break; default: ret = -EINVAL; break; } return ret ?: nbytes; } static char *mem_fmt(char *buf, int size, unsigned long hsize) { if (hsize >= SZ_1G) snprintf(buf, size, "%luGB", hsize / SZ_1G); else if (hsize >= SZ_1M) snprintf(buf, size, "%luMB", hsize / SZ_1M); else snprintf(buf, size, "%luKB", hsize / SZ_1K); return buf; } static int __hugetlb_events_show(struct seq_file *seq, bool local) { int idx; long max; struct cftype *cft = seq_cft(seq); struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq)); idx = MEMFILE_IDX(cft->private); if (local) max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]); else max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]); seq_printf(seq, "max %lu\n", max); return 0; } static int hugetlb_events_show(struct seq_file *seq, void *v) { return __hugetlb_events_show(seq, false); } static int hugetlb_events_local_show(struct seq_file *seq, void *v) { return __hugetlb_events_show(seq, true); } static void __init __hugetlb_cgroup_file_dfl_init(int idx) { char buf[32]; struct cftype *cft; struct hstate *h = &hstates[idx]; /* format the size */ mem_fmt(buf, sizeof(buf), huge_page_size(h)); /* Add the limit file */ cft = &h->cgroup_files_dfl[0]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max", buf); cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT); cft->seq_show = hugetlb_cgroup_read_u64_max; cft->write = hugetlb_cgroup_write_dfl; cft->flags = CFTYPE_NOT_ON_ROOT; /* Add the reservation limit file */ cft = &h->cgroup_files_dfl[1]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max", buf); cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT); cft->seq_show = hugetlb_cgroup_read_u64_max; cft->write = hugetlb_cgroup_write_dfl; cft->flags = CFTYPE_NOT_ON_ROOT; /* Add the current usage file */ cft = &h->cgroup_files_dfl[2]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf); cft->private = MEMFILE_PRIVATE(idx, RES_USAGE); cft->seq_show = hugetlb_cgroup_read_u64_max; cft->flags = CFTYPE_NOT_ON_ROOT; /* Add the current reservation usage file */ cft = &h->cgroup_files_dfl[3]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.current", buf); cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE); cft->seq_show = hugetlb_cgroup_read_u64_max; cft->flags = CFTYPE_NOT_ON_ROOT; /* Add the events file */ cft = &h->cgroup_files_dfl[4]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf); cft->private = MEMFILE_PRIVATE(idx, 0); cft->seq_show = hugetlb_events_show; cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]); cft->flags = CFTYPE_NOT_ON_ROOT; /* Add the events.local file */ cft = &h->cgroup_files_dfl[5]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf); cft->private = MEMFILE_PRIVATE(idx, 0); cft->seq_show = hugetlb_events_local_show; cft->file_offset = offsetof(struct hugetlb_cgroup, events_local_file[idx]); cft->flags = CFTYPE_NOT_ON_ROOT; /* Add the numa stat file */ cft = &h->cgroup_files_dfl[6]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf); cft->private = MEMFILE_PRIVATE(idx, 0); cft->seq_show = hugetlb_cgroup_read_numa_stat; cft->flags = CFTYPE_NOT_ON_ROOT; /* NULL terminate the last cft */ cft = 
&h->cgroup_files_dfl[7]; memset(cft, 0, sizeof(*cft)); WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys, h->cgroup_files_dfl)); } static void __init __hugetlb_cgroup_file_legacy_init(int idx) { char buf[32]; struct cftype *cft; struct hstate *h = &hstates[idx]; /* format the size */ mem_fmt(buf, sizeof(buf), huge_page_size(h)); /* Add the limit file */ cft = &h->cgroup_files_legacy[0]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf); cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT); cft->read_u64 = hugetlb_cgroup_read_u64; cft->write = hugetlb_cgroup_write_legacy; /* Add the reservation limit file */ cft = &h->cgroup_files_legacy[1]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.limit_in_bytes", buf); cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT); cft->read_u64 = hugetlb_cgroup_read_u64; cft->write = hugetlb_cgroup_write_legacy; /* Add the usage file */ cft = &h->cgroup_files_legacy[2]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf); cft->private = MEMFILE_PRIVATE(idx, RES_USAGE); cft->read_u64 = hugetlb_cgroup_read_u64; /* Add the reservation usage file */ cft = &h->cgroup_files_legacy[3]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.usage_in_bytes", buf); cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE); cft->read_u64 = hugetlb_cgroup_read_u64; /* Add the MAX usage file */ cft = &h->cgroup_files_legacy[4]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf); cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE); cft->write = hugetlb_cgroup_reset; cft->read_u64 = hugetlb_cgroup_read_u64; /* Add the MAX reservation usage file */ cft = &h->cgroup_files_legacy[5]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max_usage_in_bytes", buf); cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_MAX_USAGE); cft->write = hugetlb_cgroup_reset; cft->read_u64 = hugetlb_cgroup_read_u64; /* Add the failcntfile */ cft = &h->cgroup_files_legacy[6]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf); cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT); cft->write = hugetlb_cgroup_reset; cft->read_u64 = hugetlb_cgroup_read_u64; /* Add the reservation failcntfile */ cft = &h->cgroup_files_legacy[7]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.failcnt", buf); cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_FAILCNT); cft->write = hugetlb_cgroup_reset; cft->read_u64 = hugetlb_cgroup_read_u64; /* Add the numa stat file */ cft = &h->cgroup_files_legacy[8]; snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf); cft->private = MEMFILE_PRIVATE(idx, 1); cft->seq_show = hugetlb_cgroup_read_numa_stat; /* NULL terminate the last cft */ cft = &h->cgroup_files_legacy[9]; memset(cft, 0, sizeof(*cft)); WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys, h->cgroup_files_legacy)); } static void __init __hugetlb_cgroup_file_init(int idx) { __hugetlb_cgroup_file_dfl_init(idx); __hugetlb_cgroup_file_legacy_init(idx); } void __init hugetlb_cgroup_file_init(void) { struct hstate *h; for_each_hstate(h) { /* * Add cgroup control files only if the huge page consists * of more than two normal pages. This is because we use * page[2].private for storing cgroup details. 
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct folio *old_folio, struct folio *new_folio)
{
	struct hugetlb_cgroup *h_cg;
	struct hugetlb_cgroup *h_cg_rsvd;
	struct hstate *h = folio_hstate(old_folio);

	if (hugetlb_cgroup_disabled())
		return;

	spin_lock_irq(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_folio(old_folio);
	h_cg_rsvd = hugetlb_cgroup_from_folio_rsvd(old_folio);
	set_hugetlb_cgroup(old_folio, NULL);
	set_hugetlb_cgroup_rsvd(old_folio, NULL);

	/* move the h_cg details to new cgroup */
	set_hugetlb_cgroup(new_folio, h_cg);
	set_hugetlb_cgroup_rsvd(new_folio, h_cg_rsvd);
	list_move(&new_folio->lru, &h->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
	return;
}

static struct cftype hugetlb_files[] = {
	{} /* terminate */
};

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
	.dfl_cftypes	= hugetlb_files,
	.legacy_cftypes	= hugetlb_files,
};
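
/*
 * Illustrative sketch, not part of the kernel file above: a minimal
 * userspace program showing how MEMFILE_PRIVATE() packs an hstate index
 * and a RES_* attribute into cftype->private, and how MEMFILE_IDX() and
 * MEMFILE_ATTR() recover them when the control files are read or written.
 */
#include <assert.h>
#include <stdio.h>

/* Definitions copied from mm/hugetlb_cgroup.c above. */
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

enum {
	RES_USAGE, RES_RSVD_USAGE, RES_LIMIT, RES_RSVD_LIMIT,
	RES_MAX_USAGE, RES_RSVD_MAX_USAGE, RES_FAILCNT, RES_RSVD_FAILCNT,
};

int main(void)
{
	/* e.g. hstate index 1 paired with the limit-file attribute */
	unsigned long priv = MEMFILE_PRIVATE(1, RES_LIMIT);

	assert(MEMFILE_IDX(priv) == 1);
	assert(MEMFILE_ATTR(priv) == RES_LIMIT);
	printf("private=%#lx idx=%lu attr=%lu\n",
	       priv, MEMFILE_IDX(priv), MEMFILE_ATTR(priv));
	return 0;
}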
linux-master
mm/hugetlb_cgroup.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/mm/compaction.c * * Memory compaction for the reduction of external fragmentation. Note that * this heavily depends upon page migration to do all the real heavy * lifting * * Copyright IBM Corp. 2007-2010 Mel Gorman <[email protected]> */ #include <linux/cpu.h> #include <linux/swap.h> #include <linux/migrate.h> #include <linux/compaction.h> #include <linux/mm_inline.h> #include <linux/sched/signal.h> #include <linux/backing-dev.h> #include <linux/sysctl.h> #include <linux/sysfs.h> #include <linux/page-isolation.h> #include <linux/kasan.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/page_owner.h> #include <linux/psi.h> #include "internal.h" #ifdef CONFIG_COMPACTION /* * Fragmentation score check interval for proactive compaction purposes. */ #define HPAGE_FRAG_CHECK_INTERVAL_MSEC (500) static inline void count_compact_event(enum vm_event_item item) { count_vm_event(item); } static inline void count_compact_events(enum vm_event_item item, long delta) { count_vm_events(item, delta); } #else #define count_compact_event(item) do { } while (0) #define count_compact_events(item, delta) do { } while (0) #endif #if defined CONFIG_COMPACTION || defined CONFIG_CMA #define CREATE_TRACE_POINTS #include <trace/events/compaction.h> #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) /* * Page order with-respect-to which proactive compaction * calculates external fragmentation, which is used as * the "fragmentation score" of a node/zone. */ #if defined CONFIG_TRANSPARENT_HUGEPAGE #define COMPACTION_HPAGE_ORDER HPAGE_PMD_ORDER #elif defined CONFIG_HUGETLBFS #define COMPACTION_HPAGE_ORDER HUGETLB_PAGE_ORDER #else #define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT) #endif static unsigned long release_freepages(struct list_head *freelist) { struct page *page, *next; unsigned long high_pfn = 0; list_for_each_entry_safe(page, next, freelist, lru) { unsigned long pfn = page_to_pfn(page); list_del(&page->lru); __free_page(page); if (pfn > high_pfn) high_pfn = pfn; } return high_pfn; } static void split_map_pages(struct list_head *list) { unsigned int i, order, nr_pages; struct page *page, *next; LIST_HEAD(tmp_list); list_for_each_entry_safe(page, next, list, lru) { list_del(&page->lru); order = page_private(page); nr_pages = 1 << order; post_alloc_hook(page, order, __GFP_MOVABLE); if (order) split_page(page, order); for (i = 0; i < nr_pages; i++) { list_add(&page->lru, &tmp_list); page++; } } list_splice(&tmp_list, list); } #ifdef CONFIG_COMPACTION bool PageMovable(struct page *page) { const struct movable_operations *mops; VM_BUG_ON_PAGE(!PageLocked(page), page); if (!__PageMovable(page)) return false; mops = page_movable_ops(page); if (mops) return true; return false; } void __SetPageMovable(struct page *page, const struct movable_operations *mops) { VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE((unsigned long)mops & PAGE_MAPPING_MOVABLE, page); page->mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE); } EXPORT_SYMBOL(__SetPageMovable); void __ClearPageMovable(struct page *page) { VM_BUG_ON_PAGE(!PageMovable(page), page); /* * This page still has the type of a movable page, but it's * actually not movable any more. 
*/ page->mapping = (void *)PAGE_MAPPING_MOVABLE; } EXPORT_SYMBOL(__ClearPageMovable); /* Do not skip compaction more than 64 times */ #define COMPACT_MAX_DEFER_SHIFT 6 /* * Compaction is deferred when compaction fails to result in a page * allocation success. 1 << compact_defer_shift, compactions are skipped up * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT */ static void defer_compaction(struct zone *zone, int order) { zone->compact_considered = 0; zone->compact_defer_shift++; if (order < zone->compact_order_failed) zone->compact_order_failed = order; if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; trace_mm_compaction_defer_compaction(zone, order); } /* Returns true if compaction should be skipped this time */ static bool compaction_deferred(struct zone *zone, int order) { unsigned long defer_limit = 1UL << zone->compact_defer_shift; if (order < zone->compact_order_failed) return false; /* Avoid possible overflow */ if (++zone->compact_considered >= defer_limit) { zone->compact_considered = defer_limit; return false; } trace_mm_compaction_deferred(zone, order); return true; } /* * Update defer tracking counters after successful compaction of given order, * which means an allocation either succeeded (alloc_success == true) or is * expected to succeed. */ void compaction_defer_reset(struct zone *zone, int order, bool alloc_success) { if (alloc_success) { zone->compact_considered = 0; zone->compact_defer_shift = 0; } if (order >= zone->compact_order_failed) zone->compact_order_failed = order + 1; trace_mm_compaction_defer_reset(zone, order); } /* Returns true if restarting compaction after many failures */ static bool compaction_restarting(struct zone *zone, int order) { if (order < zone->compact_order_failed) return false; return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT && zone->compact_considered >= 1UL << zone->compact_defer_shift; } /* Returns true if the pageblock should be scanned for pages to isolate. */ static inline bool isolation_suitable(struct compact_control *cc, struct page *page) { if (cc->ignore_skip_hint) return true; return !get_pageblock_skip(page); } static void reset_cached_positions(struct zone *zone) { zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; zone->compact_cached_free_pfn = pageblock_start_pfn(zone_end_pfn(zone) - 1); } #ifdef CONFIG_SPARSEMEM /* * If the PFN falls into an offline section, return the start PFN of the * next online section. If the PFN falls into an online section or if * there is no next online section, return 0. */ static unsigned long skip_offline_sections(unsigned long start_pfn) { unsigned long start_nr = pfn_to_section_nr(start_pfn); if (online_section_nr(start_nr)) return 0; while (++start_nr <= __highest_present_section_nr) { if (online_section_nr(start_nr)) return section_nr_to_pfn(start_nr); } return 0; } /* * If the PFN falls into an offline section, return the end PFN of the * next online section in reverse. If the PFN falls into an online section * or if there is no next online section in reverse, return 0. 
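 *
 * For example (illustrative section numbers): if start_pfn lies in offline
 * section 12 and the closest lower online section is 9, the function
 * returns section_nr_to_pfn(9) + PAGES_PER_SECTION, i.e. one past the last
 * PFN of section 9; if every lower section is offline it returns 0.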
*/ static unsigned long skip_offline_sections_reverse(unsigned long start_pfn) { unsigned long start_nr = pfn_to_section_nr(start_pfn); if (!start_nr || online_section_nr(start_nr)) return 0; while (start_nr-- > 0) { if (online_section_nr(start_nr)) return section_nr_to_pfn(start_nr) + PAGES_PER_SECTION; } return 0; } #else static unsigned long skip_offline_sections(unsigned long start_pfn) { return 0; } static unsigned long skip_offline_sections_reverse(unsigned long start_pfn) { return 0; } #endif /* * Compound pages of >= pageblock_order should consistently be skipped until * released. It is always pointless to compact pages of such order (if they are * migratable), and the pageblocks they occupy cannot contain any free pages. */ static bool pageblock_skip_persistent(struct page *page) { if (!PageCompound(page)) return false; page = compound_head(page); if (compound_order(page) >= pageblock_order) return true; return false; } static bool __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source, bool check_target) { struct page *page = pfn_to_online_page(pfn); struct page *block_page; struct page *end_page; unsigned long block_pfn; if (!page) return false; if (zone != page_zone(page)) return false; if (pageblock_skip_persistent(page)) return false; /* * If skip is already cleared do no further checking once the * restart points have been set. */ if (check_source && check_target && !get_pageblock_skip(page)) return true; /* * If clearing skip for the target scanner, do not select a * non-movable pageblock as the starting point. */ if (!check_source && check_target && get_pageblock_migratetype(page) != MIGRATE_MOVABLE) return false; /* Ensure the start of the pageblock or zone is online and valid */ block_pfn = pageblock_start_pfn(pfn); block_pfn = max(block_pfn, zone->zone_start_pfn); block_page = pfn_to_online_page(block_pfn); if (block_page) { page = block_page; pfn = block_pfn; } /* Ensure the end of the pageblock or zone is online and valid */ block_pfn = pageblock_end_pfn(pfn) - 1; block_pfn = min(block_pfn, zone_end_pfn(zone) - 1); end_page = pfn_to_online_page(block_pfn); if (!end_page) return false; /* * Only clear the hint if a sample indicates there is either a * free page or an LRU page in the block. One or other condition * is necessary for the block to be a migration source/target. */ do { if (check_source && PageLRU(page)) { clear_pageblock_skip(page); return true; } if (check_target && PageBuddy(page)) { clear_pageblock_skip(page); return true; } page += (1 << PAGE_ALLOC_COSTLY_ORDER); } while (page <= end_page); return false; } /* * This function is called to clear all cached information on pageblocks that * should be skipped for page isolation when the migrate and free page scanner * meet. */ static void __reset_isolation_suitable(struct zone *zone) { unsigned long migrate_pfn = zone->zone_start_pfn; unsigned long free_pfn = zone_end_pfn(zone) - 1; unsigned long reset_migrate = free_pfn; unsigned long reset_free = migrate_pfn; bool source_set = false; bool free_set = false; if (!zone->compact_blockskip_flush) return; zone->compact_blockskip_flush = false; /* * Walk the zone and update pageblock skip information. Source looks * for PageLRU while target looks for PageBuddy. When the scanner * is found, both PageBuddy and PageLRU are checked as the pageblock * is suitable as both source and target. 
*/ for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages, free_pfn -= pageblock_nr_pages) { cond_resched(); /* Update the migrate PFN */ if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) && migrate_pfn < reset_migrate) { source_set = true; reset_migrate = migrate_pfn; zone->compact_init_migrate_pfn = reset_migrate; zone->compact_cached_migrate_pfn[0] = reset_migrate; zone->compact_cached_migrate_pfn[1] = reset_migrate; } /* Update the free PFN */ if (__reset_isolation_pfn(zone, free_pfn, free_set, true) && free_pfn > reset_free) { free_set = true; reset_free = free_pfn; zone->compact_init_free_pfn = reset_free; zone->compact_cached_free_pfn = reset_free; } } /* Leave no distance if no suitable block was reset */ if (reset_migrate >= reset_free) { zone->compact_cached_migrate_pfn[0] = migrate_pfn; zone->compact_cached_migrate_pfn[1] = migrate_pfn; zone->compact_cached_free_pfn = free_pfn; } } void reset_isolation_suitable(pg_data_t *pgdat) { int zoneid; for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { struct zone *zone = &pgdat->node_zones[zoneid]; if (!populated_zone(zone)) continue; /* Only flush if a full compaction finished recently */ if (zone->compact_blockskip_flush) __reset_isolation_suitable(zone); } } /* * Sets the pageblock skip bit if it was clear. Note that this is a hint as * locks are not required for read/writers. Returns true if it was already set. */ static bool test_and_set_skip(struct compact_control *cc, struct page *page) { bool skip; /* Do not update if skip hint is being ignored */ if (cc->ignore_skip_hint) return false; skip = get_pageblock_skip(page); if (!skip && !cc->no_set_skip_hint) set_pageblock_skip(page); return skip; } static void update_cached_migrate(struct compact_control *cc, unsigned long pfn) { struct zone *zone = cc->zone; /* Set for isolation rather than compaction */ if (cc->no_set_skip_hint) return; pfn = pageblock_end_pfn(pfn); /* Update where async and sync compaction should restart */ if (pfn > zone->compact_cached_migrate_pfn[0]) zone->compact_cached_migrate_pfn[0] = pfn; if (cc->mode != MIGRATE_ASYNC && pfn > zone->compact_cached_migrate_pfn[1]) zone->compact_cached_migrate_pfn[1] = pfn; } /* * If no pages were isolated then mark this pageblock to be skipped in the * future. The information is later cleared by __reset_isolation_suitable(). */ static void update_pageblock_skip(struct compact_control *cc, struct page *page, unsigned long pfn) { struct zone *zone = cc->zone; if (cc->no_set_skip_hint) return; set_pageblock_skip(page); if (pfn < zone->compact_cached_free_pfn) zone->compact_cached_free_pfn = pfn; } #else static inline bool isolation_suitable(struct compact_control *cc, struct page *page) { return true; } static inline bool pageblock_skip_persistent(struct page *page) { return false; } static inline void update_pageblock_skip(struct compact_control *cc, struct page *page, unsigned long pfn) { } static void update_cached_migrate(struct compact_control *cc, unsigned long pfn) { } static bool test_and_set_skip(struct compact_control *cc, struct page *page) { return false; } #endif /* CONFIG_COMPACTION */ /* * Compaction requires the taking of some coarse locks that are potentially * very heavily contended. For async compaction, trylock and record if the * lock is contended. The lock will still be acquired but compaction will * abort when the current block is finished regardless of success rate. * Sync compaction acquires the lock. * * Always returns true which makes it easier to track lock state in callers. 
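 *
 * A typical caller pattern, condensed from isolate_freepages_block()
 * below for illustration, is:
 *
 *	if (!locked)
 *		locked = compact_lock_irqsave(&cc->zone->lock, &flags, cc);
 *	...
 *	if (locked)
 *		spin_unlock_irqrestore(&cc->zone->lock, flags);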
*/ static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags, struct compact_control *cc) __acquires(lock) { /* Track if the lock is contended in async mode */ if (cc->mode == MIGRATE_ASYNC && !cc->contended) { if (spin_trylock_irqsave(lock, *flags)) return true; cc->contended = true; } spin_lock_irqsave(lock, *flags); return true; } /* * Compaction requires the taking of some coarse locks that are potentially * very heavily contended. The lock should be periodically unlocked to avoid * having disabled IRQs for a long time, even when there is nobody waiting on * the lock. It might also be that allowing the IRQs will result in * need_resched() becoming true. If scheduling is needed, compaction schedules. * Either compaction type will also abort if a fatal signal is pending. * In either case if the lock was locked, it is dropped and not regained. * * Returns true if compaction should abort due to fatal signal pending. * Returns false when compaction can continue. */ static bool compact_unlock_should_abort(spinlock_t *lock, unsigned long flags, bool *locked, struct compact_control *cc) { if (*locked) { spin_unlock_irqrestore(lock, flags); *locked = false; } if (fatal_signal_pending(current)) { cc->contended = true; return true; } cond_resched(); return false; } /* * Isolate free pages onto a private freelist. If @strict is true, will abort * returning 0 on any invalid PFNs or non-free pages inside of the pageblock * (even though it may still end up isolating some pages). */ static unsigned long isolate_freepages_block(struct compact_control *cc, unsigned long *start_pfn, unsigned long end_pfn, struct list_head *freelist, unsigned int stride, bool strict) { int nr_scanned = 0, total_isolated = 0; struct page *page; unsigned long flags = 0; bool locked = false; unsigned long blockpfn = *start_pfn; unsigned int order; /* Strict mode is for isolation, speed is secondary */ if (strict) stride = 1; page = pfn_to_page(blockpfn); /* Isolate free pages. */ for (; blockpfn < end_pfn; blockpfn += stride, page += stride) { int isolated; /* * Periodically drop the lock (if held) regardless of its * contention, to give chance to IRQs. Abort if fatal signal * pending. */ if (!(blockpfn % COMPACT_CLUSTER_MAX) && compact_unlock_should_abort(&cc->zone->lock, flags, &locked, cc)) break; nr_scanned++; /* * For compound pages such as THP and hugetlbfs, we can save * potentially a lot of iterations if we skip them at once. * The check is racy, but we can consider only valid values * and the only danger is skipping too much. */ if (PageCompound(page)) { const unsigned int order = compound_order(page); if (likely(order <= MAX_ORDER)) { blockpfn += (1UL << order) - 1; page += (1UL << order) - 1; nr_scanned += (1UL << order) - 1; } goto isolate_fail; } if (!PageBuddy(page)) goto isolate_fail; /* If we already hold the lock, we can skip some rechecking. 
*/ if (!locked) { locked = compact_lock_irqsave(&cc->zone->lock, &flags, cc); /* Recheck this is a buddy page under lock */ if (!PageBuddy(page)) goto isolate_fail; } /* Found a free page, will break it into order-0 pages */ order = buddy_order(page); isolated = __isolate_free_page(page, order); if (!isolated) break; set_page_private(page, order); nr_scanned += isolated - 1; total_isolated += isolated; cc->nr_freepages += isolated; list_add_tail(&page->lru, freelist); if (!strict && cc->nr_migratepages <= cc->nr_freepages) { blockpfn += isolated; break; } /* Advance to the end of split page */ blockpfn += isolated - 1; page += isolated - 1; continue; isolate_fail: if (strict) break; } if (locked) spin_unlock_irqrestore(&cc->zone->lock, flags); /* * There is a tiny chance that we have read bogus compound_order(), * so be careful to not go outside of the pageblock. */ if (unlikely(blockpfn > end_pfn)) blockpfn = end_pfn; trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn, nr_scanned, total_isolated); /* Record how far we have got within the block */ *start_pfn = blockpfn; /* * If strict isolation is requested by CMA then check that all the * pages requested were isolated. If there were any failures, 0 is * returned and CMA will fail. */ if (strict && blockpfn < end_pfn) total_isolated = 0; cc->total_free_scanned += nr_scanned; if (total_isolated) count_compact_events(COMPACTISOLATED, total_isolated); return total_isolated; } /** * isolate_freepages_range() - isolate free pages. * @cc: Compaction control structure. * @start_pfn: The first PFN to start isolating. * @end_pfn: The one-past-last PFN. * * Non-free pages, invalid PFNs, or zone boundaries within the * [start_pfn, end_pfn) range are considered errors, cause function to * undo its actions and return zero. * * Otherwise, function returns one-past-the-last PFN of isolated page * (which may be greater then end_pfn if end fell in a middle of * a free page). */ unsigned long isolate_freepages_range(struct compact_control *cc, unsigned long start_pfn, unsigned long end_pfn) { unsigned long isolated, pfn, block_start_pfn, block_end_pfn; LIST_HEAD(freelist); pfn = start_pfn; block_start_pfn = pageblock_start_pfn(pfn); if (block_start_pfn < cc->zone->zone_start_pfn) block_start_pfn = cc->zone->zone_start_pfn; block_end_pfn = pageblock_end_pfn(pfn); for (; pfn < end_pfn; pfn += isolated, block_start_pfn = block_end_pfn, block_end_pfn += pageblock_nr_pages) { /* Protect pfn from changing by isolate_freepages_block */ unsigned long isolate_start_pfn = pfn; /* * pfn could pass the block_end_pfn if isolated freepage * is more than pageblock order. In this case, we adjust * scanning range to right one. */ if (pfn >= block_end_pfn) { block_start_pfn = pageblock_start_pfn(pfn); block_end_pfn = pageblock_end_pfn(pfn); } block_end_pfn = min(block_end_pfn, end_pfn); if (!pageblock_pfn_to_page(block_start_pfn, block_end_pfn, cc->zone)) break; isolated = isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn, &freelist, 0, true); /* * In strict mode, isolate_freepages_block() returns 0 if * there are any holes in the block (ie. invalid PFNs or * non-free pages). */ if (!isolated) break; /* * If we managed to isolate pages, it is always (1 << n) * * pageblock_nr_pages for some non-negative n. (Max order * page may span two pageblocks). */ } /* __isolate_free_page() does not map the pages */ split_map_pages(&freelist); if (pfn < end_pfn) { /* Loop terminated early, cleanup. 
*/ release_freepages(&freelist); return 0; } /* We don't use freelists for anything. */ return pfn; } /* Similar to reclaim, but different enough that they don't share logic */ static bool too_many_isolated(struct compact_control *cc) { pg_data_t *pgdat = cc->zone->zone_pgdat; bool too_many; unsigned long active, inactive, isolated; inactive = node_page_state(pgdat, NR_INACTIVE_FILE) + node_page_state(pgdat, NR_INACTIVE_ANON); active = node_page_state(pgdat, NR_ACTIVE_FILE) + node_page_state(pgdat, NR_ACTIVE_ANON); isolated = node_page_state(pgdat, NR_ISOLATED_FILE) + node_page_state(pgdat, NR_ISOLATED_ANON); /* * Allow GFP_NOFS to isolate past the limit set for regular * compaction runs. This prevents an ABBA deadlock when other * compactors have already isolated to the limit, but are * blocked on filesystem locks held by the GFP_NOFS thread. */ if (cc->gfp_mask & __GFP_FS) { inactive >>= 3; active >>= 3; } too_many = isolated > (inactive + active) / 2; if (!too_many) wake_throttle_isolated(pgdat); return too_many; } /** * isolate_migratepages_block() - isolate all migrate-able pages within * a single pageblock * @cc: Compaction control structure. * @low_pfn: The first PFN to isolate * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock * @mode: Isolation mode to be used. * * Isolate all pages that can be migrated from the range specified by * [low_pfn, end_pfn). The range is expected to be within same pageblock. * Returns errno, like -EAGAIN or -EINTR in case e.g signal pending or congestion, * -ENOMEM in case we could not allocate a page, or 0. * cc->migrate_pfn will contain the next pfn to scan. * * The pages are isolated on cc->migratepages list (not required to be empty), * and cc->nr_migratepages is updated accordingly. */ static int isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, unsigned long end_pfn, isolate_mode_t mode) { pg_data_t *pgdat = cc->zone->zone_pgdat; unsigned long nr_scanned = 0, nr_isolated = 0; struct lruvec *lruvec; unsigned long flags = 0; struct lruvec *locked = NULL; struct folio *folio = NULL; struct page *page = NULL, *valid_page = NULL; struct address_space *mapping; unsigned long start_pfn = low_pfn; bool skip_on_failure = false; unsigned long next_skip_pfn = 0; bool skip_updated = false; int ret = 0; cc->migrate_pfn = low_pfn; /* * Ensure that there are not too many pages isolated from the LRU * list by either parallel reclaimers or compaction. If there are, * delay for some time until fewer pages are isolated */ while (unlikely(too_many_isolated(cc))) { /* stop isolation if there are still pages not migrated */ if (cc->nr_migratepages) return -EAGAIN; /* async migration should just abort */ if (cc->mode == MIGRATE_ASYNC) return -EAGAIN; reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED); if (fatal_signal_pending(current)) return -EINTR; } cond_resched(); if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) { skip_on_failure = true; next_skip_pfn = block_end_pfn(low_pfn, cc->order); } /* Time to isolate some pages for migration */ for (; low_pfn < end_pfn; low_pfn++) { if (skip_on_failure && low_pfn >= next_skip_pfn) { /* * We have isolated all migration candidates in the * previous order-aligned block, and did not skip it due * to failure. We should migrate the pages now and * hopefully succeed compaction. */ if (nr_isolated) break; /* * We failed to isolate in the previous order-aligned * block. Set the new boundary to the end of the * current block. 
Note we can't simply increase * next_skip_pfn by 1 << order, as low_pfn might have * been incremented by a higher number due to skipping * a compound or a high-order buddy page in the * previous loop iteration. */ next_skip_pfn = block_end_pfn(low_pfn, cc->order); } /* * Periodically drop the lock (if held) regardless of its * contention, to give chance to IRQs. Abort completely if * a fatal signal is pending. */ if (!(low_pfn % COMPACT_CLUSTER_MAX)) { if (locked) { unlock_page_lruvec_irqrestore(locked, flags); locked = NULL; } if (fatal_signal_pending(current)) { cc->contended = true; ret = -EINTR; goto fatal_pending; } cond_resched(); } nr_scanned++; page = pfn_to_page(low_pfn); /* * Check if the pageblock has already been marked skipped. * Only the first PFN is checked as the caller isolates * COMPACT_CLUSTER_MAX at a time so the second call must * not falsely conclude that the block should be skipped. */ if (!valid_page && (pageblock_aligned(low_pfn) || low_pfn == cc->zone->zone_start_pfn)) { if (!isolation_suitable(cc, page)) { low_pfn = end_pfn; folio = NULL; goto isolate_abort; } valid_page = page; } if (PageHuge(page) && cc->alloc_contig) { if (locked) { unlock_page_lruvec_irqrestore(locked, flags); locked = NULL; } ret = isolate_or_dissolve_huge_page(page, &cc->migratepages); /* * Fail isolation in case isolate_or_dissolve_huge_page() * reports an error. In case of -ENOMEM, abort right away. */ if (ret < 0) { /* Do not report -EBUSY down the chain */ if (ret == -EBUSY) ret = 0; low_pfn += compound_nr(page) - 1; nr_scanned += compound_nr(page) - 1; goto isolate_fail; } if (PageHuge(page)) { /* * Hugepage was successfully isolated and placed * on the cc->migratepages list. */ folio = page_folio(page); low_pfn += folio_nr_pages(folio) - 1; goto isolate_success_no_list; } /* * Ok, the hugepage was dissolved. Now these pages are * Buddy and cannot be re-allocated because they are * isolated. Fall-through as the check below handles * Buddy pages. */ } /* * Skip if free. We read page order here without zone lock * which is generally unsafe, but the race window is small and * the worst thing that can happen is that we skip some * potential isolation targets. */ if (PageBuddy(page)) { unsigned long freepage_order = buddy_order_unsafe(page); /* * Without lock, we cannot be sure that what we got is * a valid page order. Consider only values in the * valid order range to prevent low_pfn overflow. */ if (freepage_order > 0 && freepage_order <= MAX_ORDER) { low_pfn += (1UL << freepage_order) - 1; nr_scanned += (1UL << freepage_order) - 1; } continue; } /* * Regardless of being on LRU, compound pages such as THP and * hugetlbfs are not to be compacted unless we are attempting * an allocation much larger than the huge page size (eg CMA). * We can potentially save a lot of iterations if we skip them * at once. The check is racy, but we can consider only valid * values and the only danger is skipping too much. */ if (PageCompound(page) && !cc->alloc_contig) { const unsigned int order = compound_order(page); if (likely(order <= MAX_ORDER)) { low_pfn += (1UL << order) - 1; nr_scanned += (1UL << order) - 1; } goto isolate_fail; } /* * Check may be lockless but that's ok as we recheck later. * It's possible to migrate LRU and non-lru movable pages. * Skip any other type of page */ if (!PageLRU(page)) { /* * __PageMovable can return false positive so we need * to verify it under page_lock. 
*/ if (unlikely(__PageMovable(page)) && !PageIsolated(page)) { if (locked) { unlock_page_lruvec_irqrestore(locked, flags); locked = NULL; } if (isolate_movable_page(page, mode)) { folio = page_folio(page); goto isolate_success; } } goto isolate_fail; } /* * Be careful not to clear PageLRU until after we're * sure the page is not being freed elsewhere -- the * page release code relies on it. */ folio = folio_get_nontail_page(page); if (unlikely(!folio)) goto isolate_fail; /* * Migration will fail if an anonymous page is pinned in memory, * so avoid taking lru_lock and isolating it unnecessarily in an * admittedly racy check. */ mapping = folio_mapping(folio); if (!mapping && (folio_ref_count(folio) - 1) > folio_mapcount(folio)) goto isolate_fail_put; /* * Only allow to migrate anonymous pages in GFP_NOFS context * because those do not depend on fs locks. */ if (!(cc->gfp_mask & __GFP_FS) && mapping) goto isolate_fail_put; /* Only take pages on LRU: a check now makes later tests safe */ if (!folio_test_lru(folio)) goto isolate_fail_put; /* Compaction might skip unevictable pages but CMA takes them */ if (!(mode & ISOLATE_UNEVICTABLE) && folio_test_unevictable(folio)) goto isolate_fail_put; /* * To minimise LRU disruption, the caller can indicate with * ISOLATE_ASYNC_MIGRATE that it only wants to isolate pages * it will be able to migrate without blocking - clean pages * for the most part. PageWriteback would require blocking. */ if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_writeback(folio)) goto isolate_fail_put; if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_dirty(folio)) { bool migrate_dirty; /* * Only folios without mappings or that have * a ->migrate_folio callback are possible to * migrate without blocking. However, we may * be racing with truncation, which can free * the mapping. Truncation holds the folio lock * until after the folio is removed from the page * cache so holding it ourselves is sufficient. */ if (!folio_trylock(folio)) goto isolate_fail_put; mapping = folio_mapping(folio); migrate_dirty = !mapping || mapping->a_ops->migrate_folio; folio_unlock(folio); if (!migrate_dirty) goto isolate_fail_put; } /* Try isolate the folio */ if (!folio_test_clear_lru(folio)) goto isolate_fail_put; lruvec = folio_lruvec(folio); /* If we already hold the lock, we can skip some rechecking */ if (lruvec != locked) { if (locked) unlock_page_lruvec_irqrestore(locked, flags); compact_lock_irqsave(&lruvec->lru_lock, &flags, cc); locked = lruvec; lruvec_memcg_debug(lruvec, folio); /* * Try get exclusive access under lock. If marked for * skip, the scan is aborted unless the current context * is a rescan to reach the end of the pageblock. */ if (!skip_updated && valid_page) { skip_updated = true; if (test_and_set_skip(cc, valid_page) && !cc->finish_pageblock) { low_pfn = end_pfn; goto isolate_abort; } } /* * folio become large since the non-locked check, * and it's on LRU. 
*/ if (unlikely(folio_test_large(folio) && !cc->alloc_contig)) { low_pfn += folio_nr_pages(folio) - 1; nr_scanned += folio_nr_pages(folio) - 1; folio_set_lru(folio); goto isolate_fail_put; } } /* The folio is taken off the LRU */ if (folio_test_large(folio)) low_pfn += folio_nr_pages(folio) - 1; /* Successfully isolated */ lruvec_del_folio(lruvec, folio); node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio), folio_nr_pages(folio)); isolate_success: list_add(&folio->lru, &cc->migratepages); isolate_success_no_list: cc->nr_migratepages += folio_nr_pages(folio); nr_isolated += folio_nr_pages(folio); nr_scanned += folio_nr_pages(folio) - 1; /* * Avoid isolating too much unless this block is being * fully scanned (e.g. dirty/writeback pages, parallel allocation) * or a lock is contended. For contention, isolate quickly to * potentially remove one source of contention. */ if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX && !cc->finish_pageblock && !cc->contended) { ++low_pfn; break; } continue; isolate_fail_put: /* Avoid potential deadlock in freeing page under lru_lock */ if (locked) { unlock_page_lruvec_irqrestore(locked, flags); locked = NULL; } folio_put(folio); isolate_fail: if (!skip_on_failure && ret != -ENOMEM) continue; /* * We have isolated some pages, but then failed. Release them * instead of migrating, as we cannot form the cc->order buddy * page anyway. */ if (nr_isolated) { if (locked) { unlock_page_lruvec_irqrestore(locked, flags); locked = NULL; } putback_movable_pages(&cc->migratepages); cc->nr_migratepages = 0; nr_isolated = 0; } if (low_pfn < next_skip_pfn) { low_pfn = next_skip_pfn - 1; /* * The check near the loop beginning would have updated * next_skip_pfn too, but this is a bit simpler. */ next_skip_pfn += 1UL << cc->order; } if (ret == -ENOMEM) break; } /* * The PageBuddy() check could have potentially brought us outside * the range to be scanned. */ if (unlikely(low_pfn > end_pfn)) low_pfn = end_pfn; folio = NULL; isolate_abort: if (locked) unlock_page_lruvec_irqrestore(locked, flags); if (folio) { folio_set_lru(folio); folio_put(folio); } /* * Update the cached scanner pfn once the pageblock has been scanned. * Pages will either be migrated in which case there is no point * scanning in the near future or migration failed in which case the * failure reason may persist. The block is marked for skipping if * there were no pages isolated in the block or if the block is * rescanned twice in a row. */ if (low_pfn == end_pfn && (!nr_isolated || cc->finish_pageblock)) { if (!cc->no_set_skip_hint && valid_page && !skip_updated) set_pageblock_skip(valid_page); update_cached_migrate(cc, low_pfn); } trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn, nr_scanned, nr_isolated); fatal_pending: cc->total_migrate_scanned += nr_scanned; if (nr_isolated) count_compact_events(COMPACTISOLATED, nr_isolated); cc->migrate_pfn = low_pfn; return ret; } /** * isolate_migratepages_range() - isolate migrate-able pages in a PFN range * @cc: Compaction control structure. * @start_pfn: The first PFN to start isolating. * @end_pfn: The one-past-last PFN. * * Returns -EAGAIN when contented, -EINTR in case of a signal pending, -ENOMEM * in case we could not allocate a page, or 0. */ int isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, unsigned long end_pfn) { unsigned long pfn, block_start_pfn, block_end_pfn; int ret = 0; /* Scan block by block. 
First and last block may be incomplete */ pfn = start_pfn; block_start_pfn = pageblock_start_pfn(pfn); if (block_start_pfn < cc->zone->zone_start_pfn) block_start_pfn = cc->zone->zone_start_pfn; block_end_pfn = pageblock_end_pfn(pfn); for (; pfn < end_pfn; pfn = block_end_pfn, block_start_pfn = block_end_pfn, block_end_pfn += pageblock_nr_pages) { block_end_pfn = min(block_end_pfn, end_pfn); if (!pageblock_pfn_to_page(block_start_pfn, block_end_pfn, cc->zone)) continue; ret = isolate_migratepages_block(cc, pfn, block_end_pfn, ISOLATE_UNEVICTABLE); if (ret) break; if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX) break; } return ret; } #endif /* CONFIG_COMPACTION || CONFIG_CMA */ #ifdef CONFIG_COMPACTION static bool suitable_migration_source(struct compact_control *cc, struct page *page) { int block_mt; if (pageblock_skip_persistent(page)) return false; if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction) return true; block_mt = get_pageblock_migratetype(page); if (cc->migratetype == MIGRATE_MOVABLE) return is_migrate_movable(block_mt); else return block_mt == cc->migratetype; } /* Returns true if the page is within a block suitable for migration to */ static bool suitable_migration_target(struct compact_control *cc, struct page *page) { /* If the page is a large free page, then disallow migration */ if (PageBuddy(page)) { /* * We are checking page_order without zone->lock taken. But * the only small danger is that we skip a potentially suitable * pageblock, so it's not worth to check order for valid range. */ if (buddy_order_unsafe(page) >= pageblock_order) return false; } if (cc->ignore_block_suitable) return true; /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ if (is_migrate_movable(get_pageblock_migratetype(page))) return true; /* Otherwise skip the block */ return false; } static inline unsigned int freelist_scan_limit(struct compact_control *cc) { unsigned short shift = BITS_PER_LONG - 1; return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1; } /* * Test whether the free scanner has reached the same or lower pageblock than * the migration scanner, and compaction should thus terminate. */ static inline bool compact_scanners_met(struct compact_control *cc) { return (cc->free_pfn >> pageblock_order) <= (cc->migrate_pfn >> pageblock_order); } /* * Used when scanning for a suitable migration target which scans freelists * in reverse. Reorders the list such as the unscanned pages are scanned * first on the next iteration of the free scanner */ static void move_freelist_head(struct list_head *freelist, struct page *freepage) { LIST_HEAD(sublist); if (!list_is_last(freelist, &freepage->lru)) { list_cut_before(&sublist, freelist, &freepage->lru); list_splice_tail(&sublist, freelist); } } /* * Similar to move_freelist_head except used by the migration scanner * when scanning forward. It's possible for these list operations to * move against each other if they search the free list exactly in * lockstep. 
*/ static void move_freelist_tail(struct list_head *freelist, struct page *freepage) { LIST_HEAD(sublist); if (!list_is_first(freelist, &freepage->lru)) { list_cut_position(&sublist, freelist, &freepage->lru); list_splice_tail(&sublist, freelist); } } static void fast_isolate_around(struct compact_control *cc, unsigned long pfn) { unsigned long start_pfn, end_pfn; struct page *page; /* Do not search around if there are enough pages already */ if (cc->nr_freepages >= cc->nr_migratepages) return; /* Minimise scanning during async compaction */ if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC) return; /* Pageblock boundaries */ start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn); end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)); page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone); if (!page) return; isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false); /* Skip this pageblock in the future as it's full or nearly full */ if (start_pfn == end_pfn && !cc->no_set_skip_hint) set_pageblock_skip(page); } /* Search orders in round-robin fashion */ static int next_search_order(struct compact_control *cc, int order) { order--; if (order < 0) order = cc->order - 1; /* Search wrapped around? */ if (order == cc->search_order) { cc->search_order--; if (cc->search_order < 0) cc->search_order = cc->order - 1; return -1; } return order; } static void fast_isolate_freepages(struct compact_control *cc) { unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1); unsigned int nr_scanned = 0, total_isolated = 0; unsigned long low_pfn, min_pfn, highest = 0; unsigned long nr_isolated = 0; unsigned long distance; struct page *page = NULL; bool scan_start = false; int order; /* Full compaction passes in a negative order */ if (cc->order <= 0) return; /* * If starting the scan, use a deeper search and use the highest * PFN found if a suitable one is not found. */ if (cc->free_pfn >= cc->zone->compact_init_free_pfn) { limit = pageblock_nr_pages >> 1; scan_start = true; } /* * Preferred point is in the top quarter of the scan space but take * a pfn from the top half if the search is problematic. 
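 *
 * Worked example (illustrative PFNs): with cc->migrate_pfn = 0x10000 and
 * cc->free_pfn = 0x50000 the distance is 0x40000, so low_pfn starts near
 * 0x40000 (free_pfn - distance/4, the top quarter) and min_pfn near
 * 0x30000 (free_pfn - distance/2, the top half), each rounded down to a
 * pageblock boundary by pageblock_start_pfn().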
*/ distance = (cc->free_pfn - cc->migrate_pfn); low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2)); min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1)); if (WARN_ON_ONCE(min_pfn > low_pfn)) low_pfn = min_pfn; /* * Search starts from the last successful isolation order or the next * order to search after a previous failure */ cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order); for (order = cc->search_order; !page && order >= 0; order = next_search_order(cc, order)) { struct free_area *area = &cc->zone->free_area[order]; struct list_head *freelist; struct page *freepage; unsigned long flags; unsigned int order_scanned = 0; unsigned long high_pfn = 0; if (!area->nr_free) continue; spin_lock_irqsave(&cc->zone->lock, flags); freelist = &area->free_list[MIGRATE_MOVABLE]; list_for_each_entry_reverse(freepage, freelist, buddy_list) { unsigned long pfn; order_scanned++; nr_scanned++; pfn = page_to_pfn(freepage); if (pfn >= highest) highest = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn); if (pfn >= low_pfn) { cc->fast_search_fail = 0; cc->search_order = order; page = freepage; break; } if (pfn >= min_pfn && pfn > high_pfn) { high_pfn = pfn; /* Shorten the scan if a candidate is found */ limit >>= 1; } if (order_scanned >= limit) break; } /* Use a maximum candidate pfn if a preferred one was not found */ if (!page && high_pfn) { page = pfn_to_page(high_pfn); /* Update freepage for the list reorder below */ freepage = page; } /* Reorder to so a future search skips recent pages */ move_freelist_head(freelist, freepage); /* Isolate the page if available */ if (page) { if (__isolate_free_page(page, order)) { set_page_private(page, order); nr_isolated = 1 << order; nr_scanned += nr_isolated - 1; total_isolated += nr_isolated; cc->nr_freepages += nr_isolated; list_add_tail(&page->lru, &cc->freepages); count_compact_events(COMPACTISOLATED, nr_isolated); } else { /* If isolation fails, abort the search */ order = cc->search_order + 1; page = NULL; } } spin_unlock_irqrestore(&cc->zone->lock, flags); /* Skip fast search if enough freepages isolated */ if (cc->nr_freepages >= cc->nr_migratepages) break; /* * Smaller scan on next order so the total scan is related * to freelist_scan_limit. */ if (order_scanned >= limit) limit = max(1U, limit >> 1); } trace_mm_compaction_fast_isolate_freepages(min_pfn, cc->free_pfn, nr_scanned, total_isolated); if (!page) { cc->fast_search_fail++; if (scan_start) { /* * Use the highest PFN found above min. If one was * not found, be pessimistic for direct compaction * and use the min mark. */ if (highest >= min_pfn) { page = pfn_to_page(highest); cc->free_pfn = highest; } else { if (cc->direct_compaction && pfn_valid(min_pfn)) { page = pageblock_pfn_to_page(min_pfn, min(pageblock_end_pfn(min_pfn), zone_end_pfn(cc->zone)), cc->zone); cc->free_pfn = min_pfn; } } } } if (highest && highest >= cc->zone->compact_cached_free_pfn) { highest -= pageblock_nr_pages; cc->zone->compact_cached_free_pfn = highest; } cc->total_free_scanned += nr_scanned; if (!page) return; low_pfn = page_to_pfn(page); fast_isolate_around(cc, low_pfn); } /* * Based on information in the current compact_control, find blocks * suitable for isolating free pages from and then isolate them. 
*/ static void isolate_freepages(struct compact_control *cc) { struct zone *zone = cc->zone; struct page *page; unsigned long block_start_pfn; /* start of current pageblock */ unsigned long isolate_start_pfn; /* exact pfn we start at */ unsigned long block_end_pfn; /* end of current pageblock */ unsigned long low_pfn; /* lowest pfn scanner is able to scan */ struct list_head *freelist = &cc->freepages; unsigned int stride; /* Try a small search of the free lists for a candidate */ fast_isolate_freepages(cc); if (cc->nr_freepages) goto splitmap; /* * Initialise the free scanner. The starting point is where we last * successfully isolated from, zone-cached value, or the end of the * zone when isolating for the first time. For looping we also need * this pfn aligned down to the pageblock boundary, because we do * block_start_pfn -= pageblock_nr_pages in the for loop. * For ending point, take care when isolating in last pageblock of a * zone which ends in the middle of a pageblock. * The low boundary is the end of the pageblock the migration scanner * is using. */ isolate_start_pfn = cc->free_pfn; block_start_pfn = pageblock_start_pfn(isolate_start_pfn); block_end_pfn = min(block_start_pfn + pageblock_nr_pages, zone_end_pfn(zone)); low_pfn = pageblock_end_pfn(cc->migrate_pfn); stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1; /* * Isolate free pages until enough are available to migrate the * pages on cc->migratepages. We stop searching if the migrate * and free page scanners meet or enough free pages are isolated. */ for (; block_start_pfn >= low_pfn; block_end_pfn = block_start_pfn, block_start_pfn -= pageblock_nr_pages, isolate_start_pfn = block_start_pfn) { unsigned long nr_isolated; /* * This can iterate a massively long zone without finding any * suitable migration targets, so periodically check resched. */ if (!(block_start_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages))) cond_resched(); page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, zone); if (!page) { unsigned long next_pfn; next_pfn = skip_offline_sections_reverse(block_start_pfn); if (next_pfn) block_start_pfn = max(next_pfn, low_pfn); continue; } /* Check the block is suitable for migration */ if (!suitable_migration_target(cc, page)) continue; /* If isolation recently failed, do not retry */ if (!isolation_suitable(cc, page)) continue; /* Found a block suitable for isolating free pages from. */ nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn, freelist, stride, false); /* Update the skip hint if the full pageblock was scanned */ if (isolate_start_pfn == block_end_pfn) update_pageblock_skip(cc, page, block_start_pfn - pageblock_nr_pages); /* Are enough freepages isolated? */ if (cc->nr_freepages >= cc->nr_migratepages) { if (isolate_start_pfn >= block_end_pfn) { /* * Restart at previous pageblock if more * freepages can be isolated next time. */ isolate_start_pfn = block_start_pfn - pageblock_nr_pages; } break; } else if (isolate_start_pfn < block_end_pfn) { /* * If isolation failed early, do not continue * needlessly. */ break; } /* Adjust stride depending on isolation */ if (nr_isolated) { stride = 1; continue; } stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1); } /* * Record where the free scanner will restart next time. 
Either we * broke from the loop and set isolate_start_pfn based on the last * call to isolate_freepages_block(), or we met the migration scanner * and the loop terminated due to isolate_start_pfn < low_pfn */ cc->free_pfn = isolate_start_pfn; splitmap: /* __isolate_free_page() does not map the pages */ split_map_pages(freelist); } /* * This is a migrate-callback that "allocates" freepages by taking pages * from the isolated freelists in the block we are migrating to. */ static struct folio *compaction_alloc(struct folio *src, unsigned long data) { struct compact_control *cc = (struct compact_control *)data; struct folio *dst; if (list_empty(&cc->freepages)) { isolate_freepages(cc); if (list_empty(&cc->freepages)) return NULL; } dst = list_entry(cc->freepages.next, struct folio, lru); list_del(&dst->lru); cc->nr_freepages--; return dst; } /* * This is a migrate-callback that "frees" freepages back to the isolated * freelist. All pages on the freelist are from the same zone, so there is no * special handling needed for NUMA. */ static void compaction_free(struct folio *dst, unsigned long data) { struct compact_control *cc = (struct compact_control *)data; list_add(&dst->lru, &cc->freepages); cc->nr_freepages++; } /* possible outcome of isolate_migratepages */ typedef enum { ISOLATE_ABORT, /* Abort compaction now */ ISOLATE_NONE, /* No pages isolated, continue scanning */ ISOLATE_SUCCESS, /* Pages isolated, migrate */ } isolate_migrate_t; /* * Allow userspace to control policy on scanning the unevictable LRU for * compactable pages. */ static int sysctl_compact_unevictable_allowed __read_mostly = CONFIG_COMPACT_UNEVICTABLE_DEFAULT; /* * Tunable for proactive compaction. It determines how * aggressively the kernel should compact memory in the * background. It takes values in the range [0, 100]. */ static unsigned int __read_mostly sysctl_compaction_proactiveness = 20; static int sysctl_extfrag_threshold = 500; static int __read_mostly sysctl_compact_memory; static inline void update_fast_start_pfn(struct compact_control *cc, unsigned long pfn) { if (cc->fast_start_pfn == ULONG_MAX) return; if (!cc->fast_start_pfn) cc->fast_start_pfn = pfn; cc->fast_start_pfn = min(cc->fast_start_pfn, pfn); } static inline unsigned long reinit_migrate_pfn(struct compact_control *cc) { if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX) return cc->migrate_pfn; cc->migrate_pfn = cc->fast_start_pfn; cc->fast_start_pfn = ULONG_MAX; return cc->migrate_pfn; } /* * Briefly search the free lists for a migration source that already has * some free pages to reduce the number of pages that need migration * before a pageblock is free. */ static unsigned long fast_find_migrateblock(struct compact_control *cc) { unsigned int limit = freelist_scan_limit(cc); unsigned int nr_scanned = 0; unsigned long distance; unsigned long pfn = cc->migrate_pfn; unsigned long high_pfn; int order; bool found_block = false; /* Skip hints are relied on to avoid repeats on the fast search */ if (cc->ignore_skip_hint) return pfn; /* * If the pageblock should be finished then do not select a different * pageblock. */ if (cc->finish_pageblock) return pfn; /* * If the migrate_pfn is not at the start of a zone or the start * of a pageblock then assume this is a continuation of a previous * scan restarted due to COMPACT_CLUSTER_MAX. 
*/ if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn)) return pfn; /* * For smaller orders, just linearly scan as the number of pages * to migrate should be relatively small and does not necessarily * justify freeing up a large block for a small allocation. */ if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) return pfn; /* * Only allow kcompactd and direct requests for movable pages to * quickly clear out a MOVABLE pageblock for allocation. This * reduces the risk that a large movable pageblock is freed for * an unmovable/reclaimable small allocation. */ if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE) return pfn; /* * When starting the migration scanner, pick any pageblock within the * first half of the search space. Otherwise try and pick a pageblock * within the first eighth to reduce the chances that a migration * target later becomes a source. */ distance = (cc->free_pfn - cc->migrate_pfn) >> 1; if (cc->migrate_pfn != cc->zone->zone_start_pfn) distance >>= 2; high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance); for (order = cc->order - 1; order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit; order--) { struct free_area *area = &cc->zone->free_area[order]; struct list_head *freelist; unsigned long flags; struct page *freepage; if (!area->nr_free) continue; spin_lock_irqsave(&cc->zone->lock, flags); freelist = &area->free_list[MIGRATE_MOVABLE]; list_for_each_entry(freepage, freelist, buddy_list) { unsigned long free_pfn; if (nr_scanned++ >= limit) { move_freelist_tail(freelist, freepage); break; } free_pfn = page_to_pfn(freepage); if (free_pfn < high_pfn) { /* * Avoid if skipped recently. Ideally it would * move to the tail but even safe iteration of * the list assumes an entry is deleted, not * reordered. */ if (get_pageblock_skip(freepage)) continue; /* Reorder to so a future search skips recent pages */ move_freelist_tail(freelist, freepage); update_fast_start_pfn(cc, free_pfn); pfn = pageblock_start_pfn(free_pfn); if (pfn < cc->zone->zone_start_pfn) pfn = cc->zone->zone_start_pfn; cc->fast_search_fail = 0; found_block = true; break; } } spin_unlock_irqrestore(&cc->zone->lock, flags); } cc->total_migrate_scanned += nr_scanned; /* * If fast scanning failed then use a cached entry for a page block * that had free pages as the basis for starting a linear scan. */ if (!found_block) { cc->fast_search_fail++; pfn = reinit_migrate_pfn(cc); } return pfn; } /* * Isolate all pages that can be migrated from the first suitable block, * starting at the block pointed to by the migrate scanner pfn within * compact_control. */ static isolate_migrate_t isolate_migratepages(struct compact_control *cc) { unsigned long block_start_pfn; unsigned long block_end_pfn; unsigned long low_pfn; struct page *page; const isolate_mode_t isolate_mode = (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) | (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0); bool fast_find_block; /* * Start at where we last stopped, or beginning of the zone as * initialized by compact_zone(). The first failure will use * the lowest PFN as the starting point for linear scanning. */ low_pfn = fast_find_migrateblock(cc); block_start_pfn = pageblock_start_pfn(low_pfn); if (block_start_pfn < cc->zone->zone_start_pfn) block_start_pfn = cc->zone->zone_start_pfn; /* * fast_find_migrateblock() has already ensured the pageblock is not * set with a skipped flag, so to avoid the isolation_suitable check * below again, check whether the fast search was successful. 
*/ fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail; /* Only scan within a pageblock boundary */ block_end_pfn = pageblock_end_pfn(low_pfn); /* * Iterate over whole pageblocks until we find the first suitable. * Do not cross the free scanner. */ for (; block_end_pfn <= cc->free_pfn; fast_find_block = false, cc->migrate_pfn = low_pfn = block_end_pfn, block_start_pfn = block_end_pfn, block_end_pfn += pageblock_nr_pages) { /* * This can potentially iterate a massively long zone with * many pageblocks unsuitable, so periodically check if we * need to schedule. */ if (!(low_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages))) cond_resched(); page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, cc->zone); if (!page) { unsigned long next_pfn; next_pfn = skip_offline_sections(block_start_pfn); if (next_pfn) block_end_pfn = min(next_pfn, cc->free_pfn); continue; } /* * If isolation recently failed, do not retry. Only check the * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock * to be visited multiple times. Assume skip was checked * before making it "skip" so other compaction instances do * not scan the same block. */ if ((pageblock_aligned(low_pfn) || low_pfn == cc->zone->zone_start_pfn) && !fast_find_block && !isolation_suitable(cc, page)) continue; /* * For async direct compaction, only scan the pageblocks of the * same migratetype without huge pages. Async direct compaction * is optimistic to see if the minimum amount of work satisfies * the allocation. The cached PFN is updated as it's possible * that all remaining blocks between source and target are * unsuitable and the compaction scanners fail to meet. */ if (!suitable_migration_source(cc, page)) { update_cached_migrate(cc, block_end_pfn); continue; } /* Perform the isolation */ if (isolate_migratepages_block(cc, low_pfn, block_end_pfn, isolate_mode)) return ISOLATE_ABORT; /* * Either we isolated something and proceed with migration. Or * we failed and compact_zone should decide if we should * continue or not. */ break; } return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; } /* * order == -1 is expected when compacting via * /proc/sys/vm/compact_memory */ static inline bool is_via_compact_memory(int order) { return order == -1; } /* * Determine whether kswapd is (or recently was!) running on this node. * * pgdat_kswapd_lock() pins pgdat->kswapd, so a concurrent kswapd_stop() can't * zero it. */ static bool kswapd_is_running(pg_data_t *pgdat) { bool running; pgdat_kswapd_lock(pgdat); running = pgdat->kswapd && task_is_running(pgdat->kswapd); pgdat_kswapd_unlock(pgdat); return running; } /* * A zone's fragmentation score is the external fragmentation wrt to the * COMPACTION_HPAGE_ORDER. It returns a value in the range [0, 100]. */ static unsigned int fragmentation_score_zone(struct zone *zone) { return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER); } /* * A weighted zone's fragmentation score is the external fragmentation * wrt to the COMPACTION_HPAGE_ORDER scaled by the zone's size. It * returns a value in the range [0, 100]. * * The scaling factor ensures that proactive compaction focuses on larger * zones like ZONE_NORMAL, rather than smaller, specialized zones like * ZONE_DMA32. For smaller zones, the score value remains close to zero, * and thus never exceeds the high threshold for proactive compaction. 
*/ static unsigned int fragmentation_score_zone_weighted(struct zone *zone) { unsigned long score; score = zone->present_pages * fragmentation_score_zone(zone); return div64_ul(score, zone->zone_pgdat->node_present_pages + 1); } /* * The per-node proactive (background) compaction process is started by its * corresponding kcompactd thread when the node's fragmentation score * exceeds the high threshold. The compaction process remains active till * the node's score falls below the low threshold, or one of the back-off * conditions is met. */ static unsigned int fragmentation_score_node(pg_data_t *pgdat) { unsigned int score = 0; int zoneid; for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { struct zone *zone; zone = &pgdat->node_zones[zoneid]; if (!populated_zone(zone)) continue; score += fragmentation_score_zone_weighted(zone); } return score; } static unsigned int fragmentation_score_wmark(bool low) { unsigned int wmark_low; /* * Cap the low watermark to avoid excessive compaction * activity in case a user sets the proactiveness tunable * close to 100 (maximum). */ wmark_low = max(100U - sysctl_compaction_proactiveness, 5U); return low ? wmark_low : min(wmark_low + 10, 100U); } static bool should_proactive_compact_node(pg_data_t *pgdat) { int wmark_high; if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat)) return false; wmark_high = fragmentation_score_wmark(false); return fragmentation_score_node(pgdat) > wmark_high; } static enum compact_result __compact_finished(struct compact_control *cc) { unsigned int order; const int migratetype = cc->migratetype; int ret; /* Compaction run completes if the migrate and free scanner meet */ if (compact_scanners_met(cc)) { /* Let the next compaction start anew. */ reset_cached_positions(cc->zone); /* * Mark that the PG_migrate_skip information should be cleared * by kswapd when it goes to sleep. kcompactd does not set the * flag itself as the decision to be clear should be directly * based on an allocation request. */ if (cc->direct_compaction) cc->zone->compact_blockskip_flush = true; if (cc->whole_zone) return COMPACT_COMPLETE; else return COMPACT_PARTIAL_SKIPPED; } if (cc->proactive_compaction) { int score, wmark_low; pg_data_t *pgdat; pgdat = cc->zone->zone_pgdat; if (kswapd_is_running(pgdat)) return COMPACT_PARTIAL_SKIPPED; score = fragmentation_score_zone(cc->zone); wmark_low = fragmentation_score_wmark(true); if (score > wmark_low) ret = COMPACT_CONTINUE; else ret = COMPACT_SUCCESS; goto out; } if (is_via_compact_memory(cc->order)) return COMPACT_CONTINUE; /* * Always finish scanning a pageblock to reduce the possibility of * fallbacks in the future. This is particularly important when * migration source is unmovable/reclaimable but it's not worth * special casing. */ if (!pageblock_aligned(cc->migrate_pfn)) return COMPACT_CONTINUE; /* Direct compactor: Is a suitable page free? */ ret = COMPACT_NO_SUITABLE_PAGE; for (order = cc->order; order <= MAX_ORDER; order++) { struct free_area *area = &cc->zone->free_area[order]; bool can_steal; /* Job done if page is free of the right migratetype */ if (!free_area_empty(area, migratetype)) return COMPACT_SUCCESS; #ifdef CONFIG_CMA /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ if (migratetype == MIGRATE_MOVABLE && !free_area_empty(area, MIGRATE_CMA)) return COMPACT_SUCCESS; #endif /* * Job done if allocation would steal freepages from * other migratetype buddy lists. 
*/ if (find_suitable_fallback(area, order, migratetype, true, &can_steal) != -1) /* * Movable pages are OK in any pageblock. If we are * stealing for a non-movable allocation, make sure * we finish compacting the current pageblock first * (which is assured by the above migrate_pfn align * check) so it is as free as possible and we won't * have to steal another one soon. */ return COMPACT_SUCCESS; } out: if (cc->contended || fatal_signal_pending(current)) ret = COMPACT_CONTENDED; return ret; } static enum compact_result compact_finished(struct compact_control *cc) { int ret; ret = __compact_finished(cc); trace_mm_compaction_finished(cc->zone, cc->order, ret); if (ret == COMPACT_NO_SUITABLE_PAGE) ret = COMPACT_CONTINUE; return ret; } static bool __compaction_suitable(struct zone *zone, int order, int highest_zoneidx, unsigned long wmark_target) { unsigned long watermark; /* * Watermarks for order-0 must be met for compaction to be able to * isolate free pages for migration targets. This means that the * watermark and alloc_flags have to match, or be more pessimistic than * the check in __isolate_free_page(). We don't use the direct * compactor's alloc_flags, as they are not relevant for freepage * isolation. We however do use the direct compactor's highest_zoneidx * to skip over zones where lowmem reserves would prevent allocation * even if compaction succeeds. * For costly orders, we require low watermark instead of min for * compaction to proceed to increase its chances. * ALLOC_CMA is used, as pages in CMA pageblocks are considered * suitable migration targets */ watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? low_wmark_pages(zone) : min_wmark_pages(zone); watermark += compact_gap(order); return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx, ALLOC_CMA, wmark_target); } /* * compaction_suitable: Is this suitable to run compaction on this zone now? */ bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx) { enum compact_result compact_result; bool suitable; suitable = __compaction_suitable(zone, order, highest_zoneidx, zone_page_state(zone, NR_FREE_PAGES)); /* * fragmentation index determines if allocation failures are due to * low memory or external fragmentation * * index of -1000 would imply allocations might succeed depending on * watermarks, but we already failed the high-order watermark check * index towards 0 implies failure is due to lack of memory * index towards 1000 implies failure is due to fragmentation * * Only compact if a failure would be due to fragmentation. Also * ignore fragindex for non-costly orders where the alternative to * a successful reclaim/compaction is OOM. Fragindex and the * vm.extfrag_threshold sysctl is meant as a heuristic to prevent * excessive compaction for costly orders, but it should not be at the * expense of system stability. */ if (suitable) { compact_result = COMPACT_CONTINUE; if (order > PAGE_ALLOC_COSTLY_ORDER) { int fragindex = fragmentation_index(zone, order); if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) { suitable = false; compact_result = COMPACT_NOT_SUITABLE_ZONE; } } } else { compact_result = COMPACT_SKIPPED; } trace_mm_compaction_suitable(zone, order, compact_result); return suitable; } bool compaction_zonelist_suitable(struct alloc_context *ac, int order, int alloc_flags) { struct zone *zone; struct zoneref *z; /* * Make sure at least one zone would pass __compaction_suitable if we continue * retrying the reclaim. 
*/ for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->highest_zoneidx, ac->nodemask) { unsigned long available; /* * Do not consider all the reclaimable memory because we do not * want to trash just for a single high order allocation which * is even not guaranteed to appear even if __compaction_suitable * is happy about the watermark check. */ available = zone_reclaimable_pages(zone) / order; available += zone_page_state_snapshot(zone, NR_FREE_PAGES); if (__compaction_suitable(zone, order, ac->highest_zoneidx, available)) return true; } return false; } static enum compact_result compact_zone(struct compact_control *cc, struct capture_control *capc) { enum compact_result ret; unsigned long start_pfn = cc->zone->zone_start_pfn; unsigned long end_pfn = zone_end_pfn(cc->zone); unsigned long last_migrated_pfn; const bool sync = cc->mode != MIGRATE_ASYNC; bool update_cached; unsigned int nr_succeeded = 0; /* * These counters track activities during zone compaction. Initialize * them before compacting a new zone. */ cc->total_migrate_scanned = 0; cc->total_free_scanned = 0; cc->nr_migratepages = 0; cc->nr_freepages = 0; INIT_LIST_HEAD(&cc->freepages); INIT_LIST_HEAD(&cc->migratepages); cc->migratetype = gfp_migratetype(cc->gfp_mask); if (!is_via_compact_memory(cc->order)) { unsigned long watermark; /* Allocation can already succeed, nothing to do */ watermark = wmark_pages(cc->zone, cc->alloc_flags & ALLOC_WMARK_MASK); if (zone_watermark_ok(cc->zone, cc->order, watermark, cc->highest_zoneidx, cc->alloc_flags)) return COMPACT_SUCCESS; /* Compaction is likely to fail */ if (!compaction_suitable(cc->zone, cc->order, cc->highest_zoneidx)) return COMPACT_SKIPPED; } /* * Clear pageblock skip if there were failures recently and compaction * is about to be retried after being deferred. */ if (compaction_restarting(cc->zone, cc->order)) __reset_isolation_suitable(cc->zone); /* * Setup to move all movable pages to the end of the zone. Used cached * information on where the scanners should start (unless we explicitly * want to compact the whole zone), but check that it is initialised * by ensuring the values are within zone boundaries. */ cc->fast_start_pfn = 0; if (cc->whole_zone) { cc->migrate_pfn = start_pfn; cc->free_pfn = pageblock_start_pfn(end_pfn - 1); } else { cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync]; cc->free_pfn = cc->zone->compact_cached_free_pfn; if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { cc->free_pfn = pageblock_start_pfn(end_pfn - 1); cc->zone->compact_cached_free_pfn = cc->free_pfn; } if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { cc->migrate_pfn = start_pfn; cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; } if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn) cc->whole_zone = true; } last_migrated_pfn = 0; /* * Migrate has separate cached PFNs for ASYNC and SYNC* migration on * the basis that some migrations will fail in ASYNC mode. However, * if the cached PFNs match and pageblocks are skipped due to having * no isolation candidates, then the sync state does not matter. * Until a pageblock with isolation candidates is found, keep the * cached PFNs in sync to avoid revisiting the same blocks. 
*/ update_cached = !sync && cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1]; trace_mm_compaction_begin(cc, start_pfn, end_pfn, sync); /* lru_add_drain_all could be expensive with involving other CPUs */ lru_add_drain(); while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) { int err; unsigned long iteration_start_pfn = cc->migrate_pfn; /* * Avoid multiple rescans of the same pageblock which can * happen if a page cannot be isolated (dirty/writeback in * async mode) or if the migrated pages are being allocated * before the pageblock is cleared. The first rescan will * capture the entire pageblock for migration. If it fails, * it'll be marked skip and scanning will proceed as normal. */ cc->finish_pageblock = false; if (pageblock_start_pfn(last_migrated_pfn) == pageblock_start_pfn(iteration_start_pfn)) { cc->finish_pageblock = true; } rescan: switch (isolate_migratepages(cc)) { case ISOLATE_ABORT: ret = COMPACT_CONTENDED; putback_movable_pages(&cc->migratepages); cc->nr_migratepages = 0; goto out; case ISOLATE_NONE: if (update_cached) { cc->zone->compact_cached_migrate_pfn[1] = cc->zone->compact_cached_migrate_pfn[0]; } /* * We haven't isolated and migrated anything, but * there might still be unflushed migrations from * previous cc->order aligned block. */ goto check_drain; case ISOLATE_SUCCESS: update_cached = false; last_migrated_pfn = max(cc->zone->zone_start_pfn, pageblock_start_pfn(cc->migrate_pfn - 1)); } err = migrate_pages(&cc->migratepages, compaction_alloc, compaction_free, (unsigned long)cc, cc->mode, MR_COMPACTION, &nr_succeeded); trace_mm_compaction_migratepages(cc, nr_succeeded); /* All pages were either migrated or will be released */ cc->nr_migratepages = 0; if (err) { putback_movable_pages(&cc->migratepages); /* * migrate_pages() may return -ENOMEM when scanners meet * and we want compact_finished() to detect it */ if (err == -ENOMEM && !compact_scanners_met(cc)) { ret = COMPACT_CONTENDED; goto out; } /* * If an ASYNC or SYNC_LIGHT fails to migrate a page * within the pageblock_order-aligned block and * fast_find_migrateblock may be used then scan the * remainder of the pageblock. This will mark the * pageblock "skip" to avoid rescanning in the near * future. This will isolate more pages than necessary * for the request but avoid loops due to * fast_find_migrateblock revisiting blocks that were * recently partially scanned. */ if (!pageblock_aligned(cc->migrate_pfn) && !cc->ignore_skip_hint && !cc->finish_pageblock && (cc->mode < MIGRATE_SYNC)) { cc->finish_pageblock = true; /* * Draining pcplists does not help THP if * any page failed to migrate. Even after * drain, the pageblock will not be free. */ if (cc->order == COMPACTION_HPAGE_ORDER) last_migrated_pfn = 0; goto rescan; } } /* Stop if a page has been captured */ if (capc && capc->page) { ret = COMPACT_SUCCESS; break; } check_drain: /* * Has the migration scanner moved away from the previous * cc->order aligned block where we migrated from? If yes, * flush the pages that were freed, so that they can merge and * compact_finished() can detect immediately if allocation * would succeed. 
*/ if (cc->order > 0 && last_migrated_pfn) { unsigned long current_block_start = block_start_pfn(cc->migrate_pfn, cc->order); if (last_migrated_pfn < current_block_start) { lru_add_drain_cpu_zone(cc->zone); /* No more flushing until we migrate again */ last_migrated_pfn = 0; } } } out: /* * Release free pages and update where the free scanner should restart, * so we don't leave any returned pages behind in the next attempt. */ if (cc->nr_freepages > 0) { unsigned long free_pfn = release_freepages(&cc->freepages); cc->nr_freepages = 0; VM_BUG_ON(free_pfn == 0); /* The cached pfn is always the first in a pageblock */ free_pfn = pageblock_start_pfn(free_pfn); /* * Only go back, not forward. The cached pfn might have been * already reset to zone end in compact_finished() */ if (free_pfn > cc->zone->compact_cached_free_pfn) cc->zone->compact_cached_free_pfn = free_pfn; } count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned); count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned); trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret); VM_BUG_ON(!list_empty(&cc->freepages)); VM_BUG_ON(!list_empty(&cc->migratepages)); return ret; } static enum compact_result compact_zone_order(struct zone *zone, int order, gfp_t gfp_mask, enum compact_priority prio, unsigned int alloc_flags, int highest_zoneidx, struct page **capture) { enum compact_result ret; struct compact_control cc = { .order = order, .search_order = order, .gfp_mask = gfp_mask, .zone = zone, .mode = (prio == COMPACT_PRIO_ASYNC) ? MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT, .alloc_flags = alloc_flags, .highest_zoneidx = highest_zoneidx, .direct_compaction = true, .whole_zone = (prio == MIN_COMPACT_PRIORITY), .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY), .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY) }; struct capture_control capc = { .cc = &cc, .page = NULL, }; /* * Make sure the structs are really initialized before we expose the * capture control, in case we are interrupted and the interrupt handler * frees a page. */ barrier(); WRITE_ONCE(current->capture_control, &capc); ret = compact_zone(&cc, &capc); /* * Make sure we hide capture control first before we read the captured * page pointer, otherwise an interrupt could free and capture a page * and we would leak it. */ WRITE_ONCE(current->capture_control, NULL); *capture = READ_ONCE(capc.page); /* * Technically, it is also possible that compaction is skipped but * the page is still captured out of luck(IRQ came and freed the page). * Returning COMPACT_SUCCESS in such cases helps in properly accounting * the COMPACT[STALL|FAIL] when compaction is skipped. */ if (*capture) ret = COMPACT_SUCCESS; return ret; } /** * try_to_compact_pages - Direct compact to satisfy a high-order allocation * @gfp_mask: The GFP mask of the current allocation * @order: The order of the current allocation * @alloc_flags: The allocation flags of the current allocation * @ac: The context of current allocation * @prio: Determines how hard direct compaction should try to succeed * @capture: Pointer to free page created by compaction will be stored here * * This is the main entry point for direct page compaction. 
*/ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, struct page **capture) { int may_perform_io = (__force int)(gfp_mask & __GFP_IO); struct zoneref *z; struct zone *zone; enum compact_result rc = COMPACT_SKIPPED; /* * Check if the GFP flags allow compaction - GFP_NOIO is really * tricky context because the migration might require IO */ if (!may_perform_io) return COMPACT_SKIPPED; trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); /* Compact each zone in the list */ for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->highest_zoneidx, ac->nodemask) { enum compact_result status; if (prio > MIN_COMPACT_PRIORITY && compaction_deferred(zone, order)) { rc = max_t(enum compact_result, COMPACT_DEFERRED, rc); continue; } status = compact_zone_order(zone, order, gfp_mask, prio, alloc_flags, ac->highest_zoneidx, capture); rc = max(status, rc); /* The allocation should succeed, stop compacting */ if (status == COMPACT_SUCCESS) { /* * We think the allocation will succeed in this zone, * but it is not certain, hence the false. The caller * will repeat this with true if allocation indeed * succeeds in this zone. */ compaction_defer_reset(zone, order, false); break; } if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE || status == COMPACT_PARTIAL_SKIPPED)) /* * We think that allocation won't succeed in this zone * so we defer compaction there. If it ends up * succeeding after all, it will be reset. */ defer_compaction(zone, order); /* * We might have stopped compacting due to need_resched() in * async compaction, or due to a fatal signal detected. In that * case do not try further zones */ if ((prio == COMPACT_PRIO_ASYNC && need_resched()) || fatal_signal_pending(current)) break; } return rc; } /* * Compact all zones within a node till each zone's fragmentation score * reaches within proactive compaction thresholds (as determined by the * proactiveness tunable). * * It is possible that the function returns before reaching score targets * due to various back-off conditions, such as, contention on per-node or * per-zone locks. 
*/ static void proactive_compact_node(pg_data_t *pgdat) { int zoneid; struct zone *zone; struct compact_control cc = { .order = -1, .mode = MIGRATE_SYNC_LIGHT, .ignore_skip_hint = true, .whole_zone = true, .gfp_mask = GFP_KERNEL, .proactive_compaction = true, }; for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { zone = &pgdat->node_zones[zoneid]; if (!populated_zone(zone)) continue; cc.zone = zone; compact_zone(&cc, NULL); count_compact_events(KCOMPACTD_MIGRATE_SCANNED, cc.total_migrate_scanned); count_compact_events(KCOMPACTD_FREE_SCANNED, cc.total_free_scanned); } } /* Compact all zones within a node */ static void compact_node(int nid) { pg_data_t *pgdat = NODE_DATA(nid); int zoneid; struct zone *zone; struct compact_control cc = { .order = -1, .mode = MIGRATE_SYNC, .ignore_skip_hint = true, .whole_zone = true, .gfp_mask = GFP_KERNEL, }; for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { zone = &pgdat->node_zones[zoneid]; if (!populated_zone(zone)) continue; cc.zone = zone; compact_zone(&cc, NULL); } } /* Compact all nodes in the system */ static void compact_nodes(void) { int nid; /* Flush pending updates to the LRU lists */ lru_add_drain_all(); for_each_online_node(nid) compact_node(nid); } static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int rc, nid; rc = proc_dointvec_minmax(table, write, buffer, length, ppos); if (rc) return rc; if (write && sysctl_compaction_proactiveness) { for_each_online_node(nid) { pg_data_t *pgdat = NODE_DATA(nid); if (pgdat->proactive_compact_trigger) continue; pgdat->proactive_compact_trigger = true; trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, -1, pgdat->nr_zones - 1); wake_up_interruptible(&pgdat->kcompactd_wait); } } return 0; } /* * This is the entry point for compacting all nodes via * /proc/sys/vm/compact_memory */ static int sysctl_compaction_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int ret; ret = proc_dointvec(table, write, buffer, length, ppos); if (ret) return ret; if (sysctl_compact_memory != 1) return -EINVAL; if (write) compact_nodes(); return 0; } #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) static ssize_t compact_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nid = dev->id; if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { /* Flush pending updates to the LRU lists */ lru_add_drain_all(); compact_node(nid); } return count; } static DEVICE_ATTR_WO(compact); int compaction_register_node(struct node *node) { return device_create_file(&node->dev, &dev_attr_compact); } void compaction_unregister_node(struct node *node) { device_remove_file(&node->dev, &dev_attr_compact); } #endif /* CONFIG_SYSFS && CONFIG_NUMA */ static inline bool kcompactd_work_requested(pg_data_t *pgdat) { return pgdat->kcompactd_max_order > 0 || kthread_should_stop() || pgdat->proactive_compact_trigger; } static bool kcompactd_node_suitable(pg_data_t *pgdat) { int zoneid; struct zone *zone; enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx; for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) { zone = &pgdat->node_zones[zoneid]; if (!populated_zone(zone)) continue; /* Allocation can already succeed, check other zones */ if (zone_watermark_ok(zone, pgdat->kcompactd_max_order, min_wmark_pages(zone), highest_zoneidx, 0)) continue; if (compaction_suitable(zone, pgdat->kcompactd_max_order, highest_zoneidx)) return true; } return false; } static void 
kcompactd_do_work(pg_data_t *pgdat) { /* * With no special task, compact all zones so that a page of requested * order is allocatable. */ int zoneid; struct zone *zone; struct compact_control cc = { .order = pgdat->kcompactd_max_order, .search_order = pgdat->kcompactd_max_order, .highest_zoneidx = pgdat->kcompactd_highest_zoneidx, .mode = MIGRATE_SYNC_LIGHT, .ignore_skip_hint = false, .gfp_mask = GFP_KERNEL, }; trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, cc.highest_zoneidx); count_compact_event(KCOMPACTD_WAKE); for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) { int status; zone = &pgdat->node_zones[zoneid]; if (!populated_zone(zone)) continue; if (compaction_deferred(zone, cc.order)) continue; /* Allocation can already succeed, nothing to do */ if (zone_watermark_ok(zone, cc.order, min_wmark_pages(zone), zoneid, 0)) continue; if (!compaction_suitable(zone, cc.order, zoneid)) continue; if (kthread_should_stop()) return; cc.zone = zone; status = compact_zone(&cc, NULL); if (status == COMPACT_SUCCESS) { compaction_defer_reset(zone, cc.order, false); } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) { /* * Buddy pages may become stranded on pcps that could * otherwise coalesce on the zone's free area for * order >= cc.order. This is ratelimited by the * upcoming deferral. */ drain_all_pages(zone); /* * We use sync migration mode here, so we defer like * sync direct compaction does. */ defer_compaction(zone, cc.order); } count_compact_events(KCOMPACTD_MIGRATE_SCANNED, cc.total_migrate_scanned); count_compact_events(KCOMPACTD_FREE_SCANNED, cc.total_free_scanned); } /* * Regardless of success, we are done until woken up next. But remember * the requested order/highest_zoneidx in case it was higher/tighter * than our current ones */ if (pgdat->kcompactd_max_order <= cc.order) pgdat->kcompactd_max_order = 0; if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx) pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; } void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) { if (!order) return; if (pgdat->kcompactd_max_order < order) pgdat->kcompactd_max_order = order; if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx) pgdat->kcompactd_highest_zoneidx = highest_zoneidx; /* * Pairs with implicit barrier in wait_event_freezable() * such that wakeups are not missed. */ if (!wq_has_sleeper(&pgdat->kcompactd_wait)) return; if (!kcompactd_node_suitable(pgdat)) return; trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, highest_zoneidx); wake_up_interruptible(&pgdat->kcompactd_wait); } /* * The background compaction daemon, started as a kernel thread * from the init process. */ static int kcompactd(void *p) { pg_data_t *pgdat = (pg_data_t *)p; struct task_struct *tsk = current; long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC); long timeout = default_timeout; const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); if (!cpumask_empty(cpumask)) set_cpus_allowed_ptr(tsk, cpumask); set_freezable(); pgdat->kcompactd_max_order = 0; pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; while (!kthread_should_stop()) { unsigned long pflags; /* * Avoid the unnecessary wakeup for proactive compaction * when it is disabled. 
*/ if (!sysctl_compaction_proactiveness) timeout = MAX_SCHEDULE_TIMEOUT; trace_mm_compaction_kcompactd_sleep(pgdat->node_id); if (wait_event_freezable_timeout(pgdat->kcompactd_wait, kcompactd_work_requested(pgdat), timeout) && !pgdat->proactive_compact_trigger) { psi_memstall_enter(&pflags); kcompactd_do_work(pgdat); psi_memstall_leave(&pflags); /* * Reset the timeout value. The defer timeout from * proactive compaction is lost here but that is fine * as the condition of the zone changing substantionally * then carrying on with the previous defer interval is * not useful. */ timeout = default_timeout; continue; } /* * Start the proactive work with default timeout. Based * on the fragmentation score, this timeout is updated. */ timeout = default_timeout; if (should_proactive_compact_node(pgdat)) { unsigned int prev_score, score; prev_score = fragmentation_score_node(pgdat); proactive_compact_node(pgdat); score = fragmentation_score_node(pgdat); /* * Defer proactive compaction if the fragmentation * score did not go down i.e. no progress made. */ if (unlikely(score >= prev_score)) timeout = default_timeout << COMPACT_MAX_DEFER_SHIFT; } if (unlikely(pgdat->proactive_compact_trigger)) pgdat->proactive_compact_trigger = false; } return 0; } /* * This kcompactd start function will be called by init and node-hot-add. * On node-hot-add, kcompactd will moved to proper cpus if cpus are hot-added. */ void __meminit kcompactd_run(int nid) { pg_data_t *pgdat = NODE_DATA(nid); if (pgdat->kcompactd) return; pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid); if (IS_ERR(pgdat->kcompactd)) { pr_err("Failed to start kcompactd on node %d\n", nid); pgdat->kcompactd = NULL; } } /* * Called by memory hotplug when all memory in a node is offlined. Caller must * be holding mem_hotplug_begin/done(). */ void __meminit kcompactd_stop(int nid) { struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd; if (kcompactd) { kthread_stop(kcompactd); NODE_DATA(nid)->kcompactd = NULL; } } /* * It's optimal to keep kcompactd on the same CPUs as their memory, but * not required for correctness. So if the last cpu in a node goes * away, we get changed to run anywhere: as the first one comes back, * restore their cpu bindings. 
*/ static int kcompactd_cpu_online(unsigned int cpu) { int nid; for_each_node_state(nid, N_MEMORY) { pg_data_t *pgdat = NODE_DATA(nid); const struct cpumask *mask; mask = cpumask_of_node(pgdat->node_id); if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) /* One of our CPUs online: restore mask */ if (pgdat->kcompactd) set_cpus_allowed_ptr(pgdat->kcompactd, mask); } return 0; } static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret, old; if (!IS_ENABLED(CONFIG_PREEMPT_RT) || !write) return proc_dointvec_minmax(table, write, buffer, lenp, ppos); old = *(int *)table->data; ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret) return ret; if (old != *(int *)table->data) pr_warn_once("sysctl attribute %s changed by %s[%d]\n", table->procname, current->comm, task_pid_nr(current)); return ret; } static struct ctl_table vm_compaction[] = { { .procname = "compact_memory", .data = &sysctl_compact_memory, .maxlen = sizeof(int), .mode = 0200, .proc_handler = sysctl_compaction_handler, }, { .procname = "compaction_proactiveness", .data = &sysctl_compaction_proactiveness, .maxlen = sizeof(sysctl_compaction_proactiveness), .mode = 0644, .proc_handler = compaction_proactiveness_sysctl_handler, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE_HUNDRED, }, { .procname = "extfrag_threshold", .data = &sysctl_extfrag_threshold, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE_THOUSAND, }, { .procname = "compact_unevictable_allowed", .data = &sysctl_compact_unevictable_allowed, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax_warn_RT_change, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, { } }; static int __init kcompactd_init(void) { int nid; int ret; ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/compaction:online", kcompactd_cpu_online, NULL); if (ret < 0) { pr_err("kcompactd: failed to register hotplug callbacks.\n"); return ret; } for_each_node_state(nid, N_MEMORY) kcompactd_run(nid); register_sysctl_init("vm", vm_compaction); return 0; } subsys_initcall(kcompactd_init) #endif /* CONFIG_COMPACTION */
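To make the proactive-compaction thresholds concrete: with the default vm.compaction_proactiveness of 20, fragmentation_score_wmark() yields a low watermark of max(100 - 20, 5) = 80 and a high watermark of min(80 + 10, 100) = 90, so kcompactd starts proactive work once a node's weighted fragmentation score exceeds 90 and keeps going until it drops back below 80. The following is a minimal, self-contained userspace sketch of that arithmetic; it is illustrative only and not part of mm/compaction.c.

/*
 * Illustrative re-statement of fragmentation_score_wmark() for a given
 * vm.compaction_proactiveness value; mirrors the clamping used above.
 * The sysctl restricts proactiveness to [0, 100].
 */
#include <stdio.h>

static unsigned int wmark(unsigned int proactiveness, int low)
{
	unsigned int wmark_low;

	/* Cap the low watermark at 5 so proactiveness near 100 cannot drive it to 0 */
	wmark_low = (100U - proactiveness > 5U) ? 100U - proactiveness : 5U;

	/* The high watermark sits 10 points above the low one, capped at 100 */
	return low ? wmark_low :
	       (wmark_low + 10 < 100U ? wmark_low + 10 : 100U);
}

int main(void)
{
	unsigned int p = 20;	/* default sysctl_compaction_proactiveness */

	printf("proactiveness=%u -> low=%u high=%u\n",
	       p, wmark(p, 1), wmark(p, 0));	/* prints: low=80 high=90 */
	return 0;
}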
linux-master
mm/compaction.c
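For completeness, the knobs exposed by the sysctl table and the per-node sysfs attribute above can be exercised from user space roughly as follows. The helper below is a hypothetical illustration: the procfs/sysfs paths are the standard locations for these interfaces, but the program itself is not part of the kernel tree.

/*
 * Hypothetical userspace helper: adjust proactive compaction and trigger a
 * one-shot, synchronous compaction pass. Writing "1" to compact_memory ends
 * up in sysctl_compaction_handler() -> compact_nodes(); writing to a node's
 * "compact" attribute ends up in compact_store() -> compact_node().
 */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	/* Background compaction aggressiveness, sysctl range [0, 100] */
	write_str("/proc/sys/vm/compaction_proactiveness", "20\n");

	/* Compact every online node (needs root; the file is mode 0200) */
	write_str("/proc/sys/vm/compact_memory", "1\n");

	/* Compact node 0 only (available with CONFIG_SYSFS && CONFIG_NUMA) */
	write_str("/sys/devices/system/node/node0/compact", "1\n");

	return 0;
}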
// SPDX-License-Identifier: GPL-2.0-only /* * Memory merging support. * * This code enables dynamic sharing of identical pages found in different * memory areas, even if they are not shared by fork() * * Copyright (C) 2008-2009 Red Hat, Inc. * Authors: * Izik Eidus * Andrea Arcangeli * Chris Wright * Hugh Dickins */ #include <linux/errno.h> #include <linux/mm.h> #include <linux/mm_inline.h> #include <linux/fs.h> #include <linux/mman.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/coredump.h> #include <linux/rwsem.h> #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/spinlock.h> #include <linux/xxhash.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/rbtree.h> #include <linux/memory.h> #include <linux/mmu_notifier.h> #include <linux/swap.h> #include <linux/ksm.h> #include <linux/hashtable.h> #include <linux/freezer.h> #include <linux/oom.h> #include <linux/numa.h> #include <linux/pagewalk.h> #include <asm/tlbflush.h> #include "internal.h" #include "mm_slot.h" #define CREATE_TRACE_POINTS #include <trace/events/ksm.h> #ifdef CONFIG_NUMA #define NUMA(x) (x) #define DO_NUMA(x) do { (x); } while (0) #else #define NUMA(x) (0) #define DO_NUMA(x) do { } while (0) #endif /** * DOC: Overview * * A few notes about the KSM scanning process, * to make it easier to understand the data structures below: * * In order to reduce excessive scanning, KSM sorts the memory pages by their * contents into a data structure that holds pointers to the pages' locations. * * Since the contents of the pages may change at any moment, KSM cannot just * insert the pages into a normal sorted tree and expect it to find anything. * Therefore KSM uses two data structures - the stable and the unstable tree. * * The stable tree holds pointers to all the merged pages (ksm pages), sorted * by their contents. Because each such page is write-protected, searching on * this tree is fully assured to be working (except when pages are unmapped), * and therefore this tree is called the stable tree. * * The stable tree node includes information required for reverse * mapping from a KSM page to virtual addresses that map this page. * * In order to avoid large latencies of the rmap walks on KSM pages, * KSM maintains two types of nodes in the stable tree: * * * the regular nodes that keep the reverse mapping structures in a * linked list * * the "chains" that link nodes ("dups") that represent the same * write protected memory content, but each "dup" corresponds to a * different KSM page copy of that content * * Internally, the regular nodes, "dups" and "chains" are represented * using the same struct ksm_stable_node structure. * * In addition to the stable tree, KSM uses a second data structure called the * unstable tree: this tree holds pointers to pages which have been found to * be "unchanged for a period of time". The unstable tree sorts these pages * by their contents, but since they are not write-protected, KSM cannot rely * upon the unstable tree to work correctly - the unstable tree is liable to * be corrupted as its contents are modified, and so it is called unstable. * * KSM solves this problem by several techniques: * * 1) The unstable tree is flushed every time KSM completes scanning all * memory areas, and then the tree is rebuilt again from the beginning. * 2) KSM will only insert into the unstable tree, pages whose hash value * has not changed since the previous scan of all memory areas. 
* 3) The unstable tree is a RedBlack Tree - so its balancing is based on the * colors of the nodes and not on their contents, assuring that even when * the tree gets "corrupted" it won't get out of balance, so scanning time * remains the same (also, searching and inserting nodes in an rbtree uses * the same algorithm, so we have no overhead when we flush and rebuild). * 4) KSM never flushes the stable tree, which means that even if it were to * take 10 attempts to find a page in the unstable tree, once it is found, * it is secured in the stable tree. (When we scan a new page, we first * compare it against the stable tree, and then against the unstable tree.) * * If the merge_across_nodes tunable is unset, then KSM maintains multiple * stable trees and multiple unstable trees: one of each for each NUMA node. */ /** * struct ksm_mm_slot - ksm information per mm that is being scanned * @slot: hash lookup from mm to mm_slot * @rmap_list: head for this mm_slot's singly-linked list of rmap_items */ struct ksm_mm_slot { struct mm_slot slot; struct ksm_rmap_item *rmap_list; }; /** * struct ksm_scan - cursor for scanning * @mm_slot: the current mm_slot we are scanning * @address: the next address inside that to be scanned * @rmap_list: link to the next rmap to be scanned in the rmap_list * @seqnr: count of completed full scans (needed when removing unstable node) * * There is only the one ksm_scan instance of this cursor structure. */ struct ksm_scan { struct ksm_mm_slot *mm_slot; unsigned long address; struct ksm_rmap_item **rmap_list; unsigned long seqnr; }; /** * struct ksm_stable_node - node of the stable rbtree * @node: rb node of this ksm page in the stable tree * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list * @hlist_dup: linked into the stable_node->hlist with a stable_node chain * @list: linked into migrate_nodes, pending placement in the proper node tree * @hlist: hlist head of rmap_items using this ksm page * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid) * @chain_prune_time: time of the last full garbage collection * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN * @nid: NUMA node id of stable tree in which linked (may not match kpfn) */ struct ksm_stable_node { union { struct rb_node node; /* when node of stable tree */ struct { /* when listed for migration */ struct list_head *head; struct { struct hlist_node hlist_dup; struct list_head list; }; }; }; struct hlist_head hlist; union { unsigned long kpfn; unsigned long chain_prune_time; }; /* * STABLE_NODE_CHAIN can be any negative number in * rmap_hlist_len negative range, but better not -1 to be able * to reliably detect underflows. 
*/ #define STABLE_NODE_CHAIN -1024 int rmap_hlist_len; #ifdef CONFIG_NUMA int nid; #endif }; /** * struct ksm_rmap_item - reverse mapping item for virtual addresses * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree * @nid: NUMA node id of unstable tree in which linked (may not match page) * @mm: the memory structure this rmap_item is pointing into * @address: the virtual address this rmap_item tracks (+ flags in low bits) * @oldchecksum: previous checksum of the page at that virtual address * @node: rb node of this rmap_item in the unstable tree * @head: pointer to stable_node heading this list in the stable tree * @hlist: link into hlist of rmap_items hanging off that stable_node */ struct ksm_rmap_item { struct ksm_rmap_item *rmap_list; union { struct anon_vma *anon_vma; /* when stable */ #ifdef CONFIG_NUMA int nid; /* when node of unstable tree */ #endif }; struct mm_struct *mm; unsigned long address; /* + low bits used for flags below */ unsigned int oldchecksum; /* when unstable */ union { struct rb_node node; /* when node of unstable tree */ struct { /* when listed from stable tree */ struct ksm_stable_node *head; struct hlist_node hlist; }; }; }; #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */ #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */ #define STABLE_FLAG 0x200 /* is listed from the stable tree */ /* The stable and unstable tree heads */ static struct rb_root one_stable_tree[1] = { RB_ROOT }; static struct rb_root one_unstable_tree[1] = { RB_ROOT }; static struct rb_root *root_stable_tree = one_stable_tree; static struct rb_root *root_unstable_tree = one_unstable_tree; /* Recently migrated nodes of stable tree, pending proper placement */ static LIST_HEAD(migrate_nodes); #define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev) #define MM_SLOTS_HASH_BITS 10 static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS); static struct ksm_mm_slot ksm_mm_head = { .slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node), }; static struct ksm_scan ksm_scan = { .mm_slot = &ksm_mm_head, }; static struct kmem_cache *rmap_item_cache; static struct kmem_cache *stable_node_cache; static struct kmem_cache *mm_slot_cache; /* The number of pages scanned */ static unsigned long ksm_pages_scanned; /* The number of nodes in the stable tree */ static unsigned long ksm_pages_shared; /* The number of page slots additionally sharing those nodes */ static unsigned long ksm_pages_sharing; /* The number of nodes in the unstable tree */ static unsigned long ksm_pages_unshared; /* The number of rmap_items in use: to calculate pages_volatile */ static unsigned long ksm_rmap_items; /* The number of stable_node chains */ static unsigned long ksm_stable_node_chains; /* The number of stable_node dups linked to the stable_node chains */ static unsigned long ksm_stable_node_dups; /* Delay in pruning stale stable_node_dups in the stable_node_chains */ static unsigned int ksm_stable_node_chains_prune_millisecs = 2000; /* Maximum number of page slots sharing a stable node */ static int ksm_max_page_sharing = 256; /* Number of pages ksmd should scan in one batch */ static unsigned int ksm_thread_pages_to_scan = 100; /* Milliseconds ksmd should sleep between batches */ static unsigned int ksm_thread_sleep_millisecs = 20; /* Checksum of an empty (zeroed) page */ static unsigned int zero_checksum __read_mostly; /* Whether to merge empty (zeroed) pages with actual zero pages */ static 
bool ksm_use_zero_pages __read_mostly; /* The number of zero pages which is placed by KSM */ unsigned long ksm_zero_pages; #ifdef CONFIG_NUMA /* Zeroed when merging across nodes is not allowed */ static unsigned int ksm_merge_across_nodes = 1; static int ksm_nr_node_ids = 1; #else #define ksm_merge_across_nodes 1U #define ksm_nr_node_ids 1 #endif #define KSM_RUN_STOP 0 #define KSM_RUN_MERGE 1 #define KSM_RUN_UNMERGE 2 #define KSM_RUN_OFFLINE 4 static unsigned long ksm_run = KSM_RUN_STOP; static void wait_while_offlining(void); static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait); static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait); static DEFINE_MUTEX(ksm_thread_mutex); static DEFINE_SPINLOCK(ksm_mmlist_lock); #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\ sizeof(struct __struct), __alignof__(struct __struct),\ (__flags), NULL) static int __init ksm_slab_init(void) { rmap_item_cache = KSM_KMEM_CACHE(ksm_rmap_item, 0); if (!rmap_item_cache) goto out; stable_node_cache = KSM_KMEM_CACHE(ksm_stable_node, 0); if (!stable_node_cache) goto out_free1; mm_slot_cache = KSM_KMEM_CACHE(ksm_mm_slot, 0); if (!mm_slot_cache) goto out_free2; return 0; out_free2: kmem_cache_destroy(stable_node_cache); out_free1: kmem_cache_destroy(rmap_item_cache); out: return -ENOMEM; } static void __init ksm_slab_free(void) { kmem_cache_destroy(mm_slot_cache); kmem_cache_destroy(stable_node_cache); kmem_cache_destroy(rmap_item_cache); mm_slot_cache = NULL; } static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain) { return chain->rmap_hlist_len == STABLE_NODE_CHAIN; } static __always_inline bool is_stable_node_dup(struct ksm_stable_node *dup) { return dup->head == STABLE_NODE_DUP_HEAD; } static inline void stable_node_chain_add_dup(struct ksm_stable_node *dup, struct ksm_stable_node *chain) { VM_BUG_ON(is_stable_node_dup(dup)); dup->head = STABLE_NODE_DUP_HEAD; VM_BUG_ON(!is_stable_node_chain(chain)); hlist_add_head(&dup->hlist_dup, &chain->hlist); ksm_stable_node_dups++; } static inline void __stable_node_dup_del(struct ksm_stable_node *dup) { VM_BUG_ON(!is_stable_node_dup(dup)); hlist_del(&dup->hlist_dup); ksm_stable_node_dups--; } static inline void stable_node_dup_del(struct ksm_stable_node *dup) { VM_BUG_ON(is_stable_node_chain(dup)); if (is_stable_node_dup(dup)) __stable_node_dup_del(dup); else rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid)); #ifdef CONFIG_DEBUG_VM dup->head = NULL; #endif } static inline struct ksm_rmap_item *alloc_rmap_item(void) { struct ksm_rmap_item *rmap_item; rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); if (rmap_item) ksm_rmap_items++; return rmap_item; } static inline void free_rmap_item(struct ksm_rmap_item *rmap_item) { ksm_rmap_items--; rmap_item->mm->ksm_rmap_items--; rmap_item->mm = NULL; /* debug safety */ kmem_cache_free(rmap_item_cache, rmap_item); } static inline struct ksm_stable_node *alloc_stable_node(void) { /* * The allocation can take too long with GFP_KERNEL when memory is under * pressure, which may lead to hung task warnings. Adding __GFP_HIGH * grants access to memory reserves, helping to avoid this problem. 
*/ return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH); } static inline void free_stable_node(struct ksm_stable_node *stable_node) { VM_BUG_ON(stable_node->rmap_hlist_len && !is_stable_node_chain(stable_node)); kmem_cache_free(stable_node_cache, stable_node); } /* * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's * page tables after it has passed through ksm_exit() - which, if necessary, * takes mmap_lock briefly to serialize against them. ksm_exit() does not set * a special flag: they can just back out as soon as mm_users goes to zero. * ksm_test_exit() is used throughout to make this test for exit: in some * places for correctness, in some places just to avoid unnecessary work. */ static inline bool ksm_test_exit(struct mm_struct *mm) { return atomic_read(&mm->mm_users) == 0; } static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next, struct mm_walk *walk) { struct page *page = NULL; spinlock_t *ptl; pte_t *pte; pte_t ptent; int ret; pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); if (!pte) return 0; ptent = ptep_get(pte); if (pte_present(ptent)) { page = vm_normal_page(walk->vma, addr, ptent); } else if (!pte_none(ptent)) { swp_entry_t entry = pte_to_swp_entry(ptent); /* * As KSM pages remain KSM pages until freed, no need to wait * here for migration to end. */ if (is_migration_entry(entry)) page = pfn_swap_entry_to_page(entry); } /* return 1 if the page is an normal ksm page or KSM-placed zero page */ ret = (page && PageKsm(page)) || is_ksm_zero_pte(*pte); pte_unmap_unlock(pte, ptl); return ret; } static const struct mm_walk_ops break_ksm_ops = { .pmd_entry = break_ksm_pmd_entry, .walk_lock = PGWALK_RDLOCK, }; static const struct mm_walk_ops break_ksm_lock_vma_ops = { .pmd_entry = break_ksm_pmd_entry, .walk_lock = PGWALK_WRLOCK, }; /* * We use break_ksm to break COW on a ksm page by triggering unsharing, * such that the ksm page will get replaced by an exclusive anonymous page. * * We take great care only to touch a ksm page, in a VM_MERGEABLE vma, * in case the application has unmapped and remapped mm,addr meanwhile. * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP * mmap of /dev/mem, where we would not want to touch it. * * FAULT_FLAG_REMOTE/FOLL_REMOTE are because we do this outside the context * of the process that owns 'vma'. We also do not want to enforce * protection keys here anyway. */ static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma) { vm_fault_t ret = 0; const struct mm_walk_ops *ops = lock_vma ? &break_ksm_lock_vma_ops : &break_ksm_ops; do { int ksm_page; cond_resched(); ksm_page = walk_page_range_vma(vma, addr, addr + 1, ops, NULL); if (WARN_ON_ONCE(ksm_page < 0)) return ksm_page; if (!ksm_page) return 0; ret = handle_mm_fault(vma, addr, FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE, NULL); } while (!(ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM))); /* * We must loop until we no longer find a KSM page because * handle_mm_fault() may back out if there's any difficulty e.g. if * pte accessed bit gets updated concurrently. * * VM_FAULT_SIGBUS could occur if we race with truncation of the * backing file, which also invalidates anonymous pages: that's * okay, that truncation will have unmapped the PageKsm for us. 
* * VM_FAULT_OOM: at the time of writing (late July 2009), setting * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the * current task has TIF_MEMDIE set, and will be OOM killed on return * to user; and ksmd, having no mm, would never be chosen for that. * * But if the mm is in a limited mem_cgroup, then the fault may fail * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and * even ksmd can fail in this way - though it's usually breaking ksm * just to undo a merge it made a moment before, so unlikely to oom. * * That's a pity: we might therefore have more kernel pages allocated * than we're counting as nodes in the stable tree; but ksm_do_scan * will retry to break_cow on each pass, so should recover the page * in due course. The important thing is to not let VM_MERGEABLE * be cleared while any such pages might remain in the area. */ return (ret & VM_FAULT_OOM) ? -ENOMEM : 0; } static bool vma_ksm_compatible(struct vm_area_struct *vma) { if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE | VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_HUGETLB | VM_MIXEDMAP)) return false; /* just ignore the advice */ if (vma_is_dax(vma)) return false; #ifdef VM_SAO if (vma->vm_flags & VM_SAO) return false; #endif #ifdef VM_SPARC_ADI if (vma->vm_flags & VM_SPARC_ADI) return false; #endif return true; } static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma; if (ksm_test_exit(mm)) return NULL; vma = vma_lookup(mm, addr); if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) return NULL; return vma; } static void break_cow(struct ksm_rmap_item *rmap_item) { struct mm_struct *mm = rmap_item->mm; unsigned long addr = rmap_item->address; struct vm_area_struct *vma; /* * It is not an accident that whenever we want to break COW * to undo, we also need to drop a reference to the anon_vma. */ put_anon_vma(rmap_item->anon_vma); mmap_read_lock(mm); vma = find_mergeable_vma(mm, addr); if (vma) break_ksm(vma, addr, false); mmap_read_unlock(mm); } static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item) { struct mm_struct *mm = rmap_item->mm; unsigned long addr = rmap_item->address; struct vm_area_struct *vma; struct page *page; mmap_read_lock(mm); vma = find_mergeable_vma(mm, addr); if (!vma) goto out; page = follow_page(vma, addr, FOLL_GET); if (IS_ERR_OR_NULL(page)) goto out; if (is_zone_device_page(page)) goto out_putpage; if (PageAnon(page)) { flush_anon_page(vma, page, addr); flush_dcache_page(page); } else { out_putpage: put_page(page); out: page = NULL; } mmap_read_unlock(mm); return page; } /* * This helper is used for getting right index into array of tree roots. * When merge_across_nodes knob is set to 1, there are only two rb-trees for * stable and unstable pages from all nodes with roots in index 0. Otherwise, * every node has its own stable and unstable tree. */ static inline int get_kpfn_nid(unsigned long kpfn) { return ksm_merge_across_nodes ? 
0 : NUMA(pfn_to_nid(kpfn)); } static struct ksm_stable_node *alloc_stable_node_chain(struct ksm_stable_node *dup, struct rb_root *root) { struct ksm_stable_node *chain = alloc_stable_node(); VM_BUG_ON(is_stable_node_chain(dup)); if (likely(chain)) { INIT_HLIST_HEAD(&chain->hlist); chain->chain_prune_time = jiffies; chain->rmap_hlist_len = STABLE_NODE_CHAIN; #if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA) chain->nid = NUMA_NO_NODE; /* debug */ #endif ksm_stable_node_chains++; /* * Put the stable node chain in the first dimension of * the stable tree and at the same time remove the old * stable node. */ rb_replace_node(&dup->node, &chain->node, root); /* * Move the old stable node to the second dimension * queued in the hlist_dup. The invariant is that all * dup stable_nodes in the chain->hlist point to pages * that are write protected and have the exact same * content. */ stable_node_chain_add_dup(dup, chain); } return chain; } static inline void free_stable_node_chain(struct ksm_stable_node *chain, struct rb_root *root) { rb_erase(&chain->node, root); free_stable_node(chain); ksm_stable_node_chains--; } static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node) { struct ksm_rmap_item *rmap_item; /* check it's not STABLE_NODE_CHAIN or negative */ BUG_ON(stable_node->rmap_hlist_len < 0); hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { if (rmap_item->hlist.next) { ksm_pages_sharing--; trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm); } else { ksm_pages_shared--; } rmap_item->mm->ksm_merging_pages--; VM_BUG_ON(stable_node->rmap_hlist_len <= 0); stable_node->rmap_hlist_len--; put_anon_vma(rmap_item->anon_vma); rmap_item->address &= PAGE_MASK; cond_resched(); } /* * We need the second aligned pointer of the migrate_nodes * list_head to stay clear from the rb_parent_color union * (aligned and different than any node) and also different * from &migrate_nodes. This will verify that future list.h changes * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it. */ BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes); BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1); trace_ksm_remove_ksm_page(stable_node->kpfn); if (stable_node->head == &migrate_nodes) list_del(&stable_node->list); else stable_node_dup_del(stable_node); free_stable_node(stable_node); } enum get_ksm_page_flags { GET_KSM_PAGE_NOLOCK, GET_KSM_PAGE_LOCK, GET_KSM_PAGE_TRYLOCK }; /* * get_ksm_page: checks if the page indicated by the stable node * is still its ksm page, despite having held no reference to it. * In which case we can trust the content of the page, and it * returns the gotten page; but if the page has now been zapped, * remove the stale node from the stable tree and return NULL. * But beware, the stable node's page might be being migrated. * * You would expect the stable_node to hold a reference to the ksm page. * But if it increments the page's count, swapping out has to wait for * ksmd to come around again before it can free the page, which may take * seconds or even minutes: much too unresponsive. So instead we use a * "keyhole reference": access to the ksm page from the stable node peeps * out through its keyhole to see if that page still holds the right key, * pointing back to this stable node. This relies on freeing a PageAnon * page to reset its page->mapping to NULL, and relies on no other use of * a page to put something that might look like our key in page->mapping. * is on its way to being freed; but it is an anomaly to bear in mind. 
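 *
 * The "keyhole" check itself boils down to a pointer comparison; as an
 * illustrative sketch only (the function below additionally has to cope
 * with frozen refcounts and with migration changing kpfn under us):
 *
 *	expected = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
 *	page = pfn_to_page(READ_ONCE(stable_node->kpfn));
 *	if (READ_ONCE(page->mapping) != expected)
 *		the node is stale and gets dropped from the stable tree.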
*/ static struct page *get_ksm_page(struct ksm_stable_node *stable_node, enum get_ksm_page_flags flags) { struct page *page; void *expected_mapping; unsigned long kpfn; expected_mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); again: kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */ page = pfn_to_page(kpfn); if (READ_ONCE(page->mapping) != expected_mapping) goto stale; /* * We cannot do anything with the page while its refcount is 0. * Usually 0 means free, or tail of a higher-order page: in which * case this node is no longer referenced, and should be freed; * however, it might mean that the page is under page_ref_freeze(). * The __remove_mapping() case is easy, again the node is now stale; * the same is in reuse_ksm_page() case; but if page is swapcache * in folio_migrate_mapping(), it might still be our page, * in which case it's essential to keep the node. */ while (!get_page_unless_zero(page)) { /* * Another check for page->mapping != expected_mapping would * work here too. We have chosen the !PageSwapCache test to * optimize the common case, when the page is or is about to * be freed: PageSwapCache is cleared (under spin_lock_irq) * in the ref_freeze section of __remove_mapping(); but Anon * page->mapping reset to NULL later, in free_pages_prepare(). */ if (!PageSwapCache(page)) goto stale; cpu_relax(); } if (READ_ONCE(page->mapping) != expected_mapping) { put_page(page); goto stale; } if (flags == GET_KSM_PAGE_TRYLOCK) { if (!trylock_page(page)) { put_page(page); return ERR_PTR(-EBUSY); } } else if (flags == GET_KSM_PAGE_LOCK) lock_page(page); if (flags != GET_KSM_PAGE_NOLOCK) { if (READ_ONCE(page->mapping) != expected_mapping) { unlock_page(page); put_page(page); goto stale; } } return page; stale: /* * We come here from above when page->mapping or !PageSwapCache * suggests that the node is stale; but it might be under migration. * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(), * before checking whether node->kpfn has been changed. */ smp_rmb(); if (READ_ONCE(stable_node->kpfn) != kpfn) goto again; remove_node_from_stable_tree(stable_node); return NULL; } /* * Removing rmap_item from stable or unstable tree. * This function will clean the information from the stable/unstable tree. */ static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item) { if (rmap_item->address & STABLE_FLAG) { struct ksm_stable_node *stable_node; struct page *page; stable_node = rmap_item->head; page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK); if (!page) goto out; hlist_del(&rmap_item->hlist); unlock_page(page); put_page(page); if (!hlist_empty(&stable_node->hlist)) ksm_pages_sharing--; else ksm_pages_shared--; rmap_item->mm->ksm_merging_pages--; VM_BUG_ON(stable_node->rmap_hlist_len <= 0); stable_node->rmap_hlist_len--; put_anon_vma(rmap_item->anon_vma); rmap_item->head = NULL; rmap_item->address &= PAGE_MASK; } else if (rmap_item->address & UNSTABLE_FLAG) { unsigned char age; /* * Usually ksmd can and must skip the rb_erase, because * root_unstable_tree was already reset to RB_ROOT. * But be careful when an mm is exiting: do the rb_erase * if this rmap_item was inserted by this scan, rather * than left over from before. 
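 *
 * Worked example: the low SEQNR_MASK bits of rmap_item->address were
 * stamped with the scan number when the item was inserted (see
 * unstable_tree_search_insert() below). If the item was inserted during
 * this scan, ksm_scan.seqnr minus that stamp is 0 and the rb_erase
 * below is required; if it was inserted during the previous scan the
 * difference is 1, and the tree it sat in has already been reset to
 * RB_ROOT, so erasing would be wrong.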
*/ age = (unsigned char)(ksm_scan.seqnr - rmap_item->address); BUG_ON(age > 1); if (!age) rb_erase(&rmap_item->node, root_unstable_tree + NUMA(rmap_item->nid)); ksm_pages_unshared--; rmap_item->address &= PAGE_MASK; } out: cond_resched(); /* we're called from many long loops */ } static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list) { while (*rmap_list) { struct ksm_rmap_item *rmap_item = *rmap_list; *rmap_list = rmap_item->rmap_list; remove_rmap_item_from_tree(rmap_item); free_rmap_item(rmap_item); } } /* * Though it's very tempting to unmerge rmap_items from stable tree rather * than check every pte of a given vma, the locking doesn't quite work for * that - an rmap_item is assigned to the stable tree after inserting ksm * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing * rmap_items from parent to child at fork time (so as not to waste time * if exit comes before the next scan reaches it). * * Similarly, although we'd like to remove rmap_items (so updating counts * and freeing memory) when unmerging an area, it's easier to leave that * to the next pass of ksmd - consider, for example, how ksmd might be * in cmp_and_merge_page on one of the rmap_items we would be removing. */ static int unmerge_ksm_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end, bool lock_vma) { unsigned long addr; int err = 0; for (addr = start; addr < end && !err; addr += PAGE_SIZE) { if (ksm_test_exit(vma->vm_mm)) break; if (signal_pending(current)) err = -ERESTARTSYS; else err = break_ksm(vma, addr, lock_vma); } return err; } static inline struct ksm_stable_node *folio_stable_node(struct folio *folio) { return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL; } static inline struct ksm_stable_node *page_stable_node(struct page *page) { return folio_stable_node(page_folio(page)); } static inline void set_page_stable_node(struct page *page, struct ksm_stable_node *stable_node) { VM_BUG_ON_PAGE(PageAnon(page) && PageAnonExclusive(page), page); page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); } #ifdef CONFIG_SYSFS /* * Only called through the sysfs control interface: */ static int remove_stable_node(struct ksm_stable_node *stable_node) { struct page *page; int err; page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK); if (!page) { /* * get_ksm_page did remove_node_from_stable_tree itself. */ return 0; } /* * Page could be still mapped if this races with __mmput() running in * between ksm_exit() and exit_mmap(). Just refuse to let * merge_across_nodes/max_page_sharing be switched. */ err = -EBUSY; if (!page_mapped(page)) { /* * The stable node did not yet appear stale to get_ksm_page(), * since that allows for an unmapped ksm page to be recognized * right up until it is freed; but the node is safe to remove. * This page might be in an LRU cache waiting to be freed, * or it might be PageSwapCache (perhaps under writeback), * or it might have been removed from swapcache a moment ago. 
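 *
 * From userspace this -EBUSY is what an administrator sees when trying
 * to flip those knobs while shared pages exist; an illustrative sketch
 * only (not code from this file):
 *
 *	fd = open("/sys/kernel/mm/ksm/merge_across_nodes", O_WRONLY);
 *	write(fd, "0", 1);	fails with errno EBUSY until everything
 *				has been unmerged, e.g. by writing 2 to
 *				/sys/kernel/mm/ksm/run first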
*/ set_page_stable_node(page, NULL); remove_node_from_stable_tree(stable_node); err = 0; } unlock_page(page); put_page(page); return err; } static int remove_stable_node_chain(struct ksm_stable_node *stable_node, struct rb_root *root) { struct ksm_stable_node *dup; struct hlist_node *hlist_safe; if (!is_stable_node_chain(stable_node)) { VM_BUG_ON(is_stable_node_dup(stable_node)); if (remove_stable_node(stable_node)) return true; else return false; } hlist_for_each_entry_safe(dup, hlist_safe, &stable_node->hlist, hlist_dup) { VM_BUG_ON(!is_stable_node_dup(dup)); if (remove_stable_node(dup)) return true; } BUG_ON(!hlist_empty(&stable_node->hlist)); free_stable_node_chain(stable_node, root); return false; } static int remove_all_stable_nodes(void) { struct ksm_stable_node *stable_node, *next; int nid; int err = 0; for (nid = 0; nid < ksm_nr_node_ids; nid++) { while (root_stable_tree[nid].rb_node) { stable_node = rb_entry(root_stable_tree[nid].rb_node, struct ksm_stable_node, node); if (remove_stable_node_chain(stable_node, root_stable_tree + nid)) { err = -EBUSY; break; /* proceed to next nid */ } cond_resched(); } } list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { if (remove_stable_node(stable_node)) err = -EBUSY; cond_resched(); } return err; } static int unmerge_and_remove_all_rmap_items(void) { struct ksm_mm_slot *mm_slot; struct mm_slot *slot; struct mm_struct *mm; struct vm_area_struct *vma; int err = 0; spin_lock(&ksm_mmlist_lock); slot = list_entry(ksm_mm_head.slot.mm_node.next, struct mm_slot, mm_node); ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); spin_unlock(&ksm_mmlist_lock); for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) { VMA_ITERATOR(vmi, mm_slot->slot.mm, 0); mm = mm_slot->slot.mm; mmap_read_lock(mm); /* * Exit right away if mm is exiting to avoid lockdep issue in * the maple tree */ if (ksm_test_exit(mm)) goto mm_exiting; for_each_vma(vmi, vma) { if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) continue; err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, false); if (err) goto error; } mm_exiting: remove_trailing_rmap_items(&mm_slot->rmap_list); mmap_read_unlock(mm); spin_lock(&ksm_mmlist_lock); slot = list_entry(mm_slot->slot.mm_node.next, struct mm_slot, mm_node); ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); if (ksm_test_exit(mm)) { hash_del(&mm_slot->slot.hash); list_del(&mm_slot->slot.mm_node); spin_unlock(&ksm_mmlist_lock); mm_slot_free(mm_slot_cache, mm_slot); clear_bit(MMF_VM_MERGEABLE, &mm->flags); clear_bit(MMF_VM_MERGE_ANY, &mm->flags); mmdrop(mm); } else spin_unlock(&ksm_mmlist_lock); } /* Clean up stable nodes, but don't worry if some are still busy */ remove_all_stable_nodes(); ksm_scan.seqnr = 0; return 0; error: mmap_read_unlock(mm); spin_lock(&ksm_mmlist_lock); ksm_scan.mm_slot = &ksm_mm_head; spin_unlock(&ksm_mmlist_lock); return err; } #endif /* CONFIG_SYSFS */ static u32 calc_checksum(struct page *page) { u32 checksum; void *addr = kmap_atomic(page); checksum = xxhash(addr, PAGE_SIZE, 0); kunmap_atomic(addr); return checksum; } static int write_protect_page(struct vm_area_struct *vma, struct page *page, pte_t *orig_pte) { struct mm_struct *mm = vma->vm_mm; DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0); int swapped; int err = -EFAULT; struct mmu_notifier_range range; bool anon_exclusive; pte_t entry; pvmw.address = page_address_in_vma(page, vma); if (pvmw.address == -EFAULT) goto out; BUG_ON(PageTransCompound(page)); mmu_notifier_range_init(&range, 
MMU_NOTIFY_CLEAR, 0, mm, pvmw.address, pvmw.address + PAGE_SIZE); mmu_notifier_invalidate_range_start(&range); if (!page_vma_mapped_walk(&pvmw)) goto out_mn; if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?")) goto out_unlock; anon_exclusive = PageAnonExclusive(page); entry = ptep_get(pvmw.pte); if (pte_write(entry) || pte_dirty(entry) || anon_exclusive || mm_tlb_flush_pending(mm)) { swapped = PageSwapCache(page); flush_cache_page(vma, pvmw.address, page_to_pfn(page)); /* * Ok this is tricky, when get_user_pages_fast() run it doesn't * take any lock, therefore the check that we are going to make * with the pagecount against the mapcount is racy and * O_DIRECT can happen right after the check. * So we clear the pte and flush the tlb before the check * this assure us that no O_DIRECT can happen after the check * or in the middle of the check. * * No need to notify as we are downgrading page table to read * only not changing it to point to a new page. * * See Documentation/mm/mmu_notifier.rst */ entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte); /* * Check that no O_DIRECT or similar I/O is in progress on the * page */ if (page_mapcount(page) + 1 + swapped != page_count(page)) { set_pte_at(mm, pvmw.address, pvmw.pte, entry); goto out_unlock; } /* See page_try_share_anon_rmap(): clear PTE first. */ if (anon_exclusive && page_try_share_anon_rmap(page)) { set_pte_at(mm, pvmw.address, pvmw.pte, entry); goto out_unlock; } if (pte_dirty(entry)) set_page_dirty(page); entry = pte_mkclean(entry); if (pte_write(entry)) entry = pte_wrprotect(entry); set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry); } *orig_pte = entry; err = 0; out_unlock: page_vma_mapped_walk_done(&pvmw); out_mn: mmu_notifier_invalidate_range_end(&range); out: return err; } /** * replace_page - replace page in vma by new ksm page * @vma: vma that holds the pte pointing to page * @page: the page we are replacing by kpage * @kpage: the ksm page we replace page by * @orig_pte: the original value of the pte * * Returns 0 on success, -EFAULT on failure. */ static int replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage, pte_t orig_pte) { struct mm_struct *mm = vma->vm_mm; struct folio *folio; pmd_t *pmd; pmd_t pmde; pte_t *ptep; pte_t newpte; spinlock_t *ptl; unsigned long addr; int err = -EFAULT; struct mmu_notifier_range range; addr = page_address_in_vma(page, vma); if (addr == -EFAULT) goto out; pmd = mm_find_pmd(mm, addr); if (!pmd) goto out; /* * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at() * without holding anon_vma lock for write. So when looking for a * genuine pmde (in which to find pte), test present and !THP together. */ pmde = pmdp_get_lockless(pmd); if (!pmd_present(pmde) || pmd_trans_huge(pmde)) goto out; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr, addr + PAGE_SIZE); mmu_notifier_invalidate_range_start(&range); ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); if (!ptep) goto out_mn; if (!pte_same(ptep_get(ptep), orig_pte)) { pte_unmap_unlock(ptep, ptl); goto out_mn; } VM_BUG_ON_PAGE(PageAnonExclusive(page), page); VM_BUG_ON_PAGE(PageAnon(kpage) && PageAnonExclusive(kpage), kpage); /* * No need to check ksm_use_zero_pages here: we can only have a * zero_page here if ksm_use_zero_pages was enabled already. 
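 *
 * How a KSM-placed zero page is later recognised (the helper is not
 * defined in this file, so this is only a sketch of the idea): the pte
 * installed below is both special and dirty, whereas an ordinary
 * read-faulted zero-page mapping is left clean, so a test roughly like
 *
 *	is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte)
 *
 * is enough to tell the two apart, which is what is_ksm_zero_pte(),
 * used earlier in break_ksm_pmd_entry(), is expected to check.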
*/ if (!is_zero_pfn(page_to_pfn(kpage))) { get_page(kpage); page_add_anon_rmap(kpage, vma, addr, RMAP_NONE); newpte = mk_pte(kpage, vma->vm_page_prot); } else { /* * Use pte_mkdirty to mark the zero page mapped by KSM, and then * we can easily track all KSM-placed zero pages by checking if * the dirty bit in zero page's PTE is set. */ newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot))); ksm_zero_pages++; mm->ksm_zero_pages++; /* * We're replacing an anonymous page with a zero page, which is * not anonymous. We need to do proper accounting otherwise we * will get wrong values in /proc, and a BUG message in dmesg * when tearing down the mm. */ dec_mm_counter(mm, MM_ANONPAGES); } flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep))); /* * No need to notify as we are replacing a read only page with another * read only page with the same content. * * See Documentation/mm/mmu_notifier.rst */ ptep_clear_flush(vma, addr, ptep); set_pte_at_notify(mm, addr, ptep, newpte); folio = page_folio(page); page_remove_rmap(page, vma, false); if (!folio_mapped(folio)) folio_free_swap(folio); folio_put(folio); pte_unmap_unlock(ptep, ptl); err = 0; out_mn: mmu_notifier_invalidate_range_end(&range); out: return err; } /* * try_to_merge_one_page - take two pages and merge them into one * @vma: the vma that holds the pte pointing to page * @page: the PageAnon page that we want to replace with kpage * @kpage: the PageKsm page that we want to map instead of page, * or NULL the first time when we want to use page as kpage. * * This function returns 0 if the pages were merged, -EFAULT otherwise. */ static int try_to_merge_one_page(struct vm_area_struct *vma, struct page *page, struct page *kpage) { pte_t orig_pte = __pte(0); int err = -EFAULT; if (page == kpage) /* ksm page forked */ return 0; if (!PageAnon(page)) goto out; /* * We need the page lock to read a stable PageSwapCache in * write_protect_page(). We use trylock_page() instead of * lock_page() because we don't want to wait here - we * prefer to continue scanning and merging different pages, * then come back to this page when it is unlocked. */ if (!trylock_page(page)) goto out; if (PageTransCompound(page)) { if (split_huge_page(page)) goto out_unlock; } /* * If this anonymous page is mapped only here, its pte may need * to be write-protected. If it's mapped elsewhere, all of its * ptes are necessarily already write-protected. But in either * case, we need to lock and check page_count is not raised. */ if (write_protect_page(vma, page, &orig_pte) == 0) { if (!kpage) { /* * While we hold page lock, upgrade page from * PageAnon+anon_vma to PageKsm+NULL stable_node: * stable_tree_insert() will update stable_node. */ set_page_stable_node(page, NULL); mark_page_accessed(page); /* * Page reclaim just frees a clean page with no dirty * ptes: make sure that the ksm page would be swapped. */ if (!PageDirty(page)) SetPageDirty(page); err = 0; } else if (pages_identical(page, kpage)) err = replace_page(vma, page, kpage, orig_pte); } out_unlock: unlock_page(page); out: return err; } /* * try_to_merge_with_ksm_page - like try_to_merge_two_pages, * but no new kernel page is allocated: kpage must already be a ksm page. * * This function returns 0 if the pages were merged, -EFAULT otherwise. 
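 *
 * Call protocol, for orientation (it mirrors try_to_merge_two_pages()
 * further below): the first call passes kpage == NULL, which
 * write-protects 'page' and marks it as a KSM page (the stable tree
 * insertion happens later, in cmp_and_merge_page()); the second call
 * then maps the other candidate onto it:
 *
 *	try_to_merge_with_ksm_page(rmap_item, page, NULL);
 *	try_to_merge_with_ksm_page(tree_rmap_item, tree_page, page);
 *
 * and if the second step fails, break_cow() undoes the first.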
*/ static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item, struct page *page, struct page *kpage) { struct mm_struct *mm = rmap_item->mm; struct vm_area_struct *vma; int err = -EFAULT; mmap_read_lock(mm); vma = find_mergeable_vma(mm, rmap_item->address); if (!vma) goto out; err = try_to_merge_one_page(vma, page, kpage); if (err) goto out; /* Unstable nid is in union with stable anon_vma: remove first */ remove_rmap_item_from_tree(rmap_item); /* Must get reference to anon_vma while still holding mmap_lock */ rmap_item->anon_vma = vma->anon_vma; get_anon_vma(vma->anon_vma); out: mmap_read_unlock(mm); trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page), rmap_item, mm, err); return err; } /* * try_to_merge_two_pages - take two identical pages and prepare them * to be merged into one page. * * This function returns the kpage if we successfully merged two identical * pages into one ksm page, NULL otherwise. * * Note that this function upgrades page to ksm page: if one of the pages * is already a ksm page, try_to_merge_with_ksm_page should be used. */ static struct page *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item, struct page *page, struct ksm_rmap_item *tree_rmap_item, struct page *tree_page) { int err; err = try_to_merge_with_ksm_page(rmap_item, page, NULL); if (!err) { err = try_to_merge_with_ksm_page(tree_rmap_item, tree_page, page); /* * If that fails, we have a ksm page with only one pte * pointing to it: so break it. */ if (err) break_cow(rmap_item); } return err ? NULL : page; } static __always_inline bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset) { VM_BUG_ON(stable_node->rmap_hlist_len < 0); /* * Check that at least one mapping still exists, otherwise * there's no much point to merge and share with this * stable_node, as the underlying tree_page of the other * sharer is going to be freed soon. */ return stable_node->rmap_hlist_len && stable_node->rmap_hlist_len + offset < ksm_max_page_sharing; } static __always_inline bool is_page_sharing_candidate(struct ksm_stable_node *stable_node) { return __is_page_sharing_candidate(stable_node, 0); } static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup, struct ksm_stable_node **_stable_node, struct rb_root *root, bool prune_stale_stable_nodes) { struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node; struct hlist_node *hlist_safe; struct page *_tree_page, *tree_page = NULL; int nr = 0; int found_rmap_hlist_len; if (!prune_stale_stable_nodes || time_before(jiffies, stable_node->chain_prune_time + msecs_to_jiffies( ksm_stable_node_chains_prune_millisecs))) prune_stale_stable_nodes = false; else stable_node->chain_prune_time = jiffies; hlist_for_each_entry_safe(dup, hlist_safe, &stable_node->hlist, hlist_dup) { cond_resched(); /* * We must walk all stable_node_dup to prune the stale * stable nodes during lookup. * * get_ksm_page can drop the nodes from the * stable_node->hlist if they point to freed pages * (that's why we do a _safe walk). The "dup" * stable_node parameter itself will be freed from * under us if it returns NULL. 
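 *
 * Concrete illustration, assuming the default ksm_max_page_sharing of
 * 256: once a dup has 256 rmap_items hanging off it,
 * is_page_sharing_candidate() turns false for it, a second KSM page is
 * created for the same content, and both live as dups on one chain.
 * The walk below then prefers the fullest dup that can still take at
 * least one more sharer, i.e. the one maximising rmap_hlist_len while
 * keeping rmap_hlist_len + offset < ksm_max_page_sharing.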
*/ _tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK); if (!_tree_page) continue; nr += 1; if (is_page_sharing_candidate(dup)) { if (!found || dup->rmap_hlist_len > found_rmap_hlist_len) { if (found) put_page(tree_page); found = dup; found_rmap_hlist_len = found->rmap_hlist_len; tree_page = _tree_page; /* skip put_page for found dup */ if (!prune_stale_stable_nodes) break; continue; } } put_page(_tree_page); } if (found) { /* * nr is counting all dups in the chain only if * prune_stale_stable_nodes is true, otherwise we may * break the loop at nr == 1 even if there are * multiple entries. */ if (prune_stale_stable_nodes && nr == 1) { /* * If there's not just one entry it would * corrupt memory, better BUG_ON. In KSM * context with no lock held it's not even * fatal. */ BUG_ON(stable_node->hlist.first->next); /* * There's just one entry and it is below the * deduplication limit so drop the chain. */ rb_replace_node(&stable_node->node, &found->node, root); free_stable_node(stable_node); ksm_stable_node_chains--; ksm_stable_node_dups--; /* * NOTE: the caller depends on the stable_node * to be equal to stable_node_dup if the chain * was collapsed. */ *_stable_node = found; /* * Just for robustness, as stable_node is * otherwise left as a stable pointer, the * compiler shall optimize it away at build * time. */ stable_node = NULL; } else if (stable_node->hlist.first != &found->hlist_dup && __is_page_sharing_candidate(found, 1)) { /* * If the found stable_node dup can accept one * more future merge (in addition to the one * that is underway) and is not at the head of * the chain, put it there so next search will * be quicker in the !prune_stale_stable_nodes * case. * * NOTE: it would be inaccurate to use nr > 1 * instead of checking the hlist.first pointer * directly, because in the * prune_stale_stable_nodes case "nr" isn't * the position of the found dup in the chain, * but the total number of dups in the chain. */ hlist_del(&found->hlist_dup); hlist_add_head(&found->hlist_dup, &stable_node->hlist); } } *_stable_node_dup = found; return tree_page; } static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node, struct rb_root *root) { if (!is_stable_node_chain(stable_node)) return stable_node; if (hlist_empty(&stable_node->hlist)) { free_stable_node_chain(stable_node, root); return NULL; } return hlist_entry(stable_node->hlist.first, typeof(*stable_node), hlist_dup); } /* * Like for get_ksm_page, this function can free the *_stable_node and * *_stable_node_dup if the returned tree_page is NULL. * * It can also free and overwrite *_stable_node with the found * stable_node_dup if the chain is collapsed (in which case * *_stable_node will be equal to *_stable_node_dup like if the chain * never existed). It's up to the caller to verify tree_page is not * NULL before dereferencing *_stable_node or *_stable_node_dup. * * *_stable_node_dup is really a second output parameter of this * function and will be overwritten in all cases, the caller doesn't * need to initialize it. */ static struct page *__stable_node_chain(struct ksm_stable_node **_stable_node_dup, struct ksm_stable_node **_stable_node, struct rb_root *root, bool prune_stale_stable_nodes) { struct ksm_stable_node *stable_node = *_stable_node; if (!is_stable_node_chain(stable_node)) { if (is_page_sharing_candidate(stable_node)) { *_stable_node_dup = stable_node; return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK); } /* * _stable_node_dup set to NULL means the stable_node * reached the ksm_max_page_sharing limit. 
*/ *_stable_node_dup = NULL; return NULL; } return stable_node_dup(_stable_node_dup, _stable_node, root, prune_stale_stable_nodes); } static __always_inline struct page *chain_prune(struct ksm_stable_node **s_n_d, struct ksm_stable_node **s_n, struct rb_root *root) { return __stable_node_chain(s_n_d, s_n, root, true); } static __always_inline struct page *chain(struct ksm_stable_node **s_n_d, struct ksm_stable_node *s_n, struct rb_root *root) { struct ksm_stable_node *old_stable_node = s_n; struct page *tree_page; tree_page = __stable_node_chain(s_n_d, &s_n, root, false); /* not pruning dups so s_n cannot have changed */ VM_BUG_ON(s_n != old_stable_node); return tree_page; } /* * stable_tree_search - search for page inside the stable tree * * This function checks if there is a page inside the stable tree * with identical content to the page that we are scanning right now. * * This function returns the stable tree node of identical content if found, * NULL otherwise. */ static struct page *stable_tree_search(struct page *page) { int nid; struct rb_root *root; struct rb_node **new; struct rb_node *parent; struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any; struct ksm_stable_node *page_node; page_node = page_stable_node(page); if (page_node && page_node->head != &migrate_nodes) { /* ksm page forked */ get_page(page); return page; } nid = get_kpfn_nid(page_to_pfn(page)); root = root_stable_tree + nid; again: new = &root->rb_node; parent = NULL; while (*new) { struct page *tree_page; int ret; cond_resched(); stable_node = rb_entry(*new, struct ksm_stable_node, node); stable_node_any = NULL; tree_page = chain_prune(&stable_node_dup, &stable_node, root); /* * NOTE: stable_node may have been freed by * chain_prune() if the returned stable_node_dup is * not NULL. stable_node_dup may have been inserted in * the rbtree instead as a regular stable_node (in * order to collapse the stable_node chain if a single * stable_node dup was found in it). In such case the * stable_node is overwritten by the callee to point * to the stable_node_dup that was collapsed in the * stable rbtree and stable_node will be equal to * stable_node_dup like if the chain never existed. */ if (!stable_node_dup) { /* * Either all stable_node dups were full in * this stable_node chain, or this chain was * empty and should be rb_erased. */ stable_node_any = stable_node_dup_any(stable_node, root); if (!stable_node_any) { /* rb_erase just run */ goto again; } /* * Take any of the stable_node dups page of * this stable_node chain to let the tree walk * continue. All KSM pages belonging to the * stable_node dups in a stable_node chain * have the same content and they're * write protected at all times. Any will work * fine to continue the walk. */ tree_page = get_ksm_page(stable_node_any, GET_KSM_PAGE_NOLOCK); } VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); if (!tree_page) { /* * If we walked over a stale stable_node, * get_ksm_page() will call rb_erase() and it * may rebalance the tree from under us. So * restart the search from scratch. Returning * NULL would be safe too, but we'd generate * false negative insertions just because some * stable_node was stale. */ goto again; } ret = memcmp_pages(page, tree_page); put_page(tree_page); parent = *new; if (ret < 0) new = &parent->rb_left; else if (ret > 0) new = &parent->rb_right; else { if (page_node) { VM_BUG_ON(page_node->head != &migrate_nodes); /* * Test if the migrated page should be merged * into a stable node dup. 
If the mapcount is * 1 we can migrate it with another KSM page * without adding it to the chain. */ if (page_mapcount(page) > 1) goto chain_append; } if (!stable_node_dup) { /* * If the stable_node is a chain and * we got a payload match in memcmp * but we cannot merge the scanned * page in any of the existing * stable_node dups because they're * all full, we need to wait the * scanned page to find itself a match * in the unstable tree to create a * brand new KSM page to add later to * the dups of this stable_node. */ return NULL; } /* * Lock and unlock the stable_node's page (which * might already have been migrated) so that page * migration is sure to notice its raised count. * It would be more elegant to return stable_node * than kpage, but that involves more changes. */ tree_page = get_ksm_page(stable_node_dup, GET_KSM_PAGE_TRYLOCK); if (PTR_ERR(tree_page) == -EBUSY) return ERR_PTR(-EBUSY); if (unlikely(!tree_page)) /* * The tree may have been rebalanced, * so re-evaluate parent and new. */ goto again; unlock_page(tree_page); if (get_kpfn_nid(stable_node_dup->kpfn) != NUMA(stable_node_dup->nid)) { put_page(tree_page); goto replace; } return tree_page; } } if (!page_node) return NULL; list_del(&page_node->list); DO_NUMA(page_node->nid = nid); rb_link_node(&page_node->node, parent, new); rb_insert_color(&page_node->node, root); out: if (is_page_sharing_candidate(page_node)) { get_page(page); return page; } else return NULL; replace: /* * If stable_node was a chain and chain_prune collapsed it, * stable_node has been updated to be the new regular * stable_node. A collapse of the chain is indistinguishable * from the case there was no chain in the stable * rbtree. Otherwise stable_node is the chain and * stable_node_dup is the dup to replace. */ if (stable_node_dup == stable_node) { VM_BUG_ON(is_stable_node_chain(stable_node_dup)); VM_BUG_ON(is_stable_node_dup(stable_node_dup)); /* there is no chain */ if (page_node) { VM_BUG_ON(page_node->head != &migrate_nodes); list_del(&page_node->list); DO_NUMA(page_node->nid = nid); rb_replace_node(&stable_node_dup->node, &page_node->node, root); if (is_page_sharing_candidate(page_node)) get_page(page); else page = NULL; } else { rb_erase(&stable_node_dup->node, root); page = NULL; } } else { VM_BUG_ON(!is_stable_node_chain(stable_node)); __stable_node_dup_del(stable_node_dup); if (page_node) { VM_BUG_ON(page_node->head != &migrate_nodes); list_del(&page_node->list); DO_NUMA(page_node->nid = nid); stable_node_chain_add_dup(page_node, stable_node); if (is_page_sharing_candidate(page_node)) get_page(page); else page = NULL; } else { page = NULL; } } stable_node_dup->head = &migrate_nodes; list_add(&stable_node_dup->list, stable_node_dup->head); return page; chain_append: /* stable_node_dup could be null if it reached the limit */ if (!stable_node_dup) stable_node_dup = stable_node_any; /* * If stable_node was a chain and chain_prune collapsed it, * stable_node has been updated to be the new regular * stable_node. A collapse of the chain is indistinguishable * from the case there was no chain in the stable * rbtree. Otherwise stable_node is the chain and * stable_node_dup is the dup to replace. */ if (stable_node_dup == stable_node) { VM_BUG_ON(is_stable_node_dup(stable_node_dup)); /* chain is missing so create it */ stable_node = alloc_stable_node_chain(stable_node_dup, root); if (!stable_node) return NULL; } /* * Add this stable_node dup that was * migrated to the stable_node chain * of the current nid for this page * content. 
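 *
 * Illustration of one way we get here (the migration side is handled
 * by folio_migrate_ksm() later in this file): with
 * merge_across_nodes == 0, a KSM page that is physically migrated, say
 * from node 0 to node 1, only has its stable_node->kpfn updated; the
 * node is parked on the migrate_nodes list, and the next
 * stable_tree_search() for that content lands here and re-queues it as
 * a dup of node 1's chain.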
*/ VM_BUG_ON(!is_stable_node_dup(stable_node_dup)); VM_BUG_ON(page_node->head != &migrate_nodes); list_del(&page_node->list); DO_NUMA(page_node->nid = nid); stable_node_chain_add_dup(page_node, stable_node); goto out; } /* * stable_tree_insert - insert stable tree node pointing to new ksm page * into the stable tree. * * This function returns the stable tree node just allocated on success, * NULL otherwise. */ static struct ksm_stable_node *stable_tree_insert(struct page *kpage) { int nid; unsigned long kpfn; struct rb_root *root; struct rb_node **new; struct rb_node *parent; struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any; bool need_chain = false; kpfn = page_to_pfn(kpage); nid = get_kpfn_nid(kpfn); root = root_stable_tree + nid; again: parent = NULL; new = &root->rb_node; while (*new) { struct page *tree_page; int ret; cond_resched(); stable_node = rb_entry(*new, struct ksm_stable_node, node); stable_node_any = NULL; tree_page = chain(&stable_node_dup, stable_node, root); if (!stable_node_dup) { /* * Either all stable_node dups were full in * this stable_node chain, or this chain was * empty and should be rb_erased. */ stable_node_any = stable_node_dup_any(stable_node, root); if (!stable_node_any) { /* rb_erase just run */ goto again; } /* * Take any of the stable_node dups page of * this stable_node chain to let the tree walk * continue. All KSM pages belonging to the * stable_node dups in a stable_node chain * have the same content and they're * write protected at all times. Any will work * fine to continue the walk. */ tree_page = get_ksm_page(stable_node_any, GET_KSM_PAGE_NOLOCK); } VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); if (!tree_page) { /* * If we walked over a stale stable_node, * get_ksm_page() will call rb_erase() and it * may rebalance the tree from under us. So * restart the search from scratch. Returning * NULL would be safe too, but we'd generate * false negative insertions just because some * stable_node was stale. */ goto again; } ret = memcmp_pages(kpage, tree_page); put_page(tree_page); parent = *new; if (ret < 0) new = &parent->rb_left; else if (ret > 0) new = &parent->rb_right; else { need_chain = true; break; } } stable_node_dup = alloc_stable_node(); if (!stable_node_dup) return NULL; INIT_HLIST_HEAD(&stable_node_dup->hlist); stable_node_dup->kpfn = kpfn; set_page_stable_node(kpage, stable_node_dup); stable_node_dup->rmap_hlist_len = 0; DO_NUMA(stable_node_dup->nid = nid); if (!need_chain) { rb_link_node(&stable_node_dup->node, parent, new); rb_insert_color(&stable_node_dup->node, root); } else { if (!is_stable_node_chain(stable_node)) { struct ksm_stable_node *orig = stable_node; /* chain is missing so create it */ stable_node = alloc_stable_node_chain(orig, root); if (!stable_node) { free_stable_node(stable_node_dup); return NULL; } } stable_node_chain_add_dup(stable_node_dup, stable_node); } return stable_node_dup; } /* * unstable_tree_search_insert - search for identical page, * else insert rmap_item into the unstable tree. * * This function searches for a page in the unstable tree identical to the * page currently being scanned; and if no identical page is found in the * tree, we insert rmap_item as a new object into the unstable tree. * * This function returns pointer to rmap_item found to be identical * to the currently scanned page, NULL otherwise. * * This function does both searching and inserting, because they share * the same walking algorithm in an rbtree. 
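 *
 * Note, for orientation: a page only reaches this function once its
 * checksum has been seen unchanged since the previous scan;
 * cmp_and_merge_page() below recomputes
 *
 *	checksum = calc_checksum(page);
 *
 * (an xxhash of the whole page) and bails out early if it differs from
 * rmap_item->oldchecksum, so rapidly changing pages never pollute this
 * tree.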
*/ static struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item, struct page *page, struct page **tree_pagep) { struct rb_node **new; struct rb_root *root; struct rb_node *parent = NULL; int nid; nid = get_kpfn_nid(page_to_pfn(page)); root = root_unstable_tree + nid; new = &root->rb_node; while (*new) { struct ksm_rmap_item *tree_rmap_item; struct page *tree_page; int ret; cond_resched(); tree_rmap_item = rb_entry(*new, struct ksm_rmap_item, node); tree_page = get_mergeable_page(tree_rmap_item); if (!tree_page) return NULL; /* * Don't substitute a ksm page for a forked page. */ if (page == tree_page) { put_page(tree_page); return NULL; } ret = memcmp_pages(page, tree_page); parent = *new; if (ret < 0) { put_page(tree_page); new = &parent->rb_left; } else if (ret > 0) { put_page(tree_page); new = &parent->rb_right; } else if (!ksm_merge_across_nodes && page_to_nid(tree_page) != nid) { /* * If tree_page has been migrated to another NUMA node, * it will be flushed out and put in the right unstable * tree next time: only merge with it when across_nodes. */ put_page(tree_page); return NULL; } else { *tree_pagep = tree_page; return tree_rmap_item; } } rmap_item->address |= UNSTABLE_FLAG; rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); DO_NUMA(rmap_item->nid = nid); rb_link_node(&rmap_item->node, parent, new); rb_insert_color(&rmap_item->node, root); ksm_pages_unshared++; return NULL; } /* * stable_tree_append - add another rmap_item to the linked list of * rmap_items hanging off a given node of the stable tree, all sharing * the same ksm page. */ static void stable_tree_append(struct ksm_rmap_item *rmap_item, struct ksm_stable_node *stable_node, bool max_page_sharing_bypass) { /* * rmap won't find this mapping if we don't insert the * rmap_item in the right stable_node * duplicate. page_migration could break later if rmap breaks, * so we can as well crash here. We really need to check for * rmap_hlist_len == STABLE_NODE_CHAIN, but we can as well check * for other negative values as an underflow if detected here * for the first time (and not when decreasing rmap_hlist_len) * would be sign of memory corruption in the stable_node. */ BUG_ON(stable_node->rmap_hlist_len < 0); stable_node->rmap_hlist_len++; if (!max_page_sharing_bypass) /* possibly non fatal but unexpected overflow, only warn */ WARN_ON_ONCE(stable_node->rmap_hlist_len > ksm_max_page_sharing); rmap_item->head = stable_node; rmap_item->address |= STABLE_FLAG; hlist_add_head(&rmap_item->hlist, &stable_node->hlist); if (rmap_item->hlist.next) ksm_pages_sharing++; else ksm_pages_shared++; rmap_item->mm->ksm_merging_pages++; } /* * cmp_and_merge_page - first see if page can be merged into the stable tree; * if not, compare checksum to previous and if it's the same, see if page can * be inserted into the unstable tree, or merged with a page already there and * both transferred to the stable tree. * * @page: the page that we are searching identical page to. 
* @rmap_item: the reverse mapping into the virtual address of this page */ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item) { struct mm_struct *mm = rmap_item->mm; struct ksm_rmap_item *tree_rmap_item; struct page *tree_page = NULL; struct ksm_stable_node *stable_node; struct page *kpage; unsigned int checksum; int err; bool max_page_sharing_bypass = false; stable_node = page_stable_node(page); if (stable_node) { if (stable_node->head != &migrate_nodes && get_kpfn_nid(READ_ONCE(stable_node->kpfn)) != NUMA(stable_node->nid)) { stable_node_dup_del(stable_node); stable_node->head = &migrate_nodes; list_add(&stable_node->list, stable_node->head); } if (stable_node->head != &migrate_nodes && rmap_item->head == stable_node) return; /* * If it's a KSM fork, allow it to go over the sharing limit * without warnings. */ if (!is_page_sharing_candidate(stable_node)) max_page_sharing_bypass = true; } /* We first start with searching the page inside the stable tree */ kpage = stable_tree_search(page); if (kpage == page && rmap_item->head == stable_node) { put_page(kpage); return; } remove_rmap_item_from_tree(rmap_item); if (kpage) { if (PTR_ERR(kpage) == -EBUSY) return; err = try_to_merge_with_ksm_page(rmap_item, page, kpage); if (!err) { /* * The page was successfully merged: * add its rmap_item to the stable tree. */ lock_page(kpage); stable_tree_append(rmap_item, page_stable_node(kpage), max_page_sharing_bypass); unlock_page(kpage); } put_page(kpage); return; } /* * If the hash value of the page has changed from the last time * we calculated it, this page is changing frequently: therefore we * don't want to insert it in the unstable tree, and we don't want * to waste our time searching for something identical to it there. */ checksum = calc_checksum(page); if (rmap_item->oldchecksum != checksum) { rmap_item->oldchecksum = checksum; return; } /* * Same checksum as an empty page. We attempt to merge it with the * appropriate zero page if the user enabled this via sysfs. */ if (ksm_use_zero_pages && (checksum == zero_checksum)) { struct vm_area_struct *vma; mmap_read_lock(mm); vma = find_mergeable_vma(mm, rmap_item->address); if (vma) { err = try_to_merge_one_page(vma, page, ZERO_PAGE(rmap_item->address)); trace_ksm_merge_one_page( page_to_pfn(ZERO_PAGE(rmap_item->address)), rmap_item, mm, err); } else { /* * If the vma is out of date, we do not need to * continue. */ err = 0; } mmap_read_unlock(mm); /* * In case of failure, the page was not really empty, so we * need to continue. Otherwise we're done. */ if (!err) return; } tree_rmap_item = unstable_tree_search_insert(rmap_item, page, &tree_page); if (tree_rmap_item) { bool split; kpage = try_to_merge_two_pages(rmap_item, page, tree_rmap_item, tree_page); /* * If both pages we tried to merge belong to the same compound * page, then we actually ended up increasing the reference * count of the same compound page twice, and split_huge_page * failed. * Here we set a flag if that happened, and we use it later to * try split_huge_page again. Since we call put_page right * afterwards, the reference count will be correct and * split_huge_page should succeed. */ split = PageTransCompound(page) && compound_head(page) == compound_head(tree_page); put_page(tree_page); if (kpage) { /* * The pages were successfully merged: insert new * node in the stable tree and add both rmap_items. 
*/ lock_page(kpage); stable_node = stable_tree_insert(kpage); if (stable_node) { stable_tree_append(tree_rmap_item, stable_node, false); stable_tree_append(rmap_item, stable_node, false); } unlock_page(kpage); /* * If we fail to insert the page into the stable tree, * we will have 2 virtual addresses that are pointing * to a ksm page left outside the stable tree, * in which case we need to break_cow on both. */ if (!stable_node) { break_cow(tree_rmap_item); break_cow(rmap_item); } } else if (split) { /* * We are here if we tried to merge two pages and * failed because they both belonged to the same * compound page. We will split the page now, but no * merging will take place. * We do not want to add the cost of a full lock; if * the page is locked, it is better to skip it and * perhaps try again later. */ if (!trylock_page(page)) return; split_huge_page(page); unlock_page(page); } } } static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot, struct ksm_rmap_item **rmap_list, unsigned long addr) { struct ksm_rmap_item *rmap_item; while (*rmap_list) { rmap_item = *rmap_list; if ((rmap_item->address & PAGE_MASK) == addr) return rmap_item; if (rmap_item->address > addr) break; *rmap_list = rmap_item->rmap_list; remove_rmap_item_from_tree(rmap_item); free_rmap_item(rmap_item); } rmap_item = alloc_rmap_item(); if (rmap_item) { /* It has already been zeroed */ rmap_item->mm = mm_slot->slot.mm; rmap_item->mm->ksm_rmap_items++; rmap_item->address = addr; rmap_item->rmap_list = *rmap_list; *rmap_list = rmap_item; } return rmap_item; } static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page) { struct mm_struct *mm; struct ksm_mm_slot *mm_slot; struct mm_slot *slot; struct vm_area_struct *vma; struct ksm_rmap_item *rmap_item; struct vma_iterator vmi; int nid; if (list_empty(&ksm_mm_head.slot.mm_node)) return NULL; mm_slot = ksm_scan.mm_slot; if (mm_slot == &ksm_mm_head) { trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items); /* * A number of pages can hang around indefinitely in per-cpu * LRU cache, raised page count preventing write_protect_page * from merging them. Though it doesn't really matter much, * it is puzzling to see some stuck in pages_volatile until * other activity jostles them out, and they also prevented * LTP's KSM test from succeeding deterministically; so drain * them here (here rather than on entry to ksm_do_scan(), * so we don't IPI too often when pages_to_scan is set low). */ lru_add_drain_all(); /* * Whereas stale stable_nodes on the stable_tree itself * get pruned in the regular course of stable_tree_search(), * those moved out to the migrate_nodes list can accumulate: * so prune them once before each full scan. */ if (!ksm_merge_across_nodes) { struct ksm_stable_node *stable_node, *next; struct page *page; list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { page = get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK); if (page) put_page(page); cond_resched(); } } for (nid = 0; nid < ksm_nr_node_ids; nid++) root_unstable_tree[nid] = RB_ROOT; spin_lock(&ksm_mmlist_lock); slot = list_entry(mm_slot->slot.mm_node.next, struct mm_slot, mm_node); mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); ksm_scan.mm_slot = mm_slot; spin_unlock(&ksm_mmlist_lock); /* * Although we tested list_empty() above, a racing __ksm_exit * of the last mm on the list may have removed it since then. 
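 *
 * For orientation, the scan cursor that the rest of this function
 * advances is the triple
 *
 *	ksm_scan.mm_slot	which mm is currently being scanned
 *	ksm_scan.address	the next virtual address in that mm
 *	ksm_scan.rmap_list	where the next rmap_item will be linked
 *
 * which is what lets ksmd drop every lock between pages and still
 * resume exactly where it left off.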
*/ if (mm_slot == &ksm_mm_head) return NULL; next_mm: ksm_scan.address = 0; ksm_scan.rmap_list = &mm_slot->rmap_list; } slot = &mm_slot->slot; mm = slot->mm; vma_iter_init(&vmi, mm, ksm_scan.address); mmap_read_lock(mm); if (ksm_test_exit(mm)) goto no_vmas; for_each_vma(vmi, vma) { if (!(vma->vm_flags & VM_MERGEABLE)) continue; if (ksm_scan.address < vma->vm_start) ksm_scan.address = vma->vm_start; if (!vma->anon_vma) ksm_scan.address = vma->vm_end; while (ksm_scan.address < vma->vm_end) { if (ksm_test_exit(mm)) break; *page = follow_page(vma, ksm_scan.address, FOLL_GET); if (IS_ERR_OR_NULL(*page)) { ksm_scan.address += PAGE_SIZE; cond_resched(); continue; } if (is_zone_device_page(*page)) goto next_page; if (PageAnon(*page)) { flush_anon_page(vma, *page, ksm_scan.address); flush_dcache_page(*page); rmap_item = get_next_rmap_item(mm_slot, ksm_scan.rmap_list, ksm_scan.address); if (rmap_item) { ksm_scan.rmap_list = &rmap_item->rmap_list; ksm_scan.address += PAGE_SIZE; } else put_page(*page); mmap_read_unlock(mm); return rmap_item; } next_page: put_page(*page); ksm_scan.address += PAGE_SIZE; cond_resched(); } } if (ksm_test_exit(mm)) { no_vmas: ksm_scan.address = 0; ksm_scan.rmap_list = &mm_slot->rmap_list; } /* * Nuke all the rmap_items that are above this current rmap: * because there were no VM_MERGEABLE vmas with such addresses. */ remove_trailing_rmap_items(ksm_scan.rmap_list); spin_lock(&ksm_mmlist_lock); slot = list_entry(mm_slot->slot.mm_node.next, struct mm_slot, mm_node); ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); if (ksm_scan.address == 0) { /* * We've completed a full scan of all vmas, holding mmap_lock * throughout, and found no VM_MERGEABLE: so do the same as * __ksm_exit does to remove this mm from all our lists now. * This applies either when cleaning up after __ksm_exit * (but beware: we can reach here even before __ksm_exit), * or when all VM_MERGEABLE areas have been unmapped (and * mmap_lock then protects against race with MADV_MERGEABLE). */ hash_del(&mm_slot->slot.hash); list_del(&mm_slot->slot.mm_node); spin_unlock(&ksm_mmlist_lock); mm_slot_free(mm_slot_cache, mm_slot); clear_bit(MMF_VM_MERGEABLE, &mm->flags); clear_bit(MMF_VM_MERGE_ANY, &mm->flags); mmap_read_unlock(mm); mmdrop(mm); } else { mmap_read_unlock(mm); /* * mmap_read_unlock(mm) first because after * spin_unlock(&ksm_mmlist_lock) run, the "mm" may * already have been freed under us by __ksm_exit() * because the "mm_slot" is still hashed and * ksm_scan.mm_slot doesn't point to it anymore. */ spin_unlock(&ksm_mmlist_lock); } /* Repeat until we've completed scanning the whole list */ mm_slot = ksm_scan.mm_slot; if (mm_slot != &ksm_mm_head) goto next_mm; trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items); ksm_scan.seqnr++; return NULL; } /** * ksm_do_scan - the ksm scanner main worker function. * @scan_npages: number of pages we want to scan before we return. 
*/ static void ksm_do_scan(unsigned int scan_npages) { struct ksm_rmap_item *rmap_item; struct page *page; unsigned int npages = scan_npages; while (npages-- && likely(!freezing(current))) { cond_resched(); rmap_item = scan_get_next_rmap_item(&page); if (!rmap_item) return; cmp_and_merge_page(page, rmap_item); put_page(page); } ksm_pages_scanned += scan_npages - npages; } static int ksmd_should_run(void) { return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node); } static int ksm_scan_thread(void *nothing) { unsigned int sleep_ms; set_freezable(); set_user_nice(current, 5); while (!kthread_should_stop()) { mutex_lock(&ksm_thread_mutex); wait_while_offlining(); if (ksmd_should_run()) ksm_do_scan(ksm_thread_pages_to_scan); mutex_unlock(&ksm_thread_mutex); try_to_freeze(); if (ksmd_should_run()) { sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs); wait_event_interruptible_timeout(ksm_iter_wait, sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs), msecs_to_jiffies(sleep_ms)); } else { wait_event_freezable(ksm_thread_wait, ksmd_should_run() || kthread_should_stop()); } } return 0; } static void __ksm_add_vma(struct vm_area_struct *vma) { unsigned long vm_flags = vma->vm_flags; if (vm_flags & VM_MERGEABLE) return; if (vma_ksm_compatible(vma)) vm_flags_set(vma, VM_MERGEABLE); } static int __ksm_del_vma(struct vm_area_struct *vma) { int err; if (!(vma->vm_flags & VM_MERGEABLE)) return 0; if (vma->anon_vma) { err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true); if (err) return err; } vm_flags_clear(vma, VM_MERGEABLE); return 0; } /** * ksm_add_vma - Mark vma as mergeable if compatible * * @vma: Pointer to vma */ void ksm_add_vma(struct vm_area_struct *vma) { struct mm_struct *mm = vma->vm_mm; if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) __ksm_add_vma(vma); } static void ksm_add_vmas(struct mm_struct *mm) { struct vm_area_struct *vma; VMA_ITERATOR(vmi, mm, 0); for_each_vma(vmi, vma) __ksm_add_vma(vma); } static int ksm_del_vmas(struct mm_struct *mm) { struct vm_area_struct *vma; int err; VMA_ITERATOR(vmi, mm, 0); for_each_vma(vmi, vma) { err = __ksm_del_vma(vma); if (err) return err; } return 0; } /** * ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all * compatible VMA's * * @mm: Pointer to mm * * Returns 0 on success, otherwise error code */ int ksm_enable_merge_any(struct mm_struct *mm) { int err; if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) return 0; if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { err = __ksm_enter(mm); if (err) return err; } set_bit(MMF_VM_MERGE_ANY, &mm->flags); ksm_add_vmas(mm); return 0; } /** * ksm_disable_merge_any - Disable merging on all compatible VMA's of the mm, * previously enabled via ksm_enable_merge_any(). * * Disabling merging implies unmerging any merged pages, like setting * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and * merging on all compatible VMA's remains enabled. 
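 *
 * From userspace this pairs with the process-wide prctl interface; an
 * illustrative sketch only (not code from this file, and ksmd must be
 * running via /sys/kernel/mm/ksm/run for any merging to happen):
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0);	  opt the whole mm in
 *	prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0);	  opt out again; if the
 *						  unmerge fails, the prctl
 *						  fails and merging stays on
 *
 * Individual ranges can instead use madvise(MADV_MERGEABLE), handled by
 * ksm_madvise() below.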
* * @mm: Pointer to mm * * Returns 0 on success, otherwise error code */ int ksm_disable_merge_any(struct mm_struct *mm) { int err; if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags)) return 0; err = ksm_del_vmas(mm); if (err) { ksm_add_vmas(mm); return err; } clear_bit(MMF_VM_MERGE_ANY, &mm->flags); return 0; } int ksm_disable(struct mm_struct *mm) { mmap_assert_write_locked(mm); if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) return 0; if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) return ksm_disable_merge_any(mm); return ksm_del_vmas(mm); } int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags) { struct mm_struct *mm = vma->vm_mm; int err; switch (advice) { case MADV_MERGEABLE: if (vma->vm_flags & VM_MERGEABLE) return 0; if (!vma_ksm_compatible(vma)) return 0; if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { err = __ksm_enter(mm); if (err) return err; } *vm_flags |= VM_MERGEABLE; break; case MADV_UNMERGEABLE: if (!(*vm_flags & VM_MERGEABLE)) return 0; /* just ignore the advice */ if (vma->anon_vma) { err = unmerge_ksm_pages(vma, start, end, true); if (err) return err; } *vm_flags &= ~VM_MERGEABLE; break; } return 0; } EXPORT_SYMBOL_GPL(ksm_madvise); int __ksm_enter(struct mm_struct *mm) { struct ksm_mm_slot *mm_slot; struct mm_slot *slot; int needs_wakeup; mm_slot = mm_slot_alloc(mm_slot_cache); if (!mm_slot) return -ENOMEM; slot = &mm_slot->slot; /* Check ksm_run too? Would need tighter locking */ needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node); spin_lock(&ksm_mmlist_lock); mm_slot_insert(mm_slots_hash, mm, slot); /* * When KSM_RUN_MERGE (or KSM_RUN_STOP), * insert just behind the scanning cursor, to let the area settle * down a little; when fork is followed by immediate exec, we don't * want ksmd to waste time setting up and tearing down an rmap_list. * * But when KSM_RUN_UNMERGE, it's important to insert ahead of its * scanning cursor, otherwise KSM pages in newly forked mms will be * missed: then we might as well insert at the end of the list. */ if (ksm_run & KSM_RUN_UNMERGE) list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node); else list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node); spin_unlock(&ksm_mmlist_lock); set_bit(MMF_VM_MERGEABLE, &mm->flags); mmgrab(mm); if (needs_wakeup) wake_up_interruptible(&ksm_thread_wait); trace_ksm_enter(mm); return 0; } void __ksm_exit(struct mm_struct *mm) { struct ksm_mm_slot *mm_slot; struct mm_slot *slot; int easy_to_free = 0; /* * This process is exiting: if it's straightforward (as is the * case when ksmd was never running), free mm_slot immediately. * But if it's at the cursor or has rmap_items linked to it, use * mmap_lock to synchronize with any break_cows before pagetables * are freed, and leave the mm_slot on the list for ksmd to free. * Beware: ksm may already have noticed it exiting and freed the slot. 
*/ spin_lock(&ksm_mmlist_lock); slot = mm_slot_lookup(mm_slots_hash, mm); mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); if (mm_slot && ksm_scan.mm_slot != mm_slot) { if (!mm_slot->rmap_list) { hash_del(&slot->hash); list_del(&slot->mm_node); easy_to_free = 1; } else { list_move(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node); } } spin_unlock(&ksm_mmlist_lock); if (easy_to_free) { mm_slot_free(mm_slot_cache, mm_slot); clear_bit(MMF_VM_MERGE_ANY, &mm->flags); clear_bit(MMF_VM_MERGEABLE, &mm->flags); mmdrop(mm); } else if (mm_slot) { mmap_write_lock(mm); mmap_write_unlock(mm); } trace_ksm_exit(mm); } struct page *ksm_might_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address) { struct folio *folio = page_folio(page); struct anon_vma *anon_vma = folio_anon_vma(folio); struct page *new_page; if (PageKsm(page)) { if (page_stable_node(page) && !(ksm_run & KSM_RUN_UNMERGE)) return page; /* no need to copy it */ } else if (!anon_vma) { return page; /* no need to copy it */ } else if (page->index == linear_page_index(vma, address) && anon_vma->root == vma->anon_vma->root) { return page; /* still no need to copy it */ } if (PageHWPoison(page)) return ERR_PTR(-EHWPOISON); if (!PageUptodate(page)) return page; /* let do_swap_page report the error */ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); if (new_page && mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) { put_page(new_page); new_page = NULL; } if (new_page) { if (copy_mc_user_highpage(new_page, page, address, vma)) { put_page(new_page); memory_failure_queue(page_to_pfn(page), 0); return ERR_PTR(-EHWPOISON); } SetPageDirty(new_page); __SetPageUptodate(new_page); __SetPageLocked(new_page); #ifdef CONFIG_SWAP count_vm_event(KSM_SWPIN_COPY); #endif } return new_page; } void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) { struct ksm_stable_node *stable_node; struct ksm_rmap_item *rmap_item; int search_new_forks = 0; VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio); /* * Rely on the page lock to protect against concurrent modifications * to that page's node of the stable tree. */ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); stable_node = folio_stable_node(folio); if (!stable_node) return; again: hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { struct anon_vma *anon_vma = rmap_item->anon_vma; struct anon_vma_chain *vmac; struct vm_area_struct *vma; cond_resched(); if (!anon_vma_trylock_read(anon_vma)) { if (rwc->try_lock) { rwc->contended = true; return; } anon_vma_lock_read(anon_vma); } anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, 0, ULONG_MAX) { unsigned long addr; cond_resched(); vma = vmac->vma; /* Ignore the stable/unstable/sqnr flags */ addr = rmap_item->address & PAGE_MASK; if (addr < vma->vm_start || addr >= vma->vm_end) continue; /* * Initially we examine only the vma which covers this * rmap_item; but later, if there is still work to do, * we examine covering vmas in other mms: in case they * were forked from the original since ksmd passed. */ if ((rmap_item->mm == vma->vm_mm) == search_new_forks) continue; if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) continue; if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) { anon_vma_unlock_read(anon_vma); return; } if (rwc->done && rwc->done(folio)) { anon_vma_unlock_read(anon_vma); return; } } anon_vma_unlock_read(anon_vma); } if (!search_new_forks++) goto again; } #ifdef CONFIG_MEMORY_FAILURE /* * Collect processes when the error hit an ksm page. 
*/ void collect_procs_ksm(struct page *page, struct list_head *to_kill, int force_early) { struct ksm_stable_node *stable_node; struct ksm_rmap_item *rmap_item; struct folio *folio = page_folio(page); struct vm_area_struct *vma; struct task_struct *tsk; stable_node = folio_stable_node(folio); if (!stable_node) return; hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { struct anon_vma *av = rmap_item->anon_vma; anon_vma_lock_read(av); rcu_read_lock(); for_each_process(tsk) { struct anon_vma_chain *vmac; unsigned long addr; struct task_struct *t = task_early_kill(tsk, force_early); if (!t) continue; anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0, ULONG_MAX) { vma = vmac->vma; if (vma->vm_mm == t->mm) { addr = rmap_item->address & PAGE_MASK; add_to_kill_ksm(t, page, vma, to_kill, addr); } } } rcu_read_unlock(); anon_vma_unlock_read(av); } } #endif #ifdef CONFIG_MIGRATION void folio_migrate_ksm(struct folio *newfolio, struct folio *folio) { struct ksm_stable_node *stable_node; VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio); VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio); stable_node = folio_stable_node(folio); if (stable_node) { VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio); stable_node->kpfn = folio_pfn(newfolio); /* * newfolio->mapping was set in advance; now we need smp_wmb() * to make sure that the new stable_node->kpfn is visible * to get_ksm_page() before it can see that folio->mapping * has gone stale (or that folio_test_swapcache has been cleared). */ smp_wmb(); set_page_stable_node(&folio->page, NULL); } } #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_MEMORY_HOTREMOVE static void wait_while_offlining(void) { while (ksm_run & KSM_RUN_OFFLINE) { mutex_unlock(&ksm_thread_mutex); wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE), TASK_UNINTERRUPTIBLE); mutex_lock(&ksm_thread_mutex); } } static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node, unsigned long start_pfn, unsigned long end_pfn) { if (stable_node->kpfn >= start_pfn && stable_node->kpfn < end_pfn) { /* * Don't get_ksm_page, page has already gone: * which is why we keep kpfn instead of page* */ remove_node_from_stable_tree(stable_node); return true; } return false; } static bool stable_node_chain_remove_range(struct ksm_stable_node *stable_node, unsigned long start_pfn, unsigned long end_pfn, struct rb_root *root) { struct ksm_stable_node *dup; struct hlist_node *hlist_safe; if (!is_stable_node_chain(stable_node)) { VM_BUG_ON(is_stable_node_dup(stable_node)); return stable_node_dup_remove_range(stable_node, start_pfn, end_pfn); } hlist_for_each_entry_safe(dup, hlist_safe, &stable_node->hlist, hlist_dup) { VM_BUG_ON(!is_stable_node_dup(dup)); stable_node_dup_remove_range(dup, start_pfn, end_pfn); } if (hlist_empty(&stable_node->hlist)) { free_stable_node_chain(stable_node, root); return true; /* notify caller that tree was rebalanced */ } else return false; } static void ksm_check_stable_tree(unsigned long start_pfn, unsigned long end_pfn) { struct ksm_stable_node *stable_node, *next; struct rb_node *node; int nid; for (nid = 0; nid < ksm_nr_node_ids; nid++) { node = rb_first(root_stable_tree + nid); while (node) { stable_node = rb_entry(node, struct ksm_stable_node, node); if (stable_node_chain_remove_range(stable_node, start_pfn, end_pfn, root_stable_tree + nid)) node = rb_first(root_stable_tree + nid); else node = rb_next(node); cond_resched(); } } list_for_each_entry_safe(stable_node, next, &migrate_nodes, 
list) { if (stable_node->kpfn >= start_pfn && stable_node->kpfn < end_pfn) remove_node_from_stable_tree(stable_node); cond_resched(); } } static int ksm_memory_callback(struct notifier_block *self, unsigned long action, void *arg) { struct memory_notify *mn = arg; switch (action) { case MEM_GOING_OFFLINE: /* * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items() * and remove_all_stable_nodes() while memory is going offline: * it is unsafe for them to touch the stable tree at this time. * But unmerge_ksm_pages(), rmap lookups and other entry points * which do not need the ksm_thread_mutex are all safe. */ mutex_lock(&ksm_thread_mutex); ksm_run |= KSM_RUN_OFFLINE; mutex_unlock(&ksm_thread_mutex); break; case MEM_OFFLINE: /* * Most of the work is done by page migration; but there might * be a few stable_nodes left over, still pointing to struct * pages which have been offlined: prune those from the tree, * otherwise get_ksm_page() might later try to access a * non-existent struct page. */ ksm_check_stable_tree(mn->start_pfn, mn->start_pfn + mn->nr_pages); fallthrough; case MEM_CANCEL_OFFLINE: mutex_lock(&ksm_thread_mutex); ksm_run &= ~KSM_RUN_OFFLINE; mutex_unlock(&ksm_thread_mutex); smp_mb(); /* wake_up_bit advises this */ wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE)); break; } return NOTIFY_OK; } #else static void wait_while_offlining(void) { } #endif /* CONFIG_MEMORY_HOTREMOVE */ #ifdef CONFIG_PROC_FS long ksm_process_profit(struct mm_struct *mm) { return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE - mm->ksm_rmap_items * sizeof(struct ksm_rmap_item); } #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_SYSFS /* * This all compiles without CONFIG_SYSFS, but is a waste of space. */ #define KSM_ATTR_RO(_name) \ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) #define KSM_ATTR(_name) \ static struct kobj_attribute _name##_attr = __ATTR_RW(_name) static ssize_t sleep_millisecs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs); } static ssize_t sleep_millisecs_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned int msecs; int err; err = kstrtouint(buf, 10, &msecs); if (err) return -EINVAL; ksm_thread_sleep_millisecs = msecs; wake_up_interruptible(&ksm_iter_wait); return count; } KSM_ATTR(sleep_millisecs); static ssize_t pages_to_scan_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan); } static ssize_t pages_to_scan_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned int nr_pages; int err; err = kstrtouint(buf, 10, &nr_pages); if (err) return -EINVAL; ksm_thread_pages_to_scan = nr_pages; return count; } KSM_ATTR(pages_to_scan); static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_run); } static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned int flags; int err; err = kstrtouint(buf, 10, &flags); if (err) return -EINVAL; if (flags > KSM_RUN_UNMERGE) return -EINVAL; /* * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, * breaking COW to free the pages_shared (but leaves mm_slots * on the list for when ksmd may be set running again). 
*/ mutex_lock(&ksm_thread_mutex); wait_while_offlining(); if (ksm_run != flags) { ksm_run = flags; if (flags & KSM_RUN_UNMERGE) { set_current_oom_origin(); err = unmerge_and_remove_all_rmap_items(); clear_current_oom_origin(); if (err) { ksm_run = KSM_RUN_STOP; count = err; } } } mutex_unlock(&ksm_thread_mutex); if (flags & KSM_RUN_MERGE) wake_up_interruptible(&ksm_thread_wait); return count; } KSM_ATTR(run); #ifdef CONFIG_NUMA static ssize_t merge_across_nodes_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes); } static ssize_t merge_across_nodes_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long knob; err = kstrtoul(buf, 10, &knob); if (err) return err; if (knob > 1) return -EINVAL; mutex_lock(&ksm_thread_mutex); wait_while_offlining(); if (ksm_merge_across_nodes != knob) { if (ksm_pages_shared || remove_all_stable_nodes()) err = -EBUSY; else if (root_stable_tree == one_stable_tree) { struct rb_root *buf; /* * This is the first time that we switch away from the * default of merging across nodes: must now allocate * a buffer to hold as many roots as may be needed. * Allocate stable and unstable together: * MAXSMP NODES_SHIFT 10 will use 16kB. */ buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf), GFP_KERNEL); /* Let us assume that RB_ROOT is NULL is zero */ if (!buf) err = -ENOMEM; else { root_stable_tree = buf; root_unstable_tree = buf + nr_node_ids; /* Stable tree is empty but not the unstable */ root_unstable_tree[0] = one_unstable_tree[0]; } } if (!err) { ksm_merge_across_nodes = knob; ksm_nr_node_ids = knob ? 1 : nr_node_ids; } } mutex_unlock(&ksm_thread_mutex); return err ? err : count; } KSM_ATTR(merge_across_nodes); #endif static ssize_t use_zero_pages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", ksm_use_zero_pages); } static ssize_t use_zero_pages_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; bool value; err = kstrtobool(buf, &value); if (err) return -EINVAL; ksm_use_zero_pages = value; return count; } KSM_ATTR(use_zero_pages); static ssize_t max_page_sharing_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", ksm_max_page_sharing); } static ssize_t max_page_sharing_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; int knob; err = kstrtoint(buf, 10, &knob); if (err) return err; /* * When a KSM page is created it is shared by 2 mappings. This * being a signed comparison, it implicitly verifies it's not * negative. */ if (knob < 2) return -EINVAL; if (READ_ONCE(ksm_max_page_sharing) == knob) return count; mutex_lock(&ksm_thread_mutex); wait_while_offlining(); if (ksm_max_page_sharing != knob) { if (ksm_pages_shared || remove_all_stable_nodes()) err = -EBUSY; else ksm_max_page_sharing = knob; } mutex_unlock(&ksm_thread_mutex); return err ? 
err : count; } KSM_ATTR(max_page_sharing); static ssize_t pages_scanned_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_pages_scanned); } KSM_ATTR_RO(pages_scanned); static ssize_t pages_shared_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_pages_shared); } KSM_ATTR_RO(pages_shared); static ssize_t pages_sharing_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_pages_sharing); } KSM_ATTR_RO(pages_sharing); static ssize_t pages_unshared_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_pages_unshared); } KSM_ATTR_RO(pages_unshared); static ssize_t pages_volatile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { long ksm_pages_volatile; ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared - ksm_pages_sharing - ksm_pages_unshared; /* * It was not worth any locking to calculate that statistic, * but it might therefore sometimes be negative: conceal that. */ if (ksm_pages_volatile < 0) ksm_pages_volatile = 0; return sysfs_emit(buf, "%ld\n", ksm_pages_volatile); } KSM_ATTR_RO(pages_volatile); static ssize_t ksm_zero_pages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%ld\n", ksm_zero_pages); } KSM_ATTR_RO(ksm_zero_pages); static ssize_t general_profit_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { long general_profit; general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE - ksm_rmap_items * sizeof(struct ksm_rmap_item); return sysfs_emit(buf, "%ld\n", general_profit); } KSM_ATTR_RO(general_profit); static ssize_t stable_node_dups_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups); } KSM_ATTR_RO(stable_node_dups); static ssize_t stable_node_chains_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains); } KSM_ATTR_RO(stable_node_chains); static ssize_t stable_node_chains_prune_millisecs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs); } static ssize_t stable_node_chains_prune_millisecs_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned int msecs; int err; err = kstrtouint(buf, 10, &msecs); if (err) return -EINVAL; ksm_stable_node_chains_prune_millisecs = msecs; return count; } KSM_ATTR(stable_node_chains_prune_millisecs); static ssize_t full_scans_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr); } KSM_ATTR_RO(full_scans); static struct attribute *ksm_attrs[] = { &sleep_millisecs_attr.attr, &pages_to_scan_attr.attr, &run_attr.attr, &pages_scanned_attr.attr, &pages_shared_attr.attr, &pages_sharing_attr.attr, &pages_unshared_attr.attr, &pages_volatile_attr.attr, &ksm_zero_pages_attr.attr, &full_scans_attr.attr, #ifdef CONFIG_NUMA &merge_across_nodes_attr.attr, #endif &max_page_sharing_attr.attr, &stable_node_chains_attr.attr, &stable_node_dups_attr.attr, &stable_node_chains_prune_millisecs_attr.attr, &use_zero_pages_attr.attr, &general_profit_attr.attr, NULL, }; static const struct attribute_group ksm_attr_group = { .attrs = ksm_attrs, .name = "ksm", }; #endif /* CONFIG_SYSFS */ static int __init ksm_init(void) { struct task_struct 
*ksm_thread; int err; /* The correct value depends on page size and endianness */ zero_checksum = calc_checksum(ZERO_PAGE(0)); /* Default to false for backwards compatibility */ ksm_use_zero_pages = false; err = ksm_slab_init(); if (err) goto out; ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); if (IS_ERR(ksm_thread)) { pr_err("ksm: creating kthread failed\n"); err = PTR_ERR(ksm_thread); goto out_free; } #ifdef CONFIG_SYSFS err = sysfs_create_group(mm_kobj, &ksm_attr_group); if (err) { pr_err("ksm: register sysfs failed\n"); kthread_stop(ksm_thread); goto out_free; } #else ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ #endif /* CONFIG_SYSFS */ #ifdef CONFIG_MEMORY_HOTREMOVE /* There is no significance to this priority 100 */ hotplug_memory_notifier(ksm_memory_callback, KSM_CALLBACK_PRI); #endif return 0; out_free: ksm_slab_free(); out: return err; } subsys_initcall(ksm_init);
linux-master
mm/ksm.c
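The KSM entry points above are reached from user space either per-VMA, via madvise(MADV_MERGEABLE) which lands in ksm_madvise(), or per-process, via prctl(PR_SET_MEMORY_MERGE, 1, ...) which lands in ksm_enable_merge_any(). The following is a minimal user-space sketch of that opt-in path, not part of mm/ksm.c; it assumes a kernel built with CONFIG_KSM, an administrator who has written 1 to /sys/kernel/mm/ksm/run, and it hard-codes PR_SET_MEMORY_MERGE as 67 only as a fallback when the installed headers predate it.

/*
 * Illustrative user-space sketch: drive the KSM opt-in paths shown above.
 * Not kernel code. Assumes CONFIG_KSM and /sys/kernel/mm/ksm/run == 1.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <unistd.h>

#ifndef PR_SET_MEMORY_MERGE
#define PR_SET_MEMORY_MERGE 67	/* assumption: value used by recent kernels */
#endif

int main(void)
{
	size_t len = 64 * sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;

	/* Identical page contents are what ksmd can later merge. */
	memset(buf, 0x5a, len);

	/* Per-VMA opt-in: sets VM_MERGEABLE via ksm_madvise(). */
	if (madvise(buf, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");

	/* Whole-mm opt-in: sets MMF_VM_MERGE_ANY via ksm_enable_merge_any(). */
	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
		perror("prctl(PR_SET_MEMORY_MERGE)");

	pause();	/* give ksmd time to scan */
	return 0;
}

While the program sleeps, merging progress can be observed through the sysfs attributes defined above, for example pages_sharing and general_profit under /sys/kernel/mm/ksm/.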
// SPDX-License-Identifier: GPL-2.0 /* * linux/mm/page_io.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Swap reorganised 29.12.95, * Asynchronous swapping added 30.12.95. Stephen Tweedie * Removed race in async swapping. 14.4.1996. Bruno Haible * Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie * Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman */ #include <linux/mm.h> #include <linux/kernel_stat.h> #include <linux/gfp.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/bio.h> #include <linux/swapops.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/psi.h> #include <linux/uio.h> #include <linux/sched/task.h> #include <linux/delayacct.h> #include <linux/zswap.h> #include "swap.h" static void __end_swap_bio_write(struct bio *bio) { struct folio *folio = bio_first_folio_all(bio); if (bio->bi_status) { /* * We failed to write the page out to swap-space. * Re-dirty the page in order to avoid it being reclaimed. * Also print a dire warning that things will go BAD (tm) * very quickly. * * Also clear PG_reclaim to avoid folio_rotate_reclaimable() */ folio_mark_dirty(folio); pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), (unsigned long long)bio->bi_iter.bi_sector); folio_clear_reclaim(folio); } folio_end_writeback(folio); } static void end_swap_bio_write(struct bio *bio) { __end_swap_bio_write(bio); bio_put(bio); } static void __end_swap_bio_read(struct bio *bio) { struct folio *folio = bio_first_folio_all(bio); if (bio->bi_status) { pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), (unsigned long long)bio->bi_iter.bi_sector); } else { folio_mark_uptodate(folio); } folio_unlock(folio); } static void end_swap_bio_read(struct bio *bio) { __end_swap_bio_read(bio); bio_put(bio); } int generic_swapfile_activate(struct swap_info_struct *sis, struct file *swap_file, sector_t *span) { struct address_space *mapping = swap_file->f_mapping; struct inode *inode = mapping->host; unsigned blocks_per_page; unsigned long page_no; unsigned blkbits; sector_t probe_block; sector_t last_block; sector_t lowest_block = -1; sector_t highest_block = 0; int nr_extents = 0; int ret; blkbits = inode->i_blkbits; blocks_per_page = PAGE_SIZE >> blkbits; /* * Map all the blocks into the extent tree. This code doesn't try * to be very smart. 
*/ probe_block = 0; page_no = 0; last_block = i_size_read(inode) >> blkbits; while ((probe_block + blocks_per_page) <= last_block && page_no < sis->max) { unsigned block_in_page; sector_t first_block; cond_resched(); first_block = probe_block; ret = bmap(inode, &first_block); if (ret || !first_block) goto bad_bmap; /* * It must be PAGE_SIZE aligned on-disk */ if (first_block & (blocks_per_page - 1)) { probe_block++; goto reprobe; } for (block_in_page = 1; block_in_page < blocks_per_page; block_in_page++) { sector_t block; block = probe_block + block_in_page; ret = bmap(inode, &block); if (ret || !block) goto bad_bmap; if (block != first_block + block_in_page) { /* Discontiguity */ probe_block++; goto reprobe; } } first_block >>= (PAGE_SHIFT - blkbits); if (page_no) { /* exclude the header page */ if (first_block < lowest_block) lowest_block = first_block; if (first_block > highest_block) highest_block = first_block; } /* * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks */ ret = add_swap_extent(sis, page_no, 1, first_block); if (ret < 0) goto out; nr_extents += ret; page_no++; probe_block += blocks_per_page; reprobe: continue; } ret = nr_extents; *span = 1 + highest_block - lowest_block; if (page_no == 0) page_no = 1; /* force Empty message */ sis->max = page_no; sis->pages = page_no - 1; sis->highest_bit = page_no - 1; out: return ret; bad_bmap: pr_err("swapon: swapfile has holes\n"); ret = -EINVAL; goto out; } /* * We may have stale swap cache pages in memory: notice * them here and get rid of the unnecessary final write. */ int swap_writepage(struct page *page, struct writeback_control *wbc) { struct folio *folio = page_folio(page); int ret; if (folio_free_swap(folio)) { folio_unlock(folio); return 0; } /* * Arch code may have to preserve more data than just the page * contents, e.g. memory tags. 
*/ ret = arch_prepare_to_swap(&folio->page); if (ret) { folio_mark_dirty(folio); folio_unlock(folio); return ret; } if (zswap_store(folio)) { folio_start_writeback(folio); folio_unlock(folio); folio_end_writeback(folio); return 0; } __swap_writepage(&folio->page, wbc); return 0; } static inline void count_swpout_vm_event(struct folio *folio) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (unlikely(folio_test_pmd_mappable(folio))) count_vm_event(THP_SWPOUT); #endif count_vm_events(PSWPOUT, folio_nr_pages(folio)); } #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio) { struct cgroup_subsys_state *css; struct mem_cgroup *memcg; memcg = folio_memcg(folio); if (!memcg) return; rcu_read_lock(); css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys); bio_associate_blkg_from_css(bio, css); rcu_read_unlock(); } #else #define bio_associate_blkg_from_page(bio, folio) do { } while (0) #endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */ struct swap_iocb { struct kiocb iocb; struct bio_vec bvec[SWAP_CLUSTER_MAX]; int pages; int len; }; static mempool_t *sio_pool; int sio_pool_init(void) { if (!sio_pool) { mempool_t *pool = mempool_create_kmalloc_pool( SWAP_CLUSTER_MAX, sizeof(struct swap_iocb)); if (cmpxchg(&sio_pool, NULL, pool)) mempool_destroy(pool); } if (!sio_pool) return -ENOMEM; return 0; } static void sio_write_complete(struct kiocb *iocb, long ret) { struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb); struct page *page = sio->bvec[0].bv_page; int p; if (ret != sio->len) { /* * In the case of swap-over-nfs, this can be a * temporary failure if the system has limited * memory for allocating transmit buffers. * Mark the page dirty and avoid * folio_rotate_reclaimable but rate-limit the * messages but do not flag PageError like * the normal direct-to-bio case as it could * be temporary. 
*/ pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n", ret, page_file_offset(page)); for (p = 0; p < sio->pages; p++) { page = sio->bvec[p].bv_page; set_page_dirty(page); ClearPageReclaim(page); } } else { for (p = 0; p < sio->pages; p++) count_swpout_vm_event(page_folio(sio->bvec[p].bv_page)); } for (p = 0; p < sio->pages; p++) end_page_writeback(sio->bvec[p].bv_page); mempool_free(sio, sio_pool); } static void swap_writepage_fs(struct page *page, struct writeback_control *wbc) { struct swap_iocb *sio = NULL; struct swap_info_struct *sis = page_swap_info(page); struct file *swap_file = sis->swap_file; loff_t pos = page_file_offset(page); set_page_writeback(page); unlock_page(page); if (wbc->swap_plug) sio = *wbc->swap_plug; if (sio) { if (sio->iocb.ki_filp != swap_file || sio->iocb.ki_pos + sio->len != pos) { swap_write_unplug(sio); sio = NULL; } } if (!sio) { sio = mempool_alloc(sio_pool, GFP_NOIO); init_sync_kiocb(&sio->iocb, swap_file); sio->iocb.ki_complete = sio_write_complete; sio->iocb.ki_pos = pos; sio->pages = 0; sio->len = 0; } bvec_set_page(&sio->bvec[sio->pages], page, thp_size(page), 0); sio->len += thp_size(page); sio->pages += 1; if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) { swap_write_unplug(sio); sio = NULL; } if (wbc->swap_plug) *wbc->swap_plug = sio; } static void swap_writepage_bdev_sync(struct page *page, struct writeback_control *wbc, struct swap_info_struct *sis) { struct bio_vec bv; struct bio bio; struct folio *folio = page_folio(page); bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc)); bio.bi_iter.bi_sector = swap_page_sector(page); __bio_add_page(&bio, page, thp_size(page), 0); bio_associate_blkg_from_page(&bio, folio); count_swpout_vm_event(folio); folio_start_writeback(folio); folio_unlock(folio); submit_bio_wait(&bio); __end_swap_bio_write(&bio); } static void swap_writepage_bdev_async(struct page *page, struct writeback_control *wbc, struct swap_info_struct *sis) { struct bio *bio; struct folio *folio = page_folio(page); bio = bio_alloc(sis->bdev, 1, REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc), GFP_NOIO); bio->bi_iter.bi_sector = swap_page_sector(page); bio->bi_end_io = end_swap_bio_write; __bio_add_page(bio, page, thp_size(page), 0); bio_associate_blkg_from_page(bio, folio); count_swpout_vm_event(folio); folio_start_writeback(folio); folio_unlock(folio); submit_bio(bio); } void __swap_writepage(struct page *page, struct writeback_control *wbc) { struct swap_info_struct *sis = page_swap_info(page); VM_BUG_ON_PAGE(!PageSwapCache(page), page); /* * ->flags can be updated non-atomicially (scan_swap_map_slots), * but that will never affect SWP_FS_OPS, so the data_race * is safe. 
*/ if (data_race(sis->flags & SWP_FS_OPS)) swap_writepage_fs(page, wbc); else if (sis->flags & SWP_SYNCHRONOUS_IO) swap_writepage_bdev_sync(page, wbc, sis); else swap_writepage_bdev_async(page, wbc, sis); } void swap_write_unplug(struct swap_iocb *sio) { struct iov_iter from; struct address_space *mapping = sio->iocb.ki_filp->f_mapping; int ret; iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len); ret = mapping->a_ops->swap_rw(&sio->iocb, &from); if (ret != -EIOCBQUEUED) sio_write_complete(&sio->iocb, ret); } static void sio_read_complete(struct kiocb *iocb, long ret) { struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb); int p; if (ret == sio->len) { for (p = 0; p < sio->pages; p++) { struct folio *folio = page_folio(sio->bvec[p].bv_page); folio_mark_uptodate(folio); folio_unlock(folio); } count_vm_events(PSWPIN, sio->pages); } else { for (p = 0; p < sio->pages; p++) { struct folio *folio = page_folio(sio->bvec[p].bv_page); folio_unlock(folio); } pr_alert_ratelimited("Read-error on swap-device\n"); } mempool_free(sio, sio_pool); } static void swap_readpage_fs(struct page *page, struct swap_iocb **plug) { struct swap_info_struct *sis = page_swap_info(page); struct swap_iocb *sio = NULL; loff_t pos = page_file_offset(page); if (plug) sio = *plug; if (sio) { if (sio->iocb.ki_filp != sis->swap_file || sio->iocb.ki_pos + sio->len != pos) { swap_read_unplug(sio); sio = NULL; } } if (!sio) { sio = mempool_alloc(sio_pool, GFP_KERNEL); init_sync_kiocb(&sio->iocb, sis->swap_file); sio->iocb.ki_pos = pos; sio->iocb.ki_complete = sio_read_complete; sio->pages = 0; sio->len = 0; } bvec_set_page(&sio->bvec[sio->pages], page, thp_size(page), 0); sio->len += thp_size(page); sio->pages += 1; if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) { swap_read_unplug(sio); sio = NULL; } if (plug) *plug = sio; } static void swap_readpage_bdev_sync(struct page *page, struct swap_info_struct *sis) { struct bio_vec bv; struct bio bio; bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ); bio.bi_iter.bi_sector = swap_page_sector(page); __bio_add_page(&bio, page, thp_size(page), 0); /* * Keep this task valid during swap readpage because the oom killer may * attempt to access it in the page fault retry time check. */ get_task_struct(current); count_vm_event(PSWPIN); submit_bio_wait(&bio); __end_swap_bio_read(&bio); put_task_struct(current); } static void swap_readpage_bdev_async(struct page *page, struct swap_info_struct *sis) { struct bio *bio; bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL); bio->bi_iter.bi_sector = swap_page_sector(page); bio->bi_end_io = end_swap_bio_read; __bio_add_page(bio, page, thp_size(page), 0); count_vm_event(PSWPIN); submit_bio(bio); } void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug) { struct folio *folio = page_folio(page); struct swap_info_struct *sis = page_swap_info(page); bool workingset = folio_test_workingset(folio); unsigned long pflags; bool in_thrashing; VM_BUG_ON_FOLIO(!folio_test_swapcache(folio) && !synchronous, folio); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio); /* * Count submission time as memory stall and delay. When the device * is congested, or the submitting cgroup IO-throttled, submission * can be a significant part of overall IO time. 
*/ if (workingset) { delayacct_thrashing_start(&in_thrashing); psi_memstall_enter(&pflags); } delayacct_swapin_start(); if (zswap_load(folio)) { folio_mark_uptodate(folio); folio_unlock(folio); } else if (data_race(sis->flags & SWP_FS_OPS)) { swap_readpage_fs(page, plug); } else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) { swap_readpage_bdev_sync(page, sis); } else { swap_readpage_bdev_async(page, sis); } if (workingset) { delayacct_thrashing_end(&in_thrashing); psi_memstall_leave(&pflags); } delayacct_swapin_end(); } void __swap_read_unplug(struct swap_iocb *sio) { struct iov_iter from; struct address_space *mapping = sio->iocb.ki_filp->f_mapping; int ret; iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len); ret = mapping->a_ops->swap_rw(&sio->iocb, &from); if (ret != -EIOCBQUEUED) sio_read_complete(&sio->iocb, ret); }
linux-master
mm/page_io.c
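generic_swapfile_activate() above only accepts a page-sized run of file blocks as a swap extent when the run starts PAGE_SIZE-aligned on disk and every following block is physically adjacent; otherwise it bumps probe_block and reprobes. The sketch below restates that contiguity rule as a standalone C function, purely for illustration: it is not kernel code, and the blocks[] array is a hypothetical stand-in for the repeated bmap() lookups, with 0 meaning a hole.

/*
 * Standalone sketch of the swap-extent contiguity rule enforced by
 * generic_swapfile_activate(). blocks[] maps each logical file block to
 * its on-disk block number, 0 meaning a hole (stand-in for bmap()).
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

static bool page_run_is_contiguous(const sector_t *blocks,
				   unsigned long probe_block,
				   unsigned int blocks_per_page)
{
	sector_t first = blocks[probe_block];
	unsigned int i;

	if (!first)
		return false;			/* hole in the file */
	if (first & (blocks_per_page - 1))
		return false;			/* run not PAGE_SIZE aligned on disk */

	for (i = 1; i < blocks_per_page; i++) {
		sector_t blk = blocks[probe_block + i];

		if (!blk || blk != first + i)
			return false;		/* hole or discontiguity */
	}
	return true;				/* usable as one swap extent */
}

int main(void)
{
	/* Example: 4KiB pages over 1KiB blocks, four file blocks per page. */
	sector_t blocks[] = { 8, 9, 10, 11, 20, 21, 99, 23 };

	printf("run at 0: %d\n", page_run_is_contiguous(blocks, 0, 4));	/* 1 */
	printf("run at 4: %d\n", page_run_is_contiguous(blocks, 4, 4));	/* 0 */
	return 0;
}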
// SPDX-License-Identifier: GPL-2.0-only /* * mm/interval_tree.c - interval tree for mapping->i_mmap * * Copyright (C) 2012, Michel Lespinasse <[email protected]> */ #include <linux/mm.h> #include <linux/fs.h> #include <linux/rmap.h> #include <linux/interval_tree_generic.h> static inline unsigned long vma_start_pgoff(struct vm_area_struct *v) { return v->vm_pgoff; } static inline unsigned long vma_last_pgoff(struct vm_area_struct *v) { return v->vm_pgoff + vma_pages(v) - 1; } INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb, unsigned long, shared.rb_subtree_last, vma_start_pgoff, vma_last_pgoff, /* empty */, vma_interval_tree) /* Insert node immediately after prev in the interval tree */ void vma_interval_tree_insert_after(struct vm_area_struct *node, struct vm_area_struct *prev, struct rb_root_cached *root) { struct rb_node **link; struct vm_area_struct *parent; unsigned long last = vma_last_pgoff(node); VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node); if (!prev->shared.rb.rb_right) { parent = prev; link = &prev->shared.rb.rb_right; } else { parent = rb_entry(prev->shared.rb.rb_right, struct vm_area_struct, shared.rb); if (parent->shared.rb_subtree_last < last) parent->shared.rb_subtree_last = last; while (parent->shared.rb.rb_left) { parent = rb_entry(parent->shared.rb.rb_left, struct vm_area_struct, shared.rb); if (parent->shared.rb_subtree_last < last) parent->shared.rb_subtree_last = last; } link = &parent->shared.rb.rb_left; } node->shared.rb_subtree_last = last; rb_link_node(&node->shared.rb, &parent->shared.rb, link); rb_insert_augmented(&node->shared.rb, &root->rb_root, &vma_interval_tree_augment); } static inline unsigned long avc_start_pgoff(struct anon_vma_chain *avc) { return vma_start_pgoff(avc->vma); } static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc) { return vma_last_pgoff(avc->vma); } INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last, avc_start_pgoff, avc_last_pgoff, static inline, __anon_vma_interval_tree) void anon_vma_interval_tree_insert(struct anon_vma_chain *node, struct rb_root_cached *root) { #ifdef CONFIG_DEBUG_VM_RB node->cached_vma_start = avc_start_pgoff(node); node->cached_vma_last = avc_last_pgoff(node); #endif __anon_vma_interval_tree_insert(node, root); } void anon_vma_interval_tree_remove(struct anon_vma_chain *node, struct rb_root_cached *root) { __anon_vma_interval_tree_remove(node, root); } struct anon_vma_chain * anon_vma_interval_tree_iter_first(struct rb_root_cached *root, unsigned long first, unsigned long last) { return __anon_vma_interval_tree_iter_first(root, first, last); } struct anon_vma_chain * anon_vma_interval_tree_iter_next(struct anon_vma_chain *node, unsigned long first, unsigned long last) { return __anon_vma_interval_tree_iter_next(node, first, last); } #ifdef CONFIG_DEBUG_VM_RB void anon_vma_interval_tree_verify(struct anon_vma_chain *node) { WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node)); WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node)); } #endif
linux-master
mm/interval_tree.c
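The interval tree above keys each VMA by a closed page-offset interval, from vma_start_pgoff() to vma_last_pgoff() = vm_pgoff + vma_pages() - 1, and iterators such as vma_interval_tree_foreach() report every VMA whose interval overlaps the queried [first, last] range. Below is a small standalone sketch of that closed-interval convention, using a simplified hypothetical struct in place of vm_area_struct; it is illustrative only, not kernel code.

/*
 * Standalone sketch of the closed-interval convention used by the VMA
 * interval tree: [vm_pgoff, vm_pgoff + pages - 1], matched against a
 * query range [first, last]. struct fake_vma is a simplified stand-in.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_vma {
	unsigned long vm_pgoff;		/* start, in pages */
	unsigned long nr_pages;		/* length, in pages */
};

static unsigned long start_pgoff(const struct fake_vma *v)
{
	return v->vm_pgoff;
}

static unsigned long last_pgoff(const struct fake_vma *v)
{
	return v->vm_pgoff + v->nr_pages - 1;	/* closed interval */
}

/* True if the vma's [start, last] interval overlaps the query [first, last]. */
static bool vma_overlaps(const struct fake_vma *v,
			 unsigned long first, unsigned long last)
{
	return start_pgoff(v) <= last && last_pgoff(v) >= first;
}

int main(void)
{
	struct fake_vma v = { .vm_pgoff = 10, .nr_pages = 4 };	/* pages 10..13 */

	printf("overlap [13,20]: %d\n", vma_overlaps(&v, 13, 20));	/* 1 */
	printf("overlap [14,20]: %d\n", vma_overlaps(&v, 14, 20));	/* 0 */
	return 0;
}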
// SPDX-License-Identifier: GPL-2.0-only /* * Access kernel or user memory without faulting. */ #include <linux/export.h> #include <linux/mm.h> #include <linux/uaccess.h> #include <asm/tlb.h> bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) { return true; } #define copy_from_kernel_nofault_loop(dst, src, len, type, err_label) \ while (len >= sizeof(type)) { \ __get_kernel_nofault(dst, src, type, err_label); \ dst += sizeof(type); \ src += sizeof(type); \ len -= sizeof(type); \ } long copy_from_kernel_nofault(void *dst, const void *src, size_t size) { unsigned long align = 0; if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) align = (unsigned long)dst | (unsigned long)src; if (!copy_from_kernel_nofault_allowed(src, size)) return -ERANGE; pagefault_disable(); if (!(align & 7)) copy_from_kernel_nofault_loop(dst, src, size, u64, Efault); if (!(align & 3)) copy_from_kernel_nofault_loop(dst, src, size, u32, Efault); if (!(align & 1)) copy_from_kernel_nofault_loop(dst, src, size, u16, Efault); copy_from_kernel_nofault_loop(dst, src, size, u8, Efault); pagefault_enable(); return 0; Efault: pagefault_enable(); return -EFAULT; } EXPORT_SYMBOL_GPL(copy_from_kernel_nofault); #define copy_to_kernel_nofault_loop(dst, src, len, type, err_label) \ while (len >= sizeof(type)) { \ __put_kernel_nofault(dst, src, type, err_label); \ dst += sizeof(type); \ src += sizeof(type); \ len -= sizeof(type); \ } long copy_to_kernel_nofault(void *dst, const void *src, size_t size) { unsigned long align = 0; if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) align = (unsigned long)dst | (unsigned long)src; pagefault_disable(); if (!(align & 7)) copy_to_kernel_nofault_loop(dst, src, size, u64, Efault); if (!(align & 3)) copy_to_kernel_nofault_loop(dst, src, size, u32, Efault); if (!(align & 1)) copy_to_kernel_nofault_loop(dst, src, size, u16, Efault); copy_to_kernel_nofault_loop(dst, src, size, u8, Efault); pagefault_enable(); return 0; Efault: pagefault_enable(); return -EFAULT; } long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count) { const void *src = unsafe_addr; if (unlikely(count <= 0)) return 0; if (!copy_from_kernel_nofault_allowed(unsafe_addr, count)) return -ERANGE; pagefault_disable(); do { __get_kernel_nofault(dst, src, u8, Efault); dst++; src++; } while (dst[-1] && src - unsafe_addr < count); pagefault_enable(); dst[-1] = '\0'; return src - unsafe_addr; Efault: pagefault_enable(); dst[0] = '\0'; return -EFAULT; } /** * copy_from_user_nofault(): safely attempt to read from a user-space location * @dst: pointer to the buffer that shall take the data * @src: address to read from. This must be a user address. * @size: size of the data chunk * * Safely read from user address @src to the buffer at @dst. If a kernel fault * happens, handle that and return -EFAULT. */ long copy_from_user_nofault(void *dst, const void __user *src, size_t size) { long ret = -EFAULT; if (!__access_ok(src, size)) return ret; if (!nmi_uaccess_okay()) return ret; pagefault_disable(); ret = __copy_from_user_inatomic(dst, src, size); pagefault_enable(); if (ret) return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(copy_from_user_nofault); /** * copy_to_user_nofault(): safely attempt to write to a user-space location * @dst: address to write to * @src: pointer to the data that shall be written * @size: size of the data chunk * * Safely write to address @dst from the buffer at @src. If a kernel fault * happens, handle that and return -EFAULT. 
*/ long copy_to_user_nofault(void __user *dst, const void *src, size_t size) { long ret = -EFAULT; if (access_ok(dst, size)) { pagefault_disable(); ret = __copy_to_user_inatomic(dst, src, size); pagefault_enable(); } if (ret) return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(copy_to_user_nofault); /** * strncpy_from_user_nofault: - Copy a NUL terminated string from unsafe user * address. * @dst: Destination address, in kernel space. This buffer must be at * least @count bytes long. * @unsafe_addr: Unsafe user address. * @count: Maximum number of bytes to copy, including the trailing NUL. * * Copies a NUL-terminated string from unsafe user address to kernel buffer. * * On success, returns the length of the string INCLUDING the trailing NUL. * * If access fails, returns -EFAULT (some data may have been copied * and the trailing NUL added). * * If @count is smaller than the length of the string, copies @count-1 bytes, * sets the last byte of @dst buffer to NUL and returns @count. */ long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, long count) { long ret; if (unlikely(count <= 0)) return 0; pagefault_disable(); ret = strncpy_from_user(dst, unsafe_addr, count); pagefault_enable(); if (ret >= count) { ret = count; dst[ret - 1] = '\0'; } else if (ret > 0) { ret++; } return ret; } /** * strnlen_user_nofault: - Get the size of a user string INCLUDING final NUL. * @unsafe_addr: The string to measure. * @count: Maximum count (including NUL) * * Get the size of a NUL-terminated string in user space without pagefault. * * Returns the size of the string INCLUDING the terminating NUL. * * If the string is too long, returns a number larger than @count. User * has to check the return value against "> count". * On exception (or invalid count), returns 0. * * Unlike strnlen_user, this can be used from IRQ handler etc. because * it disables pagefaults. */ long strnlen_user_nofault(const void __user *unsafe_addr, long count) { int ret; pagefault_disable(); ret = strnlen_user(unsafe_addr, count); pagefault_enable(); return ret; } void __copy_overflow(int size, unsigned long count) { WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); } EXPORT_SYMBOL(__copy_overflow);
linux-master
mm/maccess.c
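copy_from_kernel_nofault() and copy_to_kernel_nofault() above pick their access width from the combined alignment of dst and src: 8-byte-aligned pairs are copied as u64 chunks, then the remaining tail falls through to u32, u16 and finally u8 chunks. The sketch below mirrors that alignment-driven chunking in ordinary user-space C, with memcpy() standing in for the __get/__put_kernel_nofault() accessors, so there is no fault handling; it is illustrative only, not kernel code.

/*
 * Standalone sketch of the alignment-driven chunking used by
 * copy_{from,to}_kernel_nofault(): the widest chunk the combined
 * alignment allows, with the tail falling through to narrower ones.
 */
#include <stdint.h>
#include <string.h>

#define COPY_CHUNKS(dst, src, len, type)			\
	while ((len) >= sizeof(type)) {				\
		memcpy(dst, src, sizeof(type));			\
		(dst) += sizeof(type);				\
		(src) += sizeof(type);				\
		(len) -= sizeof(type);				\
	}

static void copy_aligned_chunks(void *dstp, const void *srcp, size_t len)
{
	unsigned char *dst = dstp;
	const unsigned char *src = srcp;
	uintptr_t align = (uintptr_t)dst | (uintptr_t)src;

	if (!(align & 7))
		COPY_CHUNKS(dst, src, len, uint64_t);
	if (!(align & 3))
		COPY_CHUNKS(dst, src, len, uint32_t);
	if (!(align & 1))
		COPY_CHUNKS(dst, src, len, uint16_t);
	COPY_CHUNKS(dst, src, len, uint8_t);
}

int main(void)
{
	char src[37], dst[37];
	size_t i;

	for (i = 0; i < sizeof(src); i++)
		src[i] = (char)i;
	copy_aligned_chunks(dst, src, sizeof(src));
	return memcmp(dst, src, sizeof(src)) ? 1 : 0;
}

Note that, as in the kernel loops, the alignment mask is computed once: a pair aligned to 8 bytes is also aligned to 4, 2 and 1, so after the wide copies the leftover bytes are picked up by the narrower loops.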
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 1993 Linus Torvalds * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <[email protected]>, May 2000 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002 * Numa awareness, Christoph Lameter, SGI, June 2005 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019 */ #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/highmem.h> #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/set_memory.h> #include <linux/debugobjects.h> #include <linux/kallsyms.h> #include <linux/list.h> #include <linux/notifier.h> #include <linux/rbtree.h> #include <linux/xarray.h> #include <linux/io.h> #include <linux/rcupdate.h> #include <linux/pfn.h> #include <linux/kmemleak.h> #include <linux/atomic.h> #include <linux/compiler.h> #include <linux/memcontrol.h> #include <linux/llist.h> #include <linux/uio.h> #include <linux/bitops.h> #include <linux/rbtree_augmented.h> #include <linux/overflow.h> #include <linux/pgtable.h> #include <linux/hugetlb.h> #include <linux/sched/mm.h> #include <asm/tlbflush.h> #include <asm/shmparam.h> #define CREATE_TRACE_POINTS #include <trace/events/vmalloc.h> #include "internal.h" #include "pgalloc-track.h" #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1; static int __init set_nohugeiomap(char *str) { ioremap_max_page_shift = PAGE_SHIFT; return 0; } early_param("nohugeiomap", set_nohugeiomap); #else /* CONFIG_HAVE_ARCH_HUGE_VMAP */ static const unsigned int ioremap_max_page_shift = PAGE_SHIFT; #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC static bool __ro_after_init vmap_allow_huge = true; static int __init set_nohugevmalloc(char *str) { vmap_allow_huge = false; return 0; } early_param("nohugevmalloc", set_nohugevmalloc); #else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */ static const bool vmap_allow_huge = false; #endif /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */ bool is_vmalloc_addr(const void *x) { unsigned long addr = (unsigned long)kasan_reset_tag(x); return addr >= VMALLOC_START && addr < VMALLOC_END; } EXPORT_SYMBOL(is_vmalloc_addr); struct vfree_deferred { struct llist_head list; struct work_struct wq; }; static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred); /*** Page table manipulation functions ***/ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift, pgtbl_mod_mask *mask) { pte_t *pte; u64 pfn; unsigned long size = PAGE_SIZE; pfn = phys_addr >> PAGE_SHIFT; pte = pte_alloc_kernel_track(pmd, addr, mask); if (!pte) return -ENOMEM; do { BUG_ON(!pte_none(ptep_get(pte))); #ifdef CONFIG_HUGETLB_PAGE size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift); if (size != PAGE_SIZE) { pte_t entry = pfn_pte(pfn, prot); entry = arch_make_huge_pte(entry, ilog2(size), 0); set_huge_pte_at(&init_mm, addr, pte, entry); pfn += PFN_DOWN(size); continue; } #endif set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); pfn++; } while (pte += PFN_DOWN(size), addr += size, addr != end); *mask |= PGTBL_PTE_MODIFIED; return 0; } static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift) { if (max_page_shift < 
PMD_SHIFT) return 0; if (!arch_vmap_pmd_supported(prot)) return 0; if ((end - addr) != PMD_SIZE) return 0; if (!IS_ALIGNED(addr, PMD_SIZE)) return 0; if (!IS_ALIGNED(phys_addr, PMD_SIZE)) return 0; if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr)) return 0; return pmd_set_huge(pmd, phys_addr, prot); } static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift, pgtbl_mod_mask *mask) { pmd_t *pmd; unsigned long next; pmd = pmd_alloc_track(&init_mm, pud, addr, mask); if (!pmd) return -ENOMEM; do { next = pmd_addr_end(addr, end); if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot, max_page_shift)) { *mask |= PGTBL_PMD_MODIFIED; continue; } if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask)) return -ENOMEM; } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); return 0; } static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift) { if (max_page_shift < PUD_SHIFT) return 0; if (!arch_vmap_pud_supported(prot)) return 0; if ((end - addr) != PUD_SIZE) return 0; if (!IS_ALIGNED(addr, PUD_SIZE)) return 0; if (!IS_ALIGNED(phys_addr, PUD_SIZE)) return 0; if (pud_present(*pud) && !pud_free_pmd_page(pud, addr)) return 0; return pud_set_huge(pud, phys_addr, prot); } static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift, pgtbl_mod_mask *mask) { pud_t *pud; unsigned long next; pud = pud_alloc_track(&init_mm, p4d, addr, mask); if (!pud) return -ENOMEM; do { next = pud_addr_end(addr, end); if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot, max_page_shift)) { *mask |= PGTBL_PUD_MODIFIED; continue; } if (vmap_pmd_range(pud, addr, next, phys_addr, prot, max_page_shift, mask)) return -ENOMEM; } while (pud++, phys_addr += (next - addr), addr = next, addr != end); return 0; } static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift) { if (max_page_shift < P4D_SHIFT) return 0; if (!arch_vmap_p4d_supported(prot)) return 0; if ((end - addr) != P4D_SIZE) return 0; if (!IS_ALIGNED(addr, P4D_SIZE)) return 0; if (!IS_ALIGNED(phys_addr, P4D_SIZE)) return 0; if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr)) return 0; return p4d_set_huge(p4d, phys_addr, prot); } static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift, pgtbl_mod_mask *mask) { p4d_t *p4d; unsigned long next; p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); if (!p4d) return -ENOMEM; do { next = p4d_addr_end(addr, end); if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot, max_page_shift)) { *mask |= PGTBL_P4D_MODIFIED; continue; } if (vmap_pud_range(p4d, addr, next, phys_addr, prot, max_page_shift, mask)) return -ENOMEM; } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); return 0; } static int vmap_range_noflush(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift) { pgd_t *pgd; unsigned long start; unsigned long next; int err; pgtbl_mod_mask mask = 0; might_sleep(); BUG_ON(addr >= end); start = addr; pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, max_page_shift, &mask); if (err) break; } while (pgd++, phys_addr += (next - 
addr), addr = next, addr != end); if (mask & ARCH_PAGE_TABLE_SYNC_MASK) arch_sync_kernel_mappings(start, end); return err; } int ioremap_page_range(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot) { int err; err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot), ioremap_max_page_shift); flush_cache_vmap(addr, end); if (!err) err = kmsan_ioremap_page_range(addr, end, phys_addr, prot, ioremap_max_page_shift); return err; } static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgtbl_mod_mask *mask) { pte_t *pte; pte = pte_offset_kernel(pmd, addr); do { pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); WARN_ON(!pte_none(ptent) && !pte_present(ptent)); } while (pte++, addr += PAGE_SIZE, addr != end); *mask |= PGTBL_PTE_MODIFIED; } static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, pgtbl_mod_mask *mask) { pmd_t *pmd; unsigned long next; int cleared; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); cleared = pmd_clear_huge(pmd); if (cleared || pmd_bad(*pmd)) *mask |= PGTBL_PMD_MODIFIED; if (cleared) continue; if (pmd_none_or_clear_bad(pmd)) continue; vunmap_pte_range(pmd, addr, next, mask); cond_resched(); } while (pmd++, addr = next, addr != end); } static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, pgtbl_mod_mask *mask) { pud_t *pud; unsigned long next; int cleared; pud = pud_offset(p4d, addr); do { next = pud_addr_end(addr, end); cleared = pud_clear_huge(pud); if (cleared || pud_bad(*pud)) *mask |= PGTBL_PUD_MODIFIED; if (cleared) continue; if (pud_none_or_clear_bad(pud)) continue; vunmap_pmd_range(pud, addr, next, mask); } while (pud++, addr = next, addr != end); } static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, pgtbl_mod_mask *mask) { p4d_t *p4d; unsigned long next; p4d = p4d_offset(pgd, addr); do { next = p4d_addr_end(addr, end); p4d_clear_huge(p4d); if (p4d_bad(*p4d)) *mask |= PGTBL_P4D_MODIFIED; if (p4d_none_or_clear_bad(p4d)) continue; vunmap_pud_range(p4d, addr, next, mask); } while (p4d++, addr = next, addr != end); } /* * vunmap_range_noflush is similar to vunmap_range, but does not * flush caches or TLBs. * * The caller is responsible for calling flush_cache_vmap() before calling * this function, and flush_tlb_kernel_range after it has returned * successfully (and before the addresses are expected to cause a page fault * or be re-mapped for something else, if TLB flushes are being delayed or * coalesced). * * This is an internal function only. Do not use outside mm/. */ void __vunmap_range_noflush(unsigned long start, unsigned long end) { unsigned long next; pgd_t *pgd; unsigned long addr = start; pgtbl_mod_mask mask = 0; BUG_ON(addr >= end); pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); if (pgd_bad(*pgd)) mask |= PGTBL_PGD_MODIFIED; if (pgd_none_or_clear_bad(pgd)) continue; vunmap_p4d_range(pgd, addr, next, &mask); } while (pgd++, addr = next, addr != end); if (mask & ARCH_PAGE_TABLE_SYNC_MASK) arch_sync_kernel_mappings(start, end); } void vunmap_range_noflush(unsigned long start, unsigned long end) { kmsan_vunmap_range_noflush(start, end); __vunmap_range_noflush(start, end); } /** * vunmap_range - unmap kernel virtual addresses * @addr: start of the VM area to unmap * @end: end of the VM area to unmap (non-inclusive) * * Clears any present PTEs in the virtual address range, flushes TLBs and * caches. Any subsequent access to the address before it has been re-mapped * is a kernel bug. 
*/ void vunmap_range(unsigned long addr, unsigned long end) { flush_cache_vunmap(addr, end); vunmap_range_noflush(addr, end); flush_tlb_kernel_range(addr, end); } static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask) { pte_t *pte; /* * nr is a running index into the array which helps higher level * callers keep track of where we're up to. */ pte = pte_alloc_kernel_track(pmd, addr, mask); if (!pte) return -ENOMEM; do { struct page *page = pages[*nr]; if (WARN_ON(!pte_none(ptep_get(pte)))) return -EBUSY; if (WARN_ON(!page)) return -ENOMEM; if (WARN_ON(!pfn_valid(page_to_pfn(page)))) return -EINVAL; set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); (*nr)++; } while (pte++, addr += PAGE_SIZE, addr != end); *mask |= PGTBL_PTE_MODIFIED; return 0; } static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask) { pmd_t *pmd; unsigned long next; pmd = pmd_alloc_track(&init_mm, pud, addr, mask); if (!pmd) return -ENOMEM; do { next = pmd_addr_end(addr, end); if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask)) return -ENOMEM; } while (pmd++, addr = next, addr != end); return 0; } static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask) { pud_t *pud; unsigned long next; pud = pud_alloc_track(&init_mm, p4d, addr, mask); if (!pud) return -ENOMEM; do { next = pud_addr_end(addr, end); if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask)) return -ENOMEM; } while (pud++, addr = next, addr != end); return 0; } static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask) { p4d_t *p4d; unsigned long next; p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); if (!p4d) return -ENOMEM; do { next = p4d_addr_end(addr, end); if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask)) return -ENOMEM; } while (p4d++, addr = next, addr != end); return 0; } static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages) { unsigned long start = addr; pgd_t *pgd; unsigned long next; int err = 0; int nr = 0; pgtbl_mod_mask mask = 0; BUG_ON(addr >= end); pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); if (pgd_bad(*pgd)) mask |= PGTBL_PGD_MODIFIED; err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask); if (err) return err; } while (pgd++, addr = next, addr != end); if (mask & ARCH_PAGE_TABLE_SYNC_MASK) arch_sync_kernel_mappings(start, end); return 0; } /* * vmap_pages_range_noflush is similar to vmap_pages_range, but does not * flush caches. * * The caller is responsible for calling flush_cache_vmap() after this * function returns successfully and before the addresses are accessed. * * This is an internal function only. Do not use outside mm/. 
*/ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, unsigned int page_shift) { unsigned int i, nr = (end - addr) >> PAGE_SHIFT; WARN_ON(page_shift < PAGE_SHIFT); if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) || page_shift == PAGE_SHIFT) return vmap_small_pages_range_noflush(addr, end, prot, pages); for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) { int err; err = vmap_range_noflush(addr, addr + (1UL << page_shift), page_to_phys(pages[i]), prot, page_shift); if (err) return err; addr += 1UL << page_shift; } return 0; } int vmap_pages_range_noflush(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, unsigned int page_shift) { int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift); if (ret) return ret; return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift); } /** * vmap_pages_range - map pages to a kernel virtual address * @addr: start of the VM area to map * @end: end of the VM area to map (non-inclusive) * @prot: page protection flags to use * @pages: pages to map (always PAGE_SIZE pages) * @page_shift: maximum shift that the pages may be mapped with, @pages must * be aligned and contiguous up to at least this shift. * * RETURNS: * 0 on success, -errno on failure. */ static int vmap_pages_range(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, unsigned int page_shift) { int err; err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift); flush_cache_vmap(addr, end); return err; } int is_vmalloc_or_module_addr(const void *x) { /* * ARM, x86-64 and sparc64 put modules in a special place, * and fall back on vmalloc() if that fails. Others * just put it in the vmalloc space. */ #if defined(CONFIG_MODULES) && defined(MODULES_VADDR) unsigned long addr = (unsigned long)kasan_reset_tag(x); if (addr >= MODULES_VADDR && addr < MODULES_END) return 1; #endif return is_vmalloc_addr(x); } EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr); /* * Walk a vmap address to the struct page it maps. Huge vmap mappings will * return the tail page that corresponds to the base page address, which * matches small vmap mappings. */ struct page *vmalloc_to_page(const void *vmalloc_addr) { unsigned long addr = (unsigned long) vmalloc_addr; struct page *page = NULL; pgd_t *pgd = pgd_offset_k(addr); p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *ptep, pte; /* * XXX we might need to change this if we add VIRTUAL_BUG_ON for * architectures that do not vmalloc module space */ VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr)); if (pgd_none(*pgd)) return NULL; if (WARN_ON_ONCE(pgd_leaf(*pgd))) return NULL; /* XXX: no allowance for huge pgd */ if (WARN_ON_ONCE(pgd_bad(*pgd))) return NULL; p4d = p4d_offset(pgd, addr); if (p4d_none(*p4d)) return NULL; if (p4d_leaf(*p4d)) return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT); if (WARN_ON_ONCE(p4d_bad(*p4d))) return NULL; pud = pud_offset(p4d, addr); if (pud_none(*pud)) return NULL; if (pud_leaf(*pud)) return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); if (WARN_ON_ONCE(pud_bad(*pud))) return NULL; pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) return NULL; if (pmd_leaf(*pmd)) return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); if (WARN_ON_ONCE(pmd_bad(*pmd))) return NULL; ptep = pte_offset_kernel(pmd, addr); pte = ptep_get(ptep); if (pte_present(pte)) page = pte_page(pte); return page; } EXPORT_SYMBOL(vmalloc_to_page); /* * Map a vmalloc()-space virtual address to the physical page frame number. 
*/ unsigned long vmalloc_to_pfn(const void *vmalloc_addr) { return page_to_pfn(vmalloc_to_page(vmalloc_addr)); } EXPORT_SYMBOL(vmalloc_to_pfn); /*** Global kva allocator ***/ #define DEBUG_AUGMENT_PROPAGATE_CHECK 0 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0 static DEFINE_SPINLOCK(vmap_area_lock); static DEFINE_SPINLOCK(free_vmap_area_lock); /* Export for kexec only */ LIST_HEAD(vmap_area_list); static struct rb_root vmap_area_root = RB_ROOT; static bool vmap_initialized __read_mostly; static struct rb_root purge_vmap_area_root = RB_ROOT; static LIST_HEAD(purge_vmap_area_list); static DEFINE_SPINLOCK(purge_vmap_area_lock); /* * This kmem_cache is used for vmap_area objects. Instead of * allocating from slab we reuse an object from this cache to * make things faster. Especially in "no edge" splitting of * free block. */ static struct kmem_cache *vmap_area_cachep; /* * This linked list is used in pair with free_vmap_area_root. * It gives O(1) access to prev/next to perform fast coalescing. */ static LIST_HEAD(free_vmap_area_list); /* * This augment red-black tree represents the free vmap space. * All vmap_area objects in this tree are sorted by va->va_start * address. It is used for allocation and merging when a vmap * object is released. * * Each vmap_area node contains a maximum available free block * of its sub-tree, right or left. Therefore it is possible to * find a lowest match of free area. */ static struct rb_root free_vmap_area_root = RB_ROOT; /* * Preload a CPU with one object for "no edge" split case. The * aim is to get rid of allocations from the atomic context, thus * to use more permissive allocation masks. */ static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node); static __always_inline unsigned long va_size(struct vmap_area *va) { return (va->va_end - va->va_start); } static __always_inline unsigned long get_subtree_max_size(struct rb_node *node) { struct vmap_area *va; va = rb_entry_safe(node, struct vmap_area, rb_node); return va ? va->subtree_max_size : 0; } RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb, struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size) static void reclaim_and_purge_vmap_areas(void); static BLOCKING_NOTIFIER_HEAD(vmap_notify_list); static void drain_vmap_area_work(struct work_struct *work); static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work); static atomic_long_t nr_vmalloc_pages; unsigned long vmalloc_nr_pages(void) { return atomic_long_read(&nr_vmalloc_pages); } /* Look up the first VA which satisfies addr < va_end, NULL if none. */ static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr) { struct vmap_area *va = NULL; struct rb_node *n = vmap_area_root.rb_node; addr = (unsigned long)kasan_reset_tag((void *)addr); while (n) { struct vmap_area *tmp; tmp = rb_entry(n, struct vmap_area, rb_node); if (tmp->va_end > addr) { va = tmp; if (tmp->va_start <= addr) break; n = n->rb_left; } else n = n->rb_right; } return va; } static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root) { struct rb_node *n = root->rb_node; addr = (unsigned long)kasan_reset_tag((void *)addr); while (n) { struct vmap_area *va; va = rb_entry(n, struct vmap_area, rb_node); if (addr < va->va_start) n = n->rb_left; else if (addr >= va->va_end) n = n->rb_right; else return va; } return NULL; } /* * This function returns back addresses of parent node * and its left or right link for further processing. * * Otherwise NULL is returned. 
In that case, all further
 * steps regarding the insertion of the conflicting overlapping range
 * have to be declined and are actually considered a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction,
	 * which I name "link", where the new va->rb_node will be attached.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity checking.
		 * Trigger a WARN() and bail out if a partial (left/right)
		 * or full overlap is detected.
		 */
		if (va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. it means
		 * there is no free vmap space. Normally it does not
		 * happen but we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
__link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head, bool augment)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert into the rb-tree. */
	rb_link_node(&va->rb_node, parent, link);
	if (augment) {
		/*
		 * Some explanation here. Just perform a simple insertion
		 * into the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * It is because we populate the tree from the bottom
		 * towards the parent levels when the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let __augment_tree_propagate_from() put everything into
		 * the correct order later on.
*/ rb_insert_augmented(&va->rb_node, root, &free_vmap_area_rb_augment_cb); va->subtree_max_size = 0; } else { rb_insert_color(&va->rb_node, root); } /* Address-sort this list */ list_add(&va->list, head); } static __always_inline void link_va(struct vmap_area *va, struct rb_root *root, struct rb_node *parent, struct rb_node **link, struct list_head *head) { __link_va(va, root, parent, link, head, false); } static __always_inline void link_va_augment(struct vmap_area *va, struct rb_root *root, struct rb_node *parent, struct rb_node **link, struct list_head *head) { __link_va(va, root, parent, link, head, true); } static __always_inline void __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment) { if (WARN_ON(RB_EMPTY_NODE(&va->rb_node))) return; if (augment) rb_erase_augmented(&va->rb_node, root, &free_vmap_area_rb_augment_cb); else rb_erase(&va->rb_node, root); list_del_init(&va->list); RB_CLEAR_NODE(&va->rb_node); } static __always_inline void unlink_va(struct vmap_area *va, struct rb_root *root) { __unlink_va(va, root, false); } static __always_inline void unlink_va_augment(struct vmap_area *va, struct rb_root *root) { __unlink_va(va, root, true); } #if DEBUG_AUGMENT_PROPAGATE_CHECK /* * Gets called when remove the node and rotate. */ static __always_inline unsigned long compute_subtree_max_size(struct vmap_area *va) { return max3(va_size(va), get_subtree_max_size(va->rb_node.rb_left), get_subtree_max_size(va->rb_node.rb_right)); } static void augment_tree_propagate_check(void) { struct vmap_area *va; unsigned long computed_size; list_for_each_entry(va, &free_vmap_area_list, list) { computed_size = compute_subtree_max_size(va); if (computed_size != va->subtree_max_size) pr_emerg("tree is corrupted: %lu, %lu\n", va_size(va), va->subtree_max_size); } } #endif /* * This function populates subtree_max_size from bottom to upper * levels starting from VA point. The propagation must be done * when VA size is modified by changing its va_start/va_end. Or * in case of newly inserting of VA to the tree. * * It means that __augment_tree_propagate_from() must be called: * - After VA has been inserted to the tree(free path); * - After VA has been shrunk(allocation path); * - After VA has been increased(merging path). * * Please note that, it does not mean that upper parent nodes * and their subtree_max_size are recalculated all the time up * to the root node. * * 4--8 * /\ * / \ * / \ * 2--2 8--8 * * For example if we modify the node 4, shrinking it to 2, then * no any modification is required. If we shrink the node 2 to 1 * its subtree_max_size is updated only, and set to 1. If we shrink * the node 8 to 6, then its subtree_max_size is set to 6 and parent * node becomes 4--6. */ static __always_inline void augment_tree_propagate_from(struct vmap_area *va) { /* * Populate the tree from bottom towards the root until * the calculated maximum available size of checked node * is equal to its current one. 
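 *
 * As an illustration of the calling pattern (a sketch of what the
 * LE_FIT_TYPE case in adjust_va_to_fit_type() below effectively does,
 * not additional functionality):
 *
 *	va->va_start += size;
 *	augment_tree_propagate_from(va);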
*/ free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL); #if DEBUG_AUGMENT_PROPAGATE_CHECK augment_tree_propagate_check(); #endif } static void insert_vmap_area(struct vmap_area *va, struct rb_root *root, struct list_head *head) { struct rb_node **link; struct rb_node *parent; link = find_va_links(va, root, NULL, &parent); if (link) link_va(va, root, parent, link, head); } static void insert_vmap_area_augment(struct vmap_area *va, struct rb_node *from, struct rb_root *root, struct list_head *head) { struct rb_node **link; struct rb_node *parent; if (from) link = find_va_links(va, NULL, from, &parent); else link = find_va_links(va, root, NULL, &parent); if (link) { link_va_augment(va, root, parent, link, head); augment_tree_propagate_from(va); } } /* * Merge de-allocated chunk of VA memory with previous * and next free blocks. If coalesce is not done a new * free area is inserted. If VA has been merged, it is * freed. * * Please note, it can return NULL in case of overlap * ranges, followed by WARN() report. Despite it is a * buggy behaviour, a system can be alive and keep * ongoing. */ static __always_inline struct vmap_area * __merge_or_add_vmap_area(struct vmap_area *va, struct rb_root *root, struct list_head *head, bool augment) { struct vmap_area *sibling; struct list_head *next; struct rb_node **link; struct rb_node *parent; bool merged = false; /* * Find a place in the tree where VA potentially will be * inserted, unless it is merged with its sibling/siblings. */ link = find_va_links(va, root, NULL, &parent); if (!link) return NULL; /* * Get next node of VA to check if merging can be done. */ next = get_va_next_sibling(parent, link); if (unlikely(next == NULL)) goto insert; /* * start end * | | * |<------VA------>|<-----Next----->| * | | * start end */ if (next != head) { sibling = list_entry(next, struct vmap_area, list); if (sibling->va_start == va->va_end) { sibling->va_start = va->va_start; /* Free vmap_area object. */ kmem_cache_free(vmap_area_cachep, va); /* Point to the new merged area. */ va = sibling; merged = true; } } /* * start end * | | * |<-----Prev----->|<------VA------>| * | | * start end */ if (next->prev != head) { sibling = list_entry(next->prev, struct vmap_area, list); if (sibling->va_end == va->va_start) { /* * If both neighbors are coalesced, it is important * to unlink the "next" node first, followed by merging * with "previous" one. Otherwise the tree might not be * fully populated if a sibling's augmented value is * "normalized" because of rotation operations. */ if (merged) __unlink_va(va, root, augment); sibling->va_end = va->va_end; /* Free vmap_area object. */ kmem_cache_free(vmap_area_cachep, va); /* Point to the new merged area. 
 */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged)
		__link_va(va, root, parent, link, head, augment);

	return va;
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	return __merge_or_add_vmap_area(va, root, head, false);
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	va = __merge_or_add_vmap_area(va, root, head, true);
	if (va)
		augment_tree_propagate_from(va);

	return va;
}

static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can overflow due to a big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}

/*
 * Find the first free block (lowest start address) in the tree
 * that can satisfy the request described by the passed parameters.
 * Please note that with an alignment bigger than PAGE_SIZE, the
 * search length is adjusted to account for the worst-case alignment
 * overhead.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(struct rb_root *root, unsigned long size,
	unsigned long align, unsigned long vstart, bool adjust_search_size)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = root->rb_node;

	/* Adjust the search size for alignment overhead. */
	length = adjust_search_size ? size + align - 1 : size;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * It does not make sense to go deeper into the right
			 * sub-tree if it does not have a free block that is
			 * equal to or bigger than the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree
			 * that satisfies the search criteria. This can happen
			 * due to the "vstart" restriction or an alignment
			 * overhead that is bigger than PAGE_SIZE.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					/*
					 * Shift vstart forward. Please note that we update it with
					 * the parent's start address plus "1", because we do not want
					 * to enter the same sub-tree after it has already been checked
					 * and no suitable free block was found there.
*/ vstart = va->va_start + 1; node = node->rb_right; break; } } } } return NULL; } #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK #include <linux/random.h> static struct vmap_area * find_vmap_lowest_linear_match(struct list_head *head, unsigned long size, unsigned long align, unsigned long vstart) { struct vmap_area *va; list_for_each_entry(va, head, list) { if (!is_within_this_va(va, size, align, vstart)) continue; return va; } return NULL; } static void find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head, unsigned long size, unsigned long align) { struct vmap_area *va_1, *va_2; unsigned long vstart; unsigned int rnd; get_random_bytes(&rnd, sizeof(rnd)); vstart = VMALLOC_START + rnd; va_1 = find_vmap_lowest_match(root, size, align, vstart, false); va_2 = find_vmap_lowest_linear_match(head, size, align, vstart); if (va_1 != va_2) pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n", va_1, va_2, vstart); } #endif enum fit_type { NOTHING_FIT = 0, FL_FIT_TYPE = 1, /* full fit */ LE_FIT_TYPE = 2, /* left edge fit */ RE_FIT_TYPE = 3, /* right edge fit */ NE_FIT_TYPE = 4 /* no edge fit */ }; static __always_inline enum fit_type classify_va_fit_type(struct vmap_area *va, unsigned long nva_start_addr, unsigned long size) { enum fit_type type; /* Check if it is within VA. */ if (nva_start_addr < va->va_start || nva_start_addr + size > va->va_end) return NOTHING_FIT; /* Now classify. */ if (va->va_start == nva_start_addr) { if (va->va_end == nva_start_addr + size) type = FL_FIT_TYPE; else type = LE_FIT_TYPE; } else if (va->va_end == nva_start_addr + size) { type = RE_FIT_TYPE; } else { type = NE_FIT_TYPE; } return type; } static __always_inline int adjust_va_to_fit_type(struct rb_root *root, struct list_head *head, struct vmap_area *va, unsigned long nva_start_addr, unsigned long size) { struct vmap_area *lva = NULL; enum fit_type type = classify_va_fit_type(va, nva_start_addr, size); if (type == FL_FIT_TYPE) { /* * No need to split VA, it fully fits. * * | | * V NVA V * |---------------| */ unlink_va_augment(va, root); kmem_cache_free(vmap_area_cachep, va); } else if (type == LE_FIT_TYPE) { /* * Split left edge of fit VA. * * | | * V NVA V R * |-------|-------| */ va->va_start += size; } else if (type == RE_FIT_TYPE) { /* * Split right edge of fit VA. * * | | * L V NVA V * |-------|-------| */ va->va_end = nva_start_addr; } else if (type == NE_FIT_TYPE) { /* * Split no edge of fit VA. * * | | * L V NVA V R * |---|-------|---| */ lva = __this_cpu_xchg(ne_fit_preload_node, NULL); if (unlikely(!lva)) { /* * For percpu allocator we do not do any pre-allocation * and leave it as it is. The reason is it most likely * never ends up with NE_FIT_TYPE splitting. In case of * percpu allocations offsets and sizes are aligned to * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE * are its main fitting cases. * * There are a few exceptions though, as an example it is * a first allocation (early boot up) when we have "one" * big free space that has to be split. * * Also we can hit this path in case of regular "vmap" * allocations, if "this" current CPU was not preloaded. * See the comment in alloc_vmap_area() why. If so, then * GFP_NOWAIT is used instead to get an extra object for * split purpose. That is rare and most time does not * occur. * * What happens if an allocation gets failed. Basically, * an "overflow" path is triggered to purge lazily freed * areas to free some memory, then, the "retry" path is * triggered to repeat one more time. See more details * in alloc_vmap_area() function. 
*/ lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT); if (!lva) return -1; } /* * Build the remainder. */ lva->va_start = va->va_start; lva->va_end = nva_start_addr; /* * Shrink this VA to remaining size. */ va->va_start = nva_start_addr + size; } else { return -1; } if (type != FL_FIT_TYPE) { augment_tree_propagate_from(va); if (lva) /* type == NE_FIT_TYPE */ insert_vmap_area_augment(lva, &va->rb_node, root, head); } return 0; } /* * Returns a start address of the newly allocated area, if success. * Otherwise a vend is returned that indicates failure. */ static __always_inline unsigned long __alloc_vmap_area(struct rb_root *root, struct list_head *head, unsigned long size, unsigned long align, unsigned long vstart, unsigned long vend) { bool adjust_search_size = true; unsigned long nva_start_addr; struct vmap_area *va; int ret; /* * Do not adjust when: * a) align <= PAGE_SIZE, because it does not make any sense. * All blocks(their start addresses) are at least PAGE_SIZE * aligned anyway; * b) a short range where a requested size corresponds to exactly * specified [vstart:vend] interval and an alignment > PAGE_SIZE. * With adjusted search length an allocation would not succeed. */ if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size)) adjust_search_size = false; va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size); if (unlikely(!va)) return vend; if (va->va_start > vstart) nva_start_addr = ALIGN(va->va_start, align); else nva_start_addr = ALIGN(vstart, align); /* Check the "vend" restriction. */ if (nva_start_addr + size > vend) return vend; /* Update the free vmap_area. */ ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size); if (WARN_ON_ONCE(ret)) return vend; #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK find_vmap_lowest_match_check(root, head, size, align); #endif return nva_start_addr; } /* * Free a region of KVA allocated by alloc_vmap_area */ static void free_vmap_area(struct vmap_area *va) { /* * Remove from the busy tree/list. */ spin_lock(&vmap_area_lock); unlink_va(va, &vmap_area_root); spin_unlock(&vmap_area_lock); /* * Insert/Merge it back to the free tree/list. */ spin_lock(&free_vmap_area_lock); merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list); spin_unlock(&free_vmap_area_lock); } static inline void preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node) { struct vmap_area *va = NULL; /* * Preload this CPU with one extra vmap_area object. It is used * when fit type of free area is NE_FIT_TYPE. It guarantees that * a CPU that does an allocation is preloaded. * * We do it in non-atomic context, thus it allows us to use more * permissive allocation masks to be more stable under low memory * condition and high memory pressure. */ if (!this_cpu_read(ne_fit_preload_node)) va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); spin_lock(lock); if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va)) kmem_cache_free(vmap_area_cachep, va); } /* * Allocate a region of KVA of the specified size and alignment, within the * vstart and vend. 
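 *
 * For example, the vm_map_ram() path below invokes it roughly as
 * (a sketch of the existing call, not a new interface):
 *
 *	va = alloc_vmap_area(size, PAGE_SIZE, VMALLOC_START, VMALLOC_END,
 *				node, GFP_KERNEL, VMAP_RAM);
 *	if (IS_ERR(va))
 *		return NULL;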
*/ static struct vmap_area *alloc_vmap_area(unsigned long size, unsigned long align, unsigned long vstart, unsigned long vend, int node, gfp_t gfp_mask, unsigned long va_flags) { struct vmap_area *va; unsigned long freed; unsigned long addr; int purged = 0; int ret; if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align))) return ERR_PTR(-EINVAL); if (unlikely(!vmap_initialized)) return ERR_PTR(-EBUSY); might_sleep(); gfp_mask = gfp_mask & GFP_RECLAIM_MASK; va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); if (unlikely(!va)) return ERR_PTR(-ENOMEM); /* * Only scan the relevant parts containing pointers to other objects * to avoid false negatives. */ kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); retry: preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node); addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list, size, align, vstart, vend); spin_unlock(&free_vmap_area_lock); trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend); /* * If an allocation fails, the "vend" address is * returned. Therefore trigger the overflow path. */ if (unlikely(addr == vend)) goto overflow; va->va_start = addr; va->va_end = addr + size; va->vm = NULL; va->flags = va_flags; spin_lock(&vmap_area_lock); insert_vmap_area(va, &vmap_area_root, &vmap_area_list); spin_unlock(&vmap_area_lock); BUG_ON(!IS_ALIGNED(va->va_start, align)); BUG_ON(va->va_start < vstart); BUG_ON(va->va_end > vend); ret = kasan_populate_vmalloc(addr, size); if (ret) { free_vmap_area(va); return ERR_PTR(ret); } return va; overflow: if (!purged) { reclaim_and_purge_vmap_areas(); purged = 1; goto retry; } freed = 0; blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); if (freed > 0) { purged = 0; goto retry; } if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n", size); kmem_cache_free(vmap_area_cachep, va); return ERR_PTR(-EBUSY); } int register_vmap_purge_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&vmap_notify_list, nb); } EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); int unregister_vmap_purge_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&vmap_notify_list, nb); } EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); /* * lazy_max_pages is the maximum amount of virtual address space we gather up * before attempting to purge with a TLB flush. * * There is a tradeoff here: a larger number will cover more kernel page tables * and take slightly longer to purge, but it will linearly reduce the number of * global TLB flushes that must be performed. It would seem natural to scale * this number up linearly with the number of CPUs (because vmapping activity * could also scale linearly with the number of CPUs), however it is likely * that in practice, workloads might be constrained in other ways that mean * vmap activity will not scale linearly with CPUs. Also, I want to be * conservative and not introduce a big latency on huge systems, so go with * a less aggressive log scale. It will still be an improvement over the old * code, and it will be simple to change the scale factor if we find that it * becomes a problem on bigger systems. */ static unsigned long lazy_max_pages(void) { unsigned int log; log = fls(num_online_cpus()); return log * (32UL * 1024 * 1024 / PAGE_SIZE); } static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0); /* * Serialize vmap purging. 
There is no actual critical section protected * by this lock, but we want to avoid concurrent calls for performance * reasons and to make the pcpu_get_vm_areas more deterministic. */ static DEFINE_MUTEX(vmap_purge_lock); /* for per-CPU blocks */ static void purge_fragmented_blocks_allcpus(void); /* * Purges all lazily-freed vmap areas. */ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end) { unsigned long resched_threshold; unsigned int num_purged_areas = 0; struct list_head local_purge_list; struct vmap_area *va, *n_va; lockdep_assert_held(&vmap_purge_lock); spin_lock(&purge_vmap_area_lock); purge_vmap_area_root = RB_ROOT; list_replace_init(&purge_vmap_area_list, &local_purge_list); spin_unlock(&purge_vmap_area_lock); if (unlikely(list_empty(&local_purge_list))) goto out; start = min(start, list_first_entry(&local_purge_list, struct vmap_area, list)->va_start); end = max(end, list_last_entry(&local_purge_list, struct vmap_area, list)->va_end); flush_tlb_kernel_range(start, end); resched_threshold = lazy_max_pages() << 1; spin_lock(&free_vmap_area_lock); list_for_each_entry_safe(va, n_va, &local_purge_list, list) { unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT; unsigned long orig_start = va->va_start; unsigned long orig_end = va->va_end; /* * Finally insert or merge lazily-freed area. It is * detached and there is no need to "unlink" it from * anything. */ va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list); if (!va) continue; if (is_vmalloc_or_module_addr((void *)orig_start)) kasan_release_vmalloc(orig_start, orig_end, va->va_start, va->va_end); atomic_long_sub(nr, &vmap_lazy_nr); num_purged_areas++; if (atomic_long_read(&vmap_lazy_nr) < resched_threshold) cond_resched_lock(&free_vmap_area_lock); } spin_unlock(&free_vmap_area_lock); out: trace_purge_vmap_area_lazy(start, end, num_purged_areas); return num_purged_areas > 0; } /* * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list. */ static void reclaim_and_purge_vmap_areas(void) { mutex_lock(&vmap_purge_lock); purge_fragmented_blocks_allcpus(); __purge_vmap_area_lazy(ULONG_MAX, 0); mutex_unlock(&vmap_purge_lock); } static void drain_vmap_area_work(struct work_struct *work) { unsigned long nr_lazy; do { mutex_lock(&vmap_purge_lock); __purge_vmap_area_lazy(ULONG_MAX, 0); mutex_unlock(&vmap_purge_lock); /* Recheck if further work is required. */ nr_lazy = atomic_long_read(&vmap_lazy_nr); } while (nr_lazy > lazy_max_pages()); } /* * Free a vmap area, caller ensuring that the area has been unmapped, * unlinked and flush_cache_vunmap had been called for the correct * range previously. */ static void free_vmap_area_noflush(struct vmap_area *va) { unsigned long nr_lazy_max = lazy_max_pages(); unsigned long va_start = va->va_start; unsigned long nr_lazy; if (WARN_ON_ONCE(!list_empty(&va->list))) return; nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr); /* * Merge or place it to the purge tree/list. 
*/ spin_lock(&purge_vmap_area_lock); merge_or_add_vmap_area(va, &purge_vmap_area_root, &purge_vmap_area_list); spin_unlock(&purge_vmap_area_lock); trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max); /* After this point, we may free va at any time */ if (unlikely(nr_lazy > nr_lazy_max)) schedule_work(&drain_vmap_work); } /* * Free and unmap a vmap area */ static void free_unmap_vmap_area(struct vmap_area *va) { flush_cache_vunmap(va->va_start, va->va_end); vunmap_range_noflush(va->va_start, va->va_end); if (debug_pagealloc_enabled_static()) flush_tlb_kernel_range(va->va_start, va->va_end); free_vmap_area_noflush(va); } struct vmap_area *find_vmap_area(unsigned long addr) { struct vmap_area *va; spin_lock(&vmap_area_lock); va = __find_vmap_area(addr, &vmap_area_root); spin_unlock(&vmap_area_lock); return va; } static struct vmap_area *find_unlink_vmap_area(unsigned long addr) { struct vmap_area *va; spin_lock(&vmap_area_lock); va = __find_vmap_area(addr, &vmap_area_root); if (va) unlink_va(va, &vmap_area_root); spin_unlock(&vmap_area_lock); return va; } /*** Per cpu kva allocator ***/ /* * vmap space is limited especially on 32 bit architectures. Ensure there is * room for at least 16 percpu vmap blocks per CPU. */ /* * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess * instead (we just need a rough idea) */ #if BITS_PER_LONG == 32 #define VMALLOC_SPACE (128UL*1024*1024) #else #define VMALLOC_SPACE (128UL*1024*1024*1024) #endif #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ #define VMAP_BBMAP_BITS \ VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) /* * Purge threshold to prevent overeager purging of fragmented blocks for * regular operations: Purge if vb->free is less than 1/4 of the capacity. */ #define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4) #define VMAP_RAM 0x1 /* indicates vm_map_ram area*/ #define VMAP_BLOCK 0x2 /* mark out the vmap_block sub-type*/ #define VMAP_FLAGS_MASK 0x3 struct vmap_block_queue { spinlock_t lock; struct list_head free; /* * An xarray requires an extra memory dynamically to * be allocated. If it is an issue, we can use rb-tree * instead. */ struct xarray vmap_blocks; }; struct vmap_block { spinlock_t lock; struct vmap_area *va; unsigned long free, dirty; DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS); unsigned long dirty_min, dirty_max; /*< dirty range */ struct list_head free_list; struct rcu_head rcu_head; struct list_head purge; }; /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); /* * In order to fast access to any "vmap_block" associated with a * specific address, we use a hash. * * A per-cpu vmap_block_queue is used in both ways, to serialize * an access to free block chains among CPUs(alloc path) and it * also acts as a vmap_block hash(alloc/free paths). It means we * overload it, since we already have the per-cpu array which is * used as a hash table. When used as a hash a 'cpu' passed to * per_cpu() is not actually a CPU but rather a hash index. 
* * A hash function is addr_to_vb_xa() which hashes any address * to a specific index(in a hash) it belongs to. This then uses a * per_cpu() macro to access an array with generated index. * * An example: * * CPU_1 CPU_2 CPU_0 * | | | * V V V * 0 10 20 30 40 50 60 * |------|------|------|------|------|------|...<vmap address space> * CPU0 CPU1 CPU2 CPU0 CPU1 CPU2 * * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to CPU0 zone, thus * it access: CPU0/INDEX0 -> vmap_blocks -> xa_lock; * * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to CPU1 zone, thus * it access: CPU1/INDEX1 -> vmap_blocks -> xa_lock; * * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to CPU2 zone, thus * it access: CPU2/INDEX2 -> vmap_blocks -> xa_lock. * * This technique almost always avoids lock contention on insert/remove, * however xarray spinlocks protect against any contention that remains. */ static struct xarray * addr_to_vb_xa(unsigned long addr) { int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus(); return &per_cpu(vmap_block_queue, index).vmap_blocks; } /* * We should probably have a fallback mechanism to allocate virtual memory * out of partially filled vmap blocks. However vmap block sizing should be * fairly reasonable according to the vmalloc size, so it shouldn't be a * big problem. */ static unsigned long addr_to_vb_idx(unsigned long addr) { addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); addr /= VMAP_BLOCK_SIZE; return addr; } static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off) { unsigned long addr; addr = va_start + (pages_off << PAGE_SHIFT); BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); return (void *)addr; } /** * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this * block. Of course pages number can't exceed VMAP_BBMAP_BITS * @order: how many 2^order pages should be occupied in newly allocated block * @gfp_mask: flags for the page level allocator * * Return: virtual address in a newly allocated block or ERR_PTR(-errno) */ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) { struct vmap_block_queue *vbq; struct vmap_block *vb; struct vmap_area *va; struct xarray *xa; unsigned long vb_idx; int node, err; void *vaddr; node = numa_node_id(); vb = kmalloc_node(sizeof(struct vmap_block), gfp_mask & GFP_RECLAIM_MASK, node); if (unlikely(!vb)) return ERR_PTR(-ENOMEM); va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, VMALLOC_START, VMALLOC_END, node, gfp_mask, VMAP_RAM|VMAP_BLOCK); if (IS_ERR(va)) { kfree(vb); return ERR_CAST(va); } vaddr = vmap_block_vaddr(va->va_start, 0); spin_lock_init(&vb->lock); vb->va = va; /* At least something should be left free */ BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); bitmap_zero(vb->used_map, VMAP_BBMAP_BITS); vb->free = VMAP_BBMAP_BITS - (1UL << order); vb->dirty = 0; vb->dirty_min = VMAP_BBMAP_BITS; vb->dirty_max = 0; bitmap_set(vb->used_map, 0, (1UL << order)); INIT_LIST_HEAD(&vb->free_list); xa = addr_to_vb_xa(va->va_start); vb_idx = addr_to_vb_idx(va->va_start); err = xa_insert(xa, vb_idx, vb, gfp_mask); if (err) { kfree(vb); free_vmap_area(va); return ERR_PTR(err); } vbq = raw_cpu_ptr(&vmap_block_queue); spin_lock(&vbq->lock); list_add_tail_rcu(&vb->free_list, &vbq->free); spin_unlock(&vbq->lock); return vaddr; } static void free_vmap_block(struct vmap_block *vb) { struct vmap_block *tmp; struct xarray *xa; xa = addr_to_vb_xa(vb->va->va_start); tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start)); BUG_ON(tmp != vb); spin_lock(&vmap_area_lock); unlink_va(vb->va, &vmap_area_root); 
spin_unlock(&vmap_area_lock); free_vmap_area_noflush(vb->va); kfree_rcu(vb, rcu_head); } static bool purge_fragmented_block(struct vmap_block *vb, struct vmap_block_queue *vbq, struct list_head *purge_list, bool force_purge) { if (vb->free + vb->dirty != VMAP_BBMAP_BITS || vb->dirty == VMAP_BBMAP_BITS) return false; /* Don't overeagerly purge usable blocks unless requested */ if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD)) return false; /* prevent further allocs after releasing lock */ WRITE_ONCE(vb->free, 0); /* prevent purging it again */ WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS); vb->dirty_min = 0; vb->dirty_max = VMAP_BBMAP_BITS; spin_lock(&vbq->lock); list_del_rcu(&vb->free_list); spin_unlock(&vbq->lock); list_add_tail(&vb->purge, purge_list); return true; } static void free_purged_blocks(struct list_head *purge_list) { struct vmap_block *vb, *n_vb; list_for_each_entry_safe(vb, n_vb, purge_list, purge) { list_del(&vb->purge); free_vmap_block(vb); } } static void purge_fragmented_blocks(int cpu) { LIST_HEAD(purge); struct vmap_block *vb; struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); rcu_read_lock(); list_for_each_entry_rcu(vb, &vbq->free, free_list) { unsigned long free = READ_ONCE(vb->free); unsigned long dirty = READ_ONCE(vb->dirty); if (free + dirty != VMAP_BBMAP_BITS || dirty == VMAP_BBMAP_BITS) continue; spin_lock(&vb->lock); purge_fragmented_block(vb, vbq, &purge, true); spin_unlock(&vb->lock); } rcu_read_unlock(); free_purged_blocks(&purge); } static void purge_fragmented_blocks_allcpus(void) { int cpu; for_each_possible_cpu(cpu) purge_fragmented_blocks(cpu); } static void *vb_alloc(unsigned long size, gfp_t gfp_mask) { struct vmap_block_queue *vbq; struct vmap_block *vb; void *vaddr = NULL; unsigned int order; BUG_ON(offset_in_page(size)); BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); if (WARN_ON(size == 0)) { /* * Allocating 0 bytes isn't what caller wants since * get_order(0) returns funny result. Just warn and terminate * early. 
*/ return NULL; } order = get_order(size); rcu_read_lock(); vbq = raw_cpu_ptr(&vmap_block_queue); list_for_each_entry_rcu(vb, &vbq->free, free_list) { unsigned long pages_off; if (READ_ONCE(vb->free) < (1UL << order)) continue; spin_lock(&vb->lock); if (vb->free < (1UL << order)) { spin_unlock(&vb->lock); continue; } pages_off = VMAP_BBMAP_BITS - vb->free; vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); WRITE_ONCE(vb->free, vb->free - (1UL << order)); bitmap_set(vb->used_map, pages_off, (1UL << order)); if (vb->free == 0) { spin_lock(&vbq->lock); list_del_rcu(&vb->free_list); spin_unlock(&vbq->lock); } spin_unlock(&vb->lock); break; } rcu_read_unlock(); /* Allocate new block if nothing was found */ if (!vaddr) vaddr = new_vmap_block(order, gfp_mask); return vaddr; } static void vb_free(unsigned long addr, unsigned long size) { unsigned long offset; unsigned int order; struct vmap_block *vb; struct xarray *xa; BUG_ON(offset_in_page(size)); BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); flush_cache_vunmap(addr, addr + size); order = get_order(size); offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; xa = addr_to_vb_xa(addr); vb = xa_load(xa, addr_to_vb_idx(addr)); spin_lock(&vb->lock); bitmap_clear(vb->used_map, offset, (1UL << order)); spin_unlock(&vb->lock); vunmap_range_noflush(addr, addr + size); if (debug_pagealloc_enabled_static()) flush_tlb_kernel_range(addr, addr + size); spin_lock(&vb->lock); /* Expand the not yet TLB flushed dirty range */ vb->dirty_min = min(vb->dirty_min, offset); vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order)); if (vb->dirty == VMAP_BBMAP_BITS) { BUG_ON(vb->free); spin_unlock(&vb->lock); free_vmap_block(vb); } else spin_unlock(&vb->lock); } static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) { LIST_HEAD(purge_list); int cpu; if (unlikely(!vmap_initialized)) return; mutex_lock(&vmap_purge_lock); for_each_possible_cpu(cpu) { struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); struct vmap_block *vb; unsigned long idx; rcu_read_lock(); xa_for_each(&vbq->vmap_blocks, idx, vb) { spin_lock(&vb->lock); /* * Try to purge a fragmented block first. If it's * not purgeable, check whether there is dirty * space to be flushed. */ if (!purge_fragmented_block(vb, vbq, &purge_list, false) && vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) { unsigned long va_start = vb->va->va_start; unsigned long s, e; s = va_start + (vb->dirty_min << PAGE_SHIFT); e = va_start + (vb->dirty_max << PAGE_SHIFT); start = min(s, start); end = max(e, end); /* Prevent that this is flushed again */ vb->dirty_min = VMAP_BBMAP_BITS; vb->dirty_max = 0; flush = 1; } spin_unlock(&vb->lock); } rcu_read_unlock(); } free_purged_blocks(&purge_list); if (!__purge_vmap_area_lazy(start, end) && flush) flush_tlb_kernel_range(start, end); mutex_unlock(&vmap_purge_lock); } /** * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer * * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily * to amortize TLB flushing overheads. What this means is that any page you * have now, may, in a former life, have been mapped into kernel virtual * address by the vmap layer and so there might be some CPUs with TLB entries * still referencing that page (additional to the regular 1:1 kernel mapping). * * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can * be sure that none of the pages we have control over will have any aliases * from the vmap layer. 
*/ void vm_unmap_aliases(void) { unsigned long start = ULONG_MAX, end = 0; int flush = 0; _vm_unmap_aliases(start, end, flush); } EXPORT_SYMBOL_GPL(vm_unmap_aliases); /** * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram * @mem: the pointer returned by vm_map_ram * @count: the count passed to that vm_map_ram call (cannot unmap partial) */ void vm_unmap_ram(const void *mem, unsigned int count) { unsigned long size = (unsigned long)count << PAGE_SHIFT; unsigned long addr = (unsigned long)kasan_reset_tag(mem); struct vmap_area *va; might_sleep(); BUG_ON(!addr); BUG_ON(addr < VMALLOC_START); BUG_ON(addr > VMALLOC_END); BUG_ON(!PAGE_ALIGNED(addr)); kasan_poison_vmalloc(mem, size); if (likely(count <= VMAP_MAX_ALLOC)) { debug_check_no_locks_freed(mem, size); vb_free(addr, size); return; } va = find_unlink_vmap_area(addr); if (WARN_ON_ONCE(!va)) return; debug_check_no_locks_freed((void *)va->va_start, (va->va_end - va->va_start)); free_unmap_vmap_area(va); } EXPORT_SYMBOL(vm_unmap_ram); /** * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) * @pages: an array of pointers to the pages to be mapped * @count: number of pages * @node: prefer to allocate data structures on this node * * If you use this function for less than VMAP_MAX_ALLOC pages, it could be * faster than vmap so it's good. But if you mix long-life and short-life * objects with vm_map_ram(), it could consume lots of address space through * fragmentation (especially on a 32bit machine). You could see failures in * the end. Please use this function for short-lived objects. * * Returns: a pointer to the address that has been mapped, or %NULL on failure */ void *vm_map_ram(struct page **pages, unsigned int count, int node) { unsigned long size = (unsigned long)count << PAGE_SHIFT; unsigned long addr; void *mem; if (likely(count <= VMAP_MAX_ALLOC)) { mem = vb_alloc(size, GFP_KERNEL); if (IS_ERR(mem)) return NULL; addr = (unsigned long)mem; } else { struct vmap_area *va; va = alloc_vmap_area(size, PAGE_SIZE, VMALLOC_START, VMALLOC_END, node, GFP_KERNEL, VMAP_RAM); if (IS_ERR(va)) return NULL; addr = va->va_start; mem = (void *)addr; } if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, PAGE_SHIFT) < 0) { vm_unmap_ram(mem, count); return NULL; } /* * Mark the pages as accessible, now that they are mapped. * With hardware tag-based KASAN, marking is skipped for * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). */ mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL); return mem; } EXPORT_SYMBOL(vm_map_ram); static struct vm_struct *vmlist __initdata; static inline unsigned int vm_area_page_order(struct vm_struct *vm) { #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC return vm->page_order; #else return 0; #endif } static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order) { #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC vm->page_order = order; #else BUG_ON(order != 0); #endif } /** * vm_area_add_early - add vmap area early during boot * @vm: vm_struct to add * * This function is used to add fixed kernel vm area to vmlist before * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags * should contain proper values and the other fields should be zero. * * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 
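 *
 * A minimal usage sketch (illustrative only; "early_vm" and
 * "EARLY_FIXED_VADDR" are hypothetical and must describe a range the
 * caller really owns):
 *
 *	static struct vm_struct early_vm;
 *
 *	early_vm.addr  = (void *)EARLY_FIXED_VADDR;
 *	early_vm.size  = PAGE_SIZE;
 *	early_vm.flags = VM_IOREMAP;
 *	vm_area_add_early(&early_vm);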
*/ void __init vm_area_add_early(struct vm_struct *vm) { struct vm_struct *tmp, **p; BUG_ON(vmap_initialized); for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { if (tmp->addr >= vm->addr) { BUG_ON(tmp->addr < vm->addr + vm->size); break; } else BUG_ON(tmp->addr + tmp->size > vm->addr); } vm->next = *p; *p = vm; } /** * vm_area_register_early - register vmap area early during boot * @vm: vm_struct to register * @align: requested alignment * * This function is used to register kernel vm area before * vmalloc_init() is called. @vm->size and @vm->flags should contain * proper values on entry and other fields should be zero. On return, * vm->addr contains the allocated address. * * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. */ void __init vm_area_register_early(struct vm_struct *vm, size_t align) { unsigned long addr = ALIGN(VMALLOC_START, align); struct vm_struct *cur, **p; BUG_ON(vmap_initialized); for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) { if ((unsigned long)cur->addr - addr >= vm->size) break; addr = ALIGN((unsigned long)cur->addr + cur->size, align); } BUG_ON(addr > VMALLOC_END - vm->size); vm->addr = (void *)addr; vm->next = *p; *p = vm; kasan_populate_early_vm_area_shadow(vm->addr, vm->size); } static void vmap_init_free_space(void) { unsigned long vmap_start = 1; const unsigned long vmap_end = ULONG_MAX; struct vmap_area *busy, *free; /* * B F B B B F * -|-----|.....|-----|-----|-----|.....|- * | The KVA space | * |<--------------------------------->| */ list_for_each_entry(busy, &vmap_area_list, list) { if (busy->va_start - vmap_start > 0) { free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); if (!WARN_ON_ONCE(!free)) { free->va_start = vmap_start; free->va_end = busy->va_start; insert_vmap_area_augment(free, NULL, &free_vmap_area_root, &free_vmap_area_list); } } vmap_start = busy->va_end; } if (vmap_end - vmap_start > 0) { free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); if (!WARN_ON_ONCE(!free)) { free->va_start = vmap_start; free->va_end = vmap_end; insert_vmap_area_augment(free, NULL, &free_vmap_area_root, &free_vmap_area_list); } } } static inline void setup_vmalloc_vm_locked(struct vm_struct *vm, struct vmap_area *va, unsigned long flags, const void *caller) { vm->flags = flags; vm->addr = (void *)va->va_start; vm->size = va->va_end - va->va_start; vm->caller = caller; va->vm = vm; } static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, unsigned long flags, const void *caller) { spin_lock(&vmap_area_lock); setup_vmalloc_vm_locked(vm, va, flags, caller); spin_unlock(&vmap_area_lock); } static void clear_vm_uninitialized_flag(struct vm_struct *vm) { /* * Before removing VM_UNINITIALIZED, * we should make sure that vm has proper values. * Pair with smp_rmb() in show_numa_info(). 
	 */
	smp_wmb();
	vm->flags &= ~VM_UNINITIALIZED;
}

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long shift, unsigned long flags,
		unsigned long start, unsigned long end, int node,
		gfp_t gfp_mask, const void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;
	unsigned long requested_size = size;

	BUG_ON(in_interrupt());
	size = ALIGN(size, 1ul << shift);
	if (unlikely(!size))
		return NULL;

	if (flags & VM_IOREMAP)
		align = 1ul << clamp_t(int, get_count_order_long(size),
				       PAGE_SHIFT, IOREMAP_MAX_ORDER);

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	if (!(flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	setup_vmalloc_vm(area, va, flags, caller);

	/*
	 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
	 * best-effort approach, as they can be mapped outside of vmalloc code.
	 * For VM_ALLOC mappings, the pages are marked as accessible after
	 * getting mapped in __vmalloc_node_range().
	 * With hardware tag-based KASAN, marking is skipped for
	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
	 */
	if (!(flags & VM_ALLOC))
		area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
						    KASAN_VMALLOC_PROT_NORMAL);

	return area;
}

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
				  VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				const void *caller)
{
	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
				  VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}

/**
 * find_vm_area - find a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and return it.
 * It is up to the caller to do all required locking to keep the returned
 * pointer valid.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (!va)
		return NULL;

	return va->vm;
}

/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 *
 * Return: the area descriptor on success or %NULL on failure.
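 *
 * For example, vunmap() below uses it roughly as (a sketch of the
 * existing caller, not a new interface):
 *
 *	vm = remove_vm_area(addr);
 *	if (vm)
 *		kfree(vm);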
*/ struct vm_struct *remove_vm_area(const void *addr) { struct vmap_area *va; struct vm_struct *vm; might_sleep(); if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", addr)) return NULL; va = find_unlink_vmap_area((unsigned long)addr); if (!va || !va->vm) return NULL; vm = va->vm; debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm)); debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm)); kasan_free_module_shadow(vm); kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm)); free_unmap_vmap_area(va); return vm; } static inline void set_area_direct_map(const struct vm_struct *area, int (*set_direct_map)(struct page *page)) { int i; /* HUGE_VMALLOC passes small pages to set_direct_map */ for (i = 0; i < area->nr_pages; i++) if (page_address(area->pages[i])) set_direct_map(area->pages[i]); } /* * Flush the vm mapping and reset the direct map. */ static void vm_reset_perms(struct vm_struct *area) { unsigned long start = ULONG_MAX, end = 0; unsigned int page_order = vm_area_page_order(area); int flush_dmap = 0; int i; /* * Find the start and end range of the direct mappings to make sure that * the vm_unmap_aliases() flush includes the direct map. */ for (i = 0; i < area->nr_pages; i += 1U << page_order) { unsigned long addr = (unsigned long)page_address(area->pages[i]); if (addr) { unsigned long page_size; page_size = PAGE_SIZE << page_order; start = min(addr, start); end = max(addr + page_size, end); flush_dmap = 1; } } /* * Set direct map to something invalid so that it won't be cached if * there are any accesses after the TLB flush, then flush the TLB and * reset the direct map permissions to the default. */ set_area_direct_map(area, set_direct_map_invalid_noflush); _vm_unmap_aliases(start, end, flush_dmap); set_area_direct_map(area, set_direct_map_default_noflush); } static void delayed_vfree_work(struct work_struct *w) { struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq); struct llist_node *t, *llnode; llist_for_each_safe(llnode, t, llist_del_all(&p->list)) vfree(llnode); } /** * vfree_atomic - release memory allocated by vmalloc() * @addr: memory base address * * This one is just like vfree() but can be called in any atomic context * except NMIs. */ void vfree_atomic(const void *addr) { struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred); BUG_ON(in_nmi()); kmemleak_free(addr); /* * Use raw_cpu_ptr() because this can be called from preemptible * context. Preemption is absolutely fine here, because the llist_add() * implementation is lockless, so it works even if we are adding to * another cpu's list. schedule_work() should be fine with this too. */ if (addr && llist_add((struct llist_node *)addr, &p->list)) schedule_work(&p->wq); } /** * vfree - Release memory allocated by vmalloc() * @addr: Memory base address * * Free the virtually continuous memory area starting at @addr, as obtained * from one of the vmalloc() family of APIs. This will usually also free the * physical memory underlying the virtual allocation, but that memory is * reference counted, so it will not be freed until the last user goes away. * * If @addr is NULL, no operation is performed. * * Context: * May sleep if called *not* from interrupt context. * Must not be called in NMI context (strictly speaking, it could be * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling * conventions for vfree() arch-dependent would be a really bad idea). 
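 *
 * A minimal usage sketch (illustrative only; "buf" is a hypothetical
 * caller-side pointer):
 *
 *	void *buf = vmalloc(SZ_1M);
 *
 *	if (buf) {
 *		memset(buf, 0, SZ_1M);
 *		vfree(buf);
 *	}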
*/ void vfree(const void *addr) { struct vm_struct *vm; int i; if (unlikely(in_interrupt())) { vfree_atomic(addr); return; } BUG_ON(in_nmi()); kmemleak_free(addr); might_sleep(); if (!addr) return; vm = remove_vm_area(addr); if (unlikely(!vm)) { WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr); return; } if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS)) vm_reset_perms(vm); for (i = 0; i < vm->nr_pages; i++) { struct page *page = vm->pages[i]; BUG_ON(!page); mod_memcg_page_state(page, MEMCG_VMALLOC, -1); /* * High-order allocs for huge vmallocs are split, so * can be freed as an array of order-0 allocations */ __free_page(page); cond_resched(); } atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages); kvfree(vm->pages); kfree(vm); } EXPORT_SYMBOL(vfree); /** * vunmap - release virtual mapping obtained by vmap() * @addr: memory base address * * Free the virtually contiguous memory area starting at @addr, * which was created from the page array passed to vmap(). * * Must not be called in interrupt context. */ void vunmap(const void *addr) { struct vm_struct *vm; BUG_ON(in_interrupt()); might_sleep(); if (!addr) return; vm = remove_vm_area(addr); if (unlikely(!vm)) { WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n", addr); return; } kfree(vm); } EXPORT_SYMBOL(vunmap); /** * vmap - map an array of pages into virtually contiguous space * @pages: array of page pointers * @count: number of pages to map * @flags: vm_area->flags * @prot: page protection for the mapping * * Maps @count pages from @pages into contiguous kernel virtual space. * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself * (which must be kmalloc or vmalloc memory) and one reference per pages in it * are transferred from the caller to vmap(), and will be freed / dropped when * vfree() is called on the return value. * * Return: the address of the area or %NULL on failure */ void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot) { struct vm_struct *area; unsigned long addr; unsigned long size; /* In bytes */ might_sleep(); if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS)) return NULL; /* * Your top guard is someone else's bottom guard. Not having a top * guard compromises someone else's mappings too. */ if (WARN_ON_ONCE(flags & VM_NO_GUARD)) flags &= ~VM_NO_GUARD; if (count > totalram_pages()) return NULL; size = (unsigned long)count << PAGE_SHIFT; area = get_vm_area_caller(size, flags, __builtin_return_address(0)); if (!area) return NULL; addr = (unsigned long)area->addr; if (vmap_pages_range(addr, addr + size, pgprot_nx(prot), pages, PAGE_SHIFT) < 0) { vunmap(area->addr); return NULL; } if (flags & VM_MAP_PUT_PAGES) { area->pages = pages; area->nr_pages = count; } return area->addr; } EXPORT_SYMBOL(vmap); #ifdef CONFIG_VMAP_PFN struct vmap_pfn_data { unsigned long *pfns; pgprot_t prot; unsigned int idx; }; static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private) { struct vmap_pfn_data *data = private; unsigned long pfn = data->pfns[data->idx]; pte_t ptent; if (WARN_ON_ONCE(pfn_valid(pfn))) return -EINVAL; ptent = pte_mkspecial(pfn_pte(pfn, data->prot)); set_pte_at(&init_mm, addr, pte, ptent); data->idx++; return 0; } /** * vmap_pfn - map an array of PFNs into virtually contiguous space * @pfns: array of PFNs * @count: number of pages to map * @prot: page protection for the mapping * * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns * the start address of the mapping. 
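 *
 * A minimal usage sketch (illustrative only; "pfns" and "nr" are
 * hypothetical caller-side values, and the PFNs must not be backed by
 * struct pages, see the pfn_valid() check in vmap_pfn_apply() above):
 *
 *	void *vaddr = vmap_pfn(pfns, nr, pgprot_writecombine(PAGE_KERNEL));
 *
 *	if (vaddr) {
 *		...
 *		vunmap(vaddr);
 *	}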
*/ void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) { struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) }; struct vm_struct *area; area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, __builtin_return_address(0)); if (!area) return NULL; if (apply_to_page_range(&init_mm, (unsigned long)area->addr, count * PAGE_SIZE, vmap_pfn_apply, &data)) { free_vm_area(area); return NULL; } flush_cache_vmap((unsigned long)area->addr, (unsigned long)area->addr + count * PAGE_SIZE); return area->addr; } EXPORT_SYMBOL_GPL(vmap_pfn); #endif /* CONFIG_VMAP_PFN */ static inline unsigned int vm_area_alloc_pages(gfp_t gfp, int nid, unsigned int order, unsigned int nr_pages, struct page **pages) { unsigned int nr_allocated = 0; gfp_t alloc_gfp = gfp; bool nofail = false; struct page *page; int i; /* * For order-0 pages we make use of bulk allocator, if * the page array is partly or not at all populated due * to fails, fallback to a single page allocator that is * more permissive. */ if (!order) { /* bulk allocator doesn't support nofail req. officially */ gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL; while (nr_allocated < nr_pages) { unsigned int nr, nr_pages_request; /* * A maximum allowed request is hard-coded and is 100 * pages per call. That is done in order to prevent a * long preemption off scenario in the bulk-allocator * so the range is [1:100]. */ nr_pages_request = min(100U, nr_pages - nr_allocated); /* memory allocation should consider mempolicy, we can't * wrongly use nearest node when nid == NUMA_NO_NODE, * otherwise memory may be allocated in only one node, * but mempolicy wants to alloc memory by interleaving. */ if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE) nr = alloc_pages_bulk_array_mempolicy(bulk_gfp, nr_pages_request, pages + nr_allocated); else nr = alloc_pages_bulk_array_node(bulk_gfp, nid, nr_pages_request, pages + nr_allocated); nr_allocated += nr; cond_resched(); /* * If zero or pages were obtained partly, * fallback to a single page allocator. */ if (nr != nr_pages_request) break; } } else if (gfp & __GFP_NOFAIL) { /* * Higher order nofail allocations are really expensive and * potentially dangerous (pre-mature OOM, disruptive reclaim * and compaction etc. */ alloc_gfp &= ~__GFP_NOFAIL; nofail = true; } /* High-order pages or fallback path if "bulk" fails. */ while (nr_allocated < nr_pages) { if (fatal_signal_pending(current)) break; if (nid == NUMA_NO_NODE) page = alloc_pages(alloc_gfp, order); else page = alloc_pages_node(nid, alloc_gfp, order); if (unlikely(!page)) { if (!nofail) break; /* fall back to the zero order allocations */ alloc_gfp |= __GFP_NOFAIL; order = 0; continue; } /* * Higher order allocations must be able to be treated as * indepdenent small pages by callers (as they can with * small-page vmallocs). Some drivers do their own refcounting * on vmalloc_to_page() pages, some use page->mapping, * page->lru, etc. */ if (order) split_page(page, order); /* * Careful, we allocate and map page-order pages, but * tracking is done per PAGE_SIZE page so as to keep the * vm_struct APIs independent of the physical/mapped size. 
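 *
 * For example, a single order-2 page that was just split fills four
 * consecutive slots, pages[nr_allocated] .. pages[nr_allocated + 3],
 * which is exactly what the loop below does.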
*/ for (i = 0; i < (1U << order); i++) pages[nr_allocated + i] = page + i; cond_resched(); nr_allocated += 1U << order; } return nr_allocated; } static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot, unsigned int page_shift, int node) { const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; bool nofail = gfp_mask & __GFP_NOFAIL; unsigned long addr = (unsigned long)area->addr; unsigned long size = get_vm_area_size(area); unsigned long array_size; unsigned int nr_small_pages = size >> PAGE_SHIFT; unsigned int page_order; unsigned int flags; int ret; array_size = (unsigned long)nr_small_pages * sizeof(struct page *); if (!(gfp_mask & (GFP_DMA | GFP_DMA32))) gfp_mask |= __GFP_HIGHMEM; /* Please note that the recursion is strictly bounded. */ if (array_size > PAGE_SIZE) { area->pages = __vmalloc_node(array_size, 1, nested_gfp, node, area->caller); } else { area->pages = kmalloc_node(array_size, nested_gfp, node); } if (!area->pages) { warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, failed to allocated page array size %lu", nr_small_pages * PAGE_SIZE, array_size); free_vm_area(area); return NULL; } set_vm_area_page_order(area, page_shift - PAGE_SHIFT); page_order = vm_area_page_order(area); area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN, node, page_order, nr_small_pages, area->pages); atomic_long_add(area->nr_pages, &nr_vmalloc_pages); if (gfp_mask & __GFP_ACCOUNT) { int i; for (i = 0; i < area->nr_pages; i++) mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1); } /* * If not enough pages were obtained to accomplish an * allocation request, free them via vfree() if any. */ if (area->nr_pages != nr_small_pages) { /* * vm_area_alloc_pages() can fail due to insufficient memory but * also:- * * - a pending fatal signal * - insufficient huge page-order pages * * Since we always retry allocations at order-0 in the huge page * case a warning for either is spurious. */ if (!fatal_signal_pending(current) && page_order == 0) warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, failed to allocate pages", area->nr_pages * PAGE_SIZE); goto fail; } /* * page tables allocations ignore external gfp mask, enforce it * by the scope API */ if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) flags = memalloc_nofs_save(); else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) flags = memalloc_noio_save(); do { ret = vmap_pages_range(addr, addr + size, prot, area->pages, page_shift); if (nofail && (ret < 0)) schedule_timeout_uninterruptible(1); } while (nofail && (ret < 0)); if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) memalloc_nofs_restore(flags); else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) memalloc_noio_restore(flags); if (ret < 0) { warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, failed to map pages", area->nr_pages * PAGE_SIZE); goto fail; } return area->addr; fail: vfree(area->addr); return NULL; } /** * __vmalloc_node_range - allocate virtually contiguous memory * @size: allocation size * @align: desired alignment * @start: vm area range start * @end: vm area range end * @gfp_mask: flags for the page level allocator * @prot: protection mask for the allocated pages * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) * @node: node to use for allocation or NUMA_NO_NODE * @caller: caller's return address * * Allocate enough pages to cover @size from the page level * allocator with @gfp_mask flags. Please note that the full set of gfp * flags are not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all * supported. 
* Zone modifiers are not supported. From the reclaim modifiers * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported) * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and * __GFP_RETRY_MAYFAIL are not supported). * * __GFP_NOWARN can be used to suppress failures messages. * * Map them into contiguous kernel virtual space, using a pagetable * protection of @prot. * * Return: the address of the area or %NULL on failure */ void *__vmalloc_node_range(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, unsigned long vm_flags, int node, const void *caller) { struct vm_struct *area; void *ret; kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE; unsigned long real_size = size; unsigned long real_align = align; unsigned int shift = PAGE_SHIFT; if (WARN_ON_ONCE(!size)) return NULL; if ((size >> PAGE_SHIFT) > totalram_pages()) { warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, exceeds total pages", real_size); return NULL; } if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) { unsigned long size_per_node; /* * Try huge pages. Only try for PAGE_KERNEL allocations, * others like modules don't yet expect huge pages in * their allocations due to apply_to_page_range not * supporting them. */ size_per_node = size; if (node == NUMA_NO_NODE) size_per_node /= num_online_nodes(); if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE) shift = PMD_SHIFT; else shift = arch_vmap_pte_supported_shift(size_per_node); align = max(real_align, 1UL << shift); size = ALIGN(real_size, 1UL << shift); } again: area = __get_vm_area_node(real_size, align, shift, VM_ALLOC | VM_UNINITIALIZED | vm_flags, start, end, node, gfp_mask, caller); if (!area) { bool nofail = gfp_mask & __GFP_NOFAIL; warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, vm_struct allocation failed%s", real_size, (nofail) ? ". Retrying." : ""); if (nofail) { schedule_timeout_uninterruptible(1); goto again; } goto fail; } /* * Prepare arguments for __vmalloc_area_node() and * kasan_unpoison_vmalloc(). */ if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) { if (kasan_hw_tags_enabled()) { /* * Modify protection bits to allow tagging. * This must be done before mapping. */ prot = arch_vmap_pgprot_tagged(prot); /* * Skip page_alloc poisoning and zeroing for physical * pages backing VM_ALLOC mapping. Memory is instead * poisoned and zeroed by kasan_unpoison_vmalloc(). */ gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO; } /* Take note that the mapping is PAGE_KERNEL. */ kasan_flags |= KASAN_VMALLOC_PROT_NORMAL; } /* Allocate physical pages and map them into vmalloc space. */ ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node); if (!ret) goto fail; /* * Mark the pages as accessible, now that they are mapped. * The condition for setting KASAN_VMALLOC_INIT should complement the * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check * to make sure that memory is initialized under the same conditions. * Tag-based KASAN modes only assign tags to normal non-executable * allocations, see __kasan_unpoison_vmalloc(). */ kasan_flags |= KASAN_VMALLOC_VM_ALLOC; if (!want_init_on_free() && want_init_on_alloc(gfp_mask) && (gfp_mask & __GFP_SKIP_ZERO)) kasan_flags |= KASAN_VMALLOC_INIT; /* KASAN_VMALLOC_PROT_NORMAL already set if required. */ area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags); /* * In this function, newly allocated vm_struct has VM_UNINITIALIZED * flag. It means that vm_struct is not fully initialized. 
* Now, it is fully initialized, so remove this flag here. */ clear_vm_uninitialized_flag(area); size = PAGE_ALIGN(size); if (!(vm_flags & VM_DEFER_KMEMLEAK)) kmemleak_vmalloc(area, size, gfp_mask); return area->addr; fail: if (shift > PAGE_SHIFT) { shift = PAGE_SHIFT; align = real_align; size = real_size; goto again; } return NULL; } /** * __vmalloc_node - allocate virtually contiguous memory * @size: allocation size * @align: desired alignment * @gfp_mask: flags for the page level allocator * @node: node to use for allocation or NUMA_NO_NODE * @caller: caller's return address * * Allocate enough pages to cover @size from the page level allocator with * @gfp_mask flags. Map them into contiguous kernel virtual space. * * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL * and __GFP_NOFAIL are not supported * * Any use of gfp flags outside of GFP_KERNEL should be consulted * with mm people. * * Return: pointer to the allocated memory or %NULL on error */ void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, int node, const void *caller) { return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, gfp_mask, PAGE_KERNEL, 0, node, caller); } /* * This is only for performance analysis of vmalloc and stress purpose. * It is required by vmalloc test module, therefore do not use it other * than that. */ #ifdef CONFIG_TEST_VMALLOC_MODULE EXPORT_SYMBOL_GPL(__vmalloc_node); #endif void *__vmalloc(unsigned long size, gfp_t gfp_mask) { return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(__vmalloc); /** * vmalloc - allocate virtually contiguous memory * @size: allocation size * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. * * For tight control over page level allocator and protection flags * use __vmalloc() instead. * * Return: pointer to the allocated memory or %NULL on error */ void *vmalloc(unsigned long size) { return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc); /** * vmalloc_huge - allocate virtually contiguous memory, allow huge pages * @size: allocation size * @gfp_mask: flags for the page level allocator * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. * If @size is greater than or equal to PMD_SIZE, allow using * huge pages for the memory * * Return: pointer to the allocated memory or %NULL on error */ void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) { return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL_GPL(vmalloc_huge); /** * vzalloc - allocate virtually contiguous memory with zero fill * @size: allocation size * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. * The memory allocated is set to zero. * * For tight control over page level allocator and protection flags * use __vmalloc() instead. 
* * Return: pointer to the allocated memory or %NULL on error */ void *vzalloc(unsigned long size) { return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(vzalloc); /** * vmalloc_user - allocate zeroed virtually contiguous memory for userspace * @size: allocation size * * The resulting memory area is zeroed so it can be mapped to userspace * without leaking data. * * Return: pointer to the allocated memory or %NULL on error */ void *vmalloc_user(unsigned long size) { return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, VM_USERMAP, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc_user); /** * vmalloc_node - allocate memory on a specific node * @size: allocation size * @node: numa node * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. * * For tight control over page level allocator and protection flags * use __vmalloc() instead. * * Return: pointer to the allocated memory or %NULL on error */ void *vmalloc_node(unsigned long size, int node) { return __vmalloc_node(size, 1, GFP_KERNEL, node, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc_node); /** * vzalloc_node - allocate memory on a specific node with zero fill * @size: allocation size * @node: numa node * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. * The memory allocated is set to zero. * * Return: pointer to the allocated memory or %NULL on error */ void *vzalloc_node(unsigned long size, int node) { return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node, __builtin_return_address(0)); } EXPORT_SYMBOL(vzalloc_node); #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) #else /* * 64b systems should always have either DMA or DMA32 zones. For others * GFP_DMA32 should do the right thing and use the normal zone. */ #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) #endif /** * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) * @size: allocation size * * Allocate enough 32bit PA addressable pages to cover @size from the * page level allocator and map them into contiguous kernel virtual space. * * Return: pointer to the allocated memory or %NULL on error */ void *vmalloc_32(unsigned long size) { return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc_32); /** * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory * @size: allocation size * * The resulting memory area is 32bit addressable and zeroed so it can be * mapped to userspace without leaking data. * * Return: pointer to the allocated memory or %NULL on error */ void *vmalloc_32_user(unsigned long size) { return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, VM_USERMAP, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc_32_user); /* * Atomically zero bytes in the iterator. * * Returns the number of zeroed bytes. 
*/ static size_t zero_iter(struct iov_iter *iter, size_t count) { size_t remains = count; while (remains > 0) { size_t num, copied; num = min_t(size_t, remains, PAGE_SIZE); copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter); remains -= copied; if (copied < num) break; } return count - remains; } /* * small helper routine, copy contents to iter from addr. * If the page is not present, fill zero. * * Returns the number of copied bytes. */ static size_t aligned_vread_iter(struct iov_iter *iter, const char *addr, size_t count) { size_t remains = count; struct page *page; while (remains > 0) { unsigned long offset, length; size_t copied = 0; offset = offset_in_page(addr); length = PAGE_SIZE - offset; if (length > remains) length = remains; page = vmalloc_to_page(addr); /* * To do safe access to this _mapped_ area, we need lock. But * adding lock here means that we need to add overhead of * vmalloc()/vfree() calls for this _debug_ interface, rarely * used. Instead of that, we'll use an local mapping via * copy_page_to_iter_nofault() and accept a small overhead in * this access function. */ if (page) copied = copy_page_to_iter_nofault(page, offset, length, iter); else copied = zero_iter(iter, length); addr += copied; remains -= copied; if (copied != length) break; } return count - remains; } /* * Read from a vm_map_ram region of memory. * * Returns the number of copied bytes. */ static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr, size_t count, unsigned long flags) { char *start; struct vmap_block *vb; struct xarray *xa; unsigned long offset; unsigned int rs, re; size_t remains, n; /* * If it's area created by vm_map_ram() interface directly, but * not further subdividing and delegating management to vmap_block, * handle it here. */ if (!(flags & VMAP_BLOCK)) return aligned_vread_iter(iter, addr, count); remains = count; /* * Area is split into regions and tracked with vmap_block, read out * each region and zero fill the hole between regions. */ xa = addr_to_vb_xa((unsigned long) addr); vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr)); if (!vb) goto finished_zero; spin_lock(&vb->lock); if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) { spin_unlock(&vb->lock); goto finished_zero; } for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) { size_t copied; if (remains == 0) goto finished; start = vmap_block_vaddr(vb->va->va_start, rs); if (addr < start) { size_t to_zero = min_t(size_t, start - addr, remains); size_t zeroed = zero_iter(iter, to_zero); addr += zeroed; remains -= zeroed; if (remains == 0 || zeroed != to_zero) goto finished; } /*it could start reading from the middle of used region*/ offset = offset_in_page(addr); n = ((re - rs + 1) << PAGE_SHIFT) - offset; if (n > remains) n = remains; copied = aligned_vread_iter(iter, start + offset, n); addr += copied; remains -= copied; if (copied != n) goto finished; } spin_unlock(&vb->lock); finished_zero: /* zero-fill the left dirty or free regions */ return count - remains + zero_iter(iter, remains); finished: /* We couldn't copy/zero everything */ spin_unlock(&vb->lock); return count - remains; } /** * vread_iter() - read vmalloc area in a safe way to an iterator. * @iter: the iterator to which data should be written. * @addr: vm address. * @count: number of bytes to be read. * * This function checks that addr is a valid vmalloc'ed area, and * copy data from that area to a given buffer. 
If the given memory range * of [addr...addr+count) includes some valid address, data is copied to * proper area of @buf. If there are memory holes, they'll be zero-filled. * IOREMAP area is treated as memory hole and no copy is done. * * If [addr...addr+count) doesn't includes any intersects with alive * vm_struct area, returns 0. @buf should be kernel's buffer. * * Note: In usual ops, vread() is never necessary because the caller * should know vmalloc() area is valid and can use memcpy(). * This is for routines which have to access vmalloc area without * any information, as /proc/kcore. * * Return: number of bytes for which addr and buf should be increased * (same number as @count) or %0 if [addr...addr+count) doesn't * include any intersection with valid vmalloc area */ long vread_iter(struct iov_iter *iter, const char *addr, size_t count) { struct vmap_area *va; struct vm_struct *vm; char *vaddr; size_t n, size, flags, remains; addr = kasan_reset_tag(addr); /* Don't allow overflow */ if ((unsigned long) addr + count < count) count = -(unsigned long) addr; remains = count; spin_lock(&vmap_area_lock); va = find_vmap_area_exceed_addr((unsigned long)addr); if (!va) goto finished_zero; /* no intersects with alive vmap_area */ if ((unsigned long)addr + remains <= va->va_start) goto finished_zero; list_for_each_entry_from(va, &vmap_area_list, list) { size_t copied; if (remains == 0) goto finished; vm = va->vm; flags = va->flags & VMAP_FLAGS_MASK; /* * VMAP_BLOCK indicates a sub-type of vm_map_ram area, need * be set together with VMAP_RAM. */ WARN_ON(flags == VMAP_BLOCK); if (!vm && !flags) continue; if (vm && (vm->flags & VM_UNINITIALIZED)) continue; /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ smp_rmb(); vaddr = (char *) va->va_start; size = vm ? get_vm_area_size(vm) : va_size(va); if (addr >= vaddr + size) continue; if (addr < vaddr) { size_t to_zero = min_t(size_t, vaddr - addr, remains); size_t zeroed = zero_iter(iter, to_zero); addr += zeroed; remains -= zeroed; if (remains == 0 || zeroed != to_zero) goto finished; } n = vaddr + size - addr; if (n > remains) n = remains; if (flags & VMAP_RAM) copied = vmap_ram_vread_iter(iter, addr, n, flags); else if (!(vm->flags & VM_IOREMAP)) copied = aligned_vread_iter(iter, addr, n); else /* IOREMAP area is treated as memory hole */ copied = zero_iter(iter, n); addr += copied; remains -= copied; if (copied != n) goto finished; } finished_zero: spin_unlock(&vmap_area_lock); /* zero-fill memory holes */ return count - remains + zero_iter(iter, remains); finished: /* Nothing remains, or We couldn't copy/zero everything. */ spin_unlock(&vmap_area_lock); return count - remains; } /** * remap_vmalloc_range_partial - map vmalloc pages to userspace * @vma: vma to cover * @uaddr: target user address to start at * @kaddr: virtual address of vmalloc kernel memory * @pgoff: offset from @kaddr to start at * @size: size of map area * * Returns: 0 for success, -Exxx on failure * * This function checks that @kaddr is a valid vmalloc'ed area, * and that it is big enough to cover the range starting at * @uaddr in @vma. Will return failure if that criteria isn't * met. 
* * Similar to remap_pfn_range() (see mm/memory.c) */ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, void *kaddr, unsigned long pgoff, unsigned long size) { struct vm_struct *area; unsigned long off; unsigned long end_index; if (check_shl_overflow(pgoff, PAGE_SHIFT, &off)) return -EINVAL; size = PAGE_ALIGN(size); if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) return -EINVAL; area = find_vm_area(kaddr); if (!area) return -EINVAL; if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) return -EINVAL; if (check_add_overflow(size, off, &end_index) || end_index > get_vm_area_size(area)) return -EINVAL; kaddr += off; do { struct page *page = vmalloc_to_page(kaddr); int ret; ret = vm_insert_page(vma, uaddr, page); if (ret) return ret; uaddr += PAGE_SIZE; kaddr += PAGE_SIZE; size -= PAGE_SIZE; } while (size > 0); vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); return 0; } /** * remap_vmalloc_range - map vmalloc pages to userspace * @vma: vma to cover (map full range of vma) * @addr: vmalloc memory * @pgoff: number of pages into addr before first page to map * * Returns: 0 for success, -Exxx on failure * * This function checks that addr is a valid vmalloc'ed area, and * that it is big enough to cover the vma. Will return failure if * that criteria isn't met. * * Similar to remap_pfn_range() (see mm/memory.c) */ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff) { return remap_vmalloc_range_partial(vma, vma->vm_start, addr, pgoff, vma->vm_end - vma->vm_start); } EXPORT_SYMBOL(remap_vmalloc_range); void free_vm_area(struct vm_struct *area) { struct vm_struct *ret; ret = remove_vm_area(area->addr); BUG_ON(ret != area); kfree(area); } EXPORT_SYMBOL_GPL(free_vm_area); #ifdef CONFIG_SMP static struct vmap_area *node_to_va(struct rb_node *n) { return rb_entry_safe(n, struct vmap_area, rb_node); } /** * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to * @addr: target address * * Returns: vmap_area if it is found. If there is no such area * the first highest(reverse order) vmap_area is returned * i.e. va->va_start < addr && va->va_end < addr or NULL * if there are no any areas before @addr. */ static struct vmap_area * pvm_find_va_enclose_addr(unsigned long addr) { struct vmap_area *va, *tmp; struct rb_node *n; n = free_vmap_area_root.rb_node; va = NULL; while (n) { tmp = rb_entry(n, struct vmap_area, rb_node); if (tmp->va_start <= addr) { va = tmp; if (tmp->va_end >= addr) break; n = n->rb_right; } else { n = n->rb_left; } } return va; } /** * pvm_determine_end_from_reverse - find the highest aligned address * of free block below VMALLOC_END * @va: * in - the VA we start the search(reverse order); * out - the VA with the highest aligned end address. 
* @align: alignment for required highest address * * Returns: determined end address within vmap_area */ static unsigned long pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) { unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); unsigned long addr; if (likely(*va)) { list_for_each_entry_from_reverse((*va), &free_vmap_area_list, list) { addr = min((*va)->va_end & ~(align - 1), vmalloc_end); if ((*va)->va_start < addr) return addr; } } return 0; } /** * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator * @offsets: array containing offset of each area * @sizes: array containing size of each area * @nr_vms: the number of areas to allocate * @align: alignment, all entries in @offsets and @sizes must be aligned to this * * Returns: kmalloc'd vm_struct pointer array pointing to allocated * vm_structs on success, %NULL on failure * * Percpu allocator wants to use congruent vm areas so that it can * maintain the offsets among percpu areas. This function allocates * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to * be scattered pretty far, distance between two areas easily going up * to gigabytes. To avoid interacting with regular vmallocs, these * areas are allocated from top. * * Despite its complicated look, this allocator is rather simple. It * does everything top-down and scans free blocks from the end looking * for matching base. While scanning, if any of the areas do not fit the * base address is pulled down to fit the area. Scanning is repeated till * all the areas fit and then all necessary data structures are inserted * and the result is returned. */ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, const size_t *sizes, int nr_vms, size_t align) { const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); struct vmap_area **vas, *va; struct vm_struct **vms; int area, area2, last_area, term_area; unsigned long base, start, size, end, last_end, orig_start, orig_end; bool purged = false; /* verify parameters and allocate data structures */ BUG_ON(offset_in_page(align) || !is_power_of_2(align)); for (last_area = 0, area = 0; area < nr_vms; area++) { start = offsets[area]; end = start + sizes[area]; /* is everything aligned properly? */ BUG_ON(!IS_ALIGNED(offsets[area], align)); BUG_ON(!IS_ALIGNED(sizes[area], align)); /* detect the area with the highest address */ if (start > offsets[last_area]) last_area = area; for (area2 = area + 1; area2 < nr_vms; area2++) { unsigned long start2 = offsets[area2]; unsigned long end2 = start2 + sizes[area2]; BUG_ON(start2 < end && start < end2); } } last_end = offsets[last_area] + sizes[last_area]; if (vmalloc_end - vmalloc_start < last_end) { WARN_ON(true); return NULL; } vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); if (!vas || !vms) goto err_free2; for (area = 0; area < nr_vms; area++) { vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); if (!vas[area] || !vms[area]) goto err_free; } retry: spin_lock(&free_vmap_area_lock); /* start scanning - we scan from the top, begin with the last area */ area = term_area = last_area; start = offsets[area]; end = start + sizes[area]; va = pvm_find_va_enclose_addr(vmalloc_end); base = pvm_determine_end_from_reverse(&va, align) - end; while (true) { /* * base might have underflowed, add last_end before * comparing. 
*/ if (base + last_end < vmalloc_start + last_end) goto overflow; /* * Fitting base has not been found. */ if (va == NULL) goto overflow; /* * If required width exceeds current VA block, move * base downwards and then recheck. */ if (base + end > va->va_end) { base = pvm_determine_end_from_reverse(&va, align) - end; term_area = area; continue; } /* * If this VA does not fit, move base downwards and recheck. */ if (base + start < va->va_start) { va = node_to_va(rb_prev(&va->rb_node)); base = pvm_determine_end_from_reverse(&va, align) - end; term_area = area; continue; } /* * This area fits, move on to the previous one. If * the previous one is the terminal one, we're done. */ area = (area + nr_vms - 1) % nr_vms; if (area == term_area) break; start = offsets[area]; end = start + sizes[area]; va = pvm_find_va_enclose_addr(base + end); } /* we've found a fitting base, insert all va's */ for (area = 0; area < nr_vms; area++) { int ret; start = base + offsets[area]; size = sizes[area]; va = pvm_find_va_enclose_addr(start); if (WARN_ON_ONCE(va == NULL)) /* It is a BUG(), but trigger recovery instead. */ goto recovery; ret = adjust_va_to_fit_type(&free_vmap_area_root, &free_vmap_area_list, va, start, size); if (WARN_ON_ONCE(unlikely(ret))) /* It is a BUG(), but trigger recovery instead. */ goto recovery; /* Allocated area. */ va = vas[area]; va->va_start = start; va->va_end = start + size; } spin_unlock(&free_vmap_area_lock); /* populate the kasan shadow space */ for (area = 0; area < nr_vms; area++) { if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) goto err_free_shadow; } /* insert all vm's */ spin_lock(&vmap_area_lock); for (area = 0; area < nr_vms; area++) { insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list); setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC, pcpu_get_vm_areas); } spin_unlock(&vmap_area_lock); /* * Mark allocated areas as accessible. Do it now as a best-effort * approach, as they can be mapped outside of vmalloc code. * With hardware tag-based KASAN, marking is skipped for * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). */ for (area = 0; area < nr_vms; area++) vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, vms[area]->size, KASAN_VMALLOC_PROT_NORMAL); kfree(vas); return vms; recovery: /* * Remove previously allocated areas. There is no * need in removing these areas from the busy tree, * because they are inserted only on the final step * and when pcpu_get_vm_areas() is success. */ while (area--) { orig_start = vas[area]->va_start; orig_end = vas[area]->va_end; va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, &free_vmap_area_list); if (va) kasan_release_vmalloc(orig_start, orig_end, va->va_start, va->va_end); vas[area] = NULL; } overflow: spin_unlock(&free_vmap_area_lock); if (!purged) { reclaim_and_purge_vmap_areas(); purged = true; /* Before "retry", check if we recover. */ for (area = 0; area < nr_vms; area++) { if (vas[area]) continue; vas[area] = kmem_cache_zalloc( vmap_area_cachep, GFP_KERNEL); if (!vas[area]) goto err_free; } goto retry; } err_free: for (area = 0; area < nr_vms; area++) { if (vas[area]) kmem_cache_free(vmap_area_cachep, vas[area]); kfree(vms[area]); } err_free2: kfree(vas); kfree(vms); return NULL; err_free_shadow: spin_lock(&free_vmap_area_lock); /* * We release all the vmalloc shadows, even the ones for regions that * hadn't been successfully added. This relies on kasan_release_vmalloc * being able to tolerate this case. 
*/ for (area = 0; area < nr_vms; area++) { orig_start = vas[area]->va_start; orig_end = vas[area]->va_end; va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, &free_vmap_area_list); if (va) kasan_release_vmalloc(orig_start, orig_end, va->va_start, va->va_end); vas[area] = NULL; kfree(vms[area]); } spin_unlock(&free_vmap_area_lock); kfree(vas); kfree(vms); return NULL; } /** * pcpu_free_vm_areas - free vmalloc areas for percpu allocator * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() * @nr_vms: the number of allocated areas * * Free vm_structs and the array allocated by pcpu_get_vm_areas(). */ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) { int i; for (i = 0; i < nr_vms; i++) free_vm_area(vms[i]); kfree(vms); } #endif /* CONFIG_SMP */ #ifdef CONFIG_PRINTK bool vmalloc_dump_obj(void *object) { void *objp = (void *)PAGE_ALIGN((unsigned long)object); const void *caller; struct vm_struct *vm; struct vmap_area *va; unsigned long addr; unsigned int nr_pages; if (!spin_trylock(&vmap_area_lock)) return false; va = __find_vmap_area((unsigned long)objp, &vmap_area_root); if (!va) { spin_unlock(&vmap_area_lock); return false; } vm = va->vm; if (!vm) { spin_unlock(&vmap_area_lock); return false; } addr = (unsigned long)vm->addr; caller = vm->caller; nr_pages = vm->nr_pages; spin_unlock(&vmap_area_lock); pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n", nr_pages, addr, caller); return true; } #endif #ifdef CONFIG_PROC_FS static void *s_start(struct seq_file *m, loff_t *pos) __acquires(&vmap_purge_lock) __acquires(&vmap_area_lock) { mutex_lock(&vmap_purge_lock); spin_lock(&vmap_area_lock); return seq_list_start(&vmap_area_list, *pos); } static void *s_next(struct seq_file *m, void *p, loff_t *pos) { return seq_list_next(p, &vmap_area_list, pos); } static void s_stop(struct seq_file *m, void *p) __releases(&vmap_area_lock) __releases(&vmap_purge_lock) { spin_unlock(&vmap_area_lock); mutex_unlock(&vmap_purge_lock); } static void show_numa_info(struct seq_file *m, struct vm_struct *v) { if (IS_ENABLED(CONFIG_NUMA)) { unsigned int nr, *counters = m->private; unsigned int step = 1U << vm_area_page_order(v); if (!counters) return; if (v->flags & VM_UNINITIALIZED) return; /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ smp_rmb(); memset(counters, 0, nr_node_ids * sizeof(unsigned int)); for (nr = 0; nr < v->nr_pages; nr += step) counters[page_to_nid(v->pages[nr])] += step; for_each_node_state(nr, N_HIGH_MEMORY) if (counters[nr]) seq_printf(m, " N%u=%u", nr, counters[nr]); } } static void show_purge_info(struct seq_file *m) { struct vmap_area *va; spin_lock(&purge_vmap_area_lock); list_for_each_entry(va, &purge_vmap_area_list, list) { seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n", (void *)va->va_start, (void *)va->va_end, va->va_end - va->va_start); } spin_unlock(&purge_vmap_area_lock); } static int s_show(struct seq_file *m, void *p) { struct vmap_area *va; struct vm_struct *v; va = list_entry(p, struct vmap_area, list); if (!va->vm) { if (va->flags & VMAP_RAM) seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n", (void *)va->va_start, (void *)va->va_end, va->va_end - va->va_start); goto final; } v = va->vm; seq_printf(m, "0x%pK-0x%pK %7ld", v->addr, v->addr + v->size, v->size); if (v->caller) seq_printf(m, " %pS", v->caller); if (v->nr_pages) seq_printf(m, " pages=%d", v->nr_pages); if (v->phys_addr) seq_printf(m, " phys=%pa", &v->phys_addr); if (v->flags & VM_IOREMAP) seq_puts(m, " ioremap"); if (v->flags & VM_ALLOC) seq_puts(m, " 
vmalloc"); if (v->flags & VM_MAP) seq_puts(m, " vmap"); if (v->flags & VM_USERMAP) seq_puts(m, " user"); if (v->flags & VM_DMA_COHERENT) seq_puts(m, " dma-coherent"); if (is_vmalloc_addr(v->pages)) seq_puts(m, " vpages"); show_numa_info(m, v); seq_putc(m, '\n'); /* * As a final step, dump "unpurged" areas. */ final: if (list_is_last(&va->list, &vmap_area_list)) show_purge_info(m); return 0; } static const struct seq_operations vmalloc_op = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show, }; static int __init proc_vmalloc_init(void) { if (IS_ENABLED(CONFIG_NUMA)) proc_create_seq_private("vmallocinfo", 0400, NULL, &vmalloc_op, nr_node_ids * sizeof(unsigned int), NULL); else proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op); return 0; } module_init(proc_vmalloc_init); #endif void __init vmalloc_init(void) { struct vmap_area *va; struct vm_struct *tmp; int i; /* * Create the cache for vmap_area objects. */ vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC); for_each_possible_cpu(i) { struct vmap_block_queue *vbq; struct vfree_deferred *p; vbq = &per_cpu(vmap_block_queue, i); spin_lock_init(&vbq->lock); INIT_LIST_HEAD(&vbq->free); p = &per_cpu(vfree_deferred, i); init_llist_head(&p->list); INIT_WORK(&p->wq, delayed_vfree_work); xa_init(&vbq->vmap_blocks); } /* Import existing vmlist entries. */ for (tmp = vmlist; tmp; tmp = tmp->next) { va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); if (WARN_ON_ONCE(!va)) continue; va->va_start = (unsigned long)tmp->addr; va->va_end = va->va_start + tmp->size; va->vm = tmp; insert_vmap_area(va, &vmap_area_root, &vmap_area_list); } /* * Now we can initialize a free vmap space. */ vmap_init_free_space(); vmap_initialized = true; }
linux-master
mm/vmalloc.c
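A minimal sketch, not taken from the kernel tree, of the allocation lifecycle the kernel-doc comments in mm/vmalloc.c describe: vzalloc() returns zeroed, virtually contiguous memory and vfree() releases it. The module and buffer names (demo_vmalloc_*, demo_buf) are hypothetical.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

static void *demo_buf;

static int __init demo_vmalloc_init(void)
{
	/* Zeroed, virtually contiguous allocation; may sleep (GFP_KERNEL). */
	demo_buf = vzalloc(1 << 20);
	if (!demo_buf)
		return -ENOMEM;
	return 0;
}

static void __exit demo_vmalloc_exit(void)
{
	/* vfree(NULL) is a no-op, so this is safe even if init never ran. */
	vfree(demo_buf);
}

module_init(demo_vmalloc_init);
module_exit(demo_vmalloc_exit);
MODULE_LICENSE("GPL");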
#include <linux/device.h> #include <linux/dma-map-ops.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/kernel.h> #include <linux/ktime.h> #include <linux/module.h> #define NR_TESTS (100) struct dma_pool_pair { dma_addr_t dma; void *v; }; struct dmapool_parms { size_t size; size_t align; size_t boundary; }; static const struct dmapool_parms pool_parms[] = { { .size = 16, .align = 16, .boundary = 0 }, { .size = 64, .align = 64, .boundary = 0 }, { .size = 256, .align = 256, .boundary = 0 }, { .size = 1024, .align = 1024, .boundary = 0 }, { .size = 4096, .align = 4096, .boundary = 0 }, { .size = 68, .align = 32, .boundary = 4096 }, }; static struct dma_pool *pool; static struct device test_dev; static u64 dma_mask; static inline int nr_blocks(int size) { return clamp_t(int, (PAGE_SIZE / size) * 512, 1024, 8192); } static int dmapool_test_alloc(struct dma_pool_pair *p, int blocks) { int i; for (i = 0; i < blocks; i++) { p[i].v = dma_pool_alloc(pool, GFP_KERNEL, &p[i].dma); if (!p[i].v) goto pool_fail; } for (i = 0; i < blocks; i++) dma_pool_free(pool, p[i].v, p[i].dma); return 0; pool_fail: for (--i; i >= 0; i--) dma_pool_free(pool, p[i].v, p[i].dma); return -ENOMEM; } static int dmapool_test_block(const struct dmapool_parms *parms) { int blocks = nr_blocks(parms->size); ktime_t start_time, end_time; struct dma_pool_pair *p; int i, ret; p = kcalloc(blocks, sizeof(*p), GFP_KERNEL); if (!p) return -ENOMEM; pool = dma_pool_create("test pool", &test_dev, parms->size, parms->align, parms->boundary); if (!pool) { ret = -ENOMEM; goto free_pairs; } start_time = ktime_get(); for (i = 0; i < NR_TESTS; i++) { ret = dmapool_test_alloc(p, blocks); if (ret) goto free_pool; if (need_resched()) cond_resched(); } end_time = ktime_get(); printk("dmapool test: size:%-4zu align:%-4zu blocks:%-4d time:%llu\n", parms->size, parms->align, blocks, ktime_us_delta(end_time, start_time)); free_pool: dma_pool_destroy(pool); free_pairs: kfree(p); return ret; } static void dmapool_test_release(struct device *dev) { } static int dmapool_checks(void) { int i, ret; ret = dev_set_name(&test_dev, "dmapool-test"); if (ret) return ret; ret = device_register(&test_dev); if (ret) { printk("%s: register failed:%d\n", __func__, ret); goto put_device; } test_dev.release = dmapool_test_release; set_dma_ops(&test_dev, NULL); test_dev.dma_mask = &dma_mask; ret = dma_set_mask_and_coherent(&test_dev, DMA_BIT_MASK(64)); if (ret) { printk("%s: mask failed:%d\n", __func__, ret); goto del_device; } for (i = 0; i < ARRAY_SIZE(pool_parms); i++) { ret = dmapool_test_block(&pool_parms[i]); if (ret) break; } del_device: device_del(&test_dev); put_device: put_device(&test_dev); return ret; } static void dmapool_exit(void) { } module_init(dmapool_checks); module_exit(dmapool_exit); MODULE_LICENSE("GPL");
linux-master
mm/dmapool_test.c
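The test module above stresses the dma_pool API; as a hedged illustration of the same calls in an ordinary driver context (demo_use_dma_pool and its pool parameters are hypothetical, not from the tree):

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>

/* Assumes the caller already owns a DMA-capable struct device. */
static int demo_use_dma_pool(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	/* 256-byte blocks, 256-byte aligned, no boundary restriction. */
	pool = dma_pool_create("demo pool", dev, 256, 256, 0);
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!vaddr) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... program @dma into the hardware, touch @vaddr from the CPU ... */

	dma_pool_free(pool, vaddr, dma);
	dma_pool_destroy(pool);
	return 0;
}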
// SPDX-License-Identifier: GPL-2.0-or-later /* * Procedures for maintaining information about logical memory blocks. * * Peter Bergner, IBM Corp. June 2001. * Copyright (C) 2001 Peter Bergner. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/poison.h> #include <linux/pfn.h> #include <linux/debugfs.h> #include <linux/kmemleak.h> #include <linux/seq_file.h> #include <linux/memblock.h> #include <asm/sections.h> #include <linux/io.h> #include "internal.h" #define INIT_MEMBLOCK_REGIONS 128 #define INIT_PHYSMEM_REGIONS 4 #ifndef INIT_MEMBLOCK_RESERVED_REGIONS # define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS #endif #ifndef INIT_MEMBLOCK_MEMORY_REGIONS #define INIT_MEMBLOCK_MEMORY_REGIONS INIT_MEMBLOCK_REGIONS #endif /** * DOC: memblock overview * * Memblock is a method of managing memory regions during the early * boot period when the usual kernel memory allocators are not up and * running. * * Memblock views the system memory as collections of contiguous * regions. There are several types of these collections: * * * ``memory`` - describes the physical memory available to the * kernel; this may differ from the actual physical memory installed * in the system, for instance when the memory is restricted with * ``mem=`` command line parameter * * ``reserved`` - describes the regions that were allocated * * ``physmem`` - describes the actual physical memory available during * boot regardless of the possible restrictions and memory hot(un)plug; * the ``physmem`` type is only available on some architectures. * * Each region is represented by struct memblock_region that * defines the region extents, its attributes and NUMA node id on NUMA * systems. Every memory type is described by the struct memblock_type * which contains an array of memory regions along with * the allocator metadata. The "memory" and "reserved" types are nicely * wrapped with struct memblock. This structure is statically * initialized at build time. The region arrays are initially sized to * %INIT_MEMBLOCK_MEMORY_REGIONS for "memory" and * %INIT_MEMBLOCK_RESERVED_REGIONS for "reserved". The region array * for "physmem" is initially sized to %INIT_PHYSMEM_REGIONS. * The memblock_allow_resize() enables automatic resizing of the region * arrays during addition of new regions. This feature should be used * with care so that memory allocated for the region array will not * overlap with areas that should be reserved, for example initrd. * * The early architecture setup should tell memblock what the physical * memory layout is by using memblock_add() or memblock_add_node() * functions. The first function does not assign the region to a NUMA * node and it is appropriate for UMA systems. Yet, it is possible to * use it on NUMA systems as well and assign the region to a NUMA node * later in the setup process using memblock_set_node(). The * memblock_add_node() performs such an assignment directly. * * Once memblock is setup the memory can be allocated using one of the * API variants: * * * memblock_phys_alloc*() - these functions return the **physical** * address of the allocated memory * * memblock_alloc*() - these functions return the **virtual** address * of the allocated memory. * * Note, that both API variants use implicit assumptions about allowed * memory ranges and the fallback methods. Consult the documentation * of memblock_alloc_internal() and memblock_alloc_range_nid() * functions for more elaborate description. 
* * As the system boot progresses, the architecture specific mem_init() * function frees all the memory to the buddy page allocator. * * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the * memblock data structures (except "physmem") will be discarded after the * system initialization completes. */ #ifndef CONFIG_NUMA struct pglist_data __refdata contig_page_data; EXPORT_SYMBOL(contig_page_data); #endif unsigned long max_low_pfn; unsigned long min_low_pfn; unsigned long max_pfn; unsigned long long max_possible_pfn; static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS] __initdata_memblock; static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock; #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS]; #endif struct memblock memblock __initdata_memblock = { .memory.regions = memblock_memory_init_regions, .memory.cnt = 1, /* empty dummy entry */ .memory.max = INIT_MEMBLOCK_MEMORY_REGIONS, .memory.name = "memory", .reserved.regions = memblock_reserved_init_regions, .reserved.cnt = 1, /* empty dummy entry */ .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS, .reserved.name = "reserved", .bottom_up = false, .current_limit = MEMBLOCK_ALLOC_ANYWHERE, }; #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP struct memblock_type physmem = { .regions = memblock_physmem_init_regions, .cnt = 1, /* empty dummy entry */ .max = INIT_PHYSMEM_REGIONS, .name = "physmem", }; #endif /* * keep a pointer to &memblock.memory in the text section to use it in * __next_mem_range() and its helpers. * For architectures that do not keep memblock data after init, this * pointer will be reset to NULL at memblock_discard() */ static __refdata struct memblock_type *memblock_memory = &memblock.memory; #define for_each_memblock_type(i, memblock_type, rgn) \ for (i = 0, rgn = &memblock_type->regions[0]; \ i < memblock_type->cnt; \ i++, rgn = &memblock_type->regions[i]) #define memblock_dbg(fmt, ...) \ do { \ if (memblock_debug) \ pr_info(fmt, ##__VA_ARGS__); \ } while (0) static int memblock_debug __initdata_memblock; static bool system_has_some_mirror __initdata_memblock; static int memblock_can_resize __initdata_memblock; static int memblock_memory_in_slab __initdata_memblock; static int memblock_reserved_in_slab __initdata_memblock; bool __init_memblock memblock_has_mirror(void) { return system_has_some_mirror; } static enum memblock_flags __init_memblock choose_memblock_flags(void) { return system_has_some_mirror ? 
MEMBLOCK_MIRROR : MEMBLOCK_NONE; } /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */ static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size) { return *size = min(*size, PHYS_ADDR_MAX - base); } /* * Address comparison utilities */ static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, phys_addr_t base2, phys_addr_t size2) { return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); } bool __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) { unsigned long i; memblock_cap_size(base, &size); for (i = 0; i < type->cnt; i++) if (memblock_addrs_overlap(base, size, type->regions[i].base, type->regions[i].size)) break; return i < type->cnt; } /** * __memblock_find_range_bottom_up - find free area utility in bottom-up * @start: start of candidate range * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or * %MEMBLOCK_ALLOC_ACCESSIBLE * @size: size of free area to find * @align: alignment of free area to find * @nid: nid of the free area to find, %NUMA_NO_NODE for any node * @flags: pick from blocks based on memory attributes * * Utility called from memblock_find_in_range_node(), find free area bottom-up. * * Return: * Found address on success, 0 on failure. */ static phys_addr_t __init_memblock __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align, int nid, enum memblock_flags flags) { phys_addr_t this_start, this_end, cand; u64 i; for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) { this_start = clamp(this_start, start, end); this_end = clamp(this_end, start, end); cand = round_up(this_start, align); if (cand < this_end && this_end - cand >= size) return cand; } return 0; } /** * __memblock_find_range_top_down - find free area utility, in top-down * @start: start of candidate range * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or * %MEMBLOCK_ALLOC_ACCESSIBLE * @size: size of free area to find * @align: alignment of free area to find * @nid: nid of the free area to find, %NUMA_NO_NODE for any node * @flags: pick from blocks based on memory attributes * * Utility called from memblock_find_in_range_node(), find free area top-down. * * Return: * Found address on success, 0 on failure. */ static phys_addr_t __init_memblock __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align, int nid, enum memblock_flags flags) { phys_addr_t this_start, this_end, cand; u64 i; for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end, NULL) { this_start = clamp(this_start, start, end); this_end = clamp(this_end, start, end); if (this_end < size) continue; cand = round_down(this_end - size, align); if (cand >= this_start) return cand; } return 0; } /** * memblock_find_in_range_node - find free area in given range and node * @size: size of free area to find * @align: alignment of free area to find * @start: start of candidate range * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or * %MEMBLOCK_ALLOC_ACCESSIBLE * @nid: nid of the free area to find, %NUMA_NO_NODE for any node * @flags: pick from blocks based on memory attributes * * Find @size free area aligned to @align in the specified range and node. * * Return: * Found address on success, 0 on failure. 
*/ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, int nid, enum memblock_flags flags) { /* pump up @end */ if (end == MEMBLOCK_ALLOC_ACCESSIBLE || end == MEMBLOCK_ALLOC_NOLEAKTRACE) end = memblock.current_limit; /* avoid allocating the first page */ start = max_t(phys_addr_t, start, PAGE_SIZE); end = max(start, end); if (memblock_bottom_up()) return __memblock_find_range_bottom_up(start, end, size, align, nid, flags); else return __memblock_find_range_top_down(start, end, size, align, nid, flags); } /** * memblock_find_in_range - find free area in given range * @start: start of candidate range * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or * %MEMBLOCK_ALLOC_ACCESSIBLE * @size: size of free area to find * @align: alignment of free area to find * * Find @size free area aligned to @align in the specified range. * * Return: * Found address on success, 0 on failure. */ static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align) { phys_addr_t ret; enum memblock_flags flags = choose_memblock_flags(); again: ret = memblock_find_in_range_node(size, align, start, end, NUMA_NO_NODE, flags); if (!ret && (flags & MEMBLOCK_MIRROR)) { pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n", &size); flags &= ~MEMBLOCK_MIRROR; goto again; } return ret; } static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) { type->total_size -= type->regions[r].size; memmove(&type->regions[r], &type->regions[r + 1], (type->cnt - (r + 1)) * sizeof(type->regions[r])); type->cnt--; /* Special case for empty arrays */ if (type->cnt == 0) { WARN_ON(type->total_size != 0); type->cnt = 1; type->regions[0].base = 0; type->regions[0].size = 0; type->regions[0].flags = 0; memblock_set_region_node(&type->regions[0], MAX_NUMNODES); } } #ifndef CONFIG_ARCH_KEEP_MEMBLOCK /** * memblock_discard - discard memory and reserved arrays if they were allocated */ void __init memblock_discard(void) { phys_addr_t addr, size; if (memblock.reserved.regions != memblock_reserved_init_regions) { addr = __pa(memblock.reserved.regions); size = PAGE_ALIGN(sizeof(struct memblock_region) * memblock.reserved.max); if (memblock_reserved_in_slab) kfree(memblock.reserved.regions); else memblock_free_late(addr, size); } if (memblock.memory.regions != memblock_memory_init_regions) { addr = __pa(memblock.memory.regions); size = PAGE_ALIGN(sizeof(struct memblock_region) * memblock.memory.max); if (memblock_memory_in_slab) kfree(memblock.memory.regions); else memblock_free_late(addr, size); } memblock_memory = NULL; } #endif /** * memblock_double_array - double the size of the memblock regions array * @type: memblock type of the regions array being doubled * @new_area_start: starting address of memory range to avoid overlap with * @new_area_size: size of memory range to avoid overlap with * * Double the size of the @type regions array. If memblock is being used to * allocate memory for a new reserved regions array and there is a previously * allocated memory range [@new_area_start, @new_area_start + @new_area_size] * waiting to be reserved, ensure the memory used by the new array does * not overlap. * * Return: * 0 on success, -1 on failure. 
*/ static int __init_memblock memblock_double_array(struct memblock_type *type, phys_addr_t new_area_start, phys_addr_t new_area_size) { struct memblock_region *new_array, *old_array; phys_addr_t old_alloc_size, new_alloc_size; phys_addr_t old_size, new_size, addr, new_end; int use_slab = slab_is_available(); int *in_slab; /* We don't allow resizing until we know about the reserved regions * of memory that aren't suitable for allocation */ if (!memblock_can_resize) return -1; /* Calculate new doubled size */ old_size = type->max * sizeof(struct memblock_region); new_size = old_size << 1; /* * We need to allocated new one align to PAGE_SIZE, * so we can free them completely later. */ old_alloc_size = PAGE_ALIGN(old_size); new_alloc_size = PAGE_ALIGN(new_size); /* Retrieve the slab flag */ if (type == &memblock.memory) in_slab = &memblock_memory_in_slab; else in_slab = &memblock_reserved_in_slab; /* Try to find some space for it */ if (use_slab) { new_array = kmalloc(new_size, GFP_KERNEL); addr = new_array ? __pa(new_array) : 0; } else { /* only exclude range when trying to double reserved.regions */ if (type != &memblock.reserved) new_area_start = new_area_size = 0; addr = memblock_find_in_range(new_area_start + new_area_size, memblock.current_limit, new_alloc_size, PAGE_SIZE); if (!addr && new_area_size) addr = memblock_find_in_range(0, min(new_area_start, memblock.current_limit), new_alloc_size, PAGE_SIZE); new_array = addr ? __va(addr) : NULL; } if (!addr) { pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", type->name, type->max, type->max * 2); return -1; } new_end = addr + new_size - 1; memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]", type->name, type->max * 2, &addr, &new_end); /* * Found space, we now need to move the array over before we add the * reserved region since it may be our reserved array itself that is * full. */ memcpy(new_array, type->regions, old_size); memset(new_array + type->max, 0, old_size); old_array = type->regions; type->regions = new_array; type->max <<= 1; /* Free old array. We needn't free it if the array is the static one */ if (*in_slab) kfree(old_array); else if (old_array != memblock_memory_init_regions && old_array != memblock_reserved_init_regions) memblock_free(old_array, old_alloc_size); /* * Reserve the new array if that comes from the memblock. 
Otherwise, we * needn't do it */ if (!use_slab) BUG_ON(memblock_reserve(addr, new_alloc_size)); /* Update slab flag */ *in_slab = use_slab; return 0; } /** * memblock_merge_regions - merge neighboring compatible regions * @type: memblock type to scan * @start_rgn: start scanning from (@start_rgn - 1) * @end_rgn: end scanning at (@end_rgn - 1) * Scan @type and merge neighboring compatible regions in [@start_rgn - 1, @end_rgn) */ static void __init_memblock memblock_merge_regions(struct memblock_type *type, unsigned long start_rgn, unsigned long end_rgn) { int i = 0; if (start_rgn) i = start_rgn - 1; end_rgn = min(end_rgn, type->cnt - 1); while (i < end_rgn) { struct memblock_region *this = &type->regions[i]; struct memblock_region *next = &type->regions[i + 1]; if (this->base + this->size != next->base || memblock_get_region_node(this) != memblock_get_region_node(next) || this->flags != next->flags) { BUG_ON(this->base + this->size > next->base); i++; continue; } this->size += next->size; /* move forward from next + 1, index of which is i + 2 */ memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next)); type->cnt--; end_rgn--; } } /** * memblock_insert_region - insert new memblock region * @type: memblock type to insert into * @idx: index for the insertion point * @base: base address of the new region * @size: size of the new region * @nid: node id of the new region * @flags: flags of the new region * * Insert new memblock region [@base, @base + @size) into @type at @idx. * @type must already have extra room to accommodate the new region. */ static void __init_memblock memblock_insert_region(struct memblock_type *type, int idx, phys_addr_t base, phys_addr_t size, int nid, enum memblock_flags flags) { struct memblock_region *rgn = &type->regions[idx]; BUG_ON(type->cnt >= type->max); memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn)); rgn->base = base; rgn->size = size; rgn->flags = flags; memblock_set_region_node(rgn, nid); type->cnt++; type->total_size += size; } /** * memblock_add_range - add new memblock region * @type: memblock type to add new region into * @base: base address of the new region * @size: size of the new region * @nid: nid of the new region * @flags: flags of the new region * * Add new memblock region [@base, @base + @size) into @type. The new region * is allowed to overlap with existing ones - overlaps don't affect already * existing regions. @type is guaranteed to be minimal (all neighbouring * compatible regions are merged) after the addition. * * Return: * 0 on success, -errno on failure. */ static int __init_memblock memblock_add_range(struct memblock_type *type, phys_addr_t base, phys_addr_t size, int nid, enum memblock_flags flags) { bool insert = false; phys_addr_t obase = base; phys_addr_t end = base + memblock_cap_size(base, &size); int idx, nr_new, start_rgn = -1, end_rgn; struct memblock_region *rgn; if (!size) return 0; /* special case for empty array */ if (type->regions[0].size == 0) { WARN_ON(type->cnt != 1 || type->total_size); type->regions[0].base = base; type->regions[0].size = size; type->regions[0].flags = flags; memblock_set_region_node(&type->regions[0], nid); type->total_size = size; return 0; } /* * The worst case is when new range overlaps all existing regions, * then we'll need type->cnt + 1 empty regions in @type. So if * type->cnt * 2 + 1 is less than or equal to type->max, we know * that there is enough empty regions in @type, and we can insert * regions directly. 
*/ if (type->cnt * 2 + 1 <= type->max) insert = true; repeat: /* * The following is executed twice. Once with %false @insert and * then with %true. The first counts the number of regions needed * to accommodate the new area. The second actually inserts them. */ base = obase; nr_new = 0; for_each_memblock_type(idx, type, rgn) { phys_addr_t rbase = rgn->base; phys_addr_t rend = rbase + rgn->size; if (rbase >= end) break; if (rend <= base) continue; /* * @rgn overlaps. If it separates the lower part of new * area, insert that portion. */ if (rbase > base) { #ifdef CONFIG_NUMA WARN_ON(nid != memblock_get_region_node(rgn)); #endif WARN_ON(flags != rgn->flags); nr_new++; if (insert) { if (start_rgn == -1) start_rgn = idx; end_rgn = idx + 1; memblock_insert_region(type, idx++, base, rbase - base, nid, flags); } } /* area below @rend is dealt with, forget about it */ base = min(rend, end); } /* insert the remaining portion */ if (base < end) { nr_new++; if (insert) { if (start_rgn == -1) start_rgn = idx; end_rgn = idx + 1; memblock_insert_region(type, idx, base, end - base, nid, flags); } } if (!nr_new) return 0; /* * If this was the first round, resize array and repeat for actual * insertions; otherwise, merge and return. */ if (!insert) { while (type->cnt + nr_new > type->max) if (memblock_double_array(type, obase, size) < 0) return -ENOMEM; insert = true; goto repeat; } else { memblock_merge_regions(type, start_rgn, end_rgn); return 0; } } /** * memblock_add_node - add new memblock region within a NUMA node * @base: base address of the new region * @size: size of the new region * @nid: nid of the new region * @flags: flags of the new region * * Add new memblock region [@base, @base + @size) to the "memory" * type. See memblock_add_range() description for mode details * * Return: * 0 on success, -errno on failure. */ int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size, int nid, enum memblock_flags flags) { phys_addr_t end = base + size - 1; memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__, &base, &end, nid, flags, (void *)_RET_IP_); return memblock_add_range(&memblock.memory, base, size, nid, flags); } /** * memblock_add - add new memblock region * @base: base address of the new region * @size: size of the new region * * Add new memblock region [@base, @base + @size) to the "memory" * type. See memblock_add_range() description for mode details * * Return: * 0 on success, -errno on failure. */ int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) { phys_addr_t end = base + size - 1; memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, &base, &end, (void *)_RET_IP_); return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0); } /** * memblock_isolate_range - isolate given range into disjoint memblocks * @type: memblock type to isolate range for * @base: base of range to isolate * @size: size of range to isolate * @start_rgn: out parameter for the start of isolated region * @end_rgn: out parameter for the end of isolated region * * Walk @type and ensure that regions don't cross the boundaries defined by * [@base, @base + @size). Crossing regions are split at the boundaries, * which may create at most two more regions. The index of the first * region inside the range is returned in *@start_rgn and end in *@end_rgn. * * Return: * 0 on success, -errno on failure. 
*/ static int __init_memblock memblock_isolate_range(struct memblock_type *type, phys_addr_t base, phys_addr_t size, int *start_rgn, int *end_rgn) { phys_addr_t end = base + memblock_cap_size(base, &size); int idx; struct memblock_region *rgn; *start_rgn = *end_rgn = 0; if (!size) return 0; /* we'll create at most two more regions */ while (type->cnt + 2 > type->max) if (memblock_double_array(type, base, size) < 0) return -ENOMEM; for_each_memblock_type(idx, type, rgn) { phys_addr_t rbase = rgn->base; phys_addr_t rend = rbase + rgn->size; if (rbase >= end) break; if (rend <= base) continue; if (rbase < base) { /* * @rgn intersects from below. Split and continue * to process the next region - the new top half. */ rgn->base = base; rgn->size -= base - rbase; type->total_size -= base - rbase; memblock_insert_region(type, idx, rbase, base - rbase, memblock_get_region_node(rgn), rgn->flags); } else if (rend > end) { /* * @rgn intersects from above. Split and redo the * current region - the new bottom half. */ rgn->base = end; rgn->size -= end - rbase; type->total_size -= end - rbase; memblock_insert_region(type, idx--, rbase, end - rbase, memblock_get_region_node(rgn), rgn->flags); } else { /* @rgn is fully contained, record it */ if (!*end_rgn) *start_rgn = idx; *end_rgn = idx + 1; } } return 0; } static int __init_memblock memblock_remove_range(struct memblock_type *type, phys_addr_t base, phys_addr_t size) { int start_rgn, end_rgn; int i, ret; ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); if (ret) return ret; for (i = end_rgn - 1; i >= start_rgn; i--) memblock_remove_region(type, i); return 0; } int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) { phys_addr_t end = base + size - 1; memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, &base, &end, (void *)_RET_IP_); return memblock_remove_range(&memblock.memory, base, size); } /** * memblock_free - free boot memory allocation * @ptr: starting address of the boot memory allocation * @size: size of the boot memory block in bytes * * Free boot memory block previously allocated by memblock_alloc_xx() API. * The freeing memory will not be released to the buddy allocator. */ void __init_memblock memblock_free(void *ptr, size_t size) { if (ptr) memblock_phys_free(__pa(ptr), size); } /** * memblock_phys_free - free boot memory block * @base: phys starting address of the boot memory block * @size: size of the boot memory block in bytes * * Free boot memory block previously allocated by memblock_phys_alloc_xx() API. * The freeing memory will not be released to the buddy allocator. 
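 *
 * A minimal illustrative pairing (editor's sketch, not from the original
 * source):
 *
 *	phys_addr_t pa = memblock_phys_alloc(SZ_4K, SZ_4K);
 *
 *	if (pa)
 *		memblock_phys_free(pa, SZ_4K);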
*/ int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size) { phys_addr_t end = base + size - 1; memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, &base, &end, (void *)_RET_IP_); kmemleak_free_part_phys(base, size); return memblock_remove_range(&memblock.reserved, base, size); } int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) { phys_addr_t end = base + size - 1; memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, &base, &end, (void *)_RET_IP_); return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0); } #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size) { phys_addr_t end = base + size - 1; memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, &base, &end, (void *)_RET_IP_); return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0); } #endif /** * memblock_setclr_flag - set or clear flag for a memory region * @base: base address of the region * @size: size of the region * @set: set or clear the flag * @flag: the flag to update * * This function isolates region [@base, @base + @size), and sets/clears flag * * Return: 0 on success, -errno on failure. */ static int __init_memblock memblock_setclr_flag(phys_addr_t base, phys_addr_t size, int set, int flag) { struct memblock_type *type = &memblock.memory; int i, ret, start_rgn, end_rgn; ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); if (ret) return ret; for (i = start_rgn; i < end_rgn; i++) { struct memblock_region *r = &type->regions[i]; if (set) r->flags |= flag; else r->flags &= ~flag; } memblock_merge_regions(type, start_rgn, end_rgn); return 0; } /** * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG. * @base: the base phys addr of the region * @size: the size of the region * * Return: 0 on success, -errno on failure. */ int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size) { return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG); } /** * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region. * @base: the base phys addr of the region * @size: the size of the region * * Return: 0 on success, -errno on failure. */ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) { return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG); } /** * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR. * @base: the base phys addr of the region * @size: the size of the region * * Return: 0 on success, -errno on failure. */ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size) { if (!mirrored_kernelcore) return 0; system_has_some_mirror = true; return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR); } /** * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP. * @base: the base phys addr of the region * @size: the size of the region * * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the * direct mapping of the physical memory. These regions will still be * covered by the memory map. The struct page representing NOMAP memory * frames in the memory map will be PageReserved() * * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from * memblock, the caller must inform kmemleak to ignore that memory * * Return: 0 on success, -errno on failure. 
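 *
 * For example (illustrative): memory that firmware has carved out for a
 * coprocessor can be kept out of the linear map while remaining covered by
 * the memory map, as described above, with a single call:
 *
 *	memblock_mark_nomap(base, size);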
*/ int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size) { return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP); } /** * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region. * @base: the base phys addr of the region * @size: the size of the region * * Return: 0 on success, -errno on failure. */ int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size) { return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP); } static bool should_skip_region(struct memblock_type *type, struct memblock_region *m, int nid, int flags) { int m_nid = memblock_get_region_node(m); /* we never skip regions when iterating memblock.reserved or physmem */ if (type != memblock_memory) return false; /* only memory regions are associated with nodes, check it */ if (nid != NUMA_NO_NODE && nid != m_nid) return true; /* skip hotpluggable memory regions if needed */ if (movable_node_is_enabled() && memblock_is_hotpluggable(m) && !(flags & MEMBLOCK_HOTPLUG)) return true; /* if we want mirror memory skip non-mirror memory regions */ if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m)) return true; /* skip nomap memory unless we were asked for it explicitly */ if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m)) return true; /* skip driver-managed memory unless we were asked for it explicitly */ if (!(flags & MEMBLOCK_DRIVER_MANAGED) && memblock_is_driver_managed(m)) return true; return false; } /** * __next_mem_range - next function for for_each_free_mem_range() etc. * @idx: pointer to u64 loop variable * @nid: node selector, %NUMA_NO_NODE for all nodes * @flags: pick from blocks based on memory attributes * @type_a: pointer to memblock_type from where the range is taken * @type_b: pointer to memblock_type which excludes memory from being taken * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL * @out_nid: ptr to int for nid of the range, can be %NULL * * Find the first area from *@idx which matches @nid, fill the out * parameters, and update *@idx for the next iteration. The lower 32bit of * *@idx contains index into type_a and the upper 32bit indexes the * areas before each region in type_b. For example, if type_b regions * look like the following, * * 0:[0-16), 1:[32-48), 2:[128-130) * * The upper 32bit indexes the following regions. * * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX) * * As both region arrays are sorted, the function advances the two indices * in lockstep and returns each intersection. */ void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags, struct memblock_type *type_a, struct memblock_type *type_b, phys_addr_t *out_start, phys_addr_t *out_end, int *out_nid) { int idx_a = *idx & 0xffffffff; int idx_b = *idx >> 32; if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. 
Use NUMA_NO_NODE instead\n")) nid = NUMA_NO_NODE; for (; idx_a < type_a->cnt; idx_a++) { struct memblock_region *m = &type_a->regions[idx_a]; phys_addr_t m_start = m->base; phys_addr_t m_end = m->base + m->size; int m_nid = memblock_get_region_node(m); if (should_skip_region(type_a, m, nid, flags)) continue; if (!type_b) { if (out_start) *out_start = m_start; if (out_end) *out_end = m_end; if (out_nid) *out_nid = m_nid; idx_a++; *idx = (u32)idx_a | (u64)idx_b << 32; return; } /* scan areas before each reservation */ for (; idx_b < type_b->cnt + 1; idx_b++) { struct memblock_region *r; phys_addr_t r_start; phys_addr_t r_end; r = &type_b->regions[idx_b]; r_start = idx_b ? r[-1].base + r[-1].size : 0; r_end = idx_b < type_b->cnt ? r->base : PHYS_ADDR_MAX; /* * if idx_b advanced past idx_a, * break out to advance idx_a */ if (r_start >= m_end) break; /* if the two regions intersect, we're done */ if (m_start < r_end) { if (out_start) *out_start = max(m_start, r_start); if (out_end) *out_end = min(m_end, r_end); if (out_nid) *out_nid = m_nid; /* * The region which ends first is * advanced for the next iteration. */ if (m_end <= r_end) idx_a++; else idx_b++; *idx = (u32)idx_a | (u64)idx_b << 32; return; } } } /* signal end of iteration */ *idx = ULLONG_MAX; } /** * __next_mem_range_rev - generic next function for for_each_*_range_rev() * * @idx: pointer to u64 loop variable * @nid: node selector, %NUMA_NO_NODE for all nodes * @flags: pick from blocks based on memory attributes * @type_a: pointer to memblock_type from where the range is taken * @type_b: pointer to memblock_type which excludes memory from being taken * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL * @out_nid: ptr to int for nid of the range, can be %NULL * * Finds the next range from type_a which is not marked as unsuitable * in type_b. * * Reverse of __next_mem_range(). */ void __init_memblock __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags, struct memblock_type *type_a, struct memblock_type *type_b, phys_addr_t *out_start, phys_addr_t *out_end, int *out_nid) { int idx_a = *idx & 0xffffffff; int idx_b = *idx >> 32; if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) nid = NUMA_NO_NODE; if (*idx == (u64)ULLONG_MAX) { idx_a = type_a->cnt - 1; if (type_b != NULL) idx_b = type_b->cnt; else idx_b = 0; } for (; idx_a >= 0; idx_a--) { struct memblock_region *m = &type_a->regions[idx_a]; phys_addr_t m_start = m->base; phys_addr_t m_end = m->base + m->size; int m_nid = memblock_get_region_node(m); if (should_skip_region(type_a, m, nid, flags)) continue; if (!type_b) { if (out_start) *out_start = m_start; if (out_end) *out_end = m_end; if (out_nid) *out_nid = m_nid; idx_a--; *idx = (u32)idx_a | (u64)idx_b << 32; return; } /* scan areas before each reservation */ for (; idx_b >= 0; idx_b--) { struct memblock_region *r; phys_addr_t r_start; phys_addr_t r_end; r = &type_b->regions[idx_b]; r_start = idx_b ? r[-1].base + r[-1].size : 0; r_end = idx_b < type_b->cnt ? 
r->base : PHYS_ADDR_MAX; /* * if idx_b advanced past idx_a, * break out to advance idx_a */ if (r_end <= m_start) break; /* if the two regions intersect, we're done */ if (m_end > r_start) { if (out_start) *out_start = max(m_start, r_start); if (out_end) *out_end = min(m_end, r_end); if (out_nid) *out_nid = m_nid; if (m_start >= r_start) idx_a--; else idx_b--; *idx = (u32)idx_a | (u64)idx_b << 32; return; } } } /* signal end of iteration */ *idx = ULLONG_MAX; } /* * Common iterator interface used to define for_each_mem_pfn_range(). */ void __init_memblock __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, unsigned long *out_end_pfn, int *out_nid) { struct memblock_type *type = &memblock.memory; struct memblock_region *r; int r_nid; while (++*idx < type->cnt) { r = &type->regions[*idx]; r_nid = memblock_get_region_node(r); if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size)) continue; if (nid == MAX_NUMNODES || nid == r_nid) break; } if (*idx >= type->cnt) { *idx = -1; return; } if (out_start_pfn) *out_start_pfn = PFN_UP(r->base); if (out_end_pfn) *out_end_pfn = PFN_DOWN(r->base + r->size); if (out_nid) *out_nid = r_nid; } /** * memblock_set_node - set node ID on memblock regions * @base: base of area to set node ID for * @size: size of area to set node ID for * @type: memblock type to set node ID for * @nid: node ID to set * * Set the nid of memblock @type regions in [@base, @base + @size) to @nid. * Regions which cross the area boundaries are split as necessary. * * Return: * 0 on success, -errno on failure. */ int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size, struct memblock_type *type, int nid) { #ifdef CONFIG_NUMA int start_rgn, end_rgn; int i, ret; ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); if (ret) return ret; for (i = start_rgn; i < end_rgn; i++) memblock_set_region_node(&type->regions[i], nid); memblock_merge_regions(type, start_rgn, end_rgn); #endif return 0; } #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT /** * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone() * * @idx: pointer to u64 loop variable * @zone: zone in which all of the memory blocks reside * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL * * This function is meant to be a zone/pfn specific wrapper for the * for_each_mem_range type iterators. Specifically they are used in the * deferred memory init routines and as such we were duplicating much of * this logic throughout the code. So instead of having it in multiple * locations it seemed like it would make more sense to centralize this to * one new iterator that does everything they need. */ void __init_memblock __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, unsigned long *out_spfn, unsigned long *out_epfn) { int zone_nid = zone_to_nid(zone); phys_addr_t spa, epa; __next_mem_range(idx, zone_nid, MEMBLOCK_NONE, &memblock.memory, &memblock.reserved, &spa, &epa, NULL); while (*idx != U64_MAX) { unsigned long epfn = PFN_DOWN(epa); unsigned long spfn = PFN_UP(spa); /* * Verify the end is at least past the start of the zone and * that we have at least one PFN to initialize. 
*/ if (zone->zone_start_pfn < epfn && spfn < epfn) { /* if we went too far just stop searching */ if (zone_end_pfn(zone) <= spfn) { *idx = U64_MAX; break; } if (out_spfn) *out_spfn = max(zone->zone_start_pfn, spfn); if (out_epfn) *out_epfn = min(zone_end_pfn(zone), epfn); return; } __next_mem_range(idx, zone_nid, MEMBLOCK_NONE, &memblock.memory, &memblock.reserved, &spa, &epa, NULL); } /* signal end of iteration */ if (out_spfn) *out_spfn = ULONG_MAX; if (out_epfn) *out_epfn = 0; } #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ /** * memblock_alloc_range_nid - allocate boot memory block * @size: size of memory block to be allocated in bytes * @align: alignment of the region and block's size * @start: the lower bound of the memory region to allocate (phys address) * @end: the upper bound of the memory region to allocate (phys address) * @nid: nid of the free area to find, %NUMA_NO_NODE for any node * @exact_nid: control the allocation fall back to other nodes * * The allocation is performed from memory region limited by * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE. * * If the specified node can not hold the requested memory and @exact_nid * is false, the allocation falls back to any node in the system. * * For systems with memory mirroring, the allocation is attempted first * from the regions with mirroring enabled and then retried from any * memory region. * * In addition, function using kmemleak_alloc_phys for allocated boot * memory block, it is never reported as leaks. * * Return: * Physical address of allocated memory block on success, %0 on failure. */ phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, int nid, bool exact_nid) { enum memblock_flags flags = choose_memblock_flags(); phys_addr_t found; if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) nid = NUMA_NO_NODE; if (!align) { /* Can't use WARNs this early in boot on powerpc */ dump_stack(); align = SMP_CACHE_BYTES; } again: found = memblock_find_in_range_node(size, align, start, end, nid, flags); if (found && !memblock_reserve(found, size)) goto done; if (nid != NUMA_NO_NODE && !exact_nid) { found = memblock_find_in_range_node(size, align, start, end, NUMA_NO_NODE, flags); if (found && !memblock_reserve(found, size)) goto done; } if (flags & MEMBLOCK_MIRROR) { flags &= ~MEMBLOCK_MIRROR; pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n", &size); goto again; } return 0; done: /* * Skip kmemleak for those places like kasan_init() and * early_pgtable_alloc() due to high volume. */ if (end != MEMBLOCK_ALLOC_NOLEAKTRACE) /* * Memblock allocated blocks are never reported as * leaks. This is because many of these blocks are * only referred via the physical address which is * not looked up by kmemleak. */ kmemleak_alloc_phys(found, size, 0); /* * Some Virtual Machine platforms, such as Intel TDX or AMD SEV-SNP, * require memory to be accepted before it can be used by the * guest. * * Accept the memory of the allocated buffer. */ accept_memory(found, found + size); return found; } /** * memblock_phys_alloc_range - allocate a memory block inside specified range * @size: size of memory block to be allocated in bytes * @align: alignment of the region and block's size * @start: the lower bound of the memory region to allocate (physical address) * @end: the upper bound of the memory region to allocate (physical address) * * Allocate @size bytes in the between @start and @end. 
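 *
 * Illustrative use (editor's sketch; the 16MiB window is a made-up bound
 * for a low-memory buffer):
 *
 *	phys_addr_t pa = memblock_phys_alloc_range(SZ_64K, SZ_64K, 0, SZ_16M);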
* * Return: physical address of the allocated memory block on success, * %0 on failure. */ phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end) { memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n", __func__, (u64)size, (u64)align, &start, &end, (void *)_RET_IP_); return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE, false); } /** * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node * @size: size of memory block to be allocated in bytes * @align: alignment of the region and block's size * @nid: nid of the free area to find, %NUMA_NO_NODE for any node * * Allocates memory block from the specified NUMA node. If the node * has no available memory, attempts to allocated from any node in the * system. * * Return: physical address of the allocated memory block on success, * %0 on failure. */ phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) { return memblock_alloc_range_nid(size, align, 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid, false); } /** * memblock_alloc_internal - allocate boot memory block * @size: size of memory block to be allocated in bytes * @align: alignment of the region and block's size * @min_addr: the lower bound of the memory region to allocate (phys address) * @max_addr: the upper bound of the memory region to allocate (phys address) * @nid: nid of the free area to find, %NUMA_NO_NODE for any node * @exact_nid: control the allocation fall back to other nodes * * Allocates memory block using memblock_alloc_range_nid() and * converts the returned physical address to virtual. * * The @min_addr limit is dropped if it can not be satisfied and the allocation * will fall back to memory below @min_addr. Other constraints, such * as node and mirrored memory will be handled again in * memblock_alloc_range_nid(). * * Return: * Virtual address of allocated memory block on success, NULL on failure. */ static void * __init memblock_alloc_internal( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid, bool exact_nid) { phys_addr_t alloc; /* * Detect any accidental use of these APIs after slab is ready, as at * this moment memblock may be deinitialized already and its * internal data may be destroyed (after execution of memblock_free_all) */ if (WARN_ON_ONCE(slab_is_available())) return kzalloc_node(size, GFP_NOWAIT, nid); if (max_addr > memblock.current_limit) max_addr = memblock.current_limit; alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid, exact_nid); /* retry allocation without lower limit */ if (!alloc && min_addr) alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid, exact_nid); if (!alloc) return NULL; return phys_to_virt(alloc); } /** * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node * without zeroing memory * @size: size of memory block to be allocated in bytes * @align: alignment of the region and block's size * @min_addr: the lower bound of the memory region from where the allocation * is preferred (phys address) * @max_addr: the upper bound of the memory region from where the allocation * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to * allocate only from memory limited by memblock.current_limit value * @nid: nid of the free area to find, %NUMA_NO_NODE for any node * * Public function, provides additional debug information (including caller * info), if enabled. Does not zero allocated memory. 
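 *
 * Because the memory is not zeroed, callers initialize it themselves, e.g.
 * (illustrative sketch):
 *
 *	p = memblock_alloc_exact_nid_raw(size, SMP_CACHE_BYTES, 0,
 *					 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 *	if (p)
 *		memset(p, 0, size);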
* * Return: * Virtual address of allocated memory block on success, NULL on failure. */ void * __init memblock_alloc_exact_nid_raw( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid) { memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", __func__, (u64)size, (u64)align, nid, &min_addr, &max_addr, (void *)_RET_IP_); return memblock_alloc_internal(size, align, min_addr, max_addr, nid, true); } /** * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing * memory and without panicking * @size: size of memory block to be allocated in bytes * @align: alignment of the region and block's size * @min_addr: the lower bound of the memory region from where the allocation * is preferred (phys address) * @max_addr: the upper bound of the memory region from where the allocation * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to * allocate only from memory limited by memblock.current_limit value * @nid: nid of the free area to find, %NUMA_NO_NODE for any node * * Public function, provides additional debug information (including caller * info), if enabled. Does not zero allocated memory, does not panic if request * cannot be satisfied. * * Return: * Virtual address of allocated memory block on success, NULL on failure. */ void * __init memblock_alloc_try_nid_raw( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid) { memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", __func__, (u64)size, (u64)align, nid, &min_addr, &max_addr, (void *)_RET_IP_); return memblock_alloc_internal(size, align, min_addr, max_addr, nid, false); } /** * memblock_alloc_try_nid - allocate boot memory block * @size: size of memory block to be allocated in bytes * @align: alignment of the region and block's size * @min_addr: the lower bound of the memory region from where the allocation * is preferred (phys address) * @max_addr: the upper bound of the memory region from where the allocation * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to * allocate only from memory limited by memblock.current_limit value * @nid: nid of the free area to find, %NUMA_NO_NODE for any node * * Public function, provides additional debug information (including caller * info), if enabled. This function zeroes the allocated memory. * * Return: * Virtual address of allocated memory block on success, NULL on failure. */ void * __init memblock_alloc_try_nid( phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid) { void *ptr; memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", __func__, (u64)size, (u64)align, nid, &min_addr, &max_addr, (void *)_RET_IP_); ptr = memblock_alloc_internal(size, align, min_addr, max_addr, nid, false); if (ptr) memset(ptr, 0, size); return ptr; } /** * memblock_free_late - free pages directly to buddy allocator * @base: phys starting address of the boot memory block * @size: size of the boot memory block in bytes * * This is only useful when the memblock allocator has already been torn * down, but we are still initializing the system. Pages are released directly * to the buddy allocator. 
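 *
 * Illustrative call (editor's sketch): releasing a boot-time buffer @buf of
 * @size bytes once the buddy allocator is up:
 *
 *	memblock_free_late(__pa(buf), size);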
*/ void __init memblock_free_late(phys_addr_t base, phys_addr_t size) { phys_addr_t cursor, end; end = base + size - 1; memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, &base, &end, (void *)_RET_IP_); kmemleak_free_part_phys(base, size); cursor = PFN_UP(base); end = PFN_DOWN(base + size); for (; cursor < end; cursor++) { memblock_free_pages(pfn_to_page(cursor), cursor, 0); totalram_pages_inc(); } } /* * Remaining API functions */ phys_addr_t __init_memblock memblock_phys_mem_size(void) { return memblock.memory.total_size; } phys_addr_t __init_memblock memblock_reserved_size(void) { return memblock.reserved.total_size; } /* lowest address */ phys_addr_t __init_memblock memblock_start_of_DRAM(void) { return memblock.memory.regions[0].base; } phys_addr_t __init_memblock memblock_end_of_DRAM(void) { int idx = memblock.memory.cnt - 1; return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); } static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit) { phys_addr_t max_addr = PHYS_ADDR_MAX; struct memblock_region *r; /* * translate the memory @limit size into the max address within one of * the memory memblock regions, if the @limit exceeds the total size * of those regions, max_addr will keep original value PHYS_ADDR_MAX */ for_each_mem_region(r) { if (limit <= r->size) { max_addr = r->base + limit; break; } limit -= r->size; } return max_addr; } void __init memblock_enforce_memory_limit(phys_addr_t limit) { phys_addr_t max_addr; if (!limit) return; max_addr = __find_max_addr(limit); /* @limit exceeds the total size of the memory, do nothing */ if (max_addr == PHYS_ADDR_MAX) return; /* truncate both memory and reserved regions */ memblock_remove_range(&memblock.memory, max_addr, PHYS_ADDR_MAX); memblock_remove_range(&memblock.reserved, max_addr, PHYS_ADDR_MAX); } void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size) { int start_rgn, end_rgn; int i, ret; if (!size) return; if (!memblock_memory->total_size) { pr_warn("%s: No memory registered yet\n", __func__); return; } ret = memblock_isolate_range(&memblock.memory, base, size, &start_rgn, &end_rgn); if (ret) return; /* remove all the MAP regions */ for (i = memblock.memory.cnt - 1; i >= end_rgn; i--) if (!memblock_is_nomap(&memblock.memory.regions[i])) memblock_remove_region(&memblock.memory, i); for (i = start_rgn - 1; i >= 0; i--) if (!memblock_is_nomap(&memblock.memory.regions[i])) memblock_remove_region(&memblock.memory, i); /* truncate the reserved regions */ memblock_remove_range(&memblock.reserved, 0, base); memblock_remove_range(&memblock.reserved, base + size, PHYS_ADDR_MAX); } void __init memblock_mem_limit_remove_map(phys_addr_t limit) { phys_addr_t max_addr; if (!limit) return; max_addr = __find_max_addr(limit); /* @limit exceeds the total size of the memory, do nothing */ if (max_addr == PHYS_ADDR_MAX) return; memblock_cap_memory_range(0, max_addr); } static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) { unsigned int left = 0, right = type->cnt; do { unsigned int mid = (right + left) / 2; if (addr < type->regions[mid].base) right = mid; else if (addr >= (type->regions[mid].base + type->regions[mid].size)) left = mid + 1; else return mid; } while (left < right); return -1; } bool __init_memblock memblock_is_reserved(phys_addr_t addr) { return memblock_search(&memblock.reserved, addr) != -1; } bool __init_memblock memblock_is_memory(phys_addr_t addr) { return memblock_search(&memblock.memory, addr) != -1; } bool __init_memblock 
memblock_is_map_memory(phys_addr_t addr) { int i = memblock_search(&memblock.memory, addr); if (i == -1) return false; return !memblock_is_nomap(&memblock.memory.regions[i]); } int __init_memblock memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, unsigned long *end_pfn) { struct memblock_type *type = &memblock.memory; int mid = memblock_search(type, PFN_PHYS(pfn)); if (mid == -1) return -1; *start_pfn = PFN_DOWN(type->regions[mid].base); *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size); return memblock_get_region_node(&type->regions[mid]); } /** * memblock_is_region_memory - check if a region is a subset of memory * @base: base of region to check * @size: size of region to check * * Check if the region [@base, @base + @size) is a subset of a memory block. * * Return: * 0 if false, non-zero if true */ bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) { int idx = memblock_search(&memblock.memory, base); phys_addr_t end = base + memblock_cap_size(base, &size); if (idx == -1) return false; return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size) >= end; } /** * memblock_is_region_reserved - check if a region intersects reserved memory * @base: base of region to check * @size: size of region to check * * Check if the region [@base, @base + @size) intersects a reserved * memory block. * * Return: * True if they intersect, false if not. */ bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) { return memblock_overlaps_region(&memblock.reserved, base, size); } void __init_memblock memblock_trim_memory(phys_addr_t align) { phys_addr_t start, end, orig_start, orig_end; struct memblock_region *r; for_each_mem_region(r) { orig_start = r->base; orig_end = r->base + r->size; start = round_up(orig_start, align); end = round_down(orig_end, align); if (start == orig_start && end == orig_end) continue; if (start < end) { r->base = start; r->size = end - start; } else { memblock_remove_region(&memblock.memory, r - memblock.memory.regions); r--; } } } void __init_memblock memblock_set_current_limit(phys_addr_t limit) { memblock.current_limit = limit; } phys_addr_t __init_memblock memblock_get_current_limit(void) { return memblock.current_limit; } static void __init_memblock memblock_dump(struct memblock_type *type) { phys_addr_t base, end, size; enum memblock_flags flags; int idx; struct memblock_region *rgn; pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt); for_each_memblock_type(idx, type, rgn) { char nid_buf[32] = ""; base = rgn->base; size = rgn->size; end = base + size - 1; flags = rgn->flags; #ifdef CONFIG_NUMA if (memblock_get_region_node(rgn) != MAX_NUMNODES) snprintf(nid_buf, sizeof(nid_buf), " on node %d", memblock_get_region_node(rgn)); #endif pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n", type->name, idx, &base, &end, &size, nid_buf, flags); } } static void __init_memblock __memblock_dump_all(void) { pr_info("MEMBLOCK configuration:\n"); pr_info(" memory size = %pa reserved size = %pa\n", &memblock.memory.total_size, &memblock.reserved.total_size); memblock_dump(&memblock.memory); memblock_dump(&memblock.reserved); #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP memblock_dump(&physmem); #endif } void __init_memblock memblock_dump_all(void) { if (memblock_debug) __memblock_dump_all(); } void __init memblock_allow_resize(void) { memblock_can_resize = 1; } static int __init early_memblock(char *p) { if (p && strstr(p, "debug")) memblock_debug = 1; return 0; } 
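/*
 * Illustrative note (editor's addition): booting with "memblock=debug" on the
 * kernel command line sets memblock_debug above, so every memblock_dbg() call
 * in this file logs its region and caller, and memblock_dump_all() prints the
 * full configuration.
 */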
early_param("memblock", early_memblock); static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn) { struct page *start_pg, *end_pg; phys_addr_t pg, pgend; /* * Convert start_pfn/end_pfn to a struct page pointer. */ start_pg = pfn_to_page(start_pfn - 1) + 1; end_pg = pfn_to_page(end_pfn - 1) + 1; /* * Convert to physical addresses, and round start upwards and end * downwards. */ pg = PAGE_ALIGN(__pa(start_pg)); pgend = __pa(end_pg) & PAGE_MASK; /* * If there are free pages between these, free the section of the * memmap array. */ if (pg < pgend) memblock_phys_free(pg, pgend - pg); } /* * The mem_map array can get very big. Free the unused area of the memory map. */ static void __init free_unused_memmap(void) { unsigned long start, end, prev_end = 0; int i; if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) || IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP)) return; /* * This relies on each bank being in address order. * The banks are sorted previously in bootmem_init(). */ for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { #ifdef CONFIG_SPARSEMEM /* * Take care not to free memmap entries that don't exist * due to SPARSEMEM sections which aren't present. */ start = min(start, ALIGN(prev_end, PAGES_PER_SECTION)); #endif /* * Align down here since many operations in VM subsystem * presume that there are no holes in the memory map inside * a pageblock */ start = pageblock_start_pfn(start); /* * If we had a previous bank, and there is a space * between the current bank and the previous, free it. */ if (prev_end && prev_end < start) free_memmap(prev_end, start); /* * Align up here since many operations in VM subsystem * presume that there are no holes in the memory map inside * a pageblock */ prev_end = pageblock_align(end); } #ifdef CONFIG_SPARSEMEM if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) { prev_end = pageblock_align(end); free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION)); } #endif } static void __init __free_pages_memory(unsigned long start, unsigned long end) { int order; while (start < end) { /* * Free the pages in the largest chunks alignment allows. * * __ffs() behaviour is undefined for 0. start == 0 is * MAX_ORDER-aligned, set order to MAX_ORDER for the case. 
*/ if (start) order = min_t(int, MAX_ORDER, __ffs(start)); else order = MAX_ORDER; while (start + (1UL << order) > end) order--; memblock_free_pages(pfn_to_page(start), start, order); start += (1UL << order); } } static unsigned long __init __free_memory_core(phys_addr_t start, phys_addr_t end) { unsigned long start_pfn = PFN_UP(start); unsigned long end_pfn = min_t(unsigned long, PFN_DOWN(end), max_low_pfn); if (start_pfn >= end_pfn) return 0; __free_pages_memory(start_pfn, end_pfn); return end_pfn - start_pfn; } static void __init memmap_init_reserved_pages(void) { struct memblock_region *region; phys_addr_t start, end; int nid; /* * set nid on all reserved pages and also treat struct * pages for the NOMAP regions as PageReserved */ for_each_mem_region(region) { nid = memblock_get_region_node(region); start = region->base; end = start + region->size; if (memblock_is_nomap(region)) reserve_bootmem_region(start, end, nid); memblock_set_node(start, end, &memblock.reserved, nid); } /* initialize struct pages for the reserved regions */ for_each_reserved_mem_region(region) { nid = memblock_get_region_node(region); start = region->base; end = start + region->size; reserve_bootmem_region(start, end, nid); } } static unsigned long __init free_low_memory_core_early(void) { unsigned long count = 0; phys_addr_t start, end; u64 i; memblock_clear_hotplug(0, -1); memmap_init_reserved_pages(); /* * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id * because in some case like Node0 doesn't have RAM installed * low ram will be on Node1 */ for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL) count += __free_memory_core(start, end); return count; } static int reset_managed_pages_done __initdata; static void __init reset_node_managed_pages(pg_data_t *pgdat) { struct zone *z; for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) atomic_long_set(&z->managed_pages, 0); } void __init reset_all_zones_managed_pages(void) { struct pglist_data *pgdat; if (reset_managed_pages_done) return; for_each_online_pgdat(pgdat) reset_node_managed_pages(pgdat); reset_managed_pages_done = 1; } /** * memblock_free_all - release free pages to the buddy allocator */ void __init memblock_free_all(void) { unsigned long pages; free_unused_memmap(); reset_all_zones_managed_pages(); pages = free_low_memory_core_early(); totalram_pages_add(pages); } #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK) static const char * const flagname[] = { [ilog2(MEMBLOCK_HOTPLUG)] = "HOTPLUG", [ilog2(MEMBLOCK_MIRROR)] = "MIRROR", [ilog2(MEMBLOCK_NOMAP)] = "NOMAP", [ilog2(MEMBLOCK_DRIVER_MANAGED)] = "DRV_MNG", }; static int memblock_debug_show(struct seq_file *m, void *private) { struct memblock_type *type = m->private; struct memblock_region *reg; int i, j, nid; unsigned int count = ARRAY_SIZE(flagname); phys_addr_t end; for (i = 0; i < type->cnt; i++) { reg = &type->regions[i]; end = reg->base + reg->size - 1; nid = memblock_get_region_node(reg); seq_printf(m, "%4d: ", i); seq_printf(m, "%pa..%pa ", &reg->base, &end); if (nid != MAX_NUMNODES) seq_printf(m, "%4d ", nid); else seq_printf(m, "%4c ", 'x'); if (reg->flags) { for (j = 0; j < count; j++) { if (reg->flags & (1U << j)) { seq_printf(m, "%s\n", flagname[j]); break; } } if (j == count) seq_printf(m, "%s\n", "UNKNOWN"); } else { seq_printf(m, "%s\n", "NONE"); } } return 0; } DEFINE_SHOW_ATTRIBUTE(memblock_debug); static int __init memblock_init_debugfs(void) { struct dentry *root = debugfs_create_dir("memblock", NULL); 
debugfs_create_file("memory", 0444, root, &memblock.memory, &memblock_debug_fops); debugfs_create_file("reserved", 0444, root, &memblock.reserved, &memblock_debug_fops); #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP debugfs_create_file("physmem", 0444, root, &physmem, &memblock_debug_fops); #endif return 0; } __initcall(memblock_init_debugfs); #endif /* CONFIG_DEBUG_FS */
linux-master
mm/memblock.c
// SPDX-License-Identifier: GPL-2.0-only /* * mm/mmap.c * * Written by obz. * * Address space accounting code <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/backing-dev.h> #include <linux/mm.h> #include <linux/mm_inline.h> #include <linux/shm.h> #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/syscalls.h> #include <linux/capability.h> #include <linux/init.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/personality.h> #include <linux/security.h> #include <linux/hugetlb.h> #include <linux/shmem_fs.h> #include <linux/profile.h> #include <linux/export.h> #include <linux/mount.h> #include <linux/mempolicy.h> #include <linux/rmap.h> #include <linux/mmu_notifier.h> #include <linux/mmdebug.h> #include <linux/perf_event.h> #include <linux/audit.h> #include <linux/khugepaged.h> #include <linux/uprobes.h> #include <linux/notifier.h> #include <linux/memory.h> #include <linux/printk.h> #include <linux/userfaultfd_k.h> #include <linux/moduleparam.h> #include <linux/pkeys.h> #include <linux/oom.h> #include <linux/sched/mm.h> #include <linux/ksm.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> #include <asm/tlb.h> #include <asm/mmu_context.h> #define CREATE_TRACE_POINTS #include <trace/events/mmap.h> #include "internal.h" #ifndef arch_mmap_check #define arch_mmap_check(addr, len, flags) (0) #endif #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN; const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX; int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS; #endif #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN; const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX; int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS; #endif static bool ignore_rlimit_data; core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644); static void unmap_region(struct mm_struct *mm, struct ma_state *mas, struct vm_area_struct *vma, struct vm_area_struct *prev, struct vm_area_struct *next, unsigned long start, unsigned long end, unsigned long tree_end, bool mm_wr_locked); static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags) { return pgprot_modify(oldprot, vm_get_page_prot(vm_flags)); } /* Update vma->vm_page_prot to reflect vma->vm_flags. */ void vma_set_page_prot(struct vm_area_struct *vma) { unsigned long vm_flags = vma->vm_flags; pgprot_t vm_page_prot; vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); if (vma_wants_writenotify(vma, vm_page_prot)) { vm_flags &= ~VM_SHARED; vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags); } /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */ WRITE_ONCE(vma->vm_page_prot, vm_page_prot); } /* * Requires inode->i_mapping->i_mmap_rwsem */ static void __remove_shared_vm_struct(struct vm_area_struct *vma, struct file *file, struct address_space *mapping) { if (vma->vm_flags & VM_SHARED) mapping_unmap_writable(mapping); flush_dcache_mmap_lock(mapping); vma_interval_tree_remove(vma, &mapping->i_mmap); flush_dcache_mmap_unlock(mapping); } /* * Unlink a file-based vm structure from its interval tree, to hide * vma from rmap and vmtruncate before freeing its page tables. 
*/ void unlink_file_vma(struct vm_area_struct *vma) { struct file *file = vma->vm_file; if (file) { struct address_space *mapping = file->f_mapping; i_mmap_lock_write(mapping); __remove_shared_vm_struct(vma, file, mapping); i_mmap_unlock_write(mapping); } } /* * Close a vm structure and free it. */ static void remove_vma(struct vm_area_struct *vma, bool unreachable) { might_sleep(); if (vma->vm_ops && vma->vm_ops->close) vma->vm_ops->close(vma); if (vma->vm_file) fput(vma->vm_file); mpol_put(vma_policy(vma)); if (unreachable) __vm_area_free(vma); else vm_area_free(vma); } static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi, unsigned long min) { return mas_prev(&vmi->mas, min); } /* * check_brk_limits() - Use platform specific check of range & verify mlock * limits. * @addr: The address to check * @len: The size of increase. * * Return: 0 on success. */ static int check_brk_limits(unsigned long addr, unsigned long len) { unsigned long mapped_addr; mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); if (IS_ERR_VALUE(mapped_addr)) return mapped_addr; return mlock_future_ok(current->mm, current->mm->def_flags, len) ? 0 : -EAGAIN; } static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma, unsigned long addr, unsigned long request, unsigned long flags); SYSCALL_DEFINE1(brk, unsigned long, brk) { unsigned long newbrk, oldbrk, origbrk; struct mm_struct *mm = current->mm; struct vm_area_struct *brkvma, *next = NULL; unsigned long min_brk; bool populate = false; LIST_HEAD(uf); struct vma_iterator vmi; if (mmap_write_lock_killable(mm)) return -EINTR; origbrk = mm->brk; #ifdef CONFIG_COMPAT_BRK /* * CONFIG_COMPAT_BRK can still be overridden by setting * randomize_va_space to 2, which will still cause mm->start_brk * to be arbitrarily shifted */ if (current->brk_randomized) min_brk = mm->start_brk; else min_brk = mm->end_data; #else min_brk = mm->start_brk; #endif if (brk < min_brk) goto out; /* * Check against rlimit here. If this check is done later after the test * of oldbrk with newbrk then it can escape the test and let the data * segment grow beyond its set limit the in case where the limit is * not page aligned -Ram Gupta */ if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk, mm->end_data, mm->start_data)) goto out; newbrk = PAGE_ALIGN(brk); oldbrk = PAGE_ALIGN(mm->brk); if (oldbrk == newbrk) { mm->brk = brk; goto success; } /* Always allow shrinking brk. */ if (brk <= mm->brk) { /* Search one past newbrk */ vma_iter_init(&vmi, mm, newbrk); brkvma = vma_find(&vmi, oldbrk); if (!brkvma || brkvma->vm_start >= oldbrk) goto out; /* mapping intersects with an existing non-brk vma. */ /* * mm->brk must be protected by write mmap_lock. * do_vma_munmap() will drop the lock on success, so update it * before calling do_vma_munmap(). */ mm->brk = brk; if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true)) goto out; goto success_unlocked; } if (check_brk_limits(oldbrk, newbrk - oldbrk)) goto out; /* * Only check if the next VMA is within the stack_guard_gap of the * expansion area */ vma_iter_init(&vmi, mm, oldbrk); next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap); if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) goto out; brkvma = vma_prev_limit(&vmi, mm->start_brk); /* Ok, looks good - let it rip. 
*/ if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0) goto out; mm->brk = brk; if (mm->def_flags & VM_LOCKED) populate = true; success: mmap_write_unlock(mm); success_unlocked: userfaultfd_unmap_complete(mm, &uf); if (populate) mm_populate(oldbrk, newbrk - oldbrk); return brk; out: mm->brk = origbrk; mmap_write_unlock(mm); return origbrk; } #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) static void validate_mm(struct mm_struct *mm) { int bug = 0; int i = 0; struct vm_area_struct *vma; VMA_ITERATOR(vmi, mm, 0); mt_validate(&mm->mm_mt); for_each_vma(vmi, vma) { #ifdef CONFIG_DEBUG_VM_RB struct anon_vma *anon_vma = vma->anon_vma; struct anon_vma_chain *avc; #endif unsigned long vmi_start, vmi_end; bool warn = 0; vmi_start = vma_iter_addr(&vmi); vmi_end = vma_iter_end(&vmi); if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm)) warn = 1; if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm)) warn = 1; if (warn) { pr_emerg("issue in %s\n", current->comm); dump_stack(); dump_vma(vma); pr_emerg("tree range: %px start %lx end %lx\n", vma, vmi_start, vmi_end - 1); vma_iter_dump_tree(&vmi); } #ifdef CONFIG_DEBUG_VM_RB if (anon_vma) { anon_vma_lock_read(anon_vma); list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_verify(avc); anon_vma_unlock_read(anon_vma); } #endif i++; } if (i != mm->map_count) { pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i); bug = 1; } VM_BUG_ON_MM(bug, mm); } #else /* !CONFIG_DEBUG_VM_MAPLE_TREE */ #define validate_mm(mm) do { } while (0) #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ /* * vma has some anon_vma assigned, and is already inserted on that * anon_vma's interval trees. * * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the * vma must be removed from the anon_vma's interval trees using * anon_vma_interval_tree_pre_update_vma(). * * After the update, the vma will be reinserted using * anon_vma_interval_tree_post_update_vma(). * * The entire update must be protected by exclusive mmap_lock and by * the root anon_vma's mutex. 
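 *
 * A sketch of the expected sequence (illustrative; vma_prepare() and
 * vma_complete() below follow the same pattern):
 *
 *	anon_vma_lock_write(vma->anon_vma);
 *	anon_vma_interval_tree_pre_update_vma(vma);
 *	vma->vm_start = new_start;	(and/or vm_end, vm_pgoff)
 *	anon_vma_interval_tree_post_update_vma(vma);
 *	anon_vma_unlock_write(vma->anon_vma);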
*/ static inline void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) { struct anon_vma_chain *avc; list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root); } static inline void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) { struct anon_vma_chain *avc; list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root); } static unsigned long count_vma_pages_range(struct mm_struct *mm, unsigned long addr, unsigned long end) { VMA_ITERATOR(vmi, mm, addr); struct vm_area_struct *vma; unsigned long nr_pages = 0; for_each_vma_range(vmi, vma, end) { unsigned long vm_start = max(addr, vma->vm_start); unsigned long vm_end = min(end, vma->vm_end); nr_pages += PHYS_PFN(vm_end - vm_start); } return nr_pages; } static void __vma_link_file(struct vm_area_struct *vma, struct address_space *mapping) { if (vma->vm_flags & VM_SHARED) mapping_allow_writable(mapping); flush_dcache_mmap_lock(mapping); vma_interval_tree_insert(vma, &mapping->i_mmap); flush_dcache_mmap_unlock(mapping); } static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma) { VMA_ITERATOR(vmi, mm, 0); struct address_space *mapping = NULL; vma_iter_config(&vmi, vma->vm_start, vma->vm_end); if (vma_iter_prealloc(&vmi, vma)) return -ENOMEM; vma_start_write(vma); vma_iter_store(&vmi, vma); if (vma->vm_file) { mapping = vma->vm_file->f_mapping; i_mmap_lock_write(mapping); __vma_link_file(vma, mapping); i_mmap_unlock_write(mapping); } mm->map_count++; validate_mm(mm); return 0; } /* * init_multi_vma_prep() - Initializer for struct vma_prepare * @vp: The vma_prepare struct * @vma: The vma that will be altered once locked * @next: The next vma if it is to be adjusted * @remove: The first vma to be removed * @remove2: The second vma to be removed */ static inline void init_multi_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma, struct vm_area_struct *next, struct vm_area_struct *remove, struct vm_area_struct *remove2) { memset(vp, 0, sizeof(struct vma_prepare)); vp->vma = vma; vp->anon_vma = vma->anon_vma; vp->remove = remove; vp->remove2 = remove2; vp->adj_next = next; if (!vp->anon_vma && next) vp->anon_vma = next->anon_vma; vp->file = vma->vm_file; if (vp->file) vp->mapping = vma->vm_file->f_mapping; } /* * init_vma_prep() - Initializer wrapper for vma_prepare struct * @vp: The vma_prepare struct * @vma: The vma that will be altered once locked */ static inline void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma) { init_multi_vma_prep(vp, vma, NULL, NULL, NULL); } /* * vma_prepare() - Helper function for handling locking VMAs prior to altering * @vp: The initialized vma_prepare struct */ static inline void vma_prepare(struct vma_prepare *vp) { if (vp->file) { uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end); if (vp->adj_next) uprobe_munmap(vp->adj_next, vp->adj_next->vm_start, vp->adj_next->vm_end); i_mmap_lock_write(vp->mapping); if (vp->insert && vp->insert->vm_file) { /* * Put into interval tree now, so instantiated pages * are visible to arm/parisc __flush_dcache_page * throughout; but we cannot insert into address * space until vma start or end is updated. 
*/ __vma_link_file(vp->insert, vp->insert->vm_file->f_mapping); } } if (vp->anon_vma) { anon_vma_lock_write(vp->anon_vma); anon_vma_interval_tree_pre_update_vma(vp->vma); if (vp->adj_next) anon_vma_interval_tree_pre_update_vma(vp->adj_next); } if (vp->file) { flush_dcache_mmap_lock(vp->mapping); vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap); if (vp->adj_next) vma_interval_tree_remove(vp->adj_next, &vp->mapping->i_mmap); } } /* * vma_complete- Helper function for handling the unlocking after altering VMAs, * or for inserting a VMA. * * @vp: The vma_prepare struct * @vmi: The vma iterator * @mm: The mm_struct */ static inline void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi, struct mm_struct *mm) { if (vp->file) { if (vp->adj_next) vma_interval_tree_insert(vp->adj_next, &vp->mapping->i_mmap); vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap); flush_dcache_mmap_unlock(vp->mapping); } if (vp->remove && vp->file) { __remove_shared_vm_struct(vp->remove, vp->file, vp->mapping); if (vp->remove2) __remove_shared_vm_struct(vp->remove2, vp->file, vp->mapping); } else if (vp->insert) { /* * split_vma has split insert from vma, and needs * us to insert it before dropping the locks * (it may either follow vma or precede it). */ vma_iter_store(vmi, vp->insert); mm->map_count++; } if (vp->anon_vma) { anon_vma_interval_tree_post_update_vma(vp->vma); if (vp->adj_next) anon_vma_interval_tree_post_update_vma(vp->adj_next); anon_vma_unlock_write(vp->anon_vma); } if (vp->file) { i_mmap_unlock_write(vp->mapping); uprobe_mmap(vp->vma); if (vp->adj_next) uprobe_mmap(vp->adj_next); } if (vp->remove) { again: vma_mark_detached(vp->remove, true); if (vp->file) { uprobe_munmap(vp->remove, vp->remove->vm_start, vp->remove->vm_end); fput(vp->file); } if (vp->remove->anon_vma) anon_vma_merge(vp->vma, vp->remove); mm->map_count--; mpol_put(vma_policy(vp->remove)); if (!vp->remove2) WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end); vm_area_free(vp->remove); /* * In mprotect's case 6 (see comments on vma_merge), * we are removing both mid and next vmas */ if (vp->remove2) { vp->remove = vp->remove2; vp->remove2 = NULL; goto again; } } if (vp->insert && vp->file) uprobe_mmap(vp->insert); validate_mm(mm); } /* * dup_anon_vma() - Helper function to duplicate anon_vma * @dst: The destination VMA * @src: The source VMA * * Returns: 0 on success. */ static inline int dup_anon_vma(struct vm_area_struct *dst, struct vm_area_struct *src) { /* * Easily overlooked: when mprotect shifts the boundary, make sure the * expanding vma has anon_vma set if the shrinking vma had, to cover any * anon pages imported. */ if (src->anon_vma && !dst->anon_vma) { vma_assert_write_locked(dst); dst->anon_vma = src->anon_vma; return anon_vma_clone(dst, src); } return 0; } /* * vma_expand - Expand an existing VMA * * @vmi: The vma iterator * @vma: The vma to expand * @start: The start of the vma * @end: The exclusive end of the vma * @pgoff: The page offset of vma * @next: The current of next vma. * * Expand @vma to @start and @end. Can expand off the start and end. Will * expand over @next if it's different from @vma and @end == @next->vm_end. * Checking if the @vma can expand and merge with @next needs to be handled by * the caller. 
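 *
 * For example (illustrative addresses): with @vma covering [0x1000, 0x3000)
 * and @next covering [0x3000, 0x5000), calling vma_expand(vmi, vma, 0x1000,
 * 0x5000, vma->vm_pgoff, next) absorbs @next and leaves a single VMA covering
 * [0x1000, 0x5000).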
* * Returns: 0 on success */ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff, struct vm_area_struct *next) { bool remove_next = false; struct vma_prepare vp; vma_start_write(vma); if (next && (vma != next) && (end == next->vm_end)) { int ret; remove_next = true; vma_start_write(next); ret = dup_anon_vma(vma, next); if (ret) return ret; } init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL); /* Not merging but overwriting any part of next is not handled. */ VM_WARN_ON(next && !vp.remove && next != vma && end > next->vm_start); /* Only handles expanding */ VM_WARN_ON(vma->vm_start < start || vma->vm_end > end); /* Note: vma iterator must be pointing to 'start' */ vma_iter_config(vmi, start, end); if (vma_iter_prealloc(vmi, vma)) goto nomem; vma_prepare(&vp); vma_adjust_trans_huge(vma, start, end, 0); vma->vm_start = start; vma->vm_end = end; vma->vm_pgoff = pgoff; vma_iter_store(vmi, vma); vma_complete(&vp, vmi, vma->vm_mm); return 0; nomem: return -ENOMEM; } /* * vma_shrink() - Reduce an existing VMAs memory area * @vmi: The vma iterator * @vma: The VMA to modify * @start: The new start * @end: The new end * * Returns: 0 on success, -ENOMEM otherwise */ int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff) { struct vma_prepare vp; WARN_ON((vma->vm_start != start) && (vma->vm_end != end)); if (vma->vm_start < start) vma_iter_config(vmi, vma->vm_start, start); else vma_iter_config(vmi, end, vma->vm_end); if (vma_iter_prealloc(vmi, NULL)) return -ENOMEM; vma_start_write(vma); init_vma_prep(&vp, vma); vma_prepare(&vp); vma_adjust_trans_huge(vma, start, end, 0); vma_iter_clear(vmi); vma->vm_start = start; vma->vm_end = end; vma->vm_pgoff = pgoff; vma_complete(&vp, vmi, vma->vm_mm); return 0; } /* * If the vma has a ->close operation then the driver probably needs to release * per-vma resources, so we don't attempt to merge those if the caller indicates * the current vma may be removed as part of the merge. */ static inline bool is_mergeable_vma(struct vm_area_struct *vma, struct file *file, unsigned long vm_flags, struct vm_userfaultfd_ctx vm_userfaultfd_ctx, struct anon_vma_name *anon_name, bool may_remove_vma) { /* * VM_SOFTDIRTY should not prevent from VMA merging, if we * match the flags but dirty bit -- the caller should mark * merged VMA as dirty. If dirty bit won't be excluded from * comparison, we increase pressure on the memory system forcing * the kernel to generate new VMAs when old one could be * extended instead. */ if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) return false; if (vma->vm_file != file) return false; if (may_remove_vma && vma->vm_ops && vma->vm_ops->close) return false; if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx)) return false; if (!anon_vma_name_eq(anon_vma_name(vma), anon_name)) return false; return true; } static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1, struct anon_vma *anon_vma2, struct vm_area_struct *vma) { /* * The list_is_singular() test is to avoid merging VMA cloned from * parents. This can improve scalability caused by anon_vma lock. */ if ((!anon_vma1 || !anon_vma2) && (!vma || list_is_singular(&vma->anon_vma_chain))) return true; return anon_vma1 == anon_vma2; } /* * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) * in front of (at a lower virtual address and file offset than) the vma. 
* * We cannot merge two vmas if they have differently assigned (non-NULL) * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. * * We don't check here for the merged mmap wrapping around the end of pagecache * indices (16TB on ia32) because do_mmap() does not permit mmap's which * wrap, nor mmaps which cover the final page at index -1UL. * * We assume the vma may be removed as part of the merge. */ static bool can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx, struct anon_vma_name *anon_name) { if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) && is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { if (vma->vm_pgoff == vm_pgoff) return true; } return false; } /* * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) * beyond (at a higher virtual address and file offset than) the vma. * * We cannot merge two vmas if they have differently assigned (non-NULL) * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. * * We assume that vma is not removed as part of the merge. */ static bool can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx, struct anon_vma_name *anon_name) { if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) && is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { pgoff_t vm_pglen; vm_pglen = vma_pages(vma); if (vma->vm_pgoff + vm_pglen == vm_pgoff) return true; } return false; } /* * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name), * figure out whether that can be merged with its predecessor or its * successor. Or both (it neatly fills a hole). * * In most cases - when called for mmap, brk or mremap - [addr,end) is * certain not to be mapped by the time vma_merge is called; but when * called for mprotect, it is certain to be already mapped (either at * an offset within prev, or at the start of next), and the flags of * this area are about to be changed to vm_flags - and the no-change * case has already been eliminated. * * The following mprotect cases have to be considered, where **** is * the area passed down from mprotect_fixup, never extending beyond one * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts * at the same address as **** and is of the same or larger span, and * NNNN the next vma after ****: * * **** **** **** * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPCCCCCC * cannot merge might become might become * PPNNNNNNNNNN PPPPPPPPPPCC * mmap, brk or case 4 below case 5 below * mremap move: * **** **** * PPPP NNNN PPPPCCCCNNNN * might become might become * PPPPPPPPPPPP 1 or PPPPPPPPPPPP 6 or * PPPPPPPPNNNN 2 or PPPPPPPPNNNN 7 or * PPPPNNNNNNNN 3 PPPPNNNNNNNN 8 * * It is important for case 8 that the vma CCCC overlapping the * region **** is never going to extended over NNNN. Instead NNNN must * be extended in region **** and CCCC must be removed. This way in * all cases where vma_merge succeeds, the moment vma_merge drops the * rmap_locks, the properties of the merged vma will be already * correct for the whole merged range. Some of those properties like * vm_page_prot/vm_flags may be accessed by rmap_walks and they must * be correct for the whole merged range immediately after the * rmap_locks are released. 
Otherwise if NNNN would be removed and * CCCC would be extended over the NNNN range, remove_migration_ptes * or other rmap walkers (if working on addresses beyond the "end" * parameter) may establish ptes with the wrong permissions of CCCC * instead of the right permissions of NNNN. * * In the code below: * PPPP is represented by *prev * CCCC is represented by *curr or not represented at all (NULL) * NNNN is represented by *next or not represented at all (NULL) * **** is not represented - it will be merged and the vma containing the * area is returned, or the function will return NULL */ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t pgoff, struct mempolicy *policy, struct vm_userfaultfd_ctx vm_userfaultfd_ctx, struct anon_vma_name *anon_name) { struct vm_area_struct *curr, *next, *res; struct vm_area_struct *vma, *adjust, *remove, *remove2; struct vma_prepare vp; pgoff_t vma_pgoff; int err = 0; bool merge_prev = false; bool merge_next = false; bool vma_expanded = false; unsigned long vma_start = addr; unsigned long vma_end = end; pgoff_t pglen = (end - addr) >> PAGE_SHIFT; long adj_start = 0; /* * We later require that vma->vm_flags == vm_flags, * so this tests vma->vm_flags & VM_SPECIAL, too. */ if (vm_flags & VM_SPECIAL) return NULL; /* Does the input range span an existing VMA? (cases 5 - 8) */ curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end); if (!curr || /* cases 1 - 4 */ end == curr->vm_end) /* cases 6 - 8, adjacent VMA */ next = vma_lookup(mm, end); else next = NULL; /* case 5 */ if (prev) { vma_start = prev->vm_start; vma_pgoff = prev->vm_pgoff; /* Can we merge the predecessor? */ if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy) && can_vma_merge_after(prev, vm_flags, anon_vma, file, pgoff, vm_userfaultfd_ctx, anon_name)) { merge_prev = true; vma_prev(vmi); } } /* Can we merge the successor? */ if (next && mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen, vm_userfaultfd_ctx, anon_name)) { merge_next = true; } /* Verify some invariant that must be enforced by the caller. */ VM_WARN_ON(prev && addr <= prev->vm_start); VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end)); VM_WARN_ON(addr >= end); if (!merge_prev && !merge_next) return NULL; /* Not mergeable. */ if (merge_prev) vma_start_write(prev); res = vma = prev; remove = remove2 = adjust = NULL; /* Can we merge both the predecessor and the successor? 
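	 * If so, these are cases 1 and 6 handled below: next is removed (and
	 * in case 6 curr as well) while prev absorbs the whole range.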
*/ if (merge_prev && merge_next && is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) { vma_start_write(next); remove = next; /* case 1 */ vma_end = next->vm_end; err = dup_anon_vma(prev, next); if (curr) { /* case 6 */ vma_start_write(curr); remove = curr; remove2 = next; if (!next->anon_vma) err = dup_anon_vma(prev, curr); } } else if (merge_prev) { /* case 2 */ if (curr) { vma_start_write(curr); err = dup_anon_vma(prev, curr); if (end == curr->vm_end) { /* case 7 */ remove = curr; } else { /* case 5 */ adjust = curr; adj_start = (end - curr->vm_start); } } } else { /* merge_next */ vma_start_write(next); res = next; if (prev && addr < prev->vm_end) { /* case 4 */ vma_start_write(prev); vma_end = addr; adjust = next; adj_start = -(prev->vm_end - addr); err = dup_anon_vma(next, prev); } else { /* * Note that cases 3 and 8 are the ONLY ones where prev * is permitted to be (but is not necessarily) NULL. */ vma = next; /* case 3 */ vma_start = addr; vma_end = next->vm_end; vma_pgoff = next->vm_pgoff - pglen; if (curr) { /* case 8 */ vma_pgoff = curr->vm_pgoff; vma_start_write(curr); remove = curr; err = dup_anon_vma(next, curr); } } } /* Error in anon_vma clone. */ if (err) return NULL; if (vma_start < vma->vm_start || vma_end > vma->vm_end) vma_expanded = true; if (vma_expanded) { vma_iter_config(vmi, vma_start, vma_end); } else { vma_iter_config(vmi, adjust->vm_start + adj_start, adjust->vm_end); } if (vma_iter_prealloc(vmi, vma)) return NULL; init_multi_vma_prep(&vp, vma, adjust, remove, remove2); VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma && vp.anon_vma != adjust->anon_vma); vma_prepare(&vp); vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start); vma->vm_start = vma_start; vma->vm_end = vma_end; vma->vm_pgoff = vma_pgoff; if (vma_expanded) vma_iter_store(vmi, vma); if (adj_start) { adjust->vm_start += adj_start; adjust->vm_pgoff += adj_start >> PAGE_SHIFT; if (adj_start < 0) { WARN_ON(vma_expanded); vma_iter_store(vmi, next); } } vma_complete(&vp, vmi, mm); khugepaged_enter_vma(res, vm_flags); return res; } /* * Rough compatibility check to quickly see if it's even worth looking * at sharing an anon_vma. * * They need to have the same vm_file, and the flags can only differ * in things that mprotect may change. * * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that * we can merge the two vma's. For example, we refuse to merge a vma if * there is a vm_ops->close() function, because that indicates that the * driver is doing some kind of reference counting. But that doesn't * really matter for the anon_vma sharing case. */ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b) { return a->vm_end == b->vm_start && mpol_equal(vma_policy(a), vma_policy(b)) && a->vm_file == b->vm_file && !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) && b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); } /* * Do some basic sanity checking to see if we can re-use the anon_vma * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be * the same as 'old', the other will be the new one that is trying * to share the anon_vma. * * NOTE! This runs with mmap_lock held for reading, so it is possible that * the anon_vma of 'old' is concurrently in the process of being set up * by another page fault trying to merge _that_. But that's ok: if it * is being set up, that automatically means that it will be a singleton * acceptable for merging, so we can do all of this optimistically. 
But * we do that READ_ONCE() to make sure that we never re-load the pointer. * * IOW: that the "list_is_singular()" test on the anon_vma_chain only * matters for the 'stable anon_vma' case (ie the thing we want to avoid * is to return an anon_vma that is "complex" due to having gone through * a fork). * * We also make sure that the two vma's are compatible (adjacent, * and with the same memory policies). That's all stable, even with just * a read lock on the mmap_lock. */ static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b) { if (anon_vma_compatible(a, b)) { struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); if (anon_vma && list_is_singular(&old->anon_vma_chain)) return anon_vma; } return NULL; } /* * find_mergeable_anon_vma is used by anon_vma_prepare, to check * neighbouring vmas for a suitable anon_vma, before it goes off * to allocate a new anon_vma. It checks because a repetitive * sequence of mprotects and faults may otherwise lead to distinct * anon_vmas being allocated, preventing vma merge in subsequent * mprotect. */ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) { MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_end, vma->vm_end); struct anon_vma *anon_vma = NULL; struct vm_area_struct *prev, *next; /* Try next first. */ next = mas_walk(&mas); if (next) { anon_vma = reusable_anon_vma(next, vma, next); if (anon_vma) return anon_vma; } prev = mas_prev(&mas, 0); VM_BUG_ON_VMA(prev != vma, vma); prev = mas_prev(&mas, 0); /* Try prev next. */ if (prev) anon_vma = reusable_anon_vma(prev, prev, vma); /* * We might reach here with anon_vma == NULL if we can't find * any reusable anon_vma. * There's no absolute need to look only at touching neighbours: * we could search further afield for "compatible" anon_vmas. * But it would probably just be a waste of time searching, * or lead to too many vmas hanging off the same anon_vma. * We're trying to allow mprotect remerging later on, * not trying to minimize memory used for anon_vmas. */ return anon_vma; } /* * If a hint addr is less than mmap_min_addr change hint to be as * low as possible but still greater than mmap_min_addr */ static inline unsigned long round_hint_to_min(unsigned long hint) { hint &= PAGE_MASK; if (((void *)hint != NULL) && (hint < mmap_min_addr)) return PAGE_ALIGN(mmap_min_addr); return hint; } bool mlock_future_ok(struct mm_struct *mm, unsigned long flags, unsigned long bytes) { unsigned long locked_pages, limit_pages; if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK)) return true; locked_pages = bytes >> PAGE_SHIFT; locked_pages += mm->locked_vm; limit_pages = rlimit(RLIMIT_MEMLOCK); limit_pages >>= PAGE_SHIFT; return locked_pages <= limit_pages; } static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) { if (S_ISREG(inode->i_mode)) return MAX_LFS_FILESIZE; if (S_ISBLK(inode->i_mode)) return MAX_LFS_FILESIZE; if (S_ISSOCK(inode->i_mode)) return MAX_LFS_FILESIZE; /* Special "we do even unsigned file positions" case */ if (file->f_mode & FMODE_UNSIGNED_OFFSET) return 0; /* Yes, random drivers might want more. But I'm tired of buggy drivers */ return ULONG_MAX; } static inline bool file_mmap_ok(struct file *file, struct inode *inode, unsigned long pgoff, unsigned long len) { u64 maxsize = file_mmap_size_max(file, inode); if (maxsize && len > maxsize) return false; maxsize -= len; if (pgoff > maxsize >> PAGE_SHIFT) return false; return true; } /* * The caller must write-lock current->mm->mmap_lock. 
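 *
 * A minimal sketch of the expected calling pattern (illustrative only; the
 * in-tree wrappers such as vm_mmap_pgoff() live outside this file, and the
 * declarations of ret, populate and uf are omitted):
 *
 *	if (mmap_write_lock_killable(mm))
 *		return -EINTR;
 *	ret = do_mmap(file, addr, len, prot, flags, 0, pgoff, &populate, &uf);
 *	mmap_write_unlock(mm);
 *	if (populate && !IS_ERR_VALUE(ret))
 *		mm_populate(ret, populate);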
*/ unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, struct list_head *uf) { struct mm_struct *mm = current->mm; int pkey = 0; *populate = 0; if (!len) return -EINVAL; /* * Does the application expect PROT_READ to imply PROT_EXEC? * * (the exception is when the underlying filesystem is noexec * mounted, in which case we dont add PROT_EXEC.) */ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) if (!(file && path_noexec(&file->f_path))) prot |= PROT_EXEC; /* force arch specific MAP_FIXED handling in get_unmapped_area */ if (flags & MAP_FIXED_NOREPLACE) flags |= MAP_FIXED; if (!(flags & MAP_FIXED)) addr = round_hint_to_min(addr); /* Careful about overflows.. */ len = PAGE_ALIGN(len); if (!len) return -ENOMEM; /* offset overflow? */ if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) return -EOVERFLOW; /* Too many mappings? */ if (mm->map_count > sysctl_max_map_count) return -ENOMEM; /* Obtain the address to map to. we verify (or select) it and ensure * that it represents a valid section of the address space. */ addr = get_unmapped_area(file, addr, len, pgoff, flags); if (IS_ERR_VALUE(addr)) return addr; if (flags & MAP_FIXED_NOREPLACE) { if (find_vma_intersection(mm, addr, addr + len)) return -EEXIST; } if (prot == PROT_EXEC) { pkey = execute_only_pkey(mm); if (pkey < 0) pkey = 0; } /* Do simple checking here so the lower-level routines won't have * to. we assume access permissions have been handled by the open * of the memory object, so we don't do any here. */ vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; if (flags & MAP_LOCKED) if (!can_do_mlock()) return -EPERM; if (!mlock_future_ok(mm, vm_flags, len)) return -EAGAIN; if (file) { struct inode *inode = file_inode(file); unsigned long flags_mask; if (!file_mmap_ok(file, inode, pgoff, len)) return -EOVERFLOW; flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags; switch (flags & MAP_TYPE) { case MAP_SHARED: /* * Force use of MAP_SHARED_VALIDATE with non-legacy * flags. E.g. MAP_SYNC is dangerous to use with * MAP_SHARED as you don't know which consistency model * you will get. We silently ignore unsupported flags * with MAP_SHARED to preserve backward compatibility. */ flags &= LEGACY_MAP_MASK; fallthrough; case MAP_SHARED_VALIDATE: if (flags & ~flags_mask) return -EOPNOTSUPP; if (prot & PROT_WRITE) { if (!(file->f_mode & FMODE_WRITE)) return -EACCES; if (IS_SWAPFILE(file->f_mapping->host)) return -ETXTBSY; } /* * Make sure we don't allow writing to an append-only * file.. */ if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) return -EACCES; vm_flags |= VM_SHARED | VM_MAYSHARE; if (!(file->f_mode & FMODE_WRITE)) vm_flags &= ~(VM_MAYWRITE | VM_SHARED); fallthrough; case MAP_PRIVATE: if (!(file->f_mode & FMODE_READ)) return -EACCES; if (path_noexec(&file->f_path)) { if (vm_flags & VM_EXEC) return -EPERM; vm_flags &= ~VM_MAYEXEC; } if (!file->f_op->mmap) return -ENODEV; if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) return -EINVAL; break; default: return -EINVAL; } } else { switch (flags & MAP_TYPE) { case MAP_SHARED: if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP)) return -EINVAL; /* * Ignore pgoff. */ pgoff = 0; vm_flags |= VM_SHARED | VM_MAYSHARE; break; case MAP_PRIVATE: /* * Set pgoff according to addr for anon_vma. 
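			 * Mirroring the virtual start address in vm_pgoff lets
			 * adjacent anonymous mappings pass the vm_pgoff
			 * adjacency checks in can_vma_merge_before/after() and
			 * so be merged; see the similar handling in
			 * insert_vm_struct() below.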
*/ pgoff = addr >> PAGE_SHIFT; break; default: return -EINVAL; } } /* * Set 'VM_NORESERVE' if we should not account for the * memory use of this mapping. */ if (flags & MAP_NORESERVE) { /* We honor MAP_NORESERVE if allowed to overcommit */ if (sysctl_overcommit_memory != OVERCOMMIT_NEVER) vm_flags |= VM_NORESERVE; /* hugetlb applies strict overcommit unless MAP_NORESERVE */ if (file && is_file_hugepages(file)) vm_flags |= VM_NORESERVE; } addr = mmap_region(file, addr, len, vm_flags, pgoff, uf); if (!IS_ERR_VALUE(addr) && ((vm_flags & VM_LOCKED) || (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) *populate = len; return addr; } unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) { struct file *file = NULL; unsigned long retval; if (!(flags & MAP_ANONYMOUS)) { audit_mmap_fd(fd, flags); file = fget(fd); if (!file) return -EBADF; if (is_file_hugepages(file)) { len = ALIGN(len, huge_page_size(hstate_file(file))); } else if (unlikely(flags & MAP_HUGETLB)) { retval = -EINVAL; goto out_fput; } } else if (flags & MAP_HUGETLB) { struct hstate *hs; hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); if (!hs) return -EINVAL; len = ALIGN(len, huge_page_size(hs)); /* * VM_NORESERVE is used because the reservations will be * taken when vm_ops->mmap() is called */ file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE, HUGETLB_ANONHUGE_INODE, (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); if (IS_ERR(file)) return PTR_ERR(file); } retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); out_fput: if (file) fput(file); return retval; } SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, pgoff) { return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); } #ifdef __ARCH_WANT_SYS_OLD_MMAP struct mmap_arg_struct { unsigned long addr; unsigned long len; unsigned long prot; unsigned long flags; unsigned long fd; unsigned long offset; }; SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) { struct mmap_arg_struct a; if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; if (offset_in_page(a.offset)) return -EINVAL; return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); } #endif /* __ARCH_WANT_SYS_OLD_MMAP */ static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops) { return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite); } static bool vma_is_shared_writable(struct vm_area_struct *vma) { return (vma->vm_flags & (VM_WRITE | VM_SHARED)) == (VM_WRITE | VM_SHARED); } static bool vma_fs_can_writeback(struct vm_area_struct *vma) { /* No managed pages to writeback. */ if (vma->vm_flags & VM_PFNMAP) return false; return vma->vm_file && vma->vm_file->f_mapping && mapping_can_writeback(vma->vm_file->f_mapping); } /* * Does this VMA require the underlying folios to have their dirty state * tracked? */ bool vma_needs_dirty_tracking(struct vm_area_struct *vma) { /* Only shared, writable VMAs require dirty tracking. */ if (!vma_is_shared_writable(vma)) return false; /* Does the filesystem need to be notified? */ if (vm_ops_needs_writenotify(vma->vm_ops)) return true; /* * Even if the filesystem doesn't indicate a need for writenotify, if it * can writeback, dirty tracking is still required. */ return vma_fs_can_writeback(vma); } /* * Some shared mappings will want the pages marked read-only * to track write events. 
If so, we'll downgrade vm_page_prot * to the private version (using protection_map[] without the * VM_SHARED bit). */ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) { /* If it was private or non-writable, the write bit is already clear */ if (!vma_is_shared_writable(vma)) return 0; /* The backer wishes to know when pages are first written to? */ if (vm_ops_needs_writenotify(vma->vm_ops)) return 1; /* The open routine did something to the protections that pgprot_modify * won't preserve? */ if (pgprot_val(vm_page_prot) != pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags))) return 0; /* * Do we need to track softdirty? hugetlb does not support softdirty * tracking yet. */ if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma)) return 1; /* Do we need write faults for uffd-wp tracking? */ if (userfaultfd_wp(vma)) return 1; /* Can the mapping track the dirty pages? */ return vma_fs_can_writeback(vma); } /* * We account for memory if it's a private writeable mapping, * not hugepages and VM_NORESERVE wasn't set. */ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags) { /* * hugetlb has its own accounting separate from the core VM * VM_HUGETLB may not be set yet so we cannot check for that flag. */ if (file && is_file_hugepages(file)) return 0; return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE; } /** * unmapped_area() - Find an area between the low_limit and the high_limit with * the correct alignment and offset, all from @info. Note: current->mm is used * for the search. * * @info: The unmapped area information including the range [low_limit - * high_limit), the alignment offset and mask. * * Return: A memory address or -ENOMEM. */ static unsigned long unmapped_area(struct vm_unmapped_area_info *info) { unsigned long length, gap; unsigned long low_limit, high_limit; struct vm_area_struct *tmp; MA_STATE(mas, &current->mm->mm_mt, 0, 0); /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask; if (length < info->length) return -ENOMEM; low_limit = info->low_limit; if (low_limit < mmap_min_addr) low_limit = mmap_min_addr; high_limit = info->high_limit; retry: if (mas_empty_area(&mas, low_limit, high_limit - 1, length)) return -ENOMEM; gap = mas.index; gap += (info->align_offset - gap) & info->align_mask; tmp = mas_next(&mas, ULONG_MAX); if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */ if (vm_start_gap(tmp) < gap + length - 1) { low_limit = tmp->vm_end; mas_reset(&mas); goto retry; } } else { tmp = mas_prev(&mas, 0); if (tmp && vm_end_gap(tmp) > gap) { low_limit = vm_end_gap(tmp); mas_reset(&mas); goto retry; } } return gap; } /** * unmapped_area_topdown() - Find an area between the low_limit and the * high_limit with the correct alignment and offset at the highest available * address, all from @info. Note: current->mm is used for the search. * * @info: The unmapped area information including the range [low_limit - * high_limit), the alignment offset and mask. * * Return: A memory address or -ENOMEM. 
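 *
 * Worked example of the alignment step below (illustrative numbers only):
 * with info->length == 0x3000, info->align_mask == 0xffff and
 * info->align_offset == 0, a maple-tree gap ending at mas.last == 0x7fffffff
 * yields gap = 0x7fffffff + 1 - 0x3000 = 0x7fffd000, which the
 * "gap -= (gap - info->align_offset) & info->align_mask" line rounds down
 * to 0x7fff0000.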
*/ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) { unsigned long length, gap, gap_end; unsigned long low_limit, high_limit; struct vm_area_struct *tmp; MA_STATE(mas, &current->mm->mm_mt, 0, 0); /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask; if (length < info->length) return -ENOMEM; low_limit = info->low_limit; if (low_limit < mmap_min_addr) low_limit = mmap_min_addr; high_limit = info->high_limit; retry: if (mas_empty_area_rev(&mas, low_limit, high_limit - 1, length)) return -ENOMEM; gap = mas.last + 1 - info->length; gap -= (gap - info->align_offset) & info->align_mask; gap_end = mas.last; tmp = mas_next(&mas, ULONG_MAX); if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */ if (vm_start_gap(tmp) <= gap_end) { high_limit = vm_start_gap(tmp); mas_reset(&mas); goto retry; } } else { tmp = mas_prev(&mas, 0); if (tmp && vm_end_gap(tmp) > gap) { high_limit = tmp->vm_start; mas_reset(&mas); goto retry; } } return gap; } /* * Search for an unmapped address range. * * We are looking for a range that: * - does not intersect with any VMA; * - is contained within the [low_limit, high_limit) interval; * - is at least the desired size. * - satisfies (begin_addr & align_mask) == (align_offset & align_mask) */ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info) { unsigned long addr; if (info->flags & VM_UNMAPPED_AREA_TOPDOWN) addr = unmapped_area_topdown(info); else addr = unmapped_area(info); trace_vm_unmapped_area(addr, info); return addr; } /* Get an address range which is currently unmapped. * For shmat() with addr=0. * * Ugly calling convention alert: * Return value with the low bits set means error value, * ie * if (ret & ~PAGE_MASK) * error = ret; * * This function "knows" that -ENOMEM has the bits set. 
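 *
 * For example, on a 64-bit kernel with 4KiB pages (an illustrative
 * configuration), -ENOMEM is (unsigned long)-12 == 0xfffffffffffffff4, so
 * "ret & ~PAGE_MASK" is non-zero, while any page-aligned success value
 * passes the check.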
*/ unsigned long generic_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; struct vm_unmapped_area_info info; const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); if (len > mmap_end - mmap_min_addr) return -ENOMEM; if (flags & MAP_FIXED) return addr; if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma_prev(mm, addr, &prev); if (mmap_end - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma)) && (!prev || addr >= vm_end_gap(prev))) return addr; } info.flags = 0; info.length = len; info.low_limit = mm->mmap_base; info.high_limit = mmap_end; info.align_mask = 0; info.align_offset = 0; return vm_unmapped_area(&info); } #ifndef HAVE_ARCH_UNMAPPED_AREA unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { return generic_get_unmapped_area(filp, addr, len, pgoff, flags); } #endif /* * This mmap-allocator allocates new areas top-down from below the * stack's low limit (the base): */ unsigned long generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; struct vm_unmapped_area_info info; const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags); /* requested length too big for entire address space */ if (len > mmap_end - mmap_min_addr) return -ENOMEM; if (flags & MAP_FIXED) return addr; /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma_prev(mm, addr, &prev); if (mmap_end - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma)) && (!prev || addr >= vm_end_gap(prev))) return addr; } info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = PAGE_SIZE; info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); info.align_mask = 0; info.align_offset = 0; addr = vm_unmapped_area(&info); /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. */ if (offset_in_page(addr)) { VM_BUG_ON(addr != -ENOMEM); info.flags = 0; info.low_limit = TASK_UNMAPPED_BASE; info.high_limit = mmap_end; addr = vm_unmapped_area(&info); } return addr; } #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); } #endif unsigned long get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); unsigned long error = arch_mmap_check(addr, len, flags); if (error) return error; /* Careful about overflows.. */ if (len > TASK_SIZE) return -ENOMEM; get_area = current->mm->get_unmapped_area; if (file) { if (file->f_op->get_unmapped_area) get_area = file->f_op->get_unmapped_area; } else if (flags & MAP_SHARED) { /* * mmap_region() will call shmem_zero_setup() to create a file, * so use shmem's get_unmapped_area in case it can be huge. * do_mmap() will clear pgoff, so match alignment. 
*/ pgoff = 0; get_area = shmem_get_unmapped_area; } addr = get_area(file, addr, len, pgoff, flags); if (IS_ERR_VALUE(addr)) return addr; if (addr > TASK_SIZE - len) return -ENOMEM; if (offset_in_page(addr)) return -EINVAL; error = security_mmap_addr(addr); return error ? error : addr; } EXPORT_SYMBOL(get_unmapped_area); /** * find_vma_intersection() - Look up the first VMA which intersects the interval * @mm: The process address space. * @start_addr: The inclusive start user address. * @end_addr: The exclusive end user address. * * Returns: The first VMA within the provided range, %NULL otherwise. Assumes * start_addr < end_addr. */ struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, unsigned long start_addr, unsigned long end_addr) { unsigned long index = start_addr; mmap_assert_locked(mm); return mt_find(&mm->mm_mt, &index, end_addr - 1); } EXPORT_SYMBOL(find_vma_intersection); /** * find_vma() - Find the VMA for a given address, or the next VMA. * @mm: The mm_struct to check * @addr: The address * * Returns: The VMA associated with addr, or the next VMA. * May return %NULL in the case of no VMA at addr or above. */ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) { unsigned long index = addr; mmap_assert_locked(mm); return mt_find(&mm->mm_mt, &index, ULONG_MAX); } EXPORT_SYMBOL(find_vma); /** * find_vma_prev() - Find the VMA for a given address, or the next vma and * set %pprev to the previous VMA, if any. * @mm: The mm_struct to check * @addr: The address * @pprev: The pointer to set to the previous VMA * * Note that RCU lock is missing here since the external mmap_lock() is used * instead. * * Returns: The VMA associated with @addr, or the next vma. * May return %NULL in the case of no vma at addr or above. */ struct vm_area_struct * find_vma_prev(struct mm_struct *mm, unsigned long addr, struct vm_area_struct **pprev) { struct vm_area_struct *vma; MA_STATE(mas, &mm->mm_mt, addr, addr); vma = mas_walk(&mas); *pprev = mas_prev(&mas, 0); if (!vma) vma = mas_next(&mas, ULONG_MAX); return vma; } /* * Verify that the stack growth is acceptable and * update accounting. This is shared with both the * grow-up and grow-down cases. */ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) { struct mm_struct *mm = vma->vm_mm; unsigned long new_start; /* address space limit tests */ if (!may_expand_vm(mm, vma->vm_flags, grow)) return -ENOMEM; /* Stack limit test */ if (size > rlimit(RLIMIT_STACK)) return -ENOMEM; /* mlock limit tests */ if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT)) return -ENOMEM; /* Check to ensure the stack will not grow into a hugetlb-only region */ new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : vma->vm_end - size; if (is_hugepage_only_range(vma->vm_mm, new_start, size)) return -EFAULT; /* * Overcommit.. This must be the final test, as it will * update security statistics. */ if (security_vm_enough_memory_mm(mm, grow)) return -ENOMEM; return 0; } #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) /* * PA-RISC uses this for its stack; IA64 for its Register Backing Store. * vma is the last one with address > vma->vm_end. Have to extend vma. 
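 *
 * Summarising the checks below: the expansion keeps at least stack_guard_gap
 * bytes free of accessible mappings above the grown stack unless the next
 * mapping also grows up; with the default of 256 pages set later in this
 * file, that is 1MiB on a 4KiB-page kernel (256 << 12).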
*/ static int expand_upwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *next; unsigned long gap_addr; int error = 0; MA_STATE(mas, &mm->mm_mt, vma->vm_start, address); if (!(vma->vm_flags & VM_GROWSUP)) return -EFAULT; /* Guard against exceeding limits of the address space. */ address &= PAGE_MASK; if (address >= (TASK_SIZE & PAGE_MASK)) return -ENOMEM; address += PAGE_SIZE; /* Enforce stack_guard_gap */ gap_addr = address + stack_guard_gap; /* Guard against overflow */ if (gap_addr < address || gap_addr > TASK_SIZE) gap_addr = TASK_SIZE; next = find_vma_intersection(mm, vma->vm_end, gap_addr); if (next && vma_is_accessible(next)) { if (!(next->vm_flags & VM_GROWSUP)) return -ENOMEM; /* Check that both stack segments have the same anon_vma? */ } if (next) mas_prev_range(&mas, address); __mas_set_range(&mas, vma->vm_start, address - 1); if (mas_preallocate(&mas, vma, GFP_KERNEL)) return -ENOMEM; /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) { mas_destroy(&mas); return -ENOMEM; } /* Lock the VMA before expanding to prevent concurrent page faults */ vma_start_write(vma); /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_lock in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. */ anon_vma_lock_write(vma->anon_vma); /* Somebody else might have raced and expanded it already */ if (address > vma->vm_end) { unsigned long size, grow; size = address - vma->vm_start; grow = (address - vma->vm_end) >> PAGE_SHIFT; error = -ENOMEM; if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { error = acct_stack_growth(vma, size, grow); if (!error) { /* * We only hold a shared mmap_lock lock here, so * we need to protect against concurrent vma * expansions. anon_vma_lock_write() doesn't * help here, as we don't guarantee that all * growable vmas in a mm share the same root * anon vma. So, we reuse mm->page_table_lock * to guard against concurrent vma expansions. */ spin_lock(&mm->page_table_lock); if (vma->vm_flags & VM_LOCKED) mm->locked_vm += grow; vm_stat_account(mm, vma->vm_flags, grow); anon_vma_interval_tree_pre_update_vma(vma); vma->vm_end = address; /* Overwrite old entry in mtree. */ mas_store_prealloc(&mas, vma); anon_vma_interval_tree_post_update_vma(vma); spin_unlock(&mm->page_table_lock); perf_event_mmap(vma); } } } anon_vma_unlock_write(vma->anon_vma); khugepaged_enter_vma(vma, vma->vm_flags); mas_destroy(&mas); validate_mm(mm); return error; } #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */ /* * vma is the first one with address < vma->vm_start. Have to extend vma. * mmap_lock held for writing. */ int expand_downwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start); struct vm_area_struct *prev; int error = 0; if (!(vma->vm_flags & VM_GROWSDOWN)) return -EFAULT; address &= PAGE_MASK; if (address < mmap_min_addr || address < FIRST_USER_ADDRESS) return -EPERM; /* Enforce stack_guard_gap */ prev = mas_prev(&mas, 0); /* Check that both stack segments have the same anon_vma? 
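	 * The check below refuses the expansion (-ENOMEM) when prev is an
	 * accessible mapping that does not itself grow down and lies closer
	 * than stack_guard_gap below the new start address.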
*/ if (prev) { if (!(prev->vm_flags & VM_GROWSDOWN) && vma_is_accessible(prev) && (address - prev->vm_end < stack_guard_gap)) return -ENOMEM; } if (prev) mas_next_range(&mas, vma->vm_start); __mas_set_range(&mas, address, vma->vm_end - 1); if (mas_preallocate(&mas, vma, GFP_KERNEL)) return -ENOMEM; /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) { mas_destroy(&mas); return -ENOMEM; } /* Lock the VMA before expanding to prevent concurrent page faults */ vma_start_write(vma); /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_lock in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. */ anon_vma_lock_write(vma->anon_vma); /* Somebody else might have raced and expanded it already */ if (address < vma->vm_start) { unsigned long size, grow; size = vma->vm_end - address; grow = (vma->vm_start - address) >> PAGE_SHIFT; error = -ENOMEM; if (grow <= vma->vm_pgoff) { error = acct_stack_growth(vma, size, grow); if (!error) { /* * We only hold a shared mmap_lock lock here, so * we need to protect against concurrent vma * expansions. anon_vma_lock_write() doesn't * help here, as we don't guarantee that all * growable vmas in a mm share the same root * anon vma. So, we reuse mm->page_table_lock * to guard against concurrent vma expansions. */ spin_lock(&mm->page_table_lock); if (vma->vm_flags & VM_LOCKED) mm->locked_vm += grow; vm_stat_account(mm, vma->vm_flags, grow); anon_vma_interval_tree_pre_update_vma(vma); vma->vm_start = address; vma->vm_pgoff -= grow; /* Overwrite old entry in mtree. */ mas_store_prealloc(&mas, vma); anon_vma_interval_tree_post_update_vma(vma); spin_unlock(&mm->page_table_lock); perf_event_mmap(vma); } } } anon_vma_unlock_write(vma->anon_vma); khugepaged_enter_vma(vma, vma->vm_flags); mas_destroy(&mas); validate_mm(mm); return error; } /* enforced gap between the expanding stack and other mappings. */ unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; static int __init cmdline_parse_stack_guard_gap(char *p) { unsigned long val; char *endptr; val = simple_strtoul(p, &endptr, 10); if (!*endptr) stack_guard_gap = val << PAGE_SHIFT; return 1; } __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); #ifdef CONFIG_STACK_GROWSUP int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) { return expand_upwards(vma, address); } struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma, *prev; addr &= PAGE_MASK; vma = find_vma_prev(mm, addr, &prev); if (vma && (vma->vm_start <= addr)) return vma; if (!prev) return NULL; if (expand_stack_locked(prev, addr)) return NULL; if (prev->vm_flags & VM_LOCKED) populate_vma_page_range(prev, addr, prev->vm_end, NULL); return prev; } #else int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) { if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) return -EINVAL; return expand_downwards(vma, address); } struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma; unsigned long start; addr &= PAGE_MASK; vma = find_vma(mm, addr); if (!vma) return NULL; if (vma->vm_start <= addr) return vma; start = vma->vm_start; if (expand_stack_locked(vma, addr)) return NULL; if (vma->vm_flags & VM_LOCKED) populate_vma_page_range(vma, addr, start, NULL); return vma; } #endif /* * IA64 has some horrid mapping rules: it can expand both up and down, * but with various special rules. 
* * We'll get rid of this architecture eventually, so the ugliness is * temporary. */ #ifdef CONFIG_IA64 static inline bool vma_expand_ok(struct vm_area_struct *vma, unsigned long addr) { return REGION_NUMBER(addr) == REGION_NUMBER(vma->vm_start) && REGION_OFFSET(addr) < RGN_MAP_LIMIT; } /* * IA64 stacks grow down, but there's a special register backing store * that can grow up. Only sequentially, though, so the new address must * match vm_end. */ static inline int vma_expand_up(struct vm_area_struct *vma, unsigned long addr) { if (!vma_expand_ok(vma, addr)) return -EFAULT; if (vma->vm_end != (addr & PAGE_MASK)) return -EFAULT; return expand_upwards(vma, addr); } static inline bool vma_expand_down(struct vm_area_struct *vma, unsigned long addr) { if (!vma_expand_ok(vma, addr)) return -EFAULT; return expand_downwards(vma, addr); } #elif defined(CONFIG_STACK_GROWSUP) #define vma_expand_up(vma,addr) expand_upwards(vma, addr) #define vma_expand_down(vma, addr) (-EFAULT) #else #define vma_expand_up(vma,addr) (-EFAULT) #define vma_expand_down(vma, addr) expand_downwards(vma, addr) #endif /* * expand_stack(): legacy interface for page faulting. Don't use unless * you have to. * * This is called with the mm locked for reading, drops the lock, takes * the lock for writing, tries to look up a vma again, expands it if * necessary, and downgrades the lock to reading again. * * If no vma is found or it can't be expanded, it returns NULL and has * dropped the lock. */ struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma, *prev; mmap_read_unlock(mm); if (mmap_write_lock_killable(mm)) return NULL; vma = find_vma_prev(mm, addr, &prev); if (vma && vma->vm_start <= addr) goto success; if (prev && !vma_expand_up(prev, addr)) { vma = prev; goto success; } if (vma && !vma_expand_down(vma, addr)) goto success; mmap_write_unlock(mm); return NULL; success: mmap_write_downgrade(mm); return vma; } /* * Ok - we have the memory areas we should free on a maple tree so release them, * and do the vma updates. * * Called with the mm semaphore held. */ static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas) { unsigned long nr_accounted = 0; struct vm_area_struct *vma; /* Update high watermark before we lower total_vm */ update_hiwater_vm(mm); mas_for_each(mas, vma, ULONG_MAX) { long nrpages = vma_pages(vma); if (vma->vm_flags & VM_ACCOUNT) nr_accounted += nrpages; vm_stat_account(mm, vma->vm_flags, -nrpages); remove_vma(vma, false); } vm_unacct_memory(nr_accounted); } /* * Get rid of page table information in the indicated region. * * Called with the mm semaphore held. */ static void unmap_region(struct mm_struct *mm, struct ma_state *mas, struct vm_area_struct *vma, struct vm_area_struct *prev, struct vm_area_struct *next, unsigned long start, unsigned long end, unsigned long tree_end, bool mm_wr_locked) { struct mmu_gather tlb; unsigned long mt_start = mas->index; lru_add_drain(); tlb_gather_mmu(&tlb, mm); update_hiwater_rss(mm); unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked); mas_set(mas, mt_start); free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, next ? next->vm_start : USER_PGTABLES_CEILING, mm_wr_locked); tlb_finish_mmu(&tlb); } /* * __split_vma() bypasses sysctl_max_map_count checking. We use this where it * has already been checked or doesn't make sense to fail. * VMA Iterator will point to the end VMA. 
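 *
 * Worked example (illustrative values): splitting a file-backed VMA covering
 * [0x40000000, 0x40008000) with vm_pgoff == 0x10 at addr == 0x40004000 with
 * new_below == 0 leaves the old VMA as [0x40000000, 0x40004000) and gives the
 * new VMA [0x40004000, 0x40008000) with
 * vm_pgoff == 0x10 + ((0x40004000 - 0x40000000) >> PAGE_SHIFT) == 0x14 on a
 * 4KiB-page kernel.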
*/ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long addr, int new_below) { struct vma_prepare vp; struct vm_area_struct *new; int err; WARN_ON(vma->vm_start >= addr); WARN_ON(vma->vm_end <= addr); if (vma->vm_ops && vma->vm_ops->may_split) { err = vma->vm_ops->may_split(vma, addr); if (err) return err; } new = vm_area_dup(vma); if (!new) return -ENOMEM; if (new_below) { new->vm_end = addr; } else { new->vm_start = addr; new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); } err = -ENOMEM; vma_iter_config(vmi, new->vm_start, new->vm_end); if (vma_iter_prealloc(vmi, new)) goto out_free_vma; err = vma_dup_policy(vma, new); if (err) goto out_free_vmi; err = anon_vma_clone(new, vma); if (err) goto out_free_mpol; if (new->vm_file) get_file(new->vm_file); if (new->vm_ops && new->vm_ops->open) new->vm_ops->open(new); vma_start_write(vma); vma_start_write(new); init_vma_prep(&vp, vma); vp.insert = new; vma_prepare(&vp); vma_adjust_trans_huge(vma, vma->vm_start, addr, 0); if (new_below) { vma->vm_start = addr; vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT; } else { vma->vm_end = addr; } /* vma_complete stores the new vma */ vma_complete(&vp, vmi, vma->vm_mm); /* Success. */ if (new_below) vma_next(vmi); return 0; out_free_mpol: mpol_put(vma_policy(new)); out_free_vmi: vma_iter_free(vmi); out_free_vma: vm_area_free(new); return err; } /* * Split a vma into two pieces at address 'addr', a new vma is allocated * either for the first part or the tail. */ int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long addr, int new_below) { if (vma->vm_mm->map_count >= sysctl_max_map_count) return -ENOMEM; return __split_vma(vmi, vma, addr, new_below); } /* * do_vmi_align_munmap() - munmap the aligned region from @start to @end. * @vmi: The vma iterator * @vma: The starting vm_area_struct * @mm: The mm_struct * @start: The aligned start address to munmap. * @end: The aligned end address to munmap. * @uf: The userfaultfd list_head * @unlock: Set to true to drop the mmap_lock. unlocking only happens on * success. * * Return: 0 on success and drops the lock if so directed, error and leaves the * lock held otherwise. */ static int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, struct mm_struct *mm, unsigned long start, unsigned long end, struct list_head *uf, bool unlock) { struct vm_area_struct *prev, *next = NULL; struct maple_tree mt_detach; int count = 0; int error = -ENOMEM; unsigned long locked_vm = 0; MA_STATE(mas_detach, &mt_detach, 0, 0); mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK); mt_on_stack(mt_detach); /* * If we need to split any vma, do it now to save pain later. * * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially * unmapped vm_area_struct will remain in use: so lower split_vma * places tmp vma above, and higher split_vma places tmp vma below. */ /* Does it split the first one? */ if (start > vma->vm_start) { /* * Make sure that map_count on return from munmap() will * not exceed its limit; but let map_count go just above * its limit temporarily, to help free resources as expected. */ if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) goto map_count_exceeded; error = __split_vma(vmi, vma, start, 1); if (error) goto start_split_failed; } /* * Detach a range of VMAs from the mm. Using next as a temp variable as * it is always overwritten. */ next = vma; do { /* Does it split the end? 
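		 * That is, does the last VMA in the range extend past @end?
		 * If so, split it first so that only the [start, end) part
		 * is detached below.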
*/ if (next->vm_end > end) { error = __split_vma(vmi, next, end, 0); if (error) goto end_split_failed; } vma_start_write(next); mas_set(&mas_detach, count); error = mas_store_gfp(&mas_detach, next, GFP_KERNEL); if (error) goto munmap_gather_failed; vma_mark_detached(next, true); if (next->vm_flags & VM_LOCKED) locked_vm += vma_pages(next); count++; if (unlikely(uf)) { /* * If userfaultfd_unmap_prep returns an error the vmas * will remain split, but userland will get a * highly unexpected error anyway. This is no * different than the case where the first of the two * __split_vma fails, but we don't undo the first * split, despite we could. This is unlikely enough * failure that it's not worth optimizing it for. */ error = userfaultfd_unmap_prep(next, start, end, uf); if (error) goto userfaultfd_error; } #ifdef CONFIG_DEBUG_VM_MAPLE_TREE BUG_ON(next->vm_start < start); BUG_ON(next->vm_start > end); #endif } for_each_vma_range(*vmi, next, end); #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) /* Make sure no VMAs are about to be lost. */ { MA_STATE(test, &mt_detach, 0, 0); struct vm_area_struct *vma_mas, *vma_test; int test_count = 0; vma_iter_set(vmi, start); rcu_read_lock(); vma_test = mas_find(&test, count - 1); for_each_vma_range(*vmi, vma_mas, end) { BUG_ON(vma_mas != vma_test); test_count++; vma_test = mas_next(&test, count - 1); } rcu_read_unlock(); BUG_ON(count != test_count); } #endif while (vma_iter_addr(vmi) > start) vma_iter_prev_range(vmi); error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL); if (error) goto clear_tree_failed; /* Point of no return */ mm->locked_vm -= locked_vm; mm->map_count -= count; if (unlock) mmap_write_downgrade(mm); prev = vma_iter_prev_range(vmi); next = vma_next(vmi); if (next) vma_iter_prev_range(vmi); /* * We can free page tables without write-locking mmap_lock because VMAs * were isolated before we downgraded mmap_lock. */ mas_set(&mas_detach, 1); unmap_region(mm, &mas_detach, vma, prev, next, start, end, count, !unlock); /* Statistics and freeing VMAs */ mas_set(&mas_detach, 0); remove_mt(mm, &mas_detach); validate_mm(mm); if (unlock) mmap_read_unlock(mm); __mt_destroy(&mt_detach); return 0; clear_tree_failed: userfaultfd_error: munmap_gather_failed: end_split_failed: mas_set(&mas_detach, 0); mas_for_each(&mas_detach, next, end) vma_mark_detached(next, false); __mt_destroy(&mt_detach); start_split_failed: map_count_exceeded: validate_mm(mm); return error; } /* * do_vmi_munmap() - munmap a given range. * @vmi: The vma iterator * @mm: The mm_struct * @start: The start address to munmap * @len: The length of the range to munmap * @uf: The userfaultfd list_head * @unlock: set to true if the user wants to drop the mmap_lock on success * * This function takes a @mas that is either pointing to the previous VMA or set * to MA_START and sets it up to remove the mapping(s). The @len will be * aligned and any arch_unmap work will be preformed. * * Return: 0 on success and drops the lock if so directed, error and leaves the * lock held otherwise. */ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf, bool unlock) { unsigned long end; struct vm_area_struct *vma; if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) return -EINVAL; end = start + PAGE_ALIGN(len); if (end == start) return -EINVAL; /* arch_unmap() might do unmaps itself. 
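	 * It is called before the first overlapping VMA is looked up below,
	 * presumably so that the lookup reflects anything it unmaps.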
*/ arch_unmap(mm, start, end); /* Find the first overlapping VMA */ vma = vma_find(vmi, end); if (!vma) { if (unlock) mmap_write_unlock(mm); return 0; } return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); } /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls. * @mm: The mm_struct * @start: The start address to munmap * @len: The length to be munmapped. * @uf: The userfaultfd list_head * * Return: 0 on success, error otherwise. */ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf) { VMA_ITERATOR(vmi, mm, start); return do_vmi_munmap(&vmi, mm, start, len, uf, false); } unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, struct list_head *uf) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma = NULL; struct vm_area_struct *next, *prev, *merge; pgoff_t pglen = len >> PAGE_SHIFT; unsigned long charged = 0; unsigned long end = addr + len; unsigned long merge_start = addr, merge_end = end; pgoff_t vm_pgoff; int error; VMA_ITERATOR(vmi, mm, addr); /* Check against address space limit. */ if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) { unsigned long nr_pages; /* * MAP_FIXED may remove pages of mappings that intersects with * requested mapping. Account for the pages it would unmap. */ nr_pages = count_vma_pages_range(mm, addr, end); if (!may_expand_vm(mm, vm_flags, (len >> PAGE_SHIFT) - nr_pages)) return -ENOMEM; } /* Unmap any existing mapping in the area */ if (do_vmi_munmap(&vmi, mm, addr, len, uf, false)) return -ENOMEM; /* * Private writable mapping: check memory availability */ if (accountable_mapping(file, vm_flags)) { charged = len >> PAGE_SHIFT; if (security_vm_enough_memory_mm(mm, charged)) return -ENOMEM; vm_flags |= VM_ACCOUNT; } next = vma_next(&vmi); prev = vma_prev(&vmi); if (vm_flags & VM_SPECIAL) { if (prev) vma_iter_next_range(&vmi); goto cannot_expand; } /* Attempt to expand an old mapping */ /* Check next */ if (next && next->vm_start == end && !vma_policy(next) && can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen, NULL_VM_UFFD_CTX, NULL)) { merge_end = next->vm_end; vma = next; vm_pgoff = next->vm_pgoff - pglen; } /* Check prev */ if (prev && prev->vm_end == addr && !vma_policy(prev) && (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file, pgoff, vma->vm_userfaultfd_ctx, NULL) : can_vma_merge_after(prev, vm_flags, NULL, file, pgoff, NULL_VM_UFFD_CTX, NULL))) { merge_start = prev->vm_start; vma = prev; vm_pgoff = prev->vm_pgoff; } else if (prev) { vma_iter_next_range(&vmi); } /* Actually expand, if possible */ if (vma && !vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) { khugepaged_enter_vma(vma, vm_flags); goto expanded; } if (vma == prev) vma_iter_set(&vmi, addr); cannot_expand: /* * Determine the object being mapped and call the appropriate * specific mapper. the address has already been validated, but * not unmapped, but the maps are removed from the list. 
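	 * In other words, by this point the range has been validated and any
	 * old mappings inside it were already removed by the do_vmi_munmap()
	 * call above, so a brand-new VMA is allocated and set up here.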
*/ vma = vm_area_alloc(mm); if (!vma) { error = -ENOMEM; goto unacct_error; } vma_iter_config(&vmi, addr, end); vma->vm_start = addr; vma->vm_end = end; vm_flags_init(vma, vm_flags); vma->vm_page_prot = vm_get_page_prot(vm_flags); vma->vm_pgoff = pgoff; if (file) { if (vm_flags & VM_SHARED) { error = mapping_map_writable(file->f_mapping); if (error) goto free_vma; } vma->vm_file = get_file(file); error = call_mmap(file, vma); if (error) goto unmap_and_free_vma; /* * Expansion is handled above, merging is handled below. * Drivers should not alter the address of the VMA. */ error = -EINVAL; if (WARN_ON((addr != vma->vm_start))) goto close_and_free_vma; vma_iter_config(&vmi, addr, end); /* * If vm_flags changed after call_mmap(), we should try merge * vma again as we may succeed this time. */ if (unlikely(vm_flags != vma->vm_flags && prev)) { merge = vma_merge(&vmi, mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags, NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX, NULL); if (merge) { /* * ->mmap() can change vma->vm_file and fput * the original file. So fput the vma->vm_file * here or we would add an extra fput for file * and cause general protection fault * ultimately. */ fput(vma->vm_file); vm_area_free(vma); vma = merge; /* Update vm_flags to pick up the change. */ vm_flags = vma->vm_flags; goto unmap_writable; } } vm_flags = vma->vm_flags; } else if (vm_flags & VM_SHARED) { error = shmem_zero_setup(vma); if (error) goto free_vma; } else { vma_set_anonymous(vma); } if (map_deny_write_exec(vma, vma->vm_flags)) { error = -EACCES; goto close_and_free_vma; } /* Allow architectures to sanity-check the vm_flags */ error = -EINVAL; if (!arch_validate_flags(vma->vm_flags)) goto close_and_free_vma; error = -ENOMEM; if (vma_iter_prealloc(&vmi, vma)) goto close_and_free_vma; /* Lock the VMA since it is modified after insertion into VMA tree */ vma_start_write(vma); vma_iter_store(&vmi, vma); mm->map_count++; if (vma->vm_file) { i_mmap_lock_write(vma->vm_file->f_mapping); if (vma->vm_flags & VM_SHARED) mapping_allow_writable(vma->vm_file->f_mapping); flush_dcache_mmap_lock(vma->vm_file->f_mapping); vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap); flush_dcache_mmap_unlock(vma->vm_file->f_mapping); i_mmap_unlock_write(vma->vm_file->f_mapping); } /* * vma_merge() calls khugepaged_enter_vma() either, the below * call covers the non-merge case. */ khugepaged_enter_vma(vma, vma->vm_flags); /* Once vma denies write, undo our temporary denial count */ unmap_writable: if (file && vm_flags & VM_SHARED) mapping_unmap_writable(file->f_mapping); file = vma->vm_file; ksm_add_vma(vma); expanded: perf_event_mmap(vma); vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT); if (vm_flags & VM_LOCKED) { if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm)) vm_flags_clear(vma, VM_LOCKED_MASK); else mm->locked_vm += (len >> PAGE_SHIFT); } if (file) uprobe_mmap(vma); /* * New (or expanded) vma always get soft dirty status. * Otherwise user-space soft-dirty page tracker won't * be able to distinguish situation when vma area unmapped, * then new mapped in-place (which must be aimed as * a completely new data area). 
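 * When CONFIG_MEM_SOFT_DIRTY is not enabled, VM_SOFTDIRTY is defined as 0,
 * so the vm_flags_set() below is expected to be a no-op in that
 * configuration.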
*/ vm_flags_set(vma, VM_SOFTDIRTY); vma_set_page_prot(vma); validate_mm(mm); return addr; close_and_free_vma: if (file && vma->vm_ops && vma->vm_ops->close) vma->vm_ops->close(vma); if (file || vma->vm_file) { unmap_and_free_vma: fput(vma->vm_file); vma->vm_file = NULL; vma_iter_set(&vmi, vma->vm_end); /* Undo any partial mapping done by a device driver. */ unmap_region(mm, &vmi.mas, vma, prev, next, vma->vm_start, vma->vm_end, vma->vm_end, true); } if (file && (vm_flags & VM_SHARED)) mapping_unmap_writable(file->f_mapping); free_vma: vm_area_free(vma); unacct_error: if (charged) vm_unacct_memory(charged); validate_mm(mm); return error; } static int __vm_munmap(unsigned long start, size_t len, bool unlock) { int ret; struct mm_struct *mm = current->mm; LIST_HEAD(uf); VMA_ITERATOR(vmi, mm, start); if (mmap_write_lock_killable(mm)) return -EINTR; ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock); if (ret || !unlock) mmap_write_unlock(mm); userfaultfd_unmap_complete(mm, &uf); return ret; } int vm_munmap(unsigned long start, size_t len) { return __vm_munmap(start, len, false); } EXPORT_SYMBOL(vm_munmap); SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) { addr = untagged_addr(addr); return __vm_munmap(addr, len, true); } /* * Emulation of deprecated remap_file_pages() syscall. */ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long populate = 0; unsigned long ret = -EINVAL; struct file *file; pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n", current->comm, current->pid); if (prot) return ret; start = start & PAGE_MASK; size = size & PAGE_MASK; if (start + size <= start) return ret; /* Does pgoff wrap? */ if (pgoff + (size >> PAGE_SHIFT) < pgoff) return ret; if (mmap_write_lock_killable(mm)) return -EINTR; vma = vma_lookup(mm, start); if (!vma || !(vma->vm_flags & VM_SHARED)) goto out; if (start + size > vma->vm_end) { VMA_ITERATOR(vmi, mm, vma->vm_end); struct vm_area_struct *next, *prev = vma; for_each_vma_range(vmi, next, start + size) { /* hole between vmas ? */ if (next->vm_start != prev->vm_end) goto out; if (next->vm_file != vma->vm_file) goto out; if (next->vm_flags != vma->vm_flags) goto out; if (start + size <= next->vm_end) break; prev = next; } if (!next) goto out; } prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; flags &= MAP_NONBLOCK; flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; if (vma->vm_flags & VM_LOCKED) flags |= MAP_LOCKED; file = get_file(vma->vm_file); ret = do_mmap(vma->vm_file, start, size, prot, flags, 0, pgoff, &populate, NULL); fput(file); out: mmap_write_unlock(mm); if (populate) mm_populate(ret, populate); if (!IS_ERR_VALUE(ret)) ret = 0; return ret; } /* * do_vma_munmap() - Unmap a full or partial vma. * @vmi: The vma iterator pointing at the vma * @vma: The first vma to be munmapped * @start: the start of the address to unmap * @end: The end of the address to unmap * @uf: The userfaultfd list_head * @unlock: Drop the lock on success * * unmaps a VMA mapping when the vma iterator is already in position. * Does not handle alignment. * * Return: 0 on success drops the lock of so directed, error on failure and will * still hold the lock. 
*/ int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct list_head *uf, bool unlock) { struct mm_struct *mm = vma->vm_mm; arch_unmap(mm, start, end); return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); } /* * do_brk_flags() - Increase the brk vma if the flags match. * @vmi: The vma iterator * @addr: The start address * @len: The length of the increase * @vma: The vma, * @flags: The VMA Flags * * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags * do not match then create a new anonymous VMA. Eventually we may be able to * do some brk-specific accounting here. */ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long flags) { struct mm_struct *mm = current->mm; struct vma_prepare vp; /* * Check against address space limits by the changed size * Note: This happens *after* clearing old mappings in some code paths. */ flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) return -ENOMEM; if (mm->map_count > sysctl_max_map_count) return -ENOMEM; if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) return -ENOMEM; /* * Expand the existing vma if possible; Note that singular lists do not * occur after forking, so the expand will only happen on new VMAs. */ if (vma && vma->vm_end == addr && !vma_policy(vma) && can_vma_merge_after(vma, flags, NULL, NULL, addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) { vma_iter_config(vmi, vma->vm_start, addr + len); if (vma_iter_prealloc(vmi, vma)) goto unacct_fail; vma_start_write(vma); init_vma_prep(&vp, vma); vma_prepare(&vp); vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0); vma->vm_end = addr + len; vm_flags_set(vma, VM_SOFTDIRTY); vma_iter_store(vmi, vma); vma_complete(&vp, vmi, mm); khugepaged_enter_vma(vma, flags); goto out; } if (vma) vma_iter_next_range(vmi); /* create a vma struct for an anonymous mapping */ vma = vm_area_alloc(mm); if (!vma) goto unacct_fail; vma_set_anonymous(vma); vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_pgoff = addr >> PAGE_SHIFT; vm_flags_init(vma, flags); vma->vm_page_prot = vm_get_page_prot(flags); vma_start_write(vma); if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL)) goto mas_store_fail; mm->map_count++; validate_mm(mm); ksm_add_vma(vma); out: perf_event_mmap(vma); mm->total_vm += len >> PAGE_SHIFT; mm->data_vm += len >> PAGE_SHIFT; if (flags & VM_LOCKED) mm->locked_vm += (len >> PAGE_SHIFT); vm_flags_set(vma, VM_SOFTDIRTY); return 0; mas_store_fail: vm_area_free(vma); unacct_fail: vm_unacct_memory(len >> PAGE_SHIFT); return -ENOMEM; } int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma = NULL; unsigned long len; int ret; bool populate; LIST_HEAD(uf); VMA_ITERATOR(vmi, mm, addr); len = PAGE_ALIGN(request); if (len < request) return -ENOMEM; if (!len) return 0; if (mmap_write_lock_killable(mm)) return -EINTR; /* Until we need other flags, refuse anything except VM_EXEC. 
*/ if ((flags & (~VM_EXEC)) != 0) return -EINVAL; ret = check_brk_limits(addr, len); if (ret) goto limits_failed; ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0); if (ret) goto munmap_failed; vma = vma_prev(&vmi); ret = do_brk_flags(&vmi, vma, addr, len, flags); populate = ((mm->def_flags & VM_LOCKED) != 0); mmap_write_unlock(mm); userfaultfd_unmap_complete(mm, &uf); if (populate && !ret) mm_populate(addr, len); return ret; munmap_failed: limits_failed: mmap_write_unlock(mm); return ret; } EXPORT_SYMBOL(vm_brk_flags); int vm_brk(unsigned long addr, unsigned long len) { return vm_brk_flags(addr, len, 0); } EXPORT_SYMBOL(vm_brk); /* Release all mmaps. */ void exit_mmap(struct mm_struct *mm) { struct mmu_gather tlb; struct vm_area_struct *vma; unsigned long nr_accounted = 0; MA_STATE(mas, &mm->mm_mt, 0, 0); int count = 0; /* mm's last user has gone, and its about to be pulled down */ mmu_notifier_release(mm); mmap_read_lock(mm); arch_exit_mmap(mm); vma = mas_find(&mas, ULONG_MAX); if (!vma) { /* Can happen if dup_mmap() received an OOM */ mmap_read_unlock(mm); return; } lru_add_drain(); flush_cache_mm(mm); tlb_gather_mmu_fullmm(&tlb, mm); /* update_hiwater_rss(mm) here? but nobody should be looking */ /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */ unmap_vmas(&tlb, &mas, vma, 0, ULONG_MAX, ULONG_MAX, false); mmap_read_unlock(mm); /* * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper * because the memory has been already freed. */ set_bit(MMF_OOM_SKIP, &mm->flags); mmap_write_lock(mm); mt_clear_in_rcu(&mm->mm_mt); mas_set(&mas, vma->vm_end); free_pgtables(&tlb, &mas, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING, true); tlb_finish_mmu(&tlb); /* * Walk the list again, actually closing and freeing it, with preemption * enabled, without holding any MM locks besides the unreachable * mmap_write_lock. */ mas_set(&mas, vma->vm_end); do { if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); remove_vma(vma, true); count++; cond_resched(); } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL); BUG_ON(count != mm->map_count); trace_exit_mmap(mm); __mt_destroy(&mm->mm_mt); mmap_write_unlock(mm); vm_unacct_memory(nr_accounted); } /* Insert vm structure into process list sorted by address * and into the inode's i_mmap tree. If vm_file is non-NULL * then i_mmap_rwsem is taken here. */ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) { unsigned long charged = vma_pages(vma); if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) return -ENOMEM; if ((vma->vm_flags & VM_ACCOUNT) && security_vm_enough_memory_mm(mm, charged)) return -ENOMEM; /* * The vm_pgoff of a purely anonymous vma should be irrelevant * until its first write fault, when page's anon_vma and index * are set. But now set the vm_pgoff it will almost certainly * end up with (unless mremap moves it elsewhere before that * first wfault), so /proc/pid/maps tells a consistent story. * * By setting it to reflect the virtual start address of the * vma, merges and splits can happen in a seamless way, just * using the existing file pgoff checks and manipulations. * Similarly in do_mmap and in do_brk_flags. */ if (vma_is_anonymous(vma)) { BUG_ON(vma->anon_vma); vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; } if (vma_link(mm, vma)) { vm_unacct_memory(charged); return -ENOMEM; } return 0; } /* * Copy the vma structure to a new location in the same mm, * prior to moving page table entries, to effect an mremap move. 
*/ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, unsigned long addr, unsigned long len, pgoff_t pgoff, bool *need_rmap_locks) { struct vm_area_struct *vma = *vmap; unsigned long vma_start = vma->vm_start; struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *new_vma, *prev; bool faulted_in_anon_vma = true; VMA_ITERATOR(vmi, mm, addr); /* * If anonymous vma has not yet been faulted, update new pgoff * to match new location, to increase its chance of merging. */ if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { pgoff = addr >> PAGE_SHIFT; faulted_in_anon_vma = false; } new_vma = find_vma_prev(mm, addr, &prev); if (new_vma && new_vma->vm_start < addr + len) return NULL; /* should never get here */ new_vma = vma_merge(&vmi, mm, prev, addr, addr + len, vma->vm_flags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), vma->vm_userfaultfd_ctx, anon_vma_name(vma)); if (new_vma) { /* * Source vma may have been merged into new_vma */ if (unlikely(vma_start >= new_vma->vm_start && vma_start < new_vma->vm_end)) { /* * The only way we can get a vma_merge with * self during an mremap is if the vma hasn't * been faulted in yet and we were allowed to * reset the dst vma->vm_pgoff to the * destination address of the mremap to allow * the merge to happen. mremap must change the * vm_pgoff linearity between src and dst vmas * (in turn preventing a vma_merge) to be * safe. It is only safe to keep the vm_pgoff * linear if there are no pages mapped yet. */ VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); *vmap = vma = new_vma; } *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); } else { new_vma = vm_area_dup(vma); if (!new_vma) goto out; new_vma->vm_start = addr; new_vma->vm_end = addr + len; new_vma->vm_pgoff = pgoff; if (vma_dup_policy(vma, new_vma)) goto out_free_vma; if (anon_vma_clone(new_vma, vma)) goto out_free_mempol; if (new_vma->vm_file) get_file(new_vma->vm_file); if (new_vma->vm_ops && new_vma->vm_ops->open) new_vma->vm_ops->open(new_vma); if (vma_link(mm, new_vma)) goto out_vma_link; *need_rmap_locks = false; } return new_vma; out_vma_link: if (new_vma->vm_ops && new_vma->vm_ops->close) new_vma->vm_ops->close(new_vma); if (new_vma->vm_file) fput(new_vma->vm_file); unlink_anon_vmas(new_vma); out_free_mempol: mpol_put(vma_policy(new_vma)); out_free_vma: vm_area_free(new_vma); out: return NULL; } /* * Return true if the calling process may expand its vm space by the passed * number of pages */ bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) { if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) return false; if (is_data_mapping(flags) && mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { /* Workaround for Valgrind */ if (rlimit(RLIMIT_DATA) == 0 && mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) return true; pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n", current->comm, current->pid, (mm->data_vm + npages) << PAGE_SHIFT, rlimit(RLIMIT_DATA), ignore_rlimit_data ? 
"" : " or use boot option ignore_rlimit_data"); if (!ignore_rlimit_data) return false; } return true; } void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) { WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages); if (is_exec_mapping(flags)) mm->exec_vm += npages; else if (is_stack_mapping(flags)) mm->stack_vm += npages; else if (is_data_mapping(flags)) mm->data_vm += npages; } static vm_fault_t special_mapping_fault(struct vm_fault *vmf); /* * Having a close hook prevents vma merging regardless of flags. */ static void special_mapping_close(struct vm_area_struct *vma) { } static const char *special_mapping_name(struct vm_area_struct *vma) { return ((struct vm_special_mapping *)vma->vm_private_data)->name; } static int special_mapping_mremap(struct vm_area_struct *new_vma) { struct vm_special_mapping *sm = new_vma->vm_private_data; if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) return -EFAULT; if (sm->mremap) return sm->mremap(sm, new_vma); return 0; } static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr) { /* * Forbid splitting special mappings - kernel has expectations over * the number of pages in mapping. Together with VM_DONTEXPAND * the size of vma should stay the same over the special mapping's * lifetime. */ return -EINVAL; } static const struct vm_operations_struct special_mapping_vmops = { .close = special_mapping_close, .fault = special_mapping_fault, .mremap = special_mapping_mremap, .name = special_mapping_name, /* vDSO code relies that VVAR can't be accessed remotely */ .access = NULL, .may_split = special_mapping_split, }; static const struct vm_operations_struct legacy_special_mapping_vmops = { .close = special_mapping_close, .fault = special_mapping_fault, }; static vm_fault_t special_mapping_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; pgoff_t pgoff; struct page **pages; if (vma->vm_ops == &legacy_special_mapping_vmops) { pages = vma->vm_private_data; } else { struct vm_special_mapping *sm = vma->vm_private_data; if (sm->fault) return sm->fault(sm, vmf->vma, vmf); pages = sm->pages; } for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) pgoff--; if (*pages) { struct page *page = *pages; get_page(page); vmf->page = page; return 0; } return VM_FAULT_SIGBUS; } static struct vm_area_struct *__install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, void *priv, const struct vm_operations_struct *ops) { int ret; struct vm_area_struct *vma; vma = vm_area_alloc(mm); if (unlikely(vma == NULL)) return ERR_PTR(-ENOMEM); vma->vm_start = addr; vma->vm_end = addr + len; vm_flags_init(vma, (vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK); vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); vma->vm_ops = ops; vma->vm_private_data = priv; ret = insert_vm_struct(mm, vma); if (ret) goto out; vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); perf_event_mmap(vma); return vma; out: vm_area_free(vma); return ERR_PTR(ret); } bool vma_is_special_mapping(const struct vm_area_struct *vma, const struct vm_special_mapping *sm) { return vma->vm_private_data == sm && (vma->vm_ops == &special_mapping_vmops || vma->vm_ops == &legacy_special_mapping_vmops); } /* * Called with mm->mmap_lock held for writing. * Insert a new vma covering the given region, with the given flags. * Its pages are supplied by the given array of struct page *. * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. 
* The region past the last page supplied will always produce SIGBUS. * The array pointer and the pages it points to are assumed to stay alive * for as long as this mapping might exist. */ struct vm_area_struct *_install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, const struct vm_special_mapping *spec) { return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, &special_mapping_vmops); } int install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, struct page **pages) { struct vm_area_struct *vma = __install_special_mapping( mm, addr, len, vm_flags, (void *)pages, &legacy_special_mapping_vmops); return PTR_ERR_OR_ZERO(vma); } static DEFINE_MUTEX(mm_all_locks_mutex); static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) { if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { /* * The LSB of head.next can't change from under us * because we hold the mm_all_locks_mutex. */ down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock); /* * We can safely modify head.next after taking the * anon_vma->root->rwsem. If some other vma in this mm shares * the same anon_vma we won't take it again. * * No need of atomic instructions here, head.next * can't change from under us thanks to the * anon_vma->root->rwsem. */ if (__test_and_set_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) BUG(); } } static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) { if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { /* * AS_MM_ALL_LOCKS can't change from under us because * we hold the mm_all_locks_mutex. * * Operations on ->flags have to be atomic because * even if AS_MM_ALL_LOCKS is stable thanks to the * mm_all_locks_mutex, there may be other cpus * changing other bitflags in parallel to us. */ if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) BUG(); down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock); } } /* * This operation locks against the VM for all pte/vma/mm related * operations that could ever happen on a certain mm. This includes * vmtruncate, try_to_unmap, and all page faults. * * The caller must take the mmap_lock in write mode before calling * mm_take_all_locks(). The caller isn't allowed to release the * mmap_lock until mm_drop_all_locks() returns. * * mmap_lock in write mode is required in order to block all operations * that could modify pagetables and free pages without need of * altering the vma layout. It's also needed in write mode to avoid new * anon_vmas to be associated with existing vmas. * * A single task can't take more than one mm_take_all_locks() in a row * or it would deadlock. * * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in * mapping->flags avoid to take the same lock twice, if more than one * vma in this mm is backed by the same anon_vma or address_space. * * We take locks in following order, accordingly to comment at beginning * of mm/rmap.c: * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for * hugetlb mapping); * - all vmas marked locked * - all i_mmap_rwsem locks; * - all anon_vma->rwseml * * We can take all locks within these types randomly because the VM code * doesn't nest them and we protected from parallel mm_take_all_locks() by * mm_all_locks_mutex. * * mm_take_all_locks() and mm_drop_all_locks are expensive operations * that may have to take thousand of locks. 
* * mm_take_all_locks() can fail if it's interrupted by signals. */ int mm_take_all_locks(struct mm_struct *mm) { struct vm_area_struct *vma; struct anon_vma_chain *avc; MA_STATE(mas, &mm->mm_mt, 0, 0); mmap_assert_write_locked(mm); mutex_lock(&mm_all_locks_mutex); /* * vma_start_write() does not have a complement in mm_drop_all_locks() * because vma_start_write() is always asymmetrical; it marks a VMA as * being written to until mmap_write_unlock() or mmap_write_downgrade() * is reached. */ mas_for_each(&mas, vma, ULONG_MAX) { if (signal_pending(current)) goto out_unlock; vma_start_write(vma); } mas_set(&mas, 0); mas_for_each(&mas, vma, ULONG_MAX) { if (signal_pending(current)) goto out_unlock; if (vma->vm_file && vma->vm_file->f_mapping && is_vm_hugetlb_page(vma)) vm_lock_mapping(mm, vma->vm_file->f_mapping); } mas_set(&mas, 0); mas_for_each(&mas, vma, ULONG_MAX) { if (signal_pending(current)) goto out_unlock; if (vma->vm_file && vma->vm_file->f_mapping && !is_vm_hugetlb_page(vma)) vm_lock_mapping(mm, vma->vm_file->f_mapping); } mas_set(&mas, 0); mas_for_each(&mas, vma, ULONG_MAX) { if (signal_pending(current)) goto out_unlock; if (vma->anon_vma) list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) vm_lock_anon_vma(mm, avc->anon_vma); } return 0; out_unlock: mm_drop_all_locks(mm); return -EINTR; } static void vm_unlock_anon_vma(struct anon_vma *anon_vma) { if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { /* * The LSB of head.next can't change to 0 from under * us because we hold the mm_all_locks_mutex. * * We must however clear the bitflag before unlocking * the vma so the users using the anon_vma->rb_root will * never see our bitflag. * * No need of atomic instructions here, head.next * can't change from under us until we release the * anon_vma->root->rwsem. */ if (!__test_and_clear_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) BUG(); anon_vma_unlock_write(anon_vma); } } static void vm_unlock_mapping(struct address_space *mapping) { if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { /* * AS_MM_ALL_LOCKS can't change to 0 from under us * because we hold the mm_all_locks_mutex. */ i_mmap_unlock_write(mapping); if (!test_and_clear_bit(AS_MM_ALL_LOCKS, &mapping->flags)) BUG(); } } /* * The mmap_lock cannot be released by the caller until * mm_drop_all_locks() returns. */ void mm_drop_all_locks(struct mm_struct *mm) { struct vm_area_struct *vma; struct anon_vma_chain *avc; MA_STATE(mas, &mm->mm_mt, 0, 0); mmap_assert_write_locked(mm); BUG_ON(!mutex_is_locked(&mm_all_locks_mutex)); mas_for_each(&mas, vma, ULONG_MAX) { if (vma->anon_vma) list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) vm_unlock_anon_vma(avc->anon_vma); if (vma->vm_file && vma->vm_file->f_mapping) vm_unlock_mapping(vma->vm_file->f_mapping); } mutex_unlock(&mm_all_locks_mutex); } /* * initialise the percpu counter for VM */ void __init mmap_init(void) { int ret; ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); VM_BUG_ON(ret); } /* * Initialise sysctl_user_reserve_kbytes. * * This is intended to prevent a user from starting a single memory hogging * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER * mode. * * The default value is min(3% of free memory, 128MB) * 128MB is enough to recover with sshd/login, bash, and top/kill. 
*/ static int init_user_reserve(void) { unsigned long free_kbytes; free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17); return 0; } subsys_initcall(init_user_reserve); /* * Initialise sysctl_admin_reserve_kbytes. * * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin * to log in and kill a memory hogging process. * * Systems with more than 256MB will reserve 8MB, enough to recover * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will * only reserve 3% of free pages by default. */ static int init_admin_reserve(void) { unsigned long free_kbytes; free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13); return 0; } subsys_initcall(init_admin_reserve); /* * Reinititalise user and admin reserves if memory is added or removed. * * The default user reserve max is 128MB, and the default max for the * admin reserve is 8MB. These are usually, but not always, enough to * enable recovery from a memory hogging process using login/sshd, a shell, * and tools like top. It may make sense to increase or even disable the * reserve depending on the existence of swap or variations in the recovery * tools. So, the admin may have changed them. * * If memory is added and the reserves have been eliminated or increased above * the default max, then we'll trust the admin. * * If memory is removed and there isn't enough free memory, then we * need to reset the reserves. * * Otherwise keep the reserve set by the admin. */ static int reserve_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) { unsigned long tmp, free_kbytes; switch (action) { case MEM_ONLINE: /* Default max is 128MB. Leave alone if modified by operator. */ tmp = sysctl_user_reserve_kbytes; if (0 < tmp && tmp < (1UL << 17)) init_user_reserve(); /* Default max is 8MB. Leave alone if modified by operator. */ tmp = sysctl_admin_reserve_kbytes; if (0 < tmp && tmp < (1UL << 13)) init_admin_reserve(); break; case MEM_OFFLINE: free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); if (sysctl_user_reserve_kbytes > free_kbytes) { init_user_reserve(); pr_info("vm.user_reserve_kbytes reset to %lu\n", sysctl_user_reserve_kbytes); } if (sysctl_admin_reserve_kbytes > free_kbytes) { init_admin_reserve(); pr_info("vm.admin_reserve_kbytes reset to %lu\n", sysctl_admin_reserve_kbytes); } break; default: break; } return NOTIFY_OK; } static int __meminit init_reserve_notifier(void) { if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI)) pr_err("Failed registering memory add/remove notifier for admin reserve\n"); return 0; } subsys_initcall(init_reserve_notifier);
linux-master
mm/mmap.c
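The mm/mmap.c excerpt above implements, among other things, the syscall side of munmap() (SYSCALL_DEFINE2(munmap) -> __vm_munmap() -> do_vmi_munmap()) and the anonymous-mapping branch of mmap_region(). The sketch below is a minimal userspace illustration of that user-visible contract; it is not part of the kernel sources and assumes an ordinary Linux/POSIX environment with MAP_ANONYMOUS.

/*
 * Illustrative userspace sketch (not part of mm/mmap.c): exercises the
 * mmap()/munmap() paths implemented above. Error handling is minimal.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 4 * (size_t)page;

	/* Anonymous private mapping: the !file, !VM_SHARED branch above. */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}
	memset(p, 0xaa, len);

	/* Punching a hole in the middle goes through do_vmi_munmap(). */
	if (munmap(p + page, (size_t)page) != 0) {
		perror("munmap");
		return EXIT_FAILURE;
	}

	/* Unmapping the whole range again is fine; unmapped holes are skipped. */
	munmap(p, len);
	return EXIT_SUCCESS;
}

Punching the hole in the middle forces the VMA split path, while the final munmap() over the whole range shows that a range containing already-unmapped pages is not an error.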
// SPDX-License-Identifier: GPL-2.0 #include <linux/pagewalk.h> #include <linux/hugetlb.h> #include <linux/bitops.h> #include <linux/mmu_notifier.h> #include <linux/mm_inline.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> /** * struct wp_walk - Private struct for pagetable walk callbacks * @range: Range for mmu notifiers * @tlbflush_start: Address of first modified pte * @tlbflush_end: Address of last modified pte + 1 * @total: Total number of modified ptes */ struct wp_walk { struct mmu_notifier_range range; unsigned long tlbflush_start; unsigned long tlbflush_end; unsigned long total; }; /** * wp_pte - Write-protect a pte * @pte: Pointer to the pte * @addr: The start of protecting virtual address * @end: The end of protecting virtual address * @walk: pagetable walk callback argument * * The function write-protects a pte and records the range in * virtual address space of touched ptes for efficient range TLB flushes. */ static int wp_pte(pte_t *pte, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct wp_walk *wpwalk = walk->private; pte_t ptent = ptep_get(pte); if (pte_write(ptent)) { pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte); ptent = pte_wrprotect(old_pte); ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent); wpwalk->total++; wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr); wpwalk->tlbflush_end = max(wpwalk->tlbflush_end, addr + PAGE_SIZE); } return 0; } /** * struct clean_walk - Private struct for the clean_record_pte function. * @base: struct wp_walk we derive from * @bitmap_pgoff: Address_space Page offset of the first bit in @bitmap * @bitmap: Bitmap with one bit for each page offset in the address_space range * covered. * @start: Address_space page offset of first modified pte relative * to @bitmap_pgoff * @end: Address_space page offset of last modified pte relative * to @bitmap_pgoff */ struct clean_walk { struct wp_walk base; pgoff_t bitmap_pgoff; unsigned long *bitmap; pgoff_t start; pgoff_t end; }; #define to_clean_walk(_wpwalk) container_of(_wpwalk, struct clean_walk, base) /** * clean_record_pte - Clean a pte and record its address space offset in a * bitmap * @pte: Pointer to the pte * @addr: The start of virtual address to be clean * @end: The end of virtual address to be clean * @walk: pagetable walk callback argument * * The function cleans a pte and records the range in * virtual address space of touched ptes for efficient TLB flushes. * It also records dirty ptes in a bitmap representing page offsets * in the address_space, as well as the first and last of the bits * touched. */ static int clean_record_pte(pte_t *pte, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct wp_walk *wpwalk = walk->private; struct clean_walk *cwalk = to_clean_walk(wpwalk); pte_t ptent = ptep_get(pte); if (pte_dirty(ptent)) { pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) + walk->vma->vm_pgoff - cwalk->bitmap_pgoff; pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte); ptent = pte_mkclean(old_pte); ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent); wpwalk->total++; wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr); wpwalk->tlbflush_end = max(wpwalk->tlbflush_end, addr + PAGE_SIZE); __set_bit(pgoff, cwalk->bitmap); cwalk->start = min(cwalk->start, pgoff); cwalk->end = max(cwalk->end, pgoff + 1); } return 0; } /* * wp_clean_pmd_entry - The pagewalk pmd callback. * * Dirty-tracking should take place on the PTE level, so * WARN() if encountering a dirty huge pmd. 
 * Furthermore, never split huge pmds, since that currently
 * causes dirty info loss. The pagefault handler should do
 * that if needed.
 */
static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	pmd_t pmdval = pmdp_get_lockless(pmd);

	/* Do not split a huge pmd, present or migrated */
	if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval)) {
		WARN_ON(pmd_write(pmdval) || pmd_dirty(pmdval));
		walk->action = ACTION_CONTINUE;
	}

	return 0;
}

/*
 * wp_clean_pud_entry - The pagewalk pud callback.
 *
 * Dirty-tracking should take place on the PTE level, so
 * WARN() if encountering dirty huge puds.
 * Furthermore, never split huge puds, since that currently
 * causes dirty info loss. The pagefault handler should do
 * that if needed.
 */
static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	pud_t pudval = READ_ONCE(*pud);

	/* Do not split a huge pud */
	if (pud_trans_huge(pudval) || pud_devmap(pudval)) {
		WARN_ON(pud_write(pudval) || pud_dirty(pudval));
		walk->action = ACTION_CONTINUE;
	}
#endif

	return 0;
}

/*
 * wp_clean_pre_vma - The pagewalk pre_vma callback.
 *
 * The pre_vma callback performs the cache flush, stages the tlb flush
 * and calls the necessary mmu notifiers.
 */
static int wp_clean_pre_vma(unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;

	wpwalk->tlbflush_start = end;
	wpwalk->tlbflush_end = start;

	mmu_notifier_range_init(&wpwalk->range, MMU_NOTIFY_PROTECTION_PAGE, 0,
				walk->mm, start, end);
	mmu_notifier_invalidate_range_start(&wpwalk->range);
	flush_cache_range(walk->vma, start, end);

	/*
	 * We're not using tlb_gather_mmu() since typically
	 * only a small subrange of PTEs are affected, whereas
	 * tlb_gather_mmu() records the full range.
	 */
	inc_tlb_flush_pending(walk->mm);

	return 0;
}

/*
 * wp_clean_post_vma - The pagewalk post_vma callback.
 *
 * The post_vma callback performs the tlb flush and calls necessary mmu
 * notifiers.
 */
static void wp_clean_post_vma(struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;

	if (mm_tlb_flush_nested(walk->mm))
		flush_tlb_range(walk->vma, wpwalk->range.start,
				wpwalk->range.end);
	else if (wpwalk->tlbflush_end > wpwalk->tlbflush_start)
		flush_tlb_range(walk->vma, wpwalk->tlbflush_start,
				wpwalk->tlbflush_end);

	mmu_notifier_invalidate_range_end(&wpwalk->range);
	dec_tlb_flush_pending(walk->mm);
}

/*
 * wp_clean_test_walk - The pagewalk test_walk callback.
 *
 * Won't perform dirty-tracking on COW, read-only or HUGETLB vmas.
*/ static int wp_clean_test_walk(unsigned long start, unsigned long end, struct mm_walk *walk) { unsigned long vm_flags = READ_ONCE(walk->vma->vm_flags); /* Skip non-applicable VMAs */ if ((vm_flags & (VM_SHARED | VM_MAYWRITE | VM_HUGETLB)) != (VM_SHARED | VM_MAYWRITE)) return 1; return 0; } static const struct mm_walk_ops clean_walk_ops = { .pte_entry = clean_record_pte, .pmd_entry = wp_clean_pmd_entry, .pud_entry = wp_clean_pud_entry, .test_walk = wp_clean_test_walk, .pre_vma = wp_clean_pre_vma, .post_vma = wp_clean_post_vma }; static const struct mm_walk_ops wp_walk_ops = { .pte_entry = wp_pte, .pmd_entry = wp_clean_pmd_entry, .pud_entry = wp_clean_pud_entry, .test_walk = wp_clean_test_walk, .pre_vma = wp_clean_pre_vma, .post_vma = wp_clean_post_vma }; /** * wp_shared_mapping_range - Write-protect all ptes in an address space range * @mapping: The address_space we want to write protect * @first_index: The first page offset in the range * @nr: Number of incremental page offsets to cover * * Note: This function currently skips transhuge page-table entries, since * it's intended for dirty-tracking on the PTE level. It will warn on * encountering transhuge write-enabled entries, though, and can easily be * extended to handle them as well. * * Return: The number of ptes actually write-protected. Note that * already write-protected ptes are not counted. */ unsigned long wp_shared_mapping_range(struct address_space *mapping, pgoff_t first_index, pgoff_t nr) { struct wp_walk wpwalk = { .total = 0 }; i_mmap_lock_read(mapping); WARN_ON(walk_page_mapping(mapping, first_index, nr, &wp_walk_ops, &wpwalk)); i_mmap_unlock_read(mapping); return wpwalk.total; } EXPORT_SYMBOL_GPL(wp_shared_mapping_range); /** * clean_record_shared_mapping_range - Clean and record all ptes in an * address space range * @mapping: The address_space we want to clean * @first_index: The first page offset in the range * @nr: Number of incremental page offsets to cover * @bitmap_pgoff: The page offset of the first bit in @bitmap * @bitmap: Pointer to a bitmap of at least @nr bits. The bitmap needs to * cover the whole range @first_index..@first_index + @nr. * @start: Pointer to number of the first set bit in @bitmap. * is modified as new bits are set by the function. * @end: Pointer to the number of the last set bit in @bitmap. * none set. The value is modified as new bits are set by the function. * * When this function returns there is no guarantee that a CPU has * not already dirtied new ptes. However it will not clean any ptes not * reported in the bitmap. The guarantees are as follows: * * * All ptes dirty when the function starts executing will end up recorded * in the bitmap. * * All ptes dirtied after that will either remain dirty, be recorded in the * bitmap or both. * * If a caller needs to make sure all dirty ptes are picked up and none * additional are added, it first needs to write-protect the address-space * range and make sure new writers are blocked in page_mkwrite() or * pfn_mkwrite(). And then after a TLB flush following the write-protection * pick up all dirty bits. * * This function currently skips transhuge page-table entries, since * it's intended for dirty-tracking on the PTE level. It will warn on * encountering transhuge dirty entries, though, and can easily be extended * to handle them as well. * * Return: The number of dirty ptes actually cleaned. 
*/ unsigned long clean_record_shared_mapping_range(struct address_space *mapping, pgoff_t first_index, pgoff_t nr, pgoff_t bitmap_pgoff, unsigned long *bitmap, pgoff_t *start, pgoff_t *end) { bool none_set = (*start >= *end); struct clean_walk cwalk = { .base = { .total = 0 }, .bitmap_pgoff = bitmap_pgoff, .bitmap = bitmap, .start = none_set ? nr : *start, .end = none_set ? 0 : *end, }; i_mmap_lock_read(mapping); WARN_ON(walk_page_mapping(mapping, first_index, nr, &clean_walk_ops, &cwalk.base)); i_mmap_unlock_read(mapping); *start = cwalk.start; *end = cwalk.end; return cwalk.base.total; } EXPORT_SYMBOL_GPL(clean_record_shared_mapping_range);
linux-master
mm/mapping_dirty_helpers.c
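wp_shared_mapping_range() and clean_record_shared_mapping_range() above are exported (GPL) helpers for drivers that dirty-track writable shared mappings. The sketch below is a hedged illustration of a caller and is not taken from any in-tree driver: the struct my_dev layout and both wrapper functions are hypothetical, and it assumes the helper declarations are reachable via <linux/mm.h> with CONFIG_MAPPING_DIRTY_HELPERS enabled; only the helper signatures themselves come from the file above.

/*
 * Hedged driver-side sketch (hypothetical "my_dev"); only the two helper
 * signatures are taken from mm/mapping_dirty_helpers.c above.
 */
#include <linux/mm.h>
#include <linux/bitmap.h>

struct my_dev {
	struct address_space *mapping;	/* backing address_space of the object */
	pgoff_t first_index;		/* first page offset of the region */
	pgoff_t nr;			/* number of pages in the region */
	unsigned long *dirty_bitmap;	/* at least @nr bits */
};

/* Start a dirty-tracking cycle: write-protect every mapped pte once. */
static void my_dev_start_tracking(struct my_dev *dev)
{
	wp_shared_mapping_range(dev->mapping, dev->first_index, dev->nr);
}

/* Collect what userspace dirtied since the last collection. */
static unsigned long my_dev_collect_dirty(struct my_dev *dev,
					  pgoff_t *start, pgoff_t *end)
{
	/* *start >= *end tells the helper that no bits are set yet. */
	*start = 0;
	*end = 0;
	bitmap_zero(dev->dirty_bitmap, dev->nr);

	/* Bit 0 of the bitmap corresponds to page offset dev->first_index. */
	return clean_record_shared_mapping_range(dev->mapping,
						 dev->first_index, dev->nr,
						 dev->first_index,
						 dev->dirty_bitmap,
						 start, end);
}

As the comment above clean_record_shared_mapping_range() explains, a caller that needs a complete dirty set first write-protects the range (my_dev_start_tracking() here), blocks new writers in page_mkwrite()/pfn_mkwrite(), and only then collects the dirty bits; both helpers take i_mmap_lock_read() internally, so the caller does not.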
// SPDX-License-Identifier: GPL-2.0 /* * HugeTLB Vmemmap Optimization (HVO) * * Copyright (c) 2020, ByteDance. All rights reserved. * * Author: Muchun Song <[email protected]> * * See Documentation/mm/vmemmap_dedup.rst */ #define pr_fmt(fmt) "HugeTLB: " fmt #include <linux/pgtable.h> #include <linux/moduleparam.h> #include <linux/bootmem_info.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include "hugetlb_vmemmap.h" /** * struct vmemmap_remap_walk - walk vmemmap page table * * @remap_pte: called for each lowest-level entry (PTE). * @nr_walked: the number of walked pte. * @reuse_page: the page which is reused for the tail vmemmap pages. * @reuse_addr: the virtual address of the @reuse_page page. * @vmemmap_pages: the list head of the vmemmap pages that can be freed * or is mapped from. */ struct vmemmap_remap_walk { void (*remap_pte)(pte_t *pte, unsigned long addr, struct vmemmap_remap_walk *walk); unsigned long nr_walked; struct page *reuse_page; unsigned long reuse_addr; struct list_head *vmemmap_pages; }; static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start) { pmd_t __pmd; int i; unsigned long addr = start; struct page *head; pte_t *pgtable; spin_lock(&init_mm.page_table_lock); head = pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL; spin_unlock(&init_mm.page_table_lock); if (!head) return 0; pgtable = pte_alloc_one_kernel(&init_mm); if (!pgtable) return -ENOMEM; pmd_populate_kernel(&init_mm, &__pmd, pgtable); for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { pte_t entry, *pte; pgprot_t pgprot = PAGE_KERNEL; entry = mk_pte(head + i, pgprot); pte = pte_offset_kernel(&__pmd, addr); set_pte_at(&init_mm, addr, pte, entry); } spin_lock(&init_mm.page_table_lock); if (likely(pmd_leaf(*pmd))) { /* * Higher order allocations from buddy allocator must be able to * be treated as indepdenent small pages (as they can be freed * individually). */ if (!PageReserved(head)) split_page(head, get_order(PMD_SIZE)); /* Make pte visible before pmd. See comment in pmd_install(). */ smp_wmb(); pmd_populate_kernel(&init_mm, pmd, pgtable); flush_tlb_kernel_range(start, start + PMD_SIZE); } else { pte_free_kernel(&init_mm, pgtable); } spin_unlock(&init_mm.page_table_lock); return 0; } static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct vmemmap_remap_walk *walk) { pte_t *pte = pte_offset_kernel(pmd, addr); /* * The reuse_page is found 'first' in table walk before we start * remapping (which is calling @walk->remap_pte). */ if (!walk->reuse_page) { walk->reuse_page = pte_page(ptep_get(pte)); /* * Because the reuse address is part of the range that we are * walking, skip the reuse address range. 
*/ addr += PAGE_SIZE; pte++; walk->nr_walked++; } for (; addr != end; addr += PAGE_SIZE, pte++) { walk->remap_pte(pte, addr, walk); walk->nr_walked++; } } static int vmemmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, struct vmemmap_remap_walk *walk) { pmd_t *pmd; unsigned long next; pmd = pmd_offset(pud, addr); do { int ret; ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK); if (ret) return ret; next = pmd_addr_end(addr, end); vmemmap_pte_range(pmd, addr, next, walk); } while (pmd++, addr = next, addr != end); return 0; } static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, struct vmemmap_remap_walk *walk) { pud_t *pud; unsigned long next; pud = pud_offset(p4d, addr); do { int ret; next = pud_addr_end(addr, end); ret = vmemmap_pmd_range(pud, addr, next, walk); if (ret) return ret; } while (pud++, addr = next, addr != end); return 0; } static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, struct vmemmap_remap_walk *walk) { p4d_t *p4d; unsigned long next; p4d = p4d_offset(pgd, addr); do { int ret; next = p4d_addr_end(addr, end); ret = vmemmap_pud_range(p4d, addr, next, walk); if (ret) return ret; } while (p4d++, addr = next, addr != end); return 0; } static int vmemmap_remap_range(unsigned long start, unsigned long end, struct vmemmap_remap_walk *walk) { unsigned long addr = start; unsigned long next; pgd_t *pgd; VM_BUG_ON(!PAGE_ALIGNED(start)); VM_BUG_ON(!PAGE_ALIGNED(end)); pgd = pgd_offset_k(addr); do { int ret; next = pgd_addr_end(addr, end); ret = vmemmap_p4d_range(pgd, addr, next, walk); if (ret) return ret; } while (pgd++, addr = next, addr != end); flush_tlb_kernel_range(start, end); return 0; } /* * Free a vmemmap page. A vmemmap page can be allocated from the memblock * allocator or buddy allocator. If the PG_reserved flag is set, it means * that it allocated from the memblock allocator, just free it via the * free_bootmem_page(). Otherwise, use __free_page(). */ static inline void free_vmemmap_page(struct page *page) { if (PageReserved(page)) free_bootmem_page(page); else __free_page(page); } /* Free a list of the vmemmap pages */ static void free_vmemmap_page_list(struct list_head *list) { struct page *page, *next; list_for_each_entry_safe(page, next, list, lru) free_vmemmap_page(page); } static void vmemmap_remap_pte(pte_t *pte, unsigned long addr, struct vmemmap_remap_walk *walk) { /* * Remap the tail pages as read-only to catch illegal write operation * to the tail pages. */ pgprot_t pgprot = PAGE_KERNEL_RO; struct page *page = pte_page(ptep_get(pte)); pte_t entry; /* Remapping the head page requires r/w */ if (unlikely(addr == walk->reuse_addr)) { pgprot = PAGE_KERNEL; list_del(&walk->reuse_page->lru); /* * Makes sure that preceding stores to the page contents from * vmemmap_remap_free() become visible before the set_pte_at() * write. */ smp_wmb(); } entry = mk_pte(walk->reuse_page, pgprot); list_add_tail(&page->lru, walk->vmemmap_pages); set_pte_at(&init_mm, addr, pte, entry); } /* * How many struct page structs need to be reset. When we reuse the head * struct page, the special metadata (e.g. page->flags or page->mapping) * cannot copy to the tail struct page structs. The invalid value will be * checked in the free_tail_page_prepare(). In order to avoid the message * of "corrupted mapping in tail page". We need to reset at least 3 (one * head struct page struct and two tail struct page structs) struct page * structs. 
*/ #define NR_RESET_STRUCT_PAGE 3 static inline void reset_struct_pages(struct page *start) { struct page *from = start + NR_RESET_STRUCT_PAGE; BUILD_BUG_ON(NR_RESET_STRUCT_PAGE * 2 > PAGE_SIZE / sizeof(struct page)); memcpy(start, from, sizeof(*from) * NR_RESET_STRUCT_PAGE); } static void vmemmap_restore_pte(pte_t *pte, unsigned long addr, struct vmemmap_remap_walk *walk) { pgprot_t pgprot = PAGE_KERNEL; struct page *page; void *to; BUG_ON(pte_page(ptep_get(pte)) != walk->reuse_page); page = list_first_entry(walk->vmemmap_pages, struct page, lru); list_del(&page->lru); to = page_to_virt(page); copy_page(to, (void *)walk->reuse_addr); reset_struct_pages(to); /* * Makes sure that preceding stores to the page contents become visible * before the set_pte_at() write. */ smp_wmb(); set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot)); } /** * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end) * to the page which @reuse is mapped to, then free vmemmap * which the range are mapped to. * @start: start address of the vmemmap virtual address range that we want * to remap. * @end: end address of the vmemmap virtual address range that we want to * remap. * @reuse: reuse address. * * Return: %0 on success, negative error code otherwise. */ static int vmemmap_remap_free(unsigned long start, unsigned long end, unsigned long reuse) { int ret; LIST_HEAD(vmemmap_pages); struct vmemmap_remap_walk walk = { .remap_pte = vmemmap_remap_pte, .reuse_addr = reuse, .vmemmap_pages = &vmemmap_pages, }; int nid = page_to_nid((struct page *)start); gfp_t gfp_mask = GFP_KERNEL | __GFP_THISNODE | __GFP_NORETRY | __GFP_NOWARN; /* * Allocate a new head vmemmap page to avoid breaking a contiguous * block of struct page memory when freeing it back to page allocator * in free_vmemmap_page_list(). This will allow the likely contiguous * struct page backing memory to be kept contiguous and allowing for * more allocations of hugepages. Fallback to the currently * mapped head page in case should it fail to allocate. */ walk.reuse_page = alloc_pages_node(nid, gfp_mask, 0); if (walk.reuse_page) { copy_page(page_to_virt(walk.reuse_page), (void *)walk.reuse_addr); list_add(&walk.reuse_page->lru, &vmemmap_pages); } /* * In order to make remapping routine most efficient for the huge pages, * the routine of vmemmap page table walking has the following rules * (see more details from the vmemmap_pte_range()): * * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE) * should be continuous. * - The @reuse address is part of the range [@reuse, @end) that we are * walking which is passed to vmemmap_remap_range(). * - The @reuse address is the first in the complete range. * * So we need to make sure that @start and @reuse meet the above rules. */ BUG_ON(start - reuse != PAGE_SIZE); mmap_read_lock(&init_mm); ret = vmemmap_remap_range(reuse, end, &walk); if (ret && walk.nr_walked) { end = reuse + walk.nr_walked * PAGE_SIZE; /* * vmemmap_pages contains pages from the previous * vmemmap_remap_range call which failed. These * are pages which were removed from the vmemmap. * They will be restored in the following call. 
*/ walk = (struct vmemmap_remap_walk) { .remap_pte = vmemmap_restore_pte, .reuse_addr = reuse, .vmemmap_pages = &vmemmap_pages, }; vmemmap_remap_range(reuse, end, &walk); } mmap_read_unlock(&init_mm); free_vmemmap_page_list(&vmemmap_pages); return ret; } static int alloc_vmemmap_page_list(unsigned long start, unsigned long end, struct list_head *list) { gfp_t gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_THISNODE; unsigned long nr_pages = (end - start) >> PAGE_SHIFT; int nid = page_to_nid((struct page *)start); struct page *page, *next; while (nr_pages--) { page = alloc_pages_node(nid, gfp_mask, 0); if (!page) goto out; list_add_tail(&page->lru, list); } return 0; out: list_for_each_entry_safe(page, next, list, lru) __free_page(page); return -ENOMEM; } /** * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, end) * to the page which is from the @vmemmap_pages * respectively. * @start: start address of the vmemmap virtual address range that we want * to remap. * @end: end address of the vmemmap virtual address range that we want to * remap. * @reuse: reuse address. * * Return: %0 on success, negative error code otherwise. */ static int vmemmap_remap_alloc(unsigned long start, unsigned long end, unsigned long reuse) { LIST_HEAD(vmemmap_pages); struct vmemmap_remap_walk walk = { .remap_pte = vmemmap_restore_pte, .reuse_addr = reuse, .vmemmap_pages = &vmemmap_pages, }; /* See the comment in the vmemmap_remap_free(). */ BUG_ON(start - reuse != PAGE_SIZE); if (alloc_vmemmap_page_list(start, end, &vmemmap_pages)) return -ENOMEM; mmap_read_lock(&init_mm); vmemmap_remap_range(reuse, end, &walk); mmap_read_unlock(&init_mm); return 0; } DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key); EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key); static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON); core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0); /** * hugetlb_vmemmap_restore - restore previously optimized (by * hugetlb_vmemmap_optimize()) vmemmap pages which * will be reallocated and remapped. * @h: struct hstate. * @head: the head page whose vmemmap pages will be restored. * * Return: %0 if @head's vmemmap pages have been reallocated and remapped, * negative error code otherwise. */ int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head) { int ret; unsigned long vmemmap_start = (unsigned long)head, vmemmap_end; unsigned long vmemmap_reuse; if (!HPageVmemmapOptimized(head)) return 0; vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h); vmemmap_reuse = vmemmap_start; vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE; /* * The pages which the vmemmap virtual address range [@vmemmap_start, * @vmemmap_end) are mapped to are freed to the buddy allocator, and * the range is mapped to the page which @vmemmap_reuse is mapped to. * When a HugeTLB page is freed to the buddy allocator, previously * discarded vmemmap pages must be allocated and remapping. */ ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse); if (!ret) { ClearHPageVmemmapOptimized(head); static_branch_dec(&hugetlb_optimize_vmemmap_key); } return ret; } /* Return true iff a HugeTLB whose vmemmap should and can be optimized. 
*/ static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head) { if (!READ_ONCE(vmemmap_optimize_enabled)) return false; if (!hugetlb_vmemmap_optimizable(h)) return false; if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) { pmd_t *pmdp, pmd; struct page *vmemmap_page; unsigned long vaddr = (unsigned long)head; /* * Only the vmemmap page's vmemmap page can be self-hosted. * Walking the page tables to find the backing page of the * vmemmap page. */ pmdp = pmd_off_k(vaddr); /* * The READ_ONCE() is used to stabilize *pmdp in a register or * on the stack so that it will stop changing under the code. * The only concurrent operation where it can be changed is * split_vmemmap_huge_pmd() (*pmdp will be stable after this * operation). */ pmd = READ_ONCE(*pmdp); if (pmd_leaf(pmd)) vmemmap_page = pmd_page(pmd) + pte_index(vaddr); else vmemmap_page = pte_page(*pte_offset_kernel(pmdp, vaddr)); /* * Due to HugeTLB alignment requirements and the vmemmap pages * being at the start of the hotplugged memory region in * memory_hotplug.memmap_on_memory case. Checking any vmemmap * page's vmemmap page if it is marked as VmemmapSelfHosted is * sufficient. * * [ hotplugged memory ] * [ section ][...][ section ] * [ vmemmap ][ usable memory ] * ^ | | | * +---+ | | * ^ | | * +-------+ | * ^ | * +-------------------------------------------+ */ if (PageVmemmapSelfHosted(vmemmap_page)) return false; } return true; } /** * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages. * @h: struct hstate. * @head: the head page whose vmemmap pages will be optimized. * * This function only tries to optimize @head's vmemmap pages and does not * guarantee that the optimization will succeed after it returns. The caller * can use HPageVmemmapOptimized(@head) to detect if @head's vmemmap pages * have been optimized. */ void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head) { unsigned long vmemmap_start = (unsigned long)head, vmemmap_end; unsigned long vmemmap_reuse; if (!vmemmap_should_optimize(h, head)) return; static_branch_inc(&hugetlb_optimize_vmemmap_key); vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h); vmemmap_reuse = vmemmap_start; vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE; /* * Remap the vmemmap virtual address range [@vmemmap_start, @vmemmap_end) * to the page which @vmemmap_reuse is mapped to, then free the pages * which the range [@vmemmap_start, @vmemmap_end] is mapped to. */ if (vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse)) static_branch_dec(&hugetlb_optimize_vmemmap_key); else SetHPageVmemmapOptimized(head); } static struct ctl_table hugetlb_vmemmap_sysctls[] = { { .procname = "hugetlb_optimize_vmemmap", .data = &vmemmap_optimize_enabled, .maxlen = sizeof(vmemmap_optimize_enabled), .mode = 0644, .proc_handler = proc_dobool, }, { } }; static int __init hugetlb_vmemmap_init(void) { const struct hstate *h; /* HUGETLB_VMEMMAP_RESERVE_SIZE should cover all used struct pages */ BUILD_BUG_ON(__NR_USED_SUBPAGE * sizeof(struct page) > HUGETLB_VMEMMAP_RESERVE_SIZE); for_each_hstate(h) { if (hugetlb_vmemmap_optimizable(h)) { register_sysctl_init("vm", hugetlb_vmemmap_sysctls); break; } } return 0; } late_initcall(hugetlb_vmemmap_init);
linux-master
mm/hugetlb_vmemmap.c
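To make the savings of the HVO code above concrete, here is a small worked-arithmetic example. It is illustrative only and not part of the kernel sources; it assumes 4 KiB base pages, sizeof(struct page) == 64, and a reserve (HUGETLB_VMEMMAP_RESERVE_SIZE) of a single base page, as on typical x86-64 configurations.

/*
 * Illustrative arithmetic only (not taken from mm/hugetlb_vmemmap.c).
 * Assumptions: 4 KiB base pages, 64-byte struct page, one-page reserve.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096, struct_page_size = 64;
	const unsigned long hugepage_sizes[] = { 2UL << 20, 1UL << 30 };

	for (int i = 0; i < 2; i++) {
		unsigned long nr_struct_pages = hugepage_sizes[i] / page_size;
		unsigned long vmemmap_bytes = nr_struct_pages * struct_page_size;
		unsigned long vmemmap_pages = vmemmap_bytes / page_size;

		/* HVO keeps one reuse page and frees the rest of the vmemmap. */
		printf("%4lu MiB hugepage: %4lu vmemmap pages, HVO frees %4lu\n",
		       hugepage_sizes[i] >> 20, vmemmap_pages,
		       vmemmap_pages - 1);
	}
	return 0;
}

For a 2 MiB hugepage this is exactly the shape vmemmap_remap_free() above expects: the retained reuse page sits immediately below vmemmap_start (hence the BUG_ON(start - reuse != PAGE_SIZE)), and the remaining seven vmemmap pages are remapped read-only to it and handed back to the page allocator.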
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008, 2009 Intel Corporation * Authors: Andi Kleen, Fengguang Wu * * High level machine check handler. Handles pages reported by the * hardware as being corrupted usually due to a multi-bit ECC memory or cache * failure. * * In addition there is a "soft offline" entry point that allows stop using * not-yet-corrupted-by-suspicious pages without killing anything. * * Handles page cache pages in various states. The tricky part * here is that we can access any page asynchronously in respect to * other VM users, because memory failures could happen anytime and * anywhere. This could violate some of their assumptions. This is why * this code has to be extremely careful. Generally it tries to use * normal locking rules, as in get the standard locks, even if that means * the error handling takes potentially a long time. * * It can be very tempting to add handling for obscure cases here. * In general any code for handling new cases should only be added iff: * - You know how to test it. * - You have a test that can be added to mce-test * https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/ * - The case actually shows up as a frequent (top 10) page state in * tools/mm/page-types when running a real workload. * * There are several operations here with exponential complexity because * of unsuitable VM data structures. For example the operation to map back * from RMAP chains to processes has to walk the complete process list and * has non linear complexity with the number. But since memory corruptions * are rare we hope to get away with this. This avoids impacting the core * VM. */ #define pr_fmt(fmt) "Memory failure: " fmt #include <linux/kernel.h> #include <linux/mm.h> #include <linux/page-flags.h> #include <linux/sched/signal.h> #include <linux/sched/task.h> #include <linux/dax.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/export.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/backing-dev.h> #include <linux/migrate.h> #include <linux/slab.h> #include <linux/swapops.h> #include <linux/hugetlb.h> #include <linux/memory_hotplug.h> #include <linux/mm_inline.h> #include <linux/memremap.h> #include <linux/kfifo.h> #include <linux/ratelimit.h> #include <linux/pagewalk.h> #include <linux/shmem_fs.h> #include <linux/sysctl.h> #include "swap.h" #include "internal.h" #include "ras/ras_event.h" static int sysctl_memory_failure_early_kill __read_mostly; static int sysctl_memory_failure_recovery __read_mostly = 1; atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0); static bool hw_memory_failure __read_mostly = false; static DEFINE_MUTEX(mf_mutex); void num_poisoned_pages_inc(unsigned long pfn) { atomic_long_inc(&num_poisoned_pages); memblk_nr_poison_inc(pfn); } void num_poisoned_pages_sub(unsigned long pfn, long i) { atomic_long_sub(i, &num_poisoned_pages); if (pfn != -1UL) memblk_nr_poison_sub(pfn, i); } /** * MF_ATTR_RO - Create sysfs entry for each memory failure statistics. * @_name: name of the file in the per NUMA sysfs directory. 
*/ #define MF_ATTR_RO(_name) \ static ssize_t _name##_show(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct memory_failure_stats *mf_stats = \ &NODE_DATA(dev->id)->mf_stats; \ return sprintf(buf, "%lu\n", mf_stats->_name); \ } \ static DEVICE_ATTR_RO(_name) MF_ATTR_RO(total); MF_ATTR_RO(ignored); MF_ATTR_RO(failed); MF_ATTR_RO(delayed); MF_ATTR_RO(recovered); static struct attribute *memory_failure_attr[] = { &dev_attr_total.attr, &dev_attr_ignored.attr, &dev_attr_failed.attr, &dev_attr_delayed.attr, &dev_attr_recovered.attr, NULL, }; const struct attribute_group memory_failure_attr_group = { .name = "memory_failure", .attrs = memory_failure_attr, }; static struct ctl_table memory_failure_table[] = { { .procname = "memory_failure_early_kill", .data = &sysctl_memory_failure_early_kill, .maxlen = sizeof(sysctl_memory_failure_early_kill), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, { .procname = "memory_failure_recovery", .data = &sysctl_memory_failure_recovery, .maxlen = sizeof(sysctl_memory_failure_recovery), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, { } }; /* * Return values: * 1: the page is dissolved (if needed) and taken off from buddy, * 0: the page is dissolved (if needed) and not taken off from buddy, * < 0: failed to dissolve. */ static int __page_handle_poison(struct page *page) { int ret; zone_pcp_disable(page_zone(page)); ret = dissolve_free_huge_page(page); if (!ret) ret = take_page_off_buddy(page); zone_pcp_enable(page_zone(page)); return ret; } static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release) { if (hugepage_or_freepage) { /* * Doing this check for free pages is also fine since dissolve_free_huge_page * returns 0 for non-hugetlb pages as well. */ if (__page_handle_poison(page) <= 0) /* * We could fail to take off the target page from buddy * for example due to racy page allocation, but that's * acceptable because soft-offlined page is not broken * and if someone really want to use it, they should * take it. 
*/ return false; } SetPageHWPoison(page); if (release) put_page(page); page_ref_inc(page); num_poisoned_pages_inc(page_to_pfn(page)); return true; } #if IS_ENABLED(CONFIG_HWPOISON_INJECT) u32 hwpoison_filter_enable = 0; u32 hwpoison_filter_dev_major = ~0U; u32 hwpoison_filter_dev_minor = ~0U; u64 hwpoison_filter_flags_mask; u64 hwpoison_filter_flags_value; EXPORT_SYMBOL_GPL(hwpoison_filter_enable); EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major); EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor); EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask); EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value); static int hwpoison_filter_dev(struct page *p) { struct address_space *mapping; dev_t dev; if (hwpoison_filter_dev_major == ~0U && hwpoison_filter_dev_minor == ~0U) return 0; mapping = page_mapping(p); if (mapping == NULL || mapping->host == NULL) return -EINVAL; dev = mapping->host->i_sb->s_dev; if (hwpoison_filter_dev_major != ~0U && hwpoison_filter_dev_major != MAJOR(dev)) return -EINVAL; if (hwpoison_filter_dev_minor != ~0U && hwpoison_filter_dev_minor != MINOR(dev)) return -EINVAL; return 0; } static int hwpoison_filter_flags(struct page *p) { if (!hwpoison_filter_flags_mask) return 0; if ((stable_page_flags(p) & hwpoison_filter_flags_mask) == hwpoison_filter_flags_value) return 0; else return -EINVAL; } /* * This allows stress tests to limit test scope to a collection of tasks * by putting them under some memcg. This prevents killing unrelated/important * processes such as /sbin/init. Note that the target task may share clean * pages with init (eg. libc text), which is harmless. If the target task * share _dirty_ pages with another task B, the test scheme must make sure B * is also included in the memcg. At last, due to race conditions this filter * can only guarantee that the page either belongs to the memcg tasks, or is * a freed page. */ #ifdef CONFIG_MEMCG u64 hwpoison_filter_memcg; EXPORT_SYMBOL_GPL(hwpoison_filter_memcg); static int hwpoison_filter_task(struct page *p) { if (!hwpoison_filter_memcg) return 0; if (page_cgroup_ino(p) != hwpoison_filter_memcg) return -EINVAL; return 0; } #else static int hwpoison_filter_task(struct page *p) { return 0; } #endif int hwpoison_filter(struct page *p) { if (!hwpoison_filter_enable) return 0; if (hwpoison_filter_dev(p)) return -EINVAL; if (hwpoison_filter_flags(p)) return -EINVAL; if (hwpoison_filter_task(p)) return -EINVAL; return 0; } #else int hwpoison_filter(struct page *p) { return 0; } #endif EXPORT_SYMBOL_GPL(hwpoison_filter); /* * Kill all processes that have a poisoned page mapped and then isolate * the page. * * General strategy: * Find all processes having the page mapped and kill them. * But we keep a page reference around so that the page is not * actually freed yet. * Then stash the page away * * There's no convenient way to get back to mapped processes * from the VMAs. So do a brute-force search over all * running processes. * * Remember that machine checks are not common (or rather * if they are common you have other problems), so this shouldn't * be a performance issue. * * Also there are some races possible while we get from the * error detection to actually handle it. */ struct to_kill { struct list_head nd; struct task_struct *tsk; unsigned long addr; short size_shift; }; /* * Send all the processes who have the page mapped a signal. 
* ``action optional'' if they are not immediately affected by the error * ``action required'' if error happened in current execution context */ static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags) { struct task_struct *t = tk->tsk; short addr_lsb = tk->size_shift; int ret = 0; pr_err("%#lx: Sending SIGBUS to %s:%d due to hardware memory corruption\n", pfn, t->comm, t->pid); if ((flags & MF_ACTION_REQUIRED) && (t == current)) ret = force_sig_mceerr(BUS_MCEERR_AR, (void __user *)tk->addr, addr_lsb); else /* * Signal other processes sharing the page if they have * PF_MCE_EARLY set. * Don't use force here, it's convenient if the signal * can be temporarily blocked. * This could cause a loop when the user sets SIGBUS * to SIG_IGN, but hopefully no one will do that? */ ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr, addr_lsb, t); if (ret < 0) pr_info("Error sending signal to %s:%d: %d\n", t->comm, t->pid, ret); return ret; } /* * Unknown page type encountered. Try to check whether it can turn PageLRU by * lru_add_drain_all. */ void shake_page(struct page *p) { if (PageHuge(p)) return; /* * TODO: Could shrink slab caches here if a lightweight range-based * shrinker will be available. */ if (PageSlab(p)) return; lru_add_drain_all(); } EXPORT_SYMBOL_GPL(shake_page); static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, unsigned long address) { unsigned long ret = 0; pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; pte_t ptent; VM_BUG_ON_VMA(address == -EFAULT, vma); pgd = pgd_offset(vma->vm_mm, address); if (!pgd_present(*pgd)) return 0; p4d = p4d_offset(pgd, address); if (!p4d_present(*p4d)) return 0; pud = pud_offset(p4d, address); if (!pud_present(*pud)) return 0; if (pud_devmap(*pud)) return PUD_SHIFT; pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return 0; if (pmd_devmap(*pmd)) return PMD_SHIFT; pte = pte_offset_map(pmd, address); if (!pte) return 0; ptent = ptep_get(pte); if (pte_present(ptent) && pte_devmap(ptent)) ret = PAGE_SHIFT; pte_unmap(pte); return ret; } /* * Failure handling: if we can't find or can't kill a process there's * not much we can do. We just print a message and ignore otherwise. */ #define FSDAX_INVALID_PGOFF ULONG_MAX /* * Schedule a process for later kill. * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM. * * Note: @fsdax_pgoff is used only when @p is a fsdax page and a * filesystem with a memory failure handler has claimed the * memory_failure event. In all other cases, page->index and * page->mapping are sufficient for mapping the page back to its * corresponding user virtual address. */ static void __add_to_kill(struct task_struct *tsk, struct page *p, struct vm_area_struct *vma, struct list_head *to_kill, unsigned long ksm_addr, pgoff_t fsdax_pgoff) { struct to_kill *tk; tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC); if (!tk) { pr_err("Out of memory while machine check handling\n"); return; } tk->addr = ksm_addr ? ksm_addr : page_address_in_vma(p, vma); if (is_zone_device_page(p)) { if (fsdax_pgoff != FSDAX_INVALID_PGOFF) tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma); tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr); } else tk->size_shift = page_shift(compound_head(p)); /* * Send SIGKILL if "tk->addr == -EFAULT". Also, as * "tk->size_shift" is always non-zero for !is_zone_device_page(), * so "tk->size_shift == 0" effectively checks no mapping on * ZONE_DEVICE. 
Indeed, when a devdax page is mmapped N times * to a process' address space, it's possible not all N VMAs * contain mappings for the page, but at least one VMA does. * Only deliver SIGBUS with payload derived from the VMA that * has a mapping for the page. */ if (tk->addr == -EFAULT) { pr_info("Unable to find user space address %lx in %s\n", page_to_pfn(p), tsk->comm); } else if (tk->size_shift == 0) { kfree(tk); return; } get_task_struct(tsk); tk->tsk = tsk; list_add_tail(&tk->nd, to_kill); } static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p, struct vm_area_struct *vma, struct list_head *to_kill) { __add_to_kill(tsk, p, vma, to_kill, 0, FSDAX_INVALID_PGOFF); } #ifdef CONFIG_KSM static bool task_in_to_kill_list(struct list_head *to_kill, struct task_struct *tsk) { struct to_kill *tk, *next; list_for_each_entry_safe(tk, next, to_kill, nd) { if (tk->tsk == tsk) return true; } return false; } void add_to_kill_ksm(struct task_struct *tsk, struct page *p, struct vm_area_struct *vma, struct list_head *to_kill, unsigned long ksm_addr) { if (!task_in_to_kill_list(to_kill, tsk)) __add_to_kill(tsk, p, vma, to_kill, ksm_addr, FSDAX_INVALID_PGOFF); } #endif /* * Kill the processes that have been collected earlier. * * Only do anything when FORCEKILL is set, otherwise just free the * list (this is used for clean pages which do not need killing) * Also when FAIL is set do a force kill because something went * wrong earlier. */ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail, unsigned long pfn, int flags) { struct to_kill *tk, *next; list_for_each_entry_safe(tk, next, to_kill, nd) { if (forcekill) { /* * In case something went wrong with munmapping * make sure the process doesn't catch the * signal and then access the memory. Just kill it. */ if (fail || tk->addr == -EFAULT) { pr_err("%#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", pfn, tk->tsk->comm, tk->tsk->pid); do_send_sig_info(SIGKILL, SEND_SIG_PRIV, tk->tsk, PIDTYPE_PID); } /* * In theory the process could have mapped * something else on the address in-between. We could * check for that, but we need to tell the * process anyways. */ else if (kill_proc(tk, pfn, flags) < 0) pr_err("%#lx: Cannot send advisory machine check signal to %s:%d\n", pfn, tk->tsk->comm, tk->tsk->pid); } list_del(&tk->nd); put_task_struct(tk->tsk); kfree(tk); } } /* * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO) * on behalf of the thread group. Return task_struct of the (first found) * dedicated thread if found, and return NULL otherwise. * * We already hold rcu lock in the caller, so we don't have to call * rcu_read_lock/unlock() in this function. */ static struct task_struct *find_early_kill_thread(struct task_struct *tsk) { struct task_struct *t; for_each_thread(tsk, t) { if (t->flags & PF_MCE_PROCESS) { if (t->flags & PF_MCE_EARLY) return t; } else { if (sysctl_memory_failure_early_kill) return t; } } return NULL; } /* * Determine whether a given process is "early kill" process which expects * to be signaled when some page under the process is hwpoisoned. * Return task_struct of the dedicated thread (main thread unless explicitly * specified) if the process is "early kill" and otherwise returns NULL. * * Note that the above is true for Action Optional case. 
For Action Required * case, it's only meaningful to the current thread which need to be signaled * with SIGBUS, this error is Action Optional for other non current * processes sharing the same error page,if the process is "early kill", the * task_struct of the dedicated thread will also be returned. */ struct task_struct *task_early_kill(struct task_struct *tsk, int force_early) { if (!tsk->mm) return NULL; /* * Comparing ->mm here because current task might represent * a subthread, while tsk always points to the main thread. */ if (force_early && tsk->mm == current->mm) return current; return find_early_kill_thread(tsk); } /* * Collect processes when the error hit an anonymous page. */ static void collect_procs_anon(struct page *page, struct list_head *to_kill, int force_early) { struct folio *folio = page_folio(page); struct vm_area_struct *vma; struct task_struct *tsk; struct anon_vma *av; pgoff_t pgoff; av = folio_lock_anon_vma_read(folio, NULL); if (av == NULL) /* Not actually mapped anymore */ return; pgoff = page_to_pgoff(page); rcu_read_lock(); for_each_process(tsk) { struct anon_vma_chain *vmac; struct task_struct *t = task_early_kill(tsk, force_early); if (!t) continue; anon_vma_interval_tree_foreach(vmac, &av->rb_root, pgoff, pgoff) { vma = vmac->vma; if (vma->vm_mm != t->mm) continue; if (!page_mapped_in_vma(page, vma)) continue; add_to_kill_anon_file(t, page, vma, to_kill); } } rcu_read_unlock(); anon_vma_unlock_read(av); } /* * Collect processes when the error hit a file mapped page. */ static void collect_procs_file(struct page *page, struct list_head *to_kill, int force_early) { struct vm_area_struct *vma; struct task_struct *tsk; struct address_space *mapping = page->mapping; pgoff_t pgoff; i_mmap_lock_read(mapping); rcu_read_lock(); pgoff = page_to_pgoff(page); for_each_process(tsk) { struct task_struct *t = task_early_kill(tsk, force_early); if (!t) continue; vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { /* * Send early kill signal to tasks where a vma covers * the page but the corrupted page is not necessarily * mapped in its pte. * Assume applications who requested early kill want * to be informed of all such data corruptions. */ if (vma->vm_mm == t->mm) add_to_kill_anon_file(t, page, vma, to_kill); } } rcu_read_unlock(); i_mmap_unlock_read(mapping); } #ifdef CONFIG_FS_DAX static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p, struct vm_area_struct *vma, struct list_head *to_kill, pgoff_t pgoff) { __add_to_kill(tsk, p, vma, to_kill, 0, pgoff); } /* * Collect processes when the error hit a fsdax page. */ static void collect_procs_fsdax(struct page *page, struct address_space *mapping, pgoff_t pgoff, struct list_head *to_kill) { struct vm_area_struct *vma; struct task_struct *tsk; i_mmap_lock_read(mapping); rcu_read_lock(); for_each_process(tsk) { struct task_struct *t = task_early_kill(tsk, true); if (!t) continue; vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { if (vma->vm_mm == t->mm) add_to_kill_fsdax(t, page, vma, to_kill, pgoff); } } rcu_read_unlock(); i_mmap_unlock_read(mapping); } #endif /* CONFIG_FS_DAX */ /* * Collect the processes who have the corrupted page mapped to kill. 
*/ static void collect_procs(struct page *page, struct list_head *tokill, int force_early) { if (!page->mapping) return; if (unlikely(PageKsm(page))) collect_procs_ksm(page, tokill, force_early); else if (PageAnon(page)) collect_procs_anon(page, tokill, force_early); else collect_procs_file(page, tokill, force_early); } struct hwpoison_walk { struct to_kill tk; unsigned long pfn; int flags; }; static void set_to_kill(struct to_kill *tk, unsigned long addr, short shift) { tk->addr = addr; tk->size_shift = shift; } static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift, unsigned long poisoned_pfn, struct to_kill *tk) { unsigned long pfn = 0; if (pte_present(pte)) { pfn = pte_pfn(pte); } else { swp_entry_t swp = pte_to_swp_entry(pte); if (is_hwpoison_entry(swp)) pfn = swp_offset_pfn(swp); } if (!pfn || pfn != poisoned_pfn) return 0; set_to_kill(tk, addr, shift); return 1; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr, struct hwpoison_walk *hwp) { pmd_t pmd = *pmdp; unsigned long pfn; unsigned long hwpoison_vaddr; if (!pmd_present(pmd)) return 0; pfn = pmd_pfn(pmd); if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) { hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT); set_to_kill(&hwp->tk, hwpoison_vaddr, PAGE_SHIFT); return 1; } return 0; } #else static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr, struct hwpoison_walk *hwp) { return 0; } #endif static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct hwpoison_walk *hwp = walk->private; int ret = 0; pte_t *ptep, *mapped_pte; spinlock_t *ptl; ptl = pmd_trans_huge_lock(pmdp, walk->vma); if (ptl) { ret = check_hwpoisoned_pmd_entry(pmdp, addr, hwp); spin_unlock(ptl); goto out; } mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp, addr, &ptl); if (!ptep) goto out; for (; addr != end; ptep++, addr += PAGE_SIZE) { ret = check_hwpoisoned_entry(ptep_get(ptep), addr, PAGE_SHIFT, hwp->pfn, &hwp->tk); if (ret == 1) break; } pte_unmap_unlock(mapped_pte, ptl); out: cond_resched(); return ret; } #ifdef CONFIG_HUGETLB_PAGE static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct hwpoison_walk *hwp = walk->private; pte_t pte = huge_ptep_get(ptep); struct hstate *h = hstate_vma(walk->vma); return check_hwpoisoned_entry(pte, addr, huge_page_shift(h), hwp->pfn, &hwp->tk); } #else #define hwpoison_hugetlb_range NULL #endif static const struct mm_walk_ops hwpoison_walk_ops = { .pmd_entry = hwpoison_pte_range, .hugetlb_entry = hwpoison_hugetlb_range, .walk_lock = PGWALK_RDLOCK, }; /* * Sends SIGBUS to the current process with error info. * * This function is intended to handle "Action Required" MCEs on already * hardware poisoned pages. They could happen, for example, when * memory_failure() failed to unmap the error page at the first call, or * when multiple local machine checks happened on different CPUs. * * MCE handler currently has no easy access to the error virtual address, * so this function walks page table to find it. The returned virtual address * is proper in most cases, but it could be wrong when the application * process has multiple entries mapping the error page. 
*/ static int kill_accessing_process(struct task_struct *p, unsigned long pfn, int flags) { int ret; struct hwpoison_walk priv = { .pfn = pfn, }; priv.tk.tsk = p; if (!p->mm) return -EFAULT; mmap_read_lock(p->mm); ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops, (void *)&priv); if (ret == 1 && priv.tk.addr) kill_proc(&priv.tk, pfn, flags); else ret = 0; mmap_read_unlock(p->mm); return ret > 0 ? -EHWPOISON : -EFAULT; } static const char *action_name[] = { [MF_IGNORED] = "Ignored", [MF_FAILED] = "Failed", [MF_DELAYED] = "Delayed", [MF_RECOVERED] = "Recovered", }; static const char * const action_page_types[] = { [MF_MSG_KERNEL] = "reserved kernel page", [MF_MSG_KERNEL_HIGH_ORDER] = "high-order kernel page", [MF_MSG_SLAB] = "kernel slab page", [MF_MSG_DIFFERENT_COMPOUND] = "different compound page after locking", [MF_MSG_HUGE] = "huge page", [MF_MSG_FREE_HUGE] = "free huge page", [MF_MSG_UNMAP_FAILED] = "unmapping failed page", [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page", [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page", [MF_MSG_DIRTY_MLOCKED_LRU] = "dirty mlocked LRU page", [MF_MSG_CLEAN_MLOCKED_LRU] = "clean mlocked LRU page", [MF_MSG_DIRTY_UNEVICTABLE_LRU] = "dirty unevictable LRU page", [MF_MSG_CLEAN_UNEVICTABLE_LRU] = "clean unevictable LRU page", [MF_MSG_DIRTY_LRU] = "dirty LRU page", [MF_MSG_CLEAN_LRU] = "clean LRU page", [MF_MSG_TRUNCATED_LRU] = "already truncated LRU page", [MF_MSG_BUDDY] = "free buddy page", [MF_MSG_DAX] = "dax page", [MF_MSG_UNSPLIT_THP] = "unsplit thp", [MF_MSG_UNKNOWN] = "unknown page", }; /* * XXX: It is possible that a page is isolated from LRU cache, * and then kept in swap cache or failed to remove from page cache. * The page count will stop it from being freed by unpoison. * Stress tests should be aware of this memory leak problem. */ static int delete_from_lru_cache(struct page *p) { if (isolate_lru_page(p)) { /* * Clear sensible page flags, so that the buddy system won't * complain when the page is unpoison-and-freed. */ ClearPageActive(p); ClearPageUnevictable(p); /* * Poisoned page might never drop its ref count to 0 so we have * to uncharge it manually from its memcg. */ mem_cgroup_uncharge(page_folio(p)); /* * drop the page count elevated by isolate_lru_page() */ put_page(p); return 0; } return -EIO; } static int truncate_error_page(struct page *p, unsigned long pfn, struct address_space *mapping) { int ret = MF_FAILED; if (mapping->a_ops->error_remove_page) { struct folio *folio = page_folio(p); int err = mapping->a_ops->error_remove_page(mapping, p); if (err != 0) pr_info("%#lx: Failed to punch page: %d\n", pfn, err); else if (!filemap_release_folio(folio, GFP_NOIO)) pr_info("%#lx: failed to release buffers\n", pfn); else ret = MF_RECOVERED; } else { /* * If the file system doesn't support it just invalidate * This fails on dirty or anything with private pages */ if (invalidate_inode_page(p)) ret = MF_RECOVERED; else pr_info("%#lx: Failed to invalidate\n", pfn); } return ret; } struct page_state { unsigned long mask; unsigned long res; enum mf_action_page_type type; /* Callback ->action() has to unlock the relevant page inside it. */ int (*action)(struct page_state *ps, struct page *p); }; /* * Return true if page is still referenced by others, otherwise return * false. * * The extra_pins is true when one extra refcount is expected. 
*/ static bool has_extra_refcount(struct page_state *ps, struct page *p, bool extra_pins) { int count = page_count(p) - 1; if (extra_pins) count -= 1; if (count > 0) { pr_err("%#lx: %s still referenced by %d users\n", page_to_pfn(p), action_page_types[ps->type], count); return true; } return false; } /* * Error hit kernel page. * Do nothing, try to be lucky and not touch this instead. For a few cases we * could be more sophisticated. */ static int me_kernel(struct page_state *ps, struct page *p) { unlock_page(p); return MF_IGNORED; } /* * Page in unknown state. Do nothing. */ static int me_unknown(struct page_state *ps, struct page *p) { pr_err("%#lx: Unknown page state\n", page_to_pfn(p)); unlock_page(p); return MF_FAILED; } /* * Clean (or cleaned) page cache page. */ static int me_pagecache_clean(struct page_state *ps, struct page *p) { int ret; struct address_space *mapping; bool extra_pins; delete_from_lru_cache(p); /* * For anonymous pages we're done the only reference left * should be the one m_f() holds. */ if (PageAnon(p)) { ret = MF_RECOVERED; goto out; } /* * Now truncate the page in the page cache. This is really * more like a "temporary hole punch" * Don't do this for block devices when someone else * has a reference, because it could be file system metadata * and that's not safe to truncate. */ mapping = page_mapping(p); if (!mapping) { /* * Page has been teared down in the meanwhile */ ret = MF_FAILED; goto out; } /* * The shmem page is kept in page cache instead of truncating * so is expected to have an extra refcount after error-handling. */ extra_pins = shmem_mapping(mapping); /* * Truncation is a bit tricky. Enable it per file system for now. * * Open: to take i_rwsem or not for this? Right now we don't. */ ret = truncate_error_page(p, page_to_pfn(p), mapping); if (has_extra_refcount(ps, p, extra_pins)) ret = MF_FAILED; out: unlock_page(p); return ret; } /* * Dirty pagecache page * Issues: when the error hit a hole page the error is not properly * propagated. */ static int me_pagecache_dirty(struct page_state *ps, struct page *p) { struct address_space *mapping = page_mapping(p); SetPageError(p); /* TBD: print more information about the file. */ if (mapping) { /* * IO error will be reported by write(), fsync(), etc. * who check the mapping. * This way the application knows that something went * wrong with its dirty file data. * * There's one open issue: * * The EIO will be only reported on the next IO * operation and then cleared through the IO map. * Normally Linux has two mechanisms to pass IO error * first through the AS_EIO flag in the address space * and then through the PageError flag in the page. * Since we drop pages on memory failure handling the * only mechanism open to use is through AS_AIO. * * This has the disadvantage that it gets cleared on * the first operation that returns an error, while * the PageError bit is more sticky and only cleared * when the page is reread or dropped. If an * application assumes it will always get error on * fsync, but does other operations on the fd before * and the page is dropped between then the error * will not be properly reported. * * This can already happen even without hwpoisoned * pages: first on metadata IO errors (which only * report through AS_EIO) or when the page is dropped * at the wrong time. * * So right now we assume that the application DTRT on * the first EIO, but we're not worse than other parts * of the kernel. 
*/ mapping_set_error(mapping, -EIO); } return me_pagecache_clean(ps, p); } /* * Clean and dirty swap cache. * * Dirty swap cache page is tricky to handle. The page could live both in page * cache and swap cache(ie. page is freshly swapped in). So it could be * referenced concurrently by 2 types of PTEs: * normal PTEs and swap PTEs. We try to handle them consistently by calling * try_to_unmap(!TTU_HWPOISON) to convert the normal PTEs to swap PTEs, * and then * - clear dirty bit to prevent IO * - remove from LRU * - but keep in the swap cache, so that when we return to it on * a later page fault, we know the application is accessing * corrupted data and shall be killed (we installed simple * interception code in do_swap_page to catch it). * * Clean swap cache pages can be directly isolated. A later page fault will * bring in the known good data from disk. */ static int me_swapcache_dirty(struct page_state *ps, struct page *p) { int ret; bool extra_pins = false; ClearPageDirty(p); /* Trigger EIO in shmem: */ ClearPageUptodate(p); ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED; unlock_page(p); if (ret == MF_DELAYED) extra_pins = true; if (has_extra_refcount(ps, p, extra_pins)) ret = MF_FAILED; return ret; } static int me_swapcache_clean(struct page_state *ps, struct page *p) { struct folio *folio = page_folio(p); int ret; delete_from_swap_cache(folio); ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED; folio_unlock(folio); if (has_extra_refcount(ps, p, false)) ret = MF_FAILED; return ret; } /* * Huge pages. Needs work. * Issues: * - Error on hugepage is contained in hugepage unit (not in raw page unit.) * To narrow down kill region to one page, we need to break up pmd. */ static int me_huge_page(struct page_state *ps, struct page *p) { int res; struct page *hpage = compound_head(p); struct address_space *mapping; bool extra_pins = false; mapping = page_mapping(hpage); if (mapping) { res = truncate_error_page(hpage, page_to_pfn(p), mapping); /* The page is kept in page cache. */ extra_pins = true; unlock_page(hpage); } else { unlock_page(hpage); /* * migration entry prevents later access on error hugepage, * so we can free and dissolve it into buddy to save healthy * subpages. */ put_page(hpage); if (__page_handle_poison(p) >= 0) { page_ref_inc(p); res = MF_RECOVERED; } else { res = MF_FAILED; } } if (has_extra_refcount(ps, p, extra_pins)) res = MF_FAILED; return res; } /* * Various page states we can handle. * * A page state is defined by its current page->flags bits. * The table matches them in order and calls the right handler. * * This is quite tricky because we can access page at any time * in its live cycle, so all accesses have to be extremely careful. * * This is not complete. More states could be added. * For any missing state don't attempt recovery. */ #define dirty (1UL << PG_dirty) #define sc ((1UL << PG_swapcache) | (1UL << PG_swapbacked)) #define unevict (1UL << PG_unevictable) #define mlock (1UL << PG_mlocked) #define lru (1UL << PG_lru) #define head (1UL << PG_head) #define slab (1UL << PG_slab) #define reserved (1UL << PG_reserved) static struct page_state error_states[] = { { reserved, reserved, MF_MSG_KERNEL, me_kernel }, /* * free pages are specially detected outside this table: * PG_buddy pages only make a small fraction of all free pages. */ /* * Could in theory check if slab page is free or if we can drop * currently unused objects without touching them. But just * treat it as standard kernel for now. 
*/ { slab, slab, MF_MSG_SLAB, me_kernel }, { head, head, MF_MSG_HUGE, me_huge_page }, { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty }, { sc|dirty, sc, MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean }, { mlock|dirty, mlock|dirty, MF_MSG_DIRTY_MLOCKED_LRU, me_pagecache_dirty }, { mlock|dirty, mlock, MF_MSG_CLEAN_MLOCKED_LRU, me_pagecache_clean }, { unevict|dirty, unevict|dirty, MF_MSG_DIRTY_UNEVICTABLE_LRU, me_pagecache_dirty }, { unevict|dirty, unevict, MF_MSG_CLEAN_UNEVICTABLE_LRU, me_pagecache_clean }, { lru|dirty, lru|dirty, MF_MSG_DIRTY_LRU, me_pagecache_dirty }, { lru|dirty, lru, MF_MSG_CLEAN_LRU, me_pagecache_clean }, /* * Catchall entry: must be at end. */ { 0, 0, MF_MSG_UNKNOWN, me_unknown }, }; #undef dirty #undef sc #undef unevict #undef mlock #undef lru #undef head #undef slab #undef reserved static void update_per_node_mf_stats(unsigned long pfn, enum mf_result result) { int nid = MAX_NUMNODES; struct memory_failure_stats *mf_stats = NULL; nid = pfn_to_nid(pfn); if (unlikely(nid < 0 || nid >= MAX_NUMNODES)) { WARN_ONCE(1, "Memory failure: pfn=%#lx, invalid nid=%d", pfn, nid); return; } mf_stats = &NODE_DATA(nid)->mf_stats; switch (result) { case MF_IGNORED: ++mf_stats->ignored; break; case MF_FAILED: ++mf_stats->failed; break; case MF_DELAYED: ++mf_stats->delayed; break; case MF_RECOVERED: ++mf_stats->recovered; break; default: WARN_ONCE(1, "Memory failure: mf_result=%d is not properly handled", result); break; } ++mf_stats->total; } /* * "Dirty/Clean" indication is not 100% accurate due to the possibility of * setting PG_dirty outside page lock. See also comment above set_page_dirty(). */ static int action_result(unsigned long pfn, enum mf_action_page_type type, enum mf_result result) { trace_memory_failure_event(pfn, type, result); num_poisoned_pages_inc(pfn); update_per_node_mf_stats(pfn, result); pr_err("%#lx: recovery action for %s: %s\n", pfn, action_page_types[type], action_name[result]); return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY; } static int page_action(struct page_state *ps, struct page *p, unsigned long pfn) { int result; /* page p should be unlocked after returning from ps->action(). */ result = ps->action(ps, p); /* Could do more checks here if page looks ok */ /* * Could adjust zone counters here to correct for the missing page. */ return action_result(pfn, ps->type, result); } static inline bool PageHWPoisonTakenOff(struct page *page) { return PageHWPoison(page) && page_private(page) == MAGIC_HWPOISON; } void SetPageHWPoisonTakenOff(struct page *page) { set_page_private(page, MAGIC_HWPOISON); } void ClearPageHWPoisonTakenOff(struct page *page) { if (PageHWPoison(page)) set_page_private(page, 0); } /* * Return true if a page type of a given page is supported by hwpoison * mechanism (while handling could fail), otherwise false. This function * does not return true for hugetlb or device memory pages, so it's assumed * to be called only in the context where we never have such pages. */ static inline bool HWPoisonHandlable(struct page *page, unsigned long flags) { /* Soft offline could migrate non-LRU movable pages */ if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page)) return true; return PageLRU(page) || is_free_buddy_page(page); } static int __get_hwpoison_page(struct page *page, unsigned long flags) { struct folio *folio = page_folio(page); int ret = 0; bool hugetlb = false; ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, false); if (hugetlb) { /* Make sure hugetlb demotion did not happen from under us. 
*/ if (folio == page_folio(page)) return ret; if (ret > 0) { folio_put(folio); folio = page_folio(page); } } /* * This check prevents from calling folio_try_get() for any * unsupported type of folio in order to reduce the risk of unexpected * races caused by taking a folio refcount. */ if (!HWPoisonHandlable(&folio->page, flags)) return -EBUSY; if (folio_try_get(folio)) { if (folio == page_folio(page)) return 1; pr_info("%#lx cannot catch tail\n", page_to_pfn(page)); folio_put(folio); } return 0; } static int get_any_page(struct page *p, unsigned long flags) { int ret = 0, pass = 0; bool count_increased = false; if (flags & MF_COUNT_INCREASED) count_increased = true; try_again: if (!count_increased) { ret = __get_hwpoison_page(p, flags); if (!ret) { if (page_count(p)) { /* We raced with an allocation, retry. */ if (pass++ < 3) goto try_again; ret = -EBUSY; } else if (!PageHuge(p) && !is_free_buddy_page(p)) { /* We raced with put_page, retry. */ if (pass++ < 3) goto try_again; ret = -EIO; } goto out; } else if (ret == -EBUSY) { /* * We raced with (possibly temporary) unhandlable * page, retry. */ if (pass++ < 3) { shake_page(p); goto try_again; } ret = -EIO; goto out; } } if (PageHuge(p) || HWPoisonHandlable(p, flags)) { ret = 1; } else { /* * A page we cannot handle. Check whether we can turn * it into something we can handle. */ if (pass++ < 3) { put_page(p); shake_page(p); count_increased = false; goto try_again; } put_page(p); ret = -EIO; } out: if (ret == -EIO) pr_err("%#lx: unhandlable page.\n", page_to_pfn(p)); return ret; } static int __get_unpoison_page(struct page *page) { struct folio *folio = page_folio(page); int ret = 0; bool hugetlb = false; ret = get_hwpoison_hugetlb_folio(folio, &hugetlb, true); if (hugetlb) { /* Make sure hugetlb demotion did not happen from under us. */ if (folio == page_folio(page)) return ret; if (ret > 0) folio_put(folio); } /* * PageHWPoisonTakenOff pages are not only marked as PG_hwpoison, * but also isolated from buddy freelist, so need to identify the * state and have to cancel both operations to unpoison. */ if (PageHWPoisonTakenOff(page)) return -EHWPOISON; return get_page_unless_zero(page) ? 1 : 0; } /** * get_hwpoison_page() - Get refcount for memory error handling * @p: Raw error page (hit by memory error) * @flags: Flags controlling behavior of error handling * * get_hwpoison_page() takes a page refcount of an error page to handle memory * error on it, after checking that the error page is in a well-defined state * (defined as a page-type we can successfully handle the memory error on it, * such as LRU page and hugetlb page). * * Memory error handling could be triggered at any time on any type of page, * so it's prone to race with typical memory management lifecycle (like * allocation and free). So to avoid such races, get_hwpoison_page() takes * extra care for the error page's state (as done in __get_hwpoison_page()), * and has some retry logic in get_any_page(). * * When called from unpoison_memory(), the caller should already ensure that * the given page has PG_hwpoison. So it's never reused for other page * allocations, and __get_unpoison_page() never races with them. * * Return: 0 on failure, * 1 on success for in-use pages in a well-defined state, * -EIO for pages on which we can not handle memory errors, * -EBUSY when get_hwpoison_page() has raced with page lifecycle * operations like allocation and free, * -EHWPOISON when the page is hwpoisoned and taken off from buddy. 
*/ static int get_hwpoison_page(struct page *p, unsigned long flags) { int ret; zone_pcp_disable(page_zone(p)); if (flags & MF_UNPOISON) ret = __get_unpoison_page(p); else ret = get_any_page(p, flags); zone_pcp_enable(page_zone(p)); return ret; } /* * Do all that is necessary to remove user space mappings. Unmap * the pages and send SIGBUS to the processes if the data was dirty. */ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn, int flags, struct page *hpage) { struct folio *folio = page_folio(hpage); enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON; struct address_space *mapping; LIST_HEAD(tokill); bool unmap_success; int forcekill; bool mlocked = PageMlocked(hpage); /* * Here we are interested only in user-mapped pages, so skip any * other types of pages. */ if (PageReserved(p) || PageSlab(p) || PageTable(p) || PageOffline(p)) return true; if (!(PageLRU(hpage) || PageHuge(p))) return true; /* * This check implies we don't kill processes if their pages * are in the swap cache early. Those are always late kills. */ if (!page_mapped(hpage)) return true; if (PageSwapCache(p)) { pr_err("%#lx: keeping poisoned page in swap cache\n", pfn); ttu &= ~TTU_HWPOISON; } /* * Propagate the dirty bit from PTEs to struct page first, because we * need this to decide if we should kill or just drop the page. * XXX: the dirty test could be racy: set_page_dirty() may not always * be called inside page lock (it's recommended but not enforced). */ mapping = page_mapping(hpage); if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping && mapping_can_writeback(mapping)) { if (page_mkclean(hpage)) { SetPageDirty(hpage); } else { ttu &= ~TTU_HWPOISON; pr_info("%#lx: corrupted page was clean: dropped without side effects\n", pfn); } } /* * First collect all the processes that have the page * mapped in dirty form. This has to be done before try_to_unmap, * because ttu takes the rmap data structures down. */ collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED); if (PageHuge(hpage) && !PageAnon(hpage)) { /* * For hugetlb pages in shared mappings, try_to_unmap * could potentially call huge_pmd_unshare. Because of * this, take semaphore in write mode here and set * TTU_RMAP_LOCKED to indicate we have taken the lock * at this higher level. */ mapping = hugetlb_page_mapping_lock_write(hpage); if (mapping) { try_to_unmap(folio, ttu|TTU_RMAP_LOCKED); i_mmap_unlock_write(mapping); } else pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn); } else { try_to_unmap(folio, ttu); } unmap_success = !page_mapped(hpage); if (!unmap_success) pr_err("%#lx: failed to unmap page (mapcount=%d)\n", pfn, page_mapcount(hpage)); /* * try_to_unmap() might put mlocked page in lru cache, so call * shake_page() again to ensure that it's flushed. */ if (mlocked) shake_page(hpage); /* * Now that the dirty bit has been propagated to the * struct page and all unmaps done we can decide if * killing is needed or not. Only kill when the page * was dirty or the process is not restartable, * otherwise the tokill list is merely * freed. When there was a problem unmapping earlier * use a more force-full uncatchable kill to prevent * any accesses to the poisoned memory. 
*/ forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL) || !unmap_success; kill_procs(&tokill, forcekill, !unmap_success, pfn, flags); return unmap_success; } static int identify_page_state(unsigned long pfn, struct page *p, unsigned long page_flags) { struct page_state *ps; /* * The first check uses the current page flags which may not have any * relevant information. The second check with the saved page flags is * carried out only if the first check can't determine the page status. */ for (ps = error_states;; ps++) if ((p->flags & ps->mask) == ps->res) break; page_flags |= (p->flags & (1UL << PG_dirty)); if (!ps->mask) for (ps = error_states;; ps++) if ((page_flags & ps->mask) == ps->res) break; return page_action(ps, p, pfn); } static int try_to_split_thp_page(struct page *page) { int ret; lock_page(page); ret = split_huge_page(page); unlock_page(page); if (unlikely(ret)) put_page(page); return ret; } static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn, struct address_space *mapping, pgoff_t index, int flags) { struct to_kill *tk; unsigned long size = 0; list_for_each_entry(tk, to_kill, nd) if (tk->size_shift) size = max(size, 1UL << tk->size_shift); if (size) { /* * Unmap the largest mapping to avoid breaking up device-dax * mappings which are constant size. The actual size of the * mapping being torn down is communicated in siginfo, see * kill_proc() */ loff_t start = (index << PAGE_SHIFT) & ~(size - 1); unmap_mapping_range(mapping, start, size, 0); } kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags); } static int mf_generic_kill_procs(unsigned long long pfn, int flags, struct dev_pagemap *pgmap) { struct page *page = pfn_to_page(pfn); LIST_HEAD(to_kill); dax_entry_t cookie; int rc = 0; /* * Pages instantiated by device-dax (not filesystem-dax) * may be compound pages. */ page = compound_head(page); /* * Prevent the inode from being freed while we are interrogating * the address_space, typically this would be handled by * lock_page(), but dax pages do not use the page lock. This * also prevents changes to the mapping of this pfn until * poison signaling is complete. */ cookie = dax_lock_page(page); if (!cookie) return -EBUSY; if (hwpoison_filter(page)) { rc = -EOPNOTSUPP; goto unlock; } switch (pgmap->type) { case MEMORY_DEVICE_PRIVATE: case MEMORY_DEVICE_COHERENT: /* * TODO: Handle device pages which may need coordination * with device-side memory. */ rc = -ENXIO; goto unlock; default: break; } /* * Use this flag as an indication that the dax page has been * remapped UC to prevent speculative consumption of poison. */ SetPageHWPoison(page); /* * Unlike System-RAM there is no possibility to swap in a * different physical page at a given virtual address, so all * userspace consumption of ZONE_DEVICE memory necessitates * SIGBUS (i.e. 
MF_MUST_KILL) */ flags |= MF_ACTION_REQUIRED | MF_MUST_KILL; collect_procs(page, &to_kill, true); unmap_and_kill(&to_kill, pfn, page->mapping, page->index, flags); unlock: dax_unlock_page(page, cookie); return rc; } #ifdef CONFIG_FS_DAX /** * mf_dax_kill_procs - Collect and kill processes who are using this file range * @mapping: address_space of the file in use * @index: start pgoff of the range within the file * @count: length of the range, in unit of PAGE_SIZE * @mf_flags: memory failure flags */ int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index, unsigned long count, int mf_flags) { LIST_HEAD(to_kill); dax_entry_t cookie; struct page *page; size_t end = index + count; mf_flags |= MF_ACTION_REQUIRED | MF_MUST_KILL; for (; index < end; index++) { page = NULL; cookie = dax_lock_mapping_entry(mapping, index, &page); if (!cookie) return -EBUSY; if (!page) goto unlock; SetPageHWPoison(page); collect_procs_fsdax(page, mapping, index, &to_kill); unmap_and_kill(&to_kill, page_to_pfn(page), mapping, index, mf_flags); unlock: dax_unlock_mapping_entry(mapping, index, cookie); } return 0; } EXPORT_SYMBOL_GPL(mf_dax_kill_procs); #endif /* CONFIG_FS_DAX */ #ifdef CONFIG_HUGETLB_PAGE /* * Struct raw_hwp_page represents information about "raw error page", * constructing singly linked list from ->_hugetlb_hwpoison field of folio. */ struct raw_hwp_page { struct llist_node node; struct page *page; }; static inline struct llist_head *raw_hwp_list_head(struct folio *folio) { return (struct llist_head *)&folio->_hugetlb_hwpoison; } bool is_raw_hwpoison_page_in_hugepage(struct page *page) { struct llist_head *raw_hwp_head; struct raw_hwp_page *p; struct folio *folio = page_folio(page); bool ret = false; if (!folio_test_hwpoison(folio)) return false; if (!folio_test_hugetlb(folio)) return PageHWPoison(page); /* * When RawHwpUnreliable is set, kernel lost track of which subpages * are HWPOISON. So return as if ALL subpages are HWPOISONed. */ if (folio_test_hugetlb_raw_hwp_unreliable(folio)) return true; mutex_lock(&mf_mutex); raw_hwp_head = raw_hwp_list_head(folio); llist_for_each_entry(p, raw_hwp_head->first, node) { if (page == p->page) { ret = true; break; } } mutex_unlock(&mf_mutex); return ret; } static unsigned long __folio_free_raw_hwp(struct folio *folio, bool move_flag) { struct llist_node *head; struct raw_hwp_page *p, *next; unsigned long count = 0; head = llist_del_all(raw_hwp_list_head(folio)); llist_for_each_entry_safe(p, next, head, node) { if (move_flag) SetPageHWPoison(p->page); else num_poisoned_pages_sub(page_to_pfn(p->page), 1); kfree(p); count++; } return count; } static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page) { struct llist_head *head; struct raw_hwp_page *raw_hwp; struct raw_hwp_page *p, *next; int ret = folio_test_set_hwpoison(folio) ? -EHWPOISON : 0; /* * Once the hwpoison hugepage has lost reliable raw error info, * there is little meaning to keep additional error info precisely, * so skip to add additional raw error info. */ if (folio_test_hugetlb_raw_hwp_unreliable(folio)) return -EHWPOISON; head = raw_hwp_list_head(folio); llist_for_each_entry_safe(p, next, head->first, node) { if (p->page == page) return -EHWPOISON; } raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC); if (raw_hwp) { raw_hwp->page = page; llist_add(&raw_hwp->node, head); /* the first error event will be counted in action_result(). */ if (ret) num_poisoned_pages_inc(page_to_pfn(page)); } else { /* * Failed to save raw error info. 
We no longer trace all * hwpoisoned subpages, and we need refuse to free/dissolve * this hwpoisoned hugepage. */ folio_set_hugetlb_raw_hwp_unreliable(folio); /* * Once hugetlb_raw_hwp_unreliable is set, raw_hwp_page is not * used any more, so free it. */ __folio_free_raw_hwp(folio, false); } return ret; } static unsigned long folio_free_raw_hwp(struct folio *folio, bool move_flag) { /* * hugetlb_vmemmap_optimized hugepages can't be freed because struct * pages for tail pages are required but they don't exist. */ if (move_flag && folio_test_hugetlb_vmemmap_optimized(folio)) return 0; /* * hugetlb_raw_hwp_unreliable hugepages shouldn't be unpoisoned by * definition. */ if (folio_test_hugetlb_raw_hwp_unreliable(folio)) return 0; return __folio_free_raw_hwp(folio, move_flag); } void folio_clear_hugetlb_hwpoison(struct folio *folio) { if (folio_test_hugetlb_raw_hwp_unreliable(folio)) return; if (folio_test_hugetlb_vmemmap_optimized(folio)) return; folio_clear_hwpoison(folio); folio_free_raw_hwp(folio, true); } /* * Called from hugetlb code with hugetlb_lock held. * * Return values: * 0 - free hugepage * 1 - in-use hugepage * 2 - not a hugepage * -EBUSY - the hugepage is busy (try to retry) * -EHWPOISON - the hugepage is already hwpoisoned */ int __get_huge_page_for_hwpoison(unsigned long pfn, int flags, bool *migratable_cleared) { struct page *page = pfn_to_page(pfn); struct folio *folio = page_folio(page); int ret = 2; /* fallback to normal page handling */ bool count_increased = false; if (!folio_test_hugetlb(folio)) goto out; if (flags & MF_COUNT_INCREASED) { ret = 1; count_increased = true; } else if (folio_test_hugetlb_freed(folio)) { ret = 0; } else if (folio_test_hugetlb_migratable(folio)) { ret = folio_try_get(folio); if (ret) count_increased = true; } else { ret = -EBUSY; if (!(flags & MF_NO_RETRY)) goto out; } if (folio_set_hugetlb_hwpoison(folio, page)) { ret = -EHWPOISON; goto out; } /* * Clearing hugetlb_migratable for hwpoisoned hugepages to prevent them * from being migrated by memory hotremove. */ if (count_increased && folio_test_hugetlb_migratable(folio)) { folio_clear_hugetlb_migratable(folio); *migratable_cleared = true; } return ret; out: if (count_increased) folio_put(folio); return ret; } /* * Taking refcount of hugetlb pages needs extra care about race conditions * with basic operations like hugepage allocation/free/demotion. * So some of prechecks for hwpoison (pinning, and testing/setting * PageHWPoison) should be done in single hugetlb_lock range. */ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb) { int res; struct page *p = pfn_to_page(pfn); struct folio *folio; unsigned long page_flags; bool migratable_cleared = false; *hugetlb = 1; retry: res = get_huge_page_for_hwpoison(pfn, flags, &migratable_cleared); if (res == 2) { /* fallback to normal page handling */ *hugetlb = 0; return 0; } else if (res == -EHWPOISON) { pr_err("%#lx: already hardware poisoned\n", pfn); if (flags & MF_ACTION_REQUIRED) { folio = page_folio(p); res = kill_accessing_process(current, folio_pfn(folio), flags); } return res; } else if (res == -EBUSY) { if (!(flags & MF_NO_RETRY)) { flags |= MF_NO_RETRY; goto retry; } return action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED); } folio = page_folio(p); folio_lock(folio); if (hwpoison_filter(p)) { folio_clear_hugetlb_hwpoison(folio); if (migratable_cleared) folio_set_hugetlb_migratable(folio); folio_unlock(folio); if (res == 1) folio_put(folio); return -EOPNOTSUPP; } /* * Handling free hugepage. 
The possible race with hugepage allocation * or demotion can be prevented by PageHWPoison flag. */ if (res == 0) { folio_unlock(folio); if (__page_handle_poison(p) >= 0) { page_ref_inc(p); res = MF_RECOVERED; } else { res = MF_FAILED; } return action_result(pfn, MF_MSG_FREE_HUGE, res); } page_flags = folio->flags; if (!hwpoison_user_mappings(p, pfn, flags, &folio->page)) { folio_unlock(folio); return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED); } return identify_page_state(pfn, p, page_flags); } #else static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb) { return 0; } static inline unsigned long folio_free_raw_hwp(struct folio *folio, bool flag) { return 0; } #endif /* CONFIG_HUGETLB_PAGE */ /* Drop the extra refcount in case we come from madvise() */ static void put_ref_page(unsigned long pfn, int flags) { struct page *page; if (!(flags & MF_COUNT_INCREASED)) return; page = pfn_to_page(pfn); if (page) put_page(page); } static int memory_failure_dev_pagemap(unsigned long pfn, int flags, struct dev_pagemap *pgmap) { int rc = -ENXIO; /* device metadata space is not recoverable */ if (!pgmap_pfn_valid(pgmap, pfn)) goto out; /* * Call driver's implementation to handle the memory failure, otherwise * fall back to generic handler. */ if (pgmap_has_memory_failure(pgmap)) { rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags); /* * Fall back to generic handler too if operation is not * supported inside the driver/device/filesystem. */ if (rc != -EOPNOTSUPP) goto out; } rc = mf_generic_kill_procs(pfn, flags, pgmap); out: /* drop pgmap ref acquired in caller */ put_dev_pagemap(pgmap); if (rc != -EOPNOTSUPP) action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED); return rc; } /** * memory_failure - Handle memory failure of a page. * @pfn: Page Number of the corrupted page * @flags: fine tune action taken * * This function is called by the low level machine check code * of an architecture when it detects hardware memory corruption * of a page. It tries its best to recover, which includes * dropping pages, killing processes etc. * * The function is primarily of use for corruptions that * happen outside the current execution context (e.g. when * detected by a background scrubber) * * Must run in process context (e.g. a work queue) with interrupts * enabled and no spinlocks held. * * Return: 0 for successfully handled the memory error, * -EOPNOTSUPP for hwpoison_filter() filtered the error event, * < 0(except -EOPNOTSUPP) on failure. 
*/ int memory_failure(unsigned long pfn, int flags) { struct page *p; struct page *hpage; struct dev_pagemap *pgmap; int res = 0; unsigned long page_flags; bool retry = true; int hugetlb = 0; if (!sysctl_memory_failure_recovery) panic("Memory failure on page %lx", pfn); mutex_lock(&mf_mutex); if (!(flags & MF_SW_SIMULATED)) hw_memory_failure = true; p = pfn_to_online_page(pfn); if (!p) { res = arch_memory_failure(pfn, flags); if (res == 0) goto unlock_mutex; if (pfn_valid(pfn)) { pgmap = get_dev_pagemap(pfn, NULL); put_ref_page(pfn, flags); if (pgmap) { res = memory_failure_dev_pagemap(pfn, flags, pgmap); goto unlock_mutex; } } pr_err("%#lx: memory outside kernel control\n", pfn); res = -ENXIO; goto unlock_mutex; } try_again: res = try_memory_failure_hugetlb(pfn, flags, &hugetlb); if (hugetlb) goto unlock_mutex; if (TestSetPageHWPoison(p)) { pr_err("%#lx: already hardware poisoned\n", pfn); res = -EHWPOISON; if (flags & MF_ACTION_REQUIRED) res = kill_accessing_process(current, pfn, flags); if (flags & MF_COUNT_INCREASED) put_page(p); goto unlock_mutex; } /* * We need/can do nothing about count=0 pages. * 1) it's a free page, and therefore in safe hand: * check_new_page() will be the gate keeper. * 2) it's part of a non-compound high order page. * Implies some kernel user: cannot stop them from * R/W the page; let's pray that the page has been * used and will be freed some time later. * In fact it's dangerous to directly bump up page count from 0, * that may make page_ref_freeze()/page_ref_unfreeze() mismatch. */ if (!(flags & MF_COUNT_INCREASED)) { res = get_hwpoison_page(p, flags); if (!res) { if (is_free_buddy_page(p)) { if (take_page_off_buddy(p)) { page_ref_inc(p); res = MF_RECOVERED; } else { /* We lost the race, try again */ if (retry) { ClearPageHWPoison(p); retry = false; goto try_again; } res = MF_FAILED; } res = action_result(pfn, MF_MSG_BUDDY, res); } else { res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED); } goto unlock_mutex; } else if (res < 0) { res = action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED); goto unlock_mutex; } } hpage = compound_head(p); if (PageTransHuge(hpage)) { /* * The flag must be set after the refcount is bumped * otherwise it may race with THP split. * And the flag can't be set in get_hwpoison_page() since * it is called by soft offline too and it is just called * for !MF_COUNT_INCREASED. So here seems to be the best * place. * * Don't need care about the above error handling paths for * get_hwpoison_page() since they handle either free page * or unhandlable page. The refcount is bumped iff the * page is a valid handlable page. */ SetPageHasHWPoisoned(hpage); if (try_to_split_thp_page(p) < 0) { res = action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED); goto unlock_mutex; } VM_BUG_ON_PAGE(!page_count(p), p); } /* * We ignore non-LRU pages for good reasons. * - PG_locked is only well defined for LRU pages and a few others * - to avoid races with __SetPageLocked() * - to avoid races with __SetPageSlab*() (and more non-atomic ops) * The check (unnecessarily) ignores LRU pages being isolated and * walked by the page reclaim code, however that's not a big loss. */ shake_page(p); lock_page(p); /* * We're only intended to deal with the non-Compound page here. * However, the page could have changed compound pages due to * race window. If this happens, we could try again to hopefully * handle the page next round. 
*/ if (PageCompound(p)) { if (retry) { ClearPageHWPoison(p); unlock_page(p); put_page(p); flags &= ~MF_COUNT_INCREASED; retry = false; goto try_again; } res = action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED); goto unlock_page; } /* * We use page flags to determine what action should be taken, but * the flags can be modified by the error containment action. One * example is an mlocked page, where PG_mlocked is cleared by * page_remove_rmap() in try_to_unmap_one(). So to determine page status * correctly, we save a copy of the page flags at this time. */ page_flags = p->flags; if (hwpoison_filter(p)) { ClearPageHWPoison(p); unlock_page(p); put_page(p); res = -EOPNOTSUPP; goto unlock_mutex; } /* * __munlock_folio() may clear a writeback page's LRU flag without * page_lock. We need wait writeback completion for this page or it * may trigger vfs BUG while evict inode. */ if (!PageLRU(p) && !PageWriteback(p)) goto identify_page_state; /* * It's very difficult to mess with pages currently under IO * and in many cases impossible, so we just avoid it here. */ wait_on_page_writeback(p); /* * Now take care of user space mappings. * Abort on fail: __filemap_remove_folio() assumes unmapped page. */ if (!hwpoison_user_mappings(p, pfn, flags, p)) { res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED); goto unlock_page; } /* * Torn down by someone else? */ if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) { res = action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED); goto unlock_page; } identify_page_state: res = identify_page_state(pfn, p, page_flags); mutex_unlock(&mf_mutex); return res; unlock_page: unlock_page(p); unlock_mutex: mutex_unlock(&mf_mutex); return res; } EXPORT_SYMBOL_GPL(memory_failure); #define MEMORY_FAILURE_FIFO_ORDER 4 #define MEMORY_FAILURE_FIFO_SIZE (1 << MEMORY_FAILURE_FIFO_ORDER) struct memory_failure_entry { unsigned long pfn; int flags; }; struct memory_failure_cpu { DECLARE_KFIFO(fifo, struct memory_failure_entry, MEMORY_FAILURE_FIFO_SIZE); spinlock_t lock; struct work_struct work; }; static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu); /** * memory_failure_queue - Schedule handling memory failure of a page. * @pfn: Page Number of the corrupted page * @flags: Flags for memory failure handling * * This function is called by the low level hardware error handler * when it detects hardware memory corruption of a page. It schedules * the recovering of error page, including dropping pages, killing * processes etc. * * The function is primarily of use for corruptions that * happen outside the current execution context (e.g. when * detected by a background scrubber) * * Can run in IRQ context. 
 */
void memory_failure_queue(unsigned long pfn, int flags)
{
	struct memory_failure_cpu *mf_cpu;
	unsigned long proc_flags;
	struct memory_failure_entry entry = {
		.pfn =		pfn,
		.flags =	flags,
	};

	mf_cpu = &get_cpu_var(memory_failure_cpu);
	spin_lock_irqsave(&mf_cpu->lock, proc_flags);
	if (kfifo_put(&mf_cpu->fifo, entry))
		schedule_work_on(smp_processor_id(), &mf_cpu->work);
	else
		pr_err("buffer overflow when queuing memory failure at %#lx\n",
		       pfn);
	spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
	put_cpu_var(memory_failure_cpu);
}
EXPORT_SYMBOL_GPL(memory_failure_queue);

static void memory_failure_work_func(struct work_struct *work)
{
	struct memory_failure_cpu *mf_cpu;
	struct memory_failure_entry entry = { 0, };
	unsigned long proc_flags;
	int gotten;

	mf_cpu = container_of(work, struct memory_failure_cpu, work);
	for (;;) {
		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
		gotten = kfifo_get(&mf_cpu->fifo, &entry);
		spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
		if (!gotten)
			break;
		if (entry.flags & MF_SOFT_OFFLINE)
			soft_offline_page(entry.pfn, entry.flags);
		else
			memory_failure(entry.pfn, entry.flags);
	}
}

/*
 * Process memory_failure work queued on the specified CPU.
 * Used to avoid return-to-userspace racing with the memory_failure workqueue.
 */
void memory_failure_queue_kick(int cpu)
{
	struct memory_failure_cpu *mf_cpu;

	mf_cpu = &per_cpu(memory_failure_cpu, cpu);
	cancel_work_sync(&mf_cpu->work);
	memory_failure_work_func(&mf_cpu->work);
}

static int __init memory_failure_init(void)
{
	struct memory_failure_cpu *mf_cpu;
	int cpu;

	for_each_possible_cpu(cpu) {
		mf_cpu = &per_cpu(memory_failure_cpu, cpu);
		spin_lock_init(&mf_cpu->lock);
		INIT_KFIFO(mf_cpu->fifo);
		INIT_WORK(&mf_cpu->work, memory_failure_work_func);
	}

	register_sysctl_init("vm", memory_failure_table);

	return 0;
}
core_initcall(memory_failure_init);

#undef pr_fmt
#define pr_fmt(fmt)	"" fmt
#define unpoison_pr_info(fmt, pfn, rs)			\
({							\
	if (__ratelimit(rs))				\
		pr_info(fmt, pfn);			\
})

/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the to be unpoisoned page
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software-level, so it only works
 * for linux injected failures, not real hardware failures
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct folio *folio;
	struct page *p;
	int ret = -EBUSY, ghp;
	unsigned long count = 1;
	bool huge = false;
	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	folio = page_folio(p);

	mutex_lock(&mf_mutex);

	if (hw_memory_failure) {
		unpoison_pr_info("Unpoison: Disabled after HW memory failure %#lx\n",
				 pfn, &unpoison_rs);
		ret = -EOPNOTSUPP;
		goto unlock_mutex;
	}

	if (!PageHWPoison(p)) {
		unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (folio_ref_count(folio) > 1) {
		unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (folio_test_slab(folio) || PageTable(&folio->page) ||
	    folio_test_reserved(folio) || PageOffline(&folio->page))
		goto unlock_mutex;

	/*
	 * Note that folio->_mapcount is overloaded in SLAB, so the simple test
	 * in folio_mapped() has to be done after folio_test_slab() is checked.
	 */
	if (folio_mapped(folio)) {
		unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	if (folio_mapping(folio)) {
		unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
				 pfn, &unpoison_rs);
		goto unlock_mutex;
	}

	ghp = get_hwpoison_page(p, MF_UNPOISON);
	if (!ghp) {
		if (PageHuge(p)) {
			huge = true;
			count = folio_free_raw_hwp(folio, false);
			if (count == 0)
				goto unlock_mutex;
		}
		ret = folio_test_clear_hwpoison(folio) ? 0 : -EBUSY;
	} else if (ghp < 0) {
		if (ghp == -EHWPOISON) {
			ret = put_page_back_buddy(p) ? 0 : -EBUSY;
		} else {
			ret = ghp;
			unpoison_pr_info("Unpoison: failed to grab page %#lx\n",
					 pfn, &unpoison_rs);
		}
	} else {
		if (PageHuge(p)) {
			huge = true;
			count = folio_free_raw_hwp(folio, false);
			if (count == 0) {
				folio_put(folio);
				goto unlock_mutex;
			}
		}

		folio_put(folio);
		if (TestClearPageHWPoison(p)) {
			folio_put(folio);
			ret = 0;
		}
	}

unlock_mutex:
	mutex_unlock(&mf_mutex);
	if (!ret) {
		if (!huge)
			num_poisoned_pages_sub(pfn, 1);
		unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
				 page_to_pfn(p), &unpoison_rs);
	}
	return ret;
}
EXPORT_SYMBOL(unpoison_memory);

static bool isolate_page(struct page *page, struct list_head *pagelist)
{
	bool isolated = false;

	if (PageHuge(page)) {
		isolated = isolate_hugetlb(page_folio(page), pagelist);
	} else {
		bool lru = !__PageMovable(page);

		if (lru)
			isolated = isolate_lru_page(page);
		else
			isolated = isolate_movable_page(page,
							ISOLATE_UNEVICTABLE);

		if (isolated) {
			list_add(&page->lru, pagelist);
			if (lru)
				inc_node_page_state(page, NR_ISOLATED_ANON +
						    page_is_file_lru(page));
		}
	}

	/*
	 * If we succeed to isolate the page, we grabbed another refcount on
	 * the page, so we can safely drop the one we got from get_any_page().
	 * If we failed to isolate the page, it means that we cannot go further
	 * and we will return an error, so drop the reference we got from
	 * get_any_page() as well.
	 */
	put_page(page);
	return isolated;
}

/*
 * soft_offline_in_use_page handles hugetlb-pages and non-hugetlb pages.
 * If the page is a non-dirty unmapped page-cache page, it simply invalidates.
 * If the page is mapped, it migrates the contents over.
 */
static int soft_offline_in_use_page(struct page *page)
{
	long ret = 0;
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_head(page);
	char const *msg_page[] = {"page", "hugepage"};
	bool huge = PageHuge(page);
	LIST_HEAD(pagelist);
	struct migration_target_control mtc = {
		.nid = NUMA_NO_NODE,
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	if (!huge && PageTransHuge(hpage)) {
		if (try_to_split_thp_page(page)) {
			pr_info("soft offline: %#lx: thp split failed\n", pfn);
			return -EBUSY;
		}
		hpage = page;
	}

	lock_page(page);
	if (!huge)
		wait_on_page_writeback(page);
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_page(page);
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return 0;
	}

	if (!huge && PageLRU(page) && !PageSwapCache(page))
		/*
		 * Try to invalidate first. This should work for
		 * non dirty unmapped page cache pages.
		 */
		ret = invalidate_inode_page(page);
	unlock_page(page);

	if (ret) {
		pr_info("soft_offline: %#lx: invalidated\n", pfn);
		page_handle_poison(page, false, true);
		return 0;
	}

	if (isolate_page(hpage, &pagelist)) {
		ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
		if (!ret) {
			bool release = !huge;

			if (!page_handle_poison(page, huge, release))
				ret = -EBUSY;
		} else {
			if (!list_empty(&pagelist))
				putback_movable_pages(&pagelist);

			pr_info("soft offline: %#lx: %s migration failed %ld, type %pGp\n",
				pfn, msg_page[huge], ret, &page->flags);
			if (ret > 0)
				ret = -EBUSY;
		}
	} else {
		pr_info("soft offline: %#lx: %s isolation failed, page count %d, type %pGp\n",
			pfn, msg_page[huge], page_count(page), &page->flags);
		ret = -EBUSY;
	}
	return ret;
}

/**
 * soft_offline_page - Soft offline a page.
 * @pfn: pfn to soft-offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success
 *         -EOPNOTSUPP for hwpoison_filter() filtered the error event
 *         < 0 otherwise negated errno.
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(unsigned long pfn, int flags)
{
	int ret;
	bool try_again = true;
	struct page *page;

	if (!pfn_valid(pfn)) {
		WARN_ON_ONCE(flags & MF_COUNT_INCREASED);
		return -ENXIO;
	}

	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
	page = pfn_to_online_page(pfn);
	if (!page) {
		put_ref_page(pfn, flags);
		return -EIO;
	}

	mutex_lock(&mf_mutex);

	if (PageHWPoison(page)) {
		pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
		put_ref_page(pfn, flags);
		mutex_unlock(&mf_mutex);
		return 0;
	}

retry:
	get_online_mems();
	ret = get_hwpoison_page(page, flags | MF_SOFT_OFFLINE);
	put_online_mems();

	if (hwpoison_filter(page)) {
		if (ret > 0)
			put_page(page);

		mutex_unlock(&mf_mutex);
		return -EOPNOTSUPP;
	}

	if (ret > 0) {
		ret = soft_offline_in_use_page(page);
	} else if (ret == 0) {
		if (!page_handle_poison(page, true, false)) {
			if (try_again) {
				try_again = false;
				flags &= ~MF_COUNT_INCREASED;
				goto retry;
			}
			ret = -EBUSY;
		}
	}

	mutex_unlock(&mf_mutex);

	return ret;
}
linux-master
mm/memory-failure.c
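The memory_failure_queue() path above defers hard-poison handling through a per-CPU kfifo that a work item drains, and the same queue dispatches to soft_offline_page() when MF_SOFT_OFFLINE is set in the queued flags; the soft-offline doc comment notes the actual offlining policy normally lives in user space. As a rough, hedged illustration of the kernel-side entry points only, the sketch below shows how an error-reporting layer might feed that machinery. The function name report_page_error and the threshold constant are invented for this example and are not part of mm/memory-failure.c.

/*
 * Hypothetical sketch: hand a faulty pfn to the deferred machinery above.
 * CE_SOFT_OFFLINE_THRESHOLD and report_page_error() are illustrative names.
 */
#include <linux/mm.h>
#include <linux/kernel.h>

#define CE_SOFT_OFFLINE_THRESHOLD	16	/* assumed policy value */

static void report_page_error(unsigned long pfn, unsigned int ce_count,
			      bool uncorrected)
{
	if (uncorrected) {
		/*
		 * Uncorrected error: queue hard poisoning. The per-CPU
		 * kfifo + work item allows calling this from contexts
		 * that cannot run memory_failure() directly.
		 */
		memory_failure_queue(pfn, 0);
	} else if (ce_count >= CE_SOFT_OFFLINE_THRESHOLD) {
		/*
		 * Still-correctable page: the work function will call
		 * soft_offline_page(), migrating or invalidating the
		 * page without killing anything.
		 */
		memory_failure_queue(pfn, MF_SOFT_OFFLINE);
	}
}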
// SPDX-License-Identifier: GPL-2.0-only
/*
 * rodata_test.c: functional test for mark_rodata_ro function
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <[email protected]>
 */
#define pr_fmt(fmt) "rodata_test: " fmt

#include <linux/rodata_test.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/sections.h>

static const int rodata_test_data = 0xC3;

void rodata_test(void)
{
	int zero = 0;

	/* test 1: read the value */
	/* If this test fails, some previous testrun has clobbered the state */
	if (!rodata_test_data) {
		pr_err("test 1 fails (start data)\n");
		return;
	}

	/* test 2: write to the variable; this should fault */
	if (!copy_to_kernel_nofault((void *)&rodata_test_data,
				    (void *)&zero, sizeof(zero))) {
		pr_err("test data was not read only\n");
		return;
	}

	/* test 3: check the value hasn't changed */
	if (rodata_test_data == zero) {
		pr_err("test data was changed\n");
		return;
	}

	/* test 4: check if the rodata section is PAGE_SIZE aligned */
	if (!PAGE_ALIGNED(__start_rodata)) {
		pr_err("start of .rodata is not page size aligned\n");
		return;
	}
	if (!PAGE_ALIGNED(__end_rodata)) {
		pr_err("end of .rodata is not page size aligned\n");
		return;
	}

	pr_info("all tests were successful\n");
}
linux-master
mm/rodata_test.c
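rodata_test() above relies on copy_to_kernel_nofault() returning non-zero when the destination is write-protected, followed by a read-back check that the value did not change. The fragment below is a hypothetical reuse of that same probe pattern for some other const object; my_ro_value and my_ro_selftest are made-up names, and whether the object actually ends up write-protected depends on the kernel's RO-data configuration.

/*
 * Hypothetical sketch, modelled on rodata_test(): probe a const object
 * with copy_to_kernel_nofault() and treat a successful write as a
 * protection failure. Symbol names are illustrative only.
 */
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/printk.h>

static const int my_ro_value = 0x5A;

static bool my_ro_selftest(void)
{
	int zero = 0;

	/* The copy must fault; a zero return means the write went through. */
	if (!copy_to_kernel_nofault((void *)&my_ro_value, &zero, sizeof(zero))) {
		pr_err("const object was writable\n");
		return false;
	}

	/* The stored value must be unchanged after the attempted write. */
	return my_ro_value != zero;
}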
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Swap reorganised 29.12.95, Stephen Tweedie. * kswapd added: 7.1.96 sct * Removed kswapd_ctl limits, and swap out as many pages as needed * to bring the system back to freepages.high: 2.4.97, Rik van Riel. * Zone aware kswapd started 02/00, Kanoj Sarcar ([email protected]). * Multiqueue VM started 5.8.00, Rik van Riel. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/mm.h> #include <linux/sched/mm.h> #include <linux/module.h> #include <linux/gfp.h> #include <linux/kernel_stat.h> #include <linux/swap.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/vmpressure.h> #include <linux/vmstat.h> #include <linux/file.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/buffer_head.h> /* for buffer_heads_over_limit */ #include <linux/mm_inline.h> #include <linux/backing-dev.h> #include <linux/rmap.h> #include <linux/topology.h> #include <linux/cpu.h> #include <linux/cpuset.h> #include <linux/compaction.h> #include <linux/notifier.h> #include <linux/rwsem.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/memcontrol.h> #include <linux/migrate.h> #include <linux/delayacct.h> #include <linux/sysctl.h> #include <linux/memory-tiers.h> #include <linux/oom.h> #include <linux/pagevec.h> #include <linux/prefetch.h> #include <linux/printk.h> #include <linux/dax.h> #include <linux/psi.h> #include <linux/pagewalk.h> #include <linux/shmem_fs.h> #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/khugepaged.h> #include <linux/rculist_nulls.h> #include <linux/random.h> #include <asm/tlbflush.h> #include <asm/div64.h> #include <linux/swapops.h> #include <linux/balloon_compaction.h> #include <linux/sched/sysctl.h> #include "internal.h" #include "swap.h" #define CREATE_TRACE_POINTS #include <trace/events/vmscan.h> struct scan_control { /* How many pages shrink_list() should reclaim */ unsigned long nr_to_reclaim; /* * Nodemask of nodes allowed by the caller. If NULL, all nodes * are scanned. */ nodemask_t *nodemask; /* * The memory cgroup that hit its limit and as a result is the * primary target of this reclaim invocation. */ struct mem_cgroup *target_mem_cgroup; /* * Scan pressure balancing between anon and file LRUs */ unsigned long anon_cost; unsigned long file_cost; /* Can active folios be deactivated as part of reclaim? */ #define DEACTIVATE_ANON 1 #define DEACTIVATE_FILE 2 unsigned int may_deactivate:2; unsigned int force_deactivate:1; unsigned int skipped_deactivate:1; /* Writepage batching in laptop mode; RECLAIM_WRITE */ unsigned int may_writepage:1; /* Can mapped folios be reclaimed? */ unsigned int may_unmap:1; /* Can folios be swapped as part of reclaim? */ unsigned int may_swap:1; /* Proactive reclaim invoked by userspace through memory.reclaim */ unsigned int proactive:1; /* * Cgroup memory below memory.low is protected as long as we * don't threaten to OOM. If any cgroup is reclaimed at * reduced force or passed over entirely due to its memory.low * setting (memcg_low_skipped), and nothing is reclaimed as a * result, then go back for one more cycle that reclaims the protected * memory (memcg_low_reclaim) to avert OOM. 
*/ unsigned int memcg_low_reclaim:1; unsigned int memcg_low_skipped:1; unsigned int hibernation_mode:1; /* One of the zones is ready for compaction */ unsigned int compaction_ready:1; /* There is easily reclaimable cold cache in the current node */ unsigned int cache_trim_mode:1; /* The file folios on the current node are dangerously low */ unsigned int file_is_tiny:1; /* Always discard instead of demoting to lower tier memory */ unsigned int no_demotion:1; /* Allocation order */ s8 order; /* Scan (total_size >> priority) pages at once */ s8 priority; /* The highest zone to isolate folios for reclaim from */ s8 reclaim_idx; /* This context's GFP mask */ gfp_t gfp_mask; /* Incremented by the number of inactive pages that were scanned */ unsigned long nr_scanned; /* Number of pages freed so far during a call to shrink_zones() */ unsigned long nr_reclaimed; struct { unsigned int dirty; unsigned int unqueued_dirty; unsigned int congested; unsigned int writeback; unsigned int immediate; unsigned int file_taken; unsigned int taken; } nr; /* for recording the reclaimed slab by now */ struct reclaim_state reclaim_state; }; #ifdef ARCH_HAS_PREFETCHW #define prefetchw_prev_lru_folio(_folio, _base, _field) \ do { \ if ((_folio)->lru.prev != _base) { \ struct folio *prev; \ \ prev = lru_to_folio(&(_folio->lru)); \ prefetchw(&prev->_field); \ } \ } while (0) #else #define prefetchw_prev_lru_folio(_folio, _base, _field) do { } while (0) #endif /* * From 0 .. 200. Higher means more swappy. */ int vm_swappiness = 60; LIST_HEAD(shrinker_list); DECLARE_RWSEM(shrinker_rwsem); #ifdef CONFIG_MEMCG static int shrinker_nr_max; /* The shrinker_info is expanded in a batch of BITS_PER_LONG */ static inline int shrinker_map_size(int nr_items) { return (DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned long)); } static inline int shrinker_defer_size(int nr_items) { return (round_up(nr_items, BITS_PER_LONG) * sizeof(atomic_long_t)); } static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg, int nid) { return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info, lockdep_is_held(&shrinker_rwsem)); } static int expand_one_shrinker_info(struct mem_cgroup *memcg, int map_size, int defer_size, int old_map_size, int old_defer_size, int new_nr_max) { struct shrinker_info *new, *old; struct mem_cgroup_per_node *pn; int nid; int size = map_size + defer_size; for_each_node(nid) { pn = memcg->nodeinfo[nid]; old = shrinker_info_protected(memcg, nid); /* Not yet online memcg */ if (!old) return 0; /* Already expanded this shrinker_info */ if (new_nr_max <= old->map_nr_max) continue; new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid); if (!new) return -ENOMEM; new->nr_deferred = (atomic_long_t *)(new + 1); new->map = (void *)new->nr_deferred + defer_size; new->map_nr_max = new_nr_max; /* map: set all old bits, clear all new bits */ memset(new->map, (int)0xff, old_map_size); memset((void *)new->map + old_map_size, 0, map_size - old_map_size); /* nr_deferred: copy old values, clear all new values */ memcpy(new->nr_deferred, old->nr_deferred, old_defer_size); memset((void *)new->nr_deferred + old_defer_size, 0, defer_size - old_defer_size); rcu_assign_pointer(pn->shrinker_info, new); kvfree_rcu(old, rcu); } return 0; } void free_shrinker_info(struct mem_cgroup *memcg) { struct mem_cgroup_per_node *pn; struct shrinker_info *info; int nid; for_each_node(nid) { pn = memcg->nodeinfo[nid]; info = rcu_dereference_protected(pn->shrinker_info, true); kvfree(info); 
rcu_assign_pointer(pn->shrinker_info, NULL); } } int alloc_shrinker_info(struct mem_cgroup *memcg) { struct shrinker_info *info; int nid, size, ret = 0; int map_size, defer_size = 0; down_write(&shrinker_rwsem); map_size = shrinker_map_size(shrinker_nr_max); defer_size = shrinker_defer_size(shrinker_nr_max); size = map_size + defer_size; for_each_node(nid) { info = kvzalloc_node(sizeof(*info) + size, GFP_KERNEL, nid); if (!info) { free_shrinker_info(memcg); ret = -ENOMEM; break; } info->nr_deferred = (atomic_long_t *)(info + 1); info->map = (void *)info->nr_deferred + defer_size; info->map_nr_max = shrinker_nr_max; rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info); } up_write(&shrinker_rwsem); return ret; } static int expand_shrinker_info(int new_id) { int ret = 0; int new_nr_max = round_up(new_id + 1, BITS_PER_LONG); int map_size, defer_size = 0; int old_map_size, old_defer_size = 0; struct mem_cgroup *memcg; if (!root_mem_cgroup) goto out; lockdep_assert_held(&shrinker_rwsem); map_size = shrinker_map_size(new_nr_max); defer_size = shrinker_defer_size(new_nr_max); old_map_size = shrinker_map_size(shrinker_nr_max); old_defer_size = shrinker_defer_size(shrinker_nr_max); memcg = mem_cgroup_iter(NULL, NULL, NULL); do { ret = expand_one_shrinker_info(memcg, map_size, defer_size, old_map_size, old_defer_size, new_nr_max); if (ret) { mem_cgroup_iter_break(NULL, memcg); goto out; } } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); out: if (!ret) shrinker_nr_max = new_nr_max; return ret; } void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) { if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) { struct shrinker_info *info; rcu_read_lock(); info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info); if (!WARN_ON_ONCE(shrinker_id >= info->map_nr_max)) { /* Pairs with smp mb in shrink_slab() */ smp_mb__before_atomic(); set_bit(shrinker_id, info->map); } rcu_read_unlock(); } } static DEFINE_IDR(shrinker_idr); static int prealloc_memcg_shrinker(struct shrinker *shrinker) { int id, ret = -ENOMEM; if (mem_cgroup_disabled()) return -ENOSYS; down_write(&shrinker_rwsem); /* This may call shrinker, so it must use down_read_trylock() */ id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL); if (id < 0) goto unlock; if (id >= shrinker_nr_max) { if (expand_shrinker_info(id)) { idr_remove(&shrinker_idr, id); goto unlock; } } shrinker->id = id; ret = 0; unlock: up_write(&shrinker_rwsem); return ret; } static void unregister_memcg_shrinker(struct shrinker *shrinker) { int id = shrinker->id; BUG_ON(id < 0); lockdep_assert_held(&shrinker_rwsem); idr_remove(&shrinker_idr, id); } static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker, struct mem_cgroup *memcg) { struct shrinker_info *info; info = shrinker_info_protected(memcg, nid); return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0); } static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker, struct mem_cgroup *memcg) { struct shrinker_info *info; info = shrinker_info_protected(memcg, nid); return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]); } void reparent_shrinker_deferred(struct mem_cgroup *memcg) { int i, nid; long nr; struct mem_cgroup *parent; struct shrinker_info *child_info, *parent_info; parent = parent_mem_cgroup(memcg); if (!parent) parent = root_mem_cgroup; /* Prevent from concurrent shrinker_info expand */ down_read(&shrinker_rwsem); for_each_node(nid) { child_info = shrinker_info_protected(memcg, nid); parent_info = 
shrinker_info_protected(parent, nid); for (i = 0; i < child_info->map_nr_max; i++) { nr = atomic_long_read(&child_info->nr_deferred[i]); atomic_long_add(nr, &parent_info->nr_deferred[i]); } } up_read(&shrinker_rwsem); } /* Returns true for reclaim through cgroup limits or cgroup interfaces. */ static bool cgroup_reclaim(struct scan_control *sc) { return sc->target_mem_cgroup; } /* * Returns true for reclaim on the root cgroup. This is true for direct * allocator reclaim and reclaim through cgroup interfaces on the root cgroup. */ static bool root_reclaim(struct scan_control *sc) { return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup); } /** * writeback_throttling_sane - is the usual dirty throttling mechanism available? * @sc: scan_control in question * * The normal page dirty throttling mechanism in balance_dirty_pages() is * completely broken with the legacy memcg and direct stalling in * shrink_folio_list() is used for throttling instead, which lacks all the * niceties such as fairness, adaptive pausing, bandwidth proportional * allocation and configurability. * * This function tests whether the vmscan currently in progress can assume * that the normal dirty throttling mechanism is operational. */ static bool writeback_throttling_sane(struct scan_control *sc) { if (!cgroup_reclaim(sc)) return true; #ifdef CONFIG_CGROUP_WRITEBACK if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) return true; #endif return false; } #else static int prealloc_memcg_shrinker(struct shrinker *shrinker) { return -ENOSYS; } static void unregister_memcg_shrinker(struct shrinker *shrinker) { } static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker, struct mem_cgroup *memcg) { return 0; } static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker, struct mem_cgroup *memcg) { return 0; } static bool cgroup_reclaim(struct scan_control *sc) { return false; } static bool root_reclaim(struct scan_control *sc) { return true; } static bool writeback_throttling_sane(struct scan_control *sc) { return true; } #endif static void set_task_reclaim_state(struct task_struct *task, struct reclaim_state *rs) { /* Check for an overwrite */ WARN_ON_ONCE(rs && task->reclaim_state); /* Check for the nulling of an already-nulled member */ WARN_ON_ONCE(!rs && !task->reclaim_state); task->reclaim_state = rs; } /* * flush_reclaim_state(): add pages reclaimed outside of LRU-based reclaim to * scan_control->nr_reclaimed. */ static void flush_reclaim_state(struct scan_control *sc) { /* * Currently, reclaim_state->reclaimed includes three types of pages * freed outside of vmscan: * (1) Slab pages. * (2) Clean file pages from pruned inodes (on highmem systems). * (3) XFS freed buffer pages. * * For all of these cases, we cannot universally link the pages to a * single memcg. For example, a memcg-aware shrinker can free one object * charged to the target memcg, causing an entire page to be freed. * If we count the entire page as reclaimed from the memcg, we end up * overestimating the reclaimed amount (potentially under-reclaiming). * * Only count such pages for global reclaim to prevent under-reclaiming * from the target memcg; preventing unnecessary retries during memcg * charging and false positives from proactive reclaim. * * For uncommon cases where the freed pages were actually mostly * charged to the target memcg, we end up underestimating the reclaimed * amount. This should be fine. 
The freed pages will be uncharged * anyway, even if they are not counted here properly, and we will be * able to make forward progress in charging (which is usually in a * retry loop). * * We can go one step further, and report the uncharged objcg pages in * memcg reclaim, to make reporting more accurate and reduce * underestimation, but it's probably not worth the complexity for now. */ if (current->reclaim_state && root_reclaim(sc)) { sc->nr_reclaimed += current->reclaim_state->reclaimed; current->reclaim_state->reclaimed = 0; } } static long xchg_nr_deferred(struct shrinker *shrinker, struct shrink_control *sc) { int nid = sc->nid; if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) nid = 0; if (sc->memcg && (shrinker->flags & SHRINKER_MEMCG_AWARE)) return xchg_nr_deferred_memcg(nid, shrinker, sc->memcg); return atomic_long_xchg(&shrinker->nr_deferred[nid], 0); } static long add_nr_deferred(long nr, struct shrinker *shrinker, struct shrink_control *sc) { int nid = sc->nid; if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) nid = 0; if (sc->memcg && (shrinker->flags & SHRINKER_MEMCG_AWARE)) return add_nr_deferred_memcg(nr, nid, shrinker, sc->memcg); return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]); } static bool can_demote(int nid, struct scan_control *sc) { if (!numa_demotion_enabled) return false; if (sc && sc->no_demotion) return false; if (next_demotion_node(nid) == NUMA_NO_NODE) return false; return true; } static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg, int nid, struct scan_control *sc) { if (memcg == NULL) { /* * For non-memcg reclaim, is there * space in any swap device? */ if (get_nr_swap_pages() > 0) return true; } else { /* Is the memcg below its swap limit? */ if (mem_cgroup_get_nr_swap_pages(memcg) > 0) return true; } /* * The page can not be swapped. * * Can it be reclaimed from this node via demotion? */ return can_demote(nid, sc); } /* * This misses isolated folios which are not accounted for to save counters. * As the data only determines if reclaim or compaction continues, it is * not expected that isolated folios will be a dominating factor. */ unsigned long zone_reclaimable_pages(struct zone *zone) { unsigned long nr; nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) + zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE); if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL)) nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) + zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON); return nr; } /** * lruvec_lru_size - Returns the number of pages on the given LRU list. * @lruvec: lru vector * @lru: lru to use * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list) */ static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx) { unsigned long size = 0; int zid; for (zid = 0; zid <= zone_idx; zid++) { struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; if (!managed_zone(zone)) continue; if (!mem_cgroup_disabled()) size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid); else size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru); } return size; } /* * Add a shrinker callback to be called from the vm. 
*/ static int __prealloc_shrinker(struct shrinker *shrinker) { unsigned int size; int err; if (shrinker->flags & SHRINKER_MEMCG_AWARE) { err = prealloc_memcg_shrinker(shrinker); if (err != -ENOSYS) return err; shrinker->flags &= ~SHRINKER_MEMCG_AWARE; } size = sizeof(*shrinker->nr_deferred); if (shrinker->flags & SHRINKER_NUMA_AWARE) size *= nr_node_ids; shrinker->nr_deferred = kzalloc(size, GFP_KERNEL); if (!shrinker->nr_deferred) return -ENOMEM; return 0; } #ifdef CONFIG_SHRINKER_DEBUG int prealloc_shrinker(struct shrinker *shrinker, const char *fmt, ...) { va_list ap; int err; va_start(ap, fmt); shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap); va_end(ap); if (!shrinker->name) return -ENOMEM; err = __prealloc_shrinker(shrinker); if (err) { kfree_const(shrinker->name); shrinker->name = NULL; } return err; } #else int prealloc_shrinker(struct shrinker *shrinker, const char *fmt, ...) { return __prealloc_shrinker(shrinker); } #endif void free_prealloced_shrinker(struct shrinker *shrinker) { #ifdef CONFIG_SHRINKER_DEBUG kfree_const(shrinker->name); shrinker->name = NULL; #endif if (shrinker->flags & SHRINKER_MEMCG_AWARE) { down_write(&shrinker_rwsem); unregister_memcg_shrinker(shrinker); up_write(&shrinker_rwsem); return; } kfree(shrinker->nr_deferred); shrinker->nr_deferred = NULL; } void register_shrinker_prepared(struct shrinker *shrinker) { down_write(&shrinker_rwsem); list_add_tail(&shrinker->list, &shrinker_list); shrinker->flags |= SHRINKER_REGISTERED; shrinker_debugfs_add(shrinker); up_write(&shrinker_rwsem); } static int __register_shrinker(struct shrinker *shrinker) { int err = __prealloc_shrinker(shrinker); if (err) return err; register_shrinker_prepared(shrinker); return 0; } #ifdef CONFIG_SHRINKER_DEBUG int register_shrinker(struct shrinker *shrinker, const char *fmt, ...) { va_list ap; int err; va_start(ap, fmt); shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap); va_end(ap); if (!shrinker->name) return -ENOMEM; err = __register_shrinker(shrinker); if (err) { kfree_const(shrinker->name); shrinker->name = NULL; } return err; } #else int register_shrinker(struct shrinker *shrinker, const char *fmt, ...) { return __register_shrinker(shrinker); } #endif EXPORT_SYMBOL(register_shrinker); /* * Remove one */ void unregister_shrinker(struct shrinker *shrinker) { struct dentry *debugfs_entry; int debugfs_id; if (!(shrinker->flags & SHRINKER_REGISTERED)) return; down_write(&shrinker_rwsem); list_del(&shrinker->list); shrinker->flags &= ~SHRINKER_REGISTERED; if (shrinker->flags & SHRINKER_MEMCG_AWARE) unregister_memcg_shrinker(shrinker); debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id); up_write(&shrinker_rwsem); shrinker_debugfs_remove(debugfs_entry, debugfs_id); kfree(shrinker->nr_deferred); shrinker->nr_deferred = NULL; } EXPORT_SYMBOL(unregister_shrinker); /** * synchronize_shrinkers - Wait for all running shrinkers to complete. * * This is equivalent to calling unregister_shrink() and register_shrinker(), * but atomically and with less overhead. This is useful to guarantee that all * shrinker invocations have seen an update, before freeing memory, similar to * rcu. 
*/ void synchronize_shrinkers(void) { down_write(&shrinker_rwsem); up_write(&shrinker_rwsem); } EXPORT_SYMBOL(synchronize_shrinkers); #define SHRINK_BATCH 128 static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, struct shrinker *shrinker, int priority) { unsigned long freed = 0; unsigned long long delta; long total_scan; long freeable; long nr; long new_nr; long batch_size = shrinker->batch ? shrinker->batch : SHRINK_BATCH; long scanned = 0, next_deferred; freeable = shrinker->count_objects(shrinker, shrinkctl); if (freeable == 0 || freeable == SHRINK_EMPTY) return freeable; /* * copy the current shrinker scan count into a local variable * and zero it so that other concurrent shrinker invocations * don't also do this scanning work. */ nr = xchg_nr_deferred(shrinker, shrinkctl); if (shrinker->seeks) { delta = freeable >> priority; delta *= 4; do_div(delta, shrinker->seeks); } else { /* * These objects don't require any IO to create. Trim * them aggressively under memory pressure to keep * them from causing refetches in the IO caches. */ delta = freeable / 2; } total_scan = nr >> priority; total_scan += delta; total_scan = min(total_scan, (2 * freeable)); trace_mm_shrink_slab_start(shrinker, shrinkctl, nr, freeable, delta, total_scan, priority); /* * Normally, we should not scan less than batch_size objects in one * pass to avoid too frequent shrinker calls, but if the slab has less * than batch_size objects in total and we are really tight on memory, * we will try to reclaim all available objects, otherwise we can end * up failing allocations although there are plenty of reclaimable * objects spread over several slabs with usage less than the * batch_size. * * We detect the "tight on memory" situations by looking at the total * number of objects we want to scan (total_scan). If it is greater * than the total number of objects on slab (freeable), we must be * scanning at high prio and therefore should try to reclaim as much as * possible. */ while (total_scan >= batch_size || total_scan >= freeable) { unsigned long ret; unsigned long nr_to_scan = min(batch_size, total_scan); shrinkctl->nr_to_scan = nr_to_scan; shrinkctl->nr_scanned = nr_to_scan; ret = shrinker->scan_objects(shrinker, shrinkctl); if (ret == SHRINK_STOP) break; freed += ret; count_vm_events(SLABS_SCANNED, shrinkctl->nr_scanned); total_scan -= shrinkctl->nr_scanned; scanned += shrinkctl->nr_scanned; cond_resched(); } /* * The deferred work is increased by any new work (delta) that wasn't * done, decreased by old deferred work that was done now. * * And it is capped to two times of the freeable items. */ next_deferred = max_t(long, (nr + delta - scanned), 0); next_deferred = min(next_deferred, (2 * freeable)); /* * move the unused scan count back into the shrinker in a * manner that handles concurrent updates. 
*/ new_nr = add_nr_deferred(next_deferred, shrinker, shrinkctl); trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan); return freed; } #ifdef CONFIG_MEMCG static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, int priority) { struct shrinker_info *info; unsigned long ret, freed = 0; int i; if (!mem_cgroup_online(memcg)) return 0; if (!down_read_trylock(&shrinker_rwsem)) return 0; info = shrinker_info_protected(memcg, nid); if (unlikely(!info)) goto unlock; for_each_set_bit(i, info->map, info->map_nr_max) { struct shrink_control sc = { .gfp_mask = gfp_mask, .nid = nid, .memcg = memcg, }; struct shrinker *shrinker; shrinker = idr_find(&shrinker_idr, i); if (unlikely(!shrinker || !(shrinker->flags & SHRINKER_REGISTERED))) { if (!shrinker) clear_bit(i, info->map); continue; } /* Call non-slab shrinkers even though kmem is disabled */ if (!memcg_kmem_online() && !(shrinker->flags & SHRINKER_NONSLAB)) continue; ret = do_shrink_slab(&sc, shrinker, priority); if (ret == SHRINK_EMPTY) { clear_bit(i, info->map); /* * After the shrinker reported that it had no objects to * free, but before we cleared the corresponding bit in * the memcg shrinker map, a new object might have been * added. To make sure, we have the bit set in this * case, we invoke the shrinker one more time and reset * the bit if it reports that it is not empty anymore. * The memory barrier here pairs with the barrier in * set_shrinker_bit(): * * list_lru_add() shrink_slab_memcg() * list_add_tail() clear_bit() * <MB> <MB> * set_bit() do_shrink_slab() */ smp_mb__after_atomic(); ret = do_shrink_slab(&sc, shrinker, priority); if (ret == SHRINK_EMPTY) ret = 0; else set_shrinker_bit(memcg, nid, i); } freed += ret; if (rwsem_is_contended(&shrinker_rwsem)) { freed = freed ? : 1; break; } } unlock: up_read(&shrinker_rwsem); return freed; } #else /* CONFIG_MEMCG */ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, int priority) { return 0; } #endif /* CONFIG_MEMCG */ /** * shrink_slab - shrink slab caches * @gfp_mask: allocation context * @nid: node whose slab caches to target * @memcg: memory cgroup whose slab caches to target * @priority: the reclaim priority * * Call the shrink functions to age shrinkable caches. * * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set, * unaware shrinkers will receive a node id of 0 instead. * * @memcg specifies the memory cgroup to target. Unaware shrinkers * are called only if it is the root cgroup. * * @priority is sc->priority, we take the number of objects and >> by priority * in order to get the scan target. * * Returns the number of reclaimed slab objects. */ static unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, int priority) { unsigned long ret, freed = 0; struct shrinker *shrinker; /* * The root memcg might be allocated even though memcg is disabled * via "cgroup_disable=memory" boot parameter. This could make * mem_cgroup_is_root() return false, then just run memcg slab * shrink, but skip global shrink. This may result in premature * oom. 
*/ if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg)) return shrink_slab_memcg(gfp_mask, nid, memcg, priority); if (!down_read_trylock(&shrinker_rwsem)) goto out; list_for_each_entry(shrinker, &shrinker_list, list) { struct shrink_control sc = { .gfp_mask = gfp_mask, .nid = nid, .memcg = memcg, }; ret = do_shrink_slab(&sc, shrinker, priority); if (ret == SHRINK_EMPTY) ret = 0; freed += ret; /* * Bail out if someone want to register a new shrinker to * prevent the registration from being stalled for long periods * by parallel ongoing shrinking. */ if (rwsem_is_contended(&shrinker_rwsem)) { freed = freed ? : 1; break; } } up_read(&shrinker_rwsem); out: cond_resched(); return freed; } static unsigned long drop_slab_node(int nid) { unsigned long freed = 0; struct mem_cgroup *memcg = NULL; memcg = mem_cgroup_iter(NULL, NULL, NULL); do { freed += shrink_slab(GFP_KERNEL, nid, memcg, 0); } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); return freed; } void drop_slab(void) { int nid; int shift = 0; unsigned long freed; do { freed = 0; for_each_online_node(nid) { if (fatal_signal_pending(current)) return; freed += drop_slab_node(nid); } } while ((freed >> shift++) > 1); } static int reclaimer_offset(void) { BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD); BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != PGSCAN_DIRECT - PGSCAN_KSWAPD); BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD); BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD); if (current_is_kswapd()) return 0; if (current_is_khugepaged()) return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD; return PGSTEAL_DIRECT - PGSTEAL_KSWAPD; } static inline int is_page_cache_freeable(struct folio *folio) { /* * A freeable page cache folio is referenced only by the caller * that isolated the folio, the page cache and optional filesystem * private data at folio->private. */ return folio_ref_count(folio) - folio_test_private(folio) == 1 + folio_nr_pages(folio); } /* * We detected a synchronous write error writing a folio out. Probably * -ENOSPC. We need to propagate that into the address_space for a subsequent * fsync(), msync() or close(). * * The tricky part is that after writepage we cannot touch the mapping: nothing * prevents it from being freed up. But we have a ref on the folio and once * that folio is locked, the mapping is pinned. * * We're allowed to run sleeping folio_lock() here because we know the caller has * __GFP_FS. */ static void handle_write_error(struct address_space *mapping, struct folio *folio, int error) { folio_lock(folio); if (folio_mapping(folio) == mapping) mapping_set_error(mapping, error); folio_unlock(folio); } static bool skip_throttle_noprogress(pg_data_t *pgdat) { int reclaimable = 0, write_pending = 0; int i; /* * If kswapd is disabled, reschedule if necessary but do not * throttle as the system is likely near OOM. */ if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) return true; /* * If there are a lot of dirty/writeback folios then do not * throttle as throttling will occur when the folios cycle * towards the end of the LRU if still under writeback. 
*/ for (i = 0; i < MAX_NR_ZONES; i++) { struct zone *zone = pgdat->node_zones + i; if (!managed_zone(zone)) continue; reclaimable += zone_reclaimable_pages(zone); write_pending += zone_page_state_snapshot(zone, NR_ZONE_WRITE_PENDING); } if (2 * write_pending <= reclaimable) return true; return false; } void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason) { wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason]; long timeout, ret; DEFINE_WAIT(wait); /* * Do not throttle user workers, kthreads other than kswapd or * workqueues. They may be required for reclaim to make * forward progress (e.g. journalling workqueues or kthreads). */ if (!current_is_kswapd() && current->flags & (PF_USER_WORKER|PF_KTHREAD)) { cond_resched(); return; } /* * These figures are pulled out of thin air. * VMSCAN_THROTTLE_ISOLATED is a transient condition based on too many * parallel reclaimers which is a short-lived event so the timeout is * short. Failing to make progress or waiting on writeback are * potentially long-lived events so use a longer timeout. This is shaky * logic as a failure to make progress could be due to anything from * writeback to a slow device to excessive referenced folios at the tail * of the inactive LRU. */ switch(reason) { case VMSCAN_THROTTLE_WRITEBACK: timeout = HZ/10; if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) { WRITE_ONCE(pgdat->nr_reclaim_start, node_page_state(pgdat, NR_THROTTLED_WRITTEN)); } break; case VMSCAN_THROTTLE_CONGESTED: fallthrough; case VMSCAN_THROTTLE_NOPROGRESS: if (skip_throttle_noprogress(pgdat)) { cond_resched(); return; } timeout = 1; break; case VMSCAN_THROTTLE_ISOLATED: timeout = HZ/50; break; default: WARN_ON_ONCE(1); timeout = HZ; break; } prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); ret = schedule_timeout(timeout); finish_wait(wqh, &wait); if (reason == VMSCAN_THROTTLE_WRITEBACK) atomic_dec(&pgdat->nr_writeback_throttled); trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout), jiffies_to_usecs(timeout - ret), reason); } /* * Account for folios written if tasks are throttled waiting on dirty * folios to clean. If enough folios have been cleaned since throttling * started then wakeup the throttled tasks. */ void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio, int nr_throttled) { unsigned long nr_written; node_stat_add_folio(folio, NR_THROTTLED_WRITTEN); /* * This is an inaccurate read as the per-cpu deltas may not * be synchronised. However, given that the system is * writeback throttled, it is not worth taking the penalty * of getting an accurate count. At worst, the throttle * timeout guarantees forward progress. */ nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) - READ_ONCE(pgdat->nr_reclaim_start); if (nr_written > SWAP_CLUSTER_MAX * nr_throttled) wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]); } /* possible outcome of pageout() */ typedef enum { /* failed to write folio out, folio is locked */ PAGE_KEEP, /* move folio to the active list, folio is locked */ PAGE_ACTIVATE, /* folio has been sent to the disk successfully, folio is unlocked */ PAGE_SUCCESS, /* folio is clean and locked */ PAGE_CLEAN, } pageout_t; /* * pageout is called by shrink_folio_list() for each dirty folio. * Calls ->writepage(). */ static pageout_t pageout(struct folio *folio, struct address_space *mapping, struct swap_iocb **plug) { /* * If the folio is dirty, only perform writeback if that write * will be non-blocking. To prevent this allocation from being * stalled by pagecache activity. 
But note that there may be * stalls if we need to run get_block(). We could test * PagePrivate for that. * * If this process is currently in __generic_file_write_iter() against * this folio's queue, we can perform writeback even if that * will block. * * If the folio is swapcache, write it back even if that would * block, for some throttling. This happens by accident, because * swap_backing_dev_info is bust: it doesn't reflect the * congestion state of the swapdevs. Easy to fix, if needed. */ if (!is_page_cache_freeable(folio)) return PAGE_KEEP; if (!mapping) { /* * Some data journaling orphaned folios can have * folio->mapping == NULL while being dirty with clean buffers. */ if (folio_test_private(folio)) { if (try_to_free_buffers(folio)) { folio_clear_dirty(folio); pr_info("%s: orphaned folio\n", __func__); return PAGE_CLEAN; } } return PAGE_KEEP; } if (mapping->a_ops->writepage == NULL) return PAGE_ACTIVATE; if (folio_clear_dirty_for_io(folio)) { int res; struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, .nr_to_write = SWAP_CLUSTER_MAX, .range_start = 0, .range_end = LLONG_MAX, .for_reclaim = 1, .swap_plug = plug, }; folio_set_reclaim(folio); res = mapping->a_ops->writepage(&folio->page, &wbc); if (res < 0) handle_write_error(mapping, folio, res); if (res == AOP_WRITEPAGE_ACTIVATE) { folio_clear_reclaim(folio); return PAGE_ACTIVATE; } if (!folio_test_writeback(folio)) { /* synchronous write or broken a_ops? */ folio_clear_reclaim(folio); } trace_mm_vmscan_write_folio(folio); node_stat_add_folio(folio, NR_VMSCAN_WRITE); return PAGE_SUCCESS; } return PAGE_CLEAN; } /* * Same as remove_mapping, but if the folio is removed from the mapping, it * gets returned with a refcount of 0. */ static int __remove_mapping(struct address_space *mapping, struct folio *folio, bool reclaimed, struct mem_cgroup *target_memcg) { int refcount; void *shadow = NULL; BUG_ON(!folio_test_locked(folio)); BUG_ON(mapping != folio_mapping(folio)); if (!folio_test_swapcache(folio)) spin_lock(&mapping->host->i_lock); xa_lock_irq(&mapping->i_pages); /* * The non racy check for a busy folio. * * Must be careful with the order of the tests. When someone has * a ref to the folio, it may be possible that they dirty it then * drop the reference. So if the dirty flag is tested before the * refcount here, then the following race may occur: * * get_user_pages(&page); * [user mapping goes away] * write_to(page); * !folio_test_dirty(folio) [good] * folio_set_dirty(folio); * folio_put(folio); * !refcount(folio) [good, discard it] * * [oops, our write_to data is lost] * * Reversing the order of the tests ensures such a situation cannot * escape unnoticed. The smp_rmb is needed to ensure the folio->flags * load is not satisfied before that of folio->_refcount. * * Note that if the dirty flag is always set via folio_mark_dirty, * and thus under the i_pages lock, then this ordering is not required. 
*/ refcount = 1 + folio_nr_pages(folio); if (!folio_ref_freeze(folio, refcount)) goto cannot_free; /* note: atomic_cmpxchg in folio_ref_freeze provides the smp_rmb */ if (unlikely(folio_test_dirty(folio))) { folio_ref_unfreeze(folio, refcount); goto cannot_free; } if (folio_test_swapcache(folio)) { swp_entry_t swap = folio->swap; if (reclaimed && !mapping_exiting(mapping)) shadow = workingset_eviction(folio, target_memcg); __delete_from_swap_cache(folio, swap, shadow); mem_cgroup_swapout(folio, swap); xa_unlock_irq(&mapping->i_pages); put_swap_folio(folio, swap); } else { void (*free_folio)(struct folio *); free_folio = mapping->a_ops->free_folio; /* * Remember a shadow entry for reclaimed file cache in * order to detect refaults, thus thrashing, later on. * * But don't store shadows in an address space that is * already exiting. This is not just an optimization, * inode reclaim needs to empty out the radix tree or * the nodes are lost. Don't plant shadows behind its * back. * * We also don't store shadows for DAX mappings because the * only page cache folios found in these are zero pages * covering holes, and because we don't want to mix DAX * exceptional entries and shadow exceptional entries in the * same address_space. */ if (reclaimed && folio_is_file_lru(folio) && !mapping_exiting(mapping) && !dax_mapping(mapping)) shadow = workingset_eviction(folio, target_memcg); __filemap_remove_folio(folio, shadow); xa_unlock_irq(&mapping->i_pages); if (mapping_shrinkable(mapping)) inode_add_lru(mapping->host); spin_unlock(&mapping->host->i_lock); if (free_folio) free_folio(folio); } return 1; cannot_free: xa_unlock_irq(&mapping->i_pages); if (!folio_test_swapcache(folio)) spin_unlock(&mapping->host->i_lock); return 0; } /** * remove_mapping() - Attempt to remove a folio from its mapping. * @mapping: The address space. * @folio: The folio to remove. * * If the folio is dirty, under writeback or if someone else has a ref * on it, removal will fail. * Return: The number of pages removed from the mapping. 0 if the folio * could not be removed. * Context: The caller should have a single refcount on the folio and * hold its lock. */ long remove_mapping(struct address_space *mapping, struct folio *folio) { if (__remove_mapping(mapping, folio, false, NULL)) { /* * Unfreezing the refcount with 1 effectively * drops the pagecache ref for us without requiring another * atomic operation. */ folio_ref_unfreeze(folio, 1); return folio_nr_pages(folio); } return 0; } /** * folio_putback_lru - Put previously isolated folio onto appropriate LRU list. * @folio: Folio to be returned to an LRU list. * * Add previously isolated @folio to appropriate LRU list. * The folio may still be unevictable for other reasons. * * Context: lru_lock must not be held, interrupts must be enabled. */ void folio_putback_lru(struct folio *folio) { folio_add_lru(folio); folio_put(folio); /* drop ref from isolate */ } enum folio_references { FOLIOREF_RECLAIM, FOLIOREF_RECLAIM_CLEAN, FOLIOREF_KEEP, FOLIOREF_ACTIVATE, }; static enum folio_references folio_check_references(struct folio *folio, struct scan_control *sc) { int referenced_ptes, referenced_folio; unsigned long vm_flags; referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup, &vm_flags); referenced_folio = folio_test_clear_referenced(folio); /* * The supposedly reclaimable folio was found to be in a VM_LOCKED vma. * Let the folio, now marked Mlocked, be moved to the unevictable list. 
*/ if (vm_flags & VM_LOCKED) return FOLIOREF_ACTIVATE; /* rmap lock contention: rotate */ if (referenced_ptes == -1) return FOLIOREF_KEEP; if (referenced_ptes) { /* * All mapped folios start out with page table * references from the instantiating fault, so we need * to look twice if a mapped file/anon folio is used more * than once. * * Mark it and spare it for another trip around the * inactive list. Another page table reference will * lead to its activation. * * Note: the mark is set for activated folios as well * so that recently deactivated but used folios are * quickly recovered. */ folio_set_referenced(folio); if (referenced_folio || referenced_ptes > 1) return FOLIOREF_ACTIVATE; /* * Activate file-backed executable folios after first usage. */ if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) return FOLIOREF_ACTIVATE; return FOLIOREF_KEEP; } /* Reclaim if clean, defer dirty folios to writeback */ if (referenced_folio && folio_is_file_lru(folio)) return FOLIOREF_RECLAIM_CLEAN; return FOLIOREF_RECLAIM; } /* Check if a folio is dirty or under writeback */ static void folio_check_dirty_writeback(struct folio *folio, bool *dirty, bool *writeback) { struct address_space *mapping; /* * Anonymous folios are not handled by flushers and must be written * from reclaim context. Do not stall reclaim based on them. * MADV_FREE anonymous folios are put into inactive file list too. * They could be mistakenly treated as file lru. So further anon * test is needed. */ if (!folio_is_file_lru(folio) || (folio_test_anon(folio) && !folio_test_swapbacked(folio))) { *dirty = false; *writeback = false; return; } /* By default assume that the folio flags are accurate */ *dirty = folio_test_dirty(folio); *writeback = folio_test_writeback(folio); /* Verify dirty/writeback state if the filesystem supports it */ if (!folio_test_private(folio)) return; mapping = folio_mapping(folio); if (mapping && mapping->a_ops->is_dirty_writeback) mapping->a_ops->is_dirty_writeback(folio, dirty, writeback); } static struct folio *alloc_demote_folio(struct folio *src, unsigned long private) { struct folio *dst; nodemask_t *allowed_mask; struct migration_target_control *mtc; mtc = (struct migration_target_control *)private; allowed_mask = mtc->nmask; /* * make sure we allocate from the target node first also trying to * demote or reclaim pages from the target node via kswapd if we are * low on free memory on target node. If we don't do this and if * we have free memory on the slower(lower) memtier, we would start * allocating pages from slower(lower) memory tiers without even forcing * a demotion of cold pages from the target memtier. This can result * in the kernel placing hot pages in slower(lower) memory tiers. */ mtc->nmask = NULL; mtc->gfp_mask |= __GFP_THISNODE; dst = alloc_migration_target(src, (unsigned long)mtc); if (dst) return dst; mtc->gfp_mask &= ~__GFP_THISNODE; mtc->nmask = allowed_mask; return alloc_migration_target(src, (unsigned long)mtc); } /* * Take folios on @demote_folios and attempt to demote them to another node. * Folios which are not demoted are left on @demote_folios. */ static unsigned int demote_folio_list(struct list_head *demote_folios, struct pglist_data *pgdat) { int target_nid = next_demotion_node(pgdat->node_id); unsigned int nr_succeeded; nodemask_t allowed_mask; struct migration_target_control mtc = { /* * Allocate from 'node', or fail quickly and quietly. * When this happens, 'page' will likely just be discarded * instead of migrated. 
*/ .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN | __GFP_NOMEMALLOC | GFP_NOWAIT, .nid = target_nid, .nmask = &allowed_mask }; if (list_empty(demote_folios)) return 0; if (target_nid == NUMA_NO_NODE) return 0; node_get_allowed_targets(pgdat, &allowed_mask); /* Demotion ignores all cpuset and mempolicy settings */ migrate_pages(demote_folios, alloc_demote_folio, NULL, (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION, &nr_succeeded); __count_vm_events(PGDEMOTE_KSWAPD + reclaimer_offset(), nr_succeeded); return nr_succeeded; } static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask) { if (gfp_mask & __GFP_FS) return true; if (!folio_test_swapcache(folio) || !(gfp_mask & __GFP_IO)) return false; /* * We can "enter_fs" for swap-cache with only __GFP_IO * providing this isn't SWP_FS_OPS. * ->flags can be updated non-atomicially (scan_swap_map_slots), * but that will never affect SWP_FS_OPS, so the data_race * is safe. */ return !data_race(folio_swap_flags(folio) & SWP_FS_OPS); } /* * shrink_folio_list() returns the number of reclaimed pages */ static unsigned int shrink_folio_list(struct list_head *folio_list, struct pglist_data *pgdat, struct scan_control *sc, struct reclaim_stat *stat, bool ignore_references) { LIST_HEAD(ret_folios); LIST_HEAD(free_folios); LIST_HEAD(demote_folios); unsigned int nr_reclaimed = 0; unsigned int pgactivate = 0; bool do_demote_pass; struct swap_iocb *plug = NULL; memset(stat, 0, sizeof(*stat)); cond_resched(); do_demote_pass = can_demote(pgdat->node_id, sc); retry: while (!list_empty(folio_list)) { struct address_space *mapping; struct folio *folio; enum folio_references references = FOLIOREF_RECLAIM; bool dirty, writeback; unsigned int nr_pages; cond_resched(); folio = lru_to_folio(folio_list); list_del(&folio->lru); if (!folio_trylock(folio)) goto keep; VM_BUG_ON_FOLIO(folio_test_active(folio), folio); nr_pages = folio_nr_pages(folio); /* Account the number of base pages */ sc->nr_scanned += nr_pages; if (unlikely(!folio_evictable(folio))) goto activate_locked; if (!sc->may_unmap && folio_mapped(folio)) goto keep_locked; /* folio_update_gen() tried to promote this page? */ if (lru_gen_enabled() && !ignore_references && folio_mapped(folio) && folio_test_referenced(folio)) goto keep_locked; /* * The number of dirty pages determines if a node is marked * reclaim_congested. kswapd will stall and start writing * folios if the tail of the LRU is all dirty unqueued folios. */ folio_check_dirty_writeback(folio, &dirty, &writeback); if (dirty || writeback) stat->nr_dirty += nr_pages; if (dirty && !writeback) stat->nr_unqueued_dirty += nr_pages; /* * Treat this folio as congested if folios are cycling * through the LRU so quickly that the folios marked * for immediate reclaim are making it to the end of * the LRU a second time. */ if (writeback && folio_test_reclaim(folio)) stat->nr_congested += nr_pages; /* * If a folio at the tail of the LRU is under writeback, there * are three cases to consider. * * 1) If reclaim is encountering an excessive number * of folios under writeback and this folio has both * the writeback and reclaim flags set, then it * indicates that folios are being queued for I/O but * are being recycled through the LRU before the I/O * can complete. Waiting on the folio itself risks an * indefinite stall if it is impossible to writeback * the folio due to I/O error or disconnected storage * so instead note that the LRU is being scanned too * quickly and the caller can stall after the folio * list has been processed. 
* * 2) Global or new memcg reclaim encounters a folio that is * not marked for immediate reclaim, or the caller does not * have __GFP_FS (or __GFP_IO if it's simply going to swap, * not to fs). In this case mark the folio for immediate * reclaim and continue scanning. * * Require may_enter_fs() because we would wait on fs, which * may not have submitted I/O yet. And the loop driver might * enter reclaim, and deadlock if it waits on a folio for * which it is needed to do the write (loop masks off * __GFP_IO|__GFP_FS for this reason); but more thought * would probably show more reasons. * * 3) Legacy memcg encounters a folio that already has the * reclaim flag set. memcg does not have any dirty folio * throttling so we could easily OOM just because too many * folios are in writeback and there is nothing else to * reclaim. Wait for the writeback to complete. * * In cases 1) and 2) we activate the folios to get them out of * the way while we continue scanning for clean folios on the * inactive list and refilling from the active list. The * observation here is that waiting for disk writes is more * expensive than potentially causing reloads down the line. * Since they're marked for immediate reclaim, they won't put * memory pressure on the cache working set any longer than it * takes to write them to disk. */ if (folio_test_writeback(folio)) { /* Case 1 above */ if (current_is_kswapd() && folio_test_reclaim(folio) && test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { stat->nr_immediate += nr_pages; goto activate_locked; /* Case 2 above */ } else if (writeback_throttling_sane(sc) || !folio_test_reclaim(folio) || !may_enter_fs(folio, sc->gfp_mask)) { /* * This is slightly racy - * folio_end_writeback() might have * just cleared the reclaim flag, then * setting the reclaim flag here ends up * interpreted as the readahead flag - but * that does not matter enough to care. * What we do want is for this folio to * have the reclaim flag set next time * memcg reclaim reaches the tests above, * so it will then wait for writeback to * avoid OOM; and it's also appropriate * in global reclaim. */ folio_set_reclaim(folio); stat->nr_writeback += nr_pages; goto activate_locked; /* Case 3 above */ } else { folio_unlock(folio); folio_wait_writeback(folio); /* then go back and try same folio again */ list_add_tail(&folio->lru, folio_list); continue; } } if (!ignore_references) references = folio_check_references(folio, sc); switch (references) { case FOLIOREF_ACTIVATE: goto activate_locked; case FOLIOREF_KEEP: stat->nr_ref_keep += nr_pages; goto keep_locked; case FOLIOREF_RECLAIM: case FOLIOREF_RECLAIM_CLEAN: ; /* try to reclaim the folio below */ } /* * Before reclaiming the folio, try to relocate * its contents to another node. */ if (do_demote_pass && (thp_migration_supported() || !folio_test_large(folio))) { list_add(&folio->lru, &demote_folios); folio_unlock(folio); continue; } /* * Anonymous process memory has backing store? * Try to allocate it some swap space here. * Lazyfree folio could be freed directly */ if (folio_test_anon(folio) && folio_test_swapbacked(folio)) { if (!folio_test_swapcache(folio)) { if (!(sc->gfp_mask & __GFP_IO)) goto keep_locked; if (folio_maybe_dma_pinned(folio)) goto keep_locked; if (folio_test_large(folio)) { /* cannot split folio, skip it */ if (!can_split_folio(folio, NULL)) goto activate_locked; /* * Split folios without a PMD map right * away. Chances are some or all of the * tail pages can be freed without IO. 
*/ if (!folio_entire_mapcount(folio) && split_folio_to_list(folio, folio_list)) goto activate_locked; } if (!add_to_swap(folio)) { if (!folio_test_large(folio)) goto activate_locked_split; /* Fallback to swap normal pages */ if (split_folio_to_list(folio, folio_list)) goto activate_locked; #ifdef CONFIG_TRANSPARENT_HUGEPAGE count_vm_event(THP_SWPOUT_FALLBACK); #endif if (!add_to_swap(folio)) goto activate_locked_split; } } } else if (folio_test_swapbacked(folio) && folio_test_large(folio)) { /* Split shmem folio */ if (split_folio_to_list(folio, folio_list)) goto keep_locked; } /* * If the folio was split above, the tail pages will make * their own pass through this function and be accounted * then. */ if ((nr_pages > 1) && !folio_test_large(folio)) { sc->nr_scanned -= (nr_pages - 1); nr_pages = 1; } /* * The folio is mapped into the page tables of one or more * processes. Try to unmap it here. */ if (folio_mapped(folio)) { enum ttu_flags flags = TTU_BATCH_FLUSH; bool was_swapbacked = folio_test_swapbacked(folio); if (folio_test_pmd_mappable(folio)) flags |= TTU_SPLIT_HUGE_PMD; try_to_unmap(folio, flags); if (folio_mapped(folio)) { stat->nr_unmap_fail += nr_pages; if (!was_swapbacked && folio_test_swapbacked(folio)) stat->nr_lazyfree_fail += nr_pages; goto activate_locked; } } /* * Folio is unmapped now so it cannot be newly pinned anymore. * No point in trying to reclaim folio if it is pinned. * Furthermore we don't want to reclaim underlying fs metadata * if the folio is pinned and thus potentially modified by the * pinning process as that may upset the filesystem. */ if (folio_maybe_dma_pinned(folio)) goto activate_locked; mapping = folio_mapping(folio); if (folio_test_dirty(folio)) { /* * Only kswapd can writeback filesystem folios * to avoid risk of stack overflow. But avoid * injecting inefficient single-folio I/O into * flusher writeback as much as possible: only * write folios when we've encountered many * dirty folios, and when we've already scanned * the rest of the LRU for clean folios and see * the same dirty folios again (with the reclaim * flag set). */ if (folio_is_file_lru(folio) && (!current_is_kswapd() || !folio_test_reclaim(folio) || !test_bit(PGDAT_DIRTY, &pgdat->flags))) { /* * Immediately reclaim when written back. * Similar in principle to folio_deactivate() * except we already have the folio isolated * and know it's dirty */ node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE, nr_pages); folio_set_reclaim(folio); goto activate_locked; } if (references == FOLIOREF_RECLAIM_CLEAN) goto keep_locked; if (!may_enter_fs(folio, sc->gfp_mask)) goto keep_locked; if (!sc->may_writepage) goto keep_locked; /* * Folio is dirty. Flush the TLB if a writable entry * potentially exists to avoid CPU writes after I/O * starts and then write it out here. */ try_to_unmap_flush_dirty(); switch (pageout(folio, mapping, &plug)) { case PAGE_KEEP: goto keep_locked; case PAGE_ACTIVATE: goto activate_locked; case PAGE_SUCCESS: stat->nr_pageout += nr_pages; if (folio_test_writeback(folio)) goto keep; if (folio_test_dirty(folio)) goto keep; /* * A synchronous write - probably a ramdisk. Go * ahead and try to reclaim the folio. */ if (!folio_trylock(folio)) goto keep; if (folio_test_dirty(folio) || folio_test_writeback(folio)) goto keep_locked; mapping = folio_mapping(folio); fallthrough; case PAGE_CLEAN: ; /* try to free the folio below */ } } /* * If the folio has buffers, try to free the buffer * mappings associated with this folio. If we succeed * we try to free the folio as well. 
* * We do this even if the folio is dirty. * filemap_release_folio() does not perform I/O, but it * is possible for a folio to have the dirty flag set, * but it is actually clean (all its buffers are clean). * This happens if the buffers were written out directly, * with submit_bh(). ext3 will do this, as well as * the blockdev mapping. filemap_release_folio() will * discover that cleanness and will drop the buffers * and mark the folio clean - it can be freed. * * Rarely, folios can have buffers and no ->mapping. * These are the folios which were not successfully * invalidated in truncate_cleanup_folio(). We try to * drop those buffers here and if that worked, and the * folio is no longer mapped into process address space * (refcount == 1) it can be freed. Otherwise, leave * the folio on the LRU so it is swappable. */ if (folio_needs_release(folio)) { if (!filemap_release_folio(folio, sc->gfp_mask)) goto activate_locked; if (!mapping && folio_ref_count(folio) == 1) { folio_unlock(folio); if (folio_put_testzero(folio)) goto free_it; else { /* * rare race with speculative reference. * the speculative reference will free * this folio shortly, so we may * increment nr_reclaimed here (and * leave it off the LRU). */ nr_reclaimed += nr_pages; continue; } } } if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) { /* follow __remove_mapping for reference */ if (!folio_ref_freeze(folio, 1)) goto keep_locked; /* * The folio has only one reference left, which is * from the isolation. After the caller puts the * folio back on the lru and drops the reference, the * folio will be freed anyway. It doesn't matter * which lru it goes on. So we don't bother checking * the dirty flag here. */ count_vm_events(PGLAZYFREED, nr_pages); count_memcg_folio_events(folio, PGLAZYFREED, nr_pages); } else if (!mapping || !__remove_mapping(mapping, folio, true, sc->target_mem_cgroup)) goto keep_locked; folio_unlock(folio); free_it: /* * Folio may get swapped out as a whole, need to account * all pages in it. */ nr_reclaimed += nr_pages; /* * Is there need to periodically free_folio_list? It would * appear not as the counts should be low */ if (unlikely(folio_test_large(folio))) destroy_large_folio(folio); else list_add(&folio->lru, &free_folios); continue; activate_locked_split: /* * The tail pages that are failed to add into swap cache * reach here. Fixup nr_scanned and nr_pages. */ if (nr_pages > 1) { sc->nr_scanned -= (nr_pages - 1); nr_pages = 1; } activate_locked: /* Not a candidate for swapping, so reclaim swap space. */ if (folio_test_swapcache(folio) && (mem_cgroup_swap_full(folio) || folio_test_mlocked(folio))) folio_free_swap(folio); VM_BUG_ON_FOLIO(folio_test_active(folio), folio); if (!folio_test_mlocked(folio)) { int type = folio_is_file_lru(folio); folio_set_active(folio); stat->nr_activate[type] += nr_pages; count_memcg_folio_events(folio, PGACTIVATE, nr_pages); } keep_locked: folio_unlock(folio); keep: list_add(&folio->lru, &ret_folios); VM_BUG_ON_FOLIO(folio_test_lru(folio) || folio_test_unevictable(folio), folio); } /* 'folio_list' is always empty here */ /* Migrate folios selected for demotion */ nr_reclaimed += demote_folio_list(&demote_folios, pgdat); /* Folios that could not be demoted are still in @demote_folios */ if (!list_empty(&demote_folios)) { /* Folios which weren't demoted go back on @folio_list */ list_splice_init(&demote_folios, folio_list); /* * goto retry to reclaim the undemoted folios in folio_list if * desired. 
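		 * (Note: on that retry pass do_demote_pass is cleared
		 * below, so the folios that could not be demoted are
		 * reclaimed in place rather than queued for demotion
		 * again.)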
* * Reclaiming directly from top tier nodes is not often desired * due to it breaking the LRU ordering: in general memory * should be reclaimed from lower tier nodes and demoted from * top tier nodes. * * However, disabling reclaim from top tier nodes entirely * would cause ooms in edge scenarios where lower tier memory * is unreclaimable for whatever reason, eg memory being * mlocked or too hot to reclaim. We can disable reclaim * from top tier nodes in proactive reclaim though as that is * not real memory pressure. */ if (!sc->proactive) { do_demote_pass = false; goto retry; } } pgactivate = stat->nr_activate[0] + stat->nr_activate[1]; mem_cgroup_uncharge_list(&free_folios); try_to_unmap_flush(); free_unref_page_list(&free_folios); list_splice(&ret_folios, folio_list); count_vm_events(PGACTIVATE, pgactivate); if (plug) swap_write_unplug(plug); return nr_reclaimed; } unsigned int reclaim_clean_pages_from_list(struct zone *zone, struct list_head *folio_list) { struct scan_control sc = { .gfp_mask = GFP_KERNEL, .may_unmap = 1, }; struct reclaim_stat stat; unsigned int nr_reclaimed; struct folio *folio, *next; LIST_HEAD(clean_folios); unsigned int noreclaim_flag; list_for_each_entry_safe(folio, next, folio_list, lru) { if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) && !folio_test_dirty(folio) && !__folio_test_movable(folio) && !folio_test_unevictable(folio)) { folio_clear_active(folio); list_move(&folio->lru, &clean_folios); } } /* * We should be safe here since we are only dealing with file pages and * we are not kswapd and therefore cannot write dirty file pages. But * call memalloc_noreclaim_save() anyway, just in case these conditions * change in the future. */ noreclaim_flag = memalloc_noreclaim_save(); nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc, &stat, true); memalloc_noreclaim_restore(noreclaim_flag); list_splice(&clean_folios, folio_list); mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -(long)nr_reclaimed); /* * Since lazyfree pages are isolated from file LRU from the beginning, * they will rotate back to anonymous LRU in the end if it failed to * discard so isolated count will be mismatched. * Compensate the isolated count for both LRU lists. */ mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, stat.nr_lazyfree_fail); mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, -(long)stat.nr_lazyfree_fail); return nr_reclaimed; } /* * Update LRU sizes after isolating pages. The LRU size updates must * be complete before mem_cgroup_update_lru_size due to a sanity check. */ static __always_inline void update_lru_sizes(struct lruvec *lruvec, enum lru_list lru, unsigned long *nr_zone_taken) { int zid; for (zid = 0; zid < MAX_NR_ZONES; zid++) { if (!nr_zone_taken[zid]) continue; update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); } } #ifdef CONFIG_CMA /* * It is waste of effort to scan and reclaim CMA pages if it is not available * for current allocation context. Kswapd can not be enrolled as it can not * distinguish this scenario by using sc->gfp_mask = GFP_KERNEL */ static bool skip_cma(struct folio *folio, struct scan_control *sc) { return !current_is_kswapd() && gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE && get_pageblock_migratetype(&folio->page) == MIGRATE_CMA; } #else static bool skip_cma(struct folio *folio, struct scan_control *sc) { return false; } #endif /* * Isolating page from the lruvec to fill in @dst list by nr_to_scan times. * * lruvec->lru_lock is heavily contended. 
Some of the functions that * shrink the lists perform better by taking out a batch of pages * and working on them outside the LRU lock. * * For pagecache intensive workloads, this function is the hottest * spot in the kernel (apart from copy_*_user functions). * * Lru_lock must be held before calling this function. * * @nr_to_scan: The number of eligible pages to look through on the list. * @lruvec: The LRU vector to pull pages from. * @dst: The temp list to put pages on to. * @nr_scanned: The number of pages that were scanned. * @sc: The scan_control struct for this reclaim session * @lru: LRU list id for isolating * * returns how many pages were moved onto *@dst. */ static unsigned long isolate_lru_folios(unsigned long nr_to_scan, struct lruvec *lruvec, struct list_head *dst, unsigned long *nr_scanned, struct scan_control *sc, enum lru_list lru) { struct list_head *src = &lruvec->lists[lru]; unsigned long nr_taken = 0; unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 }; unsigned long nr_skipped[MAX_NR_ZONES] = { 0, }; unsigned long skipped = 0; unsigned long scan, total_scan, nr_pages; LIST_HEAD(folios_skipped); total_scan = 0; scan = 0; while (scan < nr_to_scan && !list_empty(src)) { struct list_head *move_to = src; struct folio *folio; folio = lru_to_folio(src); prefetchw_prev_lru_folio(folio, src, flags); nr_pages = folio_nr_pages(folio); total_scan += nr_pages; if (folio_zonenum(folio) > sc->reclaim_idx || skip_cma(folio, sc)) { nr_skipped[folio_zonenum(folio)] += nr_pages; move_to = &folios_skipped; goto move; } /* * Do not count skipped folios because that makes the function * return with no isolated folios if the LRU mostly contains * ineligible folios. This causes the VM to not reclaim any * folios, triggering a premature OOM. * Account all pages in a folio. */ scan += nr_pages; if (!folio_test_lru(folio)) goto move; if (!sc->may_unmap && folio_mapped(folio)) goto move; /* * Be careful not to clear the lru flag until after we're * sure the folio is not being freed elsewhere -- the * folio release code relies on it. */ if (unlikely(!folio_try_get(folio))) goto move; if (!folio_test_clear_lru(folio)) { /* Another thread is already isolating this folio */ folio_put(folio); goto move; } nr_taken += nr_pages; nr_zone_taken[folio_zonenum(folio)] += nr_pages; move_to = dst; move: list_move(&folio->lru, move_to); } /* * Splice any skipped folios to the start of the LRU list. Note that * this disrupts the LRU order when reclaiming for lower zones but * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX * scanning would soon rescan the same folios to skip and waste lots * of cpu cycles. */ if (!list_empty(&folios_skipped)) { int zid; list_splice(&folios_skipped, src); for (zid = 0; zid < MAX_NR_ZONES; zid++) { if (!nr_skipped[zid]) continue; __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]); skipped += nr_skipped[zid]; } } *nr_scanned = total_scan; trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, total_scan, skipped, nr_taken, sc->may_unmap ? 0 : ISOLATE_UNMAPPED, lru); update_lru_sizes(lruvec, lru, nr_zone_taken); return nr_taken; } /** * folio_isolate_lru() - Try to isolate a folio from its LRU list. * @folio: Folio to isolate from its LRU list. * * Isolate a @folio from an LRU list and adjust the vmstat statistic * corresponding to whatever LRU list the folio was on. * * The folio will have its LRU flag cleared. If it was found on the * active list, it will have the Active flag set. 
If it was found on the * unevictable list, it will have the Unevictable flag set. These flags * may need to be cleared by the caller before letting the page go. * * Context: * * (1) Must be called with an elevated refcount on the folio. This is a * fundamental difference from isolate_lru_folios() (which is called * without a stable reference). * (2) The lru_lock must not be held. * (3) Interrupts must be enabled. * * Return: true if the folio was removed from an LRU list. * false if the folio was not on an LRU list. */ bool folio_isolate_lru(struct folio *folio) { bool ret = false; VM_BUG_ON_FOLIO(!folio_ref_count(folio), folio); if (folio_test_clear_lru(folio)) { struct lruvec *lruvec; folio_get(folio); lruvec = folio_lruvec_lock_irq(folio); lruvec_del_folio(lruvec, folio); unlock_page_lruvec_irq(lruvec); ret = true; } return ret; } /* * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and * then get rescheduled. When there are massive number of tasks doing page * allocation, such sleeping direct reclaimers may keep piling up on each CPU, * the LRU list will go small and be scanned faster than necessary, leading to * unnecessary swapping, thrashing and OOM. */ static int too_many_isolated(struct pglist_data *pgdat, int file, struct scan_control *sc) { unsigned long inactive, isolated; bool too_many; if (current_is_kswapd()) return 0; if (!writeback_throttling_sane(sc)) return 0; if (file) { inactive = node_page_state(pgdat, NR_INACTIVE_FILE); isolated = node_page_state(pgdat, NR_ISOLATED_FILE); } else { inactive = node_page_state(pgdat, NR_INACTIVE_ANON); isolated = node_page_state(pgdat, NR_ISOLATED_ANON); } /* * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they * won't get blocked by normal direct-reclaimers, forming a circular * deadlock. */ if (gfp_has_io_fs(sc->gfp_mask)) inactive >>= 3; too_many = isolated > inactive; /* Wake up tasks throttled due to too_many_isolated. */ if (!too_many) wake_throttle_isolated(pgdat); return too_many; } /* * move_folios_to_lru() moves folios from private @list to appropriate LRU list. * On return, @list is reused as a list of folios to be freed by the caller. * * Returns the number of pages moved to the given lruvec. */ static unsigned int move_folios_to_lru(struct lruvec *lruvec, struct list_head *list) { int nr_pages, nr_moved = 0; LIST_HEAD(folios_to_free); while (!list_empty(list)) { struct folio *folio = lru_to_folio(list); VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); list_del(&folio->lru); if (unlikely(!folio_evictable(folio))) { spin_unlock_irq(&lruvec->lru_lock); folio_putback_lru(folio); spin_lock_irq(&lruvec->lru_lock); continue; } /* * The folio_set_lru needs to be kept here for list integrity. * Otherwise: * #0 move_folios_to_lru #1 release_pages * if (!folio_put_testzero()) * if (folio_put_testzero()) * !lru //skip lru_lock * folio_set_lru() * list_add(&folio->lru,) * list_add(&folio->lru,) */ folio_set_lru(folio); if (unlikely(folio_put_testzero(folio))) { __folio_clear_lru_flags(folio); if (unlikely(folio_test_large(folio))) { spin_unlock_irq(&lruvec->lru_lock); destroy_large_folio(folio); spin_lock_irq(&lruvec->lru_lock); } else list_add(&folio->lru, &folios_to_free); continue; } /* * All pages were isolated from the same lruvec (and isolation * inhibits memcg migration). 
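		 * That is what makes the folio_matches_lruvec() check
		 * below a stable invariant rather than something that
		 * could change underneath us.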
*/ VM_BUG_ON_FOLIO(!folio_matches_lruvec(folio, lruvec), folio); lruvec_add_folio(lruvec, folio); nr_pages = folio_nr_pages(folio); nr_moved += nr_pages; if (folio_test_active(folio)) workingset_age_nonresident(lruvec, nr_pages); } /* * To save our caller's stack, now use input list for pages to free. */ list_splice(&folios_to_free, list); return nr_moved; } /* * If a kernel thread (such as nfsd for loop-back mounts) services a backing * device by writing to the page cache it sets PF_LOCAL_THROTTLE. In this case * we should not throttle. Otherwise it is safe to do so. */ static int current_may_throttle(void) { return !(current->flags & PF_LOCAL_THROTTLE); } /* * shrink_inactive_list() is a helper for shrink_node(). It returns the number * of reclaimed pages */ static unsigned long shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, struct scan_control *sc, enum lru_list lru) { LIST_HEAD(folio_list); unsigned long nr_scanned; unsigned int nr_reclaimed = 0; unsigned long nr_taken; struct reclaim_stat stat; bool file = is_file_lru(lru); enum vm_event_item item; struct pglist_data *pgdat = lruvec_pgdat(lruvec); bool stalled = false; while (unlikely(too_many_isolated(pgdat, file, sc))) { if (stalled) return 0; /* wait a bit for the reclaimer. */ stalled = true; reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED); /* We are about to die and free our memory. Return now. */ if (fatal_signal_pending(current)) return SWAP_CLUSTER_MAX; } lru_add_drain(); spin_lock_irq(&lruvec->lru_lock); nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list, &nr_scanned, sc, lru); __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); item = PGSCAN_KSWAPD + reclaimer_offset(); if (!cgroup_reclaim(sc)) __count_vm_events(item, nr_scanned); __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned); __count_vm_events(PGSCAN_ANON + file, nr_scanned); spin_unlock_irq(&lruvec->lru_lock); if (nr_taken == 0) return 0; nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false); spin_lock_irq(&lruvec->lru_lock); move_folios_to_lru(lruvec, &folio_list); __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); item = PGSTEAL_KSWAPD + reclaimer_offset(); if (!cgroup_reclaim(sc)) __count_vm_events(item, nr_reclaimed); __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed); __count_vm_events(PGSTEAL_ANON + file, nr_reclaimed); spin_unlock_irq(&lruvec->lru_lock); lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed); mem_cgroup_uncharge_list(&folio_list); free_unref_page_list(&folio_list); /* * If dirty folios are scanned that are not queued for IO, it * implies that flushers are not doing their job. This can * happen when memory pressure pushes dirty folios to the end of * the LRU before the dirty limits are breached and the dirty * data has expired. It can also happen when the proportion of * dirty folios grows not through writes but through memory * pressure reclaiming all the clean cache. And in some cases, * the flushers simply cannot keep up with the allocation * rate. Nudge the flusher threads in case they are asleep. */ if (stat.nr_unqueued_dirty == nr_taken) { wakeup_flusher_threads(WB_REASON_VMSCAN); /* * For cgroupv1 dirty throttling is achieved by waking up * the kernel flusher here and later waiting on folios * which are in writeback to finish (see shrink_folio_list()). * * Flusher may not be able to issue writeback quickly * enough for cgroupv1 writeback throttling to work * on a large system. 
*/ if (!writeback_throttling_sane(sc)) reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); } sc->nr.dirty += stat.nr_dirty; sc->nr.congested += stat.nr_congested; sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; sc->nr.writeback += stat.nr_writeback; sc->nr.immediate += stat.nr_immediate; sc->nr.taken += nr_taken; if (file) sc->nr.file_taken += nr_taken; trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, nr_scanned, nr_reclaimed, &stat, sc->priority, file); return nr_reclaimed; } /* * shrink_active_list() moves folios from the active LRU to the inactive LRU. * * We move them the other way if the folio is referenced by one or more * processes. * * If the folios are mostly unmapped, the processing is fast and it is * appropriate to hold lru_lock across the whole operation. But if * the folios are mapped, the processing is slow (folio_referenced()), so * we should drop lru_lock around each folio. It's impossible to balance * this, so instead we remove the folios from the LRU while processing them. * It is safe to rely on the active flag against the non-LRU folios in here * because nobody will play with that bit on a non-LRU folio. * * The downside is that we have to touch folio->_refcount against each folio. * But we had to alter folio->flags anyway. */ static void shrink_active_list(unsigned long nr_to_scan, struct lruvec *lruvec, struct scan_control *sc, enum lru_list lru) { unsigned long nr_taken; unsigned long nr_scanned; unsigned long vm_flags; LIST_HEAD(l_hold); /* The folios which were snipped off */ LIST_HEAD(l_active); LIST_HEAD(l_inactive); unsigned nr_deactivate, nr_activate; unsigned nr_rotated = 0; int file = is_file_lru(lru); struct pglist_data *pgdat = lruvec_pgdat(lruvec); lru_add_drain(); spin_lock_irq(&lruvec->lru_lock); nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold, &nr_scanned, sc, lru); __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken); if (!cgroup_reclaim(sc)) __count_vm_events(PGREFILL, nr_scanned); __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned); spin_unlock_irq(&lruvec->lru_lock); while (!list_empty(&l_hold)) { struct folio *folio; cond_resched(); folio = lru_to_folio(&l_hold); list_del(&folio->lru); if (unlikely(!folio_evictable(folio))) { folio_putback_lru(folio); continue; } if (unlikely(buffer_heads_over_limit)) { if (folio_needs_release(folio) && folio_trylock(folio)) { filemap_release_folio(folio, 0); folio_unlock(folio); } } /* Referenced or rmap lock contention: rotate */ if (folio_referenced(folio, 0, sc->target_mem_cgroup, &vm_flags) != 0) { /* * Identify referenced, file-backed active folios and * give them one more trip around the active list. So * that executable code get better chances to stay in * memory under moderate memory pressure. Anon folios * are not likely to be evicted by use-once streaming * IO, plus JVM can create lots of anon VM_EXEC folios, * so we ignore them here. */ if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio)) { nr_rotated += folio_nr_pages(folio); list_add(&folio->lru, &l_active); continue; } } folio_clear_active(folio); /* we are de-activating */ folio_set_workingset(folio); list_add(&folio->lru, &l_inactive); } /* * Move folios back to the lru list. 
*/ spin_lock_irq(&lruvec->lru_lock); nr_activate = move_folios_to_lru(lruvec, &l_active); nr_deactivate = move_folios_to_lru(lruvec, &l_inactive); /* Keep all free folios in l_active list */ list_splice(&l_inactive, &l_active); __count_vm_events(PGDEACTIVATE, nr_deactivate); __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_deactivate); __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); spin_unlock_irq(&lruvec->lru_lock); if (nr_rotated) lru_note_cost(lruvec, file, 0, nr_rotated); mem_cgroup_uncharge_list(&l_active); free_unref_page_list(&l_active); trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, nr_deactivate, nr_rotated, sc->priority, file); } static unsigned int reclaim_folio_list(struct list_head *folio_list, struct pglist_data *pgdat) { struct reclaim_stat dummy_stat; unsigned int nr_reclaimed; struct folio *folio; struct scan_control sc = { .gfp_mask = GFP_KERNEL, .may_writepage = 1, .may_unmap = 1, .may_swap = 1, .no_demotion = 1, }; nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, false); while (!list_empty(folio_list)) { folio = lru_to_folio(folio_list); list_del(&folio->lru); folio_putback_lru(folio); } return nr_reclaimed; } unsigned long reclaim_pages(struct list_head *folio_list) { int nid; unsigned int nr_reclaimed = 0; LIST_HEAD(node_folio_list); unsigned int noreclaim_flag; if (list_empty(folio_list)) return nr_reclaimed; noreclaim_flag = memalloc_noreclaim_save(); nid = folio_nid(lru_to_folio(folio_list)); do { struct folio *folio = lru_to_folio(folio_list); if (nid == folio_nid(folio)) { folio_clear_active(folio); list_move(&folio->lru, &node_folio_list); continue; } nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid)); nid = folio_nid(lru_to_folio(folio_list)); } while (!list_empty(folio_list)); nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid)); memalloc_noreclaim_restore(noreclaim_flag); return nr_reclaimed; } static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, struct lruvec *lruvec, struct scan_control *sc) { if (is_active_lru(lru)) { if (sc->may_deactivate & (1 << is_file_lru(lru))) shrink_active_list(nr_to_scan, lruvec, sc, lru); else sc->skipped_deactivate = 1; return 0; } return shrink_inactive_list(nr_to_scan, lruvec, sc, lru); } /* * The inactive anon list should be small enough that the VM never has * to do too much work. * * The inactive file list should be small enough to leave most memory * to the established workingset on the scan-resistant active list, * but large enough to avoid thrashing the aggregate readahead window. * * Both inactive lists should also be large enough that each inactive * folio has a chance to be referenced again before it is reclaimed. * * If that fails and refaulting is observed, the inactive list grows. * * The inactive_ratio is the target ratio of ACTIVE to INACTIVE folios * on this LRU, maintained by the pageout code. An inactive_ratio * of 3 means 3:1 or 25% of the folios are kept on the inactive list. 
* * total target max * memory ratio inactive * ------------------------------------- * 10MB 1 5MB * 100MB 1 50MB * 1GB 3 250MB * 10GB 10 0.9GB * 100GB 31 3GB * 1TB 101 10GB * 10TB 320 32GB */ static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru) { enum lru_list active_lru = inactive_lru + LRU_ACTIVE; unsigned long inactive, active; unsigned long inactive_ratio; unsigned long gb; inactive = lruvec_page_state(lruvec, NR_LRU_BASE + inactive_lru); active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru); gb = (inactive + active) >> (30 - PAGE_SHIFT); if (gb) inactive_ratio = int_sqrt(10 * gb); else inactive_ratio = 1; return inactive * inactive_ratio < active; } enum scan_balance { SCAN_EQUAL, SCAN_FRACT, SCAN_ANON, SCAN_FILE, }; static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc) { unsigned long file; struct lruvec *target_lruvec; if (lru_gen_enabled()) return; target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); /* * Flush the memory cgroup stats, so that we read accurate per-memcg * lruvec stats for heuristics. */ mem_cgroup_flush_stats(); /* * Determine the scan balance between anon and file LRUs. */ spin_lock_irq(&target_lruvec->lru_lock); sc->anon_cost = target_lruvec->anon_cost; sc->file_cost = target_lruvec->file_cost; spin_unlock_irq(&target_lruvec->lru_lock); /* * Target desirable inactive:active list ratios for the anon * and file LRU lists. */ if (!sc->force_deactivate) { unsigned long refaults; /* * When refaults are being observed, it means a new * workingset is being established. Deactivate to get * rid of any stale active pages quickly. */ refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON); if (refaults != target_lruvec->refaults[WORKINGSET_ANON] || inactive_is_low(target_lruvec, LRU_INACTIVE_ANON)) sc->may_deactivate |= DEACTIVATE_ANON; else sc->may_deactivate &= ~DEACTIVATE_ANON; refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE); if (refaults != target_lruvec->refaults[WORKINGSET_FILE] || inactive_is_low(target_lruvec, LRU_INACTIVE_FILE)) sc->may_deactivate |= DEACTIVATE_FILE; else sc->may_deactivate &= ~DEACTIVATE_FILE; } else sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE; /* * If we have plenty of inactive file pages that aren't * thrashing, try to reclaim those first before touching * anonymous pages. */ file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE); if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE)) sc->cache_trim_mode = 1; else sc->cache_trim_mode = 0; /* * Prevent the reclaimer from falling into the cache trap: as * cache pages start out inactive, every cache fault will tip * the scan balance towards the file LRU. And as the file LRU * shrinks, so does the window for rotation from references. * This means we have a runaway feedback loop where a tiny * thrashing file LRU becomes infinitely more attractive than * anon pages. Try to detect this based on file LRU size. */ if (!cgroup_reclaim(sc)) { unsigned long total_high_wmark = 0; unsigned long free, anon; int z; free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); file = node_page_state(pgdat, NR_ACTIVE_FILE) + node_page_state(pgdat, NR_INACTIVE_FILE); for (z = 0; z < MAX_NR_ZONES; z++) { struct zone *zone = &pgdat->node_zones[z]; if (!managed_zone(zone)) continue; total_high_wmark += high_wmark_pages(zone); } /* * Consider anon: if that's low too, this isn't a * runaway file reclaim problem, but rather just * extreme pressure. Reclaim as per usual then. 
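		 *
		 * Illustrative example (numbers made up): with 100MB of
		 * file + free pages against 120MB of summed zone high
		 * watermarks, anon deactivation not forced and enough
		 * inactive anon left at this priority, file_is_tiny is
		 * set and get_scan_count() switches to SCAN_ANON.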
*/ anon = node_page_state(pgdat, NR_INACTIVE_ANON); sc->file_is_tiny = file + free <= total_high_wmark && !(sc->may_deactivate & DEACTIVATE_ANON) && anon >> sc->priority; } } /* * Determine how aggressively the anon and file LRU lists should be * scanned. * * nr[0] = anon inactive folios to scan; nr[1] = anon active folios to scan * nr[2] = file inactive folios to scan; nr[3] = file active folios to scan */ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, unsigned long *nr) { struct pglist_data *pgdat = lruvec_pgdat(lruvec); struct mem_cgroup *memcg = lruvec_memcg(lruvec); unsigned long anon_cost, file_cost, total_cost; int swappiness = mem_cgroup_swappiness(memcg); u64 fraction[ANON_AND_FILE]; u64 denominator = 0; /* gcc */ enum scan_balance scan_balance; unsigned long ap, fp; enum lru_list lru; /* If we have no swap space, do not bother scanning anon folios. */ if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) { scan_balance = SCAN_FILE; goto out; } /* * Global reclaim will swap to prevent OOM even with no * swappiness, but memcg users want to use this knob to * disable swapping for individual groups completely when * using the memory controller's swap limit feature would be * too expensive. */ if (cgroup_reclaim(sc) && !swappiness) { scan_balance = SCAN_FILE; goto out; } /* * Do not apply any pressure balancing cleverness when the * system is close to OOM, scan both anon and file equally * (unless the swappiness setting disagrees with swapping). */ if (!sc->priority && swappiness) { scan_balance = SCAN_EQUAL; goto out; } /* * If the system is almost out of file pages, force-scan anon. */ if (sc->file_is_tiny) { scan_balance = SCAN_ANON; goto out; } /* * If there is enough inactive page cache, we do not reclaim * anything from the anonymous working right now. */ if (sc->cache_trim_mode) { scan_balance = SCAN_FILE; goto out; } scan_balance = SCAN_FRACT; /* * Calculate the pressure balance between anon and file pages. * * The amount of pressure we put on each LRU is inversely * proportional to the cost of reclaiming each list, as * determined by the share of pages that are refaulting, times * the relative IO cost of bringing back a swapped out * anonymous page vs reloading a filesystem page (swappiness). * * Although we limit that influence to ensure no list gets * left behind completely: at least a third of the pressure is * applied, before swappiness. * * With swappiness at 100, anon and file have equal IO cost. */ total_cost = sc->anon_cost + sc->file_cost; anon_cost = total_cost + sc->anon_cost; file_cost = total_cost + sc->file_cost; total_cost = anon_cost + file_cost; ap = swappiness * (total_cost + 1); ap /= anon_cost + 1; fp = (200 - swappiness) * (total_cost + 1); fp /= file_cost + 1; fraction[0] = ap; fraction[1] = fp; denominator = ap + fp; out: for_each_evictable_lru(lru) { int file = is_file_lru(lru); unsigned long lruvec_size; unsigned long low, min; unsigned long scan; lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); mem_cgroup_protection(sc->target_mem_cgroup, memcg, &min, &low); if (min || low) { /* * Scale a cgroup's reclaim pressure by proportioning * its current usage to its memory.low or memory.min * setting. * * This is important, as otherwise scanning aggression * becomes extremely binary -- from nothing as we * approach the memory protection threshold, to totally * nominal as we exceed it. This results in requiring * setting extremely liberal protection thresholds. 
It * also means we simply get no protection at all if we * set it too low, which is not ideal. * * If there is any protection in place, we reduce scan * pressure by how much of the total memory used is * within protection thresholds. * * There is one special case: in the first reclaim pass, * we skip over all groups that are within their low * protection. If that fails to reclaim enough pages to * satisfy the reclaim goal, we come back and override * the best-effort low protection. However, we still * ideally want to honor how well-behaved groups are in * that case instead of simply punishing them all * equally. As such, we reclaim them based on how much * memory they are using, reducing the scan pressure * again by how much of the total memory used is under * hard protection. */ unsigned long cgroup_size = mem_cgroup_size(memcg); unsigned long protection; /* memory.low scaling, make sure we retry before OOM */ if (!sc->memcg_low_reclaim && low > min) { protection = low; sc->memcg_low_skipped = 1; } else { protection = min; } /* Avoid TOCTOU with earlier protection check */ cgroup_size = max(cgroup_size, protection); scan = lruvec_size - lruvec_size * protection / (cgroup_size + 1); /* * Minimally target SWAP_CLUSTER_MAX pages to keep * reclaim moving forwards, avoiding decrementing * sc->priority further than desirable. */ scan = max(scan, SWAP_CLUSTER_MAX); } else { scan = lruvec_size; } scan >>= sc->priority; /* * If the cgroup's already been deleted, make sure to * scrape out the remaining cache. */ if (!scan && !mem_cgroup_online(memcg)) scan = min(lruvec_size, SWAP_CLUSTER_MAX); switch (scan_balance) { case SCAN_EQUAL: /* Scan lists relative to size */ break; case SCAN_FRACT: /* * Scan types proportional to swappiness and * their relative recent reclaim efficiency. * Make sure we don't miss the last page on * the offlined memory cgroups because of a * round-off error. */ scan = mem_cgroup_online(memcg) ? div64_u64(scan * fraction[file], denominator) : DIV64_U64_ROUND_UP(scan * fraction[file], denominator); break; case SCAN_FILE: case SCAN_ANON: /* Scan one type exclusively */ if ((scan_balance == SCAN_FILE) != file) scan = 0; break; default: /* Look ma, no brain */ BUG(); } nr[lru] = scan; } } /* * Anonymous LRU management is a waste if there is * ultimately no way to reclaim the memory. 
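 *
 * "A way to reclaim" here means either having swap space available or
 * being able to demote anon pages to a lower memory tier, which is
 * exactly what the two checks below test.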
*/ static bool can_age_anon_pages(struct pglist_data *pgdat, struct scan_control *sc) { /* Aging the anon LRU is valuable if swap is present: */ if (total_swap_pages > 0) return true; /* Also valuable if anon pages can be demoted: */ return can_demote(pgdat->node_id, sc); } #ifdef CONFIG_LRU_GEN #ifdef CONFIG_LRU_GEN_ENABLED DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS); #define get_cap(cap) static_branch_likely(&lru_gen_caps[cap]) #else DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS); #define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap]) #endif static bool should_walk_mmu(void) { return arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK); } static bool should_clear_pmd_young(void) { return arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG); } /****************************************************************************** * shorthand helpers ******************************************************************************/ #define LRU_REFS_FLAGS (BIT(PG_referenced) | BIT(PG_workingset)) #define DEFINE_MAX_SEQ(lruvec) \ unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq) #define DEFINE_MIN_SEQ(lruvec) \ unsigned long min_seq[ANON_AND_FILE] = { \ READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \ READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \ } #define for_each_gen_type_zone(gen, type, zone) \ for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \ for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \ for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++) #define get_memcg_gen(seq) ((seq) % MEMCG_NR_GENS) #define get_memcg_bin(bin) ((bin) % MEMCG_NR_BINS) static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid) { struct pglist_data *pgdat = NODE_DATA(nid); #ifdef CONFIG_MEMCG if (memcg) { struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec; /* see the comment in mem_cgroup_lruvec() */ if (!lruvec->pgdat) lruvec->pgdat = pgdat; return lruvec; } #endif VM_WARN_ON_ONCE(!mem_cgroup_disabled()); return &pgdat->__lruvec; } static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc) { struct mem_cgroup *memcg = lruvec_memcg(lruvec); struct pglist_data *pgdat = lruvec_pgdat(lruvec); if (!sc->may_swap) return 0; if (!can_demote(pgdat->node_id, sc) && mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH) return 0; return mem_cgroup_swappiness(memcg); } static int get_nr_gens(struct lruvec *lruvec, int type) { return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1; } static bool __maybe_unused seq_is_valid(struct lruvec *lruvec) { /* see the comment on lru_gen_folio */ return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS && get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) && get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS; } /****************************************************************************** * Bloom filters ******************************************************************************/ /* * Bloom filters with m=1<<15, k=2 and the false positive rates of ~1/5 when * n=10,000 and ~1/2 when n=20,000, where, conventionally, m is the number of * bits in a bitmap, k is the number of hash functions and n is the number of * inserted items. * * Page table walkers use one of the two filters to reduce their search space. * To get rid of non-leaf entries that no longer have enough leaf entries, the * aging uses the double-buffering technique to flip to the other filter each * time it produces a new generation. 
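 * ("Double-buffering" here simply means two bitmaps selected by the
 * sequence number, as filter_gen_from_seq() below does.)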
For non-leaf entries that have enough * leaf entries, the aging carries them over to the next generation in * walk_pmd_range(); the eviction also report them when walking the rmap * in lru_gen_look_around(). * * For future optimizations: * 1. It's not necessary to keep both filters all the time. The spare one can be * freed after the RCU grace period and reallocated if needed again. * 2. And when reallocating, it's worth scaling its size according to the number * of inserted entries in the other filter, to reduce the memory overhead on * small systems and false positives on large systems. * 3. Jenkins' hash function is an alternative to Knuth's. */ #define BLOOM_FILTER_SHIFT 15 static inline int filter_gen_from_seq(unsigned long seq) { return seq % NR_BLOOM_FILTERS; } static void get_item_key(void *item, int *key) { u32 hash = hash_ptr(item, BLOOM_FILTER_SHIFT * 2); BUILD_BUG_ON(BLOOM_FILTER_SHIFT * 2 > BITS_PER_TYPE(u32)); key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1); key[1] = hash >> BLOOM_FILTER_SHIFT; } static bool test_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item) { int key[2]; unsigned long *filter; int gen = filter_gen_from_seq(seq); filter = READ_ONCE(lruvec->mm_state.filters[gen]); if (!filter) return true; get_item_key(item, key); return test_bit(key[0], filter) && test_bit(key[1], filter); } static void update_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item) { int key[2]; unsigned long *filter; int gen = filter_gen_from_seq(seq); filter = READ_ONCE(lruvec->mm_state.filters[gen]); if (!filter) return; get_item_key(item, key); if (!test_bit(key[0], filter)) set_bit(key[0], filter); if (!test_bit(key[1], filter)) set_bit(key[1], filter); } static void reset_bloom_filter(struct lruvec *lruvec, unsigned long seq) { unsigned long *filter; int gen = filter_gen_from_seq(seq); filter = lruvec->mm_state.filters[gen]; if (filter) { bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT)); return; } filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN); WRITE_ONCE(lruvec->mm_state.filters[gen], filter); } /****************************************************************************** * mm_struct list ******************************************************************************/ static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg) { static struct lru_gen_mm_list mm_list = { .fifo = LIST_HEAD_INIT(mm_list.fifo), .lock = __SPIN_LOCK_UNLOCKED(mm_list.lock), }; #ifdef CONFIG_MEMCG if (memcg) return &memcg->mm_list; #endif VM_WARN_ON_ONCE(!mem_cgroup_disabled()); return &mm_list; } void lru_gen_add_mm(struct mm_struct *mm) { int nid; struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm); struct lru_gen_mm_list *mm_list = get_mm_list(memcg); VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list)); #ifdef CONFIG_MEMCG VM_WARN_ON_ONCE(mm->lru_gen.memcg); mm->lru_gen.memcg = memcg; #endif spin_lock(&mm_list->lock); for_each_node_state(nid, N_MEMORY) { struct lruvec *lruvec = get_lruvec(memcg, nid); /* the first addition since the last iteration */ if (lruvec->mm_state.tail == &mm_list->fifo) lruvec->mm_state.tail = &mm->lru_gen.list; } list_add_tail(&mm->lru_gen.list, &mm_list->fifo); spin_unlock(&mm_list->lock); } void lru_gen_del_mm(struct mm_struct *mm) { int nid; struct lru_gen_mm_list *mm_list; struct mem_cgroup *memcg = NULL; if (list_empty(&mm->lru_gen.list)) return; #ifdef CONFIG_MEMCG memcg = mm->lru_gen.memcg; #endif mm_list = get_mm_list(memcg); spin_lock(&mm_list->lock); for_each_node(nid) { struct lruvec 
*lruvec = get_lruvec(memcg, nid); /* where the current iteration continues after */ if (lruvec->mm_state.head == &mm->lru_gen.list) lruvec->mm_state.head = lruvec->mm_state.head->prev; /* where the last iteration ended before */ if (lruvec->mm_state.tail == &mm->lru_gen.list) lruvec->mm_state.tail = lruvec->mm_state.tail->next; } list_del_init(&mm->lru_gen.list); spin_unlock(&mm_list->lock); #ifdef CONFIG_MEMCG mem_cgroup_put(mm->lru_gen.memcg); mm->lru_gen.memcg = NULL; #endif } #ifdef CONFIG_MEMCG void lru_gen_migrate_mm(struct mm_struct *mm) { struct mem_cgroup *memcg; struct task_struct *task = rcu_dereference_protected(mm->owner, true); VM_WARN_ON_ONCE(task->mm != mm); lockdep_assert_held(&task->alloc_lock); /* for mm_update_next_owner() */ if (mem_cgroup_disabled()) return; /* migration can happen before addition */ if (!mm->lru_gen.memcg) return; rcu_read_lock(); memcg = mem_cgroup_from_task(task); rcu_read_unlock(); if (memcg == mm->lru_gen.memcg) return; VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list)); lru_gen_del_mm(mm); lru_gen_add_mm(mm); } #endif static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, bool last) { int i; int hist; lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock); if (walk) { hist = lru_hist_from_seq(walk->max_seq); for (i = 0; i < NR_MM_STATS; i++) { WRITE_ONCE(lruvec->mm_state.stats[hist][i], lruvec->mm_state.stats[hist][i] + walk->mm_stats[i]); walk->mm_stats[i] = 0; } } if (NR_HIST_GENS > 1 && last) { hist = lru_hist_from_seq(lruvec->mm_state.seq + 1); for (i = 0; i < NR_MM_STATS; i++) WRITE_ONCE(lruvec->mm_state.stats[hist][i], 0); } } static bool should_skip_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk) { int type; unsigned long size = 0; struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); int key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap); if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap)) return true; clear_bit(key, &mm->lru_gen.bitmap); for (type = !walk->can_swap; type < ANON_AND_FILE; type++) { size += type ? get_mm_counter(mm, MM_FILEPAGES) : get_mm_counter(mm, MM_ANONPAGES) + get_mm_counter(mm, MM_SHMEMPAGES); } if (size < MIN_LRU_BATCH) return true; return !mmget_not_zero(mm); } static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, struct mm_struct **iter) { bool first = false; bool last = false; struct mm_struct *mm = NULL; struct mem_cgroup *memcg = lruvec_memcg(lruvec); struct lru_gen_mm_list *mm_list = get_mm_list(memcg); struct lru_gen_mm_state *mm_state = &lruvec->mm_state; /* * mm_state->seq is incremented after each iteration of mm_list. There * are three interesting cases for this page table walker: * 1. It tries to start a new iteration with a stale max_seq: there is * nothing left to do. * 2. It started the next iteration: it needs to reset the Bloom filter * so that a fresh set of PTE tables can be recorded. * 3. It ended the current iteration: it needs to reset the mm stats * counters and tell its caller to increment max_seq. 
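 *
 * For example, if two walkers race with the same max_seq, the one that
 * takes mm_list->lock after the list has already wrapped sees
 * walk->max_seq <= mm_state->seq and simply bails out (case 1 above).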
*/ spin_lock(&mm_list->lock); VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->max_seq); if (walk->max_seq <= mm_state->seq) goto done; if (!mm_state->head) mm_state->head = &mm_list->fifo; if (mm_state->head == &mm_list->fifo) first = true; do { mm_state->head = mm_state->head->next; if (mm_state->head == &mm_list->fifo) { WRITE_ONCE(mm_state->seq, mm_state->seq + 1); last = true; break; } /* force scan for those added after the last iteration */ if (!mm_state->tail || mm_state->tail == mm_state->head) { mm_state->tail = mm_state->head->next; walk->force_scan = true; } mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list); if (should_skip_mm(mm, walk)) mm = NULL; } while (!mm); done: if (*iter || last) reset_mm_stats(lruvec, walk, last); spin_unlock(&mm_list->lock); if (mm && first) reset_bloom_filter(lruvec, walk->max_seq + 1); if (*iter) mmput_async(*iter); *iter = mm; return last; } static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq) { bool success = false; struct mem_cgroup *memcg = lruvec_memcg(lruvec); struct lru_gen_mm_list *mm_list = get_mm_list(memcg); struct lru_gen_mm_state *mm_state = &lruvec->mm_state; spin_lock(&mm_list->lock); VM_WARN_ON_ONCE(mm_state->seq + 1 < max_seq); if (max_seq > mm_state->seq) { mm_state->head = NULL; mm_state->tail = NULL; WRITE_ONCE(mm_state->seq, mm_state->seq + 1); reset_mm_stats(lruvec, NULL, true); success = true; } spin_unlock(&mm_list->lock); return success; } /****************************************************************************** * PID controller ******************************************************************************/ /* * A feedback loop based on Proportional-Integral-Derivative (PID) controller. * * The P term is refaulted/(evicted+protected) from a tier in the generation * currently being evicted; the I term is the exponential moving average of the * P term over the generations previously evicted, using the smoothing factor * 1/2; the D term isn't supported. * * The setpoint (SP) is always the first tier of one type; the process variable * (PV) is either any tier of the other type or any other tier of the same * type. * * The error is the difference between the SP and the PV; the correction is to * turn off protection when SP>PV or turn on protection when SP<PV. * * For future optimizations: * 1. The D term may discount the other two terms over time so that long-lived * generations can resist stale information. */ struct ctrl_pos { unsigned long refaulted; unsigned long total; int gain; }; static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain, struct ctrl_pos *pos) { struct lru_gen_folio *lrugen = &lruvec->lrugen; int hist = lru_hist_from_seq(lrugen->min_seq[type]); pos->refaulted = lrugen->avg_refaulted[type][tier] + atomic_long_read(&lrugen->refaulted[hist][type][tier]); pos->total = lrugen->avg_total[type][tier] + atomic_long_read(&lrugen->evicted[hist][type][tier]); if (tier) pos->total += lrugen->protected[hist][type][tier - 1]; pos->gain = gain; } static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover) { int hist, tier; struct lru_gen_folio *lrugen = &lruvec->lrugen; bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1; unsigned long seq = carryover ? 
lrugen->min_seq[type] : lrugen->max_seq + 1; lockdep_assert_held(&lruvec->lru_lock); if (!carryover && !clear) return; hist = lru_hist_from_seq(seq); for (tier = 0; tier < MAX_NR_TIERS; tier++) { if (carryover) { unsigned long sum; sum = lrugen->avg_refaulted[type][tier] + atomic_long_read(&lrugen->refaulted[hist][type][tier]); WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2); sum = lrugen->avg_total[type][tier] + atomic_long_read(&lrugen->evicted[hist][type][tier]); if (tier) sum += lrugen->protected[hist][type][tier - 1]; WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2); } if (clear) { atomic_long_set(&lrugen->refaulted[hist][type][tier], 0); atomic_long_set(&lrugen->evicted[hist][type][tier], 0); if (tier) WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0); } } } static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv) { /* * Return true if the PV has a limited number of refaults or a lower * refaulted/total than the SP. */ return pv->refaulted < MIN_LRU_BATCH || pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <= (sp->refaulted + 1) * pv->total * pv->gain; } /****************************************************************************** * the aging ******************************************************************************/ /* promote pages accessed through page tables */ static int folio_update_gen(struct folio *folio, int gen) { unsigned long new_flags, old_flags = READ_ONCE(folio->flags); VM_WARN_ON_ONCE(gen >= MAX_NR_GENS); VM_WARN_ON_ONCE(!rcu_read_lock_held()); do { /* lru_gen_del_folio() has isolated this page? */ if (!(old_flags & LRU_GEN_MASK)) { /* for shrink_folio_list() */ new_flags = old_flags | BIT(PG_referenced); continue; } new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS); new_flags |= (gen + 1UL) << LRU_GEN_PGOFF; } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; } /* protect pages accessed multiple times through file descriptors */ static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming) { int type = folio_is_file_lru(folio); struct lru_gen_folio *lrugen = &lruvec->lrugen; int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); unsigned long new_flags, old_flags = READ_ONCE(folio->flags); VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio); do { new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; /* folio_update_gen() has promoted this page? 
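		 * (i.e. a concurrent page table walk already moved this
		 * folio to a newer generation, so the eviction side has
		 * nothing more to do)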
*/ if (new_gen >= 0 && new_gen != old_gen) return new_gen; new_gen = (old_gen + 1) % MAX_NR_GENS; new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS); new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF; /* for folio_end_writeback() */ if (reclaiming) new_flags |= BIT(PG_reclaim); } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); lru_gen_update_size(lruvec, folio, old_gen, new_gen); return new_gen; } static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio, int old_gen, int new_gen) { int type = folio_is_file_lru(folio); int zone = folio_zonenum(folio); int delta = folio_nr_pages(folio); VM_WARN_ON_ONCE(old_gen >= MAX_NR_GENS); VM_WARN_ON_ONCE(new_gen >= MAX_NR_GENS); walk->batched++; walk->nr_pages[old_gen][type][zone] -= delta; walk->nr_pages[new_gen][type][zone] += delta; } static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk) { int gen, type, zone; struct lru_gen_folio *lrugen = &lruvec->lrugen; walk->batched = 0; for_each_gen_type_zone(gen, type, zone) { enum lru_list lru = type * LRU_INACTIVE_FILE; int delta = walk->nr_pages[gen][type][zone]; if (!delta) continue; walk->nr_pages[gen][type][zone] = 0; WRITE_ONCE(lrugen->nr_pages[gen][type][zone], lrugen->nr_pages[gen][type][zone] + delta); if (lru_gen_is_active(lruvec, gen)) lru += LRU_ACTIVE; __update_lru_size(lruvec, lru, zone, delta); } } static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *args) { struct address_space *mapping; struct vm_area_struct *vma = args->vma; struct lru_gen_mm_walk *walk = args->private; if (!vma_is_accessible(vma)) return true; if (is_vm_hugetlb_page(vma)) return true; if (!vma_has_recency(vma)) return true; if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) return true; if (vma == get_gate_vma(vma->vm_mm)) return true; if (vma_is_anonymous(vma)) return !walk->can_swap; if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping)) return true; mapping = vma->vm_file->f_mapping; if (mapping_unevictable(mapping)) return true; if (shmem_mapping(mapping)) return !walk->can_swap; /* to exclude special mappings like dax, etc. */ return !mapping->a_ops->read_folio; } /* * Some userspace memory allocators map many single-page VMAs. Instead of * returning back to the PGD table for each of such VMAs, finish an entire PMD * table to reduce zigzags and improve cache performance. 
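 *
 * For example, a process with hundreds of single-page VMAs packed into
 * one PMD range is handled within that PMD by get_next_vma() instead of
 * dropping back to the PGD level for each VMA.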
*/ static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk *args, unsigned long *vm_start, unsigned long *vm_end) { unsigned long start = round_up(*vm_end, size); unsigned long end = (start | ~mask) + 1; VMA_ITERATOR(vmi, args->mm, start); VM_WARN_ON_ONCE(mask & size); VM_WARN_ON_ONCE((start & mask) != (*vm_start & mask)); for_each_vma(vmi, args->vma) { if (end && end <= args->vma->vm_start) return false; if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args)) continue; *vm_start = max(start, args->vma->vm_start); *vm_end = min(end - 1, args->vma->vm_end - 1) + 1; return true; } return false; } static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr) { unsigned long pfn = pte_pfn(pte); VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); if (!pte_present(pte) || is_zero_pfn(pfn)) return -1; if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte))) return -1; if (WARN_ON_ONCE(!pfn_valid(pfn))) return -1; return pfn; } #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr) { unsigned long pfn = pmd_pfn(pmd); VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); if (!pmd_present(pmd) || is_huge_zero_pmd(pmd)) return -1; if (WARN_ON_ONCE(pmd_devmap(pmd))) return -1; if (WARN_ON_ONCE(!pfn_valid(pfn))) return -1; return pfn; } #endif static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg, struct pglist_data *pgdat, bool can_swap) { struct folio *folio; /* try to avoid unnecessary memory loads */ if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) return NULL; folio = pfn_folio(pfn); if (folio_nid(folio) != pgdat->node_id) return NULL; if (folio_memcg_rcu(folio) != memcg) return NULL; /* file VMAs can contain anon pages from COW */ if (!folio_is_file_lru(folio) && !can_swap) return NULL; return folio; } static bool suitable_to_scan(int total, int young) { int n = clamp_t(int, cache_line_size() / sizeof(pte_t), 2, 8); /* suitable if the average number of young PTEs per cacheline is >=1 */ return young * n >= total; } static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end, struct mm_walk *args) { int i; pte_t *pte; spinlock_t *ptl; unsigned long addr; int total = 0; int young = 0; struct lru_gen_mm_walk *walk = args->private; struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); int old_gen, new_gen = lru_gen_from_seq(walk->max_seq); pte = pte_offset_map_nolock(args->mm, pmd, start & PMD_MASK, &ptl); if (!pte) return false; if (!spin_trylock(ptl)) { pte_unmap(pte); return false; } arch_enter_lazy_mmu_mode(); restart: for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) { unsigned long pfn; struct folio *folio; pte_t ptent = ptep_get(pte + i); total++; walk->mm_stats[MM_LEAF_TOTAL]++; pfn = get_pte_pfn(ptent, args->vma, addr); if (pfn == -1) continue; if (!pte_young(ptent)) { walk->mm_stats[MM_LEAF_OLD]++; continue; } folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); if (!folio) continue; if (!ptep_test_and_clear_young(args->vma, addr, pte + i)) VM_WARN_ON_ONCE(true); young++; walk->mm_stats[MM_LEAF_YOUNG]++; if (pte_dirty(ptent) && !folio_test_dirty(folio) && !(folio_test_anon(folio) && folio_test_swapbacked(folio) && !folio_test_swapcache(folio))) folio_mark_dirty(folio); old_gen = folio_update_gen(folio, new_gen); if (old_gen >= 0 && old_gen != 
new_gen) update_batch_size(walk, folio, old_gen, new_gen); } if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end)) goto restart; arch_leave_lazy_mmu_mode(); pte_unmap_unlock(pte, ptl); return suitable_to_scan(total, young); } #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma, struct mm_walk *args, unsigned long *bitmap, unsigned long *first) { int i; pmd_t *pmd; spinlock_t *ptl; struct lru_gen_mm_walk *walk = args->private; struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); int old_gen, new_gen = lru_gen_from_seq(walk->max_seq); VM_WARN_ON_ONCE(pud_leaf(*pud)); /* try to batch at most 1+MIN_LRU_BATCH+1 entries */ if (*first == -1) { *first = addr; bitmap_zero(bitmap, MIN_LRU_BATCH); return; } i = addr == -1 ? 0 : pmd_index(addr) - pmd_index(*first); if (i && i <= MIN_LRU_BATCH) { __set_bit(i - 1, bitmap); return; } pmd = pmd_offset(pud, *first); ptl = pmd_lockptr(args->mm, pmd); if (!spin_trylock(ptl)) goto done; arch_enter_lazy_mmu_mode(); do { unsigned long pfn; struct folio *folio; /* don't round down the first address */ addr = i ? (*first & PMD_MASK) + i * PMD_SIZE : *first; pfn = get_pmd_pfn(pmd[i], vma, addr); if (pfn == -1) goto next; if (!pmd_trans_huge(pmd[i])) { if (should_clear_pmd_young()) pmdp_test_and_clear_young(vma, addr, pmd + i); goto next; } folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); if (!folio) goto next; if (!pmdp_test_and_clear_young(vma, addr, pmd + i)) goto next; walk->mm_stats[MM_LEAF_YOUNG]++; if (pmd_dirty(pmd[i]) && !folio_test_dirty(folio) && !(folio_test_anon(folio) && folio_test_swapbacked(folio) && !folio_test_swapcache(folio))) folio_mark_dirty(folio); old_gen = folio_update_gen(folio, new_gen); if (old_gen >= 0 && old_gen != new_gen) update_batch_size(walk, folio, old_gen, new_gen); next: i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1; } while (i <= MIN_LRU_BATCH); arch_leave_lazy_mmu_mode(); spin_unlock(ptl); done: *first = -1; } #else static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area_struct *vma, struct mm_walk *args, unsigned long *bitmap, unsigned long *first) { } #endif static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end, struct mm_walk *args) { int i; pmd_t *pmd; unsigned long next; unsigned long addr; struct vm_area_struct *vma; DECLARE_BITMAP(bitmap, MIN_LRU_BATCH); unsigned long first = -1; struct lru_gen_mm_walk *walk = args->private; VM_WARN_ON_ONCE(pud_leaf(*pud)); /* * Finish an entire PMD in two passes: the first only reaches to PTE * tables to avoid taking the PMD lock; the second, if necessary, takes * the PMD lock to clear the accessed bit in PMD entries. 
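	 *
	 * Note: the second pass is batched - walk_pmd_range_locked()
	 * accumulates candidate PMD entries in a bitmap and only takes
	 * the PMD lock when it flushes that batch.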
*/ pmd = pmd_offset(pud, start & PUD_MASK); restart: /* walk_pte_range() may call get_next_vma() */ vma = args->vma; for (i = pmd_index(start), addr = start; addr != end; i++, addr = next) { pmd_t val = pmdp_get_lockless(pmd + i); next = pmd_addr_end(addr, end); if (!pmd_present(val) || is_huge_zero_pmd(val)) { walk->mm_stats[MM_LEAF_TOTAL]++; continue; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (pmd_trans_huge(val)) { unsigned long pfn = pmd_pfn(val); struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); walk->mm_stats[MM_LEAF_TOTAL]++; if (!pmd_young(val)) { walk->mm_stats[MM_LEAF_OLD]++; continue; } /* try to avoid unnecessary memory loads */ if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) continue; walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first); continue; } #endif walk->mm_stats[MM_NONLEAF_TOTAL]++; if (should_clear_pmd_young()) { if (!pmd_young(val)) continue; walk_pmd_range_locked(pud, addr, vma, args, bitmap, &first); } if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i)) continue; walk->mm_stats[MM_NONLEAF_FOUND]++; if (!walk_pte_range(&val, addr, next, args)) continue; walk->mm_stats[MM_NONLEAF_ADDED]++; /* carry over to the next generation */ update_bloom_filter(walk->lruvec, walk->max_seq + 1, pmd + i); } walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first); if (i < PTRS_PER_PMD && get_next_vma(PUD_MASK, PMD_SIZE, args, &start, &end)) goto restart; } static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end, struct mm_walk *args) { int i; pud_t *pud; unsigned long addr; unsigned long next; struct lru_gen_mm_walk *walk = args->private; VM_WARN_ON_ONCE(p4d_leaf(*p4d)); pud = pud_offset(p4d, start & P4D_MASK); restart: for (i = pud_index(start), addr = start; addr != end; i++, addr = next) { pud_t val = READ_ONCE(pud[i]); next = pud_addr_end(addr, end); if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val))) continue; walk_pmd_range(&val, addr, next, args); if (need_resched() || walk->batched >= MAX_LRU_BATCH) { end = (addr | ~PUD_MASK) + 1; goto done; } } if (i < PTRS_PER_PUD && get_next_vma(P4D_MASK, PUD_SIZE, args, &start, &end)) goto restart; end = round_up(end, P4D_SIZE); done: if (!end || !args->vma) return 1; walk->next_addr = max(end, args->vma->vm_start); return -EAGAIN; } static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_mm_walk *walk) { static const struct mm_walk_ops mm_walk_ops = { .test_walk = should_skip_vma, .p4d_entry = walk_pud_range, .walk_lock = PGWALK_RDLOCK, }; int err; struct mem_cgroup *memcg = lruvec_memcg(lruvec); walk->next_addr = FIRST_USER_ADDRESS; do { DEFINE_MAX_SEQ(lruvec); err = -EBUSY; /* another thread might have called inc_max_seq() */ if (walk->max_seq != max_seq) break; /* folio_update_gen() requires stable folio_memcg() */ if (!mem_cgroup_trylock_pages(memcg)) break; /* the caller might be holding the lock for write */ if (mmap_read_trylock(mm)) { err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk); mmap_read_unlock(mm); } mem_cgroup_unlock_pages(); if (walk->batched) { spin_lock_irq(&lruvec->lru_lock); reset_batch_size(lruvec, walk); spin_unlock_irq(&lruvec->lru_lock); } cond_resched(); } while (err == -EAGAIN); } static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat, bool force_alloc) { struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; if (pgdat && current_is_kswapd()) { VM_WARN_ON_ONCE(walk); walk = &pgdat->mm_walk; } else if (!walk && force_alloc) { 
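                /*
                 * Not kswapd: fall back to a transient allocation. __GFP_NOWARN
                 * because failure here is tolerable -- try_to_inc_max_seq()
                 * simply degrades to iterate_mm_list_nowalk() when no walk
                 * structure is available.
                 */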
VM_WARN_ON_ONCE(current_is_kswapd()); walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN); } current->reclaim_state->mm_walk = walk; return walk; } static void clear_mm_walk(void) { struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages))); VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats))); current->reclaim_state->mm_walk = NULL; if (!current_is_kswapd()) kfree(walk); } static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap) { int zone; int remaining = MAX_LRU_BATCH; struct lru_gen_folio *lrugen = &lruvec->lrugen; int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); if (type == LRU_GEN_ANON && !can_swap) goto done; /* prevent cold/hot inversion if force_scan is true */ for (zone = 0; zone < MAX_NR_ZONES; zone++) { struct list_head *head = &lrugen->folios[old_gen][type][zone]; while (!list_empty(head)) { struct folio *folio = lru_to_folio(head); VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); new_gen = folio_inc_gen(lruvec, folio, false); list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]); if (!--remaining) return false; } } done: reset_ctrl_pos(lruvec, type, true); WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1); return true; } static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap) { int gen, type, zone; bool success = false; struct lru_gen_folio *lrugen = &lruvec->lrugen; DEFINE_MIN_SEQ(lruvec); VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); /* find the oldest populated generation */ for (type = !can_swap; type < ANON_AND_FILE; type++) { while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) { gen = lru_gen_from_seq(min_seq[type]); for (zone = 0; zone < MAX_NR_ZONES; zone++) { if (!list_empty(&lrugen->folios[gen][type][zone])) goto next; } min_seq[type]++; } next: ; } /* see the comment on lru_gen_folio */ if (can_swap) { min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]); min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]); } for (type = !can_swap; type < ANON_AND_FILE; type++) { if (min_seq[type] == lrugen->min_seq[type]) continue; reset_ctrl_pos(lruvec, type, true); WRITE_ONCE(lrugen->min_seq[type], min_seq[type]); success = true; } return success; } static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan) { int prev, next; int type, zone; struct lru_gen_folio *lrugen = &lruvec->lrugen; restart: spin_lock_irq(&lruvec->lru_lock); VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); for (type = ANON_AND_FILE - 1; type >= 0; type--) { if (get_nr_gens(lruvec, type) != MAX_NR_GENS) continue; VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap)); if (inc_min_seq(lruvec, type, can_swap)) continue; spin_unlock_irq(&lruvec->lru_lock); cond_resched(); goto restart; } /* * Update the active/inactive LRU sizes for compatibility. Both sides of * the current max_seq need to be covered, since max_seq+1 can overlap * with min_seq[LRU_GEN_ANON] if swapping is constrained. And if they do * overlap, cold/hot inversion happens. 
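 *
 * Roughly speaking, only the two youngest generations are reported as
 * "active" to the traditional LRU interface, so once max_seq advances the
 * generation that was max_seq-1 leaves the active set while the slot being
 * reused for max_seq+1 joins it; the loop below transfers exactly that
 * nr_pages delta between the active and inactive counters.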
*/ prev = lru_gen_from_seq(lrugen->max_seq - 1); next = lru_gen_from_seq(lrugen->max_seq + 1); for (type = 0; type < ANON_AND_FILE; type++) { for (zone = 0; zone < MAX_NR_ZONES; zone++) { enum lru_list lru = type * LRU_INACTIVE_FILE; long delta = lrugen->nr_pages[prev][type][zone] - lrugen->nr_pages[next][type][zone]; if (!delta) continue; __update_lru_size(lruvec, lru, zone, delta); __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta); } } for (type = 0; type < ANON_AND_FILE; type++) reset_ctrl_pos(lruvec, type, false); WRITE_ONCE(lrugen->timestamps[next], jiffies); /* make sure preceding modifications appear */ smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1); spin_unlock_irq(&lruvec->lru_lock); } static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, struct scan_control *sc, bool can_swap, bool force_scan) { bool success; struct lru_gen_mm_walk *walk; struct mm_struct *mm = NULL; struct lru_gen_folio *lrugen = &lruvec->lrugen; VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq)); /* see the comment in iterate_mm_list() */ if (max_seq <= READ_ONCE(lruvec->mm_state.seq)) { success = false; goto done; } /* * If the hardware doesn't automatically set the accessed bit, fallback * to lru_gen_look_around(), which only clears the accessed bit in a * handful of PTEs. Spreading the work out over a period of time usually * is less efficient, but it avoids bursty page faults. */ if (!should_walk_mmu()) { success = iterate_mm_list_nowalk(lruvec, max_seq); goto done; } walk = set_mm_walk(NULL, true); if (!walk) { success = iterate_mm_list_nowalk(lruvec, max_seq); goto done; } walk->lruvec = lruvec; walk->max_seq = max_seq; walk->can_swap = can_swap; walk->force_scan = force_scan; do { success = iterate_mm_list(lruvec, walk, &mm); if (mm) walk_mm(lruvec, mm, walk); } while (mm); done: if (success) inc_max_seq(lruvec, can_swap, force_scan); return success; } /****************************************************************************** * working set protection ******************************************************************************/ static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc) { int gen, type, zone; unsigned long total = 0; bool can_swap = get_swappiness(lruvec, sc); struct lru_gen_folio *lrugen = &lruvec->lrugen; struct mem_cgroup *memcg = lruvec_memcg(lruvec); DEFINE_MAX_SEQ(lruvec); DEFINE_MIN_SEQ(lruvec); for (type = !can_swap; type < ANON_AND_FILE; type++) { unsigned long seq; for (seq = min_seq[type]; seq <= max_seq; seq++) { gen = lru_gen_from_seq(seq); for (zone = 0; zone < MAX_NR_ZONES; zone++) total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); } } /* whether the size is big enough to be helpful */ return mem_cgroup_online(memcg) ? 
(total >> sc->priority) : total; } static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc, unsigned long min_ttl) { int gen; unsigned long birth; struct mem_cgroup *memcg = lruvec_memcg(lruvec); DEFINE_MIN_SEQ(lruvec); /* see the comment on lru_gen_folio */ gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]); birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); if (time_is_after_jiffies(birth + min_ttl)) return false; if (!lruvec_is_sizable(lruvec, sc)) return false; mem_cgroup_calculate_protection(NULL, memcg); return !mem_cgroup_below_min(NULL, memcg); } /* to protect the working set of the last N jiffies */ static unsigned long lru_gen_min_ttl __read_mostly; static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) { struct mem_cgroup *memcg; unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl); VM_WARN_ON_ONCE(!current_is_kswapd()); /* check the order to exclude compaction-induced reclaim */ if (!min_ttl || sc->order || sc->priority == DEF_PRIORITY) return; memcg = mem_cgroup_iter(NULL, NULL, NULL); do { struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); if (lruvec_is_reclaimable(lruvec, sc, min_ttl)) { mem_cgroup_iter_break(NULL, memcg); return; } cond_resched(); } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); /* * The main goal is to OOM kill if every generation from all memcgs is * younger than min_ttl. However, another possibility is all memcgs are * either too small or below min. */ if (mutex_trylock(&oom_lock)) { struct oom_control oc = { .gfp_mask = sc->gfp_mask, }; out_of_memory(&oc); mutex_unlock(&oom_lock); } } /****************************************************************************** * rmap/PT walk feedback ******************************************************************************/ /* * This function exploits spatial locality when shrink_folio_list() walks the * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. If * the scan was done cacheline efficiently, it adds the PMD entry pointing to * the PTE table to the Bloom filter. This forms a feedback loop between the * eviction and the aging. */ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) { int i; unsigned long start; unsigned long end; struct lru_gen_mm_walk *walk; int young = 0; pte_t *pte = pvmw->pte; unsigned long addr = pvmw->address; struct folio *folio = pfn_folio(pvmw->pfn); bool can_swap = !folio_is_file_lru(folio); struct mem_cgroup *memcg = folio_memcg(folio); struct pglist_data *pgdat = folio_pgdat(folio); struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); DEFINE_MAX_SEQ(lruvec); int old_gen, new_gen = lru_gen_from_seq(max_seq); lockdep_assert_held(pvmw->ptl); VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio); if (spin_is_contended(pvmw->ptl)) return; /* avoid taking the LRU lock under the PTL when possible */ walk = current->reclaim_state ? 
current->reclaim_state->mm_walk : NULL; start = max(addr & PMD_MASK, pvmw->vma->vm_start); end = min(addr | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1; if (end - start > MIN_LRU_BATCH * PAGE_SIZE) { if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2) end = start + MIN_LRU_BATCH * PAGE_SIZE; else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2) start = end - MIN_LRU_BATCH * PAGE_SIZE; else { start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2; end = addr + MIN_LRU_BATCH * PAGE_SIZE / 2; } } /* folio_update_gen() requires stable folio_memcg() */ if (!mem_cgroup_trylock_pages(memcg)) return; arch_enter_lazy_mmu_mode(); pte -= (addr - start) / PAGE_SIZE; for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) { unsigned long pfn; pte_t ptent = ptep_get(pte + i); pfn = get_pte_pfn(ptent, pvmw->vma, addr); if (pfn == -1) continue; if (!pte_young(ptent)) continue; folio = get_pfn_folio(pfn, memcg, pgdat, can_swap); if (!folio) continue; if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i)) VM_WARN_ON_ONCE(true); young++; if (pte_dirty(ptent) && !folio_test_dirty(folio) && !(folio_test_anon(folio) && folio_test_swapbacked(folio) && !folio_test_swapcache(folio))) folio_mark_dirty(folio); if (walk) { old_gen = folio_update_gen(folio, new_gen); if (old_gen >= 0 && old_gen != new_gen) update_batch_size(walk, folio, old_gen, new_gen); continue; } old_gen = folio_lru_gen(folio); if (old_gen < 0) folio_set_referenced(folio); else if (old_gen != new_gen) folio_activate(folio); } arch_leave_lazy_mmu_mode(); mem_cgroup_unlock_pages(); /* feedback from rmap walkers to page table walkers */ if (suitable_to_scan(i, young)) update_bloom_filter(lruvec, max_seq, pvmw->pmd); } /****************************************************************************** * memcg LRU ******************************************************************************/ /* see the comment on MEMCG_NR_GENS */ enum { MEMCG_LRU_NOP, MEMCG_LRU_HEAD, MEMCG_LRU_TAIL, MEMCG_LRU_OLD, MEMCG_LRU_YOUNG, }; #ifdef CONFIG_MEMCG static int lru_gen_memcg_seg(struct lruvec *lruvec) { return READ_ONCE(lruvec->lrugen.seg); } static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op) { int seg; int old, new; unsigned long flags; int bin = get_random_u32_below(MEMCG_NR_BINS); struct pglist_data *pgdat = lruvec_pgdat(lruvec); spin_lock_irqsave(&pgdat->memcg_lru.lock, flags); VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list)); seg = 0; new = old = lruvec->lrugen.gen; /* see the comment on MEMCG_NR_GENS */ if (op == MEMCG_LRU_HEAD) seg = MEMCG_LRU_HEAD; else if (op == MEMCG_LRU_TAIL) seg = MEMCG_LRU_TAIL; else if (op == MEMCG_LRU_OLD) new = get_memcg_gen(pgdat->memcg_lru.seq); else if (op == MEMCG_LRU_YOUNG) new = get_memcg_gen(pgdat->memcg_lru.seq + 1); else VM_WARN_ON_ONCE(true); hlist_nulls_del_rcu(&lruvec->lrugen.list); if (op == MEMCG_LRU_HEAD || op == MEMCG_LRU_OLD) hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); else hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); pgdat->memcg_lru.nr_memcgs[old]--; pgdat->memcg_lru.nr_memcgs[new]++; lruvec->lrugen.gen = new; WRITE_ONCE(lruvec->lrugen.seg, seg); if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq)) WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags); } void lru_gen_online_memcg(struct mem_cgroup *memcg) { int gen; int nid; int bin = get_random_u32_below(MEMCG_NR_BINS); for_each_node(nid) { struct pglist_data *pgdat = NODE_DATA(nid); 
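                /*
                 * Each node keeps per-generation, per-bin nulls lists of memcg
                 * lruvecs (see lru_gen_init_pgdat()); a newly onlined memcg is
                 * appended to the tail of the current generation in a randomly
                 * chosen bin, and shrink_many() likewise starts from a random
                 * bin, which helps spread concurrent reclaimers across bins.
                 */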
struct lruvec *lruvec = get_lruvec(memcg, nid); spin_lock_irq(&pgdat->memcg_lru.lock); VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list)); gen = get_memcg_gen(pgdat->memcg_lru.seq); hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]); pgdat->memcg_lru.nr_memcgs[gen]++; lruvec->lrugen.gen = gen; spin_unlock_irq(&pgdat->memcg_lru.lock); } } void lru_gen_offline_memcg(struct mem_cgroup *memcg) { int nid; for_each_node(nid) { struct lruvec *lruvec = get_lruvec(memcg, nid); lru_gen_rotate_memcg(lruvec, MEMCG_LRU_OLD); } } void lru_gen_release_memcg(struct mem_cgroup *memcg) { int gen; int nid; for_each_node(nid) { struct pglist_data *pgdat = NODE_DATA(nid); struct lruvec *lruvec = get_lruvec(memcg, nid); spin_lock_irq(&pgdat->memcg_lru.lock); if (hlist_nulls_unhashed(&lruvec->lrugen.list)) goto unlock; gen = lruvec->lrugen.gen; hlist_nulls_del_init_rcu(&lruvec->lrugen.list); pgdat->memcg_lru.nr_memcgs[gen]--; if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq)) WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); unlock: spin_unlock_irq(&pgdat->memcg_lru.lock); } } void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid) { struct lruvec *lruvec = get_lruvec(memcg, nid); /* see the comment on MEMCG_NR_GENS */ if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_HEAD) lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD); } #else /* !CONFIG_MEMCG */ static int lru_gen_memcg_seg(struct lruvec *lruvec) { return 0; } #endif /****************************************************************************** * the eviction ******************************************************************************/ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc, int tier_idx) { bool success; int gen = folio_lru_gen(folio); int type = folio_is_file_lru(folio); int zone = folio_zonenum(folio); int delta = folio_nr_pages(folio); int refs = folio_lru_refs(folio); int tier = lru_tier_from_refs(refs); struct lru_gen_folio *lrugen = &lruvec->lrugen; VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio); /* unevictable */ if (!folio_evictable(folio)) { success = lru_gen_del_folio(lruvec, folio, true); VM_WARN_ON_ONCE_FOLIO(!success, folio); folio_set_unevictable(folio); lruvec_add_folio(lruvec, folio); __count_vm_events(UNEVICTABLE_PGCULLED, delta); return true; } /* dirty lazyfree */ if (type == LRU_GEN_FILE && folio_test_anon(folio) && folio_test_dirty(folio)) { success = lru_gen_del_folio(lruvec, folio, true); VM_WARN_ON_ONCE_FOLIO(!success, folio); folio_set_swapbacked(folio); lruvec_add_folio_tail(lruvec, folio); return true; } /* promoted */ if (gen != lru_gen_from_seq(lrugen->min_seq[type])) { list_move(&folio->lru, &lrugen->folios[gen][type][zone]); return true; } /* protected */ if (tier > tier_idx) { int hist = lru_hist_from_seq(lrugen->min_seq[type]); gen = folio_inc_gen(lruvec, folio, false); list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); WRITE_ONCE(lrugen->protected[hist][type][tier - 1], lrugen->protected[hist][type][tier - 1] + delta); return true; } /* ineligible */ if (zone > sc->reclaim_idx || skip_cma(folio, sc)) { gen = folio_inc_gen(lruvec, folio, false); list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); return true; } /* waiting for writeback */ if (folio_test_locked(folio) || folio_test_writeback(folio) || (type == LRU_GEN_FILE && folio_test_dirty(folio))) { gen = folio_inc_gen(lruvec, folio, true); list_move(&folio->lru, &lrugen->folios[gen][type][zone]); return true; } 
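        /*
         * None of the cases above applied: leave the folio where it is and
         * report false, so scan_folios() will try isolate_folio() on it and,
         * if that succeeds, hand it to the eviction path.
         */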
return false; } static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc) { bool success; /* swapping inhibited */ if (!(sc->gfp_mask & __GFP_IO) && (folio_test_dirty(folio) || (folio_test_anon(folio) && !folio_test_swapcache(folio)))) return false; /* raced with release_pages() */ if (!folio_try_get(folio)) return false; /* raced with another isolation */ if (!folio_test_clear_lru(folio)) { folio_put(folio); return false; } /* see the comment on MAX_NR_TIERS */ if (!folio_test_referenced(folio)) set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0); /* for shrink_folio_list() */ folio_clear_reclaim(folio); folio_clear_referenced(folio); success = lru_gen_del_folio(lruvec, folio, true); VM_WARN_ON_ONCE_FOLIO(!success, folio); return true; } static int scan_folios(struct lruvec *lruvec, struct scan_control *sc, int type, int tier, struct list_head *list) { int i; int gen; enum vm_event_item item; int sorted = 0; int scanned = 0; int isolated = 0; int remaining = MAX_LRU_BATCH; struct lru_gen_folio *lrugen = &lruvec->lrugen; struct mem_cgroup *memcg = lruvec_memcg(lruvec); VM_WARN_ON_ONCE(!list_empty(list)); if (get_nr_gens(lruvec, type) == MIN_NR_GENS) return 0; gen = lru_gen_from_seq(lrugen->min_seq[type]); for (i = MAX_NR_ZONES; i > 0; i--) { LIST_HEAD(moved); int skipped = 0; int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES; struct list_head *head = &lrugen->folios[gen][type][zone]; while (!list_empty(head)) { struct folio *folio = lru_to_folio(head); int delta = folio_nr_pages(folio); VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); scanned += delta; if (sort_folio(lruvec, folio, sc, tier)) sorted += delta; else if (isolate_folio(lruvec, folio, sc)) { list_add(&folio->lru, list); isolated += delta; } else { list_move(&folio->lru, &moved); skipped += delta; } if (!--remaining || max(isolated, skipped) >= MIN_LRU_BATCH) break; } if (skipped) { list_splice(&moved, head); __count_zid_vm_events(PGSCAN_SKIP, zone, skipped); } if (!remaining || isolated >= MIN_LRU_BATCH) break; } item = PGSCAN_KSWAPD + reclaimer_offset(); if (!cgroup_reclaim(sc)) { __count_vm_events(item, isolated); __count_vm_events(PGREFILL, sorted); } __count_memcg_events(memcg, item, isolated); __count_memcg_events(memcg, PGREFILL, sorted); __count_vm_events(PGSCAN_ANON + type, isolated); /* * There might not be eligible folios due to reclaim_idx. Check the * remaining to prevent livelock if it's not making progress. */ return isolated || !remaining ? scanned : 0; } static int get_tier_idx(struct lruvec *lruvec, int type) { int tier; struct ctrl_pos sp, pv; /* * To leave a margin for fluctuations, use a larger gain factor (1:2). * This value is chosen because any other tier would have at least twice * as many refaults as the first tier. */ read_ctrl_pos(lruvec, type, 0, 1, &sp); for (tier = 1; tier < MAX_NR_TIERS; tier++) { read_ctrl_pos(lruvec, type, tier, 2, &pv); if (!positive_ctrl_err(&sp, &pv)) break; } return tier - 1; } static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx) { int type, tier; struct ctrl_pos sp, pv; int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness }; /* * Compare the first tier of anon with that of file to determine which * type to scan. 
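 * (The gains come straight from swappiness: gain[LRU_GEN_ANON] ==
 * swappiness and gain[LRU_GEN_FILE] == 200 - swappiness, so with the
 * common default swappiness of 60 the comparison uses weights of 60 for
 * anon and 140 for file.)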
Also need to compare other tiers of the selected type * with the first tier of the other type to determine the last tier (of * the selected type) to evict. */ read_ctrl_pos(lruvec, LRU_GEN_ANON, 0, gain[LRU_GEN_ANON], &sp); read_ctrl_pos(lruvec, LRU_GEN_FILE, 0, gain[LRU_GEN_FILE], &pv); type = positive_ctrl_err(&sp, &pv); read_ctrl_pos(lruvec, !type, 0, gain[!type], &sp); for (tier = 1; tier < MAX_NR_TIERS; tier++) { read_ctrl_pos(lruvec, type, tier, gain[type], &pv); if (!positive_ctrl_err(&sp, &pv)) break; } *tier_idx = tier - 1; return type; } static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness, int *type_scanned, struct list_head *list) { int i; int type; int scanned; int tier = -1; DEFINE_MIN_SEQ(lruvec); /* * Try to make the obvious choice first. When anon and file are both * available from the same generation, interpret swappiness 1 as file * first and 200 as anon first. */ if (!swappiness) type = LRU_GEN_FILE; else if (min_seq[LRU_GEN_ANON] < min_seq[LRU_GEN_FILE]) type = LRU_GEN_ANON; else if (swappiness == 1) type = LRU_GEN_FILE; else if (swappiness == 200) type = LRU_GEN_ANON; else type = get_type_to_scan(lruvec, swappiness, &tier); for (i = !swappiness; i < ANON_AND_FILE; i++) { if (tier < 0) tier = get_tier_idx(lruvec, type); scanned = scan_folios(lruvec, sc, type, tier, list); if (scanned) break; type = !type; tier = -1; } *type_scanned = type; return scanned; } static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness) { int type; int scanned; int reclaimed; LIST_HEAD(list); LIST_HEAD(clean); struct folio *folio; struct folio *next; enum vm_event_item item; struct reclaim_stat stat; struct lru_gen_mm_walk *walk; bool skip_retry = false; struct mem_cgroup *memcg = lruvec_memcg(lruvec); struct pglist_data *pgdat = lruvec_pgdat(lruvec); spin_lock_irq(&lruvec->lru_lock); scanned = isolate_folios(lruvec, sc, swappiness, &type, &list); scanned += try_to_inc_min_seq(lruvec, swappiness); if (get_nr_gens(lruvec, !swappiness) == MIN_NR_GENS) scanned = 0; spin_unlock_irq(&lruvec->lru_lock); if (list_empty(&list)) return scanned; retry: reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false); sc->nr_reclaimed += reclaimed; list_for_each_entry_safe_reverse(folio, next, &list, lru) { if (!folio_evictable(folio)) { list_del(&folio->lru); folio_putback_lru(folio); continue; } if (folio_test_reclaim(folio) && (folio_test_dirty(folio) || folio_test_writeback(folio))) { /* restore LRU_REFS_FLAGS cleared by isolate_folio() */ if (folio_test_workingset(folio)) folio_set_referenced(folio); continue; } if (skip_retry || folio_test_active(folio) || folio_test_referenced(folio) || folio_mapped(folio) || folio_test_locked(folio) || folio_test_dirty(folio) || folio_test_writeback(folio)) { /* don't add rejected folios to the oldest generation */ set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, BIT(PG_active)); continue; } /* retry folios that may have missed folio_rotate_reclaimable() */ list_move(&folio->lru, &clean); sc->nr_scanned -= folio_nr_pages(folio); } spin_lock_irq(&lruvec->lru_lock); move_folios_to_lru(lruvec, &list); walk = current->reclaim_state->mm_walk; if (walk && walk->batched) reset_batch_size(lruvec, walk); item = PGSTEAL_KSWAPD + reclaimer_offset(); if (!cgroup_reclaim(sc)) __count_vm_events(item, reclaimed); __count_memcg_events(memcg, item, reclaimed); __count_vm_events(PGSTEAL_ANON + type, reclaimed); spin_unlock_irq(&lruvec->lru_lock); mem_cgroup_uncharge_list(&list); 
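        /*
         * After the freed batch below is returned to the page allocator, one
         * retry pass is made: folios parked on the "clean" list (those that
         * may only have missed folio_rotate_reclaimable()) are spliced back
         * and fed through shrink_folio_list() once more, with skip_retry set
         * so they cannot loop indefinitely.
         */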
free_unref_page_list(&list); INIT_LIST_HEAD(&list); list_splice_init(&clean, &list); if (!list_empty(&list)) { skip_retry = true; goto retry; } return scanned; } static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan) { int gen, type, zone; unsigned long old = 0; unsigned long young = 0; unsigned long total = 0; struct lru_gen_folio *lrugen = &lruvec->lrugen; struct mem_cgroup *memcg = lruvec_memcg(lruvec); DEFINE_MIN_SEQ(lruvec); /* whether this lruvec is completely out of cold folios */ if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) { *nr_to_scan = 0; return true; } for (type = !can_swap; type < ANON_AND_FILE; type++) { unsigned long seq; for (seq = min_seq[type]; seq <= max_seq; seq++) { unsigned long size = 0; gen = lru_gen_from_seq(seq); for (zone = 0; zone < MAX_NR_ZONES; zone++) size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); total += size; if (seq == max_seq) young += size; else if (seq + MIN_NR_GENS == max_seq) old += size; } } /* try to scrape all its memory if this memcg was deleted */ *nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total; /* * The aging tries to be lazy to reduce the overhead, while the eviction * stalls when the number of generations reaches MIN_NR_GENS. Hence, the * ideal number of generations is MIN_NR_GENS+1. */ if (min_seq[!can_swap] + MIN_NR_GENS < max_seq) return false; /* * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1) * of the total number of pages for each generation. A reasonable range * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The * aging cares about the upper bound of hot pages, while the eviction * cares about the lower bound of cold pages. */ if (young * MIN_NR_GENS > total) return true; if (old * (MIN_NR_GENS + 2) < total) return true; return false; } /* * For future optimizations: * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg * reclaim. */ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool can_swap) { unsigned long nr_to_scan; struct mem_cgroup *memcg = lruvec_memcg(lruvec); DEFINE_MAX_SEQ(lruvec); if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg)) return 0; if (!should_run_aging(lruvec, max_seq, sc, can_swap, &nr_to_scan)) return nr_to_scan; /* skip the aging path at the default priority */ if (sc->priority == DEF_PRIORITY) return nr_to_scan; /* skip this lruvec as it's low on cold folios */ return try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false) ? 
-1 : 0; } static unsigned long get_nr_to_reclaim(struct scan_control *sc) { /* don't abort memcg reclaim to ensure fairness */ if (!root_reclaim(sc)) return -1; return max(sc->nr_to_reclaim, compact_gap(sc->order)); } static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) { long nr_to_scan; unsigned long scanned = 0; unsigned long nr_to_reclaim = get_nr_to_reclaim(sc); int swappiness = get_swappiness(lruvec, sc); /* clean file folios are more likely to exist */ if (swappiness && !(sc->gfp_mask & __GFP_IO)) swappiness = 1; while (true) { int delta; nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness); if (nr_to_scan <= 0) break; delta = evict_folios(lruvec, sc, swappiness); if (!delta) break; scanned += delta; if (scanned >= nr_to_scan) break; if (sc->nr_reclaimed >= nr_to_reclaim) break; cond_resched(); } /* whether try_to_inc_max_seq() was successful */ return nr_to_scan < 0; } static int shrink_one(struct lruvec *lruvec, struct scan_control *sc) { bool success; unsigned long scanned = sc->nr_scanned; unsigned long reclaimed = sc->nr_reclaimed; int seg = lru_gen_memcg_seg(lruvec); struct mem_cgroup *memcg = lruvec_memcg(lruvec); struct pglist_data *pgdat = lruvec_pgdat(lruvec); /* see the comment on MEMCG_NR_GENS */ if (!lruvec_is_sizable(lruvec, sc)) return seg != MEMCG_LRU_TAIL ? MEMCG_LRU_TAIL : MEMCG_LRU_YOUNG; mem_cgroup_calculate_protection(NULL, memcg); if (mem_cgroup_below_min(NULL, memcg)) return MEMCG_LRU_YOUNG; if (mem_cgroup_below_low(NULL, memcg)) { /* see the comment on MEMCG_NR_GENS */ if (seg != MEMCG_LRU_TAIL) return MEMCG_LRU_TAIL; memcg_memory_event(memcg, MEMCG_LOW); } success = try_to_shrink_lruvec(lruvec, sc); shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority); if (!sc->proactive) vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned, sc->nr_reclaimed - reclaimed); flush_reclaim_state(sc); return success ? 
MEMCG_LRU_YOUNG : 0; } #ifdef CONFIG_MEMCG static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc) { int op; int gen; int bin; int first_bin; struct lruvec *lruvec; struct lru_gen_folio *lrugen; struct mem_cgroup *memcg; const struct hlist_nulls_node *pos; unsigned long nr_to_reclaim = get_nr_to_reclaim(sc); bin = first_bin = get_random_u32_below(MEMCG_NR_BINS); restart: op = 0; memcg = NULL; gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq)); rcu_read_lock(); hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) { if (op) { lru_gen_rotate_memcg(lruvec, op); op = 0; } mem_cgroup_put(memcg); lruvec = container_of(lrugen, struct lruvec, lrugen); memcg = lruvec_memcg(lruvec); if (!mem_cgroup_tryget(memcg)) { lru_gen_release_memcg(memcg); memcg = NULL; continue; } rcu_read_unlock(); op = shrink_one(lruvec, sc); rcu_read_lock(); if (sc->nr_reclaimed >= nr_to_reclaim) break; } rcu_read_unlock(); if (op) lru_gen_rotate_memcg(lruvec, op); mem_cgroup_put(memcg); if (sc->nr_reclaimed >= nr_to_reclaim) return; /* restart if raced with lru_gen_rotate_memcg() */ if (gen != get_nulls_value(pos)) goto restart; /* try the rest of the bins of the current generation */ bin = get_memcg_bin(bin + 1); if (bin != first_bin) goto restart; } static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) { struct blk_plug plug; VM_WARN_ON_ONCE(root_reclaim(sc)); VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap); lru_add_drain(); blk_start_plug(&plug); set_mm_walk(NULL, sc->proactive); if (try_to_shrink_lruvec(lruvec, sc)) lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG); clear_mm_walk(); blk_finish_plug(&plug); } #else /* !CONFIG_MEMCG */ static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc) { BUILD_BUG(); } static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) { BUILD_BUG(); } #endif static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc) { int priority; unsigned long reclaimable; struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat); if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH) return; /* * Determine the initial priority based on ((total / MEMCG_NR_GENS) >> * priority) * reclaimed_to_scanned_ratio = nr_to_reclaim, where the * estimated reclaimed_to_scanned_ratio = inactive / total. */ reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE); if (get_swappiness(lruvec, sc)) reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON); reclaimable /= MEMCG_NR_GENS; /* round down reclaimable and round up sc->nr_to_reclaim */ priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1); sc->priority = clamp(priority, 0, DEF_PRIORITY); } static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) { struct blk_plug plug; unsigned long reclaimed = sc->nr_reclaimed; VM_WARN_ON_ONCE(!root_reclaim(sc)); /* * Unmapped clean folios are already prioritized. Scanning for more of * them is likely futile and can cause high reclaim latency when there * is a large number of memcgs. 
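 *
 * Hence the early bailout below when the caller can neither write back
 * dirty folios nor unmap mapped ones; kswapd_failures is still cleared on
 * the way out so that kswapd does not give up on the node.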
*/ if (!sc->may_writepage || !sc->may_unmap) goto done; lru_add_drain(); blk_start_plug(&plug); set_mm_walk(pgdat, sc->proactive); set_initial_priority(pgdat, sc); if (current_is_kswapd()) sc->nr_reclaimed = 0; if (mem_cgroup_disabled()) shrink_one(&pgdat->__lruvec, sc); else shrink_many(pgdat, sc); if (current_is_kswapd()) sc->nr_reclaimed += reclaimed; clear_mm_walk(); blk_finish_plug(&plug); done: /* kswapd should never fail */ pgdat->kswapd_failures = 0; } /****************************************************************************** * state change ******************************************************************************/ static bool __maybe_unused state_is_valid(struct lruvec *lruvec) { struct lru_gen_folio *lrugen = &lruvec->lrugen; if (lrugen->enabled) { enum lru_list lru; for_each_evictable_lru(lru) { if (!list_empty(&lruvec->lists[lru])) return false; } } else { int gen, type, zone; for_each_gen_type_zone(gen, type, zone) { if (!list_empty(&lrugen->folios[gen][type][zone])) return false; } } return true; } static bool fill_evictable(struct lruvec *lruvec) { enum lru_list lru; int remaining = MAX_LRU_BATCH; for_each_evictable_lru(lru) { int type = is_file_lru(lru); bool active = is_active_lru(lru); struct list_head *head = &lruvec->lists[lru]; while (!list_empty(head)) { bool success; struct folio *folio = lru_to_folio(head); VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio) != active, folio); VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio); lruvec_del_folio(lruvec, folio); success = lru_gen_add_folio(lruvec, folio, false); VM_WARN_ON_ONCE(!success); if (!--remaining) return false; } } return true; } static bool drain_evictable(struct lruvec *lruvec) { int gen, type, zone; int remaining = MAX_LRU_BATCH; for_each_gen_type_zone(gen, type, zone) { struct list_head *head = &lruvec->lrugen.folios[gen][type][zone]; while (!list_empty(head)) { bool success; struct folio *folio = lru_to_folio(head); VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio); VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio); success = lru_gen_del_folio(lruvec, folio, false); VM_WARN_ON_ONCE(!success); lruvec_add_folio(lruvec, folio); if (!--remaining) return false; } } return true; } static void lru_gen_change_state(bool enabled) { static DEFINE_MUTEX(state_mutex); struct mem_cgroup *memcg; cgroup_lock(); cpus_read_lock(); get_online_mems(); mutex_lock(&state_mutex); if (enabled == lru_gen_enabled()) goto unlock; if (enabled) static_branch_enable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]); else static_branch_disable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]); memcg = mem_cgroup_iter(NULL, NULL, NULL); do { int nid; for_each_node(nid) { struct lruvec *lruvec = get_lruvec(memcg, nid); spin_lock_irq(&lruvec->lru_lock); VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); VM_WARN_ON_ONCE(!state_is_valid(lruvec)); lruvec->lrugen.enabled = enabled; while (!(enabled ? 
fill_evictable(lruvec) : drain_evictable(lruvec))) { spin_unlock_irq(&lruvec->lru_lock); cond_resched(); spin_lock_irq(&lruvec->lru_lock); } spin_unlock_irq(&lruvec->lru_lock); } cond_resched(); } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); unlock: mutex_unlock(&state_mutex); put_online_mems(); cpus_read_unlock(); cgroup_unlock(); } /****************************************************************************** * sysfs interface ******************************************************************************/ static ssize_t min_ttl_ms_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl))); } /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ static ssize_t min_ttl_ms_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t len) { unsigned int msecs; if (kstrtouint(buf, 0, &msecs)) return -EINVAL; WRITE_ONCE(lru_gen_min_ttl, msecs_to_jiffies(msecs)); return len; } static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR_RW(min_ttl_ms); static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { unsigned int caps = 0; if (get_cap(LRU_GEN_CORE)) caps |= BIT(LRU_GEN_CORE); if (should_walk_mmu()) caps |= BIT(LRU_GEN_MM_WALK); if (should_clear_pmd_young()) caps |= BIT(LRU_GEN_NONLEAF_YOUNG); return sysfs_emit(buf, "0x%04x\n", caps); } /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t len) { int i; unsigned int caps; if (tolower(*buf) == 'n') caps = 0; else if (tolower(*buf) == 'y') caps = -1; else if (kstrtouint(buf, 0, &caps)) return -EINVAL; for (i = 0; i < NR_LRU_GEN_CAPS; i++) { bool enabled = caps & BIT(i); if (i == LRU_GEN_CORE) lru_gen_change_state(enabled); else if (enabled) static_branch_enable(&lru_gen_caps[i]); else static_branch_disable(&lru_gen_caps[i]); } return len; } static struct kobj_attribute lru_gen_enabled_attr = __ATTR_RW(enabled); static struct attribute *lru_gen_attrs[] = { &lru_gen_min_ttl_attr.attr, &lru_gen_enabled_attr.attr, NULL }; static const struct attribute_group lru_gen_attr_group = { .name = "lru_gen", .attrs = lru_gen_attrs, }; /****************************************************************************** * debugfs interface ******************************************************************************/ static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos) { struct mem_cgroup *memcg; loff_t nr_to_skip = *pos; m->private = kvmalloc(PATH_MAX, GFP_KERNEL); if (!m->private) return ERR_PTR(-ENOMEM); memcg = mem_cgroup_iter(NULL, NULL, NULL); do { int nid; for_each_node_state(nid, N_MEMORY) { if (!nr_to_skip--) return get_lruvec(memcg, nid); } } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); return NULL; } static void lru_gen_seq_stop(struct seq_file *m, void *v) { if (!IS_ERR_OR_NULL(v)) mem_cgroup_iter_break(NULL, lruvec_memcg(v)); kvfree(m->private); m->private = NULL; } static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos) { int nid = lruvec_pgdat(v)->node_id; struct mem_cgroup *memcg = lruvec_memcg(v); ++*pos; nid = next_memory_node(nid); if (nid == MAX_NUMNODES) { memcg = mem_cgroup_iter(NULL, memcg, NULL); if (!memcg) return NULL; nid = first_memory_node; } return get_lruvec(memcg, nid); } static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec, unsigned long max_seq, unsigned long *min_seq, unsigned long seq) { 
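        /*
         * Per-generation detail for the read-only debugfs file lru_gen_full:
         * for each tier, refaulted/evicted (or avg_refaulted/avg_total for
         * max_seq) plus protected counts per type, followed by one line of
         * per-walk mm_stats.
         */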
int i; int type, tier; int hist = lru_hist_from_seq(seq); struct lru_gen_folio *lrugen = &lruvec->lrugen; for (tier = 0; tier < MAX_NR_TIERS; tier++) { seq_printf(m, " %10d", tier); for (type = 0; type < ANON_AND_FILE; type++) { const char *s = " "; unsigned long n[3] = {}; if (seq == max_seq) { s = "RT "; n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]); n[1] = READ_ONCE(lrugen->avg_total[type][tier]); } else if (seq == min_seq[type] || NR_HIST_GENS > 1) { s = "rep"; n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]); n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]); if (tier) n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]); } for (i = 0; i < 3; i++) seq_printf(m, " %10lu%c", n[i], s[i]); } seq_putc(m, '\n'); } seq_puts(m, " "); for (i = 0; i < NR_MM_STATS; i++) { const char *s = " "; unsigned long n = 0; if (seq == max_seq && NR_HIST_GENS == 1) { s = "LOYNFA"; n = READ_ONCE(lruvec->mm_state.stats[hist][i]); } else if (seq != max_seq && NR_HIST_GENS > 1) { s = "loynfa"; n = READ_ONCE(lruvec->mm_state.stats[hist][i]); } seq_printf(m, " %10lu%c", n, s[i]); } seq_putc(m, '\n'); } /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ static int lru_gen_seq_show(struct seq_file *m, void *v) { unsigned long seq; bool full = !debugfs_real_fops(m->file)->write; struct lruvec *lruvec = v; struct lru_gen_folio *lrugen = &lruvec->lrugen; int nid = lruvec_pgdat(lruvec)->node_id; struct mem_cgroup *memcg = lruvec_memcg(lruvec); DEFINE_MAX_SEQ(lruvec); DEFINE_MIN_SEQ(lruvec); if (nid == first_memory_node) { const char *path = memcg ? m->private : ""; #ifdef CONFIG_MEMCG if (memcg) cgroup_path(memcg->css.cgroup, m->private, PATH_MAX); #endif seq_printf(m, "memcg %5hu %s\n", mem_cgroup_id(memcg), path); } seq_printf(m, " node %5d\n", nid); if (!full) seq = min_seq[LRU_GEN_ANON]; else if (max_seq >= MAX_NR_GENS) seq = max_seq - MAX_NR_GENS + 1; else seq = 0; for (; seq <= max_seq; seq++) { int type, zone; int gen = lru_gen_from_seq(seq); unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth)); for (type = 0; type < ANON_AND_FILE; type++) { unsigned long size = 0; char mark = full && seq < min_seq[type] ? 
'x' : ' '; for (zone = 0; zone < MAX_NR_ZONES; zone++) size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); seq_printf(m, " %10lu%c", size, mark); } seq_putc(m, '\n'); if (full) lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq); } return 0; } static const struct seq_operations lru_gen_seq_ops = { .start = lru_gen_seq_start, .stop = lru_gen_seq_stop, .next = lru_gen_seq_next, .show = lru_gen_seq_show, }; static int run_aging(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc, bool can_swap, bool force_scan) { DEFINE_MAX_SEQ(lruvec); DEFINE_MIN_SEQ(lruvec); if (seq < max_seq) return 0; if (seq > max_seq) return -EINVAL; if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq) return -ERANGE; try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, force_scan); return 0; } static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc, int swappiness, unsigned long nr_to_reclaim) { DEFINE_MAX_SEQ(lruvec); if (seq + MIN_NR_GENS > max_seq) return -EINVAL; sc->nr_reclaimed = 0; while (!signal_pending(current)) { DEFINE_MIN_SEQ(lruvec); if (seq < min_seq[!swappiness]) return 0; if (sc->nr_reclaimed >= nr_to_reclaim) return 0; if (!evict_folios(lruvec, sc, swappiness)) return 0; cond_resched(); } return -EINTR; } static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq, struct scan_control *sc, int swappiness, unsigned long opt) { struct lruvec *lruvec; int err = -EINVAL; struct mem_cgroup *memcg = NULL; if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY)) return -EINVAL; if (!mem_cgroup_disabled()) { rcu_read_lock(); memcg = mem_cgroup_from_id(memcg_id); if (!mem_cgroup_tryget(memcg)) memcg = NULL; rcu_read_unlock(); if (!memcg) return -EINVAL; } if (memcg_id != mem_cgroup_id(memcg)) goto done; lruvec = get_lruvec(memcg, nid); if (swappiness < 0) swappiness = get_swappiness(lruvec, sc); else if (swappiness > 200) goto done; switch (cmd) { case '+': err = run_aging(lruvec, seq, sc, swappiness, opt); break; case '-': err = run_eviction(lruvec, seq, sc, swappiness, opt); break; } done: mem_cgroup_put(memcg); return err; } /* see Documentation/admin-guide/mm/multigen_lru.rst for details */ static ssize_t lru_gen_seq_write(struct file *file, const char __user *src, size_t len, loff_t *pos) { void *buf; char *cur, *next; unsigned int flags; struct blk_plug plug; int err = -EINVAL; struct scan_control sc = { .may_writepage = true, .may_unmap = true, .may_swap = true, .reclaim_idx = MAX_NR_ZONES - 1, .gfp_mask = GFP_KERNEL, }; buf = kvmalloc(len + 1, GFP_KERNEL); if (!buf) return -ENOMEM; if (copy_from_user(buf, src, len)) { kvfree(buf); return -EFAULT; } set_task_reclaim_state(current, &sc.reclaim_state); flags = memalloc_noreclaim_save(); blk_start_plug(&plug); if (!set_mm_walk(NULL, true)) { err = -ENOMEM; goto done; } next = buf; next[len] = '\0'; while ((cur = strsep(&next, ",;\n"))) { int n; int end; char cmd; unsigned int memcg_id; unsigned int nid; unsigned long seq; unsigned int swappiness = -1; unsigned long opt = -1; cur = skip_spaces(cur); if (!*cur) continue; n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid, &seq, &end, &swappiness, &end, &opt, &end); if (n < 4 || cur[end]) { err = -EINVAL; break; } err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt); if (err) break; } done: clear_mm_walk(); blk_finish_plug(&plug); memalloc_noreclaim_restore(flags); set_task_reclaim_state(current, NULL); kvfree(buf); return err ? 
: len; } static int lru_gen_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &lru_gen_seq_ops); } static const struct file_operations lru_gen_rw_fops = { .open = lru_gen_seq_open, .read = seq_read, .write = lru_gen_seq_write, .llseek = seq_lseek, .release = seq_release, }; static const struct file_operations lru_gen_ro_fops = { .open = lru_gen_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; /****************************************************************************** * initialization ******************************************************************************/ void lru_gen_init_lruvec(struct lruvec *lruvec) { int i; int gen, type, zone; struct lru_gen_folio *lrugen = &lruvec->lrugen; lrugen->max_seq = MIN_NR_GENS + 1; lrugen->enabled = lru_gen_enabled(); for (i = 0; i <= MIN_NR_GENS + 1; i++) lrugen->timestamps[i] = jiffies; for_each_gen_type_zone(gen, type, zone) INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]); lruvec->mm_state.seq = MIN_NR_GENS; } #ifdef CONFIG_MEMCG void lru_gen_init_pgdat(struct pglist_data *pgdat) { int i, j; spin_lock_init(&pgdat->memcg_lru.lock); for (i = 0; i < MEMCG_NR_GENS; i++) { for (j = 0; j < MEMCG_NR_BINS; j++) INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i); } } void lru_gen_init_memcg(struct mem_cgroup *memcg) { INIT_LIST_HEAD(&memcg->mm_list.fifo); spin_lock_init(&memcg->mm_list.lock); } void lru_gen_exit_memcg(struct mem_cgroup *memcg) { int i; int nid; VM_WARN_ON_ONCE(!list_empty(&memcg->mm_list.fifo)); for_each_node(nid) { struct lruvec *lruvec = get_lruvec(memcg, nid); VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0, sizeof(lruvec->lrugen.nr_pages))); lruvec->lrugen.list.next = LIST_POISON1; for (i = 0; i < NR_BLOOM_FILTERS; i++) { bitmap_free(lruvec->mm_state.filters[i]); lruvec->mm_state.filters[i] = NULL; } } } #endif /* CONFIG_MEMCG */ static int __init init_lru_gen(void) { BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS); BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS); if (sysfs_create_group(mm_kobj, &lru_gen_attr_group)) pr_err("lru_gen: failed to create sysfs group\n"); debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops); debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops); return 0; }; late_initcall(init_lru_gen); #else /* !CONFIG_LRU_GEN */ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) { } static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) { } static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) { } #endif /* CONFIG_LRU_GEN */ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) { unsigned long nr[NR_LRU_LISTS]; unsigned long targets[NR_LRU_LISTS]; unsigned long nr_to_scan; enum lru_list lru; unsigned long nr_reclaimed = 0; unsigned long nr_to_reclaim = sc->nr_to_reclaim; bool proportional_reclaim; struct blk_plug plug; if (lru_gen_enabled() && !root_reclaim(sc)) { lru_gen_shrink_lruvec(lruvec, sc); return; } get_scan_count(lruvec, sc, nr); /* Record the original scan target for proportional adjustments later */ memcpy(targets, nr, sizeof(nr)); /* * Global reclaiming within direct reclaim at DEF_PRIORITY is a normal * event that can occur when there is little memory pressure e.g. * multiple streaming readers/writers. 
Hence, we do not abort scanning * when the requested number of pages are reclaimed when scanning at * DEF_PRIORITY on the assumption that the fact we are direct * reclaiming implies that kswapd is not keeping up and it is best to * do a batch of work at once. For memcg reclaim one check is made to * abort proportional reclaim if either the file or anon lru has already * dropped to zero at the first pass. */ proportional_reclaim = (!cgroup_reclaim(sc) && !current_is_kswapd() && sc->priority == DEF_PRIORITY); blk_start_plug(&plug); while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || nr[LRU_INACTIVE_FILE]) { unsigned long nr_anon, nr_file, percentage; unsigned long nr_scanned; for_each_evictable_lru(lru) { if (nr[lru]) { nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX); nr[lru] -= nr_to_scan; nr_reclaimed += shrink_list(lru, nr_to_scan, lruvec, sc); } } cond_resched(); if (nr_reclaimed < nr_to_reclaim || proportional_reclaim) continue; /* * For kswapd and memcg, reclaim at least the number of pages * requested. Ensure that the anon and file LRUs are scanned * proportionally what was requested by get_scan_count(). We * stop reclaiming one LRU and reduce the amount scanning * proportional to the original scan target. */ nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE]; nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON]; /* * It's just vindictive to attack the larger once the smaller * has gone to zero. And given the way we stop scanning the * smaller below, this makes sure that we only make one nudge * towards proportionality once we've got nr_to_reclaim. */ if (!nr_file || !nr_anon) break; if (nr_file > nr_anon) { unsigned long scan_target = targets[LRU_INACTIVE_ANON] + targets[LRU_ACTIVE_ANON] + 1; lru = LRU_BASE; percentage = nr_anon * 100 / scan_target; } else { unsigned long scan_target = targets[LRU_INACTIVE_FILE] + targets[LRU_ACTIVE_FILE] + 1; lru = LRU_FILE; percentage = nr_file * 100 / scan_target; } /* Stop scanning the smaller of the LRU */ nr[lru] = 0; nr[lru + LRU_ACTIVE] = 0; /* * Recalculate the other LRU scan count based on its original * scan target and the percentage scanning already complete */ lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE; nr_scanned = targets[lru] - nr[lru]; nr[lru] = targets[lru] * (100 - percentage) / 100; nr[lru] -= min(nr[lru], nr_scanned); lru += LRU_ACTIVE; nr_scanned = targets[lru] - nr[lru]; nr[lru] = targets[lru] * (100 - percentage) / 100; nr[lru] -= min(nr[lru], nr_scanned); } blk_finish_plug(&plug); sc->nr_reclaimed += nr_reclaimed; /* * Even if we did not try to evict anon pages at all, we want to * rebalance the anon lru active/inactive ratio. */ if (can_age_anon_pages(lruvec_pgdat(lruvec), sc) && inactive_is_low(lruvec, LRU_INACTIVE_ANON)) shrink_active_list(SWAP_CLUSTER_MAX, lruvec, sc, LRU_ACTIVE_ANON); } /* Use reclaim/compaction for costly allocs or under memory pressure */ static bool in_reclaim_compaction(struct scan_control *sc) { if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && (sc->order > PAGE_ALLOC_COSTLY_ORDER || sc->priority < DEF_PRIORITY - 2)) return true; return false; } /* * Reclaim/compaction is used for high-order allocation requests. It reclaims * order-0 pages before compacting the zone. should_continue_reclaim() returns * true if more pages should be reclaimed such that when the page allocator * calls try_to_compact_pages() that it will have enough free pages to succeed. * It will give up earlier than that if there is difficulty reclaiming pages. 
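 *
 * Concretely (see the body below): stop once the allocation or compaction
 * would already succeed for sc->order, or once a scan cycle reclaims
 * nothing; otherwise keep going while the inactive lists still hold more
 * than compact_gap(sc->order) pages -- roughly 2 << order base pages, i.e.
 * on the order of a thousand pages for an order-9 THP request.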
*/ static inline bool should_continue_reclaim(struct pglist_data *pgdat, unsigned long nr_reclaimed, struct scan_control *sc) { unsigned long pages_for_compaction; unsigned long inactive_lru_pages; int z; /* If not in reclaim/compaction mode, stop */ if (!in_reclaim_compaction(sc)) return false; /* * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX * number of pages that were scanned. This will return to the caller * with the risk reclaim/compaction and the resulting allocation attempt * fails. In the past we have tried harder for __GFP_RETRY_MAYFAIL * allocations through requiring that the full LRU list has been scanned * first, by assuming that zero delta of sc->nr_scanned means full LRU * scan, but that approximation was wrong, and there were corner cases * where always a non-zero amount of pages were scanned. */ if (!nr_reclaimed) return false; /* If compaction would go ahead or the allocation would succeed, stop */ for (z = 0; z <= sc->reclaim_idx; z++) { struct zone *zone = &pgdat->node_zones[z]; if (!managed_zone(zone)) continue; /* Allocation can already succeed, nothing to do */ if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), sc->reclaim_idx, 0)) return false; if (compaction_suitable(zone, sc->order, sc->reclaim_idx)) return false; } /* * If we have not reclaimed enough pages for compaction and the * inactive lists are large enough, continue reclaiming */ pages_for_compaction = compact_gap(sc->order); inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE); if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON); return inactive_lru_pages > pages_for_compaction; } static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc) { struct mem_cgroup *target_memcg = sc->target_mem_cgroup; struct mem_cgroup *memcg; memcg = mem_cgroup_iter(target_memcg, NULL, NULL); do { struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); unsigned long reclaimed; unsigned long scanned; /* * This loop can become CPU-bound when target memcgs * aren't eligible for reclaim - either because they * don't have any reclaimable pages, or because their * memory is explicitly protected. Avoid soft lockups. */ cond_resched(); mem_cgroup_calculate_protection(target_memcg, memcg); if (mem_cgroup_below_min(target_memcg, memcg)) { /* * Hard protection. * If there is no reclaimable memory, OOM. */ continue; } else if (mem_cgroup_below_low(target_memcg, memcg)) { /* * Soft protection. * Respect the protection only as long as * there is an unprotected supply * of reclaimable memory from other cgroups. 
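 * In practice that means the cgroup is skipped (and noted in
 * sc->memcg_low_skipped) unless sc->memcg_low_reclaim is set, in which
 * case reclaim proceeds below "low" and a MEMCG_LOW event is recorded.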
*/ if (!sc->memcg_low_reclaim) { sc->memcg_low_skipped = 1; continue; } memcg_memory_event(memcg, MEMCG_LOW); } reclaimed = sc->nr_reclaimed; scanned = sc->nr_scanned; shrink_lruvec(lruvec, sc); shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority); /* Record the group's reclaim efficiency */ if (!sc->proactive) vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned, sc->nr_reclaimed - reclaimed); } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL))); } static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) { unsigned long nr_reclaimed, nr_scanned, nr_node_reclaimed; struct lruvec *target_lruvec; bool reclaimable = false; if (lru_gen_enabled() && root_reclaim(sc)) { lru_gen_shrink_node(pgdat, sc); return; } target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); again: memset(&sc->nr, 0, sizeof(sc->nr)); nr_reclaimed = sc->nr_reclaimed; nr_scanned = sc->nr_scanned; prepare_scan_count(pgdat, sc); shrink_node_memcgs(pgdat, sc); flush_reclaim_state(sc); nr_node_reclaimed = sc->nr_reclaimed - nr_reclaimed; /* Record the subtree's reclaim efficiency */ if (!sc->proactive) vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, sc->nr_scanned - nr_scanned, nr_node_reclaimed); if (nr_node_reclaimed) reclaimable = true; if (current_is_kswapd()) { /* * If reclaim is isolating dirty pages under writeback, * it implies that the long-lived page allocation rate * is exceeding the page laundering rate. Either the * global limits are not being effective at throttling * processes due to the page distribution throughout * zones or there is heavy usage of a slow backing * device. The only option is to throttle from reclaim * context which is not ideal as there is no guarantee * the dirtying process is throttled in the same way * balance_dirty_pages() manages. * * Once a node is flagged PGDAT_WRITEBACK, kswapd will * count the number of pages under pages flagged for * immediate reclaim and stall if any are encountered * in the nr_immediate check below. */ if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) set_bit(PGDAT_WRITEBACK, &pgdat->flags); /* Allow kswapd to start writing pages during reclaim.*/ if (sc->nr.unqueued_dirty == sc->nr.file_taken) set_bit(PGDAT_DIRTY, &pgdat->flags); /* * If kswapd scans pages marked for immediate * reclaim and under writeback (nr_immediate), it * implies that pages are cycling through the LRU * faster than they are written so forcibly stall * until some pages complete writeback. */ if (sc->nr.immediate) reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK); } /* * Tag a node/memcg as congested if all the dirty pages were marked * for writeback and immediate reclaim (counted in nr.congested). * * Legacy memcg will stall in page writeback so avoid forcibly * stalling in reclaim_throttle(). */ if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) { if (cgroup_reclaim(sc) && writeback_throttling_sane(sc)) set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags); if (current_is_kswapd()) set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags); } /* * Stall direct reclaim for IO completions if the lruvec is * node is congested. Allow kswapd to continue until it * starts encountering unqueued dirty pages or cycling through * the LRU too quickly. 
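 * That is: direct reclaim (but not kswapd) throttles in reclaim_throttle()
 * below while either the LRUVEC_CGROUP_CONGESTED or LRUVEC_NODE_CONGESTED
 * flag set above is still present.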
 */
	if (!current_is_kswapd() && current_may_throttle() &&
	    !sc->hibernation_mode &&
	    (test_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags) ||
	     test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags)))
		reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED);

	if (should_continue_reclaim(pgdat, nr_node_reclaimed, sc))
		goto again;

	/*
	 * Kswapd gives up on balancing particular nodes after too
	 * many failures to reclaim anything from them and goes to
	 * sleep. On reclaim progress, reset the failure counter. A
	 * successful direct reclaim run will revive a dormant kswapd.
	 */
	if (reclaimable)
		pgdat->kswapd_failures = 0;
}

/*
 * Returns true if compaction should go ahead for a costly-order request, or
 * the allocation would already succeed without compaction. Return false if we
 * should reclaim first.
 */
static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
{
	unsigned long watermark;

	/* Allocation can already succeed, nothing to do */
	if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
			      sc->reclaim_idx, 0))
		return true;

	/* Compaction cannot yet proceed. Do reclaim. */
	if (!compaction_suitable(zone, sc->order, sc->reclaim_idx))
		return false;

	/*
	 * Compaction is already possible, but it takes time to run and there
	 * are potentially other callers using the pages just freed. So proceed
	 * with reclaim to make a buffer of free pages available to give
	 * compaction a reasonable chance of completing and allocating the page.
	 * Note that we won't actually reclaim the whole buffer in one attempt
	 * as the target watermark in should_continue_reclaim() is lower. But if
	 * we are already above the high+gap watermark, don't reclaim at all.
	 */
	watermark = high_wmark_pages(zone) + compact_gap(sc->order);

	return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
}

static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc)
{
	/*
	 * If reclaim is making progress greater than 12% efficiency then
	 * wake all the NOPROGRESS throttled tasks.
	 */
	if (sc->nr_reclaimed > (sc->nr_scanned >> 3)) {
		wait_queue_head_t *wqh;

		wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS];
		if (waitqueue_active(wqh))
			wake_up(wqh);

		return;
	}

	/*
	 * Do not throttle kswapd or cgroup reclaim on NOPROGRESS as it will
	 * throttle on VMSCAN_THROTTLE_WRITEBACK if there are too many pages
	 * under writeback and marked for immediate reclaim at the tail of the
	 * LRU.
	 */
	if (current_is_kswapd() || cgroup_reclaim(sc))
		return;

	/* Throttle if making no progress at high priorities. */
	if (sc->priority == 1 && !sc->nr_reclaimed)
		reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS);
}

/*
 * This is the direct reclaim path, for page-allocating processes. We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
*/ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc) { struct zoneref *z; struct zone *zone; unsigned long nr_soft_reclaimed; unsigned long nr_soft_scanned; gfp_t orig_mask; pg_data_t *last_pgdat = NULL; pg_data_t *first_pgdat = NULL; /* * If the number of buffer_heads in the machine exceeds the maximum * allowed level, force direct reclaim to scan the highmem zone as * highmem pages could be pinning lowmem pages storing buffer_heads */ orig_mask = sc->gfp_mask; if (buffer_heads_over_limit) { sc->gfp_mask |= __GFP_HIGHMEM; sc->reclaim_idx = gfp_zone(sc->gfp_mask); } for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, sc->nodemask) { /* * Take care memory controller reclaiming has small influence * to global LRU. */ if (!cgroup_reclaim(sc)) { if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL)) continue; /* * If we already have plenty of memory free for * compaction in this zone, don't free any more. * Even though compaction is invoked for any * non-zero order, only frequent costly order * reclamation is disruptive enough to become a * noticeable problem, like transparent huge * page allocations. */ if (IS_ENABLED(CONFIG_COMPACTION) && sc->order > PAGE_ALLOC_COSTLY_ORDER && compaction_ready(zone, sc)) { sc->compaction_ready = true; continue; } /* * Shrink each node in the zonelist once. If the * zonelist is ordered by zone (not the default) then a * node may be shrunk multiple times but in that case * the user prefers lower zones being preserved. */ if (zone->zone_pgdat == last_pgdat) continue; /* * This steals pages from memory cgroups over softlimit * and returns the number of reclaimed pages and * scanned pages. This works for global memory pressure * and balancing, not for a memcg's limit. */ nr_soft_scanned = 0; nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone->zone_pgdat, sc->order, sc->gfp_mask, &nr_soft_scanned); sc->nr_reclaimed += nr_soft_reclaimed; sc->nr_scanned += nr_soft_scanned; /* need some check for avoid more shrink_zone() */ } if (!first_pgdat) first_pgdat = zone->zone_pgdat; /* See comment about same check for global reclaim above */ if (zone->zone_pgdat == last_pgdat) continue; last_pgdat = zone->zone_pgdat; shrink_node(zone->zone_pgdat, sc); } if (first_pgdat) consider_reclaim_throttle(first_pgdat, sc); /* * Restore to original mask to avoid the impact on the caller if we * promoted it to __GFP_HIGHMEM. */ sc->gfp_mask = orig_mask; } static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat) { struct lruvec *target_lruvec; unsigned long refaults; if (lru_gen_enabled()) return; target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat); refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON); target_lruvec->refaults[WORKINGSET_ANON] = refaults; refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE); target_lruvec->refaults[WORKINGSET_FILE] = refaults; } /* * This is the main entry point to direct page reclaim. * * If a full scan of the inactive list fails to free enough memory then we * are "out of memory" and something needs to be killed. * * If the caller is !__GFP_FS then the probability of a failure is reasonably * high - the zone may be full of dirty or under-writeback pages, which this * caller can't do much about. We kick the writeback threads and take explicit * naps in the hope that some of these pages can be written. But if the * allocating task holds filesystem locks which prevent writeout this might not * work, and the allocation attempt will fail. 
* * returns: 0, if no pages reclaimed * else, the number of pages reclaimed */ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, struct scan_control *sc) { int initial_priority = sc->priority; pg_data_t *last_pgdat; struct zoneref *z; struct zone *zone; retry: delayacct_freepages_start(); if (!cgroup_reclaim(sc)) __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); do { if (!sc->proactive) vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, sc->priority); sc->nr_scanned = 0; shrink_zones(zonelist, sc); if (sc->nr_reclaimed >= sc->nr_to_reclaim) break; if (sc->compaction_ready) break; /* * If we're getting trouble reclaiming, start doing * writepage even in laptop mode. */ if (sc->priority < DEF_PRIORITY - 2) sc->may_writepage = 1; } while (--sc->priority >= 0); last_pgdat = NULL; for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, sc->nodemask) { if (zone->zone_pgdat == last_pgdat) continue; last_pgdat = zone->zone_pgdat; snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat); if (cgroup_reclaim(sc)) { struct lruvec *lruvec; lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, zone->zone_pgdat); clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); } } delayacct_freepages_end(); if (sc->nr_reclaimed) return sc->nr_reclaimed; /* Aborted reclaim to try compaction? don't OOM, then */ if (sc->compaction_ready) return 1; /* * We make inactive:active ratio decisions based on the node's * composition of memory, but a restrictive reclaim_idx or a * memory.low cgroup setting can exempt large amounts of * memory from reclaim. Neither of which are very common, so * instead of doing costly eligibility calculations of the * entire cgroup subtree up front, we assume the estimates are * good, and retry with forcible deactivation if that fails. */ if (sc->skipped_deactivate) { sc->priority = initial_priority; sc->force_deactivate = 1; sc->skipped_deactivate = 0; goto retry; } /* Untapped cgroup reserves? Don't OOM, retry. */ if (sc->memcg_low_skipped) { sc->priority = initial_priority; sc->force_deactivate = 0; sc->memcg_low_reclaim = 1; sc->memcg_low_skipped = 0; goto retry; } return 0; } static bool allow_direct_reclaim(pg_data_t *pgdat) { struct zone *zone; unsigned long pfmemalloc_reserve = 0; unsigned long free_pages = 0; int i; bool wmark_ok; if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) return true; for (i = 0; i <= ZONE_NORMAL; i++) { zone = &pgdat->node_zones[i]; if (!managed_zone(zone)) continue; if (!zone_reclaimable_pages(zone)) continue; pfmemalloc_reserve += min_wmark_pages(zone); free_pages += zone_page_state_snapshot(zone, NR_FREE_PAGES); } /* If there are no reserves (unexpected config) then do not throttle */ if (!pfmemalloc_reserve) return true; wmark_ok = free_pages > pfmemalloc_reserve / 2; /* kswapd must be awake if processes are being throttled */ if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL) WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL); wake_up_interruptible(&pgdat->kswapd_wait); } return wmark_ok; } /* * Throttle direct reclaimers if backing storage is backed by the network * and the PFMEMALLOC reserve for the preferred node is getting dangerously * depleted. kswapd will continue to make progress and wake the processes * when the low watermark is reached. * * Returns true if a fatal signal was delivered during throttling. If this * happens, the page allocator should not consider triggering the OOM killer. 
 */
static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
					nodemask_t *nodemask)
{
	struct zoneref *z;
	struct zone *zone;
	pg_data_t *pgdat = NULL;

	/*
	 * Kernel threads should not be throttled as they may be indirectly
	 * responsible for cleaning pages necessary for reclaim to make forward
	 * progress. kjournald for example may enter direct reclaim while
	 * committing a transaction where throttling it could force other
	 * processes to block on log_wait_commit().
	 */
	if (current->flags & PF_KTHREAD)
		goto out;

	/*
	 * If a fatal signal is pending, this process should not throttle.
	 * It should return quickly so it can exit and free its memory
	 */
	if (fatal_signal_pending(current))
		goto out;

	/*
	 * Check if the pfmemalloc reserves are ok by finding the first node
	 * with a usable ZONE_NORMAL or lower zone. The expectation is that
	 * GFP_KERNEL will be required for allocating network buffers when
	 * swapping over the network so ZONE_HIGHMEM is unusable.
	 *
	 * Throttling is based on the first usable node and throttled processes
	 * wait on a queue until kswapd makes progress and wakes them. There
	 * is an affinity then between processes waking up and where reclaim
	 * progress has been made assuming the process wakes on the same node.
	 * More importantly, processes running on remote nodes will not compete
	 * for remote pfmemalloc reserves and processes on different nodes
	 * should make reasonable progress.
	 */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(gfp_mask), nodemask) {
		if (zone_idx(zone) > ZONE_NORMAL)
			continue;

		/* Throttle based on the first usable node */
		pgdat = zone->zone_pgdat;
		if (allow_direct_reclaim(pgdat))
			goto out;
		break;
	}

	/* If no zone was usable by the allocation flags then do not throttle */
	if (!pgdat)
		goto out;

	/* Account for the throttling */
	count_vm_event(PGSCAN_DIRECT_THROTTLE);

	/*
	 * If the caller cannot enter the filesystem, it's possible that it
	 * is due to the caller holding an FS lock or performing a journal
	 * transaction in the case of a filesystem like ext[3|4]. In this case,
	 * it is not safe to block on pfmemalloc_wait as kswapd could be
	 * blocked waiting on the same lock. Instead, throttle for up to a
	 * second before continuing.
	 */
	if (!(gfp_mask & __GFP_FS))
		wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
			allow_direct_reclaim(pgdat), HZ);
	else
		/* Throttle until kswapd wakes the process */
		wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
			allow_direct_reclaim(pgdat));

	if (fatal_signal_pending(current))
		return true;

out:
	return false;
}

unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	unsigned long nr_reclaimed;
	struct scan_control sc = {
		.nr_to_reclaim = SWAP_CLUSTER_MAX,
		.gfp_mask = current_gfp_context(gfp_mask),
		.reclaim_idx = gfp_zone(gfp_mask),
		.order = order,
		.nodemask = nodemask,
		.priority = DEF_PRIORITY,
		.may_writepage = !laptop_mode,
		.may_unmap = 1,
		.may_swap = 1,
	};

	/*
	 * scan_control uses s8 fields for order, priority, and reclaim_idx.
	 * Confirm they are large enough for max values.
	 */
	BUILD_BUG_ON(MAX_ORDER >= S8_MAX);
	BUILD_BUG_ON(DEF_PRIORITY > S8_MAX);
	BUILD_BUG_ON(MAX_NR_ZONES > S8_MAX);

	/*
	 * Do not enter reclaim if fatal signal was delivered while throttled.
	 * 1 is returned so that the page allocator does not OOM kill at this
	 * point.
*/ if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask)) return 1; set_task_reclaim_state(current, &sc.reclaim_state); trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask); nr_reclaimed = do_try_to_free_pages(zonelist, &sc); trace_mm_vmscan_direct_reclaim_end(nr_reclaimed); set_task_reclaim_state(current, NULL); return nr_reclaimed; } #ifdef CONFIG_MEMCG /* Only used by soft limit reclaim. Do not reuse for anything else. */ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, gfp_t gfp_mask, bool noswap, pg_data_t *pgdat, unsigned long *nr_scanned) { struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); struct scan_control sc = { .nr_to_reclaim = SWAP_CLUSTER_MAX, .target_mem_cgroup = memcg, .may_writepage = !laptop_mode, .may_unmap = 1, .reclaim_idx = MAX_NR_ZONES - 1, .may_swap = !noswap, }; WARN_ON_ONCE(!current->reclaim_state); sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order, sc.gfp_mask); /* * NOTE: Although we can get the priority field, using it * here is not a good idea, since it limits the pages we can scan. * if we don't reclaim here, the shrink_node from balance_pgdat * will pick up pages from other mem cgroup's as well. We hack * the priority and make it zero. */ shrink_lruvec(lruvec, &sc); trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); *nr_scanned = sc.nr_scanned; return sc.nr_reclaimed; } unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, unsigned long nr_pages, gfp_t gfp_mask, unsigned int reclaim_options) { unsigned long nr_reclaimed; unsigned int noreclaim_flag; struct scan_control sc = { .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), .reclaim_idx = MAX_NR_ZONES - 1, .target_mem_cgroup = memcg, .priority = DEF_PRIORITY, .may_writepage = !laptop_mode, .may_unmap = 1, .may_swap = !!(reclaim_options & MEMCG_RECLAIM_MAY_SWAP), .proactive = !!(reclaim_options & MEMCG_RECLAIM_PROACTIVE), }; /* * Traverse the ZONELIST_FALLBACK zonelist of the current node to put * equal pressure on all the nodes. This is based on the assumption that * the reclaim does not bail out early. */ struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); set_task_reclaim_state(current, &sc.reclaim_state); trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask); noreclaim_flag = memalloc_noreclaim_save(); nr_reclaimed = do_try_to_free_pages(zonelist, &sc); memalloc_noreclaim_restore(noreclaim_flag); trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); set_task_reclaim_state(current, NULL); return nr_reclaimed; } #endif static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc) { struct mem_cgroup *memcg; struct lruvec *lruvec; if (lru_gen_enabled()) { lru_gen_age_node(pgdat, sc); return; } if (!can_age_anon_pages(pgdat, sc)) return; lruvec = mem_cgroup_lruvec(NULL, pgdat); if (!inactive_is_low(lruvec, LRU_INACTIVE_ANON)) return; memcg = mem_cgroup_iter(NULL, NULL, NULL); do { lruvec = mem_cgroup_lruvec(memcg, pgdat); shrink_active_list(SWAP_CLUSTER_MAX, lruvec, sc, LRU_ACTIVE_ANON); memcg = mem_cgroup_iter(NULL, memcg, NULL); } while (memcg); } static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx) { int i; struct zone *zone; /* * Check for watermark boosts top-down as the higher zones * are more likely to be boosted. 
Both watermarks and boosts * should not be checked at the same time as reclaim would * start prematurely when there is no boosting and a lower * zone is balanced. */ for (i = highest_zoneidx; i >= 0; i--) { zone = pgdat->node_zones + i; if (!managed_zone(zone)) continue; if (zone->watermark_boost) return true; } return false; } /* * Returns true if there is an eligible zone balanced for the request order * and highest_zoneidx */ static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) { int i; unsigned long mark = -1; struct zone *zone; /* * Check watermarks bottom-up as lower zones are more likely to * meet watermarks. */ for (i = 0; i <= highest_zoneidx; i++) { zone = pgdat->node_zones + i; if (!managed_zone(zone)) continue; if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) mark = wmark_pages(zone, WMARK_PROMO); else mark = high_wmark_pages(zone); if (zone_watermark_ok_safe(zone, order, mark, highest_zoneidx)) return true; } /* * If a node has no managed zone within highest_zoneidx, it does not * need balancing by definition. This can happen if a zone-restricted * allocation tries to wake a remote kswapd. */ if (mark == -1) return true; return false; } /* Clear pgdat state for congested, dirty or under writeback. */ static void clear_pgdat_congested(pg_data_t *pgdat) { struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat); clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags); clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); clear_bit(PGDAT_DIRTY, &pgdat->flags); clear_bit(PGDAT_WRITEBACK, &pgdat->flags); } /* * Prepare kswapd for sleeping. This verifies that there are no processes * waiting in throttle_direct_reclaim() and that watermarks have been met. * * Returns true if kswapd is ready to sleep */ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int highest_zoneidx) { /* * The throttled processes are normally woken up in balance_pgdat() as * soon as allow_direct_reclaim() is true. But there is a potential * race between when kswapd checks the watermarks and a process gets * throttled. There is also a potential race if processes get * throttled, kswapd wakes, a large process exits thereby balancing the * zones, which causes kswapd to exit balance_pgdat() before reaching * the wake up checks. If kswapd is going to sleep, no process should * be sleeping on pfmemalloc_wait, so wake them now if necessary. If * the wake up is premature, processes will wake kswapd and get * throttled again. The difference from wake ups in balance_pgdat() is * that here we are under prepare_to_wait(). */ if (waitqueue_active(&pgdat->pfmemalloc_wait)) wake_up_all(&pgdat->pfmemalloc_wait); /* Hopeless node, leave it to direct reclaim */ if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) return true; if (pgdat_balanced(pgdat, order, highest_zoneidx)) { clear_pgdat_congested(pgdat); return true; } return false; } /* * kswapd shrinks a node of pages that are at or below the highest usable * zone that is currently unbalanced. * * Returns true if kswapd scanned at least the requested number of pages to * reclaim or if the lack of progress was due to pages under writeback. * This is used to determine if the scanning priority needs to be raised. 
*/ static bool kswapd_shrink_node(pg_data_t *pgdat, struct scan_control *sc) { struct zone *zone; int z; /* Reclaim a number of pages proportional to the number of zones */ sc->nr_to_reclaim = 0; for (z = 0; z <= sc->reclaim_idx; z++) { zone = pgdat->node_zones + z; if (!managed_zone(zone)) continue; sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); } /* * Historically care was taken to put equal pressure on all zones but * now pressure is applied based on node LRU order. */ shrink_node(pgdat, sc); /* * Fragmentation may mean that the system cannot be rebalanced for * high-order allocations. If twice the allocation size has been * reclaimed then recheck watermarks only at order-0 to prevent * excessive reclaim. Assume that a process requested a high-order * can direct reclaim/compact. */ if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order)) sc->order = 0; return sc->nr_scanned >= sc->nr_to_reclaim; } /* Page allocator PCP high watermark is lowered if reclaim is active. */ static inline void update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active) { int i; struct zone *zone; for (i = 0; i <= highest_zoneidx; i++) { zone = pgdat->node_zones + i; if (!managed_zone(zone)) continue; if (active) set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); else clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); } } static inline void set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) { update_reclaim_active(pgdat, highest_zoneidx, true); } static inline void clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) { update_reclaim_active(pgdat, highest_zoneidx, false); } /* * For kswapd, balance_pgdat() will reclaim pages across a node from zones * that are eligible for use by the caller until at least one zone is * balanced. * * Returns the order kswapd finished reclaiming at. * * kswapd scans the zones in the highmem->normal->dma direction. It skips * zones which have free_pages > high_wmark_pages(zone), but once a zone is * found to have free_pages <= high_wmark_pages(zone), any page in that zone * or lower is eligible for reclaim until at least one usable zone is * balanced. */ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx) { int i; unsigned long nr_soft_reclaimed; unsigned long nr_soft_scanned; unsigned long pflags; unsigned long nr_boost_reclaim; unsigned long zone_boosts[MAX_NR_ZONES] = { 0, }; bool boosted; struct zone *zone; struct scan_control sc = { .gfp_mask = GFP_KERNEL, .order = order, .may_unmap = 1, }; set_task_reclaim_state(current, &sc.reclaim_state); psi_memstall_enter(&pflags); __fs_reclaim_acquire(_THIS_IP_); count_vm_event(PAGEOUTRUN); /* * Account for the reclaim boost. Note that the zone boost is left in * place so that parallel allocations that are near the watermark will * stall or direct reclaim until kswapd is finished. */ nr_boost_reclaim = 0; for (i = 0; i <= highest_zoneidx; i++) { zone = pgdat->node_zones + i; if (!managed_zone(zone)) continue; nr_boost_reclaim += zone->watermark_boost; zone_boosts[i] = zone->watermark_boost; } boosted = nr_boost_reclaim; restart: set_reclaim_active(pgdat, highest_zoneidx); sc.priority = DEF_PRIORITY; do { unsigned long nr_reclaimed = sc.nr_reclaimed; bool raise_priority = true; bool balanced; bool ret; sc.reclaim_idx = highest_zoneidx; /* * If the number of buffer_heads exceeds the maximum allowed * then consider reclaiming from all zones. This has a dual * purpose -- on 64-bit systems it is expected that * buffer_heads are stripped during active rotation. 
On 32-bit * systems, highmem pages can pin lowmem memory and shrinking * buffers can relieve lowmem pressure. Reclaim may still not * go ahead if all eligible zones for the original allocation * request are balanced to avoid excessive reclaim from kswapd. */ if (buffer_heads_over_limit) { for (i = MAX_NR_ZONES - 1; i >= 0; i--) { zone = pgdat->node_zones + i; if (!managed_zone(zone)) continue; sc.reclaim_idx = i; break; } } /* * If the pgdat is imbalanced then ignore boosting and preserve * the watermarks for a later time and restart. Note that the * zone watermarks will be still reset at the end of balancing * on the grounds that the normal reclaim should be enough to * re-evaluate if boosting is required when kswapd next wakes. */ balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx); if (!balanced && nr_boost_reclaim) { nr_boost_reclaim = 0; goto restart; } /* * If boosting is not active then only reclaim if there are no * eligible zones. Note that sc.reclaim_idx is not used as * buffer_heads_over_limit may have adjusted it. */ if (!nr_boost_reclaim && balanced) goto out; /* Limit the priority of boosting to avoid reclaim writeback */ if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2) raise_priority = false; /* * Do not writeback or swap pages for boosted reclaim. The * intent is to relieve pressure not issue sub-optimal IO * from reclaim context. If no pages are reclaimed, the * reclaim will be aborted. */ sc.may_writepage = !laptop_mode && !nr_boost_reclaim; sc.may_swap = !nr_boost_reclaim; /* * Do some background aging, to give pages a chance to be * referenced before reclaiming. All pages are rotated * regardless of classzone as this is about consistent aging. */ kswapd_age_node(pgdat, &sc); /* * If we're getting trouble reclaiming, start doing writepage * even in laptop mode. */ if (sc.priority < DEF_PRIORITY - 2) sc.may_writepage = 1; /* Call soft limit reclaim before calling shrink_node. */ sc.nr_scanned = 0; nr_soft_scanned = 0; nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order, sc.gfp_mask, &nr_soft_scanned); sc.nr_reclaimed += nr_soft_reclaimed; /* * There should be no need to raise the scanning priority if * enough pages are already being scanned that that high * watermark would be met at 100% efficiency. */ if (kswapd_shrink_node(pgdat, &sc)) raise_priority = false; /* * If the low watermark is met there is no need for processes * to be throttled on pfmemalloc_wait as they should not be * able to safely make forward progress. Wake them */ if (waitqueue_active(&pgdat->pfmemalloc_wait) && allow_direct_reclaim(pgdat)) wake_up_all(&pgdat->pfmemalloc_wait); /* Check if kswapd should be suspending */ __fs_reclaim_release(_THIS_IP_); ret = try_to_freeze(); __fs_reclaim_acquire(_THIS_IP_); if (ret || kthread_should_stop()) break; /* * Raise priority if scanning rate is too low or there was no * progress in reclaiming pages */ nr_reclaimed = sc.nr_reclaimed - nr_reclaimed; nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed); /* * If reclaim made no progress for a boost, stop reclaim as * IO cannot be queued and it could be an infinite loop in * extreme circumstances. 
*/ if (nr_boost_reclaim && !nr_reclaimed) break; if (raise_priority || !nr_reclaimed) sc.priority--; } while (sc.priority >= 1); if (!sc.nr_reclaimed) pgdat->kswapd_failures++; out: clear_reclaim_active(pgdat, highest_zoneidx); /* If reclaim was boosted, account for the reclaim done in this pass */ if (boosted) { unsigned long flags; for (i = 0; i <= highest_zoneidx; i++) { if (!zone_boosts[i]) continue; /* Increments are under the zone lock */ zone = pgdat->node_zones + i; spin_lock_irqsave(&zone->lock, flags); zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]); spin_unlock_irqrestore(&zone->lock, flags); } /* * As there is now likely space, wakeup kcompact to defragment * pageblocks. */ wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx); } snapshot_refaults(NULL, pgdat); __fs_reclaim_release(_THIS_IP_); psi_memstall_leave(&pflags); set_task_reclaim_state(current, NULL); /* * Return the order kswapd stopped reclaiming at as * prepare_kswapd_sleep() takes it into account. If another caller * entered the allocator slow path while kswapd was awake, order will * remain at the higher level. */ return sc.order; } /* * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to * be reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is * not a valid index then either kswapd runs for first time or kswapd couldn't * sleep after previous reclaim attempt (node is still unbalanced). In that * case return the zone index of the previous kswapd reclaim cycle. */ static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat, enum zone_type prev_highest_zoneidx) { enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); return curr_idx == MAX_NR_ZONES ? prev_highest_zoneidx : curr_idx; } static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, unsigned int highest_zoneidx) { long remaining = 0; DEFINE_WAIT(wait); if (freezing(current) || kthread_should_stop()) return; prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); /* * Try to sleep for a short interval. Note that kcompactd will only be * woken if it is possible to sleep for a short interval. This is * deliberate on the assumption that if reclaim cannot keep an * eligible zone balanced that it's also unlikely that compaction will * succeed. */ if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { /* * Compaction records what page blocks it recently failed to * isolate pages from and skips them in the future scanning. * When kswapd is going to sleep, it is reasonable to assume * that pages and compaction may succeed so reset the cache. */ reset_isolation_suitable(pgdat); /* * We have freed the memory, now we should compact it to make * allocation of the requested order possible. */ wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx); remaining = schedule_timeout(HZ/10); /* * If woken prematurely then reset kswapd_highest_zoneidx and * order. The values will either be from a wakeup request or * the previous request that slept prematurely. */ if (remaining) { WRITE_ONCE(pgdat->kswapd_highest_zoneidx, kswapd_highest_zoneidx(pgdat, highest_zoneidx)); if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) WRITE_ONCE(pgdat->kswapd_order, reclaim_order); } finish_wait(&pgdat->kswapd_wait, &wait); prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); } /* * After a short sleep, check if it was a premature sleep. If not, then * go fully to sleep until explicitly woken up. 
*/ if (!remaining && prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) { trace_mm_vmscan_kswapd_sleep(pgdat->node_id); /* * vmstat counters are not perfectly accurate and the estimated * value for counters such as NR_FREE_PAGES can deviate from the * true value by nr_online_cpus * threshold. To avoid the zone * watermarks being breached while under pressure, we reduce the * per-cpu vmstat threshold while kswapd is awake and restore * them before going back to sleep. */ set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); if (!kthread_should_stop()) schedule(); set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold); } else { if (remaining) count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); else count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY); } finish_wait(&pgdat->kswapd_wait, &wait); } /* * The background pageout daemon, started as a kernel thread * from the init process. * * This basically trickles out pages so that we have _some_ * free memory available even if there is no other activity * that frees anything up. This is needed for things like routing * etc, where we otherwise might have all activity going on in * asynchronous contexts that cannot page things out. * * If there are applications that are active memory-allocators * (most normal use), this basically shouldn't matter. */ static int kswapd(void *p) { unsigned int alloc_order, reclaim_order; unsigned int highest_zoneidx = MAX_NR_ZONES - 1; pg_data_t *pgdat = (pg_data_t *)p; struct task_struct *tsk = current; const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); if (!cpumask_empty(cpumask)) set_cpus_allowed_ptr(tsk, cpumask); /* * Tell the memory management that we're a "memory allocator", * and that if we need more memory we should get access to it * regardless (see "__alloc_pages()"). "kswapd" should * never get caught in the normal page freeing logic. * * (Kswapd normally doesn't need memory anyway, but sometimes * you need a small amount of memory in order to be able to * page out something else, and this flag essentially protects * us from recursively trying to free more memory as we're * trying to free the first piece of memory in the first place). */ tsk->flags |= PF_MEMALLOC | PF_KSWAPD; set_freezable(); WRITE_ONCE(pgdat->kswapd_order, 0); WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); atomic_set(&pgdat->nr_writeback_throttled, 0); for ( ; ; ) { bool ret; alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); highest_zoneidx = kswapd_highest_zoneidx(pgdat, highest_zoneidx); kswapd_try_sleep: kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order, highest_zoneidx); /* Read the new order and highest_zoneidx */ alloc_order = READ_ONCE(pgdat->kswapd_order); highest_zoneidx = kswapd_highest_zoneidx(pgdat, highest_zoneidx); WRITE_ONCE(pgdat->kswapd_order, 0); WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); ret = try_to_freeze(); if (kthread_should_stop()) break; /* * We can speed up thawing tasks if we don't call balance_pgdat * after returning from the refrigerator */ if (ret) continue; /* * Reclaim begins at the requested order but if a high-order * reclaim fails then kswapd falls back to reclaiming for * order-0. If that happens, kswapd will consider sleeping * for the order it finished reclaiming at (reclaim_order) * but kcompactd is woken to compact for the original * request (alloc_order). 
*/ trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx, alloc_order); reclaim_order = balance_pgdat(pgdat, alloc_order, highest_zoneidx); if (reclaim_order < alloc_order) goto kswapd_try_sleep; } tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD); return 0; } /* * A zone is low on free memory or too fragmented for high-order memory. If * kswapd should reclaim (direct reclaim is deferred), wake it up for the zone's * pgdat. It will wake up kcompactd after reclaiming memory. If kswapd reclaim * has failed or is not needed, still wake up kcompactd if only compaction is * needed. */ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, enum zone_type highest_zoneidx) { pg_data_t *pgdat; enum zone_type curr_idx; if (!managed_zone(zone)) return; if (!cpuset_zone_allowed(zone, gfp_flags)) return; pgdat = zone->zone_pgdat; curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); if (curr_idx == MAX_NR_ZONES || curr_idx < highest_zoneidx) WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx); if (READ_ONCE(pgdat->kswapd_order) < order) WRITE_ONCE(pgdat->kswapd_order, order); if (!waitqueue_active(&pgdat->kswapd_wait)) return; /* Hopeless node, leave it to direct reclaim if possible */ if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || (pgdat_balanced(pgdat, order, highest_zoneidx) && !pgdat_watermark_boosted(pgdat, highest_zoneidx))) { /* * There may be plenty of free memory available, but it's too * fragmented for high-order allocations. Wake up kcompactd * and rely on compaction_suitable() to determine if it's * needed. If it fails, it will defer subsequent attempts to * ratelimit its work. */ if (!(gfp_flags & __GFP_DIRECT_RECLAIM)) wakeup_kcompactd(pgdat, order, highest_zoneidx); return; } trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order, gfp_flags); wake_up_interruptible(&pgdat->kswapd_wait); } #ifdef CONFIG_HIBERNATION /* * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of * freed pages. * * Rather than trying to age LRUs the aim is to preserve the overall * LRU order by reclaiming preferentially * inactive > active > active referenced > active mapped */ unsigned long shrink_all_memory(unsigned long nr_to_reclaim) { struct scan_control sc = { .nr_to_reclaim = nr_to_reclaim, .gfp_mask = GFP_HIGHUSER_MOVABLE, .reclaim_idx = MAX_NR_ZONES - 1, .priority = DEF_PRIORITY, .may_writepage = 1, .may_unmap = 1, .may_swap = 1, .hibernation_mode = 1, }; struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); unsigned long nr_reclaimed; unsigned int noreclaim_flag; fs_reclaim_acquire(sc.gfp_mask); noreclaim_flag = memalloc_noreclaim_save(); set_task_reclaim_state(current, &sc.reclaim_state); nr_reclaimed = do_try_to_free_pages(zonelist, &sc); set_task_reclaim_state(current, NULL); memalloc_noreclaim_restore(noreclaim_flag); fs_reclaim_release(sc.gfp_mask); return nr_reclaimed; } #endif /* CONFIG_HIBERNATION */ /* * This kswapd start function will be called by init and node-hot-add. */ void __meminit kswapd_run(int nid) { pg_data_t *pgdat = NODE_DATA(nid); pgdat_kswapd_lock(pgdat); if (!pgdat->kswapd) { pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); if (IS_ERR(pgdat->kswapd)) { /* failure at boot is fatal */ BUG_ON(system_state < SYSTEM_RUNNING); pr_err("Failed to start kswapd on node %d\n", nid); pgdat->kswapd = NULL; } } pgdat_kswapd_unlock(pgdat); } /* * Called by memory hotplug when all memory in a node is offlined. Caller must * be holding mem_hotplug_begin/done(). 
*/ void __meminit kswapd_stop(int nid) { pg_data_t *pgdat = NODE_DATA(nid); struct task_struct *kswapd; pgdat_kswapd_lock(pgdat); kswapd = pgdat->kswapd; if (kswapd) { kthread_stop(kswapd); pgdat->kswapd = NULL; } pgdat_kswapd_unlock(pgdat); } static int __init kswapd_init(void) { int nid; swap_setup(); for_each_node_state(nid, N_MEMORY) kswapd_run(nid); return 0; } module_init(kswapd_init) #ifdef CONFIG_NUMA /* * Node reclaim mode * * If non-zero call node_reclaim when the number of free pages falls below * the watermarks. */ int node_reclaim_mode __read_mostly; /* * Priority for NODE_RECLAIM. This determines the fraction of pages * of a node considered for each zone_reclaim. 4 scans 1/16th of * a zone. */ #define NODE_RECLAIM_PRIORITY 4 /* * Percentage of pages in a zone that must be unmapped for node_reclaim to * occur. */ int sysctl_min_unmapped_ratio = 1; /* * If the number of slab pages in a zone grows beyond this percentage then * slab reclaim needs to occur. */ int sysctl_min_slab_ratio = 5; static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat) { unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED); unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) + node_page_state(pgdat, NR_ACTIVE_FILE); /* * It's possible for there to be more file mapped pages than * accounted for by the pages on the file LRU lists because * tmpfs pages accounted for as ANON can also be FILE_MAPPED */ return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; } /* Work out how many page cache pages we can reclaim in this reclaim_mode */ static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat) { unsigned long nr_pagecache_reclaimable; unsigned long delta = 0; /* * If RECLAIM_UNMAP is set, then all file pages are considered * potentially reclaimable. Otherwise, we have to worry about * pages like swapcache and node_unmapped_file_pages() provides * a better estimate */ if (node_reclaim_mode & RECLAIM_UNMAP) nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES); else nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat); /* If we can't clean pages, remove dirty pages from consideration */ if (!(node_reclaim_mode & RECLAIM_WRITE)) delta += node_page_state(pgdat, NR_FILE_DIRTY); /* Watch for any possible underflows due to delta */ if (unlikely(delta > nr_pagecache_reclaimable)) delta = nr_pagecache_reclaimable; return nr_pagecache_reclaimable - delta; } /* * Try to free up some pages from this node through reclaim. 
*/ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) { /* Minimum pages needed in order to stay on node */ const unsigned long nr_pages = 1 << order; struct task_struct *p = current; unsigned int noreclaim_flag; struct scan_control sc = { .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), .gfp_mask = current_gfp_context(gfp_mask), .order = order, .priority = NODE_RECLAIM_PRIORITY, .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE), .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP), .may_swap = 1, .reclaim_idx = gfp_zone(gfp_mask), }; unsigned long pflags; trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order, sc.gfp_mask); cond_resched(); psi_memstall_enter(&pflags); fs_reclaim_acquire(sc.gfp_mask); /* * We need to be able to allocate from the reserves for RECLAIM_UNMAP */ noreclaim_flag = memalloc_noreclaim_save(); set_task_reclaim_state(p, &sc.reclaim_state); if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages || node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) { /* * Free memory by calling shrink node with increasing * priorities until we have enough memory freed. */ do { shrink_node(pgdat, &sc); } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); } set_task_reclaim_state(p, NULL); memalloc_noreclaim_restore(noreclaim_flag); fs_reclaim_release(sc.gfp_mask); psi_memstall_leave(&pflags); trace_mm_vmscan_node_reclaim_end(sc.nr_reclaimed); return sc.nr_reclaimed >= nr_pages; } int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) { int ret; /* * Node reclaim reclaims unmapped file backed pages and * slab pages if we are over the defined limits. * * A small portion of unmapped file backed pages is needed for * file I/O otherwise pages read by file I/O will be immediately * thrown out if the node is overallocated. So we do not reclaim * if less than a specified percentage of the node is used by * unmapped file backed pages. */ if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <= pgdat->min_slab_pages) return NODE_RECLAIM_FULL; /* * Do not scan if the allocation should not be delayed. */ if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC)) return NODE_RECLAIM_NOSCAN; /* * Only run node reclaim on the local node or on nodes that do not * have associated processors. This will favor the local processor * over remote processors and spread off node memory allocations * as wide as possible. */ if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) return NODE_RECLAIM_NOSCAN; if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) return NODE_RECLAIM_NOSCAN; ret = __node_reclaim(pgdat, gfp_mask, order); clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); if (!ret) count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); return ret; } #endif /** * check_move_unevictable_folios - Move evictable folios to appropriate zone * lru list * @fbatch: Batch of lru folios to check. * * Checks folios for evictability, if an evictable folio is in the unevictable * lru list, moves it to the appropriate evictable lru list. This function * should be only used for lru folios. 
*/ void check_move_unevictable_folios(struct folio_batch *fbatch) { struct lruvec *lruvec = NULL; int pgscanned = 0; int pgrescued = 0; int i; for (i = 0; i < fbatch->nr; i++) { struct folio *folio = fbatch->folios[i]; int nr_pages = folio_nr_pages(folio); pgscanned += nr_pages; /* block memcg migration while the folio moves between lrus */ if (!folio_test_clear_lru(folio)) continue; lruvec = folio_lruvec_relock_irq(folio, lruvec); if (folio_evictable(folio) && folio_test_unevictable(folio)) { lruvec_del_folio(lruvec, folio); folio_clear_unevictable(folio); lruvec_add_folio(lruvec, folio); pgrescued += nr_pages; } folio_set_lru(folio); } if (lruvec) { __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued); __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned); unlock_page_lruvec_irq(lruvec); } else if (pgscanned) { count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned); } } EXPORT_SYMBOL_GPL(check_move_unevictable_folios);
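/*
 * Editor's illustrative sketch (not part of mm/vmscan.c): a tiny,
 * self-contained user-space C program that only demonstrates the
 * arithmetic behind the "roughly 12% efficiency" test used by
 * consider_reclaim_throttle() above, i.e. nr_reclaimed > (nr_scanned >> 3),
 * a strict greater-than against 1/8 (12.5%) of the scanned pages.
 * All names and sample numbers below are made up for demonstration only.
 */
#include <stdbool.h>
#include <stdio.h>

/* Same shift as the kernel check: 1/8 of the scanned pages == 12.5%. */
static bool reclaim_is_efficient(unsigned long nr_reclaimed,
				 unsigned long nr_scanned)
{
	return nr_reclaimed > (nr_scanned >> 3);
}

int main(void)
{
	/* Hypothetical sample points, purely for illustration. */
	static const unsigned long samples[][2] = {
		{ 100, 1000 },	/* 10.0% -> keep the throttling path */
		{ 126, 1000 },	/* 12.6% -> wake NOPROGRESS waiters */
		{ 512, 4096 },	/* exactly 12.5% -> strict '>' fails */
	};
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("reclaimed=%lu scanned=%lu -> %s\n",
		       samples[i][0], samples[i][1],
		       reclaim_is_efficient(samples[i][0], samples[i][1]) ?
				"wake NOPROGRESS waiters" :
				"keep throttling path");

	return 0;
}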
linux-master
mm/vmscan.c
// SPDX-License-Identifier: GPL-2.0-only /* * linux/mm/mmu_notifier.c * * Copyright (C) 2008 Qumranet, Inc. * Copyright (C) 2008 SGI * Christoph Lameter <[email protected]> */ #include <linux/rculist.h> #include <linux/mmu_notifier.h> #include <linux/export.h> #include <linux/mm.h> #include <linux/err.h> #include <linux/interval_tree.h> #include <linux/srcu.h> #include <linux/rcupdate.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/slab.h> /* global SRCU for all MMs */ DEFINE_STATIC_SRCU(srcu); #ifdef CONFIG_LOCKDEP struct lockdep_map __mmu_notifier_invalidate_range_start_map = { .name = "mmu_notifier_invalidate_range_start" }; #endif /* * The mmu_notifier_subscriptions structure is allocated and installed in * mm->notifier_subscriptions inside the mm_take_all_locks() protected * critical section and it's released only when mm_count reaches zero * in mmdrop(). */ struct mmu_notifier_subscriptions { /* all mmu notifiers registered in this mm are queued in this list */ struct hlist_head list; bool has_itree; /* to serialize the list modifications and hlist_unhashed */ spinlock_t lock; unsigned long invalidate_seq; unsigned long active_invalidate_ranges; struct rb_root_cached itree; wait_queue_head_t wq; struct hlist_head deferred_list; }; /* * This is a collision-retry read-side/write-side 'lock', a lot like a * seqcount, however this allows multiple write-sides to hold it at * once. Conceptually the write side is protecting the values of the PTEs in * this mm, such that PTES cannot be read into SPTEs (shadow PTEs) while any * writer exists. * * Note that the core mm creates nested invalidate_range_start()/end() regions * within the same thread, and runs invalidate_range_start()/end() in parallel * on multiple CPUs. This is designed to not reduce concurrency or block * progress on the mm side. * * As a secondary function, holding the full write side also serves to prevent * writers for the itree, this is an optimization to avoid extra locking * during invalidate_range_start/end notifiers. * * The write side has two states, fully excluded: * - mm->active_invalidate_ranges != 0 * - subscriptions->invalidate_seq & 1 == True (odd) * - some range on the mm_struct is being invalidated * - the itree is not allowed to change * * And partially excluded: * - mm->active_invalidate_ranges != 0 * - subscriptions->invalidate_seq & 1 == False (even) * - some range on the mm_struct is being invalidated * - the itree is allowed to change * * Operations on notifier_subscriptions->invalidate_seq (under spinlock): * seq |= 1 # Begin writing * seq++ # Release the writing state * seq & 1 # True if a writer exists * * The later state avoids some expensive work on inv_end in the common case of * no mmu_interval_notifier monitoring the VA. 
*/ static bool mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions) { lockdep_assert_held(&subscriptions->lock); return subscriptions->invalidate_seq & 1; } static struct mmu_interval_notifier * mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions, const struct mmu_notifier_range *range, unsigned long *seq) { struct interval_tree_node *node; struct mmu_interval_notifier *res = NULL; spin_lock(&subscriptions->lock); subscriptions->active_invalidate_ranges++; node = interval_tree_iter_first(&subscriptions->itree, range->start, range->end - 1); if (node) { subscriptions->invalidate_seq |= 1; res = container_of(node, struct mmu_interval_notifier, interval_tree); } *seq = subscriptions->invalidate_seq; spin_unlock(&subscriptions->lock); return res; } static struct mmu_interval_notifier * mn_itree_inv_next(struct mmu_interval_notifier *interval_sub, const struct mmu_notifier_range *range) { struct interval_tree_node *node; node = interval_tree_iter_next(&interval_sub->interval_tree, range->start, range->end - 1); if (!node) return NULL; return container_of(node, struct mmu_interval_notifier, interval_tree); } static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions) { struct mmu_interval_notifier *interval_sub; struct hlist_node *next; spin_lock(&subscriptions->lock); if (--subscriptions->active_invalidate_ranges || !mn_itree_is_invalidating(subscriptions)) { spin_unlock(&subscriptions->lock); return; } /* Make invalidate_seq even */ subscriptions->invalidate_seq++; /* * The inv_end incorporates a deferred mechanism like rtnl_unlock(). * Adds and removes are queued until the final inv_end happens then * they are progressed. This arrangement for tree updates is used to * avoid using a blocking lock during invalidate_range_start. */ hlist_for_each_entry_safe(interval_sub, next, &subscriptions->deferred_list, deferred_item) { if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) interval_tree_insert(&interval_sub->interval_tree, &subscriptions->itree); else interval_tree_remove(&interval_sub->interval_tree, &subscriptions->itree); hlist_del(&interval_sub->deferred_item); } spin_unlock(&subscriptions->lock); wake_up_all(&subscriptions->wq); } /** * mmu_interval_read_begin - Begin a read side critical section against a VA * range * @interval_sub: The interval subscription * * mmu_iterval_read_begin()/mmu_iterval_read_retry() implement a * collision-retry scheme similar to seqcount for the VA range under * subscription. If the mm invokes invalidation during the critical section * then mmu_interval_read_retry() will return true. * * This is useful to obtain shadow PTEs where teardown or setup of the SPTEs * require a blocking context. The critical region formed by this can sleep, * and the required 'user_lock' can also be a sleeping lock. * * The caller is required to provide a 'user_lock' to serialize both teardown * and setup. * * The return value should be passed to mmu_interval_read_retry(). */ unsigned long mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub) { struct mmu_notifier_subscriptions *subscriptions = interval_sub->mm->notifier_subscriptions; unsigned long seq; bool is_invalidating; /* * If the subscription has a different seq value under the user_lock * than we started with then it has collided. * * If the subscription currently has the same seq value as the * subscriptions seq, then it is currently between * invalidate_start/end and is colliding. 
* * The locking looks broadly like this: * mn_itree_inv_start(): mmu_interval_read_begin(): * spin_lock * seq = READ_ONCE(interval_sub->invalidate_seq); * seq == subs->invalidate_seq * spin_unlock * spin_lock * seq = ++subscriptions->invalidate_seq * spin_unlock * op->invalidate(): * user_lock * mmu_interval_set_seq() * interval_sub->invalidate_seq = seq * user_unlock * * [Required: mmu_interval_read_retry() == true] * * mn_itree_inv_end(): * spin_lock * seq = ++subscriptions->invalidate_seq * spin_unlock * * user_lock * mmu_interval_read_retry(): * interval_sub->invalidate_seq != seq * user_unlock * * Barriers are not needed here as any races here are closed by an * eventual mmu_interval_read_retry(), which provides a barrier via the * user_lock. */ spin_lock(&subscriptions->lock); /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */ seq = READ_ONCE(interval_sub->invalidate_seq); is_invalidating = seq == subscriptions->invalidate_seq; spin_unlock(&subscriptions->lock); /* * interval_sub->invalidate_seq must always be set to an odd value via * mmu_interval_set_seq() using the provided cur_seq from * mn_itree_inv_start_range(). This ensures that if seq does wrap we * will always clear the below sleep in some reasonable time as * subscriptions->invalidate_seq is even in the idle state. */ lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); lock_map_release(&__mmu_notifier_invalidate_range_start_map); if (is_invalidating) wait_event(subscriptions->wq, READ_ONCE(subscriptions->invalidate_seq) != seq); /* * Notice that mmu_interval_read_retry() can already be true at this * point, avoiding loops here allows the caller to provide a global * time bound. */ return seq; } EXPORT_SYMBOL_GPL(mmu_interval_read_begin); static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions, struct mm_struct *mm) { struct mmu_notifier_range range = { .flags = MMU_NOTIFIER_RANGE_BLOCKABLE, .event = MMU_NOTIFY_RELEASE, .mm = mm, .start = 0, .end = ULONG_MAX, }; struct mmu_interval_notifier *interval_sub; unsigned long cur_seq; bool ret; for (interval_sub = mn_itree_inv_start_range(subscriptions, &range, &cur_seq); interval_sub; interval_sub = mn_itree_inv_next(interval_sub, &range)) { ret = interval_sub->ops->invalidate(interval_sub, &range, cur_seq); WARN_ON(!ret); } mn_itree_inv_end(subscriptions); } /* * This function can't run concurrently against mmu_notifier_register * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap * runs with mm_users == 0. Other tasks may still invoke mmu notifiers * in parallel despite there being no task using this mm any more, * through the vmas outside of the exit_mmap context, such as with * vmtruncate. This serializes against mmu_notifier_unregister with * the notifier_subscriptions->lock in addition to SRCU and it serializes * against the other mmu notifiers with SRCU. struct mmu_notifier_subscriptions * can't go away from under us as exit_mmap holds an mm_count pin * itself. */ static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions, struct mm_struct *mm) { struct mmu_notifier *subscription; int id; /* * SRCU here will block mmu_notifier_unregister until * ->release returns. 
*/ id = srcu_read_lock(&srcu); hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist, srcu_read_lock_held(&srcu)) /* * If ->release runs before mmu_notifier_unregister it must be * handled, as it's the only way for the driver to flush all * existing sptes and stop the driver from establishing any more * sptes before all the pages in the mm are freed. */ if (subscription->ops->release) subscription->ops->release(subscription, mm); spin_lock(&subscriptions->lock); while (unlikely(!hlist_empty(&subscriptions->list))) { subscription = hlist_entry(subscriptions->list.first, struct mmu_notifier, hlist); /* * We arrived before mmu_notifier_unregister so * mmu_notifier_unregister will do nothing other than to wait * for ->release to finish and for mmu_notifier_unregister to * return. */ hlist_del_init_rcu(&subscription->hlist); } spin_unlock(&subscriptions->lock); srcu_read_unlock(&srcu, id); /* * synchronize_srcu here prevents mmu_notifier_release from returning to * exit_mmap (which would proceed with freeing all pages in the mm) * until the ->release method returns, if it was invoked by * mmu_notifier_unregister. * * The notifier_subscriptions can't go away from under us because * one mm_count is held by exit_mmap. */ synchronize_srcu(&srcu); } void __mmu_notifier_release(struct mm_struct *mm) { struct mmu_notifier_subscriptions *subscriptions = mm->notifier_subscriptions; if (subscriptions->has_itree) mn_itree_release(subscriptions, mm); if (!hlist_empty(&subscriptions->list)) mn_hlist_release(subscriptions, mm); } /* * If no young bitflag is supported by the hardware, ->clear_flush_young can * unmap the address and return 1 or 0 depending if the mapping previously * existed or not. */ int __mmu_notifier_clear_flush_young(struct mm_struct *mm, unsigned long start, unsigned long end) { struct mmu_notifier *subscription; int young = 0, id; id = srcu_read_lock(&srcu); hlist_for_each_entry_rcu(subscription, &mm->notifier_subscriptions->list, hlist, srcu_read_lock_held(&srcu)) { if (subscription->ops->clear_flush_young) young |= subscription->ops->clear_flush_young( subscription, mm, start, end); } srcu_read_unlock(&srcu, id); return young; } int __mmu_notifier_clear_young(struct mm_struct *mm, unsigned long start, unsigned long end) { struct mmu_notifier *subscription; int young = 0, id; id = srcu_read_lock(&srcu); hlist_for_each_entry_rcu(subscription, &mm->notifier_subscriptions->list, hlist, srcu_read_lock_held(&srcu)) { if (subscription->ops->clear_young) young |= subscription->ops->clear_young(subscription, mm, start, end); } srcu_read_unlock(&srcu, id); return young; } int __mmu_notifier_test_young(struct mm_struct *mm, unsigned long address) { struct mmu_notifier *subscription; int young = 0, id; id = srcu_read_lock(&srcu); hlist_for_each_entry_rcu(subscription, &mm->notifier_subscriptions->list, hlist, srcu_read_lock_held(&srcu)) { if (subscription->ops->test_young) { young = subscription->ops->test_young(subscription, mm, address); if (young) break; } } srcu_read_unlock(&srcu, id); return young; } void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, pte_t pte) { struct mmu_notifier *subscription; int id; id = srcu_read_lock(&srcu); hlist_for_each_entry_rcu(subscription, &mm->notifier_subscriptions->list, hlist, srcu_read_lock_held(&srcu)) { if (subscription->ops->change_pte) subscription->ops->change_pte(subscription, mm, address, pte); } srcu_read_unlock(&srcu, id); } static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions, 
const struct mmu_notifier_range *range) { struct mmu_interval_notifier *interval_sub; unsigned long cur_seq; for (interval_sub = mn_itree_inv_start_range(subscriptions, range, &cur_seq); interval_sub; interval_sub = mn_itree_inv_next(interval_sub, range)) { bool ret; ret = interval_sub->ops->invalidate(interval_sub, range, cur_seq); if (!ret) { if (WARN_ON(mmu_notifier_range_blockable(range))) continue; goto out_would_block; } } return 0; out_would_block: /* * On -EAGAIN the non-blocking caller is not allowed to call * invalidate_range_end() */ mn_itree_inv_end(subscriptions); return -EAGAIN; } static int mn_hlist_invalidate_range_start( struct mmu_notifier_subscriptions *subscriptions, struct mmu_notifier_range *range) { struct mmu_notifier *subscription; int ret = 0; int id; id = srcu_read_lock(&srcu); hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist, srcu_read_lock_held(&srcu)) { const struct mmu_notifier_ops *ops = subscription->ops; if (ops->invalidate_range_start) { int _ret; if (!mmu_notifier_range_blockable(range)) non_block_start(); _ret = ops->invalidate_range_start(subscription, range); if (!mmu_notifier_range_blockable(range)) non_block_end(); if (_ret) { pr_info("%pS callback failed with %d in %sblockable context.\n", ops->invalidate_range_start, _ret, !mmu_notifier_range_blockable(range) ? "non-" : ""); WARN_ON(mmu_notifier_range_blockable(range) || _ret != -EAGAIN); /* * We call all the notifiers on any EAGAIN, * there is no way for a notifier to know if * its start method failed, thus a start that * does EAGAIN can't also do end. */ WARN_ON(ops->invalidate_range_end); ret = _ret; } } } if (ret) { /* * Must be non-blocking to get here. If there are multiple * notifiers and one or more failed start, any that succeeded * start are expecting their end to be called. Do so now. 
*/ hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist, srcu_read_lock_held(&srcu)) { if (!subscription->ops->invalidate_range_end) continue; subscription->ops->invalidate_range_end(subscription, range); } } srcu_read_unlock(&srcu, id); return ret; } int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) { struct mmu_notifier_subscriptions *subscriptions = range->mm->notifier_subscriptions; int ret; if (subscriptions->has_itree) { ret = mn_itree_invalidate(subscriptions, range); if (ret) return ret; } if (!hlist_empty(&subscriptions->list)) return mn_hlist_invalidate_range_start(subscriptions, range); return 0; } static void mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions, struct mmu_notifier_range *range) { struct mmu_notifier *subscription; int id; id = srcu_read_lock(&srcu); hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist, srcu_read_lock_held(&srcu)) { if (subscription->ops->invalidate_range_end) { if (!mmu_notifier_range_blockable(range)) non_block_start(); subscription->ops->invalidate_range_end(subscription, range); if (!mmu_notifier_range_blockable(range)) non_block_end(); } } srcu_read_unlock(&srcu, id); } void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range) { struct mmu_notifier_subscriptions *subscriptions = range->mm->notifier_subscriptions; lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); if (subscriptions->has_itree) mn_itree_inv_end(subscriptions); if (!hlist_empty(&subscriptions->list)) mn_hlist_invalidate_end(subscriptions, range); lock_map_release(&__mmu_notifier_invalidate_range_start_map); } void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm, unsigned long start, unsigned long end) { struct mmu_notifier *subscription; int id; id = srcu_read_lock(&srcu); hlist_for_each_entry_rcu(subscription, &mm->notifier_subscriptions->list, hlist, srcu_read_lock_held(&srcu)) { if (subscription->ops->arch_invalidate_secondary_tlbs) subscription->ops->arch_invalidate_secondary_tlbs( subscription, mm, start, end); } srcu_read_unlock(&srcu, id); } /* * Same as mmu_notifier_register but here the caller must hold the mmap_lock in * write mode. A NULL mn signals the notifier is being registered for itree * mode. */ int __mmu_notifier_register(struct mmu_notifier *subscription, struct mm_struct *mm) { struct mmu_notifier_subscriptions *subscriptions = NULL; int ret; mmap_assert_write_locked(mm); BUG_ON(atomic_read(&mm->mm_users) <= 0); /* * Subsystems should only register for invalidate_secondary_tlbs() or * invalidate_range_start()/end() callbacks, not both. */ if (WARN_ON_ONCE(subscription && (subscription->ops->arch_invalidate_secondary_tlbs && (subscription->ops->invalidate_range_start || subscription->ops->invalidate_range_end)))) return -EINVAL; if (!mm->notifier_subscriptions) { /* * kmalloc cannot be called under mm_take_all_locks(), but we * know that mm->notifier_subscriptions can't change while we * hold the write side of the mmap_lock. */ subscriptions = kzalloc( sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL); if (!subscriptions) return -ENOMEM; INIT_HLIST_HEAD(&subscriptions->list); spin_lock_init(&subscriptions->lock); subscriptions->invalidate_seq = 2; subscriptions->itree = RB_ROOT_CACHED; init_waitqueue_head(&subscriptions->wq); INIT_HLIST_HEAD(&subscriptions->deferred_list); } ret = mm_take_all_locks(mm); if (unlikely(ret)) goto out_clean; /* * Serialize the update against mmu_notifier_unregister. 
A * side note: mmu_notifier_release can't run concurrently with * us because we hold the mm_users pin (either implicitly as * current->mm or explicitly with get_task_mm() or similar). * We can't race against any other mmu notifier method either * thanks to mm_take_all_locks(). * * release semantics on the initialization of the * mmu_notifier_subscriptions's contents are provided for unlocked * readers. acquire can only be used while holding the mmgrab or * mmget, and is safe because once created the * mmu_notifier_subscriptions is not freed until the mm is destroyed. * As above, users holding the mmap_lock or one of the * mm_take_all_locks() do not need to use acquire semantics. */ if (subscriptions) smp_store_release(&mm->notifier_subscriptions, subscriptions); if (subscription) { /* Pairs with the mmdrop in mmu_notifier_unregister_* */ mmgrab(mm); subscription->mm = mm; subscription->users = 1; spin_lock(&mm->notifier_subscriptions->lock); hlist_add_head_rcu(&subscription->hlist, &mm->notifier_subscriptions->list); spin_unlock(&mm->notifier_subscriptions->lock); } else mm->notifier_subscriptions->has_itree = true; mm_drop_all_locks(mm); BUG_ON(atomic_read(&mm->mm_users) <= 0); return 0; out_clean: kfree(subscriptions); return ret; } EXPORT_SYMBOL_GPL(__mmu_notifier_register); /** * mmu_notifier_register - Register a notifier on a mm * @subscription: The notifier to attach * @mm: The mm to attach the notifier to * * Must not hold mmap_lock nor any other VM related lock when calling * this registration function. Must also ensure mm_users can't go down * to zero while this runs to avoid races with mmu_notifier_release, * so mm has to be current->mm or the mm should be pinned safely such * as with get_task_mm(). If the mm is not current->mm, the mm_users * pin should be released by calling mmput after mmu_notifier_register * returns. * * mmu_notifier_unregister() or mmu_notifier_put() must be always called to * unregister the notifier. * * While the caller has a mmu_notifier get the subscription->mm pointer will remain * valid, and can be converted to an active mm pointer via mmget_not_zero(). */ int mmu_notifier_register(struct mmu_notifier *subscription, struct mm_struct *mm) { int ret; mmap_write_lock(mm); ret = __mmu_notifier_register(subscription, mm); mmap_write_unlock(mm); return ret; } EXPORT_SYMBOL_GPL(mmu_notifier_register); static struct mmu_notifier * find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops) { struct mmu_notifier *subscription; spin_lock(&mm->notifier_subscriptions->lock); hlist_for_each_entry_rcu(subscription, &mm->notifier_subscriptions->list, hlist, lockdep_is_held(&mm->notifier_subscriptions->lock)) { if (subscription->ops != ops) continue; if (likely(subscription->users != UINT_MAX)) subscription->users++; else subscription = ERR_PTR(-EOVERFLOW); spin_unlock(&mm->notifier_subscriptions->lock); return subscription; } spin_unlock(&mm->notifier_subscriptions->lock); return NULL; } /** * mmu_notifier_get_locked - Return the single struct mmu_notifier for * the mm & ops * @ops: The operations struct being subscribe with * @mm : The mm to attach notifiers too * * This function either allocates a new mmu_notifier via * ops->alloc_notifier(), or returns an already existing notifier on the * list. The value of the ops pointer is used to determine when two notifiers * are the same. * * Each call to mmu_notifier_get() must be paired with a call to * mmu_notifier_put(). The caller must hold the write side of mm->mmap_lock. 
* * While the caller has a mmu_notifier get the mm pointer will remain valid, * and can be converted to an active mm pointer via mmget_not_zero(). */ struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops, struct mm_struct *mm) { struct mmu_notifier *subscription; int ret; mmap_assert_write_locked(mm); if (mm->notifier_subscriptions) { subscription = find_get_mmu_notifier(mm, ops); if (subscription) return subscription; } subscription = ops->alloc_notifier(mm); if (IS_ERR(subscription)) return subscription; subscription->ops = ops; ret = __mmu_notifier_register(subscription, mm); if (ret) goto out_free; return subscription; out_free: subscription->ops->free_notifier(subscription); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(mmu_notifier_get_locked); /* this is called after the last mmu_notifier_unregister() returned */ void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm) { BUG_ON(!hlist_empty(&mm->notifier_subscriptions->list)); kfree(mm->notifier_subscriptions); mm->notifier_subscriptions = LIST_POISON1; /* debug */ } /* * This releases the mm_count pin automatically and frees the mm * structure if it was the last user of it. It serializes against * running mmu notifiers with SRCU and against mmu_notifier_unregister * with the unregister lock + SRCU. All sptes must be dropped before * calling mmu_notifier_unregister. ->release or any other notifier * method may be invoked concurrently with mmu_notifier_unregister, * and only after mmu_notifier_unregister returned we're guaranteed * that ->release or any other method can't run anymore. */ void mmu_notifier_unregister(struct mmu_notifier *subscription, struct mm_struct *mm) { BUG_ON(atomic_read(&mm->mm_count) <= 0); if (!hlist_unhashed(&subscription->hlist)) { /* * SRCU here will force exit_mmap to wait for ->release to * finish before freeing the pages. */ int id; id = srcu_read_lock(&srcu); /* * exit_mmap will block in mmu_notifier_release to guarantee * that ->release is called before freeing the pages. */ if (subscription->ops->release) subscription->ops->release(subscription, mm); srcu_read_unlock(&srcu, id); spin_lock(&mm->notifier_subscriptions->lock); /* * Can not use list_del_rcu() since __mmu_notifier_release * can delete it before we hold the lock. */ hlist_del_init_rcu(&subscription->hlist); spin_unlock(&mm->notifier_subscriptions->lock); } /* * Wait for any running method to finish, of course including * ->release if it was run by mmu_notifier_release instead of us. */ synchronize_srcu(&srcu); BUG_ON(atomic_read(&mm->mm_count) <= 0); mmdrop(mm); } EXPORT_SYMBOL_GPL(mmu_notifier_unregister); static void mmu_notifier_free_rcu(struct rcu_head *rcu) { struct mmu_notifier *subscription = container_of(rcu, struct mmu_notifier, rcu); struct mm_struct *mm = subscription->mm; subscription->ops->free_notifier(subscription); /* Pairs with the get in __mmu_notifier_register() */ mmdrop(mm); } /** * mmu_notifier_put - Release the reference on the notifier * @subscription: The notifier to act on * * This function must be paired with each mmu_notifier_get(), it releases the * reference obtained by the get. If this is the last reference then process * to free the notifier will be run asynchronously. * * Unlike mmu_notifier_unregister() the get/put flow only calls ops->release * when the mm_struct is destroyed. Instead free_notifier is always called to * release any resources held by the user. 
* * As ops->release is not guaranteed to be called, the user must ensure that * all sptes are dropped, and no new sptes can be established before * mmu_notifier_put() is called. * * This function can be called from the ops->release callback, however the * caller must still ensure it is called pairwise with mmu_notifier_get(). * * Modules calling this function must call mmu_notifier_synchronize() in * their __exit functions to ensure the async work is completed. */ void mmu_notifier_put(struct mmu_notifier *subscription) { struct mm_struct *mm = subscription->mm; spin_lock(&mm->notifier_subscriptions->lock); if (WARN_ON(!subscription->users) || --subscription->users) goto out_unlock; hlist_del_init_rcu(&subscription->hlist); spin_unlock(&mm->notifier_subscriptions->lock); call_srcu(&srcu, &subscription->rcu, mmu_notifier_free_rcu); return; out_unlock: spin_unlock(&mm->notifier_subscriptions->lock); } EXPORT_SYMBOL_GPL(mmu_notifier_put); static int __mmu_interval_notifier_insert( struct mmu_interval_notifier *interval_sub, struct mm_struct *mm, struct mmu_notifier_subscriptions *subscriptions, unsigned long start, unsigned long length, const struct mmu_interval_notifier_ops *ops) { interval_sub->mm = mm; interval_sub->ops = ops; RB_CLEAR_NODE(&interval_sub->interval_tree.rb); interval_sub->interval_tree.start = start; /* * Note that the representation of the intervals in the interval tree * considers the ending point as contained in the interval. */ if (length == 0 || check_add_overflow(start, length - 1, &interval_sub->interval_tree.last)) return -EOVERFLOW; /* Must call with a mmget() held */ if (WARN_ON(atomic_read(&mm->mm_users) <= 0)) return -EINVAL; /* pairs with mmdrop in mmu_interval_notifier_remove() */ mmgrab(mm); /* * If some invalidate_range_start/end region is going on in parallel * we don't know what VA ranges are affected, so we must assume this * new range is included. * * If the itree is invalidating then we are not allowed to change * it. Retrying until invalidation is done is tricky due to the * possibility for live lock, instead defer the add to * mn_itree_inv_end() so this algorithm is deterministic. * * In all cases the value for the interval_sub->invalidate_seq should be * odd, see mmu_interval_read_begin() */ spin_lock(&subscriptions->lock); if (subscriptions->active_invalidate_ranges) { if (mn_itree_is_invalidating(subscriptions)) hlist_add_head(&interval_sub->deferred_item, &subscriptions->deferred_list); else { subscriptions->invalidate_seq |= 1; interval_tree_insert(&interval_sub->interval_tree, &subscriptions->itree); } interval_sub->invalidate_seq = subscriptions->invalidate_seq; } else { WARN_ON(mn_itree_is_invalidating(subscriptions)); /* * The starting seq for a subscription not under invalidation * should be odd, not equal to the current invalidate_seq and * invalidate_seq should not 'wrap' to the new seq any time * soon. */ interval_sub->invalidate_seq = subscriptions->invalidate_seq - 1; interval_tree_insert(&interval_sub->interval_tree, &subscriptions->itree); } spin_unlock(&subscriptions->lock); return 0; } /** * mmu_interval_notifier_insert - Insert an interval notifier * @interval_sub: Interval subscription to register * @start: Starting virtual address to monitor * @length: Length of the range to monitor * @mm: mm_struct to attach to * @ops: Interval notifier operations to be called on matching events * * This function subscribes the interval notifier for notifications from the * mm. 
Upon return the ops related to mmu_interval_notifier will be called * whenever an event that intersects with the given range occurs. * * Upon return the range_notifier may not be present in the interval tree yet. * The caller must use the normal interval notifier read flow via * mmu_interval_read_begin() to establish SPTEs for this range. */ int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub, struct mm_struct *mm, unsigned long start, unsigned long length, const struct mmu_interval_notifier_ops *ops) { struct mmu_notifier_subscriptions *subscriptions; int ret; might_lock(&mm->mmap_lock); subscriptions = smp_load_acquire(&mm->notifier_subscriptions); if (!subscriptions || !subscriptions->has_itree) { ret = mmu_notifier_register(NULL, mm); if (ret) return ret; subscriptions = mm->notifier_subscriptions; } return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions, start, length, ops); } EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert); int mmu_interval_notifier_insert_locked( struct mmu_interval_notifier *interval_sub, struct mm_struct *mm, unsigned long start, unsigned long length, const struct mmu_interval_notifier_ops *ops) { struct mmu_notifier_subscriptions *subscriptions = mm->notifier_subscriptions; int ret; mmap_assert_write_locked(mm); if (!subscriptions || !subscriptions->has_itree) { ret = __mmu_notifier_register(NULL, mm); if (ret) return ret; subscriptions = mm->notifier_subscriptions; } return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions, start, length, ops); } EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked); static bool mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions, unsigned long seq) { bool ret; spin_lock(&subscriptions->lock); ret = subscriptions->invalidate_seq != seq; spin_unlock(&subscriptions->lock); return ret; } /** * mmu_interval_notifier_remove - Remove a interval notifier * @interval_sub: Interval subscription to unregister * * This function must be paired with mmu_interval_notifier_insert(). It cannot * be called from any ops callback. * * Once this returns ops callbacks are no longer running on other CPUs and * will not be called in future. */ void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub) { struct mm_struct *mm = interval_sub->mm; struct mmu_notifier_subscriptions *subscriptions = mm->notifier_subscriptions; unsigned long seq = 0; might_sleep(); spin_lock(&subscriptions->lock); if (mn_itree_is_invalidating(subscriptions)) { /* * remove is being called after insert put this on the * deferred list, but before the deferred list was processed. */ if (RB_EMPTY_NODE(&interval_sub->interval_tree.rb)) { hlist_del(&interval_sub->deferred_item); } else { hlist_add_head(&interval_sub->deferred_item, &subscriptions->deferred_list); seq = subscriptions->invalidate_seq; } } else { WARN_ON(RB_EMPTY_NODE(&interval_sub->interval_tree.rb)); interval_tree_remove(&interval_sub->interval_tree, &subscriptions->itree); } spin_unlock(&subscriptions->lock); /* * The possible sleep on progress in the invalidation requires the * caller not hold any locks held by invalidation callbacks. 
*/ lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); lock_map_release(&__mmu_notifier_invalidate_range_start_map); if (seq) wait_event(subscriptions->wq, mmu_interval_seq_released(subscriptions, seq)); /* pairs with mmgrab in mmu_interval_notifier_insert() */ mmdrop(mm); } EXPORT_SYMBOL_GPL(mmu_interval_notifier_remove); /** * mmu_notifier_synchronize - Ensure all mmu_notifiers are freed * * This function ensures that all outstanding async SRCU work from * mmu_notifier_put() is completed. After it returns, any mmu_notifier_ops * associated with an unused mmu_notifier will no longer be called. * * Before using it, the caller must ensure that all of its mmu_notifiers have * been fully released via mmu_notifier_put(). * * Modules using the mmu_notifier_put() API should call this in their __exit * function to avoid module unloading races. */ void mmu_notifier_synchronize(void) { synchronize_srcu(&srcu); } EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
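/*
 * Illustrative sketch only -- not part of mm/mmu_notifier.c. It shows how a
 * driver might use the hlist subscription flow implemented above; the
 * "my_mirror" names are hypothetical, while the mmu_notifier_* calls are the
 * ones exported by this file. The caller of my_mirror_create() must hold an
 * mm_users reference (mm is current->mm or pinned with get_task_mm()).
 */
#include <linux/err.h>
#include <linux/mmu_notifier.h>
#include <linux/slab.h>

struct my_mirror {
	struct mmu_notifier sub;
	/* driver-private secondary-MMU state would live here */
};

static int my_invalidate_range_start(struct mmu_notifier *sub,
				     const struct mmu_notifier_range *range)
{
	/*
	 * Tear down secondary mappings for [range->start, range->end).
	 * A non-blockable range may be refused with -EAGAIN, but then this
	 * subscription must not provide invalidate_range_end() (see the
	 * WARN_ON in mn_hlist_invalidate_range_start() above).
	 */
	if (!mmu_notifier_range_blockable(range))
		return -EAGAIN;		/* pretend we would have to sleep */

	/* ... sleepable invalidation of the driver's mappings ... */
	return 0;
}

static void my_release(struct mmu_notifier *sub, struct mm_struct *mm)
{
	/* Last chance to drop sptes before exit_mmap() frees the pages. */
}

static const struct mmu_notifier_ops my_mirror_ops = {
	.invalidate_range_start	= my_invalidate_range_start,
	.release		= my_release,
};

static struct my_mirror *my_mirror_create(struct mm_struct *mm)
{
	struct my_mirror *m = kzalloc(sizeof(*m), GFP_KERNEL);
	int ret;

	if (!m)
		return ERR_PTR(-ENOMEM);
	m->sub.ops = &my_mirror_ops;

	/* Takes mmap_lock internally and mmgrab()s the mm on success. */
	ret = mmu_notifier_register(&m->sub, mm);
	if (ret) {
		kfree(m);
		return ERR_PTR(ret);
	}
	return m;
}

static void my_mirror_destroy(struct my_mirror *m, struct mm_struct *mm)
{
	/* Waits for running callbacks; drops the mm_count pin taken at register. */
	mmu_notifier_unregister(&m->sub, mm);
	kfree(m);
}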
linux-master
mm/mmu_notifier.c
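/*
 * Illustrative sketch only -- kept outside the file above. It shows the
 * interval-tree ("itree") flow that mmu_interval_notifier_insert() and the
 * mmu_interval_read_begin()/mmu_interval_read_retry() helpers in
 * mm/mmu_notifier.c are designed around. Names prefixed with "my_" are
 * hypothetical; a real driver additionally needs its own lock around
 * publishing the mappings (see the HMM documentation), which is elided here.
 */
#include <linux/mmu_notifier.h>

struct my_range_mirror {
	struct mmu_interval_notifier notifier;
	/* driver-private mapping state for the monitored range */
};

static bool my_range_invalidate(struct mmu_interval_notifier *interval_sub,
				const struct mmu_notifier_range *range,
				unsigned long cur_seq)
{
	/*
	 * Publish the new sequence so concurrent mmu_interval_read_begin()
	 * callers retry, then drop any driver mappings intersecting
	 * [range->start, range->end); container_of() would recover
	 * struct my_range_mirror. Returning false is only permitted for
	 * non-blockable ranges and becomes -EAGAIN in mn_itree_invalidate().
	 */
	mmu_interval_set_seq(interval_sub, cur_seq);
	/* ... invalidate the mappings covering the overlap ... */
	return true;
}

static const struct mmu_interval_notifier_ops my_range_ops = {
	.invalidate = my_range_invalidate,
};

/* Caller must hold a mmget() on @mm across the insert. */
static int my_mirror_range(struct my_range_mirror *m, struct mm_struct *mm,
			   unsigned long start, unsigned long length)
{
	unsigned long seq;
	int ret;

	ret = mmu_interval_notifier_insert(&m->notifier, mm, start, length,
					   &my_range_ops);
	if (ret)
		return ret;

	/* Collision-retry read flow referred to by the insert kernel-doc. */
	do {
		seq = mmu_interval_read_begin(&m->notifier);
		/* ... fault/walk the range and stage device mappings ... */
	} while (mmu_interval_read_retry(&m->notifier, seq));

	/* Later teardown: mmu_interval_notifier_remove(&m->notifier); */
	return 0;
}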
// SPDX-License-Identifier: GPL-2.0 #include <linux/mm.h> #include <linux/rmap.h> #include <linux/hugetlb.h> #include <linux/swap.h> #include <linux/swapops.h> #include "internal.h" static inline bool not_found(struct page_vma_mapped_walk *pvmw) { page_vma_mapped_walk_done(pvmw); return false; } static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp) { pte_t ptent; if (pvmw->flags & PVMW_SYNC) { /* Use the stricter lookup */ pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd, pvmw->address, &pvmw->ptl); *ptlp = pvmw->ptl; return !!pvmw->pte; } /* * It is important to return the ptl corresponding to pte, * in case *pvmw->pmd changes underneath us; so we need to * return it even when choosing not to lock, in case caller * proceeds to loop over next ptes, and finds a match later. * Though, in most cases, page lock already protects this. */ pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd, pvmw->address, ptlp); if (!pvmw->pte) return false; ptent = ptep_get(pvmw->pte); if (pvmw->flags & PVMW_MIGRATION) { if (!is_swap_pte(ptent)) return false; } else if (is_swap_pte(ptent)) { swp_entry_t entry; /* * Handle un-addressable ZONE_DEVICE memory. * * We get here when we are trying to unmap a private * device page from the process address space. Such * page is not CPU accessible and thus is mapped as * a special swap entry, nonetheless it still does * count as a valid regular mapping for the page * (and is accounted as such in page maps count). * * So handle this special case as if it was a normal * page mapping ie lock CPU page table and return true. * * For more details on device private memory see HMM * (include/linux/hmm.h or mm/hmm.c). */ entry = pte_to_swp_entry(ptent); if (!is_device_private_entry(entry) && !is_device_exclusive_entry(entry)) return false; } else if (!pte_present(ptent)) { return false; } pvmw->ptl = *ptlp; spin_lock(pvmw->ptl); return true; } /** * check_pte - check if [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) is * mapped at the @pvmw->pte * @pvmw: page_vma_mapped_walk struct, includes a pair pte and pfn range * for checking * * page_vma_mapped_walk() found a place where pfn range is *potentially* * mapped. check_pte() has to validate this. * * pvmw->pte may point to empty PTE, swap PTE or PTE pointing to * arbitrary page. * * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration * entry that points to [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) * * If PVMW_MIGRATION flag is not set, returns true if pvmw->pte points to * [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) * * Otherwise, return false. * */ static bool check_pte(struct page_vma_mapped_walk *pvmw) { unsigned long pfn; pte_t ptent = ptep_get(pvmw->pte); if (pvmw->flags & PVMW_MIGRATION) { swp_entry_t entry; if (!is_swap_pte(ptent)) return false; entry = pte_to_swp_entry(ptent); if (!is_migration_entry(entry) && !is_device_exclusive_entry(entry)) return false; pfn = swp_offset_pfn(entry); } else if (is_swap_pte(ptent)) { swp_entry_t entry; /* Handle un-addressable ZONE_DEVICE memory */ entry = pte_to_swp_entry(ptent); if (!is_device_private_entry(entry) && !is_device_exclusive_entry(entry)) return false; pfn = swp_offset_pfn(entry); } else { if (!pte_present(ptent)) return false; pfn = pte_pfn(ptent); } return (pfn - pvmw->pfn) < pvmw->nr_pages; } /* Returns true if the two ranges overlap. Careful to not overflow. 
*/ static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw) { if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn) return false; if (pfn > pvmw->pfn + pvmw->nr_pages - 1) return false; return true; } static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size) { pvmw->address = (pvmw->address + size) & ~(size - 1); if (!pvmw->address) pvmw->address = ULONG_MAX; } /** * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at * @pvmw->address * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags * must be set. pmd, pte and ptl must be NULL. * * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is * adjusted if needed (for PTE-mapped THPs). * * If @pvmw->pmd is set but @pvmw->pte is not, you have found PMD-mapped page * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in * a loop to find all PTEs that map the THP. * * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry * regardless of which page table level the page is mapped at. @pvmw->pmd is * NULL. * * Returns false if there are no more page table entries for the page in * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped. * * If you need to stop the walk before page_vma_mapped_walk() returned false, * use page_vma_mapped_walk_done(). It will do the housekeeping. */ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) { struct vm_area_struct *vma = pvmw->vma; struct mm_struct *mm = vma->vm_mm; unsigned long end; spinlock_t *ptl; pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t pmde; /* The only possible pmd mapping has been handled on last iteration */ if (pvmw->pmd && !pvmw->pte) return not_found(pvmw); if (unlikely(is_vm_hugetlb_page(vma))) { struct hstate *hstate = hstate_vma(vma); unsigned long size = huge_page_size(hstate); /* The only possible mapping was handled on last iteration */ if (pvmw->pte) return not_found(pvmw); /* * All callers that get here will already hold the * i_mmap_rwsem. Therefore, no additional locks need to be * taken before calling hugetlb_walk(). */ pvmw->pte = hugetlb_walk(vma, pvmw->address, size); if (!pvmw->pte) return false; pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte); if (!check_pte(pvmw)) return not_found(pvmw); return true; } end = vma_address_end(pvmw); if (pvmw->pte) goto next_pte; restart: do { pgd = pgd_offset(mm, pvmw->address); if (!pgd_present(*pgd)) { step_forward(pvmw, PGDIR_SIZE); continue; } p4d = p4d_offset(pgd, pvmw->address); if (!p4d_present(*p4d)) { step_forward(pvmw, P4D_SIZE); continue; } pud = pud_offset(p4d, pvmw->address); if (!pud_present(*pud)) { step_forward(pvmw, PUD_SIZE); continue; } pvmw->pmd = pmd_offset(pud, pvmw->address); /* * Make sure the pmd value isn't cached in a register by the * compiler and used as a stale value after we've observed a * subsequent update. 
*/ pmde = pmdp_get_lockless(pvmw->pmd); if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) || (pmd_present(pmde) && pmd_devmap(pmde))) { pvmw->ptl = pmd_lock(mm, pvmw->pmd); pmde = *pvmw->pmd; if (!pmd_present(pmde)) { swp_entry_t entry; if (!thp_migration_supported() || !(pvmw->flags & PVMW_MIGRATION)) return not_found(pvmw); entry = pmd_to_swp_entry(pmde); if (!is_migration_entry(entry) || !check_pmd(swp_offset_pfn(entry), pvmw)) return not_found(pvmw); return true; } if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) { if (pvmw->flags & PVMW_MIGRATION) return not_found(pvmw); if (!check_pmd(pmd_pfn(pmde), pvmw)) return not_found(pvmw); return true; } /* THP pmd was split under us: handle on pte level */ spin_unlock(pvmw->ptl); pvmw->ptl = NULL; } else if (!pmd_present(pmde)) { /* * If PVMW_SYNC, take and drop THP pmd lock so that we * cannot return prematurely, while zap_huge_pmd() has * cleared *pmd but not decremented compound_mapcount(). */ if ((pvmw->flags & PVMW_SYNC) && transhuge_vma_suitable(vma, pvmw->address) && (pvmw->nr_pages >= HPAGE_PMD_NR)) { spinlock_t *ptl = pmd_lock(mm, pvmw->pmd); spin_unlock(ptl); } step_forward(pvmw, PMD_SIZE); continue; } if (!map_pte(pvmw, &ptl)) { if (!pvmw->pte) goto restart; goto next_pte; } this_pte: if (check_pte(pvmw)) return true; next_pte: do { pvmw->address += PAGE_SIZE; if (pvmw->address >= end) return not_found(pvmw); /* Did we cross page table boundary? */ if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) { if (pvmw->ptl) { spin_unlock(pvmw->ptl); pvmw->ptl = NULL; } pte_unmap(pvmw->pte); pvmw->pte = NULL; goto restart; } pvmw->pte++; } while (pte_none(ptep_get(pvmw->pte))); if (!pvmw->ptl) { pvmw->ptl = ptl; spin_lock(pvmw->ptl); } goto this_pte; } while (pvmw->address < end); return false; } /** * page_mapped_in_vma - check whether a page is really mapped in a VMA * @page: the page to test * @vma: the VMA to test * * Returns 1 if the page is mapped into the page tables of the VMA, 0 * if the page is not mapped into the page tables of this VMA. Only * valid for normal file or anonymous VMAs. */ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) { struct page_vma_mapped_walk pvmw = { .pfn = page_to_pfn(page), .nr_pages = 1, .vma = vma, .flags = PVMW_SYNC, }; pvmw.address = vma_address(page, vma); if (pvmw.address == -EFAULT) return 0; if (!page_vma_mapped_walk(&pvmw)) return 0; page_vma_mapped_walk_done(&pvmw); return 1; }
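/*
 * Illustrative sketch only -- not part of mm/page_vma_mapped.c; it assumes the
 * includes already present in this file. It shows the loop that rmap-style
 * callers are expected to run around page_vma_mapped_walk() when the range may
 * be PTE-mapped (e.g. a PTE-mapped THP): the walk returns true once per
 * matching entry until the VMA range is exhausted. The helper name and the
 * counters are hypothetical; @address is typically vma_address(page, vma).
 */
static void count_vma_mappings(struct page *page, unsigned long nr_pages,
			       struct vm_area_struct *vma, unsigned long address,
			       unsigned int *nr_pte, unsigned int *nr_pmd)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = nr_pages,
		.vma = vma,
		.address = address,
		.flags = 0,
	};

	*nr_pte = 0;
	*nr_pmd = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		/* pvmw.ptl is held here for the entry just found. */
		if (pvmw.pte)
			(*nr_pte)++;	/* one PTE maps one page of the range */
		else
			(*nr_pmd)++;	/* PMD-mapped: pvmw.pmd set, pte NULL */
	}
	/* On the false return the walk already dropped ptl and unmapped the pte. */
}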
linux-master
mm/page_vma_mapped.c
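/*
 * Illustrative sketch only -- not part of the file above. A variant of its
 * page_mapped_in_vma() that passes PVMW_MIGRATION, so check_pte()/check_pmd()
 * match migration entries for the page rather than present ptes. The helper
 * name is hypothetical; vma_address() is the mm/internal.h helper used by
 * page_mapped_in_vma() in mm/page_vma_mapped.c.
 */
#include <linux/mm.h>
#include <linux/rmap.h>

static int page_has_migration_entry_in_vma(struct page *page,
					   struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = 1,
		.vma = vma,
		.flags = PVMW_SYNC | PVMW_MIGRATION,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}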
// SPDX-License-Identifier: GPL-2.0-only /* * mm_init.c - Memory initialisation verification and debugging * * Copyright 2008 IBM Corporation, 2008 * Author Mel Gorman <[email protected]> * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/kobject.h> #include <linux/export.h> #include <linux/memory.h> #include <linux/notifier.h> #include <linux/sched.h> #include <linux/mman.h> #include <linux/memblock.h> #include <linux/page-isolation.h> #include <linux/padata.h> #include <linux/nmi.h> #include <linux/buffer_head.h> #include <linux/kmemleak.h> #include <linux/kfence.h> #include <linux/page_ext.h> #include <linux/pti.h> #include <linux/pgtable.h> #include <linux/swap.h> #include <linux/cma.h> #include "internal.h" #include "slab.h" #include "shuffle.h" #include <asm/setup.h> #ifdef CONFIG_DEBUG_MEMORY_INIT int __meminitdata mminit_loglevel; /* The zonelists are simply reported, validation is manual. */ void __init mminit_verify_zonelist(void) { int nid; if (mminit_loglevel < MMINIT_VERIFY) return; for_each_online_node(nid) { pg_data_t *pgdat = NODE_DATA(nid); struct zone *zone; struct zoneref *z; struct zonelist *zonelist; int i, listid, zoneid; BUILD_BUG_ON(MAX_ZONELISTS > 2); for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) { /* Identify the zone and nodelist */ zoneid = i % MAX_NR_ZONES; listid = i / MAX_NR_ZONES; zonelist = &pgdat->node_zonelists[listid]; zone = &pgdat->node_zones[zoneid]; if (!populated_zone(zone)) continue; /* Print information about the zonelist */ printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ", listid > 0 ? "thisnode" : "general", nid, zone->name); /* Iterate the zonelist */ for_each_zone_zonelist(zone, z, zonelist, zoneid) pr_cont("%d:%s ", zone_to_nid(zone), zone->name); pr_cont("\n"); } } } void __init mminit_verify_pageflags_layout(void) { int shift, width; unsigned long or_mask, add_mask; shift = BITS_PER_LONG; width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH; mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths", "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n", SECTIONS_WIDTH, NODES_WIDTH, ZONES_WIDTH, LAST_CPUPID_WIDTH, KASAN_TAG_WIDTH, LRU_GEN_WIDTH, LRU_REFS_WIDTH, NR_PAGEFLAGS); mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts", "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n", SECTIONS_SHIFT, NODES_SHIFT, ZONES_SHIFT, LAST_CPUPID_SHIFT, KASAN_TAG_WIDTH); mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts", "Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n", (unsigned long)SECTIONS_PGSHIFT, (unsigned long)NODES_PGSHIFT, (unsigned long)ZONES_PGSHIFT, (unsigned long)LAST_CPUPID_PGSHIFT, (unsigned long)KASAN_TAG_PGSHIFT); mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid", "Node/Zone ID: %lu -> %lu\n", (unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT), (unsigned long)ZONEID_PGOFF); mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage", "location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n", shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0); #ifdef NODE_NOT_IN_PAGE_FLAGS mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags", "Node not in page flags"); #endif #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags", "Last cpupid not in page flags"); #endif if (SECTIONS_WIDTH) { shift -= SECTIONS_WIDTH; BUG_ON(shift != SECTIONS_PGSHIFT); } if (NODES_WIDTH) { shift -= NODES_WIDTH; BUG_ON(shift != NODES_PGSHIFT); } if (ZONES_WIDTH) { shift -= 
ZONES_WIDTH; BUG_ON(shift != ZONES_PGSHIFT); } /* Check for bitmask overlaps */ or_mask = (ZONES_MASK << ZONES_PGSHIFT) | (NODES_MASK << NODES_PGSHIFT) | (SECTIONS_MASK << SECTIONS_PGSHIFT); add_mask = (ZONES_MASK << ZONES_PGSHIFT) + (NODES_MASK << NODES_PGSHIFT) + (SECTIONS_MASK << SECTIONS_PGSHIFT); BUG_ON(or_mask != add_mask); } static __init int set_mminit_loglevel(char *str) { get_option(&str, &mminit_loglevel); return 0; } early_param("mminit_loglevel", set_mminit_loglevel); #endif /* CONFIG_DEBUG_MEMORY_INIT */ struct kobject *mm_kobj; #ifdef CONFIG_SMP s32 vm_committed_as_batch = 32; void mm_compute_batch(int overcommit_policy) { u64 memsized_batch; s32 nr = num_present_cpus(); s32 batch = max_t(s32, nr*2, 32); unsigned long ram_pages = totalram_pages(); /* * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of * (total memory/#cpus), and lift it to 25% for other policies * to easy the possible lock contention for percpu_counter * vm_committed_as, while the max limit is INT_MAX */ if (overcommit_policy == OVERCOMMIT_NEVER) memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX); else memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX); vm_committed_as_batch = max_t(s32, memsized_batch, batch); } static int __meminit mm_compute_batch_notifier(struct notifier_block *self, unsigned long action, void *arg) { switch (action) { case MEM_ONLINE: case MEM_OFFLINE: mm_compute_batch(sysctl_overcommit_memory); break; default: break; } return NOTIFY_OK; } static int __init mm_compute_batch_init(void) { mm_compute_batch(sysctl_overcommit_memory); hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI); return 0; } __initcall(mm_compute_batch_init); #endif static int __init mm_sysfs_init(void) { mm_kobj = kobject_create_and_add("mm", kernel_kobj); if (!mm_kobj) return -ENOMEM; return 0; } postcore_initcall(mm_sysfs_init); static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata; static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata; static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata; static unsigned long required_kernelcore __initdata; static unsigned long required_kernelcore_percent __initdata; static unsigned long required_movablecore __initdata; static unsigned long required_movablecore_percent __initdata; static unsigned long nr_kernel_pages __initdata; static unsigned long nr_all_pages __initdata; static unsigned long dma_reserve __initdata; static bool deferred_struct_pages __meminitdata; static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats); static int __init cmdline_parse_core(char *p, unsigned long *core, unsigned long *percent) { unsigned long long coremem; char *endptr; if (!p) return -EINVAL; /* Value may be a percentage of total memory, otherwise bytes */ coremem = simple_strtoull(p, &endptr, 0); if (*endptr == '%') { /* Paranoid check for percent values greater than 100 */ WARN_ON(coremem > 100); *percent = coremem; } else { coremem = memparse(p, &p); /* Paranoid check that UL is enough for the coremem value */ WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); *core = coremem >> PAGE_SHIFT; *percent = 0UL; } return 0; } bool mirrored_kernelcore __initdata_memblock; /* * kernelcore=size sets the amount of memory for use for allocations that * cannot be reclaimed or migrated. 
*/ static int __init cmdline_parse_kernelcore(char *p) { /* parse kernelcore=mirror */ if (parse_option_str(p, "mirror")) { mirrored_kernelcore = true; return 0; } return cmdline_parse_core(p, &required_kernelcore, &required_kernelcore_percent); } early_param("kernelcore", cmdline_parse_kernelcore); /* * movablecore=size sets the amount of memory for use for allocations that * can be reclaimed or migrated. */ static int __init cmdline_parse_movablecore(char *p) { return cmdline_parse_core(p, &required_movablecore, &required_movablecore_percent); } early_param("movablecore", cmdline_parse_movablecore); /* * early_calculate_totalpages() * Sum pages in active regions for movable zone. * Populate N_MEMORY for calculating usable_nodes. */ static unsigned long __init early_calculate_totalpages(void) { unsigned long totalpages = 0; unsigned long start_pfn, end_pfn; int i, nid; for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { unsigned long pages = end_pfn - start_pfn; totalpages += pages; if (pages) node_set_state(nid, N_MEMORY); } return totalpages; } /* * This finds a zone that can be used for ZONE_MOVABLE pages. The * assumption is made that zones within a node are ordered in monotonic * increasing memory addresses so that the "highest" populated zone is used */ static void __init find_usable_zone_for_movable(void) { int zone_index; for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { if (zone_index == ZONE_MOVABLE) continue; if (arch_zone_highest_possible_pfn[zone_index] > arch_zone_lowest_possible_pfn[zone_index]) break; } VM_BUG_ON(zone_index == -1); movable_zone = zone_index; } /* * Find the PFN the Movable zone begins in each node. Kernel memory * is spread evenly between nodes as long as the nodes have enough * memory. When they don't, some nodes will have more kernelcore than * others */ static void __init find_zone_movable_pfns_for_nodes(void) { int i, nid; unsigned long usable_startpfn; unsigned long kernelcore_node, kernelcore_remaining; /* save the state before borrow the nodemask */ nodemask_t saved_node_state = node_states[N_MEMORY]; unsigned long totalpages = early_calculate_totalpages(); int usable_nodes = nodes_weight(node_states[N_MEMORY]); struct memblock_region *r; /* Need to find movable_zone earlier when movable_node is specified. */ find_usable_zone_for_movable(); /* * If movable_node is specified, ignore kernelcore and movablecore * options. */ if (movable_node_is_enabled()) { for_each_mem_region(r) { if (!memblock_is_hotpluggable(r)) continue; nid = memblock_get_region_node(r); usable_startpfn = PFN_DOWN(r->base); zone_movable_pfn[nid] = zone_movable_pfn[nid] ? min(usable_startpfn, zone_movable_pfn[nid]) : usable_startpfn; } goto out2; } /* * If kernelcore=mirror is specified, ignore movablecore option */ if (mirrored_kernelcore) { bool mem_below_4gb_not_mirrored = false; if (!memblock_has_mirror()) { pr_warn("The system has no mirror memory, ignore kernelcore=mirror.\n"); goto out; } for_each_mem_region(r) { if (memblock_is_mirror(r)) continue; nid = memblock_get_region_node(r); usable_startpfn = memblock_region_memory_base_pfn(r); if (usable_startpfn < PHYS_PFN(SZ_4G)) { mem_below_4gb_not_mirrored = true; continue; } zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 
min(usable_startpfn, zone_movable_pfn[nid]) : usable_startpfn; } if (mem_below_4gb_not_mirrored) pr_warn("This configuration results in unmirrored kernel memory.\n"); goto out2; } /* * If kernelcore=nn% or movablecore=nn% was specified, calculate the * amount of necessary memory. */ if (required_kernelcore_percent) required_kernelcore = (totalpages * 100 * required_kernelcore_percent) / 10000UL; if (required_movablecore_percent) required_movablecore = (totalpages * 100 * required_movablecore_percent) / 10000UL; /* * If movablecore= was specified, calculate what size of * kernelcore that corresponds so that memory usable for * any allocation type is evenly spread. If both kernelcore * and movablecore are specified, then the value of kernelcore * will be used for required_kernelcore if it's greater than * what movablecore would have allowed. */ if (required_movablecore) { unsigned long corepages; /* * Round-up so that ZONE_MOVABLE is at least as large as what * was requested by the user */ required_movablecore = roundup(required_movablecore, MAX_ORDER_NR_PAGES); required_movablecore = min(totalpages, required_movablecore); corepages = totalpages - required_movablecore; required_kernelcore = max(required_kernelcore, corepages); } /* * If kernelcore was not specified or kernelcore size is larger * than totalpages, there is no ZONE_MOVABLE. */ if (!required_kernelcore || required_kernelcore >= totalpages) goto out; /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; restart: /* Spread kernelcore memory as evenly as possible throughout nodes */ kernelcore_node = required_kernelcore / usable_nodes; for_each_node_state(nid, N_MEMORY) { unsigned long start_pfn, end_pfn; /* * Recalculate kernelcore_node if the division per node * now exceeds what is necessary to satisfy the requested * amount of memory for the kernel */ if (required_kernelcore < kernelcore_node) kernelcore_node = required_kernelcore / usable_nodes; /* * As the map is walked, we track how much memory is usable * by the kernel using kernelcore_remaining. When it is * 0, the rest of the node is usable by ZONE_MOVABLE */ kernelcore_remaining = kernelcore_node; /* Go through each range of PFNs within this node */ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { unsigned long size_pages; start_pfn = max(start_pfn, zone_movable_pfn[nid]); if (start_pfn >= end_pfn) continue; /* Account for what is only usable for kernelcore */ if (start_pfn < usable_startpfn) { unsigned long kernel_pages; kernel_pages = min(end_pfn, usable_startpfn) - start_pfn; kernelcore_remaining -= min(kernel_pages, kernelcore_remaining); required_kernelcore -= min(kernel_pages, required_kernelcore); /* Continue if range is now fully accounted */ if (end_pfn <= usable_startpfn) { /* * Push zone_movable_pfn to the end so * that if we have to rebalance * kernelcore across nodes, we will * not double account here */ zone_movable_pfn[nid] = end_pfn; continue; } start_pfn = usable_startpfn; } /* * The usable PFN range for ZONE_MOVABLE is from * start_pfn->end_pfn. 
Calculate size_pages as the * number of pages used as kernelcore */ size_pages = end_pfn - start_pfn; if (size_pages > kernelcore_remaining) size_pages = kernelcore_remaining; zone_movable_pfn[nid] = start_pfn + size_pages; /* * Some kernelcore has been met, update counts and * break if the kernelcore for this node has been * satisfied */ required_kernelcore -= min(required_kernelcore, size_pages); kernelcore_remaining -= size_pages; if (!kernelcore_remaining) break; } } /* * If there is still required_kernelcore, we do another pass with one * less node in the count. This will push zone_movable_pfn[nid] further * along on the nodes that still have memory until kernelcore is * satisfied */ usable_nodes--; if (usable_nodes && required_kernelcore > usable_nodes) goto restart; out2: /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ for (nid = 0; nid < MAX_NUMNODES; nid++) { unsigned long start_pfn, end_pfn; zone_movable_pfn[nid] = roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); if (zone_movable_pfn[nid] >= end_pfn) zone_movable_pfn[nid] = 0; } out: /* restore the node_state */ node_states[N_MEMORY] = saved_node_state; } static void __meminit __init_single_page(struct page *page, unsigned long pfn, unsigned long zone, int nid) { mm_zero_struct_page(page); set_page_links(page, zone, nid, pfn); init_page_count(page); page_mapcount_reset(page); page_cpupid_reset_last(page); page_kasan_tag_reset(page); INIT_LIST_HEAD(&page->lru); #ifdef WANT_PAGE_VIRTUAL /* The shift won't overflow because ZONE_NORMAL is below 4G. */ if (!is_highmem_idx(zone)) set_page_address(page, __va(pfn << PAGE_SHIFT)); #endif } #ifdef CONFIG_NUMA /* * During memory init memblocks map pfns to nids. The search is expensive and * this caches recent lookups. The implementation of __early_pfn_to_nid * treats start/end as pfns. */ struct mminit_pfnnid_cache { unsigned long last_start; unsigned long last_end; int last_nid; }; static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; /* * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 
*/ static int __meminit __early_pfn_to_nid(unsigned long pfn, struct mminit_pfnnid_cache *state) { unsigned long start_pfn, end_pfn; int nid; if (state->last_start <= pfn && pfn < state->last_end) return state->last_nid; nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); if (nid != NUMA_NO_NODE) { state->last_start = start_pfn; state->last_end = end_pfn; state->last_nid = nid; } return nid; } int __meminit early_pfn_to_nid(unsigned long pfn) { static DEFINE_SPINLOCK(early_pfn_lock); int nid; spin_lock(&early_pfn_lock); nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); if (nid < 0) nid = first_online_node; spin_unlock(&early_pfn_lock); return nid; } int hashdist = HASHDIST_DEFAULT; static int __init set_hashdist(char *str) { if (!str) return 0; hashdist = simple_strtoul(str, &str, 0); return 1; } __setup("hashdist=", set_hashdist); static inline void fixup_hashdist(void) { if (num_node_state(N_MEMORY) == 1) hashdist = 0; } #else static inline void fixup_hashdist(void) {} #endif /* CONFIG_NUMA */ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT static inline void pgdat_set_deferred_range(pg_data_t *pgdat) { pgdat->first_deferred_pfn = ULONG_MAX; } /* Returns true if the struct page for the pfn is initialised */ static inline bool __meminit early_page_initialised(unsigned long pfn, int nid) { if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) return false; return true; } /* * Returns true when the remaining initialisation should be deferred until * later in the boot cycle when it can be parallelised. */ static bool __meminit defer_init(int nid, unsigned long pfn, unsigned long end_pfn) { static unsigned long prev_end_pfn, nr_initialised; if (early_page_ext_enabled()) return false; /* * prev_end_pfn static that contains the end of previous zone * No need to protect because called very early in boot before smp_init. */ if (prev_end_pfn != end_pfn) { prev_end_pfn = end_pfn; nr_initialised = 0; } /* Always populate low zones for address-constrained allocations */ if (end_pfn < pgdat_end_pfn(NODE_DATA(nid))) return false; if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX) return true; /* * We start only with one section of pages, more pages are added as * needed until the rest of deferred pages are initialized. */ nr_initialised++; if ((nr_initialised > PAGES_PER_SECTION) && (pfn & (PAGES_PER_SECTION - 1)) == 0) { NODE_DATA(nid)->first_deferred_pfn = pfn; return true; } return false; } static void __meminit init_reserved_page(unsigned long pfn, int nid) { pg_data_t *pgdat; int zid; if (early_page_initialised(pfn, nid)) return; pgdat = NODE_DATA(nid); for (zid = 0; zid < MAX_NR_ZONES; zid++) { struct zone *zone = &pgdat->node_zones[zid]; if (zone_spans_pfn(zone, pfn)) break; } __init_single_page(pfn_to_page(pfn), pfn, zid, nid); } #else static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} static inline bool early_page_initialised(unsigned long pfn, int nid) { return true; } static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn) { return false; } static inline void init_reserved_page(unsigned long pfn, int nid) { } #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ /* * Initialised pages do not have PageReserved set. This function is * called for each range allocated by the bootmem allocator and * marks the pages PageReserved. The remaining valid pages are later * sent to the buddy page allocator. 
*/ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end, int nid) { unsigned long start_pfn = PFN_DOWN(start); unsigned long end_pfn = PFN_UP(end); for (; start_pfn < end_pfn; start_pfn++) { if (pfn_valid(start_pfn)) { struct page *page = pfn_to_page(start_pfn); init_reserved_page(start_pfn, nid); /* Avoid false-positive PageTail() */ INIT_LIST_HEAD(&page->lru); /* * no need for atomic set_bit because the struct * page is not visible yet so nobody should * access it yet. */ __SetPageReserved(page); } } } /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */ static bool __meminit overlap_memmap_init(unsigned long zone, unsigned long *pfn) { static struct memblock_region *r; if (mirrored_kernelcore && zone == ZONE_MOVABLE) { if (!r || *pfn >= memblock_region_memory_end_pfn(r)) { for_each_mem_region(r) { if (*pfn < memblock_region_memory_end_pfn(r)) break; } } if (*pfn >= memblock_region_memory_base_pfn(r) && memblock_is_mirror(r)) { *pfn = memblock_region_memory_end_pfn(r); return true; } } return false; } /* * Only struct pages that correspond to ranges defined by memblock.memory * are zeroed and initialized by going through __init_single_page() during * memmap_init_zone_range(). * * But, there could be struct pages that correspond to holes in * memblock.memory. This can happen because of the following reasons: * - physical memory bank size is not necessarily the exact multiple of the * arbitrary section size * - early reserved memory may not be listed in memblock.memory * - memory layouts defined with memmap= kernel parameter may not align * nicely with memmap sections * * Explicitly initialize those struct pages so that: * - PG_Reserved is set * - zone and node links point to zone and node that span the page if the * hole is in the middle of a zone * - zone and node links point to adjacent zone/node if the hole falls on * the zone boundary; the pages in such holes will be prepended to the * zone/node above the hole except for the trailing pages in the last * section that will be appended to the zone/node below. */ static void __init init_unavailable_range(unsigned long spfn, unsigned long epfn, int zone, int node) { unsigned long pfn; u64 pgcnt = 0; for (pfn = spfn; pfn < epfn; pfn++) { if (!pfn_valid(pageblock_start_pfn(pfn))) { pfn = pageblock_end_pfn(pfn) - 1; continue; } __init_single_page(pfn_to_page(pfn), pfn, zone, node); __SetPageReserved(pfn_to_page(pfn)); pgcnt++; } if (pgcnt) pr_info("On node %d, zone %s: %lld pages in unavailable ranges", node, zone_names[zone], pgcnt); } /* * Initially all pages are reserved - free ones are freed * up by memblock_free_all() once the early boot process is * done. Non-atomic initialization, single-pass. * * All aligned pageblocks are initialized to the specified migratetype * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related * zone stats (e.g., nr_isolate_pageblock) are touched. */ void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone, unsigned long start_pfn, unsigned long zone_end_pfn, enum meminit_context context, struct vmem_altmap *altmap, int migratetype) { unsigned long pfn, end_pfn = start_pfn + size; struct page *page; if (highest_memmap_pfn < end_pfn - 1) highest_memmap_pfn = end_pfn - 1; #ifdef CONFIG_ZONE_DEVICE /* * Honor reservation requested by the driver for this ZONE_DEVICE * memory. We limit the total number of pages to initialize to just * those that might contain the memory mapping. 
We will defer the * ZONE_DEVICE page initialization until after we have released * the hotplug lock. */ if (zone == ZONE_DEVICE) { if (!altmap) return; if (start_pfn == altmap->base_pfn) start_pfn += altmap->reserve; end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); } #endif for (pfn = start_pfn; pfn < end_pfn; ) { /* * There can be holes in boot-time mem_map[]s handed to this * function. They do not exist on hotplugged memory. */ if (context == MEMINIT_EARLY) { if (overlap_memmap_init(zone, &pfn)) continue; if (defer_init(nid, pfn, zone_end_pfn)) { deferred_struct_pages = true; break; } } page = pfn_to_page(pfn); __init_single_page(page, pfn, zone, nid); if (context == MEMINIT_HOTPLUG) __SetPageReserved(page); /* * Usually, we want to mark the pageblock MIGRATE_MOVABLE, * such that unmovable allocations won't be scattered all * over the place during system boot. */ if (pageblock_aligned(pfn)) { set_pageblock_migratetype(page, migratetype); cond_resched(); } pfn++; } } static void __init memmap_init_zone_range(struct zone *zone, unsigned long start_pfn, unsigned long end_pfn, unsigned long *hole_pfn) { unsigned long zone_start_pfn = zone->zone_start_pfn; unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; int nid = zone_to_nid(zone), zone_id = zone_idx(zone); start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn); end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn); if (start_pfn >= end_pfn) return; memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn, zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); if (*hole_pfn < start_pfn) init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid); *hole_pfn = end_pfn; } static void __init memmap_init(void) { unsigned long start_pfn, end_pfn; unsigned long hole_pfn = 0; int i, j, zone_id = 0, nid; for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { struct pglist_data *node = NODE_DATA(nid); for (j = 0; j < MAX_NR_ZONES; j++) { struct zone *zone = node->node_zones + j; if (!populated_zone(zone)) continue; memmap_init_zone_range(zone, start_pfn, end_pfn, &hole_pfn); zone_id = j; } } #ifdef CONFIG_SPARSEMEM /* * Initialize the memory map for hole in the range [memory_end, * section_end]. * Append the pages in this hole to the highest zone in the last * node. * The call to init_unavailable_range() is outside the ifdef to * silence the compiler warining about zone_id set but not used; * for FLATMEM it is a nop anyway */ end_pfn = round_up(end_pfn, PAGES_PER_SECTION); if (hole_pfn < end_pfn) #endif init_unavailable_range(hole_pfn, end_pfn, zone_id, nid); } #ifdef CONFIG_ZONE_DEVICE static void __ref __init_zone_device_page(struct page *page, unsigned long pfn, unsigned long zone_idx, int nid, struct dev_pagemap *pgmap) { __init_single_page(page, pfn, zone_idx, nid); /* * Mark page reserved as it will need to wait for onlining * phase for it to be fully associated with a zone. * * We can use the non-atomic __set_bit operation for setting * the flag as we are still initializing the pages. */ __SetPageReserved(page); /* * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer * and zone_device_data. It is a bug if a ZONE_DEVICE page is * ever freed or placed on a driver-private list. */ page->pgmap = pgmap; page->zone_device_data = NULL; /* * Mark the block movable so that blocks are reserved for * movable at startup. This will force kernel allocations * to reserve their blocks rather than leaking throughout * the address space during boot when many long-lived * kernel allocations are made. 
* * Please note that MEMINIT_HOTPLUG path doesn't clear memmap * because this is done early in section_activate() */ if (pageblock_aligned(pfn)) { set_pageblock_migratetype(page, MIGRATE_MOVABLE); cond_resched(); } /* * ZONE_DEVICE pages are released directly to the driver page allocator * which will set the page count to 1 when allocating the page. */ if (pgmap->type == MEMORY_DEVICE_PRIVATE || pgmap->type == MEMORY_DEVICE_COHERENT) set_page_count(page, 0); } /* * With compound page geometry and when struct pages are stored in ram most * tail pages are reused. Consequently, the amount of unique struct pages to * initialize is a lot smaller that the total amount of struct pages being * mapped. This is a paired / mild layering violation with explicit knowledge * of how the sparse_vmemmap internals handle compound pages in the lack * of an altmap. See vmemmap_populate_compound_pages(). */ static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap, struct dev_pagemap *pgmap) { if (!vmemmap_can_optimize(altmap, pgmap)) return pgmap_vmemmap_nr(pgmap); return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page)); } static void __ref memmap_init_compound(struct page *head, unsigned long head_pfn, unsigned long zone_idx, int nid, struct dev_pagemap *pgmap, unsigned long nr_pages) { unsigned long pfn, end_pfn = head_pfn + nr_pages; unsigned int order = pgmap->vmemmap_shift; __SetPageHead(head); for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) { struct page *page = pfn_to_page(pfn); __init_zone_device_page(page, pfn, zone_idx, nid, pgmap); prep_compound_tail(head, pfn - head_pfn); set_page_count(page, 0); /* * The first tail page stores important compound page info. * Call prep_compound_head() after the first tail page has * been initialized, to not have the data overwritten. */ if (pfn == head_pfn + 1) prep_compound_head(head, order); } } void __ref memmap_init_zone_device(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages, struct dev_pagemap *pgmap) { unsigned long pfn, end_pfn = start_pfn + nr_pages; struct pglist_data *pgdat = zone->zone_pgdat; struct vmem_altmap *altmap = pgmap_altmap(pgmap); unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap); unsigned long zone_idx = zone_idx(zone); unsigned long start = jiffies; int nid = pgdat->node_id; if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE)) return; /* * The call to memmap_init should have already taken care * of the pages reserved for the memmap, so we can just jump to * the end of that region and start processing the device pages. */ if (altmap) { start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); nr_pages = end_pfn - start_pfn; } for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) { struct page *page = pfn_to_page(pfn); __init_zone_device_page(page, pfn, zone_idx, nid, pgmap); if (pfns_per_compound == 1) continue; memmap_init_compound(page, pfn, zone_idx, nid, pgmap, compound_nr_pages(altmap, pgmap)); } pr_debug("%s initialised %lu pages in %ums\n", __func__, nr_pages, jiffies_to_msecs(jiffies - start)); } #endif /* * The zone ranges provided by the architecture do not include ZONE_MOVABLE * because it is sized independent of architecture. Unlike the other zones, * the starting point for ZONE_MOVABLE is not fixed. It may be different * in each node depending on the size of each node and how evenly kernelcore * is distributed. This helper function adjusts the zone ranges * provided by the architecture for a given node by using the end of the * highest usable zone for ZONE_MOVABLE. 
This preserves the assumption that * zones within a node are in order of monotonic increases memory addresses */ static void __init adjust_zone_range_for_zone_movable(int nid, unsigned long zone_type, unsigned long node_end_pfn, unsigned long *zone_start_pfn, unsigned long *zone_end_pfn) { /* Only adjust if ZONE_MOVABLE is on this node */ if (zone_movable_pfn[nid]) { /* Size ZONE_MOVABLE */ if (zone_type == ZONE_MOVABLE) { *zone_start_pfn = zone_movable_pfn[nid]; *zone_end_pfn = min(node_end_pfn, arch_zone_highest_possible_pfn[movable_zone]); /* Adjust for ZONE_MOVABLE starting within this range */ } else if (!mirrored_kernelcore && *zone_start_pfn < zone_movable_pfn[nid] && *zone_end_pfn > zone_movable_pfn[nid]) { *zone_end_pfn = zone_movable_pfn[nid]; /* Check if this whole range is within ZONE_MOVABLE */ } else if (*zone_start_pfn >= zone_movable_pfn[nid]) *zone_start_pfn = *zone_end_pfn; } } /* * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, * then all holes in the requested range will be accounted for. */ unsigned long __init __absent_pages_in_range(int nid, unsigned long range_start_pfn, unsigned long range_end_pfn) { unsigned long nr_absent = range_end_pfn - range_start_pfn; unsigned long start_pfn, end_pfn; int i; for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn); end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn); nr_absent -= end_pfn - start_pfn; } return nr_absent; } /** * absent_pages_in_range - Return number of page frames in holes within a range * @start_pfn: The start PFN to start searching for holes * @end_pfn: The end PFN to stop searching for holes * * Return: the number of pages frames in memory holes within a range. */ unsigned long __init absent_pages_in_range(unsigned long start_pfn, unsigned long end_pfn) { return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); } /* Return the number of page frames in holes in a zone on a node */ static unsigned long __init zone_absent_pages_in_node(int nid, unsigned long zone_type, unsigned long zone_start_pfn, unsigned long zone_end_pfn) { unsigned long nr_absent; /* zone is empty, we don't have any absent pages */ if (zone_start_pfn == zone_end_pfn) return 0; nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); /* * ZONE_MOVABLE handling. * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages * and vice versa. 
*/ if (mirrored_kernelcore && zone_movable_pfn[nid]) { unsigned long start_pfn, end_pfn; struct memblock_region *r; for_each_mem_region(r) { start_pfn = clamp(memblock_region_memory_base_pfn(r), zone_start_pfn, zone_end_pfn); end_pfn = clamp(memblock_region_memory_end_pfn(r), zone_start_pfn, zone_end_pfn); if (zone_type == ZONE_MOVABLE && memblock_is_mirror(r)) nr_absent += end_pfn - start_pfn; if (zone_type == ZONE_NORMAL && !memblock_is_mirror(r)) nr_absent += end_pfn - start_pfn; } } return nr_absent; } /* * Return the number of pages a zone spans in a node, including holes * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() */ static unsigned long __init zone_spanned_pages_in_node(int nid, unsigned long zone_type, unsigned long node_start_pfn, unsigned long node_end_pfn, unsigned long *zone_start_pfn, unsigned long *zone_end_pfn) { unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; /* Get the start and end of the zone */ *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); adjust_zone_range_for_zone_movable(nid, zone_type, node_end_pfn, zone_start_pfn, zone_end_pfn); /* Check that this node has pages within the zone's required range */ if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn) return 0; /* Move the zone boundaries inside the node if necessary */ *zone_end_pfn = min(*zone_end_pfn, node_end_pfn); *zone_start_pfn = max(*zone_start_pfn, node_start_pfn); /* Return the spanned pages */ return *zone_end_pfn - *zone_start_pfn; } static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat) { struct zone *z; for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) { z->zone_start_pfn = 0; z->spanned_pages = 0; z->present_pages = 0; #if defined(CONFIG_MEMORY_HOTPLUG) z->present_early_pages = 0; #endif } pgdat->node_spanned_pages = 0; pgdat->node_present_pages = 0; pr_debug("On node %d totalpages: 0\n", pgdat->node_id); } static void __init calculate_node_totalpages(struct pglist_data *pgdat, unsigned long node_start_pfn, unsigned long node_end_pfn) { unsigned long realtotalpages = 0, totalpages = 0; enum zone_type i; for (i = 0; i < MAX_NR_ZONES; i++) { struct zone *zone = pgdat->node_zones + i; unsigned long zone_start_pfn, zone_end_pfn; unsigned long spanned, absent; unsigned long real_size; spanned = zone_spanned_pages_in_node(pgdat->node_id, i, node_start_pfn, node_end_pfn, &zone_start_pfn, &zone_end_pfn); absent = zone_absent_pages_in_node(pgdat->node_id, i, zone_start_pfn, zone_end_pfn); real_size = spanned - absent; if (spanned) zone->zone_start_pfn = zone_start_pfn; else zone->zone_start_pfn = 0; zone->spanned_pages = spanned; zone->present_pages = real_size; #if defined(CONFIG_MEMORY_HOTPLUG) zone->present_early_pages = real_size; #endif totalpages += spanned; realtotalpages += real_size; } pgdat->node_spanned_pages = totalpages; pgdat->node_present_pages = realtotalpages; pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); } static unsigned long __init calc_memmap_size(unsigned long spanned_pages, unsigned long present_pages) { unsigned long pages = spanned_pages; /* * Provide a more accurate estimation if there are holes within * the zone and SPARSEMEM is in use. 
If there are holes within the * zone, each populated memory region may cost us one or two extra * memmap pages due to alignment because memmap pages for each * populated regions may not be naturally aligned on page boundary. * So the (present_pages >> 4) heuristic is a tradeoff for that. */ if (spanned_pages > present_pages + (present_pages >> 4) && IS_ENABLED(CONFIG_SPARSEMEM)) pages = present_pages; return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE static void pgdat_init_split_queue(struct pglist_data *pgdat) { struct deferred_split *ds_queue = &pgdat->deferred_split_queue; spin_lock_init(&ds_queue->split_queue_lock); INIT_LIST_HEAD(&ds_queue->split_queue); ds_queue->split_queue_len = 0; } #else static void pgdat_init_split_queue(struct pglist_data *pgdat) {} #endif #ifdef CONFIG_COMPACTION static void pgdat_init_kcompactd(struct pglist_data *pgdat) { init_waitqueue_head(&pgdat->kcompactd_wait); } #else static void pgdat_init_kcompactd(struct pglist_data *pgdat) {} #endif static void __meminit pgdat_init_internals(struct pglist_data *pgdat) { int i; pgdat_resize_init(pgdat); pgdat_kswapd_lock_init(pgdat); pgdat_init_split_queue(pgdat); pgdat_init_kcompactd(pgdat); init_waitqueue_head(&pgdat->kswapd_wait); init_waitqueue_head(&pgdat->pfmemalloc_wait); for (i = 0; i < NR_VMSCAN_THROTTLE; i++) init_waitqueue_head(&pgdat->reclaim_wait[i]); pgdat_page_ext_init(pgdat); lruvec_init(&pgdat->__lruvec); } static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, unsigned long remaining_pages) { atomic_long_set(&zone->managed_pages, remaining_pages); zone_set_nid(zone, nid); zone->name = zone_names[idx]; zone->zone_pgdat = NODE_DATA(nid); spin_lock_init(&zone->lock); zone_seqlock_init(zone); zone_pcp_init(zone); } static void __meminit zone_init_free_lists(struct zone *zone) { unsigned int order, t; for_each_migratetype_order(order, t) { INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); zone->free_area[order].nr_free = 0; } #ifdef CONFIG_UNACCEPTED_MEMORY INIT_LIST_HEAD(&zone->unaccepted_pages); #endif } void __meminit init_currently_empty_zone(struct zone *zone, unsigned long zone_start_pfn, unsigned long size) { struct pglist_data *pgdat = zone->zone_pgdat; int zone_idx = zone_idx(zone) + 1; if (zone_idx > pgdat->nr_zones) pgdat->nr_zones = zone_idx; zone->zone_start_pfn = zone_start_pfn; mminit_dprintk(MMINIT_TRACE, "memmap_init", "Initialising map node %d zone %lu pfns %lu -> %lu\n", pgdat->node_id, (unsigned long)zone_idx(zone), zone_start_pfn, (zone_start_pfn + size)); zone_init_free_lists(zone); zone->initialized = 1; } #ifndef CONFIG_SPARSEMEM /* * Calculate the size of the zone->blockflags rounded to an unsigned long * Start by making sure zonesize is a multiple of pageblock_order by rounding * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally * round what is now in bits to nearest long in bits, then return it in * bytes. 
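 * For example, assuming 4K pages, pageblock_order 9 (2 MiB pageblocks) and
 * NR_PAGEBLOCK_BITS of 4 (hypothetical but common values): a 1 GiB zone has
 * 512 pageblocks, needing 512 * 4 = 2048 bits, which is already a multiple
 * of BITS_PER_LONG and comes back as 256 bytes.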
*/ static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) { unsigned long usemapsize; zonesize += zone_start_pfn & (pageblock_nr_pages-1); usemapsize = roundup(zonesize, pageblock_nr_pages); usemapsize = usemapsize >> pageblock_order; usemapsize *= NR_PAGEBLOCK_BITS; usemapsize = roundup(usemapsize, BITS_PER_LONG); return usemapsize / BITS_PER_BYTE; } static void __ref setup_usemap(struct zone *zone) { unsigned long usemapsize = usemap_size(zone->zone_start_pfn, zone->spanned_pages); zone->pageblock_flags = NULL; if (usemapsize) { zone->pageblock_flags = memblock_alloc_node(usemapsize, SMP_CACHE_BYTES, zone_to_nid(zone)); if (!zone->pageblock_flags) panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", usemapsize, zone->name, zone_to_nid(zone)); } } #else static inline void setup_usemap(struct zone *zone) {} #endif /* CONFIG_SPARSEMEM */ #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ void __init set_pageblock_order(void) { unsigned int order = MAX_ORDER; /* Check that pageblock_nr_pages has not already been setup */ if (pageblock_order) return; /* Don't let pageblocks exceed the maximum allocation granularity. */ if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order) order = HUGETLB_PAGE_ORDER; /* * Assume the largest contiguous order of interest is a huge page. * This value may be variable depending on boot parameters on IA64 and * powerpc. */ pageblock_order = order; } #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ /* * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() * is unused as pageblock_order is set at compile-time. See * include/linux/pageblock-flags.h for the values of pageblock_order based on * the kernel config */ void __init set_pageblock_order(void) { } #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ /* * Set up the zone data structures * - init pgdat internals * - init all zones belonging to this node * * NOTE: this function is only called during memory hotplug */ #ifdef CONFIG_MEMORY_HOTPLUG void __ref free_area_init_core_hotplug(struct pglist_data *pgdat) { int nid = pgdat->node_id; enum zone_type z; int cpu; pgdat_init_internals(pgdat); if (pgdat->per_cpu_nodestats == &boot_nodestats) pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); /* * Reset the nr_zones, order and highest_zoneidx before reuse. * Note that kswapd will init kswapd_highest_zoneidx properly * when it starts in the near future. */ pgdat->nr_zones = 0; pgdat->kswapd_order = 0; pgdat->kswapd_highest_zoneidx = 0; pgdat->node_start_pfn = 0; pgdat->node_present_pages = 0; for_each_online_cpu(cpu) { struct per_cpu_nodestat *p; p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); memset(p, 0, sizeof(*p)); } /* * When memory is hot-added, all the memory is in offline state. So * clear all zones' present_pages and managed_pages because they will * be updated in online_pages() and offline_pages(). */ for (z = 0; z < MAX_NR_ZONES; z++) { struct zone *zone = pgdat->node_zones + z; zone->present_pages = 0; zone_init_internals(zone, z, nid, 0); } } #endif /* * Set up the zone data structures: * - mark all pages reserved * - mark all memory queues empty * - clear the memory bitmaps * * NOTE: pgdat should get zeroed by caller. * NOTE: this function is only called during early init. 
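 * For example (hypothetical zone, assuming a 64-byte struct page and 4K
 * pages): a non-highmem zone spanning 262144 pages (1 GiB) with no holes
 * loses roughly 4096 pages (16 MiB) of freesize to its memmap before the
 * watermark and per-cpu calculations see it.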
*/ static void __init free_area_init_core(struct pglist_data *pgdat) { enum zone_type j; int nid = pgdat->node_id; pgdat_init_internals(pgdat); pgdat->per_cpu_nodestats = &boot_nodestats; for (j = 0; j < MAX_NR_ZONES; j++) { struct zone *zone = pgdat->node_zones + j; unsigned long size, freesize, memmap_pages; size = zone->spanned_pages; freesize = zone->present_pages; /* * Adjust freesize so that it accounts for how much memory * is used by this zone for memmap. This affects the watermark * and per-cpu initialisations */ memmap_pages = calc_memmap_size(size, freesize); if (!is_highmem_idx(j)) { if (freesize >= memmap_pages) { freesize -= memmap_pages; if (memmap_pages) pr_debug(" %s zone: %lu pages used for memmap\n", zone_names[j], memmap_pages); } else pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n", zone_names[j], memmap_pages, freesize); } /* Account for reserved pages */ if (j == 0 && freesize > dma_reserve) { freesize -= dma_reserve; pr_debug(" %s zone: %lu pages reserved\n", zone_names[0], dma_reserve); } if (!is_highmem_idx(j)) nr_kernel_pages += freesize; /* Charge for highmem memmap if there are enough kernel pages */ else if (nr_kernel_pages > memmap_pages * 2) nr_kernel_pages -= memmap_pages; nr_all_pages += freesize; /* * Set an approximate value for lowmem here, it will be adjusted * when the bootmem allocator frees pages into the buddy system. * And all highmem pages will be managed by the buddy system. */ zone_init_internals(zone, j, nid, freesize); if (!size) continue; setup_usemap(zone); init_currently_empty_zone(zone, zone->zone_start_pfn, size); } } void __init *memmap_alloc(phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, int nid, bool exact_nid) { void *ptr; if (exact_nid) ptr = memblock_alloc_exact_nid_raw(size, align, min_addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid); else ptr = memblock_alloc_try_nid_raw(size, align, min_addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid); if (ptr && size > 0) page_init_poison(ptr, size); return ptr; } #ifdef CONFIG_FLATMEM static void __init alloc_node_mem_map(struct pglist_data *pgdat) { unsigned long __maybe_unused start = 0; unsigned long __maybe_unused offset = 0; /* Skip empty nodes */ if (!pgdat->node_spanned_pages) return; start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); offset = pgdat->node_start_pfn - start; /* ia64 gets its own node_mem_map, before this, without bootmem */ if (!pgdat->node_mem_map) { unsigned long size, end; struct page *map; /* * The zone's endpoints aren't required to be MAX_ORDER * aligned but the node_mem_map endpoints must be in order * for the buddy allocator to function correctly. 
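 * For example (hypothetical node, MAX_ORDER_NR_PAGES of 1024): a node
 * spanning PFNs 100-900 gets start rounded down to 0 and end rounded up to
 * 1024, so the allocated map covers PFNs 0-1023 and node_mem_map points at
 * map + 100, the offset of node_start_pfn into that rounded range.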
*/ end = pgdat_end_pfn(pgdat); end = ALIGN(end, MAX_ORDER_NR_PAGES); size = (end - start) * sizeof(struct page); map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT, pgdat->node_id, false); if (!map) panic("Failed to allocate %ld bytes for node %d memory map\n", size, pgdat->node_id); pgdat->node_mem_map = map + offset; } pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n", __func__, pgdat->node_id, (unsigned long)pgdat, (unsigned long)pgdat->node_mem_map); #ifndef CONFIG_NUMA /* * With no DISCONTIG, the global mem_map is just set as node 0's */ if (pgdat == NODE_DATA(0)) { mem_map = NODE_DATA(0)->node_mem_map; if (page_to_pfn(mem_map) != pgdat->node_start_pfn) mem_map -= offset; } #endif } #else static inline void alloc_node_mem_map(struct pglist_data *pgdat) { } #endif /* CONFIG_FLATMEM */ /** * get_pfn_range_for_nid - Return the start and end page frames for a node * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. * @start_pfn: Passed by reference. On return, it will have the node start_pfn. * @end_pfn: Passed by reference. On return, it will have the node end_pfn. * * It returns the start and end page frame of a node based on information * provided by memblock_set_node(). If called for a node * with no available memory, the start and end PFNs will be 0. */ void __init get_pfn_range_for_nid(unsigned int nid, unsigned long *start_pfn, unsigned long *end_pfn) { unsigned long this_start_pfn, this_end_pfn; int i; *start_pfn = -1UL; *end_pfn = 0; for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { *start_pfn = min(*start_pfn, this_start_pfn); *end_pfn = max(*end_pfn, this_end_pfn); } if (*start_pfn == -1UL) *start_pfn = 0; } static void __init free_area_init_node(int nid) { pg_data_t *pgdat = NODE_DATA(nid); unsigned long start_pfn = 0; unsigned long end_pfn = 0; /* pg_data_t should be reset to zero when it's allocated */ WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); pgdat->node_id = nid; pgdat->node_start_pfn = start_pfn; pgdat->per_cpu_nodestats = NULL; if (start_pfn != end_pfn) { pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, (u64)start_pfn << PAGE_SHIFT, end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); calculate_node_totalpages(pgdat, start_pfn, end_pfn); } else { pr_info("Initmem setup node %d as memoryless\n", nid); reset_memoryless_node_totalpages(pgdat); } alloc_node_mem_map(pgdat); pgdat_set_deferred_range(pgdat); free_area_init_core(pgdat); lru_gen_init_pgdat(pgdat); } /* Any regular or high memory on that node ? */ static void __init check_for_memory(pg_data_t *pgdat) { enum zone_type zone_type; for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { struct zone *zone = &pgdat->node_zones[zone_type]; if (populated_zone(zone)) { if (IS_ENABLED(CONFIG_HIGHMEM)) node_set_state(pgdat->node_id, N_HIGH_MEMORY); if (zone_type <= ZONE_NORMAL) node_set_state(pgdat->node_id, N_NORMAL_MEMORY); break; } } } #if MAX_NUMNODES > 1 /* * Figure out the number of possible node ids. */ void __init setup_nr_node_ids(void) { unsigned int highest; highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); nr_node_ids = highest + 1; } #endif /* * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. 
For * such cases we allow max_zone_pfn sorted in the descending order */ static bool arch_has_descending_max_zone_pfns(void) { return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40); } /** * free_area_init - Initialise all pg_data_t and zone data * @max_zone_pfn: an array of max PFNs for each zone * * This will call free_area_init_node() for each active node in the system. * Using the page ranges provided by memblock_set_node(), the size of each * zone in each node and their holes is calculated. If the maximum PFN * between two adjacent zones match, it is assumed that the zone is empty. * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed * that arch_max_dma32_pfn has no pages. It is also assumed that a zone * starts where the previous one ended. For example, ZONE_DMA32 starts * at arch_max_dma_pfn. */ void __init free_area_init(unsigned long *max_zone_pfn) { unsigned long start_pfn, end_pfn; int i, nid, zone; bool descending; /* Record where the zone boundaries are */ memset(arch_zone_lowest_possible_pfn, 0, sizeof(arch_zone_lowest_possible_pfn)); memset(arch_zone_highest_possible_pfn, 0, sizeof(arch_zone_highest_possible_pfn)); start_pfn = PHYS_PFN(memblock_start_of_DRAM()); descending = arch_has_descending_max_zone_pfns(); for (i = 0; i < MAX_NR_ZONES; i++) { if (descending) zone = MAX_NR_ZONES - i - 1; else zone = i; if (zone == ZONE_MOVABLE) continue; end_pfn = max(max_zone_pfn[zone], start_pfn); arch_zone_lowest_possible_pfn[zone] = start_pfn; arch_zone_highest_possible_pfn[zone] = end_pfn; start_pfn = end_pfn; } /* Find the PFNs that ZONE_MOVABLE begins at in each node */ memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); find_zone_movable_pfns_for_nodes(); /* Print out the zone ranges */ pr_info("Zone ranges:\n"); for (i = 0; i < MAX_NR_ZONES; i++) { if (i == ZONE_MOVABLE) continue; pr_info(" %-8s ", zone_names[i]); if (arch_zone_lowest_possible_pfn[i] == arch_zone_highest_possible_pfn[i]) pr_cont("empty\n"); else pr_cont("[mem %#018Lx-%#018Lx]\n", (u64)arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT, ((u64)arch_zone_highest_possible_pfn[i] << PAGE_SHIFT) - 1); } /* Print out the PFNs ZONE_MOVABLE begins at in each node */ pr_info("Movable zone start for each node\n"); for (i = 0; i < MAX_NUMNODES; i++) { if (zone_movable_pfn[i]) pr_info(" Node %d: %#018Lx\n", i, (u64)zone_movable_pfn[i] << PAGE_SHIFT); } /* * Print out the early node map, and initialize the * subsection-map relative to active online memory ranges to * enable future "sub-section" extensions of the memory map. */ pr_info("Early memory node ranges\n"); for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, (u64)start_pfn << PAGE_SHIFT, ((u64)end_pfn << PAGE_SHIFT) - 1); subsection_map_init(start_pfn, end_pfn - start_pfn); } /* Initialise every node */ mminit_verify_pageflags_layout(); setup_nr_node_ids(); set_pageblock_order(); for_each_node(nid) { pg_data_t *pgdat; if (!node_online(nid)) { pr_info("Initializing node %d as memoryless\n", nid); /* Allocator not initialized yet */ pgdat = arch_alloc_nodedata(nid); if (!pgdat) panic("Cannot allocate %zuB for node %d.\n", sizeof(*pgdat), nid); arch_refresh_nodedata(nid, pgdat); free_area_init_node(nid); /* * We do not want to confuse userspace by sysfs * files/directories for node without any memory * attached to it, so this node is not marked as * N_MEMORY and not marked online so that no sysfs * hierarchy will be created via register_one_node for * it. 
The pgdat will get fully initialized by * hotadd_init_pgdat() when memory is hotplugged into * this node. */ continue; } pgdat = NODE_DATA(nid); free_area_init_node(nid); /* Any memory on that node */ if (pgdat->node_present_pages) node_set_state(nid, N_MEMORY); check_for_memory(pgdat); } memmap_init(); /* disable hash distribution for systems with a single node */ fixup_hashdist(); } /** * node_map_pfn_alignment - determine the maximum internode alignment * * This function should be called after node map is populated and sorted. * It calculates the maximum power of two alignment which can distinguish * all the nodes. * * For example, if all nodes are 1GiB and aligned to 1GiB, the return value * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is * shifted, 1GiB is enough and this function will indicate so. * * This is used to test whether pfn -> nid mapping of the chosen memory * model has fine enough granularity to avoid incorrect mapping for the * populated node map. * * Return: the determined alignment in pfn's. 0 if there is no alignment * requirement (single node). */ unsigned long __init node_map_pfn_alignment(void) { unsigned long accl_mask = 0, last_end = 0; unsigned long start, end, mask; int last_nid = NUMA_NO_NODE; int i, nid; for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { if (!start || last_nid < 0 || last_nid == nid) { last_nid = nid; last_end = end; continue; } /* * Start with a mask granular enough to pin-point to the * start pfn and tick off bits one-by-one until it becomes * too coarse to separate the current node from the last. */ mask = ~((1 << __ffs(start)) - 1); while (mask && last_end <= (start & (mask << 1))) mask <<= 1; /* accumulate all internode masks */ accl_mask |= mask; } /* convert mask to number of pages */ return ~accl_mask + 1; } #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT static void __init deferred_free_range(unsigned long pfn, unsigned long nr_pages) { struct page *page; unsigned long i; if (!nr_pages) return; page = pfn_to_page(pfn); /* Free a large naturally-aligned chunk if possible */ if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) { for (i = 0; i < nr_pages; i += pageblock_nr_pages) set_pageblock_migratetype(page + i, MIGRATE_MOVABLE); __free_pages_core(page, MAX_ORDER); return; } /* Accept chunks smaller than MAX_ORDER upfront */ accept_memory(PFN_PHYS(pfn), PFN_PHYS(pfn + nr_pages)); for (i = 0; i < nr_pages; i++, page++, pfn++) { if (pageblock_aligned(pfn)) set_pageblock_migratetype(page, MIGRATE_MOVABLE); __free_pages_core(page, 0); } } /* Completion tracking for deferred_init_memmap() threads */ static atomic_t pgdat_init_n_undone __initdata; static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp); static inline void __init pgdat_init_report_one_done(void) { if (atomic_dec_and_test(&pgdat_init_n_undone)) complete(&pgdat_init_all_done_comp); } /* * Returns true if page needs to be initialized or freed to buddy allocator. * * We check if a current MAX_ORDER block is valid by only checking the validity * of the head pfn. */ static inline bool __init deferred_pfn_valid(unsigned long pfn) { if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn)) return false; return true; } /* * Free pages to buddy allocator. Try to free aligned pages in * MAX_ORDER_NR_PAGES sizes. 
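 * For example, assuming MAX_ORDER of 10 and 4K pages (hypothetical but
 * common values): a fully valid, aligned run of 1024 pfns is handed to the
 * buddy as a single 4 MiB order-10 block by deferred_free_range(), while
 * leftover ranges around holes are freed as individual order-0 pages.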
*/ static void __init deferred_free_pages(unsigned long pfn, unsigned long end_pfn) { unsigned long nr_free = 0; for (; pfn < end_pfn; pfn++) { if (!deferred_pfn_valid(pfn)) { deferred_free_range(pfn - nr_free, nr_free); nr_free = 0; } else if (IS_MAX_ORDER_ALIGNED(pfn)) { deferred_free_range(pfn - nr_free, nr_free); nr_free = 1; } else { nr_free++; } } /* Free the last block of pages to allocator */ deferred_free_range(pfn - nr_free, nr_free); } /* * Initialize struct pages. We minimize pfn page lookups and scheduler checks * by performing it only once every MAX_ORDER_NR_PAGES. * Return number of pages initialized. */ static unsigned long __init deferred_init_pages(struct zone *zone, unsigned long pfn, unsigned long end_pfn) { int nid = zone_to_nid(zone); unsigned long nr_pages = 0; int zid = zone_idx(zone); struct page *page = NULL; for (; pfn < end_pfn; pfn++) { if (!deferred_pfn_valid(pfn)) { page = NULL; continue; } else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) { page = pfn_to_page(pfn); } else { page++; } __init_single_page(page, pfn, zid, nid); nr_pages++; } return (nr_pages); } /* * This function is meant to pre-load the iterator for the zone init. * Specifically it walks through the ranges until we are caught up to the * first_init_pfn value and exits there. If we never encounter the value we * return false indicating there are no valid ranges left. */ static bool __init deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, unsigned long *spfn, unsigned long *epfn, unsigned long first_init_pfn) { u64 j; /* * Start out by walking through the ranges in this zone that have * already been initialized. We don't need to do anything with them * so we just need to flush them out of the system. */ for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { if (*epfn <= first_init_pfn) continue; if (*spfn < first_init_pfn) *spfn = first_init_pfn; *i = j; return true; } return false; } /* * Initialize and free pages. We do it in two loops: first we initialize * struct page, then free to buddy allocator, because while we are * freeing pages we can access pages that are ahead (computing buddy * page in __free_one_page()). * * In order to try and keep some memory in the cache we have the loop * broken along max page order boundaries. This way we will not cause * any issues with the buddy page computation. 
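 * Concretely, deferred_init_maxorder() below first initializes every struct
 * page up to the next MAX_ORDER_NR_PAGES boundary and only then frees that
 * same span, so the buddy computation in __free_one_page() never touches a
 * struct page that has not been initialized yet.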
*/ static unsigned long __init deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, unsigned long *end_pfn) { unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES); unsigned long spfn = *start_pfn, epfn = *end_pfn; unsigned long nr_pages = 0; u64 j = *i; /* First we loop through and initialize the page values */ for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { unsigned long t; if (mo_pfn <= *start_pfn) break; t = min(mo_pfn, *end_pfn); nr_pages += deferred_init_pages(zone, *start_pfn, t); if (mo_pfn < *end_pfn) { *start_pfn = mo_pfn; break; } } /* Reset values and now loop through freeing pages as needed */ swap(j, *i); for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { unsigned long t; if (mo_pfn <= spfn) break; t = min(mo_pfn, epfn); deferred_free_pages(spfn, t); if (mo_pfn <= epfn) break; } return nr_pages; } static void __init deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn, void *arg) { unsigned long spfn, epfn; struct zone *zone = arg; u64 i; deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); /* * Initialize and free pages in MAX_ORDER sized increments so that we * can avoid introducing any issues with the buddy allocator. */ while (spfn < end_pfn) { deferred_init_maxorder(&i, zone, &spfn, &epfn); cond_resched(); } } /* An arch may override for more concurrency. */ __weak int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask) { return 1; } /* Initialise remaining memory on a node */ static int __init deferred_init_memmap(void *data) { pg_data_t *pgdat = data; const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); unsigned long spfn = 0, epfn = 0; unsigned long first_init_pfn, flags; unsigned long start = jiffies; struct zone *zone; int zid, max_threads; u64 i; /* Bind memory initialisation thread to a local node if possible */ if (!cpumask_empty(cpumask)) set_cpus_allowed_ptr(current, cpumask); pgdat_resize_lock(pgdat, &flags); first_init_pfn = pgdat->first_deferred_pfn; if (first_init_pfn == ULONG_MAX) { pgdat_resize_unlock(pgdat, &flags); pgdat_init_report_one_done(); return 0; } /* Sanity check boundaries */ BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); pgdat->first_deferred_pfn = ULONG_MAX; /* * Once we unlock here, the zone cannot be grown anymore, thus if an * interrupt thread must allocate this early in boot, zone must be * pre-grown prior to start of deferred page initialization. 
*/ pgdat_resize_unlock(pgdat, &flags); /* Only the highest zone is deferred so find it */ for (zid = 0; zid < MAX_NR_ZONES; zid++) { zone = pgdat->node_zones + zid; if (first_init_pfn < zone_end_pfn(zone)) break; } /* If the zone is empty somebody else may have cleared out the zone */ if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, first_init_pfn)) goto zone_empty; max_threads = deferred_page_init_max_threads(cpumask); while (spfn < epfn) { unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION); struct padata_mt_job job = { .thread_fn = deferred_init_memmap_chunk, .fn_arg = zone, .start = spfn, .size = epfn_align - spfn, .align = PAGES_PER_SECTION, .min_chunk = PAGES_PER_SECTION, .max_threads = max_threads, }; padata_do_multithreaded(&job); deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, epfn_align); } zone_empty: /* Sanity check that the next zone really is unpopulated */ WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); pr_info("node %d deferred pages initialised in %ums\n", pgdat->node_id, jiffies_to_msecs(jiffies - start)); pgdat_init_report_one_done(); return 0; } /* * If this zone has deferred pages, try to grow it by initializing enough * deferred pages to satisfy the allocation specified by order, rounded up to * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments * of SECTION_SIZE bytes by initializing struct pages in increments of * PAGES_PER_SECTION * sizeof(struct page) bytes. * * Return true when zone was grown, otherwise return false. We return true even * when we grow less than requested, to let the caller decide if there are * enough pages to satisfy the allocation. * * Note: We use noinline because this function is needed only during boot, and * it is called from a __ref function _deferred_grow_zone. This way we are * making sure that it is not inlined into permanent text section. */ bool __init deferred_grow_zone(struct zone *zone, unsigned int order) { unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION); pg_data_t *pgdat = zone->zone_pgdat; unsigned long first_deferred_pfn = pgdat->first_deferred_pfn; unsigned long spfn, epfn, flags; unsigned long nr_pages = 0; u64 i; /* Only the last zone may have deferred pages */ if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) return false; pgdat_resize_lock(pgdat, &flags); /* * If someone grew this zone while we were waiting for spinlock, return * true, as there might be enough pages already. */ if (first_deferred_pfn != pgdat->first_deferred_pfn) { pgdat_resize_unlock(pgdat, &flags); return true; } /* If the zone is empty somebody else may have cleared out the zone */ if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, first_deferred_pfn)) { pgdat->first_deferred_pfn = ULONG_MAX; pgdat_resize_unlock(pgdat, &flags); /* Retry only once. */ return first_deferred_pfn != ULONG_MAX; } /* * Initialize and free pages in MAX_ORDER sized increments so * that we can avoid introducing any issues with the buddy * allocator. 
*/ while (spfn < epfn) { /* update our first deferred PFN for this section */ first_deferred_pfn = spfn; nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); touch_nmi_watchdog(); /* We should only stop along section boundaries */ if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION) continue; /* If our quota has been met we can stop here */ if (nr_pages >= nr_pages_needed) break; } pgdat->first_deferred_pfn = spfn; pgdat_resize_unlock(pgdat, &flags); return nr_pages > 0; } #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ #ifdef CONFIG_CMA void __init init_cma_reserved_pageblock(struct page *page) { unsigned i = pageblock_nr_pages; struct page *p = page; do { __ClearPageReserved(p); set_page_count(p, 0); } while (++p, --i); set_pageblock_migratetype(page, MIGRATE_CMA); set_page_refcounted(page); __free_pages(page, pageblock_order); adjust_managed_page_count(page, pageblock_nr_pages); page_zone(page)->cma_pages += pageblock_nr_pages; } #endif void set_zone_contiguous(struct zone *zone) { unsigned long block_start_pfn = zone->zone_start_pfn; unsigned long block_end_pfn; block_end_pfn = pageblock_end_pfn(block_start_pfn); for (; block_start_pfn < zone_end_pfn(zone); block_start_pfn = block_end_pfn, block_end_pfn += pageblock_nr_pages) { block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); if (!__pageblock_pfn_to_page(block_start_pfn, block_end_pfn, zone)) return; cond_resched(); } /* We confirm that there is no hole */ zone->contiguous = true; } void __init page_alloc_init_late(void) { struct zone *zone; int nid; #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT /* There will be num_node_state(N_MEMORY) threads */ atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY)); for_each_node_state(nid, N_MEMORY) { kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); } /* Block until all are initialised */ wait_for_completion(&pgdat_init_all_done_comp); /* * We initialized the rest of the deferred pages. Permanently disable * on-demand struct page initialization. */ static_branch_disable(&deferred_pages); /* Reinit limits that are based on free pages after the kernel is up */ files_maxfiles_init(); #endif buffer_init(); /* Discard memblock private memory */ memblock_discard(); for_each_node_state(nid, N_MEMORY) shuffle_free_memory(NODE_DATA(nid)); for_each_populated_zone(zone) set_zone_contiguous(zone); /* Initialize page ext after all struct pages are initialized. */ if (deferred_struct_pages) page_ext_init(); page_alloc_sysctl_init(); } #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES /* * Returns the number of pages that arch has reserved but * is not known to alloc_large_system_hash(). */ static unsigned long __init arch_reserved_kernel_pages(void) { return 0; } #endif /* * Adaptive scale is meant to reduce sizes of hash tables on large memory * machines. As memory size is increased the scale is also increased but at * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory * quadruples the scale is increased by one, which means the size of hash table * only doubles, instead of quadrupling as well. * Because 32-bit systems cannot have large physical memory, where this scaling * makes sense, it is disabled on such platforms. 
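 * For example (approximate, assuming 4K pages): relative to a 64G machine, a
 * 256G machine gets its scale bumped by one and therefore a hash table only
 * twice as large, and a 1T machine a table four times as large, rather than
 * the 4x and 16x that purely linear scaling with memory would give.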
*/ #if __BITS_PER_LONG > 32 #define ADAPT_SCALE_BASE (64ul << 30) #define ADAPT_SCALE_SHIFT 2 #define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT) #endif /* * allocate a large system hash table from bootmem * - it is assumed that the hash table must contain an exact power-of-2 * quantity of entries * - limit is the number of hash buckets, not the total allocation size */ void *__init alloc_large_system_hash(const char *tablename, unsigned long bucketsize, unsigned long numentries, int scale, int flags, unsigned int *_hash_shift, unsigned int *_hash_mask, unsigned long low_limit, unsigned long high_limit) { unsigned long long max = high_limit; unsigned long log2qty, size; void *table; gfp_t gfp_flags; bool virt; bool huge; /* allow the kernel cmdline to have a say */ if (!numentries) { /* round applicable memory size up to nearest megabyte */ numentries = nr_kernel_pages; numentries -= arch_reserved_kernel_pages(); /* It isn't necessary when PAGE_SIZE >= 1MB */ if (PAGE_SIZE < SZ_1M) numentries = round_up(numentries, SZ_1M / PAGE_SIZE); #if __BITS_PER_LONG > 32 if (!high_limit) { unsigned long adapt; for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries; adapt <<= ADAPT_SCALE_SHIFT) scale++; } #endif /* limit to 1 bucket per 2^scale bytes of low memory */ if (scale > PAGE_SHIFT) numentries >>= (scale - PAGE_SHIFT); else numentries <<= (PAGE_SHIFT - scale); if (unlikely((numentries * bucketsize) < PAGE_SIZE)) numentries = PAGE_SIZE / bucketsize; } numentries = roundup_pow_of_two(numentries); /* limit allocation size to 1/16 total memory by default */ if (max == 0) { max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; do_div(max, bucketsize); } max = min(max, 0x80000000ULL); if (numentries < low_limit) numentries = low_limit; if (numentries > max) numentries = max; log2qty = ilog2(numentries); gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC; do { virt = false; size = bucketsize << log2qty; if (flags & HASH_EARLY) { if (flags & HASH_ZERO) table = memblock_alloc(size, SMP_CACHE_BYTES); else table = memblock_alloc_raw(size, SMP_CACHE_BYTES); } else if (get_order(size) > MAX_ORDER || hashdist) { table = vmalloc_huge(size, gfp_flags); virt = true; if (table) huge = is_vm_area_hugepages(table); } else { /* * If bucketsize is not a power-of-two, we may free * some pages at the end of hash table which * alloc_pages_exact() automatically does */ table = alloc_pages_exact(size, gfp_flags); kmemleak_alloc(table, size, 1, gfp_flags); } } while (!table && size > PAGE_SIZE && --log2qty); if (!table) panic("Failed to allocate %s hash table\n", tablename); pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n", tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear"); if (_hash_shift) *_hash_shift = log2qty; if (_hash_mask) *_hash_mask = (1 << log2qty) - 1; return table; } /** * set_dma_reserve - set the specified number of pages reserved in the first zone * @new_dma_reserve: The number of pages to mark reserved * * The per-cpu batchsize and zone watermarks are determined by managed_pages. * In the DMA zone, a significant percentage may be consumed by kernel image * and other unfreeable allocations which can skew the watermarks badly. This * function may optionally be used to account for unfreeable pages in the * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and * smaller per-cpu batchsize. 
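 * For example (hypothetical figures, assuming 4K pages): if roughly 16 MiB of
 * ZONE_DMA is pinned by the kernel image and other unfreeable early
 * allocations, an architecture could call set_dma_reserve(4096) so that those
 * 4096 pages no longer inflate the zone's watermarks.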
*/ void __init set_dma_reserve(unsigned long new_dma_reserve) { dma_reserve = new_dma_reserve; } void __init memblock_free_pages(struct page *page, unsigned long pfn, unsigned int order) { if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) { int nid = early_pfn_to_nid(pfn); if (!early_page_initialised(pfn, nid)) return; } if (!kmsan_memblock_free_pages(page, order)) { /* KMSAN will take care of these pages. */ return; } __free_pages_core(page, order); } DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); EXPORT_SYMBOL(init_on_alloc); DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); EXPORT_SYMBOL(init_on_free); static bool _init_on_alloc_enabled_early __read_mostly = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON); static int __init early_init_on_alloc(char *buf) { return kstrtobool(buf, &_init_on_alloc_enabled_early); } early_param("init_on_alloc", early_init_on_alloc); static bool _init_on_free_enabled_early __read_mostly = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON); static int __init early_init_on_free(char *buf) { return kstrtobool(buf, &_init_on_free_enabled_early); } early_param("init_on_free", early_init_on_free); DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled); /* * Enable static keys related to various memory debugging and hardening options. * Some override others, and depend on early params that are evaluated in the * order of appearance. So we need to first gather the full picture of what was * enabled, and then make decisions. */ static void __init mem_debugging_and_hardening_init(void) { bool page_poisoning_requested = false; bool want_check_pages = false; #ifdef CONFIG_PAGE_POISONING /* * Page poisoning is debug page alloc for some arches. If * either of those options are enabled, enable poisoning. */ if (page_poisoning_enabled() || (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && debug_pagealloc_enabled())) { static_branch_enable(&_page_poisoning_enabled); page_poisoning_requested = true; want_check_pages = true; } #endif if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) && page_poisoning_requested) { pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " "will take precedence over init_on_alloc and init_on_free\n"); _init_on_alloc_enabled_early = false; _init_on_free_enabled_early = false; } if (_init_on_alloc_enabled_early) { want_check_pages = true; static_branch_enable(&init_on_alloc); } else { static_branch_disable(&init_on_alloc); } if (_init_on_free_enabled_early) { want_check_pages = true; static_branch_enable(&init_on_free); } else { static_branch_disable(&init_on_free); } if (IS_ENABLED(CONFIG_KMSAN) && (_init_on_alloc_enabled_early || _init_on_free_enabled_early)) pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n"); #ifdef CONFIG_DEBUG_PAGEALLOC if (debug_pagealloc_enabled()) { want_check_pages = true; static_branch_enable(&_debug_pagealloc_enabled); if (debug_guardpage_minorder()) static_branch_enable(&_debug_guardpage_enabled); } #endif /* * Any page debugging or hardening option also enables sanity checking * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's * enabled already. */ if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages) static_branch_enable(&check_pages_enabled); } /* Report memory auto-initialization states for this boot. 
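 * (These states follow the init_on_alloc= / init_on_free= boot parameters
 * parsed above, e.g. "init_on_alloc=1 init_on_free=1", unless page poisoning
 * was also requested, in which case mem_debugging_and_hardening_init() above
 * disables both and reports that it did so.)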
*/ static void __init report_meminit(void) { const char *stack; if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN)) stack = "all(pattern)"; else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO)) stack = "all(zero)"; else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL)) stack = "byref_all(zero)"; else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)) stack = "byref(zero)"; else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER)) stack = "__user(zero)"; else stack = "off"; pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n", stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off", want_init_on_free() ? "on" : "off"); if (want_init_on_free()) pr_info("mem auto-init: clearing system memory may take some time...\n"); } static void __init mem_init_print_info(void) { unsigned long physpages, codesize, datasize, rosize, bss_size; unsigned long init_code_size, init_data_size; physpages = get_num_physpages(); codesize = _etext - _stext; datasize = _edata - _sdata; rosize = __end_rodata - __start_rodata; bss_size = __bss_stop - __bss_start; init_data_size = __init_end - __init_begin; init_code_size = _einittext - _sinittext; /* * Detect special cases and adjust section sizes accordingly: * 1) .init.* may be embedded into .data sections * 2) .init.text.* may be out of [__init_begin, __init_end], * please refer to arch/tile/kernel/vmlinux.lds.S. * 3) .rodata.* may be embedded into .text or .data sections. */ #define adj_init_size(start, end, size, pos, adj) \ do { \ if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \ size -= adj; \ } while (0) adj_init_size(__init_begin, __init_end, init_data_size, _sinittext, init_code_size); adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size); adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size); adj_init_size(_stext, _etext, codesize, __start_rodata, rosize); adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize); #undef adj_init_size pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved" #ifdef CONFIG_HIGHMEM ", %luK highmem" #endif ")\n", K(nr_free_pages()), K(physpages), codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K, (init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K, K(physpages - totalram_pages() - totalcma_pages), K(totalcma_pages) #ifdef CONFIG_HIGHMEM , K(totalhigh_pages()) #endif ); } /* * Set up kernel memory allocators */ void __init mm_core_init(void) { /* Initializations relying on SMP setup */ build_all_zonelists(NULL); page_alloc_init_cpuhp(); /* * page_ext requires contiguous pages, * bigger than MAX_ORDER unless SPARSEMEM. */ page_ext_init_flatmem(); mem_debugging_and_hardening_init(); kfence_alloc_pool_and_metadata(); report_meminit(); kmsan_init_shadow(); stack_depot_early_init(); mem_init(); mem_init_print_info(); kmem_cache_init(); /* * page_owner must be initialized after buddy is ready, and also after * slab is ready so that stack_depot_init() works properly */ page_ext_init_flatmem_late(); kmemleak_init(); ptlock_cache_init(); pgtable_cache_init(); debug_objects_mem_init(); vmalloc_init(); /* If no deferred init page_ext now, as vmap is fully initialized */ if (!deferred_struct_pages) page_ext_init(); /* Should be run before the first non-init thread is created */ init_espfix_bsp(); /* Should be run after espfix64 is set up. */ pti_init(); kmsan_init_runtime(); mm_cache_init(); }
linux-master
mm/mm_init.c
// SPDX-License-Identifier: GPL-2.0-only /* * Simple NUMA memory policy for the Linux kernel. * * Copyright 2003,2004 Andi Kleen, SuSE Labs. * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc. * * NUMA policy allows the user to give hints in which node(s) memory should * be allocated. * * Support four policies per VMA and per process: * * The VMA policy has priority over the process policy for a page fault. * * interleave Allocate memory interleaved over a set of nodes, * with normal fallback if it fails. * For VMA based allocations this interleaves based on the * offset into the backing object or offset into the mapping * for anonymous memory. For process policy an process counter * is used. * * bind Only allocate memory on a specific set of nodes, * no fallback. * FIXME: memory is allocated starting with the first node * to the last. It would be better if bind would truly restrict * the allocation to memory nodes instead * * preferred Try a specific node first before normal fallback. * As a special case NUMA_NO_NODE here means do the allocation * on the local CPU. This is normally identical to default, * but useful to set in a VMA when you have a non default * process policy. * * preferred many Try a set of nodes first before normal fallback. This is * similar to preferred without the special case. * * default Allocate on the local node first, or when on a VMA * use the process policy. This is what Linux always did * in a NUMA aware kernel and still does by, ahem, default. * * The process policy is applied for most non interrupt memory allocations * in that process' context. Interrupts ignore the policies and always * try to allocate on the local CPU. The VMA policy is only applied for memory * allocations for a VMA in the VM. * * Currently there are a few corner cases in swapping where the policy * is not applied, but the majority should be handled. When process policy * is used it is not remembered over swap outs/swap ins. * * Only the highest zone in the zone hierarchy gets policied. Allocations * requesting a lower zone just use default policy. This implies that * on systems with highmem kernel lowmem allocation don't get policied. * Same with GFP_DMA allocations. * * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between * all users and remembered even when nobody has memory mapped. */ /* Notebook: fix mmap readahead to honour policy and enable policy for any page cache object statistics for bigpages global policy for page cache? currently it uses process policy. Requires first item above. handle mremap for shared memory (currently ignored for the policy) grows down? make bind policy root only? It can trigger oom much faster and the kernel is not always grateful with that. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/mempolicy.h> #include <linux/pagewalk.h> #include <linux/highmem.h> #include <linux/hugetlb.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/numa_balancing.h> #include <linux/sched/task.h> #include <linux/nodemask.h> #include <linux/cpuset.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/export.h> #include <linux/nsproxy.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/compat.h> #include <linux/ptrace.h> #include <linux/swap.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <linux/migrate.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/ctype.h> #include <linux/mm_inline.h> #include <linux/mmu_notifier.h> #include <linux/printk.h> #include <linux/swapops.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include <linux/uaccess.h> #include "internal.h" /* Internal flags */ #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */ #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */ static struct kmem_cache *policy_cache; static struct kmem_cache *sn_cache; /* Highest zone. An specific allocation for a zone below that is not policied. */ enum zone_type policy_zone = 0; /* * run-time system-wide default policy => local allocation */ static struct mempolicy default_policy = { .refcnt = ATOMIC_INIT(1), /* never free it */ .mode = MPOL_LOCAL, }; static struct mempolicy preferred_node_policy[MAX_NUMNODES]; /** * numa_map_to_online_node - Find closest online node * @node: Node id to start the search * * Lookup the next closest node by distance if @nid is not online. * * Return: this @node if it is online, otherwise the closest node by distance */ int numa_map_to_online_node(int node) { int min_dist = INT_MAX, dist, n, min_node; if (node == NUMA_NO_NODE || node_online(node)) return node; min_node = node; for_each_online_node(n) { dist = node_distance(node, n); if (dist < min_dist) { min_dist = dist; min_node = n; } } return min_node; } EXPORT_SYMBOL_GPL(numa_map_to_online_node); struct mempolicy *get_task_policy(struct task_struct *p) { struct mempolicy *pol = p->mempolicy; int node; if (pol) return pol; node = numa_node_id(); if (node != NUMA_NO_NODE) { pol = &preferred_node_policy[node]; /* preferred_node_policy is not initialised early in boot */ if (pol->mode) return pol; } return &default_policy; } static const struct mempolicy_operations { int (*create)(struct mempolicy *pol, const nodemask_t *nodes); void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes); } mpol_ops[MPOL_MAX]; static inline int mpol_store_user_nodemask(const struct mempolicy *pol) { return pol->flags & MPOL_MODE_FLAGS; } static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig, const nodemask_t *rel) { nodemask_t tmp; nodes_fold(tmp, *orig, nodes_weight(*rel)); nodes_onto(*ret, tmp, *rel); } static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes) { if (nodes_empty(*nodes)) return -EINVAL; pol->nodes = *nodes; return 0; } static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes) { if (nodes_empty(*nodes)) return -EINVAL; nodes_clear(pol->nodes); node_set(first_node(*nodes), pol->nodes); return 0; } /* * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if * any, for the new policy. 
mpol_new() has already validated the nodes * parameter with respect to the policy mode and flags. * * Must be called holding task's alloc_lock to protect task's mems_allowed * and mempolicy. May also be called holding the mmap_lock for write. */ static int mpol_set_nodemask(struct mempolicy *pol, const nodemask_t *nodes, struct nodemask_scratch *nsc) { int ret; /* * Default (pol==NULL) resp. local memory policies are not a * subject of any remapping. They also do not need any special * constructor. */ if (!pol || pol->mode == MPOL_LOCAL) return 0; /* Check N_MEMORY */ nodes_and(nsc->mask1, cpuset_current_mems_allowed, node_states[N_MEMORY]); VM_BUG_ON(!nodes); if (pol->flags & MPOL_F_RELATIVE_NODES) mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1); else nodes_and(nsc->mask2, *nodes, nsc->mask1); if (mpol_store_user_nodemask(pol)) pol->w.user_nodemask = *nodes; else pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed; ret = mpol_ops[pol->mode].create(pol, &nsc->mask2); return ret; } /* * This function just creates a new policy, does some check and simple * initialization. You must invoke mpol_set_nodemask() to set nodes. */ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags, nodemask_t *nodes) { struct mempolicy *policy; pr_debug("setting mode %d flags %d nodes[0] %lx\n", mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE); if (mode == MPOL_DEFAULT) { if (nodes && !nodes_empty(*nodes)) return ERR_PTR(-EINVAL); return NULL; } VM_BUG_ON(!nodes); /* * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation). * All other modes require a valid pointer to a non-empty nodemask. */ if (mode == MPOL_PREFERRED) { if (nodes_empty(*nodes)) { if (((flags & MPOL_F_STATIC_NODES) || (flags & MPOL_F_RELATIVE_NODES))) return ERR_PTR(-EINVAL); mode = MPOL_LOCAL; } } else if (mode == MPOL_LOCAL) { if (!nodes_empty(*nodes) || (flags & MPOL_F_STATIC_NODES) || (flags & MPOL_F_RELATIVE_NODES)) return ERR_PTR(-EINVAL); } else if (nodes_empty(*nodes)) return ERR_PTR(-EINVAL); policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); if (!policy) return ERR_PTR(-ENOMEM); atomic_set(&policy->refcnt, 1); policy->mode = mode; policy->flags = flags; policy->home_node = NUMA_NO_NODE; return policy; } /* Slow path of a mpol destructor. */ void __mpol_put(struct mempolicy *p) { if (!atomic_dec_and_test(&p->refcnt)) return; kmem_cache_free(policy_cache, p); } static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes) { } static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes) { nodemask_t tmp; if (pol->flags & MPOL_F_STATIC_NODES) nodes_and(tmp, pol->w.user_nodemask, *nodes); else if (pol->flags & MPOL_F_RELATIVE_NODES) mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes); else { nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed, *nodes); pol->w.cpuset_mems_allowed = *nodes; } if (nodes_empty(tmp)) tmp = *nodes; pol->nodes = tmp; } static void mpol_rebind_preferred(struct mempolicy *pol, const nodemask_t *nodes) { pol->w.cpuset_mems_allowed = *nodes; } /* * mpol_rebind_policy - Migrate a policy to a different set of nodes * * Per-vma policies are protected by mmap_lock. Allocations using per-task * policies are protected by task->mems_allowed_seq to prevent a premature * OOM/allocation failure due to parallel nodemask modification. 
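 * For example, a task policy created as MPOL_BIND on nodes 0-1 with
 * MPOL_F_STATIC_NODES keeps those physical node numbers (intersected with
 * the new mems_allowed) when its cpuset changes, whereas the same policy
 * without the flag is remapped onto the new set of allowed nodes by
 * mpol_rebind_nodemask() above.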
*/ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) { if (!pol || pol->mode == MPOL_LOCAL) return; if (!mpol_store_user_nodemask(pol) && nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) return; mpol_ops[pol->mode].rebind(pol, newmask); } /* * Wrapper for mpol_rebind_policy() that just requires task * pointer, and updates task mempolicy. * * Called with task's alloc_lock held. */ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) { mpol_rebind_policy(tsk->mempolicy, new); } /* * Rebind each vma in mm to new nodemask. * * Call holding a reference to mm. Takes mm->mmap_lock during call. */ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) { struct vm_area_struct *vma; VMA_ITERATOR(vmi, mm, 0); mmap_write_lock(mm); for_each_vma(vmi, vma) { vma_start_write(vma); mpol_rebind_policy(vma->vm_policy, new); } mmap_write_unlock(mm); } static const struct mempolicy_operations mpol_ops[MPOL_MAX] = { [MPOL_DEFAULT] = { .rebind = mpol_rebind_default, }, [MPOL_INTERLEAVE] = { .create = mpol_new_nodemask, .rebind = mpol_rebind_nodemask, }, [MPOL_PREFERRED] = { .create = mpol_new_preferred, .rebind = mpol_rebind_preferred, }, [MPOL_BIND] = { .create = mpol_new_nodemask, .rebind = mpol_rebind_nodemask, }, [MPOL_LOCAL] = { .rebind = mpol_rebind_default, }, [MPOL_PREFERRED_MANY] = { .create = mpol_new_nodemask, .rebind = mpol_rebind_preferred, }, }; static int migrate_folio_add(struct folio *folio, struct list_head *foliolist, unsigned long flags); struct queue_pages { struct list_head *pagelist; unsigned long flags; nodemask_t *nmask; unsigned long start; unsigned long end; struct vm_area_struct *first; }; /* * Check if the folio's nid is in qp->nmask. * * If MPOL_MF_INVERT is set in qp->flags, check if the nid is * in the invert of qp->nmask. */ static inline bool queue_folio_required(struct folio *folio, struct queue_pages *qp) { int nid = folio_nid(folio); unsigned long flags = qp->flags; return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT); } /* * queue_folios_pmd() has three possible return values: * 0 - folios are placed on the right node or queued successfully, or * special page is met, i.e. huge zero page. * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were * specified. * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an * existing folio was already on a node that does not follow the * policy. */ static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, unsigned long end, struct mm_walk *walk) __releases(ptl) { int ret = 0; struct folio *folio; struct queue_pages *qp = walk->private; unsigned long flags; if (unlikely(is_pmd_migration_entry(*pmd))) { ret = -EIO; goto unlock; } folio = pfn_folio(pmd_pfn(*pmd)); if (is_huge_zero_page(&folio->page)) { walk->action = ACTION_CONTINUE; goto unlock; } if (!queue_folio_required(folio, qp)) goto unlock; flags = qp->flags; /* go to folio migration */ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { if (!vma_migratable(walk->vma) || migrate_folio_add(folio, qp->pagelist, flags)) { ret = 1; goto unlock; } } else ret = -EIO; unlock: spin_unlock(ptl); return ret; } /* * Scan through pages checking if pages follow certain conditions, * and move them to the pagelist if they do. * * queue_folios_pte_range() has three possible return values: * 0 - folios are placed on the right node or queued successfully, or * special page is met, i.e. zero page. * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were * specified. 
* -EIO - only MPOL_MF_STRICT was specified and an existing folio was already * on a node that does not follow the policy. */ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->vma; struct folio *folio; struct queue_pages *qp = walk->private; unsigned long flags = qp->flags; bool has_unmovable = false; pte_t *pte, *mapped_pte; pte_t ptent; spinlock_t *ptl; ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) return queue_folios_pmd(pmd, ptl, addr, end, walk); mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); if (!pte) { walk->action = ACTION_AGAIN; return 0; } for (; addr != end; pte++, addr += PAGE_SIZE) { ptent = ptep_get(pte); if (!pte_present(ptent)) continue; folio = vm_normal_folio(vma, addr, ptent); if (!folio || folio_is_zone_device(folio)) continue; /* * vm_normal_folio() filters out zero pages, but there might * still be reserved folios to skip, perhaps in a VDSO. */ if (folio_test_reserved(folio)) continue; if (!queue_folio_required(folio, qp)) continue; if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { /* MPOL_MF_STRICT must be specified if we get here */ if (!vma_migratable(vma)) { has_unmovable = true; break; } /* * Do not abort immediately since there may be * temporary off LRU pages in the range. Still * need migrate other LRU pages. */ if (migrate_folio_add(folio, qp->pagelist, flags)) has_unmovable = true; } else break; } pte_unmap_unlock(mapped_pte, ptl); cond_resched(); if (has_unmovable) return 1; return addr != end ? -EIO : 0; } static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { int ret = 0; #ifdef CONFIG_HUGETLB_PAGE struct queue_pages *qp = walk->private; unsigned long flags = (qp->flags & MPOL_MF_VALID); struct folio *folio; spinlock_t *ptl; pte_t entry; ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); entry = huge_ptep_get(pte); if (!pte_present(entry)) goto unlock; folio = pfn_folio(pte_pfn(entry)); if (!queue_folio_required(folio, qp)) goto unlock; if (flags == MPOL_MF_STRICT) { /* * STRICT alone means only detecting misplaced folio and no * need to further check other vma. */ ret = -EIO; goto unlock; } if (!vma_migratable(walk->vma)) { /* * Must be STRICT with MOVE*, otherwise .test_walk() have * stopped walking current vma. * Detecting misplaced folio but allow migrating folios which * have been queued. */ ret = 1; goto unlock; } /* * With MPOL_MF_MOVE, we try to migrate only unshared folios. If it * is shared it is likely not worth migrating. * * To check if the folio is shared, ideally we want to make sure * every page is mapped to the same process. Doing that is very * expensive, so check the estimated mapcount of the folio instead. */ if (flags & (MPOL_MF_MOVE_ALL) || (flags & MPOL_MF_MOVE && folio_estimated_sharers(folio) == 1 && !hugetlb_pmd_shared(pte))) { if (!isolate_hugetlb(folio, qp->pagelist) && (flags & MPOL_MF_STRICT)) /* * Failed to isolate folio but allow migrating pages * which have been queued. */ ret = 1; } unlock: spin_unlock(ptl); #else BUG(); #endif return ret; } #ifdef CONFIG_NUMA_BALANCING /* * This is used to mark a range of virtual addresses to be inaccessible. * These are later cleared by a NUMA hinting fault. Depending on these * faults, pages may be migrated for better NUMA placement. * * This is assuming that NUMA faults are handled using PROT_NONE. If * an architecture makes a different choice, it will need further * changes to the core. 
*/ unsigned long change_prot_numa(struct vm_area_struct *vma, unsigned long addr, unsigned long end) { struct mmu_gather tlb; long nr_updated; tlb_gather_mmu(&tlb, vma->vm_mm); nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA); if (nr_updated > 0) count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); tlb_finish_mmu(&tlb); return nr_updated; } #else static unsigned long change_prot_numa(struct vm_area_struct *vma, unsigned long addr, unsigned long end) { return 0; } #endif /* CONFIG_NUMA_BALANCING */ static int queue_pages_test_walk(unsigned long start, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *next, *vma = walk->vma; struct queue_pages *qp = walk->private; unsigned long endvma = vma->vm_end; unsigned long flags = qp->flags; /* range check first */ VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma); if (!qp->first) { qp->first = vma; if (!(flags & MPOL_MF_DISCONTIG_OK) && (qp->start < vma->vm_start)) /* hole at head side of range */ return -EFAULT; } next = find_vma(vma->vm_mm, vma->vm_end); if (!(flags & MPOL_MF_DISCONTIG_OK) && ((vma->vm_end < qp->end) && (!next || vma->vm_end < next->vm_start))) /* hole at middle or tail of range */ return -EFAULT; /* * Need check MPOL_MF_STRICT to return -EIO if possible * regardless of vma_migratable */ if (!vma_migratable(vma) && !(flags & MPOL_MF_STRICT)) return 1; if (endvma > end) endvma = end; if (flags & MPOL_MF_LAZY) { /* Similar to task_numa_work, skip inaccessible VMAs */ if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) && !(vma->vm_flags & VM_MIXEDMAP)) change_prot_numa(vma, start, endvma); return 1; } /* queue pages from current vma */ if (flags & MPOL_MF_VALID) return 0; return 1; } static const struct mm_walk_ops queue_pages_walk_ops = { .hugetlb_entry = queue_folios_hugetlb, .pmd_entry = queue_folios_pte_range, .test_walk = queue_pages_test_walk, .walk_lock = PGWALK_RDLOCK, }; static const struct mm_walk_ops queue_pages_lock_vma_walk_ops = { .hugetlb_entry = queue_folios_hugetlb, .pmd_entry = queue_folios_pte_range, .test_walk = queue_pages_test_walk, .walk_lock = PGWALK_WRLOCK, }; /* * Walk through page tables and collect pages to be migrated. * * If pages found in a given range are on a set of nodes (determined by * @nodes and @flags,) it's isolated and queued to the pagelist which is * passed via @private. * * queue_pages_range() has three possible return values: * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were * specified. * 0 - queue pages successfully or no misplaced page. * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or * memory range specified by nodemask and maxnode points outside * your accessible address space (-EFAULT) */ static int queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, nodemask_t *nodes, unsigned long flags, struct list_head *pagelist, bool lock_vma) { int err; struct queue_pages qp = { .pagelist = pagelist, .flags = flags, .nmask = nodes, .start = start, .end = end, .first = NULL, }; const struct mm_walk_ops *ops = lock_vma ? &queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops; err = walk_page_range(mm, start, end, ops, &qp); if (!qp.first) /* whole range in hole */ err = -EFAULT; return err; } /* * Apply policy to a single VMA * This must be called with the mmap_lock held for writing. 
*/ static int vma_replace_policy(struct vm_area_struct *vma, struct mempolicy *pol) { int err; struct mempolicy *old; struct mempolicy *new; vma_assert_write_locked(vma); pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_ops, vma->vm_file, vma->vm_ops ? vma->vm_ops->set_policy : NULL); new = mpol_dup(pol); if (IS_ERR(new)) return PTR_ERR(new); if (vma->vm_ops && vma->vm_ops->set_policy) { err = vma->vm_ops->set_policy(vma, new); if (err) goto err_out; } old = vma->vm_policy; vma->vm_policy = new; /* protected by mmap_lock */ mpol_put(old); return 0; err_out: mpol_put(new); return err; } /* Split or merge the VMA (if required) and apply the new policy */ static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, struct mempolicy *new_pol) { struct vm_area_struct *merged; unsigned long vmstart, vmend; pgoff_t pgoff; int err; vmend = min(end, vma->vm_end); if (start > vma->vm_start) { *prev = vma; vmstart = start; } else { vmstart = vma->vm_start; } if (mpol_equal(vma_policy(vma), new_pol)) { *prev = vma; return 0; } pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT); merged = vma_merge(vmi, vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags, vma->anon_vma, vma->vm_file, pgoff, new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma)); if (merged) { *prev = merged; return vma_replace_policy(merged, new_pol); } if (vma->vm_start != vmstart) { err = split_vma(vmi, vma, vmstart, 1); if (err) return err; } if (vma->vm_end != vmend) { err = split_vma(vmi, vma, vmend, 0); if (err) return err; } *prev = vma; return vma_replace_policy(vma, new_pol); } /* Set the process memory policy */ static long do_set_mempolicy(unsigned short mode, unsigned short flags, nodemask_t *nodes) { struct mempolicy *new, *old; NODEMASK_SCRATCH(scratch); int ret; if (!scratch) return -ENOMEM; new = mpol_new(mode, flags, nodes); if (IS_ERR(new)) { ret = PTR_ERR(new); goto out; } task_lock(current); ret = mpol_set_nodemask(new, nodes, scratch); if (ret) { task_unlock(current); mpol_put(new); goto out; } old = current->mempolicy; current->mempolicy = new; if (new && new->mode == MPOL_INTERLEAVE) current->il_prev = MAX_NUMNODES-1; task_unlock(current); mpol_put(old); ret = 0; out: NODEMASK_SCRATCH_FREE(scratch); return ret; } /* * Return nodemask for policy for get_mempolicy() query * * Called with task's alloc_lock held */ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes) { nodes_clear(*nodes); if (p == &default_policy) return; switch (p->mode) { case MPOL_BIND: case MPOL_INTERLEAVE: case MPOL_PREFERRED: case MPOL_PREFERRED_MANY: *nodes = p->nodes; break; case MPOL_LOCAL: /* return empty node mask for local allocation */ break; default: BUG(); } } static int lookup_node(struct mm_struct *mm, unsigned long addr) { struct page *p = NULL; int ret; ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p); if (ret > 0) { ret = page_to_nid(p); put_page(p); } return ret; } /* Retrieve NUMA policy */ static long do_get_mempolicy(int *policy, nodemask_t *nmask, unsigned long addr, unsigned long flags) { int err; struct mm_struct *mm = current->mm; struct vm_area_struct *vma = NULL; struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL; if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED)) return -EINVAL; if (flags & MPOL_F_MEMS_ALLOWED) { if (flags & (MPOL_F_NODE|MPOL_F_ADDR)) return -EINVAL; *policy = 0; /* just so it's 
initialized */ task_lock(current); *nmask = cpuset_current_mems_allowed; task_unlock(current); return 0; } if (flags & MPOL_F_ADDR) { /* * Do NOT fall back to task policy if the * vma/shared policy at addr is NULL. We * want to return MPOL_DEFAULT in this case. */ mmap_read_lock(mm); vma = vma_lookup(mm, addr); if (!vma) { mmap_read_unlock(mm); return -EFAULT; } if (vma->vm_ops && vma->vm_ops->get_policy) pol = vma->vm_ops->get_policy(vma, addr); else pol = vma->vm_policy; } else if (addr) return -EINVAL; if (!pol) pol = &default_policy; /* indicates default behavior */ if (flags & MPOL_F_NODE) { if (flags & MPOL_F_ADDR) { /* * Take a refcount on the mpol, because we are about to * drop the mmap_lock, after which only "pol" remains * valid, "vma" is stale. */ pol_refcount = pol; vma = NULL; mpol_get(pol); mmap_read_unlock(mm); err = lookup_node(mm, addr); if (err < 0) goto out; *policy = err; } else if (pol == current->mempolicy && pol->mode == MPOL_INTERLEAVE) { *policy = next_node_in(current->il_prev, pol->nodes); } else { err = -EINVAL; goto out; } } else { *policy = pol == &default_policy ? MPOL_DEFAULT : pol->mode; /* * Internal mempolicy flags must be masked off before exposing * the policy to userspace. */ *policy |= (pol->flags & MPOL_MODE_FLAGS); } err = 0; if (nmask) { if (mpol_store_user_nodemask(pol)) { *nmask = pol->w.user_nodemask; } else { task_lock(current); get_policy_nodemask(pol, nmask); task_unlock(current); } } out: mpol_cond_put(pol); if (vma) mmap_read_unlock(mm); if (pol_refcount) mpol_put(pol_refcount); return err; } #ifdef CONFIG_MIGRATION static int migrate_folio_add(struct folio *folio, struct list_head *foliolist, unsigned long flags) { /* * We try to migrate only unshared folios. If it is shared it * is likely not worth migrating. * * To check if the folio is shared, ideally we want to make sure * every page is mapped to the same process. Doing that is very * expensive, so check the estimated mapcount of the folio instead. */ if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) { if (folio_isolate_lru(folio)) { list_add_tail(&folio->lru, foliolist); node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio), folio_nr_pages(folio)); } else if (flags & MPOL_MF_STRICT) { /* * Non-movable folio may reach here. And, there may be * temporary off LRU folios or non-LRU movable folios. * Treat them as unmovable folios since they can't be * isolated, so they can't be moved at the moment. It * should return -EIO for this case too. */ return -EIO; } } return 0; } /* * Migrate pages from one node to a target node. * Returns error or the number of pages not migrated. */ static int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags) { nodemask_t nmask; struct vm_area_struct *vma; LIST_HEAD(pagelist); int err = 0; struct migration_target_control mtc = { .nid = dest, .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, }; nodes_clear(nmask); node_set(source, nmask); /* * This does not "check" the range but isolates all pages that * need migration. Between passing in the full user address * space range and MPOL_MF_DISCONTIG_OK, this call can not fail. 
*/ vma = find_vma(mm, 0); VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask, flags | MPOL_MF_DISCONTIG_OK, &pagelist, false); if (!list_empty(&pagelist)) { err = migrate_pages(&pagelist, alloc_migration_target, NULL, (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL); if (err) putback_movable_pages(&pagelist); } return err; } /* * Move pages between the two nodesets so as to preserve the physical * layout as much as possible. * * Returns the number of page that could not be moved. */ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, int flags) { int busy = 0; int err = 0; nodemask_t tmp; lru_cache_disable(); mmap_read_lock(mm); /* * Find a 'source' bit set in 'tmp' whose corresponding 'dest' * bit in 'to' is not also set in 'tmp'. Clear the found 'source' * bit in 'tmp', and return that <source, dest> pair for migration. * The pair of nodemasks 'to' and 'from' define the map. * * If no pair of bits is found that way, fallback to picking some * pair of 'source' and 'dest' bits that are not the same. If the * 'source' and 'dest' bits are the same, this represents a node * that will be migrating to itself, so no pages need move. * * If no bits are left in 'tmp', or if all remaining bits left * in 'tmp' correspond to the same bit in 'to', return false * (nothing left to migrate). * * This lets us pick a pair of nodes to migrate between, such that * if possible the dest node is not already occupied by some other * source node, minimizing the risk of overloading the memory on a * node that would happen if we migrated incoming memory to a node * before migrating outgoing memory source that same node. * * A single scan of tmp is sufficient. As we go, we remember the * most recent <s, d> pair that moved (s != d). If we find a pair * that not only moved, but what's better, moved to an empty slot * (d is not set in tmp), then we break out then, with that pair. * Otherwise when we finish scanning from_tmp, we at least have the * most recent <s, d> pair that moved. If we get all the way through * the scan of tmp without finding any node that moved, much less * moved to an empty node, then there is nothing left worth migrating. */ tmp = *from; while (!nodes_empty(tmp)) { int s, d; int source = NUMA_NO_NODE; int dest = 0; for_each_node_mask(s, tmp) { /* * do_migrate_pages() tries to maintain the relative * node relationship of the pages established between * threads and memory areas. * * However if the number of source nodes is not equal to * the number of destination nodes we can not preserve * this node relative relationship. In that case, skip * copying memory from a node that is in the destination * mask. * * Example: [2,3,4] -> [3,4,5] moves everything. * [0-7] - > [3,4,5] moves only 0,1,2,6,7. */ if ((nodes_weight(*from) != nodes_weight(*to)) && (node_isset(s, *to))) continue; d = node_remap(s, *from, *to); if (s == d) continue; source = s; /* Node moved. Memorize */ dest = d; /* dest not in remaining from nodes? */ if (!node_isset(dest, tmp)) break; } if (source == NUMA_NO_NODE) break; node_clear(source, tmp); err = migrate_to_node(mm, source, dest, flags); if (err > 0) busy += err; if (err < 0) break; } mmap_read_unlock(mm); lru_cache_enable(); if (err < 0) return err; return busy; } /* * Allocate a new page for page migration based on vma policy. * Start by assuming the page is mapped by the same vma as contains @start. * Search forward from there, if not. 
N.B., this assumes that the * list of pages handed to migrate_pages()--which is how we get here-- * is in virtual address order. */ static struct folio *new_folio(struct folio *src, unsigned long start) { struct vm_area_struct *vma; unsigned long address; VMA_ITERATOR(vmi, current->mm, start); gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL; for_each_vma(vmi, vma) { address = page_address_in_vma(&src->page, vma); if (address != -EFAULT) break; } if (folio_test_hugetlb(src)) { return alloc_hugetlb_folio_vma(folio_hstate(src), vma, address); } if (folio_test_large(src)) gfp = GFP_TRANSHUGE; /* * if !vma, vma_alloc_folio() will use task or system default policy */ return vma_alloc_folio(gfp, folio_order(src), vma, address, folio_test_large(src)); } #else static int migrate_folio_add(struct folio *folio, struct list_head *foliolist, unsigned long flags) { return -EIO; } int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, int flags) { return -ENOSYS; } static struct folio *new_folio(struct folio *src, unsigned long start) { return NULL; } #endif static long do_mbind(unsigned long start, unsigned long len, unsigned short mode, unsigned short mode_flags, nodemask_t *nmask, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; struct vma_iterator vmi; struct mempolicy *new; unsigned long end; int err; int ret; LIST_HEAD(pagelist); if (flags & ~(unsigned long)MPOL_MF_VALID) return -EINVAL; if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) return -EPERM; if (start & ~PAGE_MASK) return -EINVAL; if (mode == MPOL_DEFAULT) flags &= ~MPOL_MF_STRICT; len = PAGE_ALIGN(len); end = start + len; if (end < start) return -EINVAL; if (end == start) return 0; new = mpol_new(mode, mode_flags, nmask); if (IS_ERR(new)) return PTR_ERR(new); if (flags & MPOL_MF_LAZY) new->flags |= MPOL_F_MOF; /* * If we are using the default policy then operation * on discontinuous address spaces is okay after all */ if (!new) flags |= MPOL_MF_DISCONTIG_OK; pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n", start, start + len, mode, mode_flags, nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE); if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { lru_cache_disable(); } { NODEMASK_SCRATCH(scratch); if (scratch) { mmap_write_lock(mm); err = mpol_set_nodemask(new, nmask, scratch); if (err) mmap_write_unlock(mm); } else err = -ENOMEM; NODEMASK_SCRATCH_FREE(scratch); } if (err) goto mpol_out; /* * Lock the VMAs before scanning for pages to migrate, to ensure we don't * miss a concurrently inserted page. */ ret = queue_pages_range(mm, start, end, nmask, flags | MPOL_MF_INVERT, &pagelist, true); if (ret < 0) { err = ret; goto up_out; } vma_iter_init(&vmi, mm, start); prev = vma_prev(&vmi); for_each_vma_range(vmi, vma, end) { err = mbind_range(&vmi, vma, &prev, start, end, new); if (err) break; } if (!err) { int nr_failed = 0; if (!list_empty(&pagelist)) { WARN_ON_ONCE(flags & MPOL_MF_LAZY); nr_failed = migrate_pages(&pagelist, new_folio, NULL, start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL); if (nr_failed) putback_movable_pages(&pagelist); } if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) err = -EIO; } else { up_out: if (!list_empty(&pagelist)) putback_movable_pages(&pagelist); } mmap_write_unlock(mm); mpol_out: mpol_put(new); if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) lru_cache_enable(); return err; } /* * User space interface with variable sized bitmaps for nodelists. 
*/ static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask, unsigned long maxnode) { unsigned long nlongs = BITS_TO_LONGS(maxnode); int ret; if (in_compat_syscall()) ret = compat_get_bitmap(mask, (const compat_ulong_t __user *)nmask, maxnode); else ret = copy_from_user(mask, nmask, nlongs * sizeof(unsigned long)); if (ret) return -EFAULT; if (maxnode % BITS_PER_LONG) mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1; return 0; } /* Copy a node mask from user space. */ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, unsigned long maxnode) { --maxnode; nodes_clear(*nodes); if (maxnode == 0 || !nmask) return 0; if (maxnode > PAGE_SIZE*BITS_PER_BYTE) return -EINVAL; /* * When the user specified more nodes than supported just check * if the non supported part is all zero, one word at a time, * starting at the end. */ while (maxnode > MAX_NUMNODES) { unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG); unsigned long t; if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits)) return -EFAULT; if (maxnode - bits >= MAX_NUMNODES) { maxnode -= bits; } else { maxnode = MAX_NUMNODES; t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1); } if (t) return -EINVAL; } return get_bitmap(nodes_addr(*nodes), nmask, maxnode); } /* Copy a kernel node mask to user space */ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, nodemask_t *nodes) { unsigned long copy = ALIGN(maxnode-1, 64) / 8; unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); bool compat = in_compat_syscall(); if (compat) nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t); if (copy > nbytes) { if (copy > PAGE_SIZE) return -EINVAL; if (clear_user((char __user *)mask + nbytes, copy - nbytes)) return -EFAULT; copy = nbytes; maxnode = nr_node_ids; } if (compat) return compat_put_bitmap((compat_ulong_t __user *)mask, nodes_addr(*nodes), maxnode); return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0; } /* Basic parameter sanity check used by both mbind() and set_mempolicy() */ static inline int sanitize_mpol_flags(int *mode, unsigned short *flags) { *flags = *mode & MPOL_MODE_FLAGS; *mode &= ~MPOL_MODE_FLAGS; if ((unsigned int)(*mode) >= MPOL_MAX) return -EINVAL; if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES)) return -EINVAL; if (*flags & MPOL_F_NUMA_BALANCING) { if (*mode != MPOL_BIND) return -EINVAL; *flags |= (MPOL_F_MOF | MPOL_F_MORON); } return 0; } static long kernel_mbind(unsigned long start, unsigned long len, unsigned long mode, const unsigned long __user *nmask, unsigned long maxnode, unsigned int flags) { unsigned short mode_flags; nodemask_t nodes; int lmode = mode; int err; start = untagged_addr(start); err = sanitize_mpol_flags(&lmode, &mode_flags); if (err) return err; err = get_nodes(&nodes, nmask, maxnode); if (err) return err; return do_mbind(start, len, lmode, mode_flags, &nodes, flags); } SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len, unsigned long, home_node, unsigned long, flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma, *prev; struct mempolicy *new, *old; unsigned long end; int err = -ENOENT; VMA_ITERATOR(vmi, mm, start); start = untagged_addr(start); if (start & ~PAGE_MASK) return -EINVAL; /* * flags is used for future extension if any. */ if (flags != 0) return -EINVAL; /* * Check home_node is online to avoid accessing uninitialized * NODE_DATA. 
*/ if (home_node >= MAX_NUMNODES || !node_online(home_node)) return -EINVAL; len = PAGE_ALIGN(len); end = start + len; if (end < start) return -EINVAL; if (end == start) return 0; mmap_write_lock(mm); prev = vma_prev(&vmi); for_each_vma_range(vmi, vma, end) { /* * If any vma in the range got policy other than MPOL_BIND * or MPOL_PREFERRED_MANY we return error. We don't reset * the home node for vmas we already updated before. */ old = vma_policy(vma); if (!old) continue; if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) { err = -EOPNOTSUPP; break; } new = mpol_dup(old); if (IS_ERR(new)) { err = PTR_ERR(new); break; } vma_start_write(vma); new->home_node = home_node; err = mbind_range(&vmi, vma, &prev, start, end, new); mpol_put(new); if (err) break; } mmap_write_unlock(mm); return err; } SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len, unsigned long, mode, const unsigned long __user *, nmask, unsigned long, maxnode, unsigned int, flags) { return kernel_mbind(start, len, mode, nmask, maxnode, flags); } /* Set the process memory policy */ static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask, unsigned long maxnode) { unsigned short mode_flags; nodemask_t nodes; int lmode = mode; int err; err = sanitize_mpol_flags(&lmode, &mode_flags); if (err) return err; err = get_nodes(&nodes, nmask, maxnode); if (err) return err; return do_set_mempolicy(lmode, mode_flags, &nodes); } SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask, unsigned long, maxnode) { return kernel_set_mempolicy(mode, nmask, maxnode); } static int kernel_migrate_pages(pid_t pid, unsigned long maxnode, const unsigned long __user *old_nodes, const unsigned long __user *new_nodes) { struct mm_struct *mm = NULL; struct task_struct *task; nodemask_t task_nodes; int err; nodemask_t *old; nodemask_t *new; NODEMASK_SCRATCH(scratch); if (!scratch) return -ENOMEM; old = &scratch->mask1; new = &scratch->mask2; err = get_nodes(old, old_nodes, maxnode); if (err) goto out; err = get_nodes(new, new_nodes, maxnode); if (err) goto out; /* Find the mm_struct */ rcu_read_lock(); task = pid ? find_task_by_vpid(pid) : current; if (!task) { rcu_read_unlock(); err = -ESRCH; goto out; } get_task_struct(task); err = -EINVAL; /* * Check if this process has the right to modify the specified process. * Use the regular "ptrace_may_access()" checks. */ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { rcu_read_unlock(); err = -EPERM; goto out_put; } rcu_read_unlock(); task_nodes = cpuset_mems_allowed(task); /* Is the user allowed to access the target nodes? */ if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) { err = -EPERM; goto out_put; } task_nodes = cpuset_mems_allowed(current); nodes_and(*new, *new, task_nodes); if (nodes_empty(*new)) goto out_put; err = security_task_movememory(task); if (err) goto out_put; mm = get_task_mm(task); put_task_struct(task); if (!mm) { err = -EINVAL; goto out; } err = do_migrate_pages(mm, old, new, capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); mmput(mm); out: NODEMASK_SCRATCH_FREE(scratch); return err; out_put: put_task_struct(task); goto out; } SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, const unsigned long __user *, old_nodes, const unsigned long __user *, new_nodes) { return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes); } /* Retrieve NUMA policy */ static int kernel_get_mempolicy(int __user *policy, unsigned long __user *nmask, unsigned long maxnode, unsigned long addr, unsigned long flags) { int err; int pval; nodemask_t nodes; if (nmask != NULL && maxnode < nr_node_ids) return -EINVAL; addr = untagged_addr(addr); err = do_get_mempolicy(&pval, &nodes, addr, flags); if (err) return err; if (policy && put_user(pval, policy)) return -EFAULT; if (nmask) err = copy_nodes_to_user(nmask, maxnode, &nodes); return err; } SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, unsigned long __user *, nmask, unsigned long, maxnode, unsigned long, addr, unsigned long, flags) { return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags); } bool vma_migratable(struct vm_area_struct *vma) { if (vma->vm_flags & (VM_IO | VM_PFNMAP)) return false; /* * DAX device mappings require predictable access latency, so avoid * incurring periodic faults. */ if (vma_is_dax(vma)) return false; if (is_vm_hugetlb_page(vma) && !hugepage_migration_supported(hstate_vma(vma))) return false; /* * Migration allocates pages in the highest zone. If we cannot * do so then migration (at least from node to node) is not * possible. */ if (vma->vm_file && gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping)) < policy_zone) return false; return true; } struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, unsigned long addr) { struct mempolicy *pol = NULL; if (vma) { if (vma->vm_ops && vma->vm_ops->get_policy) { pol = vma->vm_ops->get_policy(vma, addr); } else if (vma->vm_policy) { pol = vma->vm_policy; /* * shmem_alloc_page() passes MPOL_F_SHARED policy with * a pseudo vma whose vma->vm_ops=NULL. Take a reference * count on these policies which will be dropped by * mpol_cond_put() later */ if (mpol_needs_cond_ref(pol)) mpol_get(pol); } } return pol; } /* * get_vma_policy(@vma, @addr) * @vma: virtual memory area whose policy is sought * @addr: address in @vma for shared policy lookup * * Returns effective policy for a VMA at specified address. * Falls back to current->mempolicy or system default policy, as necessary. * Shared policies [those marked as MPOL_F_SHARED] require an extra reference * count--added by the get_policy() vm_op, as appropriate--to protect against * freeing by another task. It is the caller's responsibility to free the * extra reference for shared policies. 
*/ static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, unsigned long addr) { struct mempolicy *pol = __get_vma_policy(vma, addr); if (!pol) pol = get_task_policy(current); return pol; } bool vma_policy_mof(struct vm_area_struct *vma) { struct mempolicy *pol; if (vma->vm_ops && vma->vm_ops->get_policy) { bool ret = false; pol = vma->vm_ops->get_policy(vma, vma->vm_start); if (pol && (pol->flags & MPOL_F_MOF)) ret = true; mpol_cond_put(pol); return ret; } pol = vma->vm_policy; if (!pol) pol = get_task_policy(current); return pol->flags & MPOL_F_MOF; } bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone) { enum zone_type dynamic_policy_zone = policy_zone; BUG_ON(dynamic_policy_zone == ZONE_MOVABLE); /* * if policy->nodes has movable memory only, * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. * * policy->nodes is intersect with node_states[N_MEMORY]. * so if the following test fails, it implies * policy->nodes has movable memory only. */ if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY])) dynamic_policy_zone = ZONE_MOVABLE; return zone >= dynamic_policy_zone; } /* * Return a nodemask representing a mempolicy for filtering nodes for * page allocation */ nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) { int mode = policy->mode; /* Lower zones don't get a nodemask applied for MPOL_BIND */ if (unlikely(mode == MPOL_BIND) && apply_policy_zone(policy, gfp_zone(gfp)) && cpuset_nodemask_valid_mems_allowed(&policy->nodes)) return &policy->nodes; if (mode == MPOL_PREFERRED_MANY) return &policy->nodes; return NULL; } /* * Return the preferred node id for 'prefer' mempolicy, and return * the given id for all other policies. * * policy_node() is always coupled with policy_nodemask(), which * secures the nodemask limit for 'bind' and 'prefer-many' policy. */ static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd) { if (policy->mode == MPOL_PREFERRED) { nd = first_node(policy->nodes); } else { /* * __GFP_THISNODE shouldn't even be used with the bind policy * because we might easily break the expectation to stay on the * requested node and not break the policy. */ WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE)); } if ((policy->mode == MPOL_BIND || policy->mode == MPOL_PREFERRED_MANY) && policy->home_node != NUMA_NO_NODE) return policy->home_node; return nd; } /* Do dynamic interleaving for a process */ static unsigned interleave_nodes(struct mempolicy *policy) { unsigned next; struct task_struct *me = current; next = next_node_in(me->il_prev, policy->nodes); if (next < MAX_NUMNODES) me->il_prev = next; return next; } /* * Depending on the memory policy provide a node from which to allocate the * next slab entry. */ unsigned int mempolicy_slab_node(void) { struct mempolicy *policy; int node = numa_mem_id(); if (!in_task()) return node; policy = current->mempolicy; if (!policy) return node; switch (policy->mode) { case MPOL_PREFERRED: return first_node(policy->nodes); case MPOL_INTERLEAVE: return interleave_nodes(policy); case MPOL_BIND: case MPOL_PREFERRED_MANY: { struct zoneref *z; /* * Follow bind policy behavior and start allocation at the * first node. */ struct zonelist *zonelist; enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL); zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK]; z = first_zones_zonelist(zonelist, highest_zoneidx, &policy->nodes); return z->zone ? 
zone_to_nid(z->zone) : node; } case MPOL_LOCAL: return node; default: BUG(); } } /* * Do static interleaving for a VMA with known offset @n. Returns the n'th * node in pol->nodes (starting from n=0), wrapping around if n exceeds the * number of present nodes. */ static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) { nodemask_t nodemask = pol->nodes; unsigned int target, nnodes; int i; int nid; /* * The barrier will stabilize the nodemask in a register or on * the stack so that it will stop changing under the code. * * Between first_node() and next_node(), pol->nodes could be changed * by other threads. So we put pol->nodes in a local stack. */ barrier(); nnodes = nodes_weight(nodemask); if (!nnodes) return numa_node_id(); target = (unsigned int)n % nnodes; nid = first_node(nodemask); for (i = 0; i < target; i++) nid = next_node(nid, nodemask); return nid; } /* Determine a node number for interleave */ static inline unsigned interleave_nid(struct mempolicy *pol, struct vm_area_struct *vma, unsigned long addr, int shift) { if (vma) { unsigned long off; /* * for small pages, there is no difference between * shift and PAGE_SHIFT, so the bit-shift is safe. * for huge pages, since vm_pgoff is in units of small * pages, we need to shift off the always 0 bits to get * a useful offset. */ BUG_ON(shift < PAGE_SHIFT); off = vma->vm_pgoff >> (shift - PAGE_SHIFT); off += (addr - vma->vm_start) >> shift; return offset_il_node(pol, off); } else return interleave_nodes(pol); } #ifdef CONFIG_HUGETLBFS /* * huge_node(@vma, @addr, @gfp_flags, @mpol) * @vma: virtual memory area whose policy is sought * @addr: address in @vma for shared policy lookup and interleave policy * @gfp_flags: for requested zone * @mpol: pointer to mempolicy pointer for reference counted mempolicy * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy * * Returns a nid suitable for a huge page allocation and a pointer * to the struct mempolicy for conditional unref after allocation. * If the effective policy is 'bind' or 'prefer-many', returns a pointer * to the mempolicy's @nodemask for filtering the zonelist. * * Must be protected by read_mems_allowed_begin() */ int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask) { int nid; int mode; *mpol = get_vma_policy(vma, addr); *nodemask = NULL; mode = (*mpol)->mode; if (unlikely(mode == MPOL_INTERLEAVE)) { nid = interleave_nid(*mpol, vma, addr, huge_page_shift(hstate_vma(vma))); } else { nid = policy_node(gfp_flags, *mpol, numa_node_id()); if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY) *nodemask = &(*mpol)->nodes; } return nid; } /* * init_nodemask_of_mempolicy * * If the current task's mempolicy is "default" [NULL], return 'false' * to indicate default policy. Otherwise, extract the policy nodemask * for 'bind' or 'interleave' policy into the argument nodemask, or * initialize the argument nodemask to contain the single node for * 'preferred' or 'local' policy and return 'true' to indicate presence * of non-default mempolicy. * * We don't bother with reference counting the mempolicy [mpol_get/put] * because the current task is examining it's own mempolicy and a task's * mempolicy is only ever changed by the task itself. * * N.B., it is the caller's responsibility to free a returned nodemask. 
*/ bool init_nodemask_of_mempolicy(nodemask_t *mask) { struct mempolicy *mempolicy; if (!(mask && current->mempolicy)) return false; task_lock(current); mempolicy = current->mempolicy; switch (mempolicy->mode) { case MPOL_PREFERRED: case MPOL_PREFERRED_MANY: case MPOL_BIND: case MPOL_INTERLEAVE: *mask = mempolicy->nodes; break; case MPOL_LOCAL: init_nodemask_of_node(mask, numa_node_id()); break; default: BUG(); } task_unlock(current); return true; } #endif /* * mempolicy_in_oom_domain * * If tsk's mempolicy is "bind", check for intersection between mask and * the policy nodemask. Otherwise, return true for all other policies * including "interleave", as a tsk with "interleave" policy may have * memory allocated from all nodes in system. * * Takes task_lock(tsk) to prevent freeing of its mempolicy. */ bool mempolicy_in_oom_domain(struct task_struct *tsk, const nodemask_t *mask) { struct mempolicy *mempolicy; bool ret = true; if (!mask) return ret; task_lock(tsk); mempolicy = tsk->mempolicy; if (mempolicy && mempolicy->mode == MPOL_BIND) ret = nodes_intersects(mempolicy->nodes, *mask); task_unlock(tsk); return ret; } /* Allocate a page in interleaved policy. Own path because it needs to do special accounting. */ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned nid) { struct page *page; page = __alloc_pages(gfp, order, nid, NULL); /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */ if (!static_branch_likely(&vm_numa_stat_key)) return page; if (page && page_to_nid(page) == nid) { preempt_disable(); __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT); preempt_enable(); } return page; } static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order, int nid, struct mempolicy *pol) { struct page *page; gfp_t preferred_gfp; /* * This is a two pass approach. The first pass will only try the * preferred nodes but skip the direct reclaim and allow the * allocation to fail, while the second pass will try all the * nodes in system. */ preferred_gfp = gfp | __GFP_NOWARN; preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes); if (!page) page = __alloc_pages(gfp, order, nid, NULL); return page; } /** * vma_alloc_folio - Allocate a folio for a VMA. * @gfp: GFP flags. * @order: Order of the folio. * @vma: Pointer to VMA or NULL if not available. * @addr: Virtual address of the allocation. Must be inside @vma. * @hugepage: For hugepages try only the preferred node if possible. * * Allocate a folio for a specific address in @vma, using the appropriate * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock * of the mm_struct of the VMA to prevent it from going away. Should be * used for all allocations for folios that will be mapped into user space. * * Return: The folio on success or NULL if allocation fails. 
*/ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr, bool hugepage) { struct mempolicy *pol; int node = numa_node_id(); struct folio *folio; int preferred_nid; nodemask_t *nmask; pol = get_vma_policy(vma, addr); if (pol->mode == MPOL_INTERLEAVE) { struct page *page; unsigned nid; nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); mpol_cond_put(pol); gfp |= __GFP_COMP; page = alloc_page_interleave(gfp, order, nid); folio = (struct folio *)page; if (folio && order > 1) folio_prep_large_rmappable(folio); goto out; } if (pol->mode == MPOL_PREFERRED_MANY) { struct page *page; node = policy_node(gfp, pol, node); gfp |= __GFP_COMP; page = alloc_pages_preferred_many(gfp, order, node, pol); mpol_cond_put(pol); folio = (struct folio *)page; if (folio && order > 1) folio_prep_large_rmappable(folio); goto out; } if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) { int hpage_node = node; /* * For hugepage allocation and non-interleave policy which * allows the current node (or other explicitly preferred * node) we only try to allocate from the current/preferred * node and don't fall back to other nodes, as the cost of * remote accesses would likely offset THP benefits. * * If the policy is interleave or does not allow the current * node in its nodemask, we allocate the standard way. */ if (pol->mode == MPOL_PREFERRED) hpage_node = first_node(pol->nodes); nmask = policy_nodemask(gfp, pol); if (!nmask || node_isset(hpage_node, *nmask)) { mpol_cond_put(pol); /* * First, try to allocate THP only on local node, but * don't reclaim unnecessarily, just compact. */ folio = __folio_alloc_node(gfp | __GFP_THISNODE | __GFP_NORETRY, order, hpage_node); /* * If hugepage allocations are configured to always * synchronous compact or the vma has been madvised * to prefer hugepage backing, retry allowing remote * memory with both reclaim and compact as well. */ if (!folio && (gfp & __GFP_DIRECT_RECLAIM)) folio = __folio_alloc(gfp, order, hpage_node, nmask); goto out; } } nmask = policy_nodemask(gfp, pol); preferred_nid = policy_node(gfp, pol, node); folio = __folio_alloc(gfp, order, preferred_nid, nmask); mpol_cond_put(pol); out: return folio; } EXPORT_SYMBOL(vma_alloc_folio); /** * alloc_pages - Allocate pages. * @gfp: GFP flags. * @order: Power of two of number of pages to allocate. * * Allocate 1 << @order contiguous pages. The physical address of the * first page is naturally aligned (eg an order-3 allocation will be aligned * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current * process is honoured when in process context. * * Context: Can be called from any context, providing the appropriate GFP * flags are used. * Return: The page on success or NULL if allocation fails. 
*/ struct page *alloc_pages(gfp_t gfp, unsigned order) { struct mempolicy *pol = &default_policy; struct page *page; if (!in_interrupt() && !(gfp & __GFP_THISNODE)) pol = get_task_policy(current); /* * No reference counting needed for current->mempolicy * nor system default_policy */ if (pol->mode == MPOL_INTERLEAVE) page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); else if (pol->mode == MPOL_PREFERRED_MANY) page = alloc_pages_preferred_many(gfp, order, policy_node(gfp, pol, numa_node_id()), pol); else page = __alloc_pages(gfp, order, policy_node(gfp, pol, numa_node_id()), policy_nodemask(gfp, pol)); return page; } EXPORT_SYMBOL(alloc_pages); struct folio *folio_alloc(gfp_t gfp, unsigned order) { struct page *page = alloc_pages(gfp | __GFP_COMP, order); struct folio *folio = (struct folio *)page; if (folio && order > 1) folio_prep_large_rmappable(folio); return folio; } EXPORT_SYMBOL(folio_alloc); static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp, struct mempolicy *pol, unsigned long nr_pages, struct page **page_array) { int nodes; unsigned long nr_pages_per_node; int delta; int i; unsigned long nr_allocated; unsigned long total_allocated = 0; nodes = nodes_weight(pol->nodes); nr_pages_per_node = nr_pages / nodes; delta = nr_pages - nodes * nr_pages_per_node; for (i = 0; i < nodes; i++) { if (delta) { nr_allocated = __alloc_pages_bulk(gfp, interleave_nodes(pol), NULL, nr_pages_per_node + 1, NULL, page_array); delta--; } else { nr_allocated = __alloc_pages_bulk(gfp, interleave_nodes(pol), NULL, nr_pages_per_node, NULL, page_array); } page_array += nr_allocated; total_allocated += nr_allocated; } return total_allocated; } static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid, struct mempolicy *pol, unsigned long nr_pages, struct page **page_array) { gfp_t preferred_gfp; unsigned long nr_allocated = 0; preferred_gfp = gfp | __GFP_NOWARN; preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes, nr_pages, NULL, page_array); if (nr_allocated < nr_pages) nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL, nr_pages - nr_allocated, NULL, page_array + nr_allocated); return nr_allocated; } /* alloc pages bulk and mempolicy should be considered at the * same time in some situation such as vmalloc. * * It can accelerate memory allocation especially interleaving * allocate memory. */ unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp, unsigned long nr_pages, struct page **page_array) { struct mempolicy *pol = &default_policy; if (!in_interrupt() && !(gfp & __GFP_THISNODE)) pol = get_task_policy(current); if (pol->mode == MPOL_INTERLEAVE) return alloc_pages_bulk_array_interleave(gfp, pol, nr_pages, page_array); if (pol->mode == MPOL_PREFERRED_MANY) return alloc_pages_bulk_array_preferred_many(gfp, numa_node_id(), pol, nr_pages, page_array); return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()), policy_nodemask(gfp, pol), nr_pages, NULL, page_array); } int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) { struct mempolicy *pol = mpol_dup(vma_policy(src)); if (IS_ERR(pol)) return PTR_ERR(pol); dst->vm_policy = pol; return 0; } /* * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it * rebinds the mempolicy its copying by calling mpol_rebind_policy() * with the mems_allowed returned by cpuset_mems_allowed(). This * keeps mempolicies cpuset relative after its cpuset moves. See * further kernel/cpuset.c update_nodemask(). 
* * current's mempolicy may be rebinded by the other task(the task that changes * cpuset's mems), so we needn't do rebind work for current task. */ /* Slow path of a mempolicy duplicate */ struct mempolicy *__mpol_dup(struct mempolicy *old) { struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL); if (!new) return ERR_PTR(-ENOMEM); /* task's mempolicy is protected by alloc_lock */ if (old == current->mempolicy) { task_lock(current); *new = *old; task_unlock(current); } else *new = *old; if (current_cpuset_is_being_rebound()) { nodemask_t mems = cpuset_mems_allowed(current); mpol_rebind_policy(new, &mems); } atomic_set(&new->refcnt, 1); return new; } /* Slow path of a mempolicy comparison */ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) { if (!a || !b) return false; if (a->mode != b->mode) return false; if (a->flags != b->flags) return false; if (a->home_node != b->home_node) return false; if (mpol_store_user_nodemask(a)) if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask)) return false; switch (a->mode) { case MPOL_BIND: case MPOL_INTERLEAVE: case MPOL_PREFERRED: case MPOL_PREFERRED_MANY: return !!nodes_equal(a->nodes, b->nodes); case MPOL_LOCAL: return true; default: BUG(); return false; } } /* * Shared memory backing store policy support. * * Remember policies even when nobody has shared memory mapped. * The policies are kept in Red-Black tree linked from the inode. * They are protected by the sp->lock rwlock, which should be held * for any accesses to the tree. */ /* * lookup first element intersecting start-end. Caller holds sp->lock for * reading or for writing */ static struct sp_node * sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end) { struct rb_node *n = sp->root.rb_node; while (n) { struct sp_node *p = rb_entry(n, struct sp_node, nd); if (start >= p->end) n = n->rb_right; else if (end <= p->start) n = n->rb_left; else break; } if (!n) return NULL; for (;;) { struct sp_node *w = NULL; struct rb_node *prev = rb_prev(n); if (!prev) break; w = rb_entry(prev, struct sp_node, nd); if (w->end <= start) break; n = prev; } return rb_entry(n, struct sp_node, nd); } /* * Insert a new shared policy into the list. Caller holds sp->lock for * writing. */ static void sp_insert(struct shared_policy *sp, struct sp_node *new) { struct rb_node **p = &sp->root.rb_node; struct rb_node *parent = NULL; struct sp_node *nd; while (*p) { parent = *p; nd = rb_entry(parent, struct sp_node, nd); if (new->start < nd->start) p = &(*p)->rb_left; else if (new->end > nd->end) p = &(*p)->rb_right; else BUG(); } rb_link_node(&new->nd, parent, p); rb_insert_color(&new->nd, &sp->root); pr_debug("inserting %lx-%lx: %d\n", new->start, new->end, new->policy ? new->policy->mode : 0); } /* Find shared policy intersecting idx */ struct mempolicy * mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) { struct mempolicy *pol = NULL; struct sp_node *sn; if (!sp->root.rb_node) return NULL; read_lock(&sp->lock); sn = sp_lookup(sp, idx, idx+1); if (sn) { mpol_get(sn->policy); pol = sn->policy; } read_unlock(&sp->lock); return pol; } static void sp_free(struct sp_node *n) { mpol_put(n->policy); kmem_cache_free(sn_cache, n); } /** * mpol_misplaced - check whether current page node is valid in policy * * @page: page to be checked * @vma: vm area where page mapped * @addr: virtual address where page mapped * * Lookup current policy node id for vma,addr and "compare to" page's * node id. Policy determination "mimics" alloc_page_vma(). 
* Called from fault path where we know the vma and faulting address. * * Return: NUMA_NO_NODE if the page is in a node that is valid for this * policy, or a suitable node ID to allocate a replacement page from. */ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) { struct mempolicy *pol; struct zoneref *z; int curnid = page_to_nid(page); unsigned long pgoff; int thiscpu = raw_smp_processor_id(); int thisnid = cpu_to_node(thiscpu); int polnid = NUMA_NO_NODE; int ret = NUMA_NO_NODE; pol = get_vma_policy(vma, addr); if (!(pol->flags & MPOL_F_MOF)) goto out; switch (pol->mode) { case MPOL_INTERLEAVE: pgoff = vma->vm_pgoff; pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; polnid = offset_il_node(pol, pgoff); break; case MPOL_PREFERRED: if (node_isset(curnid, pol->nodes)) goto out; polnid = first_node(pol->nodes); break; case MPOL_LOCAL: polnid = numa_node_id(); break; case MPOL_BIND: /* Optimize placement among multiple nodes via NUMA balancing */ if (pol->flags & MPOL_F_MORON) { if (node_isset(thisnid, pol->nodes)) break; goto out; } fallthrough; case MPOL_PREFERRED_MANY: /* * use current page if in policy nodemask, * else select nearest allowed node, if any. * If no allowed nodes, use current [!misplaced]. */ if (node_isset(curnid, pol->nodes)) goto out; z = first_zones_zonelist( node_zonelist(numa_node_id(), GFP_HIGHUSER), gfp_zone(GFP_HIGHUSER), &pol->nodes); polnid = zone_to_nid(z->zone); break; default: BUG(); } /* Migrate the page towards the node whose CPU is referencing it */ if (pol->flags & MPOL_F_MORON) { polnid = thisnid; if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) goto out; } if (curnid != polnid) ret = polnid; out: mpol_cond_put(pol); return ret; } /* * Drop the (possibly final) reference to task->mempolicy. It needs to be * dropped after task->mempolicy is set to NULL so that any allocation done as * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed * policy. */ void mpol_put_task_policy(struct task_struct *task) { struct mempolicy *pol; task_lock(task); pol = task->mempolicy; task->mempolicy = NULL; task_unlock(task); mpol_put(pol); } static void sp_delete(struct shared_policy *sp, struct sp_node *n) { pr_debug("deleting %lx-l%lx\n", n->start, n->end); rb_erase(&n->nd, &sp->root); sp_free(n); } static void sp_node_init(struct sp_node *node, unsigned long start, unsigned long end, struct mempolicy *pol) { node->start = start; node->end = end; node->policy = pol; } static struct sp_node *sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol) { struct sp_node *n; struct mempolicy *newpol; n = kmem_cache_alloc(sn_cache, GFP_KERNEL); if (!n) return NULL; newpol = mpol_dup(pol); if (IS_ERR(newpol)) { kmem_cache_free(sn_cache, n); return NULL; } newpol->flags |= MPOL_F_SHARED; sp_node_init(n, start, end, newpol); return n; } /* Replace a policy range. */ static int shared_policy_replace(struct shared_policy *sp, unsigned long start, unsigned long end, struct sp_node *new) { struct sp_node *n; struct sp_node *n_new = NULL; struct mempolicy *mpol_new = NULL; int ret = 0; restart: write_lock(&sp->lock); n = sp_lookup(sp, start, end); /* Take care of old policies in the same range. */ while (n && n->start < end) { struct rb_node *next = rb_next(&n->nd); if (n->start >= start) { if (n->end <= end) sp_delete(sp, n); else n->start = end; } else { /* Old policy spanning whole new range. 
*/ if (n->end > end) { if (!n_new) goto alloc_new; *mpol_new = *n->policy; atomic_set(&mpol_new->refcnt, 1); sp_node_init(n_new, end, n->end, mpol_new); n->end = start; sp_insert(sp, n_new); n_new = NULL; mpol_new = NULL; break; } else n->end = start; } if (!next) break; n = rb_entry(next, struct sp_node, nd); } if (new) sp_insert(sp, new); write_unlock(&sp->lock); ret = 0; err_out: if (mpol_new) mpol_put(mpol_new); if (n_new) kmem_cache_free(sn_cache, n_new); return ret; alloc_new: write_unlock(&sp->lock); ret = -ENOMEM; n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL); if (!n_new) goto err_out; mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); if (!mpol_new) goto err_out; atomic_set(&mpol_new->refcnt, 1); goto restart; } /** * mpol_shared_policy_init - initialize shared policy for inode * @sp: pointer to inode shared policy * @mpol: struct mempolicy to install * * Install non-NULL @mpol in inode's shared policy rb-tree. * On entry, the current task has a reference on a non-NULL @mpol. * This must be released on exit. * This is called at get_inode() calls and we can use GFP_KERNEL. */ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) { int ret; sp->root = RB_ROOT; /* empty tree == default mempolicy */ rwlock_init(&sp->lock); if (mpol) { struct vm_area_struct pvma; struct mempolicy *new; NODEMASK_SCRATCH(scratch); if (!scratch) goto put_mpol; /* contextualize the tmpfs mount point mempolicy */ new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); if (IS_ERR(new)) goto free_scratch; /* no valid nodemask intersection */ task_lock(current); ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch); task_unlock(current); if (ret) goto put_new; /* Create pseudo-vma that contains just the policy */ vma_init(&pvma, NULL); pvma.vm_end = TASK_SIZE; /* policy covers entire file */ mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ put_new: mpol_put(new); /* drop initial ref */ free_scratch: NODEMASK_SCRATCH_FREE(scratch); put_mpol: mpol_put(mpol); /* drop our incoming ref on sb mpol */ } } int mpol_set_shared_policy(struct shared_policy *info, struct vm_area_struct *vma, struct mempolicy *npol) { int err; struct sp_node *new = NULL; unsigned long sz = vma_pages(vma); pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n", vma->vm_pgoff, sz, npol ? npol->mode : -1, npol ? npol->flags : -1, npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE); if (npol) { new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); if (!new) return -ENOMEM; } err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); if (err && new) sp_free(new); return err; } /* Free a backing policy store on inode delete. */ void mpol_free_shared_policy(struct shared_policy *p) { struct sp_node *n; struct rb_node *next; if (!p->root.rb_node) return; write_lock(&p->lock); next = rb_first(&p->root); while (next) { n = rb_entry(next, struct sp_node, nd); next = rb_next(&n->nd); sp_delete(p, n); } write_unlock(&p->lock); } #ifdef CONFIG_NUMA_BALANCING static int __initdata numabalancing_override; static void __init check_numabalancing_enable(void) { bool numabalancing_default = false; if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED)) numabalancing_default = true; /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */ if (numabalancing_override) set_numabalancing_state(numabalancing_override == 1); if (num_online_nodes() > 1 && !numabalancing_override) { pr_info("%s automatic NUMA balancing. 
Configure with numa_balancing= or the kernel.numa_balancing sysctl\n", numabalancing_default ? "Enabling" : "Disabling"); set_numabalancing_state(numabalancing_default); } } static int __init setup_numabalancing(char *str) { int ret = 0; if (!str) goto out; if (!strcmp(str, "enable")) { numabalancing_override = 1; ret = 1; } else if (!strcmp(str, "disable")) { numabalancing_override = -1; ret = 1; } out: if (!ret) pr_warn("Unable to parse numa_balancing=\n"); return ret; } __setup("numa_balancing=", setup_numabalancing); #else static inline void __init check_numabalancing_enable(void) { } #endif /* CONFIG_NUMA_BALANCING */ /* assumes fs == KERNEL_DS */ void __init numa_policy_init(void) { nodemask_t interleave_nodes; unsigned long largest = 0; int nid, prefer = 0; policy_cache = kmem_cache_create("numa_policy", sizeof(struct mempolicy), 0, SLAB_PANIC, NULL); sn_cache = kmem_cache_create("shared_policy_node", sizeof(struct sp_node), 0, SLAB_PANIC, NULL); for_each_node(nid) { preferred_node_policy[nid] = (struct mempolicy) { .refcnt = ATOMIC_INIT(1), .mode = MPOL_PREFERRED, .flags = MPOL_F_MOF | MPOL_F_MORON, .nodes = nodemask_of_node(nid), }; } /* * Set interleaving policy for system init. Interleaving is only * enabled across suitably sized nodes (default is >= 16MB), or * fall back to the largest node if they're all smaller. */ nodes_clear(interleave_nodes); for_each_node_state(nid, N_MEMORY) { unsigned long total_pages = node_present_pages(nid); /* Preserve the largest node */ if (largest < total_pages) { largest = total_pages; prefer = nid; } /* Interleave this node? */ if ((total_pages << PAGE_SHIFT) >= (16 << 20)) node_set(nid, interleave_nodes); } /* All too small, use the largest */ if (unlikely(nodes_empty(interleave_nodes))) node_set(prefer, interleave_nodes); if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes)) pr_err("%s: interleaving failed\n", __func__); check_numabalancing_enable(); } /* Reset policy of current process to default */ void numa_default_policy(void) { do_set_mempolicy(MPOL_DEFAULT, 0, NULL); } /* * Parse and format mempolicy from/to strings */ static const char * const policy_modes[] = { [MPOL_DEFAULT] = "default", [MPOL_PREFERRED] = "prefer", [MPOL_BIND] = "bind", [MPOL_INTERLEAVE] = "interleave", [MPOL_LOCAL] = "local", [MPOL_PREFERRED_MANY] = "prefer (many)", }; #ifdef CONFIG_TMPFS /** * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option. * @str: string containing mempolicy to parse * @mpol: pointer to struct mempolicy pointer, returned on success. * * Format of input: * <mode>[=<flags>][:<nodelist>] * * Return: %0 on success, else %1 */ int mpol_parse_str(char *str, struct mempolicy **mpol) { struct mempolicy *new = NULL; unsigned short mode_flags; nodemask_t nodes; char *nodelist = strchr(str, ':'); char *flags = strchr(str, '='); int err = 1, mode; if (flags) *flags++ = '\0'; /* terminate mode string */ if (nodelist) { /* NUL-terminate mode or flags string */ *nodelist++ = '\0'; if (nodelist_parse(nodelist, nodes)) goto out; if (!nodes_subset(nodes, node_states[N_MEMORY])) goto out; } else nodes_clear(nodes); mode = match_string(policy_modes, MPOL_MAX, str); if (mode < 0) goto out; switch (mode) { case MPOL_PREFERRED: /* * Insist on a nodelist of one node only, although later * we use first_node(nodes) to grab a single node, so here * nodelist (or nodes) cannot be empty. 
*/ if (nodelist) { char *rest = nodelist; while (isdigit(*rest)) rest++; if (*rest) goto out; if (nodes_empty(nodes)) goto out; } break; case MPOL_INTERLEAVE: /* * Default to online nodes with memory if no nodelist */ if (!nodelist) nodes = node_states[N_MEMORY]; break; case MPOL_LOCAL: /* * Don't allow a nodelist; mpol_new() checks flags */ if (nodelist) goto out; break; case MPOL_DEFAULT: /* * Insist on a empty nodelist */ if (!nodelist) err = 0; goto out; case MPOL_PREFERRED_MANY: case MPOL_BIND: /* * Insist on a nodelist */ if (!nodelist) goto out; } mode_flags = 0; if (flags) { /* * Currently, we only support two mutually exclusive * mode flags. */ if (!strcmp(flags, "static")) mode_flags |= MPOL_F_STATIC_NODES; else if (!strcmp(flags, "relative")) mode_flags |= MPOL_F_RELATIVE_NODES; else goto out; } new = mpol_new(mode, mode_flags, &nodes); if (IS_ERR(new)) goto out; /* * Save nodes for mpol_to_str() to show the tmpfs mount options * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo. */ if (mode != MPOL_PREFERRED) { new->nodes = nodes; } else if (nodelist) { nodes_clear(new->nodes); node_set(first_node(nodes), new->nodes); } else { new->mode = MPOL_LOCAL; } /* * Save nodes for contextualization: this will be used to "clone" * the mempolicy in a specific context [cpuset] at a later time. */ new->w.user_nodemask = nodes; err = 0; out: /* Restore string for error message */ if (nodelist) *--nodelist = ':'; if (flags) *--flags = '='; if (!err) *mpol = new; return err; } #endif /* CONFIG_TMPFS */ /** * mpol_to_str - format a mempolicy structure for printing * @buffer: to contain formatted mempolicy string * @maxlen: length of @buffer * @pol: pointer to mempolicy to be formatted * * Convert @pol into a string. If @buffer is too short, truncate the string. * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the * longest flag, "relative", and to display at least a few node ids. */ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol) { char *p = buffer; nodemask_t nodes = NODE_MASK_NONE; unsigned short mode = MPOL_DEFAULT; unsigned short flags = 0; if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) { mode = pol->mode; flags = pol->flags; } switch (mode) { case MPOL_DEFAULT: case MPOL_LOCAL: break; case MPOL_PREFERRED: case MPOL_PREFERRED_MANY: case MPOL_BIND: case MPOL_INTERLEAVE: nodes = pol->nodes; break; default: WARN_ON_ONCE(1); snprintf(p, maxlen, "unknown"); return; } p += snprintf(p, maxlen, "%s", policy_modes[mode]); if (flags & MPOL_MODE_FLAGS) { p += snprintf(p, buffer + maxlen - p, "="); /* * Currently, the only defined flags are mutually exclusive */ if (flags & MPOL_F_STATIC_NODES) p += snprintf(p, buffer + maxlen - p, "static"); else if (flags & MPOL_F_RELATIVE_NODES) p += snprintf(p, buffer + maxlen - p, "relative"); } if (!nodes_empty(nodes)) p += scnprintf(p, buffer + maxlen - p, ":%*pbl", nodemask_pr_args(&nodes)); }
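/*
 * Quick reference for the option syntax handled above, all taken from the
 * parser and formatter in this file:
 *
 *	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
 *	"bind=static:1,3"	MPOL_BIND with MPOL_F_STATIC_NODES over nodes 1,3
 *	"prefer:2"		MPOL_PREFERRED on node 2
 *	"local"			MPOL_LOCAL (a nodelist is rejected)
 *
 * mpol_to_str() emits the same "<mode>[=<flags>][:<nodelist>]" shape, which
 * is how these options reappear in /proc/mounts.
 */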
linux-master
mm/mempolicy.c
// SPDX-License-Identifier: GPL-2.0 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/mm.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/coredump.h> #include <linux/mmu_notifier.h> #include <linux/rmap.h> #include <linux/swap.h> #include <linux/mm_inline.h> #include <linux/kthread.h> #include <linux/khugepaged.h> #include <linux/freezer.h> #include <linux/mman.h> #include <linux/hashtable.h> #include <linux/userfaultfd_k.h> #include <linux/page_idle.h> #include <linux/page_table_check.h> #include <linux/swapops.h> #include <linux/shmem_fs.h> #include <linux/ksm.h> #include <asm/tlb.h> #include <asm/pgalloc.h> #include "internal.h" #include "mm_slot.h" enum scan_result { SCAN_FAIL, SCAN_SUCCEED, SCAN_PMD_NULL, SCAN_PMD_NONE, SCAN_PMD_MAPPED, SCAN_EXCEED_NONE_PTE, SCAN_EXCEED_SWAP_PTE, SCAN_EXCEED_SHARED_PTE, SCAN_PTE_NON_PRESENT, SCAN_PTE_UFFD_WP, SCAN_PTE_MAPPED_HUGEPAGE, SCAN_PAGE_RO, SCAN_LACK_REFERENCED_PAGE, SCAN_PAGE_NULL, SCAN_SCAN_ABORT, SCAN_PAGE_COUNT, SCAN_PAGE_LRU, SCAN_PAGE_LOCK, SCAN_PAGE_ANON, SCAN_PAGE_COMPOUND, SCAN_ANY_PROCESS, SCAN_VMA_NULL, SCAN_VMA_CHECK, SCAN_ADDRESS_RANGE, SCAN_DEL_PAGE_LRU, SCAN_ALLOC_HUGE_PAGE_FAIL, SCAN_CGROUP_CHARGE_FAIL, SCAN_TRUNCATED, SCAN_PAGE_HAS_PRIVATE, SCAN_STORE_FAILED, SCAN_COPY_MC, SCAN_PAGE_FILLED, }; #define CREATE_TRACE_POINTS #include <trace/events/huge_memory.h> static struct task_struct *khugepaged_thread __read_mostly; static DEFINE_MUTEX(khugepaged_mutex); /* default scan 8*512 pte (or vmas) every 30 second */ static unsigned int khugepaged_pages_to_scan __read_mostly; static unsigned int khugepaged_pages_collapsed; static unsigned int khugepaged_full_scans; static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000; /* during fragmentation poll the hugepage allocator once every minute */ static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000; static unsigned long khugepaged_sleep_expire; static DEFINE_SPINLOCK(khugepaged_mm_lock); static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait); /* * default collapse hugepages if there is at least one pte mapped like * it would have happened if the vma was large enough during page * fault. * * Note that these are only respected if collapse was initiated by khugepaged. */ static unsigned int khugepaged_max_ptes_none __read_mostly; static unsigned int khugepaged_max_ptes_swap __read_mostly; static unsigned int khugepaged_max_ptes_shared __read_mostly; #define MM_SLOTS_HASH_BITS 10 static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS); static struct kmem_cache *mm_slot_cache __read_mostly; struct collapse_control { bool is_khugepaged; /* Num pages scanned per node */ u32 node_load[MAX_NUMNODES]; /* nodemask for allocation fallback */ nodemask_t alloc_nmask; }; /** * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned * @slot: hash lookup from mm to mm_slot */ struct khugepaged_mm_slot { struct mm_slot slot; }; /** * struct khugepaged_scan - cursor for scanning * @mm_head: the head of the mm list to scan * @mm_slot: the current mm_slot we are scanning * @address: the next address inside that to be scanned * * There is only the one khugepaged_scan instance of this cursor structure. 
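 * The cursor is what lets khugepaged resume after sleeping or after
 * dropping mmap_lock: khugepaged_scan_mm_slot() continues from
 * (@mm_slot, @address) instead of rescanning every mm from the start.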
*/ struct khugepaged_scan { struct list_head mm_head; struct khugepaged_mm_slot *mm_slot; unsigned long address; }; static struct khugepaged_scan khugepaged_scan = { .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head), }; #ifdef CONFIG_SYSFS static ssize_t scan_sleep_millisecs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs); } static ssize_t scan_sleep_millisecs_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned int msecs; int err; err = kstrtouint(buf, 10, &msecs); if (err) return -EINVAL; khugepaged_scan_sleep_millisecs = msecs; khugepaged_sleep_expire = 0; wake_up_interruptible(&khugepaged_wait); return count; } static struct kobj_attribute scan_sleep_millisecs_attr = __ATTR_RW(scan_sleep_millisecs); static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs); } static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned int msecs; int err; err = kstrtouint(buf, 10, &msecs); if (err) return -EINVAL; khugepaged_alloc_sleep_millisecs = msecs; khugepaged_sleep_expire = 0; wake_up_interruptible(&khugepaged_wait); return count; } static struct kobj_attribute alloc_sleep_millisecs_attr = __ATTR_RW(alloc_sleep_millisecs); static ssize_t pages_to_scan_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan); } static ssize_t pages_to_scan_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned int pages; int err; err = kstrtouint(buf, 10, &pages); if (err || !pages) return -EINVAL; khugepaged_pages_to_scan = pages; return count; } static struct kobj_attribute pages_to_scan_attr = __ATTR_RW(pages_to_scan); static ssize_t pages_collapsed_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed); } static struct kobj_attribute pages_collapsed_attr = __ATTR_RO(pages_collapsed); static ssize_t full_scans_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", khugepaged_full_scans); } static struct kobj_attribute full_scans_attr = __ATTR_RO(full_scans); static ssize_t defrag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return single_hugepage_flag_show(kobj, attr, buf, TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); } static ssize_t defrag_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { return single_hugepage_flag_store(kobj, attr, buf, count, TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); } static struct kobj_attribute khugepaged_defrag_attr = __ATTR_RW(defrag); /* * max_ptes_none controls if khugepaged should collapse hugepages over * any unmapped ptes in turn potentially increasing the memory * footprint of the vmas. When max_ptes_none is 0 khugepaged will not * reduce the available free memory in the system as it * runs. Increasing max_ptes_none will instead potentially reduce the * free memory in the system during the khugepaged scan. 
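 * As a concrete example, with 4KiB base pages and 2MiB huge pages
 * (HPAGE_PMD_NR == 512), the default of HPAGE_PMD_NR - 1 == 511 set in
 * khugepaged_init() lets a range with a single populated pte be
 * collapsed, growing RSS by almost 2MiB, whereas 0 only collapses
 * fully populated ranges.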
*/ static ssize_t max_ptes_none_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none); } static ssize_t max_ptes_none_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long max_ptes_none; err = kstrtoul(buf, 10, &max_ptes_none); if (err || max_ptes_none > HPAGE_PMD_NR - 1) return -EINVAL; khugepaged_max_ptes_none = max_ptes_none; return count; } static struct kobj_attribute khugepaged_max_ptes_none_attr = __ATTR_RW(max_ptes_none); static ssize_t max_ptes_swap_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap); } static ssize_t max_ptes_swap_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long max_ptes_swap; err = kstrtoul(buf, 10, &max_ptes_swap); if (err || max_ptes_swap > HPAGE_PMD_NR - 1) return -EINVAL; khugepaged_max_ptes_swap = max_ptes_swap; return count; } static struct kobj_attribute khugepaged_max_ptes_swap_attr = __ATTR_RW(max_ptes_swap); static ssize_t max_ptes_shared_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared); } static ssize_t max_ptes_shared_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long max_ptes_shared; err = kstrtoul(buf, 10, &max_ptes_shared); if (err || max_ptes_shared > HPAGE_PMD_NR - 1) return -EINVAL; khugepaged_max_ptes_shared = max_ptes_shared; return count; } static struct kobj_attribute khugepaged_max_ptes_shared_attr = __ATTR_RW(max_ptes_shared); static struct attribute *khugepaged_attr[] = { &khugepaged_defrag_attr.attr, &khugepaged_max_ptes_none_attr.attr, &khugepaged_max_ptes_swap_attr.attr, &khugepaged_max_ptes_shared_attr.attr, &pages_to_scan_attr.attr, &pages_collapsed_attr.attr, &full_scans_attr.attr, &scan_sleep_millisecs_attr.attr, &alloc_sleep_millisecs_attr.attr, NULL, }; struct attribute_group khugepaged_attr_group = { .attrs = khugepaged_attr, .name = "khugepaged", }; #endif /* CONFIG_SYSFS */ int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) { switch (advice) { case MADV_HUGEPAGE: #ifdef CONFIG_S390 /* * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390 * can't handle this properly after s390_enable_sie, so we simply * ignore the madvise to prevent qemu from causing a SIGSEGV. */ if (mm_has_pgste(vma->vm_mm)) return 0; #endif *vm_flags &= ~VM_NOHUGEPAGE; *vm_flags |= VM_HUGEPAGE; /* * If the vma become good for khugepaged to scan, * register it here without waiting a page fault that * may not happen any time soon. */ khugepaged_enter_vma(vma, *vm_flags); break; case MADV_NOHUGEPAGE: *vm_flags &= ~VM_HUGEPAGE; *vm_flags |= VM_NOHUGEPAGE; /* * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning * this vma even if we leave the mm registered in khugepaged if * it got registered before VM_NOHUGEPAGE was set. 
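 * That is fine: the per-vma flag is re-checked via hugepage_vma_check()
 * on every scan pass, so a stale mm_slot registration only costs a
 * skipped vma, never a collapse of a VM_NOHUGEPAGE region.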
*/ break; } return 0; } int __init khugepaged_init(void) { mm_slot_cache = kmem_cache_create("khugepaged_mm_slot", sizeof(struct khugepaged_mm_slot), __alignof__(struct khugepaged_mm_slot), 0, NULL); if (!mm_slot_cache) return -ENOMEM; khugepaged_pages_to_scan = HPAGE_PMD_NR * 8; khugepaged_max_ptes_none = HPAGE_PMD_NR - 1; khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8; khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2; return 0; } void __init khugepaged_destroy(void) { kmem_cache_destroy(mm_slot_cache); } static inline int hpage_collapse_test_exit(struct mm_struct *mm) { return atomic_read(&mm->mm_users) == 0; } void __khugepaged_enter(struct mm_struct *mm) { struct khugepaged_mm_slot *mm_slot; struct mm_slot *slot; int wakeup; /* __khugepaged_exit() must not run from under us */ VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm); if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) return; mm_slot = mm_slot_alloc(mm_slot_cache); if (!mm_slot) return; slot = &mm_slot->slot; spin_lock(&khugepaged_mm_lock); mm_slot_insert(mm_slots_hash, mm, slot); /* * Insert just behind the scanning cursor, to let the area settle * down a little. */ wakeup = list_empty(&khugepaged_scan.mm_head); list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head); spin_unlock(&khugepaged_mm_lock); mmgrab(mm); if (wakeup) wake_up_interruptible(&khugepaged_wait); } void khugepaged_enter_vma(struct vm_area_struct *vma, unsigned long vm_flags) { if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) && hugepage_flags_enabled()) { if (hugepage_vma_check(vma, vm_flags, false, false, true)) __khugepaged_enter(vma->vm_mm); } } void __khugepaged_exit(struct mm_struct *mm) { struct khugepaged_mm_slot *mm_slot; struct mm_slot *slot; int free = 0; spin_lock(&khugepaged_mm_lock); slot = mm_slot_lookup(mm_slots_hash, mm); mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot); if (mm_slot && khugepaged_scan.mm_slot != mm_slot) { hash_del(&slot->hash); list_del(&slot->mm_node); free = 1; } spin_unlock(&khugepaged_mm_lock); if (free) { clear_bit(MMF_VM_HUGEPAGE, &mm->flags); mm_slot_free(mm_slot_cache, mm_slot); mmdrop(mm); } else if (mm_slot) { /* * This is required to serialize against * hpage_collapse_test_exit() (which is guaranteed to run * under mmap sem read mode). Stop here (after we return all * pagetables will be destroyed) until khugepaged has finished * working on the pagetables under the mmap_lock. 
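 * The empty mmap_write_lock()/mmap_write_unlock() pair below is a pure
 * barrier: khugepaged holds mmap_lock while it touches this mm's page
 * tables, so once the write lock is acquired here any in-flight
 * collapse has completed and the tables can safely be torn down.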
*/ mmap_write_lock(mm); mmap_write_unlock(mm); } } static void release_pte_folio(struct folio *folio) { node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio), -folio_nr_pages(folio)); folio_unlock(folio); folio_putback_lru(folio); } static void release_pte_page(struct page *page) { release_pte_folio(page_folio(page)); } static void release_pte_pages(pte_t *pte, pte_t *_pte, struct list_head *compound_pagelist) { struct folio *folio, *tmp; while (--_pte >= pte) { pte_t pteval = ptep_get(_pte); unsigned long pfn; if (pte_none(pteval)) continue; pfn = pte_pfn(pteval); if (is_zero_pfn(pfn)) continue; folio = pfn_folio(pfn); if (folio_test_large(folio)) continue; release_pte_folio(folio); } list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) { list_del(&folio->lru); release_pte_folio(folio); } } static bool is_refcount_suitable(struct page *page) { int expected_refcount; expected_refcount = total_mapcount(page); if (PageSwapCache(page)) expected_refcount += compound_nr(page); return page_count(page) == expected_refcount; } static int __collapse_huge_page_isolate(struct vm_area_struct *vma, unsigned long address, pte_t *pte, struct collapse_control *cc, struct list_head *compound_pagelist) { struct page *page = NULL; pte_t *_pte; int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0; bool writable = false; for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++, address += PAGE_SIZE) { pte_t pteval = ptep_get(_pte); if (pte_none(pteval) || (pte_present(pteval) && is_zero_pfn(pte_pfn(pteval)))) { ++none_or_zero; if (!userfaultfd_armed(vma) && (!cc->is_khugepaged || none_or_zero <= khugepaged_max_ptes_none)) { continue; } else { result = SCAN_EXCEED_NONE_PTE; count_vm_event(THP_SCAN_EXCEED_NONE_PTE); goto out; } } if (!pte_present(pteval)) { result = SCAN_PTE_NON_PRESENT; goto out; } if (pte_uffd_wp(pteval)) { result = SCAN_PTE_UFFD_WP; goto out; } page = vm_normal_page(vma, address, pteval); if (unlikely(!page) || unlikely(is_zone_device_page(page))) { result = SCAN_PAGE_NULL; goto out; } VM_BUG_ON_PAGE(!PageAnon(page), page); if (page_mapcount(page) > 1) { ++shared; if (cc->is_khugepaged && shared > khugepaged_max_ptes_shared) { result = SCAN_EXCEED_SHARED_PTE; count_vm_event(THP_SCAN_EXCEED_SHARED_PTE); goto out; } } if (PageCompound(page)) { struct page *p; page = compound_head(page); /* * Check if we have dealt with the compound page * already */ list_for_each_entry(p, compound_pagelist, lru) { if (page == p) goto next; } } /* * We can do it before isolate_lru_page because the * page can't be freed from under us. NOTE: PG_lock * is needed to serialize against split_huge_page * when invoked from the VM. */ if (!trylock_page(page)) { result = SCAN_PAGE_LOCK; goto out; } /* * Check if the page has any GUP (or other external) pins. * * The page table that maps the page has been already unlinked * from the page table tree and this process cannot get * an additional pin on the page. * * New pins can come later if the page is shared across fork, * but not from this process. The other process cannot write to * the page, only trigger CoW. */ if (!is_refcount_suitable(page)) { unlock_page(page); result = SCAN_PAGE_COUNT; goto out; } /* * Isolate the page to avoid collapsing an hugepage * currently in use by the VM. 
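 * Taking the page off the LRU also stops reclaim and migration from
 * grabbing it while its contents are copied; on failure the pages are
 * put back via release_pte_pages().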
*/ if (!isolate_lru_page(page)) { unlock_page(page); result = SCAN_DEL_PAGE_LRU; goto out; } mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page), compound_nr(page)); VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(PageLRU(page), page); if (PageCompound(page)) list_add_tail(&page->lru, compound_pagelist); next: /* * If collapse was initiated by khugepaged, check that there is * enough young pte to justify collapsing the page */ if (cc->is_khugepaged && (pte_young(pteval) || page_is_young(page) || PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, address))) referenced++; if (pte_write(pteval)) writable = true; } if (unlikely(!writable)) { result = SCAN_PAGE_RO; } else if (unlikely(cc->is_khugepaged && !referenced)) { result = SCAN_LACK_REFERENCED_PAGE; } else { result = SCAN_SUCCEED; trace_mm_collapse_huge_page_isolate(page, none_or_zero, referenced, writable, result); return result; } out: release_pte_pages(pte, _pte, compound_pagelist); trace_mm_collapse_huge_page_isolate(page, none_or_zero, referenced, writable, result); return result; } static void __collapse_huge_page_copy_succeeded(pte_t *pte, struct vm_area_struct *vma, unsigned long address, spinlock_t *ptl, struct list_head *compound_pagelist) { struct page *src_page; struct page *tmp; pte_t *_pte; pte_t pteval; for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++, address += PAGE_SIZE) { pteval = ptep_get(_pte); if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); if (is_zero_pfn(pte_pfn(pteval))) { /* * ptl mostly unnecessary. */ spin_lock(ptl); ptep_clear(vma->vm_mm, address, _pte); spin_unlock(ptl); ksm_might_unmap_zero_page(vma->vm_mm, pteval); } } else { src_page = pte_page(pteval); if (!PageCompound(src_page)) release_pte_page(src_page); /* * ptl mostly unnecessary, but preempt has to * be disabled to update the per-cpu stats * inside page_remove_rmap(). */ spin_lock(ptl); ptep_clear(vma->vm_mm, address, _pte); page_remove_rmap(src_page, vma, false); spin_unlock(ptl); free_page_and_swap_cache(src_page); } } list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) { list_del(&src_page->lru); mod_node_page_state(page_pgdat(src_page), NR_ISOLATED_ANON + page_is_file_lru(src_page), -compound_nr(src_page)); unlock_page(src_page); free_swap_cache(src_page); putback_lru_page(src_page); } } static void __collapse_huge_page_copy_failed(pte_t *pte, pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma, struct list_head *compound_pagelist) { spinlock_t *pmd_ptl; /* * Re-establish the PMD to point to the original page table * entry. Restoring PMD needs to be done prior to releasing * pages. Since pages are still isolated and locked here, * acquiring anon_vma_lock_write is unnecessary. */ pmd_ptl = pmd_lock(vma->vm_mm, pmd); pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd)); spin_unlock(pmd_ptl); /* * Release both raw and compound pages isolated * in __collapse_huge_page_isolate. */ release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist); } /* * __collapse_huge_page_copy - attempts to copy memory contents from raw * pages to a hugepage. Cleans up the raw pages if copying succeeds; * otherwise restores the original page table and releases isolated raw pages. * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC. 
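 * The copy uses copy_mc_user_highpage(), so hitting a hardware-poisoned
 * source page aborts the collapse with SCAN_COPY_MC instead of
 * propagating the poison into the new hugepage.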
* * @pte: starting of the PTEs to copy from * @page: the new hugepage to copy contents to * @pmd: pointer to the new hugepage's PMD * @orig_pmd: the original raw pages' PMD * @vma: the original raw pages' virtual memory area * @address: starting address to copy * @ptl: lock on raw pages' PTEs * @compound_pagelist: list that stores compound pages */ static int __collapse_huge_page_copy(pte_t *pte, struct page *page, pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma, unsigned long address, spinlock_t *ptl, struct list_head *compound_pagelist) { struct page *src_page; pte_t *_pte; pte_t pteval; unsigned long _address; int result = SCAN_SUCCEED; /* * Copying pages' contents is subject to memory poison at any iteration. */ for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR; _pte++, page++, _address += PAGE_SIZE) { pteval = ptep_get(_pte); if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { clear_user_highpage(page, _address); continue; } src_page = pte_page(pteval); if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) { result = SCAN_COPY_MC; break; } } if (likely(result == SCAN_SUCCEED)) __collapse_huge_page_copy_succeeded(pte, vma, address, ptl, compound_pagelist); else __collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma, compound_pagelist); return result; } static void khugepaged_alloc_sleep(void) { DEFINE_WAIT(wait); add_wait_queue(&khugepaged_wait, &wait); __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); remove_wait_queue(&khugepaged_wait, &wait); } struct collapse_control khugepaged_collapse_control = { .is_khugepaged = true, }; static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc) { int i; /* * If node_reclaim_mode is disabled, then no extra effort is made to * allocate memory locally. */ if (!node_reclaim_enabled()) return false; /* If there is a count for this node already, it must be acceptable */ if (cc->node_load[nid]) return false; for (i = 0; i < MAX_NUMNODES; i++) { if (!cc->node_load[i]) continue; if (node_distance(nid, i) > node_reclaim_distance) return true; } return false; } #define khugepaged_defrag() \ (transparent_hugepage_flags & \ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)) /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */ static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void) { return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT; } #ifdef CONFIG_NUMA static int hpage_collapse_find_target_node(struct collapse_control *cc) { int nid, target_node = 0, max_value = 0; /* find first node with max normal pages hit */ for (nid = 0; nid < MAX_NUMNODES; nid++) if (cc->node_load[nid] > max_value) { max_value = cc->node_load[nid]; target_node = nid; } for_each_online_node(nid) { if (max_value == cc->node_load[nid]) node_set(nid, cc->alloc_nmask); } return target_node; } #else static int hpage_collapse_find_target_node(struct collapse_control *cc) { return 0; } #endif static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node, nodemask_t *nmask) { *hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask); if (unlikely(!*hpage)) { count_vm_event(THP_COLLAPSE_ALLOC_FAILED); return false; } folio_prep_large_rmappable((struct folio *)*hpage); count_vm_event(THP_COLLAPSE_ALLOC); return true; } /* * If mmap_lock temporarily dropped, revalidate vma * before taking mmap_lock. * Returns enum scan_result value. 
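 * The caller must hold mmap_lock again (read or write) when calling
 * this, and must use the vma returned in *vmap rather than any pointer
 * cached from before the lock was dropped.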
*/ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, bool expect_anon, struct vm_area_struct **vmap, struct collapse_control *cc) { struct vm_area_struct *vma; if (unlikely(hpage_collapse_test_exit(mm))) return SCAN_ANY_PROCESS; *vmap = vma = find_vma(mm, address); if (!vma) return SCAN_VMA_NULL; if (!transhuge_vma_suitable(vma, address)) return SCAN_ADDRESS_RANGE; if (!hugepage_vma_check(vma, vma->vm_flags, false, false, cc->is_khugepaged)) return SCAN_VMA_CHECK; /* * Anon VMA expected, the address may be unmapped then * remapped to file after khugepaged reaquired the mmap_lock. * * hugepage_vma_check may return true for qualified file * vmas. */ if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap))) return SCAN_PAGE_ANON; return SCAN_SUCCEED; } static int find_pmd_or_thp_or_none(struct mm_struct *mm, unsigned long address, pmd_t **pmd) { pmd_t pmde; *pmd = mm_find_pmd(mm, address); if (!*pmd) return SCAN_PMD_NULL; pmde = pmdp_get_lockless(*pmd); if (pmd_none(pmde)) return SCAN_PMD_NONE; if (!pmd_present(pmde)) return SCAN_PMD_NULL; if (pmd_trans_huge(pmde)) return SCAN_PMD_MAPPED; if (pmd_devmap(pmde)) return SCAN_PMD_NULL; if (pmd_bad(pmde)) return SCAN_PMD_NULL; return SCAN_SUCCEED; } static int check_pmd_still_valid(struct mm_struct *mm, unsigned long address, pmd_t *pmd) { pmd_t *new_pmd; int result = find_pmd_or_thp_or_none(mm, address, &new_pmd); if (result != SCAN_SUCCEED) return result; if (new_pmd != pmd) return SCAN_FAIL; return SCAN_SUCCEED; } /* * Bring missing pages in from swap, to complete THP collapse. * Only done if hpage_collapse_scan_pmd believes it is worthwhile. * * Called and returns without pte mapped or spinlocks held. * Returns result: if not SCAN_SUCCEED, mmap_lock has been released. */ static int __collapse_huge_page_swapin(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, int referenced) { int swapped_in = 0; vm_fault_t ret = 0; unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE); int result; pte_t *pte = NULL; spinlock_t *ptl; for (address = haddr; address < end; address += PAGE_SIZE) { struct vm_fault vmf = { .vma = vma, .address = address, .pgoff = linear_page_index(vma, address), .flags = FAULT_FLAG_ALLOW_RETRY, .pmd = pmd, }; if (!pte++) { pte = pte_offset_map_nolock(mm, pmd, address, &ptl); if (!pte) { mmap_read_unlock(mm); result = SCAN_PMD_NULL; goto out; } } vmf.orig_pte = ptep_get_lockless(pte); if (!is_swap_pte(vmf.orig_pte)) continue; vmf.pte = pte; vmf.ptl = ptl; ret = do_swap_page(&vmf); /* Which unmaps pte (after perhaps re-checking the entry) */ pte = NULL; /* * do_swap_page returns VM_FAULT_RETRY with released mmap_lock. * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because * we do not retry here and swap entry will remain in pagetable * resulting in later failure. */ if (ret & VM_FAULT_RETRY) { /* Likely, but not guaranteed, that page lock failed */ result = SCAN_PAGE_LOCK; goto out; } if (ret & VM_FAULT_ERROR) { mmap_read_unlock(mm); result = SCAN_FAIL; goto out; } swapped_in++; } if (pte) pte_unmap(pte); /* Drain LRU cache to remove extra pin on the swapped in pages */ if (swapped_in) lru_add_drain(); result = SCAN_SUCCEED; out: trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, result); return result; } static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm, struct collapse_control *cc) { gfp_t gfp = (cc->is_khugepaged ? 
alloc_hugepage_khugepaged_gfpmask() : GFP_TRANSHUGE); int node = hpage_collapse_find_target_node(cc); struct folio *folio; if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask)) return SCAN_ALLOC_HUGE_PAGE_FAIL; folio = page_folio(*hpage); if (unlikely(mem_cgroup_charge(folio, mm, gfp))) { folio_put(folio); *hpage = NULL; return SCAN_CGROUP_CHARGE_FAIL; } count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC); return SCAN_SUCCEED; } static int collapse_huge_page(struct mm_struct *mm, unsigned long address, int referenced, int unmapped, struct collapse_control *cc) { LIST_HEAD(compound_pagelist); pmd_t *pmd, _pmd; pte_t *pte; pgtable_t pgtable; struct page *hpage; spinlock_t *pmd_ptl, *pte_ptl; int result = SCAN_FAIL; struct vm_area_struct *vma; struct mmu_notifier_range range; VM_BUG_ON(address & ~HPAGE_PMD_MASK); /* * Before allocating the hugepage, release the mmap_lock read lock. * The allocation can take potentially a long time if it involves * sync compaction, and we do not need to hold the mmap_lock during * that. We will recheck the vma after taking it again in write mode. */ mmap_read_unlock(mm); result = alloc_charge_hpage(&hpage, mm, cc); if (result != SCAN_SUCCEED) goto out_nolock; mmap_read_lock(mm); result = hugepage_vma_revalidate(mm, address, true, &vma, cc); if (result != SCAN_SUCCEED) { mmap_read_unlock(mm); goto out_nolock; } result = find_pmd_or_thp_or_none(mm, address, &pmd); if (result != SCAN_SUCCEED) { mmap_read_unlock(mm); goto out_nolock; } if (unmapped) { /* * __collapse_huge_page_swapin will return with mmap_lock * released when it fails. So we jump out_nolock directly in * that case. Continuing to collapse causes inconsistency. */ result = __collapse_huge_page_swapin(mm, vma, address, pmd, referenced); if (result != SCAN_SUCCEED) goto out_nolock; } mmap_read_unlock(mm); /* * Prevent all access to pagetables with the exception of * gup_fast later handled by the ptep_clear_flush and the VM * handled by the anon_vma lock + PG_lock. */ mmap_write_lock(mm); result = hugepage_vma_revalidate(mm, address, true, &vma, cc); if (result != SCAN_SUCCEED) goto out_up_write; /* check if the pmd is still valid */ result = check_pmd_still_valid(mm, address, pmd); if (result != SCAN_SUCCEED) goto out_up_write; vma_start_write(vma); anon_vma_lock_write(vma->anon_vma); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address, address + HPAGE_PMD_SIZE); mmu_notifier_invalidate_range_start(&range); pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ /* * This removes any huge TLB entry from the CPU so we won't allow * huge and small TLB entries for the same virtual address to * avoid the risk of CPU bugs in that area. * * Parallel fast GUP is fine since fast GUP will back off when * it detects PMD is changed. */ _pmd = pmdp_collapse_flush(vma, address, pmd); spin_unlock(pmd_ptl); mmu_notifier_invalidate_range_end(&range); tlb_remove_table_sync_one(); pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl); if (pte) { result = __collapse_huge_page_isolate(vma, address, pte, cc, &compound_pagelist); spin_unlock(pte_ptl); } else { result = SCAN_PMD_NULL; } if (unlikely(result != SCAN_SUCCEED)) { if (pte) pte_unmap(pte); spin_lock(pmd_ptl); BUG_ON(!pmd_none(*pmd)); /* * We can only use set_pmd_at when establishing * hugepmds and never for establishing regular pmds that * points to regular pagetables. 
Use pmd_populate for that */ pmd_populate(mm, pmd, pmd_pgtable(_pmd)); spin_unlock(pmd_ptl); anon_vma_unlock_write(vma->anon_vma); goto out_up_write; } /* * All pages are isolated and locked so anon_vma rmap * can't run anymore. */ anon_vma_unlock_write(vma->anon_vma); result = __collapse_huge_page_copy(pte, hpage, pmd, _pmd, vma, address, pte_ptl, &compound_pagelist); pte_unmap(pte); if (unlikely(result != SCAN_SUCCEED)) goto out_up_write; /* * spin_lock() below is not the equivalent of smp_wmb(), but * the smp_wmb() inside __SetPageUptodate() can be reused to * avoid the copy_huge_page writes to become visible after * the set_pmd_at() write. */ __SetPageUptodate(hpage); pgtable = pmd_pgtable(_pmd); _pmd = mk_huge_pmd(hpage, vma->vm_page_prot); _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); spin_lock(pmd_ptl); BUG_ON(!pmd_none(*pmd)); page_add_new_anon_rmap(hpage, vma, address); lru_cache_add_inactive_or_unevictable(hpage, vma); pgtable_trans_huge_deposit(mm, pmd, pgtable); set_pmd_at(mm, address, pmd, _pmd); update_mmu_cache_pmd(vma, address, pmd); spin_unlock(pmd_ptl); hpage = NULL; result = SCAN_SUCCEED; out_up_write: mmap_write_unlock(mm); out_nolock: if (hpage) put_page(hpage); trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result); return result; } static int hpage_collapse_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, bool *mmap_locked, struct collapse_control *cc) { pmd_t *pmd; pte_t *pte, *_pte; int result = SCAN_FAIL, referenced = 0; int none_or_zero = 0, shared = 0; struct page *page = NULL; unsigned long _address; spinlock_t *ptl; int node = NUMA_NO_NODE, unmapped = 0; bool writable = false; VM_BUG_ON(address & ~HPAGE_PMD_MASK); result = find_pmd_or_thp_or_none(mm, address, &pmd); if (result != SCAN_SUCCEED) goto out; memset(cc->node_load, 0, sizeof(cc->node_load)); nodes_clear(cc->alloc_nmask); pte = pte_offset_map_lock(mm, pmd, address, &ptl); if (!pte) { result = SCAN_PMD_NULL; goto out; } for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++, _address += PAGE_SIZE) { pte_t pteval = ptep_get(_pte); if (is_swap_pte(pteval)) { ++unmapped; if (!cc->is_khugepaged || unmapped <= khugepaged_max_ptes_swap) { /* * Always be strict with uffd-wp * enabled swap entries. Please see * comment below for pte_uffd_wp(). */ if (pte_swp_uffd_wp_any(pteval)) { result = SCAN_PTE_UFFD_WP; goto out_unmap; } continue; } else { result = SCAN_EXCEED_SWAP_PTE; count_vm_event(THP_SCAN_EXCEED_SWAP_PTE); goto out_unmap; } } if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { ++none_or_zero; if (!userfaultfd_armed(vma) && (!cc->is_khugepaged || none_or_zero <= khugepaged_max_ptes_none)) { continue; } else { result = SCAN_EXCEED_NONE_PTE; count_vm_event(THP_SCAN_EXCEED_NONE_PTE); goto out_unmap; } } if (pte_uffd_wp(pteval)) { /* * Don't collapse the page if any of the small * PTEs are armed with uffd write protection. * Here we can also mark the new huge pmd as * write protected if any of the small ones is * marked but that could bring unknown * userfault messages that falls outside of * the registered range. So, just be simple. 
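 * Bailing out with SCAN_PTE_UFFD_WP leaves the write-protect markers in
 * place; the range simply stays mapped by small ptes.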
*/ result = SCAN_PTE_UFFD_WP; goto out_unmap; } if (pte_write(pteval)) writable = true; page = vm_normal_page(vma, _address, pteval); if (unlikely(!page) || unlikely(is_zone_device_page(page))) { result = SCAN_PAGE_NULL; goto out_unmap; } if (page_mapcount(page) > 1) { ++shared; if (cc->is_khugepaged && shared > khugepaged_max_ptes_shared) { result = SCAN_EXCEED_SHARED_PTE; count_vm_event(THP_SCAN_EXCEED_SHARED_PTE); goto out_unmap; } } page = compound_head(page); /* * Record which node the original page is from and save this * information to cc->node_load[]. * Khugepaged will allocate hugepage from the node has the max * hit record. */ node = page_to_nid(page); if (hpage_collapse_scan_abort(node, cc)) { result = SCAN_SCAN_ABORT; goto out_unmap; } cc->node_load[node]++; if (!PageLRU(page)) { result = SCAN_PAGE_LRU; goto out_unmap; } if (PageLocked(page)) { result = SCAN_PAGE_LOCK; goto out_unmap; } if (!PageAnon(page)) { result = SCAN_PAGE_ANON; goto out_unmap; } /* * Check if the page has any GUP (or other external) pins. * * Here the check may be racy: * it may see total_mapcount > refcount in some cases? * But such case is ephemeral we could always retry collapse * later. However it may report false positive if the page * has excessive GUP pins (i.e. 512). Anyway the same check * will be done again later the risk seems low. */ if (!is_refcount_suitable(page)) { result = SCAN_PAGE_COUNT; goto out_unmap; } /* * If collapse was initiated by khugepaged, check that there is * enough young pte to justify collapsing the page */ if (cc->is_khugepaged && (pte_young(pteval) || page_is_young(page) || PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, address))) referenced++; } if (!writable) { result = SCAN_PAGE_RO; } else if (cc->is_khugepaged && (!referenced || (unmapped && referenced < HPAGE_PMD_NR / 2))) { result = SCAN_LACK_REFERENCED_PAGE; } else { result = SCAN_SUCCEED; } out_unmap: pte_unmap_unlock(pte, ptl); if (result == SCAN_SUCCEED) { result = collapse_huge_page(mm, address, referenced, unmapped, cc); /* collapse_huge_page will return with the mmap_lock released */ *mmap_locked = false; } out: trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, none_or_zero, result, unmapped); return result; } static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot) { struct mm_slot *slot = &mm_slot->slot; struct mm_struct *mm = slot->mm; lockdep_assert_held(&khugepaged_mm_lock); if (hpage_collapse_test_exit(mm)) { /* free mm_slot */ hash_del(&slot->hash); list_del(&slot->mm_node); /* * Not strictly needed because the mm exited already. * * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); */ /* khugepaged_mm_lock actually not necessary for the below */ mm_slot_free(mm_slot_cache, mm_slot); mmdrop(mm); } } #ifdef CONFIG_SHMEM /* hpage must be locked, and mmap_lock must be held */ static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp, struct page *hpage) { struct vm_fault vmf = { .vma = vma, .address = addr, .flags = 0, .pmd = pmdp, }; VM_BUG_ON(!PageTransHuge(hpage)); mmap_assert_locked(vma->vm_mm); if (do_set_pmd(&vmf, hpage)) return SCAN_FAIL; get_page(hpage); return SCAN_SUCCEED; } /** * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at * address haddr. * * @mm: process address space where collapse happens * @addr: THP collapse address * @install_pmd: If a huge PMD should be installed * * This function checks whether all the PTEs in the PMD are pointing to the * right THP. 
If so, retract the page table so the THP can refault in with * as pmd-mapped. Possibly install a huge PMD mapping the THP. */ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, bool install_pmd) { struct mmu_notifier_range range; bool notified = false; unsigned long haddr = addr & HPAGE_PMD_MASK; struct vm_area_struct *vma = vma_lookup(mm, haddr); struct page *hpage; pte_t *start_pte, *pte; pmd_t *pmd, pgt_pmd; spinlock_t *pml = NULL, *ptl; int nr_ptes = 0, result = SCAN_FAIL; int i; mmap_assert_locked(mm); /* First check VMA found, in case page tables are being torn down */ if (!vma || !vma->vm_file || !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE)) return SCAN_VMA_CHECK; /* Fast check before locking page if already PMD-mapped */ result = find_pmd_or_thp_or_none(mm, haddr, &pmd); if (result == SCAN_PMD_MAPPED) return result; /* * If we are here, we've succeeded in replacing all the native pages * in the page cache with a single hugepage. If a mm were to fault-in * this memory (mapped by a suitably aligned VMA), we'd get the hugepage * and map it by a PMD, regardless of sysfs THP settings. As such, let's * analogously elide sysfs THP settings here. */ if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false)) return SCAN_VMA_CHECK; /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */ if (userfaultfd_wp(vma)) return SCAN_PTE_UFFD_WP; hpage = find_lock_page(vma->vm_file->f_mapping, linear_page_index(vma, haddr)); if (!hpage) return SCAN_PAGE_NULL; if (!PageHead(hpage)) { result = SCAN_FAIL; goto drop_hpage; } if (compound_order(hpage) != HPAGE_PMD_ORDER) { result = SCAN_PAGE_COMPOUND; goto drop_hpage; } result = find_pmd_or_thp_or_none(mm, haddr, &pmd); switch (result) { case SCAN_SUCCEED: break; case SCAN_PMD_NONE: /* * All pte entries have been removed and pmd cleared. * Skip all the pte checks and just update the pmd mapping. */ goto maybe_install_pmd; default: goto drop_hpage; } result = SCAN_FAIL; start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl); if (!start_pte) /* mmap_lock + page lock should prevent this */ goto drop_hpage; /* step 1: check all mapped PTEs are to the right huge page */ for (i = 0, addr = haddr, pte = start_pte; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) { struct page *page; pte_t ptent = ptep_get(pte); /* empty pte, skip */ if (pte_none(ptent)) continue; /* page swapped out, abort */ if (!pte_present(ptent)) { result = SCAN_PTE_NON_PRESENT; goto abort; } page = vm_normal_page(vma, addr, ptent); if (WARN_ON_ONCE(page && is_zone_device_page(page))) page = NULL; /* * Note that uprobe, debugger, or MAP_PRIVATE may change the * page table, but the new page will not be a subpage of hpage. */ if (hpage + i != page) goto abort; } pte_unmap_unlock(start_pte, ptl); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr, haddr + HPAGE_PMD_SIZE); mmu_notifier_invalidate_range_start(&range); notified = true; /* * pmd_lock covers a wider range than ptl, and (if split from mm's * page_table_lock) ptl nests inside pml. The less time we hold pml, * the better; but userfaultfd's mfill_atomic_pte() on a private VMA * inserts a valid as-if-COWed PTE without even looking up page cache. * So page lock of hpage does not protect from it, so we must not drop * ptl before pgt_pmd is removed, so uffd private needs pml taken now. 
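 * Hence the asymmetry below: for a uffd-armed private vma, pml is taken
 * before the ptes are cleared and held through the page table removal;
 * otherwise pml is only taken at step 4, just to retract the pgtable.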
*/ if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED)) pml = pmd_lock(mm, pmd); start_pte = pte_offset_map_nolock(mm, pmd, haddr, &ptl); if (!start_pte) /* mmap_lock + page lock should prevent this */ goto abort; if (!pml) spin_lock(ptl); else if (ptl != pml) spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); /* step 2: clear page table and adjust rmap */ for (i = 0, addr = haddr, pte = start_pte; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) { struct page *page; pte_t ptent = ptep_get(pte); if (pte_none(ptent)) continue; /* * We dropped ptl after the first scan, to do the mmu_notifier: * page lock stops more PTEs of the hpage being faulted in, but * does not stop write faults COWing anon copies from existing * PTEs; and does not stop those being swapped out or migrated. */ if (!pte_present(ptent)) { result = SCAN_PTE_NON_PRESENT; goto abort; } page = vm_normal_page(vma, addr, ptent); if (hpage + i != page) goto abort; /* * Must clear entry, or a racing truncate may re-remove it. * TLB flush can be left until pmdp_collapse_flush() does it. * PTE dirty? Shmem page is already dirty; file is read-only. */ ptep_clear(mm, addr, pte); page_remove_rmap(page, vma, false); nr_ptes++; } pte_unmap(start_pte); if (!pml) spin_unlock(ptl); /* step 3: set proper refcount and mm_counters. */ if (nr_ptes) { page_ref_sub(hpage, nr_ptes); add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes); } /* step 4: remove empty page table */ if (!pml) { pml = pmd_lock(mm, pmd); if (ptl != pml) spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); } pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd); pmdp_get_lockless_sync(); if (ptl != pml) spin_unlock(ptl); spin_unlock(pml); mmu_notifier_invalidate_range_end(&range); mm_dec_nr_ptes(mm); page_table_check_pte_clear_range(mm, haddr, pgt_pmd); pte_free_defer(mm, pmd_pgtable(pgt_pmd)); maybe_install_pmd: /* step 5: install pmd entry */ result = install_pmd ? set_huge_pmd(vma, haddr, pmd, hpage) : SCAN_SUCCEED; goto drop_hpage; abort: if (nr_ptes) { flush_tlb_mm(mm); page_ref_sub(hpage, nr_ptes); add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes); } if (start_pte) pte_unmap_unlock(start_pte, ptl); if (pml && pml != ptl) spin_unlock(pml); if (notified) mmu_notifier_invalidate_range_end(&range); drop_hpage: unlock_page(hpage); put_page(hpage); return result; } static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) { struct vm_area_struct *vma; i_mmap_lock_read(mapping); vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { struct mmu_notifier_range range; struct mm_struct *mm; unsigned long addr; pmd_t *pmd, pgt_pmd; spinlock_t *pml; spinlock_t *ptl; bool skipped_uffd = false; /* * Check vma->anon_vma to exclude MAP_PRIVATE mappings that * got written to. These VMAs are likely not worth removing * page tables from, as PMD-mapping is likely to be split later. */ if (READ_ONCE(vma->anon_vma)) continue; addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); if (addr & ~HPAGE_PMD_MASK || vma->vm_end < addr + HPAGE_PMD_SIZE) continue; mm = vma->vm_mm; if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED) continue; if (hpage_collapse_test_exit(mm)) continue; /* * When a vma is registered with uffd-wp, we cannot recycle * the page table because there may be pte markers installed. * Other vmas can still have the same file mapped hugely, but * skip this one: it will always be mapped in small page size * for uffd-wp registered ranges. */ if (userfaultfd_wp(vma)) continue; /* PTEs were notified when unmapped; but now for the PMD? 
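 * Yes: notify again for the whole PMD range, since retracting the page
 * table below is itself a change that secondary MMUs must observe
 * before the pgtable page is freed.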
*/ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr, addr + HPAGE_PMD_SIZE); mmu_notifier_invalidate_range_start(&range); pml = pmd_lock(mm, pmd); ptl = pte_lockptr(mm, pmd); if (ptl != pml) spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); /* * Huge page lock is still held, so normally the page table * must remain empty; and we have already skipped anon_vma * and userfaultfd_wp() vmas. But since the mmap_lock is not * held, it is still possible for a racing userfaultfd_ioctl() * to have inserted ptes or markers. Now that we hold ptlock, * repeating the anon_vma check protects from one category, * and repeating the userfaultfd_wp() check from another. */ if (unlikely(vma->anon_vma || userfaultfd_wp(vma))) { skipped_uffd = true; } else { pgt_pmd = pmdp_collapse_flush(vma, addr, pmd); pmdp_get_lockless_sync(); } if (ptl != pml) spin_unlock(ptl); spin_unlock(pml); mmu_notifier_invalidate_range_end(&range); if (!skipped_uffd) { mm_dec_nr_ptes(mm); page_table_check_pte_clear_range(mm, addr, pgt_pmd); pte_free_defer(mm, pmd_pgtable(pgt_pmd)); } } i_mmap_unlock_read(mapping); } /** * collapse_file - collapse filemap/tmpfs/shmem pages into huge one. * * @mm: process address space where collapse happens * @addr: virtual collapse start address * @file: file that collapse on * @start: collapse start address * @cc: collapse context and scratchpad * * Basic scheme is simple, details are more complex: * - allocate and lock a new huge page; * - scan page cache, locking old pages * + swap/gup in pages if necessary; * - copy data to new page * - handle shmem holes * + re-validate that holes weren't filled by someone else * + check for userfaultfd * - finalize updates to the page cache; * - if replacing succeeds: * + unlock huge page; * + free old pages; * - if replacing failed; * + unlock old pages * + unlock and free huge page; */ static int collapse_file(struct mm_struct *mm, unsigned long addr, struct file *file, pgoff_t start, struct collapse_control *cc) { struct address_space *mapping = file->f_mapping; struct page *hpage; struct page *page; struct page *tmp; struct folio *folio; pgoff_t index = 0, end = start + HPAGE_PMD_NR; LIST_HEAD(pagelist); XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER); int nr_none = 0, result = SCAN_SUCCEED; bool is_shmem = shmem_file(file); int nr = 0; VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem); VM_BUG_ON(start & (HPAGE_PMD_NR - 1)); result = alloc_charge_hpage(&hpage, mm, cc); if (result != SCAN_SUCCEED) goto out; __SetPageLocked(hpage); if (is_shmem) __SetPageSwapBacked(hpage); hpage->index = start; hpage->mapping = mapping; /* * Ensure we have slots for all the pages in the range. This is * almost certainly a no-op because most of the pages must be present */ do { xas_lock_irq(&xas); xas_create_range(&xas); if (!xas_error(&xas)) break; xas_unlock_irq(&xas); if (!xas_nomem(&xas, GFP_KERNEL)) { result = SCAN_FAIL; goto rollback; } } while (1); for (index = start; index < end; index++) { xas_set(&xas, index); page = xas_load(&xas); VM_BUG_ON(index != xas.xa_index); if (is_shmem) { if (!page) { /* * Stop if extent has been truncated or * hole-punched, and is now completely * empty. 
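 * This check only needs to run once, on the first missing index: if
 * xas_next_entry() finds nothing at all in [start, end - 1], the whole
 * extent is gone and the collapse aborts with SCAN_TRUNCATED.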
*/ if (index == start) { if (!xas_next_entry(&xas, end - 1)) { result = SCAN_TRUNCATED; goto xa_locked; } } nr_none++; continue; } if (xa_is_value(page) || !PageUptodate(page)) { xas_unlock_irq(&xas); /* swap in or instantiate fallocated page */ if (shmem_get_folio(mapping->host, index, &folio, SGP_NOALLOC)) { result = SCAN_FAIL; goto xa_unlocked; } /* drain lru cache to help isolate_lru_page() */ lru_add_drain(); page = folio_file_page(folio, index); } else if (trylock_page(page)) { get_page(page); xas_unlock_irq(&xas); } else { result = SCAN_PAGE_LOCK; goto xa_locked; } } else { /* !is_shmem */ if (!page || xa_is_value(page)) { xas_unlock_irq(&xas); page_cache_sync_readahead(mapping, &file->f_ra, file, index, end - index); /* drain lru cache to help isolate_lru_page() */ lru_add_drain(); page = find_lock_page(mapping, index); if (unlikely(page == NULL)) { result = SCAN_FAIL; goto xa_unlocked; } } else if (PageDirty(page)) { /* * khugepaged only works on read-only fd, * so this page is dirty because it hasn't * been flushed since first write. There * won't be new dirty pages. * * Trigger async flush here and hope the * writeback is done when khugepaged * revisits this page. * * This is a one-off situation. We are not * forcing writeback in loop. */ xas_unlock_irq(&xas); filemap_flush(mapping); result = SCAN_FAIL; goto xa_unlocked; } else if (PageWriteback(page)) { xas_unlock_irq(&xas); result = SCAN_FAIL; goto xa_unlocked; } else if (trylock_page(page)) { get_page(page); xas_unlock_irq(&xas); } else { result = SCAN_PAGE_LOCK; goto xa_locked; } } /* * The page must be locked, so we can drop the i_pages lock * without racing with truncate. */ VM_BUG_ON_PAGE(!PageLocked(page), page); /* make sure the page is up to date */ if (unlikely(!PageUptodate(page))) { result = SCAN_FAIL; goto out_unlock; } /* * If file was truncated then extended, or hole-punched, before * we locked the first page, then a THP might be there already. * This will be discovered on the first iteration. */ if (PageTransCompound(page)) { struct page *head = compound_head(page); result = compound_order(head) == HPAGE_PMD_ORDER && head->index == start /* Maybe PMD-mapped */ ? SCAN_PTE_MAPPED_HUGEPAGE : SCAN_PAGE_COMPOUND; goto out_unlock; } folio = page_folio(page); if (folio_mapping(folio) != mapping) { result = SCAN_TRUNCATED; goto out_unlock; } if (!is_shmem && (folio_test_dirty(folio) || folio_test_writeback(folio))) { /* * khugepaged only works on read-only fd, so this * page is dirty because it hasn't been flushed * since first write. */ result = SCAN_FAIL; goto out_unlock; } if (!folio_isolate_lru(folio)) { result = SCAN_DEL_PAGE_LRU; goto out_unlock; } if (!filemap_release_folio(folio, GFP_KERNEL)) { result = SCAN_PAGE_HAS_PRIVATE; folio_putback_lru(folio); goto out_unlock; } if (folio_mapped(folio)) try_to_unmap(folio, TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH); xas_lock_irq(&xas); VM_BUG_ON_PAGE(page != xa_load(xas.xa, index), page); /* * We control three references to the page: * - we hold a pin on it; * - one reference from page cache; * - one from isolate_lru_page; * If those are the only references, then any new usage of the * page will have to fetch it from the page cache. That requires * locking the page to handle truncate, so any new usage will be * blocked until we unlock page after collapse/during rollback. */ if (page_count(page) != 3) { result = SCAN_PAGE_COUNT; xas_unlock_irq(&xas); putback_lru_page(page); goto out_unlock; } /* * Accumulate the pages that are being collapsed. 
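 * Every page on @pagelist at this point is locked, off the LRU and
 * pinned; it is copied into the new hugepage once the whole range has
 * been validated, or unlocked and put back on rollback.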
*/ list_add_tail(&page->lru, &pagelist); continue; out_unlock: unlock_page(page); put_page(page); goto xa_unlocked; } if (!is_shmem) { filemap_nr_thps_inc(mapping); /* * Paired with smp_mb() in do_dentry_open() to ensure * i_writecount is up to date and the update to nr_thps is * visible. Ensures the page cache will be truncated if the * file is opened writable. */ smp_mb(); if (inode_is_open_for_write(mapping->host)) { result = SCAN_FAIL; filemap_nr_thps_dec(mapping); } } xa_locked: xas_unlock_irq(&xas); xa_unlocked: /* * If collapse is successful, flush must be done now before copying. * If collapse is unsuccessful, does flush actually need to be done? * Do it anyway, to clear the state. */ try_to_unmap_flush(); if (result == SCAN_SUCCEED && nr_none && !shmem_charge(mapping->host, nr_none)) result = SCAN_FAIL; if (result != SCAN_SUCCEED) { nr_none = 0; goto rollback; } /* * The old pages are locked, so they won't change anymore. */ index = start; list_for_each_entry(page, &pagelist, lru) { while (index < page->index) { clear_highpage(hpage + (index % HPAGE_PMD_NR)); index++; } if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR), page) > 0) { result = SCAN_COPY_MC; goto rollback; } index++; } while (index < end) { clear_highpage(hpage + (index % HPAGE_PMD_NR)); index++; } if (nr_none) { struct vm_area_struct *vma; int nr_none_check = 0; i_mmap_lock_read(mapping); xas_lock_irq(&xas); xas_set(&xas, start); for (index = start; index < end; index++) { if (!xas_next(&xas)) { xas_store(&xas, XA_RETRY_ENTRY); if (xas_error(&xas)) { result = SCAN_STORE_FAILED; goto immap_locked; } nr_none_check++; } } if (nr_none != nr_none_check) { result = SCAN_PAGE_FILLED; goto immap_locked; } /* * If userspace observed a missing page in a VMA with a MODE_MISSING * userfaultfd, then it might expect a UFFD_EVENT_PAGEFAULT for that * page. If so, we need to roll back to avoid suppressing such an * event. Since wp/minor userfaultfds don't give userspace any * guarantees that the kernel doesn't fill a missing page with a zero * page, so they don't matter here. * * Any userfaultfds registered after this point will not be able to * observe any missing pages due to the previously inserted retry * entries. */ vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) { if (userfaultfd_missing(vma)) { result = SCAN_EXCEED_NONE_PTE; goto immap_locked; } } immap_locked: i_mmap_unlock_read(mapping); if (result != SCAN_SUCCEED) { xas_set(&xas, start); for (index = start; index < end; index++) { if (xas_next(&xas) == XA_RETRY_ENTRY) xas_store(&xas, NULL); } xas_unlock_irq(&xas); goto rollback; } } else { xas_lock_irq(&xas); } nr = thp_nr_pages(hpage); if (is_shmem) __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr); else __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr); if (nr_none) { __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none); /* nr_none is always 0 for non-shmem. */ __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none); } /* * Mark hpage as uptodate before inserting it into the page cache so * that it isn't mistaken for an fallocated but unwritten page. */ folio = page_folio(hpage); folio_mark_uptodate(folio); folio_ref_add(folio, HPAGE_PMD_NR - 1); if (is_shmem) folio_mark_dirty(folio); folio_add_lru(folio); /* Join all the small entries into a single multi-index entry. */ xas_set_order(&xas, start, HPAGE_PMD_ORDER); xas_store(&xas, hpage); WARN_ON_ONCE(xas_error(&xas)); xas_unlock_irq(&xas); /* * Remove pte page tables, so we can re-fault the page as huge. 
* If MADV_COLLAPSE, adjust result to call collapse_pte_mapped_thp(). */ retract_page_tables(mapping, start); if (cc && !cc->is_khugepaged) result = SCAN_PTE_MAPPED_HUGEPAGE; unlock_page(hpage); /* * The collapse has succeeded, so free the old pages. */ list_for_each_entry_safe(page, tmp, &pagelist, lru) { list_del(&page->lru); page->mapping = NULL; ClearPageActive(page); ClearPageUnevictable(page); unlock_page(page); folio_put_refs(page_folio(page), 3); } goto out; rollback: /* Something went wrong: roll back page cache changes */ if (nr_none) { xas_lock_irq(&xas); mapping->nrpages -= nr_none; xas_unlock_irq(&xas); shmem_uncharge(mapping->host, nr_none); } list_for_each_entry_safe(page, tmp, &pagelist, lru) { list_del(&page->lru); unlock_page(page); putback_lru_page(page); put_page(page); } /* * Undo the updates of filemap_nr_thps_inc for non-SHMEM * file only. This undo is not needed unless failure is * due to SCAN_COPY_MC. */ if (!is_shmem && result == SCAN_COPY_MC) { filemap_nr_thps_dec(mapping); /* * Paired with smp_mb() in do_dentry_open() to * ensure the update to nr_thps is visible. */ smp_mb(); } hpage->mapping = NULL; unlock_page(hpage); put_page(hpage); out: VM_BUG_ON(!list_empty(&pagelist)); trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result); return result; } static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, struct file *file, pgoff_t start, struct collapse_control *cc) { struct page *page = NULL; struct address_space *mapping = file->f_mapping; XA_STATE(xas, &mapping->i_pages, start); int present, swap; int node = NUMA_NO_NODE; int result = SCAN_SUCCEED; present = 0; swap = 0; memset(cc->node_load, 0, sizeof(cc->node_load)); nodes_clear(cc->alloc_nmask); rcu_read_lock(); xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) { if (xas_retry(&xas, page)) continue; if (xa_is_value(page)) { ++swap; if (cc->is_khugepaged && swap > khugepaged_max_ptes_swap) { result = SCAN_EXCEED_SWAP_PTE; count_vm_event(THP_SCAN_EXCEED_SWAP_PTE); break; } continue; } /* * TODO: khugepaged should compact smaller compound pages * into a PMD sized page */ if (PageTransCompound(page)) { struct page *head = compound_head(page); result = compound_order(head) == HPAGE_PMD_ORDER && head->index == start /* Maybe PMD-mapped */ ? SCAN_PTE_MAPPED_HUGEPAGE : SCAN_PAGE_COMPOUND; /* * For SCAN_PTE_MAPPED_HUGEPAGE, further processing * by the caller won't touch the page cache, and so * it's safe to skip LRU and refcount checks before * returning. */ break; } node = page_to_nid(page); if (hpage_collapse_scan_abort(node, cc)) { result = SCAN_SCAN_ABORT; break; } cc->node_load[node]++; if (!PageLRU(page)) { result = SCAN_PAGE_LRU; break; } if (page_count(page) != 1 + page_mapcount(page) + page_has_private(page)) { result = SCAN_PAGE_COUNT; break; } /* * We probably should check if the page is referenced here, but * nobody would transfer pte_young() to PageReferenced() for us. * And rmap walk here is just too costly... 
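 * So for file/shmem mappings the collapse decision is based purely on
 * how many pages are present versus max_ptes_none, with no check of
 * how recently they were touched.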
*/ present++; if (need_resched()) { xas_pause(&xas); cond_resched_rcu(); } } rcu_read_unlock(); if (result == SCAN_SUCCEED) { if (cc->is_khugepaged && present < HPAGE_PMD_NR - khugepaged_max_ptes_none) { result = SCAN_EXCEED_NONE_PTE; count_vm_event(THP_SCAN_EXCEED_NONE_PTE); } else { result = collapse_file(mm, addr, file, start, cc); } } trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result); return result; } #else static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, struct file *file, pgoff_t start, struct collapse_control *cc) { BUILD_BUG(); } #endif static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, struct collapse_control *cc) __releases(&khugepaged_mm_lock) __acquires(&khugepaged_mm_lock) { struct vma_iterator vmi; struct khugepaged_mm_slot *mm_slot; struct mm_slot *slot; struct mm_struct *mm; struct vm_area_struct *vma; int progress = 0; VM_BUG_ON(!pages); lockdep_assert_held(&khugepaged_mm_lock); *result = SCAN_FAIL; if (khugepaged_scan.mm_slot) { mm_slot = khugepaged_scan.mm_slot; slot = &mm_slot->slot; } else { slot = list_entry(khugepaged_scan.mm_head.next, struct mm_slot, mm_node); mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot); khugepaged_scan.address = 0; khugepaged_scan.mm_slot = mm_slot; } spin_unlock(&khugepaged_mm_lock); mm = slot->mm; /* * Don't wait for semaphore (to avoid long wait times). Just move to * the next mm on the list. */ vma = NULL; if (unlikely(!mmap_read_trylock(mm))) goto breakouterloop_mmap_lock; progress++; if (unlikely(hpage_collapse_test_exit(mm))) goto breakouterloop; vma_iter_init(&vmi, mm, khugepaged_scan.address); for_each_vma(vmi, vma) { unsigned long hstart, hend; cond_resched(); if (unlikely(hpage_collapse_test_exit(mm))) { progress++; break; } if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) { skip: progress++; continue; } hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE); hend = round_down(vma->vm_end, HPAGE_PMD_SIZE); if (khugepaged_scan.address > hend) goto skip; if (khugepaged_scan.address < hstart) khugepaged_scan.address = hstart; VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); while (khugepaged_scan.address < hend) { bool mmap_locked = true; cond_resched(); if (unlikely(hpage_collapse_test_exit(mm))) goto breakouterloop; VM_BUG_ON(khugepaged_scan.address < hstart || khugepaged_scan.address + HPAGE_PMD_SIZE > hend); if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) { struct file *file = get_file(vma->vm_file); pgoff_t pgoff = linear_page_index(vma, khugepaged_scan.address); mmap_read_unlock(mm); mmap_locked = false; *result = hpage_collapse_scan_file(mm, khugepaged_scan.address, file, pgoff, cc); fput(file); if (*result == SCAN_PTE_MAPPED_HUGEPAGE) { mmap_read_lock(mm); if (hpage_collapse_test_exit(mm)) goto breakouterloop; *result = collapse_pte_mapped_thp(mm, khugepaged_scan.address, false); if (*result == SCAN_PMD_MAPPED) *result = SCAN_SUCCEED; mmap_read_unlock(mm); } } else { *result = hpage_collapse_scan_pmd(mm, vma, khugepaged_scan.address, &mmap_locked, cc); } if (*result == SCAN_SUCCEED) ++khugepaged_pages_collapsed; /* move to next address */ khugepaged_scan.address += HPAGE_PMD_SIZE; progress += HPAGE_PMD_NR; if (!mmap_locked) /* * We released mmap_lock so break loop. Note * that we drop mmap_lock before all hugepage * allocations, so if allocation fails, we are * guaranteed to break here and report the * correct result back to caller. 
*/ goto breakouterloop_mmap_lock; if (progress >= pages) goto breakouterloop; } } breakouterloop: mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */ breakouterloop_mmap_lock: spin_lock(&khugepaged_mm_lock); VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); /* * Release the current mm_slot if this mm is about to die, or * if we scanned all vmas of this mm. */ if (hpage_collapse_test_exit(mm) || !vma) { /* * Make sure that if mm_users is reaching zero while * khugepaged runs here, khugepaged_exit will find * mm_slot not pointing to the exiting mm. */ if (slot->mm_node.next != &khugepaged_scan.mm_head) { slot = list_entry(slot->mm_node.next, struct mm_slot, mm_node); khugepaged_scan.mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot); khugepaged_scan.address = 0; } else { khugepaged_scan.mm_slot = NULL; khugepaged_full_scans++; } collect_mm_slot(mm_slot); } return progress; } static int khugepaged_has_work(void) { return !list_empty(&khugepaged_scan.mm_head) && hugepage_flags_enabled(); } static int khugepaged_wait_event(void) { return !list_empty(&khugepaged_scan.mm_head) || kthread_should_stop(); } static void khugepaged_do_scan(struct collapse_control *cc) { unsigned int progress = 0, pass_through_head = 0; unsigned int pages = READ_ONCE(khugepaged_pages_to_scan); bool wait = true; int result = SCAN_SUCCEED; lru_add_drain_all(); while (true) { cond_resched(); if (unlikely(kthread_should_stop() || try_to_freeze())) break; spin_lock(&khugepaged_mm_lock); if (!khugepaged_scan.mm_slot) pass_through_head++; if (khugepaged_has_work() && pass_through_head < 2) progress += khugepaged_scan_mm_slot(pages - progress, &result, cc); else progress = pages; spin_unlock(&khugepaged_mm_lock); if (progress >= pages) break; if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) { /* * If fail to allocate the first time, try to sleep for * a while. When hit again, cancel the scan. */ if (!wait) break; wait = false; khugepaged_alloc_sleep(); } } } static bool khugepaged_should_wakeup(void) { return kthread_should_stop() || time_after_eq(jiffies, khugepaged_sleep_expire); } static void khugepaged_wait_work(void) { if (khugepaged_has_work()) { const unsigned long scan_sleep_jiffies = msecs_to_jiffies(khugepaged_scan_sleep_millisecs); if (!scan_sleep_jiffies) return; khugepaged_sleep_expire = jiffies + scan_sleep_jiffies; wait_event_freezable_timeout(khugepaged_wait, khugepaged_should_wakeup(), scan_sleep_jiffies); return; } if (hugepage_flags_enabled()) wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); } static int khugepaged(void *none) { struct khugepaged_mm_slot *mm_slot; set_freezable(); set_user_nice(current, MAX_NICE); while (!kthread_should_stop()) { khugepaged_do_scan(&khugepaged_collapse_control); khugepaged_wait_work(); } spin_lock(&khugepaged_mm_lock); mm_slot = khugepaged_scan.mm_slot; khugepaged_scan.mm_slot = NULL; if (mm_slot) collect_mm_slot(mm_slot); spin_unlock(&khugepaged_mm_lock); return 0; } static void set_recommended_min_free_kbytes(void) { struct zone *zone; int nr_zones = 0; unsigned long recommended_min; if (!hugepage_flags_enabled()) { calculate_min_free_kbytes(); goto update_wmarks; } for_each_populated_zone(zone) { /* * We don't need to worry about fragmentation of * ZONE_MOVABLE since it only has movable pages. 
*/ if (zone_idx(zone) > gfp_zone(GFP_USER)) continue; nr_zones++; } /* Ensure 2 pageblocks are free to assist fragmentation avoidance */ recommended_min = pageblock_nr_pages * nr_zones * 2; /* * Make sure that on average at least two pageblocks are almost free * of another type, one for a migratetype to fall back to and a * second to avoid subsequent fallbacks of other types There are 3 * MIGRATE_TYPES we care about. */ recommended_min += pageblock_nr_pages * nr_zones * MIGRATE_PCPTYPES * MIGRATE_PCPTYPES; /* don't ever allow to reserve more than 5% of the lowmem */ recommended_min = min(recommended_min, (unsigned long) nr_free_buffer_pages() / 20); recommended_min <<= (PAGE_SHIFT-10); if (recommended_min > min_free_kbytes) { if (user_min_free_kbytes >= 0) pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n", min_free_kbytes, recommended_min); min_free_kbytes = recommended_min; } update_wmarks: setup_per_zone_wmarks(); } int start_stop_khugepaged(void) { int err = 0; mutex_lock(&khugepaged_mutex); if (hugepage_flags_enabled()) { if (!khugepaged_thread) khugepaged_thread = kthread_run(khugepaged, NULL, "khugepaged"); if (IS_ERR(khugepaged_thread)) { pr_err("khugepaged: kthread_run(khugepaged) failed\n"); err = PTR_ERR(khugepaged_thread); khugepaged_thread = NULL; goto fail; } if (!list_empty(&khugepaged_scan.mm_head)) wake_up_interruptible(&khugepaged_wait); } else if (khugepaged_thread) { kthread_stop(khugepaged_thread); khugepaged_thread = NULL; } set_recommended_min_free_kbytes(); fail: mutex_unlock(&khugepaged_mutex); return err; } void khugepaged_min_free_kbytes_update(void) { mutex_lock(&khugepaged_mutex); if (hugepage_flags_enabled() && khugepaged_thread) set_recommended_min_free_kbytes(); mutex_unlock(&khugepaged_mutex); } bool current_is_khugepaged(void) { return kthread_func(current) == khugepaged; } static int madvise_collapse_errno(enum scan_result r) { /* * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide * actionable feedback to caller, so they may take an appropriate * fallback measure depending on the nature of the failure. */ switch (r) { case SCAN_ALLOC_HUGE_PAGE_FAIL: return -ENOMEM; case SCAN_CGROUP_CHARGE_FAIL: case SCAN_EXCEED_NONE_PTE: return -EBUSY; /* Resource temporary unavailable - trying again might succeed */ case SCAN_PAGE_COUNT: case SCAN_PAGE_LOCK: case SCAN_PAGE_LRU: case SCAN_DEL_PAGE_LRU: case SCAN_PAGE_FILLED: return -EAGAIN; /* * Other: Trying again likely not to succeed / error intrinsic to * specified memory range. khugepaged likely won't be able to collapse * either. 
*/ default: return -EINVAL; } } int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) { struct collapse_control *cc; struct mm_struct *mm = vma->vm_mm; unsigned long hstart, hend, addr; int thps = 0, last_fail = SCAN_FAIL; bool mmap_locked = true; BUG_ON(vma->vm_start > start); BUG_ON(vma->vm_end < end); *prev = vma; if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false)) return -EINVAL; cc = kmalloc(sizeof(*cc), GFP_KERNEL); if (!cc) return -ENOMEM; cc->is_khugepaged = false; mmgrab(mm); lru_add_drain_all(); hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; hend = end & HPAGE_PMD_MASK; for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) { int result = SCAN_FAIL; if (!mmap_locked) { cond_resched(); mmap_read_lock(mm); mmap_locked = true; result = hugepage_vma_revalidate(mm, addr, false, &vma, cc); if (result != SCAN_SUCCEED) { last_fail = result; goto out_nolock; } hend = min(hend, vma->vm_end & HPAGE_PMD_MASK); } mmap_assert_locked(mm); memset(cc->node_load, 0, sizeof(cc->node_load)); nodes_clear(cc->alloc_nmask); if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) { struct file *file = get_file(vma->vm_file); pgoff_t pgoff = linear_page_index(vma, addr); mmap_read_unlock(mm); mmap_locked = false; result = hpage_collapse_scan_file(mm, addr, file, pgoff, cc); fput(file); } else { result = hpage_collapse_scan_pmd(mm, vma, addr, &mmap_locked, cc); } if (!mmap_locked) *prev = NULL; /* Tell caller we dropped mmap_lock */ handle_result: switch (result) { case SCAN_SUCCEED: case SCAN_PMD_MAPPED: ++thps; break; case SCAN_PTE_MAPPED_HUGEPAGE: BUG_ON(mmap_locked); BUG_ON(*prev); mmap_read_lock(mm); result = collapse_pte_mapped_thp(mm, addr, true); mmap_read_unlock(mm); goto handle_result; /* Whitelisted set of results where continuing OK */ case SCAN_PMD_NULL: case SCAN_PTE_NON_PRESENT: case SCAN_PTE_UFFD_WP: case SCAN_PAGE_RO: case SCAN_LACK_REFERENCED_PAGE: case SCAN_PAGE_NULL: case SCAN_PAGE_COUNT: case SCAN_PAGE_LOCK: case SCAN_PAGE_COMPOUND: case SCAN_PAGE_LRU: case SCAN_DEL_PAGE_LRU: last_fail = result; break; default: last_fail = result; /* Other error, exit */ goto out_maybelock; } } out_maybelock: /* Caller expects us to hold mmap_lock on return */ if (!mmap_locked) mmap_read_lock(mm); out_nolock: mmap_assert_locked(mm); mmdrop(mm); kfree(cc); return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0 : madvise_collapse_errno(last_fail); }
linux-master
mm/khugepaged.c
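The madvise_collapse() and madvise_collapse_errno() routines at the end of mm/khugepaged.c above are the kernel side of MADV_COLLAPSE. The userspace sketch below is illustrative only and not part of the source: it assumes Linux >= 6.1 UAPI headers for MADV_COLLAPSE and a 2 MiB PMD huge page size, and shows how the errno values produced by madvise_collapse_errno() surface to a caller.

/* Hypothetical userspace sketch, not from the source tree: request collapse
 * of an anonymous region via MADV_COLLAPSE and interpret the errno values
 * that madvise_collapse_errno() maps from the kernel's scan_result codes.
 * Assumes Linux >= 6.1 UAPI headers and a 2 MiB PMD huge page size.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25	/* value from the kernel UAPI; fallback for older libc headers */
#endif

#define PMD_SZ	(2UL * 1024 * 1024)	/* assumed PMD huge page size (x86-64) */
#define LEN	(2 * PMD_SZ)		/* ensures one aligned PMD-sized range fits inside */

int main(void)
{
	/* MAP_POPULATE pre-faults the pages so the collapse scan sees present
	 * PTEs instead of failing with an "exceed none PTE" style result. */
	void *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	if (madvise(p, LEN, MADV_COLLAPSE) == 0) {
		puts("region collapsed to huge page(s)");
	} else if (errno == EAGAIN || errno == EBUSY) {
		/* Transient results (page refcounts, LRU isolation, cgroup
		 * charge): retrying later may succeed. */
		puts("transient failure, worth retrying");
	} else if (errno == ENOMEM) {
		puts("huge page allocation failed");
	} else {
		/* EINVAL and friends: the range or VMA is not eligible. */
		printf("not collapsible: %s\n", strerror(errno));
	}

	munmap(p, LEN);
	return 0;
}

Mapping twice the PMD size guarantees that at least one fully aligned huge-page-sized range falls inside the region, which is what madvise_collapse() rounds to with hstart/hend before walking it.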
// SPDX-License-Identifier: GPL-2.0 /* * linux/mm/mempool.c * * memory buffer pool support. Such pools are mostly used * for guaranteed, deadlock-free memory allocations during * extreme VM load. * * started by Ingo Molnar, Copyright (C) 2001 * debugging by David Rientjes, Copyright (C) 2015 */ #include <linux/mm.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/kasan.h> #include <linux/kmemleak.h> #include <linux/export.h> #include <linux/mempool.h> #include <linux/writeback.h> #include "slab.h" #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON) static void poison_error(mempool_t *pool, void *element, size_t size, size_t byte) { const int nr = pool->curr_nr; const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0); const int end = min_t(int, byte + (BITS_PER_LONG / 8), size); int i; pr_err("BUG: mempool element poison mismatch\n"); pr_err("Mempool %p size %zu\n", pool, size); pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : ""); for (i = start; i < end; i++) pr_cont("%x ", *(u8 *)(element + i)); pr_cont("%s\n", end < size ? "..." : ""); dump_stack(); } static void __check_element(mempool_t *pool, void *element, size_t size) { u8 *obj = element; size_t i; for (i = 0; i < size; i++) { u8 exp = (i < size - 1) ? POISON_FREE : POISON_END; if (obj[i] != exp) { poison_error(pool, element, size, i); return; } } memset(obj, POISON_INUSE, size); } static void check_element(mempool_t *pool, void *element) { /* Mempools backed by slab allocator */ if (pool->free == mempool_kfree) { __check_element(pool, element, (size_t)pool->pool_data); } else if (pool->free == mempool_free_slab) { __check_element(pool, element, kmem_cache_size(pool->pool_data)); } else if (pool->free == mempool_free_pages) { /* Mempools backed by page allocator */ int order = (int)(long)pool->pool_data; void *addr = kmap_atomic((struct page *)element); __check_element(pool, addr, 1UL << (PAGE_SHIFT + order)); kunmap_atomic(addr); } } static void __poison_element(void *element, size_t size) { u8 *obj = element; memset(obj, POISON_FREE, size - 1); obj[size - 1] = POISON_END; } static void poison_element(mempool_t *pool, void *element) { /* Mempools backed by slab allocator */ if (pool->alloc == mempool_kmalloc) { __poison_element(element, (size_t)pool->pool_data); } else if (pool->alloc == mempool_alloc_slab) { __poison_element(element, kmem_cache_size(pool->pool_data)); } else if (pool->alloc == mempool_alloc_pages) { /* Mempools backed by page allocator */ int order = (int)(long)pool->pool_data; void *addr = kmap_atomic((struct page *)element); __poison_element(addr, 1UL << (PAGE_SHIFT + order)); kunmap_atomic(addr); } } #else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */ static inline void check_element(mempool_t *pool, void *element) { } static inline void poison_element(mempool_t *pool, void *element) { } #endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */ static __always_inline void kasan_poison_element(mempool_t *pool, void *element) { if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) kasan_slab_free_mempool(element); else if (pool->alloc == mempool_alloc_pages) kasan_poison_pages(element, (unsigned long)pool->pool_data, false); } static void kasan_unpoison_element(mempool_t *pool, void *element) { if (pool->alloc == mempool_kmalloc) kasan_unpoison_range(element, (size_t)pool->pool_data); else if (pool->alloc == mempool_alloc_slab) kasan_unpoison_range(element, kmem_cache_size(pool->pool_data)); else if (pool->alloc == mempool_alloc_pages) 
kasan_unpoison_pages(element, (unsigned long)pool->pool_data, false); } static __always_inline void add_element(mempool_t *pool, void *element) { BUG_ON(pool->curr_nr >= pool->min_nr); poison_element(pool, element); kasan_poison_element(pool, element); pool->elements[pool->curr_nr++] = element; } static void *remove_element(mempool_t *pool) { void *element = pool->elements[--pool->curr_nr]; BUG_ON(pool->curr_nr < 0); kasan_unpoison_element(pool, element); check_element(pool, element); return element; } /** * mempool_exit - exit a mempool initialized with mempool_init() * @pool: pointer to the memory pool which was initialized with * mempool_init(). * * Free all reserved elements in @pool and @pool itself. This function * only sleeps if the free_fn() function sleeps. * * May be called on a zeroed but uninitialized mempool (i.e. allocated with * kzalloc()). */ void mempool_exit(mempool_t *pool) { while (pool->curr_nr) { void *element = remove_element(pool); pool->free(element, pool->pool_data); } kfree(pool->elements); pool->elements = NULL; } EXPORT_SYMBOL(mempool_exit); /** * mempool_destroy - deallocate a memory pool * @pool: pointer to the memory pool which was allocated via * mempool_create(). * * Free all reserved elements in @pool and @pool itself. This function * only sleeps if the free_fn() function sleeps. */ void mempool_destroy(mempool_t *pool) { if (unlikely(!pool)) return; mempool_exit(pool); kfree(pool); } EXPORT_SYMBOL(mempool_destroy); int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, gfp_t gfp_mask, int node_id) { spin_lock_init(&pool->lock); pool->min_nr = min_nr; pool->pool_data = pool_data; pool->alloc = alloc_fn; pool->free = free_fn; init_waitqueue_head(&pool->wait); pool->elements = kmalloc_array_node(min_nr, sizeof(void *), gfp_mask, node_id); if (!pool->elements) return -ENOMEM; /* * First pre-allocate the guaranteed number of buffers. */ while (pool->curr_nr < pool->min_nr) { void *element; element = pool->alloc(gfp_mask, pool->pool_data); if (unlikely(!element)) { mempool_exit(pool); return -ENOMEM; } add_element(pool, element); } return 0; } EXPORT_SYMBOL(mempool_init_node); /** * mempool_init - initialize a memory pool * @pool: pointer to the memory pool that should be initialized * @min_nr: the minimum number of elements guaranteed to be * allocated for this pool. * @alloc_fn: user-defined element-allocation function. * @free_fn: user-defined element-freeing function. * @pool_data: optional private data available to the user-defined functions. * * Like mempool_create(), but initializes the pool in (i.e. embedded in another * structure). * * Return: %0 on success, negative error code otherwise. */ int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data) { return mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data, GFP_KERNEL, NUMA_NO_NODE); } EXPORT_SYMBOL(mempool_init); /** * mempool_create - create a memory pool * @min_nr: the minimum number of elements guaranteed to be * allocated for this pool. * @alloc_fn: user-defined element-allocation function. * @free_fn: user-defined element-freeing function. * @pool_data: optional private data available to the user-defined functions. * * this function creates and allocates a guaranteed size, preallocated * memory pool. The pool can be used from the mempool_alloc() and mempool_free() * functions. This function might sleep. 
Both the alloc_fn() and the free_fn() * functions might sleep - as long as the mempool_alloc() function is not called * from IRQ contexts. * * Return: pointer to the created memory pool object or %NULL on error. */ mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data) { return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data, GFP_KERNEL, NUMA_NO_NODE); } EXPORT_SYMBOL(mempool_create); mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, gfp_t gfp_mask, int node_id) { mempool_t *pool; pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id); if (!pool) return NULL; if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data, gfp_mask, node_id)) { kfree(pool); return NULL; } return pool; } EXPORT_SYMBOL(mempool_create_node); /** * mempool_resize - resize an existing memory pool * @pool: pointer to the memory pool which was allocated via * mempool_create(). * @new_min_nr: the new minimum number of elements guaranteed to be * allocated for this pool. * * This function shrinks/grows the pool. In the case of growing, * it cannot be guaranteed that the pool will be grown to the new * size immediately, but new mempool_free() calls will refill it. * This function may sleep. * * Note, the caller must guarantee that no mempool_destroy is called * while this function is running. mempool_alloc() & mempool_free() * might be called (eg. from IRQ contexts) while this function executes. * * Return: %0 on success, negative error code otherwise. */ int mempool_resize(mempool_t *pool, int new_min_nr) { void *element; void **new_elements; unsigned long flags; BUG_ON(new_min_nr <= 0); might_sleep(); spin_lock_irqsave(&pool->lock, flags); if (new_min_nr <= pool->min_nr) { while (new_min_nr < pool->curr_nr) { element = remove_element(pool); spin_unlock_irqrestore(&pool->lock, flags); pool->free(element, pool->pool_data); spin_lock_irqsave(&pool->lock, flags); } pool->min_nr = new_min_nr; goto out_unlock; } spin_unlock_irqrestore(&pool->lock, flags); /* Grow the pool */ new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements), GFP_KERNEL); if (!new_elements) return -ENOMEM; spin_lock_irqsave(&pool->lock, flags); if (unlikely(new_min_nr <= pool->min_nr)) { /* Raced, other resize will do our work */ spin_unlock_irqrestore(&pool->lock, flags); kfree(new_elements); goto out; } memcpy(new_elements, pool->elements, pool->curr_nr * sizeof(*new_elements)); kfree(pool->elements); pool->elements = new_elements; pool->min_nr = new_min_nr; while (pool->curr_nr < pool->min_nr) { spin_unlock_irqrestore(&pool->lock, flags); element = pool->alloc(GFP_KERNEL, pool->pool_data); if (!element) goto out; spin_lock_irqsave(&pool->lock, flags); if (pool->curr_nr < pool->min_nr) { add_element(pool, element); } else { spin_unlock_irqrestore(&pool->lock, flags); pool->free(element, pool->pool_data); /* Raced */ goto out; } } out_unlock: spin_unlock_irqrestore(&pool->lock, flags); out: return 0; } EXPORT_SYMBOL(mempool_resize); /** * mempool_alloc - allocate an element from a specific memory pool * @pool: pointer to the memory pool which was allocated via * mempool_create(). * @gfp_mask: the usual allocation bitmask. * * this function only sleeps if the alloc_fn() function sleeps or * returns NULL. Note that due to preallocation, this function * *never* fails when called from process contexts. (it might * fail if called from an IRQ context.) * Note: using __GFP_ZERO is not supported. 
* * Return: pointer to the allocated element or %NULL on error. */ void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) { void *element; unsigned long flags; wait_queue_entry_t wait; gfp_t gfp_temp; VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO); might_alloc(gfp_mask); gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */ gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */ gfp_mask |= __GFP_NOWARN; /* failures are OK */ gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO); repeat_alloc: element = pool->alloc(gfp_temp, pool->pool_data); if (likely(element != NULL)) return element; spin_lock_irqsave(&pool->lock, flags); if (likely(pool->curr_nr)) { element = remove_element(pool); spin_unlock_irqrestore(&pool->lock, flags); /* paired with rmb in mempool_free(), read comment there */ smp_wmb(); /* * Update the allocation stack trace as this is more useful * for debugging. */ kmemleak_update_trace(element); return element; } /* * We use gfp mask w/o direct reclaim or IO for the first round. If * alloc failed with that and @pool was empty, retry immediately. */ if (gfp_temp != gfp_mask) { spin_unlock_irqrestore(&pool->lock, flags); gfp_temp = gfp_mask; goto repeat_alloc; } /* We must not sleep if !__GFP_DIRECT_RECLAIM */ if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) { spin_unlock_irqrestore(&pool->lock, flags); return NULL; } /* Let's wait for someone else to return an element to @pool */ init_wait(&wait); prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE); spin_unlock_irqrestore(&pool->lock, flags); /* * FIXME: this should be io_schedule(). The timeout is there as a * workaround for some DM problems in 2.6.18. */ io_schedule_timeout(5*HZ); finish_wait(&pool->wait, &wait); goto repeat_alloc; } EXPORT_SYMBOL(mempool_alloc); /** * mempool_free - return an element to the pool. * @element: pool element pointer. * @pool: pointer to the memory pool which was allocated via * mempool_create(). * * this function only sleeps if the free_fn() function sleeps. */ void mempool_free(void *element, mempool_t *pool) { unsigned long flags; if (unlikely(element == NULL)) return; /* * Paired with the wmb in mempool_alloc(). The preceding read is * for @element and the following @pool->curr_nr. This ensures * that the visible value of @pool->curr_nr is from after the * allocation of @element. This is necessary for fringe cases * where @element was passed to this task without going through * barriers. * * For example, assume @p is %NULL at the beginning and one task * performs "p = mempool_alloc(...);" while another task is doing * "while (!p) cpu_relax(); mempool_free(p, ...);". This function * may end up using curr_nr value which is from before allocation * of @p without the following rmb. */ smp_rmb(); /* * For correctness, we need a test which is guaranteed to trigger * if curr_nr + #allocated == min_nr. Testing curr_nr < min_nr * without locking achieves that and refilling as soon as possible * is desirable. * * Because curr_nr visible here is always a value after the * allocation of @element, any task which decremented curr_nr below * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets * incremented to min_nr afterwards. If curr_nr gets incremented * to min_nr after the allocation of @element, the elements * allocated after that are subject to the same guarantee. * * Waiters happen iff curr_nr is 0 and the above guarantee also * ensures that there will be frees which return elements to the * pool waking up the waiters. 
*/ if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) { spin_lock_irqsave(&pool->lock, flags); if (likely(pool->curr_nr < pool->min_nr)) { add_element(pool, element); spin_unlock_irqrestore(&pool->lock, flags); wake_up(&pool->wait); return; } spin_unlock_irqrestore(&pool->lock, flags); } pool->free(element, pool->pool_data); } EXPORT_SYMBOL(mempool_free); /* * A commonly used alloc and free fn. */ void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data) { struct kmem_cache *mem = pool_data; VM_BUG_ON(mem->ctor); return kmem_cache_alloc(mem, gfp_mask); } EXPORT_SYMBOL(mempool_alloc_slab); void mempool_free_slab(void *element, void *pool_data) { struct kmem_cache *mem = pool_data; kmem_cache_free(mem, element); } EXPORT_SYMBOL(mempool_free_slab); /* * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory * specified by pool_data */ void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data) { size_t size = (size_t)pool_data; return kmalloc(size, gfp_mask); } EXPORT_SYMBOL(mempool_kmalloc); void mempool_kfree(void *element, void *pool_data) { kfree(element); } EXPORT_SYMBOL(mempool_kfree); /* * A simple mempool-backed page allocator that allocates pages * of the order specified by pool_data. */ void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data) { int order = (int)(long)pool_data; return alloc_pages(gfp_mask, order); } EXPORT_SYMBOL(mempool_alloc_pages); void mempool_free_pages(void *element, void *pool_data) { int order = (int)(long)pool_data; __free_pages(element, order); } EXPORT_SYMBOL(mempool_free_pages);
linux-master
mm/mempool.c
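The mempool API implemented above is normally consumed from driver or filesystem code. The following is a minimal, hypothetical kernel-module-style sketch (struct foo_request, the cache and the reserve size are invented for illustration) of the slab-backed pattern that the exported mempool_alloc_slab()/mempool_free_slab() helpers exist for.

/* Hypothetical in-kernel sketch, not from the source tree: a slab-backed
 * mempool that guarantees MIN_FOO_RESERVE objects for a forward-progress
 * path, built on the mempool_alloc_slab/mempool_free_slab helpers exported
 * by mm/mempool.c. "foo_request" and the sizes are illustrative only.
 */
#include <linux/mempool.h>
#include <linux/slab.h>

#define MIN_FOO_RESERVE 16		/* elements preallocated at create time */

struct foo_request {
	u64 sector;
	void *payload;
};

static struct kmem_cache *foo_cache;
static mempool_t *foo_pool;

static int foo_pool_init(void)
{
	foo_cache = KMEM_CACHE(foo_request, 0);
	if (!foo_cache)
		return -ENOMEM;

	/* Preallocates MIN_FOO_RESERVE objects; mempool_alloc() can dip into
	 * this reserve when kmem_cache_alloc() fails under memory pressure. */
	foo_pool = mempool_create(MIN_FOO_RESERVE, mempool_alloc_slab,
				  mempool_free_slab, foo_cache);
	if (!foo_pool) {
		kmem_cache_destroy(foo_cache);
		return -ENOMEM;
	}
	return 0;
}

static struct foo_request *foo_request_alloc(void)
{
	/* Per the mempool_alloc() comment above: never fails from process
	 * context when direct reclaim is allowed, but may sleep waiting for
	 * a freed element. */
	return mempool_alloc(foo_pool, GFP_NOIO);
}

static void foo_request_free(struct foo_request *req)
{
	/* Refills the reserve first if it is below min_nr, otherwise hands
	 * the object back to the slab cache via mempool_free_slab(). */
	mempool_free(req, foo_pool);
}

static void foo_pool_exit(void)
{
	mempool_destroy(foo_pool);
	kmem_cache_destroy(foo_cache);
}

The reserve only matters under pressure: mempool_alloc() first tries the underlying allocator with relaxed GFP flags and falls back to the preallocated elements, while mempool_free() tops the reserve back up before returning elements to the slab cache.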
// SPDX-License-Identifier: GPL-2.0-only /* * zbud.c * * Copyright (C) 2013, Seth Jennings, IBM * * Concepts based on zcache internal zbud allocator by Dan Magenheimer. * * zbud is an special purpose allocator for storing compressed pages. Contrary * to what its name may suggest, zbud is not a buddy allocator, but rather an * allocator that "buddies" two compressed pages together in a single memory * page. * * While this design limits storage density, it has simple and deterministic * reclaim properties that make it preferable to a higher density approach when * reclaim will be used. * * zbud works by storing compressed pages, or "zpages", together in pairs in a * single memory page called a "zbud page". The first buddy is "left * justified" at the beginning of the zbud page, and the last buddy is "right * justified" at the end of the zbud page. The benefit is that if either * buddy is freed, the freed buddy space, coalesced with whatever slack space * that existed between the buddies, results in the largest possible free region * within the zbud page. * * zbud also provides an attractive lower bound on density. The ratio of zpages * to zbud pages can not be less than 1. This ensures that zbud can never "do * harm" by using more pages to store zpages than the uncompressed zpages would * have used on their own. * * zbud pages are divided into "chunks". The size of the chunks is fixed at * compile time and determined by NCHUNKS_ORDER below. Dividing zbud pages * into chunks allows organizing unbuddied zbud pages into a manageable number * of unbuddied lists according to the number of free chunks available in the * zbud page. * * The zbud API differs from that of conventional allocators in that the * allocation function, zbud_alloc(), returns an opaque handle to the user, * not a dereferenceable pointer. The user must map the handle using * zbud_map() in order to get a usable pointer by which to access the * allocation data and unmap the handle with zbud_unmap() when operations * on the allocation data are complete. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/atomic.h> #include <linux/list.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/preempt.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/zpool.h> /***************** * Structures *****************/ /* * NCHUNKS_ORDER determines the internal allocation granularity, effectively * adjusting internal fragmentation. It also determines the number of * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk * in allocated page is occupied by zbud header, NCHUNKS will be calculated to * 63 which shows the max number of free chunks in zbud page, also there will be * 63 freelists per pool. */ #define NCHUNKS_ORDER 6 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER) #define CHUNK_SIZE (1 << CHUNK_SHIFT) #define ZHDR_SIZE_ALIGNED CHUNK_SIZE #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT) struct zbud_pool; /** * struct zbud_pool - stores metadata for each zbud pool * @lock: protects all pool fields and first|last_chunk fields of any * zbud page in the pool * @unbuddied: array of lists tracking zbud pages that only contain one buddy; * the lists each zbud page is added to depends on the size of * its free region. * @buddied: list tracking the zbud pages that contain two buddies; * these zbud pages are full * @pages_nr: number of zbud pages in the pool. 
* * This structure is allocated at pool creation time and maintains metadata * pertaining to a particular zbud pool. */ struct zbud_pool { spinlock_t lock; union { /* * Reuse unbuddied[0] as buddied on the ground that * unbuddied[0] is unused. */ struct list_head buddied; struct list_head unbuddied[NCHUNKS]; }; u64 pages_nr; }; /* * struct zbud_header - zbud page metadata occupying the first chunk of each * zbud page. * @buddy: links the zbud page into the unbuddied/buddied lists in the pool * @first_chunks: the size of the first buddy in chunks, 0 if free * @last_chunks: the size of the last buddy in chunks, 0 if free */ struct zbud_header { struct list_head buddy; unsigned int first_chunks; unsigned int last_chunks; }; /***************** * Helpers *****************/ /* Just to make the code easier to read */ enum buddy { FIRST, LAST }; /* Converts an allocation size in bytes to size in zbud chunks */ static int size_to_chunks(size_t size) { return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT; } #define for_each_unbuddied_list(_iter, _begin) \ for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++) /* Initializes the zbud header of a newly allocated zbud page */ static struct zbud_header *init_zbud_page(struct page *page) { struct zbud_header *zhdr = page_address(page); zhdr->first_chunks = 0; zhdr->last_chunks = 0; INIT_LIST_HEAD(&zhdr->buddy); return zhdr; } /* Resets the struct page fields and frees the page */ static void free_zbud_page(struct zbud_header *zhdr) { __free_page(virt_to_page(zhdr)); } /* * Encodes the handle of a particular buddy within a zbud page * Pool lock should be held as this function accesses first|last_chunks */ static unsigned long encode_handle(struct zbud_header *zhdr, enum buddy bud) { unsigned long handle; /* * For now, the encoded handle is actually just the pointer to the data * but this might not always be the case. A little information hiding. * Add CHUNK_SIZE to the handle if it is the first allocation to jump * over the zbud header in the first chunk. */ handle = (unsigned long)zhdr; if (bud == FIRST) /* skip over zbud header */ handle += ZHDR_SIZE_ALIGNED; else /* bud == LAST */ handle += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT); return handle; } /* Returns the zbud page where a given handle is stored */ static struct zbud_header *handle_to_zbud_header(unsigned long handle) { return (struct zbud_header *)(handle & PAGE_MASK); } /* Returns the number of free chunks in a zbud page */ static int num_free_chunks(struct zbud_header *zhdr) { /* * Rather than branch for different situations, just use the fact that * free buddies have a length of zero to simplify everything. */ return NCHUNKS - zhdr->first_chunks - zhdr->last_chunks; } /***************** * API Functions *****************/ /** * zbud_create_pool() - create a new zbud pool * @gfp: gfp flags when allocating the zbud pool structure * * Return: pointer to the new zbud pool or NULL if the metadata allocation * failed. */ static struct zbud_pool *zbud_create_pool(gfp_t gfp) { struct zbud_pool *pool; int i; pool = kzalloc(sizeof(struct zbud_pool), gfp); if (!pool) return NULL; spin_lock_init(&pool->lock); for_each_unbuddied_list(i, 0) INIT_LIST_HEAD(&pool->unbuddied[i]); INIT_LIST_HEAD(&pool->buddied); pool->pages_nr = 0; return pool; } /** * zbud_destroy_pool() - destroys an existing zbud pool * @pool: the zbud pool to be destroyed * * The pool should be emptied before this function is called. 
*/ static void zbud_destroy_pool(struct zbud_pool *pool) { kfree(pool); } /** * zbud_alloc() - allocates a region of a given size * @pool: zbud pool from which to allocate * @size: size in bytes of the desired allocation * @gfp: gfp flags used if the pool needs to grow * @handle: handle of the new allocation * * This function will attempt to find a free region in the pool large enough to * satisfy the allocation request. A search of the unbuddied lists is * performed first. If no suitable free region is found, then a new page is * allocated and added to the pool to satisfy the request. * * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used * as zbud pool pages. * * Return: 0 if success and handle is set, otherwise -EINVAL if the size or * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate * a new page. */ static int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp, unsigned long *handle) { int chunks, i, freechunks; struct zbud_header *zhdr = NULL; enum buddy bud; struct page *page; if (!size || (gfp & __GFP_HIGHMEM)) return -EINVAL; if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE) return -ENOSPC; chunks = size_to_chunks(size); spin_lock(&pool->lock); /* First, try to find an unbuddied zbud page. */ for_each_unbuddied_list(i, chunks) { if (!list_empty(&pool->unbuddied[i])) { zhdr = list_first_entry(&pool->unbuddied[i], struct zbud_header, buddy); list_del(&zhdr->buddy); if (zhdr->first_chunks == 0) bud = FIRST; else bud = LAST; goto found; } } /* Couldn't find unbuddied zbud page, create new one */ spin_unlock(&pool->lock); page = alloc_page(gfp); if (!page) return -ENOMEM; spin_lock(&pool->lock); pool->pages_nr++; zhdr = init_zbud_page(page); bud = FIRST; found: if (bud == FIRST) zhdr->first_chunks = chunks; else zhdr->last_chunks = chunks; if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0) { /* Add to unbuddied list */ freechunks = num_free_chunks(zhdr); list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); } else { /* Add to buddied list */ list_add(&zhdr->buddy, &pool->buddied); } *handle = encode_handle(zhdr, bud); spin_unlock(&pool->lock); return 0; } /** * zbud_free() - frees the allocation associated with the given handle * @pool: pool in which the allocation resided * @handle: handle associated with the allocation returned by zbud_alloc() */ static void zbud_free(struct zbud_pool *pool, unsigned long handle) { struct zbud_header *zhdr; int freechunks; spin_lock(&pool->lock); zhdr = handle_to_zbud_header(handle); /* If first buddy, handle will be page aligned */ if ((handle - ZHDR_SIZE_ALIGNED) & ~PAGE_MASK) zhdr->last_chunks = 0; else zhdr->first_chunks = 0; /* Remove from existing buddy list */ list_del(&zhdr->buddy); if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { /* zbud page is empty, free */ free_zbud_page(zhdr); pool->pages_nr--; } else { /* Add to unbuddied list */ freechunks = num_free_chunks(zhdr); list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); } spin_unlock(&pool->lock); } /** * zbud_map() - maps the allocation associated with the given handle * @pool: pool in which the allocation resides * @handle: handle associated with the allocation to be mapped * * While trivial for zbud, the mapping functions for others allocators * implementing this allocation API could have more complex information encoded * in the handle and could create temporary mappings to make the data * accessible to the user. 
* * Returns: a pointer to the mapped allocation */ static void *zbud_map(struct zbud_pool *pool, unsigned long handle) { return (void *)(handle); } /** * zbud_unmap() - maps the allocation associated with the given handle * @pool: pool in which the allocation resides * @handle: handle associated with the allocation to be unmapped */ static void zbud_unmap(struct zbud_pool *pool, unsigned long handle) { } /** * zbud_get_pool_size() - gets the zbud pool size in pages * @pool: pool whose size is being queried * * Returns: size in pages of the given pool. The pool lock need not be * taken to access pages_nr. */ static u64 zbud_get_pool_size(struct zbud_pool *pool) { return pool->pages_nr; } /***************** * zpool ****************/ static void *zbud_zpool_create(const char *name, gfp_t gfp) { return zbud_create_pool(gfp); } static void zbud_zpool_destroy(void *pool) { zbud_destroy_pool(pool); } static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp, unsigned long *handle) { return zbud_alloc(pool, size, gfp, handle); } static void zbud_zpool_free(void *pool, unsigned long handle) { zbud_free(pool, handle); } static void *zbud_zpool_map(void *pool, unsigned long handle, enum zpool_mapmode mm) { return zbud_map(pool, handle); } static void zbud_zpool_unmap(void *pool, unsigned long handle) { zbud_unmap(pool, handle); } static u64 zbud_zpool_total_size(void *pool) { return zbud_get_pool_size(pool) * PAGE_SIZE; } static struct zpool_driver zbud_zpool_driver = { .type = "zbud", .sleep_mapped = true, .owner = THIS_MODULE, .create = zbud_zpool_create, .destroy = zbud_zpool_destroy, .malloc = zbud_zpool_malloc, .free = zbud_zpool_free, .map = zbud_zpool_map, .unmap = zbud_zpool_unmap, .total_size = zbud_zpool_total_size, }; MODULE_ALIAS("zpool-zbud"); static int __init init_zbud(void) { /* Make sure the zbud header will fit in one chunk */ BUILD_BUG_ON(sizeof(struct zbud_header) > ZHDR_SIZE_ALIGNED); pr_info("loaded\n"); zpool_register_driver(&zbud_zpool_driver); return 0; } static void __exit exit_zbud(void) { zpool_unregister_driver(&zbud_zpool_driver); pr_info("unloaded\n"); } module_init(init_zbud); module_exit(exit_zbud); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Seth Jennings <[email protected]>"); MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages");
linux-master
mm/zbud.c
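zbud itself is only reachable through the zpool driver registered above, but its density properties follow directly from the chunk arithmetic in size_to_chunks() and NCHUNKS. The userspace sketch below mirrors those macros purely for illustration; the constants are copied from the file, while the 4 KiB PAGE_SIZE and the sample compressed-page sizes are assumptions.

/* Userspace illustration, not from the source tree, of the zbud chunk
 * accounting in mm/zbud.c: NCHUNKS_ORDER, CHUNK_SIZE, ZHDR_SIZE_ALIGNED and
 * size_to_chunks() are copied from the file; the 4096-byte PAGE_SIZE and the
 * sample sizes are assumptions made for this demo.
 */
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

#define NCHUNKS_ORDER	6
#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1UL << CHUNK_SHIFT)	/* 64 bytes with 4 KiB pages */
#define ZHDR_SIZE_ALIGNED CHUNK_SIZE		/* first chunk holds the zbud header */
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

/* Same rounding as size_to_chunks() in zbud.c. */
static unsigned long size_to_chunks(unsigned long size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

int main(void)
{
	unsigned long sizes[] = { 100, 1024, 2048, 3000 };	/* sample zpage sizes */

	printf("usable chunks per zbud page: %lu\n", NCHUNKS);
	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long c = size_to_chunks(sizes[i]);

		/* Two buddies share one zbud page only if their chunk
		 * counts sum to at most NCHUNKS. */
		printf("zpage of %4lu bytes -> %2lu chunks, leaves %2lu free\n",
		       sizes[i], c, NCHUNKS - c);
	}
	return 0;
}

With 4 KiB pages this gives 63 usable chunks per zbud page, so two compressed pages can buddy up only when their chunk counts sum to 63 or less; otherwise the page stays on an unbuddied list, which is exactly the density-versus-determinism trade-off described in the file's header comment.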
// SPDX-License-Identifier: GPL-2.0 /* * sparse memory mappings. */ #include <linux/mm.h> #include <linux/slab.h> #include <linux/mmzone.h> #include <linux/memblock.h> #include <linux/compiler.h> #include <linux/highmem.h> #include <linux/export.h> #include <linux/spinlock.h> #include <linux/vmalloc.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/bootmem_info.h> #include "internal.h" #include <asm/dma.h> /* * Permanent SPARSEMEM data: * * 1) mem_section - memory sections, mem_map's for valid memory */ #ifdef CONFIG_SPARSEMEM_EXTREME struct mem_section **mem_section; #else struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT] ____cacheline_internodealigned_in_smp; #endif EXPORT_SYMBOL(mem_section); #ifdef NODE_NOT_IN_PAGE_FLAGS /* * If we did not store the node number in the page then we have to * do a lookup in the section_to_node_table in order to find which * node the page belongs to. */ #if MAX_NUMNODES <= 256 static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned; #else static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned; #endif int page_to_nid(const struct page *page) { return section_to_node_table[page_to_section(page)]; } EXPORT_SYMBOL(page_to_nid); static void set_section_nid(unsigned long section_nr, int nid) { section_to_node_table[section_nr] = nid; } #else /* !NODE_NOT_IN_PAGE_FLAGS */ static inline void set_section_nid(unsigned long section_nr, int nid) { } #endif #ifdef CONFIG_SPARSEMEM_EXTREME static noinline struct mem_section __ref *sparse_index_alloc(int nid) { struct mem_section *section = NULL; unsigned long array_size = SECTIONS_PER_ROOT * sizeof(struct mem_section); if (slab_is_available()) { section = kzalloc_node(array_size, GFP_KERNEL, nid); } else { section = memblock_alloc_node(array_size, SMP_CACHE_BYTES, nid); if (!section) panic("%s: Failed to allocate %lu bytes nid=%d\n", __func__, array_size, nid); } return section; } static int __meminit sparse_index_init(unsigned long section_nr, int nid) { unsigned long root = SECTION_NR_TO_ROOT(section_nr); struct mem_section *section; /* * An existing section is possible in the sub-section hotplug * case. First hot-add instantiates, follow-on hot-add reuses * the existing section. * * The mem_hotplug_lock resolves the apparent race below. */ if (mem_section[root]) return 0; section = sparse_index_alloc(nid); if (!section) return -ENOMEM; mem_section[root] = section; return 0; } #else /* !SPARSEMEM_EXTREME */ static inline int sparse_index_init(unsigned long section_nr, int nid) { return 0; } #endif /* * During early boot, before section_mem_map is used for an actual * mem_map, we use section_mem_map to store the section's NUMA * node. This keeps us from having to use another data structure. The * node information is cleared just before we store the real mem_map. 
*/ static inline unsigned long sparse_encode_early_nid(int nid) { return ((unsigned long)nid << SECTION_NID_SHIFT); } static inline int sparse_early_nid(struct mem_section *section) { return (section->section_mem_map >> SECTION_NID_SHIFT); } /* Validate the physical addressing limitations of the model */ static void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn, unsigned long *end_pfn) { unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT); /* * Sanity checks - do not allow an architecture to pass * in larger pfns than the maximum scope of sparsemem: */ if (*start_pfn > max_sparsemem_pfn) { mminit_dprintk(MMINIT_WARNING, "pfnvalidation", "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n", *start_pfn, *end_pfn, max_sparsemem_pfn); WARN_ON_ONCE(1); *start_pfn = max_sparsemem_pfn; *end_pfn = max_sparsemem_pfn; } else if (*end_pfn > max_sparsemem_pfn) { mminit_dprintk(MMINIT_WARNING, "pfnvalidation", "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n", *start_pfn, *end_pfn, max_sparsemem_pfn); WARN_ON_ONCE(1); *end_pfn = max_sparsemem_pfn; } } /* * There are a number of times that we loop over NR_MEM_SECTIONS, * looking for section_present() on each. But, when we have very * large physical address spaces, NR_MEM_SECTIONS can also be * very large which makes the loops quite long. * * Keeping track of this gives us an easy way to break out of * those loops early. */ unsigned long __highest_present_section_nr; static void __section_mark_present(struct mem_section *ms, unsigned long section_nr) { if (section_nr > __highest_present_section_nr) __highest_present_section_nr = section_nr; ms->section_mem_map |= SECTION_MARKED_PRESENT; } #define for_each_present_section_nr(start, section_nr) \ for (section_nr = next_present_section_nr(start-1); \ section_nr != -1; \ section_nr = next_present_section_nr(section_nr)) static inline unsigned long first_present_section_nr(void) { return next_present_section_nr(-1); } #ifdef CONFIG_SPARSEMEM_VMEMMAP static void subsection_mask_set(unsigned long *map, unsigned long pfn, unsigned long nr_pages) { int idx = subsection_map_index(pfn); int end = subsection_map_index(pfn + nr_pages - 1); bitmap_set(map, idx, end - idx + 1); } void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages) { int end_sec = pfn_to_section_nr(pfn + nr_pages - 1); unsigned long nr, start_sec = pfn_to_section_nr(pfn); if (!nr_pages) return; for (nr = start_sec; nr <= end_sec; nr++) { struct mem_section *ms; unsigned long pfns; pfns = min(nr_pages, PAGES_PER_SECTION - (pfn & ~PAGE_SECTION_MASK)); ms = __nr_to_section(nr); subsection_mask_set(ms->usage->subsection_map, pfn, pfns); pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr, pfns, subsection_map_index(pfn), subsection_map_index(pfn + pfns - 1)); pfn += pfns; nr_pages -= pfns; } } #else void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages) { } #endif /* Record a memory area against a node. 
*/ static void __init memory_present(int nid, unsigned long start, unsigned long end) { unsigned long pfn; #ifdef CONFIG_SPARSEMEM_EXTREME if (unlikely(!mem_section)) { unsigned long size, align; size = sizeof(struct mem_section *) * NR_SECTION_ROOTS; align = 1 << (INTERNODE_CACHE_SHIFT); mem_section = memblock_alloc(size, align); if (!mem_section) panic("%s: Failed to allocate %lu bytes align=0x%lx\n", __func__, size, align); } #endif start &= PAGE_SECTION_MASK; mminit_validate_memmodel_limits(&start, &end); for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) { unsigned long section = pfn_to_section_nr(pfn); struct mem_section *ms; sparse_index_init(section, nid); set_section_nid(section, nid); ms = __nr_to_section(section); if (!ms->section_mem_map) { ms->section_mem_map = sparse_encode_early_nid(nid) | SECTION_IS_ONLINE; __section_mark_present(ms, section); } } } /* * Mark all memblocks as present using memory_present(). * This is a convenience function that is useful to mark all of the systems * memory as present during initialization. */ static void __init memblocks_present(void) { unsigned long start, end; int i, nid; for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) memory_present(nid, start, end); } /* * Subtle, we encode the real pfn into the mem_map such that * the identity pfn - section_mem_map will return the actual * physical page frame number. */ static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum) { unsigned long coded_mem_map = (unsigned long)(mem_map - (section_nr_to_pfn(pnum))); BUILD_BUG_ON(SECTION_MAP_LAST_BIT > PFN_SECTION_SHIFT); BUG_ON(coded_mem_map & ~SECTION_MAP_MASK); return coded_mem_map; } #ifdef CONFIG_MEMORY_HOTPLUG /* * Decode mem_map from the coded memmap */ struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum) { /* mask off the extra low bits of information */ coded_mem_map &= SECTION_MAP_MASK; return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum); } #endif /* CONFIG_MEMORY_HOTPLUG */ static void __meminit sparse_init_one_section(struct mem_section *ms, unsigned long pnum, struct page *mem_map, struct mem_section_usage *usage, unsigned long flags) { ms->section_mem_map &= ~SECTION_MAP_MASK; ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) | SECTION_HAS_MEM_MAP | flags; ms->usage = usage; } static unsigned long usemap_size(void) { return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long); } size_t mem_section_usage_size(void) { return sizeof(struct mem_section_usage) + usemap_size(); } #ifdef CONFIG_MEMORY_HOTREMOVE static inline phys_addr_t pgdat_to_phys(struct pglist_data *pgdat) { #ifndef CONFIG_NUMA VM_BUG_ON(pgdat != &contig_page_data); return __pa_symbol(&contig_page_data); #else return __pa(pgdat); #endif } static struct mem_section_usage * __init sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, unsigned long size) { struct mem_section_usage *usage; unsigned long goal, limit; int nid; /* * A page may contain usemaps for other sections preventing the * page being freed and making a section unremovable while * other sections referencing the usemap remain active. Similarly, * a pgdat can prevent a section being removed. If section A * contains a pgdat and section B contains the usemap, both * sections become inter-dependent. This allocates usemaps * from the same section as the pgdat where possible to avoid * this problem. 
*/ goal = pgdat_to_phys(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT); limit = goal + (1UL << PA_SECTION_SHIFT); nid = early_pfn_to_nid(goal >> PAGE_SHIFT); again: usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid); if (!usage && limit) { limit = 0; goto again; } return usage; } static void __init check_usemap_section_nr(int nid, struct mem_section_usage *usage) { unsigned long usemap_snr, pgdat_snr; static unsigned long old_usemap_snr; static unsigned long old_pgdat_snr; struct pglist_data *pgdat = NODE_DATA(nid); int usemap_nid; /* First call */ if (!old_usemap_snr) { old_usemap_snr = NR_MEM_SECTIONS; old_pgdat_snr = NR_MEM_SECTIONS; } usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT); pgdat_snr = pfn_to_section_nr(pgdat_to_phys(pgdat) >> PAGE_SHIFT); if (usemap_snr == pgdat_snr) return; if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr) /* skip redundant message */ return; old_usemap_snr = usemap_snr; old_pgdat_snr = pgdat_snr; usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr)); if (usemap_nid != nid) { pr_info("node %d must be removed before remove section %ld\n", nid, usemap_snr); return; } /* * There is a circular dependency. * Some platforms allow un-removable section because they will just * gather other removable sections for dynamic partitioning. * Just notify un-removable section's number here. */ pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n", usemap_snr, pgdat_snr, nid); } #else static struct mem_section_usage * __init sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, unsigned long size) { return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id); } static void __init check_usemap_section_nr(int nid, struct mem_section_usage *usage) { } #endif /* CONFIG_MEMORY_HOTREMOVE */ #ifdef CONFIG_SPARSEMEM_VMEMMAP static unsigned long __init section_map_size(void) { return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE); } #else static unsigned long __init section_map_size(void) { return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION); } struct page __init *__populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap, struct dev_pagemap *pgmap) { unsigned long size = section_map_size(); struct page *map = sparse_buffer_alloc(size); phys_addr_t addr = __pa(MAX_DMA_ADDRESS); if (map) return map; map = memmap_alloc(size, size, addr, nid, false); if (!map) panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n", __func__, size, PAGE_SIZE, nid, &addr); return map; } #endif /* !CONFIG_SPARSEMEM_VMEMMAP */ static void *sparsemap_buf __meminitdata; static void *sparsemap_buf_end __meminitdata; static inline void __meminit sparse_buffer_free(unsigned long size) { WARN_ON(!sparsemap_buf || size == 0); memblock_free(sparsemap_buf, size); } static void __init sparse_buffer_init(unsigned long size, int nid) { phys_addr_t addr = __pa(MAX_DMA_ADDRESS); WARN_ON(sparsemap_buf); /* forgot to call sparse_buffer_fini()? 
*/ /* * Pre-allocated buffer is mainly used by __populate_section_memmap * and we want it to be properly aligned to the section size - this is * especially the case for VMEMMAP which maps memmap to PMDs */ sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true); sparsemap_buf_end = sparsemap_buf + size; } static void __init sparse_buffer_fini(void) { unsigned long size = sparsemap_buf_end - sparsemap_buf; if (sparsemap_buf && size > 0) sparse_buffer_free(size); sparsemap_buf = NULL; } void * __meminit sparse_buffer_alloc(unsigned long size) { void *ptr = NULL; if (sparsemap_buf) { ptr = (void *) roundup((unsigned long)sparsemap_buf, size); if (ptr + size > sparsemap_buf_end) ptr = NULL; else { /* Free redundant aligned space */ if ((unsigned long)(ptr - sparsemap_buf) > 0) sparse_buffer_free((unsigned long)(ptr - sparsemap_buf)); sparsemap_buf = ptr + size; } } return ptr; } void __weak __meminit vmemmap_populate_print_last(void) { } /* * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end) * And number of present sections in this node is map_count. */ static void __init sparse_init_nid(int nid, unsigned long pnum_begin, unsigned long pnum_end, unsigned long map_count) { struct mem_section_usage *usage; unsigned long pnum; struct page *map; usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid), mem_section_usage_size() * map_count); if (!usage) { pr_err("%s: node[%d] usemap allocation failed", __func__, nid); goto failed; } sparse_buffer_init(map_count * section_map_size(), nid); for_each_present_section_nr(pnum_begin, pnum) { unsigned long pfn = section_nr_to_pfn(pnum); if (pnum >= pnum_end) break; map = __populate_section_memmap(pfn, PAGES_PER_SECTION, nid, NULL, NULL); if (!map) { pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.", __func__, nid); pnum_begin = pnum; sparse_buffer_fini(); goto failed; } check_usemap_section_nr(nid, usage); sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage, SECTION_IS_EARLY); usage = (void *) usage + mem_section_usage_size(); } sparse_buffer_fini(); return; failed: /* We failed to allocate, mark all the following pnums as not present */ for_each_present_section_nr(pnum_begin, pnum) { struct mem_section *ms; if (pnum >= pnum_end) break; ms = __nr_to_section(pnum); ms->section_mem_map = 0; } } /* * Allocate the accumulated non-linear sections, allocate a mem_map * for each and record the physical to section mapping. 
*/ void __init sparse_init(void) { unsigned long pnum_end, pnum_begin, map_count = 1; int nid_begin; memblocks_present(); pnum_begin = first_present_section_nr(); nid_begin = sparse_early_nid(__nr_to_section(pnum_begin)); /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */ set_pageblock_order(); for_each_present_section_nr(pnum_begin + 1, pnum_end) { int nid = sparse_early_nid(__nr_to_section(pnum_end)); if (nid == nid_begin) { map_count++; continue; } /* Init node with sections in range [pnum_begin, pnum_end) */ sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count); nid_begin = nid; pnum_begin = pnum_end; map_count = 1; } /* cover the last node */ sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count); vmemmap_populate_print_last(); } #ifdef CONFIG_MEMORY_HOTPLUG /* Mark all memory sections within the pfn range as online */ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn) { unsigned long pfn; for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { unsigned long section_nr = pfn_to_section_nr(pfn); struct mem_section *ms; /* onlining code should never touch invalid ranges */ if (WARN_ON(!valid_section_nr(section_nr))) continue; ms = __nr_to_section(section_nr); ms->section_mem_map |= SECTION_IS_ONLINE; } } /* Mark all memory sections within the pfn range as offline */ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) { unsigned long pfn; for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { unsigned long section_nr = pfn_to_section_nr(pfn); struct mem_section *ms; /* * TODO this needs some double checking. Offlining code makes * sure to check pfn_valid but those checks might be just bogus */ if (WARN_ON(!valid_section_nr(section_nr))) continue; ms = __nr_to_section(section_nr); ms->section_mem_map &= ~SECTION_IS_ONLINE; } } #ifdef CONFIG_SPARSEMEM_VMEMMAP static struct page * __meminit populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap, struct dev_pagemap *pgmap) { return __populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap); } static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages, struct vmem_altmap *altmap) { unsigned long start = (unsigned long) pfn_to_page(pfn); unsigned long end = start + nr_pages * sizeof(struct page); vmemmap_free(start, end, altmap); } static void free_map_bootmem(struct page *memmap) { unsigned long start = (unsigned long)memmap; unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION); vmemmap_free(start, end, NULL); } static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages) { DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 }; DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 }; struct mem_section *ms = __pfn_to_section(pfn); unsigned long *subsection_map = ms->usage ? 
&ms->usage->subsection_map[0] : NULL; subsection_mask_set(map, pfn, nr_pages); if (subsection_map) bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION); if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION), "section already deactivated (%#lx + %ld)\n", pfn, nr_pages)) return -EINVAL; bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION); return 0; } static bool is_subsection_map_empty(struct mem_section *ms) { return bitmap_empty(&ms->usage->subsection_map[0], SUBSECTIONS_PER_SECTION); } static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages) { struct mem_section *ms = __pfn_to_section(pfn); DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 }; unsigned long *subsection_map; int rc = 0; subsection_mask_set(map, pfn, nr_pages); subsection_map = &ms->usage->subsection_map[0]; if (bitmap_empty(map, SUBSECTIONS_PER_SECTION)) rc = -EINVAL; else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION)) rc = -EEXIST; else bitmap_or(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION); return rc; } #else static struct page * __meminit populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap, struct dev_pagemap *pgmap) { return kvmalloc_node(array_size(sizeof(struct page), PAGES_PER_SECTION), GFP_KERNEL, nid); } static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages, struct vmem_altmap *altmap) { kvfree(pfn_to_page(pfn)); } static void free_map_bootmem(struct page *memmap) { unsigned long maps_section_nr, removing_section_nr, i; unsigned long magic, nr_pages; struct page *page = virt_to_page(memmap); nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page)) >> PAGE_SHIFT; for (i = 0; i < nr_pages; i++, page++) { magic = page->index; BUG_ON(magic == NODE_INFO); maps_section_nr = pfn_to_section_nr(page_to_pfn(page)); removing_section_nr = page_private(page); /* * When this function is called, the removing section is * logical offlined state. This means all pages are isolated * from page allocator. If removing section's memmap is placed * on the same section, it must not be freed. * If it is freed, page allocator may allocate it which will * be removed physically soon. */ if (maps_section_nr != removing_section_nr) put_page_bootmem(page); } } static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages) { return 0; } static bool is_subsection_map_empty(struct mem_section *ms) { return true; } static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages) { return 0; } #endif /* CONFIG_SPARSEMEM_VMEMMAP */ /* * To deactivate a memory region, there are 3 cases to handle across * two configurations (SPARSEMEM_VMEMMAP={y,n}): * * 1. deactivation of a partial hot-added section (only possible in * the SPARSEMEM_VMEMMAP=y case). * a) section was present at memory init. * b) section was hot-added post memory init. * 2. deactivation of a complete hot-added section. * 3. deactivation of a complete section from memory init. * * For 1, when subsection_map does not empty we will not be freeing the * usage map, but still need to free the vmemmap range. 
* * For 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified */ static void section_deactivate(unsigned long pfn, unsigned long nr_pages, struct vmem_altmap *altmap) { struct mem_section *ms = __pfn_to_section(pfn); bool section_is_early = early_section(ms); struct page *memmap = NULL; bool empty; if (clear_subsection_map(pfn, nr_pages)) return; empty = is_subsection_map_empty(ms); if (empty) { unsigned long section_nr = pfn_to_section_nr(pfn); /* * When removing an early section, the usage map is kept (as the * usage maps of other sections fall into the same page). It * will be re-used when re-adding the section - which is then no * longer an early section. If the usage map is PageReserved, it * was allocated during boot. */ if (!PageReserved(virt_to_page(ms->usage))) { kfree(ms->usage); ms->usage = NULL; } memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr); /* * Mark the section invalid so that valid_section() * return false. This prevents code from dereferencing * ms->usage array. */ ms->section_mem_map &= ~SECTION_HAS_MEM_MAP; } /* * The memmap of early sections is always fully populated. See * section_activate() and pfn_valid() . */ if (!section_is_early) depopulate_section_memmap(pfn, nr_pages, altmap); else if (memmap) free_map_bootmem(memmap); if (empty) ms->section_mem_map = (unsigned long)NULL; } static struct page * __meminit section_activate(int nid, unsigned long pfn, unsigned long nr_pages, struct vmem_altmap *altmap, struct dev_pagemap *pgmap) { struct mem_section *ms = __pfn_to_section(pfn); struct mem_section_usage *usage = NULL; struct page *memmap; int rc; if (!ms->usage) { usage = kzalloc(mem_section_usage_size(), GFP_KERNEL); if (!usage) return ERR_PTR(-ENOMEM); ms->usage = usage; } rc = fill_subsection_map(pfn, nr_pages); if (rc) { if (usage) ms->usage = NULL; kfree(usage); return ERR_PTR(rc); } /* * The early init code does not consider partially populated * initial sections, it simply assumes that memory will never be * referenced. If we hot-add memory into such a section then we * do not need to populate the memmap and can simply reuse what * is already there. */ if (nr_pages < PAGES_PER_SECTION && early_section(ms)) return pfn_to_page(pfn); memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap); if (!memmap) { section_deactivate(pfn, nr_pages, altmap); return ERR_PTR(-ENOMEM); } return memmap; } /** * sparse_add_section - add a memory section, or populate an existing one * @nid: The node to add section on * @start_pfn: start pfn of the memory range * @nr_pages: number of pfns to add in the section * @altmap: alternate pfns to allocate the memmap backing store * @pgmap: alternate compound page geometry for devmap mappings * * This is only intended for hotplug. * * Note that only VMEMMAP supports sub-section aligned hotplug, * the proper alignment and size are gated by check_pfn_span(). * * * Return: * * 0 - On success. * * -EEXIST - Section has been present. * * -ENOMEM - Out of memory. */ int __meminit sparse_add_section(int nid, unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap, struct dev_pagemap *pgmap) { unsigned long section_nr = pfn_to_section_nr(start_pfn); struct mem_section *ms; struct page *memmap; int ret; ret = sparse_index_init(section_nr, nid); if (ret < 0) return ret; memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap); if (IS_ERR(memmap)) return PTR_ERR(memmap); /* * Poison uninitialized struct pages in order to catch invalid flags * combinations. 
*/ page_init_poison(memmap, sizeof(struct page) * nr_pages); ms = __nr_to_section(section_nr); set_section_nid(section_nr, nid); __section_mark_present(ms, section_nr); /* Align memmap to section boundary in the subsection case */ if (section_nr_to_pfn(section_nr) != start_pfn) memmap = pfn_to_page(section_nr_to_pfn(section_nr)); sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0); return 0; } void sparse_remove_section(unsigned long pfn, unsigned long nr_pages, struct vmem_altmap *altmap) { struct mem_section *ms = __pfn_to_section(pfn); if (WARN_ON_ONCE(!valid_section(ms))) return; section_deactivate(pfn, nr_pages, altmap); } #endif /* CONFIG_MEMORY_HOTPLUG */
linux-master
mm/sparse.c
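The sparse_buffer_alloc() path above treats the requested size as the alignment: it rounds the buffer cursor up, hands any skipped padding back via sparse_buffer_free(), and returns NULL once the preallocated window is exhausted. The standalone sketch below (plain userspace C, not kernel code; buf_alloc(), buf_free_range() and the 64 KiB window are hypothetical stand-ins) models only that bookkeeping.

/*
 * Minimal userspace sketch of the alignment-aware bump allocation used by
 * sparse_buffer_alloc(): size doubles as alignment, alignment padding is
 * given back, NULL is returned when the window runs out.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static char *buf, *buf_end;

static uintptr_t roundup_ul(uintptr_t x, uintptr_t align)
{
	return (x + align - 1) / align * align;
}

static void buf_free_range(char *start, size_t len)
{
	/* the kernel returns this range to memblock; here we just report it */
	printf("returning %zu padding bytes at %p\n", len, (void *)start);
}

static void *buf_alloc(size_t size)
{
	char *ptr = (char *)roundup_ul((uintptr_t)buf, size);

	if (ptr + size > buf_end)
		return NULL;			/* window exhausted */
	if (ptr != buf)
		buf_free_range(buf, (size_t)(ptr - buf)); /* give back padding */
	buf = ptr + size;
	return ptr;
}

int main(void)
{
	char *base = malloc(1 << 16);		/* assumed 64 KiB window */

	if (!base)
		return 1;
	buf = base;
	buf_end = base + (1 << 16);

	void *a = buf_alloc(4096);
	void *b = buf_alloc(8192);

	printf("a=%p b=%p\n", a, b);
	free(base);
	return 0;
}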
// SPDX-License-Identifier: GPL-2.0-only #include <linux/blkdev.h> #include <linux/wait.h> #include <linux/rbtree.h> #include <linux/kthread.h> #include <linux/backing-dev.h> #include <linux/blk-cgroup.h> #include <linux/freezer.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/mm.h> #include <linux/sched/mm.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/writeback.h> #include <linux/device.h> #include <trace/events/writeback.h> #include "internal.h" struct backing_dev_info noop_backing_dev_info; EXPORT_SYMBOL_GPL(noop_backing_dev_info); static const char *bdi_unknown_name = "(unknown)"; /* * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU * reader side locking. */ DEFINE_SPINLOCK(bdi_lock); static u64 bdi_id_cursor; static struct rb_root bdi_tree = RB_ROOT; LIST_HEAD(bdi_list); /* bdi_wq serves all asynchronous writeback tasks */ struct workqueue_struct *bdi_wq; #ifdef CONFIG_DEBUG_FS #include <linux/debugfs.h> #include <linux/seq_file.h> static struct dentry *bdi_debug_root; static void bdi_debug_init(void) { bdi_debug_root = debugfs_create_dir("bdi", NULL); } static int bdi_debug_stats_show(struct seq_file *m, void *v) { struct backing_dev_info *bdi = m->private; struct bdi_writeback *wb = &bdi->wb; unsigned long background_thresh; unsigned long dirty_thresh; unsigned long wb_thresh; unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time; struct inode *inode; nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0; spin_lock(&wb->list_lock); list_for_each_entry(inode, &wb->b_dirty, i_io_list) nr_dirty++; list_for_each_entry(inode, &wb->b_io, i_io_list) nr_io++; list_for_each_entry(inode, &wb->b_more_io, i_io_list) nr_more_io++; list_for_each_entry(inode, &wb->b_dirty_time, i_io_list) if (inode->i_state & I_DIRTY_TIME) nr_dirty_time++; spin_unlock(&wb->list_lock); global_dirty_limits(&background_thresh, &dirty_thresh); wb_thresh = wb_calc_thresh(wb, dirty_thresh); seq_printf(m, "BdiWriteback: %10lu kB\n" "BdiReclaimable: %10lu kB\n" "BdiDirtyThresh: %10lu kB\n" "DirtyThresh: %10lu kB\n" "BackgroundThresh: %10lu kB\n" "BdiDirtied: %10lu kB\n" "BdiWritten: %10lu kB\n" "BdiWriteBandwidth: %10lu kBps\n" "b_dirty: %10lu\n" "b_io: %10lu\n" "b_more_io: %10lu\n" "b_dirty_time: %10lu\n" "bdi_list: %10u\n" "state: %10lx\n", (unsigned long) K(wb_stat(wb, WB_WRITEBACK)), (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)), K(wb_thresh), K(dirty_thresh), K(background_thresh), (unsigned long) K(wb_stat(wb, WB_DIRTIED)), (unsigned long) K(wb_stat(wb, WB_WRITTEN)), (unsigned long) K(wb->write_bandwidth), nr_dirty, nr_io, nr_more_io, nr_dirty_time, !list_empty(&bdi->bdi_list), bdi->wb.state); return 0; } DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats); static void bdi_debug_register(struct backing_dev_info *bdi, const char *name) { bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root); debugfs_create_file("stats", 0444, bdi->debug_dir, bdi, &bdi_debug_stats_fops); } static void bdi_debug_unregister(struct backing_dev_info *bdi) { debugfs_remove_recursive(bdi->debug_dir); } #else static inline void bdi_debug_init(void) { } static inline void bdi_debug_register(struct backing_dev_info *bdi, const char *name) { } static inline void bdi_debug_unregister(struct backing_dev_info *bdi) { } #endif static ssize_t read_ahead_kb_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct backing_dev_info *bdi = dev_get_drvdata(dev); unsigned long read_ahead_kb; ssize_t ret; ret = kstrtoul(buf, 10, &read_ahead_kb); if (ret < 0) 
return ret; bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10); return count; } #define BDI_SHOW(name, expr) \ static ssize_t name##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct backing_dev_info *bdi = dev_get_drvdata(dev); \ \ return sysfs_emit(buf, "%lld\n", (long long)expr); \ } \ static DEVICE_ATTR_RW(name); BDI_SHOW(read_ahead_kb, K(bdi->ra_pages)) static ssize_t min_ratio_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct backing_dev_info *bdi = dev_get_drvdata(dev); unsigned int ratio; ssize_t ret; ret = kstrtouint(buf, 10, &ratio); if (ret < 0) return ret; ret = bdi_set_min_ratio(bdi, ratio); if (!ret) ret = count; return ret; } BDI_SHOW(min_ratio, bdi->min_ratio / BDI_RATIO_SCALE) static ssize_t min_ratio_fine_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct backing_dev_info *bdi = dev_get_drvdata(dev); unsigned int ratio; ssize_t ret; ret = kstrtouint(buf, 10, &ratio); if (ret < 0) return ret; ret = bdi_set_min_ratio_no_scale(bdi, ratio); if (!ret) ret = count; return ret; } BDI_SHOW(min_ratio_fine, bdi->min_ratio) static ssize_t max_ratio_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct backing_dev_info *bdi = dev_get_drvdata(dev); unsigned int ratio; ssize_t ret; ret = kstrtouint(buf, 10, &ratio); if (ret < 0) return ret; ret = bdi_set_max_ratio(bdi, ratio); if (!ret) ret = count; return ret; } BDI_SHOW(max_ratio, bdi->max_ratio / BDI_RATIO_SCALE) static ssize_t max_ratio_fine_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct backing_dev_info *bdi = dev_get_drvdata(dev); unsigned int ratio; ssize_t ret; ret = kstrtouint(buf, 10, &ratio); if (ret < 0) return ret; ret = bdi_set_max_ratio_no_scale(bdi, ratio); if (!ret) ret = count; return ret; } BDI_SHOW(max_ratio_fine, bdi->max_ratio) static ssize_t min_bytes_show(struct device *dev, struct device_attribute *attr, char *buf) { struct backing_dev_info *bdi = dev_get_drvdata(dev); return sysfs_emit(buf, "%llu\n", bdi_get_min_bytes(bdi)); } static ssize_t min_bytes_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct backing_dev_info *bdi = dev_get_drvdata(dev); u64 bytes; ssize_t ret; ret = kstrtoull(buf, 10, &bytes); if (ret < 0) return ret; ret = bdi_set_min_bytes(bdi, bytes); if (!ret) ret = count; return ret; } static DEVICE_ATTR_RW(min_bytes); static ssize_t max_bytes_show(struct device *dev, struct device_attribute *attr, char *buf) { struct backing_dev_info *bdi = dev_get_drvdata(dev); return sysfs_emit(buf, "%llu\n", bdi_get_max_bytes(bdi)); } static ssize_t max_bytes_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct backing_dev_info *bdi = dev_get_drvdata(dev); u64 bytes; ssize_t ret; ret = kstrtoull(buf, 10, &bytes); if (ret < 0) return ret; ret = bdi_set_max_bytes(bdi, bytes); if (!ret) ret = count; return ret; } static DEVICE_ATTR_RW(max_bytes); static ssize_t stable_pages_required_show(struct device *dev, struct device_attribute *attr, char *buf) { dev_warn_once(dev, "the stable_pages_required attribute has been removed. 
Use the stable_writes queue attribute instead.\n"); return sysfs_emit(buf, "%d\n", 0); } static DEVICE_ATTR_RO(stable_pages_required); static ssize_t strict_limit_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct backing_dev_info *bdi = dev_get_drvdata(dev); unsigned int strict_limit; ssize_t ret; ret = kstrtouint(buf, 10, &strict_limit); if (ret < 0) return ret; ret = bdi_set_strict_limit(bdi, strict_limit); if (!ret) ret = count; return ret; } static ssize_t strict_limit_show(struct device *dev, struct device_attribute *attr, char *buf) { struct backing_dev_info *bdi = dev_get_drvdata(dev); return sysfs_emit(buf, "%d\n", !!(bdi->capabilities & BDI_CAP_STRICTLIMIT)); } static DEVICE_ATTR_RW(strict_limit); static struct attribute *bdi_dev_attrs[] = { &dev_attr_read_ahead_kb.attr, &dev_attr_min_ratio.attr, &dev_attr_min_ratio_fine.attr, &dev_attr_max_ratio.attr, &dev_attr_max_ratio_fine.attr, &dev_attr_min_bytes.attr, &dev_attr_max_bytes.attr, &dev_attr_stable_pages_required.attr, &dev_attr_strict_limit.attr, NULL, }; ATTRIBUTE_GROUPS(bdi_dev); static const struct class bdi_class = { .name = "bdi", .dev_groups = bdi_dev_groups, }; static __init int bdi_class_init(void) { int ret; ret = class_register(&bdi_class); if (ret) return ret; bdi_debug_init(); return 0; } postcore_initcall(bdi_class_init); static int __init default_bdi_init(void) { bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 0); if (!bdi_wq) return -ENOMEM; return 0; } subsys_initcall(default_bdi_init); /* * This function is used when the first inode for this wb is marked dirty. It * wakes-up the corresponding bdi thread which should then take care of the * periodic background write-out of dirty inodes. Since the write-out would * starts only 'dirty_writeback_interval' centisecs from now anyway, we just * set up a timer which wakes the bdi thread up later. * * Note, we wouldn't bother setting up the timer, but this function is on the * fast-path (used by '__mark_inode_dirty()'), so we save few context switches * by delaying the wake-up. * * We have to be careful not to postpone flush work if it is scheduled for * earlier. Thus we use queue_delayed_work(). 
*/ void wb_wakeup_delayed(struct bdi_writeback *wb) { unsigned long timeout; timeout = msecs_to_jiffies(dirty_writeback_interval * 10); spin_lock_irq(&wb->work_lock); if (test_bit(WB_registered, &wb->state)) queue_delayed_work(bdi_wq, &wb->dwork, timeout); spin_unlock_irq(&wb->work_lock); } static void wb_update_bandwidth_workfn(struct work_struct *work) { struct bdi_writeback *wb = container_of(to_delayed_work(work), struct bdi_writeback, bw_dwork); wb_update_bandwidth(wb); } /* * Initial write bandwidth: 100 MB/s */ #define INIT_BW (100 << (20 - PAGE_SHIFT)) static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi, gfp_t gfp) { int i, err; memset(wb, 0, sizeof(*wb)); wb->bdi = bdi; wb->last_old_flush = jiffies; INIT_LIST_HEAD(&wb->b_dirty); INIT_LIST_HEAD(&wb->b_io); INIT_LIST_HEAD(&wb->b_more_io); INIT_LIST_HEAD(&wb->b_dirty_time); spin_lock_init(&wb->list_lock); atomic_set(&wb->writeback_inodes, 0); wb->bw_time_stamp = jiffies; wb->balanced_dirty_ratelimit = INIT_BW; wb->dirty_ratelimit = INIT_BW; wb->write_bandwidth = INIT_BW; wb->avg_write_bandwidth = INIT_BW; spin_lock_init(&wb->work_lock); INIT_LIST_HEAD(&wb->work_list); INIT_DELAYED_WORK(&wb->dwork, wb_workfn); INIT_DELAYED_WORK(&wb->bw_dwork, wb_update_bandwidth_workfn); wb->dirty_sleep = jiffies; err = fprop_local_init_percpu(&wb->completions, gfp); if (err) return err; for (i = 0; i < NR_WB_STAT_ITEMS; i++) { err = percpu_counter_init(&wb->stat[i], 0, gfp); if (err) goto out_destroy_stat; } return 0; out_destroy_stat: while (i--) percpu_counter_destroy(&wb->stat[i]); fprop_local_destroy_percpu(&wb->completions); return err; } static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb); /* * Remove bdi from the global list and shutdown any threads we have running */ static void wb_shutdown(struct bdi_writeback *wb) { /* Make sure nobody queues further work */ spin_lock_irq(&wb->work_lock); if (!test_and_clear_bit(WB_registered, &wb->state)) { spin_unlock_irq(&wb->work_lock); return; } spin_unlock_irq(&wb->work_lock); cgwb_remove_from_bdi_list(wb); /* * Drain work list and shutdown the delayed_work. !WB_registered * tells wb_workfn() that @wb is dying and its work_list needs to * be drained no matter what. */ mod_delayed_work(bdi_wq, &wb->dwork, 0); flush_delayed_work(&wb->dwork); WARN_ON(!list_empty(&wb->work_list)); flush_delayed_work(&wb->bw_dwork); } static void wb_exit(struct bdi_writeback *wb) { int i; WARN_ON(delayed_work_pending(&wb->dwork)); for (i = 0; i < NR_WB_STAT_ITEMS; i++) percpu_counter_destroy(&wb->stat[i]); fprop_local_destroy_percpu(&wb->completions); } #ifdef CONFIG_CGROUP_WRITEBACK #include <linux/memcontrol.h> /* * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and * memcg->cgwb_list. bdi->cgwb_tree is also RCU protected. 
*/ static DEFINE_SPINLOCK(cgwb_lock); static struct workqueue_struct *cgwb_release_wq; static LIST_HEAD(offline_cgwbs); static void cleanup_offline_cgwbs_workfn(struct work_struct *work); static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn); static void cgwb_free_rcu(struct rcu_head *rcu_head) { struct bdi_writeback *wb = container_of(rcu_head, struct bdi_writeback, rcu); percpu_ref_exit(&wb->refcnt); kfree(wb); } static void cgwb_release_workfn(struct work_struct *work) { struct bdi_writeback *wb = container_of(work, struct bdi_writeback, release_work); struct backing_dev_info *bdi = wb->bdi; mutex_lock(&wb->bdi->cgwb_release_mutex); wb_shutdown(wb); css_put(wb->memcg_css); css_put(wb->blkcg_css); mutex_unlock(&wb->bdi->cgwb_release_mutex); /* triggers blkg destruction if no online users left */ blkcg_unpin_online(wb->blkcg_css); fprop_local_destroy_percpu(&wb->memcg_completions); spin_lock_irq(&cgwb_lock); list_del(&wb->offline_node); spin_unlock_irq(&cgwb_lock); wb_exit(wb); bdi_put(bdi); WARN_ON_ONCE(!list_empty(&wb->b_attached)); call_rcu(&wb->rcu, cgwb_free_rcu); } static void cgwb_release(struct percpu_ref *refcnt) { struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback, refcnt); queue_work(cgwb_release_wq, &wb->release_work); } static void cgwb_kill(struct bdi_writeback *wb) { lockdep_assert_held(&cgwb_lock); WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id)); list_del(&wb->memcg_node); list_del(&wb->blkcg_node); list_add(&wb->offline_node, &offline_cgwbs); percpu_ref_kill(&wb->refcnt); } static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb) { spin_lock_irq(&cgwb_lock); list_del_rcu(&wb->bdi_node); spin_unlock_irq(&cgwb_lock); } static int cgwb_create(struct backing_dev_info *bdi, struct cgroup_subsys_state *memcg_css, gfp_t gfp) { struct mem_cgroup *memcg; struct cgroup_subsys_state *blkcg_css; struct list_head *memcg_cgwb_list, *blkcg_cgwb_list; struct bdi_writeback *wb; unsigned long flags; int ret = 0; memcg = mem_cgroup_from_css(memcg_css); blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); memcg_cgwb_list = &memcg->cgwb_list; blkcg_cgwb_list = blkcg_get_cgwb_list(blkcg_css); /* look up again under lock and discard on blkcg mismatch */ spin_lock_irqsave(&cgwb_lock, flags); wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id); if (wb && wb->blkcg_css != blkcg_css) { cgwb_kill(wb); wb = NULL; } spin_unlock_irqrestore(&cgwb_lock, flags); if (wb) goto out_put; /* need to create a new one */ wb = kmalloc(sizeof(*wb), gfp); if (!wb) { ret = -ENOMEM; goto out_put; } ret = wb_init(wb, bdi, gfp); if (ret) goto err_free; ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp); if (ret) goto err_wb_exit; ret = fprop_local_init_percpu(&wb->memcg_completions, gfp); if (ret) goto err_ref_exit; wb->memcg_css = memcg_css; wb->blkcg_css = blkcg_css; INIT_LIST_HEAD(&wb->b_attached); INIT_WORK(&wb->release_work, cgwb_release_workfn); set_bit(WB_registered, &wb->state); bdi_get(bdi); /* * The root wb determines the registered state of the whole bdi and * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate * whether they're still online. Don't link @wb if any is dead. * See wb_memcg_offline() and wb_blkcg_offline(). 
*/ ret = -ENODEV; spin_lock_irqsave(&cgwb_lock, flags); if (test_bit(WB_registered, &bdi->wb.state) && blkcg_cgwb_list->next && memcg_cgwb_list->next) { /* we might have raced another instance of this function */ ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb); if (!ret) { list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list); list_add(&wb->memcg_node, memcg_cgwb_list); list_add(&wb->blkcg_node, blkcg_cgwb_list); blkcg_pin_online(blkcg_css); css_get(memcg_css); css_get(blkcg_css); } } spin_unlock_irqrestore(&cgwb_lock, flags); if (ret) { if (ret == -EEXIST) ret = 0; goto err_fprop_exit; } goto out_put; err_fprop_exit: bdi_put(bdi); fprop_local_destroy_percpu(&wb->memcg_completions); err_ref_exit: percpu_ref_exit(&wb->refcnt); err_wb_exit: wb_exit(wb); err_free: kfree(wb); out_put: css_put(blkcg_css); return ret; } /** * wb_get_lookup - get wb for a given memcg * @bdi: target bdi * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref) * * Try to get the wb for @memcg_css on @bdi. The returned wb has its * refcount incremented. * * This function uses css_get() on @memcg_css and thus expects its refcnt * to be positive on invocation. IOW, rcu_read_lock() protection on * @memcg_css isn't enough. try_get it before calling this function. * * A wb is keyed by its associated memcg. As blkcg implicitly enables * memcg on the default hierarchy, memcg association is guaranteed to be * more specific (equal or descendant to the associated blkcg) and thus can * identify both the memcg and blkcg associations. * * Because the blkcg associated with a memcg may change as blkcg is enabled * and disabled closer to root in the hierarchy, each wb keeps track of * both the memcg and blkcg associated with it and verifies the blkcg on * each lookup. On mismatch, the existing wb is discarded and a new one is * created. */ struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi, struct cgroup_subsys_state *memcg_css) { struct bdi_writeback *wb; if (!memcg_css->parent) return &bdi->wb; rcu_read_lock(); wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id); if (wb) { struct cgroup_subsys_state *blkcg_css; /* see whether the blkcg association has changed */ blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb))) wb = NULL; css_put(blkcg_css); } rcu_read_unlock(); return wb; } /** * wb_get_create - get wb for a given memcg, create if necessary * @bdi: target bdi * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref) * @gfp: allocation mask to use * * Try to get the wb for @memcg_css on @bdi. If it doesn't exist, try to * create one. See wb_get_lookup() for more details. 
*/ struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, struct cgroup_subsys_state *memcg_css, gfp_t gfp) { struct bdi_writeback *wb; might_alloc(gfp); do { wb = wb_get_lookup(bdi, memcg_css); } while (!wb && !cgwb_create(bdi, memcg_css, gfp)); return wb; } static int cgwb_bdi_init(struct backing_dev_info *bdi) { int ret; INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC); mutex_init(&bdi->cgwb_release_mutex); init_rwsem(&bdi->wb_switch_rwsem); ret = wb_init(&bdi->wb, bdi, GFP_KERNEL); if (!ret) { bdi->wb.memcg_css = &root_mem_cgroup->css; bdi->wb.blkcg_css = blkcg_root_css; } return ret; } static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { struct radix_tree_iter iter; void **slot; struct bdi_writeback *wb; WARN_ON(test_bit(WB_registered, &bdi->wb.state)); spin_lock_irq(&cgwb_lock); radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) cgwb_kill(*slot); spin_unlock_irq(&cgwb_lock); mutex_lock(&bdi->cgwb_release_mutex); spin_lock_irq(&cgwb_lock); while (!list_empty(&bdi->wb_list)) { wb = list_first_entry(&bdi->wb_list, struct bdi_writeback, bdi_node); spin_unlock_irq(&cgwb_lock); wb_shutdown(wb); spin_lock_irq(&cgwb_lock); } spin_unlock_irq(&cgwb_lock); mutex_unlock(&bdi->cgwb_release_mutex); } /* * cleanup_offline_cgwbs_workfn - try to release dying cgwbs * * Try to release dying cgwbs by switching attached inodes to the nearest * living ancestor's writeback. Processed wbs are placed at the end * of the list to guarantee the forward progress. */ static void cleanup_offline_cgwbs_workfn(struct work_struct *work) { struct bdi_writeback *wb; LIST_HEAD(processed); spin_lock_irq(&cgwb_lock); while (!list_empty(&offline_cgwbs)) { wb = list_first_entry(&offline_cgwbs, struct bdi_writeback, offline_node); list_move(&wb->offline_node, &processed); /* * If wb is dirty, cleaning up the writeback by switching * attached inodes will result in an effective removal of any * bandwidth restrictions, which isn't the goal. Instead, * it can be postponed until the next time, when all io * will be likely completed. If in the meantime some inodes * will get re-dirtied, they should be eventually switched to * a new cgwb. */ if (wb_has_dirty_io(wb)) continue; if (!wb_tryget(wb)) continue; spin_unlock_irq(&cgwb_lock); while (cleanup_offline_cgwb(wb)) cond_resched(); spin_lock_irq(&cgwb_lock); wb_put(wb); } if (!list_empty(&processed)) list_splice_tail(&processed, &offline_cgwbs); spin_unlock_irq(&cgwb_lock); } /** * wb_memcg_offline - kill all wb's associated with a memcg being offlined * @memcg: memcg being offlined * * Also prevents creation of any new wb's associated with @memcg. */ void wb_memcg_offline(struct mem_cgroup *memcg) { struct list_head *memcg_cgwb_list = &memcg->cgwb_list; struct bdi_writeback *wb, *next; spin_lock_irq(&cgwb_lock); list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node) cgwb_kill(wb); memcg_cgwb_list->next = NULL; /* prevent new wb's */ spin_unlock_irq(&cgwb_lock); queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work); } /** * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined * @css: blkcg being offlined * * Also prevents creation of any new wb's associated with @blkcg. 
*/ void wb_blkcg_offline(struct cgroup_subsys_state *css) { struct bdi_writeback *wb, *next; struct list_head *list = blkcg_get_cgwb_list(css); spin_lock_irq(&cgwb_lock); list_for_each_entry_safe(wb, next, list, blkcg_node) cgwb_kill(wb); list->next = NULL; /* prevent new wb's */ spin_unlock_irq(&cgwb_lock); } static void cgwb_bdi_register(struct backing_dev_info *bdi) { spin_lock_irq(&cgwb_lock); list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list); spin_unlock_irq(&cgwb_lock); } static int __init cgwb_init(void) { /* * There can be many concurrent release work items overwhelming * system_wq. Put them in a separate wq and limit concurrency. * There's no point in executing many of these in parallel. */ cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1); if (!cgwb_release_wq) return -ENOMEM; return 0; } subsys_initcall(cgwb_init); #else /* CONFIG_CGROUP_WRITEBACK */ static int cgwb_bdi_init(struct backing_dev_info *bdi) { return wb_init(&bdi->wb, bdi, GFP_KERNEL); } static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { } static void cgwb_bdi_register(struct backing_dev_info *bdi) { list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list); } static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb) { list_del_rcu(&wb->bdi_node); } #endif /* CONFIG_CGROUP_WRITEBACK */ int bdi_init(struct backing_dev_info *bdi) { bdi->dev = NULL; kref_init(&bdi->refcnt); bdi->min_ratio = 0; bdi->max_ratio = 100 * BDI_RATIO_SCALE; bdi->max_prop_frac = FPROP_FRAC_BASE; INIT_LIST_HEAD(&bdi->bdi_list); INIT_LIST_HEAD(&bdi->wb_list); init_waitqueue_head(&bdi->wb_waitq); return cgwb_bdi_init(bdi); } struct backing_dev_info *bdi_alloc(int node_id) { struct backing_dev_info *bdi; bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id); if (!bdi) return NULL; if (bdi_init(bdi)) { kfree(bdi); return NULL; } bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT; bdi->ra_pages = VM_READAHEAD_PAGES; bdi->io_pages = VM_READAHEAD_PAGES; timer_setup(&bdi->laptop_mode_wb_timer, laptop_mode_timer_fn, 0); return bdi; } EXPORT_SYMBOL(bdi_alloc); static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp) { struct rb_node **p = &bdi_tree.rb_node; struct rb_node *parent = NULL; struct backing_dev_info *bdi; lockdep_assert_held(&bdi_lock); while (*p) { parent = *p; bdi = rb_entry(parent, struct backing_dev_info, rb_node); if (bdi->id > id) p = &(*p)->rb_left; else if (bdi->id < id) p = &(*p)->rb_right; else break; } if (parentp) *parentp = parent; return p; } /** * bdi_get_by_id - lookup and get bdi from its id * @id: bdi id to lookup * * Find bdi matching @id and get it. Returns NULL if the matching bdi * doesn't exist or is already unregistered. 
*/ struct backing_dev_info *bdi_get_by_id(u64 id) { struct backing_dev_info *bdi = NULL; struct rb_node **p; spin_lock_bh(&bdi_lock); p = bdi_lookup_rb_node(id, NULL); if (*p) { bdi = rb_entry(*p, struct backing_dev_info, rb_node); bdi_get(bdi); } spin_unlock_bh(&bdi_lock); return bdi; } int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args) { struct device *dev; struct rb_node *parent, **p; if (bdi->dev) /* The driver needs to use separate queues per device */ return 0; vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args); dev = device_create(&bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name); if (IS_ERR(dev)) return PTR_ERR(dev); cgwb_bdi_register(bdi); bdi->dev = dev; bdi_debug_register(bdi, dev_name(dev)); set_bit(WB_registered, &bdi->wb.state); spin_lock_bh(&bdi_lock); bdi->id = ++bdi_id_cursor; p = bdi_lookup_rb_node(bdi->id, &parent); rb_link_node(&bdi->rb_node, parent, p); rb_insert_color(&bdi->rb_node, &bdi_tree); list_add_tail_rcu(&bdi->bdi_list, &bdi_list); spin_unlock_bh(&bdi_lock); trace_writeback_bdi_register(bdi); return 0; } int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...) { va_list args; int ret; va_start(args, fmt); ret = bdi_register_va(bdi, fmt, args); va_end(args); return ret; } EXPORT_SYMBOL(bdi_register); void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner) { WARN_ON_ONCE(bdi->owner); bdi->owner = owner; get_device(owner); } /* * Remove bdi from bdi_list, and ensure that it is no longer visible */ static void bdi_remove_from_list(struct backing_dev_info *bdi) { spin_lock_bh(&bdi_lock); rb_erase(&bdi->rb_node, &bdi_tree); list_del_rcu(&bdi->bdi_list); spin_unlock_bh(&bdi_lock); synchronize_rcu_expedited(); } void bdi_unregister(struct backing_dev_info *bdi) { del_timer_sync(&bdi->laptop_mode_wb_timer); /* make sure nobody finds us on the bdi_list anymore */ bdi_remove_from_list(bdi); wb_shutdown(&bdi->wb); cgwb_bdi_unregister(bdi); /* * If this BDI's min ratio has been set, use bdi_set_min_ratio() to * update the global bdi_min_ratio. */ if (bdi->min_ratio) bdi_set_min_ratio(bdi, 0); if (bdi->dev) { bdi_debug_unregister(bdi); device_unregister(bdi->dev); bdi->dev = NULL; } if (bdi->owner) { put_device(bdi->owner); bdi->owner = NULL; } } EXPORT_SYMBOL(bdi_unregister); static void release_bdi(struct kref *ref) { struct backing_dev_info *bdi = container_of(ref, struct backing_dev_info, refcnt); WARN_ON_ONCE(test_bit(WB_registered, &bdi->wb.state)); WARN_ON_ONCE(bdi->dev); wb_exit(&bdi->wb); kfree(bdi); } void bdi_put(struct backing_dev_info *bdi) { kref_put(&bdi->refcnt, release_bdi); } EXPORT_SYMBOL(bdi_put); struct backing_dev_info *inode_to_bdi(struct inode *inode) { struct super_block *sb; if (!inode) return &noop_backing_dev_info; sb = inode->i_sb; #ifdef CONFIG_BLOCK if (sb_is_blkdev_sb(sb)) return I_BDEV(inode)->bd_disk->bdi; #endif return sb->s_bdi; } EXPORT_SYMBOL(inode_to_bdi); const char *bdi_dev_name(struct backing_dev_info *bdi) { if (!bdi || !bdi->dev) return bdi_unknown_name; return bdi->dev_name; } EXPORT_SYMBOL_GPL(bdi_dev_name);
linux-master
mm/backing-dev.c
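backing-dev.c above registers read_ahead_kb, min_ratio, max_ratio and the other attributes under the "bdi" device class, so each registered backing device exposes them as ordinary sysfs files. The userspace sketch below reads one of them; the device name "8:0" is only an illustrative placeholder, and real names depend on the registered backing device.

/*
 * Illustrative userspace sketch: read one bdi sysfs attribute defined by
 * the attribute group above. Not kernel code.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/bdi/8:0/read_ahead_kb"; /* example bdi */
	unsigned long kb;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%lu", &kb) == 1)
		printf("read_ahead_kb = %lu\n", kb);
	fclose(f);
	return 0;
}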
// SPDX-License-Identifier: GPL-2.0-only /* * z3fold.c * * Author: Vitaly Wool <[email protected]> * Copyright (C) 2016, Sony Mobile Communications Inc. * * This implementation is based on zbud written by Seth Jennings. * * z3fold is an special purpose allocator for storing compressed pages. It * can store up to three compressed pages per page which improves the * compression ratio of zbud while retaining its main concepts (e. g. always * storing an integral number of objects per page) and simplicity. * It still has simple and deterministic reclaim properties that make it * preferable to a higher density approach (with no requirement on integral * number of object per page) when reclaim is used. * * As in zbud, pages are divided into "chunks". The size of the chunks is * fixed at compile time and is determined by NCHUNKS_ORDER below. * * z3fold doesn't export any API and is meant to be used via zpool API. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/atomic.h> #include <linux/sched.h> #include <linux/cpumask.h> #include <linux/list.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/page-flags.h> #include <linux/migrate.h> #include <linux/node.h> #include <linux/compaction.h> #include <linux/percpu.h> #include <linux/preempt.h> #include <linux/workqueue.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/zpool.h> #include <linux/kmemleak.h> /* * NCHUNKS_ORDER determines the internal allocation granularity, effectively * adjusting internal fragmentation. It also determines the number of * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks * in the beginning of an allocated page are occupied by z3fold header, so * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), * which shows the max number of free chunks in z3fold page, also there will * be 63, or 62, respectively, freelists per pool. */ #define NCHUNKS_ORDER 6 #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER) #define CHUNK_SIZE (1 << CHUNK_SHIFT) #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE) #define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT) #define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT) #define NCHUNKS (TOTAL_CHUNKS - ZHDR_CHUNKS) #define BUDDY_MASK (0x3) #define BUDDY_SHIFT 2 #define SLOTS_ALIGN (0x40) /***************** * Structures *****************/ struct z3fold_pool; enum buddy { HEADLESS = 0, FIRST, MIDDLE, LAST, BUDDIES_MAX = LAST }; struct z3fold_buddy_slots { /* * we are using BUDDY_MASK in handle_to_buddy etc. 
so there should * be enough slots to hold all possible variants */ unsigned long slot[BUDDY_MASK + 1]; unsigned long pool; /* back link */ rwlock_t lock; }; #define HANDLE_FLAG_MASK (0x03) /* * struct z3fold_header - z3fold page metadata occupying first chunks of each * z3fold page, except for HEADLESS pages * @buddy: links the z3fold page into the relevant list in the * pool * @page_lock: per-page lock * @refcount: reference count for the z3fold page * @work: work_struct for page layout optimization * @slots: pointer to the structure holding buddy slots * @pool: pointer to the containing pool * @cpu: CPU which this page "belongs" to * @first_chunks: the size of the first buddy in chunks, 0 if free * @middle_chunks: the size of the middle buddy in chunks, 0 if free * @last_chunks: the size of the last buddy in chunks, 0 if free * @first_num: the starting number (for the first handle) * @mapped_count: the number of objects currently mapped */ struct z3fold_header { struct list_head buddy; spinlock_t page_lock; struct kref refcount; struct work_struct work; struct z3fold_buddy_slots *slots; struct z3fold_pool *pool; short cpu; unsigned short first_chunks; unsigned short middle_chunks; unsigned short last_chunks; unsigned short start_middle; unsigned short first_num:2; unsigned short mapped_count:2; unsigned short foreign_handles:2; }; /** * struct z3fold_pool - stores metadata for each z3fold pool * @name: pool name * @lock: protects pool unbuddied lists * @stale_lock: protects pool stale page list * @unbuddied: per-cpu array of lists tracking z3fold pages that contain 2- * buddies; the list each z3fold page is added to depends on * the size of its free region. * @stale: list of pages marked for freeing * @pages_nr: number of z3fold pages in the pool. * @c_handle: cache for z3fold_buddy_slots allocation * @compact_wq: workqueue for page layout background optimization * @release_wq: workqueue for safe page release * @work: work_struct for safe page release * * This structure is allocated at pool creation time and maintains metadata * pertaining to a particular z3fold pool. */ struct z3fold_pool { const char *name; spinlock_t lock; spinlock_t stale_lock; struct list_head *unbuddied; struct list_head stale; atomic64_t pages_nr; struct kmem_cache *c_handle; struct workqueue_struct *compact_wq; struct workqueue_struct *release_wq; struct work_struct work; }; /* * Internal z3fold page flags */ enum z3fold_page_flags { PAGE_HEADLESS = 0, MIDDLE_CHUNK_MAPPED, NEEDS_COMPACTING, PAGE_STALE, PAGE_CLAIMED, /* by either reclaim or free */ PAGE_MIGRATED, /* page is migrated and soon to be released */ }; /* * handle flags, go under HANDLE_FLAG_MASK */ enum z3fold_handle_flags { HANDLES_NOFREE = 0, }; /* * Forward declarations */ static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool); static void compact_page_work(struct work_struct *w); /***************** * Helpers *****************/ /* Converts an allocation size in bytes to size in z3fold chunks */ static int size_to_chunks(size_t size) { return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT; } #define for_each_unbuddied_list(_iter, _begin) \ for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++) static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool, gfp_t gfp) { struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle, gfp); if (slots) { /* It will be freed separately in free_handle(). 
*/ kmemleak_not_leak(slots); slots->pool = (unsigned long)pool; rwlock_init(&slots->lock); } return slots; } static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s) { return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK); } static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle) { return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1)); } /* Lock a z3fold page */ static inline void z3fold_page_lock(struct z3fold_header *zhdr) { spin_lock(&zhdr->page_lock); } /* Try to lock a z3fold page */ static inline int z3fold_page_trylock(struct z3fold_header *zhdr) { return spin_trylock(&zhdr->page_lock); } /* Unlock a z3fold page */ static inline void z3fold_page_unlock(struct z3fold_header *zhdr) { spin_unlock(&zhdr->page_lock); } /* return locked z3fold page if it's not headless */ static inline struct z3fold_header *get_z3fold_header(unsigned long handle) { struct z3fold_buddy_slots *slots; struct z3fold_header *zhdr; int locked = 0; if (!(handle & (1 << PAGE_HEADLESS))) { slots = handle_to_slots(handle); do { unsigned long addr; read_lock(&slots->lock); addr = *(unsigned long *)handle; zhdr = (struct z3fold_header *)(addr & PAGE_MASK); locked = z3fold_page_trylock(zhdr); read_unlock(&slots->lock); if (locked) { struct page *page = virt_to_page(zhdr); if (!test_bit(PAGE_MIGRATED, &page->private)) break; z3fold_page_unlock(zhdr); } cpu_relax(); } while (true); } else { zhdr = (struct z3fold_header *)(handle & PAGE_MASK); } return zhdr; } static inline void put_z3fold_header(struct z3fold_header *zhdr) { struct page *page = virt_to_page(zhdr); if (!test_bit(PAGE_HEADLESS, &page->private)) z3fold_page_unlock(zhdr); } static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr) { struct z3fold_buddy_slots *slots; int i; bool is_free; if (WARN_ON(*(unsigned long *)handle == 0)) return; slots = handle_to_slots(handle); write_lock(&slots->lock); *(unsigned long *)handle = 0; if (test_bit(HANDLES_NOFREE, &slots->pool)) { write_unlock(&slots->lock); return; /* simple case, nothing else to do */ } if (zhdr->slots != slots) zhdr->foreign_handles--; is_free = true; for (i = 0; i <= BUDDY_MASK; i++) { if (slots->slot[i]) { is_free = false; break; } } write_unlock(&slots->lock); if (is_free) { struct z3fold_pool *pool = slots_to_pool(slots); if (zhdr->slots == slots) zhdr->slots = NULL; kmem_cache_free(pool->c_handle, slots); } } /* Initializes the z3fold header of a newly allocated z3fold page */ static struct z3fold_header *init_z3fold_page(struct page *page, bool headless, struct z3fold_pool *pool, gfp_t gfp) { struct z3fold_header *zhdr = page_address(page); struct z3fold_buddy_slots *slots; clear_bit(PAGE_HEADLESS, &page->private); clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); clear_bit(NEEDS_COMPACTING, &page->private); clear_bit(PAGE_STALE, &page->private); clear_bit(PAGE_CLAIMED, &page->private); clear_bit(PAGE_MIGRATED, &page->private); if (headless) return zhdr; slots = alloc_slots(pool, gfp); if (!slots) return NULL; memset(zhdr, 0, sizeof(*zhdr)); spin_lock_init(&zhdr->page_lock); kref_init(&zhdr->refcount); zhdr->cpu = -1; zhdr->slots = slots; zhdr->pool = pool; INIT_LIST_HEAD(&zhdr->buddy); INIT_WORK(&zhdr->work, compact_page_work); return zhdr; } /* Resets the struct page fields and frees the page */ static void free_z3fold_page(struct page *page, bool headless) { if (!headless) { lock_page(page); __ClearPageMovable(page); unlock_page(page); } __free_page(page); } /* Helper function to build the index */ static 
inline int __idx(struct z3fold_header *zhdr, enum buddy bud) { return (bud + zhdr->first_num) & BUDDY_MASK; } /* * Encodes the handle of a particular buddy within a z3fold page * Pool lock should be held as this function accesses first_num */ static unsigned long __encode_handle(struct z3fold_header *zhdr, struct z3fold_buddy_slots *slots, enum buddy bud) { unsigned long h = (unsigned long)zhdr; int idx = 0; /* * For a headless page, its handle is its pointer with the extra * PAGE_HEADLESS bit set */ if (bud == HEADLESS) return h | (1 << PAGE_HEADLESS); /* otherwise, return pointer to encoded handle */ idx = __idx(zhdr, bud); h += idx; if (bud == LAST) h |= (zhdr->last_chunks << BUDDY_SHIFT); write_lock(&slots->lock); slots->slot[idx] = h; write_unlock(&slots->lock); return (unsigned long)&slots->slot[idx]; } static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud) { return __encode_handle(zhdr, zhdr->slots, bud); } /* only for LAST bud, returns zero otherwise */ static unsigned short handle_to_chunks(unsigned long handle) { struct z3fold_buddy_slots *slots = handle_to_slots(handle); unsigned long addr; read_lock(&slots->lock); addr = *(unsigned long *)handle; read_unlock(&slots->lock); return (addr & ~PAGE_MASK) >> BUDDY_SHIFT; } /* * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle * but that doesn't matter. because the masking will result in the * correct buddy number. */ static enum buddy handle_to_buddy(unsigned long handle) { struct z3fold_header *zhdr; struct z3fold_buddy_slots *slots = handle_to_slots(handle); unsigned long addr; read_lock(&slots->lock); WARN_ON(handle & (1 << PAGE_HEADLESS)); addr = *(unsigned long *)handle; read_unlock(&slots->lock); zhdr = (struct z3fold_header *)(addr & PAGE_MASK); return (addr - zhdr->first_num) & BUDDY_MASK; } static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr) { return zhdr->pool; } static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked) { struct page *page = virt_to_page(zhdr); struct z3fold_pool *pool = zhdr_to_pool(zhdr); WARN_ON(!list_empty(&zhdr->buddy)); set_bit(PAGE_STALE, &page->private); clear_bit(NEEDS_COMPACTING, &page->private); spin_lock(&pool->lock); spin_unlock(&pool->lock); if (locked) z3fold_page_unlock(zhdr); spin_lock(&pool->stale_lock); list_add(&zhdr->buddy, &pool->stale); queue_work(pool->release_wq, &pool->work); spin_unlock(&pool->stale_lock); atomic64_dec(&pool->pages_nr); } static void release_z3fold_page_locked(struct kref *ref) { struct z3fold_header *zhdr = container_of(ref, struct z3fold_header, refcount); WARN_ON(z3fold_page_trylock(zhdr)); __release_z3fold_page(zhdr, true); } static void release_z3fold_page_locked_list(struct kref *ref) { struct z3fold_header *zhdr = container_of(ref, struct z3fold_header, refcount); struct z3fold_pool *pool = zhdr_to_pool(zhdr); spin_lock(&pool->lock); list_del_init(&zhdr->buddy); spin_unlock(&pool->lock); WARN_ON(z3fold_page_trylock(zhdr)); __release_z3fold_page(zhdr, true); } static inline int put_z3fold_locked(struct z3fold_header *zhdr) { return kref_put(&zhdr->refcount, release_z3fold_page_locked); } static inline int put_z3fold_locked_list(struct z3fold_header *zhdr) { return kref_put(&zhdr->refcount, release_z3fold_page_locked_list); } static void free_pages_work(struct work_struct *w) { struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work); spin_lock(&pool->stale_lock); while (!list_empty(&pool->stale)) { struct z3fold_header *zhdr = list_first_entry(&pool->stale, struct 
z3fold_header, buddy); struct page *page = virt_to_page(zhdr); list_del(&zhdr->buddy); if (WARN_ON(!test_bit(PAGE_STALE, &page->private))) continue; spin_unlock(&pool->stale_lock); cancel_work_sync(&zhdr->work); free_z3fold_page(page, false); cond_resched(); spin_lock(&pool->stale_lock); } spin_unlock(&pool->stale_lock); } /* * Returns the number of free chunks in a z3fold page. * NB: can't be used with HEADLESS pages. */ static int num_free_chunks(struct z3fold_header *zhdr) { int nfree; /* * If there is a middle object, pick up the bigger free space * either before or after it. Otherwise just subtract the number * of chunks occupied by the first and the last objects. */ if (zhdr->middle_chunks != 0) { int nfree_before = zhdr->first_chunks ? 0 : zhdr->start_middle - ZHDR_CHUNKS; int nfree_after = zhdr->last_chunks ? 0 : TOTAL_CHUNKS - (zhdr->start_middle + zhdr->middle_chunks); nfree = max(nfree_before, nfree_after); } else nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks; return nfree; } /* Add to the appropriate unbuddied list */ static inline void add_to_unbuddied(struct z3fold_pool *pool, struct z3fold_header *zhdr) { if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 || zhdr->middle_chunks == 0) { struct list_head *unbuddied; int freechunks = num_free_chunks(zhdr); migrate_disable(); unbuddied = this_cpu_ptr(pool->unbuddied); spin_lock(&pool->lock); list_add(&zhdr->buddy, &unbuddied[freechunks]); spin_unlock(&pool->lock); zhdr->cpu = smp_processor_id(); migrate_enable(); } } static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks) { enum buddy bud = HEADLESS; if (zhdr->middle_chunks) { if (!zhdr->first_chunks && chunks <= zhdr->start_middle - ZHDR_CHUNKS) bud = FIRST; else if (!zhdr->last_chunks) bud = LAST; } else { if (!zhdr->first_chunks) bud = FIRST; else if (!zhdr->last_chunks) bud = LAST; else bud = MIDDLE; } return bud; } static inline void *mchunk_memmove(struct z3fold_header *zhdr, unsigned short dst_chunk) { void *beg = zhdr; return memmove(beg + (dst_chunk << CHUNK_SHIFT), beg + (zhdr->start_middle << CHUNK_SHIFT), zhdr->middle_chunks << CHUNK_SHIFT); } static inline bool buddy_single(struct z3fold_header *zhdr) { return !((zhdr->first_chunks && zhdr->middle_chunks) || (zhdr->first_chunks && zhdr->last_chunks) || (zhdr->middle_chunks && zhdr->last_chunks)); } static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr) { struct z3fold_pool *pool = zhdr_to_pool(zhdr); void *p = zhdr; unsigned long old_handle = 0; size_t sz = 0; struct z3fold_header *new_zhdr = NULL; int first_idx = __idx(zhdr, FIRST); int middle_idx = __idx(zhdr, MIDDLE); int last_idx = __idx(zhdr, LAST); unsigned short *moved_chunks = NULL; /* * No need to protect slots here -- all the slots are "local" and * the page lock is already taken */ if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) { p += ZHDR_SIZE_ALIGNED; sz = zhdr->first_chunks << CHUNK_SHIFT; old_handle = (unsigned long)&zhdr->slots->slot[first_idx]; moved_chunks = &zhdr->first_chunks; } else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) { p += zhdr->start_middle << CHUNK_SHIFT; sz = zhdr->middle_chunks << CHUNK_SHIFT; old_handle = (unsigned long)&zhdr->slots->slot[middle_idx]; moved_chunks = &zhdr->middle_chunks; } else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) { p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT); sz = zhdr->last_chunks << CHUNK_SHIFT; old_handle = (unsigned long)&zhdr->slots->slot[last_idx]; moved_chunks = &zhdr->last_chunks; } if (sz > 0) { 
enum buddy new_bud = HEADLESS; short chunks = size_to_chunks(sz); void *q; new_zhdr = __z3fold_alloc(pool, sz, false); if (!new_zhdr) return NULL; if (WARN_ON(new_zhdr == zhdr)) goto out_fail; new_bud = get_free_buddy(new_zhdr, chunks); q = new_zhdr; switch (new_bud) { case FIRST: new_zhdr->first_chunks = chunks; q += ZHDR_SIZE_ALIGNED; break; case MIDDLE: new_zhdr->middle_chunks = chunks; new_zhdr->start_middle = new_zhdr->first_chunks + ZHDR_CHUNKS; q += new_zhdr->start_middle << CHUNK_SHIFT; break; case LAST: new_zhdr->last_chunks = chunks; q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT); break; default: goto out_fail; } new_zhdr->foreign_handles++; memcpy(q, p, sz); write_lock(&zhdr->slots->lock); *(unsigned long *)old_handle = (unsigned long)new_zhdr + __idx(new_zhdr, new_bud); if (new_bud == LAST) *(unsigned long *)old_handle |= (new_zhdr->last_chunks << BUDDY_SHIFT); write_unlock(&zhdr->slots->lock); add_to_unbuddied(pool, new_zhdr); z3fold_page_unlock(new_zhdr); *moved_chunks = 0; } return new_zhdr; out_fail: if (new_zhdr && !put_z3fold_locked(new_zhdr)) { add_to_unbuddied(pool, new_zhdr); z3fold_page_unlock(new_zhdr); } return NULL; } #define BIG_CHUNK_GAP 3 /* Has to be called with lock held */ static int z3fold_compact_page(struct z3fold_header *zhdr) { struct page *page = virt_to_page(zhdr); if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private)) return 0; /* can't move middle chunk, it's used */ if (unlikely(PageIsolated(page))) return 0; if (zhdr->middle_chunks == 0) return 0; /* nothing to compact */ if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { /* move to the beginning */ mchunk_memmove(zhdr, ZHDR_CHUNKS); zhdr->first_chunks = zhdr->middle_chunks; zhdr->middle_chunks = 0; zhdr->start_middle = 0; zhdr->first_num++; return 1; } /* * moving data is expensive, so let's only do that if * there's substantial gain (at least BIG_CHUNK_GAP chunks) */ if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 && zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >= BIG_CHUNK_GAP) { mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS); zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; return 1; } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 && TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle + zhdr->middle_chunks) >= BIG_CHUNK_GAP) { unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks - zhdr->middle_chunks; mchunk_memmove(zhdr, new_start); zhdr->start_middle = new_start; return 1; } return 0; } static void do_compact_page(struct z3fold_header *zhdr, bool locked) { struct z3fold_pool *pool = zhdr_to_pool(zhdr); struct page *page; page = virt_to_page(zhdr); if (locked) WARN_ON(z3fold_page_trylock(zhdr)); else z3fold_page_lock(zhdr); if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) { z3fold_page_unlock(zhdr); return; } spin_lock(&pool->lock); list_del_init(&zhdr->buddy); spin_unlock(&pool->lock); if (put_z3fold_locked(zhdr)) return; if (test_bit(PAGE_STALE, &page->private) || test_and_set_bit(PAGE_CLAIMED, &page->private)) { z3fold_page_unlock(zhdr); return; } if (!zhdr->foreign_handles && buddy_single(zhdr) && zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) { if (!put_z3fold_locked(zhdr)) { clear_bit(PAGE_CLAIMED, &page->private); z3fold_page_unlock(zhdr); } return; } z3fold_compact_page(zhdr); add_to_unbuddied(pool, zhdr); clear_bit(PAGE_CLAIMED, &page->private); z3fold_page_unlock(zhdr); } static void compact_page_work(struct work_struct *w) { struct z3fold_header *zhdr = container_of(w, struct z3fold_header, 
work); do_compact_page(zhdr, false); } /* returns _locked_ z3fold page header or NULL */ static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool, size_t size, bool can_sleep) { struct z3fold_header *zhdr = NULL; struct page *page; struct list_head *unbuddied; int chunks = size_to_chunks(size), i; lookup: migrate_disable(); /* First, try to find an unbuddied z3fold page. */ unbuddied = this_cpu_ptr(pool->unbuddied); for_each_unbuddied_list(i, chunks) { struct list_head *l = &unbuddied[i]; zhdr = list_first_entry_or_null(READ_ONCE(l), struct z3fold_header, buddy); if (!zhdr) continue; /* Re-check under lock. */ spin_lock(&pool->lock); if (unlikely(zhdr != list_first_entry(READ_ONCE(l), struct z3fold_header, buddy)) || !z3fold_page_trylock(zhdr)) { spin_unlock(&pool->lock); zhdr = NULL; migrate_enable(); if (can_sleep) cond_resched(); goto lookup; } list_del_init(&zhdr->buddy); zhdr->cpu = -1; spin_unlock(&pool->lock); page = virt_to_page(zhdr); if (test_bit(NEEDS_COMPACTING, &page->private) || test_bit(PAGE_CLAIMED, &page->private)) { z3fold_page_unlock(zhdr); zhdr = NULL; migrate_enable(); if (can_sleep) cond_resched(); goto lookup; } /* * this page could not be removed from its unbuddied * list while pool lock was held, and then we've taken * page lock so kref_put could not be called before * we got here, so it's safe to just call kref_get() */ kref_get(&zhdr->refcount); break; } migrate_enable(); if (!zhdr) { int cpu; /* look for _exact_ match on other cpus' lists */ for_each_online_cpu(cpu) { struct list_head *l; unbuddied = per_cpu_ptr(pool->unbuddied, cpu); spin_lock(&pool->lock); l = &unbuddied[chunks]; zhdr = list_first_entry_or_null(READ_ONCE(l), struct z3fold_header, buddy); if (!zhdr || !z3fold_page_trylock(zhdr)) { spin_unlock(&pool->lock); zhdr = NULL; continue; } list_del_init(&zhdr->buddy); zhdr->cpu = -1; spin_unlock(&pool->lock); page = virt_to_page(zhdr); if (test_bit(NEEDS_COMPACTING, &page->private) || test_bit(PAGE_CLAIMED, &page->private)) { z3fold_page_unlock(zhdr); zhdr = NULL; if (can_sleep) cond_resched(); continue; } kref_get(&zhdr->refcount); break; } } if (zhdr && !zhdr->slots) { zhdr->slots = alloc_slots(pool, GFP_ATOMIC); if (!zhdr->slots) goto out_fail; } return zhdr; out_fail: if (!put_z3fold_locked(zhdr)) { add_to_unbuddied(pool, zhdr); z3fold_page_unlock(zhdr); } return NULL; } /* * API Functions */ /** * z3fold_create_pool() - create a new z3fold pool * @name: pool name * @gfp: gfp flags when allocating the z3fold pool structure * * Return: pointer to the new z3fold pool or NULL if the metadata allocation * failed. 
*/ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp) { struct z3fold_pool *pool = NULL; int i, cpu; pool = kzalloc(sizeof(struct z3fold_pool), gfp); if (!pool) goto out; pool->c_handle = kmem_cache_create("z3fold_handle", sizeof(struct z3fold_buddy_slots), SLOTS_ALIGN, 0, NULL); if (!pool->c_handle) goto out_c; spin_lock_init(&pool->lock); spin_lock_init(&pool->stale_lock); pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS, __alignof__(struct list_head)); if (!pool->unbuddied) goto out_pool; for_each_possible_cpu(cpu) { struct list_head *unbuddied = per_cpu_ptr(pool->unbuddied, cpu); for_each_unbuddied_list(i, 0) INIT_LIST_HEAD(&unbuddied[i]); } INIT_LIST_HEAD(&pool->stale); atomic64_set(&pool->pages_nr, 0); pool->name = name; pool->compact_wq = create_singlethread_workqueue(pool->name); if (!pool->compact_wq) goto out_unbuddied; pool->release_wq = create_singlethread_workqueue(pool->name); if (!pool->release_wq) goto out_wq; INIT_WORK(&pool->work, free_pages_work); return pool; out_wq: destroy_workqueue(pool->compact_wq); out_unbuddied: free_percpu(pool->unbuddied); out_pool: kmem_cache_destroy(pool->c_handle); out_c: kfree(pool); out: return NULL; } /** * z3fold_destroy_pool() - destroys an existing z3fold pool * @pool: the z3fold pool to be destroyed * * The pool should be emptied before this function is called. */ static void z3fold_destroy_pool(struct z3fold_pool *pool) { kmem_cache_destroy(pool->c_handle); /* * We need to destroy pool->compact_wq before pool->release_wq, * as any pending work on pool->compact_wq will call * queue_work(pool->release_wq, &pool->work). * * There are still outstanding pages until both workqueues are drained, * so we cannot unregister migration until then. */ destroy_workqueue(pool->compact_wq); destroy_workqueue(pool->release_wq); free_percpu(pool->unbuddied); kfree(pool); } static const struct movable_operations z3fold_mops; /** * z3fold_alloc() - allocates a region of a given size * @pool: z3fold pool from which to allocate * @size: size in bytes of the desired allocation * @gfp: gfp flags used if the pool needs to grow * @handle: handle of the new allocation * * This function will attempt to find a free region in the pool large enough to * satisfy the allocation request. A search of the unbuddied lists is * performed first. If no suitable free region is found, then a new page is * allocated and added to the pool to satisfy the request. * * Return: 0 if success and handle is set, otherwise -EINVAL if the size or * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate * a new page. 
*/ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, unsigned long *handle) { int chunks = size_to_chunks(size); struct z3fold_header *zhdr = NULL; struct page *page = NULL; enum buddy bud; bool can_sleep = gfpflags_allow_blocking(gfp); if (!size || (gfp & __GFP_HIGHMEM)) return -EINVAL; if (size > PAGE_SIZE) return -ENOSPC; if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE) bud = HEADLESS; else { retry: zhdr = __z3fold_alloc(pool, size, can_sleep); if (zhdr) { bud = get_free_buddy(zhdr, chunks); if (bud == HEADLESS) { if (!put_z3fold_locked(zhdr)) z3fold_page_unlock(zhdr); pr_err("No free chunks in unbuddied\n"); WARN_ON(1); goto retry; } page = virt_to_page(zhdr); goto found; } bud = FIRST; } page = alloc_page(gfp); if (!page) return -ENOMEM; zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp); if (!zhdr) { __free_page(page); return -ENOMEM; } atomic64_inc(&pool->pages_nr); if (bud == HEADLESS) { set_bit(PAGE_HEADLESS, &page->private); goto headless; } if (can_sleep) { lock_page(page); __SetPageMovable(page, &z3fold_mops); unlock_page(page); } else { WARN_ON(!trylock_page(page)); __SetPageMovable(page, &z3fold_mops); unlock_page(page); } z3fold_page_lock(zhdr); found: if (bud == FIRST) zhdr->first_chunks = chunks; else if (bud == LAST) zhdr->last_chunks = chunks; else { zhdr->middle_chunks = chunks; zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; } add_to_unbuddied(pool, zhdr); headless: spin_lock(&pool->lock); *handle = encode_handle(zhdr, bud); spin_unlock(&pool->lock); if (bud != HEADLESS) z3fold_page_unlock(zhdr); return 0; } /** * z3fold_free() - frees the allocation associated with the given handle * @pool: pool in which the allocation resided * @handle: handle associated with the allocation returned by z3fold_alloc() * * In the case that the z3fold page in which the allocation resides is under * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function * only sets the first|middle|last_chunks to 0. The page is actually freed * once all buddies are evicted (see z3fold_reclaim_page() below). */ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) { struct z3fold_header *zhdr; struct page *page; enum buddy bud; bool page_claimed; zhdr = get_z3fold_header(handle); page = virt_to_page(zhdr); page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private); if (test_bit(PAGE_HEADLESS, &page->private)) { /* if a headless page is under reclaim, just leave. * NB: we use test_and_set_bit for a reason: if the bit * has not been set before, we release this page * immediately so we don't care about its value any more. 
*/ if (!page_claimed) { put_z3fold_header(zhdr); free_z3fold_page(page, true); atomic64_dec(&pool->pages_nr); } return; } /* Non-headless case */ bud = handle_to_buddy(handle); switch (bud) { case FIRST: zhdr->first_chunks = 0; break; case MIDDLE: zhdr->middle_chunks = 0; break; case LAST: zhdr->last_chunks = 0; break; default: pr_err("%s: unknown bud %d\n", __func__, bud); WARN_ON(1); put_z3fold_header(zhdr); return; } if (!page_claimed) free_handle(handle, zhdr); if (put_z3fold_locked_list(zhdr)) return; if (page_claimed) { /* the page has not been claimed by us */ put_z3fold_header(zhdr); return; } if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) { clear_bit(PAGE_CLAIMED, &page->private); put_z3fold_header(zhdr); return; } if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) { zhdr->cpu = -1; kref_get(&zhdr->refcount); clear_bit(PAGE_CLAIMED, &page->private); do_compact_page(zhdr, true); return; } kref_get(&zhdr->refcount); clear_bit(PAGE_CLAIMED, &page->private); queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work); put_z3fold_header(zhdr); } /** * z3fold_map() - maps the allocation associated with the given handle * @pool: pool in which the allocation resides * @handle: handle associated with the allocation to be mapped * * Extracts the buddy number from handle and constructs the pointer to the * correct starting chunk within the page. * * Returns: a pointer to the mapped allocation */ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle) { struct z3fold_header *zhdr; struct page *page; void *addr; enum buddy buddy; zhdr = get_z3fold_header(handle); addr = zhdr; page = virt_to_page(zhdr); if (test_bit(PAGE_HEADLESS, &page->private)) goto out; buddy = handle_to_buddy(handle); switch (buddy) { case FIRST: addr += ZHDR_SIZE_ALIGNED; break; case MIDDLE: addr += zhdr->start_middle << CHUNK_SHIFT; set_bit(MIDDLE_CHUNK_MAPPED, &page->private); break; case LAST: addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT); break; default: pr_err("unknown buddy id %d\n", buddy); WARN_ON(1); addr = NULL; break; } if (addr) zhdr->mapped_count++; out: put_z3fold_header(zhdr); return addr; } /** * z3fold_unmap() - unmaps the allocation associated with the given handle * @pool: pool in which the allocation resides * @handle: handle associated with the allocation to be unmapped */ static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle) { struct z3fold_header *zhdr; struct page *page; enum buddy buddy; zhdr = get_z3fold_header(handle); page = virt_to_page(zhdr); if (test_bit(PAGE_HEADLESS, &page->private)) return; buddy = handle_to_buddy(handle); if (buddy == MIDDLE) clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); zhdr->mapped_count--; put_z3fold_header(zhdr); } /** * z3fold_get_pool_size() - gets the z3fold pool size in pages * @pool: pool whose size is being queried * * Returns: size in pages of the given pool. 
*/ static u64 z3fold_get_pool_size(struct z3fold_pool *pool) { return atomic64_read(&pool->pages_nr); } static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode) { struct z3fold_header *zhdr; struct z3fold_pool *pool; VM_BUG_ON_PAGE(PageIsolated(page), page); if (test_bit(PAGE_HEADLESS, &page->private)) return false; zhdr = page_address(page); z3fold_page_lock(zhdr); if (test_bit(NEEDS_COMPACTING, &page->private) || test_bit(PAGE_STALE, &page->private)) goto out; if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) goto out; if (test_and_set_bit(PAGE_CLAIMED, &page->private)) goto out; pool = zhdr_to_pool(zhdr); spin_lock(&pool->lock); if (!list_empty(&zhdr->buddy)) list_del_init(&zhdr->buddy); spin_unlock(&pool->lock); kref_get(&zhdr->refcount); z3fold_page_unlock(zhdr); return true; out: z3fold_page_unlock(zhdr); return false; } static int z3fold_page_migrate(struct page *newpage, struct page *page, enum migrate_mode mode) { struct z3fold_header *zhdr, *new_zhdr; struct z3fold_pool *pool; VM_BUG_ON_PAGE(!PageIsolated(page), page); VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page); VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); zhdr = page_address(page); pool = zhdr_to_pool(zhdr); if (!z3fold_page_trylock(zhdr)) return -EAGAIN; if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) { clear_bit(PAGE_CLAIMED, &page->private); z3fold_page_unlock(zhdr); return -EBUSY; } if (work_pending(&zhdr->work)) { z3fold_page_unlock(zhdr); return -EAGAIN; } new_zhdr = page_address(newpage); memcpy(new_zhdr, zhdr, PAGE_SIZE); newpage->private = page->private; set_bit(PAGE_MIGRATED, &page->private); z3fold_page_unlock(zhdr); spin_lock_init(&new_zhdr->page_lock); INIT_WORK(&new_zhdr->work, compact_page_work); /* * z3fold_page_isolate() ensures that new_zhdr->buddy is empty, * so we only have to reinitialize it. */ INIT_LIST_HEAD(&new_zhdr->buddy); __ClearPageMovable(page); get_page(newpage); z3fold_page_lock(new_zhdr); if (new_zhdr->first_chunks) encode_handle(new_zhdr, FIRST); if (new_zhdr->last_chunks) encode_handle(new_zhdr, LAST); if (new_zhdr->middle_chunks) encode_handle(new_zhdr, MIDDLE); set_bit(NEEDS_COMPACTING, &newpage->private); new_zhdr->cpu = smp_processor_id(); __SetPageMovable(newpage, &z3fold_mops); z3fold_page_unlock(new_zhdr); queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); /* PAGE_CLAIMED and PAGE_MIGRATED are cleared now. 
*/ page->private = 0; put_page(page); return 0; } static void z3fold_page_putback(struct page *page) { struct z3fold_header *zhdr; struct z3fold_pool *pool; zhdr = page_address(page); pool = zhdr_to_pool(zhdr); z3fold_page_lock(zhdr); if (!list_empty(&zhdr->buddy)) list_del_init(&zhdr->buddy); INIT_LIST_HEAD(&page->lru); if (put_z3fold_locked(zhdr)) return; if (list_empty(&zhdr->buddy)) add_to_unbuddied(pool, zhdr); clear_bit(PAGE_CLAIMED, &page->private); z3fold_page_unlock(zhdr); } static const struct movable_operations z3fold_mops = { .isolate_page = z3fold_page_isolate, .migrate_page = z3fold_page_migrate, .putback_page = z3fold_page_putback, }; /***************** * zpool ****************/ static void *z3fold_zpool_create(const char *name, gfp_t gfp) { return z3fold_create_pool(name, gfp); } static void z3fold_zpool_destroy(void *pool) { z3fold_destroy_pool(pool); } static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp, unsigned long *handle) { return z3fold_alloc(pool, size, gfp, handle); } static void z3fold_zpool_free(void *pool, unsigned long handle) { z3fold_free(pool, handle); } static void *z3fold_zpool_map(void *pool, unsigned long handle, enum zpool_mapmode mm) { return z3fold_map(pool, handle); } static void z3fold_zpool_unmap(void *pool, unsigned long handle) { z3fold_unmap(pool, handle); } static u64 z3fold_zpool_total_size(void *pool) { return z3fold_get_pool_size(pool) * PAGE_SIZE; } static struct zpool_driver z3fold_zpool_driver = { .type = "z3fold", .sleep_mapped = true, .owner = THIS_MODULE, .create = z3fold_zpool_create, .destroy = z3fold_zpool_destroy, .malloc = z3fold_zpool_malloc, .free = z3fold_zpool_free, .map = z3fold_zpool_map, .unmap = z3fold_zpool_unmap, .total_size = z3fold_zpool_total_size, }; MODULE_ALIAS("zpool-z3fold"); static int __init init_z3fold(void) { /* * Make sure the z3fold header is not larger than the page size and * there has remaining spaces for its buddy. */ BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE); zpool_register_driver(&z3fold_zpool_driver); return 0; } static void __exit exit_z3fold(void) { zpool_unregister_driver(&z3fold_zpool_driver); } module_init(init_z3fold); module_exit(exit_z3fold); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Vitaly Wool <[email protected]>"); MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");
linux-master
mm/z3fold.c
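The buddy layout arithmetic in the z3fold code above is compact: size_to_chunks() rounds an allocation up to whole chunks, the FIRST buddy starts right after the page header, the MIDDLE buddy starts at first_chunks + ZHDR_CHUNKS, and the LAST buddy is packed against the end of the page (see the switch (new_bud) block and z3fold_compact_page()). The standalone userspace C sketch below reproduces only that arithmetic under assumed constants, namely a 4 KiB page, 64-byte chunks and a one-chunk header; CHUNK_SHIFT, ZHDR_CHUNKS and ZHDR_SIZE_ALIGNED are defined elsewhere in z3fold.c and are not visible in this excerpt, so the numbers are illustrative rather than authoritative.

/*
 * Minimal userspace sketch of the z3fold buddy/offset arithmetic.
 * NOT the kernel implementation: the constants below (4 KiB page,
 * 64-byte chunks, a one-chunk header) are assumptions made for this
 * illustration only.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL
#define CHUNK_SHIFT	6			/* assumed: 64-byte chunks */
#define CHUNK_SIZE	(1UL << CHUNK_SHIFT)
#define ZHDR_CHUNKS	1			/* assumed: header fits in one chunk */
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)

enum buddy { FIRST, MIDDLE, LAST };

/* Round an allocation size up to whole chunks, like size_to_chunks(). */
static size_t size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

/*
 * Byte offset of a buddy inside the page, mirroring the switch (new_bud)
 * arithmetic above: FIRST sits right after the header, MIDDLE after the
 * first buddy, LAST is packed against the end of the page.
 */
static size_t buddy_offset(enum buddy bud, size_t first_chunks,
			   size_t this_chunks)
{
	switch (bud) {
	case FIRST:
		return ZHDR_CHUNKS << CHUNK_SHIFT;
	case MIDDLE:
		return (first_chunks + ZHDR_CHUNKS) << CHUNK_SHIFT;
	case LAST:
		return PAGE_SIZE - (this_chunks << CHUNK_SHIFT);
	}
	return 0;
}

int main(void)
{
	size_t first = size_to_chunks(700);	/* e.g. a 700-byte object */
	size_t last = size_to_chunks(1500);	/* e.g. a 1500-byte object */
	size_t left = TOTAL_CHUNKS - ZHDR_CHUNKS - first - last;

	printf("first buddy: %zu chunks at offset %zu\n",
	       first, buddy_offset(FIRST, 0, first));
	printf("last  buddy: %zu chunks at offset %zu\n",
	       last, buddy_offset(LAST, first, last));
	printf("chunks left for a middle buddy: %zu\n", left);
	return 0;
}

Built with any C compiler, the sketch prints where each buddy would start inside the page and how many chunks would remain for a middle buddy, which is the same bookkeeping the BIG_CHUNK_GAP checks in z3fold_compact_page() reason about.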
// SPDX-License-Identifier: GPL-2.0-only /* * This kernel test validates architecture page table helpers and * accessors and helps in verifying their continued compliance with * expected generic MM semantics. * * Copyright (C) 2019 ARM Ltd. * * Author: Anshuman Khandual <[email protected]> */ #define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__ #include <linux/gfp.h> #include <linux/highmem.h> #include <linux/hugetlb.h> #include <linux/kernel.h> #include <linux/kconfig.h> #include <linux/memblock.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/mm_types.h> #include <linux/module.h> #include <linux/pfn_t.h> #include <linux/printk.h> #include <linux/pgtable.h> #include <linux/random.h> #include <linux/spinlock.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/start_kernel.h> #include <linux/sched/mm.h> #include <linux/io.h> #include <asm/cacheflush.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> /* * Please refer Documentation/mm/arch_pgtable_helpers.rst for the semantics * expectations that are being validated here. All future changes in here * or the documentation need to be in sync. * * On s390 platform, the lower 4 bits are used to identify given page table * entry type. But these bits might affect the ability to clear entries with * pxx_clear() because of how dynamic page table folding works on s390. So * while loading up the entries do not change the lower 4 bits. It does not * have affect any other platform. Also avoid the 62nd bit on ppc64 that is * used to mark a pte entry. */ #define S390_SKIP_MASK GENMASK(3, 0) #if __BITS_PER_LONG == 64 #define PPC64_SKIP_MASK GENMASK(62, 62) #else #define PPC64_SKIP_MASK 0x0 #endif #define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK) #define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK) #define RANDOM_NZVALUE GENMASK(7, 0) struct pgtable_debug_args { struct mm_struct *mm; struct vm_area_struct *vma; pgd_t *pgdp; p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; p4d_t *start_p4dp; pud_t *start_pudp; pmd_t *start_pmdp; pgtable_t start_ptep; unsigned long vaddr; pgprot_t page_prot; pgprot_t page_prot_none; bool is_contiguous_page; unsigned long pud_pfn; unsigned long pmd_pfn; unsigned long pte_pfn; unsigned long fixed_alignment; unsigned long fixed_pgd_pfn; unsigned long fixed_p4d_pfn; unsigned long fixed_pud_pfn; unsigned long fixed_pmd_pfn; unsigned long fixed_pte_pfn; }; static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx) { pgprot_t prot = vm_get_page_prot(idx); pte_t pte = pfn_pte(args->fixed_pte_pfn, prot); unsigned long val = idx, *ptr = &val; pr_debug("Validating PTE basic (%pGv)\n", ptr); /* * This test needs to be executed after the given page table entry * is created with pfn_pte() to make sure that vm_get_page_prot(idx) * does not have the dirty bit enabled from the beginning. This is * important for platforms like arm64 where (!PTE_RDONLY) indicate * dirty bit being set. 
*/ WARN_ON(pte_dirty(pte_wrprotect(pte))); WARN_ON(!pte_same(pte, pte)); WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte)))); WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte)))); WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte), args->vma))); WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte)))); WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte)))); WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte, args->vma)))); WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte)))); WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte)))); } static void __init pte_advanced_tests(struct pgtable_debug_args *args) { struct page *page; pte_t pte; /* * Architectures optimize set_pte_at by avoiding TLB flush. * This requires set_pte_at to be not used to update an * existing pte entry. Clear pte before we do set_pte_at * * flush_dcache_page() is called after set_pte_at() to clear * PG_arch_1 for the page on ARM64. The page flag isn't cleared * when it's released and page allocation check will fail when * the page is allocated again. For architectures other than ARM64, * the unexpected overhead of cache flushing is acceptable. */ page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL; if (!page) return; pr_debug("Validating PTE advanced\n"); if (WARN_ON(!args->ptep)) return; pte = pfn_pte(args->pte_pfn, args->page_prot); set_pte_at(args->mm, args->vaddr, args->ptep, pte); flush_dcache_page(page); ptep_set_wrprotect(args->mm, args->vaddr, args->ptep); pte = ptep_get(args->ptep); WARN_ON(pte_write(pte)); ptep_get_and_clear(args->mm, args->vaddr, args->ptep); pte = ptep_get(args->ptep); WARN_ON(!pte_none(pte)); pte = pfn_pte(args->pte_pfn, args->page_prot); pte = pte_wrprotect(pte); pte = pte_mkclean(pte); set_pte_at(args->mm, args->vaddr, args->ptep, pte); flush_dcache_page(page); pte = pte_mkwrite(pte, args->vma); pte = pte_mkdirty(pte); ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1); pte = ptep_get(args->ptep); WARN_ON(!(pte_write(pte) && pte_dirty(pte))); ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1); pte = ptep_get(args->ptep); WARN_ON(!pte_none(pte)); pte = pfn_pte(args->pte_pfn, args->page_prot); pte = pte_mkyoung(pte); set_pte_at(args->mm, args->vaddr, args->ptep, pte); flush_dcache_page(page); ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep); pte = ptep_get(args->ptep); WARN_ON(pte_young(pte)); ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { pgprot_t prot = vm_get_page_prot(idx); unsigned long val = idx, *ptr = &val; pmd_t pmd; if (!has_transparent_hugepage()) return; pr_debug("Validating PMD basic (%pGv)\n", ptr); pmd = pfn_pmd(args->fixed_pmd_pfn, prot); /* * This test needs to be executed after the given page table entry * is created with pfn_pmd() to make sure that vm_get_page_prot(idx) * does not have the dirty bit enabled from the beginning. This is * important for platforms like arm64 where (!PTE_RDONLY) indicate * dirty bit being set. 
*/ WARN_ON(pmd_dirty(pmd_wrprotect(pmd))); WARN_ON(!pmd_same(pmd, pmd)); WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd)))); WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd)))); WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd), args->vma))); WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd)))); WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd)))); WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd, args->vma)))); WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd)))); WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd)))); /* * A huge page does not point to next level page table * entry. Hence this must qualify as pmd_bad(). */ WARN_ON(!pmd_bad(pmd_mkhuge(pmd))); } static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { struct page *page; pmd_t pmd; unsigned long vaddr = args->vaddr; if (!has_transparent_hugepage()) return; page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL; if (!page) return; /* * flush_dcache_page() is called after set_pmd_at() to clear * PG_arch_1 for the page on ARM64. The page flag isn't cleared * when it's released and page allocation check will fail when * the page is allocated again. For architectures other than ARM64, * the unexpected overhead of cache flushing is acceptable. */ pr_debug("Validating PMD advanced\n"); /* Align the address wrt HPAGE_PMD_SIZE */ vaddr &= HPAGE_PMD_MASK; pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep); pmd = pfn_pmd(args->pmd_pfn, args->page_prot); set_pmd_at(args->mm, vaddr, args->pmdp, pmd); flush_dcache_page(page); pmdp_set_wrprotect(args->mm, vaddr, args->pmdp); pmd = READ_ONCE(*args->pmdp); WARN_ON(pmd_write(pmd)); pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp); pmd = READ_ONCE(*args->pmdp); WARN_ON(!pmd_none(pmd)); pmd = pfn_pmd(args->pmd_pfn, args->page_prot); pmd = pmd_wrprotect(pmd); pmd = pmd_mkclean(pmd); set_pmd_at(args->mm, vaddr, args->pmdp, pmd); flush_dcache_page(page); pmd = pmd_mkwrite(pmd, args->vma); pmd = pmd_mkdirty(pmd); pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1); pmd = READ_ONCE(*args->pmdp); WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd))); pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1); pmd = READ_ONCE(*args->pmdp); WARN_ON(!pmd_none(pmd)); pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot)); pmd = pmd_mkyoung(pmd); set_pmd_at(args->mm, vaddr, args->pmdp, pmd); flush_dcache_page(page); pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp); pmd = READ_ONCE(*args->pmdp); WARN_ON(pmd_young(pmd)); /* Clear the pte entries */ pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp); pgtable_trans_huge_withdraw(args->mm, args->pmdp); } static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { pmd_t pmd; if (!has_transparent_hugepage()) return; pr_debug("Validating PMD leaf\n"); pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot); /* * PMD based THP is a leaf entry. */ pmd = pmd_mkhuge(pmd); WARN_ON(!pmd_leaf(pmd)); } #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { pgprot_t prot = vm_get_page_prot(idx); unsigned long val = idx, *ptr = &val; pud_t pud; if (!has_transparent_pud_hugepage()) return; pr_debug("Validating PUD basic (%pGv)\n", ptr); pud = pfn_pud(args->fixed_pud_pfn, prot); /* * This test needs to be executed after the given page table entry * is created with pfn_pud() to make sure that vm_get_page_prot(idx) * does not have the dirty bit enabled from the beginning. 
This is * important for platforms like arm64 where (!PTE_RDONLY) indicate * dirty bit being set. */ WARN_ON(pud_dirty(pud_wrprotect(pud))); WARN_ON(!pud_same(pud, pud)); WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud)))); WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud)))); WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud)))); WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud)))); WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud)))); WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud)))); WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud)))); WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud)))); if (mm_pmd_folded(args->mm)) return; /* * A huge page does not point to next level page table * entry. Hence this must qualify as pud_bad(). */ WARN_ON(!pud_bad(pud_mkhuge(pud))); } static void __init pud_advanced_tests(struct pgtable_debug_args *args) { struct page *page; unsigned long vaddr = args->vaddr; pud_t pud; if (!has_transparent_pud_hugepage()) return; page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL; if (!page) return; /* * flush_dcache_page() is called after set_pud_at() to clear * PG_arch_1 for the page on ARM64. The page flag isn't cleared * when it's released and page allocation check will fail when * the page is allocated again. For architectures other than ARM64, * the unexpected overhead of cache flushing is acceptable. */ pr_debug("Validating PUD advanced\n"); /* Align the address wrt HPAGE_PUD_SIZE */ vaddr &= HPAGE_PUD_MASK; pud = pfn_pud(args->pud_pfn, args->page_prot); set_pud_at(args->mm, vaddr, args->pudp, pud); flush_dcache_page(page); pudp_set_wrprotect(args->mm, vaddr, args->pudp); pud = READ_ONCE(*args->pudp); WARN_ON(pud_write(pud)); #ifndef __PAGETABLE_PMD_FOLDED pudp_huge_get_and_clear(args->mm, vaddr, args->pudp); pud = READ_ONCE(*args->pudp); WARN_ON(!pud_none(pud)); #endif /* __PAGETABLE_PMD_FOLDED */ pud = pfn_pud(args->pud_pfn, args->page_prot); pud = pud_wrprotect(pud); pud = pud_mkclean(pud); set_pud_at(args->mm, vaddr, args->pudp, pud); flush_dcache_page(page); pud = pud_mkwrite(pud); pud = pud_mkdirty(pud); pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1); pud = READ_ONCE(*args->pudp); WARN_ON(!(pud_write(pud) && pud_dirty(pud))); #ifndef __PAGETABLE_PMD_FOLDED pudp_huge_get_and_clear_full(args->vma, vaddr, args->pudp, 1); pud = READ_ONCE(*args->pudp); WARN_ON(!pud_none(pud)); #endif /* __PAGETABLE_PMD_FOLDED */ pud = pfn_pud(args->pud_pfn, args->page_prot); pud = pud_mkyoung(pud); set_pud_at(args->mm, vaddr, args->pudp, pud); flush_dcache_page(page); pudp_test_and_clear_young(args->vma, vaddr, args->pudp); pud = READ_ONCE(*args->pudp); WARN_ON(pud_young(pud)); pudp_huge_get_and_clear(args->mm, vaddr, args->pudp); } static void __init pud_leaf_tests(struct pgtable_debug_args *args) { pud_t pud; if (!has_transparent_pud_hugepage()) return; pr_debug("Validating PUD leaf\n"); pud = pfn_pud(args->fixed_pud_pfn, args->page_prot); /* * PUD based THP is a leaf entry. 
*/ pud = pud_mkhuge(pud); WARN_ON(!pud_leaf(pud)); } #else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { } static void __init pud_advanced_tests(struct pgtable_debug_args *args) { } static void __init pud_leaf_tests(struct pgtable_debug_args *args) { } #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { } static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { } static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { } static void __init pud_advanced_tests(struct pgtable_debug_args *args) { } static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { } static void __init pud_leaf_tests(struct pgtable_debug_args *args) { } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP static void __init pmd_huge_tests(struct pgtable_debug_args *args) { pmd_t pmd; if (!arch_vmap_pmd_supported(args->page_prot) || args->fixed_alignment < PMD_SIZE) return; pr_debug("Validating PMD huge\n"); /* * X86 defined pmd_set_huge() verifies that the given * PMD is not a populated non-leaf entry. */ WRITE_ONCE(*args->pmdp, __pmd(0)); WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot)); WARN_ON(!pmd_clear_huge(args->pmdp)); pmd = READ_ONCE(*args->pmdp); WARN_ON(!pmd_none(pmd)); } static void __init pud_huge_tests(struct pgtable_debug_args *args) { pud_t pud; if (!arch_vmap_pud_supported(args->page_prot) || args->fixed_alignment < PUD_SIZE) return; pr_debug("Validating PUD huge\n"); /* * X86 defined pud_set_huge() verifies that the given * PUD is not a populated non-leaf entry. */ WRITE_ONCE(*args->pudp, __pud(0)); WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot)); WARN_ON(!pud_clear_huge(args->pudp)); pud = READ_ONCE(*args->pudp); WARN_ON(!pud_none(pud)); } #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ static void __init pmd_huge_tests(struct pgtable_debug_args *args) { } static void __init pud_huge_tests(struct pgtable_debug_args *args) { } #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ static void __init p4d_basic_tests(struct pgtable_debug_args *args) { p4d_t p4d; pr_debug("Validating P4D basic\n"); memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t)); WARN_ON(!p4d_same(p4d, p4d)); } static void __init pgd_basic_tests(struct pgtable_debug_args *args) { pgd_t pgd; pr_debug("Validating PGD basic\n"); memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t)); WARN_ON(!pgd_same(pgd, pgd)); } #ifndef __PAGETABLE_PUD_FOLDED static void __init pud_clear_tests(struct pgtable_debug_args *args) { pud_t pud = READ_ONCE(*args->pudp); if (mm_pmd_folded(args->mm)) return; pr_debug("Validating PUD clear\n"); pud = __pud(pud_val(pud) | RANDOM_ORVALUE); WRITE_ONCE(*args->pudp, pud); pud_clear(args->pudp); pud = READ_ONCE(*args->pudp); WARN_ON(!pud_none(pud)); } static void __init pud_populate_tests(struct pgtable_debug_args *args) { pud_t pud; if (mm_pmd_folded(args->mm)) return; pr_debug("Validating PUD populate\n"); /* * This entry points to next level page table page. * Hence this must not qualify as pud_bad(). 
*/ pud_populate(args->mm, args->pudp, args->start_pmdp); pud = READ_ONCE(*args->pudp); WARN_ON(pud_bad(pud)); } #else /* !__PAGETABLE_PUD_FOLDED */ static void __init pud_clear_tests(struct pgtable_debug_args *args) { } static void __init pud_populate_tests(struct pgtable_debug_args *args) { } #endif /* PAGETABLE_PUD_FOLDED */ #ifndef __PAGETABLE_P4D_FOLDED static void __init p4d_clear_tests(struct pgtable_debug_args *args) { p4d_t p4d = READ_ONCE(*args->p4dp); if (mm_pud_folded(args->mm)) return; pr_debug("Validating P4D clear\n"); p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE); WRITE_ONCE(*args->p4dp, p4d); p4d_clear(args->p4dp); p4d = READ_ONCE(*args->p4dp); WARN_ON(!p4d_none(p4d)); } static void __init p4d_populate_tests(struct pgtable_debug_args *args) { p4d_t p4d; if (mm_pud_folded(args->mm)) return; pr_debug("Validating P4D populate\n"); /* * This entry points to next level page table page. * Hence this must not qualify as p4d_bad(). */ pud_clear(args->pudp); p4d_clear(args->p4dp); p4d_populate(args->mm, args->p4dp, args->start_pudp); p4d = READ_ONCE(*args->p4dp); WARN_ON(p4d_bad(p4d)); } static void __init pgd_clear_tests(struct pgtable_debug_args *args) { pgd_t pgd = READ_ONCE(*(args->pgdp)); if (mm_p4d_folded(args->mm)) return; pr_debug("Validating PGD clear\n"); pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE); WRITE_ONCE(*args->pgdp, pgd); pgd_clear(args->pgdp); pgd = READ_ONCE(*args->pgdp); WARN_ON(!pgd_none(pgd)); } static void __init pgd_populate_tests(struct pgtable_debug_args *args) { pgd_t pgd; if (mm_p4d_folded(args->mm)) return; pr_debug("Validating PGD populate\n"); /* * This entry points to next level page table page. * Hence this must not qualify as pgd_bad(). */ p4d_clear(args->p4dp); pgd_clear(args->pgdp); pgd_populate(args->mm, args->pgdp, args->start_p4dp); pgd = READ_ONCE(*args->pgdp); WARN_ON(pgd_bad(pgd)); } #else /* !__PAGETABLE_P4D_FOLDED */ static void __init p4d_clear_tests(struct pgtable_debug_args *args) { } static void __init pgd_clear_tests(struct pgtable_debug_args *args) { } static void __init p4d_populate_tests(struct pgtable_debug_args *args) { } static void __init pgd_populate_tests(struct pgtable_debug_args *args) { } #endif /* PAGETABLE_P4D_FOLDED */ static void __init pte_clear_tests(struct pgtable_debug_args *args) { struct page *page; pte_t pte = pfn_pte(args->pte_pfn, args->page_prot); page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL; if (!page) return; /* * flush_dcache_page() is called after set_pte_at() to clear * PG_arch_1 for the page on ARM64. The page flag isn't cleared * when it's released and page allocation check will fail when * the page is allocated again. For architectures other than ARM64, * the unexpected overhead of cache flushing is acceptable. 
*/ pr_debug("Validating PTE clear\n"); if (WARN_ON(!args->ptep)) return; #ifndef CONFIG_RISCV pte = __pte(pte_val(pte) | RANDOM_ORVALUE); #endif set_pte_at(args->mm, args->vaddr, args->ptep, pte); flush_dcache_page(page); barrier(); ptep_clear(args->mm, args->vaddr, args->ptep); pte = ptep_get(args->ptep); WARN_ON(!pte_none(pte)); } static void __init pmd_clear_tests(struct pgtable_debug_args *args) { pmd_t pmd = READ_ONCE(*args->pmdp); pr_debug("Validating PMD clear\n"); pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE); WRITE_ONCE(*args->pmdp, pmd); pmd_clear(args->pmdp); pmd = READ_ONCE(*args->pmdp); WARN_ON(!pmd_none(pmd)); } static void __init pmd_populate_tests(struct pgtable_debug_args *args) { pmd_t pmd; pr_debug("Validating PMD populate\n"); /* * This entry points to next level page table page. * Hence this must not qualify as pmd_bad(). */ pmd_populate(args->mm, args->pmdp, args->start_ptep); pmd = READ_ONCE(*args->pmdp); WARN_ON(pmd_bad(pmd)); } static void __init pte_special_tests(struct pgtable_debug_args *args) { pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot); if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) return; pr_debug("Validating PTE special\n"); WARN_ON(!pte_special(pte_mkspecial(pte))); } static void __init pte_protnone_tests(struct pgtable_debug_args *args) { pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none); if (!IS_ENABLED(CONFIG_NUMA_BALANCING)) return; pr_debug("Validating PTE protnone\n"); WARN_ON(!pte_protnone(pte)); WARN_ON(!pte_present(pte)); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { pmd_t pmd; if (!IS_ENABLED(CONFIG_NUMA_BALANCING)) return; if (!has_transparent_hugepage()) return; pr_debug("Validating PMD protnone\n"); pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none)); WARN_ON(!pmd_protnone(pmd)); WARN_ON(!pmd_present(pmd)); } #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #ifdef CONFIG_ARCH_HAS_PTE_DEVMAP static void __init pte_devmap_tests(struct pgtable_debug_args *args) { pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot); pr_debug("Validating PTE devmap\n"); WARN_ON(!pte_devmap(pte_mkdevmap(pte))); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { pmd_t pmd; if (!has_transparent_hugepage()) return; pr_debug("Validating PMD devmap\n"); pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot); WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd))); } #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD static void __init pud_devmap_tests(struct pgtable_debug_args *args) { pud_t pud; if (!has_transparent_pud_hugepage()) return; pr_debug("Validating PUD devmap\n"); pud = pfn_pud(args->fixed_pud_pfn, args->page_prot); WARN_ON(!pud_devmap(pud_mkdevmap(pud))); } #else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ static void __init pud_devmap_tests(struct pgtable_debug_args *args) { } #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ #else /* CONFIG_TRANSPARENT_HUGEPAGE */ static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { } static void __init pud_devmap_tests(struct pgtable_debug_args *args) { } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #else static void __init pte_devmap_tests(struct pgtable_debug_args *args) { } static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { } static void __init pud_devmap_tests(struct pgtable_debug_args *args) { } #endif /* 
CONFIG_ARCH_HAS_PTE_DEVMAP */ static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args) { pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot); if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY)) return; pr_debug("Validating PTE soft dirty\n"); WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte))); WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte))); } static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args) { pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot); if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY)) return; pr_debug("Validating PTE swap soft dirty\n"); WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte))); WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte))); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { pmd_t pmd; if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY)) return; if (!has_transparent_hugepage()) return; pr_debug("Validating PMD soft dirty\n"); pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot); WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd))); WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd))); } static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { pmd_t pmd; if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) || !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION)) return; if (!has_transparent_hugepage()) return; pr_debug("Validating PMD swap soft dirty\n"); pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot); WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd))); WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd))); } #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { } static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args) { unsigned long max_swap_offset; swp_entry_t entry, entry2; pte_t pte; pr_debug("Validating PTE swap exclusive\n"); /* See generic_max_swapfile_size(): probe the maximum offset */ max_swap_offset = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0, ~0UL)))); /* Create a swp entry with all possible bits set */ entry = swp_entry((1 << MAX_SWAPFILES_SHIFT) - 1, max_swap_offset); pte = swp_entry_to_pte(entry); WARN_ON(pte_swp_exclusive(pte)); WARN_ON(!is_swap_pte(pte)); entry2 = pte_to_swp_entry(pte); WARN_ON(memcmp(&entry, &entry2, sizeof(entry))); pte = pte_swp_mkexclusive(pte); WARN_ON(!pte_swp_exclusive(pte)); WARN_ON(!is_swap_pte(pte)); WARN_ON(pte_swp_soft_dirty(pte)); entry2 = pte_to_swp_entry(pte); WARN_ON(memcmp(&entry, &entry2, sizeof(entry))); pte = pte_swp_clear_exclusive(pte); WARN_ON(pte_swp_exclusive(pte)); WARN_ON(!is_swap_pte(pte)); entry2 = pte_to_swp_entry(pte); WARN_ON(memcmp(&entry, &entry2, sizeof(entry))); } static void __init pte_swap_tests(struct pgtable_debug_args *args) { swp_entry_t swp; pte_t pte; pr_debug("Validating PTE swap\n"); pte = pfn_pte(args->fixed_pte_pfn, args->page_prot); swp = __pte_to_swp_entry(pte); pte = __swp_entry_to_pte(swp); WARN_ON(args->fixed_pte_pfn != pte_pfn(pte)); } #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION static void __init pmd_swap_tests(struct pgtable_debug_args *args) { swp_entry_t swp; pmd_t pmd; if (!has_transparent_hugepage()) return; pr_debug("Validating PMD swap\n"); pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot); swp = __pmd_to_swp_entry(pmd); pmd = __swp_entry_to_pmd(swp); WARN_ON(args->fixed_pmd_pfn != pmd_pfn(pmd)); } #else /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */ static 
void __init pmd_swap_tests(struct pgtable_debug_args *args) { } #endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */ static void __init swap_migration_tests(struct pgtable_debug_args *args) { struct page *page; swp_entry_t swp; if (!IS_ENABLED(CONFIG_MIGRATION)) return; /* * swap_migration_tests() requires a dedicated page as it needs to * be locked before creating a migration entry from it. Locking the * page that actually maps kernel text ('start_kernel') can be real * problematic. Lets use the allocated page explicitly for this * purpose. */ page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL; if (!page) return; pr_debug("Validating swap migration\n"); /* * make_[readable|writable]_migration_entry() expects given page to * be locked, otherwise it stumbles upon a BUG_ON(). */ __SetPageLocked(page); swp = make_writable_migration_entry(page_to_pfn(page)); WARN_ON(!is_migration_entry(swp)); WARN_ON(!is_writable_migration_entry(swp)); swp = make_readable_migration_entry(swp_offset(swp)); WARN_ON(!is_migration_entry(swp)); WARN_ON(is_writable_migration_entry(swp)); swp = make_readable_migration_entry(page_to_pfn(page)); WARN_ON(!is_migration_entry(swp)); WARN_ON(is_writable_migration_entry(swp)); __ClearPageLocked(page); } #ifdef CONFIG_HUGETLB_PAGE static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { struct page *page; pte_t pte; pr_debug("Validating HugeTLB basic\n"); /* * Accessing the page associated with the pfn is safe here, * as it was previously derived from a real kernel symbol. */ page = pfn_to_page(args->fixed_pmd_pfn); pte = mk_huge_pte(page, args->page_prot); WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte))); WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte)))); WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte)))); #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot); WARN_ON(!pte_huge(arch_make_huge_pte(pte, PMD_SHIFT, VM_ACCESS_FLAGS))); #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ } #else /* !CONFIG_HUGETLB_PAGE */ static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { } #endif /* CONFIG_HUGETLB_PAGE */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE static void __init pmd_thp_tests(struct pgtable_debug_args *args) { pmd_t pmd; if (!has_transparent_hugepage()) return; pr_debug("Validating PMD based THP\n"); /* * pmd_trans_huge() and pmd_present() must return positive after * MMU invalidation with pmd_mkinvalid(). This behavior is an * optimization for transparent huge page. pmd_trans_huge() must * be true if pmd_page() returns a valid THP to avoid taking the * pmd_lock when others walk over non transhuge pmds (i.e. there * are no THP allocated). Especially when splitting a THP and * removing the present bit from the pmd, pmd_trans_huge() still * needs to return true. pmd_present() should be true whenever * pmd_trans_huge() returns true. */ pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot); WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd))); #ifndef __HAVE_ARCH_PMDP_INVALIDATE WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd)))); WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd)))); #endif /* __HAVE_ARCH_PMDP_INVALIDATE */ } #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD static void __init pud_thp_tests(struct pgtable_debug_args *args) { pud_t pud; if (!has_transparent_pud_hugepage()) return; pr_debug("Validating PUD based THP\n"); pud = pfn_pud(args->fixed_pud_pfn, args->page_prot); WARN_ON(!pud_trans_huge(pud_mkhuge(pud))); /* * pud_mkinvalid() has been dropped for now. 
Enable back * these tests when it comes back with a modified pud_present(). * * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud)))); * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud)))); */ } #else /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ static void __init pud_thp_tests(struct pgtable_debug_args *args) { } #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ static void __init pmd_thp_tests(struct pgtable_debug_args *args) { } static void __init pud_thp_tests(struct pgtable_debug_args *args) { } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ static unsigned long __init get_random_vaddr(void) { unsigned long random_vaddr, random_pages, total_user_pages; total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE; random_pages = get_random_long() % total_user_pages; random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE; return random_vaddr; } static void __init destroy_args(struct pgtable_debug_args *args) { struct page *page = NULL; /* Free (huge) page */ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && has_transparent_pud_hugepage() && args->pud_pfn != ULONG_MAX) { if (args->is_contiguous_page) { free_contig_range(args->pud_pfn, (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT))); } else { page = pfn_to_page(args->pud_pfn); __free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT); } args->pud_pfn = ULONG_MAX; args->pmd_pfn = ULONG_MAX; args->pte_pfn = ULONG_MAX; } if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && has_transparent_hugepage() && args->pmd_pfn != ULONG_MAX) { if (args->is_contiguous_page) { free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER)); } else { page = pfn_to_page(args->pmd_pfn); __free_pages(page, HPAGE_PMD_ORDER); } args->pmd_pfn = ULONG_MAX; args->pte_pfn = ULONG_MAX; } if (args->pte_pfn != ULONG_MAX) { page = pfn_to_page(args->pte_pfn); __free_page(page); args->pte_pfn = ULONG_MAX; } /* Free page table entries */ if (args->start_ptep) { pte_free(args->mm, args->start_ptep); mm_dec_nr_ptes(args->mm); } if (args->start_pmdp) { pmd_free(args->mm, args->start_pmdp); mm_dec_nr_pmds(args->mm); } if (args->start_pudp) { pud_free(args->mm, args->start_pudp); mm_dec_nr_puds(args->mm); } if (args->start_p4dp) p4d_free(args->mm, args->start_p4dp); /* Free vma and mm struct */ if (args->vma) vm_area_free(args->vma); if (args->mm) mmdrop(args->mm); } static struct page * __init debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order) { struct page *page = NULL; #ifdef CONFIG_CONTIG_ALLOC if (order > MAX_ORDER) { page = alloc_contig_pages((1 << order), GFP_KERNEL, first_online_node, NULL); if (page) { args->is_contiguous_page = true; return page; } } #endif if (order <= MAX_ORDER) page = alloc_pages(GFP_KERNEL, order); return page; } /* * Check if a physical memory range described by <pstart, pend> contains * an area that is of size psize, and aligned to psize. * * Don't use address 0, an all-zeroes physical address might mask bugs, and * it's not used on x86. */ static void __init phys_align_check(phys_addr_t pstart, phys_addr_t pend, unsigned long psize, phys_addr_t *physp, unsigned long *alignp) { phys_addr_t aligned_start, aligned_end; if (pstart == 0) pstart = PAGE_SIZE; aligned_start = ALIGN(pstart, psize); aligned_end = aligned_start + psize; if (aligned_end > aligned_start && aligned_end <= pend) { *alignp = psize; *physp = aligned_start; } } static void __init init_fixed_pfns(struct pgtable_debug_args *args) { u64 idx; phys_addr_t phys, pstart, pend; /* * Initialize the fixed pfns. 
To do this, try to find a * valid physical range, preferably aligned to PUD_SIZE, * but settling for aligned to PMD_SIZE as a fallback. If * neither of those is found, use the physical address of * the start_kernel symbol. * * The memory doesn't need to be allocated, it just needs to exist * as usable memory. It won't be touched. * * The alignment is recorded, and can be checked to see if we * can run the tests that require an actual valid physical * address range on some architectures ({pmd,pud}_huge_test * on x86). */ phys = __pa_symbol(&start_kernel); args->fixed_alignment = PAGE_SIZE; for_each_mem_range(idx, &pstart, &pend) { /* First check for a PUD-aligned area */ phys_align_check(pstart, pend, PUD_SIZE, &phys, &args->fixed_alignment); /* If a PUD-aligned area is found, we're done */ if (args->fixed_alignment == PUD_SIZE) break; /* * If no PMD-aligned area found yet, check for one, * but continue the loop to look for a PUD-aligned area. */ if (args->fixed_alignment < PMD_SIZE) phys_align_check(pstart, pend, PMD_SIZE, &phys, &args->fixed_alignment); } args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK); args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK); args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK); args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK); args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK); WARN_ON(!pfn_valid(args->fixed_pte_pfn)); } static int __init init_args(struct pgtable_debug_args *args) { struct page *page = NULL; int ret = 0; /* * Initialize the debugging data. * * vm_get_page_prot(VM_NONE) or vm_get_page_prot(VM_SHARED|VM_NONE) * will help create page table entries with PROT_NONE permission as * required for pxx_protnone_tests(). */ memset(args, 0, sizeof(*args)); args->vaddr = get_random_vaddr(); args->page_prot = vm_get_page_prot(VM_ACCESS_FLAGS); args->page_prot_none = vm_get_page_prot(VM_NONE); args->is_contiguous_page = false; args->pud_pfn = ULONG_MAX; args->pmd_pfn = ULONG_MAX; args->pte_pfn = ULONG_MAX; args->fixed_pgd_pfn = ULONG_MAX; args->fixed_p4d_pfn = ULONG_MAX; args->fixed_pud_pfn = ULONG_MAX; args->fixed_pmd_pfn = ULONG_MAX; args->fixed_pte_pfn = ULONG_MAX; /* Allocate mm and vma */ args->mm = mm_alloc(); if (!args->mm) { pr_err("Failed to allocate mm struct\n"); ret = -ENOMEM; goto error; } args->vma = vm_area_alloc(args->mm); if (!args->vma) { pr_err("Failed to allocate vma\n"); ret = -ENOMEM; goto error; } /* * Allocate page table entries. They will be modified in the tests. * Lets save the page table entries so that they can be released * when the tests are completed. 
*/ args->pgdp = pgd_offset(args->mm, args->vaddr); args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr); if (!args->p4dp) { pr_err("Failed to allocate p4d entries\n"); ret = -ENOMEM; goto error; } args->start_p4dp = p4d_offset(args->pgdp, 0UL); WARN_ON(!args->start_p4dp); args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr); if (!args->pudp) { pr_err("Failed to allocate pud entries\n"); ret = -ENOMEM; goto error; } args->start_pudp = pud_offset(args->p4dp, 0UL); WARN_ON(!args->start_pudp); args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr); if (!args->pmdp) { pr_err("Failed to allocate pmd entries\n"); ret = -ENOMEM; goto error; } args->start_pmdp = pmd_offset(args->pudp, 0UL); WARN_ON(!args->start_pmdp); if (pte_alloc(args->mm, args->pmdp)) { pr_err("Failed to allocate pte entries\n"); ret = -ENOMEM; goto error; } args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp)); WARN_ON(!args->start_ptep); init_fixed_pfns(args); /* * Allocate (huge) pages because some of the tests need to access * the data in the pages. The corresponding tests will be skipped * if we fail to allocate (huge) pages. */ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && has_transparent_pud_hugepage()) { page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PUD_SHIFT - PAGE_SHIFT); if (page) { args->pud_pfn = page_to_pfn(page); args->pmd_pfn = args->pud_pfn; args->pte_pfn = args->pud_pfn; return 0; } } if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && has_transparent_hugepage()) { page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER); if (page) { args->pmd_pfn = page_to_pfn(page); args->pte_pfn = args->pmd_pfn; return 0; } } page = alloc_page(GFP_KERNEL); if (page) args->pte_pfn = page_to_pfn(page); return 0; error: destroy_args(args); return ret; } static int __init debug_vm_pgtable(void) { struct pgtable_debug_args args; spinlock_t *ptl = NULL; int idx, ret; pr_info("Validating architecture page table helpers\n"); ret = init_args(&args); if (ret) return ret; /* * Iterate over each possible vm_flags to make sure that all * the basic page table transformation validations just hold * true irrespective of the starting protection value for a * given page table entry. * * Protection based vm_flags combinatins are always linear * and increasing i.e starting from VM_NONE and going upto * (VM_SHARED | READ | WRITE | EXEC). */ #define VM_FLAGS_START (VM_NONE) #define VM_FLAGS_END (VM_SHARED | VM_EXEC | VM_WRITE | VM_READ) for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) { pte_basic_tests(&args, idx); pmd_basic_tests(&args, idx); pud_basic_tests(&args, idx); } /* * Both P4D and PGD level tests are very basic which do not * involve creating page table entries from the protection * value and the given pfn. Hence just keep them out from * the above iteration for now to save some test execution * time. */ p4d_basic_tests(&args); pgd_basic_tests(&args); pmd_leaf_tests(&args); pud_leaf_tests(&args); pte_special_tests(&args); pte_protnone_tests(&args); pmd_protnone_tests(&args); pte_devmap_tests(&args); pmd_devmap_tests(&args); pud_devmap_tests(&args); pte_soft_dirty_tests(&args); pmd_soft_dirty_tests(&args); pte_swap_soft_dirty_tests(&args); pmd_swap_soft_dirty_tests(&args); pte_swap_exclusive_tests(&args); pte_swap_tests(&args); pmd_swap_tests(&args); swap_migration_tests(&args); pmd_thp_tests(&args); pud_thp_tests(&args); hugetlb_basic_tests(&args); /* * Page table modifying tests. They need to hold * proper page table lock. 
*/ args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl); pte_clear_tests(&args); pte_advanced_tests(&args); if (args.ptep) pte_unmap_unlock(args.ptep, ptl); ptl = pmd_lock(args.mm, args.pmdp); pmd_clear_tests(&args); pmd_advanced_tests(&args); pmd_huge_tests(&args); pmd_populate_tests(&args); spin_unlock(ptl); ptl = pud_lock(args.mm, args.pudp); pud_clear_tests(&args); pud_advanced_tests(&args); pud_huge_tests(&args); pud_populate_tests(&args); spin_unlock(ptl); spin_lock(&(args.mm->page_table_lock)); p4d_clear_tests(&args); pgd_clear_tests(&args); p4d_populate_tests(&args); pgd_populate_tests(&args); spin_unlock(&(args.mm->page_table_lock)); destroy_args(&args); return 0; } late_initcall(debug_vm_pgtable);
linux-master
mm/debug_vm_pgtable.c
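The RANDOM_ORVALUE and ARCH_SKIP_MASK definitions near the top of debug_vm_pgtable.c are easier to inspect in isolation: the OR pattern used to scribble on test entries deliberately leaves the low four bits (the s390 entry type) and bit 62 (the ppc64 pte marker) clear. The userspace C sketch below re-derives those masks for a 64-bit build; GENMASK() here is a local stand-in written for this illustration, since the kernel's own macro is not part of this excerpt, and the sample entry value is made up.

/*
 * Userspace sketch of the RANDOM_ORVALUE / ARCH_SKIP_MASK computation
 * used by the test above. GENMASK() below is a local re-derivation for
 * a 64-bit build, not the kernel macro, and the entry value is invented
 * purely to demonstrate that the skipped bits stay untouched.
 */
#include <stdio.h>
#include <stdint.h>

#define BITS_PER_LONG	64
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

#define S390_SKIP_MASK	GENMASK(3, 0)	/* low 4 bits: entry type on s390 */
#define PPC64_SKIP_MASK	GENMASK(62, 62)	/* bit 62: pte marker on ppc64 */
#define ARCH_SKIP_MASK	(S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE	(GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)

int main(void)
{
	unsigned long entry = 0x5UL;	/* pretend the low bits encode a type */

	printf("skip mask     : %#018lx\n", ARCH_SKIP_MASK);
	printf("random orvalue: %#018lx\n", RANDOM_ORVALUE);
	/* OR-ing the pattern never disturbs the skipped bits. */
	printf("entry | pattern keeps type bits: %#lx\n",
	       (entry | RANDOM_ORVALUE) & S390_SKIP_MASK);
	return 0;
}

Running it confirms that OR-ing RANDOM_ORVALUE into an entry cannot disturb the skipped bits, which is why the header comment in the test warns against changing them while loading entries.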
// SPDX-License-Identifier: GPL-2.0-only /* * mm/page-writeback.c * * Copyright (C) 2002, Linus Torvalds. * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra * * Contains functions related to writing back dirty pages at the * address_space level. * * 10Apr2002 Andrew Morton * Initial version */ #include <linux/kernel.h> #include <linux/math64.h> #include <linux/export.h> #include <linux/spinlock.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/writeback.h> #include <linux/init.h> #include <linux/backing-dev.h> #include <linux/task_io_accounting_ops.h> #include <linux/blkdev.h> #include <linux/mpage.h> #include <linux/rmap.h> #include <linux/percpu.h> #include <linux/smp.h> #include <linux/sysctl.h> #include <linux/cpu.h> #include <linux/syscalls.h> #include <linux/pagevec.h> #include <linux/timer.h> #include <linux/sched/rt.h> #include <linux/sched/signal.h> #include <linux/mm_inline.h> #include <trace/events/writeback.h> #include "internal.h" /* * Sleep at most 200ms at a time in balance_dirty_pages(). */ #define MAX_PAUSE max(HZ/5, 1) /* * Try to keep balance_dirty_pages() call intervals higher than this many pages * by raising pause time to max_pause when falls below it. */ #define DIRTY_POLL_THRESH (128 >> (PAGE_SHIFT - 10)) /* * Estimate write bandwidth at 200ms intervals. */ #define BANDWIDTH_INTERVAL max(HZ/5, 1) #define RATELIMIT_CALC_SHIFT 10 /* * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited * will look to see if it needs to force writeback or throttling. */ static long ratelimit_pages = 32; /* The following parameters are exported via /proc/sys/vm */ /* * Start background writeback (via writeback threads) at this percentage */ static int dirty_background_ratio = 10; /* * dirty_background_bytes starts at 0 (disabled) so that it is a function of * dirty_background_ratio * the amount of dirtyable memory */ static unsigned long dirty_background_bytes; /* * free highmem will not be subtracted from the total free memory * for calculating free ratios if vm_highmem_is_dirtyable is true */ static int vm_highmem_is_dirtyable; /* * The generator of dirty data starts writeback at this percentage */ static int vm_dirty_ratio = 20; /* * vm_dirty_bytes starts at 0 (disabled) so that it is a function of * vm_dirty_ratio * the amount of dirtyable memory */ static unsigned long vm_dirty_bytes; /* * The interval between `kupdate'-style writebacks */ unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */ EXPORT_SYMBOL_GPL(dirty_writeback_interval); /* * The longest time for which data is allowed to remain dirty */ unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */ /* * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies: * a full sync is triggered after this time elapses without any disk activity. 
*/ int laptop_mode; EXPORT_SYMBOL(laptop_mode); /* End of sysctl-exported parameters */ struct wb_domain global_wb_domain; /* consolidated parameters for balance_dirty_pages() and its subroutines */ struct dirty_throttle_control { #ifdef CONFIG_CGROUP_WRITEBACK struct wb_domain *dom; struct dirty_throttle_control *gdtc; /* only set in memcg dtc's */ #endif struct bdi_writeback *wb; struct fprop_local_percpu *wb_completions; unsigned long avail; /* dirtyable */ unsigned long dirty; /* file_dirty + write + nfs */ unsigned long thresh; /* dirty threshold */ unsigned long bg_thresh; /* dirty background threshold */ unsigned long wb_dirty; /* per-wb counterparts */ unsigned long wb_thresh; unsigned long wb_bg_thresh; unsigned long pos_ratio; }; /* * Length of period for aging writeout fractions of bdis. This is an * arbitrarily chosen number. The longer the period, the slower fractions will * reflect changes in current writeout rate. */ #define VM_COMPLETIONS_PERIOD_LEN (3*HZ) #ifdef CONFIG_CGROUP_WRITEBACK #define GDTC_INIT(__wb) .wb = (__wb), \ .dom = &global_wb_domain, \ .wb_completions = &(__wb)->completions #define GDTC_INIT_NO_WB .dom = &global_wb_domain #define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \ .dom = mem_cgroup_wb_domain(__wb), \ .wb_completions = &(__wb)->memcg_completions, \ .gdtc = __gdtc static bool mdtc_valid(struct dirty_throttle_control *dtc) { return dtc->dom; } static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc) { return dtc->dom; } static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc) { return mdtc->gdtc; } static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb) { return &wb->memcg_completions; } static void wb_min_max_ratio(struct bdi_writeback *wb, unsigned long *minp, unsigned long *maxp) { unsigned long this_bw = READ_ONCE(wb->avg_write_bandwidth); unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth); unsigned long long min = wb->bdi->min_ratio; unsigned long long max = wb->bdi->max_ratio; /* * @wb may already be clean by the time control reaches here and * the total may not include its bw. */ if (this_bw < tot_bw) { if (min) { min *= this_bw; min = div64_ul(min, tot_bw); } if (max < 100 * BDI_RATIO_SCALE) { max *= this_bw; max = div64_ul(max, tot_bw); } } *minp = min; *maxp = max; } #else /* CONFIG_CGROUP_WRITEBACK */ #define GDTC_INIT(__wb) .wb = (__wb), \ .wb_completions = &(__wb)->completions #define GDTC_INIT_NO_WB #define MDTC_INIT(__wb, __gdtc) static bool mdtc_valid(struct dirty_throttle_control *dtc) { return false; } static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc) { return &global_wb_domain; } static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc) { return NULL; } static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb) { return NULL; } static void wb_min_max_ratio(struct bdi_writeback *wb, unsigned long *minp, unsigned long *maxp) { *minp = wb->bdi->min_ratio; *maxp = wb->bdi->max_ratio; } #endif /* CONFIG_CGROUP_WRITEBACK */ /* * In a memory zone, there is a certain amount of pages we consider * available for the page cache, which is essentially the number of * free and reclaimable pages, minus some zone reserves to protect * lowmem and the ability to uphold the zone's watermarks without * requiring writeback. * * This number of dirtyable pages is the base value of which the * user-configurable dirty ratio is the effective number of pages that * are allowed to be actually dirtied. 
Per individual zone, or * globally by using the sum of dirtyable pages over all zones. * * Because the user is allowed to specify the dirty limit globally as * absolute number of bytes, calculating the per-zone dirty limit can * require translating the configured limit into a percentage of * global dirtyable memory first. */ /** * node_dirtyable_memory - number of dirtyable pages in a node * @pgdat: the node * * Return: the node's number of pages potentially available for dirty * page cache. This is the base value for the per-node dirty limits. */ static unsigned long node_dirtyable_memory(struct pglist_data *pgdat) { unsigned long nr_pages = 0; int z; for (z = 0; z < MAX_NR_ZONES; z++) { struct zone *zone = pgdat->node_zones + z; if (!populated_zone(zone)) continue; nr_pages += zone_page_state(zone, NR_FREE_PAGES); } /* * Pages reserved for the kernel should not be considered * dirtyable, to prevent a situation where reclaim has to * clean pages in order to balance the zones. */ nr_pages -= min(nr_pages, pgdat->totalreserve_pages); nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE); nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE); return nr_pages; } static unsigned long highmem_dirtyable_memory(unsigned long total) { #ifdef CONFIG_HIGHMEM int node; unsigned long x = 0; int i; for_each_node_state(node, N_HIGH_MEMORY) { for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) { struct zone *z; unsigned long nr_pages; if (!is_highmem_idx(i)) continue; z = &NODE_DATA(node)->node_zones[i]; if (!populated_zone(z)) continue; nr_pages = zone_page_state(z, NR_FREE_PAGES); /* watch for underflows */ nr_pages -= min(nr_pages, high_wmark_pages(z)); nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE); nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE); x += nr_pages; } } /* * Make sure that the number of highmem pages is never larger * than the number of the total dirtyable memory. This can only * occur in very strange VM situations but we want to make sure * that this does not occur. */ return min(x, total); #else return 0; #endif } /** * global_dirtyable_memory - number of globally dirtyable pages * * Return: the global number of pages potentially available for dirty * page cache. This is the base value for the global dirty limits. */ static unsigned long global_dirtyable_memory(void) { unsigned long x; x = global_zone_page_state(NR_FREE_PAGES); /* * Pages reserved for the kernel should not be considered * dirtyable, to prevent a situation where reclaim has to * clean pages in order to balance the zones. */ x -= min(x, totalreserve_pages); x += global_node_page_state(NR_INACTIVE_FILE); x += global_node_page_state(NR_ACTIVE_FILE); if (!vm_highmem_is_dirtyable) x -= highmem_dirtyable_memory(x); return x + 1; /* Ensure that we never return 0 */ } /** * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain * @dtc: dirty_throttle_control of interest * * Calculate @dtc->thresh and ->bg_thresh considering * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}. The caller * must ensure that @dtc->avail is set before calling this function. The * dirty limits will be lifted by 1/4 for real-time tasks. 
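 *
 * For example (illustrative numbers only): with 4,000,000 dirtyable pages
 * (~16GB of 4KB pages), vm_dirty_ratio = 20 and dirty_background_ratio = 10,
 *
 *      thresh    = 4,000,000 * 20 / 100 = 800,000 pages
 *      bg_thresh = 4,000,000 * 10 / 100 = 400,000 pages
 *
 * and the real-time lift raises thresh to roughly 800,000 + 800,000/4 =
 * 1,000,000 pages (plus the small dirty_limit/32 term).  Setting
 * vm_dirty_bytes or dirty_background_bytes overrides the corresponding
 * ratio.
 *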
*/ static void domain_dirty_limits(struct dirty_throttle_control *dtc) { const unsigned long available_memory = dtc->avail; struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc); unsigned long bytes = vm_dirty_bytes; unsigned long bg_bytes = dirty_background_bytes; /* convert ratios to per-PAGE_SIZE for higher precision */ unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100; unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100; unsigned long thresh; unsigned long bg_thresh; struct task_struct *tsk; /* gdtc is !NULL iff @dtc is for memcg domain */ if (gdtc) { unsigned long global_avail = gdtc->avail; /* * The byte settings can't be applied directly to memcg * domains. Convert them to ratios by scaling against * globally available memory. As the ratios are in * per-PAGE_SIZE, they can be obtained by dividing bytes by * number of pages. */ if (bytes) ratio = min(DIV_ROUND_UP(bytes, global_avail), PAGE_SIZE); if (bg_bytes) bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail), PAGE_SIZE); bytes = bg_bytes = 0; } if (bytes) thresh = DIV_ROUND_UP(bytes, PAGE_SIZE); else thresh = (ratio * available_memory) / PAGE_SIZE; if (bg_bytes) bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE); else bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE; if (bg_thresh >= thresh) bg_thresh = thresh / 2; tsk = current; if (rt_task(tsk)) { bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32; thresh += thresh / 4 + global_wb_domain.dirty_limit / 32; } dtc->thresh = thresh; dtc->bg_thresh = bg_thresh; /* we should eventually report the domain in the TP */ if (!gdtc) trace_global_dirty_state(bg_thresh, thresh); } /** * global_dirty_limits - background-writeback and dirty-throttling thresholds * @pbackground: out parameter for bg_thresh * @pdirty: out parameter for thresh * * Calculate bg_thresh and thresh for global_wb_domain. See * domain_dirty_limits() for details. */ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty) { struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB }; gdtc.avail = global_dirtyable_memory(); domain_dirty_limits(&gdtc); *pbackground = gdtc.bg_thresh; *pdirty = gdtc.thresh; } /** * node_dirty_limit - maximum number of dirty pages allowed in a node * @pgdat: the node * * Return: the maximum number of dirty pages allowed in a node, based * on the node's dirtyable memory. */ static unsigned long node_dirty_limit(struct pglist_data *pgdat) { unsigned long node_memory = node_dirtyable_memory(pgdat); struct task_struct *tsk = current; unsigned long dirty; if (vm_dirty_bytes) dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) * node_memory / global_dirtyable_memory(); else dirty = vm_dirty_ratio * node_memory / 100; if (rt_task(tsk)) dirty += dirty / 4; return dirty; } /** * node_dirty_ok - tells whether a node is within its dirty limits * @pgdat: the node to check * * Return: %true when the dirty pages in @pgdat are within the node's * dirty limit, %false if the limit is exceeded. 
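 *
 * For example (illustrative numbers only): a node with 8,000,000 dirtyable
 * pages and vm_dirty_ratio = 20 gets
 *
 *      node_dirty_limit = 8,000,000 * 20 / 100 = 1,600,000 pages
 *
 * (2,000,000 for a real-time task), and the check below then simply
 * compares NR_FILE_DIRTY + NR_WRITEBACK on that node against this limit.
 *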
*/ bool node_dirty_ok(struct pglist_data *pgdat) { unsigned long limit = node_dirty_limit(pgdat); unsigned long nr_pages = 0; nr_pages += node_page_state(pgdat, NR_FILE_DIRTY); nr_pages += node_page_state(pgdat, NR_WRITEBACK); return nr_pages <= limit; } #ifdef CONFIG_SYSCTL static int dirty_background_ratio_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write) dirty_background_bytes = 0; return ret; } static int dirty_background_bytes_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write) dirty_background_ratio = 0; return ret; } static int dirty_ratio_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int old_ratio = vm_dirty_ratio; int ret; ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write && vm_dirty_ratio != old_ratio) { writeback_set_ratelimit(); vm_dirty_bytes = 0; } return ret; } static int dirty_bytes_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { unsigned long old_bytes = vm_dirty_bytes; int ret; ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write && vm_dirty_bytes != old_bytes) { writeback_set_ratelimit(); vm_dirty_ratio = 0; } return ret; } #endif static unsigned long wp_next_time(unsigned long cur_time) { cur_time += VM_COMPLETIONS_PERIOD_LEN; /* 0 has a special meaning... */ if (!cur_time) return 1; return cur_time; } static void wb_domain_writeout_add(struct wb_domain *dom, struct fprop_local_percpu *completions, unsigned int max_prop_frac, long nr) { __fprop_add_percpu_max(&dom->completions, completions, max_prop_frac, nr); /* First event after period switching was turned off? */ if (unlikely(!dom->period_time)) { /* * We can race with other __bdi_writeout_inc calls here but * it does not cause any harm since the resulting time when * timer will fire and what is in writeout_period_time will be * roughly the same. */ dom->period_time = wp_next_time(jiffies); mod_timer(&dom->period_timer, dom->period_time); } } /* * Increment @wb's writeout completion count and the global writeout * completion count. Called from __folio_end_writeback(). */ static inline void __wb_writeout_add(struct bdi_writeback *wb, long nr) { struct wb_domain *cgdom; wb_stat_mod(wb, WB_WRITTEN, nr); wb_domain_writeout_add(&global_wb_domain, &wb->completions, wb->bdi->max_prop_frac, nr); cgdom = mem_cgroup_wb_domain(wb); if (cgdom) wb_domain_writeout_add(cgdom, wb_memcg_completions(wb), wb->bdi->max_prop_frac, nr); } void wb_writeout_inc(struct bdi_writeback *wb) { unsigned long flags; local_irq_save(flags); __wb_writeout_add(wb, 1); local_irq_restore(flags); } EXPORT_SYMBOL_GPL(wb_writeout_inc); /* * On idle system, we can be called long after we scheduled because we use * deferred timers so count with missed periods. */ static void writeout_period(struct timer_list *t) { struct wb_domain *dom = from_timer(dom, t, period_timer); int miss_periods = (jiffies - dom->period_time) / VM_COMPLETIONS_PERIOD_LEN; if (fprop_new_period(&dom->completions, miss_periods + 1)) { dom->period_time = wp_next_time(dom->period_time + miss_periods * VM_COMPLETIONS_PERIOD_LEN); mod_timer(&dom->period_timer, dom->period_time); } else { /* * Aging has zeroed all fractions. Stop wasting CPU on period * updates. 
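 *
 * For reference, the catch-up in the branch above amounts to (schematic,
 * not the literal code):
 *
 *      miss_periods = (jiffies - period_time) / VM_COMPLETIONS_PERIOD_LEN;
 *      age the fractions (miss_periods + 1) times;
 *      period_time += (miss_periods + 1) * VM_COMPLETIONS_PERIOD_LEN;
 *
 * so a long idle stretch is paid for in one go instead of one timer tick
 * per missed period.
 *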
*/ dom->period_time = 0; } } int wb_domain_init(struct wb_domain *dom, gfp_t gfp) { memset(dom, 0, sizeof(*dom)); spin_lock_init(&dom->lock); timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE); dom->dirty_limit_tstamp = jiffies; return fprop_global_init(&dom->completions, gfp); } #ifdef CONFIG_CGROUP_WRITEBACK void wb_domain_exit(struct wb_domain *dom) { del_timer_sync(&dom->period_timer); fprop_global_destroy(&dom->completions); } #endif /* * bdi_min_ratio keeps the sum of the minimum dirty shares of all * registered backing devices, which, for obvious reasons, can not * exceed 100%. */ static unsigned int bdi_min_ratio; static int bdi_check_pages_limit(unsigned long pages) { unsigned long max_dirty_pages = global_dirtyable_memory(); if (pages > max_dirty_pages) return -EINVAL; return 0; } static unsigned long bdi_ratio_from_pages(unsigned long pages) { unsigned long background_thresh; unsigned long dirty_thresh; unsigned long ratio; global_dirty_limits(&background_thresh, &dirty_thresh); ratio = div64_u64(pages * 100ULL * BDI_RATIO_SCALE, dirty_thresh); return ratio; } static u64 bdi_get_bytes(unsigned int ratio) { unsigned long background_thresh; unsigned long dirty_thresh; u64 bytes; global_dirty_limits(&background_thresh, &dirty_thresh); bytes = (dirty_thresh * PAGE_SIZE * ratio) / BDI_RATIO_SCALE / 100; return bytes; } static int __bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio) { unsigned int delta; int ret = 0; if (min_ratio > 100 * BDI_RATIO_SCALE) return -EINVAL; min_ratio *= BDI_RATIO_SCALE; spin_lock_bh(&bdi_lock); if (min_ratio > bdi->max_ratio) { ret = -EINVAL; } else { if (min_ratio < bdi->min_ratio) { delta = bdi->min_ratio - min_ratio; bdi_min_ratio -= delta; bdi->min_ratio = min_ratio; } else { delta = min_ratio - bdi->min_ratio; if (bdi_min_ratio + delta < 100 * BDI_RATIO_SCALE) { bdi_min_ratio += delta; bdi->min_ratio = min_ratio; } else { ret = -EINVAL; } } } spin_unlock_bh(&bdi_lock); return ret; } static int __bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio) { int ret = 0; if (max_ratio > 100 * BDI_RATIO_SCALE) return -EINVAL; spin_lock_bh(&bdi_lock); if (bdi->min_ratio > max_ratio) { ret = -EINVAL; } else { bdi->max_ratio = max_ratio; bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100; } spin_unlock_bh(&bdi_lock); return ret; } int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio) { return __bdi_set_min_ratio(bdi, min_ratio); } int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio) { return __bdi_set_max_ratio(bdi, max_ratio); } int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio) { return __bdi_set_min_ratio(bdi, min_ratio * BDI_RATIO_SCALE); } int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio) { return __bdi_set_max_ratio(bdi, max_ratio * BDI_RATIO_SCALE); } EXPORT_SYMBOL(bdi_set_max_ratio); u64 bdi_get_min_bytes(struct backing_dev_info *bdi) { return bdi_get_bytes(bdi->min_ratio); } int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes) { int ret; unsigned long pages = min_bytes >> PAGE_SHIFT; unsigned long min_ratio; ret = bdi_check_pages_limit(pages); if (ret) return ret; min_ratio = bdi_ratio_from_pages(pages); return __bdi_set_min_ratio(bdi, min_ratio); } u64 bdi_get_max_bytes(struct backing_dev_info *bdi) { return bdi_get_bytes(bdi->max_ratio); } int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes) { int ret; unsigned long pages = max_bytes >> PAGE_SHIFT; 
unsigned long max_ratio; ret = bdi_check_pages_limit(pages); if (ret) return ret; max_ratio = bdi_ratio_from_pages(pages); return __bdi_set_max_ratio(bdi, max_ratio); } int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit) { if (strict_limit > 1) return -EINVAL; spin_lock_bh(&bdi_lock); if (strict_limit) bdi->capabilities |= BDI_CAP_STRICTLIMIT; else bdi->capabilities &= ~BDI_CAP_STRICTLIMIT; spin_unlock_bh(&bdi_lock); return 0; } static unsigned long dirty_freerun_ceiling(unsigned long thresh, unsigned long bg_thresh) { return (thresh + bg_thresh) / 2; } static unsigned long hard_dirty_limit(struct wb_domain *dom, unsigned long thresh) { return max(thresh, dom->dirty_limit); } /* * Memory which can be further allocated to a memcg domain is capped by * system-wide clean memory excluding the amount being used in the domain. */ static void mdtc_calc_avail(struct dirty_throttle_control *mdtc, unsigned long filepages, unsigned long headroom) { struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc); unsigned long clean = filepages - min(filepages, mdtc->dirty); unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty); unsigned long other_clean = global_clean - min(global_clean, clean); mdtc->avail = filepages + min(headroom, other_clean); } /** * __wb_calc_thresh - @wb's share of dirty throttling threshold * @dtc: dirty_throttle_context of interest * * Note that balance_dirty_pages() will only seriously take it as a hard limit * when sleeping max_pause per page is not enough to keep the dirty pages under * control. For example, when the device is completely stalled due to some error * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key. * In the other normal situations, it acts more gently by throttling the tasks * more (rather than completely block them) when the wb dirty pages go high. * * It allocates high/low dirty limits to fast/slow devices, in order to prevent * - starving fast devices * - piling up dirty pages (that will take long time to sync) on slow devices * * The wb's share of dirty limit will be adapting to its throughput and * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set. * * Return: @wb's dirty limit in pages. The term "dirty" in the context of * dirty balancing includes all PG_dirty and PG_writeback pages. */ static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc) { struct wb_domain *dom = dtc_dom(dtc); unsigned long thresh = dtc->thresh; u64 wb_thresh; unsigned long numerator, denominator; unsigned long wb_min_ratio, wb_max_ratio; /* * Calculate this BDI's share of the thresh ratio. 
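 *
 * Ignoring the BDI_RATIO_SCALE fixed-point scaling, the calculation below
 * boils down to (schematic only):
 *
 *      wb_thresh  = thresh * (100 - bdi_min_ratio) / 100;  // distributable part
 *      wb_thresh  = wb_thresh * numerator / denominator;   // share by writeout speed
 *      wb_thresh += thresh * wb_min_ratio / 100;           // guaranteed minimum
 *      wb_thresh  = min(wb_thresh, thresh * wb_max_ratio / 100);  // hard cap
 *
 * where numerator/denominator is this wb's fraction of recent writeout
 * completions as reported by fprop_fraction_percpu().
 *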
*/ fprop_fraction_percpu(&dom->completions, dtc->wb_completions, &numerator, &denominator); wb_thresh = (thresh * (100 * BDI_RATIO_SCALE - bdi_min_ratio)) / (100 * BDI_RATIO_SCALE); wb_thresh *= numerator; wb_thresh = div64_ul(wb_thresh, denominator); wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio); wb_thresh += (thresh * wb_min_ratio) / (100 * BDI_RATIO_SCALE); if (wb_thresh > (thresh * wb_max_ratio) / (100 * BDI_RATIO_SCALE)) wb_thresh = thresh * wb_max_ratio / (100 * BDI_RATIO_SCALE); return wb_thresh; } unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh) { struct dirty_throttle_control gdtc = { GDTC_INIT(wb), .thresh = thresh }; return __wb_calc_thresh(&gdtc); } /* * setpoint - dirty 3 * f(dirty) := 1.0 + (----------------) * limit - setpoint * * it's a 3rd order polynomial that subjects to * * (1) f(freerun) = 2.0 => rampup dirty_ratelimit reasonably fast * (2) f(setpoint) = 1.0 => the balance point * (3) f(limit) = 0 => the hard limit * (4) df/dx <= 0 => negative feedback control * (5) the closer to setpoint, the smaller |df/dx| (and the reverse) * => fast response on large errors; small oscillation near setpoint */ static long long pos_ratio_polynom(unsigned long setpoint, unsigned long dirty, unsigned long limit) { long long pos_ratio; long x; x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT, (limit - setpoint) | 1); pos_ratio = x; pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; pos_ratio += 1 << RATELIMIT_CALC_SHIFT; return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT); } /* * Dirty position control. * * (o) global/bdi setpoints * * We want the dirty pages be balanced around the global/wb setpoints. * When the number of dirty pages is higher/lower than the setpoint, the * dirty position control ratio (and hence task dirty ratelimit) will be * decreased/increased to bring the dirty pages back to the setpoint. * * pos_ratio = 1 << RATELIMIT_CALC_SHIFT * * if (dirty < setpoint) scale up pos_ratio * if (dirty > setpoint) scale down pos_ratio * * if (wb_dirty < wb_setpoint) scale up pos_ratio * if (wb_dirty > wb_setpoint) scale down pos_ratio * * task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT * * (o) global control line * * ^ pos_ratio * | * | |<===== global dirty control scope ======>| * 2.0 * * * * * * * * | .* * | . * * | . * * | . * * | . * * | . * * 1.0 ................................* * | . . * * | . . * * | . . * * | . . * * | . . * * 0 +------------.------------------.----------------------*-------------> * freerun^ setpoint^ limit^ dirty pages * * (o) wb control line * * ^ pos_ratio * | * | * * | * * | * * | * * | * |<=========== span ============>| * 1.0 .......................* * | . * * | . * * | . * * | . * * | . * * | . * * | . * * | . * * | . * * | . * * | . * * 1/4 ...............................................* * * * * * * * * * * * * | . . * | . . * | . . * 0 +----------------------.-------------------------------.-------------> * wb_setpoint^ x_intercept^ * * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can * be smoothly throttled down to normal if it starts high in situations like * - start writing to a slow SD card and a fast disk at the same time. The SD * card's wb_dirty may rush to many times higher than wb_setpoint. 
* - the wb dirty thresh drops quickly due to change of JBOD workload */ static void wb_position_ratio(struct dirty_throttle_control *dtc) { struct bdi_writeback *wb = dtc->wb; unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth); unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh); unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh); unsigned long wb_thresh = dtc->wb_thresh; unsigned long x_intercept; unsigned long setpoint; /* dirty pages' target balance point */ unsigned long wb_setpoint; unsigned long span; long long pos_ratio; /* for scaling up/down the rate limit */ long x; dtc->pos_ratio = 0; if (unlikely(dtc->dirty >= limit)) return; /* * global setpoint * * See comment for pos_ratio_polynom(). */ setpoint = (freerun + limit) / 2; pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit); /* * The strictlimit feature is a tool preventing mistrusted filesystems * from growing a large number of dirty pages before throttling. For * such filesystems balance_dirty_pages always checks wb counters * against wb limits. Even if global "nr_dirty" is under "freerun". * This is especially important for fuse which sets bdi->max_ratio to * 1% by default. Without strictlimit feature, fuse writeback may * consume arbitrary amount of RAM because it is accounted in * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty". * * Here, in wb_position_ratio(), we calculate pos_ratio based on * two values: wb_dirty and wb_thresh. Let's consider an example: * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global * limits are set by default to 10% and 20% (background and throttle). * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages. * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is * about ~6K pages (as the average of background and throttle wb * limits). The 3rd order polynomial will provide positive feedback if * wb_dirty is under wb_setpoint and vice versa. * * Note, that we cannot use global counters in these calculations * because we want to throttle process writing to a strictlimit wb * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB * in the example above). */ if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { long long wb_pos_ratio; if (dtc->wb_dirty < 8) { dtc->pos_ratio = min_t(long long, pos_ratio * 2, 2 << RATELIMIT_CALC_SHIFT); return; } if (dtc->wb_dirty >= wb_thresh) return; wb_setpoint = dirty_freerun_ceiling(wb_thresh, dtc->wb_bg_thresh); if (wb_setpoint == 0 || wb_setpoint == wb_thresh) return; wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty, wb_thresh); /* * Typically, for strictlimit case, wb_setpoint << setpoint * and pos_ratio >> wb_pos_ratio. In the other words global * state ("dirty") is not limiting factor and we have to * make decision based on wb counters. But there is an * important case when global pos_ratio should get precedence: * global limits are exceeded (e.g. due to activities on other * wb's) while given strictlimit wb is below limit. * * "pos_ratio * wb_pos_ratio" would work for the case above, * but it would look too non-natural for the case of all * activity in the system coming from a single strictlimit wb * with bdi->max_ratio == 100%. * * Note that min() below somewhat changes the dynamics of the * control system. Normally, pos_ratio value can be well over 3 * (when globally we are at freerun and wb is well below wb * setpoint). Now the maximum pos_ratio in the same situation * is 2. 
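 *
 * (For reference, plugging the characteristic points into
 * pos_ratio_polynom() shows where these values come from:
 *
 *      f(dirty) = 1.0 + ((setpoint - dirty) / (limit - setpoint))^3
 *
 *      dirty == freerun    ->  1 + 1^3     = 2.0   ramp up quickly
 *      dirty == setpoint   ->  1 + 0       = 1.0   balance point
 *      dirty == limit      ->  1 + (-1)^3  = 0     hard stop
 *
 * since setpoint sits midway between freerun and limit, and the result is
 * clamped to [0, 2].)
 *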
We might want to tweak this if we observe the control * system is too slow to adapt. */ dtc->pos_ratio = min(pos_ratio, wb_pos_ratio); return; } /* * We have computed basic pos_ratio above based on global situation. If * the wb is over/under its share of dirty pages, we want to scale * pos_ratio further down/up. That is done by the following mechanism. */ /* * wb setpoint * * f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint) * * x_intercept - wb_dirty * := -------------------------- * x_intercept - wb_setpoint * * The main wb control line is a linear function that subjects to * * (1) f(wb_setpoint) = 1.0 * (2) k = - 1 / (8 * write_bw) (in single wb case) * or equally: x_intercept = wb_setpoint + 8 * write_bw * * For single wb case, the dirty pages are observed to fluctuate * regularly within range * [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2] * for various filesystems, where (2) can yield in a reasonable 12.5% * fluctuation range for pos_ratio. * * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its * own size, so move the slope over accordingly and choose a slope that * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh. */ if (unlikely(wb_thresh > dtc->thresh)) wb_thresh = dtc->thresh; /* * It's very possible that wb_thresh is close to 0 not because the * device is slow, but that it has remained inactive for long time. * Honour such devices a reasonable good (hopefully IO efficient) * threshold, so that the occasional writes won't be blocked and active * writes can rampup the threshold quickly. */ wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8); /* * scale global setpoint to wb's: * wb_setpoint = setpoint * wb_thresh / thresh */ x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1); wb_setpoint = setpoint * (u64)x >> 16; /* * Use span=(8*write_bw) in single wb case as indicated by * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case. * * wb_thresh thresh - wb_thresh * span = --------- * (8 * write_bw) + ------------------ * wb_thresh * thresh thresh */ span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16; x_intercept = wb_setpoint + span; if (dtc->wb_dirty < x_intercept - span / 4) { pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty), (x_intercept - wb_setpoint) | 1); } else pos_ratio /= 4; /* * wb reserve area, safeguard against dirty pool underrun and disk idle * It may push the desired control point of global dirty pages higher * than setpoint. */ x_intercept = wb_thresh / 2; if (dtc->wb_dirty < x_intercept) { if (dtc->wb_dirty > x_intercept / 8) pos_ratio = div_u64(pos_ratio * x_intercept, dtc->wb_dirty); else pos_ratio *= 8; } dtc->pos_ratio = pos_ratio; } static void wb_update_write_bandwidth(struct bdi_writeback *wb, unsigned long elapsed, unsigned long written) { const unsigned long period = roundup_pow_of_two(3 * HZ); unsigned long avg = wb->avg_write_bandwidth; unsigned long old = wb->write_bandwidth; u64 bw; /* * bw = written * HZ / elapsed * * bw * elapsed + write_bandwidth * (period - elapsed) * write_bandwidth = --------------------------------------------------- * period * * @written may have decreased due to folio_redirty_for_writepage(). * Avoid underflowing @bw calculation. 
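 *
 * Worked example (illustrative numbers only): with period = 4s,
 * elapsed = 1s, a previous estimate of 100 MB/s and 50 MB written in this
 * interval,
 *
 *      bw              = 50 MB / 1 s                  = 50 MB/s
 *      write_bandwidth = (50 * 1 + 100 * (4 - 1)) / 4 = 87.5 MB/s
 *
 * i.e. a short burst only moves the estimate part of the way; the longer
 * the interval, the more weight the fresh measurement gets.
 *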
*/ bw = written - min(written, wb->written_stamp); bw *= HZ; if (unlikely(elapsed > period)) { bw = div64_ul(bw, elapsed); avg = bw; goto out; } bw += (u64)wb->write_bandwidth * (period - elapsed); bw >>= ilog2(period); /* * one more level of smoothing, for filtering out sudden spikes */ if (avg > old && old >= (unsigned long)bw) avg -= (avg - old) >> 3; if (avg < old && old <= (unsigned long)bw) avg += (old - avg) >> 3; out: /* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */ avg = max(avg, 1LU); if (wb_has_dirty_io(wb)) { long delta = avg - wb->avg_write_bandwidth; WARN_ON_ONCE(atomic_long_add_return(delta, &wb->bdi->tot_write_bandwidth) <= 0); } wb->write_bandwidth = bw; WRITE_ONCE(wb->avg_write_bandwidth, avg); } static void update_dirty_limit(struct dirty_throttle_control *dtc) { struct wb_domain *dom = dtc_dom(dtc); unsigned long thresh = dtc->thresh; unsigned long limit = dom->dirty_limit; /* * Follow up in one step. */ if (limit < thresh) { limit = thresh; goto update; } /* * Follow down slowly. Use the higher one as the target, because thresh * may drop below dirty. This is exactly the reason to introduce * dom->dirty_limit which is guaranteed to lie above the dirty pages. */ thresh = max(thresh, dtc->dirty); if (limit > thresh) { limit -= (limit - thresh) >> 5; goto update; } return; update: dom->dirty_limit = limit; } static void domain_update_dirty_limit(struct dirty_throttle_control *dtc, unsigned long now) { struct wb_domain *dom = dtc_dom(dtc); /* * check locklessly first to optimize away locking for the most time */ if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) return; spin_lock(&dom->lock); if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) { update_dirty_limit(dtc); dom->dirty_limit_tstamp = now; } spin_unlock(&dom->lock); } /* * Maintain wb->dirty_ratelimit, the base dirty throttle rate. * * Normal wb tasks will be curbed at or below it in long term. * Obviously it should be around (write_bw / N) when there are N dd tasks. */ static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc, unsigned long dirtied, unsigned long elapsed) { struct bdi_writeback *wb = dtc->wb; unsigned long dirty = dtc->dirty; unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh); unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh); unsigned long setpoint = (freerun + limit) / 2; unsigned long write_bw = wb->avg_write_bandwidth; unsigned long dirty_ratelimit = wb->dirty_ratelimit; unsigned long dirty_rate; unsigned long task_ratelimit; unsigned long balanced_dirty_ratelimit; unsigned long step; unsigned long x; unsigned long shift; /* * The dirty rate will match the writeout rate in long term, except * when dirty pages are truncated by userspace or re-dirtied by FS. */ dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed; /* * task_ratelimit reflects each dd's dirty rate for the past 200ms. */ task_ratelimit = (u64)dirty_ratelimit * dtc->pos_ratio >> RATELIMIT_CALC_SHIFT; task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */ /* * A linear estimation of the "balanced" throttle rate. The theory is, * if there are N dd tasks, each throttled at task_ratelimit, the wb's * dirty_rate will be measured to be (N * task_ratelimit). So the below * formula will yield the balanced rate limit (write_bw / N). 
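 *
 * Worked example (illustrative numbers only): the device writes back
 * 100 MB/s and N = 4 tasks are currently throttled at 40 MB/s each, so
 * dirty_rate is measured at ~160 MB/s:
 *
 *      balanced_dirty_ratelimit = 40 * 100 / 160 = 25 MB/s
 *
 * which is exactly write_bw / N; once every task is throttled at 25 MB/s,
 * dirty_rate matches write_bw and the estimate stays put.
 *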
* * Note that the expanded form is not a pure rate feedback: * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) (1) * but also takes pos_ratio into account: * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio (2) * * (1) is not realistic because pos_ratio also takes part in balancing * the dirty rate. Consider the state * pos_ratio = 0.5 (3) * rate = 2 * (write_bw / N) (4) * If (1) is used, it will stuck in that state! Because each dd will * be throttled at * task_ratelimit = pos_ratio * rate = (write_bw / N) (5) * yielding * dirty_rate = N * task_ratelimit = write_bw (6) * put (6) into (1) we get * rate_(i+1) = rate_(i) (7) * * So we end up using (2) to always keep * rate_(i+1) ~= (write_bw / N) (8) * regardless of the value of pos_ratio. As long as (8) is satisfied, * pos_ratio is able to drive itself to 1.0, which is not only where * the dirty count meet the setpoint, but also where the slope of * pos_ratio is most flat and hence task_ratelimit is least fluctuated. */ balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw, dirty_rate | 1); /* * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw */ if (unlikely(balanced_dirty_ratelimit > write_bw)) balanced_dirty_ratelimit = write_bw; /* * We could safely do this and return immediately: * * wb->dirty_ratelimit = balanced_dirty_ratelimit; * * However to get a more stable dirty_ratelimit, the below elaborated * code makes use of task_ratelimit to filter out singular points and * limit the step size. * * The below code essentially only uses the relative value of * * task_ratelimit - dirty_ratelimit * = (pos_ratio - 1) * dirty_ratelimit * * which reflects the direction and size of dirty position error. */ /* * dirty_ratelimit will follow balanced_dirty_ratelimit iff * task_ratelimit is on the same side of dirty_ratelimit, too. * For example, when * - dirty_ratelimit > balanced_dirty_ratelimit * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint) * lowering dirty_ratelimit will help meet both the position and rate * control targets. Otherwise, don't update dirty_ratelimit if it will * only help meet the rate target. After all, what the users ultimately * feel and care are stable dirty rate and small position error. * * |task_ratelimit - dirty_ratelimit| is used to limit the step size * and filter out the singular points of balanced_dirty_ratelimit. Which * keeps jumping around randomly and can even leap far away at times * due to the small 200ms estimation period of dirty_rate (we want to * keep that period small to reduce time lags). */ step = 0; /* * For strictlimit case, calculations above were based on wb counters * and limits (starting from pos_ratio = wb_position_ratio() and up to * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate). * Hence, to calculate "step" properly, we have to use wb_dirty as * "dirty" and wb_setpoint as "setpoint". * * We rampup dirty_ratelimit forcibly if wb_dirty is low because * it's possible that wb_thresh is close to zero due to inactivity * of backing device. 
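 *
 * For example (illustrative numbers only): with dirty pages above the
 * setpoint, dirty_ratelimit = 50 MB/s, balanced_dirty_ratelimit = 60 MB/s
 * but task_ratelimit = 45 MB/s, the rate estimate says "speed up" while
 * the position feedback says "slow down"; the two disagree, so step stays
 * 0 and dirty_ratelimit is held until both point the same way.
 *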
*/ if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { dirty = dtc->wb_dirty; if (dtc->wb_dirty < 8) setpoint = dtc->wb_dirty + 1; else setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2; } if (dirty < setpoint) { x = min3(wb->balanced_dirty_ratelimit, balanced_dirty_ratelimit, task_ratelimit); if (dirty_ratelimit < x) step = x - dirty_ratelimit; } else { x = max3(wb->balanced_dirty_ratelimit, balanced_dirty_ratelimit, task_ratelimit); if (dirty_ratelimit > x) step = dirty_ratelimit - x; } /* * Don't pursue 100% rate matching. It's impossible since the balanced * rate itself is constantly fluctuating. So decrease the track speed * when it gets close to the target. Helps eliminate pointless tremors. */ shift = dirty_ratelimit / (2 * step + 1); if (shift < BITS_PER_LONG) step = DIV_ROUND_UP(step >> shift, 8); else step = 0; if (dirty_ratelimit < balanced_dirty_ratelimit) dirty_ratelimit += step; else dirty_ratelimit -= step; WRITE_ONCE(wb->dirty_ratelimit, max(dirty_ratelimit, 1UL)); wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit; trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit); } static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc, struct dirty_throttle_control *mdtc, bool update_ratelimit) { struct bdi_writeback *wb = gdtc->wb; unsigned long now = jiffies; unsigned long elapsed; unsigned long dirtied; unsigned long written; spin_lock(&wb->list_lock); /* * Lockless checks for elapsed time are racy and delayed update after * IO completion doesn't do it at all (to make sure written pages are * accounted reasonably quickly). Make sure elapsed >= 1 to avoid * division errors. */ elapsed = max(now - wb->bw_time_stamp, 1UL); dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]); written = percpu_counter_read(&wb->stat[WB_WRITTEN]); if (update_ratelimit) { domain_update_dirty_limit(gdtc, now); wb_update_dirty_ratelimit(gdtc, dirtied, elapsed); /* * @mdtc is always NULL if !CGROUP_WRITEBACK but the * compiler has no way to figure that out. Help it. */ if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) { domain_update_dirty_limit(mdtc, now); wb_update_dirty_ratelimit(mdtc, dirtied, elapsed); } } wb_update_write_bandwidth(wb, elapsed, written); wb->dirtied_stamp = dirtied; wb->written_stamp = written; WRITE_ONCE(wb->bw_time_stamp, now); spin_unlock(&wb->list_lock); } void wb_update_bandwidth(struct bdi_writeback *wb) { struct dirty_throttle_control gdtc = { GDTC_INIT(wb) }; __wb_update_bandwidth(&gdtc, NULL, false); } /* Interval after which we consider wb idle and don't estimate bandwidth */ #define WB_BANDWIDTH_IDLE_JIF (HZ) static void wb_bandwidth_estimate_start(struct bdi_writeback *wb) { unsigned long now = jiffies; unsigned long elapsed = now - READ_ONCE(wb->bw_time_stamp); if (elapsed > WB_BANDWIDTH_IDLE_JIF && !atomic_read(&wb->writeback_inodes)) { spin_lock(&wb->list_lock); wb->dirtied_stamp = wb_stat(wb, WB_DIRTIED); wb->written_stamp = wb_stat(wb, WB_WRITTEN); WRITE_ONCE(wb->bw_time_stamp, now); spin_unlock(&wb->list_lock); } } /* * After a task dirtied this many pages, balance_dirty_pages_ratelimited() * will look to see if it needs to start dirty throttling. * * If dirty_poll_interval is too low, big NUMA machines will call the expensive * global_zone_page_state() too often. So scale it near-sqrt to the safety margin * (the number of pages we may dirty without exceeding the dirty limits). 
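 *
 * For example: with a safety margin (thresh - dirty) of 16 pages the task
 * re-checks every 4 pages, with a margin of 4096 pages every 64 pages, and
 * with a margin of 2^20 pages every 1024 pages -- frequent checks only
 * when the limit is actually close.
 *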
*/ static unsigned long dirty_poll_interval(unsigned long dirty, unsigned long thresh) { if (thresh > dirty) return 1UL << (ilog2(thresh - dirty) >> 1); return 1; } static unsigned long wb_max_pause(struct bdi_writeback *wb, unsigned long wb_dirty) { unsigned long bw = READ_ONCE(wb->avg_write_bandwidth); unsigned long t; /* * Limit pause time for small memory systems. If sleeping for too long * time, a small pool of dirty/writeback pages may go empty and disk go * idle. * * 8 serves as the safety ratio. */ t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8)); t++; return min_t(unsigned long, t, MAX_PAUSE); } static long wb_min_pause(struct bdi_writeback *wb, long max_pause, unsigned long task_ratelimit, unsigned long dirty_ratelimit, int *nr_dirtied_pause) { long hi = ilog2(READ_ONCE(wb->avg_write_bandwidth)); long lo = ilog2(READ_ONCE(wb->dirty_ratelimit)); long t; /* target pause */ long pause; /* estimated next pause */ int pages; /* target nr_dirtied_pause */ /* target for 10ms pause on 1-dd case */ t = max(1, HZ / 100); /* * Scale up pause time for concurrent dirtiers in order to reduce CPU * overheads. * * (N * 10ms) on 2^N concurrent tasks. */ if (hi > lo) t += (hi - lo) * (10 * HZ) / 1024; /* * This is a bit convoluted. We try to base the next nr_dirtied_pause * on the much more stable dirty_ratelimit. However the next pause time * will be computed based on task_ratelimit and the two rate limits may * depart considerably at some time. Especially if task_ratelimit goes * below dirty_ratelimit/2 and the target pause is max_pause, the next * pause time will be max_pause*2 _trimmed down_ to max_pause. As a * result task_ratelimit won't be executed faithfully, which could * eventually bring down dirty_ratelimit. * * We apply two rules to fix it up: * 1) try to estimate the next pause time and if necessary, use a lower * nr_dirtied_pause so as not to exceed max_pause. When this happens, * nr_dirtied_pause will be "dancing" with task_ratelimit. * 2) limit the target pause time to max_pause/2, so that the normal * small fluctuations of task_ratelimit won't trigger rule (1) and * nr_dirtied_pause will remain as stable as dirty_ratelimit. */ t = min(t, 1 + max_pause / 2); pages = dirty_ratelimit * t / roundup_pow_of_two(HZ); /* * Tiny nr_dirtied_pause is found to hurt I/O performance in the test * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}. * When the 16 consecutive reads are often interrupted by some dirty * throttling pause during the async writes, cfq will go into idles * (deadline is fine). So push nr_dirtied_pause as high as possible * until reaches DIRTY_POLL_THRESH=32 pages. */ if (pages < DIRTY_POLL_THRESH) { t = max_pause; pages = dirty_ratelimit * t / roundup_pow_of_two(HZ); if (pages > DIRTY_POLL_THRESH) { pages = DIRTY_POLL_THRESH; t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit; } } pause = HZ * pages / (task_ratelimit + 1); if (pause > max_pause) { t = max_pause; pages = task_ratelimit * t / roundup_pow_of_two(HZ); } *nr_dirtied_pause = pages; /* * The minimal pause time will normally be half the target pause time. */ return pages >= DIRTY_POLL_THRESH ? 
1 + t / 2 : t; } static inline void wb_dirty_limits(struct dirty_throttle_control *dtc) { struct bdi_writeback *wb = dtc->wb; unsigned long wb_reclaimable; /* * wb_thresh is not treated as some limiting factor as * dirty_thresh, due to reasons * - in JBOD setup, wb_thresh can fluctuate a lot * - in a system with HDD and USB key, the USB key may somehow * go into state (wb_dirty >> wb_thresh) either because * wb_dirty starts high, or because wb_thresh drops low. * In this case we don't want to hard throttle the USB key * dirtiers for 100 seconds until wb_dirty drops under * wb_thresh. Instead the auxiliary wb control line in * wb_position_ratio() will let the dirtier task progress * at some rate <= (write_bw / 2) for bringing down wb_dirty. */ dtc->wb_thresh = __wb_calc_thresh(dtc); dtc->wb_bg_thresh = dtc->thresh ? div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0; /* * In order to avoid the stacked BDI deadlock we need * to ensure we accurately count the 'dirty' pages when * the threshold is low. * * Otherwise it would be possible to get thresh+n pages * reported dirty, even though there are thresh-m pages * actually dirty; with m+n sitting in the percpu * deltas. */ if (dtc->wb_thresh < 2 * wb_stat_error()) { wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE); dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK); } else { wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE); dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK); } } /* * balance_dirty_pages() must be called by processes which are generating dirty * data. It looks at the number of dirty pages in the machine and will force * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2. * If we're over `background_thresh' then the writeback threads are woken to * perform some writeout. */ static int balance_dirty_pages(struct bdi_writeback *wb, unsigned long pages_dirtied, unsigned int flags) { struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) }; struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) }; struct dirty_throttle_control * const gdtc = &gdtc_stor; struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ? &mdtc_stor : NULL; struct dirty_throttle_control *sdtc; unsigned long nr_reclaimable; /* = file_dirty */ long period; long pause; long max_pause; long min_pause; int nr_dirtied_pause; bool dirty_exceeded = false; unsigned long task_ratelimit; unsigned long dirty_ratelimit; struct backing_dev_info *bdi = wb->bdi; bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT; unsigned long start_time = jiffies; int ret = 0; for (;;) { unsigned long now = jiffies; unsigned long dirty, thresh, bg_thresh; unsigned long m_dirty = 0; /* stop bogus uninit warnings */ unsigned long m_thresh = 0; unsigned long m_bg_thresh = 0; nr_reclaimable = global_node_page_state(NR_FILE_DIRTY); gdtc->avail = global_dirtyable_memory(); gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK); domain_dirty_limits(gdtc); if (unlikely(strictlimit)) { wb_dirty_limits(gdtc); dirty = gdtc->wb_dirty; thresh = gdtc->wb_thresh; bg_thresh = gdtc->wb_bg_thresh; } else { dirty = gdtc->dirty; thresh = gdtc->thresh; bg_thresh = gdtc->bg_thresh; } if (mdtc) { unsigned long filepages, headroom, writeback; /* * If @wb belongs to !root memcg, repeat the same * basic calculations for the memcg domain. 
*/ mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty, &writeback); mdtc->dirty += writeback; mdtc_calc_avail(mdtc, filepages, headroom); domain_dirty_limits(mdtc); if (unlikely(strictlimit)) { wb_dirty_limits(mdtc); m_dirty = mdtc->wb_dirty; m_thresh = mdtc->wb_thresh; m_bg_thresh = mdtc->wb_bg_thresh; } else { m_dirty = mdtc->dirty; m_thresh = mdtc->thresh; m_bg_thresh = mdtc->bg_thresh; } } /* * In laptop mode, we wait until hitting the higher threshold * before starting background writeout, and then write out all * the way down to the lower threshold. So slow writers cause * minimal disk activity. * * In normal mode, we start background writeout at the lower * background_thresh, to keep the amount of dirty memory low. */ if (!laptop_mode && nr_reclaimable > gdtc->bg_thresh && !writeback_in_progress(wb)) wb_start_background_writeback(wb); /* * Throttle it only when the background writeback cannot * catch-up. This avoids (excessively) small writeouts * when the wb limits are ramping up in case of !strictlimit. * * In strictlimit case make decision based on the wb counters * and limits. Small writeouts when the wb limits are ramping * up are the price we consciously pay for strictlimit-ing. * * If memcg domain is in effect, @dirty should be under * both global and memcg freerun ceilings. */ if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) && (!mdtc || m_dirty <= dirty_freerun_ceiling(m_thresh, m_bg_thresh))) { unsigned long intv; unsigned long m_intv; free_running: intv = dirty_poll_interval(dirty, thresh); m_intv = ULONG_MAX; current->dirty_paused_when = now; current->nr_dirtied = 0; if (mdtc) m_intv = dirty_poll_interval(m_dirty, m_thresh); current->nr_dirtied_pause = min(intv, m_intv); break; } /* Start writeback even when in laptop mode */ if (unlikely(!writeback_in_progress(wb))) wb_start_background_writeback(wb); mem_cgroup_flush_foreign(wb); /* * Calculate global domain's pos_ratio and select the * global dtc by default. */ if (!strictlimit) { wb_dirty_limits(gdtc); if ((current->flags & PF_LOCAL_THROTTLE) && gdtc->wb_dirty < dirty_freerun_ceiling(gdtc->wb_thresh, gdtc->wb_bg_thresh)) /* * LOCAL_THROTTLE tasks must not be throttled * when below the per-wb freerun ceiling. */ goto free_running; } dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) && ((gdtc->dirty > gdtc->thresh) || strictlimit); wb_position_ratio(gdtc); sdtc = gdtc; if (mdtc) { /* * If memcg domain is in effect, calculate its * pos_ratio. @wb should satisfy constraints from * both global and memcg domains. Choose the one * w/ lower pos_ratio. */ if (!strictlimit) { wb_dirty_limits(mdtc); if ((current->flags & PF_LOCAL_THROTTLE) && mdtc->wb_dirty < dirty_freerun_ceiling(mdtc->wb_thresh, mdtc->wb_bg_thresh)) /* * LOCAL_THROTTLE tasks must not be * throttled when below the per-wb * freerun ceiling. 
*/ goto free_running; } dirty_exceeded |= (mdtc->wb_dirty > mdtc->wb_thresh) && ((mdtc->dirty > mdtc->thresh) || strictlimit); wb_position_ratio(mdtc); if (mdtc->pos_ratio < gdtc->pos_ratio) sdtc = mdtc; } if (dirty_exceeded != wb->dirty_exceeded) wb->dirty_exceeded = dirty_exceeded; if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) + BANDWIDTH_INTERVAL)) __wb_update_bandwidth(gdtc, mdtc, true); /* throttle according to the chosen dtc */ dirty_ratelimit = READ_ONCE(wb->dirty_ratelimit); task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >> RATELIMIT_CALC_SHIFT; max_pause = wb_max_pause(wb, sdtc->wb_dirty); min_pause = wb_min_pause(wb, max_pause, task_ratelimit, dirty_ratelimit, &nr_dirtied_pause); if (unlikely(task_ratelimit == 0)) { period = max_pause; pause = max_pause; goto pause; } period = HZ * pages_dirtied / task_ratelimit; pause = period; if (current->dirty_paused_when) pause -= now - current->dirty_paused_when; /* * For less than 1s think time (ext3/4 may block the dirtier * for up to 800ms from time to time on 1-HDD; so does xfs, * however at much less frequency), try to compensate it in * future periods by updating the virtual time; otherwise just * do a reset, as it may be a light dirtier. */ if (pause < min_pause) { trace_balance_dirty_pages(wb, sdtc->thresh, sdtc->bg_thresh, sdtc->dirty, sdtc->wb_thresh, sdtc->wb_dirty, dirty_ratelimit, task_ratelimit, pages_dirtied, period, min(pause, 0L), start_time); if (pause < -HZ) { current->dirty_paused_when = now; current->nr_dirtied = 0; } else if (period) { current->dirty_paused_when += period; current->nr_dirtied = 0; } else if (current->nr_dirtied_pause <= pages_dirtied) current->nr_dirtied_pause += pages_dirtied; break; } if (unlikely(pause > max_pause)) { /* for occasional dropped task_ratelimit */ now += min(pause - max_pause, max_pause); pause = max_pause; } pause: trace_balance_dirty_pages(wb, sdtc->thresh, sdtc->bg_thresh, sdtc->dirty, sdtc->wb_thresh, sdtc->wb_dirty, dirty_ratelimit, task_ratelimit, pages_dirtied, period, pause, start_time); if (flags & BDP_ASYNC) { ret = -EAGAIN; break; } __set_current_state(TASK_KILLABLE); wb->dirty_sleep = now; io_schedule_timeout(pause); current->dirty_paused_when = now + pause; current->nr_dirtied = 0; current->nr_dirtied_pause = nr_dirtied_pause; /* * This is typically equal to (dirty < thresh) and can also * keep "1000+ dd on a slow USB stick" under control. */ if (task_ratelimit) break; /* * In the case of an unresponsive NFS server and the NFS dirty * pages exceeds dirty_thresh, give the other good wb's a pipe * to go through, so that tasks on them still remain responsive. * * In theory 1 page is enough to keep the consumer-producer * pipe going: the flusher cleans 1 page => the task dirties 1 * more page. However wb_dirty has accounting errors. So use * the larger and more IO friendly wb_stat_error. */ if (sdtc->wb_dirty <= wb_stat_error()) break; if (fatal_signal_pending(current)) break; } return ret; } static DEFINE_PER_CPU(int, bdp_ratelimits); /* * Normal tasks are throttled by * loop { * dirty tsk->nr_dirtied_pause pages; * take a snap in balance_dirty_pages(); * } * However there is a worst case. If every task exit immediately when dirtied * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be * called to throttle the page dirties. The solution is to save the not yet * throttled page dirties in dirty_throttle_leaks on task exit and charge them * randomly into the running tasks. 
This works well for the above worst case, * as the new task will pick up and accumulate the old task's leaked dirty * count and eventually get throttled. */ DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0; /** * balance_dirty_pages_ratelimited_flags - Balance dirty memory state. * @mapping: address_space which was dirtied. * @flags: BDP flags. * * Processes which are dirtying memory should call in here once for each page * which was newly dirtied. The function will periodically check the system's * dirty state and will initiate writeback if needed. * * See balance_dirty_pages_ratelimited() for details. * * Return: If @flags contains BDP_ASYNC, it may return -EAGAIN to * indicate that memory is out of balance and the caller must wait * for I/O to complete. Otherwise, it will return 0 to indicate * that either memory was already in balance, or it was able to sleep * until the amount of dirty memory returned to balance. */ int balance_dirty_pages_ratelimited_flags(struct address_space *mapping, unsigned int flags) { struct inode *inode = mapping->host; struct backing_dev_info *bdi = inode_to_bdi(inode); struct bdi_writeback *wb = NULL; int ratelimit; int ret = 0; int *p; if (!(bdi->capabilities & BDI_CAP_WRITEBACK)) return ret; if (inode_cgwb_enabled(inode)) wb = wb_get_create_current(bdi, GFP_KERNEL); if (!wb) wb = &bdi->wb; ratelimit = current->nr_dirtied_pause; if (wb->dirty_exceeded) ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10)); preempt_disable(); /* * This prevents one CPU to accumulate too many dirtied pages without * calling into balance_dirty_pages(), which can happen when there are * 1000+ tasks, all of them start dirtying pages at exactly the same * time, hence all honoured too large initial task->nr_dirtied_pause. */ p = this_cpu_ptr(&bdp_ratelimits); if (unlikely(current->nr_dirtied >= ratelimit)) *p = 0; else if (unlikely(*p >= ratelimit_pages)) { *p = 0; ratelimit = 0; } /* * Pick up the dirtied pages by the exited tasks. This avoids lots of * short-lived tasks (eg. gcc invocations in a kernel build) escaping * the dirty throttling and livelock other long-run dirtiers. */ p = this_cpu_ptr(&dirty_throttle_leaks); if (*p > 0 && current->nr_dirtied < ratelimit) { unsigned long nr_pages_dirtied; nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied); *p -= nr_pages_dirtied; current->nr_dirtied += nr_pages_dirtied; } preempt_enable(); if (unlikely(current->nr_dirtied >= ratelimit)) ret = balance_dirty_pages(wb, current->nr_dirtied, flags); wb_put(wb); return ret; } EXPORT_SYMBOL_GPL(balance_dirty_pages_ratelimited_flags); /** * balance_dirty_pages_ratelimited - balance dirty memory state. * @mapping: address_space which was dirtied. * * Processes which are dirtying memory should call in here once for each page * which was newly dirtied. The function will periodically check the system's * dirty state and will initiate writeback if needed. * * Once we're over the dirty memory limit we decrease the ratelimiting * by a lot, to prevent individual processes from overshooting the limit * by (ratelimit_pages) each. */ void balance_dirty_pages_ratelimited(struct address_space *mapping) { balance_dirty_pages_ratelimited_flags(mapping, 0); } EXPORT_SYMBOL(balance_dirty_pages_ratelimited); /** * wb_over_bg_thresh - does @wb need to be written back? * @wb: bdi_writeback of interest * * Determines whether background writeback should keep writing @wb or it's * clean enough. * * Return: %true if writeback should continue. 
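 *
 * Stripped of the memcg domain and the percpu-error handling, the decision
 * below is roughly:
 *
 *      return gdtc->dirty > gdtc->bg_thresh ||
 *             reclaimable > wb_calc_thresh(gdtc->wb, gdtc->bg_thresh);
 *
 * i.e. keep background writeback running while either the global
 * background threshold or this wb's proportional share of it is exceeded.
 *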
*/ bool wb_over_bg_thresh(struct bdi_writeback *wb) { struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) }; struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) }; struct dirty_throttle_control * const gdtc = &gdtc_stor; struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ? &mdtc_stor : NULL; unsigned long reclaimable; unsigned long thresh; /* * Similar to balance_dirty_pages() but ignores pages being written * as we're trying to decide whether to put more under writeback. */ gdtc->avail = global_dirtyable_memory(); gdtc->dirty = global_node_page_state(NR_FILE_DIRTY); domain_dirty_limits(gdtc); if (gdtc->dirty > gdtc->bg_thresh) return true; thresh = wb_calc_thresh(gdtc->wb, gdtc->bg_thresh); if (thresh < 2 * wb_stat_error()) reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE); else reclaimable = wb_stat(wb, WB_RECLAIMABLE); if (reclaimable > thresh) return true; if (mdtc) { unsigned long filepages, headroom, writeback; mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty, &writeback); mdtc_calc_avail(mdtc, filepages, headroom); domain_dirty_limits(mdtc); /* ditto, ignore writeback */ if (mdtc->dirty > mdtc->bg_thresh) return true; thresh = wb_calc_thresh(mdtc->wb, mdtc->bg_thresh); if (thresh < 2 * wb_stat_error()) reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE); else reclaimable = wb_stat(wb, WB_RECLAIMABLE); if (reclaimable > thresh) return true; } return false; } #ifdef CONFIG_SYSCTL /* * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs */ static int dirty_writeback_centisecs_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { unsigned int old_interval = dirty_writeback_interval; int ret; ret = proc_dointvec(table, write, buffer, length, ppos); /* * Writing 0 to dirty_writeback_interval will disable periodic writeback * and a different non-zero value will wakeup the writeback threads. * wb_wakeup_delayed() would be more appropriate, but it's a pain to * iterate over all bdis and wbs. * The reason we do this is to make the change take effect immediately. */ if (!ret && write && dirty_writeback_interval && dirty_writeback_interval != old_interval) wakeup_flusher_threads(WB_REASON_PERIODIC); return ret; } #endif void laptop_mode_timer_fn(struct timer_list *t) { struct backing_dev_info *backing_dev_info = from_timer(backing_dev_info, t, laptop_mode_wb_timer); wakeup_flusher_threads_bdi(backing_dev_info, WB_REASON_LAPTOP_TIMER); } /* * We've spun up the disk and we're in laptop mode: schedule writeback * of all dirty data a few seconds from now. If the flush is already scheduled * then push it back - the user is still using the disk. */ void laptop_io_completion(struct backing_dev_info *info) { mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode); } /* * We're in laptop mode and we've just synced. The sync's writes will have * caused another writeback to be scheduled by laptop_io_completion. * Nothing needs to be written back anymore, so we unschedule the writeback. */ void laptop_sync_completion(void) { struct backing_dev_info *bdi; rcu_read_lock(); list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) del_timer(&bdi->laptop_mode_wb_timer); rcu_read_unlock(); } /* * If ratelimit_pages is too high then we can get into dirty-data overload * if a large number of processes all perform writes at the same time. * * Here we set ratelimit_pages to a level which ensures that when all CPUs are * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory * thresholds. 
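 *
 * Worked example (illustrative numbers only): with a dirty threshold of
 * 1,048,576 pages (4GB of 4KB pages) and 16 online CPUs,
 *
 *      ratelimit_pages = 1,048,576 / (16 * 32) = 2,048 pages per CPU
 *
 * so even if every CPU dirties its full quota before checking in, the
 * combined overshoot is 16 * 2,048 = 32,768 pages, i.e. 1/32 (~3%) of the
 * threshold.  The floor of 16 pages keeps very small systems from calling
 * balance_dirty_pages() on almost every page.
 *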
*/ void writeback_set_ratelimit(void) { struct wb_domain *dom = &global_wb_domain; unsigned long background_thresh; unsigned long dirty_thresh; global_dirty_limits(&background_thresh, &dirty_thresh); dom->dirty_limit = dirty_thresh; ratelimit_pages = dirty_thresh / (num_online_cpus() * 32); if (ratelimit_pages < 16) ratelimit_pages = 16; } static int page_writeback_cpu_online(unsigned int cpu) { writeback_set_ratelimit(); return 0; } #ifdef CONFIG_SYSCTL /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */ static const unsigned long dirty_bytes_min = 2 * PAGE_SIZE; static struct ctl_table vm_page_writeback_sysctls[] = { { .procname = "dirty_background_ratio", .data = &dirty_background_ratio, .maxlen = sizeof(dirty_background_ratio), .mode = 0644, .proc_handler = dirty_background_ratio_handler, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE_HUNDRED, }, { .procname = "dirty_background_bytes", .data = &dirty_background_bytes, .maxlen = sizeof(dirty_background_bytes), .mode = 0644, .proc_handler = dirty_background_bytes_handler, .extra1 = SYSCTL_LONG_ONE, }, { .procname = "dirty_ratio", .data = &vm_dirty_ratio, .maxlen = sizeof(vm_dirty_ratio), .mode = 0644, .proc_handler = dirty_ratio_handler, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE_HUNDRED, }, { .procname = "dirty_bytes", .data = &vm_dirty_bytes, .maxlen = sizeof(vm_dirty_bytes), .mode = 0644, .proc_handler = dirty_bytes_handler, .extra1 = (void *)&dirty_bytes_min, }, { .procname = "dirty_writeback_centisecs", .data = &dirty_writeback_interval, .maxlen = sizeof(dirty_writeback_interval), .mode = 0644, .proc_handler = dirty_writeback_centisecs_handler, }, { .procname = "dirty_expire_centisecs", .data = &dirty_expire_interval, .maxlen = sizeof(dirty_expire_interval), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, }, #ifdef CONFIG_HIGHMEM { .procname = "highmem_is_dirtyable", .data = &vm_highmem_is_dirtyable, .maxlen = sizeof(vm_highmem_is_dirtyable), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, #endif { .procname = "laptop_mode", .data = &laptop_mode, .maxlen = sizeof(laptop_mode), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, {} }; #endif /* * Called early on to tune the page writeback dirty limits. * * We used to scale dirty pages according to how total memory * related to pages that could be allocated for buffers. * * However, that was when we used "dirty_ratio" to scale with * all memory, and we don't do that any more. "dirty_ratio" * is now applied to total non-HIGHPAGE memory, and as such we can't * get into the old insane situation any more where we had * large amounts of dirty pages compared to a small amount of * non-HIGHMEM memory. * * But we might still want to scale the dirty_ratio by how * much memory the box has.. */ void __init page_writeback_init(void) { BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL)); cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/writeback:online", page_writeback_cpu_online, NULL); cpuhp_setup_state(CPUHP_MM_WRITEBACK_DEAD, "mm/writeback:dead", NULL, page_writeback_cpu_online); #ifdef CONFIG_SYSCTL register_sysctl_init("vm", vm_page_writeback_sysctls); #endif } /** * tag_pages_for_writeback - tag pages to be written by write_cache_pages * @mapping: address space structure to write * @start: starting page index * @end: ending page index (inclusive) * * This function scans the page range from @start to @end (inclusive) and tags * all pages that have DIRTY tag set with a special TOWRITE tag. 
The idea is * that write_cache_pages (or whoever calls this function) will then use * TOWRITE tag to identify pages eligible for writeback. This mechanism is * used to avoid livelocking of writeback by a process steadily creating new * dirty pages in the file (thus it is important for this function to be quick * so that it can tag pages faster than a dirtying process can create them). */ void tag_pages_for_writeback(struct address_space *mapping, pgoff_t start, pgoff_t end) { XA_STATE(xas, &mapping->i_pages, start); unsigned int tagged = 0; void *page; xas_lock_irq(&xas); xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) { xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE); if (++tagged % XA_CHECK_SCHED) continue; xas_pause(&xas); xas_unlock_irq(&xas); cond_resched(); xas_lock_irq(&xas); } xas_unlock_irq(&xas); } EXPORT_SYMBOL(tag_pages_for_writeback); /** * write_cache_pages - walk the list of dirty pages of the given address space and write all of them. * @mapping: address space structure to write * @wbc: subtract the number of written pages from *@wbc->nr_to_write * @writepage: function called for each page * @data: data passed to writepage function * * If a page is already under I/O, write_cache_pages() skips it, even * if it's dirty. This is desirable behaviour for memory-cleaning writeback, * but it is INCORRECT for data-integrity system calls such as fsync(). fsync() * and msync() need to guarantee that all the data which was dirty at the time * the call was made get new I/O started against them. If wbc->sync_mode is * WB_SYNC_ALL then we were called for data integrity and we must wait for * existing IO to complete. * * To avoid livelocks (when other process dirties new pages), we first tag * pages which should be written back with TOWRITE tag and only then start * writing them. For data-integrity sync we have to be careful so that we do * not miss some pages (e.g., because some other process has cleared TOWRITE * tag we set). The rule we follow is that TOWRITE tag can be cleared only * by the process clearing the DIRTY tag (and submitting the page for IO). * * To avoid deadlocks between range_cyclic writeback and callers that hold * pages in PageWriteback to aggregate IO until write_cache_pages() returns, * we do not loop back to the start of the file. Doing so causes a page * lock/page writeback access order inversion - we should only ever lock * multiple pages in ascending page->index order, and looping back to the start * of the file violates that rule and causes deadlocks. 
* * Return: %0 on success, negative error code otherwise */ int write_cache_pages(struct address_space *mapping, struct writeback_control *wbc, writepage_t writepage, void *data) { int ret = 0; int done = 0; int error; struct folio_batch fbatch; int nr_folios; pgoff_t index; pgoff_t end; /* Inclusive */ pgoff_t done_index; int range_whole = 0; xa_mark_t tag; folio_batch_init(&fbatch); if (wbc->range_cyclic) { index = mapping->writeback_index; /* prev offset */ end = -1; } else { index = wbc->range_start >> PAGE_SHIFT; end = wbc->range_end >> PAGE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; } if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) { tag_pages_for_writeback(mapping, index, end); tag = PAGECACHE_TAG_TOWRITE; } else { tag = PAGECACHE_TAG_DIRTY; } done_index = index; while (!done && (index <= end)) { int i; nr_folios = filemap_get_folios_tag(mapping, &index, end, tag, &fbatch); if (nr_folios == 0) break; for (i = 0; i < nr_folios; i++) { struct folio *folio = fbatch.folios[i]; unsigned long nr; done_index = folio->index; folio_lock(folio); /* * Page truncated or invalidated. We can freely skip it * then, even for data integrity operations: the page * has disappeared concurrently, so there could be no * real expectation of this data integrity operation * even if there is now a new, dirty page at the same * pagecache address. */ if (unlikely(folio->mapping != mapping)) { continue_unlock: folio_unlock(folio); continue; } if (!folio_test_dirty(folio)) { /* someone wrote it for us */ goto continue_unlock; } if (folio_test_writeback(folio)) { if (wbc->sync_mode != WB_SYNC_NONE) folio_wait_writeback(folio); else goto continue_unlock; } BUG_ON(folio_test_writeback(folio)); if (!folio_clear_dirty_for_io(folio)) goto continue_unlock; trace_wbc_writepage(wbc, inode_to_bdi(mapping->host)); error = writepage(folio, wbc, data); nr = folio_nr_pages(folio); if (unlikely(error)) { /* * Handle errors according to the type of * writeback. There's no need to continue for * background writeback. Just push done_index * past this page so media errors won't choke * writeout for the entire file. For integrity * writeback, we must process the entire dirty * set regardless of errors because the fs may * still have state to clear for each page. In * that case we continue processing and return * the first error. */ if (error == AOP_WRITEPAGE_ACTIVATE) { folio_unlock(folio); error = 0; } else if (wbc->sync_mode != WB_SYNC_ALL) { ret = error; done_index = folio->index + nr; done = 1; break; } if (!ret) ret = error; } /* * We stop writing back only if we are not doing * integrity sync. In case of integrity sync we have to * keep going until we have written all the pages * we tagged for writeback prior to entering this loop. */ wbc->nr_to_write -= nr; if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) { done = 1; break; } } folio_batch_release(&fbatch); cond_resched(); } /* * If we hit the last page and there is more work to be done: wrap * back the index back to the start of the file for the next * time we are called. 
*/ if (wbc->range_cyclic && !done) done_index = 0; if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = done_index; return ret; } EXPORT_SYMBOL(write_cache_pages); static int writepage_cb(struct folio *folio, struct writeback_control *wbc, void *data) { struct address_space *mapping = data; int ret = mapping->a_ops->writepage(&folio->page, wbc); mapping_set_error(mapping, ret); return ret; } int do_writepages(struct address_space *mapping, struct writeback_control *wbc) { int ret; struct bdi_writeback *wb; if (wbc->nr_to_write <= 0) return 0; wb = inode_to_wb_wbc(mapping->host, wbc); wb_bandwidth_estimate_start(wb); while (1) { if (mapping->a_ops->writepages) { ret = mapping->a_ops->writepages(mapping, wbc); } else if (mapping->a_ops->writepage) { struct blk_plug plug; blk_start_plug(&plug); ret = write_cache_pages(mapping, wbc, writepage_cb, mapping); blk_finish_plug(&plug); } else { /* deal with chardevs and other special files */ ret = 0; } if (ret != -ENOMEM || wbc->sync_mode != WB_SYNC_ALL) break; /* * Lacking an allocation context or the locality or writeback * state of any of the inode's pages, throttle based on * writeback activity on the local node. It's as good a * guess as any. */ reclaim_throttle(NODE_DATA(numa_node_id()), VMSCAN_THROTTLE_WRITEBACK); } /* * Usually few pages are written by now from those we've just submitted * but if there's constant writeback being submitted, this makes sure * writeback bandwidth is updated once in a while. */ if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) + BANDWIDTH_INTERVAL)) wb_update_bandwidth(wb); return ret; } /* * For address_spaces which do not use buffers nor write back. */ bool noop_dirty_folio(struct address_space *mapping, struct folio *folio) { if (!folio_test_dirty(folio)) return !folio_test_set_dirty(folio); return false; } EXPORT_SYMBOL(noop_dirty_folio); /* * Helper function for set_page_dirty family. * * Caller must hold folio_memcg_lock(). * * NOTE: This relies on being atomic wrt interrupts. */ static void folio_account_dirtied(struct folio *folio, struct address_space *mapping) { struct inode *inode = mapping->host; trace_writeback_dirty_folio(folio, mapping); if (mapping_can_writeback(mapping)) { struct bdi_writeback *wb; long nr = folio_nr_pages(folio); inode_attach_wb(inode, folio); wb = inode_to_wb(inode); __lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr); __zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr); __node_stat_mod_folio(folio, NR_DIRTIED, nr); wb_stat_mod(wb, WB_RECLAIMABLE, nr); wb_stat_mod(wb, WB_DIRTIED, nr); task_io_account_write(nr * PAGE_SIZE); current->nr_dirtied += nr; __this_cpu_add(bdp_ratelimits, nr); mem_cgroup_track_foreign_dirty(folio, wb); } } /* * Helper function for deaccounting dirty page without writeback. * * Caller must hold folio_memcg_lock(). */ void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb) { long nr = folio_nr_pages(folio); lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr); zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr); wb_stat_mod(wb, WB_RECLAIMABLE, -nr); task_io_account_cancelled_write(nr * PAGE_SIZE); } /* * Mark the folio dirty, and set it dirty in the page cache, and mark * the inode dirty. * * If warn is true, then emit a warning if the folio is not uptodate and has * not been truncated. * * The caller must hold folio_memcg_lock(). Most callers have the folio * locked. 
A few have the folio blocked from truncation through other * means (eg zap_vma_pages() has it mapped and is holding the page table * lock). This can also be called from mark_buffer_dirty(), which I * cannot prove is always protected against truncate. */ void __folio_mark_dirty(struct folio *folio, struct address_space *mapping, int warn) { unsigned long flags; xa_lock_irqsave(&mapping->i_pages, flags); if (folio->mapping) { /* Race with truncate? */ WARN_ON_ONCE(warn && !folio_test_uptodate(folio)); folio_account_dirtied(folio, mapping); __xa_set_mark(&mapping->i_pages, folio_index(folio), PAGECACHE_TAG_DIRTY); } xa_unlock_irqrestore(&mapping->i_pages, flags); } /** * filemap_dirty_folio - Mark a folio dirty for filesystems which do not use buffer_heads. * @mapping: Address space this folio belongs to. * @folio: Folio to be marked as dirty. * * Filesystems which do not use buffer heads should call this function * from their set_page_dirty address space operation. It ignores the * contents of folio_get_private(), so if the filesystem marks individual * blocks as dirty, the filesystem should handle that itself. * * This is also sometimes used by filesystems which use buffer_heads when * a single buffer is being dirtied: we want to set the folio dirty in * that case, but not all the buffers. This is a "bottom-up" dirtying, * whereas block_dirty_folio() is a "top-down" dirtying. * * The caller must ensure this doesn't race with truncation. Most will * simply hold the folio lock, but e.g. zap_pte_range() calls with the * folio mapped and the pte lock held, which also locks out truncation. */ bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio) { folio_memcg_lock(folio); if (folio_test_set_dirty(folio)) { folio_memcg_unlock(folio); return false; } __folio_mark_dirty(folio, mapping, !folio_test_private(folio)); folio_memcg_unlock(folio); if (mapping->host) { /* !PageAnon && !swapper_space */ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); } return true; } EXPORT_SYMBOL(filemap_dirty_folio); /** * folio_redirty_for_writepage - Decline to write a dirty folio. * @wbc: The writeback control. * @folio: The folio. * * When a writepage implementation decides that it doesn't want to write * @folio for some reason, it should call this function, unlock @folio and * return 0. * * Return: True if we redirtied the folio. False if someone else dirtied * it first. */ bool folio_redirty_for_writepage(struct writeback_control *wbc, struct folio *folio) { struct address_space *mapping = folio->mapping; long nr = folio_nr_pages(folio); bool ret; wbc->pages_skipped += nr; ret = filemap_dirty_folio(mapping, folio); if (mapping && mapping_can_writeback(mapping)) { struct inode *inode = mapping->host; struct bdi_writeback *wb; struct wb_lock_cookie cookie = {}; wb = unlocked_inode_to_wb_begin(inode, &cookie); current->nr_dirtied -= nr; node_stat_mod_folio(folio, NR_DIRTIED, -nr); wb_stat_mod(wb, WB_DIRTIED, -nr); unlocked_inode_to_wb_end(inode, &cookie); } return ret; } EXPORT_SYMBOL(folio_redirty_for_writepage); /** * folio_mark_dirty - Mark a folio as being modified. * @folio: The folio. * * The folio may not be truncated while this function is running. * Holding the folio lock is sufficient to prevent truncation, but some * callers cannot acquire a sleeping lock. These callers instead hold * the page table lock for a page table which contains at least one page * in this folio. Truncation will block on the page table lock as it * unmaps pages before removing the folio from its mapping. 
* * Return: True if the folio was newly dirtied, false if it was already dirty. */ bool folio_mark_dirty(struct folio *folio) { struct address_space *mapping = folio_mapping(folio); if (likely(mapping)) { /* * readahead/folio_deactivate could remain * PG_readahead/PG_reclaim due to race with folio_end_writeback * About readahead, if the folio is written, the flags would be * reset. So no problem. * About folio_deactivate, if the folio is redirtied, * the flag will be reset. So no problem. but if the * folio is used by readahead it will confuse readahead * and make it restart the size rampup process. But it's * a trivial problem. */ if (folio_test_reclaim(folio)) folio_clear_reclaim(folio); return mapping->a_ops->dirty_folio(mapping, folio); } return noop_dirty_folio(mapping, folio); } EXPORT_SYMBOL(folio_mark_dirty); /* * set_page_dirty() is racy if the caller has no reference against * page->mapping->host, and if the page is unlocked. This is because another * CPU could truncate the page off the mapping and then free the mapping. * * Usually, the page _is_ locked, or the caller is a user-space process which * holds a reference on the inode by having an open file. * * In other cases, the page should be locked before running set_page_dirty(). */ int set_page_dirty_lock(struct page *page) { int ret; lock_page(page); ret = set_page_dirty(page); unlock_page(page); return ret; } EXPORT_SYMBOL(set_page_dirty_lock); /* * This cancels just the dirty bit on the kernel page itself, it does NOT * actually remove dirty bits on any mmap's that may be around. It also * leaves the page tagged dirty, so any sync activity will still find it on * the dirty lists, and in particular, clear_page_dirty_for_io() will still * look at the dirty bits in the VM. * * Doing this should *normally* only ever be done when a page is truncated, * and is not actually mapped anywhere at all. However, fs/buffer.c does * this when it notices that somebody has cleaned out all the buffers on a * page without actually doing it through the VM. Can you say "ext3 is * horribly ugly"? Thought you could. */ void __folio_cancel_dirty(struct folio *folio) { struct address_space *mapping = folio_mapping(folio); if (mapping_can_writeback(mapping)) { struct inode *inode = mapping->host; struct bdi_writeback *wb; struct wb_lock_cookie cookie = {}; folio_memcg_lock(folio); wb = unlocked_inode_to_wb_begin(inode, &cookie); if (folio_test_clear_dirty(folio)) folio_account_cleaned(folio, wb); unlocked_inode_to_wb_end(inode, &cookie); folio_memcg_unlock(folio); } else { folio_clear_dirty(folio); } } EXPORT_SYMBOL(__folio_cancel_dirty); /* * Clear a folio's dirty flag, while caring for dirty memory accounting. * Returns true if the folio was previously dirty. * * This is for preparing to put the folio under writeout. We leave * the folio tagged as dirty in the xarray so that a concurrent * write-for-sync can discover it via a PAGECACHE_TAG_DIRTY walk. * The ->writepage implementation will run either folio_start_writeback() * or folio_mark_dirty(), at which stage we bring the folio's dirty flag * and xarray dirty tag back into sync. * * This incoherency between the folio's dirty flag and xarray tag is * unfortunate, but it only exists while the folio is locked. 
*/ bool folio_clear_dirty_for_io(struct folio *folio) { struct address_space *mapping = folio_mapping(folio); bool ret = false; VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); if (mapping && mapping_can_writeback(mapping)) { struct inode *inode = mapping->host; struct bdi_writeback *wb; struct wb_lock_cookie cookie = {}; /* * Yes, Virginia, this is indeed insane. * * We use this sequence to make sure that * (a) we account for dirty stats properly * (b) we tell the low-level filesystem to * mark the whole folio dirty if it was * dirty in a pagetable. Only to then * (c) clean the folio again and return 1 to * cause the writeback. * * This way we avoid all nasty races with the * dirty bit in multiple places and clearing * them concurrently from different threads. * * Note! Normally the "folio_mark_dirty(folio)" * has no effect on the actual dirty bit - since * that will already usually be set. But we * need the side effects, and it can help us * avoid races. * * We basically use the folio "master dirty bit" * as a serialization point for all the different * threads doing their things. */ if (folio_mkclean(folio)) folio_mark_dirty(folio); /* * We carefully synchronise fault handlers against * installing a dirty pte and marking the folio dirty * at this point. We do this by having them hold the * page lock while dirtying the folio, and folios are * always locked coming in here, so we get the desired * exclusion. */ wb = unlocked_inode_to_wb_begin(inode, &cookie); if (folio_test_clear_dirty(folio)) { long nr = folio_nr_pages(folio); lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr); zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr); wb_stat_mod(wb, WB_RECLAIMABLE, -nr); ret = true; } unlocked_inode_to_wb_end(inode, &cookie); return ret; } return folio_test_clear_dirty(folio); } EXPORT_SYMBOL(folio_clear_dirty_for_io); static void wb_inode_writeback_start(struct bdi_writeback *wb) { atomic_inc(&wb->writeback_inodes); } static void wb_inode_writeback_end(struct bdi_writeback *wb) { unsigned long flags; atomic_dec(&wb->writeback_inodes); /* * Make sure estimate of writeback throughput gets updated after * writeback completed. We delay the update by BANDWIDTH_INTERVAL * (which is the interval other bandwidth updates use for batching) so * that if multiple inodes end writeback at a similar time, they get * batched into one bandwidth update. 
*/ spin_lock_irqsave(&wb->work_lock, flags); if (test_bit(WB_registered, &wb->state)) queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL); spin_unlock_irqrestore(&wb->work_lock, flags); } bool __folio_end_writeback(struct folio *folio) { long nr = folio_nr_pages(folio); struct address_space *mapping = folio_mapping(folio); bool ret; folio_memcg_lock(folio); if (mapping && mapping_use_writeback_tags(mapping)) { struct inode *inode = mapping->host; struct backing_dev_info *bdi = inode_to_bdi(inode); unsigned long flags; xa_lock_irqsave(&mapping->i_pages, flags); ret = folio_test_clear_writeback(folio); if (ret) { __xa_clear_mark(&mapping->i_pages, folio_index(folio), PAGECACHE_TAG_WRITEBACK); if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) { struct bdi_writeback *wb = inode_to_wb(inode); wb_stat_mod(wb, WB_WRITEBACK, -nr); __wb_writeout_add(wb, nr); if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) wb_inode_writeback_end(wb); } } if (mapping->host && !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) sb_clear_inode_writeback(mapping->host); xa_unlock_irqrestore(&mapping->i_pages, flags); } else { ret = folio_test_clear_writeback(folio); } if (ret) { lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr); zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr); node_stat_mod_folio(folio, NR_WRITTEN, nr); } folio_memcg_unlock(folio); return ret; } bool __folio_start_writeback(struct folio *folio, bool keep_write) { long nr = folio_nr_pages(folio); struct address_space *mapping = folio_mapping(folio); bool ret; int access_ret; folio_memcg_lock(folio); if (mapping && mapping_use_writeback_tags(mapping)) { XA_STATE(xas, &mapping->i_pages, folio_index(folio)); struct inode *inode = mapping->host; struct backing_dev_info *bdi = inode_to_bdi(inode); unsigned long flags; xas_lock_irqsave(&xas, flags); xas_load(&xas); ret = folio_test_set_writeback(folio); if (!ret) { bool on_wblist; on_wblist = mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK); xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK); if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) { struct bdi_writeback *wb = inode_to_wb(inode); wb_stat_mod(wb, WB_WRITEBACK, nr); if (!on_wblist) wb_inode_writeback_start(wb); } /* * We can come through here when swapping * anonymous folios, so we don't necessarily * have an inode to track for sync. */ if (mapping->host && !on_wblist) sb_mark_inode_writeback(mapping->host); } if (!folio_test_dirty(folio)) xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY); if (!keep_write) xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE); xas_unlock_irqrestore(&xas, flags); } else { ret = folio_test_set_writeback(folio); } if (!ret) { lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr); zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr); } folio_memcg_unlock(folio); access_ret = arch_make_folio_accessible(folio); /* * If writeback has been triggered on a page that cannot be made * accessible, it is too late to recover here. */ VM_BUG_ON_FOLIO(access_ret != 0, folio); return ret; } EXPORT_SYMBOL(__folio_start_writeback); /** * folio_wait_writeback - Wait for a folio to finish writeback. * @folio: The folio to wait for. * * If the folio is currently being written back to storage, wait for the * I/O to complete. * * Context: Sleeps. Must be called in process context and with * no spinlocks held. Caller should hold a reference on the folio. * If the folio is not locked, writeback may start again after writeback * has finished. 
*/ void folio_wait_writeback(struct folio *folio) { while (folio_test_writeback(folio)) { trace_folio_wait_writeback(folio, folio_mapping(folio)); folio_wait_bit(folio, PG_writeback); } } EXPORT_SYMBOL_GPL(folio_wait_writeback); /** * folio_wait_writeback_killable - Wait for a folio to finish writeback. * @folio: The folio to wait for. * * If the folio is currently being written back to storage, wait for the * I/O to complete or a fatal signal to arrive. * * Context: Sleeps. Must be called in process context and with * no spinlocks held. Caller should hold a reference on the folio. * If the folio is not locked, writeback may start again after writeback * has finished. * Return: 0 on success, -EINTR if we get a fatal signal while waiting. */ int folio_wait_writeback_killable(struct folio *folio) { while (folio_test_writeback(folio)) { trace_folio_wait_writeback(folio, folio_mapping(folio)); if (folio_wait_bit_killable(folio, PG_writeback)) return -EINTR; } return 0; } EXPORT_SYMBOL_GPL(folio_wait_writeback_killable); /** * folio_wait_stable() - wait for writeback to finish, if necessary. * @folio: The folio to wait on. * * This function determines if the given folio is related to a backing * device that requires folio contents to be held stable during writeback. * If so, then it will wait for any pending writeback to complete. * * Context: Sleeps. Must be called in process context and with * no spinlocks held. Caller should hold a reference on the folio. * If the folio is not locked, writeback may start again after writeback * has finished. */ void folio_wait_stable(struct folio *folio) { if (folio_inode(folio)->i_sb->s_iflags & SB_I_STABLE_WRITES) folio_wait_writeback(folio); } EXPORT_SYMBOL_GPL(folio_wait_stable);
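/*
 * A minimal, hypothetical sketch of the write_cache_pages() calling
 * convention documented above: a filesystem without a writepages walker of
 * its own hands a per-folio callback plus private data to the generic
 * helper.  The example_* names are invented for illustration and are not
 * kernel symbols; a real callback would build and submit bios here.
 */
static int example_submit_folio(struct address_space *mapping,
				struct folio *folio,
				struct writeback_control *wbc);	/* hypothetical I/O path, not shown */

static int example_writepage_cb(struct folio *folio,
				struct writeback_control *wbc, void *data)
{
	struct address_space *mapping = data;
	int err;

	/* The folio arrives locked, with its dirty bit already cleared for I/O. */
	err = example_submit_folio(mapping, folio, wbc);
	if (err) {
		folio_redirty_for_writepage(wbc, folio);
		mapping_set_error(mapping, err);
	}
	folio_unlock(folio);
	return err;
}

static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	/*
	 * Tagging, livelock avoidance and range handling are all done by the
	 * generic helper; the callback only has to write one folio.
	 */
	return write_cache_pages(mapping, wbc, example_writepage_cb, mapping);
}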
linux-master
mm/page-writeback.c
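/*
 * A hedged sketch of the pattern described in the filemap_dirty_folio()
 * comment above: a filesystem that does not use buffer_heads simply points
 * its ->dirty_folio address_space operation at the generic helper and lets
 * __folio_mark_dirty()/__mark_inode_dirty() do the accounting.  The
 * example_* entries are hypothetical placeholders, not real kernel symbols.
 */
static const struct address_space_operations example_aops = {
	.read_folio	= example_read_folio,		/* hypothetical */
	.writepages	= example_writepages,		/* e.g. a write_cache_pages() wrapper */
	.dirty_folio	= filemap_dirty_folio,
};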
// SPDX-License-Identifier: GPL-2.0 /* * Device Memory Migration functionality. * * Originally written by Jérôme Glisse. */ #include <linux/export.h> #include <linux/memremap.h> #include <linux/migrate.h> #include <linux/mm.h> #include <linux/mm_inline.h> #include <linux/mmu_notifier.h> #include <linux/oom.h> #include <linux/pagewalk.h> #include <linux/rmap.h> #include <linux/swapops.h> #include <asm/tlbflush.h> #include "internal.h" static int migrate_vma_collect_skip(unsigned long start, unsigned long end, struct mm_walk *walk) { struct migrate_vma *migrate = walk->private; unsigned long addr; for (addr = start; addr < end; addr += PAGE_SIZE) { migrate->dst[migrate->npages] = 0; migrate->src[migrate->npages++] = 0; } return 0; } static int migrate_vma_collect_hole(unsigned long start, unsigned long end, __always_unused int depth, struct mm_walk *walk) { struct migrate_vma *migrate = walk->private; unsigned long addr; /* Only allow populating anonymous memory. */ if (!vma_is_anonymous(walk->vma)) return migrate_vma_collect_skip(start, end, walk); for (addr = start; addr < end; addr += PAGE_SIZE) { migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; migrate->dst[migrate->npages] = 0; migrate->npages++; migrate->cpages++; } return 0; } static int migrate_vma_collect_pmd(pmd_t *pmdp, unsigned long start, unsigned long end, struct mm_walk *walk) { struct migrate_vma *migrate = walk->private; struct vm_area_struct *vma = walk->vma; struct mm_struct *mm = vma->vm_mm; unsigned long addr = start, unmapped = 0; spinlock_t *ptl; pte_t *ptep; again: if (pmd_none(*pmdp)) return migrate_vma_collect_hole(start, end, -1, walk); if (pmd_trans_huge(*pmdp)) { struct page *page; ptl = pmd_lock(mm, pmdp); if (unlikely(!pmd_trans_huge(*pmdp))) { spin_unlock(ptl); goto again; } page = pmd_page(*pmdp); if (is_huge_zero_page(page)) { spin_unlock(ptl); split_huge_pmd(vma, pmdp, addr); } else { int ret; get_page(page); spin_unlock(ptl); if (unlikely(!trylock_page(page))) return migrate_vma_collect_skip(start, end, walk); ret = split_huge_page(page); unlock_page(page); put_page(page); if (ret) return migrate_vma_collect_skip(start, end, walk); } } ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); if (!ptep) goto again; arch_enter_lazy_mmu_mode(); for (; addr < end; addr += PAGE_SIZE, ptep++) { unsigned long mpfn = 0, pfn; struct page *page; swp_entry_t entry; pte_t pte; pte = ptep_get(ptep); if (pte_none(pte)) { if (vma_is_anonymous(vma)) { mpfn = MIGRATE_PFN_MIGRATE; migrate->cpages++; } goto next; } if (!pte_present(pte)) { /* * Only care about unaddressable device page special * page table entry. Other special swap entries are not * migratable, and we ignore regular swapped page. 
*/ entry = pte_to_swp_entry(pte); if (!is_device_private_entry(entry)) goto next; page = pfn_swap_entry_to_page(entry); if (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_PRIVATE) || page->pgmap->owner != migrate->pgmap_owner) goto next; mpfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE; if (is_writable_device_private_entry(entry)) mpfn |= MIGRATE_PFN_WRITE; } else { pfn = pte_pfn(pte); if (is_zero_pfn(pfn) && (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) { mpfn = MIGRATE_PFN_MIGRATE; migrate->cpages++; goto next; } page = vm_normal_page(migrate->vma, addr, pte); if (page && !is_zone_device_page(page) && !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) goto next; else if (page && is_device_coherent_page(page) && (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) || page->pgmap->owner != migrate->pgmap_owner)) goto next; mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE; mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0; } /* FIXME support THP */ if (!page || !page->mapping || PageTransCompound(page)) { mpfn = 0; goto next; } /* * By getting a reference on the page we pin it and that blocks * any kind of migration. Side effect is that it "freezes" the * pte. * * We drop this reference after isolating the page from the lru * for non device page (device page are not on the lru and thus * can't be dropped from it). */ get_page(page); /* * We rely on trylock_page() to avoid deadlock between * concurrent migrations where each is waiting on the others * page lock. If we can't immediately lock the page we fail this * migration as it is only best effort anyway. * * If we can lock the page it's safe to set up a migration entry * now. In the common case where the page is mapped once in a * single process setting up the migration entry now is an * optimisation to avoid walking the rmap later with * try_to_migrate(). */ if (trylock_page(page)) { bool anon_exclusive; pte_t swp_pte; flush_cache_page(vma, addr, pte_pfn(pte)); anon_exclusive = PageAnon(page) && PageAnonExclusive(page); if (anon_exclusive) { pte = ptep_clear_flush(vma, addr, ptep); if (page_try_share_anon_rmap(page)) { set_pte_at(mm, addr, ptep, pte); unlock_page(page); put_page(page); mpfn = 0; goto next; } } else { pte = ptep_get_and_clear(mm, addr, ptep); } migrate->cpages++; /* Set the dirty flag on the folio now the pte is gone. */ if (pte_dirty(pte)) folio_mark_dirty(page_folio(page)); /* Setup special migration page table entry */ if (mpfn & MIGRATE_PFN_WRITE) entry = make_writable_migration_entry( page_to_pfn(page)); else if (anon_exclusive) entry = make_readable_exclusive_migration_entry( page_to_pfn(page)); else entry = make_readable_migration_entry( page_to_pfn(page)); if (pte_present(pte)) { if (pte_young(pte)) entry = make_migration_entry_young(entry); if (pte_dirty(pte)) entry = make_migration_entry_dirty(entry); } swp_pte = swp_entry_to_pte(entry); if (pte_present(pte)) { if (pte_soft_dirty(pte)) swp_pte = pte_swp_mksoft_dirty(swp_pte); if (pte_uffd_wp(pte)) swp_pte = pte_swp_mkuffd_wp(swp_pte); } else { if (pte_swp_soft_dirty(pte)) swp_pte = pte_swp_mksoft_dirty(swp_pte); if (pte_swp_uffd_wp(pte)) swp_pte = pte_swp_mkuffd_wp(swp_pte); } set_pte_at(mm, addr, ptep, swp_pte); /* * This is like regular unmap: we remove the rmap and * drop page refcount. Page won't be freed, as we took * a reference just above. 
*/ page_remove_rmap(page, vma, false); put_page(page); if (pte_present(pte)) unmapped++; } else { put_page(page); mpfn = 0; } next: migrate->dst[migrate->npages] = 0; migrate->src[migrate->npages++] = mpfn; } /* Only flush the TLB if we actually modified any entries */ if (unmapped) flush_tlb_range(walk->vma, start, end); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(ptep - 1, ptl); return 0; } static const struct mm_walk_ops migrate_vma_walk_ops = { .pmd_entry = migrate_vma_collect_pmd, .pte_hole = migrate_vma_collect_hole, .walk_lock = PGWALK_RDLOCK, }; /* * migrate_vma_collect() - collect pages over a range of virtual addresses * @migrate: migrate struct containing all migration information * * This will walk the CPU page table. For each virtual address backed by a * valid page, it updates the src array and takes a reference on the page, in * order to pin the page until we lock it and unmap it. */ static void migrate_vma_collect(struct migrate_vma *migrate) { struct mmu_notifier_range range; /* * Note that the pgmap_owner is passed to the mmu notifier callback so * that the registered device driver can skip invalidating device * private page mappings that won't be migrated. */ mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0, migrate->vma->vm_mm, migrate->start, migrate->end, migrate->pgmap_owner); mmu_notifier_invalidate_range_start(&range); walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end, &migrate_vma_walk_ops, migrate); mmu_notifier_invalidate_range_end(&range); migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT); } /* * migrate_vma_check_page() - check if page is pinned or not * @page: struct page to check * * Pinned pages cannot be migrated. This is the same test as in * folio_migrate_mapping(), except that here we allow migration of a * ZONE_DEVICE page. */ static bool migrate_vma_check_page(struct page *page, struct page *fault_page) { /* * One extra ref because caller holds an extra reference, either from * isolate_lru_page() for a regular page, or migrate_vma_collect() for * a device page. */ int extra = 1 + (page == fault_page); /* * FIXME support THP (transparent huge page), it is bit more complex to * check them than regular pages, because they can be mapped with a pmd * or with a pte (split pte mapping). */ if (PageCompound(page)) return false; /* Page from ZONE_DEVICE have one extra reference */ if (is_zone_device_page(page)) extra++; /* For file back page */ if (page_mapping(page)) extra += 1 + page_has_private(page); if ((page_count(page) - extra) > page_mapcount(page)) return false; return true; } /* * Unmaps pages for migration. Returns number of source pfns marked as * migrating. 
*/ static unsigned long migrate_device_unmap(unsigned long *src_pfns, unsigned long npages, struct page *fault_page) { unsigned long i, restore = 0; bool allow_drain = true; unsigned long unmapped = 0; lru_add_drain(); for (i = 0; i < npages; i++) { struct page *page = migrate_pfn_to_page(src_pfns[i]); struct folio *folio; if (!page) { if (src_pfns[i] & MIGRATE_PFN_MIGRATE) unmapped++; continue; } /* ZONE_DEVICE pages are not on LRU */ if (!is_zone_device_page(page)) { if (!PageLRU(page) && allow_drain) { /* Drain CPU's lru cache */ lru_add_drain_all(); allow_drain = false; } if (!isolate_lru_page(page)) { src_pfns[i] &= ~MIGRATE_PFN_MIGRATE; restore++; continue; } /* Drop the reference we took in collect */ put_page(page); } folio = page_folio(page); if (folio_mapped(folio)) try_to_migrate(folio, 0); if (page_mapped(page) || !migrate_vma_check_page(page, fault_page)) { if (!is_zone_device_page(page)) { get_page(page); putback_lru_page(page); } src_pfns[i] &= ~MIGRATE_PFN_MIGRATE; restore++; continue; } unmapped++; } for (i = 0; i < npages && restore; i++) { struct page *page = migrate_pfn_to_page(src_pfns[i]); struct folio *folio; if (!page || (src_pfns[i] & MIGRATE_PFN_MIGRATE)) continue; folio = page_folio(page); remove_migration_ptes(folio, folio, false); src_pfns[i] = 0; folio_unlock(folio); folio_put(folio); restore--; } return unmapped; } /* * migrate_vma_unmap() - replace page mapping with special migration pte entry * @migrate: migrate struct containing all migration information * * Isolate pages from the LRU and replace mappings (CPU page table pte) with a * special migration pte entry and check if it has been pinned. Pinned pages are * restored because we cannot migrate them. * * This is the last step before we call the device driver callback to allocate * destination memory and copy contents of original page over to new page. */ static void migrate_vma_unmap(struct migrate_vma *migrate) { migrate->cpages = migrate_device_unmap(migrate->src, migrate->npages, migrate->fault_page); } /** * migrate_vma_setup() - prepare to migrate a range of memory * @args: contains the vma, start, and pfns arrays for the migration * * Returns: negative errno on failures, 0 when 0 or more pages were migrated * without an error. * * Prepare to migrate a range of memory virtual address range by collecting all * the pages backing each virtual address in the range, saving them inside the * src array. Then lock those pages and unmap them. Once the pages are locked * and unmapped, check whether each page is pinned or not. Pages that aren't * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the * corresponding src array entry. Then restores any pages that are pinned, by * remapping and unlocking those pages. * * The caller should then allocate destination memory and copy source memory to * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE * flag set). Once these are allocated and copied, the caller must update each * corresponding entry in the dst array with the pfn value of the destination * page and with MIGRATE_PFN_VALID. Destination pages must be locked via * lock_page(). * * Note that the caller does not have to migrate all the pages that are marked * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from * device memory to system memory. 
If the caller cannot migrate a device page * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe * consequences for the userspace process, so it must be avoided if at all * possible. * * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus * allowing the caller to allocate device memory for those unbacked virtual * addresses. For this the caller simply has to allocate device memory and * properly set the destination entry like for regular migration. Note that * this can still fail, and thus inside the device driver you must check if the * migration was successful for those entries after calling migrate_vma_pages(), * just like for regular migration. * * After that, the callers must call migrate_vma_pages() to go over each entry * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag * set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set, * then migrate_vma_pages() to migrate struct page information from the source * struct page to the destination struct page. If it fails to migrate the * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the * src array. * * At this point all successfully migrated pages have an entry in the src * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst * array entry with MIGRATE_PFN_VALID flag set. * * Once migrate_vma_pages() returns the caller may inspect which pages were * successfully migrated, and which were not. Successfully migrated pages will * have the MIGRATE_PFN_MIGRATE flag set for their src array entry. * * It is safe to update device page table after migrate_vma_pages() because * both destination and source page are still locked, and the mmap_lock is held * in read mode (hence no one can unmap the range being migrated). * * Once the caller is done cleaning up things and updating its page table (if it * chose to do so, this is not an obligation) it finally calls * migrate_vma_finalize() to update the CPU page table to point to new pages * for successfully migrated pages or otherwise restore the CPU page table to * point to the original source pages. */ int migrate_vma_setup(struct migrate_vma *args) { long nr_pages = (args->end - args->start) >> PAGE_SHIFT; args->start &= PAGE_MASK; args->end &= PAGE_MASK; if (!args->vma || is_vm_hugetlb_page(args->vma) || (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma)) return -EINVAL; if (nr_pages <= 0) return -EINVAL; if (args->start < args->vma->vm_start || args->start >= args->vma->vm_end) return -EINVAL; if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end) return -EINVAL; if (!args->src || !args->dst) return -EINVAL; if (args->fault_page && !is_device_private_page(args->fault_page)) return -EINVAL; memset(args->src, 0, sizeof(*args->src) * nr_pages); args->cpages = 0; args->npages = 0; migrate_vma_collect(args); if (args->cpages) migrate_vma_unmap(args); /* * At this point pages are locked and unmapped, and thus they have * stable content and can safely be copied to destination memory that * is allocated by the drivers. */ return 0; } EXPORT_SYMBOL(migrate_vma_setup); /* * This code closely matches the code in: * __handle_mm_fault() * handle_pte_fault() * do_anonymous_page() * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE * private or coherent page. 
*/ static void migrate_vma_insert_page(struct migrate_vma *migrate, unsigned long addr, struct page *page, unsigned long *src) { struct vm_area_struct *vma = migrate->vma; struct mm_struct *mm = vma->vm_mm; bool flush = false; spinlock_t *ptl; pte_t entry; pgd_t *pgdp; p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; pte_t orig_pte; /* Only allow populating anonymous memory */ if (!vma_is_anonymous(vma)) goto abort; pgdp = pgd_offset(mm, addr); p4dp = p4d_alloc(mm, pgdp, addr); if (!p4dp) goto abort; pudp = pud_alloc(mm, p4dp, addr); if (!pudp) goto abort; pmdp = pmd_alloc(mm, pudp, addr); if (!pmdp) goto abort; if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp)) goto abort; if (pte_alloc(mm, pmdp)) goto abort; if (unlikely(anon_vma_prepare(vma))) goto abort; if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) goto abort; /* * The memory barrier inside __SetPageUptodate makes sure that * preceding stores to the page contents become visible before * the set_pte_at() write. */ __SetPageUptodate(page); if (is_device_private_page(page)) { swp_entry_t swp_entry; if (vma->vm_flags & VM_WRITE) swp_entry = make_writable_device_private_entry( page_to_pfn(page)); else swp_entry = make_readable_device_private_entry( page_to_pfn(page)); entry = swp_entry_to_pte(swp_entry); } else { if (is_zone_device_page(page) && !is_device_coherent_page(page)) { pr_warn_once("Unsupported ZONE_DEVICE page type.\n"); goto abort; } entry = mk_pte(page, vma->vm_page_prot); if (vma->vm_flags & VM_WRITE) entry = pte_mkwrite(pte_mkdirty(entry), vma); } ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); if (!ptep) goto abort; orig_pte = ptep_get(ptep); if (check_stable_address_space(mm)) goto unlock_abort; if (pte_present(orig_pte)) { unsigned long pfn = pte_pfn(orig_pte); if (!is_zero_pfn(pfn)) goto unlock_abort; flush = true; } else if (!pte_none(orig_pte)) goto unlock_abort; /* * Check for userfaultfd but do not deliver the fault. Instead, * just back off. */ if (userfaultfd_missing(vma)) goto unlock_abort; inc_mm_counter(mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, addr); if (!is_zone_device_page(page)) lru_cache_add_inactive_or_unevictable(page, vma); get_page(page); if (flush) { flush_cache_page(vma, addr, pte_pfn(orig_pte)); ptep_clear_flush(vma, addr, ptep); set_pte_at_notify(mm, addr, ptep, entry); update_mmu_cache(vma, addr, ptep); } else { /* No need to invalidate - it was non-present before */ set_pte_at(mm, addr, ptep, entry); update_mmu_cache(vma, addr, ptep); } pte_unmap_unlock(ptep, ptl); *src = MIGRATE_PFN_MIGRATE; return; unlock_abort: pte_unmap_unlock(ptep, ptl); abort: *src &= ~MIGRATE_PFN_MIGRATE; } static void __migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns, unsigned long npages, struct migrate_vma *migrate) { struct mmu_notifier_range range; unsigned long i; bool notified = false; for (i = 0; i < npages; i++) { struct page *newpage = migrate_pfn_to_page(dst_pfns[i]); struct page *page = migrate_pfn_to_page(src_pfns[i]); struct address_space *mapping; int r; if (!newpage) { src_pfns[i] &= ~MIGRATE_PFN_MIGRATE; continue; } if (!page) { unsigned long addr; if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE)) continue; /* * The only time there is no vma is when called from * migrate_device_coherent_page(). However this isn't * called if the page could not be unmapped. 
*/ VM_BUG_ON(!migrate); addr = migrate->start + i*PAGE_SIZE; if (!notified) { notified = true; mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0, migrate->vma->vm_mm, addr, migrate->end, migrate->pgmap_owner); mmu_notifier_invalidate_range_start(&range); } migrate_vma_insert_page(migrate, addr, newpage, &src_pfns[i]); continue; } mapping = page_mapping(page); if (is_device_private_page(newpage) || is_device_coherent_page(newpage)) { if (mapping) { struct folio *folio; folio = page_folio(page); /* * For now only support anonymous memory migrating to * device private or coherent memory. * * Try to get rid of swap cache if possible. */ if (!folio_test_anon(folio) || !folio_free_swap(folio)) { src_pfns[i] &= ~MIGRATE_PFN_MIGRATE; continue; } } } else if (is_zone_device_page(newpage)) { /* * Other types of ZONE_DEVICE page are not supported. */ src_pfns[i] &= ~MIGRATE_PFN_MIGRATE; continue; } if (migrate && migrate->fault_page == page) r = migrate_folio_extra(mapping, page_folio(newpage), page_folio(page), MIGRATE_SYNC_NO_COPY, 1); else r = migrate_folio(mapping, page_folio(newpage), page_folio(page), MIGRATE_SYNC_NO_COPY); if (r != MIGRATEPAGE_SUCCESS) src_pfns[i] &= ~MIGRATE_PFN_MIGRATE; } if (notified) mmu_notifier_invalidate_range_end(&range); } /** * migrate_device_pages() - migrate meta-data from src page to dst page * @src_pfns: src_pfns returned from migrate_device_range() * @dst_pfns: array of pfns allocated by the driver to migrate memory to * @npages: number of pages in the range * * Equivalent to migrate_vma_pages(). This is called to migrate struct page * meta-data from source struct page to destination. */ void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns, unsigned long npages) { __migrate_device_pages(src_pfns, dst_pfns, npages, NULL); } EXPORT_SYMBOL(migrate_device_pages); /** * migrate_vma_pages() - migrate meta-data from src page to dst page * @migrate: migrate struct containing all migration information * * This migrates struct page meta-data from source struct page to destination * struct page. This effectively finishes the migration from source page to the * destination page. */ void migrate_vma_pages(struct migrate_vma *migrate) { __migrate_device_pages(migrate->src, migrate->dst, migrate->npages, migrate); } EXPORT_SYMBOL(migrate_vma_pages); /* * migrate_device_finalize() - complete page migration * @src_pfns: src_pfns returned from migrate_device_range() * @dst_pfns: array of pfns allocated by the driver to migrate memory to * @npages: number of pages in the range * * Completes migration of the page by removing special migration entries. * Drivers must ensure copying of page data is complete and visible to the CPU * before calling this. 
*/ void migrate_device_finalize(unsigned long *src_pfns, unsigned long *dst_pfns, unsigned long npages) { unsigned long i; for (i = 0; i < npages; i++) { struct folio *dst, *src; struct page *newpage = migrate_pfn_to_page(dst_pfns[i]); struct page *page = migrate_pfn_to_page(src_pfns[i]); if (!page) { if (newpage) { unlock_page(newpage); put_page(newpage); } continue; } if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE) || !newpage) { if (newpage) { unlock_page(newpage); put_page(newpage); } newpage = page; } src = page_folio(page); dst = page_folio(newpage); remove_migration_ptes(src, dst, false); folio_unlock(src); if (is_zone_device_page(page)) put_page(page); else putback_lru_page(page); if (newpage != page) { unlock_page(newpage); if (is_zone_device_page(newpage)) put_page(newpage); else putback_lru_page(newpage); } } } EXPORT_SYMBOL(migrate_device_finalize); /** * migrate_vma_finalize() - restore CPU page table entry * @migrate: migrate struct containing all migration information * * This replaces the special migration pte entry with either a mapping to the * new page if migration was successful for that page, or to the original page * otherwise. * * This also unlocks the pages and puts them back on the lru, or drops the extra * refcount, for device pages. */ void migrate_vma_finalize(struct migrate_vma *migrate) { migrate_device_finalize(migrate->src, migrate->dst, migrate->npages); } EXPORT_SYMBOL(migrate_vma_finalize); /** * migrate_device_range() - migrate device private pfns to normal memory. * @src_pfns: array large enough to hold migrating source device private pfns. * @start: starting pfn in the range to migrate. * @npages: number of pages to migrate. * * migrate_vma_setup() is similar in concept to migrate_vma_setup() except that * instead of looking up pages based on virtual address mappings a range of * device pfns that should be migrated to system memory is used instead. * * This is useful when a driver needs to free device memory but doesn't know the * virtual mappings of every page that may be in device memory. For example this * is often the case when a driver is being unloaded or unbound from a device. * * Like migrate_vma_setup() this function will take a reference and lock any * migrating pages that aren't free before unmapping them. Drivers may then * allocate destination pages and start copying data from the device to CPU * memory before calling migrate_device_pages(). */ int migrate_device_range(unsigned long *src_pfns, unsigned long start, unsigned long npages) { unsigned long i, pfn; for (pfn = start, i = 0; i < npages; pfn++, i++) { struct page *page = pfn_to_page(pfn); if (!get_page_unless_zero(page)) { src_pfns[i] = 0; continue; } if (!trylock_page(page)) { src_pfns[i] = 0; put_page(page); continue; } src_pfns[i] = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE; } migrate_device_unmap(src_pfns, npages, NULL); return 0; } EXPORT_SYMBOL(migrate_device_range); /* * Migrate a device coherent page back to normal memory. The caller should have * a reference on page which will be copied to the new page if migration is * successful or dropped on failure. */ int migrate_device_coherent_page(struct page *page) { unsigned long src_pfn, dst_pfn = 0; struct page *dpage; WARN_ON_ONCE(PageCompound(page)); lock_page(page); src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE; /* * We don't have a VMA and don't need to walk the page tables to find * the source page. So call migrate_vma_unmap() directly to unmap the * page as migrate_vma_setup() will fail if args.vma == NULL. 
*/ migrate_device_unmap(&src_pfn, 1, NULL); if (!(src_pfn & MIGRATE_PFN_MIGRATE)) return -EBUSY; dpage = alloc_page(GFP_USER | __GFP_NOWARN); if (dpage) { lock_page(dpage); dst_pfn = migrate_pfn(page_to_pfn(dpage)); } migrate_device_pages(&src_pfn, &dst_pfn, 1); if (src_pfn & MIGRATE_PFN_MIGRATE) copy_highpage(dpage, page); migrate_device_finalize(&src_pfn, &dst_pfn, 1); if (src_pfn & MIGRATE_PFN_MIGRATE) return 0; return -EBUSY; }
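/*
 * A condensed, hypothetical sketch of the driver-side flow documented above
 * for migrate_vma_setup(): collect and unmap the source pages, back every
 * entry still marked MIGRATE_PFN_MIGRATE with a destination page, then
 * migrate the struct page metadata and finalize.  NR_EXAMPLE_PAGES and the
 * example_* helpers are invented names standing in for driver-specific work;
 * migrate_pfn() already sets MIGRATE_PFN_VALID on the destination entries.
 */
#define NR_EXAMPLE_PAGES	64

static int example_migrate_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 void *pgmap_owner)
{
	unsigned long src[NR_EXAMPLE_PAGES] = {}, dst[NR_EXAMPLE_PAGES] = {};
	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
		.end		= end,
		.src		= src,
		.dst		= dst,
		.pgmap_owner	= pgmap_owner,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	unsigned long i;
	int ret;

	if ((end - start) >> PAGE_SHIFT > NR_EXAMPLE_PAGES)
		return -EINVAL;

	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	for (i = 0; i < args.npages; i++) {
		struct page *spage = migrate_pfn_to_page(args.src[i]);
		struct page *dpage;

		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = example_alloc_device_page();	/* hypothetical */
		if (!dpage)
			continue;	/* dst[i] stays 0, source page is restored */

		lock_page(dpage);
		if (spage)
			example_copy_to_device(dpage, spage);	/* hypothetical */
		args.dst[i] = migrate_pfn(page_to_pfn(dpage));
	}

	/* Move struct page state across, then fix up or restore the CPU PTEs. */
	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}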
linux-master
mm/migrate_device.c
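/*
 * A hypothetical sketch of the pfn-based eviction flow described in
 * migrate_device_range() above, as a driver might use it on unbind: collect
 * and unmap a range of device private pfns, back each migrating entry with a
 * freshly allocated system page, copy the data back, then migrate and
 * finalize.  example_copy_from_device() is an invented helper.
 */
static void example_evict_device_range(unsigned long start_pfn,
				       unsigned long npages)
{
	unsigned long *src, *dst;
	unsigned long i;

	src = kcalloc(npages, sizeof(*src), GFP_KERNEL);
	dst = kcalloc(npages, sizeof(*dst), GFP_KERNEL);
	if (!src || !dst)
		goto out;

	migrate_device_range(src, start_pfn, npages);

	for (i = 0; i < npages; i++) {
		struct page *dpage;

		if (!(src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = alloc_page(GFP_USER | __GFP_NOWARN);
		if (!dpage)
			continue;	/* entry is restored by finalize */

		lock_page(dpage);	/* destination pages must be locked */
		example_copy_from_device(dpage, migrate_pfn_to_page(src[i]));	/* hypothetical */
		dst[i] = migrate_pfn(page_to_pfn(dpage));
	}

	migrate_device_pages(src, dst, npages);
	migrate_device_finalize(src, dst, npages);
out:
	kfree(src);
	kfree(dst);
}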
// SPDX-License-Identifier: GPL-2.0 // Copyright(c) 2018 Intel Corporation. All rights reserved. #include <linux/mm.h> #include <linux/init.h> #include <linux/mmzone.h> #include <linux/random.h> #include <linux/moduleparam.h> #include "internal.h" #include "shuffle.h" DEFINE_STATIC_KEY_FALSE(page_alloc_shuffle_key); static bool shuffle_param; static __meminit int shuffle_param_set(const char *val, const struct kernel_param *kp) { if (param_set_bool(val, kp)) return -EINVAL; if (*(bool *)kp->arg) static_branch_enable(&page_alloc_shuffle_key); return 0; } static const struct kernel_param_ops shuffle_param_ops = { .set = shuffle_param_set, .get = param_get_bool, }; module_param_cb(shuffle, &shuffle_param_ops, &shuffle_param, 0400); /* * For two pages to be swapped in the shuffle, they must be free (on a * 'free_area' lru), have the same order, and have the same migratetype. */ static struct page * __meminit shuffle_valid_page(struct zone *zone, unsigned long pfn, int order) { struct page *page = pfn_to_online_page(pfn); /* * Given we're dealing with randomly selected pfns in a zone we * need to ask questions like... */ /* ... is the page managed by the buddy? */ if (!page) return NULL; /* ... is the page assigned to the same zone? */ if (page_zone(page) != zone) return NULL; /* ...is the page free and currently on a free_area list? */ if (!PageBuddy(page)) return NULL; /* * ...is the page on the same list as the page we will * shuffle it with? */ if (buddy_order(page) != order) return NULL; return page; } /* * Fisher-Yates shuffle the freelist which prescribes iterating through an * array, pfns in this case, and randomly swapping each entry with another in * the span, end_pfn - start_pfn. * * To keep the implementation simple it does not attempt to correct for sources * of bias in the distribution, like modulo bias or pseudo-random number * generator bias. I.e. the expectation is that this shuffling raises the bar * for attacks that exploit the predictability of page allocations, but need not * be a perfect shuffle. */ #define SHUFFLE_RETRY 10 void __meminit __shuffle_zone(struct zone *z) { unsigned long i, flags; unsigned long start_pfn = z->zone_start_pfn; unsigned long end_pfn = zone_end_pfn(z); const int order = SHUFFLE_ORDER; const int order_pages = 1 << order; spin_lock_irqsave(&z->lock, flags); start_pfn = ALIGN(start_pfn, order_pages); for (i = start_pfn; i < end_pfn; i += order_pages) { unsigned long j; int migratetype, retry; struct page *page_i, *page_j; /* * We expect page_i, in the sub-range of a zone being added * (@start_pfn to @end_pfn), to more likely be valid compared to * page_j randomly selected in the span @zone_start_pfn to * @spanned_pages. */ page_i = shuffle_valid_page(z, i, order); if (!page_i) continue; for (retry = 0; retry < SHUFFLE_RETRY; retry++) { /* * Pick a random order aligned page in the zone span as * a swap target. If the selected pfn is a hole, retry * up to SHUFFLE_RETRY attempts find a random valid pfn * in the zone. */ j = z->zone_start_pfn + ALIGN_DOWN(get_random_long() % z->spanned_pages, order_pages); page_j = shuffle_valid_page(z, j, order); if (page_j && page_j != page_i) break; } if (retry >= SHUFFLE_RETRY) { pr_debug("%s: failed to swap %#lx\n", __func__, i); continue; } /* * Each migratetype corresponds to its own list, make sure the * types match otherwise we're moving pages to lists where they * do not belong. 
*/ migratetype = get_pageblock_migratetype(page_i); if (get_pageblock_migratetype(page_j) != migratetype) { pr_debug("%s: migratetype mismatch %#lx\n", __func__, i); continue; } list_swap(&page_i->lru, &page_j->lru); pr_debug("%s: swap: %#lx -> %#lx\n", __func__, i, j); /* take it easy on the zone lock */ if ((i % (100 * order_pages)) == 0) { spin_unlock_irqrestore(&z->lock, flags); cond_resched(); spin_lock_irqsave(&z->lock, flags); } } spin_unlock_irqrestore(&z->lock, flags); } /* * __shuffle_free_memory - reduce the predictability of the page allocator * @pgdat: node page data */ void __meminit __shuffle_free_memory(pg_data_t *pgdat) { struct zone *z; for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) shuffle_zone(z); } bool shuffle_pick_tail(void) { static u64 rand; static u8 rand_bits; bool ret; /* * The lack of locking is deliberate. If 2 threads race to * update the rand state it just adds to the entropy. */ if (rand_bits == 0) { rand_bits = 64; rand = get_random_u64(); } ret = rand & 1; rand_bits--; rand >>= 1; return ret; }
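/*
 * For reference, a textbook Fisher-Yates pass over a plain array, which is
 * what the freelist shuffle above approximates.  __shuffle_zone() cannot do
 * exactly this because random pfns in the zone span may be holes or non-free
 * pages, so it swaps list positions in place and retries bad targets instead.
 * Sketch only, not kernel code.
 */
static void example_fisher_yates(unsigned long *arr, unsigned int n)
{
	unsigned int i;

	if (n < 2)
		return;

	for (i = n - 1; i > 0; i--) {
		/* unbiased random index in [0, i] */
		unsigned int j = get_random_u32_below(i + 1);

		swap(arr[i], arr[j]);
	}
}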
linux-master
mm/shuffle.c
// SPDX-License-Identifier: GPL-2.0 /* * Bootmem core functions. * * Copyright (c) 2020, Bytedance. * * Author: Muchun Song <[email protected]> * */ #include <linux/mm.h> #include <linux/compiler.h> #include <linux/memblock.h> #include <linux/bootmem_info.h> #include <linux/memory_hotplug.h> #include <linux/kmemleak.h> void get_page_bootmem(unsigned long info, struct page *page, unsigned long type) { page->index = type; SetPagePrivate(page); set_page_private(page, info); page_ref_inc(page); } void put_page_bootmem(struct page *page) { unsigned long type = page->index; BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE || type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE); if (page_ref_dec_return(page) == 1) { page->index = 0; ClearPagePrivate(page); set_page_private(page, 0); INIT_LIST_HEAD(&page->lru); kmemleak_free_part(page_to_virt(page), PAGE_SIZE); free_reserved_page(page); } } #ifndef CONFIG_SPARSEMEM_VMEMMAP static void __init register_page_bootmem_info_section(unsigned long start_pfn) { unsigned long mapsize, section_nr, i; struct mem_section *ms; struct page *page, *memmap; struct mem_section_usage *usage; section_nr = pfn_to_section_nr(start_pfn); ms = __nr_to_section(section_nr); /* Get section's memmap address */ memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr); /* * Get page for the memmap's phys address * XXX: need more consideration for sparse_vmemmap... */ page = virt_to_page(memmap); mapsize = sizeof(struct page) * PAGES_PER_SECTION; mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT; /* remember memmap's page */ for (i = 0; i < mapsize; i++, page++) get_page_bootmem(section_nr, page, SECTION_INFO); usage = ms->usage; page = virt_to_page(usage); mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT; for (i = 0; i < mapsize; i++, page++) get_page_bootmem(section_nr, page, MIX_SECTION_INFO); } #else /* CONFIG_SPARSEMEM_VMEMMAP */ static void __init register_page_bootmem_info_section(unsigned long start_pfn) { unsigned long mapsize, section_nr, i; struct mem_section *ms; struct page *page, *memmap; struct mem_section_usage *usage; section_nr = pfn_to_section_nr(start_pfn); ms = __nr_to_section(section_nr); memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr); register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION); usage = ms->usage; page = virt_to_page(usage); mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT; for (i = 0; i < mapsize; i++, page++) get_page_bootmem(section_nr, page, MIX_SECTION_INFO); } #endif /* !CONFIG_SPARSEMEM_VMEMMAP */ void __init register_page_bootmem_info_node(struct pglist_data *pgdat) { unsigned long i, pfn, end_pfn, nr_pages; int node = pgdat->node_id; struct page *page; nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT; page = virt_to_page(pgdat); for (i = 0; i < nr_pages; i++, page++) get_page_bootmem(node, page, NODE_INFO); pfn = pgdat->node_start_pfn; end_pfn = pgdat_end_pfn(pgdat); /* register section info */ for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { /* * Some platforms can assign the same pfn to multiple nodes - on * node0 as well as nodeN. To avoid registering a pfn against * multiple nodes we check that this pfn does not already * reside in some other nodes. */ if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node)) register_page_bootmem_info_section(pfn); } }
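/*
 * A hedged sketch of the consumer side of the tagging above: when a memory
 * section is hot-removed, each memmap backing page that was registered with
 * get_page_bootmem() is released again with put_page_bootmem(), and dropping
 * the last reference hands the page back via free_reserved_page().  The
 * helper below is illustrative, not an actual kernel symbol.
 */
static void example_release_bootmem_memmap(struct page *memmap,
					   unsigned long nr_backing_pages)
{
	struct page *page = virt_to_page(memmap);
	unsigned long i;

	for (i = 0; i < nr_backing_pages; i++, page++)
		put_page_bootmem(page);
}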
linux-master
mm/bootmem_info.c
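get_page_bootmem() and put_page_bootmem() above tag a page with a type and an info word and only release it when the last registration is dropped. The toy below models that register/release pattern in plain C; struct bootmem_tag and its helpers are invented for this sketch (the kernel stores the tag in struct page fields and frees the page via free_reserved_page()), and the refcount here simply counts registrations rather than reusing a pre-existing page reference.

#include <assert.h>
#include <stdio.h>

/*
 * Toy model of the get_page_bootmem()/put_page_bootmem() pattern: a
 * descriptor carries a type tag, an opaque info word and a reference
 * count, and the backing resource is released only when the last
 * registration goes away.
 */
struct bootmem_tag {
	unsigned long type;
	unsigned long info;
	int refcount;
};

static void tag_get(struct bootmem_tag *tag, unsigned long info,
		    unsigned long type)
{
	tag->type = type;
	tag->info = info;
	tag->refcount++;
}

static void tag_put(struct bootmem_tag *tag)
{
	assert(tag->refcount > 0);
	if (--tag->refcount == 0) {
		tag->type = 0;
		tag->info = 0;
		printf("last reference dropped, releasing backing memory\n");
	}
}

int main(void)
{
	struct bootmem_tag tag = { 0 };

	tag_get(&tag, 42, 1);	/* e.g. a section registration */
	tag_get(&tag, 42, 1);
	tag_put(&tag);
	tag_put(&tag);		/* prints the release message */
	return 0;
}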
#include <linux/gfp.h> #include <linux/highmem.h> #include <linux/kernel.h> #include <linux/mmdebug.h> #include <linux/mm_types.h> #include <linux/mm_inline.h> #include <linux/pagemap.h> #include <linux/rcupdate.h> #include <linux/smp.h> #include <linux/swap.h> #include <linux/rmap.h> #include <asm/pgalloc.h> #include <asm/tlb.h> #ifndef CONFIG_MMU_GATHER_NO_GATHER static bool tlb_next_batch(struct mmu_gather *tlb) { struct mmu_gather_batch *batch; /* Limit batching if we have delayed rmaps pending */ if (tlb->delayed_rmap && tlb->active != &tlb->local) return false; batch = tlb->active; if (batch->next) { tlb->active = batch->next; return true; } if (tlb->batch_count == MAX_GATHER_BATCH_COUNT) return false; batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN); if (!batch) return false; tlb->batch_count++; batch->next = NULL; batch->nr = 0; batch->max = MAX_GATHER_BATCH; tlb->active->next = batch; tlb->active = batch; return true; } #ifdef CONFIG_SMP static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch, struct vm_area_struct *vma) { for (int i = 0; i < batch->nr; i++) { struct encoded_page *enc = batch->encoded_pages[i]; if (encoded_page_flags(enc)) { struct page *page = encoded_page_ptr(enc); page_remove_rmap(page, vma, false); } } } /** * tlb_flush_rmaps - do pending rmap removals after we have flushed the TLB * @tlb: the current mmu_gather * @vma: The memory area from which the pages are being removed. * * Note that because of how tlb_next_batch() above works, we will * never start multiple new batches with pending delayed rmaps, so * we only need to walk through the current active batch and the * original local one. */ void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { if (!tlb->delayed_rmap) return; tlb_flush_rmap_batch(&tlb->local, vma); if (tlb->active != &tlb->local) tlb_flush_rmap_batch(tlb->active, vma); tlb->delayed_rmap = 0; } #endif static void tlb_batch_pages_flush(struct mmu_gather *tlb) { struct mmu_gather_batch *batch; for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { struct encoded_page **pages = batch->encoded_pages; do { /* * limit free batch count when PAGE_SIZE > 4K */ unsigned int nr = min(512U, batch->nr); free_pages_and_swap_cache(pages, nr); pages += nr; batch->nr -= nr; cond_resched(); } while (batch->nr); } tlb->active = &tlb->local; } static void tlb_batch_list_free(struct mmu_gather *tlb) { struct mmu_gather_batch *batch, *next; for (batch = tlb->local.next; batch; batch = next) { next = batch->next; free_pages((unsigned long)batch, 0); } tlb->local.next = NULL; } bool __tlb_remove_page_size(struct mmu_gather *tlb, struct encoded_page *page, int page_size) { struct mmu_gather_batch *batch; VM_BUG_ON(!tlb->end); #ifdef CONFIG_MMU_GATHER_PAGE_SIZE VM_WARN_ON(tlb->page_size != page_size); #endif batch = tlb->active; /* * Add the page and check if we are full. If so * force a flush. */ batch->encoded_pages[batch->nr++] = page; if (batch->nr == batch->max) { if (!tlb_next_batch(tlb)) return true; batch = tlb->active; } VM_BUG_ON_PAGE(batch->nr > batch->max, encoded_page_ptr(page)); return false; } #endif /* MMU_GATHER_NO_GATHER */ #ifdef CONFIG_MMU_GATHER_TABLE_FREE static void __tlb_remove_table_free(struct mmu_table_batch *batch) { int i; for (i = 0; i < batch->nr; i++) __tlb_remove_table(batch->tables[i]); free_page((unsigned long)batch); } #ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE /* * Semi RCU freeing of the page directories. 
* * This is needed by some architectures to implement software pagetable walkers. * * gup_fast() and other software pagetable walkers do a lockless page-table * walk and therefore needs some synchronization with the freeing of the page * directories. The chosen means to accomplish that is by disabling IRQs over * the walk. * * Architectures that use IPIs to flush TLBs will then automagically DTRT, * since we unlink the page, flush TLBs, free the page. Since the disabling of * IRQs delays the completion of the TLB flush we can never observe an already * freed page. * * Architectures that do not have this (PPC) need to delay the freeing by some * other means, this is that means. * * What we do is batch the freed directory pages (tables) and RCU free them. * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling * holds off grace periods. * * However, in order to batch these pages we need to allocate storage, this * allocation is deep inside the MM code and can thus easily fail on memory * pressure. To guarantee progress we fall back to single table freeing, see * the implementation of tlb_remove_table_one(). * */ static void tlb_remove_table_smp_sync(void *arg) { /* Simply deliver the interrupt */ } void tlb_remove_table_sync_one(void) { /* * This isn't an RCU grace period and hence the page-tables cannot be * assumed to be actually RCU-freed. * * It is however sufficient for software page-table walkers that rely on * IRQ disabling. */ smp_call_function(tlb_remove_table_smp_sync, NULL, 1); } static void tlb_remove_table_rcu(struct rcu_head *head) { __tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu)); } static void tlb_remove_table_free(struct mmu_table_batch *batch) { call_rcu(&batch->rcu, tlb_remove_table_rcu); } #else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */ static void tlb_remove_table_free(struct mmu_table_batch *batch) { __tlb_remove_table_free(batch); } #endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */ /* * If we want tlb_remove_table() to imply TLB invalidates. */ static inline void tlb_table_invalidate(struct mmu_gather *tlb) { if (tlb_needs_table_invalidate()) { /* * Invalidate page-table caches used by hardware walkers. Then * we still need to RCU-sched wait while freeing the pages * because software walkers can still be in-flight. 
*/ tlb_flush_mmu_tlbonly(tlb); } } static void tlb_remove_table_one(void *table) { tlb_remove_table_sync_one(); __tlb_remove_table(table); } static void tlb_table_flush(struct mmu_gather *tlb) { struct mmu_table_batch **batch = &tlb->batch; if (*batch) { tlb_table_invalidate(tlb); tlb_remove_table_free(*batch); *batch = NULL; } } void tlb_remove_table(struct mmu_gather *tlb, void *table) { struct mmu_table_batch **batch = &tlb->batch; if (*batch == NULL) { *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN); if (*batch == NULL) { tlb_table_invalidate(tlb); tlb_remove_table_one(table); return; } (*batch)->nr = 0; } (*batch)->tables[(*batch)->nr++] = table; if ((*batch)->nr == MAX_TABLE_BATCH) tlb_table_flush(tlb); } static inline void tlb_table_init(struct mmu_gather *tlb) { tlb->batch = NULL; } #else /* !CONFIG_MMU_GATHER_TABLE_FREE */ static inline void tlb_table_flush(struct mmu_gather *tlb) { } static inline void tlb_table_init(struct mmu_gather *tlb) { } #endif /* CONFIG_MMU_GATHER_TABLE_FREE */ static void tlb_flush_mmu_free(struct mmu_gather *tlb) { tlb_table_flush(tlb); #ifndef CONFIG_MMU_GATHER_NO_GATHER tlb_batch_pages_flush(tlb); #endif } void tlb_flush_mmu(struct mmu_gather *tlb) { tlb_flush_mmu_tlbonly(tlb); tlb_flush_mmu_free(tlb); } static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) { tlb->mm = mm; tlb->fullmm = fullmm; #ifndef CONFIG_MMU_GATHER_NO_GATHER tlb->need_flush_all = 0; tlb->local.next = NULL; tlb->local.nr = 0; tlb->local.max = ARRAY_SIZE(tlb->__pages); tlb->active = &tlb->local; tlb->batch_count = 0; #endif tlb->delayed_rmap = 0; tlb_table_init(tlb); #ifdef CONFIG_MMU_GATHER_PAGE_SIZE tlb->page_size = 0; #endif __tlb_reset_range(tlb); inc_tlb_flush_pending(tlb->mm); } /** * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down * @tlb: the mmu_gather structure to initialize * @mm: the mm_struct of the target address space * * Called to initialize an (on-stack) mmu_gather structure for page-table * tear-down from @mm. */ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm) { __tlb_gather_mmu(tlb, mm, false); } /** * tlb_gather_mmu_fullmm - initialize an mmu_gather structure for page-table tear-down * @tlb: the mmu_gather structure to initialize * @mm: the mm_struct of the target address space * * In this case, @mm is without users and we're going to destroy the * full address space (exit/execve). * * Called to initialize an (on-stack) mmu_gather structure for page-table * tear-down from @mm. */ void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm) { __tlb_gather_mmu(tlb, mm, true); } /** * tlb_finish_mmu - finish an mmu_gather structure * @tlb: the mmu_gather structure to finish * * Called at the end of the shootdown operation to free up any resources that * were required. */ void tlb_finish_mmu(struct mmu_gather *tlb) { /* * If there are parallel threads are doing PTE changes on same range * under non-exclusive lock (e.g., mmap_lock read-side) but defer TLB * flush by batching, one thread may end up seeing inconsistent PTEs * and result in having stale TLB entries. So flush TLB forcefully * if we detect parallel PTE batching threads. * * However, some syscalls, e.g. munmap(), may free page tables, this * needs force flush everything in the given range. Otherwise this * may result in having stale TLB entries for some architectures, * e.g. aarch64, that could specify flush what level TLB. 
*/ if (mm_tlb_flush_nested(tlb->mm)) { /* * The aarch64 yields better performance with fullmm by * avoiding multiple CPUs spamming TLBI messages at the * same time. * * On x86 non-fullmm doesn't yield significant difference * against fullmm. */ tlb->fullmm = 1; __tlb_reset_range(tlb); tlb->freed_tables = 1; } tlb_flush_mmu(tlb); #ifndef CONFIG_MMU_GATHER_NO_GATHER tlb_batch_list_free(tlb); #endif dec_tlb_flush_pending(tlb->mm); }
linux-master
mm/mmu_gather.c
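struct mmu_gather above defers page freeing by staging pointers into fixed-size batches chained off an always-present local batch, falling back to an immediate flush when no further batch can be allocated. The standalone sketch below reproduces just that chaining/flush shape; struct gather, gather_add(), gather_flush() and BATCH_MAX are names and sizes chosen for illustration, and the real code uses page-sized batches plus the TLB flushing that this toy omits.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH_MAX 4

struct gather_batch {
	struct gather_batch *next;
	unsigned int nr;
	void *items[BATCH_MAX];
};

struct gather {
	struct gather_batch local;	/* always-available first batch */
	struct gather_batch *active;
};

static void gather_init(struct gather *g)
{
	g->local.next = NULL;
	g->local.nr = 0;
	g->active = &g->local;
}

/* Returns true when no further batch could be allocated (caller should flush). */
static bool gather_add(struct gather *g, void *item)
{
	struct gather_batch *batch = g->active;

	batch->items[batch->nr++] = item;
	if (batch->nr == BATCH_MAX) {
		struct gather_batch *next = calloc(1, sizeof(*next));

		if (!next)
			return true;
		batch->next = next;
		g->active = next;
	}
	return false;
}

static void gather_flush(struct gather *g, void (*release)(void *))
{
	struct gather_batch *batch = &g->local;

	while (batch) {
		struct gather_batch *next = batch->next;

		for (unsigned int i = 0; i < batch->nr; i++)
			release(batch->items[i]);
		if (batch != &g->local)
			free(batch);
		batch = next;
	}
	gather_init(g);
}

static void release_item(void *p)
{
	printf("releasing %p\n", p);
}

int main(void)
{
	struct gather g;
	int dummy[10];

	gather_init(&g);
	for (int i = 0; i < 10; i++)
		if (gather_add(&g, &dummy[i]))
			gather_flush(&g, release_item);
	gather_flush(&g, release_item);
	return 0;
}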
// SPDX-License-Identifier: GPL-2.0 /* * Workingset detection * * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner */ #include <linux/memcontrol.h> #include <linux/mm_inline.h> #include <linux/writeback.h> #include <linux/shmem_fs.h> #include <linux/pagemap.h> #include <linux/atomic.h> #include <linux/module.h> #include <linux/swap.h> #include <linux/dax.h> #include <linux/fs.h> #include <linux/mm.h> /* * Double CLOCK lists * * Per node, two clock lists are maintained for file pages: the * inactive and the active list. Freshly faulted pages start out at * the head of the inactive list and page reclaim scans pages from the * tail. Pages that are accessed multiple times on the inactive list * are promoted to the active list, to protect them from reclaim, * whereas active pages are demoted to the inactive list when the * active list grows too big. * * fault ------------------------+ * | * +--------------+ | +-------------+ * reclaim <- | inactive | <-+-- demotion | active | <--+ * +--------------+ +-------------+ | * | | * +-------------- promotion ------------------+ * * * Access frequency and refault distance * * A workload is thrashing when its pages are frequently used but they * are evicted from the inactive list every time before another access * would have promoted them to the active list. * * In cases where the average access distance between thrashing pages * is bigger than the size of memory there is nothing that can be * done - the thrashing set could never fit into memory under any * circumstance. * * However, the average access distance could be bigger than the * inactive list, yet smaller than the size of memory. In this case, * the set could fit into memory if it weren't for the currently * active pages - which may be used more, hopefully less frequently: * * +-memory available to cache-+ * | | * +-inactive------+-active----+ * a b | c d e f g h i | J K L M N | * +---------------+-----------+ * * It is prohibitively expensive to accurately track access frequency * of pages. But a reasonable approximation can be made to measure * thrashing on the inactive list, after which refaulting pages can be * activated optimistically to compete with the existing active pages. * * Approximating inactive page access frequency - Observations: * * 1. When a page is accessed for the first time, it is added to the * head of the inactive list, slides every existing inactive page * towards the tail by one slot, and pushes the current tail page * out of memory. * * 2. When a page is accessed for the second time, it is promoted to * the active list, shrinking the inactive list by one slot. This * also slides all inactive pages that were faulted into the cache * more recently than the activated page towards the tail of the * inactive list. * * Thus: * * 1. The sum of evictions and activations between any two points in * time indicate the minimum number of inactive pages accessed in * between. * * 2. Moving one inactive page N page slots towards the tail of the * list requires at least N inactive page accesses. * * Combining these: * * 1. When a page is finally evicted from memory, the number of * inactive pages accessed while the page was in cache is at least * the number of page slots on the inactive list. * * 2. In addition, measuring the sum of evictions and activations (E) * at the time of a page's eviction, and comparing it to another * reading (R) at the time the page faults back into memory tells * the minimum number of accesses while the page was not cached. 
* This is called the refault distance. * * Because the first access of the page was the fault and the second * access the refault, we combine the in-cache distance with the * out-of-cache distance to get the complete minimum access distance * of this page: * * NR_inactive + (R - E) * * And knowing the minimum access distance of a page, we can easily * tell if the page would be able to stay in cache assuming all page * slots in the cache were available: * * NR_inactive + (R - E) <= NR_inactive + NR_active * * If we have swap we should consider about NR_inactive_anon and * NR_active_anon, so for page cache and anonymous respectively: * * NR_inactive_file + (R - E) <= NR_inactive_file + NR_active_file * + NR_inactive_anon + NR_active_anon * * NR_inactive_anon + (R - E) <= NR_inactive_anon + NR_active_anon * + NR_inactive_file + NR_active_file * * Which can be further simplified to: * * (R - E) <= NR_active_file + NR_inactive_anon + NR_active_anon * * (R - E) <= NR_active_anon + NR_inactive_file + NR_active_file * * Put into words, the refault distance (out-of-cache) can be seen as * a deficit in inactive list space (in-cache). If the inactive list * had (R - E) more page slots, the page would not have been evicted * in between accesses, but activated instead. And on a full system, * the only thing eating into inactive list space is active pages. * * * Refaulting inactive pages * * All that is known about the active list is that the pages have been * accessed more than once in the past. This means that at any given * time there is actually a good chance that pages on the active list * are no longer in active use. * * So when a refault distance of (R - E) is observed and there are at * least (R - E) pages in the userspace workingset, the refaulting page * is activated optimistically in the hope that (R - E) pages are actually * used less frequently than the refaulting page - or even not used at * all anymore. * * That means if inactive cache is refaulting with a suitable refault * distance, we assume the cache workingset is transitioning and put * pressure on the current workingset. * * If this is wrong and demotion kicks in, the pages which are truly * used more frequently will be reactivated while the less frequently * used once will be evicted from memory. * * But if this is right, the stale pages will be pushed out of memory * and the used pages get to stay in cache. * * Refaulting active pages * * If on the other hand the refaulting pages have recently been * deactivated, it means that the active list is no longer protecting * actively used cache from reclaim. The cache is NOT transitioning to * a different workingset; the existing workingset is thrashing in the * space allocated to the page cache. * * * Implementation * * For each node's LRU lists, a counter for inactive evictions and * activations is maintained (node->nonresident_age). * * On eviction, a snapshot of this counter (along with some bits to * identify the node) is stored in the now empty page cache * slot of the evicted page. This is called a shadow entry. * * On cache misses for which there are shadow entries, an eligible * refault distance will immediately activate the refaulting page. */ #define WORKINGSET_SHIFT 1 #define EVICTION_SHIFT ((BITS_PER_LONG - BITS_PER_XA_VALUE) + \ WORKINGSET_SHIFT + NODES_SHIFT + \ MEM_CGROUP_ID_SHIFT) #define EVICTION_MASK (~0UL >> EVICTION_SHIFT) /* * Eviction timestamps need to be able to cover the full range of * actionable refaults. 
However, bits are tight in the xarray * entry, and after storing the identifier for the lruvec there might * not be enough left to represent every single actionable refault. In * that case, we have to sacrifice granularity for distance, and group * evictions into coarser buckets by shaving off lower timestamp bits. */ static unsigned int bucket_order __read_mostly; static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction, bool workingset) { eviction &= EVICTION_MASK; eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid; eviction = (eviction << NODES_SHIFT) | pgdat->node_id; eviction = (eviction << WORKINGSET_SHIFT) | workingset; return xa_mk_value(eviction); } static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat, unsigned long *evictionp, bool *workingsetp) { unsigned long entry = xa_to_value(shadow); int memcgid, nid; bool workingset; workingset = entry & ((1UL << WORKINGSET_SHIFT) - 1); entry >>= WORKINGSET_SHIFT; nid = entry & ((1UL << NODES_SHIFT) - 1); entry >>= NODES_SHIFT; memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1); entry >>= MEM_CGROUP_ID_SHIFT; *memcgidp = memcgid; *pgdat = NODE_DATA(nid); *evictionp = entry; *workingsetp = workingset; } #ifdef CONFIG_LRU_GEN static void *lru_gen_eviction(struct folio *folio) { int hist; unsigned long token; unsigned long min_seq; struct lruvec *lruvec; struct lru_gen_folio *lrugen; int type = folio_is_file_lru(folio); int delta = folio_nr_pages(folio); int refs = folio_lru_refs(folio); int tier = lru_tier_from_refs(refs); struct mem_cgroup *memcg = folio_memcg(folio); struct pglist_data *pgdat = folio_pgdat(folio); BUILD_BUG_ON(LRU_GEN_WIDTH + LRU_REFS_WIDTH > BITS_PER_LONG - EVICTION_SHIFT); lruvec = mem_cgroup_lruvec(memcg, pgdat); lrugen = &lruvec->lrugen; min_seq = READ_ONCE(lrugen->min_seq[type]); token = (min_seq << LRU_REFS_WIDTH) | max(refs - 1, 0); hist = lru_hist_from_seq(min_seq); atomic_long_add(delta, &lrugen->evicted[hist][type][tier]); return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs); } /* * Tests if the shadow entry is for a folio that was recently evicted. * Fills in @lruvec, @token, @workingset with the values unpacked from shadow. 
*/ static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec, unsigned long *token, bool *workingset) { int memcg_id; unsigned long min_seq; struct mem_cgroup *memcg; struct pglist_data *pgdat; unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset); memcg = mem_cgroup_from_id(memcg_id); *lruvec = mem_cgroup_lruvec(memcg, pgdat); min_seq = READ_ONCE((*lruvec)->lrugen.min_seq[file]); return (*token >> LRU_REFS_WIDTH) == (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH)); } static void lru_gen_refault(struct folio *folio, void *shadow) { bool recent; int hist, tier, refs; bool workingset; unsigned long token; struct lruvec *lruvec; struct lru_gen_folio *lrugen; int type = folio_is_file_lru(folio); int delta = folio_nr_pages(folio); rcu_read_lock(); recent = lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset); if (lruvec != folio_lruvec(folio)) goto unlock; mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta); if (!recent) goto unlock; lrugen = &lruvec->lrugen; hist = lru_hist_from_seq(READ_ONCE(lrugen->min_seq[type])); /* see the comment in folio_lru_refs() */ refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + workingset; tier = lru_tier_from_refs(refs); atomic_long_add(delta, &lrugen->refaulted[hist][type][tier]); mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta); /* * Count the following two cases as stalls: * 1. For pages accessed through page tables, hotter pages pushed out * hot pages which refaulted immediately. * 2. For pages accessed multiple times through file descriptors, * numbers of accesses might have been out of the range. */ if (lru_gen_in_fault() || refs == BIT(LRU_REFS_WIDTH)) { folio_set_workingset(folio); mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta); } unlock: rcu_read_unlock(); } #else /* !CONFIG_LRU_GEN */ static void *lru_gen_eviction(struct folio *folio) { return NULL; } static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec, unsigned long *token, bool *workingset) { return false; } static void lru_gen_refault(struct folio *folio, void *shadow) { } #endif /* CONFIG_LRU_GEN */ /** * workingset_age_nonresident - age non-resident entries as LRU ages * @lruvec: the lruvec that was aged * @nr_pages: the number of pages to count * * As in-memory pages are aged, non-resident pages need to be aged as * well, in order for the refault distances later on to be comparable * to the in-memory dimensions. This function allows reclaim and LRU * operations to drive the non-resident aging along in parallel. */ void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages) { /* * Reclaiming a cgroup means reclaiming all its children in a * round-robin fashion. That means that each cgroup has an LRU * order that is composed of the LRU orders of its child * cgroups; and every page has an LRU position not just in the * cgroup that owns it, but in all of that group's ancestors. * * So when the physical inactive list of a leaf cgroup ages, * the virtual inactive lists of all its parents, including * the root cgroup's, age as well. */ do { atomic_long_add(nr_pages, &lruvec->nonresident_age); } while ((lruvec = parent_lruvec(lruvec))); } /** * workingset_eviction - note the eviction of a folio from memory * @target_memcg: the cgroup that is causing the reclaim * @folio: the folio being evicted * * Return: a shadow entry to be stored in @folio->mapping->i_pages in place * of the evicted @folio so that a later refault can be detected. 
*/ void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg) { struct pglist_data *pgdat = folio_pgdat(folio); unsigned long eviction; struct lruvec *lruvec; int memcgid; /* Folio is fully exclusive and pins folio's memory cgroup pointer */ VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); if (lru_gen_enabled()) return lru_gen_eviction(folio); lruvec = mem_cgroup_lruvec(target_memcg, pgdat); /* XXX: target_memcg can be NULL, go through lruvec */ memcgid = mem_cgroup_id(lruvec_memcg(lruvec)); eviction = atomic_long_read(&lruvec->nonresident_age); eviction >>= bucket_order; workingset_age_nonresident(lruvec, folio_nr_pages(folio)); return pack_shadow(memcgid, pgdat, eviction, folio_test_workingset(folio)); } /** * workingset_test_recent - tests if the shadow entry is for a folio that was * recently evicted. Also fills in @workingset with the value unpacked from * shadow. * @shadow: the shadow entry to be tested. * @file: whether the corresponding folio is from the file lru. * @workingset: where the workingset value unpacked from shadow should * be stored. * * Return: true if the shadow is for a recently evicted folio; false otherwise. */ bool workingset_test_recent(void *shadow, bool file, bool *workingset) { struct mem_cgroup *eviction_memcg; struct lruvec *eviction_lruvec; unsigned long refault_distance; unsigned long workingset_size; unsigned long refault; int memcgid; struct pglist_data *pgdat; unsigned long eviction; if (lru_gen_enabled()) return lru_gen_test_recent(shadow, file, &eviction_lruvec, &eviction, workingset); unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset); eviction <<= bucket_order; /* * Look up the memcg associated with the stored ID. It might * have been deleted since the folio's eviction. * * Note that in rare events the ID could have been recycled * for a new cgroup that refaults a shared folio. This is * impossible to tell from the available data. However, this * should be a rare and limited disturbance, and activations * are always speculative anyway. Ultimately, it's the aging * algorithm's job to shake out the minimum access frequency * for the active cache. * * XXX: On !CONFIG_MEMCG, this will always return NULL; it * would be better if the root_mem_cgroup existed in all * configurations instead. */ eviction_memcg = mem_cgroup_from_id(memcgid); if (!mem_cgroup_disabled() && !eviction_memcg) return false; eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat); refault = atomic_long_read(&eviction_lruvec->nonresident_age); /* * Calculate the refault distance * * The unsigned subtraction here gives an accurate distance * across nonresident_age overflows in most cases. There is a * special case: usually, shadow entries have a short lifetime * and are either refaulted or reclaimed along with the inode * before they get too old. But it is not impossible for the * nonresident_age to lap a shadow entry in the field, which * can then result in a false small refault distance, leading * to a false activation should this old entry actually * refault again. However, earlier kernels used to deactivate * unconditionally with *every* reclaim invocation for the * longest time, so the occasional inappropriate activation * leading to pressure on the active list is not a problem. */ refault_distance = (refault - eviction) & EVICTION_MASK; /* * Compare the distance to the existing workingset size. 
We * don't activate pages that couldn't stay resident even if * all the memory was available to the workingset. Whether * workingset competition needs to consider anon or not depends * on having free swap space. */ workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE); if (!file) { workingset_size += lruvec_page_state(eviction_lruvec, NR_INACTIVE_FILE); } if (mem_cgroup_get_nr_swap_pages(eviction_memcg) > 0) { workingset_size += lruvec_page_state(eviction_lruvec, NR_ACTIVE_ANON); if (file) { workingset_size += lruvec_page_state(eviction_lruvec, NR_INACTIVE_ANON); } } return refault_distance <= workingset_size; } /** * workingset_refault - Evaluate the refault of a previously evicted folio. * @folio: The freshly allocated replacement folio. * @shadow: Shadow entry of the evicted folio. * * Calculates and evaluates the refault distance of the previously * evicted folio in the context of the node and the memcg whose memory * pressure caused the eviction. */ void workingset_refault(struct folio *folio, void *shadow) { bool file = folio_is_file_lru(folio); struct pglist_data *pgdat; struct mem_cgroup *memcg; struct lruvec *lruvec; bool workingset; long nr; if (lru_gen_enabled()) { lru_gen_refault(folio, shadow); return; } /* Flush stats (and potentially sleep) before holding RCU read lock */ mem_cgroup_flush_stats_ratelimited(); rcu_read_lock(); /* * The activation decision for this folio is made at the level * where the eviction occurred, as that is where the LRU order * during folio reclaim is being determined. * * However, the cgroup that will own the folio is the one that * is actually experiencing the refault event. */ nr = folio_nr_pages(folio); memcg = folio_memcg(folio); pgdat = folio_pgdat(folio); lruvec = mem_cgroup_lruvec(memcg, pgdat); mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr); if (!workingset_test_recent(shadow, file, &workingset)) goto out; folio_set_active(folio); workingset_age_nonresident(lruvec, nr); mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file, nr); /* Folio was active prior to eviction */ if (workingset) { folio_set_workingset(folio); /* * XXX: Move to folio_add_lru() when it supports new vs * putback */ lru_note_cost_refault(folio); mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr); } out: rcu_read_unlock(); } /** * workingset_activation - note a page activation * @folio: Folio that is being activated. */ void workingset_activation(struct folio *folio) { struct mem_cgroup *memcg; rcu_read_lock(); /* * Filter non-memcg pages here, e.g. unmap can call * mark_page_accessed() on VDSO pages. * * XXX: See workingset_refault() - this should return * root_mem_cgroup even for !CONFIG_MEMCG. */ memcg = folio_memcg_rcu(folio); if (!mem_cgroup_disabled() && !memcg) goto out; workingset_age_nonresident(folio_lruvec(folio), folio_nr_pages(folio)); out: rcu_read_unlock(); } /* * Shadow entries reflect the share of the working set that does not * fit into memory, so their number depends on the access pattern of * the workload. In most cases, they will refault or get reclaimed * along with the inode, but a (malicious) workload that streams * through files with a total size several times that of available * memory, while preventing the inodes from being reclaimed, can * create excessive amounts of shadow nodes. To keep a lid on this, * track shadow nodes and reclaim them when they grow way past the * point where they would still be useful. 
*/ struct list_lru shadow_nodes; void workingset_update_node(struct xa_node *node) { struct address_space *mapping; /* * Track non-empty nodes that contain only shadow entries; * unlink those that contain pages or are being freed. * * Avoid acquiring the list_lru lock when the nodes are * already where they should be. The list_empty() test is safe * as node->private_list is protected by the i_pages lock. */ mapping = container_of(node->array, struct address_space, i_pages); lockdep_assert_held(&mapping->i_pages.xa_lock); if (node->count && node->count == node->nr_values) { if (list_empty(&node->private_list)) { list_lru_add(&shadow_nodes, &node->private_list); __inc_lruvec_kmem_state(node, WORKINGSET_NODES); } } else { if (!list_empty(&node->private_list)) { list_lru_del(&shadow_nodes, &node->private_list); __dec_lruvec_kmem_state(node, WORKINGSET_NODES); } } } static unsigned long count_shadow_nodes(struct shrinker *shrinker, struct shrink_control *sc) { unsigned long max_nodes; unsigned long nodes; unsigned long pages; nodes = list_lru_shrink_count(&shadow_nodes, sc); if (!nodes) return SHRINK_EMPTY; /* * Approximate a reasonable limit for the nodes * containing shadow entries. We don't need to keep more * shadow entries than possible pages on the active list, * since refault distances bigger than that are dismissed. * * The size of the active list converges toward 100% of * overall page cache as memory grows, with only a tiny * inactive list. Assume the total cache size for that. * * Nodes might be sparsely populated, with only one shadow * entry in the extreme case. Obviously, we cannot keep one * node for every eligible shadow entry, so compromise on a * worst-case density of 1/8th. Below that, not all eligible * refaults can be detected anymore. * * On 64-bit with 7 xa_nodes per page and 64 slots * each, this will reclaim shadow entries when they consume * ~1.8% of available memory: * * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE */ #ifdef CONFIG_MEMCG if (sc->memcg) { struct lruvec *lruvec; int i; mem_cgroup_flush_stats(); lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid)); for (pages = 0, i = 0; i < NR_LRU_LISTS; i++) pages += lruvec_page_state_local(lruvec, NR_LRU_BASE + i); pages += lruvec_page_state_local( lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT; pages += lruvec_page_state_local( lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT; } else #endif pages = node_present_pages(sc->nid); max_nodes = pages >> (XA_CHUNK_SHIFT - 3); if (nodes <= max_nodes) return 0; return nodes - max_nodes; } static enum lru_status shadow_lru_isolate(struct list_head *item, struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) __must_hold(lru_lock) { struct xa_node *node = container_of(item, struct xa_node, private_list); struct address_space *mapping; int ret; /* * Page cache insertions and deletions synchronously maintain * the shadow node LRU under the i_pages lock and the * lru_lock. Because the page cache tree is emptied before * the inode can be destroyed, holding the lru_lock pins any * address_space that has nodes on the LRU. * * We can then safely transition to the i_pages lock to * pin only the address_space of the particular node we want * to reclaim, take the node off-LRU, and drop the lru_lock. 
*/ mapping = container_of(node->array, struct address_space, i_pages); /* Coming from the list, invert the lock order */ if (!xa_trylock(&mapping->i_pages)) { spin_unlock_irq(lru_lock); ret = LRU_RETRY; goto out; } /* For page cache we need to hold i_lock */ if (mapping->host != NULL) { if (!spin_trylock(&mapping->host->i_lock)) { xa_unlock(&mapping->i_pages); spin_unlock_irq(lru_lock); ret = LRU_RETRY; goto out; } } list_lru_isolate(lru, item); __dec_lruvec_kmem_state(node, WORKINGSET_NODES); spin_unlock(lru_lock); /* * The nodes should only contain one or more shadow entries, * no pages, so we expect to be able to remove them all and * delete and free the empty node afterwards. */ if (WARN_ON_ONCE(!node->nr_values)) goto out_invalid; if (WARN_ON_ONCE(node->count != node->nr_values)) goto out_invalid; xa_delete_node(node, workingset_update_node); __inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM); out_invalid: xa_unlock_irq(&mapping->i_pages); if (mapping->host != NULL) { if (mapping_shrinkable(mapping)) inode_add_lru(mapping->host); spin_unlock(&mapping->host->i_lock); } ret = LRU_REMOVED_RETRY; out: cond_resched(); spin_lock_irq(lru_lock); return ret; } static unsigned long scan_shadow_nodes(struct shrinker *shrinker, struct shrink_control *sc) { /* list_lru lock nests inside the IRQ-safe i_pages lock */ return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate, NULL); } static struct shrinker workingset_shadow_shrinker = { .count_objects = count_shadow_nodes, .scan_objects = scan_shadow_nodes, .seeks = 0, /* ->count reports only fully expendable nodes */ .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, }; /* * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe * i_pages lock. */ static struct lock_class_key shadow_nodes_key; static int __init workingset_init(void) { unsigned int timestamp_bits; unsigned int max_order; int ret; BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT); /* * Calculate the eviction bucket size to cover the longest * actionable refault distance, which is currently half of * memory (totalram_pages/2). However, memory hotplug may add * some more pages at runtime, so keep working with up to * double the initial memory by using totalram_pages as-is. */ timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT; max_order = fls_long(totalram_pages() - 1); if (max_order > timestamp_bits) bucket_order = max_order - timestamp_bits; pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n", timestamp_bits, max_order, bucket_order); ret = prealloc_shrinker(&workingset_shadow_shrinker, "mm-shadow"); if (ret) goto err; ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key, &workingset_shadow_shrinker); if (ret) goto err_list_lru; register_shrinker_prepared(&workingset_shadow_shrinker); return 0; err_list_lru: free_prealloced_shrinker(&workingset_shadow_shrinker); err: return ret; } module_init(workingset_init);
linux-master
mm/workingset.c
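pack_shadow() and unpack_shadow() above squeeze a memcg id, a node id, a workingset bit and a truncated eviction counter into a single xarray value. The userspace sketch below shows the same shift-and-mask packing with made-up field widths (WS_BITS, NODE_BITS and MEMCG_BITS stand in for WORKINGSET_SHIFT, NODES_SHIFT and MEM_CGROUP_ID_SHIFT); the recovered eviction counter is what later feeds the refault_distance = (refault - eviction) & EVICTION_MASK comparison.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define WS_BITS		1
#define NODE_BITS	6
#define MEMCG_BITS	16

static unsigned long pack(unsigned int memcg, unsigned int node,
			  unsigned long eviction, bool workingset)
{
	unsigned long v = eviction;

	v = (v << MEMCG_BITS) | memcg;
	v = (v << NODE_BITS) | node;
	v = (v << WS_BITS) | workingset;
	return v;
}

static void unpack(unsigned long v, unsigned int *memcg, unsigned int *node,
		   unsigned long *eviction, bool *workingset)
{
	*workingset = v & ((1UL << WS_BITS) - 1);
	v >>= WS_BITS;
	*node = v & ((1UL << NODE_BITS) - 1);
	v >>= NODE_BITS;
	*memcg = v & ((1UL << MEMCG_BITS) - 1);
	v >>= MEMCG_BITS;
	*eviction = v;
}

int main(void)
{
	unsigned int memcg, node;
	unsigned long eviction;
	bool ws;
	unsigned long shadow = pack(1234, 3, 987654UL, true);

	unpack(shadow, &memcg, &node, &eviction, &ws);
	assert(memcg == 1234 && node == 3 && eviction == 987654UL && ws);
	printf("shadow=%#lx memcg=%u node=%u eviction=%lu ws=%d\n",
	       shadow, memcg, node, eviction, ws);
	return 0;
}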
// SPDX-License-Identifier: GPL-2.0-only /* * This implements the various checks for CONFIG_HARDENED_USERCOPY*, * which are designed to protect kernel memory from needless exposure * and overwrite under many unintended conditions. This code is based * on PAX_USERCOPY, which is: * * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source * Security Inc. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/mm.h> #include <linux/highmem.h> #include <linux/kstrtox.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/thread_info.h> #include <linux/vmalloc.h> #include <linux/atomic.h> #include <linux/jump_label.h> #include <asm/sections.h> #include "slab.h" /* * Checks if a given pointer and length is contained by the current * stack frame (if possible). * * Returns: * NOT_STACK: not at all on the stack * GOOD_FRAME: fully within a valid stack frame * GOOD_STACK: within the current stack (when can't frame-check exactly) * BAD_STACK: error condition (invalid stack position or bad stack frame) */ static noinline int check_stack_object(const void *obj, unsigned long len) { const void * const stack = task_stack_page(current); const void * const stackend = stack + THREAD_SIZE; int ret; /* Object is not on the stack at all. */ if (obj + len <= stack || stackend <= obj) return NOT_STACK; /* * Reject: object partially overlaps the stack (passing the * check above means at least one end is within the stack, * so if this check fails, the other end is outside the stack). */ if (obj < stack || stackend < obj + len) return BAD_STACK; /* Check if object is safely within a valid frame. */ ret = arch_within_stack_frames(stack, stackend, obj, len); if (ret) return ret; /* Finally, check stack depth if possible. */ #ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER if (IS_ENABLED(CONFIG_STACK_GROWSUP)) { if ((void *)current_stack_pointer < obj + len) return BAD_STACK; } else { if (obj < (void *)current_stack_pointer) return BAD_STACK; } #endif return GOOD_STACK; } /* * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found * an unexpected state during a copy_from_user() or copy_to_user() call. * There are several checks being performed on the buffer by the * __check_object_size() function. Normal stack buffer usage should never * trip the checks, and kernel text addressing will always trip the check. * For cache objects, it is checking that only the whitelisted range of * bytes for a given cache is being accessed (via the cache's usersize and * useroffset fields). To adjust a cache whitelist, use the usercopy-aware * kmem_cache_create_usercopy() function to create the cache (and * carefully audit the whitelist range). */ void __noreturn usercopy_abort(const char *name, const char *detail, bool to_user, unsigned long offset, unsigned long len) { pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n", to_user ? "exposure" : "overwrite", to_user ? "from" : "to", name ? : "unknown?!", detail ? " '" : "", detail ? : "", detail ? "'" : "", offset, len); /* * For greater effect, it would be nice to do do_group_exit(), * but BUG() actually hooks all the lock-breaking and per-arch * Oops code, so that is used here instead. */ BUG(); } /* Returns true if any portion of [ptr,ptr+n) over laps with [low,high). 
*/ static bool overlaps(const unsigned long ptr, unsigned long n, unsigned long low, unsigned long high) { const unsigned long check_low = ptr; unsigned long check_high = check_low + n; /* Does not overlap if entirely above or entirely below. */ if (check_low >= high || check_high <= low) return false; return true; } /* Is this address range in the kernel text area? */ static inline void check_kernel_text_object(const unsigned long ptr, unsigned long n, bool to_user) { unsigned long textlow = (unsigned long)_stext; unsigned long texthigh = (unsigned long)_etext; unsigned long textlow_linear, texthigh_linear; if (overlaps(ptr, n, textlow, texthigh)) usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n); /* * Some architectures have virtual memory mappings with a secondary * mapping of the kernel text, i.e. there is more than one virtual * kernel address that points to the kernel image. It is usually * when there is a separate linear physical memory mapping, in that * __pa() is not just the reverse of __va(). This can be detected * and checked: */ textlow_linear = (unsigned long)lm_alias(textlow); /* No different mapping: we're done. */ if (textlow_linear == textlow) return; /* Check the secondary mapping... */ texthigh_linear = (unsigned long)lm_alias(texthigh); if (overlaps(ptr, n, textlow_linear, texthigh_linear)) usercopy_abort("linear kernel text", NULL, to_user, ptr - textlow_linear, n); } static inline void check_bogus_address(const unsigned long ptr, unsigned long n, bool to_user) { /* Reject if object wraps past end of memory. */ if (ptr + (n - 1) < ptr) usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n); /* Reject if NULL or ZERO-allocation. */ if (ZERO_OR_NULL_PTR(ptr)) usercopy_abort("null address", NULL, to_user, ptr, n); } static inline void check_heap_object(const void *ptr, unsigned long n, bool to_user) { unsigned long addr = (unsigned long)ptr; unsigned long offset; struct folio *folio; if (is_kmap_addr(ptr)) { offset = offset_in_page(ptr); if (n > PAGE_SIZE - offset) usercopy_abort("kmap", NULL, to_user, offset, n); return; } if (is_vmalloc_addr(ptr) && !pagefault_disabled()) { struct vmap_area *area = find_vmap_area(addr); if (!area) usercopy_abort("vmalloc", "no area", to_user, 0, n); if (n > area->va_end - addr) { offset = addr - area->va_start; usercopy_abort("vmalloc", NULL, to_user, offset, n); } return; } if (!virt_addr_valid(ptr)) return; folio = virt_to_folio(ptr); if (folio_test_slab(folio)) { /* Check slab allocator for flags and size. */ __check_heap_object(ptr, n, folio_slab(folio), to_user); } else if (folio_test_large(folio)) { offset = ptr - folio_address(folio); if (n > folio_size(folio) - offset) usercopy_abort("page alloc", NULL, to_user, offset, n); } } static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks); /* * Validates that the given object is: * - not bogus address * - fully contained by stack (or stack frame, when available) * - fully within SLAB object (or object whitelist area, when available) * - not in kernel text */ void __check_object_size(const void *ptr, unsigned long n, bool to_user) { if (static_branch_unlikely(&bypass_usercopy_checks)) return; /* Skip all tests if size is zero. */ if (!n) return; /* Check for invalid addresses. */ check_bogus_address((const unsigned long)ptr, n, to_user); /* Check for bad stack object. */ switch (check_stack_object(ptr, n)) { case NOT_STACK: /* Object is not touching the current process stack. 
*/ break; case GOOD_FRAME: case GOOD_STACK: /* * Object is either in the correct frame (when it * is possible to check) or just generally on the * process stack (when frame checking not available). */ return; default: usercopy_abort("process stack", NULL, to_user, #ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER IS_ENABLED(CONFIG_STACK_GROWSUP) ? ptr - (void *)current_stack_pointer : (void *)current_stack_pointer - ptr, #else 0, #endif n); } /* Check for bad heap object. */ check_heap_object(ptr, n, to_user); /* Check for object in kernel to avoid text exposure. */ check_kernel_text_object((const unsigned long)ptr, n, to_user); } EXPORT_SYMBOL(__check_object_size); static bool enable_checks __initdata = true; static int __init parse_hardened_usercopy(char *str) { if (kstrtobool(str, &enable_checks)) pr_warn("Invalid option string for hardened_usercopy: '%s'\n", str); return 1; } __setup("hardened_usercopy=", parse_hardened_usercopy); static int __init set_hardened_usercopy(void) { if (enable_checks == false) static_branch_enable(&bypass_usercopy_checks); return 1; } late_initcall(set_hardened_usercopy);
linux-master
mm/usercopy.c
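The hardened usercopy checks above reduce to half-open interval arithmetic: overlaps() tests [ptr, ptr + n) against [low, high), and check_bogus_address() rejects ranges whose end wraps around the address space. The sketch below lifts just those two predicates into a standalone program; range_overlaps() and range_wraps() are names picked for the illustration.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool range_overlaps(unsigned long ptr, unsigned long n,
			   unsigned long low, unsigned long high)
{
	unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;
	return true;
}

static bool range_wraps(unsigned long ptr, unsigned long n)
{
	/* Same trick as check_bogus_address(): wrap means ptr + n - 1 < ptr. */
	return ptr + (n - 1) < ptr;
}

int main(void)
{
	assert(range_overlaps(0x1000, 0x100, 0x1080, 0x2000)); /* tail overlaps */
	assert(!range_overlaps(0x1000, 0x80, 0x1080, 0x2000)); /* ends exactly at low */
	assert(range_wraps(~0UL - 4, 16));                      /* wraps past the end */
	assert(!range_wraps(0x1000, 16));
	printf("all interval checks behave as expected\n");
	return 0;
}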
// SPDX-License-Identifier: GPL-2.0 #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/page_reporting.h> #include <linux/gfp.h> #include <linux/export.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/scatterlist.h> #include "page_reporting.h" #include "internal.h" /* Initialize to an unsupported value */ unsigned int page_reporting_order = -1; static int page_order_update_notify(const char *val, const struct kernel_param *kp) { /* * If param is set beyond this limit, order is set to default * pageblock_order value */ return param_set_uint_minmax(val, kp, 0, MAX_ORDER); } static const struct kernel_param_ops page_reporting_param_ops = { .set = &page_order_update_notify, /* * For the get op, use param_get_int instead of param_get_uint. * This is to make sure that when unset the initialized value of * -1 is shown correctly */ .get = &param_get_int, }; module_param_cb(page_reporting_order, &page_reporting_param_ops, &page_reporting_order, 0644); MODULE_PARM_DESC(page_reporting_order, "Set page reporting order"); /* * This symbol is also a kernel parameter. Export the page_reporting_order * symbol so that other drivers can access it to control order values without * having to introduce another configurable parameter. Only one driver can * register with the page_reporting driver for the service, so we have just * one control parameter for the use case(which can be accessed in both * drivers) */ EXPORT_SYMBOL_GPL(page_reporting_order); #define PAGE_REPORTING_DELAY (2 * HZ) static struct page_reporting_dev_info __rcu *pr_dev_info __read_mostly; enum { PAGE_REPORTING_IDLE = 0, PAGE_REPORTING_REQUESTED, PAGE_REPORTING_ACTIVE }; /* request page reporting */ static void __page_reporting_request(struct page_reporting_dev_info *prdev) { unsigned int state; /* Check to see if we are in desired state */ state = atomic_read(&prdev->state); if (state == PAGE_REPORTING_REQUESTED) return; /* * If reporting is already active there is nothing we need to do. * Test against 0 as that represents PAGE_REPORTING_IDLE. */ state = atomic_xchg(&prdev->state, PAGE_REPORTING_REQUESTED); if (state != PAGE_REPORTING_IDLE) return; /* * Delay the start of work to allow a sizable queue to build. For * now we are limiting this to running no more than once every * couple of seconds. */ schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY); } /* notify prdev of free page reporting request */ void __page_reporting_notify(void) { struct page_reporting_dev_info *prdev; /* * We use RCU to protect the pr_dev_info pointer. In almost all * cases this should be present, however in the unlikely case of * a shutdown this will be NULL and we should exit. */ rcu_read_lock(); prdev = rcu_dereference(pr_dev_info); if (likely(prdev)) __page_reporting_request(prdev); rcu_read_unlock(); } static void page_reporting_drain(struct page_reporting_dev_info *prdev, struct scatterlist *sgl, unsigned int nents, bool reported) { struct scatterlist *sg = sgl; /* * Drain the now reported pages back into their respective * free lists/areas. We assume at least one page is populated. 
*/ do { struct page *page = sg_page(sg); int mt = get_pageblock_migratetype(page); unsigned int order = get_order(sg->length); __putback_isolated_page(page, order, mt); /* If the pages were not reported due to error skip flagging */ if (!reported) continue; /* * If page was not comingled with another page we can * consider the result to be "reported" since the page * hasn't been modified, otherwise we will need to * report on the new larger page when we make our way * up to that higher order. */ if (PageBuddy(page) && buddy_order(page) == order) __SetPageReported(page); } while ((sg = sg_next(sg))); /* reinitialize scatterlist now that it is empty */ sg_init_table(sgl, nents); } /* * The page reporting cycle consists of 4 stages, fill, report, drain, and * idle. We will cycle through the first 3 stages until we cannot obtain a * full scatterlist of pages, in that case we will switch to idle. */ static int page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone, unsigned int order, unsigned int mt, struct scatterlist *sgl, unsigned int *offset) { struct free_area *area = &zone->free_area[order]; struct list_head *list = &area->free_list[mt]; unsigned int page_len = PAGE_SIZE << order; struct page *page, *next; long budget; int err = 0; /* * Perform early check, if free area is empty there is * nothing to process so we can skip this free_list. */ if (list_empty(list)) return err; spin_lock_irq(&zone->lock); /* * Limit how many calls we will be making to the page reporting * device for this list. By doing this we avoid processing any * given list for too long. * * The current value used allows us enough calls to process over a * sixteenth of the current list plus one additional call to handle * any pages that may have already been present from the previous * list processed. This should result in us reporting all pages on * an idle system in about 30 seconds. * * The division here should be cheap since PAGE_REPORTING_CAPACITY * should always be a power of 2. */ budget = DIV_ROUND_UP(area->nr_free, PAGE_REPORTING_CAPACITY * 16); /* loop through free list adding unreported pages to sg list */ list_for_each_entry_safe(page, next, list, lru) { /* We are going to skip over the reported pages. */ if (PageReported(page)) continue; /* * If we fully consumed our budget then update our * state to indicate that we are requesting additional * processing and exit this list. */ if (budget < 0) { atomic_set(&prdev->state, PAGE_REPORTING_REQUESTED); next = page; break; } /* Attempt to pull page from list and place in scatterlist */ if (*offset) { if (!__isolate_free_page(page, order)) { next = page; break; } /* Add page to scatter list */ --(*offset); sg_set_page(&sgl[*offset], page, page_len, 0); continue; } /* * Make the first non-reported page in the free list * the new head of the free list before we release the * zone lock. 
*/ if (!list_is_first(&page->lru, list)) list_rotate_to_front(&page->lru, list); /* release lock before waiting on report processing */ spin_unlock_irq(&zone->lock); /* begin processing pages in local list */ err = prdev->report(prdev, sgl, PAGE_REPORTING_CAPACITY); /* reset offset since the full list was reported */ *offset = PAGE_REPORTING_CAPACITY; /* update budget to reflect call to report function */ budget--; /* reacquire zone lock and resume processing */ spin_lock_irq(&zone->lock); /* flush reported pages from the sg list */ page_reporting_drain(prdev, sgl, PAGE_REPORTING_CAPACITY, !err); /* * Reset next to first entry, the old next isn't valid * since we dropped the lock to report the pages */ next = list_first_entry(list, struct page, lru); /* exit on error */ if (err) break; } /* Rotate any leftover pages to the head of the freelist */ if (!list_entry_is_head(next, list, lru) && !list_is_first(&next->lru, list)) list_rotate_to_front(&next->lru, list); spin_unlock_irq(&zone->lock); return err; } static int page_reporting_process_zone(struct page_reporting_dev_info *prdev, struct scatterlist *sgl, struct zone *zone) { unsigned int order, mt, leftover, offset = PAGE_REPORTING_CAPACITY; unsigned long watermark; int err = 0; /* Generate minimum watermark to be able to guarantee progress */ watermark = low_wmark_pages(zone) + (PAGE_REPORTING_CAPACITY << page_reporting_order); /* * Cancel request if insufficient free memory or if we failed * to allocate page reporting statistics for the zone. */ if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) return err; /* Process each free list starting from lowest order/mt */ for (order = page_reporting_order; order <= MAX_ORDER; order++) { for (mt = 0; mt < MIGRATE_TYPES; mt++) { /* We do not pull pages from the isolate free list */ if (is_migrate_isolate(mt)) continue; err = page_reporting_cycle(prdev, zone, order, mt, sgl, &offset); if (err) return err; } } /* report the leftover pages before going idle */ leftover = PAGE_REPORTING_CAPACITY - offset; if (leftover) { sgl = &sgl[offset]; err = prdev->report(prdev, sgl, leftover); /* flush any remaining pages out from the last report */ spin_lock_irq(&zone->lock); page_reporting_drain(prdev, sgl, leftover, !err); spin_unlock_irq(&zone->lock); } return err; } static void page_reporting_process(struct work_struct *work) { struct delayed_work *d_work = to_delayed_work(work); struct page_reporting_dev_info *prdev = container_of(d_work, struct page_reporting_dev_info, work); int err = 0, state = PAGE_REPORTING_ACTIVE; struct scatterlist *sgl; struct zone *zone; /* * Change the state to "Active" so that we can track if there is * anyone requests page reporting after we complete our pass. If * the state is not altered by the end of the pass we will switch * to idle and quit scheduling reporting runs. */ atomic_set(&prdev->state, state); /* allocate scatterlist to store pages being reported on */ sgl = kmalloc_array(PAGE_REPORTING_CAPACITY, sizeof(*sgl), GFP_KERNEL); if (!sgl) goto err_out; sg_init_table(sgl, PAGE_REPORTING_CAPACITY); for_each_zone(zone) { err = page_reporting_process_zone(prdev, sgl, zone); if (err) break; } kfree(sgl); err_out: /* * If the state has reverted back to requested then there may be * additional pages to be processed. We will defer for 2s to allow * more pages to accumulate. 
*/ state = atomic_cmpxchg(&prdev->state, state, PAGE_REPORTING_IDLE); if (state == PAGE_REPORTING_REQUESTED) schedule_delayed_work(&prdev->work, PAGE_REPORTING_DELAY); } static DEFINE_MUTEX(page_reporting_mutex); DEFINE_STATIC_KEY_FALSE(page_reporting_enabled); int page_reporting_register(struct page_reporting_dev_info *prdev) { int err = 0; mutex_lock(&page_reporting_mutex); /* nothing to do if already in use */ if (rcu_dereference_protected(pr_dev_info, lockdep_is_held(&page_reporting_mutex))) { err = -EBUSY; goto err_out; } /* * If the page_reporting_order value is not set, we check if * an order is provided from the driver that is performing the * registration. If that is not provided either, we default to * pageblock_order. */ if (page_reporting_order == -1) { if (prdev->order > 0 && prdev->order <= MAX_ORDER) page_reporting_order = prdev->order; else page_reporting_order = pageblock_order; } /* initialize state and work structures */ atomic_set(&prdev->state, PAGE_REPORTING_IDLE); INIT_DELAYED_WORK(&prdev->work, &page_reporting_process); /* Begin initial flush of zones */ __page_reporting_request(prdev); /* Assign device to allow notifications */ rcu_assign_pointer(pr_dev_info, prdev); /* enable page reporting notification */ if (!static_key_enabled(&page_reporting_enabled)) { static_branch_enable(&page_reporting_enabled); pr_info("Free page reporting enabled\n"); } err_out: mutex_unlock(&page_reporting_mutex); return err; } EXPORT_SYMBOL_GPL(page_reporting_register); void page_reporting_unregister(struct page_reporting_dev_info *prdev) { mutex_lock(&page_reporting_mutex); if (prdev == rcu_dereference_protected(pr_dev_info, lockdep_is_held(&page_reporting_mutex))) { /* Disable page reporting notification */ RCU_INIT_POINTER(pr_dev_info, NULL); synchronize_rcu(); /* Flush any existing work, and lock it out */ cancel_delayed_work_sync(&prdev->work); } mutex_unlock(&page_reporting_mutex); } EXPORT_SYMBOL_GPL(page_reporting_unregister);
linux-master
mm/page_reporting.c
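Free page reporting above coordinates requesters and the worker through a small IDLE/REQUESTED/ACTIVE state machine driven by atomic exchange and compare-and-swap. The sketch below models that handshake with C11 atomics; report_request(), report_pass_done() and the single-threaded main() are inventions for the illustration, and the real code schedules delayed work instead of returning a boolean.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { REPORT_IDLE = 0, REPORT_REQUESTED, REPORT_ACTIVE };

static _Atomic int state = REPORT_IDLE;

/* A caller noticed work to do; schedule a pass unless one is already queued. */
static bool report_request(void)
{
	if (atomic_load(&state) == REPORT_REQUESTED)
		return false;			/* already queued */
	/* Only an IDLE -> REQUESTED transition actually schedules work. */
	return atomic_exchange(&state, REPORT_REQUESTED) == REPORT_IDLE;
}

/* Worker: the pass ran with state == ACTIVE; see if more was requested. */
static bool report_pass_done(void)
{
	int expected = REPORT_ACTIVE;

	/* Drop back to IDLE unless someone re-requested during the pass. */
	if (atomic_compare_exchange_strong(&state, &expected, REPORT_IDLE))
		return false;			/* nothing new, go idle */
	return expected == REPORT_REQUESTED;	/* re-run the pass */
}

int main(void)
{
	bool scheduled = report_request();

	printf("pass scheduled: %s\n", scheduled ? "yes" : "no");

	atomic_store(&state, REPORT_ACTIVE);	/* worker picks the request up */
	report_request();			/* new request arrives mid-pass */
	printf("rerun needed: %s\n", report_pass_done() ? "yes" : "no");
	return 0;
}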
// SPDX-License-Identifier: GPL-2.0-or-later /* * zswap.c - zswap driver file * * zswap is a cache that takes pages that are in the process * of being swapped out and attempts to compress and store them in a * RAM-based memory pool. This can result in a significant I/O reduction on * the swap device and, in the case where decompressing from RAM is faster * than reading from the swap device, can also improve workload performance. * * Copyright (C) 2012 Seth Jennings <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/cpu.h> #include <linux/highmem.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/types.h> #include <linux/atomic.h> #include <linux/rbtree.h> #include <linux/swap.h> #include <linux/crypto.h> #include <linux/scatterlist.h> #include <linux/mempool.h> #include <linux/zpool.h> #include <crypto/acompress.h> #include <linux/zswap.h> #include <linux/mm_types.h> #include <linux/page-flags.h> #include <linux/swapops.h> #include <linux/writeback.h> #include <linux/pagemap.h> #include <linux/workqueue.h> #include "swap.h" #include "internal.h" /********************************* * statistics **********************************/ /* Total bytes used by the compressed storage */ u64 zswap_pool_total_size; /* The number of compressed pages currently stored in zswap */ atomic_t zswap_stored_pages = ATOMIC_INIT(0); /* The number of same-value filled pages currently stored in zswap */ static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0); /* * The statistics below are not protected from concurrent access for * performance reasons so they may not be a 100% accurate. However, * they do provide useful information on roughly how many times a * certain event is occurring. */ /* Pool limit was hit (see zswap_max_pool_percent) */ static u64 zswap_pool_limit_hit; /* Pages written back when pool limit was reached */ static u64 zswap_written_back_pages; /* Store failed due to a reclaim failure after pool limit was reached */ static u64 zswap_reject_reclaim_fail; /* Compressed page was too big for the allocator to (optimally) store */ static u64 zswap_reject_compress_poor; /* Store failed because underlying allocator could not get memory */ static u64 zswap_reject_alloc_fail; /* Store failed because the entry metadata could not be allocated (rare) */ static u64 zswap_reject_kmemcache_fail; /* Duplicate store was encountered (rare) */ static u64 zswap_duplicate_entry; /* Shrinker work queue */ static struct workqueue_struct *shrink_wq; /* Pool limit was hit, we need to calm down */ static bool zswap_pool_reached_full; /********************************* * tunables **********************************/ #define ZSWAP_PARAM_UNSET "" static int zswap_setup(void); /* Enable/disable zswap */ static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON); static int zswap_enabled_param_set(const char *, const struct kernel_param *); static const struct kernel_param_ops zswap_enabled_param_ops = { .set = zswap_enabled_param_set, .get = param_get_bool, }; module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644); /* Crypto compressor to use */ static char *zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT; static int zswap_compressor_param_set(const char *, const struct kernel_param *); static const struct kernel_param_ops zswap_compressor_param_ops = { .set = zswap_compressor_param_set, .get = param_get_charp, .free = param_free_charp, }; module_param_cb(compressor, &zswap_compressor_param_ops, &zswap_compressor, 0644); /* 
Compressed storage zpool to use */ static char *zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT; static int zswap_zpool_param_set(const char *, const struct kernel_param *); static const struct kernel_param_ops zswap_zpool_param_ops = { .set = zswap_zpool_param_set, .get = param_get_charp, .free = param_free_charp, }; module_param_cb(zpool, &zswap_zpool_param_ops, &zswap_zpool_type, 0644); /* The maximum percentage of memory that the compressed pool can occupy */ static unsigned int zswap_max_pool_percent = 20; module_param_named(max_pool_percent, zswap_max_pool_percent, uint, 0644); /* The threshold for accepting new pages after the max_pool_percent was hit */ static unsigned int zswap_accept_thr_percent = 90; /* of max pool size */ module_param_named(accept_threshold_percent, zswap_accept_thr_percent, uint, 0644); /* * Enable/disable handling same-value filled pages (enabled by default). * If disabled every page is considered non-same-value filled. */ static bool zswap_same_filled_pages_enabled = true; module_param_named(same_filled_pages_enabled, zswap_same_filled_pages_enabled, bool, 0644); /* Enable/disable handling non-same-value filled pages (enabled by default) */ static bool zswap_non_same_filled_pages_enabled = true; module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled, bool, 0644); static bool zswap_exclusive_loads_enabled = IS_ENABLED( CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON); module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644); /* Number of zpools in zswap_pool (empirically determined for scalability) */ #define ZSWAP_NR_ZPOOLS 32 /********************************* * data structures **********************************/ struct crypto_acomp_ctx { struct crypto_acomp *acomp; struct acomp_req *req; struct crypto_wait wait; u8 *dstmem; struct mutex *mutex; }; /* * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock. * The only case where lru_lock is not acquired while holding tree.lock is * when a zswap_entry is taken off the lru for writeback, in that case it * needs to be verified that it's still valid in the tree. */ struct zswap_pool { struct zpool *zpools[ZSWAP_NR_ZPOOLS]; struct crypto_acomp_ctx __percpu *acomp_ctx; struct kref kref; struct list_head list; struct work_struct release_work; struct work_struct shrink_work; struct hlist_node node; char tfm_name[CRYPTO_MAX_ALG_NAME]; struct list_head lru; spinlock_t lru_lock; }; /* * struct zswap_entry * * This structure contains the metadata for tracking a single compressed * page within zswap. * * rbnode - links the entry into red-black tree for the appropriate swap type * swpentry - associated swap entry, the offset indexes into the red-black tree * refcount - the number of outstanding reference to the entry. This is needed * to protect against premature freeing of the entry by code * concurrent calls to load, invalidate, and writeback. The lock * for the zswap_tree structure that contains the entry must * be held while changing the refcount. Since the lock must * be held, there is no reason to also make refcount atomic. * length - the length in bytes of the compressed page data. Needed during * decompression. For a same value filled page length is 0, and both * pool and lru are invalid and must be ignored. 
* pool - the zswap_pool the entry's data is in * handle - zpool allocation handle that stores the compressed page data * value - value of the same-value filled pages which have same content * objcg - the obj_cgroup that the compressed memory is charged to * lru - handle to the pool's lru used to evict pages. */ struct zswap_entry { struct rb_node rbnode; swp_entry_t swpentry; int refcount; unsigned int length; struct zswap_pool *pool; union { unsigned long handle; unsigned long value; }; struct obj_cgroup *objcg; struct list_head lru; }; /* * The tree lock in the zswap_tree struct protects a few things: * - the rbtree * - the refcount field of each entry in the tree */ struct zswap_tree { struct rb_root rbroot; spinlock_t lock; }; static struct zswap_tree *zswap_trees[MAX_SWAPFILES]; /* RCU-protected iteration */ static LIST_HEAD(zswap_pools); /* protects zswap_pools list modification */ static DEFINE_SPINLOCK(zswap_pools_lock); /* pool counter to provide unique names to zpool */ static atomic_t zswap_pools_count = ATOMIC_INIT(0); enum zswap_init_type { ZSWAP_UNINIT, ZSWAP_INIT_SUCCEED, ZSWAP_INIT_FAILED }; static enum zswap_init_type zswap_init_state; /* used to ensure the integrity of initialization */ static DEFINE_MUTEX(zswap_init_lock); /* init completed, but couldn't create the initial pool */ static bool zswap_has_pool; /********************************* * helpers and fwd declarations **********************************/ #define zswap_pool_debug(msg, p) \ pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \ zpool_get_type((p)->zpools[0])) static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_tree *tree); static int zswap_pool_get(struct zswap_pool *pool); static void zswap_pool_put(struct zswap_pool *pool); static bool zswap_is_full(void) { return totalram_pages() * zswap_max_pool_percent / 100 < DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE); } static bool zswap_can_accept(void) { return totalram_pages() * zswap_accept_thr_percent / 100 * zswap_max_pool_percent / 100 > DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE); } static void zswap_update_total_size(void) { struct zswap_pool *pool; u64 total = 0; int i; rcu_read_lock(); list_for_each_entry_rcu(pool, &zswap_pools, list) for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) total += zpool_get_total_size(pool->zpools[i]); rcu_read_unlock(); zswap_pool_total_size = total; } /********************************* * zswap entry functions **********************************/ static struct kmem_cache *zswap_entry_cache; static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp) { struct zswap_entry *entry; entry = kmem_cache_alloc(zswap_entry_cache, gfp); if (!entry) return NULL; entry->refcount = 1; RB_CLEAR_NODE(&entry->rbnode); return entry; } static void zswap_entry_cache_free(struct zswap_entry *entry) { kmem_cache_free(zswap_entry_cache, entry); } /********************************* * rbtree functions **********************************/ static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset) { struct rb_node *node = root->rb_node; struct zswap_entry *entry; pgoff_t entry_offset; while (node) { entry = rb_entry(node, struct zswap_entry, rbnode); entry_offset = swp_offset(entry->swpentry); if (entry_offset > offset) node = node->rb_left; else if (entry_offset < offset) node = node->rb_right; else return entry; } return NULL; } /* * In the case that a entry with the same offset is found, a pointer to * the existing entry is stored in dupentry and the function returns -EEXIST */ static int zswap_rb_insert(struct 
rb_root *root, struct zswap_entry *entry, struct zswap_entry **dupentry) { struct rb_node **link = &root->rb_node, *parent = NULL; struct zswap_entry *myentry; pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry); while (*link) { parent = *link; myentry = rb_entry(parent, struct zswap_entry, rbnode); myentry_offset = swp_offset(myentry->swpentry); if (myentry_offset > entry_offset) link = &(*link)->rb_left; else if (myentry_offset < entry_offset) link = &(*link)->rb_right; else { *dupentry = myentry; return -EEXIST; } } rb_link_node(&entry->rbnode, parent, link); rb_insert_color(&entry->rbnode, root); return 0; } static bool zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry) { if (!RB_EMPTY_NODE(&entry->rbnode)) { rb_erase(&entry->rbnode, root); RB_CLEAR_NODE(&entry->rbnode); return true; } return false; } static struct zpool *zswap_find_zpool(struct zswap_entry *entry) { int i = 0; if (ZSWAP_NR_ZPOOLS > 1) i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS)); return entry->pool->zpools[i]; } /* * Carries out the common pattern of freeing an entry's zpool allocation, * freeing the entry itself, and decrementing the number of stored pages. */ static void zswap_free_entry(struct zswap_entry *entry) { if (entry->objcg) { obj_cgroup_uncharge_zswap(entry->objcg, entry->length); obj_cgroup_put(entry->objcg); } if (!entry->length) atomic_dec(&zswap_same_filled_pages); else { spin_lock(&entry->pool->lru_lock); list_del(&entry->lru); spin_unlock(&entry->pool->lru_lock); zpool_free(zswap_find_zpool(entry), entry->handle); zswap_pool_put(entry->pool); } zswap_entry_cache_free(entry); atomic_dec(&zswap_stored_pages); zswap_update_total_size(); } /* caller must hold the tree lock */ static void zswap_entry_get(struct zswap_entry *entry) { entry->refcount++; } /* caller must hold the tree lock * remove it from the tree and free it if nobody references the entry */ static void zswap_entry_put(struct zswap_tree *tree, struct zswap_entry *entry) { int refcount = --entry->refcount; WARN_ON_ONCE(refcount < 0); if (refcount == 0) { WARN_ON_ONCE(!RB_EMPTY_NODE(&entry->rbnode)); zswap_free_entry(entry); } } /* caller must hold the tree lock */ static struct zswap_entry *zswap_entry_find_get(struct rb_root *root, pgoff_t offset) { struct zswap_entry *entry; entry = zswap_rb_search(root, offset); if (entry) zswap_entry_get(entry); return entry; } /********************************* * per-cpu code **********************************/ static DEFINE_PER_CPU(u8 *, zswap_dstmem); /* * If users dynamically change the zpool type and compressor at runtime, i.e. * zswap is running, zswap can have more than one zpool on one cpu, but they * are sharing dstmem. So we need this mutex to be per-cpu. 
*/ static DEFINE_PER_CPU(struct mutex *, zswap_mutex); static int zswap_dstmem_prepare(unsigned int cpu) { struct mutex *mutex; u8 *dst; dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu)); if (!dst) return -ENOMEM; mutex = kmalloc_node(sizeof(*mutex), GFP_KERNEL, cpu_to_node(cpu)); if (!mutex) { kfree(dst); return -ENOMEM; } mutex_init(mutex); per_cpu(zswap_dstmem, cpu) = dst; per_cpu(zswap_mutex, cpu) = mutex; return 0; } static int zswap_dstmem_dead(unsigned int cpu) { struct mutex *mutex; u8 *dst; mutex = per_cpu(zswap_mutex, cpu); kfree(mutex); per_cpu(zswap_mutex, cpu) = NULL; dst = per_cpu(zswap_dstmem, cpu); kfree(dst); per_cpu(zswap_dstmem, cpu) = NULL; return 0; } static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node) { struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node); struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu); struct crypto_acomp *acomp; struct acomp_req *req; acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu)); if (IS_ERR(acomp)) { pr_err("could not alloc crypto acomp %s : %ld\n", pool->tfm_name, PTR_ERR(acomp)); return PTR_ERR(acomp); } acomp_ctx->acomp = acomp; req = acomp_request_alloc(acomp_ctx->acomp); if (!req) { pr_err("could not alloc crypto acomp_request %s\n", pool->tfm_name); crypto_free_acomp(acomp_ctx->acomp); return -ENOMEM; } acomp_ctx->req = req; crypto_init_wait(&acomp_ctx->wait); /* * if the backend of acomp is async zip, crypto_req_done() will wakeup * crypto_wait_req(); if the backend of acomp is scomp, the callback * won't be called, crypto_wait_req() will return without blocking. */ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &acomp_ctx->wait); acomp_ctx->mutex = per_cpu(zswap_mutex, cpu); acomp_ctx->dstmem = per_cpu(zswap_dstmem, cpu); return 0; } static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node) { struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node); struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu); if (!IS_ERR_OR_NULL(acomp_ctx)) { if (!IS_ERR_OR_NULL(acomp_ctx->req)) acomp_request_free(acomp_ctx->req); if (!IS_ERR_OR_NULL(acomp_ctx->acomp)) crypto_free_acomp(acomp_ctx->acomp); } return 0; } /********************************* * pool functions **********************************/ static struct zswap_pool *__zswap_pool_current(void) { struct zswap_pool *pool; pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list); WARN_ONCE(!pool && zswap_has_pool, "%s: no page storage pool!\n", __func__); return pool; } static struct zswap_pool *zswap_pool_current(void) { assert_spin_locked(&zswap_pools_lock); return __zswap_pool_current(); } static struct zswap_pool *zswap_pool_current_get(void) { struct zswap_pool *pool; rcu_read_lock(); pool = __zswap_pool_current(); if (!zswap_pool_get(pool)) pool = NULL; rcu_read_unlock(); return pool; } static struct zswap_pool *zswap_pool_last_get(void) { struct zswap_pool *pool, *last = NULL; rcu_read_lock(); list_for_each_entry_rcu(pool, &zswap_pools, list) last = pool; WARN_ONCE(!last && zswap_has_pool, "%s: no page storage pool!\n", __func__); if (!zswap_pool_get(last)) last = NULL; rcu_read_unlock(); return last; } /* type and compressor must be null-terminated */ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) { struct zswap_pool *pool; assert_spin_locked(&zswap_pools_lock); list_for_each_entry_rcu(pool, &zswap_pools, list) { if (strcmp(pool->tfm_name, compressor)) continue; /* all zpools share 
the same type */ if (strcmp(zpool_get_type(pool->zpools[0]), type)) continue; /* if we can't get it, it's about to be destroyed */ if (!zswap_pool_get(pool)) continue; return pool; } return NULL; } /* * If the entry is still valid in the tree, drop the initial ref and remove it * from the tree. This function must be called with an additional ref held, * otherwise it may race with another invalidation freeing the entry. */ static void zswap_invalidate_entry(struct zswap_tree *tree, struct zswap_entry *entry) { if (zswap_rb_erase(&tree->rbroot, entry)) zswap_entry_put(tree, entry); } static int zswap_reclaim_entry(struct zswap_pool *pool) { struct zswap_entry *entry; struct zswap_tree *tree; pgoff_t swpoffset; int ret; /* Get an entry off the LRU */ spin_lock(&pool->lru_lock); if (list_empty(&pool->lru)) { spin_unlock(&pool->lru_lock); return -EINVAL; } entry = list_last_entry(&pool->lru, struct zswap_entry, lru); list_del_init(&entry->lru); /* * Once the lru lock is dropped, the entry might get freed. The * swpoffset is copied to the stack, and entry isn't deref'd again * until the entry is verified to still be alive in the tree. */ swpoffset = swp_offset(entry->swpentry); tree = zswap_trees[swp_type(entry->swpentry)]; spin_unlock(&pool->lru_lock); /* Check for invalidate() race */ spin_lock(&tree->lock); if (entry != zswap_rb_search(&tree->rbroot, swpoffset)) { ret = -EAGAIN; goto unlock; } /* Hold a reference to prevent a free during writeback */ zswap_entry_get(entry); spin_unlock(&tree->lock); ret = zswap_writeback_entry(entry, tree); spin_lock(&tree->lock); if (ret) { /* Writeback failed, put entry back on LRU */ spin_lock(&pool->lru_lock); list_move(&entry->lru, &pool->lru); spin_unlock(&pool->lru_lock); goto put_unlock; } /* * Writeback started successfully, the page now belongs to the * swapcache. Drop the entry from zswap - unless invalidate already * took it out while we had the tree->lock released for IO. */ zswap_invalidate_entry(tree, entry); put_unlock: /* Drop local reference */ zswap_entry_put(tree, entry); unlock: spin_unlock(&tree->lock); return ret ? -EAGAIN : 0; } static void shrink_worker(struct work_struct *w) { struct zswap_pool *pool = container_of(w, typeof(*pool), shrink_work); int ret, failures = 0; do { ret = zswap_reclaim_entry(pool); if (ret) { zswap_reject_reclaim_fail++; if (ret != -EAGAIN) break; if (++failures == MAX_RECLAIM_RETRIES) break; } cond_resched(); } while (!zswap_can_accept()); zswap_pool_put(pool); } static struct zswap_pool *zswap_pool_create(char *type, char *compressor) { int i; struct zswap_pool *pool; char name[38]; /* 'zswap' + 32 char (max) num + \0 */ gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; int ret; if (!zswap_has_pool) { /* if either are unset, pool initialization failed, and we * need both params to be set correctly before trying to * create a pool. 
*/ if (!strcmp(type, ZSWAP_PARAM_UNSET)) return NULL; if (!strcmp(compressor, ZSWAP_PARAM_UNSET)) return NULL; } pool = kzalloc(sizeof(*pool), GFP_KERNEL); if (!pool) return NULL; for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) { /* unique name for each pool specifically required by zsmalloc */ snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count)); pool->zpools[i] = zpool_create_pool(type, name, gfp); if (!pool->zpools[i]) { pr_err("%s zpool not available\n", type); goto error; } } pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0])); strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name)); pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx); if (!pool->acomp_ctx) { pr_err("percpu alloc failed\n"); goto error; } ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node); if (ret) goto error; pr_debug("using %s compressor\n", pool->tfm_name); /* being the current pool takes 1 ref; this func expects the * caller to always add the new pool as the current pool */ kref_init(&pool->kref); INIT_LIST_HEAD(&pool->list); INIT_LIST_HEAD(&pool->lru); spin_lock_init(&pool->lru_lock); INIT_WORK(&pool->shrink_work, shrink_worker); zswap_pool_debug("created", pool); return pool; error: if (pool->acomp_ctx) free_percpu(pool->acomp_ctx); while (i--) zpool_destroy_pool(pool->zpools[i]); kfree(pool); return NULL; } static struct zswap_pool *__zswap_pool_create_fallback(void) { bool has_comp, has_zpool; has_comp = crypto_has_acomp(zswap_compressor, 0, 0); if (!has_comp && strcmp(zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) { pr_err("compressor %s not available, using default %s\n", zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT); param_free_charp(&zswap_compressor); zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT; has_comp = crypto_has_acomp(zswap_compressor, 0, 0); } if (!has_comp) { pr_err("default compressor %s not available\n", zswap_compressor); param_free_charp(&zswap_compressor); zswap_compressor = ZSWAP_PARAM_UNSET; } has_zpool = zpool_has_pool(zswap_zpool_type); if (!has_zpool && strcmp(zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT)) { pr_err("zpool %s not available, using default %s\n", zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT); param_free_charp(&zswap_zpool_type); zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT; has_zpool = zpool_has_pool(zswap_zpool_type); } if (!has_zpool) { pr_err("default zpool %s not available\n", zswap_zpool_type); param_free_charp(&zswap_zpool_type); zswap_zpool_type = ZSWAP_PARAM_UNSET; } if (!has_comp || !has_zpool) return NULL; return zswap_pool_create(zswap_zpool_type, zswap_compressor); } static void zswap_pool_destroy(struct zswap_pool *pool) { int i; zswap_pool_debug("destroying", pool); cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node); free_percpu(pool->acomp_ctx); for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) zpool_destroy_pool(pool->zpools[i]); kfree(pool); } static int __must_check zswap_pool_get(struct zswap_pool *pool) { if (!pool) return 0; return kref_get_unless_zero(&pool->kref); } static void __zswap_pool_release(struct work_struct *work) { struct zswap_pool *pool = container_of(work, typeof(*pool), release_work); synchronize_rcu(); /* nobody should have been able to get a kref... */ WARN_ON(kref_get_unless_zero(&pool->kref)); /* pool is now off zswap_pools list and has no references. 
*/ zswap_pool_destroy(pool); } static void __zswap_pool_empty(struct kref *kref) { struct zswap_pool *pool; pool = container_of(kref, typeof(*pool), kref); spin_lock(&zswap_pools_lock); WARN_ON(pool == zswap_pool_current()); list_del_rcu(&pool->list); INIT_WORK(&pool->release_work, __zswap_pool_release); schedule_work(&pool->release_work); spin_unlock(&zswap_pools_lock); } static void zswap_pool_put(struct zswap_pool *pool) { kref_put(&pool->kref, __zswap_pool_empty); } /********************************* * param callbacks **********************************/ static bool zswap_pool_changed(const char *s, const struct kernel_param *kp) { /* no change required */ if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool) return false; return true; } /* val must be a null-terminated string */ static int __zswap_param_set(const char *val, const struct kernel_param *kp, char *type, char *compressor) { struct zswap_pool *pool, *put_pool = NULL; char *s = strstrip((char *)val); int ret = 0; bool new_pool = false; mutex_lock(&zswap_init_lock); switch (zswap_init_state) { case ZSWAP_UNINIT: /* if this is load-time (pre-init) param setting, * don't create a pool; that's done during init. */ ret = param_set_charp(s, kp); break; case ZSWAP_INIT_SUCCEED: new_pool = zswap_pool_changed(s, kp); break; case ZSWAP_INIT_FAILED: pr_err("can't set param, initialization failed\n"); ret = -ENODEV; } mutex_unlock(&zswap_init_lock); /* no need to create a new pool, return directly */ if (!new_pool) return ret; if (!type) { if (!zpool_has_pool(s)) { pr_err("zpool %s not available\n", s); return -ENOENT; } type = s; } else if (!compressor) { if (!crypto_has_acomp(s, 0, 0)) { pr_err("compressor %s not available\n", s); return -ENOENT; } compressor = s; } else { WARN_ON(1); return -EINVAL; } spin_lock(&zswap_pools_lock); pool = zswap_pool_find_get(type, compressor); if (pool) { zswap_pool_debug("using existing", pool); WARN_ON(pool == zswap_pool_current()); list_del_rcu(&pool->list); } spin_unlock(&zswap_pools_lock); if (!pool) pool = zswap_pool_create(type, compressor); if (pool) ret = param_set_charp(s, kp); else ret = -EINVAL; spin_lock(&zswap_pools_lock); if (!ret) { put_pool = zswap_pool_current(); list_add_rcu(&pool->list, &zswap_pools); zswap_has_pool = true; } else if (pool) { /* add the possibly pre-existing pool to the end of the pools * list; if it's new (and empty) then it'll be removed and * destroyed by the put after we drop the lock */ list_add_tail_rcu(&pool->list, &zswap_pools); put_pool = pool; } spin_unlock(&zswap_pools_lock); if (!zswap_has_pool && !pool) { /* if initial pool creation failed, and this pool creation also * failed, maybe both compressor and zpool params were bad. * Allow changing this param, so pool creation will succeed * when the other param is changed. We already verified this * param is ok in the zpool_has_pool() or crypto_has_acomp() * checks above. */ ret = param_set_charp(s, kp); } /* drop the ref from either the old current pool, * or the new pool we failed to add */ if (put_pool) zswap_pool_put(put_pool); return ret; } static int zswap_compressor_param_set(const char *val, const struct kernel_param *kp) { return __zswap_param_set(val, kp, zswap_zpool_type, NULL); } static int zswap_zpool_param_set(const char *val, const struct kernel_param *kp) { return __zswap_param_set(val, kp, NULL, zswap_compressor); } static int zswap_enabled_param_set(const char *val, const struct kernel_param *kp) { int ret = -ENODEV; /* if this is load-time (pre-init) param setting, only set param. 
*/ if (system_state != SYSTEM_RUNNING) return param_set_bool(val, kp); mutex_lock(&zswap_init_lock); switch (zswap_init_state) { case ZSWAP_UNINIT: if (zswap_setup()) break; fallthrough; case ZSWAP_INIT_SUCCEED: if (!zswap_has_pool) pr_err("can't enable, no pool configured\n"); else ret = param_set_bool(val, kp); break; case ZSWAP_INIT_FAILED: pr_err("can't enable, initialization failed\n"); } mutex_unlock(&zswap_init_lock); return ret; } /********************************* * writeback code **********************************/ /* * Attempts to free an entry by adding a page to the swap cache, * decompressing the entry data into the page, and issuing a * bio write to write the page back to the swap device. * * This can be thought of as a "resumed writeback" of the page * to the swap device. We are basically resuming the same swap * writeback path that was intercepted with the zswap_store() * in the first place. After the page has been decompressed into * the swap cache, the compressed version stored by zswap can be * freed. */ static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_tree *tree) { swp_entry_t swpentry = entry->swpentry; struct page *page; struct scatterlist input, output; struct crypto_acomp_ctx *acomp_ctx; struct zpool *pool = zswap_find_zpool(entry); bool page_was_allocated; u8 *src, *tmp = NULL; unsigned int dlen; int ret; struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, }; if (!zpool_can_sleep_mapped(pool)) { tmp = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!tmp) return -ENOMEM; } /* try to allocate swap cache page */ page = __read_swap_cache_async(swpentry, GFP_KERNEL, NULL, 0, &page_was_allocated); if (!page) { ret = -ENOMEM; goto fail; } /* Found an existing page, we raced with load/swapin */ if (!page_was_allocated) { put_page(page); ret = -EEXIST; goto fail; } /* * Page is locked, and the swapcache is now secured against * concurrent swapping to and from the slot. Verify that the * swap entry hasn't been invalidated and recycled behind our * backs (our zswap_entry reference doesn't prevent that), to * avoid overwriting a new swap page with old compressed data. */ spin_lock(&tree->lock); if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) { spin_unlock(&tree->lock); delete_from_swap_cache(page_folio(page)); ret = -ENOMEM; goto fail; } spin_unlock(&tree->lock); /* decompress */ acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx); dlen = PAGE_SIZE; src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO); if (!zpool_can_sleep_mapped(pool)) { memcpy(tmp, src, entry->length); src = tmp; zpool_unmap_handle(pool, entry->handle); } mutex_lock(acomp_ctx->mutex); sg_init_one(&input, src, entry->length); sg_init_table(&output, 1); sg_set_page(&output, page, PAGE_SIZE, 0); acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen); ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait); dlen = acomp_ctx->req->dlen; mutex_unlock(acomp_ctx->mutex); if (!zpool_can_sleep_mapped(pool)) kfree(tmp); else zpool_unmap_handle(pool, entry->handle); BUG_ON(ret); BUG_ON(dlen != PAGE_SIZE); /* page is up to date */ SetPageUptodate(page); /* move it to the tail of the inactive list after end_writeback */ SetPageReclaim(page); /* start writeback */ __swap_writepage(page, &wbc); put_page(page); zswap_written_back_pages++; return ret; fail: if (!zpool_can_sleep_mapped(pool)) kfree(tmp); /* * If we get here because the page is already in swapcache, a * load may be happening concurrently. 
It is safe and okay to * not free the entry. It is also okay to return !0. */ return ret; } static int zswap_is_page_same_filled(void *ptr, unsigned long *value) { unsigned long *page; unsigned long val; unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1; page = (unsigned long *)ptr; val = page[0]; if (val != page[last_pos]) return 0; for (pos = 1; pos < last_pos; pos++) { if (val != page[pos]) return 0; } *value = val; return 1; } static void zswap_fill_page(void *ptr, unsigned long value) { unsigned long *page; page = (unsigned long *)ptr; memset_l(page, value, PAGE_SIZE / sizeof(unsigned long)); } bool zswap_store(struct folio *folio) { swp_entry_t swp = folio->swap; int type = swp_type(swp); pgoff_t offset = swp_offset(swp); struct page *page = &folio->page; struct zswap_tree *tree = zswap_trees[type]; struct zswap_entry *entry, *dupentry; struct scatterlist input, output; struct crypto_acomp_ctx *acomp_ctx; struct obj_cgroup *objcg = NULL; struct zswap_pool *pool; struct zpool *zpool; unsigned int dlen = PAGE_SIZE; unsigned long handle, value; char *buf; u8 *src, *dst; gfp_t gfp; int ret; VM_WARN_ON_ONCE(!folio_test_locked(folio)); VM_WARN_ON_ONCE(!folio_test_swapcache(folio)); /* Large folios aren't supported */ if (folio_test_large(folio)) return false; if (!zswap_enabled || !tree) return false; /* * XXX: zswap reclaim does not work with cgroups yet. Without a * cgroup-aware entry LRU, we will push out entries system-wide based on * local cgroup limits. */ objcg = get_obj_cgroup_from_folio(folio); if (objcg && !obj_cgroup_may_zswap(objcg)) goto reject; /* reclaim space if needed */ if (zswap_is_full()) { zswap_pool_limit_hit++; zswap_pool_reached_full = true; goto shrink; } if (zswap_pool_reached_full) { if (!zswap_can_accept()) goto shrink; else zswap_pool_reached_full = false; } /* allocate entry */ entry = zswap_entry_cache_alloc(GFP_KERNEL); if (!entry) { zswap_reject_kmemcache_fail++; goto reject; } if (zswap_same_filled_pages_enabled) { src = kmap_atomic(page); if (zswap_is_page_same_filled(src, &value)) { kunmap_atomic(src); entry->swpentry = swp_entry(type, offset); entry->length = 0; entry->value = value; atomic_inc(&zswap_same_filled_pages); goto insert_entry; } kunmap_atomic(src); } if (!zswap_non_same_filled_pages_enabled) goto freepage; /* if entry is successfully added, it keeps the reference */ entry->pool = zswap_pool_current_get(); if (!entry->pool) goto freepage; /* compress */ acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx); mutex_lock(acomp_ctx->mutex); dst = acomp_ctx->dstmem; sg_init_table(&input, 1); sg_set_page(&input, page, PAGE_SIZE, 0); /* zswap_dstmem is of size (PAGE_SIZE * 2). Reflect same in sg_list */ sg_init_one(&output, dst, PAGE_SIZE * 2); acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen); /* * it maybe looks a little bit silly that we send an asynchronous request, * then wait for its completion synchronously. This makes the process look * synchronous in fact. * Theoretically, acomp supports users send multiple acomp requests in one * acomp instance, then get those requests done simultaneously. but in this * case, zswap actually does store and load page by page, there is no * existing method to send the second page before the first page is done * in one thread doing zwap. * but in different threads running on different cpu, we have different * acomp instance, so multiple threads can do (de)compression in parallel. 
*/ ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait); dlen = acomp_ctx->req->dlen; if (ret) goto put_dstmem; /* store */ zpool = zswap_find_zpool(entry); gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; if (zpool_malloc_support_movable(zpool)) gfp |= __GFP_HIGHMEM | __GFP_MOVABLE; ret = zpool_malloc(zpool, dlen, gfp, &handle); if (ret == -ENOSPC) { zswap_reject_compress_poor++; goto put_dstmem; } if (ret) { zswap_reject_alloc_fail++; goto put_dstmem; } buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO); memcpy(buf, dst, dlen); zpool_unmap_handle(zpool, handle); mutex_unlock(acomp_ctx->mutex); /* populate entry */ entry->swpentry = swp_entry(type, offset); entry->handle = handle; entry->length = dlen; insert_entry: entry->objcg = objcg; if (objcg) { obj_cgroup_charge_zswap(objcg, entry->length); /* Account before objcg ref is moved to tree */ count_objcg_event(objcg, ZSWPOUT); } /* map */ spin_lock(&tree->lock); while (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) { zswap_duplicate_entry++; zswap_invalidate_entry(tree, dupentry); } if (entry->length) { spin_lock(&entry->pool->lru_lock); list_add(&entry->lru, &entry->pool->lru); spin_unlock(&entry->pool->lru_lock); } spin_unlock(&tree->lock); /* update stats */ atomic_inc(&zswap_stored_pages); zswap_update_total_size(); count_vm_event(ZSWPOUT); return true; put_dstmem: mutex_unlock(acomp_ctx->mutex); zswap_pool_put(entry->pool); freepage: zswap_entry_cache_free(entry); reject: if (objcg) obj_cgroup_put(objcg); return false; shrink: pool = zswap_pool_last_get(); if (pool) queue_work(shrink_wq, &pool->shrink_work); goto reject; } bool zswap_load(struct folio *folio) { swp_entry_t swp = folio->swap; int type = swp_type(swp); pgoff_t offset = swp_offset(swp); struct page *page = &folio->page; struct zswap_tree *tree = zswap_trees[type]; struct zswap_entry *entry; struct scatterlist input, output; struct crypto_acomp_ctx *acomp_ctx; u8 *src, *dst, *tmp; struct zpool *zpool; unsigned int dlen; bool ret; VM_WARN_ON_ONCE(!folio_test_locked(folio)); /* find */ spin_lock(&tree->lock); entry = zswap_entry_find_get(&tree->rbroot, offset); if (!entry) { spin_unlock(&tree->lock); return false; } spin_unlock(&tree->lock); if (!entry->length) { dst = kmap_atomic(page); zswap_fill_page(dst, entry->value); kunmap_atomic(dst); ret = true; goto stats; } zpool = zswap_find_zpool(entry); if (!zpool_can_sleep_mapped(zpool)) { tmp = kmalloc(entry->length, GFP_KERNEL); if (!tmp) { ret = false; goto freeentry; } } /* decompress */ dlen = PAGE_SIZE; src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO); if (!zpool_can_sleep_mapped(zpool)) { memcpy(tmp, src, entry->length); src = tmp; zpool_unmap_handle(zpool, entry->handle); } acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx); mutex_lock(acomp_ctx->mutex); sg_init_one(&input, src, entry->length); sg_init_table(&output, 1); sg_set_page(&output, page, PAGE_SIZE, 0); acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen); if (crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait)) WARN_ON(1); mutex_unlock(acomp_ctx->mutex); if (zpool_can_sleep_mapped(zpool)) zpool_unmap_handle(zpool, entry->handle); else kfree(tmp); ret = true; stats: count_vm_event(ZSWPIN); if (entry->objcg) count_objcg_event(entry->objcg, ZSWPIN); freeentry: spin_lock(&tree->lock); if (ret && zswap_exclusive_loads_enabled) { zswap_invalidate_entry(tree, entry); folio_mark_dirty(folio); } else if (entry->length) { spin_lock(&entry->pool->lru_lock); 
list_move(&entry->lru, &entry->pool->lru); spin_unlock(&entry->pool->lru_lock); } zswap_entry_put(tree, entry); spin_unlock(&tree->lock); return ret; } void zswap_invalidate(int type, pgoff_t offset) { struct zswap_tree *tree = zswap_trees[type]; struct zswap_entry *entry; /* find */ spin_lock(&tree->lock); entry = zswap_rb_search(&tree->rbroot, offset); if (!entry) { /* entry was written back */ spin_unlock(&tree->lock); return; } zswap_invalidate_entry(tree, entry); spin_unlock(&tree->lock); } void zswap_swapon(int type) { struct zswap_tree *tree; tree = kzalloc(sizeof(*tree), GFP_KERNEL); if (!tree) { pr_err("alloc failed, zswap disabled for swap type %d\n", type); return; } tree->rbroot = RB_ROOT; spin_lock_init(&tree->lock); zswap_trees[type] = tree; } void zswap_swapoff(int type) { struct zswap_tree *tree = zswap_trees[type]; struct zswap_entry *entry, *n; if (!tree) return; /* walk the tree and free everything */ spin_lock(&tree->lock); rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode) zswap_free_entry(entry); tree->rbroot = RB_ROOT; spin_unlock(&tree->lock); kfree(tree); zswap_trees[type] = NULL; } /********************************* * debugfs functions **********************************/ #ifdef CONFIG_DEBUG_FS #include <linux/debugfs.h> static struct dentry *zswap_debugfs_root; static int zswap_debugfs_init(void) { if (!debugfs_initialized()) return -ENODEV; zswap_debugfs_root = debugfs_create_dir("zswap", NULL); debugfs_create_u64("pool_limit_hit", 0444, zswap_debugfs_root, &zswap_pool_limit_hit); debugfs_create_u64("reject_reclaim_fail", 0444, zswap_debugfs_root, &zswap_reject_reclaim_fail); debugfs_create_u64("reject_alloc_fail", 0444, zswap_debugfs_root, &zswap_reject_alloc_fail); debugfs_create_u64("reject_kmemcache_fail", 0444, zswap_debugfs_root, &zswap_reject_kmemcache_fail); debugfs_create_u64("reject_compress_poor", 0444, zswap_debugfs_root, &zswap_reject_compress_poor); debugfs_create_u64("written_back_pages", 0444, zswap_debugfs_root, &zswap_written_back_pages); debugfs_create_u64("duplicate_entry", 0444, zswap_debugfs_root, &zswap_duplicate_entry); debugfs_create_u64("pool_total_size", 0444, zswap_debugfs_root, &zswap_pool_total_size); debugfs_create_atomic_t("stored_pages", 0444, zswap_debugfs_root, &zswap_stored_pages); debugfs_create_atomic_t("same_filled_pages", 0444, zswap_debugfs_root, &zswap_same_filled_pages); return 0; } #else static int zswap_debugfs_init(void) { return 0; } #endif /********************************* * module init and exit **********************************/ static int zswap_setup(void) { struct zswap_pool *pool; int ret; zswap_entry_cache = KMEM_CACHE(zswap_entry, 0); if (!zswap_entry_cache) { pr_err("entry cache creation failed\n"); goto cache_fail; } ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare", zswap_dstmem_prepare, zswap_dstmem_dead); if (ret) { pr_err("dstmem alloc failed\n"); goto dstmem_fail; } ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE, "mm/zswap_pool:prepare", zswap_cpu_comp_prepare, zswap_cpu_comp_dead); if (ret) goto hp_fail; pool = __zswap_pool_create_fallback(); if (pool) { pr_info("loaded using pool %s/%s\n", pool->tfm_name, zpool_get_type(pool->zpools[0])); list_add(&pool->list, &zswap_pools); zswap_has_pool = true; } else { pr_err("pool creation failed\n"); zswap_enabled = false; } shrink_wq = create_workqueue("zswap-shrink"); if (!shrink_wq) goto fallback_fail; if (zswap_debugfs_init()) pr_warn("debugfs initialization failed\n"); zswap_init_state = ZSWAP_INIT_SUCCEED; 
return 0; fallback_fail: if (pool) zswap_pool_destroy(pool); hp_fail: cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE); dstmem_fail: kmem_cache_destroy(zswap_entry_cache); cache_fail: /* if built-in, we aren't unloaded on failure; don't allow use */ zswap_init_state = ZSWAP_INIT_FAILED; zswap_enabled = false; return -ENOMEM; } static int __init zswap_init(void) { if (!zswap_enabled) return 0; return zswap_setup(); } /* must be late so crypto has time to come up */ late_initcall(zswap_init); MODULE_AUTHOR("Seth Jennings <[email protected]>"); MODULE_DESCRIPTION("Compressed cache for swap pages");
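The same-value optimization in the store path above (zswap_is_page_same_filled() recording only entry->value with a zero length instead of compressing the page) is easy to exercise outside the kernel. The following is a minimal user-space sketch of that word-scan; PAGE_SZ, page_same_filled() and the demo in main() are stand-ins invented for this illustration, not kernel interfaces.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096	/* stand-in for the kernel's PAGE_SIZE */

/* mirrors the word-scan done by zswap_is_page_same_filled() */
static bool page_same_filled(const void *ptr, unsigned long *value)
{
	const unsigned long *page = ptr;
	unsigned long val = page[0];
	size_t last_pos = PAGE_SZ / sizeof(*page) - 1;

	/* cheap early exit: first and last words must already match */
	if (val != page[last_pos])
		return false;

	for (size_t pos = 1; pos < last_pos; pos++)
		if (val != page[pos])
			return false;

	*value = val;	/* only this one word would need to be stored */
	return true;
}

int main(void)
{
	unsigned long buf[PAGE_SZ / sizeof(unsigned long)];
	unsigned long value;

	memset(buf, 0, sizeof(buf));
	printf("zeroed page same-filled: %d\n", page_same_filled(buf, &value));

	buf[17] = 0xabcdef;	/* one differing word breaks the optimization */
	printf("modified page same-filled: %d\n", page_same_filled(buf, &value));
	return 0;
}

Built with any C compiler, the sketch reports the zeroed buffer as same-filled and the modified one as not, which is the same distinction zswap_store() uses to choose between the value-only path and full compression.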
linux-master
mm/zswap.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/mm/page_isolation.c */ #include <linux/mm.h> #include <linux/page-isolation.h> #include <linux/pageblock-flags.h> #include <linux/memory.h> #include <linux/hugetlb.h> #include <linux/page_owner.h> #include <linux/migrate.h> #include "internal.h" #define CREATE_TRACE_POINTS #include <trace/events/page_isolation.h> /* * This function checks whether the range [start_pfn, end_pfn) includes * unmovable pages or not. The range must fall into a single pageblock and * consequently belong to a single zone. * * PageLRU check without isolation or lru_lock could race so that * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable * check without lock_page also may miss some movable non-lru pages at * race condition. So you can't expect this function should be exact. * * Returns a page without holding a reference. If the caller wants to * dereference that page (e.g., dumping), it has to make sure that it * cannot get removed (e.g., via memory unplug) concurrently. * */ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long end_pfn, int migratetype, int flags) { struct page *page = pfn_to_page(start_pfn); struct zone *zone = page_zone(page); unsigned long pfn; VM_BUG_ON(pageblock_start_pfn(start_pfn) != pageblock_start_pfn(end_pfn - 1)); if (is_migrate_cma_page(page)) { /* * CMA allocations (alloc_contig_range) really need to mark * isolate CMA pageblocks even when they are not movable in fact * so consider them movable here. */ if (is_migrate_cma(migratetype)) return NULL; return page; } for (pfn = start_pfn; pfn < end_pfn; pfn++) { page = pfn_to_page(pfn); /* * Both, bootmem allocations and memory holes are marked * PG_reserved and are unmovable. We can even have unmovable * allocations inside ZONE_MOVABLE, for example when * specifying "movablecore". */ if (PageReserved(page)) return page; /* * If the zone is movable and we have ruled out all reserved * pages then it should be reasonably safe to assume the rest * is movable. */ if (zone_idx(zone) == ZONE_MOVABLE) continue; /* * Hugepages are not in LRU lists, but they're movable. * THPs are on the LRU, but need to be counted as #small pages. * We need not scan over tail pages because we don't * handle each tail page individually in migration. */ if (PageHuge(page) || PageTransCompound(page)) { struct folio *folio = page_folio(page); unsigned int skip_pages; if (PageHuge(page)) { if (!hugepage_migration_supported(folio_hstate(folio))) return page; } else if (!folio_test_lru(folio) && !__folio_test_movable(folio)) { return page; } skip_pages = folio_nr_pages(folio) - folio_page_idx(folio, page); pfn += skip_pages - 1; continue; } /* * We can't use page_count without pin a page * because another CPU can free compound page. * This check already skips compound tails of THP * because their page->_refcount is zero at all time. */ if (!page_ref_count(page)) { if (PageBuddy(page)) pfn += (1 << buddy_order(page)) - 1; continue; } /* * The HWPoisoned page may be not in buddy system, and * page_count() is not 0. */ if ((flags & MEMORY_OFFLINE) && PageHWPoison(page)) continue; /* * We treat all PageOffline() pages as movable when offlining * to give drivers a chance to decrement their reference count * in MEM_GOING_OFFLINE in order to indicate that these pages * can be offlined as there are no direct references anymore. 
* For actually unmovable PageOffline() where the driver does * not support this, we will fail later when trying to actually * move these pages that still have a reference count > 0. * (false negatives in this function only) */ if ((flags & MEMORY_OFFLINE) && PageOffline(page)) continue; if (__PageMovable(page) || PageLRU(page)) continue; /* * If there are RECLAIMABLE pages, we need to check * it. But now, memory offline itself doesn't call * shrink_node_slabs() and it still to be fixed. */ return page; } return NULL; } /* * This function set pageblock migratetype to isolate if no unmovable page is * present in [start_pfn, end_pfn). The pageblock must intersect with * [start_pfn, end_pfn). */ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags, unsigned long start_pfn, unsigned long end_pfn) { struct zone *zone = page_zone(page); struct page *unmovable; unsigned long flags; unsigned long check_unmovable_start, check_unmovable_end; spin_lock_irqsave(&zone->lock, flags); /* * We assume the caller intended to SET migrate type to isolate. * If it is already set, then someone else must have raced and * set it before us. */ if (is_migrate_isolate_page(page)) { spin_unlock_irqrestore(&zone->lock, flags); return -EBUSY; } /* * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself. * We just check MOVABLE pages. * * Pass the intersection of [start_pfn, end_pfn) and the page's pageblock * to avoid redundant checks. */ check_unmovable_start = max(page_to_pfn(page), start_pfn); check_unmovable_end = min(pageblock_end_pfn(page_to_pfn(page)), end_pfn); unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end, migratetype, isol_flags); if (!unmovable) { unsigned long nr_pages; int mt = get_pageblock_migratetype(page); set_pageblock_migratetype(page, MIGRATE_ISOLATE); zone->nr_isolate_pageblock++; nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE, NULL); __mod_zone_freepage_state(zone, -nr_pages, mt); spin_unlock_irqrestore(&zone->lock, flags); return 0; } spin_unlock_irqrestore(&zone->lock, flags); if (isol_flags & REPORT_FAILURE) { /* * printk() with zone->lock held will likely trigger a * lockdep splat, so defer it here. */ dump_page(unmovable, "unmovable page"); } return -EBUSY; } static void unset_migratetype_isolate(struct page *page, int migratetype) { struct zone *zone; unsigned long flags, nr_pages; bool isolated_page = false; unsigned int order; struct page *buddy; zone = page_zone(page); spin_lock_irqsave(&zone->lock, flags); if (!is_migrate_isolate_page(page)) goto out; /* * Because freepage with more than pageblock_order on isolated * pageblock is restricted to merge due to freepage counting problem, * it is possible that there is free buddy page. * move_freepages_block() doesn't care of merge so we need other * approach in order to merge them. Isolation and free will make * these pages to be merged. */ if (PageBuddy(page)) { order = buddy_order(page); if (order >= pageblock_order && order < MAX_ORDER) { buddy = find_buddy_page_pfn(page, page_to_pfn(page), order, NULL); if (buddy && !is_migrate_isolate_page(buddy)) { isolated_page = !!__isolate_free_page(page, order); /* * Isolating a free page in an isolated pageblock * is expected to always work as watermarks don't * apply here. */ VM_WARN_ON(!isolated_page); } } } /* * If we isolate freepage with more than pageblock_order, there * should be no freepage in the range, so we could avoid costly * pageblock scanning for freepage moving. 
* * We didn't actually touch any of the isolated pages, so place them * to the tail of the freelist. This is an optimization for memory * onlining - just onlined memory won't immediately be considered for * allocation. */ if (!isolated_page) { nr_pages = move_freepages_block(zone, page, migratetype, NULL); __mod_zone_freepage_state(zone, nr_pages, migratetype); } set_pageblock_migratetype(page, migratetype); if (isolated_page) __putback_isolated_page(page, order, migratetype); zone->nr_isolate_pageblock--; out: spin_unlock_irqrestore(&zone->lock, flags); } static inline struct page * __first_valid_page(unsigned long pfn, unsigned long nr_pages) { int i; for (i = 0; i < nr_pages; i++) { struct page *page; page = pfn_to_online_page(pfn + i); if (!page) continue; return page; } return NULL; } /** * isolate_single_pageblock() -- tries to isolate a pageblock that might be * within a free or in-use page. * @boundary_pfn: pageblock-aligned pfn that a page might cross * @flags: isolation flags * @gfp_flags: GFP flags used for migrating pages * @isolate_before: isolate the pageblock before the boundary_pfn * @skip_isolation: the flag to skip the pageblock isolation in second * isolate_single_pageblock() * @migratetype: migrate type to set in error recovery. * * Free and in-use pages can be as big as MAX_ORDER and contain more than one * pageblock. When not all pageblocks within a page are isolated at the same * time, free page accounting can go wrong. For example, in the case of * MAX_ORDER = pageblock_order + 1, a MAX_ORDER page has two pagelbocks. * [ MAX_ORDER ] * [ pageblock0 | pageblock1 ] * When either pageblock is isolated, if it is a free page, the page is not * split into separate migratetype lists, which is supposed to; if it is an * in-use page and freed later, __free_one_page() does not split the free page * either. The function handles this by splitting the free page or migrating * the in-use page then splitting the free page. */ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags, gfp_t gfp_flags, bool isolate_before, bool skip_isolation, int migratetype) { unsigned long start_pfn; unsigned long isolate_pageblock; unsigned long pfn; struct zone *zone; int ret; VM_BUG_ON(!pageblock_aligned(boundary_pfn)); if (isolate_before) isolate_pageblock = boundary_pfn - pageblock_nr_pages; else isolate_pageblock = boundary_pfn; /* * scan at the beginning of MAX_ORDER_NR_PAGES aligned range to avoid * only isolating a subset of pageblocks from a bigger than pageblock * free or in-use page. Also make sure all to-be-isolated pageblocks * are within the same zone. */ zone = page_zone(pfn_to_page(isolate_pageblock)); start_pfn = max(ALIGN_DOWN(isolate_pageblock, MAX_ORDER_NR_PAGES), zone->zone_start_pfn); if (skip_isolation) { int mt __maybe_unused = get_pageblock_migratetype(pfn_to_page(isolate_pageblock)); VM_BUG_ON(!is_migrate_isolate(mt)); } else { ret = set_migratetype_isolate(pfn_to_page(isolate_pageblock), migratetype, flags, isolate_pageblock, isolate_pageblock + pageblock_nr_pages); if (ret) return ret; } /* * Bail out early when the to-be-isolated pageblock does not form * a free or in-use page across boundary_pfn: * * 1. isolate before boundary_pfn: the page after is not online * 2. isolate after boundary_pfn: the page before is not online * * This also ensures correctness. Without it, when isolate after * boundary_pfn and [start_pfn, boundary_pfn) are not online, * __first_valid_page() will return unexpected NULL in the for loop * below. 
*/ if (isolate_before) { if (!pfn_to_online_page(boundary_pfn)) return 0; } else { if (!pfn_to_online_page(boundary_pfn - 1)) return 0; } for (pfn = start_pfn; pfn < boundary_pfn;) { struct page *page = __first_valid_page(pfn, boundary_pfn - pfn); VM_BUG_ON(!page); pfn = page_to_pfn(page); /* * start_pfn is MAX_ORDER_NR_PAGES aligned, if there is any * free pages in [start_pfn, boundary_pfn), its head page will * always be in the range. */ if (PageBuddy(page)) { int order = buddy_order(page); if (pfn + (1UL << order) > boundary_pfn) { /* free page changed before split, check it again */ if (split_free_page(page, order, boundary_pfn - pfn)) continue; } pfn += 1UL << order; continue; } /* * migrate compound pages then let the free page handling code * above do the rest. If migration is not possible, just fail. */ if (PageCompound(page)) { struct page *head = compound_head(page); unsigned long head_pfn = page_to_pfn(head); unsigned long nr_pages = compound_nr(head); if (head_pfn + nr_pages <= boundary_pfn) { pfn = head_pfn + nr_pages; continue; } #if defined CONFIG_COMPACTION || defined CONFIG_CMA /* * hugetlb, lru compound (THP), and movable compound pages * can be migrated. Otherwise, fail the isolation. */ if (PageHuge(page) || PageLRU(page) || __PageMovable(page)) { int order; unsigned long outer_pfn; int page_mt = get_pageblock_migratetype(page); bool isolate_page = !is_migrate_isolate_page(page); struct compact_control cc = { .nr_migratepages = 0, .order = -1, .zone = page_zone(pfn_to_page(head_pfn)), .mode = MIGRATE_SYNC, .ignore_skip_hint = true, .no_set_skip_hint = true, .gfp_mask = gfp_flags, .alloc_contig = true, }; INIT_LIST_HEAD(&cc.migratepages); /* * XXX: mark the page as MIGRATE_ISOLATE so that * no one else can grab the freed page after migration. * Ideally, the page should be freed as two separate * pages to be added into separate migratetype free * lists. */ if (isolate_page) { ret = set_migratetype_isolate(page, page_mt, flags, head_pfn, head_pfn + nr_pages); if (ret) goto failed; } ret = __alloc_contig_migrate_range(&cc, head_pfn, head_pfn + nr_pages); /* * restore the page's migratetype so that it can * be split into separate migratetype free lists * later. */ if (isolate_page) unset_migratetype_isolate(page, page_mt); if (ret) goto failed; /* * reset pfn to the head of the free page, so * that the free page handling code above can split * the free page to the right migratetype list. * * head_pfn is not used here as a hugetlb page order * can be bigger than MAX_ORDER, but after it is * freed, the free page order is not. Use pfn within * the range to find the head of the free page. */ order = 0; outer_pfn = pfn; while (!PageBuddy(pfn_to_page(outer_pfn))) { /* stop if we cannot find the free page */ if (++order > MAX_ORDER) goto failed; outer_pfn &= ~0UL << order; } pfn = outer_pfn; continue; } else #endif goto failed; } pfn++; } return 0; failed: /* restore the original migratetype */ if (!skip_isolation) unset_migratetype_isolate(pfn_to_page(isolate_pageblock), migratetype); return -EBUSY; } /** * start_isolate_page_range() - mark page range MIGRATE_ISOLATE * @start_pfn: The first PFN of the range to be isolated. * @end_pfn: The last PFN of the range to be isolated. * @migratetype: Migrate type to set in error recovery. * @flags: The following flags are allowed (they can be combined in * a bit mask) * MEMORY_OFFLINE - isolate to offline (!allocate) memory * e.g., skip over PageHWPoison() pages * and PageOffline() pages. 
* REPORT_FAILURE - report details about the failure to * isolate the range * @gfp_flags: GFP flags used for migrating pages that sit across the * range boundaries. * * Making the page-allocation-type MIGRATE_ISOLATE means free pages in * the range will never be allocated. Any free pages and pages freed in the * future will not be allocated again. If the specified range includes migrate types * other than MOVABLE or CMA, this will fail with -EBUSY. To finally isolate all * pages in the range, the caller has to free all pages in the range; * test_pages_isolated() can be used to test it. * * The function first tries to isolate the pageblocks at the beginning and end * of the range, since there might be pages across the range boundaries. * Afterwards, it isolates the rest of the range. * * There is no high-level synchronization mechanism that prevents two threads * from trying to isolate overlapping ranges. If this happens, one thread * will notice pageblocks in the overlapping range already set to isolate. * This happens in set_migratetype_isolate(), and set_migratetype_isolate() * returns an error. We then clean up by restoring the migration type on * pageblocks we may have modified and return -EBUSY to the caller. This * prevents two threads from simultaneously working on overlapping ranges. * * Please note that there is no strong synchronization with the page allocator * either. Pages might be freed while their page blocks are marked ISOLATED. * A call to drain_all_pages() after isolation can flush most of them. However, * in some cases pages might still end up on pcp lists and that would allow * for their allocation even when they are in fact isolated already. Depending * on how strong of a guarantee the caller needs, zone_pcp_disable/enable() * might be used to flush and disable the pcplists before isolation and enable them after * unisolation. * * Return: 0 on success and -EBUSY if any part of the range cannot be isolated. 
*/ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, int migratetype, int flags, gfp_t gfp_flags) { unsigned long pfn; struct page *page; /* isolation is done at page block granularity */ unsigned long isolate_start = pageblock_start_pfn(start_pfn); unsigned long isolate_end = pageblock_align(end_pfn); int ret; bool skip_isolation = false; /* isolate [isolate_start, isolate_start + pageblock_nr_pages) pageblock */ ret = isolate_single_pageblock(isolate_start, flags, gfp_flags, false, skip_isolation, migratetype); if (ret) return ret; if (isolate_start == isolate_end - pageblock_nr_pages) skip_isolation = true; /* isolate [isolate_end - pageblock_nr_pages, isolate_end) pageblock */ ret = isolate_single_pageblock(isolate_end, flags, gfp_flags, true, skip_isolation, migratetype); if (ret) { unset_migratetype_isolate(pfn_to_page(isolate_start), migratetype); return ret; } /* skip isolated pageblocks at the beginning and end */ for (pfn = isolate_start + pageblock_nr_pages; pfn < isolate_end - pageblock_nr_pages; pfn += pageblock_nr_pages) { page = __first_valid_page(pfn, pageblock_nr_pages); if (page && set_migratetype_isolate(page, migratetype, flags, start_pfn, end_pfn)) { undo_isolate_page_range(isolate_start, pfn, migratetype); unset_migratetype_isolate( pfn_to_page(isolate_end - pageblock_nr_pages), migratetype); return -EBUSY; } } return 0; } /** * undo_isolate_page_range - undo effects of start_isolate_page_range() * @start_pfn: The first PFN of the isolated range * @end_pfn: The last PFN of the isolated range * @migratetype: New migrate type to set on the range * * This finds every MIGRATE_ISOLATE page block in the given range * and switches it to @migratetype. */ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, int migratetype) { unsigned long pfn; struct page *page; unsigned long isolate_start = pageblock_start_pfn(start_pfn); unsigned long isolate_end = pageblock_align(end_pfn); for (pfn = isolate_start; pfn < isolate_end; pfn += pageblock_nr_pages) { page = __first_valid_page(pfn, pageblock_nr_pages); if (!page || !is_migrate_isolate_page(page)) continue; unset_migratetype_isolate(page, migratetype); } } /* * Test all pages in the range is free(means isolated) or not. * all pages in [start_pfn...end_pfn) must be in the same zone. * zone->lock must be held before call this. * * Returns the last tested pfn. */ static unsigned long __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn, int flags) { struct page *page; while (pfn < end_pfn) { page = pfn_to_page(pfn); if (PageBuddy(page)) /* * If the page is on a free list, it has to be on * the correct MIGRATE_ISOLATE freelist. There is no * simple way to verify that as VM_BUG_ON(), though. */ pfn += 1 << buddy_order(page); else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page)) /* A HWPoisoned page cannot be also PageBuddy */ pfn++; else if ((flags & MEMORY_OFFLINE) && PageOffline(page) && !page_count(page)) /* * The responsible driver agreed to skip PageOffline() * pages when offlining memory by dropping its * reference in MEM_GOING_OFFLINE. */ pfn++; else break; } return pfn; } /** * test_pages_isolated - check if pageblocks in range are isolated * @start_pfn: The first PFN of the isolated range * @end_pfn: The first PFN *after* the isolated range * @isol_flags: Testing mode flags * * This tests if all in the specified range are free. * * If %MEMORY_OFFLINE is specified in @flags, it will consider * poisoned and offlined pages free as well. 
* * Caller must ensure the requested range doesn't span zones. * * Returns 0 if all pages in the range are isolated (i.e. free), -EBUSY if one or more pages are in use. */ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, int isol_flags) { unsigned long pfn, flags; struct page *page; struct zone *zone; int ret; /* * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages * are not necessarily aligned to pageblock_nr_pages. * Check the migratetype of each pageblock first. */ for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { page = __first_valid_page(pfn, pageblock_nr_pages); if (page && !is_migrate_isolate_page(page)) break; } page = __first_valid_page(start_pfn, end_pfn - start_pfn); if ((pfn < end_pfn) || !page) { ret = -EBUSY; goto out; } /* Check all pages are free or marked as ISOLATED */ zone = page_zone(page); spin_lock_irqsave(&zone->lock, flags); pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags); spin_unlock_irqrestore(&zone->lock, flags); ret = pfn < end_pfn ? -EBUSY : 0; out: trace_test_pages_isolated(start_pfn, end_pfn, pfn); return ret; }
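#if 0
/*
 * Illustrative sketch only (not part of this file): the caller pattern that
 * the kernel-doc above describes, loosely modelled on alloc_contig_range()
 * and memory offlining. Real callers also migrate any in-use pages between
 * isolation and the final test; that step is omitted here. The function
 * name and the choice of MIGRATE_MOVABLE / GFP_KERNEL are assumptions made
 * for the example, not a recommendation.
 */
static int isolation_usage_sketch(unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;

	/* Mark the covering pageblocks MIGRATE_ISOLATE so their free pages
	 * can no longer be handed out by the allocator. */
	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				       0, GFP_KERNEL);
	if (ret)
		return ret;

	/* Flush per-cpu free lists so recently freed pages reach the
	 * MIGRATE_ISOLATE free lists (see the pcplist caveat above). */
	drain_all_pages(page_zone(pfn_to_page(start_pfn)));

	/* Succeeds only if every page in the range is now free/isolated. */
	ret = test_pages_isolated(start_pfn, end_pfn, 0);

	/* Always restore the original migratetype when done. */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	return ret;
}
#endif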
linux-master
mm/page_isolation.c
// SPDX-License-Identifier: GPL-2.0-only /* * mm/readahead.c - address_space-level file readahead. * * Copyright (C) 2002, Linus Torvalds * * 09Apr2002 Andrew Morton * Initial version. */ /** * DOC: Readahead Overview * * Readahead is used to read content into the page cache before it is * explicitly requested by the application. Readahead only ever * attempts to read folios that are not yet in the page cache. If a * folio is present but not up-to-date, readahead will not try to read * it. In that case a simple ->read_folio() will be requested. * * Readahead is triggered when an application read request (whether a * system call or a page fault) finds that the requested folio is not in * the page cache, or that it is in the page cache and has the * readahead flag set. This flag indicates that the folio was read * as part of a previous readahead request and now that it has been * accessed, it is time for the next readahead. * * Each readahead request is partly synchronous read, and partly async * readahead. This is reflected in the struct file_ra_state which * contains ->size being the total number of pages, and ->async_size * which is the number of pages in the async section. The readahead * flag will be set on the first folio in this async section to trigger * a subsequent readahead. Once a series of sequential reads has been * established, there should be no need for a synchronous component and * all readahead request will be fully asynchronous. * * When either of the triggers causes a readahead, three numbers need * to be determined: the start of the region to read, the size of the * region, and the size of the async tail. * * The start of the region is simply the first page address at or after * the accessed address, which is not currently populated in the page * cache. This is found with a simple search in the page cache. * * The size of the async tail is determined by subtracting the size that * was explicitly requested from the determined request size, unless * this would be less than zero - then zero is used. NOTE THIS * CALCULATION IS WRONG WHEN THE START OF THE REGION IS NOT THE ACCESSED * PAGE. ALSO THIS CALCULATION IS NOT USED CONSISTENTLY. * * The size of the region is normally determined from the size of the * previous readahead which loaded the preceding pages. This may be * discovered from the struct file_ra_state for simple sequential reads, * or from examining the state of the page cache when multiple * sequential reads are interleaved. Specifically: where the readahead * was triggered by the readahead flag, the size of the previous * readahead is assumed to be the number of pages from the triggering * page to the start of the new readahead. In these cases, the size of * the previous readahead is scaled, often doubled, for the new * readahead, though see get_next_ra_size() for details. * * If the size of the previous read cannot be determined, the number of * preceding pages in the page cache is used to estimate the size of * a previous read. This estimate could easily be misled by random * reads being coincidentally adjacent, so it is ignored unless it is * larger than the current request, and it is not scaled up, unless it * is at the start of file. * * In general readahead is accelerated at the start of the file, as * reads from there are often sequential. There are other minor * adjustments to the readahead size in various special cases and these * are best discovered by reading the code. 
* * The above calculation, based on the previous readahead size, * determines the size of the readahead, to which any requested read * size may be added. * * Readahead requests are sent to the filesystem using the ->readahead() * address space operation, for which mpage_readahead() is a canonical * implementation. ->readahead() should normally initiate reads on all * folios, but may fail to read any or all folios without causing an I/O * error. The page cache reading code will issue a ->read_folio() request * for any folio which ->readahead() did not read, and only an error * from this will be final. * * ->readahead() will generally call readahead_folio() repeatedly to get * each folio from those prepared for readahead. It may fail to read a * folio by: * * * not calling readahead_folio() sufficiently many times, effectively * ignoring some folios, as might be appropriate if the path to * storage is congested. * * * failing to actually submit a read request for a given folio, * possibly due to insufficient resources, or * * * getting an error during subsequent processing of a request. * * In the last two cases, the folio should be unlocked by the filesystem * to indicate that the read attempt has failed. In the first case the * folio will be unlocked by the VFS. * * Those folios not in the final ``async_size`` of the request should be * considered to be important and ->readahead() should not fail them due * to congestion or temporary resource unavailability, but should wait * for necessary resources (e.g. memory or indexing information) to * become available. Folios in the final ``async_size`` may be * considered less urgent and failure to read them is more acceptable. * In this case it is best to use filemap_remove_folio() to remove the * folios from the page cache as is automatically done for folios that * were not fetched with readahead_folio(). This will allow a * subsequent synchronous readahead request to try them again. If they * are left in the page cache, then they will be read individually using * ->read_folio() which may be less efficient. */ #include <linux/blkdev.h> #include <linux/kernel.h> #include <linux/dax.h> #include <linux/gfp.h> #include <linux/export.h> #include <linux/backing-dev.h> #include <linux/task_io_accounting_ops.h> #include <linux/pagemap.h> #include <linux/psi.h> #include <linux/syscalls.h> #include <linux/file.h> #include <linux/mm_inline.h> #include <linux/blk-cgroup.h> #include <linux/fadvise.h> #include <linux/sched/mm.h> #include "internal.h" /* * Initialise a struct file's readahead state. Assumes that the caller has * memset *ra to zero. */ void file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) { ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages; ra->prev_pos = -1; } EXPORT_SYMBOL_GPL(file_ra_state_init); static void read_pages(struct readahead_control *rac) { const struct address_space_operations *aops = rac->mapping->a_ops; struct folio *folio; struct blk_plug plug; if (!readahead_count(rac)) return; if (unlikely(rac->_workingset)) psi_memstall_enter(&rac->_pflags); blk_start_plug(&plug); if (aops->readahead) { aops->readahead(rac); /* * Clean up the remaining folios. The sizes in ->ra * may be used to size the next readahead, so make sure * they accurately reflect what happened. 
*/ while ((folio = readahead_folio(rac)) != NULL) { unsigned long nr = folio_nr_pages(folio); folio_get(folio); rac->ra->size -= nr; if (rac->ra->async_size >= nr) { rac->ra->async_size -= nr; filemap_remove_folio(folio); } folio_unlock(folio); folio_put(folio); } } else { while ((folio = readahead_folio(rac)) != NULL) aops->read_folio(rac->file, folio); } blk_finish_plug(&plug); if (unlikely(rac->_workingset)) psi_memstall_leave(&rac->_pflags); rac->_workingset = false; BUG_ON(readahead_count(rac)); } /** * page_cache_ra_unbounded - Start unchecked readahead. * @ractl: Readahead control. * @nr_to_read: The number of pages to read. * @lookahead_size: Where to start the next readahead. * * This function is for filesystems to call when they want to start * readahead beyond a file's stated i_size. This is almost certainly * not the function you want to call. Use page_cache_async_readahead() * or page_cache_sync_readahead() instead. * * Context: File is referenced by caller. Mutexes may be held by caller. * May sleep, but will not reenter filesystem to reclaim memory. */ void page_cache_ra_unbounded(struct readahead_control *ractl, unsigned long nr_to_read, unsigned long lookahead_size) { struct address_space *mapping = ractl->mapping; unsigned long index = readahead_index(ractl); gfp_t gfp_mask = readahead_gfp_mask(mapping); unsigned long i; /* * Partway through the readahead operation, we will have added * locked pages to the page cache, but will not yet have submitted * them for I/O. Adding another page may need to allocate memory, * which can trigger memory reclaim. Telling the VM we're in * the middle of a filesystem operation will cause it to not * touch file-backed pages, preventing a deadlock. Most (all?) * filesystems already specify __GFP_NOFS in their mapping's * gfp_mask, but let's be explicit here. */ unsigned int nofs = memalloc_nofs_save(); filemap_invalidate_lock_shared(mapping); /* * Preallocate as many pages as we will need. */ for (i = 0; i < nr_to_read; i++) { struct folio *folio = xa_load(&mapping->i_pages, index + i); if (folio && !xa_is_value(folio)) { /* * Page already present? Kick off the current batch * of contiguous pages before continuing with the * next batch. This page may be the one we would * have intended to mark as Readahead, but we don't * have a stable reference to this page, and it's * not worth getting one just for that. */ read_pages(ractl); ractl->_index++; i = ractl->_index + ractl->_nr_pages - index - 1; continue; } folio = filemap_alloc_folio(gfp_mask, 0); if (!folio) break; if (filemap_add_folio(mapping, folio, index + i, gfp_mask) < 0) { folio_put(folio); read_pages(ractl); ractl->_index++; i = ractl->_index + ractl->_nr_pages - index - 1; continue; } if (i == nr_to_read - lookahead_size) folio_set_readahead(folio); ractl->_workingset |= folio_test_workingset(folio); ractl->_nr_pages++; } /* * Now start the IO. We ignore I/O errors - if the folio is not * uptodate then the caller will launch read_folio again, and * will then handle the error. */ read_pages(ractl); filemap_invalidate_unlock_shared(mapping); memalloc_nofs_restore(nofs); } EXPORT_SYMBOL_GPL(page_cache_ra_unbounded); /* * do_page_cache_ra() actually reads a chunk of disk. It allocates * the pages first, then submits them for I/O. This avoids the very bad * behaviour which would occur if page allocations are causing VM writeback. * We really don't want to intermingle reads and writes like that. 
*/ static void do_page_cache_ra(struct readahead_control *ractl, unsigned long nr_to_read, unsigned long lookahead_size) { struct inode *inode = ractl->mapping->host; unsigned long index = readahead_index(ractl); loff_t isize = i_size_read(inode); pgoff_t end_index; /* The last page we want to read */ if (isize == 0) return; end_index = (isize - 1) >> PAGE_SHIFT; if (index > end_index) return; /* Don't read past the page containing the last byte of the file */ if (nr_to_read > end_index - index) nr_to_read = end_index - index + 1; page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size); } /* * Chunk the readahead into 2 megabyte units, so that we don't pin too much * memory at once. */ void force_page_cache_ra(struct readahead_control *ractl, unsigned long nr_to_read) { struct address_space *mapping = ractl->mapping; struct file_ra_state *ra = ractl->ra; struct backing_dev_info *bdi = inode_to_bdi(mapping->host); unsigned long max_pages, index; if (unlikely(!mapping->a_ops->read_folio && !mapping->a_ops->readahead)) return; /* * If the request exceeds the readahead window, allow the read to * be up to the optimal hardware IO size */ index = readahead_index(ractl); max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages); nr_to_read = min_t(unsigned long, nr_to_read, max_pages); while (nr_to_read) { unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE; if (this_chunk > nr_to_read) this_chunk = nr_to_read; ractl->_index = index; do_page_cache_ra(ractl, this_chunk, 0); index += this_chunk; nr_to_read -= this_chunk; } } /* * Set the initial window size, round to next power of 2 and square * for small size, x 4 for medium, and x 2 for large * for 128k (32 page) max ra * 1-2 page = 16k, 3-4 page 32k, 5-8 page = 64k, > 8 page = 128k initial */ static unsigned long get_init_ra_size(unsigned long size, unsigned long max) { unsigned long newsize = roundup_pow_of_two(size); if (newsize <= max / 32) newsize = newsize * 4; else if (newsize <= max / 4) newsize = newsize * 2; else newsize = max; return newsize; } /* * Get the previous window size, ramp it up, and * return it as the new window size. */ static unsigned long get_next_ra_size(struct file_ra_state *ra, unsigned long max) { unsigned long cur = ra->size; if (cur < max / 16) return 4 * cur; if (cur <= max / 2) return 2 * cur; return max; } /* * On-demand readahead design. * * The fields in struct file_ra_state represent the most-recently-executed * readahead attempt: * * |<----- async_size ---------| * |------------------- size -------------------->| * |==================#===========================| * ^start ^page marked with PG_readahead * * To overlap application thinking time and disk I/O time, we do * `readahead pipelining': Do not wait until the application consumed all * readahead pages and stalled on the missing page at readahead_index; * Instead, submit an asynchronous readahead I/O as soon as there are * only async_size pages left in the readahead window. Normally async_size * will be equal to size, for maximum pipelining. * * In interleaved sequential reads, concurrent streams on the same fd can * be invalidating each other's readahead state. So we flag the new readahead * page at (start+size-async_size) with PG_readahead, and use it as readahead * indicator. The flag won't be set on already cached pages, to avoid the * readahead-for-nothing fuss, saving pointless page cache lookups. * * prev_pos tracks the last visited byte in the _previous_ read request. 
* It should be maintained by the caller, and will be used for detecting * small random reads. Note that the readahead algorithm checks loosely * for sequential patterns. Hence interleaved reads might be served as * sequential ones. * * There is a special-case: if the first page which the application tries to * read happens to be the first page of the file, it is assumed that a linear * read is about to happen and the window is immediately set to the initial size * based on I/O request size and the max_readahead. * * The code ramps up the readahead size aggressively at first, but slow down as * it approaches max_readhead. */ /* * Count contiguously cached pages from @index-1 to @index-@max, * this count is a conservative estimation of * - length of the sequential read sequence, or * - thrashing threshold in memory tight systems */ static pgoff_t count_history_pages(struct address_space *mapping, pgoff_t index, unsigned long max) { pgoff_t head; rcu_read_lock(); head = page_cache_prev_miss(mapping, index - 1, max); rcu_read_unlock(); return index - 1 - head; } /* * page cache context based readahead */ static int try_context_readahead(struct address_space *mapping, struct file_ra_state *ra, pgoff_t index, unsigned long req_size, unsigned long max) { pgoff_t size; size = count_history_pages(mapping, index, max); /* * not enough history pages: * it could be a random read */ if (size <= req_size) return 0; /* * starts from beginning of file: * it is a strong indication of long-run stream (or whole-file-read) */ if (size >= index) size *= 2; ra->start = index; ra->size = min(size + req_size, max); ra->async_size = 1; return 1; } static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index, pgoff_t mark, unsigned int order, gfp_t gfp) { int err; struct folio *folio = filemap_alloc_folio(gfp, order); if (!folio) return -ENOMEM; mark = round_up(mark, 1UL << order); if (index == mark) folio_set_readahead(folio); err = filemap_add_folio(ractl->mapping, folio, index, gfp); if (err) { folio_put(folio); return err; } ractl->_nr_pages += 1UL << order; ractl->_workingset |= folio_test_workingset(folio); return 0; } void page_cache_ra_order(struct readahead_control *ractl, struct file_ra_state *ra, unsigned int new_order) { struct address_space *mapping = ractl->mapping; pgoff_t index = readahead_index(ractl); pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT; pgoff_t mark = index + ra->size - ra->async_size; int err = 0; gfp_t gfp = readahead_gfp_mask(mapping); if (!mapping_large_folio_support(mapping) || ra->size < 4) goto fallback; limit = min(limit, index + ra->size - 1); if (new_order < MAX_PAGECACHE_ORDER) { new_order += 2; if (new_order > MAX_PAGECACHE_ORDER) new_order = MAX_PAGECACHE_ORDER; while ((1 << new_order) > ra->size) new_order--; } filemap_invalidate_lock_shared(mapping); while (index <= limit) { unsigned int order = new_order; /* Align with smaller pages if needed */ if (index & ((1UL << order) - 1)) { order = __ffs(index); if (order == 1) order = 0; } /* Don't allocate pages past EOF */ while (index + (1UL << order) - 1 > limit) { if (--order == 1) order = 0; } err = ra_alloc_folio(ractl, index, mark, order, gfp); if (err) break; index += 1UL << order; } if (index > limit) { ra->size += index - limit - 1; ra->async_size += index - limit - 1; } read_pages(ractl); filemap_invalidate_unlock_shared(mapping); /* * If there were already pages in the page cache, then we may have * left some gaps. Let the regular readahead code take care of this * situation. 
*/ if (!err) return; fallback: do_page_cache_ra(ractl, ra->size, ra->async_size); } /* * A minimal readahead algorithm for trivial sequential/random reads. */ static void ondemand_readahead(struct readahead_control *ractl, struct folio *folio, unsigned long req_size) { struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host); struct file_ra_state *ra = ractl->ra; unsigned long max_pages = ra->ra_pages; unsigned long add_pages; pgoff_t index = readahead_index(ractl); pgoff_t expected, prev_index; unsigned int order = folio ? folio_order(folio) : 0; /* * If the request exceeds the readahead window, allow the read to * be up to the optimal hardware IO size */ if (req_size > max_pages && bdi->io_pages > max_pages) max_pages = min(req_size, bdi->io_pages); /* * start of file */ if (!index) goto initial_readahead; /* * It's the expected callback index, assume sequential access. * Ramp up sizes, and push forward the readahead window. */ expected = round_up(ra->start + ra->size - ra->async_size, 1UL << order); if (index == expected || index == (ra->start + ra->size)) { ra->start += ra->size; ra->size = get_next_ra_size(ra, max_pages); ra->async_size = ra->size; goto readit; } /* * Hit a marked folio without valid readahead state. * E.g. interleaved reads. * Query the pagecache for async_size, which normally equals to * readahead size. Ramp it up and use it as the new readahead size. */ if (folio) { pgoff_t start; rcu_read_lock(); start = page_cache_next_miss(ractl->mapping, index + 1, max_pages); rcu_read_unlock(); if (!start || start - index > max_pages) return; ra->start = start; ra->size = start - index; /* old async_size */ ra->size += req_size; ra->size = get_next_ra_size(ra, max_pages); ra->async_size = ra->size; goto readit; } /* * oversize read */ if (req_size > max_pages) goto initial_readahead; /* * sequential cache miss * trivial case: (index - prev_index) == 1 * unaligned reads: (index - prev_index) == 0 */ prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT; if (index - prev_index <= 1UL) goto initial_readahead; /* * Query the page cache and look for the traces(cached history pages) * that a sequential stream would leave behind. */ if (try_context_readahead(ractl->mapping, ra, index, req_size, max_pages)) goto readit; /* * standalone, small random read * Read as is, and do not pollute the readahead state. */ do_page_cache_ra(ractl, req_size, 0); return; initial_readahead: ra->start = index; ra->size = get_init_ra_size(req_size, max_pages); ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size; readit: /* * Will this read hit the readahead marker made by itself? * If so, trigger the readahead marker hit now, and merge * the resulted next readahead window into the current one. * Take care of maximum IO pages as above. */ if (index == ra->start && ra->size == ra->async_size) { add_pages = get_next_ra_size(ra, max_pages); if (ra->size + add_pages <= max_pages) { ra->async_size = add_pages; ra->size += add_pages; } else { ra->size = max_pages; ra->async_size = max_pages >> 1; } } ractl->_index = ra->start; page_cache_ra_order(ractl, ra, order); } void page_cache_sync_ra(struct readahead_control *ractl, unsigned long req_count) { bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM); /* * Even if readahead is disabled, issue this request as readahead * as we'll need it to satisfy the requested range. The forced * readahead will do the right thing and limit the read to just the * requested range, which we'll set to 1 page for this case. 
*/ if (!ractl->ra->ra_pages || blk_cgroup_congested()) { if (!ractl->file) return; req_count = 1; do_forced_ra = true; } /* be dumb */ if (do_forced_ra) { force_page_cache_ra(ractl, req_count); return; } ondemand_readahead(ractl, NULL, req_count); } EXPORT_SYMBOL_GPL(page_cache_sync_ra); void page_cache_async_ra(struct readahead_control *ractl, struct folio *folio, unsigned long req_count) { /* no readahead */ if (!ractl->ra->ra_pages) return; /* * Same bit is used for PG_readahead and PG_reclaim. */ if (folio_test_writeback(folio)) return; folio_clear_readahead(folio); if (blk_cgroup_congested()) return; ondemand_readahead(ractl, folio, req_count); } EXPORT_SYMBOL_GPL(page_cache_async_ra); ssize_t ksys_readahead(int fd, loff_t offset, size_t count) { ssize_t ret; struct fd f; ret = -EBADF; f = fdget(fd); if (!f.file || !(f.file->f_mode & FMODE_READ)) goto out; /* * The readahead() syscall is intended to run only on files * that can execute readahead. If readahead is not possible * on this file, then we must return -EINVAL. */ ret = -EINVAL; if (!f.file->f_mapping || !f.file->f_mapping->a_ops || !S_ISREG(file_inode(f.file)->i_mode)) goto out; ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED); out: fdput(f); return ret; } SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count) { return ksys_readahead(fd, offset, count); } #if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_READAHEAD) COMPAT_SYSCALL_DEFINE4(readahead, int, fd, compat_arg_u64_dual(offset), size_t, count) { return ksys_readahead(fd, compat_arg_u64_glue(offset), count); } #endif /** * readahead_expand - Expand a readahead request * @ractl: The request to be expanded * @new_start: The revised start * @new_len: The revised size of the request * * Attempt to expand a readahead request outwards from the current size to the * specified size by inserting locked pages before and after the current window * to increase the size to the new window. This may involve the insertion of * THPs, in which case the window may get expanded even beyond what was * requested. * * The algorithm will stop if it encounters a conflicting page already in the * pagecache and leave a smaller expansion than requested. * * The caller must check for this by examining the revised @ractl object for a * different expansion than was requested. 
*/ void readahead_expand(struct readahead_control *ractl, loff_t new_start, size_t new_len) { struct address_space *mapping = ractl->mapping; struct file_ra_state *ra = ractl->ra; pgoff_t new_index, new_nr_pages; gfp_t gfp_mask = readahead_gfp_mask(mapping); new_index = new_start / PAGE_SIZE; /* Expand the leading edge downwards */ while (ractl->_index > new_index) { unsigned long index = ractl->_index - 1; struct folio *folio = xa_load(&mapping->i_pages, index); if (folio && !xa_is_value(folio)) return; /* Folio apparently present */ folio = filemap_alloc_folio(gfp_mask, 0); if (!folio) return; if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) { folio_put(folio); return; } if (unlikely(folio_test_workingset(folio)) && !ractl->_workingset) { ractl->_workingset = true; psi_memstall_enter(&ractl->_pflags); } ractl->_nr_pages++; ractl->_index = folio->index; } new_len += new_start - readahead_pos(ractl); new_nr_pages = DIV_ROUND_UP(new_len, PAGE_SIZE); /* Expand the trailing edge upwards */ while (ractl->_nr_pages < new_nr_pages) { unsigned long index = ractl->_index + ractl->_nr_pages; struct folio *folio = xa_load(&mapping->i_pages, index); if (folio && !xa_is_value(folio)) return; /* Folio apparently present */ folio = filemap_alloc_folio(gfp_mask, 0); if (!folio) return; if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) { folio_put(folio); return; } if (unlikely(folio_test_workingset(folio)) && !ractl->_workingset) { ractl->_workingset = true; psi_memstall_enter(&ractl->_pflags); } ractl->_nr_pages++; if (ra) { ra->size++; ra->async_size++; } } } EXPORT_SYMBOL(readahead_expand);
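#if 0
/*
 * Illustrative sketch only (not part of this file): a minimal ->readahead()
 * address_space operation following the contract in the DOC comment above.
 * my_fs_submit_read() is a hypothetical helper that queues the I/O and
 * unlocks the folio from its completion handler; on submission failure the
 * folio is unlocked here, so a later access falls back to ->read_folio().
 */
static void my_fs_readahead(struct readahead_control *rac)
{
	struct folio *folio;

	/* readahead_folio() hands out each prepared, locked folio in turn. */
	while ((folio = readahead_folio(rac)) != NULL) {
		if (my_fs_submit_read(rac->file, folio) < 0)
			folio_unlock(folio);
	}
}
#endif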
linux-master
mm/readahead.c
// SPDX-License-Identifier: GPL-2.0-only #include <linux/mm.h> #include <linux/io-mapping.h> /** * io_mapping_map_user - remap an I/O mapping to userspace * @iomap: the source io_mapping * @vma: user vma to map to * @addr: target user address to start at * @pfn: physical address of kernel memory * @size: size of map area * * Note: this is only safe if the mm semaphore is held when called. */ int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size) { vm_flags_t expected_flags = VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; if (WARN_ON_ONCE((vma->vm_flags & expected_flags) != expected_flags)) return -EINVAL; /* We rely on prevalidation of the io-mapping to skip track_pfn(). */ return remap_pfn_range_notrack(vma, addr, pfn, size, __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) | (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK))); } EXPORT_SYMBOL_GPL(io_mapping_map_user);
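#if 0
/*
 * Illustrative sketch only (not part of this file): how a driver's ->mmap()
 * handler might use io_mapping_map_user(). "struct my_dev", its "iomap" and
 * "aperture_base" members, and my_dev_mmap() are hypothetical. The helper
 * above insists that VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP are already set
 * on the vma, and the mmap lock is held by the ->mmap() caller, matching the
 * "mm semaphore" note in the kernel-doc.
 */
static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;
	unsigned long pfn = dev->aperture_base >> PAGE_SHIFT;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

	return io_mapping_map_user(&dev->iomap, vma, vma->vm_start, pfn,
				   vma->vm_end - vma->vm_start);
}
#endif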
linux-master
mm/io-mapping.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright IBM Corporation, 2021 * * Author: Mike Rapoport <[email protected]> */ #include <linux/mm.h> #include <linux/fs.h> #include <linux/swap.h> #include <linux/mount.h> #include <linux/memfd.h> #include <linux/bitops.h> #include <linux/printk.h> #include <linux/pagemap.h> #include <linux/syscalls.h> #include <linux/pseudo_fs.h> #include <linux/secretmem.h> #include <linux/set_memory.h> #include <linux/sched/signal.h> #include <uapi/linux/magic.h> #include <asm/tlbflush.h> #include "internal.h" #undef pr_fmt #define pr_fmt(fmt) "secretmem: " fmt /* * Define mode and flag masks to allow validation of the system call * parameters. */ #define SECRETMEM_MODE_MASK (0x0) #define SECRETMEM_FLAGS_MASK SECRETMEM_MODE_MASK static bool secretmem_enable __ro_after_init = 1; module_param_named(enable, secretmem_enable, bool, 0400); MODULE_PARM_DESC(secretmem_enable, "Enable secretmem and memfd_secret(2) system call"); static atomic_t secretmem_users; bool secretmem_active(void) { return !!atomic_read(&secretmem_users); } static vm_fault_t secretmem_fault(struct vm_fault *vmf) { struct address_space *mapping = vmf->vma->vm_file->f_mapping; struct inode *inode = file_inode(vmf->vma->vm_file); pgoff_t offset = vmf->pgoff; gfp_t gfp = vmf->gfp_mask; unsigned long addr; struct page *page; struct folio *folio; vm_fault_t ret; int err; if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode)) return vmf_error(-EINVAL); filemap_invalidate_lock_shared(mapping); retry: page = find_lock_page(mapping, offset); if (!page) { folio = folio_alloc(gfp | __GFP_ZERO, 0); if (!folio) { ret = VM_FAULT_OOM; goto out; } page = &folio->page; err = set_direct_map_invalid_noflush(page); if (err) { folio_put(folio); ret = vmf_error(err); goto out; } __folio_mark_uptodate(folio); err = filemap_add_folio(mapping, folio, offset, gfp); if (unlikely(err)) { folio_put(folio); /* * If a split of large page was required, it * already happened when we marked the page invalid * which guarantees that this call won't fail */ set_direct_map_default_noflush(page); if (err == -EEXIST) goto retry; ret = vmf_error(err); goto out; } addr = (unsigned long)page_address(page); flush_tlb_kernel_range(addr, addr + PAGE_SIZE); } vmf->page = page; ret = VM_FAULT_LOCKED; out: filemap_invalidate_unlock_shared(mapping); return ret; } static const struct vm_operations_struct secretmem_vm_ops = { .fault = secretmem_fault, }; static int secretmem_release(struct inode *inode, struct file *file) { atomic_dec(&secretmem_users); return 0; } static int secretmem_mmap(struct file *file, struct vm_area_struct *vma) { unsigned long len = vma->vm_end - vma->vm_start; if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) return -EINVAL; if (!mlock_future_ok(vma->vm_mm, vma->vm_flags | VM_LOCKED, len)) return -EAGAIN; vm_flags_set(vma, VM_LOCKED | VM_DONTDUMP); vma->vm_ops = &secretmem_vm_ops; return 0; } bool vma_is_secretmem(struct vm_area_struct *vma) { return vma->vm_ops == &secretmem_vm_ops; } static const struct file_operations secretmem_fops = { .release = secretmem_release, .mmap = secretmem_mmap, }; static int secretmem_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode) { return -EBUSY; } static void secretmem_free_folio(struct folio *folio) { set_direct_map_default_noflush(&folio->page); folio_zero_segment(folio, 0, folio_size(folio)); } const struct address_space_operations secretmem_aops = { .dirty_folio = noop_dirty_folio, .free_folio = 
secretmem_free_folio, .migrate_folio = secretmem_migrate_folio, }; static int secretmem_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *iattr) { struct inode *inode = d_inode(dentry); struct address_space *mapping = inode->i_mapping; unsigned int ia_valid = iattr->ia_valid; int ret; filemap_invalidate_lock(mapping); if ((ia_valid & ATTR_SIZE) && inode->i_size) ret = -EINVAL; else ret = simple_setattr(idmap, dentry, iattr); filemap_invalidate_unlock(mapping); return ret; } static const struct inode_operations secretmem_iops = { .setattr = secretmem_setattr, }; static struct vfsmount *secretmem_mnt; static struct file *secretmem_file_create(unsigned long flags) { struct file *file; struct inode *inode; const char *anon_name = "[secretmem]"; const struct qstr qname = QSTR_INIT(anon_name, strlen(anon_name)); int err; inode = alloc_anon_inode(secretmem_mnt->mnt_sb); if (IS_ERR(inode)) return ERR_CAST(inode); err = security_inode_init_security_anon(inode, &qname, NULL); if (err) { file = ERR_PTR(err); goto err_free_inode; } file = alloc_file_pseudo(inode, secretmem_mnt, "secretmem", O_RDWR, &secretmem_fops); if (IS_ERR(file)) goto err_free_inode; mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER); mapping_set_unevictable(inode->i_mapping); inode->i_op = &secretmem_iops; inode->i_mapping->a_ops = &secretmem_aops; /* pretend we are a normal file with zero size */ inode->i_mode |= S_IFREG; inode->i_size = 0; return file; err_free_inode: iput(inode); return file; } SYSCALL_DEFINE1(memfd_secret, unsigned int, flags) { struct file *file; int fd, err; /* make sure local flags do not confict with global fcntl.h */ BUILD_BUG_ON(SECRETMEM_FLAGS_MASK & O_CLOEXEC); if (!secretmem_enable) return -ENOSYS; if (flags & ~(SECRETMEM_FLAGS_MASK | O_CLOEXEC)) return -EINVAL; if (atomic_read(&secretmem_users) < 0) return -ENFILE; fd = get_unused_fd_flags(flags & O_CLOEXEC); if (fd < 0) return fd; file = secretmem_file_create(flags); if (IS_ERR(file)) { err = PTR_ERR(file); goto err_put_fd; } file->f_flags |= O_LARGEFILE; atomic_inc(&secretmem_users); fd_install(fd, file); return fd; err_put_fd: put_unused_fd(fd); return err; } static int secretmem_init_fs_context(struct fs_context *fc) { return init_pseudo(fc, SECRETMEM_MAGIC) ? 0 : -ENOMEM; } static struct file_system_type secretmem_fs = { .name = "secretmem", .init_fs_context = secretmem_init_fs_context, .kill_sb = kill_anon_super, }; static int __init secretmem_init(void) { if (!secretmem_enable) return 0; secretmem_mnt = kern_mount(&secretmem_fs); if (IS_ERR(secretmem_mnt)) return PTR_ERR(secretmem_mnt); /* prevent secretmem mappings from ever getting PROT_EXEC */ secretmem_mnt->mnt_flags |= MNT_NOEXEC; return 0; } fs_initcall(secretmem_init);
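#if 0
/*
 * Illustrative userspace sketch only (not part of this kernel file): the
 * intended use of memfd_secret(2) as implemented above. Assumes the kernel
 * headers provide __NR_memfd_secret and that secretmem is enabled. Two
 * requirements visible in the code above: the file must be sized before the
 * first fault (i_size starts at 0) and the mapping must be MAP_SHARED.
 */
#include <sys/syscall.h>
#include <sys/mman.h>
#include <unistd.h>

static int secretmem_usage_sketch(void)
{
	long fd = syscall(__NR_memfd_secret, 0);
	char *p;

	if (fd < 0)
		return -1;
	if (ftruncate(fd, 4096) < 0)
		return -1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return -1;
	p[0] = 0x5a;	/* backing page is dropped from the kernel direct map */
	return 0;
}
#endif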
linux-master
mm/secretmem.c
// SPDX-License-Identifier: GPL-2.0 /* * mm/mremap.c * * (C) Copyright 1996 Linus Torvalds * * Address space accounting code <[email protected]> * (C) Copyright 2002 Red Hat Inc, All Rights Reserved */ #include <linux/mm.h> #include <linux/mm_inline.h> #include <linux/hugetlb.h> #include <linux/shm.h> #include <linux/ksm.h> #include <linux/mman.h> #include <linux/swap.h> #include <linux/capability.h> #include <linux/fs.h> #include <linux/swapops.h> #include <linux/highmem.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/mmu_notifier.h> #include <linux/uaccess.h> #include <linux/userfaultfd_k.h> #include <linux/mempolicy.h> #include <asm/cacheflush.h> #include <asm/tlb.h> #include <asm/pgalloc.h> #include "internal.h" static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pgd = pgd_offset(mm, addr); if (pgd_none_or_clear_bad(pgd)) return NULL; p4d = p4d_offset(pgd, addr); if (p4d_none_or_clear_bad(p4d)) return NULL; pud = pud_offset(p4d, addr); if (pud_none_or_clear_bad(pud)) return NULL; return pud; } static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr) { pud_t *pud; pmd_t *pmd; pud = get_old_pud(mm, addr); if (!pud) return NULL; pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) return NULL; return pmd; } static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr) { pgd_t *pgd; p4d_t *p4d; pgd = pgd_offset(mm, addr); p4d = p4d_alloc(mm, pgd, addr); if (!p4d) return NULL; return pud_alloc(mm, p4d, addr); } static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr) { pud_t *pud; pmd_t *pmd; pud = alloc_new_pud(mm, vma, addr); if (!pud) return NULL; pmd = pmd_alloc(mm, pud, addr); if (!pmd) return NULL; VM_BUG_ON(pmd_trans_huge(*pmd)); return pmd; } static void take_rmap_locks(struct vm_area_struct *vma) { if (vma->vm_file) i_mmap_lock_write(vma->vm_file->f_mapping); if (vma->anon_vma) anon_vma_lock_write(vma->anon_vma); } static void drop_rmap_locks(struct vm_area_struct *vma) { if (vma->anon_vma) anon_vma_unlock_write(vma->anon_vma); if (vma->vm_file) i_mmap_unlock_write(vma->vm_file->f_mapping); } static pte_t move_soft_dirty_pte(pte_t pte) { /* * Set soft dirty bit so we can notice * in userspace the ptes were moved. */ #ifdef CONFIG_MEM_SOFT_DIRTY if (pte_present(pte)) pte = pte_mksoft_dirty(pte); else if (is_swap_pte(pte)) pte = pte_swp_mksoft_dirty(pte); #endif return pte; } static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, unsigned long old_addr, unsigned long old_end, struct vm_area_struct *new_vma, pmd_t *new_pmd, unsigned long new_addr, bool need_rmap_locks) { struct mm_struct *mm = vma->vm_mm; pte_t *old_pte, *new_pte, pte; spinlock_t *old_ptl, *new_ptl; bool force_flush = false; unsigned long len = old_end - old_addr; int err = 0; /* * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma * locks to ensure that rmap will always observe either the old or the * new ptes. This is the easiest way to avoid races with * truncate_pagecache(), page migration, etc... * * When need_rmap_locks is false, we use other ways to avoid * such races: * * - During exec() shift_arg_pages(), we use a specially tagged vma * which rmap call sites look for using vma_is_temporary_stack(). * * - During mremap(), new_vma is often known to be placed after vma * in rmap traversal order. 
This ensures rmap will always observe * either the old pte, or the new pte, or both (the page table locks * serialize access to individual ptes, but only rmap traversal * order guarantees that we won't miss both the old and new ptes). */ if (need_rmap_locks) take_rmap_locks(vma); /* * We don't have to worry about the ordering of src and dst * pte locks because exclusive mmap_lock prevents deadlock. */ old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl); if (!old_pte) { err = -EAGAIN; goto out; } new_pte = pte_offset_map_nolock(mm, new_pmd, new_addr, &new_ptl); if (!new_pte) { pte_unmap_unlock(old_pte, old_ptl); err = -EAGAIN; goto out; } if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); flush_tlb_batched_pending(vma->vm_mm); arch_enter_lazy_mmu_mode(); for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE, new_pte++, new_addr += PAGE_SIZE) { if (pte_none(ptep_get(old_pte))) continue; pte = ptep_get_and_clear(mm, old_addr, old_pte); /* * If we are remapping a valid PTE, make sure * to flush TLB before we drop the PTL for the * PTE. * * NOTE! Both old and new PTL matter: the old one * for racing with page_mkclean(), the new one to * make sure the physical page stays valid until * the TLB entry for the old mapping has been * flushed. */ if (pte_present(pte)) force_flush = true; pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); pte = move_soft_dirty_pte(pte); set_pte_at(mm, new_addr, new_pte, pte); } arch_leave_lazy_mmu_mode(); if (force_flush) flush_tlb_range(vma, old_end - len, old_end); if (new_ptl != old_ptl) spin_unlock(new_ptl); pte_unmap(new_pte - 1); pte_unmap_unlock(old_pte - 1, old_ptl); out: if (need_rmap_locks) drop_rmap_locks(vma); return err; } #ifndef arch_supports_page_table_move #define arch_supports_page_table_move arch_supports_page_table_move static inline bool arch_supports_page_table_move(void) { return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) || IS_ENABLED(CONFIG_HAVE_MOVE_PUD); } #endif #ifdef CONFIG_HAVE_MOVE_PMD static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) { spinlock_t *old_ptl, *new_ptl; struct mm_struct *mm = vma->vm_mm; pmd_t pmd; if (!arch_supports_page_table_move()) return false; /* * The destination pmd shouldn't be established, free_pgtables() * should have released it. * * However, there's a case during execve() where we use mremap * to move the initial stack, and in that case the target area * may overlap the source area (always moving down). * * If everything is PMD-aligned, that works fine, as moving * each pmd down will clear the source pmd. But if we first * have a few 4kB-only pages that get moved down, and then * hit the "now the rest is PMD-aligned, let's do everything * one pmd at a time", we will still have the old (now empty * of any 4kB pages, but still there) PMD in the page table * tree. * * Warn on it once - because we really should try to figure * out how to do this better - but then say "I won't move * this pmd". * * One alternative might be to just unmap the target pmd at * this point, and verify that it really is empty. We'll see. */ if (WARN_ON_ONCE(!pmd_none(*new_pmd))) return false; /* * We don't have to worry about the ordering of src and dst * ptlocks because exclusive mmap_lock prevents deadlock. 
*/ old_ptl = pmd_lock(vma->vm_mm, old_pmd); new_ptl = pmd_lockptr(mm, new_pmd); if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); /* Clear the pmd */ pmd = *old_pmd; pmd_clear(old_pmd); VM_BUG_ON(!pmd_none(*new_pmd)); pmd_populate(mm, new_pmd, pmd_pgtable(pmd)); flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); if (new_ptl != old_ptl) spin_unlock(new_ptl); spin_unlock(old_ptl); return true; } #else static inline bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) { return false; } #endif #if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD) static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) { spinlock_t *old_ptl, *new_ptl; struct mm_struct *mm = vma->vm_mm; pud_t pud; if (!arch_supports_page_table_move()) return false; /* * The destination pud shouldn't be established, free_pgtables() * should have released it. */ if (WARN_ON_ONCE(!pud_none(*new_pud))) return false; /* * We don't have to worry about the ordering of src and dst * ptlocks because exclusive mmap_lock prevents deadlock. */ old_ptl = pud_lock(vma->vm_mm, old_pud); new_ptl = pud_lockptr(mm, new_pud); if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); /* Clear the pud */ pud = *old_pud; pud_clear(old_pud); VM_BUG_ON(!pud_none(*new_pud)); pud_populate(mm, new_pud, pud_pgtable(pud)); flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE); if (new_ptl != old_ptl) spin_unlock(new_ptl); spin_unlock(old_ptl); return true; } #else static inline bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) { return false; } #endif #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) { spinlock_t *old_ptl, *new_ptl; struct mm_struct *mm = vma->vm_mm; pud_t pud; /* * The destination pud shouldn't be established, free_pgtables() * should have released it. */ if (WARN_ON_ONCE(!pud_none(*new_pud))) return false; /* * We don't have to worry about the ordering of src and dst * ptlocks because exclusive mmap_lock prevents deadlock. */ old_ptl = pud_lock(vma->vm_mm, old_pud); new_ptl = pud_lockptr(mm, new_pud); if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); /* Clear the pud */ pud = *old_pud; pud_clear(old_pud); VM_BUG_ON(!pud_none(*new_pud)); /* Set the new pud */ /* mark soft_ditry when we add pud level soft dirty support */ set_pud_at(mm, new_addr, new_pud, pud); flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE); if (new_ptl != old_ptl) spin_unlock(new_ptl); spin_unlock(old_ptl); return true; } #else static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pud_t *old_pud, pud_t *new_pud) { WARN_ON_ONCE(1); return false; } #endif enum pgt_entry { NORMAL_PMD, HPAGE_PMD, NORMAL_PUD, HPAGE_PUD, }; /* * Returns an extent of the corresponding size for the pgt_entry specified if * valid. Else returns a smaller extent bounded by the end of the source and * destination pgt_entry. 
*/ static __always_inline unsigned long get_extent(enum pgt_entry entry, unsigned long old_addr, unsigned long old_end, unsigned long new_addr) { unsigned long next, extent, mask, size; switch (entry) { case HPAGE_PMD: case NORMAL_PMD: mask = PMD_MASK; size = PMD_SIZE; break; case HPAGE_PUD: case NORMAL_PUD: mask = PUD_MASK; size = PUD_SIZE; break; default: BUILD_BUG(); break; } next = (old_addr + size) & mask; /* even if next overflowed, extent below will be ok */ extent = next - old_addr; if (extent > old_end - old_addr) extent = old_end - old_addr; next = (new_addr + size) & mask; if (extent > next - new_addr) extent = next - new_addr; return extent; } /* * Attempts to speedup the move by moving entry at the level corresponding to * pgt_entry. Returns true if the move was successful, else false. */ static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, void *old_entry, void *new_entry, bool need_rmap_locks) { bool moved = false; /* See comment in move_ptes() */ if (need_rmap_locks) take_rmap_locks(vma); switch (entry) { case NORMAL_PMD: moved = move_normal_pmd(vma, old_addr, new_addr, old_entry, new_entry); break; case NORMAL_PUD: moved = move_normal_pud(vma, old_addr, new_addr, old_entry, new_entry); break; case HPAGE_PMD: moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && move_huge_pmd(vma, old_addr, new_addr, old_entry, new_entry); break; case HPAGE_PUD: moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && move_huge_pud(vma, old_addr, new_addr, old_entry, new_entry); break; default: WARN_ON_ONCE(1); break; } if (need_rmap_locks) drop_rmap_locks(vma); return moved; } unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len, bool need_rmap_locks) { unsigned long extent, old_end; struct mmu_notifier_range range; pmd_t *old_pmd, *new_pmd; pud_t *old_pud, *new_pud; if (!len) return 0; old_end = old_addr + len; if (is_vm_hugetlb_page(vma)) return move_hugetlb_page_tables(vma, new_vma, old_addr, new_addr, len); flush_cache_range(vma, old_addr, old_end); mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, old_addr, old_end); mmu_notifier_invalidate_range_start(&range); for (; old_addr < old_end; old_addr += extent, new_addr += extent) { cond_resched(); /* * If extent is PUD-sized try to speed up the move by moving at the * PUD level if possible. */ extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr); old_pud = get_old_pud(vma->vm_mm, old_addr); if (!old_pud) continue; new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr); if (!new_pud) break; if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) { if (extent == HPAGE_PUD_SIZE) { move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr, old_pud, new_pud, need_rmap_locks); /* We ignore and continue on error? 
*/ continue; } } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) { if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr, old_pud, new_pud, true)) continue; } extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr); old_pmd = get_old_pmd(vma->vm_mm, old_addr); if (!old_pmd) continue; new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); if (!new_pmd) break; again: if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) { if (extent == HPAGE_PMD_SIZE && move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr, old_pmd, new_pmd, need_rmap_locks)) continue; split_huge_pmd(vma, old_pmd, old_addr); } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) && extent == PMD_SIZE) { /* * If the extent is PMD-sized, try to speed the move by * moving at the PMD level if possible. */ if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr, old_pmd, new_pmd, true)) continue; } if (pmd_none(*old_pmd)) continue; if (pte_alloc(new_vma->vm_mm, new_pmd)) break; if (move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma, new_pmd, new_addr, need_rmap_locks) < 0) goto again; } mmu_notifier_invalidate_range_end(&range); return len + old_addr - old_end; /* how much done */ } static unsigned long move_vma(struct vm_area_struct *vma, unsigned long old_addr, unsigned long old_len, unsigned long new_len, unsigned long new_addr, bool *locked, unsigned long flags, struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap) { long to_account = new_len - old_len; struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *new_vma; unsigned long vm_flags = vma->vm_flags; unsigned long new_pgoff; unsigned long moved_len; unsigned long account_start = 0; unsigned long account_end = 0; unsigned long hiwater_vm; int err = 0; bool need_rmap_locks; struct vma_iterator vmi; /* * We'd prefer to avoid failure later on in do_munmap: * which may split one vma into three before unmapping. */ if (mm->map_count >= sysctl_max_map_count - 3) return -ENOMEM; if (unlikely(flags & MREMAP_DONTUNMAP)) to_account = new_len; if (vma->vm_ops && vma->vm_ops->may_split) { if (vma->vm_start != old_addr) err = vma->vm_ops->may_split(vma, old_addr); if (!err && vma->vm_end != old_addr + old_len) err = vma->vm_ops->may_split(vma, old_addr + old_len); if (err) return err; } /* * Advise KSM to break any KSM pages in the area to be moved: * it would be confusing if they were to turn up at the new * location, where they happen to coincide with different KSM * pages recently unmapped. But leave vma->vm_flags as it was, * so KSM can come around to merge on vma and new_vma afterwards. */ err = ksm_madvise(vma, old_addr, old_addr + old_len, MADV_UNMERGEABLE, &vm_flags); if (err) return err; if (vm_flags & VM_ACCOUNT) { if (security_vm_enough_memory_mm(mm, to_account >> PAGE_SHIFT)) return -ENOMEM; } vma_start_write(vma); new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, &need_rmap_locks); if (!new_vma) { if (vm_flags & VM_ACCOUNT) vm_unacct_memory(to_account >> PAGE_SHIFT); return -ENOMEM; } moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, need_rmap_locks); if (moved_len < old_len) { err = -ENOMEM; } else if (vma->vm_ops && vma->vm_ops->mremap) { err = vma->vm_ops->mremap(new_vma); } if (unlikely(err)) { /* * On error, move entries back from new area to old, * which will succeed since page tables still there, * and then proceed to unmap new area instead of old. 
*/ move_page_tables(new_vma, new_addr, vma, old_addr, moved_len, true); vma = new_vma; old_len = new_len; old_addr = new_addr; new_addr = err; } else { mremap_userfaultfd_prep(new_vma, uf); } if (is_vm_hugetlb_page(vma)) { clear_vma_resv_huge_pages(vma); } /* Conceal VM_ACCOUNT so old reservation is not undone */ if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) { vm_flags_clear(vma, VM_ACCOUNT); if (vma->vm_start < old_addr) account_start = vma->vm_start; if (vma->vm_end > old_addr + old_len) account_end = vma->vm_end; } /* * If we failed to move page tables we still do total_vm increment * since do_munmap() will decrement it by old_len == new_len. * * Since total_vm is about to be raised artificially high for a * moment, we need to restore high watermark afterwards: if stats * are taken meanwhile, total_vm and hiwater_vm appear too high. * If this were a serious issue, we'd add a flag to do_munmap(). */ hiwater_vm = mm->hiwater_vm; vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT); /* Tell pfnmap has moved from this vma */ if (unlikely(vma->vm_flags & VM_PFNMAP)) untrack_pfn_clear(vma); if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) { /* We always clear VM_LOCKED[ONFAULT] on the old vma */ vm_flags_clear(vma, VM_LOCKED_MASK); /* * anon_vma links of the old vma is no longer needed after its page * table has been moved. */ if (new_vma != vma && vma->vm_start == old_addr && vma->vm_end == (old_addr + old_len)) unlink_anon_vmas(vma); /* Because we won't unmap we don't need to touch locked_vm */ return new_addr; } vma_iter_init(&vmi, mm, old_addr); if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) { /* OOM: unable to split vma, just get accounts right */ if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) vm_acct_memory(old_len >> PAGE_SHIFT); account_start = account_end = 0; } if (vm_flags & VM_LOCKED) { mm->locked_vm += new_len >> PAGE_SHIFT; *locked = true; } mm->hiwater_vm = hiwater_vm; /* Restore VM_ACCOUNT if one or two pieces of vma left */ if (account_start) { vma = vma_prev(&vmi); vm_flags_set(vma, VM_ACCOUNT); } if (account_end) { vma = vma_next(&vmi); vm_flags_set(vma, VM_ACCOUNT); } return new_addr; } static struct vm_area_struct *vma_to_resize(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long pgoff; vma = vma_lookup(mm, addr); if (!vma) return ERR_PTR(-EFAULT); /* * !old_len is a special case where an attempt is made to 'duplicate' * a mapping. This makes no sense for private mappings as it will * instead create a fresh/new mapping unrelated to the original. This * is contrary to the basic idea of mremap which creates new mappings * based on the original. There are no known use cases for this * behavior. As a result, fail such attempts. */ if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) { pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. 
This is not supported.\n", current->comm, current->pid); return ERR_PTR(-EINVAL); } if ((flags & MREMAP_DONTUNMAP) && (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))) return ERR_PTR(-EINVAL); /* We can't remap across vm area boundaries */ if (old_len > vma->vm_end - addr) return ERR_PTR(-EFAULT); if (new_len == old_len) return vma; /* Need to be careful about a growing mapping */ pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; pgoff += vma->vm_pgoff; if (pgoff + (new_len >> PAGE_SHIFT) < pgoff) return ERR_PTR(-EINVAL); if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) return ERR_PTR(-EFAULT); if (!mlock_future_ok(mm, vma->vm_flags, new_len - old_len)) return ERR_PTR(-EAGAIN); if (!may_expand_vm(mm, vma->vm_flags, (new_len - old_len) >> PAGE_SHIFT)) return ERR_PTR(-ENOMEM); return vma; } static unsigned long mremap_to(unsigned long addr, unsigned long old_len, unsigned long new_addr, unsigned long new_len, bool *locked, unsigned long flags, struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap_early, struct list_head *uf_unmap) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long ret = -EINVAL; unsigned long map_flags = 0; if (offset_in_page(new_addr)) goto out; if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len) goto out; /* Ensure the old/new locations do not overlap */ if (addr + old_len > new_addr && new_addr + new_len > addr) goto out; /* * move_vma() need us to stay 4 maps below the threshold, otherwise * it will bail out at the very beginning. * That is a problem if we have already unmaped the regions here * (new_addr, and old_addr), because userspace will not know the * state of the vma's after it gets -ENOMEM. * So, to avoid such scenario we can pre-compute if the whole * operation has high chances to success map-wise. * Worst-scenario case is when both vma's (new_addr and old_addr) get * split in 3 before unmapping it. * That means 2 more maps (1 for each) to the ones we already hold. * Check whether current map count plus 2 still leads us to 4 maps below * the threshold, otherwise return -ENOMEM here to be more safe. 
*/ if ((mm->map_count + 2) >= sysctl_max_map_count - 3) return -ENOMEM; if (flags & MREMAP_FIXED) { ret = do_munmap(mm, new_addr, new_len, uf_unmap_early); if (ret) goto out; } if (old_len > new_len) { ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap); if (ret) goto out; old_len = new_len; } vma = vma_to_resize(addr, old_len, new_len, flags); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto out; } /* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */ if (flags & MREMAP_DONTUNMAP && !may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) { ret = -ENOMEM; goto out; } if (flags & MREMAP_FIXED) map_flags |= MAP_FIXED; if (vma->vm_flags & VM_MAYSHARE) map_flags |= MAP_SHARED; ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT), map_flags); if (IS_ERR_VALUE(ret)) goto out; /* We got a new mapping */ if (!(flags & MREMAP_FIXED)) new_addr = ret; ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf, uf_unmap); out: return ret; } static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) { unsigned long end = vma->vm_end + delta; if (end < vma->vm_end) /* overflow */ return 0; if (find_vma_intersection(vma->vm_mm, vma->vm_end, end)) return 0; if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start, 0, MAP_FIXED) & ~PAGE_MASK) return 0; return 1; } /* * Expand (or shrink) an existing mapping, potentially moving it at the * same time (controlled by the MREMAP_MAYMOVE flag and available VM space) * * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise * This option implies MREMAP_MAYMOVE. */ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, unsigned long, new_len, unsigned long, flags, unsigned long, new_addr) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long ret = -EINVAL; bool locked = false; struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX; LIST_HEAD(uf_unmap_early); LIST_HEAD(uf_unmap); /* * There is a deliberate asymmetry here: we strip the pointer tag * from the old address but leave the new address alone. This is * for consistency with mmap(), where we prevent the creation of * aliasing mappings in userspace by leaving the tag bits of the * mapping address intact. A non-zero tag will cause the subsequent * range checks to reject the address as invalid. * * See Documentation/arch/arm64/tagged-address-abi.rst for more * information. */ addr = untagged_addr(addr); if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP)) return ret; if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE)) return ret; /* * MREMAP_DONTUNMAP is always a move and it does not allow resizing * in the process. */ if (flags & MREMAP_DONTUNMAP && (!(flags & MREMAP_MAYMOVE) || old_len != new_len)) return ret; if (offset_in_page(addr)) return ret; old_len = PAGE_ALIGN(old_len); new_len = PAGE_ALIGN(new_len); /* * We allow a zero old-len as a special case * for DOS-emu "duplicate shm area" thing. But * a zero new-len is nonsensical. 
*/ if (!new_len) return ret; if (mmap_write_lock_killable(current->mm)) return -EINTR; vma = vma_lookup(mm, addr); if (!vma) { ret = -EFAULT; goto out; } if (is_vm_hugetlb_page(vma)) { struct hstate *h __maybe_unused = hstate_vma(vma); old_len = ALIGN(old_len, huge_page_size(h)); new_len = ALIGN(new_len, huge_page_size(h)); /* addrs must be huge page aligned */ if (addr & ~huge_page_mask(h)) goto out; if (new_addr & ~huge_page_mask(h)) goto out; /* * Don't allow remap expansion, because the underlying hugetlb * reservation is not yet capable to handle split reservation. */ if (new_len > old_len) goto out; } if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) { ret = mremap_to(addr, old_len, new_addr, new_len, &locked, flags, &uf, &uf_unmap_early, &uf_unmap); goto out; } /* * Always allow a shrinking remap: that just unmaps * the unnecessary pages.. * do_vmi_munmap does all the needed commit accounting, and * unlocks the mmap_lock if so directed. */ if (old_len >= new_len) { VMA_ITERATOR(vmi, mm, addr + new_len); if (old_len == new_len) { ret = addr; goto out; } ret = do_vmi_munmap(&vmi, mm, addr + new_len, old_len - new_len, &uf_unmap, true); if (ret) goto out; ret = addr; goto out_unlocked; } /* * Ok, we need to grow.. */ vma = vma_to_resize(addr, old_len, new_len, flags); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto out; } /* old_len exactly to the end of the area.. */ if (old_len == vma->vm_end - addr) { /* can we just expand the current mapping? */ if (vma_expandable(vma, new_len - old_len)) { long pages = (new_len - old_len) >> PAGE_SHIFT; unsigned long extension_start = addr + old_len; unsigned long extension_end = addr + new_len; pgoff_t extension_pgoff = vma->vm_pgoff + ((extension_start - vma->vm_start) >> PAGE_SHIFT); VMA_ITERATOR(vmi, mm, extension_start); if (vma->vm_flags & VM_ACCOUNT) { if (security_vm_enough_memory_mm(mm, pages)) { ret = -ENOMEM; goto out; } } /* * Function vma_merge() is called on the extension we * are adding to the already existing vma, vma_merge() * will merge this extension with the already existing * vma (expand operation itself) and possibly also with * the next vma if it becomes adjacent to the expanded * vma and otherwise compatible. */ vma = vma_merge(&vmi, mm, vma, extension_start, extension_end, vma->vm_flags, vma->anon_vma, vma->vm_file, extension_pgoff, vma_policy(vma), vma->vm_userfaultfd_ctx, anon_vma_name(vma)); if (!vma) { vm_unacct_memory(pages); ret = -ENOMEM; goto out; } vm_stat_account(mm, vma->vm_flags, pages); if (vma->vm_flags & VM_LOCKED) { mm->locked_vm += pages; locked = true; new_addr = addr; } ret = addr; goto out; } } /* * We weren't able to just expand or shrink the area, * we need to create a new one and move it.. */ ret = -ENOMEM; if (flags & MREMAP_MAYMOVE) { unsigned long map_flags = 0; if (vma->vm_flags & VM_MAYSHARE) map_flags |= MAP_SHARED; new_addr = get_unmapped_area(vma->vm_file, 0, new_len, vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT), map_flags); if (IS_ERR_VALUE(new_addr)) { ret = new_addr; goto out; } ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked, flags, &uf, &uf_unmap); } out: if (offset_in_page(ret)) locked = false; mmap_write_unlock(current->mm); if (locked && new_len > old_len) mm_populate(new_addr + old_len, new_len - old_len); out_unlocked: userfaultfd_unmap_complete(mm, &uf_unmap_early); mremap_userfaultfd_complete(&uf, addr, ret, old_len); userfaultfd_unmap_complete(mm, &uf_unmap); return ret; }
linux-master
mm/mremap.c
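The mremap() syscall implemented above either expands a mapping in place (when old_len reaches the end of the VMA and the adjacent range is free) or, with MREMAP_MAYMOVE, relocates it via move_vma(). A minimal userspace sketch, assuming a Linux/glibc system, that exercises the grow-and-possibly-move path; nothing below is taken from the kernel sources:

/* Hedged userspace sketch: grow an anonymous mapping, letting the kernel
 * move it if it cannot be expanded in place (MREMAP_MAYMOVE). */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	size_t old_len = 4096, new_len = 4 * 4096;
	char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	strcpy(p, "hello");

	/* May return a different address; the contents are preserved. */
	char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	printf("old %p new %p data \"%s\"\n", (void *)p, (void *)q, q);
	munmap(q, new_len);
	return 0;
}

If the extra pages fit in place, the returned address equals the old one; otherwise the kernel picks a new range with get_unmapped_area() and move_vma() migrates the page tables, which is exactly the branch at the end of the syscall above.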
// SPDX-License-Identifier: GPL-2.0-or-later /* * linux/mm/process_vm_access.c * * Copyright (C) 2010-2011 Christopher Yeoh <[email protected]>, IBM Corp. */ #include <linux/compat.h> #include <linux/mm.h> #include <linux/uio.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/highmem.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/syscalls.h> /** * process_vm_rw_pages - read/write pages from task specified * @pages: array of pointers to pages we want to copy * @offset: offset in page to start copying from/to * @len: number of bytes to copy * @iter: where to copy to/from locally * @vm_write: 0 means copy from, 1 means copy to * Returns 0 on success, error code otherwise */ static int process_vm_rw_pages(struct page **pages, unsigned offset, size_t len, struct iov_iter *iter, int vm_write) { /* Do the copy for each page */ while (len && iov_iter_count(iter)) { struct page *page = *pages++; size_t copy = PAGE_SIZE - offset; size_t copied; if (copy > len) copy = len; if (vm_write) copied = copy_page_from_iter(page, offset, copy, iter); else copied = copy_page_to_iter(page, offset, copy, iter); len -= copied; if (copied < copy && iov_iter_count(iter)) return -EFAULT; offset = 0; } return 0; } /* Maximum number of pages kmalloc'd to hold struct page's during copy */ #define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2) /** * process_vm_rw_single_vec - read/write pages from task specified * @addr: start memory address of target process * @len: size of area to copy to/from * @iter: where to copy to/from locally * @process_pages: struct pages area that can store at least * nr_pages_to_copy struct page pointers * @mm: mm for task * @task: task to read/write from * @vm_write: 0 means copy from, 1 means copy to * Returns 0 on success or on failure error code */ static int process_vm_rw_single_vec(unsigned long addr, unsigned long len, struct iov_iter *iter, struct page **process_pages, struct mm_struct *mm, struct task_struct *task, int vm_write) { unsigned long pa = addr & PAGE_MASK; unsigned long start_offset = addr - pa; unsigned long nr_pages; ssize_t rc = 0; unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES / sizeof(struct pages *); unsigned int flags = 0; /* Work out address and page range required */ if (len == 0) return 0; nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1; if (vm_write) flags |= FOLL_WRITE; while (!rc && nr_pages && iov_iter_count(iter)) { int pinned_pages = min(nr_pages, max_pages_per_loop); int locked = 1; size_t bytes; /* * Get the pages we're interested in. 
We must * access remotely because task/mm might not * current/current->mm */ mmap_read_lock(mm); pinned_pages = pin_user_pages_remote(mm, pa, pinned_pages, flags, process_pages, &locked); if (locked) mmap_read_unlock(mm); if (pinned_pages <= 0) return -EFAULT; bytes = pinned_pages * PAGE_SIZE - start_offset; if (bytes > len) bytes = len; rc = process_vm_rw_pages(process_pages, start_offset, bytes, iter, vm_write); len -= bytes; start_offset = 0; nr_pages -= pinned_pages; pa += pinned_pages * PAGE_SIZE; /* If vm_write is set, the pages need to be made dirty: */ unpin_user_pages_dirty_lock(process_pages, pinned_pages, vm_write); } return rc; } /* Maximum number of entries for process pages array which lives on stack */ #define PVM_MAX_PP_ARRAY_COUNT 16 /** * process_vm_rw_core - core of reading/writing pages from task specified * @pid: PID of process to read/write from/to * @iter: where to copy to/from locally * @rvec: iovec array specifying where to copy to/from in the other process * @riovcnt: size of rvec array * @flags: currently unused * @vm_write: 0 if reading from other process, 1 if writing to other process * * Returns the number of bytes read/written or error code. May * return less bytes than expected if an error occurs during the copying * process. */ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter, const struct iovec *rvec, unsigned long riovcnt, unsigned long flags, int vm_write) { struct task_struct *task; struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT]; struct page **process_pages = pp_stack; struct mm_struct *mm; unsigned long i; ssize_t rc = 0; unsigned long nr_pages = 0; unsigned long nr_pages_iov; ssize_t iov_len; size_t total_len = iov_iter_count(iter); /* * Work out how many pages of struct pages we're going to need * when eventually calling get_user_pages */ for (i = 0; i < riovcnt; i++) { iov_len = rvec[i].iov_len; if (iov_len > 0) { nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE - (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1; nr_pages = max(nr_pages, nr_pages_iov); } } if (nr_pages == 0) return 0; if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) { /* For reliability don't try to kmalloc more than 2 pages worth */ process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES, sizeof(struct pages *)*nr_pages), GFP_KERNEL); if (!process_pages) return -ENOMEM; } /* Get process information */ task = find_get_task_by_vpid(pid); if (!task) { rc = -ESRCH; goto free_proc_pages; } mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS); if (!mm || IS_ERR(mm)) { rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; /* * Explicitly map EACCES to EPERM as EPERM is a more * appropriate error code for process_vw_readv/writev */ if (rc == -EACCES) rc = -EPERM; goto put_task_struct; } for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++) rc = process_vm_rw_single_vec( (unsigned long)rvec[i].iov_base, rvec[i].iov_len, iter, process_pages, mm, task, vm_write); /* copied = space before - space after */ total_len -= iov_iter_count(iter); /* If we have managed to copy any data at all then we return the number of bytes copied. 
Otherwise we return the error code */ if (total_len) rc = total_len; mmput(mm); put_task_struct: put_task_struct(task); free_proc_pages: if (process_pages != pp_stack) kfree(process_pages); return rc; } /** * process_vm_rw - check iovecs before calling core routine * @pid: PID of process to read/write from/to * @lvec: iovec array specifying where to copy to/from locally * @liovcnt: size of lvec array * @rvec: iovec array specifying where to copy to/from in the other process * @riovcnt: size of rvec array * @flags: currently unused * @vm_write: 0 if reading from other process, 1 if writing to other process * * Returns the number of bytes read/written or error code. May * return less bytes than expected if an error occurs during the copying * process. */ static ssize_t process_vm_rw(pid_t pid, const struct iovec __user *lvec, unsigned long liovcnt, const struct iovec __user *rvec, unsigned long riovcnt, unsigned long flags, int vm_write) { struct iovec iovstack_l[UIO_FASTIOV]; struct iovec iovstack_r[UIO_FASTIOV]; struct iovec *iov_l = iovstack_l; struct iovec *iov_r; struct iov_iter iter; ssize_t rc; int dir = vm_write ? ITER_SOURCE : ITER_DEST; if (flags != 0) return -EINVAL; /* Check iovecs */ rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter); if (rc < 0) return rc; if (!iov_iter_count(&iter)) goto free_iov_l; iov_r = iovec_from_user(rvec, riovcnt, UIO_FASTIOV, iovstack_r, in_compat_syscall()); if (IS_ERR(iov_r)) { rc = PTR_ERR(iov_r); goto free_iov_l; } rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write); if (iov_r != iovstack_r) kfree(iov_r); free_iov_l: kfree(iov_l); return rc; } SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec, unsigned long, liovcnt, const struct iovec __user *, rvec, unsigned long, riovcnt, unsigned long, flags) { return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0); } SYSCALL_DEFINE6(process_vm_writev, pid_t, pid, const struct iovec __user *, lvec, unsigned long, liovcnt, const struct iovec __user *, rvec, unsigned long, riovcnt, unsigned long, flags) { return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1); }
linux-master
mm/process_vm_access.c
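process_vm_readv()/process_vm_writev() above pin the remote task's pages and copy through an iov_iter. A hedged userspace sketch of the read direction; the PID and remote address are command-line placeholders, since a real caller would learn them out of band (for example over a pipe or from /proc/<pid>/maps):

/* Hedged userspace sketch: copy a buffer out of another process's address
 * space with process_vm_readv(); pid and address are user-supplied. */
#define _GNU_SOURCE
#include <sys/uio.h>
#include <sys/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <pid> <remote-hex-addr>\n", argv[0]);
		return 1;
	}
	pid_t pid = (pid_t)atoi(argv[1]);
	void *remote_addr = (void *)(uintptr_t)strtoull(argv[2], NULL, 16);

	char buf[256];
	struct iovec local  = { .iov_base = buf,         .iov_len = sizeof(buf) };
	struct iovec remote = { .iov_base = remote_addr, .iov_len = sizeof(buf) };

	ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);
	if (n < 0) {
		perror("process_vm_readv");
		return 1;
	}
	printf("copied %zd bytes\n", n);
	return 0;
}

Without PTRACE_MODE_ATTACH-level credentials the call fails with EPERM, matching the explicit EACCES-to-EPERM mapping in process_vm_rw_core() above.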
// SPDX-License-Identifier: GPL-2.0 /* * Virtual Memory Map support * * (C) 2007 sgi. Christoph Lameter. * * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn, * virt_to_page, page_address() to be implemented as a base offset * calculation without memory access. * * However, virtual mappings need a page table and TLBs. Many Linux * architectures already map their physical space using 1-1 mappings * via TLBs. For those arches the virtual memory map is essentially * for free if we use the same page size as the 1-1 mappings. In that * case the overhead consists of a few additional pages that are * allocated to create a view of memory for vmemmap. * * The architecture is expected to provide a vmemmap_populate() function * to instantiate the mapping. */ #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/memblock.h> #include <linux/memremap.h> #include <linux/highmem.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/vmalloc.h> #include <linux/sched.h> #include <asm/dma.h> #include <asm/pgalloc.h> /* * Allocate a block of memory to be used to back the virtual memory map * or to back the page tables that are used to create the mapping. * Uses the main allocators if they are available, else bootmem. */ static void * __ref __earlyonly_bootmem_alloc(int node, unsigned long size, unsigned long align, unsigned long goal) { return memblock_alloc_try_nid_raw(size, align, goal, MEMBLOCK_ALLOC_ACCESSIBLE, node); } void * __meminit vmemmap_alloc_block(unsigned long size, int node) { /* If the main allocator is up use that, fallback to bootmem. */ if (slab_is_available()) { gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN; int order = get_order(size); static bool warned; struct page *page; page = alloc_pages_node(node, gfp_mask, order); if (page) return page_address(page); if (!warned) { warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL, "vmemmap alloc failure: order:%u", order); warned = true; } return NULL; } else return __earlyonly_bootmem_alloc(node, size, size, __pa(MAX_DMA_ADDRESS)); } static void * __meminit altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap); /* need to make sure size is all the same during early stage */ void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node, struct vmem_altmap *altmap) { void *ptr; if (altmap) return altmap_alloc_block_buf(size, altmap); ptr = sparse_buffer_alloc(size); if (!ptr) ptr = vmemmap_alloc_block(size, node); return ptr; } static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap) { return altmap->base_pfn + altmap->reserve + altmap->alloc + altmap->align; } static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap) { unsigned long allocated = altmap->alloc + altmap->align; if (altmap->free > allocated) return altmap->free - allocated; return 0; } static void * __meminit altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap) { unsigned long pfn, nr_pfns, nr_align; if (size & ~PAGE_MASK) { pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n", __func__, size); return NULL; } pfn = vmem_altmap_next_pfn(altmap); nr_pfns = size >> PAGE_SHIFT; nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG); nr_align = ALIGN(pfn, nr_align) - pfn; if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap)) return NULL; altmap->alloc += nr_pfns; altmap->align += nr_align; pfn += nr_align; pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n", __func__, pfn, altmap->alloc, altmap->align, nr_pfns); return 
__va(__pfn_to_phys(pfn)); } void __meminit vmemmap_verify(pte_t *pte, int node, unsigned long start, unsigned long end) { unsigned long pfn = pte_pfn(ptep_get(pte)); int actual_node = early_pfn_to_nid(pfn); if (node_distance(actual_node, node) > LOCAL_DISTANCE) pr_warn_once("[%lx-%lx] potential offnode page_structs\n", start, end - 1); } pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, struct vmem_altmap *altmap, struct page *reuse) { pte_t *pte = pte_offset_kernel(pmd, addr); if (pte_none(ptep_get(pte))) { pte_t entry; void *p; if (!reuse) { p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap); if (!p) return NULL; } else { /* * When a PTE/PMD entry is freed from the init_mm * there's a free_pages() call to this page allocated * above. Thus this get_page() is paired with the * put_page_testzero() on the freeing path. * This can only called by certain ZONE_DEVICE path, * and through vmemmap_populate_compound_pages() when * slab is available. */ get_page(reuse); p = page_to_virt(reuse); } entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL); set_pte_at(&init_mm, addr, pte, entry); } return pte; } static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node) { void *p = vmemmap_alloc_block(size, node); if (!p) return NULL; memset(p, 0, size); return p; } pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node) { pmd_t *pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) { void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node); if (!p) return NULL; pmd_populate_kernel(&init_mm, pmd, p); } return pmd; } void __weak __meminit pmd_init(void *addr) { } pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node) { pud_t *pud = pud_offset(p4d, addr); if (pud_none(*pud)) { void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node); if (!p) return NULL; pmd_init(p); pud_populate(&init_mm, pud, p); } return pud; } void __weak __meminit pud_init(void *addr) { } p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node) { p4d_t *p4d = p4d_offset(pgd, addr); if (p4d_none(*p4d)) { void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node); if (!p) return NULL; pud_init(p); p4d_populate(&init_mm, p4d, p); } return p4d; } pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node) { pgd_t *pgd = pgd_offset_k(addr); if (pgd_none(*pgd)) { void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node); if (!p) return NULL; pgd_populate(&init_mm, pgd, p); } return pgd; } static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node, struct vmem_altmap *altmap, struct page *reuse) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; pgd = vmemmap_pgd_populate(addr, node); if (!pgd) return NULL; p4d = vmemmap_p4d_populate(pgd, addr, node); if (!p4d) return NULL; pud = vmemmap_pud_populate(p4d, addr, node); if (!pud) return NULL; pmd = vmemmap_pmd_populate(pud, addr, node); if (!pmd) return NULL; pte = vmemmap_pte_populate(pmd, addr, node, altmap, reuse); if (!pte) return NULL; vmemmap_verify(pte, node, addr, addr + PAGE_SIZE); return pte; } static int __meminit vmemmap_populate_range(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap, struct page *reuse) { unsigned long addr = start; pte_t *pte; for (; addr < end; addr += PAGE_SIZE) { pte = vmemmap_populate_address(addr, node, altmap, reuse); if (!pte) return -ENOMEM; } return 0; } int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { return 
vmemmap_populate_range(start, end, node, altmap, NULL); } void __weak __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node, unsigned long addr, unsigned long next) { } int __weak __meminit vmemmap_check_pmd(pmd_t *pmd, int node, unsigned long addr, unsigned long next) { return 0; } int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { unsigned long addr; unsigned long next; pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; for (addr = start; addr < end; addr = next) { next = pmd_addr_end(addr, end); pgd = vmemmap_pgd_populate(addr, node); if (!pgd) return -ENOMEM; p4d = vmemmap_p4d_populate(pgd, addr, node); if (!p4d) return -ENOMEM; pud = vmemmap_pud_populate(p4d, addr, node); if (!pud) return -ENOMEM; pmd = pmd_offset(pud, addr); if (pmd_none(READ_ONCE(*pmd))) { void *p; p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap); if (p) { vmemmap_set_pmd(pmd, p, node, addr, next); continue; } else if (altmap) { /* * No fallback: In any case we care about, the * altmap should be reasonably sized and aligned * such that vmemmap_alloc_block_buf() will always * succeed. For consistency with the PTE case, * return an error here as failure could indicate * a configuration issue with the size of the altmap. */ return -ENOMEM; } } else if (vmemmap_check_pmd(pmd, node, addr, next)) continue; if (vmemmap_populate_basepages(addr, next, node, altmap)) return -ENOMEM; } return 0; } #ifndef vmemmap_populate_compound_pages /* * For compound pages bigger than section size (e.g. x86 1G compound * pages with 2M subsection size) fill the rest of sections as tail * pages. * * Note that memremap_pages() resets @nr_range value and will increment * it after each range successful onlining. Thus the value or @nr_range * at section memmap populate corresponds to the in-progress range * being onlined here. */ static bool __meminit reuse_compound_section(unsigned long start_pfn, struct dev_pagemap *pgmap) { unsigned long nr_pages = pgmap_vmemmap_nr(pgmap); unsigned long offset = start_pfn - PHYS_PFN(pgmap->ranges[pgmap->nr_range].start); return !IS_ALIGNED(offset, nr_pages) && nr_pages > PAGES_PER_SUBSECTION; } static pte_t * __meminit compound_section_tail_page(unsigned long addr) { pte_t *pte; addr -= PAGE_SIZE; /* * Assuming sections are populated sequentially, the previous section's * page data can be reused. */ pte = pte_offset_kernel(pmd_off_k(addr), addr); if (!pte) return NULL; return pte; } static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn, unsigned long start, unsigned long end, int node, struct dev_pagemap *pgmap) { unsigned long size, addr; pte_t *pte; int rc; if (reuse_compound_section(start_pfn, pgmap)) { pte = compound_section_tail_page(start); if (!pte) return -ENOMEM; /* * Reuse the page that was populated in the prior iteration * with just tail struct pages. 
*/ return vmemmap_populate_range(start, end, node, NULL, pte_page(ptep_get(pte))); } size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page)); for (addr = start; addr < end; addr += size) { unsigned long next, last = addr + size; /* Populate the head page vmemmap page */ pte = vmemmap_populate_address(addr, node, NULL, NULL); if (!pte) return -ENOMEM; /* Populate the tail pages vmemmap page */ next = addr + PAGE_SIZE; pte = vmemmap_populate_address(next, node, NULL, NULL); if (!pte) return -ENOMEM; /* * Reuse the previous page for the rest of tail pages * See layout diagram in Documentation/mm/vmemmap_dedup.rst */ next += PAGE_SIZE; rc = vmemmap_populate_range(next, last, node, NULL, pte_page(ptep_get(pte))); if (rc) return -ENOMEM; } return 0; } #endif struct page * __meminit __populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap, struct dev_pagemap *pgmap) { unsigned long start = (unsigned long) pfn_to_page(pfn); unsigned long end = start + nr_pages * sizeof(struct page); int r; if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) || !IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION))) return NULL; if (vmemmap_can_optimize(altmap, pgmap)) r = vmemmap_populate_compound_pages(pfn, start, end, nid, pgmap); else r = vmemmap_populate(start, end, nid, altmap); if (r < 0) return NULL; return pfn_to_page(pfn); }
linux-master
mm/sparse-vmemmap.c
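The payoff of populating the vmemmap, as the header comment above notes, is that pfn_to_page()/page_to_pfn() reduce to offset arithmetic against a single virtual base. A hedged, standalone model of just that arithmetic; struct page_model and the base address below are stand-ins, not the kernel's definitions:

/* Hedged model of the vmemmap base-offset calculation. No dereference is
 * performed; only the pointer math that pfn_to_page()/page_to_pfn() do
 * under CONFIG_SPARSEMEM_VMEMMAP is shown. */
#include <stdio.h>

struct page_model { unsigned long flags; void *priv[7]; };	/* placeholder layout */

int main(void)
{
	/* Illustrative base address; chosen only for the demo. */
	struct page_model *vmemmap = (struct page_model *)0xffffea0000000000UL;
	unsigned long pfn = 0x12345;

	struct page_model *page = vmemmap + pfn;		/* pfn_to_page: one add */
	unsigned long back = (unsigned long)(page - vmemmap);	/* page_to_pfn: one subtract */

	printf("pfn %#lx -> page %p -> pfn %#lx\n", pfn, (void *)page, back);
	return 0;
}

The page tables built by vmemmap_populate_*() are what make dereferencing such an address legal inside the kernel; the model above only performs the arithmetic.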
// SPDX-License-Identifier: GPL-2.0 /* * mm/debug.c * * mm/ specific debug routines. * */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/trace_events.h> #include <linux/memcontrol.h> #include <trace/events/mmflags.h> #include <linux/migrate.h> #include <linux/page_owner.h> #include <linux/ctype.h> #include "internal.h" #include <trace/events/migrate.h> /* * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can * be used to populate migrate_reason_names[]. */ #undef EM #undef EMe #define EM(a, b) b, #define EMe(a, b) b const char *migrate_reason_names[MR_TYPES] = { MIGRATE_REASON }; const struct trace_print_flags pageflag_names[] = { __def_pageflag_names, {0, NULL} }; const struct trace_print_flags pagetype_names[] = { __def_pagetype_names, {0, NULL} }; const struct trace_print_flags gfpflag_names[] = { __def_gfpflag_names, {0, NULL} }; const struct trace_print_flags vmaflag_names[] = { __def_vmaflag_names, {0, NULL} }; static void __dump_page(struct page *page) { struct folio *folio = page_folio(page); struct page *head = &folio->page; struct address_space *mapping; bool compound = PageCompound(page); /* * Accessing the pageblock without the zone lock. It could change to * "isolate" again in the meantime, but since we are just dumping the * state for debugging, it should be fine to accept a bit of * inaccuracy here due to racing. */ bool page_cma = is_migrate_cma_page(page); int mapcount; char *type = ""; if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) { /* * Corrupt page, so we cannot call page_mapping. Instead, do a * safe subset of the steps that page_mapping() does. Caution: * this will be misleading for tail pages, PageSwapCache pages, * and potentially other situations. (See the page_mapping() * implementation for what's missing here.) */ unsigned long tmp = (unsigned long)page->mapping; if (tmp & PAGE_MAPPING_ANON) mapping = NULL; else mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS); head = page; folio = (struct folio *)page; compound = false; } else { mapping = page_mapping(page); } /* * Avoid VM_BUG_ON() in page_mapcount(). * page->_mapcount space in struct page is used by sl[aou]b pages to * encode own info. */ mapcount = PageSlab(head) ? 0 : page_mapcount(page); pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n", page, page_ref_count(head), mapcount, mapping, page_to_pgoff(page), page_to_pfn(page)); if (compound) { pr_warn("head:%p order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n", head, compound_order(head), folio_entire_mapcount(folio), folio_nr_pages_mapped(folio), atomic_read(&folio->_pincount)); } #ifdef CONFIG_MEMCG if (head->memcg_data) pr_warn("memcg:%lx\n", head->memcg_data); #endif if (PageKsm(page)) type = "ksm "; else if (PageAnon(page)) type = "anon "; else if (mapping) dump_mapping(mapping); BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1); pr_warn("%sflags: %pGp%s\n", type, &head->flags, page_cma ? 
" CMA" : ""); pr_warn("page_type: %pGt\n", &head->page_type); print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32, sizeof(unsigned long), page, sizeof(struct page), false); if (head != page) print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32, sizeof(unsigned long), head, sizeof(struct page), false); } void dump_page(struct page *page, const char *reason) { if (PagePoisoned(page)) pr_warn("page:%p is uninitialized and poisoned", page); else __dump_page(page); if (reason) pr_warn("page dumped because: %s\n", reason); dump_page_owner(page); } EXPORT_SYMBOL(dump_page); #ifdef CONFIG_DEBUG_VM void dump_vma(const struct vm_area_struct *vma) { pr_emerg("vma %px start %px end %px mm %px\n" "prot %lx anon_vma %px vm_ops %px\n" "pgoff %lx file %px private_data %px\n" "flags: %#lx(%pGv)\n", vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm, (unsigned long)pgprot_val(vma->vm_page_prot), vma->anon_vma, vma->vm_ops, vma->vm_pgoff, vma->vm_file, vma->vm_private_data, vma->vm_flags, &vma->vm_flags); } EXPORT_SYMBOL(dump_vma); void dump_mm(const struct mm_struct *mm) { pr_emerg("mm %px task_size %lu\n" #ifdef CONFIG_MMU "get_unmapped_area %px\n" #endif "mmap_base %lu mmap_legacy_base %lu\n" "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n" "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" "pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n" "start_code %lx end_code %lx start_data %lx end_data %lx\n" "start_brk %lx brk %lx start_stack %lx\n" "arg_start %lx arg_end %lx env_start %lx env_end %lx\n" "binfmt %px flags %lx\n" #ifdef CONFIG_AIO "ioctx_table %px\n" #endif #ifdef CONFIG_MEMCG "owner %px " #endif "exe_file %px\n" #ifdef CONFIG_MMU_NOTIFIER "notifier_subscriptions %px\n" #endif #ifdef CONFIG_NUMA_BALANCING "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" #endif "tlb_flush_pending %d\n" "def_flags: %#lx(%pGv)\n", mm, mm->task_size, #ifdef CONFIG_MMU mm->get_unmapped_area, #endif mm->mmap_base, mm->mmap_legacy_base, mm->pgd, atomic_read(&mm->mm_users), atomic_read(&mm->mm_count), mm_pgtables_bytes(mm), mm->map_count, mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, (u64)atomic64_read(&mm->pinned_vm), mm->data_vm, mm->exec_vm, mm->stack_vm, mm->start_code, mm->end_code, mm->start_data, mm->end_data, mm->start_brk, mm->brk, mm->start_stack, mm->arg_start, mm->arg_end, mm->env_start, mm->env_end, mm->binfmt, mm->flags, #ifdef CONFIG_AIO mm->ioctx_table, #endif #ifdef CONFIG_MEMCG mm->owner, #endif mm->exe_file, #ifdef CONFIG_MMU_NOTIFIER mm->notifier_subscriptions, #endif #ifdef CONFIG_NUMA_BALANCING mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq, #endif atomic_read(&mm->tlb_flush_pending), mm->def_flags, &mm->def_flags ); } EXPORT_SYMBOL(dump_mm); static bool page_init_poisoning __read_mostly = true; static int __init setup_vm_debug(char *str) { bool __page_init_poisoning = true; /* * Calling vm_debug with no arguments is equivalent to requesting * to enable all debugging options we can control. */ if (*str++ != '=' || !*str) goto out; __page_init_poisoning = false; if (*str == '-') goto out; while (*str) { switch (tolower(*str)) { case'p': __page_init_poisoning = true; break; default: pr_err("vm_debug option '%c' unknown. 
skipped\n", *str); } str++; } out: if (page_init_poisoning && !__page_init_poisoning) pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n"); page_init_poisoning = __page_init_poisoning; return 1; } __setup("vm_debug", setup_vm_debug); void page_init_poison(struct page *page, size_t size) { if (page_init_poisoning) memset(page, PAGE_POISON_PATTERN, size); } void vma_iter_dump_tree(const struct vma_iterator *vmi) { #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) mas_dump(&vmi->mas); mt_dump(vmi->mas.tree, mt_dump_hex); #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ } #endif /* CONFIG_DEBUG_VM */
linux-master
mm/debug.c
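dump_page() above is the usual entry point when mm code wants to report a suspicious page. A hedged kernel-context sketch (not a standalone program) of a hypothetical sanity check calling it; check_expected_refcount() is an invented helper, not an existing kernel function:

/* Hedged kernel-context sketch: dump the offending page before warning. */
#include <linux/mm.h>
#include <linux/mmdebug.h>

static void check_expected_refcount(struct page *page, int expected)
{
	if (page_ref_count(page) != expected) {
		/* Prints refcount, mapcount, mapping, flags and page owner. */
		dump_page(page, "unexpected refcount");
		WARN_ON_ONCE(1);
	}
}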
// SPDX-License-Identifier: GPL-2.0 #include <linux/swap_cgroup.h> #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/swapops.h> /* depends on mm.h include */ static DEFINE_MUTEX(swap_cgroup_mutex); struct swap_cgroup_ctrl { struct page **map; unsigned long length; spinlock_t lock; }; static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES]; struct swap_cgroup { unsigned short id; }; #define SC_PER_PAGE (PAGE_SIZE/sizeof(struct swap_cgroup)) /* * SwapCgroup implements "lookup" and "exchange" operations. * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge * against SwapCache. At swap_free(), this is accessed directly from swap. * * This means, * - we have no race in "exchange" when we're accessed via SwapCache because * SwapCache(and its swp_entry) is under lock. * - When called via swap_free(), there is no user of this entry and no race. * Then, we don't need lock around "exchange". * * TODO: we can push these buffers out to HIGHMEM. */ /* * allocate buffer for swap_cgroup. */ static int swap_cgroup_prepare(int type) { struct page *page; struct swap_cgroup_ctrl *ctrl; unsigned long idx, max; ctrl = &swap_cgroup_ctrl[type]; for (idx = 0; idx < ctrl->length; idx++) { page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) goto not_enough_page; ctrl->map[idx] = page; if (!(idx % SWAP_CLUSTER_MAX)) cond_resched(); } return 0; not_enough_page: max = idx; for (idx = 0; idx < max; idx++) __free_page(ctrl->map[idx]); return -ENOMEM; } static struct swap_cgroup *__lookup_swap_cgroup(struct swap_cgroup_ctrl *ctrl, pgoff_t offset) { struct page *mappage; struct swap_cgroup *sc; mappage = ctrl->map[offset / SC_PER_PAGE]; sc = page_address(mappage); return sc + offset % SC_PER_PAGE; } static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent, struct swap_cgroup_ctrl **ctrlp) { pgoff_t offset = swp_offset(ent); struct swap_cgroup_ctrl *ctrl; ctrl = &swap_cgroup_ctrl[swp_type(ent)]; if (ctrlp) *ctrlp = ctrl; return __lookup_swap_cgroup(ctrl, offset); } /** * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry. * @ent: swap entry to be cmpxchged * @old: old id * @new: new id * * Returns old id at success, 0 at failure. * (There is no mem_cgroup using 0 as its id) */ unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, unsigned short old, unsigned short new) { struct swap_cgroup_ctrl *ctrl; struct swap_cgroup *sc; unsigned long flags; unsigned short retval; sc = lookup_swap_cgroup(ent, &ctrl); spin_lock_irqsave(&ctrl->lock, flags); retval = sc->id; if (retval == old) sc->id = new; else retval = 0; spin_unlock_irqrestore(&ctrl->lock, flags); return retval; } /** * swap_cgroup_record - record mem_cgroup for a set of swap entries * @ent: the first swap entry to be recorded into * @id: mem_cgroup to be recorded * @nr_ents: number of swap entries to be recorded * * Returns old value at success, 0 at failure. * (Of course, old value can be 0.) 
*/ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id, unsigned int nr_ents) { struct swap_cgroup_ctrl *ctrl; struct swap_cgroup *sc; unsigned short old; unsigned long flags; pgoff_t offset = swp_offset(ent); pgoff_t end = offset + nr_ents; sc = lookup_swap_cgroup(ent, &ctrl); spin_lock_irqsave(&ctrl->lock, flags); old = sc->id; for (;;) { VM_BUG_ON(sc->id != old); sc->id = id; offset++; if (offset == end) break; if (offset % SC_PER_PAGE) sc++; else sc = __lookup_swap_cgroup(ctrl, offset); } spin_unlock_irqrestore(&ctrl->lock, flags); return old; } /** * lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry * @ent: swap entry to be looked up. * * Returns ID of mem_cgroup at success. 0 at failure. (0 is invalid ID) */ unsigned short lookup_swap_cgroup_id(swp_entry_t ent) { return lookup_swap_cgroup(ent, NULL)->id; } int swap_cgroup_swapon(int type, unsigned long max_pages) { void *array; unsigned long length; struct swap_cgroup_ctrl *ctrl; if (mem_cgroup_disabled()) return 0; length = DIV_ROUND_UP(max_pages, SC_PER_PAGE); array = vcalloc(length, sizeof(void *)); if (!array) goto nomem; ctrl = &swap_cgroup_ctrl[type]; mutex_lock(&swap_cgroup_mutex); ctrl->length = length; ctrl->map = array; spin_lock_init(&ctrl->lock); if (swap_cgroup_prepare(type)) { /* memory shortage */ ctrl->map = NULL; ctrl->length = 0; mutex_unlock(&swap_cgroup_mutex); vfree(array); goto nomem; } mutex_unlock(&swap_cgroup_mutex); return 0; nomem: pr_info("couldn't allocate enough memory for swap_cgroup\n"); pr_info("swap_cgroup can be disabled by swapaccount=0 boot option\n"); return -ENOMEM; } void swap_cgroup_swapoff(int type) { struct page **map; unsigned long i, length; struct swap_cgroup_ctrl *ctrl; if (mem_cgroup_disabled()) return; mutex_lock(&swap_cgroup_mutex); ctrl = &swap_cgroup_ctrl[type]; map = ctrl->map; length = ctrl->length; ctrl->map = NULL; ctrl->length = 0; mutex_unlock(&swap_cgroup_mutex); if (map) { for (i = 0; i < length; i++) { struct page *page = map[i]; if (page) __free_page(page); if (!(i % SWAP_CLUSTER_MAX)) cond_resched(); } vfree(map); } }
linux-master
mm/swap_cgroup.c
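swap_cgroup_ctrl above keeps an array of pages, each holding SC_PER_PAGE two-byte records, and __lookup_swap_cgroup() splits a swap offset into a page index and a slot within that page. A hedged userspace model of that split, with invented names and a fixed 4 KiB page; none of it is kernel API:

/* Hedged model of the two-level lookup in __lookup_swap_cgroup(). */
#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE_SIZE	4096
struct swap_cgroup_model { unsigned short id; };
#define SC_PER_PAGE	(MODEL_PAGE_SIZE / sizeof(struct swap_cgroup_model))

int main(void)
{
	unsigned long nr_offsets = 3 * SC_PER_PAGE;	/* pretend swap device size */
	unsigned long nr_pages = (nr_offsets + SC_PER_PAGE - 1) / SC_PER_PAGE;
	struct swap_cgroup_model **map = calloc(nr_pages, sizeof(*map));

	for (unsigned long i = 0; i < nr_pages; i++)
		map[i] = calloc(1, MODEL_PAGE_SIZE);

	unsigned long offset = SC_PER_PAGE + 42;	/* arbitrary swap slot */
	struct swap_cgroup_model *sc = map[offset / SC_PER_PAGE] + offset % SC_PER_PAGE;
	sc->id = 7;					/* "record" a cgroup id */
	printf("offset %lu -> page %lu slot %lu id %u\n", offset,
	       (unsigned long)(offset / SC_PER_PAGE),
	       (unsigned long)(offset % SC_PER_PAGE), sc->id);

	for (unsigned long i = 0; i < nr_pages; i++)
		free(map[i]);
	free(map);
	return 0;
}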
// SPDX-License-Identifier: GPL-2.0 /* * Re-map IO memory to kernel address space so that we can access it. * This is needed for high PCI addresses that aren't mapped in the * 640k-1MB IO memory area on PC's * * (C) Copyright 1995 1996 Linus Torvalds */ #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/io.h> #include <linux/export.h> #include <linux/ioremap.h> void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size, pgprot_t prot) { unsigned long offset, vaddr; phys_addr_t last_addr; struct vm_struct *area; /* An early platform driver might end up here */ if (WARN_ON_ONCE(!slab_is_available())) return NULL; /* Disallow wrap-around or zero size */ last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) return NULL; /* Page-align mappings */ offset = phys_addr & (~PAGE_MASK); phys_addr -= offset; size = PAGE_ALIGN(size + offset); area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START, IOREMAP_END, __builtin_return_address(0)); if (!area) return NULL; vaddr = (unsigned long)area->addr; area->phys_addr = phys_addr; if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) { free_vm_area(area); return NULL; } return (void __iomem *)(vaddr + offset); } #ifndef ioremap_prot void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, unsigned long prot) { return generic_ioremap_prot(phys_addr, size, __pgprot(prot)); } EXPORT_SYMBOL(ioremap_prot); #endif void generic_iounmap(volatile void __iomem *addr) { void *vaddr = (void *)((unsigned long)addr & PAGE_MASK); if (is_ioremap_addr(vaddr)) vunmap(vaddr); } #ifndef iounmap void iounmap(volatile void __iomem *addr) { generic_iounmap(addr); } EXPORT_SYMBOL(iounmap); #endif
linux-master
mm/ioremap.c
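generic_ioremap_prot() above hands out a VM_IOREMAP area and wires it to the physical range with ioremap_page_range(). A hedged kernel-context sketch (not standalone) of a hypothetical driver consuming the resulting mapping; REG_STATUS and the probe signature are made up:

/* Hedged kernel-context sketch: map a device's MMIO range and read one
 * register through the __iomem mapping returned by ioremap(). */
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/printk.h>

#define REG_STATUS	0x04		/* hypothetical register offset */

static int hypothetical_probe(phys_addr_t bar_start, size_t bar_len)
{
	void __iomem *regs = ioremap(bar_start, bar_len);

	if (!regs)
		return -ENOMEM;

	pr_info("device status: %#x\n", readl(regs + REG_STATUS));
	iounmap(regs);
	return 0;
}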
// SPDX-License-Identifier: GPL-2.0-only /* * mm/truncate.c - code for taking down pages from address_spaces * * Copyright (C) 2002, Linus Torvalds * * 10Sep2002 Andrew Morton * Initial version. */ #include <linux/kernel.h> #include <linux/backing-dev.h> #include <linux/dax.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/export.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/pagevec.h> #include <linux/task_io_accounting_ops.h> #include <linux/shmem_fs.h> #include <linux/rmap.h> #include "internal.h" /* * Regular page slots are stabilized by the page lock even without the tree * itself locked. These unlocked entries need verification under the tree * lock. */ static inline void __clear_shadow_entry(struct address_space *mapping, pgoff_t index, void *entry) { XA_STATE(xas, &mapping->i_pages, index); xas_set_update(&xas, workingset_update_node); if (xas_load(&xas) != entry) return; xas_store(&xas, NULL); } static void clear_shadow_entry(struct address_space *mapping, pgoff_t index, void *entry) { spin_lock(&mapping->host->i_lock); xa_lock_irq(&mapping->i_pages); __clear_shadow_entry(mapping, index, entry); xa_unlock_irq(&mapping->i_pages); if (mapping_shrinkable(mapping)) inode_add_lru(mapping->host); spin_unlock(&mapping->host->i_lock); } /* * Unconditionally remove exceptional entries. Usually called from truncate * path. Note that the folio_batch may be altered by this function by removing * exceptional entries similar to what folio_batch_remove_exceptionals() does. */ static void truncate_folio_batch_exceptionals(struct address_space *mapping, struct folio_batch *fbatch, pgoff_t *indices) { int i, j; bool dax; /* Handled by shmem itself */ if (shmem_mapping(mapping)) return; for (j = 0; j < folio_batch_count(fbatch); j++) if (xa_is_value(fbatch->folios[j])) break; if (j == folio_batch_count(fbatch)) return; dax = dax_mapping(mapping); if (!dax) { spin_lock(&mapping->host->i_lock); xa_lock_irq(&mapping->i_pages); } for (i = j; i < folio_batch_count(fbatch); i++) { struct folio *folio = fbatch->folios[i]; pgoff_t index = indices[i]; if (!xa_is_value(folio)) { fbatch->folios[j++] = folio; continue; } if (unlikely(dax)) { dax_delete_mapping_entry(mapping, index); continue; } __clear_shadow_entry(mapping, index, folio); } if (!dax) { xa_unlock_irq(&mapping->i_pages); if (mapping_shrinkable(mapping)) inode_add_lru(mapping->host); spin_unlock(&mapping->host->i_lock); } fbatch->nr = j; } /* * Invalidate exceptional entry if easily possible. This handles exceptional * entries for invalidate_inode_pages(). */ static int invalidate_exceptional_entry(struct address_space *mapping, pgoff_t index, void *entry) { /* Handled by shmem itself, or for DAX we do nothing. */ if (shmem_mapping(mapping) || dax_mapping(mapping)) return 1; clear_shadow_entry(mapping, index, entry); return 1; } /* * Invalidate exceptional entry if clean. This handles exceptional entries for * invalidate_inode_pages2() so for DAX it evicts only clean entries. */ static int invalidate_exceptional_entry2(struct address_space *mapping, pgoff_t index, void *entry) { /* Handled by shmem itself */ if (shmem_mapping(mapping)) return 1; if (dax_mapping(mapping)) return dax_invalidate_mapping_entry_sync(mapping, index); clear_shadow_entry(mapping, index, entry); return 1; } /** * folio_invalidate - Invalidate part or all of a folio. * @folio: The folio which is affected. 
* @offset: start of the range to invalidate * @length: length of the range to invalidate * * folio_invalidate() is called when all or part of the folio has become * invalidated by a truncate operation. * * folio_invalidate() does not have to release all buffers, but it must * ensure that no dirty buffer is left outside @offset and that no I/O * is underway against any of the blocks which are outside the truncation * point. Because the caller is about to free (and possibly reuse) those * blocks on-disk. */ void folio_invalidate(struct folio *folio, size_t offset, size_t length) { const struct address_space_operations *aops = folio->mapping->a_ops; if (aops->invalidate_folio) aops->invalidate_folio(folio, offset, length); } EXPORT_SYMBOL_GPL(folio_invalidate); /* * If truncate cannot remove the fs-private metadata from the page, the page * becomes orphaned. It will be left on the LRU and may even be mapped into * user pagetables if we're racing with filemap_fault(). * * We need to bail out if page->mapping is no longer equal to the original * mapping. This happens a) when the VM reclaimed the page while we waited on * its lock, b) when a concurrent invalidate_mapping_pages got there first and * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space. */ static void truncate_cleanup_folio(struct folio *folio) { if (folio_mapped(folio)) unmap_mapping_folio(folio); if (folio_has_private(folio)) folio_invalidate(folio, 0, folio_size(folio)); /* * Some filesystems seem to re-dirty the page even after * the VM has canceled the dirty bit (eg ext3 journaling). * Hence dirty accounting check is placed after invalidation. */ folio_cancel_dirty(folio); folio_clear_mappedtodisk(folio); } int truncate_inode_folio(struct address_space *mapping, struct folio *folio) { if (folio->mapping != mapping) return -EIO; truncate_cleanup_folio(folio); filemap_remove_folio(folio); return 0; } /* * Handle partial folios. The folio may be entirely within the * range if a split has raced with us. If not, we zero the part of the * folio that's within the [start, end] range, and then split the folio if * it's large. split_page_range() will discard pages which now lie beyond * i_size, and we rely on the caller to discard pages which lie within a * newly created hole. * * Returns false if splitting failed so the caller can avoid * discarding the entire folio which is stubbornly unsplit. */ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end) { loff_t pos = folio_pos(folio); unsigned int offset, length; if (pos < start) offset = start - pos; else offset = 0; length = folio_size(folio); if (pos + length <= (u64)end) length = length - offset; else length = end + 1 - pos - offset; folio_wait_writeback(folio); if (length == folio_size(folio)) { truncate_inode_folio(folio->mapping, folio); return true; } /* * We may be zeroing pages we're about to discard, but it avoids * doing a complex calculation here, and then doing the zeroing * anyway if the page split fails. */ folio_zero_range(folio, offset, length); if (folio_has_private(folio)) folio_invalidate(folio, offset, length); if (!folio_test_large(folio)) return true; if (split_folio(folio) == 0) return true; if (folio_test_dirty(folio)) return false; truncate_inode_folio(folio->mapping, folio); return true; } /* * Used to get rid of pages on hardware memory corruption. 
*/ int generic_error_remove_page(struct address_space *mapping, struct page *page) { VM_BUG_ON_PAGE(PageTail(page), page); if (!mapping) return -EINVAL; /* * Only punch for normal data pages for now. * Handling other types like directories would need more auditing. */ if (!S_ISREG(mapping->host->i_mode)) return -EIO; return truncate_inode_folio(mapping, page_folio(page)); } EXPORT_SYMBOL(generic_error_remove_page); static long mapping_evict_folio(struct address_space *mapping, struct folio *folio) { if (folio_test_dirty(folio) || folio_test_writeback(folio)) return 0; /* The refcount will be elevated if any page in the folio is mapped */ if (folio_ref_count(folio) > folio_nr_pages(folio) + folio_has_private(folio) + 1) return 0; if (!filemap_release_folio(folio, 0)) return 0; return remove_mapping(mapping, folio); } /** * invalidate_inode_page() - Remove an unused page from the pagecache. * @page: The page to remove. * * Safely invalidate one page from its pagecache mapping. * It only drops clean, unused pages. * * Context: Page must be locked. * Return: The number of pages successfully removed. */ long invalidate_inode_page(struct page *page) { struct folio *folio = page_folio(page); struct address_space *mapping = folio_mapping(folio); /* The page may have been truncated before it was locked */ if (!mapping) return 0; return mapping_evict_folio(mapping, folio); } /** * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets * @mapping: mapping to truncate * @lstart: offset from which to truncate * @lend: offset to which to truncate (inclusive) * * Truncate the page cache, removing the pages that are between * specified offsets (and zeroing out partial pages * if lstart or lend + 1 is not page aligned). * * Truncate takes two passes - the first pass is nonblocking. It will not * block on page locks and it will not block on writeback. The second pass * will wait. This is to prevent as much IO as possible in the affected region. * The first pass will remove most pages, so the search cost of the second pass * is low. * * We pass down the cache-hot hint to the page freeing code. Even if the * mapping is large, it is probably the case that the final pages are the most * recently touched, and freeing happens in ascending file offset order. * * Note that since ->invalidate_folio() accepts range to invalidate * truncate_inode_pages_range is able to handle cases where lend + 1 is not * page aligned properly. */ void truncate_inode_pages_range(struct address_space *mapping, loff_t lstart, loff_t lend) { pgoff_t start; /* inclusive */ pgoff_t end; /* exclusive */ struct folio_batch fbatch; pgoff_t indices[PAGEVEC_SIZE]; pgoff_t index; int i; struct folio *folio; bool same_folio; if (mapping_empty(mapping)) return; /* * 'start' and 'end' always covers the range of pages to be fully * truncated. Partial pages are covered with 'partial_start' at the * start of the range and 'partial_end' at the end of the range. * Note that 'end' is exclusive while 'lend' is inclusive. */ start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; if (lend == -1) /* * lend == -1 indicates end-of-file so we have to set 'end' * to the highest possible pgoff_t and since the type is * unsigned we're using -1. 
*/ end = -1; else end = (lend + 1) >> PAGE_SHIFT; folio_batch_init(&fbatch); index = start; while (index < end && find_lock_entries(mapping, &index, end - 1, &fbatch, indices)) { truncate_folio_batch_exceptionals(mapping, &fbatch, indices); for (i = 0; i < folio_batch_count(&fbatch); i++) truncate_cleanup_folio(fbatch.folios[i]); delete_from_page_cache_batch(mapping, &fbatch); for (i = 0; i < folio_batch_count(&fbatch); i++) folio_unlock(fbatch.folios[i]); folio_batch_release(&fbatch); cond_resched(); } same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT); folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0); if (!IS_ERR(folio)) { same_folio = lend < folio_pos(folio) + folio_size(folio); if (!truncate_inode_partial_folio(folio, lstart, lend)) { start = folio_next_index(folio); if (same_folio) end = folio->index; } folio_unlock(folio); folio_put(folio); folio = NULL; } if (!same_folio) { folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT, FGP_LOCK, 0); if (!IS_ERR(folio)) { if (!truncate_inode_partial_folio(folio, lstart, lend)) end = folio->index; folio_unlock(folio); folio_put(folio); } } index = start; while (index < end) { cond_resched(); if (!find_get_entries(mapping, &index, end - 1, &fbatch, indices)) { /* If all gone from start onwards, we're done */ if (index == start) break; /* Otherwise restart to make sure all gone */ index = start; continue; } for (i = 0; i < folio_batch_count(&fbatch); i++) { struct folio *folio = fbatch.folios[i]; /* We rely upon deletion not changing page->index */ if (xa_is_value(folio)) continue; folio_lock(folio); VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio); folio_wait_writeback(folio); truncate_inode_folio(mapping, folio); folio_unlock(folio); } truncate_folio_batch_exceptionals(mapping, &fbatch, indices); folio_batch_release(&fbatch); } } EXPORT_SYMBOL(truncate_inode_pages_range); /** * truncate_inode_pages - truncate *all* the pages from an offset * @mapping: mapping to truncate * @lstart: offset from which to truncate * * Called under (and serialised by) inode->i_rwsem and * mapping->invalidate_lock. * * Note: When this function returns, there can be a page in the process of * deletion (inside __filemap_remove_folio()) in the specified range. Thus * mapping->nrpages can be non-zero when this function returns even after * truncation of the whole mapping. */ void truncate_inode_pages(struct address_space *mapping, loff_t lstart) { truncate_inode_pages_range(mapping, lstart, (loff_t)-1); } EXPORT_SYMBOL(truncate_inode_pages); /** * truncate_inode_pages_final - truncate *all* pages before inode dies * @mapping: mapping to truncate * * Called under (and serialized by) inode->i_rwsem. * * Filesystems have to use this in the .evict_inode path to inform the * VM that this is the final truncate and the inode is going away. */ void truncate_inode_pages_final(struct address_space *mapping) { /* * Page reclaim can not participate in regular inode lifetime * management (can't call iput()) and thus can race with the * inode teardown. Tell it when the address space is exiting, * so that it does not install eviction information after the * final truncate has begun. */ mapping_set_exiting(mapping); if (!mapping_empty(mapping)) { /* * As truncation uses a lockless tree lookup, cycle * the tree lock to make sure any ongoing tree * modification that does not see AS_EXITING is * completed before starting the final truncate. 
*/ xa_lock_irq(&mapping->i_pages); xa_unlock_irq(&mapping->i_pages); } truncate_inode_pages(mapping, 0); } EXPORT_SYMBOL(truncate_inode_pages_final); /** * mapping_try_invalidate - Invalidate all the evictable folios of one inode * @mapping: the address_space which holds the folios to invalidate * @start: the offset 'from' which to invalidate * @end: the offset 'to' which to invalidate (inclusive) * @nr_failed: How many folio invalidations failed * * This function is similar to invalidate_mapping_pages(), except that it * returns the number of folios which could not be evicted in @nr_failed. */ unsigned long mapping_try_invalidate(struct address_space *mapping, pgoff_t start, pgoff_t end, unsigned long *nr_failed) { pgoff_t indices[PAGEVEC_SIZE]; struct folio_batch fbatch; pgoff_t index = start; unsigned long ret; unsigned long count = 0; int i; folio_batch_init(&fbatch); while (find_lock_entries(mapping, &index, end, &fbatch, indices)) { for (i = 0; i < folio_batch_count(&fbatch); i++) { struct folio *folio = fbatch.folios[i]; /* We rely upon deletion not changing folio->index */ if (xa_is_value(folio)) { count += invalidate_exceptional_entry(mapping, indices[i], folio); continue; } ret = mapping_evict_folio(mapping, folio); folio_unlock(folio); /* * Invalidation is a hint that the folio is no longer * of interest and try to speed up its reclaim. */ if (!ret) { deactivate_file_folio(folio); /* Likely in the lru cache of a remote CPU */ if (nr_failed) (*nr_failed)++; } count += ret; } folio_batch_remove_exceptionals(&fbatch); folio_batch_release(&fbatch); cond_resched(); } return count; } /** * invalidate_mapping_pages - Invalidate all clean, unlocked cache of one inode * @mapping: the address_space which holds the cache to invalidate * @start: the offset 'from' which to invalidate * @end: the offset 'to' which to invalidate (inclusive) * * This function removes pages that are clean, unmapped and unlocked, * as well as shadow entries. It will not block on IO activity. * * If you want to remove all the pages of one inode, regardless of * their use and writeback state, use truncate_inode_pages(). * * Return: The number of indices that had their contents invalidated */ unsigned long invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end) { return mapping_try_invalidate(mapping, start, end, NULL); } EXPORT_SYMBOL(invalidate_mapping_pages); /* * This is like invalidate_inode_page(), except it ignores the page's * refcount. We do this because invalidate_inode_pages2() needs stronger * invalidation guarantees, and cannot afford to leave pages behind because * shrink_page_list() has a temp ref on them, or because they're transiently * sitting in the folio_add_lru() caches. 
*/ static int invalidate_complete_folio2(struct address_space *mapping, struct folio *folio) { if (folio->mapping != mapping) return 0; if (!filemap_release_folio(folio, GFP_KERNEL)) return 0; spin_lock(&mapping->host->i_lock); xa_lock_irq(&mapping->i_pages); if (folio_test_dirty(folio)) goto failed; BUG_ON(folio_has_private(folio)); __filemap_remove_folio(folio, NULL); xa_unlock_irq(&mapping->i_pages); if (mapping_shrinkable(mapping)) inode_add_lru(mapping->host); spin_unlock(&mapping->host->i_lock); filemap_free_folio(mapping, folio); return 1; failed: xa_unlock_irq(&mapping->i_pages); spin_unlock(&mapping->host->i_lock); return 0; } static int folio_launder(struct address_space *mapping, struct folio *folio) { if (!folio_test_dirty(folio)) return 0; if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL) return 0; return mapping->a_ops->launder_folio(folio); } /** * invalidate_inode_pages2_range - remove range of pages from an address_space * @mapping: the address_space * @start: the page offset 'from' which to invalidate * @end: the page offset 'to' which to invalidate (inclusive) * * Any pages which are found to be mapped into pagetables are unmapped prior to * invalidation. * * Return: -EBUSY if any pages could not be invalidated. */ int invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end) { pgoff_t indices[PAGEVEC_SIZE]; struct folio_batch fbatch; pgoff_t index; int i; int ret = 0; int ret2 = 0; int did_range_unmap = 0; if (mapping_empty(mapping)) return 0; folio_batch_init(&fbatch); index = start; while (find_get_entries(mapping, &index, end, &fbatch, indices)) { for (i = 0; i < folio_batch_count(&fbatch); i++) { struct folio *folio = fbatch.folios[i]; /* We rely upon deletion not changing folio->index */ if (xa_is_value(folio)) { if (!invalidate_exceptional_entry2(mapping, indices[i], folio)) ret = -EBUSY; continue; } if (!did_range_unmap && folio_mapped(folio)) { /* * If folio is mapped, before taking its lock, * zap the rest of the file in one hit. */ unmap_mapping_pages(mapping, indices[i], (1 + end - indices[i]), false); did_range_unmap = 1; } folio_lock(folio); if (unlikely(folio->mapping != mapping)) { folio_unlock(folio); continue; } VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio); folio_wait_writeback(folio); if (folio_mapped(folio)) unmap_mapping_folio(folio); BUG_ON(folio_mapped(folio)); ret2 = folio_launder(mapping, folio); if (ret2 == 0) { if (!invalidate_complete_folio2(mapping, folio)) ret2 = -EBUSY; } if (ret2 < 0) ret = ret2; folio_unlock(folio); } folio_batch_remove_exceptionals(&fbatch); folio_batch_release(&fbatch); cond_resched(); } /* * For DAX we invalidate page tables after invalidating page cache. We * could invalidate page tables while invalidating each entry however * that would be expensive. And doing range unmapping before doesn't * work as we have no cheap way to find whether page cache entry didn't * get remapped later. */ if (dax_mapping(mapping)) { unmap_mapping_pages(mapping, start, end - start + 1, false); } return ret; } EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range); /** * invalidate_inode_pages2 - remove all pages from an address_space * @mapping: the address_space * * Any pages which are found to be mapped into pagetables are unmapped prior to * invalidation. * * Return: -EBUSY if any pages could not be invalidated. 
*/ int invalidate_inode_pages2(struct address_space *mapping) { return invalidate_inode_pages2_range(mapping, 0, -1); } EXPORT_SYMBOL_GPL(invalidate_inode_pages2); /** * truncate_pagecache - unmap and remove pagecache that has been truncated * @inode: inode * @newsize: new file size * * inode's new i_size must already be written before truncate_pagecache * is called. * * This function should typically be called before the filesystem * releases resources associated with the freed range (eg. deallocates * blocks). This way, pagecache will always stay logically coherent * with on-disk format, and the filesystem would not have to deal with * situations such as writepage being called for a page that has already * had its underlying blocks deallocated. */ void truncate_pagecache(struct inode *inode, loff_t newsize) { struct address_space *mapping = inode->i_mapping; loff_t holebegin = round_up(newsize, PAGE_SIZE); /* * unmap_mapping_range is called twice, first simply for * efficiency so that truncate_inode_pages does fewer * single-page unmaps. However after this first call, and * before truncate_inode_pages finishes, it is possible for * private pages to be COWed, which remain after * truncate_inode_pages finishes, hence the second * unmap_mapping_range call must be made for correctness. */ unmap_mapping_range(mapping, holebegin, 0, 1); truncate_inode_pages(mapping, newsize); unmap_mapping_range(mapping, holebegin, 0, 1); } EXPORT_SYMBOL(truncate_pagecache); /** * truncate_setsize - update inode and pagecache for a new file size * @inode: inode * @newsize: new file size * * truncate_setsize updates i_size and performs pagecache truncation (if * necessary) to @newsize. It will be typically be called from the filesystem's * setattr function when ATTR_SIZE is passed in. * * Must be called with a lock serializing truncates and writes (generally * i_rwsem but e.g. xfs uses a different lock) and before all filesystem * specific block truncation has been performed. */ void truncate_setsize(struct inode *inode, loff_t newsize) { loff_t oldsize = inode->i_size; i_size_write(inode, newsize); if (newsize > oldsize) pagecache_isize_extended(inode, oldsize, newsize); truncate_pagecache(inode, newsize); } EXPORT_SYMBOL(truncate_setsize); /** * pagecache_isize_extended - update pagecache after extension of i_size * @inode: inode for which i_size was extended * @from: original inode size * @to: new inode size * * Handle extension of inode size either caused by extending truncate or by * write starting after current i_size. We mark the page straddling current * i_size RO so that page_mkwrite() is called on the nearest write access to * the page. This way filesystem can be sure that page_mkwrite() is called on * the page before user writes to the page via mmap after the i_size has been * changed. * * The function must be called after i_size is updated so that page fault * coming after we unlock the page will already see the new i_size. * The function must be called while we still hold i_rwsem - this not only * makes sure i_size is stable but also that userspace cannot observe new * i_size value before we are prepared to store mmap writes at new inode size. */ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to) { int bsize = i_blocksize(inode); loff_t rounded_from; struct page *page; pgoff_t index; WARN_ON(to > inode->i_size); if (from >= to || bsize == PAGE_SIZE) return; /* Page straddling @from will not have any hole block created? 
*/ rounded_from = round_up(from, bsize); if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1))) return; index = from >> PAGE_SHIFT; page = find_lock_page(inode->i_mapping, index); /* Page not cached? Nothing to do */ if (!page) return; /* * See clear_page_dirty_for_io() for details why set_page_dirty() * is needed. */ if (page_mkclean(page)) set_page_dirty(page); unlock_page(page); put_page(page); } EXPORT_SYMBOL(pagecache_isize_extended); /** * truncate_pagecache_range - unmap and remove pagecache that is hole-punched * @inode: inode * @lstart: offset of beginning of hole * @lend: offset of last byte of hole * * This function should typically be called before the filesystem * releases resources associated with the freed range (eg. deallocates * blocks). This way, pagecache will always stay logically coherent * with on-disk format, and the filesystem would not have to deal with * situations such as writepage being called for a page that has already * had its underlying blocks deallocated. */ void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend) { struct address_space *mapping = inode->i_mapping; loff_t unmap_start = round_up(lstart, PAGE_SIZE); loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1; /* * This rounding is currently just for example: unmap_mapping_range * expands its hole outwards, whereas we want it to contract the hole * inwards. However, existing callers of truncate_pagecache_range are * doing their own page rounding first. Note that unmap_mapping_range * allows holelen 0 for all, and we allow lend -1 for end of file. */ /* * Unlike in truncate_pagecache, unmap_mapping_range is called only * once (before truncating pagecache), and without "even_cows" flag: * hole-punching should not remove private COWed pages from the hole. */ if ((u64)unmap_end > (u64)unmap_start) unmap_mapping_range(mapping, unmap_start, 1 + unmap_end - unmap_start, 0); truncate_inode_pages_range(mapping, lstart, lend); } EXPORT_SYMBOL(truncate_pagecache_range);
linux-master
mm/truncate.c
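The truncate_setsize() kerneldoc above says it is typically invoked from a filesystem's ->setattr handler when ATTR_SIZE is passed, with i_size updated before the pagecache is truncated. The sketch below is not part of mm/truncate.c; it is a minimal, hedged illustration of that call order for a hypothetical in-memory filesystem, assuming the mnt_idmap-based setattr_prepare()/setattr_copy() helpers used elsewhere in this tree. The names example_setattr and example_inode_operations are made up for illustration only.

#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Hypothetical ->setattr handler: validate the attribute change, let
 * truncate_setsize() update i_size and drop the affected pagecache,
 * then copy the remaining attributes. A real filesystem would also
 * free its on-disk blocks for the truncated range afterwards.
 */
static int example_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			   struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = setattr_prepare(idmap, dentry, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		/* Updates i_size, then unmaps and removes pagecache. */
		truncate_setsize(inode, attr->ia_size);
		/* ...filesystem-specific block truncation would go here... */
	}

	setattr_copy(idmap, inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static const struct inode_operations example_inode_operations = {
	.setattr = example_setattr,
};

The ordering matters: as the truncate_pagecache() comment explains, the new i_size must already be written before the pagecache is torn down, so that concurrent faults and writeback stay coherent with the on-disk state.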
// SPDX-License-Identifier: GPL-2.0 /* * linux/mm/madvise.c * * Copyright (C) 1999 Linus Torvalds * Copyright (C) 2002 Christoph Hellwig */ #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/syscalls.h> #include <linux/mempolicy.h> #include <linux/page-isolation.h> #include <linux/page_idle.h> #include <linux/userfaultfd_k.h> #include <linux/hugetlb.h> #include <linux/falloc.h> #include <linux/fadvise.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/mm_inline.h> #include <linux/string.h> #include <linux/uio.h> #include <linux/ksm.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/pagewalk.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/shmem_fs.h> #include <linux/mmu_notifier.h> #include <asm/tlb.h> #include "internal.h" #include "swap.h" struct madvise_walk_private { struct mmu_gather *tlb; bool pageout; }; /* * Any behaviour which results in changes to the vma->vm_flags needs to * take mmap_lock for writing. Others, which simply traverse vmas, need * to only take it for reading. */ static int madvise_need_mmap_write(int behavior) { switch (behavior) { case MADV_REMOVE: case MADV_WILLNEED: case MADV_DONTNEED: case MADV_DONTNEED_LOCKED: case MADV_COLD: case MADV_PAGEOUT: case MADV_FREE: case MADV_POPULATE_READ: case MADV_POPULATE_WRITE: case MADV_COLLAPSE: return 0; default: /* be safe, default to 1. list exceptions explicitly */ return 1; } } #ifdef CONFIG_ANON_VMA_NAME struct anon_vma_name *anon_vma_name_alloc(const char *name) { struct anon_vma_name *anon_name; size_t count; /* Add 1 for NUL terminator at the end of the anon_name->name */ count = strlen(name) + 1; anon_name = kmalloc(struct_size(anon_name, name, count), GFP_KERNEL); if (anon_name) { kref_init(&anon_name->kref); memcpy(anon_name->name, name, count); } return anon_name; } void anon_vma_name_free(struct kref *kref) { struct anon_vma_name *anon_name = container_of(kref, struct anon_vma_name, kref); kfree(anon_name); } struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma) { mmap_assert_locked(vma->vm_mm); return vma->anon_name; } /* mmap_lock should be write-locked */ static int replace_anon_vma_name(struct vm_area_struct *vma, struct anon_vma_name *anon_name) { struct anon_vma_name *orig_name = anon_vma_name(vma); if (!anon_name) { vma->anon_name = NULL; anon_vma_name_put(orig_name); return 0; } if (anon_vma_name_eq(orig_name, anon_name)) return 0; vma->anon_name = anon_vma_name_reuse(anon_name); anon_vma_name_put(orig_name); return 0; } #else /* CONFIG_ANON_VMA_NAME */ static int replace_anon_vma_name(struct vm_area_struct *vma, struct anon_vma_name *anon_name) { if (anon_name) return -EINVAL; return 0; } #endif /* CONFIG_ANON_VMA_NAME */ /* * Update the vm_flags on region of a vma, splitting it or merging it as * necessary. Must be called with mmap_lock held for writing; * Caller should ensure anon_name stability by raising its refcount even when * anon_name belongs to a valid vma because this function might free that vma. 
*/ static int madvise_update_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, unsigned long new_flags, struct anon_vma_name *anon_name) { struct mm_struct *mm = vma->vm_mm; int error; pgoff_t pgoff; VMA_ITERATOR(vmi, mm, start); if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) { *prev = vma; return 0; } pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); *prev = vma_merge(&vmi, mm, *prev, start, end, new_flags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), vma->vm_userfaultfd_ctx, anon_name); if (*prev) { vma = *prev; goto success; } *prev = vma; if (start != vma->vm_start) { error = split_vma(&vmi, vma, start, 1); if (error) return error; } if (end != vma->vm_end) { error = split_vma(&vmi, vma, end, 0); if (error) return error; } success: /* vm_flags is protected by the mmap_lock held in write mode. */ vma_start_write(vma); vm_flags_reset(vma, new_flags); if (!vma->vm_file || vma_is_anon_shmem(vma)) { error = replace_anon_vma_name(vma, anon_name); if (error) return error; } return 0; } #ifdef CONFIG_SWAP static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->private; struct swap_iocb *splug = NULL; pte_t *ptep = NULL; spinlock_t *ptl; unsigned long addr; for (addr = start; addr < end; addr += PAGE_SIZE) { pte_t pte; swp_entry_t entry; struct page *page; if (!ptep++) { ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); if (!ptep) break; } pte = ptep_get(ptep); if (!is_swap_pte(pte)) continue; entry = pte_to_swp_entry(pte); if (unlikely(non_swap_entry(entry))) continue; pte_unmap_unlock(ptep, ptl); ptep = NULL; page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE, vma, addr, &splug); if (page) put_page(page); } if (ptep) pte_unmap_unlock(ptep, ptl); swap_read_unplug(splug); cond_resched(); return 0; } static const struct mm_walk_ops swapin_walk_ops = { .pmd_entry = swapin_walk_pmd_entry, .walk_lock = PGWALK_RDLOCK, }; static void shmem_swapin_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct address_space *mapping) { XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start)); pgoff_t end_index = linear_page_index(vma, end) - 1; struct page *page; struct swap_iocb *splug = NULL; rcu_read_lock(); xas_for_each(&xas, page, end_index) { unsigned long addr; swp_entry_t entry; if (!xa_is_value(page)) continue; entry = radix_to_swp_entry(page); /* There might be swapin error entries in shmem mapping. */ if (non_swap_entry(entry)) continue; addr = vma->vm_start + ((xas.xa_index - vma->vm_pgoff) << PAGE_SHIFT); xas_pause(&xas); rcu_read_unlock(); page = read_swap_cache_async(entry, mapping_gfp_mask(mapping), vma, addr, &splug); if (page) put_page(page); rcu_read_lock(); } rcu_read_unlock(); swap_read_unplug(splug); } #endif /* CONFIG_SWAP */ /* * Schedule all required I/O operations. Do not wait for completion. 
*/ static long madvise_willneed(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; struct file *file = vma->vm_file; loff_t offset; *prev = vma; #ifdef CONFIG_SWAP if (!file) { walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma); lru_add_drain(); /* Push any new pages onto the LRU now */ return 0; } if (shmem_mapping(file->f_mapping)) { shmem_swapin_range(vma, start, end, file->f_mapping); lru_add_drain(); /* Push any new pages onto the LRU now */ return 0; } #else if (!file) return -EBADF; #endif if (IS_DAX(file_inode(file))) { /* no bad return value, but ignore advice */ return 0; } /* * Filesystem's fadvise may need to take various locks. We need to * explicitly grab a reference because the vma (and hence the * vma's reference to the file) can go away as soon as we drop * mmap_lock. */ *prev = NULL; /* tell sys_madvise we drop mmap_lock */ get_file(file); offset = (loff_t)(start - vma->vm_start) + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); mmap_read_unlock(mm); vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED); fput(file); mmap_read_lock(mm); return 0; } static inline bool can_do_file_pageout(struct vm_area_struct *vma) { if (!vma->vm_file) return false; /* * paging out pagecache only for non-anonymous mappings that correspond * to the files the calling process could (if tried) open for writing; * otherwise we'd be including shared non-exclusive mappings, which * opens a side channel. */ return inode_owner_or_capable(&nop_mnt_idmap, file_inode(vma->vm_file)) || file_permission(vma->vm_file, MAY_WRITE) == 0; } static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct madvise_walk_private *private = walk->private; struct mmu_gather *tlb = private->tlb; bool pageout = private->pageout; struct mm_struct *mm = tlb->mm; struct vm_area_struct *vma = walk->vma; pte_t *start_pte, *pte, ptent; spinlock_t *ptl; struct folio *folio = NULL; LIST_HEAD(folio_list); bool pageout_anon_only_filter; if (fatal_signal_pending(current)) return -EINTR; pageout_anon_only_filter = pageout && !vma_is_anonymous(vma) && !can_do_file_pageout(vma); #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (pmd_trans_huge(*pmd)) { pmd_t orig_pmd; unsigned long next = pmd_addr_end(addr, end); tlb_change_page_size(tlb, HPAGE_PMD_SIZE); ptl = pmd_trans_huge_lock(pmd, vma); if (!ptl) return 0; orig_pmd = *pmd; if (is_huge_zero_pmd(orig_pmd)) goto huge_unlock; if (unlikely(!pmd_present(orig_pmd))) { VM_BUG_ON(thp_migration_supported() && !is_pmd_migration_entry(orig_pmd)); goto huge_unlock; } folio = pfn_folio(pmd_pfn(orig_pmd)); /* Do not interfere with other mappings of this folio */ if (folio_estimated_sharers(folio) != 1) goto huge_unlock; if (pageout_anon_only_filter && !folio_test_anon(folio)) goto huge_unlock; if (next - addr != HPAGE_PMD_SIZE) { int err; folio_get(folio); spin_unlock(ptl); folio_lock(folio); err = split_folio(folio); folio_unlock(folio); folio_put(folio); if (!err) goto regular_folio; return 0; } if (pmd_young(orig_pmd)) { pmdp_invalidate(vma, addr, pmd); orig_pmd = pmd_mkold(orig_pmd); set_pmd_at(mm, addr, pmd, orig_pmd); tlb_remove_pmd_tlb_entry(tlb, pmd, addr); } folio_clear_referenced(folio); folio_test_clear_young(folio); if (folio_test_active(folio)) folio_set_workingset(folio); if (pageout) { if (folio_isolate_lru(folio)) { if (folio_test_unevictable(folio)) folio_putback_lru(folio); else list_add(&folio->lru, &folio_list); } } else 
folio_deactivate(folio); huge_unlock: spin_unlock(ptl); if (pageout) reclaim_pages(&folio_list); return 0; } regular_folio: #endif tlb_change_page_size(tlb, PAGE_SIZE); start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); if (!start_pte) return 0; flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); for (; addr < end; pte++, addr += PAGE_SIZE) { ptent = ptep_get(pte); if (pte_none(ptent)) continue; if (!pte_present(ptent)) continue; folio = vm_normal_folio(vma, addr, ptent); if (!folio || folio_is_zone_device(folio)) continue; /* * Creating a THP page is expensive so split it only if we * are sure it's worth. Split it if we are only owner. */ if (folio_test_large(folio)) { int err; if (folio_estimated_sharers(folio) != 1) break; if (pageout_anon_only_filter && !folio_test_anon(folio)) break; if (!folio_trylock(folio)) break; folio_get(folio); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(start_pte, ptl); start_pte = NULL; err = split_folio(folio); folio_unlock(folio); folio_put(folio); if (err) break; start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); if (!start_pte) break; arch_enter_lazy_mmu_mode(); pte--; addr -= PAGE_SIZE; continue; } /* * Do not interfere with other mappings of this folio and * non-LRU folio. */ if (!folio_test_lru(folio) || folio_mapcount(folio) != 1) continue; if (pageout_anon_only_filter && !folio_test_anon(folio)) continue; VM_BUG_ON_FOLIO(folio_test_large(folio), folio); if (pte_young(ptent)) { ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); ptent = pte_mkold(ptent); set_pte_at(mm, addr, pte, ptent); tlb_remove_tlb_entry(tlb, pte, addr); } /* * We are deactivating a folio for accelerating reclaiming. * VM couldn't reclaim the folio unless we clear PG_young. * As a side effect, it makes confuse idle-page tracking * because they will miss recent referenced history. 
*/ folio_clear_referenced(folio); folio_test_clear_young(folio); if (folio_test_active(folio)) folio_set_workingset(folio); if (pageout) { if (folio_isolate_lru(folio)) { if (folio_test_unevictable(folio)) folio_putback_lru(folio); else list_add(&folio->lru, &folio_list); } } else folio_deactivate(folio); } if (start_pte) { arch_leave_lazy_mmu_mode(); pte_unmap_unlock(start_pte, ptl); } if (pageout) reclaim_pages(&folio_list); cond_resched(); return 0; } static const struct mm_walk_ops cold_walk_ops = { .pmd_entry = madvise_cold_or_pageout_pte_range, .walk_lock = PGWALK_RDLOCK, }; static void madvise_cold_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end) { struct madvise_walk_private walk_private = { .pageout = false, .tlb = tlb, }; tlb_start_vma(tlb, vma); walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); tlb_end_vma(tlb, vma); } static inline bool can_madv_lru_vma(struct vm_area_struct *vma) { return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB)); } static long madvise_cold(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start_addr, unsigned long end_addr) { struct mm_struct *mm = vma->vm_mm; struct mmu_gather tlb; *prev = vma; if (!can_madv_lru_vma(vma)) return -EINVAL; lru_add_drain(); tlb_gather_mmu(&tlb, mm); madvise_cold_page_range(&tlb, vma, start_addr, end_addr); tlb_finish_mmu(&tlb); return 0; } static void madvise_pageout_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end) { struct madvise_walk_private walk_private = { .pageout = true, .tlb = tlb, }; tlb_start_vma(tlb, vma); walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); tlb_end_vma(tlb, vma); } static long madvise_pageout(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start_addr, unsigned long end_addr) { struct mm_struct *mm = vma->vm_mm; struct mmu_gather tlb; *prev = vma; if (!can_madv_lru_vma(vma)) return -EINVAL; /* * If the VMA belongs to a private file mapping, there can be private * dirty pages which can be paged out if even this process is neither * owner nor write capable of the file. We allow private file mappings * further to pageout dirty anon pages. */ if (!vma_is_anonymous(vma) && (!can_do_file_pageout(vma) && (vma->vm_flags & VM_MAYSHARE))) return 0; lru_add_drain(); tlb_gather_mmu(&tlb, mm); madvise_pageout_page_range(&tlb, vma, start_addr, end_addr); tlb_finish_mmu(&tlb); return 0; } static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct mmu_gather *tlb = walk->private; struct mm_struct *mm = tlb->mm; struct vm_area_struct *vma = walk->vma; spinlock_t *ptl; pte_t *start_pte, *pte, ptent; struct folio *folio; int nr_swap = 0; unsigned long next; next = pmd_addr_end(addr, end); if (pmd_trans_huge(*pmd)) if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next)) return 0; tlb_change_page_size(tlb, PAGE_SIZE); start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); if (!start_pte) return 0; flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); for (; addr != end; pte++, addr += PAGE_SIZE) { ptent = ptep_get(pte); if (pte_none(ptent)) continue; /* * If the pte has swp_entry, just clear page table to * prevent swap-in which is more expensive rather than * (page allocation + zeroing). 
*/ if (!pte_present(ptent)) { swp_entry_t entry; entry = pte_to_swp_entry(ptent); if (!non_swap_entry(entry)) { nr_swap--; free_swap_and_cache(entry); pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); } else if (is_hwpoison_entry(entry) || is_poisoned_swp_entry(entry)) { pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); } continue; } folio = vm_normal_folio(vma, addr, ptent); if (!folio || folio_is_zone_device(folio)) continue; /* * If pmd isn't transhuge but the folio is large and * is owned by only this process, split it and * deactivate all pages. */ if (folio_test_large(folio)) { int err; if (folio_estimated_sharers(folio) != 1) break; if (!folio_trylock(folio)) break; folio_get(folio); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(start_pte, ptl); start_pte = NULL; err = split_folio(folio); folio_unlock(folio); folio_put(folio); if (err) break; start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); if (!start_pte) break; arch_enter_lazy_mmu_mode(); pte--; addr -= PAGE_SIZE; continue; } if (folio_test_swapcache(folio) || folio_test_dirty(folio)) { if (!folio_trylock(folio)) continue; /* * If folio is shared with others, we mustn't clear * the folio's dirty flag. */ if (folio_mapcount(folio) != 1) { folio_unlock(folio); continue; } if (folio_test_swapcache(folio) && !folio_free_swap(folio)) { folio_unlock(folio); continue; } folio_clear_dirty(folio); folio_unlock(folio); } if (pte_young(ptent) || pte_dirty(ptent)) { /* * Some of architecture(ex, PPC) don't update TLB * with set_pte_at and tlb_remove_tlb_entry so for * the portability, remap the pte with old|clean * after pte clearing. */ ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); ptent = pte_mkold(ptent); ptent = pte_mkclean(ptent); set_pte_at(mm, addr, pte, ptent); tlb_remove_tlb_entry(tlb, pte, addr); } folio_mark_lazyfree(folio); } if (nr_swap) { if (current->mm == mm) sync_mm_rss(mm); add_mm_counter(mm, MM_SWAPENTS, nr_swap); } if (start_pte) { arch_leave_lazy_mmu_mode(); pte_unmap_unlock(start_pte, ptl); } cond_resched(); return 0; } static const struct mm_walk_ops madvise_free_walk_ops = { .pmd_entry = madvise_free_pte_range, .walk_lock = PGWALK_RDLOCK, }; static int madvise_free_single_vma(struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr) { struct mm_struct *mm = vma->vm_mm; struct mmu_notifier_range range; struct mmu_gather tlb; /* MADV_FREE works for only anon vma at the moment */ if (!vma_is_anonymous(vma)) return -EINVAL; range.start = max(vma->vm_start, start_addr); if (range.start >= vma->vm_end) return -EINVAL; range.end = min(vma->vm_end, end_addr); if (range.end <= vma->vm_start) return -EINVAL; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, range.start, range.end); lru_add_drain(); tlb_gather_mmu(&tlb, mm); update_hiwater_rss(mm); mmu_notifier_invalidate_range_start(&range); tlb_start_vma(&tlb, vma); walk_page_range(vma->vm_mm, range.start, range.end, &madvise_free_walk_ops, &tlb); tlb_end_vma(&tlb, vma); mmu_notifier_invalidate_range_end(&range); tlb_finish_mmu(&tlb); return 0; } /* * Application no longer needs these pages. If the pages are dirty, * it's OK to just throw them away. The app will be more careful about * data it wants to keep. Be sure to free swap resources too. 
The * zap_page_range_single call sets things up for shrink_active_list to actually * free these pages later if no one else has touched them in the meantime, * although we could add these pages to a global reuse list for * shrink_active_list to pick up before reclaiming other pages. * * NB: This interface discards data rather than pushes it out to swap, * as some implementations do. This has performance implications for * applications like large transactional databases which want to discard * pages in anonymous maps after committing to backing store the data * that was kept in them. There is no reason to write this data out to * the swap area if the application is discarding it. * * An interface that causes the system to free clean pages and flush * dirty pages is already available as msync(MS_INVALIDATE). */ static long madvise_dontneed_single_vma(struct vm_area_struct *vma, unsigned long start, unsigned long end) { zap_page_range_single(vma, start, end - start, NULL); return 0; } static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma, unsigned long start, unsigned long *end, int behavior) { if (!is_vm_hugetlb_page(vma)) { unsigned int forbidden = VM_PFNMAP; if (behavior != MADV_DONTNEED_LOCKED) forbidden |= VM_LOCKED; return !(vma->vm_flags & forbidden); } if (behavior != MADV_DONTNEED && behavior != MADV_DONTNEED_LOCKED) return false; if (start & ~huge_page_mask(hstate_vma(vma))) return false; /* * Madvise callers expect the length to be rounded up to PAGE_SIZE * boundaries, and may be unaware that this VMA uses huge pages. * Avoid unexpected data loss by rounding down the number of * huge pages freed. */ *end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma))); return true; } static long madvise_dontneed_free(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, int behavior) { struct mm_struct *mm = vma->vm_mm; *prev = vma; if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior)) return -EINVAL; if (start == end) return 0; if (!userfaultfd_remove(vma, start, end)) { *prev = NULL; /* mmap_lock has been dropped, prev is stale */ mmap_read_lock(mm); vma = vma_lookup(mm, start); if (!vma) return -ENOMEM; /* * Potential end adjustment for hugetlb vma is OK as * the check below keeps end within vma. */ if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior)) return -EINVAL; if (end > vma->vm_end) { /* * Don't fail if end > vma->vm_end. If the old * vma was split while the mmap_lock was * released the effect of the concurrent * operation may not cause madvise() to * have an undefined result. There may be an * adjacent next vma that we'll walk * next. userfaultfd_remove() will generate an * UFFD_EVENT_REMOVE repetition on the * end-vma->vm_end range, but the manager can * handle a repetition fine. */ end = vma->vm_end; } VM_WARN_ON(start >= end); } if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED) return madvise_dontneed_single_vma(vma, start, end); else if (behavior == MADV_FREE) return madvise_free_single_vma(vma, start, end); else return -EINVAL; } static long madvise_populate(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, int behavior) { const bool write = behavior == MADV_POPULATE_WRITE; struct mm_struct *mm = vma->vm_mm; unsigned long tmp_end; int locked = 1; long pages; *prev = vma; while (start < end) { /* * We might have temporarily dropped the lock. For example, * our VMA might have been split. 
*/ if (!vma || start >= vma->vm_end) { vma = vma_lookup(mm, start); if (!vma) return -ENOMEM; } tmp_end = min_t(unsigned long, end, vma->vm_end); /* Populate (prefault) page tables readable/writable. */ pages = faultin_vma_page_range(vma, start, tmp_end, write, &locked); if (!locked) { mmap_read_lock(mm); locked = 1; *prev = NULL; vma = NULL; } if (pages < 0) { switch (pages) { case -EINTR: return -EINTR; case -EINVAL: /* Incompatible mappings / permissions. */ return -EINVAL; case -EHWPOISON: return -EHWPOISON; case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */ return -EFAULT; default: pr_warn_once("%s: unhandled return value: %ld\n", __func__, pages); fallthrough; case -ENOMEM: return -ENOMEM; } } start += pages * PAGE_SIZE; } return 0; } /* * Application wants to free up the pages and associated backing store. * This is effectively punching a hole into the middle of a file. */ static long madvise_remove(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) { loff_t offset; int error; struct file *f; struct mm_struct *mm = vma->vm_mm; *prev = NULL; /* tell sys_madvise we drop mmap_lock */ if (vma->vm_flags & VM_LOCKED) return -EINVAL; f = vma->vm_file; if (!f || !f->f_mapping || !f->f_mapping->host) { return -EINVAL; } if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) return -EACCES; offset = (loff_t)(start - vma->vm_start) + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); /* * Filesystem's fallocate may need to take i_rwsem. We need to * explicitly grab a reference because the vma (and hence the * vma's reference to the file) can go away as soon as we drop * mmap_lock. */ get_file(f); if (userfaultfd_remove(vma, start, end)) { /* mmap_lock was not released by userfaultfd_remove() */ mmap_read_unlock(mm); } error = vfs_fallocate(f, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, end - start); fput(f); mmap_read_lock(mm); return error; } /* * Apply an madvise behavior to a region of a vma. madvise_update_vma * will handle splitting a vm area into separate areas, each area with its own * behavior. */ static int madvise_vma_behavior(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, unsigned long behavior) { int error; struct anon_vma_name *anon_name; unsigned long new_flags = vma->vm_flags; switch (behavior) { case MADV_REMOVE: return madvise_remove(vma, prev, start, end); case MADV_WILLNEED: return madvise_willneed(vma, prev, start, end); case MADV_COLD: return madvise_cold(vma, prev, start, end); case MADV_PAGEOUT: return madvise_pageout(vma, prev, start, end); case MADV_FREE: case MADV_DONTNEED: case MADV_DONTNEED_LOCKED: return madvise_dontneed_free(vma, prev, start, end, behavior); case MADV_POPULATE_READ: case MADV_POPULATE_WRITE: return madvise_populate(vma, prev, start, end, behavior); case MADV_NORMAL: new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ; break; case MADV_SEQUENTIAL: new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ; break; case MADV_RANDOM: new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ; break; case MADV_DONTFORK: new_flags |= VM_DONTCOPY; break; case MADV_DOFORK: if (vma->vm_flags & VM_IO) return -EINVAL; new_flags &= ~VM_DONTCOPY; break; case MADV_WIPEONFORK: /* MADV_WIPEONFORK is only supported on anonymous memory. 
*/ if (vma->vm_file || vma->vm_flags & VM_SHARED) return -EINVAL; new_flags |= VM_WIPEONFORK; break; case MADV_KEEPONFORK: new_flags &= ~VM_WIPEONFORK; break; case MADV_DONTDUMP: new_flags |= VM_DONTDUMP; break; case MADV_DODUMP: if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) return -EINVAL; new_flags &= ~VM_DONTDUMP; break; case MADV_MERGEABLE: case MADV_UNMERGEABLE: error = ksm_madvise(vma, start, end, behavior, &new_flags); if (error) goto out; break; case MADV_HUGEPAGE: case MADV_NOHUGEPAGE: error = hugepage_madvise(vma, &new_flags, behavior); if (error) goto out; break; case MADV_COLLAPSE: return madvise_collapse(vma, prev, start, end); } anon_name = anon_vma_name(vma); anon_vma_name_get(anon_name); error = madvise_update_vma(vma, prev, start, end, new_flags, anon_name); anon_vma_name_put(anon_name); out: /* * madvise() returns EAGAIN if kernel resources, such as * slab, are temporarily unavailable. */ if (error == -ENOMEM) error = -EAGAIN; return error; } #ifdef CONFIG_MEMORY_FAILURE /* * Error injection support for memory error handling. */ static int madvise_inject_error(int behavior, unsigned long start, unsigned long end) { unsigned long size; if (!capable(CAP_SYS_ADMIN)) return -EPERM; for (; start < end; start += size) { unsigned long pfn; struct page *page; int ret; ret = get_user_pages_fast(start, 1, 0, &page); if (ret != 1) return ret; pfn = page_to_pfn(page); /* * When soft offlining hugepages, after migrating the page * we dissolve it, therefore in the second loop "page" will * no longer be a compound page. */ size = page_size(compound_head(page)); if (behavior == MADV_SOFT_OFFLINE) { pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n", pfn, start); ret = soft_offline_page(pfn, MF_COUNT_INCREASED); } else { pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n", pfn, start); ret = memory_failure(pfn, MF_COUNT_INCREASED | MF_SW_SIMULATED); if (ret == -EOPNOTSUPP) ret = 0; } if (ret) return ret; } return 0; } #endif static bool madvise_behavior_valid(int behavior) { switch (behavior) { case MADV_DOFORK: case MADV_DONTFORK: case MADV_NORMAL: case MADV_SEQUENTIAL: case MADV_RANDOM: case MADV_REMOVE: case MADV_WILLNEED: case MADV_DONTNEED: case MADV_DONTNEED_LOCKED: case MADV_FREE: case MADV_COLD: case MADV_PAGEOUT: case MADV_POPULATE_READ: case MADV_POPULATE_WRITE: #ifdef CONFIG_KSM case MADV_MERGEABLE: case MADV_UNMERGEABLE: #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE case MADV_HUGEPAGE: case MADV_NOHUGEPAGE: case MADV_COLLAPSE: #endif case MADV_DONTDUMP: case MADV_DODUMP: case MADV_WIPEONFORK: case MADV_KEEPONFORK: #ifdef CONFIG_MEMORY_FAILURE case MADV_SOFT_OFFLINE: case MADV_HWPOISON: #endif return true; default: return false; } } static bool process_madvise_behavior_valid(int behavior) { switch (behavior) { case MADV_COLD: case MADV_PAGEOUT: case MADV_WILLNEED: case MADV_COLLAPSE: return true; default: return false; } } /* * Walk the vmas in range [start,end), and call the visit function on each one. * The visit function will get start and end parameters that cover the overlap * between the current vma and the original range. Any unmapped regions in the * original range will result in this function returning -ENOMEM while still * calling the visit function on all of the existing vmas in the range. * Must be called with the mmap_lock held for reading or writing. 
*/ static int madvise_walk_vmas(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long arg, int (*visit)(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, unsigned long arg)) { struct vm_area_struct *vma; struct vm_area_struct *prev; unsigned long tmp; int unmapped_error = 0; /* * If the interval [start,end) covers some unmapped address * ranges, just ignore them, but return -ENOMEM at the end. * - different from the way of handling in mlock etc. */ vma = find_vma_prev(mm, start, &prev); if (vma && start > vma->vm_start) prev = vma; for (;;) { int error; /* Still start < end. */ if (!vma) return -ENOMEM; /* Here start < (end|vma->vm_end). */ if (start < vma->vm_start) { unmapped_error = -ENOMEM; start = vma->vm_start; if (start >= end) break; } /* Here vma->vm_start <= start < (end|vma->vm_end) */ tmp = vma->vm_end; if (end < tmp) tmp = end; /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */ error = visit(vma, &prev, start, tmp, arg); if (error) return error; start = tmp; if (prev && start < prev->vm_end) start = prev->vm_end; if (start >= end) break; if (prev) vma = find_vma(mm, prev->vm_end); else /* madvise_remove dropped mmap_lock */ vma = find_vma(mm, start); } return unmapped_error; } #ifdef CONFIG_ANON_VMA_NAME static int madvise_vma_anon_name(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, unsigned long anon_name) { int error; /* Only anonymous mappings can be named */ if (vma->vm_file && !vma_is_anon_shmem(vma)) return -EBADF; error = madvise_update_vma(vma, prev, start, end, vma->vm_flags, (struct anon_vma_name *)anon_name); /* * madvise() returns EAGAIN if kernel resources, such as * slab, are temporarily unavailable. */ if (error == -ENOMEM) error = -EAGAIN; return error; } int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, unsigned long len_in, struct anon_vma_name *anon_name) { unsigned long end; unsigned long len; if (start & ~PAGE_MASK) return -EINVAL; len = (len_in + ~PAGE_MASK) & PAGE_MASK; /* Check to see whether len was rounded up from small -ve to zero */ if (len_in && !len) return -EINVAL; end = start + len; if (end < start) return -EINVAL; if (end == start) return 0; return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name, madvise_vma_anon_name); } #endif /* CONFIG_ANON_VMA_NAME */ /* * The madvise(2) system call. * * Applications can use madvise() to advise the kernel how it should * handle paging I/O in this VM area. The idea is to help the kernel * use appropriate read-ahead and caching techniques. The information * provided is advisory only, and can be safely disregarded by the * kernel without affecting the correct operation of the application. * * behavior values: * MADV_NORMAL - the default behavior is to read clusters. This * results in some read-ahead and read-behind. * MADV_RANDOM - the system should read the minimum amount of data * on any access, since it is unlikely that the appli- * cation will need more than what it asks for. * MADV_SEQUENTIAL - pages in the given range will probably be accessed * once, so they can be aggressively read ahead, and * can be freed soon after they are accessed. * MADV_WILLNEED - the application is notifying the system to read * some pages ahead. * MADV_DONTNEED - the application is finished with the given range, * so the kernel can free resources associated with it. 
* MADV_FREE - the application marks pages in the given range as lazy free, * where actual purges are postponed until memory pressure happens. * MADV_REMOVE - the application wants to free up the given range of * pages and associated backing store. * MADV_DONTFORK - omit this area from child's address space when forking: * typically, to avoid COWing pages pinned by get_user_pages(). * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking. * MADV_WIPEONFORK - present the child process with zero-filled memory in this * range after a fork. * MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK * MADV_HWPOISON - trigger memory error handler as if the given memory range * were corrupted by unrecoverable hardware memory failure. * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory. * MADV_MERGEABLE - the application recommends that KSM try to merge pages in * this area with pages of identical content from other such areas. * MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others. * MADV_HUGEPAGE - the application wants to back the given range by transparent * huge pages in the future. Existing pages might be coalesced and * new pages might be allocated as THP. * MADV_NOHUGEPAGE - mark the given range as not worth being backed by * transparent huge pages so the existing pages will not be * coalesced into THP and new pages will not be allocated as THP. * MADV_COLLAPSE - synchronously coalesce pages into new THP. * MADV_DONTDUMP - the application wants to prevent pages in the given range * from being included in its core dump. * MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump. * MADV_COLD - the application is not expected to use this memory soon, * deactivate pages in this range so that they can be reclaimed * easily if memory pressure happens. * MADV_PAGEOUT - the application is not expected to use this memory soon, * page out the pages in this range immediately. * MADV_POPULATE_READ - populate (prefault) page tables readable by * triggering read faults if required * MADV_POPULATE_WRITE - populate (prefault) page tables writable by * triggering write faults if required * * return values: * zero - success * -EINVAL - start + len < 0, start is not page-aligned, * "behavior" is not a valid value, or application * is attempting to release locked or shared pages, * or the specified address range includes file, Huge TLB, * MAP_SHARED or VMPFNMAP range. * -ENOMEM - addresses in the specified range are not currently * mapped, or are outside the AS of the process. * -EIO - an I/O error occurred while paging in data. * -EBADF - map exists, but area maps something that isn't a file. * -EAGAIN - a kernel resource was temporarily unavailable. 
*/ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior) { unsigned long end; int error; int write; size_t len; struct blk_plug plug; if (!madvise_behavior_valid(behavior)) return -EINVAL; if (!PAGE_ALIGNED(start)) return -EINVAL; len = PAGE_ALIGN(len_in); /* Check to see whether len was rounded up from small -ve to zero */ if (len_in && !len) return -EINVAL; end = start + len; if (end < start) return -EINVAL; if (end == start) return 0; #ifdef CONFIG_MEMORY_FAILURE if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE) return madvise_inject_error(behavior, start, start + len_in); #endif write = madvise_need_mmap_write(behavior); if (write) { if (mmap_write_lock_killable(mm)) return -EINTR; } else { mmap_read_lock(mm); } start = untagged_addr_remote(mm, start); end = start + len; blk_start_plug(&plug); error = madvise_walk_vmas(mm, start, end, behavior, madvise_vma_behavior); blk_finish_plug(&plug); if (write) mmap_write_unlock(mm); else mmap_read_unlock(mm); return error; } SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior) { return do_madvise(current->mm, start, len_in, behavior); } SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec, size_t, vlen, int, behavior, unsigned int, flags) { ssize_t ret; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; struct iov_iter iter; struct task_struct *task; struct mm_struct *mm; size_t total_len; unsigned int f_flags; if (flags != 0) { ret = -EINVAL; goto out; } ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter); if (ret < 0) goto out; task = pidfd_get_task(pidfd, &f_flags); if (IS_ERR(task)) { ret = PTR_ERR(task); goto free_iov; } if (!process_madvise_behavior_valid(behavior)) { ret = -EINVAL; goto release_task; } /* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */ mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); if (IS_ERR_OR_NULL(mm)) { ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; goto release_task; } /* * Require CAP_SYS_NICE for influencing process performance. Note that * only non-destructive hints are currently supported. */ if (!capable(CAP_SYS_NICE)) { ret = -EPERM; goto release_mm; } total_len = iov_iter_count(&iter); while (iov_iter_count(&iter)) { ret = do_madvise(mm, (unsigned long)iter_iov_addr(&iter), iter_iov_len(&iter), behavior); if (ret < 0) break; iov_iter_advance(&iter, iter_iov_len(&iter)); } ret = (total_len - iov_iter_count(&iter)) ? : ret; release_mm: mmput(mm); release_task: put_task_struct(task); free_iov: kfree(iov); out: return ret; }
linux-master
mm/madvise.c
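The kerneldoc block for the madvise(2) system call above enumerates the behaviour values and return codes that do_madvise() enforces. The following userspace program is an illustrative sketch, not part of mm/madvise.c: it maps anonymous memory, dirties it, and then applies MADV_DONTNEED, after which the range reads back as fresh zero-filled pages.

/* Minimal demonstration of MADV_DONTNEED on a private anonymous mapping. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 4096;
	unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memset(p, 0xaa, len);			/* fault in and dirty every page */
	printf("before madvise: %#x\n", p[0]);	/* prints 0xaa */

	/*
	 * Advise the kernel that the range is no longer needed. For private
	 * anonymous memory the backing pages are discarded immediately
	 * (madvise_dontneed_single_vma() -> zap_page_range_single() above).
	 */
	if (madvise(p, len, MADV_DONTNEED) != 0) {
		perror("madvise");
		return 1;
	}

	printf("after madvise:  %#x\n", p[0]);	/* prints 0: zero page on next touch */
	munmap(p, len);
	return 0;
}

Consistent with the return-value list above, madvise() fails with EINVAL if the start address is not page aligned or the behaviour value is not supported, and with ENOMEM if part of the range is not currently mapped.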
// SPDX-License-Identifier: GPL-2.0-only /* * linux/mm/oom_kill.c * * Copyright (C) 1998,2000 Rik van Riel * Thanks go out to Claus Fischer for some serious inspiration and * for goading me into coding this file... * Copyright (C) 2010 Google, Inc. * Rewritten by David Rientjes * * The routines in this file are used to kill a process when * we're seriously out of memory. This gets called from __alloc_pages() * in mm/page_alloc.c when we really run out of memory. * * Since we won't call these routines often (on a well-configured * machine) this file will double as a 'coding guide' and a signpost * for newbie kernel hackers. It features several pointers to major * kernel subsystems and hints as to where to find out what things do. */ #include <linux/oom.h> #include <linux/mm.h> #include <linux/err.h> #include <linux/gfp.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/coredump.h> #include <linux/sched/task.h> #include <linux/sched/debug.h> #include <linux/swap.h> #include <linux/syscalls.h> #include <linux/timex.h> #include <linux/jiffies.h> #include <linux/cpuset.h> #include <linux/export.h> #include <linux/notifier.h> #include <linux/memcontrol.h> #include <linux/mempolicy.h> #include <linux/security.h> #include <linux/ptrace.h> #include <linux/freezer.h> #include <linux/ftrace.h> #include <linux/ratelimit.h> #include <linux/kthread.h> #include <linux/init.h> #include <linux/mmu_notifier.h> #include <asm/tlb.h> #include "internal.h" #include "slab.h" #define CREATE_TRACE_POINTS #include <trace/events/oom.h> static int sysctl_panic_on_oom; static int sysctl_oom_kill_allocating_task; static int sysctl_oom_dump_tasks = 1; /* * Serializes oom killer invocations (out_of_memory()) from all contexts to * prevent from over eager oom killing (e.g. when the oom killer is invoked * from different domains). * * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled * and mark_oom_victim */ DEFINE_MUTEX(oom_lock); /* Serializes oom_score_adj and oom_score_adj_min updates */ DEFINE_MUTEX(oom_adj_mutex); static inline bool is_memcg_oom(struct oom_control *oc) { return oc->memcg != NULL; } #ifdef CONFIG_NUMA /** * oom_cpuset_eligible() - check task eligibility for kill * @start: task struct of which task to consider * @oc: pointer to struct oom_control * * Task eligibility is determined by whether or not a candidate task, @tsk, * shares the same mempolicy nodes as current if it is bound by such a policy * and whether or not it has the same set of allowed cpuset nodes. * * This function is assuming oom-killer context and 'current' has triggered * the oom-killer. */ static bool oom_cpuset_eligible(struct task_struct *start, struct oom_control *oc) { struct task_struct *tsk; bool ret = false; const nodemask_t *mask = oc->nodemask; rcu_read_lock(); for_each_thread(start, tsk) { if (mask) { /* * If this is a mempolicy constrained oom, tsk's * cpuset is irrelevant. Only return true if its * mempolicy intersects current, otherwise it may be * needlessly killed. */ ret = mempolicy_in_oom_domain(tsk, mask); } else { /* * This is not a mempolicy constrained oom, so only * check the mems of tsk's cpuset. 
*/ ret = cpuset_mems_allowed_intersects(current, tsk); } if (ret) break; } rcu_read_unlock(); return ret; } #else static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc) { return true; } #endif /* CONFIG_NUMA */ /* * The process p may have detached its own ->mm while exiting or through * kthread_use_mm(), but one or more of its subthreads may still have a valid * pointer. Return p, or any of its subthreads with a valid ->mm, with * task_lock() held. */ struct task_struct *find_lock_task_mm(struct task_struct *p) { struct task_struct *t; rcu_read_lock(); for_each_thread(p, t) { task_lock(t); if (likely(t->mm)) goto found; task_unlock(t); } t = NULL; found: rcu_read_unlock(); return t; } /* * order == -1 means the oom kill is required by sysrq, otherwise only * for display purposes. */ static inline bool is_sysrq_oom(struct oom_control *oc) { return oc->order == -1; } /* return true if the task is not adequate as candidate victim task. */ static bool oom_unkillable_task(struct task_struct *p) { if (is_global_init(p)) return true; if (p->flags & PF_KTHREAD) return true; return false; } /* * Check whether unreclaimable slab amount is greater than * all user memory(LRU pages). * dump_unreclaimable_slab() could help in the case that * oom due to too much unreclaimable slab used by kernel. */ static bool should_dump_unreclaim_slab(void) { unsigned long nr_lru; nr_lru = global_node_page_state(NR_ACTIVE_ANON) + global_node_page_state(NR_INACTIVE_ANON) + global_node_page_state(NR_ACTIVE_FILE) + global_node_page_state(NR_INACTIVE_FILE) + global_node_page_state(NR_ISOLATED_ANON) + global_node_page_state(NR_ISOLATED_FILE) + global_node_page_state(NR_UNEVICTABLE); return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru); } /** * oom_badness - heuristic function to determine which candidate task to kill * @p: task struct of which task we should calculate * @totalpages: total present RAM allowed for page allocation * * The heuristic for determining which task to kill is made to be as simple and * predictable as possible. The goal is to return the highest value for the * task consuming the most memory to avoid subsequent oom failures. */ long oom_badness(struct task_struct *p, unsigned long totalpages) { long points; long adj; if (oom_unkillable_task(p)) return LONG_MIN; p = find_lock_task_mm(p); if (!p) return LONG_MIN; /* * Do not even consider tasks which are explicitly marked oom * unkillable or have been already oom reaped or the are in * the middle of vfork */ adj = (long)p->signal->oom_score_adj; if (adj == OOM_SCORE_ADJ_MIN || test_bit(MMF_OOM_SKIP, &p->mm->flags) || in_vfork(p)) { task_unlock(p); return LONG_MIN; } /* * The baseline for the badness score is the proportion of RAM that each * task's rss, pagetable and swap space use. */ points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) + mm_pgtables_bytes(p->mm) / PAGE_SIZE; task_unlock(p); /* Normalize to oom_score_adj units */ adj *= totalpages / 1000; points += adj; return points; } static const char * const oom_constraint_text[] = { [CONSTRAINT_NONE] = "CONSTRAINT_NONE", [CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET", [CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY", [CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG", }; /* * Determine the type of allocation constraint. 
*/ static enum oom_constraint constrained_alloc(struct oom_control *oc) { struct zone *zone; struct zoneref *z; enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask); bool cpuset_limited = false; int nid; if (is_memcg_oom(oc)) { oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1; return CONSTRAINT_MEMCG; } /* Default to all available memory */ oc->totalpages = totalram_pages() + total_swap_pages; if (!IS_ENABLED(CONFIG_NUMA)) return CONSTRAINT_NONE; if (!oc->zonelist) return CONSTRAINT_NONE; /* * Reach here only when __GFP_NOFAIL is used. So, we should avoid * to kill current.We have to random task kill in this case. * Hopefully, CONSTRAINT_THISNODE...but no way to handle it, now. */ if (oc->gfp_mask & __GFP_THISNODE) return CONSTRAINT_NONE; /* * This is not a __GFP_THISNODE allocation, so a truncated nodemask in * the page allocator means a mempolicy is in effect. Cpuset policy * is enforced in get_page_from_freelist(). */ if (oc->nodemask && !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) { oc->totalpages = total_swap_pages; for_each_node_mask(nid, *oc->nodemask) oc->totalpages += node_present_pages(nid); return CONSTRAINT_MEMORY_POLICY; } /* Check this allocation failure is caused by cpuset's wall function */ for_each_zone_zonelist_nodemask(zone, z, oc->zonelist, highest_zoneidx, oc->nodemask) if (!cpuset_zone_allowed(zone, oc->gfp_mask)) cpuset_limited = true; if (cpuset_limited) { oc->totalpages = total_swap_pages; for_each_node_mask(nid, cpuset_current_mems_allowed) oc->totalpages += node_present_pages(nid); return CONSTRAINT_CPUSET; } return CONSTRAINT_NONE; } static int oom_evaluate_task(struct task_struct *task, void *arg) { struct oom_control *oc = arg; long points; if (oom_unkillable_task(task)) goto next; /* p may not have freeable memory in nodemask */ if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc)) goto next; /* * This task already has access to memory reserves and is being killed. * Don't allow any other task to have access to the reserves unless * the task has MMF_OOM_SKIP because chances that it would release * any memory is quite low. */ if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) { if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags)) goto next; goto abort; } /* * If task is allocating a lot of memory and has been marked to be * killed first if it triggers an oom, then select it. */ if (oom_task_origin(task)) { points = LONG_MAX; goto select; } points = oom_badness(task, oc->totalpages); if (points == LONG_MIN || points < oc->chosen_points) goto next; select: if (oc->chosen) put_task_struct(oc->chosen); get_task_struct(task); oc->chosen = task; oc->chosen_points = points; next: return 0; abort: if (oc->chosen) put_task_struct(oc->chosen); oc->chosen = (void *)-1UL; return 1; } /* * Simple selection loop. We choose the process with the highest number of * 'points'. In case scan was aborted, oc->chosen is set to -1. 
*/ static void select_bad_process(struct oom_control *oc) { oc->chosen_points = LONG_MIN; if (is_memcg_oom(oc)) mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc); else { struct task_struct *p; rcu_read_lock(); for_each_process(p) if (oom_evaluate_task(p, oc)) break; rcu_read_unlock(); } } static int dump_task(struct task_struct *p, void *arg) { struct oom_control *oc = arg; struct task_struct *task; if (oom_unkillable_task(p)) return 0; /* p may not have freeable memory in nodemask */ if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc)) return 0; task = find_lock_task_mm(p); if (!task) { /* * All of p's threads have already detached their mm's. There's * no need to report them; they can't be oom killed anyway. */ return 0; } pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n", task->pid, from_kuid(&init_user_ns, task_uid(task)), task->tgid, task->mm->total_vm, get_mm_rss(task->mm), mm_pgtables_bytes(task->mm), get_mm_counter(task->mm, MM_SWAPENTS), task->signal->oom_score_adj, task->comm); task_unlock(task); return 0; } /** * dump_tasks - dump current memory state of all system tasks * @oc: pointer to struct oom_control * * Dumps the current memory state of all eligible tasks. Tasks not in the same * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes * are not shown. * State information includes task's pid, uid, tgid, vm size, rss, * pgtables_bytes, swapents, oom_score_adj value, and name. */ static void dump_tasks(struct oom_control *oc) { pr_info("Tasks state (memory values in pages):\n"); pr_info("[ pid ] uid tgid total_vm rss pgtables_bytes swapents oom_score_adj name\n"); if (is_memcg_oom(oc)) mem_cgroup_scan_tasks(oc->memcg, dump_task, oc); else { struct task_struct *p; rcu_read_lock(); for_each_process(p) dump_task(p, oc); rcu_read_unlock(); } } static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim) { /* one line summary of the oom killer context. */ pr_info("oom-kill:constraint=%s,nodemask=%*pbl", oom_constraint_text[oc->constraint], nodemask_pr_args(oc->nodemask)); cpuset_print_current_mems_allowed(); mem_cgroup_print_oom_context(oc->memcg, victim); pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid, from_kuid(&init_user_ns, task_uid(victim))); } static void dump_header(struct oom_control *oc, struct task_struct *p) { pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n", current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order, current->signal->oom_score_adj); if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order) pr_warn("COMPACTION is disabled!!!\n"); dump_stack(); if (is_memcg_oom(oc)) mem_cgroup_print_oom_meminfo(oc->memcg); else { __show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask, gfp_zone(oc->gfp_mask)); if (should_dump_unreclaim_slab()) dump_unreclaimable_slab(); } if (sysctl_oom_dump_tasks) dump_tasks(oc); if (p) dump_oom_summary(oc, p); } /* * Number of OOM victims in flight */ static atomic_t oom_victims = ATOMIC_INIT(0); static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait); static bool oom_killer_disabled __read_mostly; /* * task->mm can be NULL if the task is the exited group leader. So to * determine whether the task is using a particular mm, we examine all the * task's threads: if one of those is using this mm then this task was also * using it. 
*/ bool process_shares_mm(struct task_struct *p, struct mm_struct *mm) { struct task_struct *t; for_each_thread(p, t) { struct mm_struct *t_mm = READ_ONCE(t->mm); if (t_mm) return t_mm == mm; } return false; } #ifdef CONFIG_MMU /* * OOM Reaper kernel thread which tries to reap the memory used by the OOM * victim (if that is possible) to help the OOM killer to move on. */ static struct task_struct *oom_reaper_th; static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait); static struct task_struct *oom_reaper_list; static DEFINE_SPINLOCK(oom_reaper_lock); static bool __oom_reap_task_mm(struct mm_struct *mm) { struct vm_area_struct *vma; bool ret = true; VMA_ITERATOR(vmi, mm, 0); /* * Tell all users of get_user/copy_from_user etc... that the content * is no longer stable. No barriers really needed because unmapping * should imply barriers already and the reader would hit a page fault * if it stumbled over a reaped memory. */ set_bit(MMF_UNSTABLE, &mm->flags); for_each_vma(vmi, vma) { if (vma->vm_flags & (VM_HUGETLB|VM_PFNMAP)) continue; /* * Only anonymous pages have a good chance to be dropped * without additional steps which we cannot afford as we * are OOM already. * * We do not even care about fs backed pages because all * which are reclaimable have already been reclaimed and * we do not want to block exit_mmap by keeping mm ref * count elevated without a good reason. */ if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) { struct mmu_notifier_range range; struct mmu_gather tlb; mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, mm, vma->vm_start, vma->vm_end); tlb_gather_mmu(&tlb, mm); if (mmu_notifier_invalidate_range_start_nonblock(&range)) { tlb_finish_mmu(&tlb); ret = false; continue; } unmap_page_range(&tlb, vma, range.start, range.end, NULL); mmu_notifier_invalidate_range_end(&range); tlb_finish_mmu(&tlb); } } return ret; } /* * Reaps the address space of the give task. * * Returns true on success and false if none or part of the address space * has been reclaimed and the caller should retry later. */ static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) { bool ret = true; if (!mmap_read_trylock(mm)) { trace_skip_task_reaping(tsk->pid); return false; } /* * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't * work on the mm anymore. The check for MMF_OOM_SKIP must run * under mmap_lock for reading because it serializes against the * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap(). */ if (test_bit(MMF_OOM_SKIP, &mm->flags)) { trace_skip_task_reaping(tsk->pid); goto out_unlock; } trace_start_task_reaping(tsk->pid); /* failed to reap part of the address space. 
Try again later */ ret = __oom_reap_task_mm(mm); if (!ret) goto out_finish; pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n", task_pid_nr(tsk), tsk->comm, K(get_mm_counter(mm, MM_ANONPAGES)), K(get_mm_counter(mm, MM_FILEPAGES)), K(get_mm_counter(mm, MM_SHMEMPAGES))); out_finish: trace_finish_task_reaping(tsk->pid); out_unlock: mmap_read_unlock(mm); return ret; } #define MAX_OOM_REAP_RETRIES 10 static void oom_reap_task(struct task_struct *tsk) { int attempts = 0; struct mm_struct *mm = tsk->signal->oom_mm; /* Retry the mmap_read_trylock(mm) a few times */ while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm)) schedule_timeout_idle(HZ/10); if (attempts <= MAX_OOM_REAP_RETRIES || test_bit(MMF_OOM_SKIP, &mm->flags)) goto done; pr_info("oom_reaper: unable to reap pid:%d (%s)\n", task_pid_nr(tsk), tsk->comm); sched_show_task(tsk); debug_show_all_locks(); done: tsk->oom_reaper_list = NULL; /* * Hide this mm from OOM killer because it has been either reaped or * somebody can't call mmap_write_unlock(mm). */ set_bit(MMF_OOM_SKIP, &mm->flags); /* Drop a reference taken by queue_oom_reaper */ put_task_struct(tsk); } static int oom_reaper(void *unused) { set_freezable(); while (true) { struct task_struct *tsk = NULL; wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL); spin_lock_irq(&oom_reaper_lock); if (oom_reaper_list != NULL) { tsk = oom_reaper_list; oom_reaper_list = tsk->oom_reaper_list; } spin_unlock_irq(&oom_reaper_lock); if (tsk) oom_reap_task(tsk); } return 0; } static void wake_oom_reaper(struct timer_list *timer) { struct task_struct *tsk = container_of(timer, struct task_struct, oom_reaper_timer); struct mm_struct *mm = tsk->signal->oom_mm; unsigned long flags; /* The victim managed to terminate on its own - see exit_mmap */ if (test_bit(MMF_OOM_SKIP, &mm->flags)) { put_task_struct(tsk); return; } spin_lock_irqsave(&oom_reaper_lock, flags); tsk->oom_reaper_list = oom_reaper_list; oom_reaper_list = tsk; spin_unlock_irqrestore(&oom_reaper_lock, flags); trace_wake_reaper(tsk->pid); wake_up(&oom_reaper_wait); } /* * Give the OOM victim time to exit naturally before invoking the oom_reaping. * The timers timeout is arbitrary... the longer it is, the longer the worst * case scenario for the OOM can take. If it is too small, the oom_reaper can * get in the way and release resources needed by the process exit path. * e.g. The futex robust list can sit in Anon|Private memory that gets reaped * before the exit path is able to wake the futex waiters. */ #define OOM_REAPER_DELAY (2*HZ) static void queue_oom_reaper(struct task_struct *tsk) { /* mm is already queued? 
*/ if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags)) return; get_task_struct(tsk); timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0); tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY; add_timer(&tsk->oom_reaper_timer); } #ifdef CONFIG_SYSCTL static struct ctl_table vm_oom_kill_table[] = { { .procname = "panic_on_oom", .data = &sysctl_panic_on_oom, .maxlen = sizeof(sysctl_panic_on_oom), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_TWO, }, { .procname = "oom_kill_allocating_task", .data = &sysctl_oom_kill_allocating_task, .maxlen = sizeof(sysctl_oom_kill_allocating_task), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "oom_dump_tasks", .data = &sysctl_oom_dump_tasks, .maxlen = sizeof(sysctl_oom_dump_tasks), .mode = 0644, .proc_handler = proc_dointvec, }, {} }; #endif static int __init oom_init(void) { oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper"); #ifdef CONFIG_SYSCTL register_sysctl_init("vm", vm_oom_kill_table); #endif return 0; } subsys_initcall(oom_init) #else static inline void queue_oom_reaper(struct task_struct *tsk) { } #endif /* CONFIG_MMU */ /** * mark_oom_victim - mark the given task as OOM victim * @tsk: task to mark * * Has to be called with oom_lock held and never after * oom has been disabled already. * * tsk->mm has to be non NULL and caller has to guarantee it is stable (either * under task_lock or operate on the current). */ static void mark_oom_victim(struct task_struct *tsk) { struct mm_struct *mm = tsk->mm; WARN_ON(oom_killer_disabled); /* OOM killer might race with memcg OOM */ if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE)) return; /* oom_mm is bound to the signal struct life time. */ if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) mmgrab(tsk->signal->oom_mm); /* * Make sure that the task is woken up from uninterruptible sleep * if it is frozen because OOM killer wouldn't be able to free * any memory and livelock. freezing_slow_path will tell the freezer * that TIF_MEMDIE tasks should be ignored. */ __thaw_task(tsk); atomic_inc(&oom_victims); trace_mark_victim(tsk->pid); } /** * exit_oom_victim - note the exit of an OOM victim */ void exit_oom_victim(void) { clear_thread_flag(TIF_MEMDIE); if (!atomic_dec_return(&oom_victims)) wake_up_all(&oom_victims_wait); } /** * oom_killer_enable - enable OOM killer */ void oom_killer_enable(void) { oom_killer_disabled = false; pr_info("OOM killer enabled.\n"); } /** * oom_killer_disable - disable OOM killer * @timeout: maximum timeout to wait for oom victims in jiffies * * Forces all page allocations to fail rather than trigger OOM killer. * Will block and wait until all OOM victims are killed or the given * timeout expires. * * The function cannot be called when there are runnable user tasks because * the userspace would see unexpected allocation failures as a result. Any * new usage of this function should be consulted with MM people. * * Returns true if successful and false if the OOM killer cannot be * disabled. */ bool oom_killer_disable(signed long timeout) { signed long ret; /* * Make sure to not race with an ongoing OOM killer. Check that the * current is not killed (possibly due to sharing the victim's memory). 
*/ if (mutex_lock_killable(&oom_lock)) return false; oom_killer_disabled = true; mutex_unlock(&oom_lock); ret = wait_event_interruptible_timeout(oom_victims_wait, !atomic_read(&oom_victims), timeout); if (ret <= 0) { oom_killer_enable(); return false; } pr_info("OOM killer disabled.\n"); return true; } static inline bool __task_will_free_mem(struct task_struct *task) { struct signal_struct *sig = task->signal; /* * A coredumping process may sleep for an extended period in * coredump_task_exit(), so the oom killer cannot assume that * the process will promptly exit and release memory. */ if (sig->core_state) return false; if (sig->flags & SIGNAL_GROUP_EXIT) return true; if (thread_group_empty(task) && (task->flags & PF_EXITING)) return true; return false; } /* * Checks whether the given task is dying or exiting and likely to * release its address space. This means that all threads and processes * sharing the same mm have to be killed or exiting. * Caller has to make sure that task->mm is stable (hold task_lock or * it operates on the current). */ static bool task_will_free_mem(struct task_struct *task) { struct mm_struct *mm = task->mm; struct task_struct *p; bool ret = true; /* * Skip tasks without mm because it might have passed its exit_mm and * exit_oom_victim. oom_reaper could have rescued that but do not rely * on that for now. We can consider find_lock_task_mm in future. */ if (!mm) return false; if (!__task_will_free_mem(task)) return false; /* * This task has already been drained by the oom reaper so there are * only small chances it will free some more */ if (test_bit(MMF_OOM_SKIP, &mm->flags)) return false; if (atomic_read(&mm->mm_users) <= 1) return true; /* * Make sure that all tasks which share the mm with the given tasks * are dying as well to make sure that a) nobody pins its mm and * b) the task is also reapable by the oom reaper. */ rcu_read_lock(); for_each_process(p) { if (!process_shares_mm(p, mm)) continue; if (same_thread_group(task, p)) continue; ret = __task_will_free_mem(p); if (!ret) break; } rcu_read_unlock(); return ret; } static void __oom_kill_process(struct task_struct *victim, const char *message) { struct task_struct *p; struct mm_struct *mm; bool can_oom_reap = true; p = find_lock_task_mm(victim); if (!p) { pr_info("%s: OOM victim %d (%s) is already exiting. Skip killing the task\n", message, task_pid_nr(victim), victim->comm); put_task_struct(victim); return; } else if (victim != p) { get_task_struct(p); put_task_struct(victim); victim = p; } /* Get a reference to safely compare mm after task_unlock(victim) */ mm = victim->mm; mmgrab(mm); /* Raise event before sending signal: task reaper must see this */ count_vm_event(OOM_KILL); memcg_memory_event_mm(mm, MEMCG_OOM_KILL); /* * We should send SIGKILL before granting access to memory reserves * in order to prevent the OOM victim from depleting the memory * reserves from the user space under its control. 
*/ do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID); mark_oom_victim(victim); pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n", message, task_pid_nr(victim), victim->comm, K(mm->total_vm), K(get_mm_counter(mm, MM_ANONPAGES)), K(get_mm_counter(mm, MM_FILEPAGES)), K(get_mm_counter(mm, MM_SHMEMPAGES)), from_kuid(&init_user_ns, task_uid(victim)), mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj); task_unlock(victim); /* * Kill all user processes sharing victim->mm in other thread groups, if * any. They don't get access to memory reserves, though, to avoid * depletion of all memory. This prevents mm->mmap_lock livelock when an * oom killed thread cannot exit because it requires the semaphore and * its contended by another thread trying to allocate memory itself. * That thread will now get access to memory reserves since it has a * pending fatal signal. */ rcu_read_lock(); for_each_process(p) { if (!process_shares_mm(p, mm)) continue; if (same_thread_group(p, victim)) continue; if (is_global_init(p)) { can_oom_reap = false; set_bit(MMF_OOM_SKIP, &mm->flags); pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n", task_pid_nr(victim), victim->comm, task_pid_nr(p), p->comm); continue; } /* * No kthread_use_mm() user needs to read from the userspace so * we are ok to reap it. */ if (unlikely(p->flags & PF_KTHREAD)) continue; do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID); } rcu_read_unlock(); if (can_oom_reap) queue_oom_reaper(victim); mmdrop(mm); put_task_struct(victim); } /* * Kill provided task unless it's secured by setting * oom_score_adj to OOM_SCORE_ADJ_MIN. */ static int oom_kill_memcg_member(struct task_struct *task, void *message) { if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN && !is_global_init(task)) { get_task_struct(task); __oom_kill_process(task, message); } return 0; } static void oom_kill_process(struct oom_control *oc, const char *message) { struct task_struct *victim = oc->chosen; struct mem_cgroup *oom_group; static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); /* * If the task is already exiting, don't alarm the sysadmin or kill * its children or threads, just give it access to memory reserves * so it can die quickly */ task_lock(victim); if (task_will_free_mem(victim)) { mark_oom_victim(victim); queue_oom_reaper(victim); task_unlock(victim); put_task_struct(victim); return; } task_unlock(victim); if (__ratelimit(&oom_rs)) dump_header(oc, victim); /* * Do we need to kill the entire memory cgroup? * Or even one of the ancestor memory cgroups? * Check this out before killing the victim task. */ oom_group = mem_cgroup_get_oom_group(victim, oc->memcg); __oom_kill_process(victim, message); /* * If necessary, kill all tasks in the selected memory cgroup. */ if (oom_group) { memcg_memory_event(oom_group, MEMCG_OOM_GROUP_KILL); mem_cgroup_print_oom_group(oom_group); mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member, (void *)message); mem_cgroup_put(oom_group); } } /* * Determines whether the kernel must panic because of the panic_on_oom sysctl. */ static void check_panic_on_oom(struct oom_control *oc) { if (likely(!sysctl_panic_on_oom)) return; if (sysctl_panic_on_oom != 2) { /* * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel * does not panic for cpuset, mempolicy, or memcg allocation * failures. 
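 * (Illustrative admin usage: "sysctl vm.panic_on_oom=2" requests a panic even
 * for such constrained OOMs; see Documentation/admin-guide/sysctl/vm.rst.)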
*/ if (oc->constraint != CONSTRAINT_NONE) return; } /* Do not panic for oom kills triggered by sysrq */ if (is_sysrq_oom(oc)) return; dump_header(oc, NULL); panic("Out of memory: %s panic_on_oom is enabled\n", sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); } static BLOCKING_NOTIFIER_HEAD(oom_notify_list); int register_oom_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&oom_notify_list, nb); } EXPORT_SYMBOL_GPL(register_oom_notifier); int unregister_oom_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&oom_notify_list, nb); } EXPORT_SYMBOL_GPL(unregister_oom_notifier); /** * out_of_memory - kill the "best" process when we run out of memory * @oc: pointer to struct oom_control * * If we run out of memory, we have the choice between either * killing a random task (bad), letting the system crash (worse) * OR try to be smart about which process to kill. Note that we * don't have to be perfect here, we just have to be good. */ bool out_of_memory(struct oom_control *oc) { unsigned long freed = 0; if (oom_killer_disabled) return false; if (!is_memcg_oom(oc)) { blocking_notifier_call_chain(&oom_notify_list, 0, &freed); if (freed > 0 && !is_sysrq_oom(oc)) /* Got some memory back in the last second. */ return true; } /* * If current has a pending SIGKILL or is exiting, then automatically * select it. The goal is to allow it to allocate so that it may * quickly exit and free its memory. */ if (task_will_free_mem(current)) { mark_oom_victim(current); queue_oom_reaper(current); return true; } /* * The OOM killer does not compensate for IO-less reclaim. * But mem_cgroup_oom() has to invoke the OOM killer even * if it is a GFP_NOFS allocation. */ if (!(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc)) return true; /* * Check if there were limitations on the allocation (only relevant for * NUMA and memcg) that may require different handling. */ oc->constraint = constrained_alloc(oc); if (oc->constraint != CONSTRAINT_MEMORY_POLICY) oc->nodemask = NULL; check_panic_on_oom(oc); if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task && current->mm && !oom_unkillable_task(current) && oom_cpuset_eligible(current, oc) && current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) { get_task_struct(current); oc->chosen = current; oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)"); return true; } select_bad_process(oc); /* Found nothing?!?! */ if (!oc->chosen) { dump_header(oc, NULL); pr_warn("Out of memory and no killable processes...\n"); /* * If we got here due to an actual allocation at the * system level, we cannot survive this and will enter * an endless loop in the allocator. Bail out now. */ if (!is_sysrq_oom(oc) && !is_memcg_oom(oc)) panic("System is deadlocked on memory\n"); } if (oc->chosen && oc->chosen != (void *)-1UL) oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" : "Memory cgroup out of memory"); return !!oc->chosen; } /* * The pagefault handler calls here because some allocation has failed. We have * to take care of the memcg OOM here because this is the only safe context without * any locks held but let the oom killer triggered from the allocation context care * about the global OOM. */ void pagefault_out_of_memory(void) { static DEFINE_RATELIMIT_STATE(pfoom_rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); if (mem_cgroup_oom_synchronize(true)) return; if (fatal_signal_pending(current)) return; if (__ratelimit(&pfoom_rs)) pr_warn("Huh VM_FAULT_OOM leaked out to the #PF handler. 
Retrying PF\n"); } SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags) { #ifdef CONFIG_MMU struct mm_struct *mm = NULL; struct task_struct *task; struct task_struct *p; unsigned int f_flags; bool reap = false; long ret = 0; if (flags) return -EINVAL; task = pidfd_get_task(pidfd, &f_flags); if (IS_ERR(task)) return PTR_ERR(task); /* * Make sure to choose a thread which still has a reference to mm * during the group exit */ p = find_lock_task_mm(task); if (!p) { ret = -ESRCH; goto put_task; } mm = p->mm; mmgrab(mm); if (task_will_free_mem(p)) reap = true; else { /* Error only if the work has not been done already */ if (!test_bit(MMF_OOM_SKIP, &mm->flags)) ret = -EINVAL; } task_unlock(p); if (!reap) goto drop_mm; if (mmap_read_lock_killable(mm)) { ret = -EINTR; goto drop_mm; } /* * Check MMF_OOM_SKIP again under mmap_read_lock protection to ensure * possible change in exit_mmap is seen */ if (!test_bit(MMF_OOM_SKIP, &mm->flags) && !__oom_reap_task_mm(mm)) ret = -EAGAIN; mmap_read_unlock(mm); drop_mm: mmdrop(mm); put_task: put_task_struct(task); return ret; #else return -ENOSYS; #endif /* CONFIG_MMU */ }
linux-master
mm/oom_kill.c
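Illustrative only, not part of the kernel sources above: a minimal userspace sketch of driving the process_mrelease() path implemented at the end of mm/oom_kill.c. It assumes a libc that exposes the SYS_pidfd_open, SYS_pidfd_send_signal and SYS_process_mrelease constants (process_mrelease needs Linux >= 5.15); the kill_and_reap() helper name is hypothetical and error handling is abbreviated.

#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical helper for a userspace low-memory killer. */
static int kill_and_reap(pid_t pid)
{
	/* pidfd_open(2): obtain a pidfd referring to the victim. */
	int pidfd = syscall(SYS_pidfd_open, pid, 0);

	if (pidfd < 0)
		return -1;

	/* SIGKILL first, so task_will_free_mem() sees a dying process. */
	syscall(SYS_pidfd_send_signal, pidfd, SIGKILL, NULL, 0);

	/* Reap the address space without waiting for the victim's exit_mmap(). */
	int ret = syscall(SYS_process_mrelease, pidfd, 0);

	close(pidfd);
	return ret;
}

A low-memory killer daemon would call kill_and_reap() right after picking a victim, so the victim's anonymous memory is released even if its exit path is slow (flags must be 0, matching the -EINVAL check in the syscall implementation above).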
// SPDX-License-Identifier: GPL-2.0-only /* * linux/mm/page_alloc.c * * Manages the free list, the system allocates free pages here. * Note that kmalloc() lives in slab.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * Swap reorganised 29.12.95, Stephen Tweedie * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) */ #include <linux/stddef.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/compiler.h> #include <linux/kernel.h> #include <linux/kasan.h> #include <linux/kmsan.h> #include <linux/module.h> #include <linux/suspend.h> #include <linux/ratelimit.h> #include <linux/oom.h> #include <linux/topology.h> #include <linux/sysctl.h> #include <linux/cpu.h> #include <linux/cpuset.h> #include <linux/memory_hotplug.h> #include <linux/nodemask.h> #include <linux/vmstat.h> #include <linux/fault-inject.h> #include <linux/compaction.h> #include <trace/events/kmem.h> #include <trace/events/oom.h> #include <linux/prefetch.h> #include <linux/mm_inline.h> #include <linux/mmu_notifier.h> #include <linux/migrate.h> #include <linux/sched/mm.h> #include <linux/page_owner.h> #include <linux/page_table_check.h> #include <linux/memcontrol.h> #include <linux/ftrace.h> #include <linux/lockdep.h> #include <linux/psi.h> #include <linux/khugepaged.h> #include <linux/delayacct.h> #include <asm/div64.h> #include "internal.h" #include "shuffle.h" #include "page_reporting.h" /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */ typedef int __bitwise fpi_t; /* No special request */ #define FPI_NONE ((__force fpi_t)0) /* * Skip free page reporting notification for the (possibly merged) page. * This does not hinder free page reporting from grabbing the page, * reporting it and marking it "reported" - it only skips notifying * the free page reporting infrastructure about a newly freed page. For * example, used when temporarily pulling a page from a freelist and * putting it back unmodified. */ #define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0)) /* * Place the (possibly merged) page to the tail of the freelist. Will ignore * page shuffling (relevant code - e.g., memory onlining - is expected to * shuffle the whole zone). * * Note: No code should rely on this flag for correctness - it's purely * to allow for optimizations when handing back either fresh pages * (memory onlining) or untouched pages (page isolation, free page * reporting). */ #define FPI_TO_TAIL ((__force fpi_t)BIT(1)) /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ static DEFINE_MUTEX(pcp_batch_high_lock); #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8) #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) /* * On SMP, spin_trylock is sufficient protection. * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP. */ #define pcp_trylock_prepare(flags) do { } while (0) #define pcp_trylock_finish(flag) do { } while (0) #else /* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */ #define pcp_trylock_prepare(flags) local_irq_save(flags) #define pcp_trylock_finish(flags) local_irq_restore(flags) #endif /* * Locking a pcp requires a PCP lookup followed by a spinlock. 
To avoid * a migration causing the wrong PCP to be locked and remote memory being * potentially allocated, pin the task to the CPU for the lookup+lock. * preempt_disable is used on !RT because it is faster than migrate_disable. * migrate_disable is used on RT because otherwise RT spinlock usage is * interfered with and a high priority task cannot preempt the allocator. */ #ifndef CONFIG_PREEMPT_RT #define pcpu_task_pin() preempt_disable() #define pcpu_task_unpin() preempt_enable() #else #define pcpu_task_pin() migrate_disable() #define pcpu_task_unpin() migrate_enable() #endif /* * Generic helper to lookup and a per-cpu variable with an embedded spinlock. * Return value should be used with equivalent unlock helper. */ #define pcpu_spin_lock(type, member, ptr) \ ({ \ type *_ret; \ pcpu_task_pin(); \ _ret = this_cpu_ptr(ptr); \ spin_lock(&_ret->member); \ _ret; \ }) #define pcpu_spin_trylock(type, member, ptr) \ ({ \ type *_ret; \ pcpu_task_pin(); \ _ret = this_cpu_ptr(ptr); \ if (!spin_trylock(&_ret->member)) { \ pcpu_task_unpin(); \ _ret = NULL; \ } \ _ret; \ }) #define pcpu_spin_unlock(member, ptr) \ ({ \ spin_unlock(&ptr->member); \ pcpu_task_unpin(); \ }) /* struct per_cpu_pages specific helpers. */ #define pcp_spin_lock(ptr) \ pcpu_spin_lock(struct per_cpu_pages, lock, ptr) #define pcp_spin_trylock(ptr) \ pcpu_spin_trylock(struct per_cpu_pages, lock, ptr) #define pcp_spin_unlock(ptr) \ pcpu_spin_unlock(lock, ptr) #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID DEFINE_PER_CPU(int, numa_node); EXPORT_PER_CPU_SYMBOL(numa_node); #endif DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key); #ifdef CONFIG_HAVE_MEMORYLESS_NODES /* * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem() * defined in <linux/topology.h>. */ DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ EXPORT_PER_CPU_SYMBOL(_numa_mem_); #endif static DEFINE_MUTEX(pcpu_drain_mutex); #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY volatile unsigned long latent_entropy __latent_entropy; EXPORT_SYMBOL(latent_entropy); #endif /* * Array of node states. */ nodemask_t node_states[NR_NODE_STATES] __read_mostly = { [N_POSSIBLE] = NODE_MASK_ALL, [N_ONLINE] = { { [0] = 1UL } }, #ifndef CONFIG_NUMA [N_NORMAL_MEMORY] = { { [0] = 1UL } }, #ifdef CONFIG_HIGHMEM [N_HIGH_MEMORY] = { { [0] = 1UL } }, #endif [N_MEMORY] = { { [0] = 1UL } }, [N_CPU] = { { [0] = 1UL } }, #endif /* NUMA */ }; EXPORT_SYMBOL(node_states); gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; /* * A cached value of the page's pageblock's migratetype, used when the page is * put on a pcplist. Used to avoid the pageblock migratetype lookup when * freeing from pcplists in most cases, at the cost of possibly becoming stale. * Also the migratetype set in the page does not necessarily match the pcplist * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any * other index - this ensures that it will be put on the correct CMA freelist. 
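 * (The cached value lives in page->index while the page sits on a pcplist;
 * see the get_pcppage_migratetype()/set_pcppage_migratetype() helpers below.)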
*/ static inline int get_pcppage_migratetype(struct page *page) { return page->index; } static inline void set_pcppage_migratetype(struct page *page, int migratetype) { page->index = migratetype; } #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE unsigned int pageblock_order __read_mostly; #endif static void __free_pages_ok(struct page *page, unsigned int order, fpi_t fpi_flags); /* * results with 256, 32 in the lowmem_reserve sysctl: * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) * 1G machine -> (16M dma, 784M normal, 224M high) * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA * * TBD: should special case ZONE_DMA32 machines here - in those we normally * don't need any ZONE_NORMAL reservation */ static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { #ifdef CONFIG_ZONE_DMA [ZONE_DMA] = 256, #endif #ifdef CONFIG_ZONE_DMA32 [ZONE_DMA32] = 256, #endif [ZONE_NORMAL] = 32, #ifdef CONFIG_HIGHMEM [ZONE_HIGHMEM] = 0, #endif [ZONE_MOVABLE] = 0, }; char * const zone_names[MAX_NR_ZONES] = { #ifdef CONFIG_ZONE_DMA "DMA", #endif #ifdef CONFIG_ZONE_DMA32 "DMA32", #endif "Normal", #ifdef CONFIG_HIGHMEM "HighMem", #endif "Movable", #ifdef CONFIG_ZONE_DEVICE "Device", #endif }; const char * const migratetype_names[MIGRATE_TYPES] = { "Unmovable", "Movable", "Reclaimable", "HighAtomic", #ifdef CONFIG_CMA "CMA", #endif #ifdef CONFIG_MEMORY_ISOLATION "Isolate", #endif }; int min_free_kbytes = 1024; int user_min_free_kbytes = -1; static int watermark_boost_factor __read_mostly = 15000; static int watermark_scale_factor = 10; /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ int movable_zone; EXPORT_SYMBOL(movable_zone); #if MAX_NUMNODES > 1 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES; unsigned int nr_online_nodes __read_mostly = 1; EXPORT_SYMBOL(nr_node_ids); EXPORT_SYMBOL(nr_online_nodes); #endif static bool page_contains_unaccepted(struct page *page, unsigned int order); static void accept_page(struct page *page, unsigned int order); static bool try_to_accept_memory(struct zone *zone, unsigned int order); static inline bool has_unaccepted_memory(void); static bool __free_unaccepted(struct page *page); int page_group_by_mobility_disabled __read_mostly; #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT /* * During boot we initialize deferred pages on-demand, as needed, but once * page_alloc_init_late() has finished, the deferred pages are all initialized, * and we can permanently disable that path. */ DEFINE_STATIC_KEY_TRUE(deferred_pages); static inline bool deferred_pages_enabled(void) { return static_branch_unlikely(&deferred_pages); } /* * deferred_grow_zone() is __init, but it is called from * get_page_from_freelist() during early boot until deferred_pages permanently * disables this call. This is why we have refdata wrapper to avoid warning, * and to ensure that the function body gets unloaded. 
*/ static bool __ref _deferred_grow_zone(struct zone *zone, unsigned int order) { return deferred_grow_zone(zone, order); } #else static inline bool deferred_pages_enabled(void) { return false; } #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ /* Return a pointer to the bitmap storing bits affecting a block of pages */ static inline unsigned long *get_pageblock_bitmap(const struct page *page, unsigned long pfn) { #ifdef CONFIG_SPARSEMEM return section_to_usemap(__pfn_to_section(pfn)); #else return page_zone(page)->pageblock_flags; #endif /* CONFIG_SPARSEMEM */ } static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn) { #ifdef CONFIG_SPARSEMEM pfn &= (PAGES_PER_SECTION-1); #else pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn); #endif /* CONFIG_SPARSEMEM */ return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; } /** * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages * @page: The page within the block of interest * @pfn: The target page frame number * @mask: mask of bits that the caller is interested in * * Return: pageblock_bits flags */ unsigned long get_pfnblock_flags_mask(const struct page *page, unsigned long pfn, unsigned long mask) { unsigned long *bitmap; unsigned long bitidx, word_bitidx; unsigned long word; bitmap = get_pageblock_bitmap(page, pfn); bitidx = pfn_to_bitidx(page, pfn); word_bitidx = bitidx / BITS_PER_LONG; bitidx &= (BITS_PER_LONG-1); /* * This races, without locks, with set_pfnblock_flags_mask(). Ensure * a consistent read of the memory array, so that results, even though * racy, are not corrupted. */ word = READ_ONCE(bitmap[word_bitidx]); return (word >> bitidx) & mask; } static __always_inline int get_pfnblock_migratetype(const struct page *page, unsigned long pfn) { return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK); } /** * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages * @page: The page within the block of interest * @flags: The flags to set * @pfn: The target page frame number * @mask: mask of bits that the caller is interested in */ void set_pfnblock_flags_mask(struct page *page, unsigned long flags, unsigned long pfn, unsigned long mask) { unsigned long *bitmap; unsigned long bitidx, word_bitidx; unsigned long word; BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits)); bitmap = get_pageblock_bitmap(page, pfn); bitidx = pfn_to_bitidx(page, pfn); word_bitidx = bitidx / BITS_PER_LONG; bitidx &= (BITS_PER_LONG-1); VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); mask <<= bitidx; flags <<= bitidx; word = READ_ONCE(bitmap[word_bitidx]); do { } while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags)); } void set_pageblock_migratetype(struct page *page, int migratetype) { if (unlikely(page_group_by_mobility_disabled && migratetype < MIGRATE_PCPTYPES)) migratetype = MIGRATE_UNMOVABLE; set_pfnblock_flags_mask(page, (unsigned long)migratetype, page_to_pfn(page), MIGRATETYPE_MASK); } #ifdef CONFIG_DEBUG_VM static int page_outside_zone_boundaries(struct zone *zone, struct page *page) { int ret; unsigned seq; unsigned long pfn = page_to_pfn(page); unsigned long sp, start_pfn; do { seq = zone_span_seqbegin(zone); start_pfn = zone->zone_start_pfn; sp = zone->spanned_pages; ret = !zone_spans_pfn(zone, pfn); } while (zone_span_seqretry(zone, seq)); if (ret) pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", pfn, zone_to_nid(zone), zone->name, 
start_pfn, start_pfn + sp); return ret; } /* * Temporary debugging check for pages not lying within a given zone. */ static int __maybe_unused bad_range(struct zone *zone, struct page *page) { if (page_outside_zone_boundaries(zone, page)) return 1; if (zone != page_zone(page)) return 1; return 0; } #else static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) { return 0; } #endif static void bad_page(struct page *page, const char *reason) { static unsigned long resume; static unsigned long nr_shown; static unsigned long nr_unshown; /* * Allow a burst of 60 reports, then keep quiet for that minute; * or allow a steady drip of one report per second. */ if (nr_shown == 60) { if (time_before(jiffies, resume)) { nr_unshown++; goto out; } if (nr_unshown) { pr_alert( "BUG: Bad page state: %lu messages suppressed\n", nr_unshown); nr_unshown = 0; } nr_shown = 0; } if (nr_shown++ == 0) resume = jiffies + 60 * HZ; pr_alert("BUG: Bad page state in process %s pfn:%05lx\n", current->comm, page_to_pfn(page)); dump_page(page, reason); print_modules(); dump_stack(); out: /* Leave bad fields for debug, except PageBuddy could make trouble */ page_mapcount_reset(page); /* remove PageBuddy */ add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); } static inline unsigned int order_to_pindex(int migratetype, int order) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (order > PAGE_ALLOC_COSTLY_ORDER) { VM_BUG_ON(order != pageblock_order); return NR_LOWORDER_PCP_LISTS; } #else VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); #endif return (MIGRATE_PCPTYPES * order) + migratetype; } static inline int pindex_to_order(unsigned int pindex) { int order = pindex / MIGRATE_PCPTYPES; #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (pindex == NR_LOWORDER_PCP_LISTS) order = pageblock_order; #else VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); #endif return order; } static inline bool pcp_allowed_order(unsigned int order) { if (order <= PAGE_ALLOC_COSTLY_ORDER) return true; #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (order == pageblock_order) return true; #endif return false; } static inline void free_the_page(struct page *page, unsigned int order) { if (pcp_allowed_order(order)) /* Via pcp? */ free_unref_page(page, order); else __free_pages_ok(page, order, FPI_NONE); } /* * Higher-order pages are called "compound pages". They are structured thusly: * * The first PAGE_SIZE page is called the "head page" and have PG_head set. * * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded * in bit 0 of page->compound_head. The rest of bits is pointer to head page. * * The first tail page's ->compound_order holds the order of allocation. * This usage means that zero-order pages may not be compound. 
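 * (Illustrative example: an order-2 __GFP_COMP allocation yields one head page
 * with PG_head set plus three tail pages whose compound_head points back to
 * the head, and the order (2) is recorded via the first tail page.)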
*/ void prep_compound_page(struct page *page, unsigned int order) { int i; int nr_pages = 1 << order; __SetPageHead(page); for (i = 1; i < nr_pages; i++) prep_compound_tail(page, i); prep_compound_head(page, order); } void destroy_large_folio(struct folio *folio) { if (folio_test_hugetlb(folio)) { free_huge_folio(folio); return; } if (folio_test_large_rmappable(folio)) folio_undo_large_rmappable(folio); mem_cgroup_uncharge(folio); free_the_page(&folio->page, folio_order(folio)); } static inline void set_buddy_order(struct page *page, unsigned int order) { set_page_private(page, order); __SetPageBuddy(page); } #ifdef CONFIG_COMPACTION static inline struct capture_control *task_capc(struct zone *zone) { struct capture_control *capc = current->capture_control; return unlikely(capc) && !(current->flags & PF_KTHREAD) && !capc->page && capc->cc->zone == zone ? capc : NULL; } static inline bool compaction_capture(struct capture_control *capc, struct page *page, int order, int migratetype) { if (!capc || order != capc->cc->order) return false; /* Do not accidentally pollute CMA or isolated regions*/ if (is_migrate_cma(migratetype) || is_migrate_isolate(migratetype)) return false; /* * Do not let lower order allocations pollute a movable pageblock. * This might let an unmovable request use a reclaimable pageblock * and vice-versa but no more than normal fallback logic which can * have trouble finding a high-order free page. */ if (order < pageblock_order && migratetype == MIGRATE_MOVABLE) return false; capc->page = page; return true; } #else static inline struct capture_control *task_capc(struct zone *zone) { return NULL; } static inline bool compaction_capture(struct capture_control *capc, struct page *page, int order, int migratetype) { return false; } #endif /* CONFIG_COMPACTION */ /* Used for pages not on another list */ static inline void add_to_free_list(struct page *page, struct zone *zone, unsigned int order, int migratetype) { struct free_area *area = &zone->free_area[order]; list_add(&page->buddy_list, &area->free_list[migratetype]); area->nr_free++; } /* Used for pages not on another list */ static inline void add_to_free_list_tail(struct page *page, struct zone *zone, unsigned int order, int migratetype) { struct free_area *area = &zone->free_area[order]; list_add_tail(&page->buddy_list, &area->free_list[migratetype]); area->nr_free++; } /* * Used for pages which are on another list. Move the pages to the tail * of the list - so the moved pages won't immediately be considered for * allocation again (e.g., optimization for memory onlining). */ static inline void move_to_free_list(struct page *page, struct zone *zone, unsigned int order, int migratetype) { struct free_area *area = &zone->free_area[order]; list_move_tail(&page->buddy_list, &area->free_list[migratetype]); } static inline void del_page_from_free_list(struct page *page, struct zone *zone, unsigned int order) { /* clear reported state and update reported page count */ if (page_reported(page)) __ClearPageReported(page); list_del(&page->buddy_list); __ClearPageBuddy(page); set_page_private(page, 0); zone->free_area[order].nr_free--; } static inline struct page *get_page_from_free_area(struct free_area *area, int migratetype) { return list_first_entry_or_null(&area->free_list[migratetype], struct page, buddy_list); } /* * If this is not the largest possible page, check if the buddy * of the next-highest order is free. If it is, it's possible * that pages are being freed that will coalesce soon. 
In case, * that is happening, add the free page to the tail of the list * so it's less likely to be used soon and more likely to be merged * as a higher order page */ static inline bool buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn, struct page *page, unsigned int order) { unsigned long higher_page_pfn; struct page *higher_page; if (order >= MAX_ORDER - 1) return false; higher_page_pfn = buddy_pfn & pfn; higher_page = page + (higher_page_pfn - pfn); return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1, NULL) != NULL; } /* * Freeing function for a buddy system allocator. * * The concept of a buddy system is to maintain direct-mapped table * (containing bit values) for memory blocks of various "orders". * The bottom level table contains the map for the smallest allocatable * units of memory (here, pages), and each level above it describes * pairs of units from the levels below, hence, "buddies". * At a high level, all that happens here is marking the table entry * at the bottom level available, and propagating the changes upward * as necessary, plus some accounting needed to play nicely with other * parts of the VM system. * At each level, we keep a list of pages, which are heads of continuous * free pages of length of (1 << order) and marked with PageBuddy. * Page's order is recorded in page_private(page) field. * So when we are allocating or freeing one, we can derive the state of the * other. That is, if we allocate a small block, and both were * free, the remainder of the region must be split into blocks. * If a block is freed, and its buddy is also free, then this * triggers coalescing into a block of larger size. * * -- nyc */ static inline void __free_one_page(struct page *page, unsigned long pfn, struct zone *zone, unsigned int order, int migratetype, fpi_t fpi_flags) { struct capture_control *capc = task_capc(zone); unsigned long buddy_pfn = 0; unsigned long combined_pfn; struct page *buddy; bool to_tail; VM_BUG_ON(!zone_is_initialized(zone)); VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); VM_BUG_ON(migratetype == -1); if (likely(!is_migrate_isolate(migratetype))) __mod_zone_freepage_state(zone, 1 << order, migratetype); VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); VM_BUG_ON_PAGE(bad_range(zone, page), page); while (order < MAX_ORDER) { if (compaction_capture(capc, page, order, migratetype)) { __mod_zone_freepage_state(zone, -(1 << order), migratetype); return; } buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn); if (!buddy) goto done_merging; if (unlikely(order >= pageblock_order)) { /* * We want to prevent merge between freepages on pageblock * without fallbacks and normal pageblock. Without this, * pageblock isolation could cause incorrect freepage or CMA * accounting or HIGHATOMIC accounting. */ int buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn); if (migratetype != buddy_mt && (!migratetype_is_mergeable(migratetype) || !migratetype_is_mergeable(buddy_mt))) goto done_merging; } /* * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, * merge with it and move up one order. 
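 * (Worked example with illustrative numbers: at order 3 the block at pfn 0x108
 * has its buddy at buddy_pfn 0x100, and the two merge into the order-4 block
 * starting at combined_pfn = 0x108 & 0x100 = 0x100.)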
*/ if (page_is_guard(buddy)) clear_page_guard(zone, buddy, order, migratetype); else del_page_from_free_list(buddy, zone, order); combined_pfn = buddy_pfn & pfn; page = page + (combined_pfn - pfn); pfn = combined_pfn; order++; } done_merging: set_buddy_order(page, order); if (fpi_flags & FPI_TO_TAIL) to_tail = true; else if (is_shuffle_order(order)) to_tail = shuffle_pick_tail(); else to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); if (to_tail) add_to_free_list_tail(page, zone, order, migratetype); else add_to_free_list(page, zone, order, migratetype); /* Notify page reporting subsystem of freed page */ if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY)) page_reporting_notify_free(order); } /** * split_free_page() -- split a free page at split_pfn_offset * @free_page: the original free page * @order: the order of the page * @split_pfn_offset: split offset within the page * * Return -ENOENT if the free page is changed, otherwise 0 * * It is used when the free page crosses two pageblocks with different migratetypes * at split_pfn_offset within the page. The split free page will be put into * separate migratetype lists afterwards. Otherwise, the function achieves * nothing. */ int split_free_page(struct page *free_page, unsigned int order, unsigned long split_pfn_offset) { struct zone *zone = page_zone(free_page); unsigned long free_page_pfn = page_to_pfn(free_page); unsigned long pfn; unsigned long flags; int free_page_order; int mt; int ret = 0; if (split_pfn_offset == 0) return ret; spin_lock_irqsave(&zone->lock, flags); if (!PageBuddy(free_page) || buddy_order(free_page) != order) { ret = -ENOENT; goto out; } mt = get_pfnblock_migratetype(free_page, free_page_pfn); if (likely(!is_migrate_isolate(mt))) __mod_zone_freepage_state(zone, -(1UL << order), mt); del_page_from_free_list(free_page, zone, order); for (pfn = free_page_pfn; pfn < free_page_pfn + (1UL << order);) { int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn); free_page_order = min_t(unsigned int, pfn ? __ffs(pfn) : order, __fls(split_pfn_offset)); __free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order, mt, FPI_NONE); pfn += 1UL << free_page_order; split_pfn_offset -= (1UL << free_page_order); /* we have done the first part, now switch to second part */ if (split_pfn_offset == 0) split_pfn_offset = (1UL << order) - (pfn - free_page_pfn); } out: spin_unlock_irqrestore(&zone->lock, flags); return ret; } /* * A bad page could be due to a number of fields. Instead of multiple branches, * try and check multiple fields with one check. The caller must do a detailed * check if necessary. 
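 * (page_expected_state() below does this by checking the mapcount and then
 * OR-ing ->mapping, the refcount, the memcg data and the flag bits of interest
 * into one word, so the common "all clear" case costs a single test.)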
*/ static inline bool page_expected_state(struct page *page, unsigned long check_flags) { if (unlikely(atomic_read(&page->_mapcount) != -1)) return false; if (unlikely((unsigned long)page->mapping | page_ref_count(page) | #ifdef CONFIG_MEMCG page->memcg_data | #endif (page->flags & check_flags))) return false; return true; } static const char *page_bad_reason(struct page *page, unsigned long flags) { const char *bad_reason = NULL; if (unlikely(atomic_read(&page->_mapcount) != -1)) bad_reason = "nonzero mapcount"; if (unlikely(page->mapping != NULL)) bad_reason = "non-NULL mapping"; if (unlikely(page_ref_count(page) != 0)) bad_reason = "nonzero _refcount"; if (unlikely(page->flags & flags)) { if (flags == PAGE_FLAGS_CHECK_AT_PREP) bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set"; else bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; } #ifdef CONFIG_MEMCG if (unlikely(page->memcg_data)) bad_reason = "page still charged to cgroup"; #endif return bad_reason; } static void free_page_is_bad_report(struct page *page) { bad_page(page, page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE)); } static inline bool free_page_is_bad(struct page *page) { if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) return false; /* Something has gone sideways, find it */ free_page_is_bad_report(page); return true; } static inline bool is_check_pages_enabled(void) { return static_branch_unlikely(&check_pages_enabled); } static int free_tail_page_prepare(struct page *head_page, struct page *page) { struct folio *folio = (struct folio *)head_page; int ret = 1; /* * We rely page->lru.next never has bit 0 set, unless the page * is PageTail(). Let's make sure that's true even for poisoned ->lru. */ BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); if (!is_check_pages_enabled()) { ret = 0; goto out; } switch (page - head_page) { case 1: /* the first tail page: these may be in place of ->mapping */ if (unlikely(folio_entire_mapcount(folio))) { bad_page(page, "nonzero entire_mapcount"); goto out; } if (unlikely(atomic_read(&folio->_nr_pages_mapped))) { bad_page(page, "nonzero nr_pages_mapped"); goto out; } if (unlikely(atomic_read(&folio->_pincount))) { bad_page(page, "nonzero pincount"); goto out; } break; case 2: /* * the second tail page: ->mapping is * deferred_list.next -- ignore value. */ break; default: if (page->mapping != TAIL_MAPPING) { bad_page(page, "corrupted mapping in tail page"); goto out; } break; } if (unlikely(!PageTail(page))) { bad_page(page, "PageTail not set"); goto out; } if (unlikely(compound_head(page) != head_page)) { bad_page(page, "compound_head not consistent"); goto out; } ret = 0; out: page->mapping = NULL; clear_compound_head(page); return ret; } /* * Skip KASAN memory poisoning when either: * * 1. For generic KASAN: deferred memory initialization has not yet completed. * Tag-based KASAN modes skip pages freed via deferred memory initialization * using page tags instead (see below). * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating * that error detection is disabled for accesses via the page address. * * Pages will have match-all tags in the following circumstances: * * 1. Pages are being initialized for the first time, including during deferred * memory init; see the call to page_kasan_tag_reset in __init_single_page. * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the * exception of pages unpoisoned by kasan_unpoison_vmalloc. * 3. 
The allocation was excluded from being checked due to sampling, * see the call to kasan_unpoison_pages. * * Poisoning pages during deferred memory init will greatly lengthen the * process and cause problem in large memory systems as the deferred pages * initialization is done with interrupt disabled. * * Assuming that there will be no reference to those newly initialized * pages before they are ever allocated, this should have no effect on * KASAN memory tracking as the poison will be properly inserted at page * allocation time. The only corner case is when pages are allocated by * on-demand allocation and then freed again before the deferred pages * initialization is done, but this is not likely to happen. */ static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags) { if (IS_ENABLED(CONFIG_KASAN_GENERIC)) return deferred_pages_enabled(); return page_kasan_tag(page) == 0xff; } static void kernel_init_pages(struct page *page, int numpages) { int i; /* s390's use of memset() could override KASAN redzones. */ kasan_disable_current(); for (i = 0; i < numpages; i++) clear_highpage_kasan_tagged(page + i); kasan_enable_current(); } static __always_inline bool free_pages_prepare(struct page *page, unsigned int order, fpi_t fpi_flags) { int bad = 0; bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags); bool init = want_init_on_free(); VM_BUG_ON_PAGE(PageTail(page), page); trace_mm_page_free(page, order); kmsan_free_page(page, order); if (unlikely(PageHWPoison(page)) && !order) { /* * Do not let hwpoison pages hit pcplists/buddy * Untie memcg state and reset page's owner */ if (memcg_kmem_online() && PageMemcgKmem(page)) __memcg_kmem_uncharge_page(page, order); reset_page_owner(page, order); page_table_check_free(page, order); return false; } /* * Check tail pages before head page information is cleared to * avoid checking PageCompound for order-0 pages. */ if (unlikely(order)) { bool compound = PageCompound(page); int i; VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); if (compound) page[1].flags &= ~PAGE_FLAGS_SECOND; for (i = 1; i < (1 << order); i++) { if (compound) bad += free_tail_page_prepare(page, page + i); if (is_check_pages_enabled()) { if (free_page_is_bad(page + i)) { bad++; continue; } } (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; } } if (PageMappingFlags(page)) page->mapping = NULL; if (memcg_kmem_online() && PageMemcgKmem(page)) __memcg_kmem_uncharge_page(page, order); if (is_check_pages_enabled()) { if (free_page_is_bad(page)) bad++; if (bad) return false; } page_cpupid_reset_last(page); page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; reset_page_owner(page, order); page_table_check_free(page, order); if (!PageHighMem(page)) { debug_check_no_locks_freed(page_address(page), PAGE_SIZE << order); debug_check_no_obj_freed(page_address(page), PAGE_SIZE << order); } kernel_poison_pages(page, 1 << order); /* * As memory initialization might be integrated into KASAN, * KASAN poisoning and memory initialization code must be * kept together to avoid discrepancies in behavior. * * With hardware tag-based KASAN, memory tags must be set before the * page becomes unavailable via debug_pagealloc or arch_free_page. */ if (!skip_kasan_poison) { kasan_poison_pages(page, order, init); /* Memory is already initialized if KASAN did it internally. */ if (kasan_has_integrated_init()) init = false; } if (init) kernel_init_pages(page, 1 << order); /* * arch_free_page() can make the page's contents inaccessible. s390 * does this. 
So nothing which can access the page's contents should * happen after this. */ arch_free_page(page, order); debug_pagealloc_unmap_pages(page, 1 << order); return true; } /* * Frees a number of pages from the PCP lists * Assumes all pages on list are in same zone. * count is the number of pages to free. */ static void free_pcppages_bulk(struct zone *zone, int count, struct per_cpu_pages *pcp, int pindex) { unsigned long flags; unsigned int order; bool isolated_pageblocks; struct page *page; /* * Ensure proper count is passed which otherwise would stuck in the * below while (list_empty(list)) loop. */ count = min(pcp->count, count); /* Ensure requested pindex is drained first. */ pindex = pindex - 1; spin_lock_irqsave(&zone->lock, flags); isolated_pageblocks = has_isolate_pageblock(zone); while (count > 0) { struct list_head *list; int nr_pages; /* Remove pages from lists in a round-robin fashion. */ do { if (++pindex > NR_PCP_LISTS - 1) pindex = 0; list = &pcp->lists[pindex]; } while (list_empty(list)); order = pindex_to_order(pindex); nr_pages = 1 << order; do { int mt; page = list_last_entry(list, struct page, pcp_list); mt = get_pcppage_migratetype(page); /* must delete to avoid corrupting pcp list */ list_del(&page->pcp_list); count -= nr_pages; pcp->count -= nr_pages; /* MIGRATE_ISOLATE page should not go to pcplists */ VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); /* Pageblock could have been isolated meanwhile */ if (unlikely(isolated_pageblocks)) mt = get_pageblock_migratetype(page); __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE); trace_mm_page_pcpu_drain(page, order, mt); } while (count > 0 && !list_empty(list)); } spin_unlock_irqrestore(&zone->lock, flags); } static void free_one_page(struct zone *zone, struct page *page, unsigned long pfn, unsigned int order, int migratetype, fpi_t fpi_flags) { unsigned long flags; spin_lock_irqsave(&zone->lock, flags); if (unlikely(has_isolate_pageblock(zone) || is_migrate_isolate(migratetype))) { migratetype = get_pfnblock_migratetype(page, pfn); } __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); spin_unlock_irqrestore(&zone->lock, flags); } static void __free_pages_ok(struct page *page, unsigned int order, fpi_t fpi_flags) { unsigned long flags; int migratetype; unsigned long pfn = page_to_pfn(page); struct zone *zone = page_zone(page); if (!free_pages_prepare(page, order, fpi_flags)) return; /* * Calling get_pfnblock_migratetype() without spin_lock_irqsave() here * is used to avoid calling get_pfnblock_migratetype() under the lock. * This will reduce the lock holding time. */ migratetype = get_pfnblock_migratetype(page, pfn); spin_lock_irqsave(&zone->lock, flags); if (unlikely(has_isolate_pageblock(zone) || is_migrate_isolate(migratetype))) { migratetype = get_pfnblock_migratetype(page, pfn); } __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); spin_unlock_irqrestore(&zone->lock, flags); __count_vm_events(PGFREE, 1 << order); } void __free_pages_core(struct page *page, unsigned int order) { unsigned int nr_pages = 1 << order; struct page *p = page; unsigned int loop; /* * When initializing the memmap, __init_single_page() sets the refcount * of all pages to 1 ("allocated"/"not free"). We have to set the * refcount of all involved pages to 0. 
*/ prefetchw(p); for (loop = 0; loop < (nr_pages - 1); loop++, p++) { prefetchw(p + 1); __ClearPageReserved(p); set_page_count(p, 0); } __ClearPageReserved(p); set_page_count(p, 0); atomic_long_add(nr_pages, &page_zone(page)->managed_pages); if (page_contains_unaccepted(page, order)) { if (order == MAX_ORDER && __free_unaccepted(page)) return; accept_page(page, order); } /* * Bypass PCP and place fresh pages right to the tail, primarily * relevant for memory onlining. */ __free_pages_ok(page, order, FPI_TO_TAIL); } /* * Check that the whole (or subset of) a pageblock given by the interval of * [start_pfn, end_pfn) is valid and within the same zone, before scanning it * with the migration of free compaction scanner. * * Return struct page pointer of start_pfn, or NULL if checks were not passed. * * It's possible on some configurations to have a setup like node0 node1 node0 * i.e. it's possible that all pages within a zones range of pages do not * belong to a single zone. We assume that a border between node0 and node1 * can occur within a single pageblock, but not a node0 node1 node0 * interleaving within a single pageblock. It is therefore sufficient to check * the first and last page of a pageblock and avoid checking each individual * page in a pageblock. * * Note: the function may return non-NULL struct page even for a page block * which contains a memory hole (i.e. there is no physical memory for a subset * of the pfn range). For example, if the pageblock order is MAX_ORDER, which * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole * even though the start pfn is online and valid. This should be safe most of * the time because struct pages are still initialized via init_unavailable_range() * and pfn walkers shouldn't touch any physical memory range for which they do * not recognize any specific metadata in struct pages. */ struct page *__pageblock_pfn_to_page(unsigned long start_pfn, unsigned long end_pfn, struct zone *zone) { struct page *start_page; struct page *end_page; /* end_pfn is one past the range we are checking */ end_pfn--; if (!pfn_valid(end_pfn)) return NULL; start_page = pfn_to_online_page(start_pfn); if (!start_page) return NULL; if (page_zone(start_page) != zone) return NULL; end_page = pfn_to_page(end_pfn); /* This gives a shorter code than deriving page_zone(end_page) */ if (page_zone_id(start_page) != page_zone_id(end_page)) return NULL; return start_page; } /* * The order of subdivision here is critical for the IO subsystem. * Please do not alter this order without good reasons and regression * testing. Specifically, as large blocks of memory are subdivided, * the order in which smaller blocks are delivered depends on the order * they're subdivided in this function. This is the primary factor * influencing the order in which pages are delivered to the IO * subsystem according to empirical testing, and this is also justified * by considering the behavior of a buddy system containing a single * large block of memory acted on by a series of small allocations. * This behavior is a critical factor in sglist merging's success. * * -- nyc */ static inline void expand(struct zone *zone, struct page *page, int low, int high, int migratetype) { unsigned long size = 1 << high; while (high > low) { high--; size >>= 1; VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); /* * Mark as guard pages (or page), that will allow to * merge back to allocator when buddy will be freed. 
* Corresponding page table entries will not be touched, * pages will stay not present in virtual address space */ if (set_page_guard(zone, &page[size], high, migratetype)) continue; add_to_free_list(&page[size], zone, high, migratetype); set_buddy_order(&page[size], high); } } static void check_new_page_bad(struct page *page) { if (unlikely(page->flags & __PG_HWPOISON)) { /* Don't complain about hwpoisoned pages */ page_mapcount_reset(page); /* remove PageBuddy */ return; } bad_page(page, page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); } /* * This page is about to be returned from the page allocator */ static int check_new_page(struct page *page) { if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))) return 0; check_new_page_bad(page); return 1; } static inline bool check_new_pages(struct page *page, unsigned int order) { if (is_check_pages_enabled()) { for (int i = 0; i < (1 << order); i++) { struct page *p = page + i; if (check_new_page(p)) return true; } } return false; } static inline bool should_skip_kasan_unpoison(gfp_t flags) { /* Don't skip if a software KASAN mode is enabled. */ if (IS_ENABLED(CONFIG_KASAN_GENERIC) || IS_ENABLED(CONFIG_KASAN_SW_TAGS)) return false; /* Skip, if hardware tag-based KASAN is not enabled. */ if (!kasan_hw_tags_enabled()) return true; /* * With hardware tag-based KASAN enabled, skip if this has been * requested via __GFP_SKIP_KASAN. */ return flags & __GFP_SKIP_KASAN; } static inline bool should_skip_init(gfp_t flags) { /* Don't skip, if hardware tag-based KASAN is not enabled. */ if (!kasan_hw_tags_enabled()) return false; /* For hardware tag-based KASAN, skip if requested. */ return (flags & __GFP_SKIP_ZERO); } inline void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags) { bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) && !should_skip_init(gfp_flags); bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS); int i; set_page_private(page, 0); set_page_refcounted(page); arch_alloc_page(page, order); debug_pagealloc_map_pages(page, 1 << order); /* * Page unpoisoning must happen before memory initialization. * Otherwise, the poison pattern will be overwritten for __GFP_ZERO * allocations and the page unpoisoning code will complain. */ kernel_unpoison_pages(page, 1 << order); /* * As memory initialization might be integrated into KASAN, * KASAN unpoisoning and memory initializion code must be * kept together to avoid discrepancies in behavior. */ /* * If memory tags should be zeroed * (which happens only when memory should be initialized as well). */ if (zero_tags) { /* Initialize both memory and memory tags. */ for (i = 0; i != 1 << order; ++i) tag_clear_highpage(page + i); /* Take note that memory was initialized by the loop above. */ init = false; } if (!should_skip_kasan_unpoison(gfp_flags) && kasan_unpoison_pages(page, order, init)) { /* Take note that memory was initialized by KASAN. */ if (kasan_has_integrated_init()) init = false; } else { /* * If memory tags have not been set by KASAN, reset the page * tags to ensure page_address() dereferencing does not fault. */ for (i = 0; i != 1 << order; ++i) page_kasan_tag_reset(page + i); } /* If memory is still not initialized, initialize it now. 
*/ if (init) kernel_init_pages(page, 1 << order); set_page_owner(page, order, gfp_flags); page_table_check_alloc(page, order); } static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, unsigned int alloc_flags) { post_alloc_hook(page, order, gfp_flags); if (order && (gfp_flags & __GFP_COMP)) prep_compound_page(page, order); /* * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to * allocate the page. The expectation is that the caller is taking * steps that will free more memory. The caller should avoid the page * being used for !PFMEMALLOC purposes. */ if (alloc_flags & ALLOC_NO_WATERMARKS) set_page_pfmemalloc(page); else clear_page_pfmemalloc(page); } /* * Go through the free lists for the given migratetype and remove * the smallest available page from the freelists */ static __always_inline struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, int migratetype) { unsigned int current_order; struct free_area *area; struct page *page; /* Find a page of the appropriate size in the preferred list */ for (current_order = order; current_order <= MAX_ORDER; ++current_order) { area = &(zone->free_area[current_order]); page = get_page_from_free_area(area, migratetype); if (!page) continue; del_page_from_free_list(page, zone, current_order); expand(zone, page, order, current_order, migratetype); set_pcppage_migratetype(page, migratetype); trace_mm_page_alloc_zone_locked(page, order, migratetype, pcp_allowed_order(order) && migratetype < MIGRATE_PCPTYPES); return page; } return NULL; } /* * This array describes the order lists are fallen back to when * the free lists for the desirable migrate type are depleted * * The other migratetypes do not have fallbacks. */ static int fallbacks[MIGRATE_TYPES][MIGRATE_PCPTYPES - 1] = { [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE }, [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE }, [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE }, }; #ifdef CONFIG_CMA static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, unsigned int order) { return __rmqueue_smallest(zone, order, MIGRATE_CMA); } #else static inline struct page *__rmqueue_cma_fallback(struct zone *zone, unsigned int order) { return NULL; } #endif /* * Move the free pages in a range to the freelist tail of the requested type. * Note that start_page and end_pages are not aligned on a pageblock * boundary. If alignment is required, use move_freepages_block() */ static int move_freepages(struct zone *zone, unsigned long start_pfn, unsigned long end_pfn, int migratetype, int *num_movable) { struct page *page; unsigned long pfn; unsigned int order; int pages_moved = 0; for (pfn = start_pfn; pfn <= end_pfn;) { page = pfn_to_page(pfn); if (!PageBuddy(page)) { /* * We assume that pages that could be isolated for * migration are movable. But we don't actually try * isolating, as that would be expensive. 
*/ if (num_movable && (PageLRU(page) || __PageMovable(page))) (*num_movable)++; pfn++; continue; } /* Make sure we are not inadvertently changing nodes */ VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); VM_BUG_ON_PAGE(page_zone(page) != zone, page); order = buddy_order(page); move_to_free_list(page, zone, order, migratetype); pfn += 1 << order; pages_moved += 1 << order; } return pages_moved; } int move_freepages_block(struct zone *zone, struct page *page, int migratetype, int *num_movable) { unsigned long start_pfn, end_pfn, pfn; if (num_movable) *num_movable = 0; pfn = page_to_pfn(page); start_pfn = pageblock_start_pfn(pfn); end_pfn = pageblock_end_pfn(pfn) - 1; /* Do not cross zone boundaries */ if (!zone_spans_pfn(zone, start_pfn)) start_pfn = pfn; if (!zone_spans_pfn(zone, end_pfn)) return 0; return move_freepages(zone, start_pfn, end_pfn, migratetype, num_movable); } static void change_pageblock_range(struct page *pageblock_page, int start_order, int migratetype) { int nr_pageblocks = 1 << (start_order - pageblock_order); while (nr_pageblocks--) { set_pageblock_migratetype(pageblock_page, migratetype); pageblock_page += pageblock_nr_pages; } } /* * When we are falling back to another migratetype during allocation, try to * steal extra free pages from the same pageblocks to satisfy further * allocations, instead of polluting multiple pageblocks. * * If we are stealing a relatively large buddy page, it is likely there will * be more free pages in the pageblock, so try to steal them all. For * reclaimable and unmovable allocations, we steal regardless of page size, * as fragmentation caused by those allocations polluting movable pageblocks * is worse than movable allocations stealing from unmovable and reclaimable * pageblocks. */ static bool can_steal_fallback(unsigned int order, int start_mt) { /* * Leaving this order check is intended, although there is * relaxed order check in next check. The reason is that * we can actually steal whole pageblock if this condition met, * but, below check doesn't guarantee it and that is just heuristic * so could be changed anytime. */ if (order >= pageblock_order) return true; if (order >= pageblock_order / 2 || start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE || page_group_by_mobility_disabled) return true; return false; } static inline bool boost_watermark(struct zone *zone) { unsigned long max_boost; if (!watermark_boost_factor) return false; /* * Don't bother in zones that are unlikely to produce results. * On small machines, including kdump capture kernels running * in a small area, boosting the watermark can cause an out of * memory situation immediately. */ if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) return false; max_boost = mult_frac(zone->_watermark[WMARK_HIGH], watermark_boost_factor, 10000); /* * high watermark may be uninitialised if fragmentation occurs * very early in boot so do not boost. We do not fall * through and boost by pageblock_nr_pages as failing * allocations that early means that reclaim is not going * to help and it may even be impossible to reclaim the * boosted watermark resulting in a hang. */ if (!max_boost) return false; max_boost = max(pageblock_nr_pages, max_boost); zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, max_boost); return true; } /* * This function implements actual steal behaviour. If order is large enough, * we can steal whole pageblock. 
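 */

/*
 * A minimal stand-alone sketch of the boosting rule implemented by
 * boost_watermark() above, kept deliberately simple: plain unsigned longs
 * replace struct zone, the "sketch_" name is hypothetical, and the
 * mult_frac() overflow protection is omitted. It returns the new boost value
 * instead of updating the zone, but the arithmetic (the small-zone guard, the
 * high-watermark fraction and the clamping) follows the code above.
 */
static inline unsigned long sketch_boosted_watermark(unsigned long cur_boost,
						     unsigned long high_wmark,
						     unsigned long managed_pages,
						     unsigned long pageblock_pages,
						     unsigned long boost_factor)
{
	unsigned long max_boost;

	/* Boosting disabled via watermark_boost_factor. */
	if (!boost_factor)
		return cur_boost;

	/* Tiny zones (one pageblock is over ~25% of the zone): don't boost. */
	if (pageblock_pages * 4 > managed_pages)
		return cur_boost;

	/* mult_frac(high_wmark, boost_factor, 10000), ignoring overflow here. */
	max_boost = high_wmark * boost_factor / 10000;

	/* High watermark not initialised yet (very early in boot). */
	if (!max_boost)
		return cur_boost;

	if (max_boost < pageblock_pages)
		max_boost = pageblock_pages;

	/* Boost by one pageblock per event, capped at max_boost. */
	cur_boost += pageblock_pages;
	return cur_boost < max_boost ? cur_boost : max_boost;
}

/*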
If not, we first move freepages in this * pageblock to our migratetype and determine how many already-allocated pages * are there in the pageblock with a compatible migratetype. If at least half * of pages are free or compatible, we can change migratetype of the pageblock * itself, so pages freed in the future will be put on the correct free list. */ static void steal_suitable_fallback(struct zone *zone, struct page *page, unsigned int alloc_flags, int start_type, bool whole_block) { unsigned int current_order = buddy_order(page); int free_pages, movable_pages, alike_pages; int old_block_type; old_block_type = get_pageblock_migratetype(page); /* * This can happen due to races and we want to prevent broken * highatomic accounting. */ if (is_migrate_highatomic(old_block_type)) goto single_page; /* Take ownership for orders >= pageblock_order */ if (current_order >= pageblock_order) { change_pageblock_range(page, current_order, start_type); goto single_page; } /* * Boost watermarks to increase reclaim pressure to reduce the * likelihood of future fallbacks. Wake kswapd now as the node * may be balanced overall and kswapd will not wake naturally. */ if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); /* We are not allowed to try stealing from the whole block */ if (!whole_block) goto single_page; free_pages = move_freepages_block(zone, page, start_type, &movable_pages); /* moving whole block can fail due to zone boundary conditions */ if (!free_pages) goto single_page; /* * Determine how many pages are compatible with our allocation. * For movable allocation, it's the number of movable pages which * we just obtained. For other types it's a bit more tricky. */ if (start_type == MIGRATE_MOVABLE) { alike_pages = movable_pages; } else { /* * If we are falling back a RECLAIMABLE or UNMOVABLE allocation * to MOVABLE pageblock, consider all non-movable pages as * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or * vice versa, be conservative since we can't distinguish the * exact migratetype of non-movable pages. */ if (old_block_type == MIGRATE_MOVABLE) alike_pages = pageblock_nr_pages - (free_pages + movable_pages); else alike_pages = 0; } /* * If a sufficient number of pages in the block are either free or of * compatible migratability as our allocation, claim the whole block. */ if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || page_group_by_mobility_disabled) set_pageblock_migratetype(page, start_type); return; single_page: move_to_free_list(page, zone, current_order, start_type); } /* * Check whether there is a suitable fallback freepage with requested order. * If only_stealable is true, this function returns fallback_mt only if * we can steal other freepages all together. This would help to reduce * fragmentation due to mixed migratetype pages in one pageblock. 
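 */

/*
 * A compact model of the decision steal_suitable_fallback() makes once the
 * free pages have been moved: how many already-allocated pages count as
 * "alike", and whether that is enough to claim the whole pageblock. The
 * "sketch_" helpers and their integer parameters are illustrative stand-ins,
 * not kernel interfaces.
 */
static inline unsigned long sketch_alike_pages(int start_is_movable,
					       int old_block_was_movable,
					       unsigned long pageblock_pages,
					       unsigned long free_pages,
					       unsigned long movable_pages)
{
	/* Movable requests: every movable page already there is compatible. */
	if (start_is_movable)
		return movable_pages;
	/*
	 * Unmovable/reclaimable requests falling back to a movable block:
	 * treat all used, non-movable pages as compatible. Otherwise be
	 * conservative, since the exact migratetype of the used pages is
	 * unknown.
	 */
	if (old_block_was_movable)
		return pageblock_pages - (free_pages + movable_pages);
	return 0;
}

static inline int sketch_claim_whole_pageblock(unsigned long free_pages,
					       unsigned long alike_pages,
					       unsigned long pageblock_pages,
					       int mobility_grouping_disabled)
{
	/* Claim the block once at least half of it is free or compatible. */
	return mobility_grouping_disabled ||
	       free_pages + alike_pages >= pageblock_pages / 2;
}

/*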
*/ int find_suitable_fallback(struct free_area *area, unsigned int order, int migratetype, bool only_stealable, bool *can_steal) { int i; int fallback_mt; if (area->nr_free == 0) return -1; *can_steal = false; for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { fallback_mt = fallbacks[migratetype][i]; if (free_area_empty(area, fallback_mt)) continue; if (can_steal_fallback(order, migratetype)) *can_steal = true; if (!only_stealable) return fallback_mt; if (*can_steal) return fallback_mt; } return -1; } /* * Reserve a pageblock for exclusive use of high-order atomic allocations if * there are no empty page blocks that contain a page with a suitable order */ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone) { int mt; unsigned long max_managed, flags; /* * Limit the number reserved to 1 pageblock or roughly 1% of a zone. * Check is race-prone but harmless. */ max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; if (zone->nr_reserved_highatomic >= max_managed) return; spin_lock_irqsave(&zone->lock, flags); /* Recheck the nr_reserved_highatomic limit under the lock */ if (zone->nr_reserved_highatomic >= max_managed) goto out_unlock; /* Yoink! */ mt = get_pageblock_migratetype(page); /* Only reserve normal pageblocks (i.e., they can merge with others) */ if (migratetype_is_mergeable(mt)) { zone->nr_reserved_highatomic += pageblock_nr_pages; set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); } out_unlock: spin_unlock_irqrestore(&zone->lock, flags); } /* * Used when an allocation is about to fail under memory pressure. This * potentially hurts the reliability of high-order allocations when under * intense memory pressure but failed atomic allocations should be easier * to recover from than an OOM. * * If @force is true, try to unreserve a pageblock even though highatomic * pageblock is exhausted. */ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, bool force) { struct zonelist *zonelist = ac->zonelist; unsigned long flags; struct zoneref *z; struct zone *zone; struct page *page; int order; bool ret; for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, ac->nodemask) { /* * Preserve at least one pageblock unless memory pressure * is really high. */ if (!force && zone->nr_reserved_highatomic <= pageblock_nr_pages) continue; spin_lock_irqsave(&zone->lock, flags); for (order = 0; order <= MAX_ORDER; order++) { struct free_area *area = &(zone->free_area[order]); page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); if (!page) continue; /* * In page freeing path, migratetype change is racy so * we can counter several free pages in a pageblock * in this loop although we changed the pageblock type * from highatomic to ac->migratetype. So we should * adjust the count once. */ if (is_migrate_highatomic_page(page)) { /* * It should never happen but changes to * locking could inadvertently allow a per-cpu * drain to add pages to MIGRATE_HIGHATOMIC * while unreserving so be safe and watch for * underflows. */ zone->nr_reserved_highatomic -= min( pageblock_nr_pages, zone->nr_reserved_highatomic); } /* * Convert to ac->migratetype and avoid the normal * pageblock stealing heuristics. Minimally, the caller * is doing the work and needs the pages. More * importantly, if the block was always converted to * MIGRATE_UNMOVABLE or another type then the number * of pageblocks that cannot be completely freed * may increase. 
*/ set_pageblock_migratetype(page, ac->migratetype); ret = move_freepages_block(zone, page, ac->migratetype, NULL); if (ret) { spin_unlock_irqrestore(&zone->lock, flags); return ret; } } spin_unlock_irqrestore(&zone->lock, flags); } return false; } /* * Try finding a free buddy page on the fallback list and put it on the free * list of requested migratetype, possibly along with other pages from the same * block, depending on fragmentation avoidance heuristics. Returns true if * fallback was found so that __rmqueue_smallest() can grab it. * * The use of signed ints for order and current_order is a deliberate * deviation from the rest of this file, to make the for loop * condition simpler. */ static __always_inline bool __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, unsigned int alloc_flags) { struct free_area *area; int current_order; int min_order = order; struct page *page; int fallback_mt; bool can_steal; /* * Do not steal pages from freelists belonging to other pageblocks * i.e. orders < pageblock_order. If there are no local zones free, * the zonelists will be reiterated without ALLOC_NOFRAGMENT. */ if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) min_order = pageblock_order; /* * Find the largest available free page in the other list. This roughly * approximates finding the pageblock with the most free pages, which * would be too costly to do exactly. */ for (current_order = MAX_ORDER; current_order >= min_order; --current_order) { area = &(zone->free_area[current_order]); fallback_mt = find_suitable_fallback(area, current_order, start_migratetype, false, &can_steal); if (fallback_mt == -1) continue; /* * We cannot steal all free pages from the pageblock and the * requested migratetype is movable. In that case it's better to * steal and split the smallest available page instead of the * largest available page, because even if the next movable * allocation falls back into a different pageblock than this * one, it won't cause permanent fragmentation. */ if (!can_steal && start_migratetype == MIGRATE_MOVABLE && current_order > order) goto find_smallest; goto do_steal; } return false; find_smallest: for (current_order = order; current_order <= MAX_ORDER; current_order++) { area = &(zone->free_area[current_order]); fallback_mt = find_suitable_fallback(area, current_order, start_migratetype, false, &can_steal); if (fallback_mt != -1) break; } /* * This should not happen - we already found a suitable fallback * when looking for the largest page. */ VM_BUG_ON(current_order > MAX_ORDER); do_steal: page = get_page_from_free_area(area, fallback_mt); steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, can_steal); trace_mm_page_alloc_extfrag(page, order, current_order, start_migratetype, fallback_mt); return true; } /* * Do the hard work of removing an element from the buddy allocator. * Call me with the zone->lock already held. */ static __always_inline struct page * __rmqueue(struct zone *zone, unsigned int order, int migratetype, unsigned int alloc_flags) { struct page *page; if (IS_ENABLED(CONFIG_CMA)) { /* * Balance movable allocations between regular and CMA areas by * allocating from CMA when over half of the zone's free memory * is in the CMA area. 
*/ if (alloc_flags & ALLOC_CMA && zone_page_state(zone, NR_FREE_CMA_PAGES) > zone_page_state(zone, NR_FREE_PAGES) / 2) { page = __rmqueue_cma_fallback(zone, order); if (page) return page; } } retry: page = __rmqueue_smallest(zone, order, migratetype); if (unlikely(!page)) { if (alloc_flags & ALLOC_CMA) page = __rmqueue_cma_fallback(zone, order); if (!page && __rmqueue_fallback(zone, order, migratetype, alloc_flags)) goto retry; } return page; } /* * Obtain a specified number of elements from the buddy allocator, all under * a single hold of the lock, for efficiency. Add them to the supplied list. * Returns the number of new pages which were placed at *list. */ static int rmqueue_bulk(struct zone *zone, unsigned int order, unsigned long count, struct list_head *list, int migratetype, unsigned int alloc_flags) { unsigned long flags; int i; spin_lock_irqsave(&zone->lock, flags); for (i = 0; i < count; ++i) { struct page *page = __rmqueue(zone, order, migratetype, alloc_flags); if (unlikely(page == NULL)) break; /* * Split buddy pages returned by expand() are received here in * physical page order. The page is added to the tail of * caller's list. From the callers perspective, the linked list * is ordered by page number under some conditions. This is * useful for IO devices that can forward direction from the * head, thus also in the physical page order. This is useful * for IO devices that can merge IO requests if the physical * pages are ordered properly. */ list_add_tail(&page->pcp_list, list); if (is_migrate_cma(get_pcppage_migratetype(page))) __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, -(1 << order)); } __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); spin_unlock_irqrestore(&zone->lock, flags); return i; } #ifdef CONFIG_NUMA /* * Called from the vmstat counter updater to drain pagesets of this * currently executing processor on remote nodes after they have * expired. */ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) { int to_drain, batch; batch = READ_ONCE(pcp->batch); to_drain = min(pcp->count, batch); if (to_drain > 0) { spin_lock(&pcp->lock); free_pcppages_bulk(zone, to_drain, pcp, 0); spin_unlock(&pcp->lock); } } #endif /* * Drain pcplists of the indicated processor and zone. */ static void drain_pages_zone(unsigned int cpu, struct zone *zone) { struct per_cpu_pages *pcp; pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); if (pcp->count) { spin_lock(&pcp->lock); free_pcppages_bulk(zone, pcp->count, pcp, 0); spin_unlock(&pcp->lock); } } /* * Drain pcplists of all zones on the indicated processor. */ static void drain_pages(unsigned int cpu) { struct zone *zone; for_each_populated_zone(zone) { drain_pages_zone(cpu, zone); } } /* * Spill all of this CPU's per-cpu pages back into the buddy allocator. */ void drain_local_pages(struct zone *zone) { int cpu = smp_processor_id(); if (zone) drain_pages_zone(cpu, zone); else drain_pages(cpu); } /* * The implementation of drain_all_pages(), exposing an extra parameter to * drain on all cpus. * * drain_all_pages() is optimized to only execute on cpus where pcplists are * not empty. The check for non-emptiness can however race with a free to * pcplist that has not yet increased the pcp->count from 0 to 1. Callers * that need the guarantee that every CPU has drained can disable the * optimizing racy check. 
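 */

/*
 * The CMA balancing check at the top of __rmqueue() above reduces to a single
 * comparison. A stand-alone sketch with hypothetical names, using plain
 * counters in place of the zone vmstat counters:
 */
static inline int sketch_try_cma_first(int alloc_allows_cma,
				       unsigned long free_cma_pages,
				       unsigned long free_pages_total)
{
	/*
	 * Steer movable allocations into the CMA area once it holds more
	 * than half of the zone's free memory, so the CMA and regular
	 * regions are consumed at roughly the same rate.
	 */
	return alloc_allows_cma && free_cma_pages > free_pages_total / 2;
}

/*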
*/ static void __drain_all_pages(struct zone *zone, bool force_all_cpus) { int cpu; /* * Allocate in the BSS so we won't require allocation in * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y */ static cpumask_t cpus_with_pcps; /* * Do not drain if one is already in progress unless it's specific to * a zone. Such callers are primarily CMA and memory hotplug and need * the drain to be complete when the call returns. */ if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { if (!zone) return; mutex_lock(&pcpu_drain_mutex); } /* * We don't care about racing with CPU hotplug event * as offline notification will cause the notified * cpu to drain that CPU pcps and on_each_cpu_mask * disables preemption as part of its processing */ for_each_online_cpu(cpu) { struct per_cpu_pages *pcp; struct zone *z; bool has_pcps = false; if (force_all_cpus) { /* * The pcp.count check is racy, some callers need a * guarantee that no cpu is missed. */ has_pcps = true; } else if (zone) { pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); if (pcp->count) has_pcps = true; } else { for_each_populated_zone(z) { pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); if (pcp->count) { has_pcps = true; break; } } } if (has_pcps) cpumask_set_cpu(cpu, &cpus_with_pcps); else cpumask_clear_cpu(cpu, &cpus_with_pcps); } for_each_cpu(cpu, &cpus_with_pcps) { if (zone) drain_pages_zone(cpu, zone); else drain_pages(cpu); } mutex_unlock(&pcpu_drain_mutex); } /* * Spill all the per-cpu pages from all CPUs back into the buddy allocator. * * When zone parameter is non-NULL, spill just the single zone's pages. */ void drain_all_pages(struct zone *zone) { __drain_all_pages(zone, false); } static bool free_unref_page_prepare(struct page *page, unsigned long pfn, unsigned int order) { int migratetype; if (!free_pages_prepare(page, order, FPI_NONE)) return false; migratetype = get_pfnblock_migratetype(page, pfn); set_pcppage_migratetype(page, migratetype); return true; } static int nr_pcp_free(struct per_cpu_pages *pcp, int high, bool free_high) { int min_nr_free, max_nr_free; int batch = READ_ONCE(pcp->batch); /* Free everything if batch freeing high-order pages. */ if (unlikely(free_high)) return pcp->count; /* Check for PCP disabled or boot pageset */ if (unlikely(high < batch)) return 1; /* Leave at least pcp->batch pages on the list */ min_nr_free = batch; max_nr_free = high - batch; /* * Double the number of pages freed each time there is subsequent * freeing of pages without any allocation. */ batch <<= pcp->free_factor; if (batch < max_nr_free) pcp->free_factor++; batch = clamp(batch, min_nr_free, max_nr_free); return batch; } static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, bool free_high) { int high = READ_ONCE(pcp->high); if (unlikely(!high || free_high)) return 0; if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) return high; /* * If reclaim is active, limit the number of pages that can be * stored on pcp lists */ return min(READ_ONCE(pcp->batch) << 2, high); } static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, struct page *page, int migratetype, unsigned int order) { int high; int pindex; bool free_high; __count_vm_events(PGFREE, 1 << order); pindex = order_to_pindex(migratetype, order); list_add(&page->pcp_list, &pcp->lists[pindex]); pcp->count += 1 << order; /* * As high-order pages other than THP's stored on PCP can contribute * to fragmentation, limit the number stored when PCP is heavily * freeing without allocation. 
The remainder after bulk freeing * stops will be drained from vmstat refresh context. */ free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER); high = nr_pcp_high(pcp, zone, free_high); if (pcp->count >= high) { free_pcppages_bulk(zone, nr_pcp_free(pcp, high, free_high), pcp, pindex); } } /* * Free a pcp page */ void free_unref_page(struct page *page, unsigned int order) { unsigned long __maybe_unused UP_flags; struct per_cpu_pages *pcp; struct zone *zone; unsigned long pfn = page_to_pfn(page); int migratetype, pcpmigratetype; if (!free_unref_page_prepare(page, pfn, order)) return; /* * We only track unmovable, reclaimable and movable on pcp lists. * Place ISOLATE pages on the isolated list because they are being * offlined but treat HIGHATOMIC and CMA as movable pages so we can * get those areas back if necessary. Otherwise, we may have to free * excessively into the page allocator */ migratetype = pcpmigratetype = get_pcppage_migratetype(page); if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { if (unlikely(is_migrate_isolate(migratetype))) { free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE); return; } pcpmigratetype = MIGRATE_MOVABLE; } zone = page_zone(page); pcp_trylock_prepare(UP_flags); pcp = pcp_spin_trylock(zone->per_cpu_pageset); if (pcp) { free_unref_page_commit(zone, pcp, page, pcpmigratetype, order); pcp_spin_unlock(pcp); } else { free_one_page(zone, page, pfn, order, migratetype, FPI_NONE); } pcp_trylock_finish(UP_flags); } /* * Free a list of 0-order pages */ void free_unref_page_list(struct list_head *list) { unsigned long __maybe_unused UP_flags; struct page *page, *next; struct per_cpu_pages *pcp = NULL; struct zone *locked_zone = NULL; int batch_count = 0; int migratetype; /* Prepare pages for freeing */ list_for_each_entry_safe(page, next, list, lru) { unsigned long pfn = page_to_pfn(page); if (!free_unref_page_prepare(page, pfn, 0)) { list_del(&page->lru); continue; } /* * Free isolated pages directly to the allocator, see * comment in free_unref_page. */ migratetype = get_pcppage_migratetype(page); if (unlikely(is_migrate_isolate(migratetype))) { list_del(&page->lru); free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE); continue; } } list_for_each_entry_safe(page, next, list, lru) { struct zone *zone = page_zone(page); list_del(&page->lru); migratetype = get_pcppage_migratetype(page); /* * Either different zone requiring a different pcp lock or * excessive lock hold times when freeing a large list of * pages. */ if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) { if (pcp) { pcp_spin_unlock(pcp); pcp_trylock_finish(UP_flags); } batch_count = 0; /* * trylock is necessary as pages may be getting freed * from IRQ or SoftIRQ context after an IO completion. */ pcp_trylock_prepare(UP_flags); pcp = pcp_spin_trylock(zone->per_cpu_pageset); if (unlikely(!pcp)) { pcp_trylock_finish(UP_flags); free_one_page(zone, page, page_to_pfn(page), 0, migratetype, FPI_NONE); locked_zone = NULL; continue; } locked_zone = zone; } /* * Non-isolated types over MIGRATE_PCPTYPES get added * to the MIGRATE_MOVABLE pcp list. */ if (unlikely(migratetype >= MIGRATE_PCPTYPES)) migratetype = MIGRATE_MOVABLE; trace_mm_page_free_batched(page); free_unref_page_commit(zone, pcp, page, migratetype, 0); batch_count++; } if (pcp) { pcp_spin_unlock(pcp); pcp_trylock_finish(UP_flags); } } /* * split_page takes a non-compound higher-order page, and splits it into * n (1<<order) sub-pages: page[0..n] * Each sub-page must be freed individually. 
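 */

/*
 * A user-space style sketch of the trimming policy in nr_pcp_free() above:
 * how many pages to push back to the buddy allocator when a pcplist crosses
 * its high mark. The parameters are plain ints standing in for the pcp
 * fields; the "sketch_" name is illustrative only.
 */
static inline int sketch_nr_pcp_free(int pcp_count, int high, int batch,
				     int *free_factor, int free_high)
{
	int min_nr_free = batch;
	int max_nr_free = high - batch;
	int nr;

	/* Batch-freeing high-order pages: flush the whole list. */
	if (free_high)
		return pcp_count;

	/* PCP disabled or boot pageset: keep almost nothing. */
	if (high < batch)
		return 1;

	/*
	 * Consecutive frees with no intervening allocation double the amount
	 * trimmed each time, clamped so at least one batch stays cached and
	 * at most (high - batch) pages are freed in one go.
	 */
	nr = batch << *free_factor;
	if (nr < max_nr_free)
		(*free_factor)++;

	if (nr < min_nr_free)
		nr = min_nr_free;
	if (nr > max_nr_free)
		nr = max_nr_free;
	return nr;
}

/*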
* * Note: this is probably too low level an operation for use in drivers. * Please consult with lkml before using this in your driver. */ void split_page(struct page *page, unsigned int order) { int i; VM_BUG_ON_PAGE(PageCompound(page), page); VM_BUG_ON_PAGE(!page_count(page), page); for (i = 1; i < (1 << order); i++) set_page_refcounted(page + i); split_page_owner(page, 1 << order); split_page_memcg(page, 1 << order); } EXPORT_SYMBOL_GPL(split_page); int __isolate_free_page(struct page *page, unsigned int order) { struct zone *zone = page_zone(page); int mt = get_pageblock_migratetype(page); if (!is_migrate_isolate(mt)) { unsigned long watermark; /* * Obey watermarks as if the page was being allocated. We can * emulate a high-order watermark check with a raised order-0 * watermark, because we already know our high-order page * exists. */ watermark = zone->_watermark[WMARK_MIN] + (1UL << order); if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) return 0; __mod_zone_freepage_state(zone, -(1UL << order), mt); } del_page_from_free_list(page, zone, order); /* * Set the pageblock if the isolated page is at least half of a * pageblock */ if (order >= pageblock_order - 1) { struct page *endpage = page + (1 << order) - 1; for (; page < endpage; page += pageblock_nr_pages) { int mt = get_pageblock_migratetype(page); /* * Only change normal pageblocks (i.e., they can merge * with others) */ if (migratetype_is_mergeable(mt)) set_pageblock_migratetype(page, MIGRATE_MOVABLE); } } return 1UL << order; } /** * __putback_isolated_page - Return a now-isolated page back where we got it * @page: Page that was isolated * @order: Order of the isolated page * @mt: The page's pageblock's migratetype * * This function is meant to return a page pulled from the free lists via * __isolate_free_page back to the free lists they were pulled from. */ void __putback_isolated_page(struct page *page, unsigned int order, int mt) { struct zone *zone = page_zone(page); /* zone lock should be held when this function is called */ lockdep_assert_held(&zone->lock); /* Return isolated page to tail of freelist. */ __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); } /* * Update NUMA hit/miss statistics */ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, long nr_account) { #ifdef CONFIG_NUMA enum numa_stat_item local_stat = NUMA_LOCAL; /* skip numa counters update if numa stats is disabled */ if (!static_branch_likely(&vm_numa_stat_key)) return; if (zone_to_nid(z) != numa_node_id()) local_stat = NUMA_OTHER; if (zone_to_nid(z) == zone_to_nid(preferred_zone)) __count_numa_events(z, NUMA_HIT, nr_account); else { __count_numa_events(z, NUMA_MISS, nr_account); __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); } __count_numa_events(z, local_stat, nr_account); #endif } static __always_inline struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, unsigned int order, unsigned int alloc_flags, int migratetype) { struct page *page; unsigned long flags; do { page = NULL; spin_lock_irqsave(&zone->lock, flags); if (alloc_flags & ALLOC_HIGHATOMIC) page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); if (!page) { page = __rmqueue(zone, order, migratetype, alloc_flags); /* * If the allocation fails, allow OOM handling access * to HIGHATOMIC reserves as failing now is worse than * failing a high-order atomic allocation in the * future. 
*/ if (!page && (alloc_flags & ALLOC_OOM)) page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); if (!page) { spin_unlock_irqrestore(&zone->lock, flags); return NULL; } } __mod_zone_freepage_state(zone, -(1 << order), get_pcppage_migratetype(page)); spin_unlock_irqrestore(&zone->lock, flags); } while (check_new_pages(page, order)); __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); zone_statistics(preferred_zone, zone, 1); return page; } /* Remove page from the per-cpu list, caller must protect the list */ static inline struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, int migratetype, unsigned int alloc_flags, struct per_cpu_pages *pcp, struct list_head *list) { struct page *page; do { if (list_empty(list)) { int batch = READ_ONCE(pcp->batch); int alloced; /* * Scale batch relative to order if batch implies * free pages can be stored on the PCP. Batch can * be 1 for small zones or for boot pagesets which * should never store free pages as the pages may * belong to arbitrary zones. */ if (batch > 1) batch = max(batch >> order, 2); alloced = rmqueue_bulk(zone, order, batch, list, migratetype, alloc_flags); pcp->count += alloced << order; if (unlikely(list_empty(list))) return NULL; } page = list_first_entry(list, struct page, pcp_list); list_del(&page->pcp_list); pcp->count -= 1 << order; } while (check_new_pages(page, order)); return page; } /* Lock and remove page from the per-cpu list */ static struct page *rmqueue_pcplist(struct zone *preferred_zone, struct zone *zone, unsigned int order, int migratetype, unsigned int alloc_flags) { struct per_cpu_pages *pcp; struct list_head *list; struct page *page; unsigned long __maybe_unused UP_flags; /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ pcp_trylock_prepare(UP_flags); pcp = pcp_spin_trylock(zone->per_cpu_pageset); if (!pcp) { pcp_trylock_finish(UP_flags); return NULL; } /* * On allocation, reduce the number of pages that are batch freed. * See nr_pcp_free() where free_factor is increased for subsequent * frees. */ pcp->free_factor >>= 1; list = &pcp->lists[order_to_pindex(migratetype, order)]; page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); pcp_spin_unlock(pcp); pcp_trylock_finish(UP_flags); if (page) { __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); zone_statistics(preferred_zone, zone, 1); } return page; } /* * Allocate a page from the given zone. * Use pcplists for THP or "cheap" high-order allocations. */ /* * Do not instrument rmqueue() with KMSAN. This function may call * __msan_poison_alloca() through a call to set_pfnblock_flags_mask(). * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it * may call rmqueue() again, which will result in a deadlock. */ __no_sanitize_memory static inline struct page *rmqueue(struct zone *preferred_zone, struct zone *zone, unsigned int order, gfp_t gfp_flags, unsigned int alloc_flags, int migratetype) { struct page *page; /* * We most definitely don't want callers attempting to * allocate greater than order-1 page units with __GFP_NOFAIL. 
*/ WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); if (likely(pcp_allowed_order(order))) { page = rmqueue_pcplist(preferred_zone, zone, order, migratetype, alloc_flags); if (likely(page)) goto out; } page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, migratetype); out: /* Separate test+clear to avoid unnecessary atomics */ if ((alloc_flags & ALLOC_KSWAPD) && unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); wakeup_kswapd(zone, 0, 0, zone_idx(zone)); } VM_BUG_ON_PAGE(page && bad_range(zone, page), page); return page; } noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) { return __should_fail_alloc_page(gfp_mask, order); } ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE); static inline long __zone_watermark_unusable_free(struct zone *z, unsigned int order, unsigned int alloc_flags) { long unusable_free = (1 << order) - 1; /* * If the caller does not have rights to reserves below the min * watermark then subtract the high-atomic reserves. This will * over-estimate the size of the atomic reserve but it avoids a search. */ if (likely(!(alloc_flags & ALLOC_RESERVES))) unusable_free += z->nr_reserved_highatomic; #ifdef CONFIG_CMA /* If allocation can't use CMA areas don't use free CMA pages */ if (!(alloc_flags & ALLOC_CMA)) unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); #endif #ifdef CONFIG_UNACCEPTED_MEMORY unusable_free += zone_page_state(z, NR_UNACCEPTED); #endif return unusable_free; } /* * Return true if free base pages are above 'mark'. For high-order checks it * will return true of the order-0 watermark is reached and there is at least * one free page of a suitable size. Checking now avoids taking the zone lock * to check in the allocation paths if no pages are free. */ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx, unsigned int alloc_flags, long free_pages) { long min = mark; int o; /* free_pages may go negative - that's OK */ free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); if (unlikely(alloc_flags & ALLOC_RESERVES)) { /* * __GFP_HIGH allows access to 50% of the min reserve as well * as OOM. */ if (alloc_flags & ALLOC_MIN_RESERVE) { min -= min / 2; /* * Non-blocking allocations (e.g. GFP_ATOMIC) can * access more reserves than just __GFP_HIGH. Other * non-blocking allocations requests such as GFP_NOWAIT * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get * access to the min reserve. */ if (alloc_flags & ALLOC_NON_BLOCK) min -= min / 4; } /* * OOM victims can try even harder than the normal reserve * users on the grounds that it's definitely going to be in * the exit path shortly and free memory. Any allocation it * makes during the free path will be small and short-lived. */ if (alloc_flags & ALLOC_OOM) min -= min / 2; } /* * Check watermarks for an order-0 allocation request. If these * are not met, then a high-order request also cannot go ahead * even if a suitable page happened to be free. 
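	 *
	 * A worked example with illustrative numbers (not kernel defaults):
	 * for mark = 8192 pages, a request carrying ALLOC_MIN_RESERVE ends up
	 * with min = 8192 - 8192/2 = 4096; if it is also ALLOC_NON_BLOCK
	 * (e.g. GFP_ATOMIC) min drops further to 4096 - 4096/4 = 3072, and an
	 * ALLOC_OOM victim halves whatever is left once more. The request is
	 * then allowed only while the usable free pages stay above
	 * min + lowmem_reserve[highest_zoneidx].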
*/ if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) return false; /* If this is an order-0 request then the watermark is fine */ if (!order) return true; /* For a high-order request, check at least one suitable page is free */ for (o = order; o <= MAX_ORDER; o++) { struct free_area *area = &z->free_area[o]; int mt; if (!area->nr_free) continue; for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { if (!free_area_empty(area, mt)) return true; } #ifdef CONFIG_CMA if ((alloc_flags & ALLOC_CMA) && !free_area_empty(area, MIGRATE_CMA)) { return true; } #endif if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) && !free_area_empty(area, MIGRATE_HIGHATOMIC)) { return true; } } return false; } bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx, unsigned int alloc_flags) { return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, zone_page_state(z, NR_FREE_PAGES)); } static inline bool zone_watermark_fast(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx, unsigned int alloc_flags, gfp_t gfp_mask) { long free_pages; free_pages = zone_page_state(z, NR_FREE_PAGES); /* * Fast check for order-0 only. If this fails then the reserves * need to be calculated. */ if (!order) { long usable_free; long reserved; usable_free = free_pages; reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); /* reserved may over estimate high-atomic reserves. */ usable_free -= min(usable_free, reserved); if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) return true; } if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, free_pages)) return true; /* * Ignore watermark boosting for __GFP_HIGH order-0 allocations * when checking the min watermark. The min watermark is the * point where boosting is ignored so that kswapd is woken up * when below the low watermark. */ if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { mark = z->_watermark[WMARK_MIN]; return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, free_pages); } return false; } bool zone_watermark_ok_safe(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx) { long free_pages = zone_page_state(z, NR_FREE_PAGES); if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, free_pages); } #ifdef CONFIG_NUMA int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) { return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= node_reclaim_distance; } #else /* CONFIG_NUMA */ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) { return true; } #endif /* CONFIG_NUMA */ /* * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid * fragmentation is subtle. If the preferred zone was HIGHMEM then * premature use of a lower zone may cause lowmem pressure problems that * are worse than fragmentation. If the next zone is ZONE_DMA then it is * probably too small. It only makes sense to spread allocations to avoid * fragmentation between the Normal and DMA32 zones. */ static inline unsigned int alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) { unsigned int alloc_flags; /* * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD * to save a branch. 
*/ alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); #ifdef CONFIG_ZONE_DMA32 if (!zone) return alloc_flags; if (zone_idx(zone) != ZONE_NORMAL) return alloc_flags; /* * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and * the pointer is within zone->zone_pgdat->node_zones[]. Also assume * on UMA that if Normal is populated then so is DMA32. */ BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); if (nr_online_nodes > 1 && !populated_zone(--zone)) return alloc_flags; alloc_flags |= ALLOC_NOFRAGMENT; #endif /* CONFIG_ZONE_DMA32 */ return alloc_flags; } /* Must be called after current_gfp_context() which can change gfp_mask */ static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, unsigned int alloc_flags) { #ifdef CONFIG_CMA if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) alloc_flags |= ALLOC_CMA; #endif return alloc_flags; } /* * get_page_from_freelist goes through the zonelist trying to allocate * a page. */ static struct page * get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, const struct alloc_context *ac) { struct zoneref *z; struct zone *zone; struct pglist_data *last_pgdat = NULL; bool last_pgdat_dirty_ok = false; bool no_fallback; retry: /* * Scan zonelist, looking for a zone with enough free. * See also cpuset_node_allowed() comment in kernel/cgroup/cpuset.c. */ no_fallback = alloc_flags & ALLOC_NOFRAGMENT; z = ac->preferred_zoneref; for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, ac->nodemask) { struct page *page; unsigned long mark; if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && !__cpuset_zone_allowed(zone, gfp_mask)) continue; /* * When allocating a page cache page for writing, we * want to get it from a node that is within its dirty * limit, such that no single node holds more than its * proportional share of globally allowed dirty pages. * The dirty limits take into account the node's * lowmem reserves and high watermark so that kswapd * should be able to balance it without having to * write pages from its LRU list. * * XXX: For now, allow allocations to potentially * exceed the per-node dirty limit in the slowpath * (spread_dirty_pages unset) before going into reclaim, * which is important when on a NUMA setup the allowed * nodes are together not big enough to reach the * global limit. The proper fix for these situations * will require awareness of nodes in the * dirty-throttling and the flusher threads. */ if (ac->spread_dirty_pages) { if (last_pgdat != zone->zone_pgdat) { last_pgdat = zone->zone_pgdat; last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); } if (!last_pgdat_dirty_ok) continue; } if (no_fallback && nr_online_nodes > 1 && zone != ac->preferred_zoneref->zone) { int local_nid; /* * If moving to a remote node, retry but allow * fragmenting fallbacks. Locality is more important * than fragmentation avoidance. */ local_nid = zone_to_nid(ac->preferred_zoneref->zone); if (zone_to_nid(zone) != local_nid) { alloc_flags &= ~ALLOC_NOFRAGMENT; goto retry; } } mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); if (!zone_watermark_fast(zone, order, mark, ac->highest_zoneidx, alloc_flags, gfp_mask)) { int ret; if (has_unaccepted_memory()) { if (try_to_accept_memory(zone, order)) goto try_this_zone; } #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT /* * Watermark failed for this zone, but see if we can * grow this zone if it contains deferred pages. 
*/ if (deferred_pages_enabled()) { if (_deferred_grow_zone(zone, order)) goto try_this_zone; } #endif /* Checked here to keep the fast path fast */ BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); if (alloc_flags & ALLOC_NO_WATERMARKS) goto try_this_zone; if (!node_reclaim_enabled() || !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) continue; ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); switch (ret) { case NODE_RECLAIM_NOSCAN: /* did not scan */ continue; case NODE_RECLAIM_FULL: /* scanned but unreclaimable */ continue; default: /* did we reclaim enough */ if (zone_watermark_ok(zone, order, mark, ac->highest_zoneidx, alloc_flags)) goto try_this_zone; continue; } } try_this_zone: page = rmqueue(ac->preferred_zoneref->zone, zone, order, gfp_mask, alloc_flags, ac->migratetype); if (page) { prep_new_page(page, order, gfp_mask, alloc_flags); /* * If this is a high-order atomic allocation then check * if the pageblock should be reserved for the future */ if (unlikely(alloc_flags & ALLOC_HIGHATOMIC)) reserve_highatomic_pageblock(page, zone); return page; } else { if (has_unaccepted_memory()) { if (try_to_accept_memory(zone, order)) goto try_this_zone; } #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT /* Try again if zone has deferred pages */ if (deferred_pages_enabled()) { if (_deferred_grow_zone(zone, order)) goto try_this_zone; } #endif } } /* * It's possible on a UMA machine to get through all zones that are * fragmented. If avoiding fragmentation, reset and try again. */ if (no_fallback) { alloc_flags &= ~ALLOC_NOFRAGMENT; goto retry; } return NULL; } static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) { unsigned int filter = SHOW_MEM_FILTER_NODES; /* * This documents exceptions given to allocations in certain * contexts that are allowed to allocate outside current's set * of allowed nodes. */ if (!(gfp_mask & __GFP_NOMEMALLOC)) if (tsk_is_oom_victim(current) || (current->flags & (PF_MEMALLOC | PF_EXITING))) filter &= ~SHOW_MEM_FILTER_NODES; if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) filter &= ~SHOW_MEM_FILTER_NODES; __show_mem(filter, nodemask, gfp_zone(gfp_mask)); } void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) { struct va_format vaf; va_list args; static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) || ((gfp_mask & __GFP_DMA) && !has_managed_dma())) return; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", current->comm, &vaf, gfp_mask, &gfp_mask, nodemask_pr_args(nodemask)); va_end(args); cpuset_print_current_mems_allowed(); pr_cont("\n"); dump_stack(); warn_alloc_show_mem(gfp_mask, nodemask); } static inline struct page * __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac) { struct page *page; page = get_page_from_freelist(gfp_mask, order, alloc_flags|ALLOC_CPUSET, ac); /* * fallback to ignore cpuset restriction if our nodes * are depleted */ if (!page) page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); return page; } static inline struct page * __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, const struct alloc_context *ac, unsigned long *did_some_progress) { struct oom_control oc = { .zonelist = ac->zonelist, .nodemask = ac->nodemask, .memcg = NULL, .gfp_mask = gfp_mask, .order = order, }; struct page *page; *did_some_progress = 0; /* * Acquire the oom lock. If that fails, somebody else is * making progress for us. 
*/ if (!mutex_trylock(&oom_lock)) { *did_some_progress = 1; schedule_timeout_uninterruptible(1); return NULL; } /* * Go through the zonelist yet one more time, keep very high watermark * here, this is only to catch a parallel oom killing, we must fail if * we're still under heavy pressure. But make sure that this reclaim * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY * allocation which will never fail due to oom_lock already held. */ page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & ~__GFP_DIRECT_RECLAIM, order, ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); if (page) goto out; /* Coredumps can quickly deplete all memory reserves */ if (current->flags & PF_DUMPCORE) goto out; /* The OOM killer will not help higher order allocs */ if (order > PAGE_ALLOC_COSTLY_ORDER) goto out; /* * We have already exhausted all our reclaim opportunities without any * success so it is time to admit defeat. We will skip the OOM killer * because it is very likely that the caller has a more reasonable * fallback than shooting a random task. * * The OOM killer may not free memory on a specific node. */ if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) goto out; /* The OOM killer does not needlessly kill tasks for lowmem */ if (ac->highest_zoneidx < ZONE_NORMAL) goto out; if (pm_suspended_storage()) goto out; /* * XXX: GFP_NOFS allocations should rather fail than rely on * other request to make a forward progress. * We are in an unfortunate situation where out_of_memory cannot * do much for this context but let's try it to at least get * access to memory reserved if the current task is killed (see * out_of_memory). Once filesystems are ready to handle allocation * failures more gracefully we should just bail out here. */ /* Exhausted what can be done so it's blame time */ if (out_of_memory(&oc) || WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) { *did_some_progress = 1; /* * Help non-failing allocations by giving them access to memory * reserves */ if (gfp_mask & __GFP_NOFAIL) page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_NO_WATERMARKS, ac); } out: mutex_unlock(&oom_lock); return page; } /* * Maximum number of compaction retries with a progress before OOM * killer is consider as the only way to move forward. 
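 */

/*
 * The bail-out conditions checked by __alloc_pages_may_oom() above, condensed
 * into one predicate. This is an illustrative model only: the flags are
 * passed as plain ints and the "sketch_" name is hypothetical.
 */
static inline int sketch_oom_kill_worthwhile(int dumping_core,
					     unsigned int order,
					     unsigned int costly_order,
					     int retry_mayfail_or_thisnode,
					     int lowmem_request,
					     int storage_suspended)
{
	/* A coredump in progress would quickly consume anything we free. */
	if (dumping_core)
		return 0;
	/* Killing tasks is unlikely to produce higher-order pages. */
	if (order > costly_order)
		return 0;
	/* Caller can cope with failure, or needs one specific node. */
	if (retry_mayfail_or_thisnode)
		return 0;
	/* Never kill tasks to satisfy lowmem (below ZONE_NORMAL) requests. */
	if (lowmem_request)
		return 0;
	/* Skip while storage is suspended (see pm_suspended_storage() above). */
	if (storage_suspended)
		return 0;
	return 1;
}

/*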
*/ #define MAX_COMPACT_RETRIES 16 #ifdef CONFIG_COMPACTION /* Try memory compaction for high-order allocations before reclaim */ static struct page * __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, enum compact_result *compact_result) { struct page *page = NULL; unsigned long pflags; unsigned int noreclaim_flag; if (!order) return NULL; psi_memstall_enter(&pflags); delayacct_compact_start(); noreclaim_flag = memalloc_noreclaim_save(); *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, prio, &page); memalloc_noreclaim_restore(noreclaim_flag); psi_memstall_leave(&pflags); delayacct_compact_end(); if (*compact_result == COMPACT_SKIPPED) return NULL; /* * At least in one zone compaction wasn't deferred or skipped, so let's * count a compaction stall */ count_vm_event(COMPACTSTALL); /* Prep a captured page if available */ if (page) prep_new_page(page, order, gfp_mask, alloc_flags); /* Try get a page from the freelist if available */ if (!page) page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); if (page) { struct zone *zone = page_zone(page); zone->compact_blockskip_flush = false; compaction_defer_reset(zone, order, true); count_vm_event(COMPACTSUCCESS); return page; } /* * It's bad if compaction run occurs and fails. The most likely reason * is that pages exist, but not enough to satisfy watermarks. */ count_vm_event(COMPACTFAIL); cond_resched(); return NULL; } static inline bool should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, enum compact_result compact_result, enum compact_priority *compact_priority, int *compaction_retries) { int max_retries = MAX_COMPACT_RETRIES; int min_priority; bool ret = false; int retries = *compaction_retries; enum compact_priority priority = *compact_priority; if (!order) return false; if (fatal_signal_pending(current)) return false; /* * Compaction was skipped due to a lack of free order-0 * migration targets. Continue if reclaim can help. */ if (compact_result == COMPACT_SKIPPED) { ret = compaction_zonelist_suitable(ac, order, alloc_flags); goto out; } /* * Compaction managed to coalesce some page blocks, but the * allocation failed presumably due to a race. Retry some. */ if (compact_result == COMPACT_SUCCESS) { /* * !costly requests are much more important than * __GFP_RETRY_MAYFAIL costly ones because they are de * facto nofail and invoke OOM killer to move on while * costly can fail and users are ready to cope with * that. 1/4 retries is rather arbitrary but we would * need much more detailed feedback from compaction to * make a better decision. */ if (order > PAGE_ALLOC_COSTLY_ORDER) max_retries /= 4; if (++(*compaction_retries) <= max_retries) { ret = true; goto out; } } /* * Compaction failed. Retry with increasing priority. */ min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 
MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; if (*compact_priority > min_priority) { (*compact_priority)--; *compaction_retries = 0; ret = true; } out: trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); return ret; } #else static inline struct page * __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, enum compact_result *compact_result) { *compact_result = COMPACT_SKIPPED; return NULL; } static inline bool should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, enum compact_result compact_result, enum compact_priority *compact_priority, int *compaction_retries) { struct zone *zone; struct zoneref *z; if (!order || order > PAGE_ALLOC_COSTLY_ORDER) return false; /* * There are setups with compaction disabled which would prefer to loop * inside the allocator rather than hit the oom killer prematurely. * Let's give them a good hope and keep retrying while the order-0 * watermarks are OK. */ for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->highest_zoneidx, ac->nodemask) { if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), ac->highest_zoneidx, alloc_flags)) return true; } return false; } #endif /* CONFIG_COMPACTION */ #ifdef CONFIG_LOCKDEP static struct lockdep_map __fs_reclaim_map = STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); static bool __need_reclaim(gfp_t gfp_mask) { /* no reclaim without waiting on it */ if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) return false; /* this guy won't enter reclaim */ if (current->flags & PF_MEMALLOC) return false; if (gfp_mask & __GFP_NOLOCKDEP) return false; return true; } void __fs_reclaim_acquire(unsigned long ip) { lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); } void __fs_reclaim_release(unsigned long ip) { lock_release(&__fs_reclaim_map, ip); } void fs_reclaim_acquire(gfp_t gfp_mask) { gfp_mask = current_gfp_context(gfp_mask); if (__need_reclaim(gfp_mask)) { if (gfp_mask & __GFP_FS) __fs_reclaim_acquire(_RET_IP_); #ifdef CONFIG_MMU_NOTIFIER lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); lock_map_release(&__mmu_notifier_invalidate_range_start_map); #endif } } EXPORT_SYMBOL_GPL(fs_reclaim_acquire); void fs_reclaim_release(gfp_t gfp_mask) { gfp_mask = current_gfp_context(gfp_mask); if (__need_reclaim(gfp_mask)) { if (gfp_mask & __GFP_FS) __fs_reclaim_release(_RET_IP_); } } EXPORT_SYMBOL_GPL(fs_reclaim_release); #endif /* * Zonelists may change due to hotplug during allocation. Detect when zonelists * have been rebuilt so allocation retries. Reader side does not lock and * retries the allocation if zonelist changes. Writer side is protected by the * embedded spin_lock. 
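 */

/*
 * A small model of the retry budget used by should_compact_retry() above.
 * With MAX_COMPACT_RETRIES at 16, a costly request (order above
 * PAGE_ALLOC_COSTLY_ORDER) gets only 16/4 = 4 retries after a successful but
 * raced compaction before the priority is raised and the counter reset. The
 * helper below is an illustrative sketch, not a kernel interface.
 */
static inline int sketch_compact_retry_budget(unsigned int order,
					      unsigned int costly_order,
					      int max_retries)
{
	/*
	 * Costly requests may fail and their callers are expected to cope,
	 * so they get a quarter of the budget; smaller requests are treated
	 * as effectively nofail and retried harder.
	 */
	return order > costly_order ? max_retries / 4 : max_retries;
}

/*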
*/ static DEFINE_SEQLOCK(zonelist_update_seq); static unsigned int zonelist_iter_begin(void) { if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) return read_seqbegin(&zonelist_update_seq); return 0; } static unsigned int check_retry_zonelist(unsigned int seq) { if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) return read_seqretry(&zonelist_update_seq, seq); return seq; } /* Perform direct synchronous page reclaim */ static unsigned long __perform_reclaim(gfp_t gfp_mask, unsigned int order, const struct alloc_context *ac) { unsigned int noreclaim_flag; unsigned long progress; cond_resched(); /* We now go into synchronous reclaim */ cpuset_memory_pressure_bump(); fs_reclaim_acquire(gfp_mask); noreclaim_flag = memalloc_noreclaim_save(); progress = try_to_free_pages(ac->zonelist, order, gfp_mask, ac->nodemask); memalloc_noreclaim_restore(noreclaim_flag); fs_reclaim_release(gfp_mask); cond_resched(); return progress; } /* The really slow allocator path where we enter direct reclaim */ static inline struct page * __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, unsigned long *did_some_progress) { struct page *page = NULL; unsigned long pflags; bool drained = false; psi_memstall_enter(&pflags); *did_some_progress = __perform_reclaim(gfp_mask, order, ac); if (unlikely(!(*did_some_progress))) goto out; retry: page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); /* * If an allocation failed after direct reclaim, it could be because * pages are pinned on the per-cpu lists or in high alloc reserves. * Shrink them and try again */ if (!page && !drained) { unreserve_highatomic_pageblock(ac, false); drain_all_pages(NULL); drained = true; goto retry; } out: psi_memstall_leave(&pflags); return page; } static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, const struct alloc_context *ac) { struct zoneref *z; struct zone *zone; pg_data_t *last_pgdat = NULL; enum zone_type highest_zoneidx = ac->highest_zoneidx; for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, ac->nodemask) { if (!managed_zone(zone)) continue; if (last_pgdat != zone->zone_pgdat) { wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); last_pgdat = zone->zone_pgdat; } } } static inline unsigned int gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) { unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; /* * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD * to save two branches. */ BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE); BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); /* * The caller may dip into page reserves a bit more if the caller * cannot run direct reclaim, or if the caller has realtime scheduling * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH). */ alloc_flags |= (__force int) (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) { /* * Not worth trying to allocate harder for __GFP_NOMEMALLOC even * if it can't schedule. */ if (!(gfp_mask & __GFP_NOMEMALLOC)) { alloc_flags |= ALLOC_NON_BLOCK; if (order > 0) alloc_flags |= ALLOC_HIGHATOMIC; } /* * Ignore cpuset mems for non-blocking __GFP_HIGH (probably * GFP_ATOMIC) rather than fail, see the comment for * cpuset_node_allowed(). 
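		 *
		 * Worked example: a GFP_ATOMIC request (__GFP_HIGH |
		 * __GFP_KSWAPD_RECLAIM) with order > 0 leaves this function
		 * with ALLOC_WMARK_MIN | ALLOC_MIN_RESERVE | ALLOC_KSWAPD |
		 * ALLOC_NON_BLOCK | ALLOC_HIGHATOMIC, and has ALLOC_CPUSET
		 * cleared by the check below.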
*/ if (alloc_flags & ALLOC_MIN_RESERVE) alloc_flags &= ~ALLOC_CPUSET; } else if (unlikely(rt_task(current)) && in_task()) alloc_flags |= ALLOC_MIN_RESERVE; alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); return alloc_flags; } static bool oom_reserves_allowed(struct task_struct *tsk) { if (!tsk_is_oom_victim(tsk)) return false; /* * !MMU doesn't have oom reaper so give access to memory reserves * only to the thread with TIF_MEMDIE set */ if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) return false; return true; } /* * Distinguish requests which really need access to full memory * reserves from oom victims which can live with a portion of it */ static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) { if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) return 0; if (gfp_mask & __GFP_MEMALLOC) return ALLOC_NO_WATERMARKS; if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) return ALLOC_NO_WATERMARKS; if (!in_interrupt()) { if (current->flags & PF_MEMALLOC) return ALLOC_NO_WATERMARKS; else if (oom_reserves_allowed(current)) return ALLOC_OOM; } return 0; } bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) { return !!__gfp_pfmemalloc_flags(gfp_mask); } /* * Checks whether it makes sense to retry the reclaim to make a forward progress * for the given allocation request. * * We give up when we either have tried MAX_RECLAIM_RETRIES in a row * without success, or when we couldn't even meet the watermark if we * reclaimed all remaining pages on the LRU lists. * * Returns true if a retry is viable or false to enter the oom path. */ static inline bool should_reclaim_retry(gfp_t gfp_mask, unsigned order, struct alloc_context *ac, int alloc_flags, bool did_some_progress, int *no_progress_loops) { struct zone *zone; struct zoneref *z; bool ret = false; /* * Costly allocations might have made a progress but this doesn't mean * their order will become available due to high fragmentation so * always increment the no progress counter for them */ if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) *no_progress_loops = 0; else (*no_progress_loops)++; /* * Make sure we converge to OOM if we cannot make any progress * several times in the row. */ if (*no_progress_loops > MAX_RECLAIM_RETRIES) { /* Before OOM, exhaust highatomic_reserve */ return unreserve_highatomic_pageblock(ac, true); } /* * Keep reclaiming pages while there is a chance this will lead * somewhere. If none of the target zones can satisfy our allocation * request even if all reclaimable pages are considered then we are * screwed and have to go OOM. */ for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->highest_zoneidx, ac->nodemask) { unsigned long available; unsigned long reclaimable; unsigned long min_wmark = min_wmark_pages(zone); bool wmark; available = reclaimable = zone_reclaimable_pages(zone); available += zone_page_state_snapshot(zone, NR_FREE_PAGES); /* * Would the allocation succeed if we reclaimed all * reclaimable pages? */ wmark = __zone_watermark_ok(zone, order, min_wmark, ac->highest_zoneidx, alloc_flags, available); trace_reclaim_retry_zone(z, order, reclaimable, available, min_wmark, *no_progress_loops, wmark); if (wmark) { ret = true; break; } } /* * Memory allocation/reclaim might be called from a WQ context and the * current implementation of the WQ concurrency control doesn't * recognize that a particular WQ is congested if the worker thread is * looping without ever sleeping. Therefore we have to do a short sleep * here rather than calling cond_resched(). 
*/ if (current->flags & PF_WQ_WORKER) schedule_timeout_uninterruptible(1); else cond_resched(); return ret; } static inline bool check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) { /* * It's possible that cpuset's mems_allowed and the nodemask from * mempolicy don't intersect. This should be normally dealt with by * policy_nodemask(), but it's possible to race with cpuset update in * such a way the check therein was true, and then it became false * before we got our cpuset_mems_cookie here. * This assumes that for all allocations, ac->nodemask can come only * from MPOL_BIND mempolicy (whose documented semantics is to be ignored * when it does not intersect with the cpuset restrictions) or the * caller can deal with a violated nodemask. */ if (cpusets_enabled() && ac->nodemask && !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { ac->nodemask = NULL; return true; } /* * When updating a task's mems_allowed or mempolicy nodemask, it is * possible to race with parallel threads in such a way that our * allocation can fail while the mask is being updated. If we are about * to fail, check if the cpuset changed during allocation and if so, * retry. */ if (read_mems_allowed_retry(cpuset_mems_cookie)) return true; return false; } static inline struct page * __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, struct alloc_context *ac) { bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; struct page *page = NULL; unsigned int alloc_flags; unsigned long did_some_progress; enum compact_priority compact_priority; enum compact_result compact_result; int compaction_retries; int no_progress_loops; unsigned int cpuset_mems_cookie; unsigned int zonelist_iter_cookie; int reserve_flags; restart: compaction_retries = 0; no_progress_loops = 0; compact_priority = DEF_COMPACT_PRIORITY; cpuset_mems_cookie = read_mems_allowed_begin(); zonelist_iter_cookie = zonelist_iter_begin(); /* * The fast path uses conservative alloc_flags to succeed only until * kswapd needs to be woken up, and to avoid the cost of setting up * alloc_flags precisely. So we do that now. */ alloc_flags = gfp_to_alloc_flags(gfp_mask, order); /* * We need to recalculate the starting point for the zonelist iterator * because we might have used different nodemask in the fast path, or * there was a cpuset modification and we are retrying - otherwise we * could end up iterating over non-eligible zones endlessly. */ ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, ac->highest_zoneidx, ac->nodemask); if (!ac->preferred_zoneref->zone) goto nopage; /* * Check for insane configurations where the cpuset doesn't contain * any suitable zone to satisfy the request - e.g. non-movable * GFP_HIGHUSER allocations from MOVABLE nodes only. */ if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { struct zoneref *z = first_zones_zonelist(ac->zonelist, ac->highest_zoneidx, &cpuset_current_mems_allowed); if (!z->zone) goto nopage; } if (alloc_flags & ALLOC_KSWAPD) wake_all_kswapds(order, gfp_mask, ac); /* * The adjusted alloc_flags might result in immediate success, so try * that first */ page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); if (page) goto got_pg; /* * For costly allocations, try direct compaction first, as it's likely * that we have enough base pages and don't need to reclaim. 
For non- * movable high-order allocations, do that as well, as compaction will * try prevent permanent fragmentation by migrating from blocks of the * same migratetype. * Don't try this for allocations that are allowed to ignore * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen. */ if (can_direct_reclaim && (costly_order || (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) && !gfp_pfmemalloc_allowed(gfp_mask)) { page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, INIT_COMPACT_PRIORITY, &compact_result); if (page) goto got_pg; /* * Checks for costly allocations with __GFP_NORETRY, which * includes some THP page fault allocations */ if (costly_order && (gfp_mask & __GFP_NORETRY)) { /* * If allocating entire pageblock(s) and compaction * failed because all zones are below low watermarks * or is prohibited because it recently failed at this * order, fail immediately unless the allocator has * requested compaction and reclaim retry. * * Reclaim is * - potentially very expensive because zones are far * below their low watermarks or this is part of very * bursty high order allocations, * - not guaranteed to help because isolate_freepages() * may not iterate over freed pages as part of its * linear scan, and * - unlikely to make entire pageblocks free on its * own. */ if (compact_result == COMPACT_SKIPPED || compact_result == COMPACT_DEFERRED) goto nopage; /* * Looks like reclaim/compaction is worth trying, but * sync compaction could be very expensive, so keep * using async compaction. */ compact_priority = INIT_COMPACT_PRIORITY; } } retry: /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ if (alloc_flags & ALLOC_KSWAPD) wake_all_kswapds(order, gfp_mask, ac); reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); if (reserve_flags) alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) | (alloc_flags & ALLOC_KSWAPD); /* * Reset the nodemask and zonelist iterators if memory policies can be * ignored. These allocations are high priority and system rather than * user oriented. 
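 * For example, a request from an OOM victim or a __GFP_MEMALLOC caller
 * (i.e. reserve_flags != 0 above) gets to ignore the mempolicy nodemask
 * and the cpuset restriction here instead of failing.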
*/ if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { ac->nodemask = NULL; ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, ac->highest_zoneidx, ac->nodemask); } /* Attempt with potentially adjusted zonelist and alloc_flags */ page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); if (page) goto got_pg; /* Caller is not willing to reclaim, we can't balance anything */ if (!can_direct_reclaim) goto nopage; /* Avoid recursion of direct reclaim */ if (current->flags & PF_MEMALLOC) goto nopage; /* Try direct reclaim and then allocating */ page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, &did_some_progress); if (page) goto got_pg; /* Try direct compaction and then allocating */ page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, compact_priority, &compact_result); if (page) goto got_pg; /* Do not loop if specifically requested */ if (gfp_mask & __GFP_NORETRY) goto nopage; /* * Do not retry costly high order allocations unless they are * __GFP_RETRY_MAYFAIL */ if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL)) goto nopage; if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, did_some_progress > 0, &no_progress_loops)) goto retry; /* * It doesn't make any sense to retry for the compaction if the order-0 * reclaim is not able to make any progress because the current * implementation of the compaction depends on the sufficient amount * of free memory (see __compaction_suitable) */ if (did_some_progress > 0 && should_compact_retry(ac, order, alloc_flags, compact_result, &compact_priority, &compaction_retries)) goto retry; /* * Deal with possible cpuset update races or zonelist updates to avoid * a unnecessary OOM kill. */ if (check_retry_cpuset(cpuset_mems_cookie, ac) || check_retry_zonelist(zonelist_iter_cookie)) goto restart; /* Reclaim has failed us, start killing things */ page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); if (page) goto got_pg; /* Avoid allocations with no watermarks from looping endlessly */ if (tsk_is_oom_victim(current) && (alloc_flags & ALLOC_OOM || (gfp_mask & __GFP_NOMEMALLOC))) goto nopage; /* Retry as long as the OOM killer is making progress */ if (did_some_progress) { no_progress_loops = 0; goto retry; } nopage: /* * Deal with possible cpuset update races or zonelist updates to avoid * a unnecessary OOM kill. */ if (check_retry_cpuset(cpuset_mems_cookie, ac) || check_retry_zonelist(zonelist_iter_cookie)) goto restart; /* * Make sure that __GFP_NOFAIL request doesn't leak out and make sure * we always retry */ if (gfp_mask & __GFP_NOFAIL) { /* * All existing users of the __GFP_NOFAIL are blockable, so warn * of any new users that actually require GFP_NOWAIT */ if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask)) goto fail; /* * PF_MEMALLOC request from this context is rather bizarre * because we cannot reclaim anything and only can loop waiting * for somebody to do a work for us */ WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask); /* * non failing costly orders are a hard requirement which we * are not prepared for much so let's warn about these users * so that we can identify them and convert them to something * else. */ WARN_ON_ONCE_GFP(costly_order, gfp_mask); /* * Help non-failing allocations by giving some access to memory * reserves normally used for high priority non-blocking * allocations but do not use ALLOC_NO_WATERMARKS because this * could deplete whole memory reserves which would just make * the situation worse. 
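 *
 * (ALLOC_MIN_RESERVE only relaxes the min watermark check in
 * __zone_watermark_ok() instead of bypassing watermarks altogether,
 * which is why it is preferred over ALLOC_NO_WATERMARKS here.)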
*/ page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac); if (page) goto got_pg; cond_resched(); goto retry; } fail: warn_alloc(gfp_mask, ac->nodemask, "page allocation failure: order:%u", order); got_pg: return page; } static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid, nodemask_t *nodemask, struct alloc_context *ac, gfp_t *alloc_gfp, unsigned int *alloc_flags) { ac->highest_zoneidx = gfp_zone(gfp_mask); ac->zonelist = node_zonelist(preferred_nid, gfp_mask); ac->nodemask = nodemask; ac->migratetype = gfp_migratetype(gfp_mask); if (cpusets_enabled()) { *alloc_gfp |= __GFP_HARDWALL; /* * When we are in the interrupt context, it is irrelevant * to the current task context. It means that any node ok. */ if (in_task() && !ac->nodemask) ac->nodemask = &cpuset_current_mems_allowed; else *alloc_flags |= ALLOC_CPUSET; } might_alloc(gfp_mask); if (should_fail_alloc_page(gfp_mask, order)) return false; *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); /* Dirty zone balancing only done in the fast path */ ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); /* * The preferred zone is used for statistics but crucially it is * also used as the starting point for the zonelist iterator. It * may get reset for allocations that ignore memory policies. */ ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, ac->highest_zoneidx, ac->nodemask); return true; } /* * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array * @gfp: GFP flags for the allocation * @preferred_nid: The preferred NUMA node ID to allocate from * @nodemask: Set of nodes to allocate from, may be NULL * @nr_pages: The number of pages desired on the list or array * @page_list: Optional list to store the allocated pages * @page_array: Optional array to store the pages * * This is a batched version of the page allocator that attempts to * allocate nr_pages quickly. Pages are added to page_list if page_list * is not NULL, otherwise it is assumed that the page_array is valid. * * For lists, nr_pages is the number of pages that should be allocated. * * For arrays, only NULL elements are populated with pages and nr_pages * is the maximum number of pages that will be stored in the array. * * Returns the number of pages on the list or array. */ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, nodemask_t *nodemask, int nr_pages, struct list_head *page_list, struct page **page_array) { struct page *page; unsigned long __maybe_unused UP_flags; struct zone *zone; struct zoneref *z; struct per_cpu_pages *pcp; struct list_head *pcp_list; struct alloc_context ac; gfp_t alloc_gfp; unsigned int alloc_flags = ALLOC_WMARK_LOW; int nr_populated = 0, nr_account = 0; /* * Skip populated array elements to determine if any pages need * to be allocated before disabling IRQs. */ while (page_array && nr_populated < nr_pages && page_array[nr_populated]) nr_populated++; /* No pages requested? */ if (unlikely(nr_pages <= 0)) goto out; /* Already populated array? */ if (unlikely(page_array && nr_pages - nr_populated == 0)) goto out; /* Bulk allocator does not support memcg accounting. */ if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT)) goto failed; /* Use the single page allocator for one page. */ if (nr_pages - nr_populated == 1) goto failed; #ifdef CONFIG_PAGE_OWNER /* * PAGE_OWNER may recurse into the allocator to allocate space to * save the stack with pagesets.lock held. 
Releasing/reacquiring * removes much of the performance benefit of bulk allocation so * force the caller to allocate one page at a time as it'll have * similar performance to added complexity to the bulk allocator. */ if (static_branch_unlikely(&page_owner_inited)) goto failed; #endif /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ gfp &= gfp_allowed_mask; alloc_gfp = gfp; if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) goto out; gfp = alloc_gfp; /* Find an allowed local zone that meets the low watermark. */ for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) { unsigned long mark; if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && !__cpuset_zone_allowed(zone, gfp)) { continue; } if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone && zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) { goto failed; } mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; if (zone_watermark_fast(zone, 0, mark, zonelist_zone_idx(ac.preferred_zoneref), alloc_flags, gfp)) { break; } } /* * If there are no allowed local zones that meets the watermarks then * try to allocate a single page and reclaim if necessary. */ if (unlikely(!zone)) goto failed; /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ pcp_trylock_prepare(UP_flags); pcp = pcp_spin_trylock(zone->per_cpu_pageset); if (!pcp) goto failed_irq; /* Attempt the batch allocation */ pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; while (nr_populated < nr_pages) { /* Skip existing pages */ if (page_array && page_array[nr_populated]) { nr_populated++; continue; } page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, pcp, pcp_list); if (unlikely(!page)) { /* Try and allocate at least one page */ if (!nr_account) { pcp_spin_unlock(pcp); goto failed_irq; } break; } nr_account++; prep_new_page(page, 0, gfp, 0); if (page_list) list_add(&page->lru, page_list); else page_array[nr_populated] = page; nr_populated++; } pcp_spin_unlock(pcp); pcp_trylock_finish(UP_flags); __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); zone_statistics(ac.preferred_zoneref->zone, zone, nr_account); out: return nr_populated; failed_irq: pcp_trylock_finish(UP_flags); failed: page = __alloc_pages(gfp, 0, preferred_nid, nodemask); if (page) { if (page_list) list_add(&page->lru, page_list); else page_array[nr_populated] = page; nr_populated++; } goto out; } EXPORT_SYMBOL_GPL(__alloc_pages_bulk); /* * This is the 'heart' of the zoned buddy allocator. */ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask) { struct page *page; unsigned int alloc_flags = ALLOC_WMARK_LOW; gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ struct alloc_context ac = { }; /* * There are several places where we assume that the order value is sane * so bail out early if the request is out of bound. */ if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp)) return NULL; gfp &= gfp_allowed_mask; /* * Apply scoped allocation constraints. This is mainly about GFP_NOFS * resp. GFP_NOIO which has to be inherited for all allocation requests * from a particular context which has been marked by * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures * movable zones are not used during allocation. 
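 *
 * An illustrative pattern (not taken from any particular caller) on the
 * filesystem side would be:
 *
 *	nofs = memalloc_nofs_save();
 *	ptr = kmalloc(size, GFP_KERNEL);   - implicitly treated as GFP_NOFS
 *	memalloc_nofs_restore(nofs);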
*/ gfp = current_gfp_context(gfp); alloc_gfp = gfp; if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) return NULL; /* * Forbid the first pass from falling back to types that fragment * memory until all local zones are considered. */ alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp); /* First allocation attempt */ page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); if (likely(page)) goto out; alloc_gfp = gfp; ac.spread_dirty_pages = false; /* * Restore the original nodemask if it was potentially replaced with * &cpuset_current_mems_allowed to optimize the fast-path attempt. */ ac.nodemask = nodemask; page = __alloc_pages_slowpath(alloc_gfp, order, &ac); out: if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { __free_pages(page, order); page = NULL; } trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); kmsan_alloc_page(page, order, alloc_gfp); return page; } EXPORT_SYMBOL(__alloc_pages); struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask) { struct page *page = __alloc_pages(gfp | __GFP_COMP, order, preferred_nid, nodemask); struct folio *folio = (struct folio *)page; if (folio && order > 1) folio_prep_large_rmappable(folio); return folio; } EXPORT_SYMBOL(__folio_alloc); /* * Common helper functions. Never use with __GFP_HIGHMEM because the returned * address cannot represent highmem pages. Use alloc_pages and then kmap if * you need to access high mem. */ unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) { struct page *page; page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); if (!page) return 0; return (unsigned long) page_address(page); } EXPORT_SYMBOL(__get_free_pages); unsigned long get_zeroed_page(gfp_t gfp_mask) { return __get_free_page(gfp_mask | __GFP_ZERO); } EXPORT_SYMBOL(get_zeroed_page); /** * __free_pages - Free pages allocated with alloc_pages(). * @page: The page pointer returned from alloc_pages(). * @order: The order of the allocation. * * This function can free multi-page allocations that are not compound * pages. It does not check that the @order passed in matches that of * the allocation, so it is easy to leak memory. Freeing more memory * than was allocated will probably emit a warning. * * If the last reference to this page is speculative, it will be released * by put_page() which only frees the first page of a non-compound * allocation. To prevent the remaining pages from being leaked, we free * the subsequent pages here. If you want to use the page's reference * count to decide when to free the allocation, you should allocate a * compound page, and use put_page() instead of __free_pages(). * * Context: May be called in interrupt context or while holding a normal * spinlock, but not in NMI context or while holding a raw spinlock. */ void __free_pages(struct page *page, unsigned int order) { /* get PageHead before we drop reference */ int head = PageHead(page); if (put_page_testzero(page)) free_the_page(page, order); else if (!head) while (order-- > 0) free_the_page(page + (1 << order), order); } EXPORT_SYMBOL(__free_pages); void free_pages(unsigned long addr, unsigned int order) { if (addr != 0) { VM_BUG_ON(!virt_addr_valid((void *)addr)); __free_pages(virt_to_page((void *)addr), order); } } EXPORT_SYMBOL(free_pages); /* * Page Fragment: * An arbitrary-length arbitrary-offset area of memory which resides * within a 0 or higher order page. 
Multiple fragments within that page * are individually refcounted, in the page's reference counter. * * The page_frag functions below provide a simple allocation framework for * page fragments. This is used by the network stack and network device * drivers to provide a backing region of memory for use as either an * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. */ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc, gfp_t gfp_mask) { struct page *page = NULL; gfp_t gfp = gfp_mask; #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC; page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, PAGE_FRAG_CACHE_MAX_ORDER); nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; #endif if (unlikely(!page)) page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); nc->va = page ? page_address(page) : NULL; return page; } void __page_frag_cache_drain(struct page *page, unsigned int count) { VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); if (page_ref_sub_and_test(page, count)) free_the_page(page, compound_order(page)); } EXPORT_SYMBOL(__page_frag_cache_drain); void *page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask, unsigned int align_mask) { unsigned int size = PAGE_SIZE; struct page *page; int offset; if (unlikely(!nc->va)) { refill: page = __page_frag_cache_refill(nc, gfp_mask); if (!page) return NULL; #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) /* if size can vary use size else just use PAGE_SIZE */ size = nc->size; #endif /* Even if we own the page, we do not use atomic_set(). * This would break get_page_unless_zero() users. */ page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); /* reset page count bias and offset to start of new frag */ nc->pfmemalloc = page_is_pfmemalloc(page); nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; nc->offset = size; } offset = nc->offset - fragsz; if (unlikely(offset < 0)) { page = virt_to_page(nc->va); if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) goto refill; if (unlikely(nc->pfmemalloc)) { free_the_page(page, compound_order(page)); goto refill; } #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) /* if size can vary use size else just use PAGE_SIZE */ size = nc->size; #endif /* OK, page count is 0, we can safely set it */ set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); /* reset page count bias and offset to start of new frag */ nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; offset = size - fragsz; if (unlikely(offset < 0)) { /* * The caller is trying to allocate a fragment * with fragsz > PAGE_SIZE but the cache isn't big * enough to satisfy the request, this may * happen in low memory conditions. * We don't release the cache page because * it could make memory pressure worse * so we simply return NULL here. */ return NULL; } } nc->pagecnt_bias--; offset &= align_mask; nc->offset = offset; return nc->va + offset; } EXPORT_SYMBOL(page_frag_alloc_align); /* * Frees a page fragment allocated out of either a compound or order 0 page. 
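 * An illustrative caller-side pairing, assuming the caller owns a
 * struct page_frag_cache of its own:
 *
 *	buf = page_frag_alloc(&cache, fragsz, GFP_ATOMIC);
 *	if (buf) {
 *		... fill and hand the fragment off ...
 *		page_frag_free(buf);
 *	}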
*/ void page_frag_free(void *addr) { struct page *page = virt_to_head_page(addr); if (unlikely(put_page_testzero(page))) free_the_page(page, compound_order(page)); } EXPORT_SYMBOL(page_frag_free); static void *make_alloc_exact(unsigned long addr, unsigned int order, size_t size) { if (addr) { unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE); struct page *page = virt_to_page((void *)addr); struct page *last = page + nr; split_page_owner(page, 1 << order); split_page_memcg(page, 1 << order); while (page < --last) set_page_refcounted(last); last = page + (1UL << order); for (page += nr; page < last; page++) __free_pages_ok(page, 0, FPI_TO_TAIL); } return (void *)addr; } /** * alloc_pages_exact - allocate an exact number physically-contiguous pages. * @size: the number of bytes to allocate * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP * * This function is similar to alloc_pages(), except that it allocates the * minimum number of pages to satisfy the request. alloc_pages() can only * allocate memory in power-of-two pages. * * This function is also limited by MAX_ORDER. * * Memory allocated by this function must be released by free_pages_exact(). * * Return: pointer to the allocated area or %NULL in case of error. */ void *alloc_pages_exact(size_t size, gfp_t gfp_mask) { unsigned int order = get_order(size); unsigned long addr; if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); addr = __get_free_pages(gfp_mask, order); return make_alloc_exact(addr, order, size); } EXPORT_SYMBOL(alloc_pages_exact); /** * alloc_pages_exact_nid - allocate an exact number of physically-contiguous * pages on a node. * @nid: the preferred node ID where memory should be allocated * @size: the number of bytes to allocate * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP * * Like alloc_pages_exact(), but try to allocate on node nid first before falling * back. * * Return: pointer to the allocated area or %NULL in case of error. */ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) { unsigned int order = get_order(size); struct page *p; if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); p = alloc_pages_node(nid, gfp_mask, order); if (!p) return NULL; return make_alloc_exact((unsigned long)page_address(p), order, size); } /** * free_pages_exact - release memory allocated via alloc_pages_exact() * @virt: the value returned by alloc_pages_exact. * @size: size of allocation, same value as passed to alloc_pages_exact(). * * Release the memory allocated by a previous call to alloc_pages_exact. */ void free_pages_exact(void *virt, size_t size) { unsigned long addr = (unsigned long)virt; unsigned long end = addr + PAGE_ALIGN(size); while (addr < end) { free_page(addr); addr += PAGE_SIZE; } } EXPORT_SYMBOL(free_pages_exact); /** * nr_free_zone_pages - count number of pages beyond high watermark * @offset: The zone index of the highest zone * * nr_free_zone_pages() counts the number of pages which are beyond the * high watermark within all zones at or below a given zone index. For each * zone, the number of pages is calculated as: * * nr_free_zone_pages = managed_pages - high_pages * * Return: number of pages beyond high watermark. 
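 *
 * As a small worked example: a zone with 1,000,000 managed pages and a
 * high watermark of 4,000 pages contributes 996,000 pages to the sum,
 * while a zone at or below its high watermark contributes nothing.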
*/ static unsigned long nr_free_zone_pages(int offset) { struct zoneref *z; struct zone *zone; /* Just pick one node, since fallback list is circular */ unsigned long sum = 0; struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); for_each_zone_zonelist(zone, z, zonelist, offset) { unsigned long size = zone_managed_pages(zone); unsigned long high = high_wmark_pages(zone); if (size > high) sum += size - high; } return sum; } /** * nr_free_buffer_pages - count number of pages beyond high watermark * * nr_free_buffer_pages() counts the number of pages which are beyond the high * watermark within ZONE_DMA and ZONE_NORMAL. * * Return: number of pages beyond high watermark within ZONE_DMA and * ZONE_NORMAL. */ unsigned long nr_free_buffer_pages(void) { return nr_free_zone_pages(gfp_zone(GFP_USER)); } EXPORT_SYMBOL_GPL(nr_free_buffer_pages); static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) { zoneref->zone = zone; zoneref->zone_idx = zone_idx(zone); } /* * Builds allocation fallback zone lists. * * Add all populated zones of a node to the zonelist. */ static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) { struct zone *zone; enum zone_type zone_type = MAX_NR_ZONES; int nr_zones = 0; do { zone_type--; zone = pgdat->node_zones + zone_type; if (populated_zone(zone)) { zoneref_set_zone(zone, &zonerefs[nr_zones++]); check_highest_zone(zone_type); } } while (zone_type); return nr_zones; } #ifdef CONFIG_NUMA static int __parse_numa_zonelist_order(char *s) { /* * We used to support different zonelists modes but they turned * out to be just not useful. Let's keep the warning in place * if somebody still use the cmd line parameter so that we do * not fail it silently */ if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); return -EINVAL; } return 0; } static char numa_zonelist_order[] = "Node"; #define NUMA_ZONELIST_ORDER_LEN 16 /* * sysctl handler for numa_zonelist_order */ static int numa_zonelist_order_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { if (write) return __parse_numa_zonelist_order(buffer); return proc_dostring(table, write, buffer, length, ppos); } static int node_load[MAX_NUMNODES]; /** * find_next_best_node - find the next node that should appear in a given node's fallback list * @node: node whose fallback list we're appending * @used_node_mask: nodemask_t of already used nodes * * We use a number of factors to determine which is the next node that should * appear on a given node's fallback list. The node should not have appeared * already in @node's fallback list, and it should be the next closest node * according to the distance array (which contains arbitrary distance values * from each node to each node in the system), and should also prefer nodes * with no CPUs, since presumably they'll have very little allocation pressure * on them otherwise. * * Return: node id of the found node or %NUMA_NO_NODE if no node is found. 
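 *
 * Illustrative scoring with made-up numbers: seen from node 0, a node at
 * distance 20 that has CPUs scores (20 + PENALTY_FOR_NODE_WITH_CPUS) *
 * MAX_NUMNODES + its node_load, while a headless node at the same
 * distance scores only 20 * MAX_NUMNODES + its node_load; the lowest
 * score wins, so the headless node is appended first.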
*/ int find_next_best_node(int node, nodemask_t *used_node_mask) { int n, val; int min_val = INT_MAX; int best_node = NUMA_NO_NODE; /* Use the local node if we haven't already */ if (!node_isset(node, *used_node_mask)) { node_set(node, *used_node_mask); return node; } for_each_node_state(n, N_MEMORY) { /* Don't want a node to appear more than once */ if (node_isset(n, *used_node_mask)) continue; /* Use the distance array to find the distance */ val = node_distance(node, n); /* Penalize nodes under us ("prefer the next node") */ val += (n < node); /* Give preference to headless and unused nodes */ if (!cpumask_empty(cpumask_of_node(n))) val += PENALTY_FOR_NODE_WITH_CPUS; /* Slight preference for less loaded node */ val *= MAX_NUMNODES; val += node_load[n]; if (val < min_val) { min_val = val; best_node = n; } } if (best_node >= 0) node_set(best_node, *used_node_mask); return best_node; } /* * Build zonelists ordered by node and zones within node. * This results in maximum locality--normal zone overflows into local * DMA zone, if any--but risks exhausting DMA zone. */ static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, unsigned nr_nodes) { struct zoneref *zonerefs; int i; zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; for (i = 0; i < nr_nodes; i++) { int nr_zones; pg_data_t *node = NODE_DATA(node_order[i]); nr_zones = build_zonerefs_node(node, zonerefs); zonerefs += nr_zones; } zonerefs->zone = NULL; zonerefs->zone_idx = 0; } /* * Build gfp_thisnode zonelists */ static void build_thisnode_zonelists(pg_data_t *pgdat) { struct zoneref *zonerefs; int nr_zones; zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; nr_zones = build_zonerefs_node(pgdat, zonerefs); zonerefs += nr_zones; zonerefs->zone = NULL; zonerefs->zone_idx = 0; } /* * Build zonelists ordered by zone and nodes within zones. * This results in conserving DMA zone[s] until all Normal memory is * exhausted, but results in overflowing to remote node while memory * may still exist in local DMA zone. */ static void build_zonelists(pg_data_t *pgdat) { static int node_order[MAX_NUMNODES]; int node, nr_nodes = 0; nodemask_t used_mask = NODE_MASK_NONE; int local_node, prev_node; /* NUMA-aware ordering of nodes */ local_node = pgdat->node_id; prev_node = local_node; memset(node_order, 0, sizeof(node_order)); while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { /* * We don't want to pressure a particular node. * So adding penalty to the first node in same * distance group to make it round-robin. */ if (node_distance(local_node, node) != node_distance(local_node, prev_node)) node_load[node] += 1; node_order[nr_nodes++] = node; prev_node = node; } build_zonelists_in_node_order(pgdat, node_order, nr_nodes); build_thisnode_zonelists(pgdat); pr_info("Fallback order for Node %d: ", local_node); for (node = 0; node < nr_nodes; node++) pr_cont("%d ", node_order[node]); pr_cont("\n"); } #ifdef CONFIG_HAVE_MEMORYLESS_NODES /* * Return node id of node used for "local" allocations. * I.e., first node id of first zone in arg node's generic zonelist. * Used for initializing percpu 'numa_mem', which is used primarily * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 
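 * For a memoryless node this is simply the nearest node that does have
 * memory, since that node's zones come first in the generic zonelist.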
*/ int local_memory_node(int node) { struct zoneref *z; z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), gfp_zone(GFP_KERNEL), NULL); return zone_to_nid(z->zone); } #endif static void setup_min_unmapped_ratio(void); static void setup_min_slab_ratio(void); #else /* CONFIG_NUMA */ static void build_zonelists(pg_data_t *pgdat) { int node, local_node; struct zoneref *zonerefs; int nr_zones; local_node = pgdat->node_id; zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; nr_zones = build_zonerefs_node(pgdat, zonerefs); zonerefs += nr_zones; /* * Now we build the zonelist so that it contains the zones * of all the other nodes. * We don't want to pressure a particular node, so when * building the zones for node N, we make sure that the * zones coming right after the local ones are those from * node N+1 (modulo N) */ for (node = local_node + 1; node < MAX_NUMNODES; node++) { if (!node_online(node)) continue; nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); zonerefs += nr_zones; } for (node = 0; node < local_node; node++) { if (!node_online(node)) continue; nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); zonerefs += nr_zones; } zonerefs->zone = NULL; zonerefs->zone_idx = 0; } #endif /* CONFIG_NUMA */ /* * Boot pageset table. One per cpu which is going to be used for all * zones and all nodes. The parameters will be set in such a way * that an item put on a list will immediately be handed over to * the buddy list. This is safe since pageset manipulation is done * with interrupts disabled. * * The boot_pagesets must be kept even after bootup is complete for * unused processors and/or zones. They do play a role for bootstrapping * hotplugged processors. * * zoneinfo_show() and maybe other functions do * not check if the processor is online before following the pageset pointer. * Other parts of the kernel may not check if the zone is available. */ static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); /* These effectively disable the pcplists in the boot pageset completely */ #define BOOT_PAGESET_HIGH 0 #define BOOT_PAGESET_BATCH 1 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); static void __build_all_zonelists(void *data) { int nid; int __maybe_unused cpu; pg_data_t *self = data; unsigned long flags; /* * The zonelist_update_seq must be acquired with irqsave because the * reader can be invoked from IRQ with GFP_ATOMIC. */ write_seqlock_irqsave(&zonelist_update_seq, flags); /* * Also disable synchronous printk() to prevent any printk() from * trying to hold port->lock, for * tty_insert_flip_string_and_push_buffer() on other CPU might be * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held. */ printk_deferred_enter(); #ifdef CONFIG_NUMA memset(node_load, 0, sizeof(node_load)); #endif /* * This node is hotadded and no memory is yet present. So just * building zonelists is fine - no need to touch other nodes. */ if (self && !node_online(self->node_id)) { build_zonelists(self); } else { /* * All possible nodes have pgdat preallocated * in free_area_init */ for_each_node(nid) { pg_data_t *pgdat = NODE_DATA(nid); build_zonelists(pgdat); } #ifdef CONFIG_HAVE_MEMORYLESS_NODES /* * We now know the "local memory node" for each node-- * i.e., the node of the first zone in the generic zonelist. * Set up numa_mem percpu variable for on-line cpus. 
During * boot, only the boot cpu should be on-line; we'll init the * secondary cpus' numa_mem as they come on-line. During * node/memory hotplug, we'll fixup all on-line cpus. */ for_each_online_cpu(cpu) set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); #endif } printk_deferred_exit(); write_sequnlock_irqrestore(&zonelist_update_seq, flags); } static noinline void __init build_all_zonelists_init(void) { int cpu; __build_all_zonelists(NULL); /* * Initialize the boot_pagesets that are going to be used * for bootstrapping processors. The real pagesets for * each zone will be allocated later when the per cpu * allocator is available. * * boot_pagesets are used also for bootstrapping offline * cpus if the system is already booted because the pagesets * are needed to initialize allocators on a specific cpu too. * F.e. the percpu allocator needs the page allocator which * needs the percpu allocator in order to allocate its pagesets * (a chicken-egg dilemma). */ for_each_possible_cpu(cpu) per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); mminit_verify_zonelist(); cpuset_init_current_mems_allowed(); } /* * unless system_state == SYSTEM_BOOTING. * * __ref due to call of __init annotated helper build_all_zonelists_init * [protected by SYSTEM_BOOTING]. */ void __ref build_all_zonelists(pg_data_t *pgdat) { unsigned long vm_total_pages; if (system_state == SYSTEM_BOOTING) { build_all_zonelists_init(); } else { __build_all_zonelists(pgdat); /* cpuset refresh routine should be here */ } /* Get the number of free pages beyond high watermark in all zones. */ vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); /* * Disable grouping by mobility if the number of pages in the * system is too low to allow the mechanism to work. It would be * more accurate, but expensive to check per-zone. This check is * made on memory-hotadd so a system can start with mobility * disabled and enable it later */ if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) page_group_by_mobility_disabled = 1; else page_group_by_mobility_disabled = 0; pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", nr_online_nodes, page_group_by_mobility_disabled ? "off" : "on", vm_total_pages); #ifdef CONFIG_NUMA pr_info("Policy zone: %s\n", zone_names[policy_zone]); #endif } static int zone_batchsize(struct zone *zone) { #ifdef CONFIG_MMU int batch; /* * The number of pages to batch allocate is either ~0.1% * of the zone or 1MB, whichever is smaller. The batch * size is striking a balance between allocation latency * and zone lock contention. */ batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE); batch /= 4; /* We effectively *= 4 below */ if (batch < 1) batch = 1; /* * Clamp the batch to a 2^n - 1 value. Having a power * of 2 value was found to be more likely to have * suboptimal cache aliasing properties in some cases. * * For example if 2 tasks are alternately allocating * batches of pages, one task can end up with a lot * of pages of one half of the possible page colors * and the other with pages of the other colors. */ batch = rounddown_pow_of_two(batch + batch/2) - 1; return batch; #else /* The deferral and batching of frees should be suppressed under NOMMU * conditions. * * The problem is that NOMMU needs to be able to allocate large chunks * of contiguous memory as there's no hardware page translation to * assemble apparent contiguous memory from discontiguous pages. 
* * Queueing large contiguous runs of pages for batching, however, * causes the pages to actually be freed in smaller chunks. As there * can be a significant delay between the individual batches being * recycled, this leads to the once large chunks of space being * fragmented and becoming unavailable for high-order allocations. */ return 0; #endif } static int percpu_pagelist_high_fraction; static int zone_highsize(struct zone *zone, int batch, int cpu_online) { #ifdef CONFIG_MMU int high; int nr_split_cpus; unsigned long total_pages; if (!percpu_pagelist_high_fraction) { /* * By default, the high value of the pcp is based on the zone * low watermark so that if they are full then background * reclaim will not be started prematurely. */ total_pages = low_wmark_pages(zone); } else { /* * If percpu_pagelist_high_fraction is configured, the high * value is based on a fraction of the managed pages in the * zone. */ total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction; } /* * Split the high value across all online CPUs local to the zone. Note * that early in boot that CPUs may not be online yet and that during * CPU hotplug that the cpumask is not yet updated when a CPU is being * onlined. For memory nodes that have no CPUs, split pcp->high across * all online CPUs to mitigate the risk that reclaim is triggered * prematurely due to pages stored on pcp lists. */ nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; if (!nr_split_cpus) nr_split_cpus = num_online_cpus(); high = total_pages / nr_split_cpus; /* * Ensure high is at least batch*4. The multiple is based on the * historical relationship between high and batch. */ high = max(high, batch << 2); return high; #else return 0; #endif } /* * pcp->high and pcp->batch values are related and generally batch is lower * than high. They are also related to pcp->count such that count is lower * than high, and as soon as it reaches high, the pcplist is flushed. * * However, guaranteeing these relations at all times would require e.g. write * barriers here but also careful usage of read barriers at the read side, and * thus be prone to error and bad for performance. Thus the update only prevents * store tearing. Any new users of pcp->batch and pcp->high should ensure they * can cope with those fields changing asynchronously, and fully trust only the * pcp->count field on the local CPU with interrupts disabled. * * mutex_is_locked(&pcp_batch_high_lock) required when calling this function * outside of boot time (or some other assurance that no concurrent updaters * exist). */ static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, unsigned long batch) { WRITE_ONCE(pcp->batch, batch); WRITE_ONCE(pcp->high, high); } static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) { int pindex; memset(pcp, 0, sizeof(*pcp)); memset(pzstats, 0, sizeof(*pzstats)); spin_lock_init(&pcp->lock); for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) INIT_LIST_HEAD(&pcp->lists[pindex]); /* * Set batch and high values safe for a boot pageset. A true percpu * pageset's initialization will update them subsequently. Here we don't * need to be as careful as pageset_update() as nobody can access the * pageset yet. 
*/ pcp->high = BOOT_PAGESET_HIGH; pcp->batch = BOOT_PAGESET_BATCH; pcp->free_factor = 0; } static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high, unsigned long batch) { struct per_cpu_pages *pcp; int cpu; for_each_possible_cpu(cpu) { pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); pageset_update(pcp, high, batch); } } /* * Calculate and set new high and batch values for all per-cpu pagesets of a * zone based on the zone's size. */ static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) { int new_high, new_batch; new_batch = max(1, zone_batchsize(zone)); new_high = zone_highsize(zone, new_batch, cpu_online); if (zone->pageset_high == new_high && zone->pageset_batch == new_batch) return; zone->pageset_high = new_high; zone->pageset_batch = new_batch; __zone_set_pageset_high_and_batch(zone, new_high, new_batch); } void __meminit setup_zone_pageset(struct zone *zone) { int cpu; /* Size may be 0 on !SMP && !NUMA */ if (sizeof(struct per_cpu_zonestat) > 0) zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); for_each_possible_cpu(cpu) { struct per_cpu_pages *pcp; struct per_cpu_zonestat *pzstats; pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); per_cpu_pages_init(pcp, pzstats); } zone_set_pageset_high_and_batch(zone, 0); } /* * The zone indicated has a new number of managed_pages; batch sizes and percpu * page high values need to be recalculated. */ static void zone_pcp_update(struct zone *zone, int cpu_online) { mutex_lock(&pcp_batch_high_lock); zone_set_pageset_high_and_batch(zone, cpu_online); mutex_unlock(&pcp_batch_high_lock); } /* * Allocate per cpu pagesets and initialize them. * Before this call only boot pagesets were available. */ void __init setup_per_cpu_pageset(void) { struct pglist_data *pgdat; struct zone *zone; int __maybe_unused cpu; for_each_populated_zone(zone) setup_zone_pageset(zone); #ifdef CONFIG_NUMA /* * Unpopulated zones continue using the boot pagesets. * The numa stats for these pagesets need to be reset. * Otherwise, they will end up skewing the stats of * the nodes these zones are associated with. */ for_each_possible_cpu(cpu) { struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); memset(pzstats->vm_numa_event, 0, sizeof(pzstats->vm_numa_event)); } #endif for_each_online_pgdat(pgdat) pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); } __meminit void zone_pcp_init(struct zone *zone) { /* * per cpu subsystem is not up at this point. The following code * relies on the ability of the linker to provide the * offset of a (static) per cpu variable into the per cpu area. 
*/ zone->per_cpu_pageset = &boot_pageset; zone->per_cpu_zonestats = &boot_zonestats; zone->pageset_high = BOOT_PAGESET_HIGH; zone->pageset_batch = BOOT_PAGESET_BATCH; if (populated_zone(zone)) pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, zone->present_pages, zone_batchsize(zone)); } void adjust_managed_page_count(struct page *page, long count) { atomic_long_add(count, &page_zone(page)->managed_pages); totalram_pages_add(count); #ifdef CONFIG_HIGHMEM if (PageHighMem(page)) totalhigh_pages_add(count); #endif } EXPORT_SYMBOL(adjust_managed_page_count); unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) { void *pos; unsigned long pages = 0; start = (void *)PAGE_ALIGN((unsigned long)start); end = (void *)((unsigned long)end & PAGE_MASK); for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { struct page *page = virt_to_page(pos); void *direct_map_addr; /* * 'direct_map_addr' might be different from 'pos' * because some architectures' virt_to_page() * work with aliases. Getting the direct map * address ensures that we get a _writeable_ * alias for the memset(). */ direct_map_addr = page_address(page); /* * Perform a kasan-unchecked memset() since this memory * has not been initialized. */ direct_map_addr = kasan_reset_tag(direct_map_addr); if ((unsigned int)poison <= 0xFF) memset(direct_map_addr, poison, PAGE_SIZE); free_reserved_page(page); } if (pages && s) pr_info("Freeing %s memory: %ldK\n", s, K(pages)); return pages; } static int page_alloc_cpu_dead(unsigned int cpu) { struct zone *zone; lru_add_drain_cpu(cpu); mlock_drain_remote(cpu); drain_pages(cpu); /* * Spill the event counters of the dead processor * into the current processors event counters. * This artificially elevates the count of the current * processor. */ vm_events_fold_cpu(cpu); /* * Zero the differential counters of the dead processor * so that the vm statistics are consistent. * * This is only okay since the processor is dead and cannot * race with what we are doing. */ cpu_vm_stats_fold(cpu); for_each_populated_zone(zone) zone_pcp_update(zone, 0); return 0; } static int page_alloc_cpu_online(unsigned int cpu) { struct zone *zone; for_each_populated_zone(zone) zone_pcp_update(zone, 1); return 0; } void __init page_alloc_init_cpuhp(void) { int ret; ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC, "mm/page_alloc:pcp", page_alloc_cpu_online, page_alloc_cpu_dead); WARN_ON(ret < 0); } /* * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio * or min_free_kbytes changes. */ static void calculate_totalreserve_pages(void) { struct pglist_data *pgdat; unsigned long reserve_pages = 0; enum zone_type i, j; for_each_online_pgdat(pgdat) { pgdat->totalreserve_pages = 0; for (i = 0; i < MAX_NR_ZONES; i++) { struct zone *zone = pgdat->node_zones + i; long max = 0; unsigned long managed_pages = zone_managed_pages(zone); /* Find valid and maximum lowmem_reserve in the zone */ for (j = i; j < MAX_NR_ZONES; j++) { if (zone->lowmem_reserve[j] > max) max = zone->lowmem_reserve[j]; } /* we treat the high watermark as reserved pages. */ max += high_wmark_pages(zone); if (max > managed_pages) max = managed_pages; pgdat->totalreserve_pages += max; reserve_pages += max; } } totalreserve_pages = reserve_pages; } /* * setup_per_zone_lowmem_reserve - called whenever * sysctl_lowmem_reserve_ratio changes. Ensures that each zone * has a correct pages reserved value, so an adequate number of * pages are left in the zone after a successful __alloc_pages(). 
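 *
 * Rough example with made-up sizes: with a reserve ratio of 256 for
 * ZONE_DMA and 4,000,000 managed pages in the zones above it, ZONE_DMA
 * withholds about 15,625 pages from allocations that could equally well
 * have been satisfied from those higher zones.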
*/ static void setup_per_zone_lowmem_reserve(void) { struct pglist_data *pgdat; enum zone_type i, j; for_each_online_pgdat(pgdat) { for (i = 0; i < MAX_NR_ZONES - 1; i++) { struct zone *zone = &pgdat->node_zones[i]; int ratio = sysctl_lowmem_reserve_ratio[i]; bool clear = !ratio || !zone_managed_pages(zone); unsigned long managed_pages = 0; for (j = i + 1; j < MAX_NR_ZONES; j++) { struct zone *upper_zone = &pgdat->node_zones[j]; managed_pages += zone_managed_pages(upper_zone); if (clear) zone->lowmem_reserve[j] = 0; else zone->lowmem_reserve[j] = managed_pages / ratio; } } } /* update totalreserve_pages */ calculate_totalreserve_pages(); } static void __setup_per_zone_wmarks(void) { unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); unsigned long lowmem_pages = 0; struct zone *zone; unsigned long flags; /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */ for_each_zone(zone) { if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE) lowmem_pages += zone_managed_pages(zone); } for_each_zone(zone) { u64 tmp; spin_lock_irqsave(&zone->lock, flags); tmp = (u64)pages_min * zone_managed_pages(zone); do_div(tmp, lowmem_pages); if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) { /* * __GFP_HIGH and PF_MEMALLOC allocations usually don't * need highmem and movable zones pages, so cap pages_min * to a small value here. * * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) * deltas control async page reclaim, and so should * not be capped for highmem and movable zones. */ unsigned long min_pages; min_pages = zone_managed_pages(zone) / 1024; min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); zone->_watermark[WMARK_MIN] = min_pages; } else { /* * If it's a lowmem zone, reserve a number of pages * proportionate to the zone's size. */ zone->_watermark[WMARK_MIN] = tmp; } /* * Set the kswapd watermarks distance according to the * scale factor in proportion to available memory, but * ensure a minimum size on small systems. */ tmp = max_t(u64, tmp >> 2, mult_frac(zone_managed_pages(zone), watermark_scale_factor, 10000)); zone->watermark_boost = 0; zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; spin_unlock_irqrestore(&zone->lock, flags); } /* update totalreserve_pages */ calculate_totalreserve_pages(); } /** * setup_per_zone_wmarks - called when min_free_kbytes changes * or when memory is hot-{added|removed} * * Ensures that the watermark[min,low,high] values for each zone are set * correctly with respect to min_free_kbytes. */ void setup_per_zone_wmarks(void) { struct zone *zone; static DEFINE_SPINLOCK(lock); spin_lock(&lock); __setup_per_zone_wmarks(); spin_unlock(&lock); /* * The watermark size have changed so update the pcpu batch * and high limits or the limits may be inappropriate. */ for_each_zone(zone) zone_pcp_update(zone, 0); } /* * Initialise min_free_kbytes. * * For small machines we want it small (128k min). For large machines * we want it large (256MB max). But it is not linear, because network * bandwidth does not increase linearly with machine size. 
We use * * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: * min_free_kbytes = sqrt(lowmem_kbytes * 16) * * which yields * * 16MB: 512k * 32MB: 724k * 64MB: 1024k * 128MB: 1448k * 256MB: 2048k * 512MB: 2896k * 1024MB: 4096k * 2048MB: 5792k * 4096MB: 8192k * 8192MB: 11584k * 16384MB: 16384k */ void calculate_min_free_kbytes(void) { unsigned long lowmem_kbytes; int new_min_free_kbytes; lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); if (new_min_free_kbytes > user_min_free_kbytes) min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144); else pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", new_min_free_kbytes, user_min_free_kbytes); } int __meminit init_per_zone_wmark_min(void) { calculate_min_free_kbytes(); setup_per_zone_wmarks(); refresh_zone_stat_thresholds(); setup_per_zone_lowmem_reserve(); #ifdef CONFIG_NUMA setup_min_unmapped_ratio(); setup_min_slab_ratio(); #endif khugepaged_min_free_kbytes_update(); return 0; } postcore_initcall(init_per_zone_wmark_min) /* * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so * that we can call two helper functions whenever min_free_kbytes * changes. */ static int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int rc; rc = proc_dointvec_minmax(table, write, buffer, length, ppos); if (rc) return rc; if (write) { user_min_free_kbytes = min_free_kbytes; setup_per_zone_wmarks(); } return 0; } static int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int rc; rc = proc_dointvec_minmax(table, write, buffer, length, ppos); if (rc) return rc; if (write) setup_per_zone_wmarks(); return 0; } #ifdef CONFIG_NUMA static void setup_min_unmapped_ratio(void) { pg_data_t *pgdat; struct zone *zone; for_each_online_pgdat(pgdat) pgdat->min_unmapped_pages = 0; for_each_zone(zone) zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * sysctl_min_unmapped_ratio) / 100; } static int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int rc; rc = proc_dointvec_minmax(table, write, buffer, length, ppos); if (rc) return rc; setup_min_unmapped_ratio(); return 0; } static void setup_min_slab_ratio(void) { pg_data_t *pgdat; struct zone *zone; for_each_online_pgdat(pgdat) pgdat->min_slab_pages = 0; for_each_zone(zone) zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * sysctl_min_slab_ratio) / 100; } static int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int rc; rc = proc_dointvec_minmax(table, write, buffer, length, ppos); if (rc) return rc; setup_min_slab_ratio(); return 0; } #endif /* * lowmem_reserve_ratio_sysctl_handler - just a wrapper around * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() * whenever sysctl_lowmem_reserve_ratio changes. * * The reserve ratio obviously has absolutely no relation with the * minimum watermarks. The lowmem reserve ratio can only make sense * if in function of the boot time zone sizes. 
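 * (Put differently, the reserve ends up being a fixed fraction of the
 * managed pages of the higher zones, so tuning the ratio rescales that
 * fraction; it does not interact with min_free_kbytes or the watermarks.)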
*/ static int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int i; proc_dointvec_minmax(table, write, buffer, length, ppos); for (i = 0; i < MAX_NR_ZONES; i++) { if (sysctl_lowmem_reserve_ratio[i] < 1) sysctl_lowmem_reserve_ratio[i] = 0; } setup_per_zone_lowmem_reserve(); return 0; } /* * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each * cpu. It is the fraction of total pages in each zone that a hot per cpu * pagelist can have before it gets flushed back to buddy allocator. */ static int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { struct zone *zone; int old_percpu_pagelist_high_fraction; int ret; mutex_lock(&pcp_batch_high_lock); old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction; ret = proc_dointvec_minmax(table, write, buffer, length, ppos); if (!write || ret < 0) goto out; /* Sanity checking to avoid pcp imbalance */ if (percpu_pagelist_high_fraction && percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) { percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction; ret = -EINVAL; goto out; } /* No change? */ if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction) goto out; for_each_populated_zone(zone) zone_set_pageset_high_and_batch(zone, 0); out: mutex_unlock(&pcp_batch_high_lock); return ret; } static struct ctl_table page_alloc_sysctl_table[] = { { .procname = "min_free_kbytes", .data = &min_free_kbytes, .maxlen = sizeof(min_free_kbytes), .mode = 0644, .proc_handler = min_free_kbytes_sysctl_handler, .extra1 = SYSCTL_ZERO, }, { .procname = "watermark_boost_factor", .data = &watermark_boost_factor, .maxlen = sizeof(watermark_boost_factor), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, }, { .procname = "watermark_scale_factor", .data = &watermark_scale_factor, .maxlen = sizeof(watermark_scale_factor), .mode = 0644, .proc_handler = watermark_scale_factor_sysctl_handler, .extra1 = SYSCTL_ONE, .extra2 = SYSCTL_THREE_THOUSAND, }, { .procname = "percpu_pagelist_high_fraction", .data = &percpu_pagelist_high_fraction, .maxlen = sizeof(percpu_pagelist_high_fraction), .mode = 0644, .proc_handler = percpu_pagelist_high_fraction_sysctl_handler, .extra1 = SYSCTL_ZERO, }, { .procname = "lowmem_reserve_ratio", .data = &sysctl_lowmem_reserve_ratio, .maxlen = sizeof(sysctl_lowmem_reserve_ratio), .mode = 0644, .proc_handler = lowmem_reserve_ratio_sysctl_handler, }, #ifdef CONFIG_NUMA { .procname = "numa_zonelist_order", .data = &numa_zonelist_order, .maxlen = NUMA_ZONELIST_ORDER_LEN, .mode = 0644, .proc_handler = numa_zonelist_order_handler, }, { .procname = "min_unmapped_ratio", .data = &sysctl_min_unmapped_ratio, .maxlen = sizeof(sysctl_min_unmapped_ratio), .mode = 0644, .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE_HUNDRED, }, { .procname = "min_slab_ratio", .data = &sysctl_min_slab_ratio, .maxlen = sizeof(sysctl_min_slab_ratio), .mode = 0644, .proc_handler = sysctl_min_slab_ratio_sysctl_handler, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE_HUNDRED, }, #endif {} }; void __init page_alloc_sysctl_init(void) { register_sysctl_init("vm", page_alloc_sysctl_table); } #ifdef CONFIG_CONTIG_ALLOC /* Usage: See admin-guide/dynamic-debug-howto.rst */ static void alloc_contig_dump_pages(struct list_head *page_list) { DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure"); if 
(DYNAMIC_DEBUG_BRANCH(descriptor)) { struct page *page; dump_stack(); list_for_each_entry(page, page_list, lru) dump_page(page, "migration failure"); } } /* [start, end) must belong to a single zone. */ int __alloc_contig_migrate_range(struct compact_control *cc, unsigned long start, unsigned long end) { /* This function is based on compact_zone() from compaction.c. */ unsigned int nr_reclaimed; unsigned long pfn = start; unsigned int tries = 0; int ret = 0; struct migration_target_control mtc = { .nid = zone_to_nid(cc->zone), .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, }; lru_cache_disable(); while (pfn < end || !list_empty(&cc->migratepages)) { if (fatal_signal_pending(current)) { ret = -EINTR; break; } if (list_empty(&cc->migratepages)) { cc->nr_migratepages = 0; ret = isolate_migratepages_range(cc, pfn, end); if (ret && ret != -EAGAIN) break; pfn = cc->migrate_pfn; tries = 0; } else if (++tries == 5) { ret = -EBUSY; break; } nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, &cc->migratepages); cc->nr_migratepages -= nr_reclaimed; ret = migrate_pages(&cc->migratepages, alloc_migration_target, NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL); /* * On -ENOMEM, migrate_pages() bails out right away. It is pointless * to retry again over this error, so do the same here. */ if (ret == -ENOMEM) break; } lru_cache_enable(); if (ret < 0) { if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) alloc_contig_dump_pages(&cc->migratepages); putback_movable_pages(&cc->migratepages); return ret; } return 0; } /** * alloc_contig_range() -- tries to allocate given range of pages * @start: start PFN to allocate * @end: one-past-the-last PFN to allocate * @migratetype: migratetype of the underlying pageblocks (either * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks * in range must have the same migratetype and it must * be either of the two. * @gfp_mask: GFP mask to use during compaction * * The PFN range does not have to be pageblock aligned. The PFN range must * belong to a single zone. * * The first thing this routine does is attempt to MIGRATE_ISOLATE all * pageblocks in the range. Once isolated, the pageblocks should not * be modified by others. * * Return: zero on success or negative error code. On success all * pages which PFN is in [start, end) are allocated for the caller and * need to be freed with free_contig_range(). */ int alloc_contig_range(unsigned long start, unsigned long end, unsigned migratetype, gfp_t gfp_mask) { unsigned long outer_start, outer_end; int order; int ret = 0; struct compact_control cc = { .nr_migratepages = 0, .order = -1, .zone = page_zone(pfn_to_page(start)), .mode = MIGRATE_SYNC, .ignore_skip_hint = true, .no_set_skip_hint = true, .gfp_mask = current_gfp_context(gfp_mask), .alloc_contig = true, }; INIT_LIST_HEAD(&cc.migratepages); /* * What we do here is we mark all pageblocks in range as * MIGRATE_ISOLATE. Because pageblock and max order pages may * have different sizes, and due to the way page allocator * work, start_isolate_page_range() has special handlings for this. * * Once the pageblocks are marked as MIGRATE_ISOLATE, we * migrate the pages from an unaligned range (ie. pages that * we are interested in). This will put all the pages in * range back to page allocator as MIGRATE_ISOLATE. * * When this is done, we take the pages in range from page * allocator removing them from the buddy system. This way * page allocator will never consider using them. 
* * This lets us mark the pageblocks back as * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the * aligned range but not in the unaligned, original range are * put back to page allocator so that buddy can use them. */ ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask); if (ret) goto done; drain_all_pages(cc.zone); /* * In case of -EBUSY, we'd like to know which page causes problem. * So, just fall through. test_pages_isolated() has a tracepoint * which will report the busy page. * * It is possible that busy pages could become available before * the call to test_pages_isolated, and the range will actually be * allocated. So, if we fall through be sure to clear ret so that * -EBUSY is not accidentally used or returned to caller. */ ret = __alloc_contig_migrate_range(&cc, start, end); if (ret && ret != -EBUSY) goto done; ret = 0; /* * Pages from [start, end) are within a pageblock_nr_pages * aligned blocks that are marked as MIGRATE_ISOLATE. What's * more, all pages in [start, end) are free in page allocator. * What we are going to do is to allocate all pages from * [start, end) (that is remove them from page allocator). * * The only problem is that pages at the beginning and at the * end of interesting range may be not aligned with pages that * page allocator holds, ie. they can be part of higher order * pages. Because of this, we reserve the bigger range and * once this is done free the pages we are not interested in. * * We don't have to hold zone->lock here because the pages are * isolated thus they won't get removed from buddy. */ order = 0; outer_start = start; while (!PageBuddy(pfn_to_page(outer_start))) { if (++order > MAX_ORDER) { outer_start = start; break; } outer_start &= ~0UL << order; } if (outer_start != start) { order = buddy_order(pfn_to_page(outer_start)); /* * outer_start page could be small order buddy page and * it doesn't include start page. Adjust outer_start * in this case to report failed page properly * on tracepoint in test_pages_isolated() */ if (outer_start + (1UL << order) <= start) outer_start = start; } /* Make sure the range is really isolated. */ if (test_pages_isolated(outer_start, end, 0)) { ret = -EBUSY; goto done; } /* Grab isolated pages from freelists. 
*/ outer_end = isolate_freepages_range(&cc, outer_start, end); if (!outer_end) { ret = -EBUSY; goto done; } /* Free head and tail (if any) */ if (start != outer_start) free_contig_range(outer_start, start - outer_start); if (end != outer_end) free_contig_range(end, outer_end - end); done: undo_isolate_page_range(start, end, migratetype); return ret; } EXPORT_SYMBOL(alloc_contig_range); static int __alloc_contig_pages(unsigned long start_pfn, unsigned long nr_pages, gfp_t gfp_mask) { unsigned long end_pfn = start_pfn + nr_pages; return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE, gfp_mask); } static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, unsigned long nr_pages) { unsigned long i, end_pfn = start_pfn + nr_pages; struct page *page; for (i = start_pfn; i < end_pfn; i++) { page = pfn_to_online_page(i); if (!page) return false; if (page_zone(page) != z) return false; if (PageReserved(page)) return false; if (PageHuge(page)) return false; } return true; } static bool zone_spans_last_pfn(const struct zone *zone, unsigned long start_pfn, unsigned long nr_pages) { unsigned long last_pfn = start_pfn + nr_pages - 1; return zone_spans_pfn(zone, last_pfn); } /** * alloc_contig_pages() -- tries to find and allocate contiguous range of pages * @nr_pages: Number of contiguous pages to allocate * @gfp_mask: GFP mask to limit search and used during compaction * @nid: Target node * @nodemask: Mask for other possible nodes * * This routine is a wrapper around alloc_contig_range(). It scans over zones * on an applicable zonelist to find a contiguous pfn range which can then be * tried for allocation with alloc_contig_range(). This routine is intended * for allocation requests which can not be fulfilled with the buddy allocator. * * The allocated memory is always aligned to a page boundary. If nr_pages is a * power of two, then allocated range is also guaranteed to be aligned to same * nr_pages (e.g. 1GB request would be aligned to 1GB). * * Allocated pages can be freed with free_contig_range() or by manually calling * __free_page() on each allocated page. * * Return: pointer to contiguous pages on success, or NULL if not successful. */ struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, int nid, nodemask_t *nodemask) { unsigned long ret, pfn, flags; struct zonelist *zonelist; struct zone *zone; struct zoneref *z; zonelist = node_zonelist(nid, gfp_mask); for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) { spin_lock_irqsave(&zone->lock, flags); pfn = ALIGN(zone->zone_start_pfn, nr_pages); while (zone_spans_last_pfn(zone, pfn, nr_pages)) { if (pfn_range_valid_contig(zone, pfn, nr_pages)) { /* * We release the zone lock here because * alloc_contig_range() will also lock the zone * at some point. If there's an allocation * spinning on this lock, it may win the race * and cause alloc_contig_range() to fail... 
*/ spin_unlock_irqrestore(&zone->lock, flags); ret = __alloc_contig_pages(pfn, nr_pages, gfp_mask); if (!ret) return pfn_to_page(pfn); spin_lock_irqsave(&zone->lock, flags); } pfn += nr_pages; } spin_unlock_irqrestore(&zone->lock, flags); } return NULL; } #endif /* CONFIG_CONTIG_ALLOC */ void free_contig_range(unsigned long pfn, unsigned long nr_pages) { unsigned long count = 0; for (; nr_pages--; pfn++) { struct page *page = pfn_to_page(pfn); count += page_count(page) != 1; __free_page(page); } WARN(count != 0, "%lu pages are still in use!\n", count); } EXPORT_SYMBOL(free_contig_range); /* * Effectively disable pcplists for the zone by setting the high limit to 0 * and draining all cpus. A concurrent page freeing on another CPU that's about * to put the page on pcplist will either finish before the drain and the page * will be drained, or observe the new high limit and skip the pcplist. * * Must be paired with a call to zone_pcp_enable(). */ void zone_pcp_disable(struct zone *zone) { mutex_lock(&pcp_batch_high_lock); __zone_set_pageset_high_and_batch(zone, 0, 1); __drain_all_pages(zone, true); } void zone_pcp_enable(struct zone *zone) { __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch); mutex_unlock(&pcp_batch_high_lock); } void zone_pcp_reset(struct zone *zone) { int cpu; struct per_cpu_zonestat *pzstats; if (zone->per_cpu_pageset != &boot_pageset) { for_each_online_cpu(cpu) { pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); drain_zonestat(zone, pzstats); } free_percpu(zone->per_cpu_pageset); zone->per_cpu_pageset = &boot_pageset; if (zone->per_cpu_zonestats != &boot_zonestats) { free_percpu(zone->per_cpu_zonestats); zone->per_cpu_zonestats = &boot_zonestats; } } } #ifdef CONFIG_MEMORY_HOTREMOVE /* * All pages in the range must be in a single zone, must not contain holes, * must span full sections, and must be isolated before calling this function. */ void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) { unsigned long pfn = start_pfn; struct page *page; struct zone *zone; unsigned int order; unsigned long flags; offline_mem_sections(pfn, end_pfn); zone = page_zone(pfn_to_page(pfn)); spin_lock_irqsave(&zone->lock, flags); while (pfn < end_pfn) { page = pfn_to_page(pfn); /* * The HWPoisoned page may be not in buddy system, and * page_count() is not 0. */ if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { pfn++; continue; } /* * At this point all remaining PageOffline() pages have a * reference count of 0 and can simply be skipped. */ if (PageOffline(page)) { BUG_ON(page_count(page)); BUG_ON(PageBuddy(page)); pfn++; continue; } BUG_ON(page_count(page)); BUG_ON(!PageBuddy(page)); order = buddy_order(page); del_page_from_free_list(page, zone, order); pfn += (1 << order); } spin_unlock_irqrestore(&zone->lock, flags); } #endif /* * This function returns a stable result only if called under zone lock. */ bool is_free_buddy_page(struct page *page) { unsigned long pfn = page_to_pfn(page); unsigned int order; for (order = 0; order <= MAX_ORDER; order++) { struct page *page_head = page - (pfn & ((1 << order) - 1)); if (PageBuddy(page_head) && buddy_order_unsafe(page_head) >= order) break; } return order <= MAX_ORDER; } EXPORT_SYMBOL(is_free_buddy_page); #ifdef CONFIG_MEMORY_FAILURE /* * Break down a higher-order page in sub-pages, and keep our target out of * buddy allocator. 
*/ static void break_down_buddy_pages(struct zone *zone, struct page *page, struct page *target, int low, int high, int migratetype) { unsigned long size = 1 << high; struct page *current_buddy, *next_page; while (high > low) { high--; size >>= 1; if (target >= &page[size]) { next_page = page + size; current_buddy = page; } else { next_page = page; current_buddy = page + size; } if (set_page_guard(zone, current_buddy, high, migratetype)) continue; if (current_buddy != target) { add_to_free_list(current_buddy, zone, high, migratetype); set_buddy_order(current_buddy, high); page = next_page; } } } /* * Take a page that will be marked as poisoned off the buddy allocator. */ bool take_page_off_buddy(struct page *page) { struct zone *zone = page_zone(page); unsigned long pfn = page_to_pfn(page); unsigned long flags; unsigned int order; bool ret = false; spin_lock_irqsave(&zone->lock, flags); for (order = 0; order <= MAX_ORDER; order++) { struct page *page_head = page - (pfn & ((1 << order) - 1)); int page_order = buddy_order(page_head); if (PageBuddy(page_head) && page_order >= order) { unsigned long pfn_head = page_to_pfn(page_head); int migratetype = get_pfnblock_migratetype(page_head, pfn_head); del_page_from_free_list(page_head, zone, page_order); break_down_buddy_pages(zone, page_head, page, 0, page_order, migratetype); SetPageHWPoisonTakenOff(page); if (!is_migrate_isolate(migratetype)) __mod_zone_freepage_state(zone, -1, migratetype); ret = true; break; } if (page_count(page_head) > 0) break; } spin_unlock_irqrestore(&zone->lock, flags); return ret; } /* * Cancel takeoff done by take_page_off_buddy(). */ bool put_page_back_buddy(struct page *page) { struct zone *zone = page_zone(page); unsigned long pfn = page_to_pfn(page); unsigned long flags; int migratetype = get_pfnblock_migratetype(page, pfn); bool ret = false; spin_lock_irqsave(&zone->lock, flags); if (put_page_testzero(page)) { ClearPageHWPoisonTakenOff(page); __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); if (TestClearPageHWPoison(page)) { ret = true; } } spin_unlock_irqrestore(&zone->lock, flags); return ret; } #endif #ifdef CONFIG_ZONE_DMA bool has_managed_dma(void) { struct pglist_data *pgdat; for_each_online_pgdat(pgdat) { struct zone *zone = &pgdat->node_zones[ZONE_DMA]; if (managed_zone(zone)) return true; } return false; } #endif /* CONFIG_ZONE_DMA */ #ifdef CONFIG_UNACCEPTED_MEMORY /* Counts number of zones with unaccepted pages. 
*/ static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages); static bool lazy_accept = true; static int __init accept_memory_parse(char *p) { if (!strcmp(p, "lazy")) { lazy_accept = true; return 0; } else if (!strcmp(p, "eager")) { lazy_accept = false; return 0; } else { return -EINVAL; } } early_param("accept_memory", accept_memory_parse); static bool page_contains_unaccepted(struct page *page, unsigned int order) { phys_addr_t start = page_to_phys(page); phys_addr_t end = start + (PAGE_SIZE << order); return range_contains_unaccepted_memory(start, end); } static void accept_page(struct page *page, unsigned int order) { phys_addr_t start = page_to_phys(page); accept_memory(start, start + (PAGE_SIZE << order)); } static bool try_to_accept_memory_one(struct zone *zone) { unsigned long flags; struct page *page; bool last; if (list_empty(&zone->unaccepted_pages)) return false; spin_lock_irqsave(&zone->lock, flags); page = list_first_entry_or_null(&zone->unaccepted_pages, struct page, lru); if (!page) { spin_unlock_irqrestore(&zone->lock, flags); return false; } list_del(&page->lru); last = list_empty(&zone->unaccepted_pages); __mod_zone_freepage_state(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); spin_unlock_irqrestore(&zone->lock, flags); accept_page(page, MAX_ORDER); __free_pages_ok(page, MAX_ORDER, FPI_TO_TAIL); if (last) static_branch_dec(&zones_with_unaccepted_pages); return true; } static bool try_to_accept_memory(struct zone *zone, unsigned int order) { long to_accept; int ret = false; /* How much to accept to get to high watermark? */ to_accept = high_wmark_pages(zone) - (zone_page_state(zone, NR_FREE_PAGES) - __zone_watermark_unusable_free(zone, order, 0)); /* Accept at least one page */ do { if (!try_to_accept_memory_one(zone)) break; ret = true; to_accept -= MAX_ORDER_NR_PAGES; } while (to_accept > 0); return ret; } static inline bool has_unaccepted_memory(void) { return static_branch_unlikely(&zones_with_unaccepted_pages); } static bool __free_unaccepted(struct page *page) { struct zone *zone = page_zone(page); unsigned long flags; bool first = false; if (!lazy_accept) return false; spin_lock_irqsave(&zone->lock, flags); first = list_empty(&zone->unaccepted_pages); list_add_tail(&page->lru, &zone->unaccepted_pages); __mod_zone_freepage_state(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES); spin_unlock_irqrestore(&zone->lock, flags); if (first) static_branch_inc(&zones_with_unaccepted_pages); return true; } #else static bool page_contains_unaccepted(struct page *page, unsigned int order) { return false; } static void accept_page(struct page *page, unsigned int order) { } static bool try_to_accept_memory(struct zone *zone, unsigned int order) { return false; } static inline bool has_unaccepted_memory(void) { return false; } static bool __free_unaccepted(struct page *page) { BUILD_BUG(); return false; } #endif /* CONFIG_UNACCEPTED_MEMORY */
linux-master
mm/page_alloc.c
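The mm/page_alloc.c excerpt above derives the boot-time watermark floor from lowmem size: calculate_min_free_kbytes() computes int_sqrt(lowmem_kbytes * 16) and clamps the result to 128..262144 kB unless a user-supplied min_free_kbytes takes precedence. The standalone C sketch below is illustrative only; it is not kernel code, and the isqrt()/clamp_long() helpers are hypothetical stand-ins for the kernel's int_sqrt() and clamp(). It reproduces the arithmetic for the memory sizes listed in the source comment so the 16MB: 512k .. 16384MB: 16384k table can be checked directly.

/*
 * Illustrative userspace sketch, not kernel code: reproduce the
 * min_free_kbytes = sqrt(lowmem_kbytes * 16) heuristic and the
 * clamp to [128, 262144] kB applied by calculate_min_free_kbytes().
 * isqrt() and clamp_long() are hypothetical stand-ins for the
 * kernel's int_sqrt() and clamp().
 */
#include <stdio.h>

/* Bit-by-bit integer square root; adequate for inputs below 2^32. */
static unsigned long isqrt(unsigned long x)
{
	unsigned long r = 0, bit = 1UL << 30;

	while (bit > x)
		bit >>= 2;
	while (bit) {
		if (x >= r + bit) {
			x -= r + bit;
			r = (r >> 1) + bit;
		} else {
			r >>= 1;
		}
		bit >>= 2;
	}
	return r;
}

static long clamp_long(long v, long lo, long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	/* lowmem sizes in MB, mirroring the table in the source comment */
	static const unsigned long mbs[] = { 16, 32, 64, 128, 256, 512,
					     1024, 2048, 4096, 8192, 16384 };

	for (unsigned int i = 0; i < sizeof(mbs) / sizeof(mbs[0]); i++) {
		unsigned long lowmem_kbytes = mbs[i] * 1024;
		long new_min = (long)isqrt(lowmem_kbytes * 16);

		printf("%7luMB: %ldk (clamped: %ldk)\n", mbs[i],
		       new_min, clamp_long(new_min, 128, 262144));
	}
	return 0;
}

Running the sketch prints 512k for 16MB, 724k for 32MB, and so on, matching the table; the clamp column only differs once the raw value falls outside 128..262144 kB.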
// SPDX-License-Identifier: GPL-2.0 /* * linux/mm/mlock.c * * (C) Copyright 1995 Linus Torvalds * (C) Copyright 2002 Christoph Hellwig */ #include <linux/capability.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/sched/user.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/pagewalk.h> #include <linux/mempolicy.h> #include <linux/syscalls.h> #include <linux/sched.h> #include <linux/export.h> #include <linux/rmap.h> #include <linux/mmzone.h> #include <linux/hugetlb.h> #include <linux/memcontrol.h> #include <linux/mm_inline.h> #include <linux/secretmem.h> #include "internal.h" struct mlock_fbatch { local_lock_t lock; struct folio_batch fbatch; }; static DEFINE_PER_CPU(struct mlock_fbatch, mlock_fbatch) = { .lock = INIT_LOCAL_LOCK(lock), }; bool can_do_mlock(void) { if (rlimit(RLIMIT_MEMLOCK) != 0) return true; if (capable(CAP_IPC_LOCK)) return true; return false; } EXPORT_SYMBOL(can_do_mlock); /* * Mlocked folios are marked with the PG_mlocked flag for efficient testing * in vmscan and, possibly, the fault path; and to support semi-accurate * statistics. * * An mlocked folio [folio_test_mlocked(folio)] is unevictable. As such, it * will be ostensibly placed on the LRU "unevictable" list (actually no such * list exists), rather than the [in]active lists. PG_unevictable is set to * indicate the unevictable state. */ static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec) { /* There is nothing more we can do while it's off LRU */ if (!folio_test_clear_lru(folio)) return lruvec; lruvec = folio_lruvec_relock_irq(folio, lruvec); if (unlikely(folio_evictable(folio))) { /* * This is a little surprising, but quite possible: PG_mlocked * must have got cleared already by another CPU. Could this * folio be unevictable? I'm not sure, but move it now if so. 
*/ if (folio_test_unevictable(folio)) { lruvec_del_folio(lruvec, folio); folio_clear_unevictable(folio); lruvec_add_folio(lruvec, folio); __count_vm_events(UNEVICTABLE_PGRESCUED, folio_nr_pages(folio)); } goto out; } if (folio_test_unevictable(folio)) { if (folio_test_mlocked(folio)) folio->mlock_count++; goto out; } lruvec_del_folio(lruvec, folio); folio_clear_active(folio); folio_set_unevictable(folio); folio->mlock_count = !!folio_test_mlocked(folio); lruvec_add_folio(lruvec, folio); __count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio)); out: folio_set_lru(folio); return lruvec; } static struct lruvec *__mlock_new_folio(struct folio *folio, struct lruvec *lruvec) { VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); lruvec = folio_lruvec_relock_irq(folio, lruvec); /* As above, this is a little surprising, but possible */ if (unlikely(folio_evictable(folio))) goto out; folio_set_unevictable(folio); folio->mlock_count = !!folio_test_mlocked(folio); __count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio)); out: lruvec_add_folio(lruvec, folio); folio_set_lru(folio); return lruvec; } static struct lruvec *__munlock_folio(struct folio *folio, struct lruvec *lruvec) { int nr_pages = folio_nr_pages(folio); bool isolated = false; if (!folio_test_clear_lru(folio)) goto munlock; isolated = true; lruvec = folio_lruvec_relock_irq(folio, lruvec); if (folio_test_unevictable(folio)) { /* Then mlock_count is maintained, but might undercount */ if (folio->mlock_count) folio->mlock_count--; if (folio->mlock_count) goto out; } /* else assume that was the last mlock: reclaim will fix it if not */ munlock: if (folio_test_clear_mlocked(folio)) { __zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); if (isolated || !folio_test_unevictable(folio)) __count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages); else __count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages); } /* folio_evictable() has to be checked *after* clearing Mlocked */ if (isolated && folio_test_unevictable(folio) && folio_evictable(folio)) { lruvec_del_folio(lruvec, folio); folio_clear_unevictable(folio); lruvec_add_folio(lruvec, folio); __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages); } out: if (isolated) folio_set_lru(folio); return lruvec; } /* * Flags held in the low bits of a struct folio pointer on the mlock_fbatch. */ #define LRU_FOLIO 0x1 #define NEW_FOLIO 0x2 static inline struct folio *mlock_lru(struct folio *folio) { return (struct folio *)((unsigned long)folio + LRU_FOLIO); } static inline struct folio *mlock_new(struct folio *folio) { return (struct folio *)((unsigned long)folio + NEW_FOLIO); } /* * mlock_folio_batch() is derived from folio_batch_move_lru(): perhaps that can * make use of such folio pointer flags in future, but for now just keep it for * mlock. We could use three separate folio batches instead, but one feels * better (munlocking a full folio batch does not need to drain mlocking folio * batches first). 
*/ static void mlock_folio_batch(struct folio_batch *fbatch) { struct lruvec *lruvec = NULL; unsigned long mlock; struct folio *folio; int i; for (i = 0; i < folio_batch_count(fbatch); i++) { folio = fbatch->folios[i]; mlock = (unsigned long)folio & (LRU_FOLIO | NEW_FOLIO); folio = (struct folio *)((unsigned long)folio - mlock); fbatch->folios[i] = folio; if (mlock & LRU_FOLIO) lruvec = __mlock_folio(folio, lruvec); else if (mlock & NEW_FOLIO) lruvec = __mlock_new_folio(folio, lruvec); else lruvec = __munlock_folio(folio, lruvec); } if (lruvec) unlock_page_lruvec_irq(lruvec); folios_put(fbatch->folios, folio_batch_count(fbatch)); folio_batch_reinit(fbatch); } void mlock_drain_local(void) { struct folio_batch *fbatch; local_lock(&mlock_fbatch.lock); fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); if (folio_batch_count(fbatch)) mlock_folio_batch(fbatch); local_unlock(&mlock_fbatch.lock); } void mlock_drain_remote(int cpu) { struct folio_batch *fbatch; WARN_ON_ONCE(cpu_online(cpu)); fbatch = &per_cpu(mlock_fbatch.fbatch, cpu); if (folio_batch_count(fbatch)) mlock_folio_batch(fbatch); } bool need_mlock_drain(int cpu) { return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu)); } /** * mlock_folio - mlock a folio already on (or temporarily off) LRU * @folio: folio to be mlocked. */ void mlock_folio(struct folio *folio) { struct folio_batch *fbatch; local_lock(&mlock_fbatch.lock); fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); if (!folio_test_set_mlocked(folio)) { int nr_pages = folio_nr_pages(folio); zone_stat_mod_folio(folio, NR_MLOCK, nr_pages); __count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages); } folio_get(folio); if (!folio_batch_add(fbatch, mlock_lru(folio)) || folio_test_large(folio) || lru_cache_disabled()) mlock_folio_batch(fbatch); local_unlock(&mlock_fbatch.lock); } /** * mlock_new_folio - mlock a newly allocated folio not yet on LRU * @folio: folio to be mlocked, either normal or a THP head. */ void mlock_new_folio(struct folio *folio) { struct folio_batch *fbatch; int nr_pages = folio_nr_pages(folio); local_lock(&mlock_fbatch.lock); fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); folio_set_mlocked(folio); zone_stat_mod_folio(folio, NR_MLOCK, nr_pages); __count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages); folio_get(folio); if (!folio_batch_add(fbatch, mlock_new(folio)) || folio_test_large(folio) || lru_cache_disabled()) mlock_folio_batch(fbatch); local_unlock(&mlock_fbatch.lock); } /** * munlock_folio - munlock a folio * @folio: folio to be munlocked, either normal or a THP head. */ void munlock_folio(struct folio *folio) { struct folio_batch *fbatch; local_lock(&mlock_fbatch.lock); fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); /* * folio_test_clear_mlocked(folio) must be left to __munlock_folio(), * which will check whether the folio is multiply mlocked. 
*/ folio_get(folio); if (!folio_batch_add(fbatch, folio) || folio_test_large(folio) || lru_cache_disabled()) mlock_folio_batch(fbatch); local_unlock(&mlock_fbatch.lock); } static int mlock_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->vma; spinlock_t *ptl; pte_t *start_pte, *pte; pte_t ptent; struct folio *folio; ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { if (!pmd_present(*pmd)) goto out; if (is_huge_zero_pmd(*pmd)) goto out; folio = page_folio(pmd_page(*pmd)); if (vma->vm_flags & VM_LOCKED) mlock_folio(folio); else munlock_folio(folio); goto out; } start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); if (!start_pte) { walk->action = ACTION_AGAIN; return 0; } for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) { ptent = ptep_get(pte); if (!pte_present(ptent)) continue; folio = vm_normal_folio(vma, addr, ptent); if (!folio || folio_is_zone_device(folio)) continue; if (folio_test_large(folio)) continue; if (vma->vm_flags & VM_LOCKED) mlock_folio(folio); else munlock_folio(folio); } pte_unmap(start_pte); out: spin_unlock(ptl); cond_resched(); return 0; } /* * mlock_vma_pages_range() - mlock any pages already in the range, * or munlock all pages in the range. * @vma - vma containing range to be mlock()ed or munlock()ed * @start - start address in @vma of the range * @end - end of range in @vma * @newflags - the new set of flags for @vma. * * Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED; * called for munlock() and munlockall(), to clear VM_LOCKED from @vma. */ static void mlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, vm_flags_t newflags) { static const struct mm_walk_ops mlock_walk_ops = { .pmd_entry = mlock_pte_range, .walk_lock = PGWALK_WRLOCK_VERIFY, }; /* * There is a slight chance that concurrent page migration, * or page reclaim finding a page of this now-VM_LOCKED vma, * will call mlock_vma_folio() and raise page's mlock_count: * double counting, leaving the page unevictable indefinitely. * Communicate this danger to mlock_vma_folio() with VM_IO, * which is a VM_SPECIAL flag not allowed on VM_LOCKED vmas. * mmap_lock is held in write mode here, so this weird * combination should not be visible to other mmap_lock users; * but WRITE_ONCE so rmap walkers must see VM_IO if VM_LOCKED. */ if (newflags & VM_LOCKED) newflags |= VM_IO; vma_start_write(vma); vm_flags_reset_once(vma, newflags); lru_add_drain(); walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL); lru_add_drain(); if (newflags & VM_IO) { newflags &= ~VM_IO; vm_flags_reset_once(vma, newflags); } } /* * mlock_fixup - handle mlock[all]/munlock[all] requests. * * Filters out "special" vmas -- VM_LOCKED never gets set for these, and * munlock is a no-op. However, for some special vmas, we go ahead and * populate the ptes. * * For vmas that pass the filters, merge/split as appropriate. 
*/ static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, vm_flags_t newflags) { struct mm_struct *mm = vma->vm_mm; pgoff_t pgoff; int nr_pages; int ret = 0; vm_flags_t oldflags = vma->vm_flags; if (newflags == oldflags || (oldflags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) || vma_is_dax(vma) || vma_is_secretmem(vma)) /* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */ goto out; pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); *prev = vma_merge(vmi, mm, *prev, start, end, newflags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), vma->vm_userfaultfd_ctx, anon_vma_name(vma)); if (*prev) { vma = *prev; goto success; } if (start != vma->vm_start) { ret = split_vma(vmi, vma, start, 1); if (ret) goto out; } if (end != vma->vm_end) { ret = split_vma(vmi, vma, end, 0); if (ret) goto out; } success: /* * Keep track of amount of locked VM. */ nr_pages = (end - start) >> PAGE_SHIFT; if (!(newflags & VM_LOCKED)) nr_pages = -nr_pages; else if (oldflags & VM_LOCKED) nr_pages = 0; mm->locked_vm += nr_pages; /* * vm_flags is protected by the mmap_lock held in write mode. * It's okay if try_to_unmap_one unmaps a page just after we * set VM_LOCKED, populate_vma_page_range will bring it back. */ if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) { /* No work to do, and mlocking twice would be wrong */ vma_start_write(vma); vm_flags_reset(vma, newflags); } else { mlock_vma_pages_range(vma, start, end, newflags); } out: *prev = vma; return ret; } static int apply_vma_lock_flags(unsigned long start, size_t len, vm_flags_t flags) { unsigned long nstart, end, tmp; struct vm_area_struct *vma, *prev; VMA_ITERATOR(vmi, current->mm, start); VM_BUG_ON(offset_in_page(start)); VM_BUG_ON(len != PAGE_ALIGN(len)); end = start + len; if (end < start) return -EINVAL; if (end == start) return 0; vma = vma_iter_load(&vmi); if (!vma) return -ENOMEM; prev = vma_prev(&vmi); if (start > vma->vm_start) prev = vma; nstart = start; tmp = vma->vm_start; for_each_vma_range(vmi, vma, end) { int error; vm_flags_t newflags; if (vma->vm_start != tmp) return -ENOMEM; newflags = vma->vm_flags & ~VM_LOCKED_MASK; newflags |= flags; /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ tmp = vma->vm_end; if (tmp > end) tmp = end; error = mlock_fixup(&vmi, vma, &prev, nstart, tmp, newflags); if (error) return error; tmp = vma_iter_end(&vmi); nstart = tmp; } if (tmp < end) return -ENOMEM; return 0; } /* * Go through vma areas and sum size of mlocked * vma pages, as return value. * Note deferred memory locking case(mlock2(,,MLOCK_ONFAULT) * is also counted. 
* Return value: previously mlocked page counts */ static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm, unsigned long start, size_t len) { struct vm_area_struct *vma; unsigned long count = 0; unsigned long end; VMA_ITERATOR(vmi, mm, start); /* Don't overflow past ULONG_MAX */ if (unlikely(ULONG_MAX - len < start)) end = ULONG_MAX; else end = start + len; for_each_vma_range(vmi, vma, end) { if (vma->vm_flags & VM_LOCKED) { if (start > vma->vm_start) count -= (start - vma->vm_start); if (end < vma->vm_end) { count += end - vma->vm_start; break; } count += vma->vm_end - vma->vm_start; } } return count >> PAGE_SHIFT; } /* * convert get_user_pages() return value to posix mlock() error */ static int __mlock_posix_error_return(long retval) { if (retval == -EFAULT) retval = -ENOMEM; else if (retval == -ENOMEM) retval = -EAGAIN; return retval; } static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags) { unsigned long locked; unsigned long lock_limit; int error = -ENOMEM; start = untagged_addr(start); if (!can_do_mlock()) return -EPERM; len = PAGE_ALIGN(len + (offset_in_page(start))); start &= PAGE_MASK; lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; locked = len >> PAGE_SHIFT; if (mmap_write_lock_killable(current->mm)) return -EINTR; locked += current->mm->locked_vm; if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) { /* * It is possible that the regions requested intersect with * previously mlocked areas, that part area in "mm->locked_vm" * should not be counted to new mlock increment count. So check * and adjust locked count if necessary. */ locked -= count_mm_mlocked_page_nr(current->mm, start, len); } /* check against resource limits */ if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) error = apply_vma_lock_flags(start, len, flags); mmap_write_unlock(current->mm); if (error) return error; error = __mm_populate(start, len, 0); if (error) return __mlock_posix_error_return(error); return 0; } SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) { return do_mlock(start, len, VM_LOCKED); } SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags) { vm_flags_t vm_flags = VM_LOCKED; if (flags & ~MLOCK_ONFAULT) return -EINVAL; if (flags & MLOCK_ONFAULT) vm_flags |= VM_LOCKONFAULT; return do_mlock(start, len, vm_flags); } SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) { int ret; start = untagged_addr(start); len = PAGE_ALIGN(len + (offset_in_page(start))); start &= PAGE_MASK; if (mmap_write_lock_killable(current->mm)) return -EINTR; ret = apply_vma_lock_flags(start, len, 0); mmap_write_unlock(current->mm); return ret; } /* * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall) * and translate into the appropriate modifications to mm->def_flags and/or the * flags for all current VMAs. * * There are a couple of subtleties with this. If mlockall() is called multiple * times with different flags, the values do not necessarily stack. If mlockall * is called once including the MCL_FUTURE flag and then a second time without * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags. 
*/ static int apply_mlockall_flags(int flags) { VMA_ITERATOR(vmi, current->mm, 0); struct vm_area_struct *vma, *prev = NULL; vm_flags_t to_add = 0; current->mm->def_flags &= ~VM_LOCKED_MASK; if (flags & MCL_FUTURE) { current->mm->def_flags |= VM_LOCKED; if (flags & MCL_ONFAULT) current->mm->def_flags |= VM_LOCKONFAULT; if (!(flags & MCL_CURRENT)) goto out; } if (flags & MCL_CURRENT) { to_add |= VM_LOCKED; if (flags & MCL_ONFAULT) to_add |= VM_LOCKONFAULT; } for_each_vma(vmi, vma) { vm_flags_t newflags; newflags = vma->vm_flags & ~VM_LOCKED_MASK; newflags |= to_add; /* Ignore errors */ mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end, newflags); cond_resched(); } out: return 0; } SYSCALL_DEFINE1(mlockall, int, flags) { unsigned long lock_limit; int ret; if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) || flags == MCL_ONFAULT) return -EINVAL; if (!can_do_mlock()) return -EPERM; lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; if (mmap_write_lock_killable(current->mm)) return -EINTR; ret = -ENOMEM; if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || capable(CAP_IPC_LOCK)) ret = apply_mlockall_flags(flags); mmap_write_unlock(current->mm); if (!ret && (flags & MCL_CURRENT)) mm_populate(0, TASK_SIZE); return ret; } SYSCALL_DEFINE0(munlockall) { int ret; if (mmap_write_lock_killable(current->mm)) return -EINTR; ret = apply_mlockall_flags(0); mmap_write_unlock(current->mm); return ret; } /* * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB * shm segments) get accounted against the user_struct instead. */ static DEFINE_SPINLOCK(shmlock_user_lock); int user_shm_lock(size_t size, struct ucounts *ucounts) { unsigned long lock_limit, locked; long memlock; int allowed = 0; locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; lock_limit = rlimit(RLIMIT_MEMLOCK); if (lock_limit != RLIM_INFINITY) lock_limit >>= PAGE_SHIFT; spin_lock(&shmlock_user_lock); memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked); if ((memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) { dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked); goto out; } if (!get_ucounts(ucounts)) { dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked); allowed = 0; goto out; } allowed = 1; out: spin_unlock(&shmlock_user_lock); return allowed; } void user_shm_unlock(size_t size, struct ucounts *ucounts) { spin_lock(&shmlock_user_lock); dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); spin_unlock(&shmlock_user_lock); put_ucounts(ucounts); }
linux-master
mm/mlock.c
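The mm/mlock.c excerpt above is the kernel side of mlock(2)/munlock(2): do_mlock() page-aligns the range, checks the caller against RLIMIT_MEMLOCK (or CAP_IPC_LOCK), adjusts mm->locked_vm, and then populates the range. The small program below is a hedged illustration of the same interface from the caller's side; it uses only standard POSIX calls (mlock, munlock, getrlimit, posix_memalign) and is not derived from the kernel sources.

/*
 * Illustrative userspace sketch, not part of mm/mlock.c: exercise the
 * mlock()/munlock() syscalls whose kernel-side handlers appear above,
 * and show the RLIMIT_MEMLOCK limit consulted by can_do_mlock() and
 * do_mlock().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 4 * (size_t)page;
	struct rlimit rl;
	void *buf;

	if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0)
		printf("RLIMIT_MEMLOCK: soft=%llu hard=%llu bytes\n",
		       (unsigned long long)rl.rlim_cur,
		       (unsigned long long)rl.rlim_max);

	/*
	 * do_mlock() page-aligns the request (start &= PAGE_MASK,
	 * len = PAGE_ALIGN(len + offset_in_page(start))), so locking an
	 * aligned buffer keeps the locked_vm accounting easy to predict.
	 */
	if (posix_memalign(&buf, (size_t)page, len) != 0) {
		perror("posix_memalign");
		return 1;
	}
	memset(buf, 0, len);	/* touch the pages before locking */

	if (mlock(buf, len) != 0) {
		/*
		 * EPERM maps to the can_do_mlock() check; ENOMEM maps to
		 * the locked_vm vs. RLIMIT_MEMLOCK comparison in do_mlock().
		 */
		perror("mlock");
	} else {
		printf("locked %zu bytes at %p\n", len, buf);
		if (munlock(buf, len) != 0)
			perror("munlock");
	}

	free(buf);
	return 0;
}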
// SPDX-License-Identifier: GPL-2.0 #include <linux/debugfs.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/memblock.h> #include <linux/stacktrace.h> #include <linux/page_owner.h> #include <linux/jump_label.h> #include <linux/migrate.h> #include <linux/stackdepot.h> #include <linux/seq_file.h> #include <linux/memcontrol.h> #include <linux/sched/clock.h> #include "internal.h" /* * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack) * to use off stack temporal storage */ #define PAGE_OWNER_STACK_DEPTH (16) struct page_owner { unsigned short order; short last_migrate_reason; gfp_t gfp_mask; depot_stack_handle_t handle; depot_stack_handle_t free_handle; u64 ts_nsec; u64 free_ts_nsec; char comm[TASK_COMM_LEN]; pid_t pid; pid_t tgid; }; static bool page_owner_enabled __initdata; DEFINE_STATIC_KEY_FALSE(page_owner_inited); static depot_stack_handle_t dummy_handle; static depot_stack_handle_t failure_handle; static depot_stack_handle_t early_handle; static void init_early_allocated_pages(void); static int __init early_page_owner_param(char *buf) { int ret = kstrtobool(buf, &page_owner_enabled); if (page_owner_enabled) stack_depot_request_early_init(); return ret; } early_param("page_owner", early_page_owner_param); static __init bool need_page_owner(void) { return page_owner_enabled; } static __always_inline depot_stack_handle_t create_dummy_stack(void) { unsigned long entries[4]; unsigned int nr_entries; nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0); return stack_depot_save(entries, nr_entries, GFP_KERNEL); } static noinline void register_dummy_stack(void) { dummy_handle = create_dummy_stack(); } static noinline void register_failure_stack(void) { failure_handle = create_dummy_stack(); } static noinline void register_early_stack(void) { early_handle = create_dummy_stack(); } static __init void init_page_owner(void) { if (!page_owner_enabled) return; register_dummy_stack(); register_failure_stack(); register_early_stack(); static_branch_enable(&page_owner_inited); init_early_allocated_pages(); } struct page_ext_operations page_owner_ops = { .size = sizeof(struct page_owner), .need = need_page_owner, .init = init_page_owner, .need_shared_flags = true, }; static inline struct page_owner *get_page_owner(struct page_ext *page_ext) { return page_ext_data(page_ext, &page_owner_ops); } static noinline depot_stack_handle_t save_stack(gfp_t flags) { unsigned long entries[PAGE_OWNER_STACK_DEPTH]; depot_stack_handle_t handle; unsigned int nr_entries; /* * Avoid recursion. 
* * Sometimes page metadata allocation tracking requires more * memory to be allocated: * - when new stack trace is saved to stack depot * - when backtrace itself is calculated (ia64) */ if (current->in_page_owner) return dummy_handle; current->in_page_owner = 1; nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2); handle = stack_depot_save(entries, nr_entries, flags); if (!handle) handle = failure_handle; current->in_page_owner = 0; return handle; } void __reset_page_owner(struct page *page, unsigned short order) { int i; struct page_ext *page_ext; depot_stack_handle_t handle; struct page_owner *page_owner; u64 free_ts_nsec = local_clock(); page_ext = page_ext_get(page); if (unlikely(!page_ext)) return; handle = save_stack(GFP_NOWAIT | __GFP_NOWARN); for (i = 0; i < (1 << order); i++) { __clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags); page_owner = get_page_owner(page_ext); page_owner->free_handle = handle; page_owner->free_ts_nsec = free_ts_nsec; page_ext = page_ext_next(page_ext); } page_ext_put(page_ext); } static inline void __set_page_owner_handle(struct page_ext *page_ext, depot_stack_handle_t handle, unsigned short order, gfp_t gfp_mask) { struct page_owner *page_owner; int i; u64 ts_nsec = local_clock(); for (i = 0; i < (1 << order); i++) { page_owner = get_page_owner(page_ext); page_owner->handle = handle; page_owner->order = order; page_owner->gfp_mask = gfp_mask; page_owner->last_migrate_reason = -1; page_owner->pid = current->pid; page_owner->tgid = current->tgid; page_owner->ts_nsec = ts_nsec; strscpy(page_owner->comm, current->comm, sizeof(page_owner->comm)); __set_bit(PAGE_EXT_OWNER, &page_ext->flags); __set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags); page_ext = page_ext_next(page_ext); } } noinline void __set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask) { struct page_ext *page_ext; depot_stack_handle_t handle; handle = save_stack(gfp_mask); page_ext = page_ext_get(page); if (unlikely(!page_ext)) return; __set_page_owner_handle(page_ext, handle, order, gfp_mask); page_ext_put(page_ext); } void __set_page_owner_migrate_reason(struct page *page, int reason) { struct page_ext *page_ext = page_ext_get(page); struct page_owner *page_owner; if (unlikely(!page_ext)) return; page_owner = get_page_owner(page_ext); page_owner->last_migrate_reason = reason; page_ext_put(page_ext); } void __split_page_owner(struct page *page, unsigned int nr) { int i; struct page_ext *page_ext = page_ext_get(page); struct page_owner *page_owner; if (unlikely(!page_ext)) return; for (i = 0; i < nr; i++) { page_owner = get_page_owner(page_ext); page_owner->order = 0; page_ext = page_ext_next(page_ext); } page_ext_put(page_ext); } void __folio_copy_owner(struct folio *newfolio, struct folio *old) { struct page_ext *old_ext; struct page_ext *new_ext; struct page_owner *old_page_owner, *new_page_owner; old_ext = page_ext_get(&old->page); if (unlikely(!old_ext)) return; new_ext = page_ext_get(&newfolio->page); if (unlikely(!new_ext)) { page_ext_put(old_ext); return; } old_page_owner = get_page_owner(old_ext); new_page_owner = get_page_owner(new_ext); new_page_owner->order = old_page_owner->order; new_page_owner->gfp_mask = old_page_owner->gfp_mask; new_page_owner->last_migrate_reason = old_page_owner->last_migrate_reason; new_page_owner->handle = old_page_owner->handle; new_page_owner->pid = old_page_owner->pid; new_page_owner->tgid = old_page_owner->tgid; new_page_owner->ts_nsec = old_page_owner->ts_nsec; new_page_owner->free_ts_nsec = old_page_owner->ts_nsec; 
strcpy(new_page_owner->comm, old_page_owner->comm); /* * We don't clear the bit on the old folio as it's going to be freed * after migration. Until then, the info can be useful in case of * a bug, and the overall stats will be off a bit only temporarily. * Also, migrate_misplaced_transhuge_page() can still fail the * migration and then we want the old folio to retain the info. But * in that case we also don't need to explicitly clear the info from * the new page, which will be freed. */ __set_bit(PAGE_EXT_OWNER, &new_ext->flags); __set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags); page_ext_put(new_ext); page_ext_put(old_ext); } void pagetypeinfo_showmixedcount_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone) { struct page *page; struct page_ext *page_ext; struct page_owner *page_owner; unsigned long pfn, block_end_pfn; unsigned long end_pfn = zone_end_pfn(zone); unsigned long count[MIGRATE_TYPES] = { 0, }; int pageblock_mt, page_mt; int i; /* Scan block by block. First and last block may be incomplete */ pfn = zone->zone_start_pfn; /* * Walk the zone in pageblock_nr_pages steps. If a page block spans * a zone boundary, it will be double counted between zones. This does * not matter as the mixed block count will still be correct */ for (; pfn < end_pfn; ) { page = pfn_to_online_page(pfn); if (!page) { pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES); continue; } block_end_pfn = pageblock_end_pfn(pfn); block_end_pfn = min(block_end_pfn, end_pfn); pageblock_mt = get_pageblock_migratetype(page); for (; pfn < block_end_pfn; pfn++) { /* The pageblock is online, no need to recheck. */ page = pfn_to_page(pfn); if (page_zone(page) != zone) continue; if (PageBuddy(page)) { unsigned long freepage_order; freepage_order = buddy_order_unsafe(page); if (freepage_order <= MAX_ORDER) pfn += (1UL << freepage_order) - 1; continue; } if (PageReserved(page)) continue; page_ext = page_ext_get(page); if (unlikely(!page_ext)) continue; if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags)) goto ext_put_continue; page_owner = get_page_owner(page_ext); page_mt = gfp_migratetype(page_owner->gfp_mask); if (pageblock_mt != page_mt) { if (is_migrate_cma(pageblock_mt)) count[MIGRATE_MOVABLE]++; else count[pageblock_mt]++; pfn = block_end_pfn; page_ext_put(page_ext); break; } pfn += (1UL << page_owner->order) - 1; ext_put_continue: page_ext_put(page_ext); } } /* Print counts */ seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); for (i = 0; i < MIGRATE_TYPES; i++) seq_printf(m, "%12lu ", count[i]); seq_putc(m, '\n'); } /* * Looking for memcg information and print it out */ static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret, struct page *page) { #ifdef CONFIG_MEMCG unsigned long memcg_data; struct mem_cgroup *memcg; bool online; char name[80]; rcu_read_lock(); memcg_data = READ_ONCE(page->memcg_data); if (!memcg_data) goto out_unlock; if (memcg_data & MEMCG_DATA_OBJCGS) ret += scnprintf(kbuf + ret, count - ret, "Slab cache page\n"); memcg = page_memcg_check(page); if (!memcg) goto out_unlock; online = (memcg->css.flags & CSS_ONLINE); cgroup_name(memcg->css.cgroup, name, sizeof(name)); ret += scnprintf(kbuf + ret, count - ret, "Charged %sto %smemcg %s\n", PageMemcgKmem(page) ? "(via objcg) " : "", online ? 
"" : "offline ", name); out_unlock: rcu_read_unlock(); #endif /* CONFIG_MEMCG */ return ret; } static ssize_t print_page_owner(char __user *buf, size_t count, unsigned long pfn, struct page *page, struct page_owner *page_owner, depot_stack_handle_t handle) { int ret, pageblock_mt, page_mt; char *kbuf; count = min_t(size_t, count, PAGE_SIZE); kbuf = kmalloc(count, GFP_KERNEL); if (!kbuf) return -ENOMEM; ret = scnprintf(kbuf, count, "Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns, free_ts %llu ns\n", page_owner->order, page_owner->gfp_mask, &page_owner->gfp_mask, page_owner->pid, page_owner->tgid, page_owner->comm, page_owner->ts_nsec, page_owner->free_ts_nsec); /* Print information relevant to grouping pages by mobility */ pageblock_mt = get_pageblock_migratetype(page); page_mt = gfp_migratetype(page_owner->gfp_mask); ret += scnprintf(kbuf + ret, count - ret, "PFN 0x%lx type %s Block %lu type %s Flags %pGp\n", pfn, migratetype_names[page_mt], pfn >> pageblock_order, migratetype_names[pageblock_mt], &page->flags); ret += stack_depot_snprint(handle, kbuf + ret, count - ret, 0); if (ret >= count) goto err; if (page_owner->last_migrate_reason != -1) { ret += scnprintf(kbuf + ret, count - ret, "Page has been migrated, last migrate reason: %s\n", migrate_reason_names[page_owner->last_migrate_reason]); } ret = print_page_owner_memcg(kbuf, count, ret, page); ret += snprintf(kbuf + ret, count - ret, "\n"); if (ret >= count) goto err; if (copy_to_user(buf, kbuf, ret)) ret = -EFAULT; kfree(kbuf); return ret; err: kfree(kbuf); return -ENOMEM; } void __dump_page_owner(const struct page *page) { struct page_ext *page_ext = page_ext_get((void *)page); struct page_owner *page_owner; depot_stack_handle_t handle; gfp_t gfp_mask; int mt; if (unlikely(!page_ext)) { pr_alert("There is not page extension available.\n"); return; } page_owner = get_page_owner(page_ext); gfp_mask = page_owner->gfp_mask; mt = gfp_migratetype(gfp_mask); if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) { pr_alert("page_owner info is not present (never set?)\n"); page_ext_put(page_ext); return; } if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags)) pr_alert("page_owner tracks the page as allocated\n"); else pr_alert("page_owner tracks the page as freed\n"); pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu, free_ts %llu\n", page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask, page_owner->pid, page_owner->tgid, page_owner->comm, page_owner->ts_nsec, page_owner->free_ts_nsec); handle = READ_ONCE(page_owner->handle); if (!handle) pr_alert("page_owner allocation stack trace missing\n"); else stack_depot_print(handle); handle = READ_ONCE(page_owner->free_handle); if (!handle) { pr_alert("page_owner free stack trace missing\n"); } else { pr_alert("page last free stack trace:\n"); stack_depot_print(handle); } if (page_owner->last_migrate_reason != -1) pr_alert("page has been migrated, last migrate reason: %s\n", migrate_reason_names[page_owner->last_migrate_reason]); page_ext_put(page_ext); } static ssize_t read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long pfn; struct page *page; struct page_ext *page_ext; struct page_owner *page_owner; depot_stack_handle_t handle; if (!static_branch_unlikely(&page_owner_inited)) return -EINVAL; page = NULL; if (*ppos == 0) pfn = min_low_pfn; else pfn = *ppos; /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */ while (!pfn_valid(pfn) && (pfn & 
(MAX_ORDER_NR_PAGES - 1)) != 0) pfn++; /* Find an allocated page */ for (; pfn < max_pfn; pfn++) { /* * This temporary page_owner is required so * that we can avoid the context switches while holding * the rcu lock and copying the page owner information to * user through copy_to_user() or GFP_KERNEL allocations. */ struct page_owner page_owner_tmp; /* * If the new page is in a new MAX_ORDER_NR_PAGES area, * validate the area as existing, skip it if not */ if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) { pfn += MAX_ORDER_NR_PAGES - 1; continue; } page = pfn_to_page(pfn); if (PageBuddy(page)) { unsigned long freepage_order = buddy_order_unsafe(page); if (freepage_order <= MAX_ORDER) pfn += (1UL << freepage_order) - 1; continue; } page_ext = page_ext_get(page); if (unlikely(!page_ext)) continue; /* * Some pages could be missed by concurrent allocation or free, * because we don't hold the zone lock. */ if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) goto ext_put_continue; /* * Although we do have the info about past allocation of free * pages, it's not relevant for current memory usage. */ if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags)) goto ext_put_continue; page_owner = get_page_owner(page_ext); /* * Don't print "tail" pages of high-order allocations as that * would inflate the stats. */ if (!IS_ALIGNED(pfn, 1 << page_owner->order)) goto ext_put_continue; /* * Access to page_ext->handle isn't synchronous so we should * be careful to access it. */ handle = READ_ONCE(page_owner->handle); if (!handle) goto ext_put_continue; /* Record the next PFN to read in the file offset */ *ppos = pfn + 1; page_owner_tmp = *page_owner; page_ext_put(page_ext); return print_page_owner(buf, count, pfn, page, &page_owner_tmp, handle); ext_put_continue: page_ext_put(page_ext); } return 0; } static loff_t lseek_page_owner(struct file *file, loff_t offset, int orig) { switch (orig) { case SEEK_SET: file->f_pos = offset; break; case SEEK_CUR: file->f_pos += offset; break; default: return -EINVAL; } return file->f_pos; } static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone) { unsigned long pfn = zone->zone_start_pfn; unsigned long end_pfn = zone_end_pfn(zone); unsigned long count = 0; /* * Walk the zone in pageblock_nr_pages steps. If a page block spans * a zone boundary, it will be double counted between zones. This does * not matter as the mixed block count will still be correct */ for (; pfn < end_pfn; ) { unsigned long block_end_pfn; if (!pfn_valid(pfn)) { pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES); continue; } block_end_pfn = pageblock_end_pfn(pfn); block_end_pfn = min(block_end_pfn, end_pfn); for (; pfn < block_end_pfn; pfn++) { struct page *page = pfn_to_page(pfn); struct page_ext *page_ext; if (page_zone(page) != zone) continue; /* * To avoid having to grab zone->lock, be a little * careful when reading buddy page order. The only * danger is that we skip too much and potentially miss * some early allocated pages, which is better than * heavy lock contention. 
*/ if (PageBuddy(page)) { unsigned long order = buddy_order_unsafe(page); if (order > 0 && order <= MAX_ORDER) pfn += (1UL << order) - 1; continue; } if (PageReserved(page)) continue; page_ext = page_ext_get(page); if (unlikely(!page_ext)) continue; /* Maybe overlapping zone */ if (test_bit(PAGE_EXT_OWNER, &page_ext->flags)) goto ext_put_continue; /* Found early allocated page */ __set_page_owner_handle(page_ext, early_handle, 0, 0); count++; ext_put_continue: page_ext_put(page_ext); } cond_resched(); } pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n", pgdat->node_id, zone->name, count); } static void init_zones_in_node(pg_data_t *pgdat) { struct zone *zone; struct zone *node_zones = pgdat->node_zones; for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) { if (!populated_zone(zone)) continue; init_pages_in_zone(pgdat, zone); } } static void init_early_allocated_pages(void) { pg_data_t *pgdat; for_each_online_pgdat(pgdat) init_zones_in_node(pgdat); } static const struct file_operations proc_page_owner_operations = { .read = read_page_owner, .llseek = lseek_page_owner, }; static int __init pageowner_init(void) { if (!static_branch_unlikely(&page_owner_inited)) { pr_info("page_owner is disabled\n"); return 0; } debugfs_create_file("page_owner", 0400, NULL, NULL, &proc_page_owner_operations); return 0; } late_initcall(pageowner_init)
linux-master
mm/page_owner.c
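The mm/page_owner.c excerpt above registers a read-only debugfs file named "page_owner" with read_page_owner()/lseek_page_owner() as its handlers, active once the tracker is enabled with the page_owner=on boot parameter. The sketch below is a hedged usage illustration from userspace, not kernel code; the /sys/kernel/debug mount point and the five-record cutoff are assumptions made for the example.

/*
 * Illustrative userspace sketch, not part of mm/page_owner.c: dump a few
 * records from the debugfs file registered by pageowner_init(). Assumes a
 * kernel built with CONFIG_PAGE_OWNER, booted with page_owner=on, debugfs
 * mounted at the conventional /sys/kernel/debug, and root privileges.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

#define PAGE_OWNER_PATH "/sys/kernel/debug/page_owner"

int main(void)
{
	char buf[4096];
	ssize_t n = 0;
	int records = 0;
	int fd = open(PAGE_OWNER_PATH, O_RDONLY);

	if (fd < 0) {
		perror(PAGE_OWNER_PATH);
		return 1;
	}

	/*
	 * read_page_owner() returns one allocation record per read() and
	 * stores the next candidate PFN in the file offset, so sequential
	 * reads walk the tracked pages; stop after a handful here.
	 */
	while (records < 5 && (n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
		fputs("----\n", stdout);
		records++;
	}
	if (n < 0)
		perror("read");

	close(fd);
	return 0;
}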
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved. * Authors: David Chinner and Glauber Costa * * Generic LRU infrastructure */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/list_lru.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/memcontrol.h> #include "slab.h" #include "internal.h" #ifdef CONFIG_MEMCG_KMEM static LIST_HEAD(memcg_list_lrus); static DEFINE_MUTEX(list_lrus_mutex); static inline bool list_lru_memcg_aware(struct list_lru *lru) { return lru->memcg_aware; } static void list_lru_register(struct list_lru *lru) { if (!list_lru_memcg_aware(lru)) return; mutex_lock(&list_lrus_mutex); list_add(&lru->list, &memcg_list_lrus); mutex_unlock(&list_lrus_mutex); } static void list_lru_unregister(struct list_lru *lru) { if (!list_lru_memcg_aware(lru)) return; mutex_lock(&list_lrus_mutex); list_del(&lru->list); mutex_unlock(&list_lrus_mutex); } static int lru_shrinker_id(struct list_lru *lru) { return lru->shrinker_id; } static inline struct list_lru_one * list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx) { if (list_lru_memcg_aware(lru) && idx >= 0) { struct list_lru_memcg *mlru = xa_load(&lru->xa, idx); return mlru ? &mlru->node[nid] : NULL; } return &lru->node[nid].lru; } static inline struct list_lru_one * list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr, struct mem_cgroup **memcg_ptr) { struct list_lru_node *nlru = &lru->node[nid]; struct list_lru_one *l = &nlru->lru; struct mem_cgroup *memcg = NULL; if (!list_lru_memcg_aware(lru)) goto out; memcg = mem_cgroup_from_slab_obj(ptr); if (!memcg) goto out; l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg)); out: if (memcg_ptr) *memcg_ptr = memcg; return l; } #else static void list_lru_register(struct list_lru *lru) { } static void list_lru_unregister(struct list_lru *lru) { } static int lru_shrinker_id(struct list_lru *lru) { return -1; } static inline bool list_lru_memcg_aware(struct list_lru *lru) { return false; } static inline struct list_lru_one * list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx) { return &lru->node[nid].lru; } static inline struct list_lru_one * list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr, struct mem_cgroup **memcg_ptr) { if (memcg_ptr) *memcg_ptr = NULL; return &lru->node[nid].lru; } #endif /* CONFIG_MEMCG_KMEM */ bool list_lru_add(struct list_lru *lru, struct list_head *item) { int nid = page_to_nid(virt_to_page(item)); struct list_lru_node *nlru = &lru->node[nid]; struct mem_cgroup *memcg; struct list_lru_one *l; spin_lock(&nlru->lock); if (list_empty(item)) { l = list_lru_from_kmem(lru, nid, item, &memcg); list_add_tail(item, &l->list); /* Set shrinker bit if the first element was added */ if (!l->nr_items++) set_shrinker_bit(memcg, nid, lru_shrinker_id(lru)); nlru->nr_items++; spin_unlock(&nlru->lock); return true; } spin_unlock(&nlru->lock); return false; } EXPORT_SYMBOL_GPL(list_lru_add); bool list_lru_del(struct list_lru *lru, struct list_head *item) { int nid = page_to_nid(virt_to_page(item)); struct list_lru_node *nlru = &lru->node[nid]; struct list_lru_one *l; spin_lock(&nlru->lock); if (!list_empty(item)) { l = list_lru_from_kmem(lru, nid, item, NULL); list_del_init(item); l->nr_items--; nlru->nr_items--; spin_unlock(&nlru->lock); return true; } spin_unlock(&nlru->lock); return false; } EXPORT_SYMBOL_GPL(list_lru_del); void list_lru_isolate(struct list_lru_one *list, struct list_head *item) { list_del_init(item); 
list->nr_items--; } EXPORT_SYMBOL_GPL(list_lru_isolate); void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item, struct list_head *head) { list_move(item, head); list->nr_items--; } EXPORT_SYMBOL_GPL(list_lru_isolate_move); unsigned long list_lru_count_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg) { struct list_lru_one *l; long count; rcu_read_lock(); l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg)); count = l ? READ_ONCE(l->nr_items) : 0; rcu_read_unlock(); if (unlikely(count < 0)) count = 0; return count; } EXPORT_SYMBOL_GPL(list_lru_count_one); unsigned long list_lru_count_node(struct list_lru *lru, int nid) { struct list_lru_node *nlru; nlru = &lru->node[nid]; return nlru->nr_items; } EXPORT_SYMBOL_GPL(list_lru_count_node); static unsigned long __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk) { struct list_lru_node *nlru = &lru->node[nid]; struct list_lru_one *l; struct list_head *item, *n; unsigned long isolated = 0; restart: l = list_lru_from_memcg_idx(lru, nid, memcg_idx); if (!l) goto out; list_for_each_safe(item, n, &l->list) { enum lru_status ret; /* * decrement nr_to_walk first so that we don't livelock if we * get stuck on large numbers of LRU_RETRY items */ if (!*nr_to_walk) break; --*nr_to_walk; ret = isolate(item, l, &nlru->lock, cb_arg); switch (ret) { case LRU_REMOVED_RETRY: assert_spin_locked(&nlru->lock); fallthrough; case LRU_REMOVED: isolated++; nlru->nr_items--; /* * If the lru lock has been dropped, our list * traversal is now invalid and so we have to * restart from scratch. */ if (ret == LRU_REMOVED_RETRY) goto restart; break; case LRU_ROTATE: list_move_tail(item, &l->list); break; case LRU_SKIP: break; case LRU_RETRY: /* * The lru lock has been dropped, our list traversal is * now invalid and so we have to restart from scratch. 
*/ assert_spin_locked(&nlru->lock); goto restart; default: BUG(); } } out: return isolated; } unsigned long list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk) { struct list_lru_node *nlru = &lru->node[nid]; unsigned long ret; spin_lock(&nlru->lock); ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate, cb_arg, nr_to_walk); spin_unlock(&nlru->lock); return ret; } EXPORT_SYMBOL_GPL(list_lru_walk_one); unsigned long list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk) { struct list_lru_node *nlru = &lru->node[nid]; unsigned long ret; spin_lock_irq(&nlru->lock); ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate, cb_arg, nr_to_walk); spin_unlock_irq(&nlru->lock); return ret; } unsigned long list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk) { long isolated = 0; isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg, nr_to_walk); #ifdef CONFIG_MEMCG_KMEM if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) { struct list_lru_memcg *mlru; unsigned long index; xa_for_each(&lru->xa, index, mlru) { struct list_lru_node *nlru = &lru->node[nid]; spin_lock(&nlru->lock); isolated += __list_lru_walk_one(lru, nid, index, isolate, cb_arg, nr_to_walk); spin_unlock(&nlru->lock); if (*nr_to_walk <= 0) break; } } #endif return isolated; } EXPORT_SYMBOL_GPL(list_lru_walk_node); static void init_one_lru(struct list_lru_one *l) { INIT_LIST_HEAD(&l->list); l->nr_items = 0; } #ifdef CONFIG_MEMCG_KMEM static struct list_lru_memcg *memcg_init_list_lru_one(gfp_t gfp) { int nid; struct list_lru_memcg *mlru; mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp); if (!mlru) return NULL; for_each_node(nid) init_one_lru(&mlru->node[nid]); return mlru; } static void memcg_list_lru_free(struct list_lru *lru, int src_idx) { struct list_lru_memcg *mlru = xa_erase_irq(&lru->xa, src_idx); /* * The __list_lru_walk_one() can walk the list of this node. * We need kvfree_rcu() here. And the walking of the list * is under lru->node[nid]->lock, which can serve as a RCU * read-side critical section. */ if (mlru) kvfree_rcu(mlru, rcu); } static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) { if (memcg_aware) xa_init_flags(&lru->xa, XA_FLAGS_LOCK_IRQ); lru->memcg_aware = memcg_aware; } static void memcg_destroy_list_lru(struct list_lru *lru) { XA_STATE(xas, &lru->xa, 0); struct list_lru_memcg *mlru; if (!list_lru_memcg_aware(lru)) return; xas_lock_irq(&xas); xas_for_each(&xas, mlru, ULONG_MAX) { kfree(mlru); xas_store(&xas, NULL); } xas_unlock_irq(&xas); } static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid, int src_idx, struct mem_cgroup *dst_memcg) { struct list_lru_node *nlru = &lru->node[nid]; int dst_idx = dst_memcg->kmemcg_id; struct list_lru_one *src, *dst; /* * Since list_lru_{add,del} may be called under an IRQ-safe lock, * we have to use IRQ-safe primitives here to avoid deadlock. 
*/ spin_lock_irq(&nlru->lock); src = list_lru_from_memcg_idx(lru, nid, src_idx); if (!src) goto out; dst = list_lru_from_memcg_idx(lru, nid, dst_idx); list_splice_init(&src->list, &dst->list); if (src->nr_items) { dst->nr_items += src->nr_items; set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru)); src->nr_items = 0; } out: spin_unlock_irq(&nlru->lock); } static void memcg_reparent_list_lru(struct list_lru *lru, int src_idx, struct mem_cgroup *dst_memcg) { int i; for_each_node(i) memcg_reparent_list_lru_node(lru, i, src_idx, dst_memcg); memcg_list_lru_free(lru, src_idx); } void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent) { struct cgroup_subsys_state *css; struct list_lru *lru; int src_idx = memcg->kmemcg_id; /* * Change kmemcg_id of this cgroup and all its descendants to the * parent's id, and then move all entries from this cgroup's list_lrus * to ones of the parent. * * After we have finished, all list_lrus corresponding to this cgroup * are guaranteed to remain empty. So we can safely free this cgroup's * list lrus in memcg_list_lru_free(). * * Changing ->kmemcg_id to the parent can prevent memcg_list_lru_alloc() * from allocating list lrus for this cgroup after memcg_list_lru_free() * call. */ rcu_read_lock(); css_for_each_descendant_pre(css, &memcg->css) { struct mem_cgroup *child; child = mem_cgroup_from_css(css); WRITE_ONCE(child->kmemcg_id, parent->kmemcg_id); } rcu_read_unlock(); mutex_lock(&list_lrus_mutex); list_for_each_entry(lru, &memcg_list_lrus, list) memcg_reparent_list_lru(lru, src_idx, parent); mutex_unlock(&list_lrus_mutex); } static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg, struct list_lru *lru) { int idx = memcg->kmemcg_id; return idx < 0 || xa_load(&lru->xa, idx); } int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, gfp_t gfp) { int i; unsigned long flags; struct list_lru_memcg_table { struct list_lru_memcg *mlru; struct mem_cgroup *memcg; } *table; XA_STATE(xas, &lru->xa, 0); if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru)) return 0; gfp &= GFP_RECLAIM_MASK; table = kmalloc_array(memcg->css.cgroup->level, sizeof(*table), gfp); if (!table) return -ENOMEM; /* * Because the list_lru can be reparented to the parent cgroup's * list_lru, we should make sure that this cgroup and all its * ancestors have allocated list_lru_memcg. */ for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) { if (memcg_list_lru_allocated(memcg, lru)) break; table[i].memcg = memcg; table[i].mlru = memcg_init_list_lru_one(gfp); if (!table[i].mlru) { while (i--) kfree(table[i].mlru); kfree(table); return -ENOMEM; } } xas_lock_irqsave(&xas, flags); while (i--) { int index = READ_ONCE(table[i].memcg->kmemcg_id); struct list_lru_memcg *mlru = table[i].mlru; xas_set(&xas, index); retry: if (unlikely(index < 0 || xas_error(&xas) || xas_load(&xas))) { kfree(mlru); } else { xas_store(&xas, mlru); if (xas_error(&xas) == -ENOMEM) { xas_unlock_irqrestore(&xas, flags); if (xas_nomem(&xas, gfp)) xas_set_err(&xas, 0); xas_lock_irqsave(&xas, flags); /* * The xas lock has been released, this memcg * can be reparented before us. So reload * memcg id. More details see the comments * in memcg_reparent_list_lrus(). */ index = READ_ONCE(table[i].memcg->kmemcg_id); if (index < 0) xas_set_err(&xas, 0); else if (!xas_error(&xas) && index != xas.xa_index) xas_set(&xas, index); goto retry; } } } /* xas_nomem() is used to free memory instead of memory allocation. 
*/ if (xas.xa_alloc) xas_nomem(&xas, gfp); xas_unlock_irqrestore(&xas, flags); kfree(table); return xas_error(&xas); } #else static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) { } static void memcg_destroy_list_lru(struct list_lru *lru) { } #endif /* CONFIG_MEMCG_KMEM */ int __list_lru_init(struct list_lru *lru, bool memcg_aware, struct lock_class_key *key, struct shrinker *shrinker) { int i; #ifdef CONFIG_MEMCG_KMEM if (shrinker) lru->shrinker_id = shrinker->id; else lru->shrinker_id = -1; #endif lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL); if (!lru->node) return -ENOMEM; for_each_node(i) { spin_lock_init(&lru->node[i].lock); if (key) lockdep_set_class(&lru->node[i].lock, key); init_one_lru(&lru->node[i].lru); } memcg_init_list_lru(lru, memcg_aware); list_lru_register(lru); return 0; } EXPORT_SYMBOL_GPL(__list_lru_init); void list_lru_destroy(struct list_lru *lru) { /* Already destroyed or not yet initialized? */ if (!lru->node) return; list_lru_unregister(lru); memcg_destroy_list_lru(lru); kfree(lru->node); lru->node = NULL; #ifdef CONFIG_MEMCG_KMEM lru->shrinker_id = -1; #endif } EXPORT_SYMBOL_GPL(list_lru_destroy);
linux-master
mm/list_lru.c
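The mm/list_lru.c record above defines the public entry points used by callers (__list_lru_init, list_lru_add, list_lru_del, list_lru_isolate_move, list_lru_walk_node, list_lru_destroy). Below is a minimal, illustrative sketch of how a caller could drive that API. The names demo_lru, demo_item, demo_isolate, demo_track, demo_shrink, demo_init and demo_exit are hypothetical, invented only for this example, and the sketch assumes a non-memcg-aware LRU with no shrinker registered.

/* Illustrative sketch only: demo_* names are made up, not part of the file above. */
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_item {
	struct list_head lru;		/* linked into the list_lru */
	/* ... object payload ... */
};

static struct list_lru demo_lru;

/* Walk callback; called with the per-node lru lock held. */
static enum lru_status demo_isolate(struct list_head *item,
				    struct list_lru_one *list,
				    spinlock_t *lock, void *cb_arg)
{
	struct list_head *freeable = cb_arg;

	/* Move the object from the LRU onto a private list; freed after the walk. */
	list_lru_isolate_move(list, item, freeable);
	return LRU_REMOVED;
}

static int demo_init(void)
{
	/* Not memcg-aware, no lockdep class key, no shrinker. */
	return __list_lru_init(&demo_lru, false, NULL, NULL);
}

static void demo_track(struct demo_item *di)
{
	/* Only inserts if the item is not already on an LRU (list_empty() check). */
	list_lru_add(&demo_lru, &di->lru);
}

static void demo_shrink(int nid, unsigned long nr)
{
	LIST_HEAD(freeable);
	struct demo_item *di, *next;

	/* Walk at most @nr entries on node @nid, collecting them on @freeable. */
	list_lru_walk_node(&demo_lru, nid, demo_isolate, &freeable, &nr);

	/* The lru lock has been dropped; safe to free the isolated objects now. */
	list_for_each_entry_safe(di, next, &freeable, lru)
		kfree(di);		/* assumes items were kmalloc()ed */
}

static void demo_exit(void)
{
	list_lru_destroy(&demo_lru);
}

A memcg-aware user would instead pass memcg_aware = true and a registered shrinker to __list_lru_init(), so that list_lru_add() can report a valid shrinker id through set_shrinker_bit(), as the file above does.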
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2009 Red Hat, Inc. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/mm.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/coredump.h> #include <linux/sched/numa_balancing.h> #include <linux/highmem.h> #include <linux/hugetlb.h> #include <linux/mmu_notifier.h> #include <linux/rmap.h> #include <linux/swap.h> #include <linux/shrinker.h> #include <linux/mm_inline.h> #include <linux/swapops.h> #include <linux/backing-dev.h> #include <linux/dax.h> #include <linux/khugepaged.h> #include <linux/freezer.h> #include <linux/pfn_t.h> #include <linux/mman.h> #include <linux/memremap.h> #include <linux/pagemap.h> #include <linux/debugfs.h> #include <linux/migrate.h> #include <linux/hashtable.h> #include <linux/userfaultfd_k.h> #include <linux/page_idle.h> #include <linux/shmem_fs.h> #include <linux/oom.h> #include <linux/numa.h> #include <linux/page_owner.h> #include <linux/sched/sysctl.h> #include <linux/memory-tiers.h> #include <asm/tlb.h> #include <asm/pgalloc.h> #include "internal.h" #include "swap.h" #define CREATE_TRACE_POINTS #include <trace/events/thp.h> /* * By default, transparent hugepage support is disabled in order to avoid * risking an increased memory footprint for applications that are not * guaranteed to benefit from it. When transparent hugepage support is * enabled, it is for all mappings, and khugepaged scans all mappings. * Defrag is invoked by khugepaged hugepage allocations and by page faults * for all hugepage allocations. */ unsigned long transparent_hugepage_flags __read_mostly = #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS (1<<TRANSPARENT_HUGEPAGE_FLAG)| #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)| #endif (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)| (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)| (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); static struct shrinker deferred_split_shrinker; static atomic_t huge_zero_refcount; struct page *huge_zero_page __read_mostly; unsigned long huge_zero_pfn __read_mostly = ~0UL; bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, bool smaps, bool in_pf, bool enforce_sysfs) { if (!vma->vm_mm) /* vdso */ return false; /* * Explicitly disabled through madvise or prctl, or some * architectures may disable THP for some mappings, for * example, s390 kvm. * */ if ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) return false; /* * If the hardware/firmware marked hugepage support disabled. */ if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED)) return false; /* khugepaged doesn't collapse DAX vma, but page fault is fine. */ if (vma_is_dax(vma)) return in_pf; /* * Special VMA and hugetlb VMA. * Must be checked after dax since some dax mappings may have * VM_MIXEDMAP set. */ if (vm_flags & VM_NO_KHUGEPAGED) return false; /* * Check alignment for file vma and size for both file and anon vma. * * Skip the check for page fault. Huge fault does the check in fault * handlers. And this check is not suitable for huge PUD fault. */ if (!in_pf && !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE))) return false; /* * Enabled via shmem mount options or sysfs settings. * Must be done before hugepage flags check since shmem has its * own flags. 
*/ if (!in_pf && shmem_file(vma->vm_file)) return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff, !enforce_sysfs, vma->vm_mm, vm_flags); /* Enforce sysfs THP requirements as necessary */ if (enforce_sysfs && (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) && !hugepage_flags_always()))) return false; /* Only regular file is valid */ if (!in_pf && file_thp_enabled(vma)) return true; if (!vma_is_anonymous(vma)) return false; if (vma_is_temporary_stack(vma)) return false; /* * THPeligible bit of smaps should show 1 for proper VMAs even * though anon_vma is not initialized yet. * * Allow page fault since anon_vma may be not initialized until * the first page fault. */ if (!vma->anon_vma) return (smaps || in_pf); return true; } static bool get_huge_zero_page(void) { struct page *zero_page; retry: if (likely(atomic_inc_not_zero(&huge_zero_refcount))) return true; zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE, HPAGE_PMD_ORDER); if (!zero_page) { count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED); return false; } preempt_disable(); if (cmpxchg(&huge_zero_page, NULL, zero_page)) { preempt_enable(); __free_pages(zero_page, compound_order(zero_page)); goto retry; } WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page)); /* We take additional reference here. It will be put back by shrinker */ atomic_set(&huge_zero_refcount, 2); preempt_enable(); count_vm_event(THP_ZERO_PAGE_ALLOC); return true; } static void put_huge_zero_page(void) { /* * Counter should never go to zero here. Only shrinker can put * last reference. */ BUG_ON(atomic_dec_and_test(&huge_zero_refcount)); } struct page *mm_get_huge_zero_page(struct mm_struct *mm) { if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) return READ_ONCE(huge_zero_page); if (!get_huge_zero_page()) return NULL; if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) put_huge_zero_page(); return READ_ONCE(huge_zero_page); } void mm_put_huge_zero_page(struct mm_struct *mm) { if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) put_huge_zero_page(); } static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink, struct shrink_control *sc) { /* we can free zero page only if last reference remains */ return atomic_read(&huge_zero_refcount) == 1 ? 
HPAGE_PMD_NR : 0; } static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink, struct shrink_control *sc) { if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) { struct page *zero_page = xchg(&huge_zero_page, NULL); BUG_ON(zero_page == NULL); WRITE_ONCE(huge_zero_pfn, ~0UL); __free_pages(zero_page, compound_order(zero_page)); return HPAGE_PMD_NR; } return 0; } static struct shrinker huge_zero_page_shrinker = { .count_objects = shrink_huge_zero_page_count, .scan_objects = shrink_huge_zero_page_scan, .seeks = DEFAULT_SEEKS, }; #ifdef CONFIG_SYSFS static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { const char *output; if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags)) output = "[always] madvise never"; else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags)) output = "always [madvise] never"; else output = "always madvise [never]"; return sysfs_emit(buf, "%s\n", output); } static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { ssize_t ret = count; if (sysfs_streq(buf, "always")) { clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); } else if (sysfs_streq(buf, "madvise")) { clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); } else if (sysfs_streq(buf, "never")) { clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); } else ret = -EINVAL; if (ret > 0) { int err = start_stop_khugepaged(); if (err) ret = err; } return ret; } static struct kobj_attribute enabled_attr = __ATTR_RW(enabled); ssize_t single_hugepage_flag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf, enum transparent_hugepage_flag flag) { return sysfs_emit(buf, "%d\n", !!test_bit(flag, &transparent_hugepage_flags)); } ssize_t single_hugepage_flag_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count, enum transparent_hugepage_flag flag) { unsigned long value; int ret; ret = kstrtoul(buf, 10, &value); if (ret < 0) return ret; if (value > 1) return -EINVAL; if (value) set_bit(flag, &transparent_hugepage_flags); else clear_bit(flag, &transparent_hugepage_flags); return count; } static ssize_t defrag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { const char *output; if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) output = "[always] defer defer+madvise madvise never"; else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) output = "always [defer] defer+madvise madvise never"; else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) output = "always defer [defer+madvise] madvise never"; else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) output = "always defer defer+madvise [madvise] never"; else output = "always defer defer+madvise madvise [never]"; return sysfs_emit(buf, "%s\n", output); } static ssize_t defrag_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { if (sysfs_streq(buf, "always")) { clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); 
clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); } else if (sysfs_streq(buf, "defer+madvise")) { clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); } else if (sysfs_streq(buf, "defer")) { clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); } else if (sysfs_streq(buf, "madvise")) { clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); } else if (sysfs_streq(buf, "never")) { clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); } else return -EINVAL; return count; } static struct kobj_attribute defrag_attr = __ATTR_RW(defrag); static ssize_t use_zero_page_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return single_hugepage_flag_show(kobj, attr, buf, TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); } static ssize_t use_zero_page_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { return single_hugepage_flag_store(kobj, attr, buf, count, TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); } static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page); static ssize_t hpage_pmd_size_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE); } static struct kobj_attribute hpage_pmd_size_attr = __ATTR_RO(hpage_pmd_size); static struct attribute *hugepage_attr[] = { &enabled_attr.attr, &defrag_attr.attr, &use_zero_page_attr.attr, &hpage_pmd_size_attr.attr, #ifdef CONFIG_SHMEM &shmem_enabled_attr.attr, #endif NULL, }; static const struct attribute_group hugepage_attr_group = { .attrs = hugepage_attr, }; static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) { int err; *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); if (unlikely(!*hugepage_kobj)) { pr_err("failed to create transparent hugepage kobject\n"); return -ENOMEM; } err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group); if (err) { pr_err("failed to register transparent hugepage group\n"); goto delete_obj; } err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group); if (err) { pr_err("failed to register transparent hugepage group\n"); goto remove_hp_group; } return 0; remove_hp_group: sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group); delete_obj: kobject_put(*hugepage_kobj); return err; } static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj) 
{ sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group); sysfs_remove_group(hugepage_kobj, &hugepage_attr_group); kobject_put(hugepage_kobj); } #else static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj) { return 0; } static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj) { } #endif /* CONFIG_SYSFS */ static int __init hugepage_init(void) { int err; struct kobject *hugepage_kobj; if (!has_transparent_hugepage()) { transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED; return -EINVAL; } /* * hugepages can't be allocated by the buddy allocator */ MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_ORDER); /* * we use page->mapping and page->index in second tail page * as list_head: assuming THP order >= 2 */ MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2); err = hugepage_init_sysfs(&hugepage_kobj); if (err) goto err_sysfs; err = khugepaged_init(); if (err) goto err_slab; err = register_shrinker(&huge_zero_page_shrinker, "thp-zero"); if (err) goto err_hzp_shrinker; err = register_shrinker(&deferred_split_shrinker, "thp-deferred_split"); if (err) goto err_split_shrinker; /* * By default disable transparent hugepages on smaller systems, * where the extra memory used could hurt more than TLB overhead * is likely to save. The admin can still enable it through /sys. */ if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) { transparent_hugepage_flags = 0; return 0; } err = start_stop_khugepaged(); if (err) goto err_khugepaged; return 0; err_khugepaged: unregister_shrinker(&deferred_split_shrinker); err_split_shrinker: unregister_shrinker(&huge_zero_page_shrinker); err_hzp_shrinker: khugepaged_destroy(); err_slab: hugepage_exit_sysfs(hugepage_kobj); err_sysfs: return err; } subsys_initcall(hugepage_init); static int __init setup_transparent_hugepage(char *str) { int ret = 0; if (!str) goto out; if (!strcmp(str, "always")) { set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); ret = 1; } else if (!strcmp(str, "madvise")) { clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); ret = 1; } else if (!strcmp(str, "never")) { clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags); ret = 1; } out: if (!ret) pr_warn("transparent_hugepage= cannot parse, ignored\n"); return ret; } __setup("transparent_hugepage=", setup_transparent_hugepage); pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) { if (likely(vma->vm_flags & VM_WRITE)) pmd = pmd_mkwrite(pmd, vma); return pmd; } #ifdef CONFIG_MEMCG static inline struct deferred_split *get_deferred_split_queue(struct folio *folio) { struct mem_cgroup *memcg = folio_memcg(folio); struct pglist_data *pgdat = NODE_DATA(folio_nid(folio)); if (memcg) return &memcg->deferred_split_queue; else return &pgdat->deferred_split_queue; } #else static inline struct deferred_split *get_deferred_split_queue(struct folio *folio) { struct pglist_data *pgdat = NODE_DATA(folio_nid(folio)); return &pgdat->deferred_split_queue; } #endif void folio_prep_large_rmappable(struct folio *folio) { VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio); INIT_LIST_HEAD(&folio->_deferred_list); folio_set_large_rmappable(folio); } static inline bool is_transparent_hugepage(struct folio *folio) { if (!folio_test_large(folio)) return false; return is_huge_zero_page(&folio->page) || 
folio_test_large_rmappable(folio); } static unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, loff_t off, unsigned long flags, unsigned long size) { loff_t off_end = off + len; loff_t off_align = round_up(off, size); unsigned long len_pad, ret; if (off_end <= off_align || (off_end - off_align) < size) return 0; len_pad = len + size; if (len_pad < len || (off + len_pad) < off) return 0; ret = current->mm->get_unmapped_area(filp, addr, len_pad, off >> PAGE_SHIFT, flags); /* * The failure might be due to length padding. The caller will retry * without the padding. */ if (IS_ERR_VALUE(ret)) return 0; /* * Do not try to align to THP boundary if allocation at the address * hint succeeds. */ if (ret == addr) return addr; ret += (off - ret) & (size - 1); return ret; } unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long ret; loff_t off = (loff_t)pgoff << PAGE_SHIFT; ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE); if (ret) return ret; return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); } EXPORT_SYMBOL_GPL(thp_get_unmapped_area); static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page, gfp_t gfp) { struct vm_area_struct *vma = vmf->vma; struct folio *folio = page_folio(page); pgtable_t pgtable; unsigned long haddr = vmf->address & HPAGE_PMD_MASK; vm_fault_t ret = 0; VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { folio_put(folio); count_vm_event(THP_FAULT_FALLBACK); count_vm_event(THP_FAULT_FALLBACK_CHARGE); return VM_FAULT_FALLBACK; } folio_throttle_swaprate(folio, gfp); pgtable = pte_alloc_one(vma->vm_mm); if (unlikely(!pgtable)) { ret = VM_FAULT_OOM; goto release; } clear_huge_page(page, vmf->address, HPAGE_PMD_NR); /* * The memory barrier inside __folio_mark_uptodate makes sure that * clear_huge_page writes become visible before the set_pmd_at() * write. 
*/ __folio_mark_uptodate(folio); vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); if (unlikely(!pmd_none(*vmf->pmd))) { goto unlock_release; } else { pmd_t entry; ret = check_stable_address_space(vma->vm_mm); if (ret) goto unlock_release; /* Deliver the page fault to userland */ if (userfaultfd_missing(vma)) { spin_unlock(vmf->ptl); folio_put(folio); pte_free(vma->vm_mm, pgtable); ret = handle_userfault(vmf, VM_UFFD_MISSING); VM_BUG_ON(ret & VM_FAULT_FALLBACK); return ret; } entry = mk_huge_pmd(page, vma->vm_page_prot); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); folio_add_new_anon_rmap(folio, vma, haddr); folio_add_lru_vma(folio, vma); pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); mm_inc_nr_ptes(vma->vm_mm); spin_unlock(vmf->ptl); count_vm_event(THP_FAULT_ALLOC); count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC); } return 0; unlock_release: spin_unlock(vmf->ptl); release: if (pgtable) pte_free(vma->vm_mm, pgtable); folio_put(folio); return ret; } /* * always: directly stall for all thp allocations * defer: wake kswapd and fail if not immediately available * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise * fail if not immediately available * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately * available * never: never stall for any thp allocation */ gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma) { const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE); /* Always do synchronous compaction */ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); /* Kick kcompactd and fail quickly */ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM; /* Synchronous compaction if madvised, otherwise kick kcompactd */ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : __GFP_KSWAPD_RECLAIM); /* Only do synchronous compaction if madvised */ if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM : 0); return GFP_TRANSHUGE_LIGHT; } /* Caller must hold page table lock. 
*/ static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, struct page *zero_page) { pmd_t entry; if (!pmd_none(*pmd)) return; entry = mk_pmd(zero_page, vma->vm_page_prot); entry = pmd_mkhuge(entry); pgtable_trans_huge_deposit(mm, pmd, pgtable); set_pmd_at(mm, haddr, pmd, entry); mm_inc_nr_ptes(mm); } vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; gfp_t gfp; struct folio *folio; unsigned long haddr = vmf->address & HPAGE_PMD_MASK; if (!transhuge_vma_suitable(vma, haddr)) return VM_FAULT_FALLBACK; if (unlikely(anon_vma_prepare(vma))) return VM_FAULT_OOM; khugepaged_enter_vma(vma, vma->vm_flags); if (!(vmf->flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(vma->vm_mm) && transparent_hugepage_use_zero_page()) { pgtable_t pgtable; struct page *zero_page; vm_fault_t ret; pgtable = pte_alloc_one(vma->vm_mm); if (unlikely(!pgtable)) return VM_FAULT_OOM; zero_page = mm_get_huge_zero_page(vma->vm_mm); if (unlikely(!zero_page)) { pte_free(vma->vm_mm, pgtable); count_vm_event(THP_FAULT_FALLBACK); return VM_FAULT_FALLBACK; } vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); ret = 0; if (pmd_none(*vmf->pmd)) { ret = check_stable_address_space(vma->vm_mm); if (ret) { spin_unlock(vmf->ptl); pte_free(vma->vm_mm, pgtable); } else if (userfaultfd_missing(vma)) { spin_unlock(vmf->ptl); pte_free(vma->vm_mm, pgtable); ret = handle_userfault(vmf, VM_UFFD_MISSING); VM_BUG_ON(ret & VM_FAULT_FALLBACK); } else { set_huge_zero_page(pgtable, vma->vm_mm, vma, haddr, vmf->pmd, zero_page); update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); spin_unlock(vmf->ptl); } } else { spin_unlock(vmf->ptl); pte_free(vma->vm_mm, pgtable); } return ret; } gfp = vma_thp_gfp_mask(vma); folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true); if (unlikely(!folio)) { count_vm_event(THP_FAULT_FALLBACK); return VM_FAULT_FALLBACK; } return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp); } static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, pgtable_t pgtable) { struct mm_struct *mm = vma->vm_mm; pmd_t entry; spinlock_t *ptl; ptl = pmd_lock(mm, pmd); if (!pmd_none(*pmd)) { if (write) { if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) { WARN_ON_ONCE(!is_huge_zero_pmd(*pmd)); goto out_unlock; } entry = pmd_mkyoung(*pmd); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) update_mmu_cache_pmd(vma, addr, pmd); } goto out_unlock; } entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); if (pfn_t_devmap(pfn)) entry = pmd_mkdevmap(entry); if (write) { entry = pmd_mkyoung(pmd_mkdirty(entry)); entry = maybe_pmd_mkwrite(entry, vma); } if (pgtable) { pgtable_trans_huge_deposit(mm, pmd, pgtable); mm_inc_nr_ptes(mm); pgtable = NULL; } set_pmd_at(mm, addr, pmd, entry); update_mmu_cache_pmd(vma, addr, pmd); out_unlock: spin_unlock(ptl); if (pgtable) pte_free(mm, pgtable); } /** * vmf_insert_pfn_pmd - insert a pmd size pfn * @vmf: Structure describing the fault * @pfn: pfn to insert * @write: whether it's a write fault * * Insert a pmd size pfn. See vmf_insert_pfn() for additional info. * * Return: vm_fault_t value. 
*/ vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) { unsigned long addr = vmf->address & PMD_MASK; struct vm_area_struct *vma = vmf->vma; pgprot_t pgprot = vma->vm_page_prot; pgtable_t pgtable = NULL; /* * If we had pmd_special, we could avoid all these restrictions, * but we need to be consistent with PTEs and architectures that * can't support a 'special' bit. */ BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && !pfn_t_devmap(pfn)); BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == (VM_PFNMAP|VM_MIXEDMAP)); BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); if (addr < vma->vm_start || addr >= vma->vm_end) return VM_FAULT_SIGBUS; if (arch_needs_pgtable_deposit()) { pgtable = pte_alloc_one(vma->vm_mm); if (!pgtable) return VM_FAULT_OOM; } track_pfn_insert(vma, &pgprot, pfn); insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); return VM_FAULT_NOPAGE; } EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd); #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) { if (likely(vma->vm_flags & VM_WRITE)) pud = pud_mkwrite(pud); return pud; } static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pud, pfn_t pfn, bool write) { struct mm_struct *mm = vma->vm_mm; pgprot_t prot = vma->vm_page_prot; pud_t entry; spinlock_t *ptl; ptl = pud_lock(mm, pud); if (!pud_none(*pud)) { if (write) { if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) { WARN_ON_ONCE(!is_huge_zero_pud(*pud)); goto out_unlock; } entry = pud_mkyoung(*pud); entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma); if (pudp_set_access_flags(vma, addr, pud, entry, 1)) update_mmu_cache_pud(vma, addr, pud); } goto out_unlock; } entry = pud_mkhuge(pfn_t_pud(pfn, prot)); if (pfn_t_devmap(pfn)) entry = pud_mkdevmap(entry); if (write) { entry = pud_mkyoung(pud_mkdirty(entry)); entry = maybe_pud_mkwrite(entry, vma); } set_pud_at(mm, addr, pud, entry); update_mmu_cache_pud(vma, addr, pud); out_unlock: spin_unlock(ptl); } /** * vmf_insert_pfn_pud - insert a pud size pfn * @vmf: Structure describing the fault * @pfn: pfn to insert * @write: whether it's a write fault * * Insert a pud size pfn. See vmf_insert_pfn() for additional info. * * Return: vm_fault_t value. */ vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write) { unsigned long addr = vmf->address & PUD_MASK; struct vm_area_struct *vma = vmf->vma; pgprot_t pgprot = vma->vm_page_prot; /* * If we had pud_special, we could avoid all these restrictions, * but we need to be consistent with PTEs and architectures that * can't support a 'special' bit. 
*/ BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && !pfn_t_devmap(pfn)); BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == (VM_PFNMAP|VM_MIXEDMAP)); BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); if (addr < vma->vm_start || addr >= vma->vm_end) return VM_FAULT_SIGBUS; track_pfn_insert(vma, &pgprot, pfn); insert_pfn_pud(vma, addr, vmf->pud, pfn, write); return VM_FAULT_NOPAGE; } EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud); #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, bool write) { pmd_t _pmd; _pmd = pmd_mkyoung(*pmd); if (write) _pmd = pmd_mkdirty(_pmd); if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, pmd, _pmd, write)) update_mmu_cache_pmd(vma, addr, pmd); } struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap) { unsigned long pfn = pmd_pfn(*pmd); struct mm_struct *mm = vma->vm_mm; struct page *page; int ret; assert_spin_locked(pmd_lockptr(mm, pmd)); if (flags & FOLL_WRITE && !pmd_write(*pmd)) return NULL; if (pmd_present(*pmd) && pmd_devmap(*pmd)) /* pass */; else return NULL; if (flags & FOLL_TOUCH) touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); /* * device mapped pages can only be returned if the * caller will manage the page reference count. */ if (!(flags & (FOLL_GET | FOLL_PIN))) return ERR_PTR(-EEXIST); pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT; *pgmap = get_dev_pagemap(pfn, *pgmap); if (!*pgmap) return ERR_PTR(-EFAULT); page = pfn_to_page(pfn); ret = try_grab_page(page, flags); if (ret) page = ERR_PTR(ret); return page; } int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) { spinlock_t *dst_ptl, *src_ptl; struct page *src_page; pmd_t pmd; pgtable_t pgtable = NULL; int ret = -ENOMEM; /* Skip if can be re-fill on fault */ if (!vma_is_anonymous(dst_vma)) return 0; pgtable = pte_alloc_one(dst_mm); if (unlikely(!pgtable)) goto out; dst_ptl = pmd_lock(dst_mm, dst_pmd); src_ptl = pmd_lockptr(src_mm, src_pmd); spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); ret = -EAGAIN; pmd = *src_pmd; #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION if (unlikely(is_swap_pmd(pmd))) { swp_entry_t entry = pmd_to_swp_entry(pmd); VM_BUG_ON(!is_pmd_migration_entry(pmd)); if (!is_readable_migration_entry(entry)) { entry = make_readable_migration_entry( swp_offset(entry)); pmd = swp_entry_to_pmd(entry); if (pmd_swp_soft_dirty(*src_pmd)) pmd = pmd_swp_mksoft_dirty(pmd); if (pmd_swp_uffd_wp(*src_pmd)) pmd = pmd_swp_mkuffd_wp(pmd); set_pmd_at(src_mm, addr, src_pmd, pmd); } add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); mm_inc_nr_ptes(dst_mm); pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); if (!userfaultfd_wp(dst_vma)) pmd = pmd_swp_clear_uffd_wp(pmd); set_pmd_at(dst_mm, addr, dst_pmd, pmd); ret = 0; goto out_unlock; } #endif if (unlikely(!pmd_trans_huge(pmd))) { pte_free(dst_mm, pgtable); goto out_unlock; } /* * When page table lock is held, the huge zero pmd should not be * under splitting since we don't split the page itself, only pmd to * a page table. */ if (is_huge_zero_pmd(pmd)) { /* * get_huge_zero_page() will never allocate a new page here, * since we already have a zero page to copy. It just takes a * reference. 
*/ mm_get_huge_zero_page(dst_mm); goto out_zero_page; } src_page = pmd_page(pmd); VM_BUG_ON_PAGE(!PageHead(src_page), src_page); get_page(src_page); if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) { /* Page maybe pinned: split and retry the fault on PTEs. */ put_page(src_page); pte_free(dst_mm, pgtable); spin_unlock(src_ptl); spin_unlock(dst_ptl); __split_huge_pmd(src_vma, src_pmd, addr, false, NULL); return -EAGAIN; } add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); out_zero_page: mm_inc_nr_ptes(dst_mm); pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); pmdp_set_wrprotect(src_mm, addr, src_pmd); if (!userfaultfd_wp(dst_vma)) pmd = pmd_clear_uffd_wp(pmd); pmd = pmd_mkold(pmd_wrprotect(pmd)); set_pmd_at(dst_mm, addr, dst_pmd, pmd); ret = 0; out_unlock: spin_unlock(src_ptl); spin_unlock(dst_ptl); out: return ret; } #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD static void touch_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pud, bool write) { pud_t _pud; _pud = pud_mkyoung(*pud); if (write) _pud = pud_mkdirty(_pud); if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, pud, _pud, write)) update_mmu_cache_pud(vma, addr, pud); } struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap) { unsigned long pfn = pud_pfn(*pud); struct mm_struct *mm = vma->vm_mm; struct page *page; int ret; assert_spin_locked(pud_lockptr(mm, pud)); if (flags & FOLL_WRITE && !pud_write(*pud)) return NULL; if (pud_present(*pud) && pud_devmap(*pud)) /* pass */; else return NULL; if (flags & FOLL_TOUCH) touch_pud(vma, addr, pud, flags & FOLL_WRITE); /* * device mapped pages can only be returned if the * caller will manage the page reference count. * * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here: */ if (!(flags & (FOLL_GET | FOLL_PIN))) return ERR_PTR(-EEXIST); pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; *pgmap = get_dev_pagemap(pfn, *pgmap); if (!*pgmap) return ERR_PTR(-EFAULT); page = pfn_to_page(pfn); ret = try_grab_page(page, flags); if (ret) page = ERR_PTR(ret); return page; } int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, pud_t *dst_pud, pud_t *src_pud, unsigned long addr, struct vm_area_struct *vma) { spinlock_t *dst_ptl, *src_ptl; pud_t pud; int ret; dst_ptl = pud_lock(dst_mm, dst_pud); src_ptl = pud_lockptr(src_mm, src_pud); spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); ret = -EAGAIN; pud = *src_pud; if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud))) goto out_unlock; /* * When page table lock is held, the huge zero pud should not be * under splitting since we don't split the page itself, only pud to * a page table. */ if (is_huge_zero_pud(pud)) { /* No huge zero pud yet */ } /* * TODO: once we support anonymous pages, use page_try_dup_anon_rmap() * and split if duplicating fails. 
*/ pudp_set_wrprotect(src_mm, addr, src_pud); pud = pud_mkold(pud_wrprotect(pud)); set_pud_at(dst_mm, addr, dst_pud, pud); ret = 0; out_unlock: spin_unlock(src_ptl); spin_unlock(dst_ptl); return ret; } void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) { bool write = vmf->flags & FAULT_FLAG_WRITE; vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); if (unlikely(!pud_same(*vmf->pud, orig_pud))) goto unlock; touch_pud(vmf->vma, vmf->address, vmf->pud, write); unlock: spin_unlock(vmf->ptl); } #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ void huge_pmd_set_accessed(struct vm_fault *vmf) { bool write = vmf->flags & FAULT_FLAG_WRITE; vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd))) goto unlock; touch_pmd(vmf->vma, vmf->address, vmf->pmd, write); unlock: spin_unlock(vmf->ptl); } vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf) { const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; struct vm_area_struct *vma = vmf->vma; struct folio *folio; struct page *page; unsigned long haddr = vmf->address & HPAGE_PMD_MASK; pmd_t orig_pmd = vmf->orig_pmd; vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); VM_BUG_ON_VMA(!vma->anon_vma, vma); if (is_huge_zero_pmd(orig_pmd)) goto fallback; spin_lock(vmf->ptl); if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { spin_unlock(vmf->ptl); return 0; } page = pmd_page(orig_pmd); folio = page_folio(page); VM_BUG_ON_PAGE(!PageHead(page), page); /* Early check when only holding the PT lock. */ if (PageAnonExclusive(page)) goto reuse; if (!folio_trylock(folio)) { folio_get(folio); spin_unlock(vmf->ptl); folio_lock(folio); spin_lock(vmf->ptl); if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { spin_unlock(vmf->ptl); folio_unlock(folio); folio_put(folio); return 0; } folio_put(folio); } /* Recheck after temporarily dropping the PT lock. */ if (PageAnonExclusive(page)) { folio_unlock(folio); goto reuse; } /* * See do_wp_page(): we can only reuse the folio exclusively if * there are no additional references. Note that we always drain * the LRU cache immediately after adding a THP. */ if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio) * folio_nr_pages(folio)) goto unlock_fallback; if (folio_test_swapcache(folio)) folio_free_swap(folio); if (folio_ref_count(folio) == 1) { pmd_t entry; page_move_anon_rmap(page, vma); folio_unlock(folio); reuse: if (unlikely(unshare)) { spin_unlock(vmf->ptl); return 0; } entry = pmd_mkyoung(orig_pmd); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); spin_unlock(vmf->ptl); return 0; } unlock_fallback: folio_unlock(folio); spin_unlock(vmf->ptl); fallback: __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); return VM_FAULT_FALLBACK; } static inline bool can_change_pmd_writable(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd) { struct page *page; if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE))) return false; /* Don't touch entries that are not even readable (NUMA hinting). */ if (pmd_protnone(pmd)) return false; /* Do we need write faults for softdirty tracking? */ if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd)) return false; /* Do we need write faults for uffd-wp tracking? */ if (userfaultfd_huge_pmd_wp(vma, pmd)) return false; if (!(vma->vm_flags & VM_SHARED)) { /* See can_change_pte_writable(). */ page = vm_normal_page_pmd(vma, addr, pmd); return page && PageAnon(page) && PageAnonExclusive(page); } /* See can_change_pte_writable(). 
*/ return pmd_dirty(pmd); } /* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */ static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page, struct vm_area_struct *vma, unsigned int flags) { /* If the pmd is writable, we can write to the page. */ if (pmd_write(pmd)) return true; /* Maybe FOLL_FORCE is set to override it? */ if (!(flags & FOLL_FORCE)) return false; /* But FOLL_FORCE has no effect on shared mappings */ if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) return false; /* ... or read-only private ones */ if (!(vma->vm_flags & VM_MAYWRITE)) return false; /* ... or already writable ones that just need to take a write fault */ if (vma->vm_flags & VM_WRITE) return false; /* * See can_change_pte_writable(): we broke COW and could map the page * writable if we have an exclusive anonymous page ... */ if (!page || !PageAnon(page) || !PageAnonExclusive(page)) return false; /* ... and a write-fault isn't required for other reasons. */ if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd)) return false; return !userfaultfd_huge_pmd_wp(vma, pmd); } struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags) { struct mm_struct *mm = vma->vm_mm; struct page *page; int ret; assert_spin_locked(pmd_lockptr(mm, pmd)); page = pmd_page(*pmd); VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page); if ((flags & FOLL_WRITE) && !can_follow_write_pmd(*pmd, page, vma, flags)) return NULL; /* Avoid dumping huge zero page */ if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) return ERR_PTR(-EFAULT); if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags)) return NULL; if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page)) return ERR_PTR(-EMLINK); VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && !PageAnonExclusive(page), page); ret = try_grab_page(page, flags); if (ret) return ERR_PTR(ret); if (flags & FOLL_TOUCH) touch_pmd(vma, addr, pmd, flags & FOLL_WRITE); page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page); return page; } /* NUMA hinting page fault entry point for trans huge pmds */ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; pmd_t oldpmd = vmf->orig_pmd; pmd_t pmd; struct page *page; unsigned long haddr = vmf->address & HPAGE_PMD_MASK; int page_nid = NUMA_NO_NODE; int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK); bool migrated = false, writable = false; int flags = 0; vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { spin_unlock(vmf->ptl); goto out; } pmd = pmd_modify(oldpmd, vma->vm_page_prot); /* * Detect now whether the PMD could be writable; this information * is only valid while holding the PT lock. */ writable = pmd_write(pmd); if (!writable && vma_wants_manual_pte_write_upgrade(vma) && can_change_pmd_writable(vma, vmf->address, pmd)) writable = true; page = vm_normal_page_pmd(vma, haddr, pmd); if (!page) goto out_map; /* See similar comment in do_numa_page for explanation */ if (!writable) flags |= TNF_NO_GROUP; page_nid = page_to_nid(page); /* * For memory tiering mode, cpupid of slow memory page is used * to record page access time. So use default value. 
*/ if (node_is_toptier(page_nid)) last_cpupid = page_cpupid_last(page); target_nid = numa_migrate_prep(page, vma, haddr, page_nid, &flags); if (target_nid == NUMA_NO_NODE) { put_page(page); goto out_map; } spin_unlock(vmf->ptl); writable = false; migrated = migrate_misplaced_page(page, vma, target_nid); if (migrated) { flags |= TNF_MIGRATED; page_nid = target_nid; } else { flags |= TNF_MIGRATE_FAIL; vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) { spin_unlock(vmf->ptl); goto out; } goto out_map; } out: if (page_nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags); return 0; out_map: /* Restore the PMD */ pmd = pmd_modify(oldpmd, vma->vm_page_prot); pmd = pmd_mkyoung(pmd); if (writable) pmd = pmd_mkwrite(pmd, vma); set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); spin_unlock(vmf->ptl); goto out; } /* * Return true if we do MADV_FREE successfully on entire pmd page. * Otherwise, return false. */ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long next) { spinlock_t *ptl; pmd_t orig_pmd; struct folio *folio; struct mm_struct *mm = tlb->mm; bool ret = false; tlb_change_page_size(tlb, HPAGE_PMD_SIZE); ptl = pmd_trans_huge_lock(pmd, vma); if (!ptl) goto out_unlocked; orig_pmd = *pmd; if (is_huge_zero_pmd(orig_pmd)) goto out; if (unlikely(!pmd_present(orig_pmd))) { VM_BUG_ON(thp_migration_supported() && !is_pmd_migration_entry(orig_pmd)); goto out; } folio = pfn_folio(pmd_pfn(orig_pmd)); /* * If other processes are mapping this folio, we couldn't discard * the folio unless they all do MADV_FREE so let's skip the folio. */ if (folio_estimated_sharers(folio) != 1) goto out; if (!folio_trylock(folio)) goto out; /* * If user want to discard part-pages of THP, split it so MADV_FREE * will deactivate only them. */ if (next - addr != HPAGE_PMD_SIZE) { folio_get(folio); spin_unlock(ptl); split_folio(folio); folio_unlock(folio); folio_put(folio); goto out_unlocked; } if (folio_test_dirty(folio)) folio_clear_dirty(folio); folio_unlock(folio); if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { pmdp_invalidate(vma, addr, pmd); orig_pmd = pmd_mkold(orig_pmd); orig_pmd = pmd_mkclean(orig_pmd); set_pmd_at(mm, addr, pmd, orig_pmd); tlb_remove_pmd_tlb_entry(tlb, pmd, addr); } folio_mark_lazyfree(folio); ret = true; out: spin_unlock(ptl); out_unlocked: return ret; } static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) { pgtable_t pgtable; pgtable = pgtable_trans_huge_withdraw(mm, pmd); pte_free(mm, pgtable); mm_dec_nr_ptes(mm); } int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr) { pmd_t orig_pmd; spinlock_t *ptl; tlb_change_page_size(tlb, HPAGE_PMD_SIZE); ptl = __pmd_trans_huge_lock(pmd, vma); if (!ptl) return 0; /* * For architectures like ppc64 we look at deposited pgtable * when calling pmdp_huge_get_and_clear. So do the * pgtable_trans_huge_withdraw after finishing pmdp related * operations. 
*/ orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd, tlb->fullmm); arch_check_zapped_pmd(vma, orig_pmd); tlb_remove_pmd_tlb_entry(tlb, pmd, addr); if (vma_is_special_huge(vma)) { if (arch_needs_pgtable_deposit()) zap_deposited_table(tlb->mm, pmd); spin_unlock(ptl); } else if (is_huge_zero_pmd(orig_pmd)) { zap_deposited_table(tlb->mm, pmd); spin_unlock(ptl); } else { struct page *page = NULL; int flush_needed = 1; if (pmd_present(orig_pmd)) { page = pmd_page(orig_pmd); page_remove_rmap(page, vma, true); VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); VM_BUG_ON_PAGE(!PageHead(page), page); } else if (thp_migration_supported()) { swp_entry_t entry; VM_BUG_ON(!is_pmd_migration_entry(orig_pmd)); entry = pmd_to_swp_entry(orig_pmd); page = pfn_swap_entry_to_page(entry); flush_needed = 0; } else WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); if (PageAnon(page)) { zap_deposited_table(tlb->mm, pmd); add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); } else { if (arch_needs_pgtable_deposit()) zap_deposited_table(tlb->mm, pmd); add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR); } spin_unlock(ptl); if (flush_needed) tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE); } return 1; } #ifndef pmd_move_must_withdraw static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl, spinlock_t *old_pmd_ptl, struct vm_area_struct *vma) { /* * With split pmd lock we also need to move preallocated * PTE page table if new_pmd is on different PMD page table. * * We also don't deposit and withdraw tables for file pages. */ return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); } #endif static pmd_t move_soft_dirty_pmd(pmd_t pmd) { #ifdef CONFIG_MEM_SOFT_DIRTY if (unlikely(is_pmd_migration_entry(pmd))) pmd = pmd_swp_mksoft_dirty(pmd); else if (pmd_present(pmd)) pmd = pmd_mksoft_dirty(pmd); #endif return pmd; } bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd) { spinlock_t *old_ptl, *new_ptl; pmd_t pmd; struct mm_struct *mm = vma->vm_mm; bool force_flush = false; /* * The destination pmd shouldn't be established, free_pgtables() * should have released it; but move_page_tables() might have already * inserted a page table, if racing against shmem/file collapse. */ if (!pmd_none(*new_pmd)) { VM_BUG_ON(pmd_trans_huge(*new_pmd)); return false; } /* * We don't have to worry about the ordering of src and dst * ptlocks because exclusive mmap_lock prevents deadlock. 
*/ old_ptl = __pmd_trans_huge_lock(old_pmd, vma); if (old_ptl) { new_ptl = pmd_lockptr(mm, new_pmd); if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); if (pmd_present(pmd)) force_flush = true; VM_BUG_ON(!pmd_none(*new_pmd)); if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { pgtable_t pgtable; pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); pgtable_trans_huge_deposit(mm, new_pmd, pgtable); } pmd = move_soft_dirty_pmd(pmd); set_pmd_at(mm, new_addr, new_pmd, pmd); if (force_flush) flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE); if (new_ptl != old_ptl) spin_unlock(new_ptl); spin_unlock(old_ptl); return true; } return false; } /* * Returns * - 0 if PMD could not be locked * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary * or if prot_numa but THP migration is not supported * - HPAGE_PMD_NR if protections changed and TLB flush necessary */ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, unsigned long cp_flags) { struct mm_struct *mm = vma->vm_mm; spinlock_t *ptl; pmd_t oldpmd, entry; bool prot_numa = cp_flags & MM_CP_PROT_NUMA; bool uffd_wp = cp_flags & MM_CP_UFFD_WP; bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; int ret = 1; tlb_change_page_size(tlb, HPAGE_PMD_SIZE); if (prot_numa && !thp_migration_supported()) return 1; ptl = __pmd_trans_huge_lock(pmd, vma); if (!ptl) return 0; #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION if (is_swap_pmd(*pmd)) { swp_entry_t entry = pmd_to_swp_entry(*pmd); struct page *page = pfn_swap_entry_to_page(entry); pmd_t newpmd; VM_BUG_ON(!is_pmd_migration_entry(*pmd)); if (is_writable_migration_entry(entry)) { /* * A protection check is difficult so * just be safe and disable write */ if (PageAnon(page)) entry = make_readable_exclusive_migration_entry(swp_offset(entry)); else entry = make_readable_migration_entry(swp_offset(entry)); newpmd = swp_entry_to_pmd(entry); if (pmd_swp_soft_dirty(*pmd)) newpmd = pmd_swp_mksoft_dirty(newpmd); } else { newpmd = *pmd; } if (uffd_wp) newpmd = pmd_swp_mkuffd_wp(newpmd); else if (uffd_wp_resolve) newpmd = pmd_swp_clear_uffd_wp(newpmd); if (!pmd_same(*pmd, newpmd)) set_pmd_at(mm, addr, pmd, newpmd); goto unlock; } #endif if (prot_numa) { struct page *page; bool toptier; /* * Avoid trapping faults against the zero page. The read-only * data is likely to be read-cached on the local CPU and * local/remote hits to the zero page are not interesting. */ if (is_huge_zero_pmd(*pmd)) goto unlock; if (pmd_protnone(*pmd)) goto unlock; page = pmd_page(*pmd); toptier = node_is_toptier(page_to_nid(page)); /* * Skip scanning top tier node if normal numa * balancing is disabled */ if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) && toptier) goto unlock; if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING && !toptier) xchg_page_access_time(page, jiffies_to_msecs(jiffies)); } /* * In case prot_numa, we are under mmap_read_lock(mm). It's critical * to not clear pmd intermittently to avoid race with MADV_DONTNEED * which is also under mmap_read_lock(mm): * * CPU0: CPU1: * change_huge_pmd(prot_numa=1) * pmdp_huge_get_and_clear_notify() * madvise_dontneed() * zap_pmd_range() * pmd_trans_huge(*pmd) == 0 (without ptl) * // skip the pmd * set_pmd_at(); * // pmd is re-established * * The race makes MADV_DONTNEED miss the huge pmd and don't clear it * which may break userspace. 
* * pmdp_invalidate_ad() is required to make sure we don't miss * dirty/young flags set by hardware. */ oldpmd = pmdp_invalidate_ad(vma, addr, pmd); entry = pmd_modify(oldpmd, newprot); if (uffd_wp) entry = pmd_mkuffd_wp(entry); else if (uffd_wp_resolve) /* * Leave the write bit to be handled by PF interrupt * handler, then things like COW could be properly * handled. */ entry = pmd_clear_uffd_wp(entry); /* See change_pte_range(). */ if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) && can_change_pmd_writable(vma, addr, entry)) entry = pmd_mkwrite(entry, vma); ret = HPAGE_PMD_NR; set_pmd_at(mm, addr, pmd, entry); if (huge_pmd_needs_flush(oldpmd, entry)) tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE); unlock: spin_unlock(ptl); return ret; } /* * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise. * * Note that if it returns page table lock pointer, this routine returns without * unlocking page table lock. So callers must unlock it. */ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) { spinlock_t *ptl; ptl = pmd_lock(vma->vm_mm, pmd); if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))) return ptl; spin_unlock(ptl); return NULL; } /* * Returns page table lock pointer if a given pud maps a thp, NULL otherwise. * * Note that if it returns page table lock pointer, this routine returns without * unlocking page table lock. So callers must unlock it. */ spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) { spinlock_t *ptl; ptl = pud_lock(vma->vm_mm, pud); if (likely(pud_trans_huge(*pud) || pud_devmap(*pud))) return ptl; spin_unlock(ptl); return NULL; } #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, unsigned long addr) { spinlock_t *ptl; ptl = __pud_trans_huge_lock(pud, vma); if (!ptl) return 0; pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm); tlb_remove_pud_tlb_entry(tlb, pud, addr); if (vma_is_special_huge(vma)) { spin_unlock(ptl); /* No zero page support yet */ } else { /* No support for anonymous PUD pages yet */ BUG(); } return 1; } static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, unsigned long haddr) { VM_BUG_ON(haddr & ~HPAGE_PUD_MASK); VM_BUG_ON_VMA(vma->vm_start > haddr, vma); VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud)); count_vm_event(THP_SPLIT_PUD); pudp_huge_clear_flush(vma, haddr, pud); } void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, unsigned long address) { spinlock_t *ptl; struct mmu_notifier_range range; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, address & HPAGE_PUD_MASK, (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE); mmu_notifier_invalidate_range_start(&range); ptl = pud_lock(vma->vm_mm, pud); if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud))) goto out; __split_huge_pud_locked(vma, pud, range.start); out: spin_unlock(ptl); mmu_notifier_invalidate_range_end(&range); } #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd) { struct mm_struct *mm = vma->vm_mm; pgtable_t pgtable; pmd_t _pmd, old_pmd; unsigned long addr; pte_t *pte; int i; /* * Leave pmd empty until pte is filled note that it is fine to delay * notification until mmu_notifier_invalidate_range_end() as we are * replacing a zero pmd write protected page with a zero pte write * protected page. 
* * See Documentation/mm/mmu_notifier.rst */ old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); pgtable = pgtable_trans_huge_withdraw(mm, pmd); pmd_populate(mm, &_pmd, pgtable); pte = pte_offset_map(&_pmd, haddr); VM_BUG_ON(!pte); for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { pte_t entry; entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot); entry = pte_mkspecial(entry); if (pmd_uffd_wp(old_pmd)) entry = pte_mkuffd_wp(entry); VM_BUG_ON(!pte_none(ptep_get(pte))); set_pte_at(mm, addr, pte, entry); pte++; } pte_unmap(pte - 1); smp_wmb(); /* make pte visible before pmd */ pmd_populate(mm, pmd, pgtable); } static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, unsigned long haddr, bool freeze) { struct mm_struct *mm = vma->vm_mm; struct page *page; pgtable_t pgtable; pmd_t old_pmd, _pmd; bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false; bool anon_exclusive = false, dirty = false; unsigned long addr; pte_t *pte; int i; VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); VM_BUG_ON_VMA(vma->vm_start > haddr, vma); VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)); count_vm_event(THP_SPLIT_PMD); if (!vma_is_anonymous(vma)) { old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); /* * We are going to unmap this huge page. So * just go ahead and zap it */ if (arch_needs_pgtable_deposit()) zap_deposited_table(mm, pmd); if (vma_is_special_huge(vma)) return; if (unlikely(is_pmd_migration_entry(old_pmd))) { swp_entry_t entry; entry = pmd_to_swp_entry(old_pmd); page = pfn_swap_entry_to_page(entry); } else { page = pmd_page(old_pmd); if (!PageDirty(page) && pmd_dirty(old_pmd)) set_page_dirty(page); if (!PageReferenced(page) && pmd_young(old_pmd)) SetPageReferenced(page); page_remove_rmap(page, vma, true); put_page(page); } add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR); return; } if (is_huge_zero_pmd(*pmd)) { /* * FIXME: Do we want to invalidate secondary mmu by calling * mmu_notifier_arch_invalidate_secondary_tlbs() see comments below * inside __split_huge_pmd() ? * * We are going from a zero huge page write protected to zero * small page also write protected so it does not seems useful * to invalidate secondary mmu at this time. */ return __split_huge_zero_page_pmd(vma, haddr, pmd); } /* * Up to this point the pmd is present and huge and userland has the * whole access to the hugepage during the split (which happens in * place). If we overwrite the pmd with the not-huge version pointing * to the pte here (which of course we could if all CPUs were bug * free), userland could trigger a small page size TLB miss on the * small sized TLB while the hugepage TLB entry is still established in * the huge TLB. Some CPU doesn't like that. * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum * 383 on page 105. Intel should be safe but is also warns that it's * only safe if the permission and cache attributes of the two entries * loaded in the two TLB is identical (which should be the case here). * But it is generally safer to never allow small and huge TLB entries * for the same virtual address to be loaded simultaneously. 
So instead * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the * current pmd notpresent (atomically because here the pmd_trans_huge * must remain set at all times on the pmd until the split is complete * for this pmd), then we flush the SMP TLB and finally we write the * non-huge version of the pmd entry with pmd_populate. */ old_pmd = pmdp_invalidate(vma, haddr, pmd); pmd_migration = is_pmd_migration_entry(old_pmd); if (unlikely(pmd_migration)) { swp_entry_t entry; entry = pmd_to_swp_entry(old_pmd); page = pfn_swap_entry_to_page(entry); write = is_writable_migration_entry(entry); if (PageAnon(page)) anon_exclusive = is_readable_exclusive_migration_entry(entry); young = is_migration_entry_young(entry); dirty = is_migration_entry_dirty(entry); soft_dirty = pmd_swp_soft_dirty(old_pmd); uffd_wp = pmd_swp_uffd_wp(old_pmd); } else { page = pmd_page(old_pmd); if (pmd_dirty(old_pmd)) { dirty = true; SetPageDirty(page); } write = pmd_write(old_pmd); young = pmd_young(old_pmd); soft_dirty = pmd_soft_dirty(old_pmd); uffd_wp = pmd_uffd_wp(old_pmd); VM_BUG_ON_PAGE(!page_count(page), page); /* * Without "freeze", we'll simply split the PMD, propagating the * PageAnonExclusive() flag for each PTE by setting it for * each subpage -- no need to (temporarily) clear. * * With "freeze" we want to replace mapped pages by * migration entries right away. This is only possible if we * managed to clear PageAnonExclusive() -- see * set_pmd_migration_entry(). * * In case we cannot clear PageAnonExclusive(), split the PMD * only and let try_to_migrate_one() fail later. * * See page_try_share_anon_rmap(): invalidate PMD first. */ anon_exclusive = PageAnon(page) && PageAnonExclusive(page); if (freeze && anon_exclusive && page_try_share_anon_rmap(page)) freeze = false; if (!freeze) page_ref_add(page, HPAGE_PMD_NR - 1); } /* * Withdraw the table only after we mark the pmd entry invalid. * This's critical for some architectures (Power). */ pgtable = pgtable_trans_huge_withdraw(mm, pmd); pmd_populate(mm, &_pmd, pgtable); pte = pte_offset_map(&_pmd, haddr); VM_BUG_ON(!pte); for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { pte_t entry; /* * Note that NUMA hinting access restrictions are not * transferred to avoid any possibility of altering * permissions across VMAs. 
*/ if (freeze || pmd_migration) { swp_entry_t swp_entry; if (write) swp_entry = make_writable_migration_entry( page_to_pfn(page + i)); else if (anon_exclusive) swp_entry = make_readable_exclusive_migration_entry( page_to_pfn(page + i)); else swp_entry = make_readable_migration_entry( page_to_pfn(page + i)); if (young) swp_entry = make_migration_entry_young(swp_entry); if (dirty) swp_entry = make_migration_entry_dirty(swp_entry); entry = swp_entry_to_pte(swp_entry); if (soft_dirty) entry = pte_swp_mksoft_dirty(entry); if (uffd_wp) entry = pte_swp_mkuffd_wp(entry); } else { entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); if (write) entry = pte_mkwrite(entry, vma); if (anon_exclusive) SetPageAnonExclusive(page + i); if (!young) entry = pte_mkold(entry); /* NOTE: this may set soft-dirty too on some archs */ if (dirty) entry = pte_mkdirty(entry); if (soft_dirty) entry = pte_mksoft_dirty(entry); if (uffd_wp) entry = pte_mkuffd_wp(entry); page_add_anon_rmap(page + i, vma, addr, RMAP_NONE); } VM_BUG_ON(!pte_none(ptep_get(pte))); set_pte_at(mm, addr, pte, entry); pte++; } pte_unmap(pte - 1); if (!pmd_migration) page_remove_rmap(page, vma, true); if (freeze) put_page(page); smp_wmb(); /* make pte visible before pmd */ pmd_populate(mm, pmd, pgtable); } void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long address, bool freeze, struct folio *folio) { spinlock_t *ptl; struct mmu_notifier_range range; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, address & HPAGE_PMD_MASK, (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE); mmu_notifier_invalidate_range_start(&range); ptl = pmd_lock(vma->vm_mm, pmd); /* * If caller asks to setup a migration entry, we need a folio to check * pmd against. Otherwise we can end up replacing wrong folio. */ VM_BUG_ON(freeze && !folio); VM_WARN_ON_ONCE(folio && !folio_test_locked(folio)); if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)) { /* * It's safe to call pmd_page when folio is set because it's * guaranteed that pmd is present. */ if (folio && folio != page_folio(pmd_page(*pmd))) goto out; __split_huge_pmd_locked(vma, pmd, range.start, freeze); } out: spin_unlock(ptl); mmu_notifier_invalidate_range_end(&range); } void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, bool freeze, struct folio *folio) { pmd_t *pmd = mm_find_pmd(vma->vm_mm, address); if (!pmd) return; __split_huge_pmd(vma, pmd, address, freeze, folio); } static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address) { /* * If the new address isn't hpage aligned and it could previously * contain an hugepage: check if we need to split an huge pmd. */ if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) && range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE), ALIGN(address, HPAGE_PMD_SIZE))) split_huge_pmd_address(vma, address, false, NULL); } void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, long adjust_next) { /* Check if we need to split start first. */ split_huge_pmd_if_needed(vma, start); /* Check if we need to split end next. */ split_huge_pmd_if_needed(vma, end); /* * If we're also updating the next vma vm_start, * check if we need to split it. 
*/ if (adjust_next > 0) { struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end); unsigned long nstart = next->vm_start; nstart += adjust_next; split_huge_pmd_if_needed(next, nstart); } } static void unmap_folio(struct folio *folio) { enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | TTU_SYNC; VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); /* * Anon pages need migration entries to preserve them, but file * pages can simply be left unmapped, then faulted back on demand. * If that is ever changed (perhaps for mlock), update remap_page(). */ if (folio_test_anon(folio)) try_to_migrate(folio, ttu_flags); else try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK); } static void remap_page(struct folio *folio, unsigned long nr) { int i = 0; /* If unmap_folio() uses try_to_migrate() on file, remove this check */ if (!folio_test_anon(folio)) return; for (;;) { remove_migration_ptes(folio, folio, true); i += folio_nr_pages(folio); if (i >= nr) break; folio = folio_next(folio); } } static void lru_add_page_tail(struct page *head, struct page *tail, struct lruvec *lruvec, struct list_head *list) { VM_BUG_ON_PAGE(!PageHead(head), head); VM_BUG_ON_PAGE(PageCompound(tail), head); VM_BUG_ON_PAGE(PageLRU(tail), head); lockdep_assert_held(&lruvec->lru_lock); if (list) { /* page reclaim is reclaiming a huge page */ VM_WARN_ON(PageLRU(head)); get_page(tail); list_add_tail(&tail->lru, list); } else { /* head is still on lru (and we have it frozen) */ VM_WARN_ON(!PageLRU(head)); if (PageUnevictable(tail)) tail->mlock_count = 0; else list_add_tail(&tail->lru, &head->lru); SetPageLRU(tail); } } static void __split_huge_page_tail(struct folio *folio, int tail, struct lruvec *lruvec, struct list_head *list) { struct page *head = &folio->page; struct page *page_tail = head + tail; /* * Careful: new_folio is not a "real" folio before we cleared PageTail. * Don't pass it around before clear_compound_head(). */ struct folio *new_folio = (struct folio *)page_tail; VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail); /* * Clone page flags before unfreezing refcount. * * After successful get_page_unless_zero() might follow flags change, * for example lock_page() which set PG_waiters. * * Note that for mapped sub-pages of an anonymous THP, * PG_anon_exclusive has been cleared in unmap_folio() and is stored in * the migration entry instead from where remap_page() will restore it. * We can still have PG_anon_exclusive set on effectively unmapped and * unreferenced sub-pages of an anonymous THP: we can simply drop * PG_anon_exclusive (-> PG_mappedtodisk) for these here. */ page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; page_tail->flags |= (head->flags & ((1L << PG_referenced) | (1L << PG_swapbacked) | (1L << PG_swapcache) | (1L << PG_mlocked) | (1L << PG_uptodate) | (1L << PG_active) | (1L << PG_workingset) | (1L << PG_locked) | (1L << PG_unevictable) | #ifdef CONFIG_ARCH_USES_PG_ARCH_X (1L << PG_arch_2) | (1L << PG_arch_3) | #endif (1L << PG_dirty) | LRU_GEN_MASK | LRU_REFS_MASK)); /* ->mapping in first and second tail page is replaced by other uses */ VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, page_tail); page_tail->mapping = head->mapping; page_tail->index = head->index + tail; /* * page->private should not be set in tail pages. Fix up and warn once * if private is unexpectedly set. 
*/ if (unlikely(page_tail->private)) { VM_WARN_ON_ONCE_PAGE(true, page_tail); page_tail->private = 0; } if (folio_test_swapcache(folio)) new_folio->swap.val = folio->swap.val + tail; /* Page flags must be visible before we make the page non-compound. */ smp_wmb(); /* * Clear PageTail before unfreezing page refcount. * * After successful get_page_unless_zero() might follow put_page() * which needs correct compound_head(). */ clear_compound_head(page_tail); /* Finally unfreeze refcount. Additional reference from page cache. */ page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) || PageSwapCache(head))); if (page_is_young(head)) set_page_young(page_tail); if (page_is_idle(head)) set_page_idle(page_tail); page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); /* * always add to the tail because some iterators expect new * pages to show after the currently processed elements - e.g. * migrate_pages */ lru_add_page_tail(head, page_tail, lruvec, list); } static void __split_huge_page(struct page *page, struct list_head *list, pgoff_t end) { struct folio *folio = page_folio(page); struct page *head = &folio->page; struct lruvec *lruvec; struct address_space *swap_cache = NULL; unsigned long offset = 0; unsigned int nr = thp_nr_pages(head); int i, nr_dropped = 0; /* complete memcg works before add pages to LRU */ split_page_memcg(head, nr); if (folio_test_anon(folio) && folio_test_swapcache(folio)) { offset = swp_offset(folio->swap); swap_cache = swap_address_space(folio->swap); xa_lock(&swap_cache->i_pages); } /* lock lru list/PageCompound, ref frozen by page_ref_freeze */ lruvec = folio_lruvec_lock(folio); ClearPageHasHWPoisoned(head); for (i = nr - 1; i >= 1; i--) { __split_huge_page_tail(folio, i, lruvec, list); /* Some pages can be beyond EOF: drop them from page cache */ if (head[i].index >= end) { struct folio *tail = page_folio(head + i); if (shmem_mapping(head->mapping)) nr_dropped++; else if (folio_test_clear_dirty(tail)) folio_account_cleaned(tail, inode_to_wb(folio->mapping->host)); __filemap_remove_folio(tail, NULL); folio_put(tail); } else if (!PageAnon(page)) { __xa_store(&head->mapping->i_pages, head[i].index, head + i, 0); } else if (swap_cache) { __xa_store(&swap_cache->i_pages, offset + i, head + i, 0); } } ClearPageCompound(head); unlock_page_lruvec(lruvec); /* Caller disabled irqs, so they are still disabled here */ split_page_owner(head, nr); /* See comment in __split_huge_page_tail() */ if (PageAnon(head)) { /* Additional pin to swap cache */ if (PageSwapCache(head)) { page_ref_add(head, 2); xa_unlock(&swap_cache->i_pages); } else { page_ref_inc(head); } } else { /* Additional pin to page cache */ page_ref_add(head, 2); xa_unlock(&head->mapping->i_pages); } local_irq_enable(); if (nr_dropped) shmem_uncharge(head->mapping->host, nr_dropped); remap_page(folio, nr); if (folio_test_swapcache(folio)) split_swap_cluster(folio->swap); for (i = 0; i < nr; i++) { struct page *subpage = head + i; if (subpage == page) continue; unlock_page(subpage); /* * Subpages may be freed if there wasn't any mapping * like if add_to_swap() is running on a lru page that * had its mapping zapped. And freeing these pages * requires taking the lru_lock so we do the put_page * of the tail pages after the split is complete. */ free_page_and_swap_cache(subpage); } } /* Racy check whether the huge page can be split */ bool can_split_folio(struct folio *folio, int *pextra_pins) { int extra_pins; /* Additional pins from page cache */ if (folio_test_anon(folio)) extra_pins = folio_test_swapcache(folio) ? 
folio_nr_pages(folio) : 0; else extra_pins = folio_nr_pages(folio); if (pextra_pins) *pextra_pins = extra_pins; return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1; } /* * This function splits huge page into normal pages. @page can point to any * subpage of huge page to split. Split doesn't change the position of @page. * * Only caller must hold pin on the @page, otherwise split fails with -EBUSY. * The huge page must be locked. * * If @list is null, tail pages will be added to LRU list, otherwise, to @list. * * Both head page and tail pages will inherit mapping, flags, and so on from * the hugepage. * * GUP pin and PG_locked transferred to @page. Rest subpages can be freed if * they are not mapped. * * Returns 0 if the hugepage is split successfully. * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under * us. */ int split_huge_page_to_list(struct page *page, struct list_head *list) { struct folio *folio = page_folio(page); struct deferred_split *ds_queue = get_deferred_split_queue(folio); XA_STATE(xas, &folio->mapping->i_pages, folio->index); struct anon_vma *anon_vma = NULL; struct address_space *mapping = NULL; int extra_pins, ret; pgoff_t end; bool is_hzp; VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); is_hzp = is_huge_zero_page(&folio->page); if (is_hzp) { pr_warn_ratelimited("Called split_huge_page for huge zero page\n"); return -EBUSY; } if (folio_test_writeback(folio)) return -EBUSY; if (folio_test_anon(folio)) { /* * The caller does not necessarily hold an mmap_lock that would * prevent the anon_vma disappearing so we first we take a * reference to it and then lock the anon_vma for write. This * is similar to folio_lock_anon_vma_read except the write lock * is taken to serialise against parallel split or collapse * operations. */ anon_vma = folio_get_anon_vma(folio); if (!anon_vma) { ret = -EBUSY; goto out; } end = -1; mapping = NULL; anon_vma_lock_write(anon_vma); } else { gfp_t gfp; mapping = folio->mapping; /* Truncated ? */ if (!mapping) { ret = -EBUSY; goto out; } gfp = current_gfp_context(mapping_gfp_mask(mapping) & GFP_RECLAIM_MASK); if (!filemap_release_folio(folio, gfp)) { ret = -EBUSY; goto out; } xas_split_alloc(&xas, folio, folio_order(folio), gfp); if (xas_error(&xas)) { ret = xas_error(&xas); goto out; } anon_vma = NULL; i_mmap_lock_read(mapping); /* *__split_huge_page() may need to trim off pages beyond EOF: * but on 32-bit, i_size_read() takes an irq-unsafe seqlock, * which cannot be nested inside the page tree lock. So note * end now: i_size itself may be changed at any moment, but * folio lock is good enough to serialize the trimming. */ end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); if (shmem_mapping(mapping)) end = shmem_fallocend(mapping->host, end); } /* * Racy check if we can split the page, before unmap_folio() will * split PMDs */ if (!can_split_folio(folio, &extra_pins)) { ret = -EAGAIN; goto out_unlock; } unmap_folio(folio); /* block interrupt reentry in xa_lock and spinlock */ local_irq_disable(); if (mapping) { /* * Check if the folio is present in page cache. * We assume all tail are present too, if folio is there. 
*/ xas_lock(&xas); xas_reset(&xas); if (xas_load(&xas) != folio) goto fail; } /* Prevent deferred_split_scan() touching ->_refcount */ spin_lock(&ds_queue->split_queue_lock); if (folio_ref_freeze(folio, 1 + extra_pins)) { if (!list_empty(&folio->_deferred_list)) { ds_queue->split_queue_len--; list_del(&folio->_deferred_list); } spin_unlock(&ds_queue->split_queue_lock); if (mapping) { int nr = folio_nr_pages(folio); xas_split(&xas, folio, folio_order(folio)); if (folio_test_swapbacked(folio)) { __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr); } else { __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr); filemap_nr_thps_dec(mapping); } } __split_huge_page(page, list, end); ret = 0; } else { spin_unlock(&ds_queue->split_queue_lock); fail: if (mapping) xas_unlock(&xas); local_irq_enable(); remap_page(folio, folio_nr_pages(folio)); ret = -EAGAIN; } out_unlock: if (anon_vma) { anon_vma_unlock_write(anon_vma); put_anon_vma(anon_vma); } if (mapping) i_mmap_unlock_read(mapping); out: xas_destroy(&xas); count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); return ret; } void folio_undo_large_rmappable(struct folio *folio) { struct deferred_split *ds_queue; unsigned long flags; /* * At this point, there is no one trying to add the folio to * deferred_list. If folio is not in deferred_list, it's safe * to check without acquiring the split_queue_lock. */ if (data_race(list_empty(&folio->_deferred_list))) return; ds_queue = get_deferred_split_queue(folio); spin_lock_irqsave(&ds_queue->split_queue_lock, flags); if (!list_empty(&folio->_deferred_list)) { ds_queue->split_queue_len--; list_del(&folio->_deferred_list); } spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); } void deferred_split_folio(struct folio *folio) { struct deferred_split *ds_queue = get_deferred_split_queue(folio); #ifdef CONFIG_MEMCG struct mem_cgroup *memcg = folio_memcg(folio); #endif unsigned long flags; VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio); /* * The try_to_unmap() in page reclaim path might reach here too, * this may cause a race condition to corrupt deferred split queue. * And, if page reclaim is already handling the same folio, it is * unnecessary to handle it again in shrinker. * * Check the swapcache flag to determine if the folio is being * handled by page reclaim since THP swap would add the folio into * swap cache before calling try_to_unmap(). 
*/ if (folio_test_swapcache(folio)) return; if (!list_empty(&folio->_deferred_list)) return; spin_lock_irqsave(&ds_queue->split_queue_lock, flags); if (list_empty(&folio->_deferred_list)) { count_vm_event(THP_DEFERRED_SPLIT_PAGE); list_add_tail(&folio->_deferred_list, &ds_queue->split_queue); ds_queue->split_queue_len++; #ifdef CONFIG_MEMCG if (memcg) set_shrinker_bit(memcg, folio_nid(folio), deferred_split_shrinker.id); #endif } spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); } static unsigned long deferred_split_count(struct shrinker *shrink, struct shrink_control *sc) { struct pglist_data *pgdata = NODE_DATA(sc->nid); struct deferred_split *ds_queue = &pgdata->deferred_split_queue; #ifdef CONFIG_MEMCG if (sc->memcg) ds_queue = &sc->memcg->deferred_split_queue; #endif return READ_ONCE(ds_queue->split_queue_len); } static unsigned long deferred_split_scan(struct shrinker *shrink, struct shrink_control *sc) { struct pglist_data *pgdata = NODE_DATA(sc->nid); struct deferred_split *ds_queue = &pgdata->deferred_split_queue; unsigned long flags; LIST_HEAD(list); struct folio *folio, *next; int split = 0; #ifdef CONFIG_MEMCG if (sc->memcg) ds_queue = &sc->memcg->deferred_split_queue; #endif spin_lock_irqsave(&ds_queue->split_queue_lock, flags); /* Take pin on all head pages to avoid freeing them under us */ list_for_each_entry_safe(folio, next, &ds_queue->split_queue, _deferred_list) { if (folio_try_get(folio)) { list_move(&folio->_deferred_list, &list); } else { /* We lost race with folio_put() */ list_del_init(&folio->_deferred_list); ds_queue->split_queue_len--; } if (!--sc->nr_to_scan) break; } spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); list_for_each_entry_safe(folio, next, &list, _deferred_list) { if (!folio_trylock(folio)) goto next; /* split_huge_page() removes page from list on success */ if (!split_folio(folio)) split++; folio_unlock(folio); next: folio_put(folio); } spin_lock_irqsave(&ds_queue->split_queue_lock, flags); list_splice_tail(&list, &ds_queue->split_queue); spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); /* * Stop shrinker if we didn't split any page, but the queue is empty. * This can happen if pages were freed under us. 
*/ if (!split && list_empty(&ds_queue->split_queue)) return SHRINK_STOP; return split; } static struct shrinker deferred_split_shrinker = { .count_objects = deferred_split_count, .scan_objects = deferred_split_scan, .seeks = DEFAULT_SEEKS, .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE | SHRINKER_NONSLAB, }; #ifdef CONFIG_DEBUG_FS static void split_huge_pages_all(void) { struct zone *zone; struct page *page; struct folio *folio; unsigned long pfn, max_zone_pfn; unsigned long total = 0, split = 0; pr_debug("Split all THPs\n"); for_each_zone(zone) { if (!managed_zone(zone)) continue; max_zone_pfn = zone_end_pfn(zone); for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { int nr_pages; page = pfn_to_online_page(pfn); if (!page || PageTail(page)) continue; folio = page_folio(page); if (!folio_try_get(folio)) continue; if (unlikely(page_folio(page) != folio)) goto next; if (zone != folio_zone(folio)) goto next; if (!folio_test_large(folio) || folio_test_hugetlb(folio) || !folio_test_lru(folio)) goto next; total++; folio_lock(folio); nr_pages = folio_nr_pages(folio); if (!split_folio(folio)) split++; pfn += nr_pages - 1; folio_unlock(folio); next: folio_put(folio); cond_resched(); } } pr_debug("%lu of %lu THP split\n", split, total); } static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma) { return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) || is_vm_hugetlb_page(vma); } static int split_huge_pages_pid(int pid, unsigned long vaddr_start, unsigned long vaddr_end) { int ret = 0; struct task_struct *task; struct mm_struct *mm; unsigned long total = 0, split = 0; unsigned long addr; vaddr_start &= PAGE_MASK; vaddr_end &= PAGE_MASK; /* Find the task_struct from pid */ rcu_read_lock(); task = find_task_by_vpid(pid); if (!task) { rcu_read_unlock(); ret = -ESRCH; goto out; } get_task_struct(task); rcu_read_unlock(); /* Find the mm_struct */ mm = get_task_mm(task); put_task_struct(task); if (!mm) { ret = -EINVAL; goto out; } pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n", pid, vaddr_start, vaddr_end); mmap_read_lock(mm); /* * always increase addr by PAGE_SIZE, since we could have a PTE page * table filled with PTE-mapped THPs, each of which is distinct. 
*/ for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) { struct vm_area_struct *vma = vma_lookup(mm, addr); struct page *page; struct folio *folio; if (!vma) break; /* skip special VMA and hugetlb VMA */ if (vma_not_suitable_for_thp_split(vma)) { addr = vma->vm_end; continue; } /* FOLL_DUMP to ignore special (like zero) pages */ page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP); if (IS_ERR_OR_NULL(page)) continue; folio = page_folio(page); if (!is_transparent_hugepage(folio)) goto next; total++; if (!can_split_folio(folio, NULL)) goto next; if (!folio_trylock(folio)) goto next; if (!split_folio(folio)) split++; folio_unlock(folio); next: folio_put(folio); cond_resched(); } mmap_read_unlock(mm); mmput(mm); pr_debug("%lu of %lu THP split\n", split, total); out: return ret; } static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start, pgoff_t off_end) { struct filename *file; struct file *candidate; struct address_space *mapping; int ret = -EINVAL; pgoff_t index; int nr_pages = 1; unsigned long total = 0, split = 0; file = getname_kernel(file_path); if (IS_ERR(file)) return ret; candidate = file_open_name(file, O_RDONLY, 0); if (IS_ERR(candidate)) goto out; pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n", file_path, off_start, off_end); mapping = candidate->f_mapping; for (index = off_start; index < off_end; index += nr_pages) { struct folio *folio = filemap_get_folio(mapping, index); nr_pages = 1; if (IS_ERR(folio)) continue; if (!folio_test_large(folio)) goto next; total++; nr_pages = folio_nr_pages(folio); if (!folio_trylock(folio)) goto next; if (!split_folio(folio)) split++; folio_unlock(folio); next: folio_put(folio); cond_resched(); } filp_close(candidate, NULL); ret = 0; pr_debug("%lu of %lu file-backed THP split\n", split, total); out: putname(file); return ret; } #define MAX_INPUT_BUF_SZ 255 static ssize_t split_huge_pages_write(struct file *file, const char __user *buf, size_t count, loff_t *ppops) { static DEFINE_MUTEX(split_debug_mutex); ssize_t ret; /* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */ char input_buf[MAX_INPUT_BUF_SZ]; int pid; unsigned long vaddr_start, vaddr_end; ret = mutex_lock_interruptible(&split_debug_mutex); if (ret) return ret; ret = -EFAULT; memset(input_buf, 0, MAX_INPUT_BUF_SZ); if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ))) goto out; input_buf[MAX_INPUT_BUF_SZ - 1] = '\0'; if (input_buf[0] == '/') { char *tok; char *buf = input_buf; char file_path[MAX_INPUT_BUF_SZ]; pgoff_t off_start = 0, off_end = 0; size_t input_len = strlen(input_buf); tok = strsep(&buf, ","); if (tok) { strcpy(file_path, tok); } else { ret = -EINVAL; goto out; } ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end); if (ret != 2) { ret = -EINVAL; goto out; } ret = split_huge_pages_in_file(file_path, off_start, off_end); if (!ret) ret = input_len; goto out; } ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end); if (ret == 1 && pid == 1) { split_huge_pages_all(); ret = strlen(input_buf); goto out; } else if (ret != 3) { ret = -EINVAL; goto out; } ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end); if (!ret) ret = strlen(input_buf); out: mutex_unlock(&split_debug_mutex); return ret; } static const struct file_operations split_huge_pages_fops = { .owner = THIS_MODULE, .write = split_huge_pages_write, .llseek = no_llseek, }; static int __init split_huge_pages_debugfs(void) { debugfs_create_file("split_huge_pages", 0200, NULL, NULL, 
&split_huge_pages_fops); return 0; } late_initcall(split_huge_pages_debugfs); #endif #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, struct page *page) { struct vm_area_struct *vma = pvmw->vma; struct mm_struct *mm = vma->vm_mm; unsigned long address = pvmw->address; bool anon_exclusive; pmd_t pmdval; swp_entry_t entry; pmd_t pmdswp; if (!(pvmw->pmd && !pvmw->pte)) return 0; flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); pmdval = pmdp_invalidate(vma, address, pvmw->pmd); /* See page_try_share_anon_rmap(): invalidate PMD first. */ anon_exclusive = PageAnon(page) && PageAnonExclusive(page); if (anon_exclusive && page_try_share_anon_rmap(page)) { set_pmd_at(mm, address, pvmw->pmd, pmdval); return -EBUSY; } if (pmd_dirty(pmdval)) set_page_dirty(page); if (pmd_write(pmdval)) entry = make_writable_migration_entry(page_to_pfn(page)); else if (anon_exclusive) entry = make_readable_exclusive_migration_entry(page_to_pfn(page)); else entry = make_readable_migration_entry(page_to_pfn(page)); if (pmd_young(pmdval)) entry = make_migration_entry_young(entry); if (pmd_dirty(pmdval)) entry = make_migration_entry_dirty(entry); pmdswp = swp_entry_to_pmd(entry); if (pmd_soft_dirty(pmdval)) pmdswp = pmd_swp_mksoft_dirty(pmdswp); if (pmd_uffd_wp(pmdval)) pmdswp = pmd_swp_mkuffd_wp(pmdswp); set_pmd_at(mm, address, pvmw->pmd, pmdswp); page_remove_rmap(page, vma, true); put_page(page); trace_set_migration_pmd(address, pmd_val(pmdswp)); return 0; } void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) { struct vm_area_struct *vma = pvmw->vma; struct mm_struct *mm = vma->vm_mm; unsigned long address = pvmw->address; unsigned long haddr = address & HPAGE_PMD_MASK; pmd_t pmde; swp_entry_t entry; if (!(pvmw->pmd && !pvmw->pte)) return; entry = pmd_to_swp_entry(*pvmw->pmd); get_page(new); pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot)); if (pmd_swp_soft_dirty(*pvmw->pmd)) pmde = pmd_mksoft_dirty(pmde); if (is_writable_migration_entry(entry)) pmde = pmd_mkwrite(pmde, vma); if (pmd_swp_uffd_wp(*pvmw->pmd)) pmde = pmd_mkuffd_wp(pmde); if (!is_migration_entry_young(entry)) pmde = pmd_mkold(pmde); /* NOTE: this may contain setting soft-dirty on some archs */ if (PageDirty(new) && is_migration_entry_dirty(entry)) pmde = pmd_mkdirty(pmde); if (PageAnon(new)) { rmap_t rmap_flags = RMAP_COMPOUND; if (!is_readable_migration_entry(entry)) rmap_flags |= RMAP_EXCLUSIVE; page_add_anon_rmap(new, vma, haddr, rmap_flags); } else { page_add_file_rmap(new, vma, true); } VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new)); set_pmd_at(mm, haddr, pvmw->pmd, pmde); /* No need to invalidate - it was non-present before */ update_mmu_cache_pmd(vma, address, pvmw->pmd); trace_remove_migration_pmd(address, pmd_val(pmde)); } #endif
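/*
 * Editorial illustration, not part of mm/huge_memory.c: a minimal sketch of
 * the calling pattern expected by __pmd_trans_huge_lock() defined above.
 * When that helper returns a non-NULL page-table lock pointer, the pmd maps
 * a huge (or swap/devmap) entry and the caller owns ptl until it unlocks it;
 * on NULL the lock has already been dropped.  The function name below is
 * hypothetical and exists only for this sketch.
 */
static inline bool example_pmd_is_huge_locked(pmd_t *pmd,
					      struct vm_area_struct *vma)
{
	spinlock_t *ptl = __pmd_trans_huge_lock(pmd, vma);

	if (!ptl)
		return false;	/* not a huge pmd; nothing is held */
	/* ... inspect or modify the huge pmd while holding ptl ... */
	spin_unlock(ptl);
	return true;
}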
linux-master
mm/huge_memory.c
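A minimal user-space sketch (not part of the kernel source above) of how the debugfs file created by split_huge_pages_debugfs() can be driven: split_huge_pages_write() accepts a bare "1" to split every THP, "<pid>,0x<start>,0x<end>" for a process virtual-address range, or "</path>,0x<off_start>,0x<off_end>" for a file-backed range. The mount point /sys/kernel/debug and the program itself are assumptions for illustration; the command strings simply match the sscanf patterns in the handler.

#include <stdio.h>

int main(int argc, char **argv)
{
	/*
	 * Command format mirrors split_huge_pages_write(): "%d,0x%lx,0x%lx"
	 * for a pid range, a leading '/' for a file path, or a lone "1"
	 * which triggers split_huge_pages_all().
	 */
	const char *cmd = (argc > 1) ? argv[1] : "1";
	FILE *f = fopen("/sys/kernel/debug/split_huge_pages", "w");

	if (!f) {
		perror("open split_huge_pages");
		return 1;
	}
	if (fputs(cmd, f) == EOF)
		perror("write split_huge_pages");
	if (fclose(f) != 0)
		perror("close split_huge_pages");
	return 0;
}

For example, running the (hypothetical) binary as "./thp_split 1" would request a split of all THPs, and "./thp_split 1234,0x700000000000,0x700000200000" a split of that range in pid 1234; both require root, CONFIG_DEBUG_FS, and a mounted debugfs.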