python_code: stringlengths 0 to 1.8M
repo_name: stringclasses, 7 values
file_path: stringlengths 5 to 99
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 1999 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd * Copyright 2006-2007,2010 Freescale Semiconductor, Inc. All Rights Reserved. * Copyright 2008 Juergen Beisert, [email protected] * Copyright 2009 Ilya Yanok, Emcraft Systems Ltd, [email protected] * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K. */ #include <linux/io.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/compiler.h> #include <linux/export.h> #include <linux/stmp_device.h> #define STMP_MODULE_CLKGATE (1 << 30) #define STMP_MODULE_SFTRST (1 << 31) /* * Clear the bit and poll it cleared. This is usually called with * a reset address and mask being either SFTRST(bit 31) or CLKGATE * (bit 30). */ static int stmp_clear_poll_bit(void __iomem *addr, u32 mask) { int timeout = 0x400; writel(mask, addr + STMP_OFFSET_REG_CLR); udelay(1); while ((readl(addr) & mask) && --timeout) /* nothing */; return !timeout; } int stmp_reset_block(void __iomem *reset_addr) { int ret; int timeout = 0x400; /* clear and poll SFTRST */ ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_SFTRST); if (unlikely(ret)) goto error; /* clear CLKGATE */ writel(STMP_MODULE_CLKGATE, reset_addr + STMP_OFFSET_REG_CLR); /* set SFTRST to reset the block */ writel(STMP_MODULE_SFTRST, reset_addr + STMP_OFFSET_REG_SET); udelay(1); /* poll CLKGATE becoming set */ while ((!(readl(reset_addr) & STMP_MODULE_CLKGATE)) && --timeout) /* nothing */; if (unlikely(!timeout)) goto error; /* clear and poll SFTRST */ ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_SFTRST); if (unlikely(ret)) goto error; /* clear and poll CLKGATE */ ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_CLKGATE); if (unlikely(ret)) goto error; return 0; error: pr_err("%s(%p): module reset timeout\n", __func__, reset_addr); return -ETIMEDOUT; } EXPORT_SYMBOL(stmp_reset_block);
linux-master
lib/stmp_device.c
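The stmp_reset_block() helper above performs the documented SFTRST/CLKGATE dance for STMP-style IP blocks. Below is a minimal, illustrative probe fragment showing how a driver might call it; the platform device, resource index 0 and the example_probe() name are assumptions, only stmp_reset_block() comes from the file above.

#include <linux/platform_device.h>
#include <linux/stmp_device.h>
#include <linux/err.h>
#include <linux/io.h>

/* Hypothetical probe: reset an STMP-style block before touching it. */
static int example_probe(struct platform_device *pdev)
{
        void __iomem *regs;
        int ret;

        regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(regs))
                return PTR_ERR(regs);

        /* Clears SFTRST/CLKGATE, pulses SFTRST, then ungates the block. */
        ret = stmp_reset_block(regs);
        if (ret)
                dev_err(&pdev->dev, "block reset timed out: %d\n", ret);

        return ret;
}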
// SPDX-License-Identifier: GPL-2.0 /* * linux/lib/kasprintf.c * * Copyright (C) 1991, 1992 Linus Torvalds */ #include <linux/stdarg.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/string.h> /* Simplified asprintf. */ char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap) { unsigned int first, second; char *p; va_list aq; va_copy(aq, ap); first = vsnprintf(NULL, 0, fmt, aq); va_end(aq); p = kmalloc_track_caller(first+1, gfp); if (!p) return NULL; second = vsnprintf(p, first+1, fmt, ap); WARN(first != second, "different return values (%u and %u) from vsnprintf(\"%s\", ...)", first, second, fmt); return p; } EXPORT_SYMBOL(kvasprintf); /* * If fmt contains no % (or is exactly %s), use kstrdup_const. If fmt * (or the sole vararg) points to rodata, we will then save a memory * allocation and string copy. In any case, the return value should be * freed using kfree_const(). */ const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list ap) { if (!strchr(fmt, '%')) return kstrdup_const(fmt, gfp); if (!strcmp(fmt, "%s")) return kstrdup_const(va_arg(ap, const char*), gfp); return kvasprintf(gfp, fmt, ap); } EXPORT_SYMBOL(kvasprintf_const); char *kasprintf(gfp_t gfp, const char *fmt, ...) { va_list ap; char *p; va_start(ap, fmt); p = kvasprintf(gfp, fmt, ap); va_end(ap); return p; } EXPORT_SYMBOL(kasprintf);
linux-master
lib/kasprintf.c
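kasprintf()/kvasprintf() above size the allocation with a dry-run vsnprintf() and hand back a freshly allocated string, while kvasprintf_const() may return rodata that must be released with kfree_const(). A small sketch of the common call pattern; the helper name and format string are illustrative only.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Illustrative helper: build "<devname>-queueN" on the heap. */
static char *example_queue_name(struct device *dev, int index)
{
        char *name = kasprintf(GFP_KERNEL, "%s-queue%d", dev_name(dev), index);

        /*
         * Callers kfree() this result when done. Strings obtained from
         * kvasprintf_const() may alias rodata instead and must be paired
         * with kfree_const().
         */
        return name;        /* NULL on allocation failure */
}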
// SPDX-License-Identifier: GPL-2.0 #include <linux/ptrace.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/export.h> #include <asm/syscall.h> static int collect_syscall(struct task_struct *target, struct syscall_info *info) { unsigned long args[6] = { }; struct pt_regs *regs; if (!try_get_task_stack(target)) { /* Task has no stack, so the task isn't in a syscall. */ memset(info, 0, sizeof(*info)); info->data.nr = -1; return 0; } regs = task_pt_regs(target); if (unlikely(!regs)) { put_task_stack(target); return -EAGAIN; } info->sp = user_stack_pointer(regs); info->data.instruction_pointer = instruction_pointer(regs); info->data.nr = syscall_get_nr(target, regs); if (info->data.nr != -1L) syscall_get_arguments(target, regs, args); info->data.args[0] = args[0]; info->data.args[1] = args[1]; info->data.args[2] = args[2]; info->data.args[3] = args[3]; info->data.args[4] = args[4]; info->data.args[5] = args[5]; put_task_stack(target); return 0; } /** * task_current_syscall - Discover what a blocked task is doing. * @target: thread to examine * @info: structure with the following fields: * .sp - filled with user stack pointer * .data.nr - filled with system call number or -1 * .data.args - filled with @maxargs system call arguments * .data.instruction_pointer - filled with user PC * * If @target is blocked in a system call, returns zero with @info.data.nr * set to the call's number and @info.data.args filled in with its * arguments. Registers not used for system call arguments may not be available * and it is not kosher to use &struct user_regset calls while the system * call is still in progress. Note we may get this result if @target * has finished its system call but not yet returned to user mode, such * as when it's stopped for signal handling or syscall exit tracing. * * If @target is blocked in the kernel during a fault or exception, * returns zero with *@info.data.nr set to -1 and does not fill in * @info.data.args. If so, it's now safe to examine @target using * &struct user_regset get() calls as long as we're sure @target won't return * to user mode. * * Returns -%EAGAIN if @target does not remain blocked. */ int task_current_syscall(struct task_struct *target, struct syscall_info *info) { unsigned long ncsw; unsigned int state; if (target == current) return collect_syscall(target, info); state = READ_ONCE(target->__state); if (unlikely(!state)) return -EAGAIN; ncsw = wait_task_inactive(target, state); if (unlikely(!ncsw) || unlikely(collect_syscall(target, info)) || unlikely(wait_task_inactive(target, state) != ncsw)) return -EAGAIN; return 0; }
linux-master
lib/syscall.c
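task_current_syscall() above snapshots the system call a blocked task is sitting in, or reports nr == -1 when the task is in the kernel for a fault or exception. A hedged sketch of a caller follows; the reporting helper is hypothetical, the includes mirror lib/syscall.c, and as the kerneldoc warns the target must already be stopped and prevented from resuming.

#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/printk.h>
#include <asm/syscall.h>

/* Illustrative: log what a stopped task is currently blocked in. */
static void example_report_syscall(struct task_struct *task)
{
        struct syscall_info info;

        if (task_current_syscall(task, &info)) {
                pr_info("%s did not remain blocked\n", task->comm);
                return;
        }

        if (info.data.nr == -1)
                pr_info("%s: in-kernel fault/exception, sp=%llx\n",
                        task->comm, info.sp);
        else
                pr_info("%s: syscall %d, arg0=%llx\n",
                        task->comm, info.data.nr, info.data.args[0]);
}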
// SPDX-License-Identifier: GPL-2.0 /* * seq_buf.c * * Copyright (C) 2014 Red Hat Inc, Steven Rostedt <[email protected]> * * The seq_buf is a handy tool that allows you to pass a descriptor around * to a buffer that other functions can write to. It is similar to the * seq_file functionality but has some differences. * * To use it, the seq_buf must be initialized with seq_buf_init(). * This will set up the counters within the descriptor. You can call * seq_buf_init() more than once to reset the seq_buf to start * from scratch. */ #include <linux/uaccess.h> #include <linux/seq_file.h> #include <linux/seq_buf.h> /** * seq_buf_can_fit - can the new data fit in the current buffer? * @s: the seq_buf descriptor * @len: The length to see if it can fit in the current buffer * * Returns true if there's enough unused space in the seq_buf buffer * to fit the amount of new data according to @len. */ static bool seq_buf_can_fit(struct seq_buf *s, size_t len) { return s->len + len <= s->size; } /** * seq_buf_print_seq - move the contents of seq_buf into a seq_file * @m: the seq_file descriptor that is the destination * @s: the seq_buf descriptor that is the source. * * Returns zero on success, non zero otherwise */ int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s) { unsigned int len = seq_buf_used(s); return seq_write(m, s->buffer, len); } /** * seq_buf_vprintf - sequence printing of information. * @s: seq_buf descriptor * @fmt: printf format string * @args: va_list of arguments from a printf() type function * * Writes a vnprintf() format into the sequencce buffer. * * Returns zero on success, -1 on overflow. */ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args) { int len; WARN_ON(s->size == 0); if (s->len < s->size) { len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args); if (s->len + len < s->size) { s->len += len; return 0; } } seq_buf_set_overflow(s); return -1; } /** * seq_buf_printf - sequence printing of information * @s: seq_buf descriptor * @fmt: printf format string * * Writes a printf() format into the sequence buffer. * * Returns zero on success, -1 on overflow. */ int seq_buf_printf(struct seq_buf *s, const char *fmt, ...) { va_list ap; int ret; va_start(ap, fmt); ret = seq_buf_vprintf(s, fmt, ap); va_end(ap); return ret; } EXPORT_SYMBOL_GPL(seq_buf_printf); /** * seq_buf_do_printk - printk seq_buf line by line * @s: seq_buf descriptor * @lvl: printk level * * printk()-s a multi-line sequential buffer line by line. The function * makes sure that the buffer in @s is nul terminated and safe to read * as a string. */ void seq_buf_do_printk(struct seq_buf *s, const char *lvl) { const char *start, *lf; if (s->size == 0 || s->len == 0) return; seq_buf_terminate(s); start = s->buffer; while ((lf = strchr(start, '\n'))) { int len = lf - start + 1; printk("%s%.*s", lvl, len, start); start = ++lf; } /* No trailing LF */ if (start < s->buffer + s->len) printk("%s%s\n", lvl, start); } EXPORT_SYMBOL_GPL(seq_buf_do_printk); #ifdef CONFIG_BINARY_PRINTF /** * seq_buf_bprintf - Write the printf string from binary arguments * @s: seq_buf descriptor * @fmt: The format string for the @binary arguments * @binary: The binary arguments for @fmt. * * When recording in a fast path, a printf may be recorded with just * saving the format and the arguments as they were passed to the * function, instead of wasting cycles converting the arguments into * ASCII characters. Instead, the arguments are saved in a 32 bit * word array that is defined by the format string constraints. 
* * This function will take the format and the binary array and finish * the conversion into the ASCII string within the buffer. * * Returns zero on success, -1 on overflow. */ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary) { unsigned int len = seq_buf_buffer_left(s); int ret; WARN_ON(s->size == 0); if (s->len < s->size) { ret = bstr_printf(s->buffer + s->len, len, fmt, binary); if (s->len + ret < s->size) { s->len += ret; return 0; } } seq_buf_set_overflow(s); return -1; } #endif /* CONFIG_BINARY_PRINTF */ /** * seq_buf_puts - sequence printing of simple string * @s: seq_buf descriptor * @str: simple string to record * * Copy a simple string into the sequence buffer. * * Returns zero on success, -1 on overflow */ int seq_buf_puts(struct seq_buf *s, const char *str) { size_t len = strlen(str); WARN_ON(s->size == 0); /* Add 1 to len for the trailing null byte which must be there */ len += 1; if (seq_buf_can_fit(s, len)) { memcpy(s->buffer + s->len, str, len); /* Don't count the trailing null byte against the capacity */ s->len += len - 1; return 0; } seq_buf_set_overflow(s); return -1; } /** * seq_buf_putc - sequence printing of simple character * @s: seq_buf descriptor * @c: simple character to record * * Copy a single character into the sequence buffer. * * Returns zero on success, -1 on overflow */ int seq_buf_putc(struct seq_buf *s, unsigned char c) { WARN_ON(s->size == 0); if (seq_buf_can_fit(s, 1)) { s->buffer[s->len++] = c; return 0; } seq_buf_set_overflow(s); return -1; } /** * seq_buf_putmem - write raw data into the sequenc buffer * @s: seq_buf descriptor * @mem: The raw memory to copy into the buffer * @len: The length of the raw memory to copy (in bytes) * * There may be cases where raw memory needs to be written into the * buffer and a strcpy() would not work. Using this function allows * for such cases. * * Returns zero on success, -1 on overflow */ int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len) { WARN_ON(s->size == 0); if (seq_buf_can_fit(s, len)) { memcpy(s->buffer + s->len, mem, len); s->len += len; return 0; } seq_buf_set_overflow(s); return -1; } #define MAX_MEMHEX_BYTES 8U #define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1) /** * seq_buf_putmem_hex - write raw memory into the buffer in ASCII hex * @s: seq_buf descriptor * @mem: The raw memory to write its hex ASCII representation of * @len: The length of the raw memory to copy (in bytes) * * This is similar to seq_buf_putmem() except instead of just copying the * raw memory into the buffer it writes its ASCII representation of it * in hex characters. * * Returns zero on success, -1 on overflow */ int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, unsigned int len) { unsigned char hex[HEX_CHARS]; const unsigned char *data = mem; unsigned int start_len; int i, j; WARN_ON(s->size == 0); BUILD_BUG_ON(MAX_MEMHEX_BYTES * 2 >= HEX_CHARS); while (len) { start_len = min(len, MAX_MEMHEX_BYTES); #ifdef __BIG_ENDIAN for (i = 0, j = 0; i < start_len; i++) { #else for (i = start_len-1, j = 0; i >= 0; i--) { #endif hex[j++] = hex_asc_hi(data[i]); hex[j++] = hex_asc_lo(data[i]); } if (WARN_ON_ONCE(j == 0 || j/2 > len)) break; /* j increments twice per loop */ hex[j++] = ' '; seq_buf_putmem(s, hex, j); if (seq_buf_has_overflowed(s)) return -1; len -= start_len; data += start_len; } return 0; } /** * seq_buf_path - copy a path into the sequence buffer * @s: seq_buf descriptor * @path: path to write into the sequence buffer. 
* @esc: set of characters to escape in the output * * Write a path name into the sequence buffer. * * Returns the number of written bytes on success, -1 on overflow */ int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc) { char *buf; size_t size = seq_buf_get_buf(s, &buf); int res = -1; WARN_ON(s->size == 0); if (size) { char *p = d_path(path, buf, size); if (!IS_ERR(p)) { char *end = mangle_path(buf, p, esc); if (end) res = end - buf; } } seq_buf_commit(s, res); return res; } /** * seq_buf_to_user - copy the sequence buffer to user space * @s: seq_buf descriptor * @ubuf: The userspace memory location to copy to * @cnt: The amount to copy * * Copies the sequence buffer into the userspace memory pointed to * by @ubuf. It starts from the last read position (@s->readpos) * and writes up to @cnt characters or till it reaches the end of * the content in the buffer (@s->len), which ever comes first. * * On success, it returns a positive number of the number of bytes * it copied. * * On failure it returns -EBUSY if all of the content in the * sequence has been already read, which includes nothing in the * sequence (@s->len == @s->readpos). * * Returns -EFAULT if the copy to userspace fails. */ int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt) { int len; int ret; if (!cnt) return 0; len = seq_buf_used(s); if (len <= s->readpos) return -EBUSY; len -= s->readpos; if (cnt > len) cnt = len; ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); if (ret == cnt) return -EFAULT; cnt -= ret; s->readpos += cnt; return cnt; } /** * seq_buf_hex_dump - print formatted hex dump into the sequence buffer * @s: seq_buf descriptor * @prefix_str: string to prefix each line with; * caller supplies trailing spaces for alignment if desired * @prefix_type: controls whether prefix of an offset, address, or none * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE) * @rowsize: number of bytes to print per line; must be 16 or 32 * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1) * @buf: data blob to dump * @len: number of bytes in the @buf * @ascii: include ASCII after the hex output * * Function is an analogue of print_hex_dump() and thus has similar interface. * * linebuf size is maximal length for one line. * 32 * 3 - maximum bytes per line, each printed into 2 chars + 1 for * separating space * 2 - spaces separating hex dump and ascii representation * 32 - ascii representation * 1 - terminating '\0' * * Returns zero on success, -1 on overflow */ int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii) { const u8 *ptr = buf; int i, linelen, remaining = len; unsigned char linebuf[32 * 3 + 2 + 32 + 1]; int ret; if (rowsize != 16 && rowsize != 32) rowsize = 16; for (i = 0; i < len; i += rowsize) { linelen = min(remaining, rowsize); remaining -= rowsize; hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, linebuf, sizeof(linebuf), ascii); switch (prefix_type) { case DUMP_PREFIX_ADDRESS: ret = seq_buf_printf(s, "%s%p: %s\n", prefix_str, ptr + i, linebuf); break; case DUMP_PREFIX_OFFSET: ret = seq_buf_printf(s, "%s%.8x: %s\n", prefix_str, i, linebuf); break; default: ret = seq_buf_printf(s, "%s%s\n", prefix_str, linebuf); break; } if (ret) return ret; } return 0; }
linux-master
lib/seq_buf.c
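seq_buf wraps a caller-supplied buffer with length and overflow tracking so multiple writers can append safely. The sketch below follows the initialization rule from the file's header comment; the 128-byte buffer, the values printed and the helper name are illustrative.

#include <linux/printk.h>
#include <linux/seq_buf.h>

/* Illustrative: accumulate a small report, then print it line by line. */
static void example_fill_report(void)
{
        char storage[128];
        struct seq_buf s;

        seq_buf_init(&s, storage, sizeof(storage));

        /* Writers advance s.len; on a full buffer they set the overflow mark. */
        seq_buf_printf(&s, "state=%d ", 3);
        seq_buf_puts(&s, "ok\n");

        if (seq_buf_has_overflowed(&s))
                pr_warn("report truncated\n");

        seq_buf_do_printk(&s, KERN_INFO);
}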
// SPDX-License-Identifier: GPL-2.0-only /* * Simple encoder primitives for ASN.1 BER/DER/CER * * Copyright (C) 2019 [email protected] */ #include <linux/asn1_encoder.h> #include <linux/bug.h> #include <linux/string.h> #include <linux/module.h> /** * asn1_encode_integer() - encode positive integer to ASN.1 * @data: pointer to the pointer to the data * @end_data: end of data pointer, points one beyond last usable byte in @data * @integer: integer to be encoded * * This is a simplified encoder: it only currently does * positive integers, but it should be simple enough to add the * negative case if a use comes along. */ unsigned char * asn1_encode_integer(unsigned char *data, const unsigned char *end_data, s64 integer) { int data_len = end_data - data; unsigned char *d = &data[2]; bool found = false; int i; if (WARN(integer < 0, "BUG: integer encode only supports positive integers")) return ERR_PTR(-EINVAL); if (IS_ERR(data)) return data; /* need at least 3 bytes for tag, length and integer encoding */ if (data_len < 3) return ERR_PTR(-EINVAL); /* remaining length where at d (the start of the integer encoding) */ data_len -= 2; data[0] = _tag(UNIV, PRIM, INT); if (integer == 0) { *d++ = 0; goto out; } for (i = sizeof(integer); i > 0 ; i--) { int byte = integer >> (8 * (i - 1)); if (!found && byte == 0) continue; /* * for a positive number the first byte must have bit * 7 clear in two's complement (otherwise it's a * negative number) so prepend a leading zero if * that's not the case */ if (!found && (byte & 0x80)) { /* * no check needed here, we already know we * have len >= 1 */ *d++ = 0; data_len--; } found = true; if (data_len == 0) return ERR_PTR(-EINVAL); *d++ = byte; data_len--; } out: data[1] = d - data - 2; return d; } EXPORT_SYMBOL_GPL(asn1_encode_integer); /* calculate the base 128 digit values setting the top bit of the first octet */ static int asn1_encode_oid_digit(unsigned char **_data, int *data_len, u32 oid) { unsigned char *data = *_data; int start = 7 + 7 + 7 + 7; int ret = 0; if (*data_len < 1) return -EINVAL; /* quick case */ if (oid == 0) { *data++ = 0x80; (*data_len)--; goto out; } while (oid >> start == 0) start -= 7; while (start > 0 && *data_len > 0) { u8 byte; byte = oid >> start; oid = oid - (byte << start); start -= 7; byte |= 0x80; *data++ = byte; (*data_len)--; } if (*data_len > 0) { *data++ = oid; (*data_len)--; } else { ret = -EINVAL; } out: *_data = data; return ret; } /** * asn1_encode_oid() - encode an oid to ASN.1 * @data: position to begin encoding at * @end_data: end of data pointer, points one beyond last usable byte in @data * @oid: array of oids * @oid_len: length of oid array * * this encodes an OID up to ASN.1 when presented as an array of OID values */ unsigned char * asn1_encode_oid(unsigned char *data, const unsigned char *end_data, u32 oid[], int oid_len) { int data_len = end_data - data; unsigned char *d = data + 2; int i, ret; if (WARN(oid_len < 2, "OID must have at least two elements")) return ERR_PTR(-EINVAL); if (WARN(oid_len > 32, "OID is too large")) return ERR_PTR(-EINVAL); if (IS_ERR(data)) return data; /* need at least 3 bytes for tag, length and OID encoding */ if (data_len < 3) return ERR_PTR(-EINVAL); data[0] = _tag(UNIV, PRIM, OID); *d++ = oid[0] * 40 + oid[1]; data_len -= 3; for (i = 2; i < oid_len; i++) { ret = asn1_encode_oid_digit(&d, &data_len, oid[i]); if (ret < 0) return ERR_PTR(ret); } data[1] = d - data - 2; return d; } EXPORT_SYMBOL_GPL(asn1_encode_oid); /** * asn1_encode_length() - encode a length to follow an ASN.1 tag * 
@data: pointer to encode at * @data_len: pointer to remaining length (adjusted by routine) * @len: length to encode * * This routine can encode lengths up to 65535 using the ASN.1 rules. * It will accept a negative length and place a zero length tag * instead (to keep the ASN.1 valid). This convention allows other * encoder primitives to accept negative lengths as singalling the * sequence will be re-encoded when the length is known. */ static int asn1_encode_length(unsigned char **data, int *data_len, int len) { if (*data_len < 1) return -EINVAL; if (len < 0) { *((*data)++) = 0; (*data_len)--; return 0; } if (len <= 0x7f) { *((*data)++) = len; (*data_len)--; return 0; } if (*data_len < 2) return -EINVAL; if (len <= 0xff) { *((*data)++) = 0x81; *((*data)++) = len & 0xff; *data_len -= 2; return 0; } if (*data_len < 3) return -EINVAL; if (len <= 0xffff) { *((*data)++) = 0x82; *((*data)++) = (len >> 8) & 0xff; *((*data)++) = len & 0xff; *data_len -= 3; return 0; } if (WARN(len > 0xffffff, "ASN.1 length can't be > 0xffffff")) return -EINVAL; if (*data_len < 4) return -EINVAL; *((*data)++) = 0x83; *((*data)++) = (len >> 16) & 0xff; *((*data)++) = (len >> 8) & 0xff; *((*data)++) = len & 0xff; *data_len -= 4; return 0; } /** * asn1_encode_tag() - add a tag for optional or explicit value * @data: pointer to place tag at * @end_data: end of data pointer, points one beyond last usable byte in @data * @tag: tag to be placed * @string: the data to be tagged * @len: the length of the data to be tagged * * Note this currently only handles short form tags < 31. * * Standard usage is to pass in a @tag, @string and @length and the * @string will be ASN.1 encoded with @tag and placed into @data. If * the encoding would put data past @end_data then an error is * returned, otherwise a pointer to a position one beyond the encoding * is returned. * * To encode in place pass a NULL @string and -1 for @len and the * maximum allowable beginning and end of the data; all this will do * is add the current maximum length and update the data pointer to * the place where the tag contents should be placed is returned. The * data should be copied in by the calling routine which should then * repeat the prior statement but now with the known length. In order * to avoid having to keep both before and after pointers, the repeat * expects to be called with @data pointing to where the first encode * returned it and still NULL for @string but the real length in @len. 
*/ unsigned char * asn1_encode_tag(unsigned char *data, const unsigned char *end_data, u32 tag, const unsigned char *string, int len) { int data_len = end_data - data; int ret; if (WARN(tag > 30, "ASN.1 tag can't be > 30")) return ERR_PTR(-EINVAL); if (!string && WARN(len > 127, "BUG: recode tag is too big (>127)")) return ERR_PTR(-EINVAL); if (IS_ERR(data)) return data; if (!string && len > 0) { /* * we're recoding, so move back to the start of the * tag and install a dummy length because the real * data_len should be NULL */ data -= 2; data_len = 2; } if (data_len < 2) return ERR_PTR(-EINVAL); *(data++) = _tagn(CONT, CONS, tag); data_len--; ret = asn1_encode_length(&data, &data_len, len); if (ret < 0) return ERR_PTR(ret); if (!string) return data; if (data_len < len) return ERR_PTR(-EINVAL); memcpy(data, string, len); data += len; return data; } EXPORT_SYMBOL_GPL(asn1_encode_tag); /** * asn1_encode_octet_string() - encode an ASN.1 OCTET STRING * @data: pointer to encode at * @end_data: end of data pointer, points one beyond last usable byte in @data * @string: string to be encoded * @len: length of string * * Note ASN.1 octet strings may contain zeros, so the length is obligatory. */ unsigned char * asn1_encode_octet_string(unsigned char *data, const unsigned char *end_data, const unsigned char *string, u32 len) { int data_len = end_data - data; int ret; if (IS_ERR(data)) return data; /* need minimum of 2 bytes for tag and length of zero length string */ if (data_len < 2) return ERR_PTR(-EINVAL); *(data++) = _tag(UNIV, PRIM, OTS); data_len--; ret = asn1_encode_length(&data, &data_len, len); if (ret) return ERR_PTR(ret); if (data_len < len) return ERR_PTR(-EINVAL); memcpy(data, string, len); data += len; return data; } EXPORT_SYMBOL_GPL(asn1_encode_octet_string); /** * asn1_encode_sequence() - wrap a byte stream in an ASN.1 SEQUENCE * @data: pointer to encode at * @end_data: end of data pointer, points one beyond last usable byte in @data * @seq: data to be encoded as a sequence * @len: length of the data to be encoded as a sequence * * Fill in a sequence. To encode in place, pass NULL for @seq and -1 * for @len; then call again once the length is known (still with NULL * for @seq). In order to avoid having to keep both before and after * pointers, the repeat expects to be called with @data pointing to * where the first encode placed it. 
*/ unsigned char * asn1_encode_sequence(unsigned char *data, const unsigned char *end_data, const unsigned char *seq, int len) { int data_len = end_data - data; int ret; if (!seq && WARN(len > 127, "BUG: recode sequence is too big (>127)")) return ERR_PTR(-EINVAL); if (IS_ERR(data)) return data; if (!seq && len >= 0) { /* * we're recoding, so move back to the start of the * sequence and install a dummy length because the * real length should be NULL */ data -= 2; data_len = 2; } if (data_len < 2) return ERR_PTR(-EINVAL); *(data++) = _tag(UNIV, CONS, SEQ); data_len--; ret = asn1_encode_length(&data, &data_len, len); if (ret) return ERR_PTR(ret); if (!seq) return data; if (data_len < len) return ERR_PTR(-EINVAL); memcpy(data, seq, len); data += len; return data; } EXPORT_SYMBOL_GPL(asn1_encode_sequence); /** * asn1_encode_boolean() - encode a boolean value to ASN.1 * @data: pointer to encode at * @end_data: end of data pointer, points one beyond last usable byte in @data * @val: the boolean true/false value */ unsigned char * asn1_encode_boolean(unsigned char *data, const unsigned char *end_data, bool val) { int data_len = end_data - data; if (IS_ERR(data)) return data; /* booleans are 3 bytes: tag, length == 1 and value == 0 or 1 */ if (data_len < 3) return ERR_PTR(-EINVAL); *(data++) = _tag(UNIV, PRIM, BOOL); data_len--; asn1_encode_length(&data, &data_len, 1); if (val) *(data++) = 1; else *(data++) = 0; return data; } EXPORT_SYMBOL_GPL(asn1_encode_boolean); MODULE_LICENSE("GPL");
linux-master
lib/asn1_encoder.c
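The encoder primitives above advance a data pointer toward end_data and return it (or an ERR_PTR) after each element, and a SEQUENCE can be sized in place by calling asn1_encode_sequence() twice as its kerneldoc describes: once with NULL/-1 to reserve the header, then again at the returned position with the real length. A hedged sketch of that pattern; the buffer, the INTEGER payload and the function name are illustrative.

#include <linux/asn1_encoder.h>
#include <linux/err.h>

/* Illustrative: DER-encode SEQUENCE { INTEGER 42 }, return the total length. */
static int example_encode(unsigned char *buf, int buflen)
{
        unsigned char *end = buf + buflen;
        unsigned char *seq, *d, *ret;

        /* Reserve the SEQUENCE tag+length; returns where the contents start. */
        seq = asn1_encode_sequence(buf, end, NULL, -1);
        if (IS_ERR(seq))
                return PTR_ERR(seq);

        d = asn1_encode_integer(seq, end, 42);
        if (IS_ERR(d))
                return PTR_ERR(d);

        /* Repeat at the same position with the now-known (short) length. */
        ret = asn1_encode_sequence(seq, end, NULL, d - seq);
        if (IS_ERR(ret))
                return PTR_ERR(ret);

        return d - buf;
}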
/* * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin * cleaned up code to current version of sparse and added the slicing-by-8 * algorithm to the closely similar existing slicing-by-4 algorithm. * * Oct 15, 2000 Matt Domsch <[email protected]> * Nicer crc32 functions/docs submitted by [email protected]. Thanks! * Code was from the public domain, copyright abandoned. Code was * subsequently included in the kernel, thus was re-licensed under the * GNU GPL v2. * * Oct 12, 2000 Matt Domsch <[email protected]> * Same crc32 function was used in 5 other places in the kernel. * I made one version, and deleted the others. * There are various incantations of crc32(). Some use a seed of 0 or ~0. * Some xor at the end with ~0. The generic crc32() function takes * seed as an argument, and doesn't xor at the end. Then individual * users can do whatever they need. * drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0. * fs/jffs2 uses seed 0, doesn't xor with ~0. * fs/partitions/efi.c uses seed ~0, xor's with ~0. * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ /* see: Documentation/staging/crc32.rst for a description of algorithms */ #include <linux/crc32.h> #include <linux/crc32poly.h> #include <linux/module.h> #include <linux/types.h> #include <linux/sched.h> #include "crc32defs.h" #if CRC_LE_BITS > 8 # define tole(x) ((__force u32) cpu_to_le32(x)) #else # define tole(x) (x) #endif #if CRC_BE_BITS > 8 # define tobe(x) ((__force u32) cpu_to_be32(x)) #else # define tobe(x) (x) #endif #include "crc32table.h" MODULE_AUTHOR("Matt Domsch <[email protected]>"); MODULE_DESCRIPTION("Various CRC32 calculations"); MODULE_LICENSE("GPL"); #if CRC_LE_BITS > 8 || CRC_BE_BITS > 8 /* implements slicing-by-4 or slicing-by-8 algorithm */ static inline u32 __pure crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) { # ifdef __LITTLE_ENDIAN # define DO_CRC(x) crc = t0[(crc ^ (x)) & 255] ^ (crc >> 8) # define DO_CRC4 (t3[(q) & 255] ^ t2[(q >> 8) & 255] ^ \ t1[(q >> 16) & 255] ^ t0[(q >> 24) & 255]) # define DO_CRC8 (t7[(q) & 255] ^ t6[(q >> 8) & 255] ^ \ t5[(q >> 16) & 255] ^ t4[(q >> 24) & 255]) # else # define DO_CRC(x) crc = t0[((crc >> 24) ^ (x)) & 255] ^ (crc << 8) # define DO_CRC4 (t0[(q) & 255] ^ t1[(q >> 8) & 255] ^ \ t2[(q >> 16) & 255] ^ t3[(q >> 24) & 255]) # define DO_CRC8 (t4[(q) & 255] ^ t5[(q >> 8) & 255] ^ \ t6[(q >> 16) & 255] ^ t7[(q >> 24) & 255]) # endif const u32 *b; size_t rem_len; # ifdef CONFIG_X86 size_t i; # endif const u32 *t0=tab[0], *t1=tab[1], *t2=tab[2], *t3=tab[3]; # if CRC_LE_BITS != 32 const u32 *t4 = tab[4], *t5 = tab[5], *t6 = tab[6], *t7 = tab[7]; # endif u32 q; /* Align it */ if (unlikely((long)buf & 3 && len)) { do { DO_CRC(*buf++); } while ((--len) && ((long)buf)&3); } # if CRC_LE_BITS == 32 rem_len = len & 3; len = len >> 2; # else rem_len = len & 7; len = len >> 3; # endif b = (const u32 *)buf; # ifdef CONFIG_X86 --b; for (i = 0; i < len; i++) { # else for (--b; len; --len) { # endif q = crc ^ *++b; /* use pre increment for speed */ # if CRC_LE_BITS == 32 crc = DO_CRC4; # else crc = DO_CRC8; q = *++b; crc ^= DO_CRC4; # endif } len = rem_len; /* And the last few bytes */ if (len) { u8 *p = (u8 *)(b + 1) - 1; # ifdef CONFIG_X86 for (i = 0; i < len; i++) DO_CRC(*++p); /* use pre increment for speed */ # else do { DO_CRC(*++p); /* use pre increment for speed */ } while (--len); # endif } return crc; #undef DO_CRC #undef DO_CRC4 #undef DO_CRC8 } #endif 
/** * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II * CRC32/CRC32C * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for other * uses, or the previous crc32/crc32c value if computing incrementally. * @p: pointer to buffer over which CRC32/CRC32C is run * @len: length of buffer @p * @tab: little-endian Ethernet table * @polynomial: CRC32/CRC32c LE polynomial */ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p, size_t len, const u32 (*tab)[256], u32 polynomial) { #if CRC_LE_BITS == 1 int i; while (len--) { crc ^= *p++; for (i = 0; i < 8; i++) crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0); } # elif CRC_LE_BITS == 2 while (len--) { crc ^= *p++; crc = (crc >> 2) ^ tab[0][crc & 3]; crc = (crc >> 2) ^ tab[0][crc & 3]; crc = (crc >> 2) ^ tab[0][crc & 3]; crc = (crc >> 2) ^ tab[0][crc & 3]; } # elif CRC_LE_BITS == 4 while (len--) { crc ^= *p++; crc = (crc >> 4) ^ tab[0][crc & 15]; crc = (crc >> 4) ^ tab[0][crc & 15]; } # elif CRC_LE_BITS == 8 /* aka Sarwate algorithm */ while (len--) { crc ^= *p++; crc = (crc >> 8) ^ tab[0][crc & 255]; } # else crc = (__force u32) __cpu_to_le32(crc); crc = crc32_body(crc, p, len, tab); crc = __le32_to_cpu((__force __le32)crc); #endif return crc; } #if CRC_LE_BITS == 1 u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE); } u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE); } #else u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, crc32table_le, CRC32_POLY_LE); } u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) { return crc32_le_generic(crc, p, len, crc32ctable_le, CRC32C_POLY_LE); } #endif EXPORT_SYMBOL(crc32_le); EXPORT_SYMBOL(__crc32c_le); u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); u32 __pure crc32_be_base(u32, unsigned char const *, size_t) __alias(crc32_be); /* * This multiplies the polynomials x and y modulo the given modulus. * This follows the "little-endian" CRC convention that the lsbit * represents the highest power of x, and the msbit represents x^0. */ static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus) { u32 product = x & 1 ? y : 0; int i; for (i = 0; i < 31; i++) { product = (product >> 1) ^ (product & 1 ? modulus : 0); x >>= 1; product ^= x & 1 ? y : 0; } return product; } /** * crc32_generic_shift - Append @len 0 bytes to crc, in logarithmic time * @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient) * @len: The number of bytes. @crc is multiplied by x^(8*@len) * @polynomial: The modulus used to reduce the result to 32 bits. * * It's possible to parallelize CRC computations by computing a CRC * over separate ranges of a buffer, then summing them. * This shifts the given CRC by 8*len bits (i.e. produces the same effect * as appending len bytes of zero to the data), in time proportional * to log(len). */ static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len, u32 polynomial) { u32 power = polynomial; /* CRC of x^32 */ int i; /* Shift up to 32 bits in the simple linear way */ for (i = 0; i < 8 * (int)(len & 3); i++) crc = (crc >> 1) ^ (crc & 1 ? 
polynomial : 0); len >>= 2; if (!len) return crc; for (;;) { /* "power" is x^(2^i), modulo the polynomial */ if (len & 1) crc = gf2_multiply(crc, power, polynomial); len >>= 1; if (!len) break; /* Square power, advancing to x^(2^(i+1)) */ power = gf2_multiply(power, power, polynomial); } return crc; } u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len) { return crc32_generic_shift(crc, len, CRC32_POLY_LE); } u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len) { return crc32_generic_shift(crc, len, CRC32C_POLY_LE); } EXPORT_SYMBOL(crc32_le_shift); EXPORT_SYMBOL(__crc32c_le_shift); /** * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32 * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for * other uses, or the previous crc32 value if computing incrementally. * @p: pointer to buffer over which CRC32 is run * @len: length of buffer @p * @tab: big-endian Ethernet table * @polynomial: CRC32 BE polynomial */ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p, size_t len, const u32 (*tab)[256], u32 polynomial) { #if CRC_BE_BITS == 1 int i; while (len--) { crc ^= *p++ << 24; for (i = 0; i < 8; i++) crc = (crc << 1) ^ ((crc & 0x80000000) ? polynomial : 0); } # elif CRC_BE_BITS == 2 while (len--) { crc ^= *p++ << 24; crc = (crc << 2) ^ tab[0][crc >> 30]; crc = (crc << 2) ^ tab[0][crc >> 30]; crc = (crc << 2) ^ tab[0][crc >> 30]; crc = (crc << 2) ^ tab[0][crc >> 30]; } # elif CRC_BE_BITS == 4 while (len--) { crc ^= *p++ << 24; crc = (crc << 4) ^ tab[0][crc >> 28]; crc = (crc << 4) ^ tab[0][crc >> 28]; } # elif CRC_BE_BITS == 8 while (len--) { crc ^= *p++ << 24; crc = (crc << 8) ^ tab[0][crc >> 24]; } # else crc = (__force u32) __cpu_to_be32(crc); crc = crc32_body(crc, p, len, tab); crc = __be32_to_cpu((__force __be32)crc); # endif return crc; } #if CRC_BE_BITS == 1 u32 __pure __weak crc32_be(u32 crc, unsigned char const *p, size_t len) { return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE); } #else u32 __pure __weak crc32_be(u32 crc, unsigned char const *p, size_t len) { return crc32_be_generic(crc, p, len, crc32table_be, CRC32_POLY_BE); } #endif EXPORT_SYMBOL(crc32_be);
linux-master
lib/crc32.c
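As the header comment in crc32.c notes, crc32_le() neither pre-inverts the seed nor XORs at the end, so each caller chooses a convention; the conventional Ethernet/zlib CRC-32 is seed ~0 with a final ~0 XOR. A small illustrative sketch of that convention, including incremental use over two chunks.

#include <linux/crc32.h>
#include <linux/types.h>

/* Conventional CRC-32: seed ~0 and final invert (zlib/Ethernet form). */
static u32 example_crc32(const void *data, size_t len)
{
        return crc32_le(~0U, data, len) ^ ~0U;
}

/* Incremental: two chunks give the same result as one contiguous buffer. */
static u32 example_crc32_chunks(const void *a, size_t alen,
                                const void *b, size_t blen)
{
        u32 crc = crc32_le(~0U, a, alen);

        return crc32_le(crc, b, blen) ^ ~0U;
}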
/* * lib/dynamic_debug.c * * make pr_debug()/dev_dbg() calls runtime configurable based upon their * source module. * * Copyright (C) 2008 Jason Baron <[email protected]> * By Greg Banks <[email protected]> * Copyright (c) 2008 Silicon Graphics Inc. All Rights Reserved. * Copyright (C) 2011 Bart Van Assche. All Rights Reserved. * Copyright (C) 2013 Du, Changbin <[email protected]> */ #define pr_fmt(fmt) "dyndbg: " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kallsyms.h> #include <linux/types.h> #include <linux/mutex.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/list.h> #include <linux/sysctl.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/parser.h> #include <linux/string_helpers.h> #include <linux/uaccess.h> #include <linux/dynamic_debug.h> #include <linux/debugfs.h> #include <linux/slab.h> #include <linux/jump_label.h> #include <linux/hardirq.h> #include <linux/sched.h> #include <linux/device.h> #include <linux/netdevice.h> #include <rdma/ib_verbs.h> extern struct _ddebug __start___dyndbg[]; extern struct _ddebug __stop___dyndbg[]; extern struct ddebug_class_map __start___dyndbg_classes[]; extern struct ddebug_class_map __stop___dyndbg_classes[]; struct ddebug_table { struct list_head link, maps; const char *mod_name; unsigned int num_ddebugs; struct _ddebug *ddebugs; }; struct ddebug_query { const char *filename; const char *module; const char *function; const char *format; const char *class_string; unsigned int first_lineno, last_lineno; }; struct ddebug_iter { struct ddebug_table *table; int idx; }; struct flag_settings { unsigned int flags; unsigned int mask; }; static DEFINE_MUTEX(ddebug_lock); static LIST_HEAD(ddebug_tables); static int verbose; module_param(verbose, int, 0644); MODULE_PARM_DESC(verbose, " dynamic_debug/control processing " "( 0 = off (default), 1 = module add/rm, 2 = >control summary, 3 = parsing, 4 = per-site changes)"); /* Return the path relative to source root */ static inline const char *trim_prefix(const char *path) { int skip = strlen(__FILE__) - strlen("lib/dynamic_debug.c"); if (strncmp(path, __FILE__, skip)) skip = 0; /* prefix mismatch, don't skip */ return path + skip; } static const struct { unsigned flag:8; char opt_char; } opt_array[] = { { _DPRINTK_FLAGS_PRINT, 'p' }, { _DPRINTK_FLAGS_INCL_MODNAME, 'm' }, { _DPRINTK_FLAGS_INCL_FUNCNAME, 'f' }, { _DPRINTK_FLAGS_INCL_SOURCENAME, 's' }, { _DPRINTK_FLAGS_INCL_LINENO, 'l' }, { _DPRINTK_FLAGS_INCL_TID, 't' }, { _DPRINTK_FLAGS_NONE, '_' }, }; struct flagsbuf { char buf[ARRAY_SIZE(opt_array)+1]; }; /* format a string into buf[] which describes the _ddebug's flags */ static char *ddebug_describe_flags(unsigned int flags, struct flagsbuf *fb) { char *p = fb->buf; int i; for (i = 0; i < ARRAY_SIZE(opt_array); ++i) if (flags & opt_array[i].flag) *p++ = opt_array[i].opt_char; if (p == fb->buf) *p++ = '_'; *p = '\0'; return fb->buf; } #define vnpr_info(lvl, fmt, ...) \ do { \ if (verbose >= lvl) \ pr_info(fmt, ##__VA_ARGS__); \ } while (0) #define vpr_info(fmt, ...) vnpr_info(1, fmt, ##__VA_ARGS__) #define v2pr_info(fmt, ...) vnpr_info(2, fmt, ##__VA_ARGS__) #define v3pr_info(fmt, ...) vnpr_info(3, fmt, ##__VA_ARGS__) #define v4pr_info(fmt, ...) 
vnpr_info(4, fmt, ##__VA_ARGS__) static void vpr_info_dq(const struct ddebug_query *query, const char *msg) { /* trim any trailing newlines */ int fmtlen = 0; if (query->format) { fmtlen = strlen(query->format); while (fmtlen && query->format[fmtlen - 1] == '\n') fmtlen--; } v3pr_info("%s: func=\"%s\" file=\"%s\" module=\"%s\" format=\"%.*s\" lineno=%u-%u class=%s\n", msg, query->function ?: "", query->filename ?: "", query->module ?: "", fmtlen, query->format ?: "", query->first_lineno, query->last_lineno, query->class_string); } static struct ddebug_class_map *ddebug_find_valid_class(struct ddebug_table const *dt, const char *class_string, int *class_id) { struct ddebug_class_map *map; int idx; list_for_each_entry(map, &dt->maps, link) { idx = match_string(map->class_names, map->length, class_string); if (idx >= 0) { *class_id = idx + map->base; return map; } } *class_id = -ENOENT; return NULL; } #define __outvar /* filled by callee */ /* * Search the tables for _ddebug's which match the given `query' and * apply the `flags' and `mask' to them. Returns number of matching * callsites, normally the same as number of changes. If verbose, * logs the changes. Takes ddebug_lock. */ static int ddebug_change(const struct ddebug_query *query, struct flag_settings *modifiers) { int i; struct ddebug_table *dt; unsigned int newflags; unsigned int nfound = 0; struct flagsbuf fbuf, nbuf; struct ddebug_class_map *map = NULL; int __outvar valid_class; /* search for matching ddebugs */ mutex_lock(&ddebug_lock); list_for_each_entry(dt, &ddebug_tables, link) { /* match against the module name */ if (query->module && !match_wildcard(query->module, dt->mod_name)) continue; if (query->class_string) { map = ddebug_find_valid_class(dt, query->class_string, &valid_class); if (!map) continue; } else { /* constrain query, do not touch class'd callsites */ valid_class = _DPRINTK_CLASS_DFLT; } for (i = 0; i < dt->num_ddebugs; i++) { struct _ddebug *dp = &dt->ddebugs[i]; /* match site against query-class */ if (dp->class_id != valid_class) continue; /* match against the source filename */ if (query->filename && !match_wildcard(query->filename, dp->filename) && !match_wildcard(query->filename, kbasename(dp->filename)) && !match_wildcard(query->filename, trim_prefix(dp->filename))) continue; /* match against the function */ if (query->function && !match_wildcard(query->function, dp->function)) continue; /* match against the format */ if (query->format) { if (*query->format == '^') { char *p; /* anchored search. 
match must be at beginning */ p = strstr(dp->format, query->format+1); if (p != dp->format) continue; } else if (!strstr(dp->format, query->format)) continue; } /* match against the line number range */ if (query->first_lineno && dp->lineno < query->first_lineno) continue; if (query->last_lineno && dp->lineno > query->last_lineno) continue; nfound++; newflags = (dp->flags & modifiers->mask) | modifiers->flags; if (newflags == dp->flags) continue; #ifdef CONFIG_JUMP_LABEL if (dp->flags & _DPRINTK_FLAGS_PRINT) { if (!(newflags & _DPRINTK_FLAGS_PRINT)) static_branch_disable(&dp->key.dd_key_true); } else if (newflags & _DPRINTK_FLAGS_PRINT) { static_branch_enable(&dp->key.dd_key_true); } #endif v4pr_info("changed %s:%d [%s]%s %s => %s\n", trim_prefix(dp->filename), dp->lineno, dt->mod_name, dp->function, ddebug_describe_flags(dp->flags, &fbuf), ddebug_describe_flags(newflags, &nbuf)); dp->flags = newflags; } } mutex_unlock(&ddebug_lock); if (!nfound && verbose) pr_info("no matches for query\n"); return nfound; } /* * Split the buffer `buf' into space-separated words. * Handles simple " and ' quoting, i.e. without nested, * embedded or escaped \". Return the number of words * or <0 on error. */ static int ddebug_tokenize(char *buf, char *words[], int maxwords) { int nwords = 0; while (*buf) { char *end; /* Skip leading whitespace */ buf = skip_spaces(buf); if (!*buf) break; /* oh, it was trailing whitespace */ if (*buf == '#') break; /* token starts comment, skip rest of line */ /* find `end' of word, whitespace separated or quoted */ if (*buf == '"' || *buf == '\'') { int quote = *buf++; for (end = buf; *end && *end != quote; end++) ; if (!*end) { pr_err("unclosed quote: %s\n", buf); return -EINVAL; /* unclosed quote */ } } else { for (end = buf; *end && !isspace(*end); end++) ; BUG_ON(end == buf); } /* `buf' is start of word, `end' is one past its end */ if (nwords == maxwords) { pr_err("too many words, legal max <=%d\n", maxwords); return -EINVAL; /* ran out of words[] before bytes */ } if (*end) *end++ = '\0'; /* terminate the word */ words[nwords++] = buf; buf = end; } if (verbose >= 3) { int i; pr_info("split into words:"); for (i = 0; i < nwords; i++) pr_cont(" \"%s\"", words[i]); pr_cont("\n"); } return nwords; } /* * Parse a single line number. Note that the empty string "" * is treated as a special case and converted to zero, which * is later treated as a "don't care" value. 
*/ static inline int parse_lineno(const char *str, unsigned int *val) { BUG_ON(str == NULL); if (*str == '\0') { *val = 0; return 0; } if (kstrtouint(str, 10, val) < 0) { pr_err("bad line-number: %s\n", str); return -EINVAL; } return 0; } static int parse_linerange(struct ddebug_query *query, const char *first) { char *last = strchr(first, '-'); if (query->first_lineno || query->last_lineno) { pr_err("match-spec: line used 2x\n"); return -EINVAL; } if (last) *last++ = '\0'; if (parse_lineno(first, &query->first_lineno) < 0) return -EINVAL; if (last) { /* range <first>-<last> */ if (parse_lineno(last, &query->last_lineno) < 0) return -EINVAL; /* special case for last lineno not specified */ if (query->last_lineno == 0) query->last_lineno = UINT_MAX; if (query->last_lineno < query->first_lineno) { pr_err("last-line:%d < 1st-line:%d\n", query->last_lineno, query->first_lineno); return -EINVAL; } } else { query->last_lineno = query->first_lineno; } v3pr_info("parsed line %d-%d\n", query->first_lineno, query->last_lineno); return 0; } static int check_set(const char **dest, char *src, char *name) { int rc = 0; if (*dest) { rc = -EINVAL; pr_err("match-spec:%s val:%s overridden by %s\n", name, *dest, src); } *dest = src; return rc; } /* * Parse words[] as a ddebug query specification, which is a series * of (keyword, value) pairs chosen from these possibilities: * * func <function-name> * file <full-pathname> * file <base-filename> * module <module-name> * format <escaped-string-to-find-in-format> * line <lineno> * line <first-lineno>-<last-lineno> // where either may be empty * * Only 1 of each type is allowed. * Returns 0 on success, <0 on error. */ static int ddebug_parse_query(char *words[], int nwords, struct ddebug_query *query, const char *modname) { unsigned int i; int rc = 0; char *fline; /* check we have an even number of words */ if (nwords % 2 != 0) { pr_err("expecting pairs of match-spec <value>\n"); return -EINVAL; } for (i = 0; i < nwords; i += 2) { char *keyword = words[i]; char *arg = words[i+1]; if (!strcmp(keyword, "func")) { rc = check_set(&query->function, arg, "func"); } else if (!strcmp(keyword, "file")) { if (check_set(&query->filename, arg, "file")) return -EINVAL; /* tail :$info is function or line-range */ fline = strchr(query->filename, ':'); if (!fline) continue; *fline++ = '\0'; if (isalpha(*fline) || *fline == '*' || *fline == '?') { /* take as function name */ if (check_set(&query->function, fline, "func")) return -EINVAL; } else { if (parse_linerange(query, fline)) return -EINVAL; } } else if (!strcmp(keyword, "module")) { rc = check_set(&query->module, arg, "module"); } else if (!strcmp(keyword, "format")) { string_unescape_inplace(arg, UNESCAPE_SPACE | UNESCAPE_OCTAL | UNESCAPE_SPECIAL); rc = check_set(&query->format, arg, "format"); } else if (!strcmp(keyword, "line")) { if (parse_linerange(query, arg)) return -EINVAL; } else if (!strcmp(keyword, "class")) { rc = check_set(&query->class_string, arg, "class"); } else { pr_err("unknown keyword \"%s\"\n", keyword); return -EINVAL; } if (rc) return rc; } if (!query->module && modname) /* * support $modname.dyndbg=<multiple queries>, when * not given in the query itself */ query->module = modname; vpr_info_dq(query, "parsed"); return 0; } /* * Parse `str' as a flags specification, format [-+=][p]+. * Sets up *maskp and *flagsp to be used when changing the * flags fields of matched _ddebug's. Returns 0 on success * or <0 on error. 
*/ static int ddebug_parse_flags(const char *str, struct flag_settings *modifiers) { int op, i; switch (*str) { case '+': case '-': case '=': op = *str++; break; default: pr_err("bad flag-op %c, at start of %s\n", *str, str); return -EINVAL; } v3pr_info("op='%c'\n", op); for (; *str ; ++str) { for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) { if (*str == opt_array[i].opt_char) { modifiers->flags |= opt_array[i].flag; break; } } if (i < 0) { pr_err("unknown flag '%c'\n", *str); return -EINVAL; } } v3pr_info("flags=0x%x\n", modifiers->flags); /* calculate final flags, mask based upon op */ switch (op) { case '=': /* modifiers->flags already set */ modifiers->mask = 0; break; case '+': modifiers->mask = ~0U; break; case '-': modifiers->mask = ~modifiers->flags; modifiers->flags = 0; break; } v3pr_info("*flagsp=0x%x *maskp=0x%x\n", modifiers->flags, modifiers->mask); return 0; } static int ddebug_exec_query(char *query_string, const char *modname) { struct flag_settings modifiers = {}; struct ddebug_query query = {}; #define MAXWORDS 9 int nwords, nfound; char *words[MAXWORDS]; nwords = ddebug_tokenize(query_string, words, MAXWORDS); if (nwords <= 0) { pr_err("tokenize failed\n"); return -EINVAL; } /* check flags 1st (last arg) so query is pairs of spec,val */ if (ddebug_parse_flags(words[nwords-1], &modifiers)) { pr_err("flags parse failed\n"); return -EINVAL; } if (ddebug_parse_query(words, nwords-1, &query, modname)) { pr_err("query parse failed\n"); return -EINVAL; } /* actually go and implement the change */ nfound = ddebug_change(&query, &modifiers); vpr_info_dq(&query, nfound ? "applied" : "no-match"); return nfound; } /* handle multiple queries in query string, continue on error, return last error or number of matching callsites. Module name is either in param (for boot arg) or perhaps in query string. */ static int ddebug_exec_queries(char *query, const char *modname) { char *split; int i, errs = 0, exitcode = 0, rc, nfound = 0; for (i = 0; query; query = split) { split = strpbrk(query, ";\n"); if (split) *split++ = '\0'; query = skip_spaces(query); if (!query || !*query || *query == '#') continue; vpr_info("query %d: \"%s\" mod:%s\n", i, query, modname ?: "*"); rc = ddebug_exec_query(query, modname); if (rc < 0) { errs++; exitcode = rc; } else { nfound += rc; } i++; } if (i) v2pr_info("processed %d queries, with %d matches, %d errs\n", i, nfound, errs); if (exitcode) return exitcode; return nfound; } /* apply a new bitmap to the sys-knob's current bit-state */ static int ddebug_apply_class_bitmap(const struct ddebug_class_param *dcp, unsigned long *new_bits, unsigned long *old_bits) { #define QUERY_SIZE 128 char query[QUERY_SIZE]; const struct ddebug_class_map *map = dcp->map; int matches = 0; int bi, ct; v2pr_info("apply: 0x%lx to: 0x%lx\n", *new_bits, *old_bits); for (bi = 0; bi < map->length; bi++) { if (test_bit(bi, new_bits) == test_bit(bi, old_bits)) continue; snprintf(query, QUERY_SIZE, "class %s %c%s", map->class_names[bi], test_bit(bi, new_bits) ? '+' : '-', dcp->flags); ct = ddebug_exec_queries(query, NULL); matches += ct; v2pr_info("bit_%d: %d matches on class: %s -> 0x%lx\n", bi, ct, map->class_names[bi], *new_bits); } return matches; } /* stub to later conditionally add "$module." 
prefix where not already done */ #define KP_NAME(kp) kp->name #define CLASSMAP_BITMASK(width) ((1UL << (width)) - 1) /* accept comma-separated-list of [+-] classnames */ static int param_set_dyndbg_classnames(const char *instr, const struct kernel_param *kp) { const struct ddebug_class_param *dcp = kp->arg; const struct ddebug_class_map *map = dcp->map; unsigned long curr_bits, old_bits; char *cl_str, *p, *tmp; int cls_id, totct = 0; bool wanted; cl_str = tmp = kstrdup(instr, GFP_KERNEL); p = strchr(cl_str, '\n'); if (p) *p = '\0'; /* start with previously set state-bits, then modify */ curr_bits = old_bits = *dcp->bits; vpr_info("\"%s\" > %s:0x%lx\n", cl_str, KP_NAME(kp), curr_bits); for (; cl_str; cl_str = p) { p = strchr(cl_str, ','); if (p) *p++ = '\0'; if (*cl_str == '-') { wanted = false; cl_str++; } else { wanted = true; if (*cl_str == '+') cl_str++; } cls_id = match_string(map->class_names, map->length, cl_str); if (cls_id < 0) { pr_err("%s unknown to %s\n", cl_str, KP_NAME(kp)); continue; } /* have one or more valid class_ids of one *_NAMES type */ switch (map->map_type) { case DD_CLASS_TYPE_DISJOINT_NAMES: /* the +/- pertains to a single bit */ if (test_bit(cls_id, &curr_bits) == wanted) { v3pr_info("no change on %s\n", cl_str); continue; } curr_bits ^= BIT(cls_id); totct += ddebug_apply_class_bitmap(dcp, &curr_bits, dcp->bits); *dcp->bits = curr_bits; v2pr_info("%s: changed bit %d:%s\n", KP_NAME(kp), cls_id, map->class_names[cls_id]); break; case DD_CLASS_TYPE_LEVEL_NAMES: /* cls_id = N in 0..max. wanted +/- determines N or N-1 */ old_bits = CLASSMAP_BITMASK(*dcp->lvl); curr_bits = CLASSMAP_BITMASK(cls_id + (wanted ? 1 : 0 )); totct += ddebug_apply_class_bitmap(dcp, &curr_bits, &old_bits); *dcp->lvl = (cls_id + (wanted ? 1 : 0)); v2pr_info("%s: changed bit-%d: \"%s\" %lx->%lx\n", KP_NAME(kp), cls_id, map->class_names[cls_id], old_bits, curr_bits); break; default: pr_err("illegal map-type value %d\n", map->map_type); } } kfree(tmp); vpr_info("total matches: %d\n", totct); return 0; } /** * param_set_dyndbg_classes - class FOO >control * @instr: string echo>d to sysfs, input depends on map_type * @kp: kp->arg has state: bits/lvl, map, map_type * * Enable/disable prdbgs by their class, as given in the arguments to * DECLARE_DYNDBG_CLASSMAP. For LEVEL map-types, enforce relative * levels by bitpos. * * Returns: 0 or <0 if error. */ int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp) { const struct ddebug_class_param *dcp = kp->arg; const struct ddebug_class_map *map = dcp->map; unsigned long inrep, new_bits, old_bits; int rc, totct = 0; switch (map->map_type) { case DD_CLASS_TYPE_DISJOINT_NAMES: case DD_CLASS_TYPE_LEVEL_NAMES: /* handle [+-]classnames list separately, we are done here */ return param_set_dyndbg_classnames(instr, kp); case DD_CLASS_TYPE_DISJOINT_BITS: case DD_CLASS_TYPE_LEVEL_NUM: /* numeric input, accept and fall-thru */ rc = kstrtoul(instr, 0, &inrep); if (rc) { pr_err("expecting numeric input: %s > %s\n", instr, KP_NAME(kp)); return -EINVAL; } break; default: pr_err("%s: bad map type: %d\n", KP_NAME(kp), map->map_type); return -EINVAL; } /* only _BITS,_NUM (numeric) map-types get here */ switch (map->map_type) { case DD_CLASS_TYPE_DISJOINT_BITS: /* expect bits. 
mask and warn if too many */ if (inrep & ~CLASSMAP_BITMASK(map->length)) { pr_warn("%s: input: 0x%lx exceeds mask: 0x%lx, masking\n", KP_NAME(kp), inrep, CLASSMAP_BITMASK(map->length)); inrep &= CLASSMAP_BITMASK(map->length); } v2pr_info("bits:%lx > %s\n", inrep, KP_NAME(kp)); totct += ddebug_apply_class_bitmap(dcp, &inrep, dcp->bits); *dcp->bits = inrep; break; case DD_CLASS_TYPE_LEVEL_NUM: /* input is bitpos, of highest verbosity to be enabled */ if (inrep > map->length) { pr_warn("%s: level:%ld exceeds max:%d, clamping\n", KP_NAME(kp), inrep, map->length); inrep = map->length; } old_bits = CLASSMAP_BITMASK(*dcp->lvl); new_bits = CLASSMAP_BITMASK(inrep); v2pr_info("lvl:%ld bits:0x%lx > %s\n", inrep, new_bits, KP_NAME(kp)); totct += ddebug_apply_class_bitmap(dcp, &new_bits, &old_bits); *dcp->lvl = inrep; break; default: pr_warn("%s: bad map type: %d\n", KP_NAME(kp), map->map_type); } vpr_info("%s: total matches: %d\n", KP_NAME(kp), totct); return 0; } EXPORT_SYMBOL(param_set_dyndbg_classes); /** * param_get_dyndbg_classes - classes reader * @buffer: string description of controlled bits -> classes * @kp: kp->arg has state: bits, map * * Reads last written state, underlying prdbg state may have been * altered by direct >control. Displays 0x for DISJOINT, 0-N for * LEVEL Returns: #chars written or <0 on error */ int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp) { const struct ddebug_class_param *dcp = kp->arg; const struct ddebug_class_map *map = dcp->map; switch (map->map_type) { case DD_CLASS_TYPE_DISJOINT_NAMES: case DD_CLASS_TYPE_DISJOINT_BITS: return scnprintf(buffer, PAGE_SIZE, "0x%lx\n", *dcp->bits); case DD_CLASS_TYPE_LEVEL_NAMES: case DD_CLASS_TYPE_LEVEL_NUM: return scnprintf(buffer, PAGE_SIZE, "%d\n", *dcp->lvl); default: return -1; } } EXPORT_SYMBOL(param_get_dyndbg_classes); const struct kernel_param_ops param_ops_dyndbg_classes = { .set = param_set_dyndbg_classes, .get = param_get_dyndbg_classes, }; EXPORT_SYMBOL(param_ops_dyndbg_classes); #define PREFIX_SIZE 128 static int remaining(int wrote) { if (PREFIX_SIZE - wrote > 0) return PREFIX_SIZE - wrote; return 0; } static char *__dynamic_emit_prefix(const struct _ddebug *desc, char *buf) { int pos_after_tid; int pos = 0; if (desc->flags & _DPRINTK_FLAGS_INCL_TID) { if (in_interrupt()) pos += snprintf(buf + pos, remaining(pos), "<intr> "); else pos += snprintf(buf + pos, remaining(pos), "[%d] ", task_pid_vnr(current)); } pos_after_tid = pos; if (desc->flags & _DPRINTK_FLAGS_INCL_MODNAME) pos += snprintf(buf + pos, remaining(pos), "%s:", desc->modname); if (desc->flags & _DPRINTK_FLAGS_INCL_FUNCNAME) pos += snprintf(buf + pos, remaining(pos), "%s:", desc->function); if (desc->flags & _DPRINTK_FLAGS_INCL_SOURCENAME) pos += snprintf(buf + pos, remaining(pos), "%s:", trim_prefix(desc->filename)); if (desc->flags & _DPRINTK_FLAGS_INCL_LINENO) pos += snprintf(buf + pos, remaining(pos), "%d:", desc->lineno); if (pos - pos_after_tid) pos += snprintf(buf + pos, remaining(pos), " "); if (pos >= PREFIX_SIZE) buf[PREFIX_SIZE - 1] = '\0'; return buf; } static inline char *dynamic_emit_prefix(struct _ddebug *desc, char *buf) { if (unlikely(desc->flags & _DPRINTK_FLAGS_INCL_ANY)) return __dynamic_emit_prefix(desc, buf); return buf; } void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...) 
{ va_list args; struct va_format vaf; char buf[PREFIX_SIZE] = ""; BUG_ON(!descriptor); BUG_ON(!fmt); va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_DEBUG "%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf); va_end(args); } EXPORT_SYMBOL(__dynamic_pr_debug); void __dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev, const char *fmt, ...) { struct va_format vaf; va_list args; BUG_ON(!descriptor); BUG_ON(!fmt); va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (!dev) { printk(KERN_DEBUG "(NULL device *): %pV", &vaf); } else { char buf[PREFIX_SIZE] = ""; dev_printk_emit(LOGLEVEL_DEBUG, dev, "%s%s %s: %pV", dynamic_emit_prefix(descriptor, buf), dev_driver_string(dev), dev_name(dev), &vaf); } va_end(args); } EXPORT_SYMBOL(__dynamic_dev_dbg); #ifdef CONFIG_NET void __dynamic_netdev_dbg(struct _ddebug *descriptor, const struct net_device *dev, const char *fmt, ...) { struct va_format vaf; va_list args; BUG_ON(!descriptor); BUG_ON(!fmt); va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (dev && dev->dev.parent) { char buf[PREFIX_SIZE] = ""; dev_printk_emit(LOGLEVEL_DEBUG, dev->dev.parent, "%s%s %s %s%s: %pV", dynamic_emit_prefix(descriptor, buf), dev_driver_string(dev->dev.parent), dev_name(dev->dev.parent), netdev_name(dev), netdev_reg_state(dev), &vaf); } else if (dev) { printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev), netdev_reg_state(dev), &vaf); } else { printk(KERN_DEBUG "(NULL net_device): %pV", &vaf); } va_end(args); } EXPORT_SYMBOL(__dynamic_netdev_dbg); #endif #if IS_ENABLED(CONFIG_INFINIBAND) void __dynamic_ibdev_dbg(struct _ddebug *descriptor, const struct ib_device *ibdev, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (ibdev && ibdev->dev.parent) { char buf[PREFIX_SIZE] = ""; dev_printk_emit(LOGLEVEL_DEBUG, ibdev->dev.parent, "%s%s %s %s: %pV", dynamic_emit_prefix(descriptor, buf), dev_driver_string(ibdev->dev.parent), dev_name(ibdev->dev.parent), dev_name(&ibdev->dev), &vaf); } else if (ibdev) { printk(KERN_DEBUG "%s: %pV", dev_name(&ibdev->dev), &vaf); } else { printk(KERN_DEBUG "(NULL ib_device): %pV", &vaf); } va_end(args); } EXPORT_SYMBOL(__dynamic_ibdev_dbg); #endif /* * Install a noop handler to make dyndbg look like a normal kernel cli param. * This avoids warnings about dyndbg being an unknown cli param when supplied * by a user. */ static __init int dyndbg_setup(char *str) { return 1; } __setup("dyndbg=", dyndbg_setup); /* * File_ops->write method for <debugfs>/dynamic_debug/control. Gathers the * command text from userspace, parses and executes it. */ #define USER_BUF_PAGE 4096 static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp) { char *tmpbuf; int ret; if (len == 0) return 0; if (len > USER_BUF_PAGE - 1) { pr_warn("expected <%d bytes into control\n", USER_BUF_PAGE); return -E2BIG; } tmpbuf = memdup_user_nul(ubuf, len); if (IS_ERR(tmpbuf)) return PTR_ERR(tmpbuf); v2pr_info("read %zu bytes from userspace\n", len); ret = ddebug_exec_queries(tmpbuf, NULL); kfree(tmpbuf); if (ret < 0) return ret; *offp += len; return len; } /* * Set the iterator to point to the first _ddebug object * and return a pointer to that first object. Returns * NULL if there are no _ddebugs at all. 
*/ static struct _ddebug *ddebug_iter_first(struct ddebug_iter *iter) { if (list_empty(&ddebug_tables)) { iter->table = NULL; return NULL; } iter->table = list_entry(ddebug_tables.next, struct ddebug_table, link); iter->idx = iter->table->num_ddebugs; return &iter->table->ddebugs[--iter->idx]; } /* * Advance the iterator to point to the next _ddebug * object from the one the iterator currently points at, * and returns a pointer to the new _ddebug. Returns * NULL if the iterator has seen all the _ddebugs. */ static struct _ddebug *ddebug_iter_next(struct ddebug_iter *iter) { if (iter->table == NULL) return NULL; if (--iter->idx < 0) { /* iterate to next table */ if (list_is_last(&iter->table->link, &ddebug_tables)) { iter->table = NULL; return NULL; } iter->table = list_entry(iter->table->link.next, struct ddebug_table, link); iter->idx = iter->table->num_ddebugs; --iter->idx; } return &iter->table->ddebugs[iter->idx]; } /* * Seq_ops start method. Called at the start of every * read() call from userspace. Takes the ddebug_lock and * seeks the seq_file's iterator to the given position. */ static void *ddebug_proc_start(struct seq_file *m, loff_t *pos) { struct ddebug_iter *iter = m->private; struct _ddebug *dp; int n = *pos; mutex_lock(&ddebug_lock); if (!n) return SEQ_START_TOKEN; if (n < 0) return NULL; dp = ddebug_iter_first(iter); while (dp != NULL && --n > 0) dp = ddebug_iter_next(iter); return dp; } /* * Seq_ops next method. Called several times within a read() * call from userspace, with ddebug_lock held. Walks to the * next _ddebug object with a special case for the header line. */ static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos) { struct ddebug_iter *iter = m->private; struct _ddebug *dp; if (p == SEQ_START_TOKEN) dp = ddebug_iter_first(iter); else dp = ddebug_iter_next(iter); ++*pos; return dp; } #define class_in_range(class_id, map) \ (class_id >= map->base && class_id < map->base + map->length) static const char *ddebug_class_name(struct ddebug_iter *iter, struct _ddebug *dp) { struct ddebug_class_map *map; list_for_each_entry(map, &iter->table->maps, link) if (class_in_range(dp->class_id, map)) return map->class_names[dp->class_id - map->base]; return NULL; } /* * Seq_ops show method. Called several times within a read() * call from userspace, with ddebug_lock held. Formats the * current _ddebug as a single human-readable line, with a * special case for the header line. */ static int ddebug_proc_show(struct seq_file *m, void *p) { struct ddebug_iter *iter = m->private; struct _ddebug *dp = p; struct flagsbuf flags; char const *class; if (p == SEQ_START_TOKEN) { seq_puts(m, "# filename:lineno [module]function flags format\n"); return 0; } seq_printf(m, "%s:%u [%s]%s =%s \"", trim_prefix(dp->filename), dp->lineno, iter->table->mod_name, dp->function, ddebug_describe_flags(dp->flags, &flags)); seq_escape_str(m, dp->format, ESCAPE_SPACE, "\t\r\n\""); seq_puts(m, "\""); if (dp->class_id != _DPRINTK_CLASS_DFLT) { class = ddebug_class_name(iter, dp); if (class) seq_printf(m, " class:%s", class); else seq_printf(m, " class unknown, _id:%d", dp->class_id); } seq_puts(m, "\n"); return 0; } /* * Seq_ops stop method. Called at the end of each read() * call from userspace. Drops ddebug_lock. 
*/ static void ddebug_proc_stop(struct seq_file *m, void *p) { mutex_unlock(&ddebug_lock); } static const struct seq_operations ddebug_proc_seqops = { .start = ddebug_proc_start, .next = ddebug_proc_next, .show = ddebug_proc_show, .stop = ddebug_proc_stop }; static int ddebug_proc_open(struct inode *inode, struct file *file) { return seq_open_private(file, &ddebug_proc_seqops, sizeof(struct ddebug_iter)); } static const struct file_operations ddebug_proc_fops = { .owner = THIS_MODULE, .open = ddebug_proc_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, .write = ddebug_proc_write }; static const struct proc_ops proc_fops = { .proc_open = ddebug_proc_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = seq_release_private, .proc_write = ddebug_proc_write }; static void ddebug_attach_module_classes(struct ddebug_table *dt, struct ddebug_class_map *classes, int num_classes) { struct ddebug_class_map *cm; int i, j, ct = 0; for (cm = classes, i = 0; i < num_classes; i++, cm++) { if (!strcmp(cm->mod_name, dt->mod_name)) { v2pr_info("class[%d]: module:%s base:%d len:%d ty:%d\n", i, cm->mod_name, cm->base, cm->length, cm->map_type); for (j = 0; j < cm->length; j++) v3pr_info(" %d: %d %s\n", j + cm->base, j, cm->class_names[j]); list_add(&cm->link, &dt->maps); ct++; } } if (ct) vpr_info("module:%s attached %d classes\n", dt->mod_name, ct); } /* * Allocate a new ddebug_table for the given module * and add it to the global list. */ static int ddebug_add_module(struct _ddebug_info *di, const char *modname) { struct ddebug_table *dt; v3pr_info("add-module: %s.%d sites\n", modname, di->num_descs); if (!di->num_descs) { v3pr_info(" skip %s\n", modname); return 0; } dt = kzalloc(sizeof(*dt), GFP_KERNEL); if (dt == NULL) { pr_err("error adding module: %s\n", modname); return -ENOMEM; } /* * For built-in modules, name lives in .rodata and is * immortal. For loaded modules, name points at the name[] * member of struct module, which lives at least as long as * this struct ddebug_table. */ dt->mod_name = modname; dt->ddebugs = di->descs; dt->num_ddebugs = di->num_descs; INIT_LIST_HEAD(&dt->link); INIT_LIST_HEAD(&dt->maps); if (di->classes && di->num_classes) ddebug_attach_module_classes(dt, di->classes, di->num_classes); mutex_lock(&ddebug_lock); list_add_tail(&dt->link, &ddebug_tables); mutex_unlock(&ddebug_lock); vpr_info("%3u debug prints in module %s\n", di->num_descs, modname); return 0; } /* helper for ddebug_dyndbg_(boot|module)_param_cb */ static int ddebug_dyndbg_param_cb(char *param, char *val, const char *modname, int on_err) { char *sep; sep = strchr(param, '.'); if (sep) { /* needed only for ddebug_dyndbg_boot_param_cb */ *sep = '\0'; modname = param; param = sep + 1; } if (strcmp(param, "dyndbg")) return on_err; /* determined by caller */ ddebug_exec_queries((val ? val : "+p"), modname); return 0; /* query failure shouldn't stop module load */ } /* handle both dyndbg and $module.dyndbg params at boot */ static int ddebug_dyndbg_boot_param_cb(char *param, char *val, const char *unused, void *arg) { vpr_info("%s=\"%s\"\n", param, val); return ddebug_dyndbg_param_cb(param, val, NULL, 0); } /* * modprobe foo finds foo.params in boot-args, strips "foo.", and * passes them to load_module(). This callback gets unknown params, * processes dyndbg params, rejects others. 
*/ int ddebug_dyndbg_module_param_cb(char *param, char *val, const char *module) { vpr_info("module: %s %s=\"%s\"\n", module, param, val); return ddebug_dyndbg_param_cb(param, val, module, -ENOENT); } static void ddebug_table_free(struct ddebug_table *dt) { list_del_init(&dt->link); kfree(dt); } #ifdef CONFIG_MODULES /* * Called in response to a module being unloaded. Removes * any ddebug_table's which point at the module. */ static int ddebug_remove_module(const char *mod_name) { struct ddebug_table *dt, *nextdt; int ret = -ENOENT; mutex_lock(&ddebug_lock); list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) { if (dt->mod_name == mod_name) { ddebug_table_free(dt); ret = 0; break; } } mutex_unlock(&ddebug_lock); if (!ret) v2pr_info("removed module \"%s\"\n", mod_name); return ret; } static int ddebug_module_notify(struct notifier_block *self, unsigned long val, void *data) { struct module *mod = data; int ret = 0; switch (val) { case MODULE_STATE_COMING: ret = ddebug_add_module(&mod->dyndbg_info, mod->name); if (ret) WARN(1, "Failed to allocate memory: dyndbg may not work properly.\n"); break; case MODULE_STATE_GOING: ddebug_remove_module(mod->name); break; } return notifier_from_errno(ret); } static struct notifier_block ddebug_module_nb = { .notifier_call = ddebug_module_notify, .priority = 0, /* dynamic debug depends on jump label */ }; #endif /* CONFIG_MODULES */ static void ddebug_remove_all_tables(void) { mutex_lock(&ddebug_lock); while (!list_empty(&ddebug_tables)) { struct ddebug_table *dt = list_entry(ddebug_tables.next, struct ddebug_table, link); ddebug_table_free(dt); } mutex_unlock(&ddebug_lock); } static __initdata int ddebug_init_success; static int __init dynamic_debug_init_control(void) { struct proc_dir_entry *procfs_dir; struct dentry *debugfs_dir; if (!ddebug_init_success) return -ENODEV; /* Create the control file in debugfs if it is enabled */ if (debugfs_initialized()) { debugfs_dir = debugfs_create_dir("dynamic_debug", NULL); debugfs_create_file("control", 0644, debugfs_dir, NULL, &ddebug_proc_fops); } /* Also create the control file in procfs */ procfs_dir = proc_mkdir("dynamic_debug", NULL); if (procfs_dir) proc_create("control", 0644, procfs_dir, &proc_fops); return 0; } static int __init dynamic_debug_init(void) { struct _ddebug *iter, *iter_mod_start; int ret, i, mod_sites, mod_ct; const char *modname; char *cmdline; struct _ddebug_info di = { .descs = __start___dyndbg, .classes = __start___dyndbg_classes, .num_descs = __stop___dyndbg - __start___dyndbg, .num_classes = __stop___dyndbg_classes - __start___dyndbg_classes, }; #ifdef CONFIG_MODULES ret = register_module_notifier(&ddebug_module_nb); if (ret) { pr_warn("Failed to register dynamic debug module notifier\n"); return ret; } #endif /* CONFIG_MODULES */ if (&__start___dyndbg == &__stop___dyndbg) { if (IS_ENABLED(CONFIG_DYNAMIC_DEBUG)) { pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n"); return 1; } pr_info("Ignore empty _ddebug table in a CONFIG_DYNAMIC_DEBUG_CORE build\n"); ddebug_init_success = 1; return 0; } iter = iter_mod_start = __start___dyndbg; modname = iter->modname; i = mod_sites = mod_ct = 0; for (; iter < __stop___dyndbg; iter++, i++, mod_sites++) { if (strcmp(modname, iter->modname)) { mod_ct++; di.num_descs = mod_sites; di.descs = iter_mod_start; ret = ddebug_add_module(&di, modname); if (ret) goto out_err; mod_sites = 0; modname = iter->modname; iter_mod_start = iter; } } di.num_descs = mod_sites; di.descs = iter_mod_start; ret = ddebug_add_module(&di, modname); if 
(ret) goto out_err; ddebug_init_success = 1; vpr_info("%d prdebugs in %d modules, %d KiB in ddebug tables, %d kiB in __dyndbg section\n", i, mod_ct, (int)((mod_ct * sizeof(struct ddebug_table)) >> 10), (int)((i * sizeof(struct _ddebug)) >> 10)); if (di.num_classes) v2pr_info(" %d builtin ddebug class-maps\n", di.num_classes); /* now that ddebug tables are loaded, process all boot args * again to find and activate queries given in dyndbg params. * While this has already been done for known boot params, it * ignored the unknown ones (dyndbg in particular). Reusing * parse_args avoids ad-hoc parsing. This will also attempt * to activate queries for not-yet-loaded modules, which is * slightly noisy if verbose, but harmless. */ cmdline = kstrdup(saved_command_line, GFP_KERNEL); parse_args("dyndbg params", cmdline, NULL, 0, 0, 0, NULL, &ddebug_dyndbg_boot_param_cb); kfree(cmdline); return 0; out_err: ddebug_remove_all_tables(); return 0; } /* Allow early initialization for boot messages via boot param */ early_initcall(dynamic_debug_init); /* Debugfs setup must be done later */ fs_initcall(dynamic_debug_init_control);
linux-master
lib/dynamic_debug.c
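The classmap plumbing above (param_ops_dyndbg_classes, struct ddebug_class_param, DECLARE_DYNDBG_CLASSMAP) is only half the picture; a client module supplies the other half. The sketch below is an annotation, not part of lib/dynamic_debug.c: the module name "foo", the class names, the __foo_debug variable and the "p" flag string are invented for illustration, following the pattern of in-tree users such as drivers/gpu/drm/drm_print.c, and the exact field spellings should be checked against include/linux/dynamic_debug.h.

/* Hypothetical module-side hookup for the classmap param ops above. */
#include <linux/module.h>
#include <linux/dynamic_debug.h>

static unsigned long __foo_debug;	/* state read back by param_get_dyndbg_classes() */

DECLARE_DYNDBG_CLASSMAP(foo_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
			"FOO_UT_CORE",
			"FOO_UT_KMS",
			"FOO_UT_ATOMIC");

static struct ddebug_class_param foo_debug_bits = {
	.bits = &__foo_debug,
	.flags = "p",		/* class toggles flip the 'p' (print) flag */
	.map = &foo_debug_classes,
};
module_param_cb(debug, &param_ops_dyndbg_classes, &foo_debug_bits, 0600);

/*
 * With this in place, "echo 0x3 > /sys/module/foo/parameters/debug" lands in
 * param_set_dyndbg_classes() above and enables the FOO_UT_CORE and FOO_UT_KMS
 * classes; reads go through param_get_dyndbg_classes().
 */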
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2016 Thomas Gleixner. * Copyright (C) 2016-2017 Christoph Hellwig. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/cpu.h> #include <linux/sort.h> #include <linux/group_cpus.h> #ifdef CONFIG_SMP static void grp_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk, unsigned int cpus_per_grp) { const struct cpumask *siblmsk; int cpu, sibl; for ( ; cpus_per_grp > 0; ) { cpu = cpumask_first(nmsk); /* Should not happen, but I'm too lazy to think about it */ if (cpu >= nr_cpu_ids) return; cpumask_clear_cpu(cpu, nmsk); cpumask_set_cpu(cpu, irqmsk); cpus_per_grp--; /* If the cpu has siblings, use them first */ siblmsk = topology_sibling_cpumask(cpu); for (sibl = -1; cpus_per_grp > 0; ) { sibl = cpumask_next(sibl, siblmsk); if (sibl >= nr_cpu_ids) break; if (!cpumask_test_and_clear_cpu(sibl, nmsk)) continue; cpumask_set_cpu(sibl, irqmsk); cpus_per_grp--; } } } static cpumask_var_t *alloc_node_to_cpumask(void) { cpumask_var_t *masks; int node; masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL); if (!masks) return NULL; for (node = 0; node < nr_node_ids; node++) { if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL)) goto out_unwind; } return masks; out_unwind: while (--node >= 0) free_cpumask_var(masks[node]); kfree(masks); return NULL; } static void free_node_to_cpumask(cpumask_var_t *masks) { int node; for (node = 0; node < nr_node_ids; node++) free_cpumask_var(masks[node]); kfree(masks); } static void build_node_to_cpumask(cpumask_var_t *masks) { int cpu; for_each_possible_cpu(cpu) cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]); } static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask, const struct cpumask *mask, nodemask_t *nodemsk) { int n, nodes = 0; /* Calculate the number of nodes in the supplied affinity mask */ for_each_node(n) { if (cpumask_intersects(mask, node_to_cpumask[n])) { node_set(n, *nodemsk); nodes++; } } return nodes; } struct node_groups { unsigned id; union { unsigned ngroups; unsigned ncpus; }; }; static int ncpus_cmp_func(const void *l, const void *r) { const struct node_groups *ln = l; const struct node_groups *rn = r; return ln->ncpus - rn->ncpus; } /* * Allocate group number for each node, so that for each node: * * 1) the allocated number is >= 1 * * 2) the allocated number is <= active CPU number of this node * * The actual allocated total groups may be less than @numgrps when * active total CPU number is less than @numgrps. * * Active CPUs means the CPUs in '@cpu_mask AND @node_to_cpumask[]' * for each node. */ static void alloc_nodes_groups(unsigned int numgrps, cpumask_var_t *node_to_cpumask, const struct cpumask *cpu_mask, const nodemask_t nodemsk, struct cpumask *nmsk, struct node_groups *node_groups) { unsigned n, remaining_ncpus = 0; for (n = 0; n < nr_node_ids; n++) { node_groups[n].id = n; node_groups[n].ncpus = UINT_MAX; } for_each_node_mask(n, nodemsk) { unsigned ncpus; cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]); ncpus = cpumask_weight(nmsk); if (!ncpus) continue; remaining_ncpus += ncpus; node_groups[n].ncpus = ncpus; } numgrps = min_t(unsigned, remaining_ncpus, numgrps); sort(node_groups, nr_node_ids, sizeof(node_groups[0]), ncpus_cmp_func, NULL); /* * Allocate groups for each node according to the ratio of this * node's nr_cpus to remaining un-assigned ncpus. 'numgrps' is * bigger than number of active numa nodes. Always start the * allocation from the node with minimized nr_cpus. 
 * * This way guarantees that each active node gets allocated at * least one group, and the theory is simple: over-allocation * is only done when this node is assigned just one group, so * other nodes will be allocated >= 1 groups, since 'numgrps' is * bigger than number of numa nodes. * * One perfect invariant is that number of allocated groups for * each node is <= CPU count of this node: * * 1) suppose there are two nodes: A and B * ncpu(X) is CPU count of node X * grps(X) is the group count allocated to node X via this * algorithm * * ncpu(A) <= ncpu(B) * ncpu(A) + ncpu(B) = N * grps(A) + grps(B) = G * * grps(A) = max(1, round_down(G * ncpu(A) / N)) * grps(B) = G - grps(A) * * both N and G are integers, and 2 <= G <= N, suppose * G = N - delta, and 0 <= delta <= N - 2 * * 2) obviously grps(A) <= ncpu(A) because: * * if grps(A) is 1, then grps(A) <= ncpu(A) given * ncpu(A) >= 1 * * otherwise, * grps(A) <= G * ncpu(A) / N <= ncpu(A), given G <= N * * 3) prove how grps(B) <= ncpu(B): * * if round_down(G * ncpu(A) / N) == 0, grps(B) won't be * over-allocated, so grps(B) <= ncpu(B), * * otherwise: * * grps(A) = * round_down(G * ncpu(A) / N) = * round_down((N - delta) * ncpu(A) / N) = * round_down((N * ncpu(A) - delta * ncpu(A)) / N) >= * round_down((N * ncpu(A) - delta * N) / N) = * ncpu(A) - delta * * then: * * grps(A) - G >= ncpu(A) - delta - G * => * G - grps(A) <= G + delta - ncpu(A) * => * grps(B) <= N - ncpu(A) * => * grps(B) <= ncpu(B) * * For nodes >= 3, it can be thought of as one node and another big * node, since that is exactly how this algorithm is implemented, * and we always re-calculate 'remaining_ncpus' & 'numgrps', and * finally for each node X: grps(X) <= ncpu(X). * */ for (n = 0; n < nr_node_ids; n++) { unsigned ngroups, ncpus; if (node_groups[n].ncpus == UINT_MAX) continue; WARN_ON_ONCE(numgrps == 0); ncpus = node_groups[n].ncpus; ngroups = max_t(unsigned, 1, numgrps * ncpus / remaining_ncpus); WARN_ON_ONCE(ngroups > ncpus); node_groups[n].ngroups = ngroups; remaining_ncpus -= ncpus; numgrps -= ngroups; } } static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps, cpumask_var_t *node_to_cpumask, const struct cpumask *cpu_mask, struct cpumask *nmsk, struct cpumask *masks) { unsigned int i, n, nodes, cpus_per_grp, extra_grps, done = 0; unsigned int last_grp = numgrps; unsigned int curgrp = startgrp; nodemask_t nodemsk = NODE_MASK_NONE; struct node_groups *node_groups; if (cpumask_empty(cpu_mask)) return 0; nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk); /* * If the number of nodes in the mask is greater than or equal to the * number of groups, we just spread the groups across the nodes. 
*/ if (numgrps <= nodes) { for_each_node_mask(n, nodemsk) { /* Ensure that only CPUs which are in both masks are set */ cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]); cpumask_or(&masks[curgrp], &masks[curgrp], nmsk); if (++curgrp == last_grp) curgrp = 0; } return numgrps; } node_groups = kcalloc(nr_node_ids, sizeof(struct node_groups), GFP_KERNEL); if (!node_groups) return -ENOMEM; /* allocate group number for each node */ alloc_nodes_groups(numgrps, node_to_cpumask, cpu_mask, nodemsk, nmsk, node_groups); for (i = 0; i < nr_node_ids; i++) { unsigned int ncpus, v; struct node_groups *nv = &node_groups[i]; if (nv->ngroups == UINT_MAX) continue; /* Get the cpus on this node which are in the mask */ cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]); ncpus = cpumask_weight(nmsk); if (!ncpus) continue; WARN_ON_ONCE(nv->ngroups > ncpus); /* Account for rounding errors */ extra_grps = ncpus - nv->ngroups * (ncpus / nv->ngroups); /* Spread allocated groups on CPUs of the current node */ for (v = 0; v < nv->ngroups; v++, curgrp++) { cpus_per_grp = ncpus / nv->ngroups; /* Account for extra groups to compensate rounding errors */ if (extra_grps) { cpus_per_grp++; --extra_grps; } /* * wrapping has to be considered given 'startgrp' * may start anywhere */ if (curgrp >= last_grp) curgrp = 0; grp_spread_init_one(&masks[curgrp], nmsk, cpus_per_grp); } done += nv->ngroups; } kfree(node_groups); return done; } /** * group_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality * @numgrps: number of groups * * Return: cpumask array if successful, NULL otherwise. And each element * includes CPUs assigned to this group * * Try to put close CPUs from viewpoint of CPU and NUMA locality into * same group, and run two-stage grouping: * 1) allocate present CPUs on these groups evenly first * 2) allocate other possible CPUs on these groups evenly * * We guarantee in the resulted grouping that all CPUs are covered, and * no same CPU is assigned to multiple groups */ struct cpumask *group_cpus_evenly(unsigned int numgrps) { unsigned int curgrp = 0, nr_present = 0, nr_others = 0; cpumask_var_t *node_to_cpumask; cpumask_var_t nmsk, npresmsk; int ret = -ENOMEM; struct cpumask *masks = NULL; if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL)) return NULL; if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL)) goto fail_nmsk; node_to_cpumask = alloc_node_to_cpumask(); if (!node_to_cpumask) goto fail_npresmsk; masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL); if (!masks) goto fail_node_to_cpumask; /* Stabilize the cpumasks */ cpus_read_lock(); build_node_to_cpumask(node_to_cpumask); /* grouping present CPUs first */ ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask, cpu_present_mask, nmsk, masks); if (ret < 0) goto fail_build_affinity; nr_present = ret; /* * Allocate non present CPUs starting from the next group to be * handled. If the grouping of present CPUs already exhausted the * group space, assign the non present CPUs to the already * allocated out groups. 
*/ if (nr_present >= numgrps) curgrp = 0; else curgrp = nr_present; cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask); ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask, npresmsk, nmsk, masks); if (ret >= 0) nr_others = ret; fail_build_affinity: cpus_read_unlock(); if (ret >= 0) WARN_ON(nr_present + nr_others < numgrps); fail_node_to_cpumask: free_node_to_cpumask(node_to_cpumask); fail_npresmsk: free_cpumask_var(npresmsk); fail_nmsk: free_cpumask_var(nmsk); if (ret < 0) { kfree(masks); return NULL; } return masks; } #else /* CONFIG_SMP */ struct cpumask *group_cpus_evenly(unsigned int numgrps) { struct cpumask *masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL); if (!masks) return NULL; /* assign all CPUs(cpu 0) to the 1st group only */ cpumask_copy(&masks[0], cpu_possible_mask); return masks; } #endif /* CONFIG_SMP */ EXPORT_SYMBOL_GPL(group_cpus_evenly);
linux-master
lib/group_cpus.c
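group_cpus_evenly() hands back a kcalloc()'d array of numgrps cpumasks and leaves freeing to the caller. The fragment below is a hedged usage sketch rather than code from this file: the function name foo_spread_queues() and the pr_info() reporting are invented, while the allocate/iterate/kfree contract mirrors in-tree callers such as the blk-mq queue-mapping code.

/* Hypothetical caller spreading nr_queues queues across CPU groups. */
#include <linux/group_cpus.h>
#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/slab.h>

static int foo_spread_queues(unsigned int nr_queues)
{
	struct cpumask *masks = group_cpus_evenly(nr_queues);
	unsigned int i;

	if (!masks)
		return -ENOMEM;

	for (i = 0; i < nr_queues; i++)
		pr_info("queue %u -> CPUs %*pbl\n",
			i, cpumask_pr_args(&masks[i]));

	kfree(masks);	/* the whole array is a single kcalloc() allocation */
	return 0;
}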
// SPDX-License-Identifier: GPL-2.0-only #include <linux/kernel.h> #include <linux/init.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/stat.h> #include <linux/types.h> #include <linux/fs.h> #include <linux/export.h> #include <linux/interrupt.h> #include <linux/stacktrace.h> #include <linux/fault-inject.h> /* * setup_fault_attr() is a helper function for various __setup handlers, so it * returns 0 on error, because that is what __setup handlers do. */ int setup_fault_attr(struct fault_attr *attr, char *str) { unsigned long probability; unsigned long interval; int times; int space; /* "<interval>,<probability>,<space>,<times>" */ if (sscanf(str, "%lu,%lu,%d,%d", &interval, &probability, &space, &times) < 4) { printk(KERN_WARNING "FAULT_INJECTION: failed to parse arguments\n"); return 0; } attr->probability = probability; attr->interval = interval; atomic_set(&attr->times, times); atomic_set(&attr->space, space); return 1; } EXPORT_SYMBOL_GPL(setup_fault_attr); static void fail_dump(struct fault_attr *attr) { if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) { printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n" "name %pd, interval %lu, probability %lu, " "space %d, times %d\n", attr->dname, attr->interval, attr->probability, atomic_read(&attr->space), atomic_read(&attr->times)); if (attr->verbose > 1) dump_stack(); } } #define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0) static bool fail_task(struct fault_attr *attr, struct task_struct *task) { return in_task() && task->make_it_fail; } #define MAX_STACK_TRACE_DEPTH 32 #ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER static bool fail_stacktrace(struct fault_attr *attr) { int depth = attr->stacktrace_depth; unsigned long entries[MAX_STACK_TRACE_DEPTH]; int n, nr_entries; bool found = (attr->require_start == 0 && attr->require_end == ULONG_MAX); if (depth == 0 || (found && !attr->reject_start && !attr->reject_end)) return found; nr_entries = stack_trace_save(entries, depth, 1); for (n = 0; n < nr_entries; n++) { if (attr->reject_start <= entries[n] && entries[n] < attr->reject_end) return false; if (attr->require_start <= entries[n] && entries[n] < attr->require_end) found = true; } return found; } #else static inline bool fail_stacktrace(struct fault_attr *attr) { return true; } #endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */ /* * This code is stolen from failmalloc-1.0 * http://www.nongnu.org/failmalloc/ */ bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags) { bool stack_checked = false; if (in_task()) { unsigned int fail_nth = READ_ONCE(current->fail_nth); if (fail_nth) { if (!fail_stacktrace(attr)) return false; stack_checked = true; fail_nth--; WRITE_ONCE(current->fail_nth, fail_nth); if (!fail_nth) goto fail; return false; } } /* No need to check any other properties if the probability is 0 */ if (attr->probability == 0) return false; if (attr->task_filter && !fail_task(attr, current)) return false; if (atomic_read(&attr->times) == 0) return false; if (!stack_checked && !fail_stacktrace(attr)) return false; if (atomic_read(&attr->space) > size) { atomic_sub(size, &attr->space); return false; } if (attr->interval > 1) { attr->count++; if (attr->count % attr->interval) return false; } if (attr->probability <= get_random_u32_below(100)) return false; fail: if (!(flags & FAULT_NOWARN)) fail_dump(attr); if (atomic_read(&attr->times) != -1) atomic_dec_not_zero(&attr->times); return true; } bool should_fail(struct fault_attr *attr, ssize_t size) { return should_fail_ex(attr, 
size, 0); } EXPORT_SYMBOL_GPL(should_fail); #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS static int debugfs_ul_set(void *data, u64 val) { *(unsigned long *)data = val; return 0; } static int debugfs_ul_get(void *data, u64 *val) { *val = *(unsigned long *)data; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n"); static void debugfs_create_ul(const char *name, umode_t mode, struct dentry *parent, unsigned long *value) { debugfs_create_file(name, mode, parent, value, &fops_ul); } #ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER static int debugfs_stacktrace_depth_set(void *data, u64 val) { *(unsigned long *)data = min_t(unsigned long, val, MAX_STACK_TRACE_DEPTH); return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get, debugfs_stacktrace_depth_set, "%llu\n"); static void debugfs_create_stacktrace_depth(const char *name, umode_t mode, struct dentry *parent, unsigned long *value) { debugfs_create_file(name, mode, parent, value, &fops_stacktrace_depth); } #endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */ struct dentry *fault_create_debugfs_attr(const char *name, struct dentry *parent, struct fault_attr *attr) { umode_t mode = S_IFREG | S_IRUSR | S_IWUSR; struct dentry *dir; dir = debugfs_create_dir(name, parent); if (IS_ERR(dir)) return dir; debugfs_create_ul("probability", mode, dir, &attr->probability); debugfs_create_ul("interval", mode, dir, &attr->interval); debugfs_create_atomic_t("times", mode, dir, &attr->times); debugfs_create_atomic_t("space", mode, dir, &attr->space); debugfs_create_ul("verbose", mode, dir, &attr->verbose); debugfs_create_u32("verbose_ratelimit_interval_ms", mode, dir, &attr->ratelimit_state.interval); debugfs_create_u32("verbose_ratelimit_burst", mode, dir, &attr->ratelimit_state.burst); debugfs_create_bool("task-filter", mode, dir, &attr->task_filter); #ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir, &attr->stacktrace_depth); debugfs_create_xul("require-start", mode, dir, &attr->require_start); debugfs_create_xul("require-end", mode, dir, &attr->require_end); debugfs_create_xul("reject-start", mode, dir, &attr->reject_start); debugfs_create_xul("reject-end", mode, dir, &attr->reject_end); #endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */ attr->dname = dget(dir); return dir; } EXPORT_SYMBOL_GPL(fault_create_debugfs_attr); #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ #ifdef CONFIG_FAULT_INJECTION_CONFIGFS /* These configfs attribute utilities are copied from drivers/block/null_blk/main.c */ static ssize_t fault_uint_attr_show(unsigned int val, char *page) { return snprintf(page, PAGE_SIZE, "%u\n", val); } static ssize_t fault_ulong_attr_show(unsigned long val, char *page) { return snprintf(page, PAGE_SIZE, "%lu\n", val); } static ssize_t fault_bool_attr_show(bool val, char *page) { return snprintf(page, PAGE_SIZE, "%u\n", val); } static ssize_t fault_atomic_t_attr_show(atomic_t val, char *page) { return snprintf(page, PAGE_SIZE, "%d\n", atomic_read(&val)); } static ssize_t fault_uint_attr_store(unsigned int *val, const char *page, size_t count) { unsigned int tmp; int result; result = kstrtouint(page, 0, &tmp); if (result < 0) return result; *val = tmp; return count; } static ssize_t fault_ulong_attr_store(unsigned long *val, const char *page, size_t count) { int result; unsigned long tmp; result = kstrtoul(page, 0, &tmp); if (result < 0) return result; *val = tmp; return count; } static ssize_t fault_bool_attr_store(bool *val, const char *page, size_t 
count) { bool tmp; int result; result = kstrtobool(page, &tmp); if (result < 0) return result; *val = tmp; return count; } static ssize_t fault_atomic_t_attr_store(atomic_t *val, const char *page, size_t count) { int tmp; int result; result = kstrtoint(page, 0, &tmp); if (result < 0) return result; atomic_set(val, tmp); return count; } #define CONFIGFS_ATTR_NAMED(_pfx, _name, _attr_name) \ static struct configfs_attribute _pfx##attr_##_name = { \ .ca_name = _attr_name, \ .ca_mode = 0644, \ .ca_owner = THIS_MODULE, \ .show = _pfx##_name##_show, \ .store = _pfx##_name##_store, \ } static struct fault_config *to_fault_config(struct config_item *item) { return container_of(to_config_group(item), struct fault_config, group); } #define FAULT_CONFIGFS_ATTR_NAMED(NAME, ATTR_NAME, MEMBER, TYPE) \ static ssize_t fault_##NAME##_show(struct config_item *item, char *page) \ { \ return fault_##TYPE##_attr_show(to_fault_config(item)->attr.MEMBER, page); \ } \ static ssize_t fault_##NAME##_store(struct config_item *item, const char *page, size_t count) \ { \ struct fault_config *config = to_fault_config(item); \ return fault_##TYPE##_attr_store(&config->attr.MEMBER, page, count); \ } \ CONFIGFS_ATTR_NAMED(fault_, NAME, ATTR_NAME) #define FAULT_CONFIGFS_ATTR(NAME, TYPE) \ FAULT_CONFIGFS_ATTR_NAMED(NAME, __stringify(NAME), NAME, TYPE) FAULT_CONFIGFS_ATTR(probability, ulong); FAULT_CONFIGFS_ATTR(interval, ulong); FAULT_CONFIGFS_ATTR(times, atomic_t); FAULT_CONFIGFS_ATTR(space, atomic_t); FAULT_CONFIGFS_ATTR(verbose, ulong); FAULT_CONFIGFS_ATTR_NAMED(ratelimit_interval, "verbose_ratelimit_interval_ms", ratelimit_state.interval, uint); FAULT_CONFIGFS_ATTR_NAMED(ratelimit_burst, "verbose_ratelimit_burst", ratelimit_state.burst, uint); FAULT_CONFIGFS_ATTR_NAMED(task_filter, "task-filter", task_filter, bool); #ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER static ssize_t fault_stacktrace_depth_show(struct config_item *item, char *page) { return fault_ulong_attr_show(to_fault_config(item)->attr.stacktrace_depth, page); } static ssize_t fault_stacktrace_depth_store(struct config_item *item, const char *page, size_t count) { int result; unsigned long tmp; result = kstrtoul(page, 0, &tmp); if (result < 0) return result; to_fault_config(item)->attr.stacktrace_depth = min_t(unsigned long, tmp, MAX_STACK_TRACE_DEPTH); return count; } CONFIGFS_ATTR_NAMED(fault_, stacktrace_depth, "stacktrace-depth"); static ssize_t fault_xul_attr_show(unsigned long val, char *page) { return snprintf(page, PAGE_SIZE, sizeof(val) == sizeof(u32) ? 
"0x%08lx\n" : "0x%016lx\n", val); } static ssize_t fault_xul_attr_store(unsigned long *val, const char *page, size_t count) { return fault_ulong_attr_store(val, page, count); } FAULT_CONFIGFS_ATTR_NAMED(require_start, "require-start", require_start, xul); FAULT_CONFIGFS_ATTR_NAMED(require_end, "require-end", require_end, xul); FAULT_CONFIGFS_ATTR_NAMED(reject_start, "reject-start", reject_start, xul); FAULT_CONFIGFS_ATTR_NAMED(reject_end, "reject-end", reject_end, xul); #endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */ static struct configfs_attribute *fault_config_attrs[] = { &fault_attr_probability, &fault_attr_interval, &fault_attr_times, &fault_attr_space, &fault_attr_verbose, &fault_attr_ratelimit_interval, &fault_attr_ratelimit_burst, &fault_attr_task_filter, #ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER &fault_attr_stacktrace_depth, &fault_attr_require_start, &fault_attr_require_end, &fault_attr_reject_start, &fault_attr_reject_end, #endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */ NULL, }; static const struct config_item_type fault_config_type = { .ct_attrs = fault_config_attrs, .ct_owner = THIS_MODULE, }; void fault_config_init(struct fault_config *config, const char *name) { config_group_init_type_name(&config->group, name, &fault_config_type); } EXPORT_SYMBOL_GPL(fault_config_init); #endif /* CONFIG_FAULT_INJECTION_CONFIGFS */
linux-master
lib/fault-inject.c
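A subsystem using this machinery declares a struct fault_attr, consults should_fail() on its hot path, and optionally exposes the knobs via fault_create_debugfs_attr(). The sketch below is illustrative only: the attribute and function names are invented, and the debugfs call additionally depends on CONFIG_FAULT_INJECTION_DEBUG_FS, as in the file above.

/* Hypothetical fault-injection hookup for a made-up "foo" I/O path. */
#include <linux/fault-inject.h>
#include <linux/errno.h>
#include <linux/init.h>

static DECLARE_FAULT_ATTR(fail_foo_io);

/* Optional: expose probability/interval/times/... under debugfs
 * (requires CONFIG_FAULT_INJECTION_DEBUG_FS). */
static int __init foo_fault_debugfs_init(void)
{
	fault_create_debugfs_attr("fail_foo_io", NULL, &fail_foo_io);
	return 0;
}
late_initcall(foo_fault_debugfs_init);

static int foo_submit_io(size_t len)
{
	if (should_fail(&fail_foo_io, len))
		return -EIO;	/* injected failure, as configured via debugfs */

	/* ... real submission path would go here ... */
	return 0;
}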
// SPDX-License-Identifier: GPL-2.0-only /* * Test cases for printf facility. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/printk.h> #include <linux/random.h> #include <linux/rtc.h> #include <linux/slab.h> #include <linux/sprintf.h> #include <linux/string.h> #include <linux/bitmap.h> #include <linux/dcache.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/property.h> #include "../tools/testing/selftests/kselftest_module.h" #define BUF_SIZE 256 #define PAD_SIZE 16 #define FILL_CHAR '$' #define NOWARN(option, comment, block) \ __diag_push(); \ __diag_ignore_all(#option, comment); \ block \ __diag_pop(); KSTM_MODULE_GLOBALS(); static char *test_buffer __initdata; static char *alloced_buffer __initdata; static int __printf(4, 0) __init do_test(int bufsize, const char *expect, int elen, const char *fmt, va_list ap) { va_list aq; int ret, written; total_tests++; memset(alloced_buffer, FILL_CHAR, BUF_SIZE + 2*PAD_SIZE); va_copy(aq, ap); ret = vsnprintf(test_buffer, bufsize, fmt, aq); va_end(aq); if (ret != elen) { pr_warn("vsnprintf(buf, %d, \"%s\", ...) returned %d, expected %d\n", bufsize, fmt, ret, elen); return 1; } if (memchr_inv(alloced_buffer, FILL_CHAR, PAD_SIZE)) { pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote before buffer\n", bufsize, fmt); return 1; } if (!bufsize) { if (memchr_inv(test_buffer, FILL_CHAR, BUF_SIZE + PAD_SIZE)) { pr_warn("vsnprintf(buf, 0, \"%s\", ...) wrote to buffer\n", fmt); return 1; } return 0; } written = min(bufsize-1, elen); if (test_buffer[written]) { pr_warn("vsnprintf(buf, %d, \"%s\", ...) did not nul-terminate buffer\n", bufsize, fmt); return 1; } if (memchr_inv(test_buffer + written + 1, FILL_CHAR, bufsize - (written + 1))) { pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond the nul-terminator\n", bufsize, fmt); return 1; } if (memchr_inv(test_buffer + bufsize, FILL_CHAR, BUF_SIZE + PAD_SIZE - bufsize)) { pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote beyond buffer\n", bufsize, fmt); return 1; } if (memcmp(test_buffer, expect, written)) { pr_warn("vsnprintf(buf, %d, \"%s\", ...) wrote '%s', expected '%.*s'\n", bufsize, fmt, test_buffer, written, expect); return 1; } return 0; } static void __printf(3, 4) __init __test(const char *expect, int elen, const char *fmt, ...) { va_list ap; int rand; char *p; if (elen >= BUF_SIZE) { pr_err("error in test suite: expected output length %d too long. Format was '%s'.\n", elen, fmt); failed_tests++; return; } va_start(ap, fmt); /* * Every fmt+args is subjected to four tests: Three where we * tell vsnprintf varying buffer sizes (plenty, not quite * enough and 0), and then we also test that kvasprintf would * be able to print it as expected. */ failed_tests += do_test(BUF_SIZE, expect, elen, fmt, ap); rand = get_random_u32_inclusive(1, elen + 1); /* Since elen < BUF_SIZE, we have 1 <= rand <= BUF_SIZE. */ failed_tests += do_test(rand, expect, elen, fmt, ap); failed_tests += do_test(0, expect, elen, fmt, ap); p = kvasprintf(GFP_KERNEL, fmt, ap); if (p) { total_tests++; if (memcmp(p, expect, elen+1)) { pr_warn("kvasprintf(..., \"%s\", ...) returned '%s', expected '%s'\n", fmt, p, expect); failed_tests++; } kfree(p); } va_end(ap); } #define test(expect, fmt, ...) \ __test(expect, strlen(expect), fmt, ##__VA_ARGS__) static void __init test_basic(void) { /* Work around annoying "warning: zero-length gnu_printf format string". 
*/ char nul = '\0'; test("", &nul); test("100%", "100%%"); test("xxx%yyy", "xxx%cyyy", '%'); __test("xxx\0yyy", 7, "xxx%cyyy", '\0'); } static void __init test_number(void) { test("0x1234abcd ", "%#-12x", 0x1234abcd); test(" 0x1234abcd", "%#12x", 0x1234abcd); test("0|001| 12|+123| 1234|-123|-1234", "%d|%03d|%3d|%+d|% d|%+d|% d", 0, 1, 12, 123, 1234, -123, -1234); NOWARN(-Wformat, "Intentionally test narrowing conversion specifiers.", { test("0|1|1|128|255", "%hhu|%hhu|%hhu|%hhu|%hhu", 0, 1, 257, 128, -1); test("0|1|1|-128|-1", "%hhd|%hhd|%hhd|%hhd|%hhd", 0, 1, 257, 128, -1); test("2015122420151225", "%ho%ho%#ho", 1037, 5282, -11627); }) /* * POSIX/C99: »The result of converting zero with an explicit * precision of zero shall be no characters.« Hence the output * from the below test should really be "00|0||| ". However, * the kernel's printf also produces a single 0 in that * case. This test case simply documents the current * behaviour. */ test("00|0|0|0|0", "%.2d|%.1d|%.0d|%.*d|%1.0d", 0, 0, 0, 0, 0, 0); } static void __init test_string(void) { test("", "%s%.0s", "", "123"); test("ABCD|abc|123", "%s|%.3s|%.*s", "ABCD", "abcdef", 3, "123456"); test("1 | 2|3 | 4|5 ", "%-3s|%3s|%-*s|%*s|%*s", "1", "2", 3, "3", 3, "4", -3, "5"); test("1234 ", "%-10.4s", "123456"); test(" 1234", "%10.4s", "123456"); /* * POSIX and C99 say that a negative precision (which is only * possible to pass via a * argument) should be treated as if * the precision wasn't present, and that if the precision is * omitted (as in %.s), the precision should be taken to be * 0. However, the kernel's printf behave exactly opposite, * treating a negative precision as 0 and treating an omitted * precision specifier as if no precision was given. * * These test cases document the current behaviour; should * anyone ever feel the need to follow the standards more * closely, this can be revisited. */ test(" ", "%4.*s", -5, "123456"); test("123456", "%.s", "123456"); test("a||", "%.s|%.0s|%.*s", "a", "b", 0, "c"); test("a | | ", "%-3.s|%-3.0s|%-3.*s", "a", "b", 0, "c"); } #define PLAIN_BUF_SIZE 64 /* leave some space so we don't oops */ #if BITS_PER_LONG == 64 #define PTR_WIDTH 16 #define PTR ((void *)0xffff0123456789abUL) #define PTR_STR "ffff0123456789ab" #define PTR_VAL_NO_CRNG "(____ptrval____)" #define ZEROS "00000000" /* hex 32 zero bits */ #define ONES "ffffffff" /* hex 32 one bits */ static int __init plain_format(void) { char buf[PLAIN_BUF_SIZE]; int nchars; nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR); if (nchars != PTR_WIDTH) return -1; if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) { pr_warn("crng possibly not yet initialized. plain 'p' buffer contains \"%s\"", PTR_VAL_NO_CRNG); return 0; } if (strncmp(buf, ZEROS, strlen(ZEROS)) != 0) return -1; return 0; } #else #define PTR_WIDTH 8 #define PTR ((void *)0x456789ab) #define PTR_STR "456789ab" #define PTR_VAL_NO_CRNG "(ptrval)" #define ZEROS "" #define ONES "" static int __init plain_format(void) { /* Format is implicitly tested for 32 bit machines by plain_hash() */ return 0; } #endif /* BITS_PER_LONG == 64 */ static int __init plain_hash_to_buffer(const void *p, char *buf, size_t len) { int nchars; nchars = snprintf(buf, len, "%p", p); if (nchars != PTR_WIDTH) return -1; if (strncmp(buf, PTR_VAL_NO_CRNG, PTR_WIDTH) == 0) { pr_warn("crng possibly not yet initialized. 
plain 'p' buffer contains \"%s\"", PTR_VAL_NO_CRNG); return 0; } return 0; } static int __init plain_hash(void) { char buf[PLAIN_BUF_SIZE]; int ret; ret = plain_hash_to_buffer(PTR, buf, PLAIN_BUF_SIZE); if (ret) return ret; if (strncmp(buf, PTR_STR, PTR_WIDTH) == 0) return -1; return 0; } /* * We can't use test() to test %p because we don't know what output to expect * after an address is hashed. */ static void __init plain(void) { int err; if (no_hash_pointers) { pr_warn("skipping plain 'p' tests"); skipped_tests += 2; return; } err = plain_hash(); if (err) { pr_warn("plain 'p' does not appear to be hashed\n"); failed_tests++; return; } err = plain_format(); if (err) { pr_warn("hashing plain 'p' has unexpected format\n"); failed_tests++; } } static void __init test_hashed(const char *fmt, const void *p) { char buf[PLAIN_BUF_SIZE]; int ret; /* * No need to increase failed test counter since this is assumed * to be called after plain(). */ ret = plain_hash_to_buffer(p, buf, PLAIN_BUF_SIZE); if (ret) return; test(buf, fmt, p); } /* * NULL pointers aren't hashed. */ static void __init null_pointer(void) { test(ZEROS "00000000", "%p", NULL); test(ZEROS "00000000", "%px", NULL); test("(null)", "%pE", NULL); } /* * Error pointers aren't hashed. */ static void __init error_pointer(void) { test(ONES "fffffff5", "%p", ERR_PTR(-11)); test(ONES "fffffff5", "%px", ERR_PTR(-11)); test("(efault)", "%pE", ERR_PTR(-11)); } #define PTR_INVALID ((void *)0x000000ab) static void __init invalid_pointer(void) { test_hashed("%p", PTR_INVALID); test(ZEROS "000000ab", "%px", PTR_INVALID); test("(efault)", "%pE", PTR_INVALID); } static void __init symbol_ptr(void) { } static void __init kernel_ptr(void) { /* We can't test this without access to kptr_restrict. */ } static void __init struct_resource(void) { } static void __init addr(void) { } static void __init escaped_str(void) { } static void __init hex_string(void) { const char buf[3] = {0xc0, 0xff, 0xee}; test("c0 ff ee|c0:ff:ee|c0-ff-ee|c0ffee", "%3ph|%3phC|%3phD|%3phN", buf, buf, buf, buf); test("c0 ff ee|c0:ff:ee|c0-ff-ee|c0ffee", "%*ph|%*phC|%*phD|%*phN", 3, buf, 3, buf, 3, buf, 3, buf); } static void __init mac(void) { const u8 addr[6] = {0x2d, 0x48, 0xd6, 0xfc, 0x7a, 0x05}; test("2d:48:d6:fc:7a:05", "%pM", addr); test("05:7a:fc:d6:48:2d", "%pMR", addr); test("2d-48-d6-fc-7a-05", "%pMF", addr); test("2d48d6fc7a05", "%pm", addr); test("057afcd6482d", "%pmR", addr); } static void __init ip4(void) { struct sockaddr_in sa; sa.sin_family = AF_INET; sa.sin_port = cpu_to_be16(12345); sa.sin_addr.s_addr = cpu_to_be32(0x7f000001); test("127.000.000.001|127.0.0.1", "%pi4|%pI4", &sa.sin_addr, &sa.sin_addr); test("127.000.000.001|127.0.0.1", "%piS|%pIS", &sa, &sa); sa.sin_addr.s_addr = cpu_to_be32(0x01020304); test("001.002.003.004:12345|1.2.3.4:12345", "%piSp|%pISp", &sa, &sa); } static void __init ip6(void) { } static void __init ip(void) { ip4(); ip6(); } static void __init uuid(void) { const char uuid[16] = {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf}; test("00010203-0405-0607-0809-0a0b0c0d0e0f", "%pUb", uuid); test("00010203-0405-0607-0809-0A0B0C0D0E0F", "%pUB", uuid); test("03020100-0504-0706-0809-0a0b0c0d0e0f", "%pUl", uuid); test("03020100-0504-0706-0809-0A0B0C0D0E0F", "%pUL", uuid); } static struct dentry test_dentry[4] __initdata = { { .d_parent = &test_dentry[0], .d_name = QSTR_INIT(test_dentry[0].d_iname, 3), .d_iname = "foo" }, { .d_parent = &test_dentry[0], .d_name = QSTR_INIT(test_dentry[1].d_iname, 5), .d_iname = 
"bravo" }, { .d_parent = &test_dentry[1], .d_name = QSTR_INIT(test_dentry[2].d_iname, 4), .d_iname = "alfa" }, { .d_parent = &test_dentry[2], .d_name = QSTR_INIT(test_dentry[3].d_iname, 5), .d_iname = "romeo" }, }; static void __init dentry(void) { test("foo", "%pd", &test_dentry[0]); test("foo", "%pd2", &test_dentry[0]); test("(null)", "%pd", NULL); test("(efault)", "%pd", PTR_INVALID); test("(null)", "%pD", NULL); test("(efault)", "%pD", PTR_INVALID); test("romeo", "%pd", &test_dentry[3]); test("alfa/romeo", "%pd2", &test_dentry[3]); test("bravo/alfa/romeo", "%pd3", &test_dentry[3]); test("/bravo/alfa/romeo", "%pd4", &test_dentry[3]); test("/bravo/alfa", "%pd4", &test_dentry[2]); test("bravo/alfa |bravo/alfa ", "%-12pd2|%*pd2", &test_dentry[2], -12, &test_dentry[2]); test(" bravo/alfa| bravo/alfa", "%12pd2|%*pd2", &test_dentry[2], 12, &test_dentry[2]); } static void __init struct_va_format(void) { } static void __init time_and_date(void) { /* 1543210543 */ const struct rtc_time tm = { .tm_sec = 43, .tm_min = 35, .tm_hour = 5, .tm_mday = 26, .tm_mon = 10, .tm_year = 118, }; /* 2019-01-04T15:32:23 */ time64_t t = 1546615943; test("(%pt?)", "%pt", &tm); test("2018-11-26T05:35:43", "%ptR", &tm); test("0118-10-26T05:35:43", "%ptRr", &tm); test("05:35:43|2018-11-26", "%ptRt|%ptRd", &tm, &tm); test("05:35:43|0118-10-26", "%ptRtr|%ptRdr", &tm, &tm); test("05:35:43|2018-11-26", "%ptRttr|%ptRdtr", &tm, &tm); test("05:35:43 tr|2018-11-26 tr", "%ptRt tr|%ptRd tr", &tm, &tm); test("2019-01-04T15:32:23", "%ptT", &t); test("0119-00-04T15:32:23", "%ptTr", &t); test("15:32:23|2019-01-04", "%ptTt|%ptTd", &t, &t); test("15:32:23|0119-00-04", "%ptTtr|%ptTdr", &t, &t); test("2019-01-04 15:32:23", "%ptTs", &t); test("0119-00-04 15:32:23", "%ptTsr", &t); test("15:32:23|2019-01-04", "%ptTts|%ptTds", &t, &t); test("15:32:23|0119-00-04", "%ptTtrs|%ptTdrs", &t, &t); } static void __init struct_clk(void) { } static void __init large_bitmap(void) { const int nbits = 1 << 16; unsigned long *bits = bitmap_zalloc(nbits, GFP_KERNEL); if (!bits) return; bitmap_set(bits, 1, 20); bitmap_set(bits, 60000, 15); test("1-20,60000-60014", "%*pbl", nbits, bits); bitmap_free(bits); } static void __init bitmap(void) { DECLARE_BITMAP(bits, 20); const int primes[] = {2,3,5,7,11,13,17,19}; int i; bitmap_zero(bits, 20); test("00000|00000", "%20pb|%*pb", bits, 20, bits); test("|", "%20pbl|%*pbl", bits, 20, bits); for (i = 0; i < ARRAY_SIZE(primes); ++i) set_bit(primes[i], bits); test("a28ac|a28ac", "%20pb|%*pb", bits, 20, bits); test("2-3,5,7,11,13,17,19|2-3,5,7,11,13,17,19", "%20pbl|%*pbl", bits, 20, bits); bitmap_fill(bits, 20); test("fffff|fffff", "%20pb|%*pb", bits, 20, bits); test("0-19|0-19", "%20pbl|%*pbl", bits, 20, bits); large_bitmap(); } static void __init netdev_features(void) { } struct page_flags_test { int width; int shift; int mask; const char *fmt; const char *name; }; static const struct page_flags_test pft[] = { {SECTIONS_WIDTH, SECTIONS_PGSHIFT, SECTIONS_MASK, "%d", "section"}, {NODES_WIDTH, NODES_PGSHIFT, NODES_MASK, "%d", "node"}, {ZONES_WIDTH, ZONES_PGSHIFT, ZONES_MASK, "%d", "zone"}, {LAST_CPUPID_WIDTH, LAST_CPUPID_PGSHIFT, LAST_CPUPID_MASK, "%#x", "lastcpupid"}, {KASAN_TAG_WIDTH, KASAN_TAG_PGSHIFT, KASAN_TAG_MASK, "%#x", "kasantag"}, }; static void __init page_flags_test(int section, int node, int zone, int last_cpupid, int kasan_tag, unsigned long flags, const char *name, char *cmp_buf) { unsigned long values[] = {section, node, zone, last_cpupid, kasan_tag}; unsigned long size; bool append = false; int i; for 
(i = 0; i < ARRAY_SIZE(values); i++) flags |= (values[i] & pft[i].mask) << pft[i].shift; size = scnprintf(cmp_buf, BUF_SIZE, "%#lx(", flags); if (flags & PAGEFLAGS_MASK) { size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s", name); append = true; } for (i = 0; i < ARRAY_SIZE(pft); i++) { if (!pft[i].width) continue; if (append) size += scnprintf(cmp_buf + size, BUF_SIZE - size, "|"); size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s=", pft[i].name); size += scnprintf(cmp_buf + size, BUF_SIZE - size, pft[i].fmt, values[i] & pft[i].mask); append = true; } snprintf(cmp_buf + size, BUF_SIZE - size, ")"); test(cmp_buf, "%pGp", &flags); } static void __init page_type_test(unsigned int page_type, const char *name, char *cmp_buf) { unsigned long size; size = scnprintf(cmp_buf, BUF_SIZE, "%#x(", page_type); if (page_type_has_type(page_type)) size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s", name); snprintf(cmp_buf + size, BUF_SIZE - size, ")"); test(cmp_buf, "%pGt", &page_type); } static void __init flags(void) { unsigned long flags; char *cmp_buffer; gfp_t gfp; unsigned int page_type; cmp_buffer = kmalloc(BUF_SIZE, GFP_KERNEL); if (!cmp_buffer) return; flags = 0; page_flags_test(0, 0, 0, 0, 0, flags, "", cmp_buffer); flags = 1UL << NR_PAGEFLAGS; page_flags_test(0, 0, 0, 0, 0, flags, "", cmp_buffer); flags |= 1UL << PG_uptodate | 1UL << PG_dirty | 1UL << PG_lru | 1UL << PG_active | 1UL << PG_swapbacked; page_flags_test(1, 1, 1, 0x1fffff, 1, flags, "uptodate|dirty|lru|active|swapbacked", cmp_buffer); flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; test("read|exec|mayread|maywrite|mayexec", "%pGv", &flags); gfp = GFP_TRANSHUGE; test("GFP_TRANSHUGE", "%pGg", &gfp); gfp = GFP_ATOMIC|__GFP_DMA; test("GFP_ATOMIC|GFP_DMA", "%pGg", &gfp); gfp = __GFP_HIGH; test("__GFP_HIGH", "%pGg", &gfp); /* Any flags not translated by the table should remain numeric */ gfp = ~__GFP_BITS_MASK; snprintf(cmp_buffer, BUF_SIZE, "%#lx", (unsigned long) gfp); test(cmp_buffer, "%pGg", &gfp); snprintf(cmp_buffer, BUF_SIZE, "__GFP_HIGH|%#lx", (unsigned long) gfp); gfp |= __GFP_HIGH; test(cmp_buffer, "%pGg", &gfp); page_type = ~0; page_type_test(page_type, "", cmp_buffer); page_type = 10; page_type_test(page_type, "", cmp_buffer); page_type = ~PG_buddy; page_type_test(page_type, "buddy", cmp_buffer); page_type = ~(PG_table | PG_buddy); page_type_test(page_type, "table|buddy", cmp_buffer); kfree(cmp_buffer); } static void __init fwnode_pointer(void) { const struct software_node first = { .name = "first" }; const struct software_node second = { .name = "second", .parent = &first }; const struct software_node third = { .name = "third", .parent = &second }; const struct software_node *group[] = { &first, &second, &third, NULL }; const char * const full_name_second = "first/second"; const char * const full_name_third = "first/second/third"; const char * const second_name = "second"; const char * const third_name = "third"; int rval; rval = software_node_register_node_group(group); if (rval) { pr_warn("cannot register softnodes; rval %d\n", rval); return; } test(full_name_second, "%pfw", software_node_fwnode(&second)); test(full_name_third, "%pfw", software_node_fwnode(&third)); test(full_name_third, "%pfwf", software_node_fwnode(&third)); test(second_name, "%pfwP", software_node_fwnode(&second)); test(third_name, "%pfwP", software_node_fwnode(&third)); software_node_unregister_node_group(group); } static void __init fourcc_pointer(void) { struct { u32 code; char *str; } const try[] = { { 0x3231564e, 
"NV12 little-endian (0x3231564e)", }, { 0xb231564e, "NV12 big-endian (0xb231564e)", }, { 0x10111213, ".... little-endian (0x10111213)", }, { 0x20303159, "Y10 little-endian (0x20303159)", }, }; unsigned int i; for (i = 0; i < ARRAY_SIZE(try); i++) test(try[i].str, "%p4cc", &try[i].code); } static void __init errptr(void) { test("-1234", "%pe", ERR_PTR(-1234)); /* Check that %pe with a non-ERR_PTR gets treated as ordinary %p. */ BUILD_BUG_ON(IS_ERR(PTR)); test_hashed("%pe", PTR); #ifdef CONFIG_SYMBOLIC_ERRNAME test("(-ENOTSOCK)", "(%pe)", ERR_PTR(-ENOTSOCK)); test("(-EAGAIN)", "(%pe)", ERR_PTR(-EAGAIN)); BUILD_BUG_ON(EAGAIN != EWOULDBLOCK); test("(-EAGAIN)", "(%pe)", ERR_PTR(-EWOULDBLOCK)); test("[-EIO ]", "[%-8pe]", ERR_PTR(-EIO)); test("[ -EIO]", "[%8pe]", ERR_PTR(-EIO)); test("-EPROBE_DEFER", "%pe", ERR_PTR(-EPROBE_DEFER)); #endif } static void __init test_pointer(void) { plain(); null_pointer(); error_pointer(); invalid_pointer(); symbol_ptr(); kernel_ptr(); struct_resource(); addr(); escaped_str(); hex_string(); mac(); ip(); uuid(); dentry(); struct_va_format(); time_and_date(); struct_clk(); bitmap(); netdev_features(); flags(); errptr(); fwnode_pointer(); fourcc_pointer(); } static void __init selftest(void) { alloced_buffer = kmalloc(BUF_SIZE + 2*PAD_SIZE, GFP_KERNEL); if (!alloced_buffer) return; test_buffer = alloced_buffer + PAD_SIZE; test_basic(); test_number(); test_string(); test_pointer(); kfree(alloced_buffer); } KSTM_MODULE_LOADERS(test_printf); MODULE_AUTHOR("Rasmus Villemoes <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
lib/test_printf.c
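New vsnprintf() behaviour is covered by adding another __init helper built on the file's test() macro and calling it from selftest(). The group below is a hedged sketch of that shape, not an actual test from the file; the format strings are merely examples of conversions the kernel printf already supports.

/* Hypothetical extra test group, in the style of test_number()/test_string(). */
static void __init test_example_group(void)
{
	/* expected output first, then the format and its arguments */
	test("42|0x2a|052", "%d|%#x|%#o", 42, 42, 42);
	test("[foo  ]|[  foo]", "[%-5s]|[%5s]", "foo", "foo");
}

/* ...and selftest() would gain a test_example_group(); call alongside the others. */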
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2007 Jens Axboe <[email protected]> * * Scatterlist handling helpers. */ #include <linux/export.h> #include <linux/slab.h> #include <linux/scatterlist.h> #include <linux/highmem.h> #include <linux/kmemleak.h> #include <linux/bvec.h> #include <linux/uio.h> /** * sg_next - return the next scatterlist entry in a list * @sg: The current sg entry * * Description: * Usually the next entry will be @sg@ + 1, but if this sg element is part * of a chained scatterlist, it could jump to the start of a new * scatterlist array. * **/ struct scatterlist *sg_next(struct scatterlist *sg) { if (sg_is_last(sg)) return NULL; sg++; if (unlikely(sg_is_chain(sg))) sg = sg_chain_ptr(sg); return sg; } EXPORT_SYMBOL(sg_next); /** * sg_nents - return total count of entries in scatterlist * @sg: The scatterlist * * Description: * Allows to know how many entries are in sg, taking into account * chaining as well * **/ int sg_nents(struct scatterlist *sg) { int nents; for (nents = 0; sg; sg = sg_next(sg)) nents++; return nents; } EXPORT_SYMBOL(sg_nents); /** * sg_nents_for_len - return total count of entries in scatterlist * needed to satisfy the supplied length * @sg: The scatterlist * @len: The total required length * * Description: * Determines the number of entries in sg that are required to meet * the supplied length, taking into account chaining as well * * Returns: * the number of sg entries needed, negative error on failure * **/ int sg_nents_for_len(struct scatterlist *sg, u64 len) { int nents; u64 total; if (!len) return 0; for (nents = 0, total = 0; sg; sg = sg_next(sg)) { nents++; total += sg->length; if (total >= len) return nents; } return -EINVAL; } EXPORT_SYMBOL(sg_nents_for_len); /** * sg_last - return the last scatterlist entry in a list * @sgl: First entry in the scatterlist * @nents: Number of entries in the scatterlist * * Description: * Should only be used casually, it (currently) scans the entire list * to get the last entry. * * Note that the @sgl@ pointer passed in need not be the first one, * the important bit is that @nents@ denotes the number of entries that * exist from @sgl@. * **/ struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents) { struct scatterlist *sg, *ret = NULL; unsigned int i; for_each_sg(sgl, sg, nents, i) ret = sg; BUG_ON(!sg_is_last(ret)); return ret; } EXPORT_SYMBOL(sg_last); /** * sg_init_table - Initialize SG table * @sgl: The SG table * @nents: Number of entries in table * * Notes: * If this is part of a chained sg table, sg_mark_end() should be * used only on the last table part. * **/ void sg_init_table(struct scatterlist *sgl, unsigned int nents) { memset(sgl, 0, sizeof(*sgl) * nents); sg_init_marker(sgl, nents); } EXPORT_SYMBOL(sg_init_table); /** * sg_init_one - Initialize a single entry sg list * @sg: SG entry * @buf: Virtual address for IO * @buflen: IO length * **/ void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen) { sg_init_table(sg, 1); sg_set_buf(sg, buf, buflen); } EXPORT_SYMBOL(sg_init_one); /* * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree * helpers. */ static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask) { if (nents == SG_MAX_SINGLE_ALLOC) { /* * Kmemleak doesn't track page allocations as they are not * commonly used (in a raw form) for kernel data structures. 
* As we chain together a list of pages and then a normal * kmalloc (tracked by kmemleak), in order to for that last * allocation not to become decoupled (and thus a * false-positive) we need to inform kmemleak of all the * intermediate allocations. */ void *ptr = (void *) __get_free_page(gfp_mask); kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask); return ptr; } else return kmalloc_array(nents, sizeof(struct scatterlist), gfp_mask); } static void sg_kfree(struct scatterlist *sg, unsigned int nents) { if (nents == SG_MAX_SINGLE_ALLOC) { kmemleak_free(sg); free_page((unsigned long) sg); } else kfree(sg); } /** * __sg_free_table - Free a previously mapped sg table * @table: The sg table header to use * @max_ents: The maximum number of entries per single scatterlist * @nents_first_chunk: Number of entries int the (preallocated) first * scatterlist chunk, 0 means no such preallocated first chunk * @free_fn: Free function * @num_ents: Number of entries in the table * * Description: * Free an sg table previously allocated and setup with * __sg_alloc_table(). The @max_ents value must be identical to * that previously used with __sg_alloc_table(). * **/ void __sg_free_table(struct sg_table *table, unsigned int max_ents, unsigned int nents_first_chunk, sg_free_fn *free_fn, unsigned int num_ents) { struct scatterlist *sgl, *next; unsigned curr_max_ents = nents_first_chunk ?: max_ents; if (unlikely(!table->sgl)) return; sgl = table->sgl; while (num_ents) { unsigned int alloc_size = num_ents; unsigned int sg_size; /* * If we have more than max_ents segments left, * then assign 'next' to the sg table after the current one. * sg_size is then one less than alloc size, since the last * element is the chain pointer. */ if (alloc_size > curr_max_ents) { next = sg_chain_ptr(&sgl[curr_max_ents - 1]); alloc_size = curr_max_ents; sg_size = alloc_size - 1; } else { sg_size = alloc_size; next = NULL; } num_ents -= sg_size; if (nents_first_chunk) nents_first_chunk = 0; else free_fn(sgl, alloc_size); sgl = next; curr_max_ents = max_ents; } table->sgl = NULL; } EXPORT_SYMBOL(__sg_free_table); /** * sg_free_append_table - Free a previously allocated append sg table. * @table: The mapped sg append table header * **/ void sg_free_append_table(struct sg_append_table *table) { __sg_free_table(&table->sgt, SG_MAX_SINGLE_ALLOC, 0, sg_kfree, table->total_nents); } EXPORT_SYMBOL(sg_free_append_table); /** * sg_free_table - Free a previously allocated sg table * @table: The mapped sg table header * **/ void sg_free_table(struct sg_table *table) { __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree, table->orig_nents); } EXPORT_SYMBOL(sg_free_table); /** * __sg_alloc_table - Allocate and initialize an sg table with given allocator * @table: The sg table header to use * @nents: Number of entries in sg list * @max_ents: The maximum number of entries the allocator returns per call * @first_chunk: first SGL if preallocated (may be %NULL) * @nents_first_chunk: Number of entries in the (preallocated) first * scatterlist chunk, 0 means no such preallocated chunk provided by user * @gfp_mask: GFP allocation mask * @alloc_fn: Allocator to use * * Description: * This function returns a @table @nents long. The allocator is * defined to return scatterlist chunks of maximum size @max_ents. * Thus if @nents is bigger than @max_ents, the scatterlists will be * chained in units of @max_ents. * * Notes: * If this function returns non-0 (eg failure), the caller must call * __sg_free_table() to cleanup any leftover allocations. 
* **/ int __sg_alloc_table(struct sg_table *table, unsigned int nents, unsigned int max_ents, struct scatterlist *first_chunk, unsigned int nents_first_chunk, gfp_t gfp_mask, sg_alloc_fn *alloc_fn) { struct scatterlist *sg, *prv; unsigned int left; unsigned curr_max_ents = nents_first_chunk ?: max_ents; unsigned prv_max_ents; memset(table, 0, sizeof(*table)); if (nents == 0) return -EINVAL; #ifdef CONFIG_ARCH_NO_SG_CHAIN if (WARN_ON_ONCE(nents > max_ents)) return -EINVAL; #endif left = nents; prv = NULL; do { unsigned int sg_size, alloc_size = left; if (alloc_size > curr_max_ents) { alloc_size = curr_max_ents; sg_size = alloc_size - 1; } else sg_size = alloc_size; left -= sg_size; if (first_chunk) { sg = first_chunk; first_chunk = NULL; } else { sg = alloc_fn(alloc_size, gfp_mask); } if (unlikely(!sg)) { /* * Adjust entry count to reflect that the last * entry of the previous table won't be used for * linkage. Without this, sg_kfree() may get * confused. */ if (prv) table->nents = ++table->orig_nents; return -ENOMEM; } sg_init_table(sg, alloc_size); table->nents = table->orig_nents += sg_size; /* * If this is the first mapping, assign the sg table header. * If this is not the first mapping, chain previous part. */ if (prv) sg_chain(prv, prv_max_ents, sg); else table->sgl = sg; /* * If no more entries after this one, mark the end */ if (!left) sg_mark_end(&sg[sg_size - 1]); prv = sg; prv_max_ents = curr_max_ents; curr_max_ents = max_ents; } while (left); return 0; } EXPORT_SYMBOL(__sg_alloc_table); /** * sg_alloc_table - Allocate and initialize an sg table * @table: The sg table header to use * @nents: Number of entries in sg list * @gfp_mask: GFP allocation mask * * Description: * Allocate and initialize an sg table. If @nents@ is larger than * SG_MAX_SINGLE_ALLOC a chained sg table will be setup. 
* **/ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) { int ret; ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC, NULL, 0, gfp_mask, sg_kmalloc); if (unlikely(ret)) sg_free_table(table); return ret; } EXPORT_SYMBOL(sg_alloc_table); static struct scatterlist *get_next_sg(struct sg_append_table *table, struct scatterlist *cur, unsigned long needed_sges, gfp_t gfp_mask) { struct scatterlist *new_sg, *next_sg; unsigned int alloc_size; if (cur) { next_sg = sg_next(cur); /* Check if last entry should be keeped for chainning */ if (!sg_is_last(next_sg) || needed_sges == 1) return next_sg; } alloc_size = min_t(unsigned long, needed_sges, SG_MAX_SINGLE_ALLOC); new_sg = sg_kmalloc(alloc_size, gfp_mask); if (!new_sg) return ERR_PTR(-ENOMEM); sg_init_table(new_sg, alloc_size); if (cur) { table->total_nents += alloc_size - 1; __sg_chain(next_sg, new_sg); } else { table->sgt.sgl = new_sg; table->total_nents = alloc_size; } return new_sg; } static bool pages_are_mergeable(struct page *a, struct page *b) { if (page_to_pfn(a) != page_to_pfn(b) + 1) return false; if (!zone_device_pages_have_same_pgmap(a, b)) return false; return true; } /** * sg_alloc_append_table_from_pages - Allocate and initialize an append sg * table from an array of pages * @sgt_append: The sg append table to use * @pages: Pointer to an array of page pointers * @n_pages: Number of pages in the pages array * @offset: Offset from start of the first page to the start of a buffer * @size: Number of valid bytes in the buffer (after offset) * @max_segment: Maximum size of a scatterlist element in bytes * @left_pages: Left pages caller have to set after this call * @gfp_mask: GFP allocation mask * * Description: * In the first call it allocate and initialize an sg table from a list of * pages, else reuse the scatterlist from sgt_append. Contiguous ranges of * the pages are squashed into a single scatterlist entry up to the maximum * size specified in @max_segment. A user may provide an offset at a start * and a size of valid data in a buffer specified by the page array. The * returned sg table is released by sg_free_append_table * * Returns: * 0 on success, negative error on failure * * Notes: * If this function returns non-0 (eg failure), the caller must call * sg_free_append_table() to cleanup any leftover allocations. * * In the fist call, sgt_append must by initialized. */ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, unsigned int max_segment, unsigned int left_pages, gfp_t gfp_mask) { unsigned int chunks, cur_page, seg_len, i, prv_len = 0; unsigned int added_nents = 0; struct scatterlist *s = sgt_append->prv; struct page *last_pg; /* * The algorithm below requires max_segment to be aligned to PAGE_SIZE * otherwise it can overshoot. 
*/ max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE); if (WARN_ON(max_segment < PAGE_SIZE)) return -EINVAL; if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && sgt_append->prv) return -EOPNOTSUPP; if (sgt_append->prv) { unsigned long next_pfn = (page_to_phys(sg_page(sgt_append->prv)) + sgt_append->prv->offset + sgt_append->prv->length) / PAGE_SIZE; if (WARN_ON(offset)) return -EINVAL; /* Merge contiguous pages into the last SG */ prv_len = sgt_append->prv->length; if (page_to_pfn(pages[0]) == next_pfn) { last_pg = pfn_to_page(next_pfn - 1); while (n_pages && pages_are_mergeable(pages[0], last_pg)) { if (sgt_append->prv->length + PAGE_SIZE > max_segment) break; sgt_append->prv->length += PAGE_SIZE; last_pg = pages[0]; pages++; n_pages--; } if (!n_pages) goto out; } } /* compute number of contiguous chunks */ chunks = 1; seg_len = 0; for (i = 1; i < n_pages; i++) { seg_len += PAGE_SIZE; if (seg_len >= max_segment || !pages_are_mergeable(pages[i], pages[i - 1])) { chunks++; seg_len = 0; } } /* merging chunks and putting them into the scatterlist */ cur_page = 0; for (i = 0; i < chunks; i++) { unsigned int j, chunk_size; /* look for the end of the current chunk */ seg_len = 0; for (j = cur_page + 1; j < n_pages; j++) { seg_len += PAGE_SIZE; if (seg_len >= max_segment || !pages_are_mergeable(pages[j], pages[j - 1])) break; } /* Pass how many chunks might be left */ s = get_next_sg(sgt_append, s, chunks - i + left_pages, gfp_mask); if (IS_ERR(s)) { /* * Adjust entry length to be as before function was * called. */ if (sgt_append->prv) sgt_append->prv->length = prv_len; return PTR_ERR(s); } chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset; sg_set_page(s, pages[cur_page], min_t(unsigned long, size, chunk_size), offset); added_nents++; size -= chunk_size; offset = 0; cur_page = j; } sgt_append->sgt.nents += added_nents; sgt_append->sgt.orig_nents = sgt_append->sgt.nents; sgt_append->prv = s; out: if (!left_pages) sg_mark_end(s); return 0; } EXPORT_SYMBOL(sg_alloc_append_table_from_pages); /** * sg_alloc_table_from_pages_segment - Allocate and initialize an sg table from * an array of pages and given maximum * segment. * @sgt: The sg table header to use * @pages: Pointer to an array of page pointers * @n_pages: Number of pages in the pages array * @offset: Offset from start of the first page to the start of a buffer * @size: Number of valid bytes in the buffer (after offset) * @max_segment: Maximum size of a scatterlist element in bytes * @gfp_mask: GFP allocation mask * * Description: * Allocate and initialize an sg table from a list of pages. Contiguous * ranges of the pages are squashed into a single scatterlist node up to the * maximum size specified in @max_segment. A user may provide an offset at a * start and a size of valid data in a buffer specified by the page array. * * The returned sg table is released by sg_free_table. 
* * Returns: * 0 on success, negative error on failure */ int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, unsigned int max_segment, gfp_t gfp_mask) { struct sg_append_table append = {}; int err; err = sg_alloc_append_table_from_pages(&append, pages, n_pages, offset, size, max_segment, 0, gfp_mask); if (err) { sg_free_append_table(&append); return err; } memcpy(sgt, &append.sgt, sizeof(*sgt)); WARN_ON(append.total_nents != sgt->orig_nents); return 0; } EXPORT_SYMBOL(sg_alloc_table_from_pages_segment); #ifdef CONFIG_SGL_ALLOC /** * sgl_alloc_order - allocate a scatterlist and its pages * @length: Length in bytes of the scatterlist. Must be at least one * @order: Second argument for alloc_pages() * @chainable: Whether or not to allocate an extra element in the scatterlist * for scatterlist chaining purposes * @gfp: Memory allocation flags * @nent_p: [out] Number of entries in the scatterlist that have pages * * Returns: A pointer to an initialized scatterlist or %NULL upon failure. */ struct scatterlist *sgl_alloc_order(unsigned long long length, unsigned int order, bool chainable, gfp_t gfp, unsigned int *nent_p) { struct scatterlist *sgl, *sg; struct page *page; unsigned int nent, nalloc; u32 elem_len; nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order); /* Check for integer overflow */ if (length > (nent << (PAGE_SHIFT + order))) return NULL; nalloc = nent; if (chainable) { /* Check for integer overflow */ if (nalloc + 1 < nalloc) return NULL; nalloc++; } sgl = kmalloc_array(nalloc, sizeof(struct scatterlist), gfp & ~GFP_DMA); if (!sgl) return NULL; sg_init_table(sgl, nalloc); sg = sgl; while (length) { elem_len = min_t(u64, length, PAGE_SIZE << order); page = alloc_pages(gfp, order); if (!page) { sgl_free_order(sgl, order); return NULL; } sg_set_page(sg, page, elem_len, 0); length -= elem_len; sg = sg_next(sg); } WARN_ONCE(length, "length = %lld\n", length); if (nent_p) *nent_p = nent; return sgl; } EXPORT_SYMBOL(sgl_alloc_order); /** * sgl_alloc - allocate a scatterlist and its pages * @length: Length in bytes of the scatterlist * @gfp: Memory allocation flags * @nent_p: [out] Number of entries in the scatterlist * * Returns: A pointer to an initialized scatterlist or %NULL upon failure. */ struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp, unsigned int *nent_p) { return sgl_alloc_order(length, 0, false, gfp, nent_p); } EXPORT_SYMBOL(sgl_alloc); /** * sgl_free_n_order - free a scatterlist and its pages * @sgl: Scatterlist with one or more elements * @nents: Maximum number of elements to free * @order: Second argument for __free_pages() * * Notes: * - If several scatterlists have been chained and each chain element is * freed separately then it's essential to set nents correctly to avoid that a * page would get freed twice. * - All pages in a chained scatterlist can be freed at once by setting @nents * to a high number. 
*/ void sgl_free_n_order(struct scatterlist *sgl, int nents, int order) { struct scatterlist *sg; struct page *page; int i; for_each_sg(sgl, sg, nents, i) { if (!sg) break; page = sg_page(sg); if (page) __free_pages(page, order); } kfree(sgl); } EXPORT_SYMBOL(sgl_free_n_order); /** * sgl_free_order - free a scatterlist and its pages * @sgl: Scatterlist with one or more elements * @order: Second argument for __free_pages() */ void sgl_free_order(struct scatterlist *sgl, int order) { sgl_free_n_order(sgl, INT_MAX, order); } EXPORT_SYMBOL(sgl_free_order); /** * sgl_free - free a scatterlist and its pages * @sgl: Scatterlist with one or more elements */ void sgl_free(struct scatterlist *sgl) { sgl_free_order(sgl, 0); } EXPORT_SYMBOL(sgl_free); #endif /* CONFIG_SGL_ALLOC */ void __sg_page_iter_start(struct sg_page_iter *piter, struct scatterlist *sglist, unsigned int nents, unsigned long pgoffset) { piter->__pg_advance = 0; piter->__nents = nents; piter->sg = sglist; piter->sg_pgoffset = pgoffset; } EXPORT_SYMBOL(__sg_page_iter_start); static int sg_page_count(struct scatterlist *sg) { return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT; } bool __sg_page_iter_next(struct sg_page_iter *piter) { if (!piter->__nents || !piter->sg) return false; piter->sg_pgoffset += piter->__pg_advance; piter->__pg_advance = 1; while (piter->sg_pgoffset >= sg_page_count(piter->sg)) { piter->sg_pgoffset -= sg_page_count(piter->sg); piter->sg = sg_next(piter->sg); if (!--piter->__nents || !piter->sg) return false; } return true; } EXPORT_SYMBOL(__sg_page_iter_next); static int sg_dma_page_count(struct scatterlist *sg) { return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT; } bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter) { struct sg_page_iter *piter = &dma_iter->base; if (!piter->__nents || !piter->sg) return false; piter->sg_pgoffset += piter->__pg_advance; piter->__pg_advance = 1; while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) { piter->sg_pgoffset -= sg_dma_page_count(piter->sg); piter->sg = sg_next(piter->sg); if (!--piter->__nents || !piter->sg) return false; } return true; } EXPORT_SYMBOL(__sg_page_iter_dma_next); /** * sg_miter_start - start mapping iteration over a sg list * @miter: sg mapping iter to be started * @sgl: sg list to iterate over * @nents: number of sg entries * @flags: sg iterator flags * * Description: * Starts mapping iterator @miter. * * Context: * Don't care. */ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, unsigned int nents, unsigned int flags) { memset(miter, 0, sizeof(struct sg_mapping_iter)); __sg_page_iter_start(&miter->piter, sgl, nents, 0); WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG))); miter->__flags = flags; } EXPORT_SYMBOL(sg_miter_start); static bool sg_miter_get_next_page(struct sg_mapping_iter *miter) { if (!miter->__remaining) { struct scatterlist *sg; if (!__sg_page_iter_next(&miter->piter)) return false; sg = miter->piter.sg; miter->__offset = miter->piter.sg_pgoffset ? 
0 : sg->offset; miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT; miter->__offset &= PAGE_SIZE - 1; miter->__remaining = sg->offset + sg->length - (miter->piter.sg_pgoffset << PAGE_SHIFT) - miter->__offset; miter->__remaining = min_t(unsigned long, miter->__remaining, PAGE_SIZE - miter->__offset); } return true; } /** * sg_miter_skip - reposition mapping iterator * @miter: sg mapping iter to be skipped * @offset: number of bytes to plus the current location * * Description: * Sets the offset of @miter to its current location plus @offset bytes. * If mapping iterator @miter has been proceeded by sg_miter_next(), this * stops @miter. * * Context: * Don't care. * * Returns: * true if @miter contains the valid mapping. false if end of sg * list is reached. */ bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset) { sg_miter_stop(miter); while (offset) { off_t consumed; if (!sg_miter_get_next_page(miter)) return false; consumed = min_t(off_t, offset, miter->__remaining); miter->__offset += consumed; miter->__remaining -= consumed; offset -= consumed; } return true; } EXPORT_SYMBOL(sg_miter_skip); /** * sg_miter_next - proceed mapping iterator to the next mapping * @miter: sg mapping iter to proceed * * Description: * Proceeds @miter to the next mapping. @miter should have been started * using sg_miter_start(). On successful return, @miter->page, * @miter->addr and @miter->length point to the current mapping. * * Context: * May sleep if !SG_MITER_ATOMIC. * * Returns: * true if @miter contains the next mapping. false if end of sg * list is reached. */ bool sg_miter_next(struct sg_mapping_iter *miter) { sg_miter_stop(miter); /* * Get to the next page if necessary. * __remaining, __offset is adjusted by sg_miter_stop */ if (!sg_miter_get_next_page(miter)) return false; miter->page = sg_page_iter_page(&miter->piter); miter->consumed = miter->length = miter->__remaining; if (miter->__flags & SG_MITER_ATOMIC) miter->addr = kmap_atomic(miter->page) + miter->__offset; else miter->addr = kmap(miter->page) + miter->__offset; return true; } EXPORT_SYMBOL(sg_miter_next); /** * sg_miter_stop - stop mapping iteration * @miter: sg mapping iter to be stopped * * Description: * Stops mapping iterator @miter. @miter should have been started * using sg_miter_start(). A stopped iteration can be resumed by * calling sg_miter_next() on it. This is useful when resources (kmap) * need to be released during iteration. * * Context: * Don't care otherwise. */ void sg_miter_stop(struct sg_mapping_iter *miter) { WARN_ON(miter->consumed > miter->length); /* drop resources from the last iteration */ if (miter->addr) { miter->__offset += miter->consumed; miter->__remaining -= miter->consumed; if (miter->__flags & SG_MITER_TO_SG) flush_dcache_page(miter->page); if (miter->__flags & SG_MITER_ATOMIC) { WARN_ON_ONCE(!pagefault_disabled()); kunmap_atomic(miter->addr); } else kunmap(miter->page); miter->page = NULL; miter->addr = NULL; miter->length = 0; miter->consumed = 0; } } EXPORT_SYMBOL(sg_miter_stop); /** * sg_copy_buffer - Copy data between a linear buffer and an SG list * @sgl: The SG list * @nents: Number of SG entries * @buf: Where to copy from * @buflen: The number of bytes to copy * @skip: Number of bytes to skip before copying * @to_buffer: transfer direction (true == from an sg list to a * buffer, false == from a buffer to an sg list) * * Returns the number of copied bytes. 
* **/ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, size_t buflen, off_t skip, bool to_buffer) { unsigned int offset = 0; struct sg_mapping_iter miter; unsigned int sg_flags = SG_MITER_ATOMIC; if (to_buffer) sg_flags |= SG_MITER_FROM_SG; else sg_flags |= SG_MITER_TO_SG; sg_miter_start(&miter, sgl, nents, sg_flags); if (!sg_miter_skip(&miter, skip)) return 0; while ((offset < buflen) && sg_miter_next(&miter)) { unsigned int len; len = min(miter.length, buflen - offset); if (to_buffer) memcpy(buf + offset, miter.addr, len); else memcpy(miter.addr, buf + offset, len); offset += len; } sg_miter_stop(&miter); return offset; } EXPORT_SYMBOL(sg_copy_buffer); /** * sg_copy_from_buffer - Copy from a linear buffer to an SG list * @sgl: The SG list * @nents: Number of SG entries * @buf: Where to copy from * @buflen: The number of bytes to copy * * Returns the number of copied bytes. * **/ size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, const void *buf, size_t buflen) { return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false); } EXPORT_SYMBOL(sg_copy_from_buffer); /** * sg_copy_to_buffer - Copy from an SG list to a linear buffer * @sgl: The SG list * @nents: Number of SG entries * @buf: Where to copy to * @buflen: The number of bytes to copy * * Returns the number of copied bytes. * **/ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, size_t buflen) { return sg_copy_buffer(sgl, nents, buf, buflen, 0, true); } EXPORT_SYMBOL(sg_copy_to_buffer); /** * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list * @sgl: The SG list * @nents: Number of SG entries * @buf: Where to copy from * @buflen: The number of bytes to copy * @skip: Number of bytes to skip before copying * * Returns the number of copied bytes. * **/ size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, const void *buf, size_t buflen, off_t skip) { return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false); } EXPORT_SYMBOL(sg_pcopy_from_buffer); /** * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer * @sgl: The SG list * @nents: Number of SG entries * @buf: Where to copy to * @buflen: The number of bytes to copy * @skip: Number of bytes to skip before copying * * Returns the number of copied bytes. * **/ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, size_t buflen, off_t skip) { return sg_copy_buffer(sgl, nents, buf, buflen, skip, true); } EXPORT_SYMBOL(sg_pcopy_to_buffer); /** * sg_zero_buffer - Zero-out a part of a SG list * @sgl: The SG list * @nents: Number of SG entries * @buflen: The number of bytes to zero out * @skip: Number of bytes to skip before zeroing * * Returns the number of bytes zeroed. **/ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents, size_t buflen, off_t skip) { unsigned int offset = 0; struct sg_mapping_iter miter; unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG; sg_miter_start(&miter, sgl, nents, sg_flags); if (!sg_miter_skip(&miter, skip)) return false; while (offset < buflen && sg_miter_next(&miter)) { unsigned int len; len = min(miter.length, buflen - offset); memset(miter.addr, 0, len); offset += len; } sg_miter_stop(&miter); return offset; } EXPORT_SYMBOL(sg_zero_buffer); /* * Extract and pin a list of up to sg_max pages from UBUF- or IOVEC-class * iterators, and add them to the scatterlist. 
*/ static ssize_t extract_user_to_sg(struct iov_iter *iter, ssize_t maxsize, struct sg_table *sgtable, unsigned int sg_max, iov_iter_extraction_t extraction_flags) { struct scatterlist *sg = sgtable->sgl + sgtable->nents; struct page **pages; unsigned int npages; ssize_t ret = 0, res; size_t len, off; /* We decant the page list into the tail of the scatterlist */ pages = (void *)sgtable->sgl + array_size(sg_max, sizeof(struct scatterlist)); pages -= sg_max; do { res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max, extraction_flags, &off); if (res < 0) goto failed; len = res; maxsize -= len; ret += len; npages = DIV_ROUND_UP(off + len, PAGE_SIZE); sg_max -= npages; for (; npages > 0; npages--) { struct page *page = *pages; size_t seg = min_t(size_t, PAGE_SIZE - off, len); *pages++ = NULL; sg_set_page(sg, page, seg, off); sgtable->nents++; sg++; len -= seg; off = 0; } } while (maxsize > 0 && sg_max > 0); return ret; failed: while (sgtable->nents > sgtable->orig_nents) unpin_user_page(sg_page(&sgtable->sgl[--sgtable->nents])); return res; } /* * Extract up to sg_max pages from a BVEC-type iterator and add them to the * scatterlist. The pages are not pinned. */ static ssize_t extract_bvec_to_sg(struct iov_iter *iter, ssize_t maxsize, struct sg_table *sgtable, unsigned int sg_max, iov_iter_extraction_t extraction_flags) { const struct bio_vec *bv = iter->bvec; struct scatterlist *sg = sgtable->sgl + sgtable->nents; unsigned long start = iter->iov_offset; unsigned int i; ssize_t ret = 0; for (i = 0; i < iter->nr_segs; i++) { size_t off, len; len = bv[i].bv_len; if (start >= len) { start -= len; continue; } len = min_t(size_t, maxsize, len - start); off = bv[i].bv_offset + start; sg_set_page(sg, bv[i].bv_page, len, off); sgtable->nents++; sg++; sg_max--; ret += len; maxsize -= len; if (maxsize <= 0 || sg_max == 0) break; start = 0; } if (ret > 0) iov_iter_advance(iter, ret); return ret; } /* * Extract up to sg_max pages from a KVEC-type iterator and add them to the * scatterlist. This can deal with vmalloc'd buffers as well as kmalloc'd or * static buffers. The pages are not pinned. */ static ssize_t extract_kvec_to_sg(struct iov_iter *iter, ssize_t maxsize, struct sg_table *sgtable, unsigned int sg_max, iov_iter_extraction_t extraction_flags) { const struct kvec *kv = iter->kvec; struct scatterlist *sg = sgtable->sgl + sgtable->nents; unsigned long start = iter->iov_offset; unsigned int i; ssize_t ret = 0; for (i = 0; i < iter->nr_segs; i++) { struct page *page; unsigned long kaddr; size_t off, len, seg; len = kv[i].iov_len; if (start >= len) { start -= len; continue; } kaddr = (unsigned long)kv[i].iov_base + start; off = kaddr & ~PAGE_MASK; len = min_t(size_t, maxsize, len - start); kaddr &= PAGE_MASK; maxsize -= len; ret += len; do { seg = min_t(size_t, len, PAGE_SIZE - off); if (is_vmalloc_or_module_addr((void *)kaddr)) page = vmalloc_to_page((void *)kaddr); else page = virt_to_page((void *)kaddr); sg_set_page(sg, page, len, off); sgtable->nents++; sg++; sg_max--; len -= seg; kaddr += PAGE_SIZE; off = 0; } while (len > 0 && sg_max > 0); if (maxsize <= 0 || sg_max == 0) break; start = 0; } if (ret > 0) iov_iter_advance(iter, ret); return ret; } /* * Extract up to sg_max folios from an XARRAY-type iterator and add them to * the scatterlist. The pages are not pinned. 
*/ static ssize_t extract_xarray_to_sg(struct iov_iter *iter, ssize_t maxsize, struct sg_table *sgtable, unsigned int sg_max, iov_iter_extraction_t extraction_flags) { struct scatterlist *sg = sgtable->sgl + sgtable->nents; struct xarray *xa = iter->xarray; struct folio *folio; loff_t start = iter->xarray_start + iter->iov_offset; pgoff_t index = start / PAGE_SIZE; ssize_t ret = 0; size_t offset, len; XA_STATE(xas, xa, index); rcu_read_lock(); xas_for_each(&xas, folio, ULONG_MAX) { if (xas_retry(&xas, folio)) continue; if (WARN_ON(xa_is_value(folio))) break; if (WARN_ON(folio_test_hugetlb(folio))) break; offset = offset_in_folio(folio, start); len = min_t(size_t, maxsize, folio_size(folio) - offset); sg_set_page(sg, folio_page(folio, 0), len, offset); sgtable->nents++; sg++; sg_max--; maxsize -= len; ret += len; if (maxsize <= 0 || sg_max == 0) break; } rcu_read_unlock(); if (ret > 0) iov_iter_advance(iter, ret); return ret; } /** * extract_iter_to_sg - Extract pages from an iterator and add to an sglist * @iter: The iterator to extract from * @maxsize: The amount of iterator to copy * @sgtable: The scatterlist table to fill in * @sg_max: Maximum number of elements in @sgtable that may be filled * @extraction_flags: Flags to qualify the request * * Extract the page fragments from the given amount of the source iterator and * add them to a scatterlist that refers to all of those bits, to a maximum * addition of @sg_max elements. * * The pages referred to by UBUF- and IOVEC-type iterators are extracted and * pinned; BVEC-, KVEC- and XARRAY-type are extracted but aren't pinned; PIPE- * and DISCARD-type are not supported. * * No end mark is placed on the scatterlist; that's left to the caller. * * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA * be allowed on the pages extracted. * * If successful, @sgtable->nents is updated to include the number of elements * added and the number of bytes added is returned. @sgtable->orig_nents is * left unaltered. * * The iov_iter_extract_mode() function should be used to query how cleanup * should be performed. */ ssize_t extract_iter_to_sg(struct iov_iter *iter, size_t maxsize, struct sg_table *sgtable, unsigned int sg_max, iov_iter_extraction_t extraction_flags) { if (maxsize == 0) return 0; switch (iov_iter_type(iter)) { case ITER_UBUF: case ITER_IOVEC: return extract_user_to_sg(iter, maxsize, sgtable, sg_max, extraction_flags); case ITER_BVEC: return extract_bvec_to_sg(iter, maxsize, sgtable, sg_max, extraction_flags); case ITER_KVEC: return extract_kvec_to_sg(iter, maxsize, sgtable, sg_max, extraction_flags); case ITER_XARRAY: return extract_xarray_to_sg(iter, maxsize, sgtable, sg_max, extraction_flags); default: pr_err("%s(%u) unsupported\n", __func__, iov_iter_type(iter)); WARN_ON_ONCE(1); return -EIO; } } EXPORT_SYMBOL_GPL(extract_iter_to_sg);
linux-master
lib/scatterlist.c
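A minimal, self-contained sketch of a typical sg_table user, to make the allocation and copy helpers above concrete (example_sg_usage(), the chunk count and the chunk size are all invented for illustration):

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

#define EXAMPLE_NCHUNKS		3
#define EXAMPLE_CHUNK_SZ	32

static int example_sg_usage(void)
{
	struct sg_table sgt;
	struct scatterlist *sg;
	void *bufs[EXAMPLE_NCHUNKS] = {};
	char src[EXAMPLE_NCHUNKS * EXAMPLE_CHUNK_SZ];
	int i, ret;

	ret = sg_alloc_table(&sgt, EXAMPLE_NCHUNKS, GFP_KERNEL);
	if (ret)
		return ret;

	/* Point each sg entry at its own kmalloc'ed chunk. */
	for_each_sg(sgt.sgl, sg, sgt.orig_nents, i) {
		bufs[i] = kmalloc(EXAMPLE_CHUNK_SZ, GFP_KERNEL);
		if (!bufs[i]) {
			ret = -ENOMEM;
			goto out;
		}
		sg_set_buf(sg, bufs[i], EXAMPLE_CHUNK_SZ);
	}

	/* Scatter one linear buffer across the three chunks. */
	memset(src, 0xaa, sizeof(src));
	sg_copy_from_buffer(sgt.sgl, sgt.orig_nents, src, sizeof(src));

out:
	for (i = 0; i < EXAMPLE_NCHUNKS; i++)
		kfree(bufs[i]);		/* kfree(NULL) is a no-op */
	sg_free_table(&sgt);
	return ret;
}

sg_copy_to_buffer() is the gather direction of the same operation; both are thin wrappers around sg_copy_buffer() shown above.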
// SPDX-License-Identifier: GPL-2.0 /* * kobject.c - library routines for handling generic kernel objects * * Copyright (c) 2002-2003 Patrick Mochel <[email protected]> * Copyright (c) 2006-2007 Greg Kroah-Hartman <[email protected]> * Copyright (c) 2006-2007 Novell Inc. * * Please see the file Documentation/core-api/kobject.rst for critical information * about using the kobject interface. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kobject.h> #include <linux/string.h> #include <linux/export.h> #include <linux/stat.h> #include <linux/slab.h> #include <linux/random.h> /** * kobject_namespace() - Return @kobj's namespace tag. * @kobj: kobject in question * * Returns namespace tag of @kobj if its parent has namespace ops enabled * and thus @kobj should have a namespace tag associated with it. Returns * %NULL otherwise. */ const void *kobject_namespace(const struct kobject *kobj) { const struct kobj_ns_type_operations *ns_ops = kobj_ns_ops(kobj); if (!ns_ops || ns_ops->type == KOBJ_NS_TYPE_NONE) return NULL; return kobj->ktype->namespace(kobj); } /** * kobject_get_ownership() - Get sysfs ownership data for @kobj. * @kobj: kobject in question * @uid: kernel user ID for sysfs objects * @gid: kernel group ID for sysfs objects * * Returns initial uid/gid pair that should be used when creating sysfs * representation of given kobject. Normally used to adjust ownership of * objects in a container. */ void kobject_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid) { *uid = GLOBAL_ROOT_UID; *gid = GLOBAL_ROOT_GID; if (kobj->ktype->get_ownership) kobj->ktype->get_ownership(kobj, uid, gid); } static bool kobj_ns_type_is_valid(enum kobj_ns_type type) { if ((type <= KOBJ_NS_TYPE_NONE) || (type >= KOBJ_NS_TYPES)) return false; return true; } static int create_dir(struct kobject *kobj) { const struct kobj_type *ktype = get_ktype(kobj); const struct kobj_ns_type_operations *ops; int error; error = sysfs_create_dir_ns(kobj, kobject_namespace(kobj)); if (error) return error; error = sysfs_create_groups(kobj, ktype->default_groups); if (error) { sysfs_remove_dir(kobj); return error; } /* * @kobj->sd may be deleted by an ancestor going away. Hold an * extra reference so that it stays until @kobj is gone. */ sysfs_get(kobj->sd); /* * If @kobj has ns_ops, its children need to be filtered based on * their namespace tags. Enable namespace support on @kobj->sd. */ ops = kobj_child_ns_ops(kobj); if (ops) { BUG_ON(!kobj_ns_type_is_valid(ops->type)); BUG_ON(!kobj_ns_type_registered(ops->type)); sysfs_enable_ns(kobj->sd); } return 0; } static int get_kobj_path_length(const struct kobject *kobj) { int length = 1; const struct kobject *parent = kobj; /* walk up the ancestors until we hit the one pointing to the * root. * Add 1 to strlen for leading '/' of each level. */ do { if (kobject_name(parent) == NULL) return 0; length += strlen(kobject_name(parent)) + 1; parent = parent->parent; } while (parent); return length; } static int fill_kobj_path(const struct kobject *kobj, char *path, int length) { const struct kobject *parent; --length; for (parent = kobj; parent; parent = parent->parent) { int cur = strlen(kobject_name(parent)); /* back up enough to print this name with '/' */ length -= cur; if (length <= 0) return -EINVAL; memcpy(path + length, kobject_name(parent), cur); *(path + --length) = '/'; } pr_debug("'%s' (%p): %s: path = '%s'\n", kobject_name(kobj), kobj, __func__, path); return 0; } /** * kobject_get_path() - Allocate memory and fill in the path for @kobj. 
* @kobj: kobject in question, with which to build the path * @gfp_mask: the allocation type used to allocate the path * * Return: The newly allocated memory, caller must free with kfree(). */ char *kobject_get_path(const struct kobject *kobj, gfp_t gfp_mask) { char *path; int len; retry: len = get_kobj_path_length(kobj); if (len == 0) return NULL; path = kzalloc(len, gfp_mask); if (!path) return NULL; if (fill_kobj_path(kobj, path, len)) { kfree(path); goto retry; } return path; } EXPORT_SYMBOL_GPL(kobject_get_path); /* add the kobject to its kset's list */ static void kobj_kset_join(struct kobject *kobj) { if (!kobj->kset) return; kset_get(kobj->kset); spin_lock(&kobj->kset->list_lock); list_add_tail(&kobj->entry, &kobj->kset->list); spin_unlock(&kobj->kset->list_lock); } /* remove the kobject from its kset's list */ static void kobj_kset_leave(struct kobject *kobj) { if (!kobj->kset) return; spin_lock(&kobj->kset->list_lock); list_del_init(&kobj->entry); spin_unlock(&kobj->kset->list_lock); kset_put(kobj->kset); } static void kobject_init_internal(struct kobject *kobj) { if (!kobj) return; kref_init(&kobj->kref); INIT_LIST_HEAD(&kobj->entry); kobj->state_in_sysfs = 0; kobj->state_add_uevent_sent = 0; kobj->state_remove_uevent_sent = 0; kobj->state_initialized = 1; } static int kobject_add_internal(struct kobject *kobj) { int error = 0; struct kobject *parent; if (!kobj) return -ENOENT; if (!kobj->name || !kobj->name[0]) { WARN(1, "kobject: (%p): attempted to be registered with empty name!\n", kobj); return -EINVAL; } parent = kobject_get(kobj->parent); /* join kset if set, use it as parent if we do not already have one */ if (kobj->kset) { if (!parent) parent = kobject_get(&kobj->kset->kobj); kobj_kset_join(kobj); kobj->parent = parent; } pr_debug("'%s' (%p): %s: parent: '%s', set: '%s'\n", kobject_name(kobj), kobj, __func__, parent ? kobject_name(parent) : "<NULL>", kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>"); error = create_dir(kobj); if (error) { kobj_kset_leave(kobj); kobject_put(parent); kobj->parent = NULL; /* be noisy on error issues */ if (error == -EEXIST) pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n", __func__, kobject_name(kobj)); else pr_err("%s failed for %s (error: %d parent: %s)\n", __func__, kobject_name(kobj), error, parent ? kobject_name(parent) : "'none'"); } else kobj->state_in_sysfs = 1; return error; } /** * kobject_set_name_vargs() - Set the name of a kobject. * @kobj: struct kobject to set the name of * @fmt: format string used to build the name * @vargs: vargs to format the string. */ int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list vargs) { const char *s; if (kobj->name && !fmt) return 0; s = kvasprintf_const(GFP_KERNEL, fmt, vargs); if (!s) return -ENOMEM; /* * ewww... some of these buggers have '/' in the name ... If * that's the case, we need to make sure we have an actual * allocated copy to modify, since kvasprintf_const may have * returned something from .rodata. */ if (strchr(s, '/')) { char *t; t = kstrdup(s, GFP_KERNEL); kfree_const(s); if (!t) return -ENOMEM; s = strreplace(t, '/', '!'); } kfree_const(kobj->name); kobj->name = s; return 0; } /** * kobject_set_name() - Set the name of a kobject. * @kobj: struct kobject to set the name of * @fmt: format string used to build the name * * This sets the name of the kobject. If you have already added the * kobject to the system, you must call kobject_rename() in order to * change the name of the kobject. 
*/ int kobject_set_name(struct kobject *kobj, const char *fmt, ...) { va_list vargs; int retval; va_start(vargs, fmt); retval = kobject_set_name_vargs(kobj, fmt, vargs); va_end(vargs); return retval; } EXPORT_SYMBOL(kobject_set_name); /** * kobject_init() - Initialize a kobject structure. * @kobj: pointer to the kobject to initialize * @ktype: pointer to the ktype for this kobject. * * This function will properly initialize a kobject such that it can then * be passed to the kobject_add() call. * * After this function is called, the kobject MUST be cleaned up by a call * to kobject_put(), not by a call to kfree directly to ensure that all of * the memory is cleaned up properly. */ void kobject_init(struct kobject *kobj, const struct kobj_type *ktype) { char *err_str; if (!kobj) { err_str = "invalid kobject pointer!"; goto error; } if (!ktype) { err_str = "must have a ktype to be initialized properly!\n"; goto error; } if (kobj->state_initialized) { /* do not error out as sometimes we can recover */ pr_err("kobject (%p): tried to init an initialized object, something is seriously wrong.\n", kobj); dump_stack_lvl(KERN_ERR); } kobject_init_internal(kobj); kobj->ktype = ktype; return; error: pr_err("kobject (%p): %s\n", kobj, err_str); dump_stack_lvl(KERN_ERR); } EXPORT_SYMBOL(kobject_init); static __printf(3, 0) int kobject_add_varg(struct kobject *kobj, struct kobject *parent, const char *fmt, va_list vargs) { int retval; retval = kobject_set_name_vargs(kobj, fmt, vargs); if (retval) { pr_err("can not set name properly!\n"); return retval; } kobj->parent = parent; return kobject_add_internal(kobj); } /** * kobject_add() - The main kobject add function. * @kobj: the kobject to add * @parent: pointer to the parent of the kobject. * @fmt: format to name the kobject with. * * The kobject name is set and added to the kobject hierarchy in this * function. * * If @parent is set, then the parent of the @kobj will be set to it. * If @parent is NULL, then the parent of the @kobj will be set to the * kobject associated with the kset assigned to this kobject. If no kset * is assigned to the kobject, then the kobject will be located in the * root of the sysfs tree. * * Note, no "add" uevent will be created with this call, the caller should set * up all of the necessary sysfs files for the object and then call * kobject_uevent() with the UEVENT_ADD parameter to ensure that * userspace is properly notified of this kobject's creation. * * Return: If this function returns an error, kobject_put() must be * called to properly clean up the memory associated with the * object. Under no instance should the kobject that is passed * to this function be directly freed with a call to kfree(), * that can leak memory. * * If this function returns success, kobject_put() must also be called * in order to properly clean up the memory associated with the object. * * In short, once this function is called, kobject_put() MUST be called * when the use of the object is finished in order to properly free * everything. */ int kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 
{ va_list args; int retval; if (!kobj) return -EINVAL; if (!kobj->state_initialized) { pr_err("kobject '%s' (%p): tried to add an uninitialized object, something is seriously wrong.\n", kobject_name(kobj), kobj); dump_stack_lvl(KERN_ERR); return -EINVAL; } va_start(args, fmt); retval = kobject_add_varg(kobj, parent, fmt, args); va_end(args); return retval; } EXPORT_SYMBOL(kobject_add); /** * kobject_init_and_add() - Initialize a kobject structure and add it to * the kobject hierarchy. * @kobj: pointer to the kobject to initialize * @ktype: pointer to the ktype for this kobject. * @parent: pointer to the parent of this kobject. * @fmt: the name of the kobject. * * This function combines the call to kobject_init() and kobject_add(). * * If this function returns an error, kobject_put() must be called to * properly clean up the memory associated with the object. This is the * same type of error handling after a call to kobject_add() and kobject * lifetime rules are the same here. */ int kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, struct kobject *parent, const char *fmt, ...) { va_list args; int retval; kobject_init(kobj, ktype); va_start(args, fmt); retval = kobject_add_varg(kobj, parent, fmt, args); va_end(args); return retval; } EXPORT_SYMBOL_GPL(kobject_init_and_add); /** * kobject_rename() - Change the name of an object. * @kobj: object in question. * @new_name: object's new name * * It is the responsibility of the caller to provide mutual * exclusion between two different calls of kobject_rename * on the same kobject and to ensure that new_name is valid and * won't conflict with other kobjects. */ int kobject_rename(struct kobject *kobj, const char *new_name) { int error = 0; const char *devpath = NULL; const char *dup_name = NULL, *name; char *devpath_string = NULL; char *envp[2]; kobj = kobject_get(kobj); if (!kobj) return -EINVAL; if (!kobj->parent) { kobject_put(kobj); return -EINVAL; } devpath = kobject_get_path(kobj, GFP_KERNEL); if (!devpath) { error = -ENOMEM; goto out; } devpath_string = kmalloc(strlen(devpath) + 15, GFP_KERNEL); if (!devpath_string) { error = -ENOMEM; goto out; } sprintf(devpath_string, "DEVPATH_OLD=%s", devpath); envp[0] = devpath_string; envp[1] = NULL; name = dup_name = kstrdup_const(new_name, GFP_KERNEL); if (!name) { error = -ENOMEM; goto out; } error = sysfs_rename_dir_ns(kobj, new_name, kobject_namespace(kobj)); if (error) goto out; /* Install the new kobject name */ dup_name = kobj->name; kobj->name = name; /* This function is mostly/only used for network interface. * Some hotplug package track interfaces by their name and * therefore want to know when the name is changed by the user. */ kobject_uevent_env(kobj, KOBJ_MOVE, envp); out: kfree_const(dup_name); kfree(devpath_string); kfree(devpath); kobject_put(kobj); return error; } EXPORT_SYMBOL_GPL(kobject_rename); /** * kobject_move() - Move object to another parent. * @kobj: object in question. 
* @new_parent: object's new parent (can be NULL) */ int kobject_move(struct kobject *kobj, struct kobject *new_parent) { int error; struct kobject *old_parent; const char *devpath = NULL; char *devpath_string = NULL; char *envp[2]; kobj = kobject_get(kobj); if (!kobj) return -EINVAL; new_parent = kobject_get(new_parent); if (!new_parent) { if (kobj->kset) new_parent = kobject_get(&kobj->kset->kobj); } /* old object path */ devpath = kobject_get_path(kobj, GFP_KERNEL); if (!devpath) { error = -ENOMEM; goto out; } devpath_string = kmalloc(strlen(devpath) + 15, GFP_KERNEL); if (!devpath_string) { error = -ENOMEM; goto out; } sprintf(devpath_string, "DEVPATH_OLD=%s", devpath); envp[0] = devpath_string; envp[1] = NULL; error = sysfs_move_dir_ns(kobj, new_parent, kobject_namespace(kobj)); if (error) goto out; old_parent = kobj->parent; kobj->parent = new_parent; new_parent = NULL; kobject_put(old_parent); kobject_uevent_env(kobj, KOBJ_MOVE, envp); out: kobject_put(new_parent); kobject_put(kobj); kfree(devpath_string); kfree(devpath); return error; } EXPORT_SYMBOL_GPL(kobject_move); static void __kobject_del(struct kobject *kobj) { struct kernfs_node *sd; const struct kobj_type *ktype; sd = kobj->sd; ktype = get_ktype(kobj); sysfs_remove_groups(kobj, ktype->default_groups); /* send "remove" if the caller did not do it but sent "add" */ if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) { pr_debug("'%s' (%p): auto cleanup 'remove' event\n", kobject_name(kobj), kobj); kobject_uevent(kobj, KOBJ_REMOVE); } sysfs_remove_dir(kobj); sysfs_put(sd); kobj->state_in_sysfs = 0; kobj_kset_leave(kobj); kobj->parent = NULL; } /** * kobject_del() - Unlink kobject from hierarchy. * @kobj: object. * * This is the function that should be called to delete an object * successfully added via kobject_add(). */ void kobject_del(struct kobject *kobj) { struct kobject *parent; if (!kobj) return; parent = kobj->parent; __kobject_del(kobj); kobject_put(parent); } EXPORT_SYMBOL(kobject_del); /** * kobject_get() - Increment refcount for object. * @kobj: object. */ struct kobject *kobject_get(struct kobject *kobj) { if (kobj) { if (!kobj->state_initialized) WARN(1, KERN_WARNING "kobject: '%s' (%p): is not initialized, yet kobject_get() is being called.\n", kobject_name(kobj), kobj); kref_get(&kobj->kref); } return kobj; } EXPORT_SYMBOL(kobject_get); struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj) { if (!kobj) return NULL; if (!kref_get_unless_zero(&kobj->kref)) kobj = NULL; return kobj; } EXPORT_SYMBOL(kobject_get_unless_zero); /* * kobject_cleanup - free kobject resources. * @kobj: object to cleanup */ static void kobject_cleanup(struct kobject *kobj) { struct kobject *parent = kobj->parent; const struct kobj_type *t = get_ktype(kobj); const char *name = kobj->name; pr_debug("'%s' (%p): %s, parent %p\n", kobject_name(kobj), kobj, __func__, kobj->parent); /* remove from sysfs if the caller did not do it */ if (kobj->state_in_sysfs) { pr_debug("'%s' (%p): auto cleanup kobject_del\n", kobject_name(kobj), kobj); __kobject_del(kobj); } else { /* avoid dropping the parent reference unnecessarily */ parent = NULL; } if (t->release) { pr_debug("'%s' (%p): calling ktype release\n", kobject_name(kobj), kobj); t->release(kobj); } else { pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. 
See Documentation/core-api/kobject.rst.\n", kobject_name(kobj), kobj); } /* free name if we allocated it */ if (name) { pr_debug("'%s': free name\n", name); kfree_const(name); } kobject_put(parent); } #ifdef CONFIG_DEBUG_KOBJECT_RELEASE static void kobject_delayed_cleanup(struct work_struct *work) { kobject_cleanup(container_of(to_delayed_work(work), struct kobject, release)); } #endif static void kobject_release(struct kref *kref) { struct kobject *kobj = container_of(kref, struct kobject, kref); #ifdef CONFIG_DEBUG_KOBJECT_RELEASE unsigned long delay = HZ + HZ * get_random_u32_below(4); pr_info("'%s' (%p): %s, parent %p (delayed %ld)\n", kobject_name(kobj), kobj, __func__, kobj->parent, delay); INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup); schedule_delayed_work(&kobj->release, delay); #else kobject_cleanup(kobj); #endif } /** * kobject_put() - Decrement refcount for object. * @kobj: object. * * Decrement the refcount, and if 0, call kobject_cleanup(). */ void kobject_put(struct kobject *kobj) { if (kobj) { if (!kobj->state_initialized) WARN(1, KERN_WARNING "kobject: '%s' (%p): is not initialized, yet kobject_put() is being called.\n", kobject_name(kobj), kobj); kref_put(&kobj->kref, kobject_release); } } EXPORT_SYMBOL(kobject_put); static void dynamic_kobj_release(struct kobject *kobj) { pr_debug("(%p): %s\n", kobj, __func__); kfree(kobj); } static const struct kobj_type dynamic_kobj_ktype = { .release = dynamic_kobj_release, .sysfs_ops = &kobj_sysfs_ops, }; /** * kobject_create() - Create a struct kobject dynamically. * * This function creates a kobject structure dynamically and sets it up * to be a "dynamic" kobject with a default release function set up. * * If the kobject was not able to be created, NULL will be returned. * The kobject structure returned from here must be cleaned up with a * call to kobject_put() and not kfree(), as kobject_init() has * already been called on this structure. */ static struct kobject *kobject_create(void) { struct kobject *kobj; kobj = kzalloc(sizeof(*kobj), GFP_KERNEL); if (!kobj) return NULL; kobject_init(kobj, &dynamic_kobj_ktype); return kobj; } /** * kobject_create_and_add() - Create a struct kobject dynamically and * register it with sysfs. * @name: the name for the kobject * @parent: the parent kobject of this kobject, if any. * * This function creates a kobject structure dynamically and registers it * with sysfs. When you are finished with this structure, call * kobject_put() and the structure will be dynamically freed when * it is no longer being used. * * If the kobject was not able to be created, NULL will be returned. */ struct kobject *kobject_create_and_add(const char *name, struct kobject *parent) { struct kobject *kobj; int retval; kobj = kobject_create(); if (!kobj) return NULL; retval = kobject_add(kobj, parent, "%s", name); if (retval) { pr_warn("%s: kobject_add error: %d\n", __func__, retval); kobject_put(kobj); kobj = NULL; } return kobj; } EXPORT_SYMBOL_GPL(kobject_create_and_add); /** * kset_init() - Initialize a kset for use. 
* @k: kset */ void kset_init(struct kset *k) { kobject_init_internal(&k->kobj); INIT_LIST_HEAD(&k->list); spin_lock_init(&k->list_lock); } /* default kobject attribute operations */ static ssize_t kobj_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct kobj_attribute *kattr; ssize_t ret = -EIO; kattr = container_of(attr, struct kobj_attribute, attr); if (kattr->show) ret = kattr->show(kobj, kattr, buf); return ret; } static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct kobj_attribute *kattr; ssize_t ret = -EIO; kattr = container_of(attr, struct kobj_attribute, attr); if (kattr->store) ret = kattr->store(kobj, kattr, buf, count); return ret; } const struct sysfs_ops kobj_sysfs_ops = { .show = kobj_attr_show, .store = kobj_attr_store, }; EXPORT_SYMBOL_GPL(kobj_sysfs_ops); /** * kset_register() - Initialize and add a kset. * @k: kset. * * NOTE: On error, the kset.kobj.name allocated by() kobj_set_name() * is freed, it can not be used any more. */ int kset_register(struct kset *k) { int err; if (!k) return -EINVAL; if (!k->kobj.ktype) { pr_err("must have a ktype to be initialized properly!\n"); return -EINVAL; } kset_init(k); err = kobject_add_internal(&k->kobj); if (err) { kfree_const(k->kobj.name); /* Set it to NULL to avoid accessing bad pointer in callers. */ k->kobj.name = NULL; return err; } kobject_uevent(&k->kobj, KOBJ_ADD); return 0; } EXPORT_SYMBOL(kset_register); /** * kset_unregister() - Remove a kset. * @k: kset. */ void kset_unregister(struct kset *k) { if (!k) return; kobject_del(&k->kobj); kobject_put(&k->kobj); } EXPORT_SYMBOL(kset_unregister); /** * kset_find_obj() - Search for object in kset. * @kset: kset we're looking in. * @name: object's name. * * Lock kset via @kset->subsys, and iterate over @kset->list, * looking for a matching kobject. If matching object is found * take a reference and return the object. */ struct kobject *kset_find_obj(struct kset *kset, const char *name) { struct kobject *k; struct kobject *ret = NULL; spin_lock(&kset->list_lock); list_for_each_entry(k, &kset->list, entry) { if (kobject_name(k) && !strcmp(kobject_name(k), name)) { ret = kobject_get_unless_zero(k); break; } } spin_unlock(&kset->list_lock); return ret; } EXPORT_SYMBOL_GPL(kset_find_obj); static void kset_release(struct kobject *kobj) { struct kset *kset = container_of(kobj, struct kset, kobj); pr_debug("'%s' (%p): %s\n", kobject_name(kobj), kobj, __func__); kfree(kset); } static void kset_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid) { if (kobj->parent) kobject_get_ownership(kobj->parent, uid, gid); } static const struct kobj_type kset_ktype = { .sysfs_ops = &kobj_sysfs_ops, .release = kset_release, .get_ownership = kset_get_ownership, }; /** * kset_create() - Create a struct kset dynamically. * * @name: the name for the kset * @uevent_ops: a struct kset_uevent_ops for the kset * @parent_kobj: the parent kobject of this kset, if any. * * This function creates a kset structure dynamically. This structure can * then be registered with the system and show up in sysfs with a call to * kset_register(). When you are finished with this structure, if * kset_register() has been called, call kset_unregister() and the * structure will be dynamically freed when it is no longer being used. * * If the kset was not able to be created, NULL will be returned. 
*/ static struct kset *kset_create(const char *name, const struct kset_uevent_ops *uevent_ops, struct kobject *parent_kobj) { struct kset *kset; int retval; kset = kzalloc(sizeof(*kset), GFP_KERNEL); if (!kset) return NULL; retval = kobject_set_name(&kset->kobj, "%s", name); if (retval) { kfree(kset); return NULL; } kset->uevent_ops = uevent_ops; kset->kobj.parent = parent_kobj; /* * The kobject of this kset will have a type of kset_ktype and belong to * no kset itself. That way we can properly free it when it is * finished being used. */ kset->kobj.ktype = &kset_ktype; kset->kobj.kset = NULL; return kset; } /** * kset_create_and_add() - Create a struct kset dynamically and add it to sysfs. * * @name: the name for the kset * @uevent_ops: a struct kset_uevent_ops for the kset * @parent_kobj: the parent kobject of this kset, if any. * * This function creates a kset structure dynamically and registers it * with sysfs. When you are finished with this structure, call * kset_unregister() and the structure will be dynamically freed when it * is no longer being used. * * If the kset was not able to be created, NULL will be returned. */ struct kset *kset_create_and_add(const char *name, const struct kset_uevent_ops *uevent_ops, struct kobject *parent_kobj) { struct kset *kset; int error; kset = kset_create(name, uevent_ops, parent_kobj); if (!kset) return NULL; error = kset_register(kset); if (error) { kfree(kset); return NULL; } return kset; } EXPORT_SYMBOL_GPL(kset_create_and_add); static DEFINE_SPINLOCK(kobj_ns_type_lock); static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES]; int kobj_ns_type_register(const struct kobj_ns_type_operations *ops) { enum kobj_ns_type type = ops->type; int error; spin_lock(&kobj_ns_type_lock); error = -EINVAL; if (!kobj_ns_type_is_valid(type)) goto out; error = -EBUSY; if (kobj_ns_ops_tbl[type]) goto out; error = 0; kobj_ns_ops_tbl[type] = ops; out: spin_unlock(&kobj_ns_type_lock); return error; } int kobj_ns_type_registered(enum kobj_ns_type type) { int registered = 0; spin_lock(&kobj_ns_type_lock); if (kobj_ns_type_is_valid(type)) registered = kobj_ns_ops_tbl[type] != NULL; spin_unlock(&kobj_ns_type_lock); return registered; } const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *parent) { const struct kobj_ns_type_operations *ops = NULL; if (parent && parent->ktype->child_ns_type) ops = parent->ktype->child_ns_type(parent); return ops; } const struct kobj_ns_type_operations *kobj_ns_ops(const struct kobject *kobj) { return kobj_child_ns_ops(kobj->parent); } bool kobj_ns_current_may_mount(enum kobj_ns_type type) { bool may_mount = true; spin_lock(&kobj_ns_type_lock); if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type]) may_mount = kobj_ns_ops_tbl[type]->current_may_mount(); spin_unlock(&kobj_ns_type_lock); return may_mount; } void *kobj_ns_grab_current(enum kobj_ns_type type) { void *ns = NULL; spin_lock(&kobj_ns_type_lock); if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type]) ns = kobj_ns_ops_tbl[type]->grab_current_ns(); spin_unlock(&kobj_ns_type_lock); return ns; } EXPORT_SYMBOL_GPL(kobj_ns_grab_current); const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk) { const void *ns = NULL; spin_lock(&kobj_ns_type_lock); if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type]) ns = kobj_ns_ops_tbl[type]->netlink_ns(sk); spin_unlock(&kobj_ns_type_lock); return ns; } const void *kobj_ns_initial(enum kobj_ns_type type) { const void *ns = NULL; spin_lock(&kobj_ns_type_lock); if 
(kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type]) ns = kobj_ns_ops_tbl[type]->initial_ns(); spin_unlock(&kobj_ns_type_lock); return ns; } void kobj_ns_drop(enum kobj_ns_type type, void *ns) { spin_lock(&kobj_ns_type_lock); if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type] && kobj_ns_ops_tbl[type]->drop_ns) kobj_ns_ops_tbl[type]->drop_ns(ns); spin_unlock(&kobj_ns_type_lock); } EXPORT_SYMBOL_GPL(kobj_ns_drop);
linux-master
lib/kobject.c
/* * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd * * Author: Lasse Collin <[email protected]> * * This file has been put into the public domain. * You can do whatever you want with this file. */ /* * Important notes about in-place decompression * * At least on x86, the kernel is decompressed in place: the compressed data * is placed to the end of the output buffer, and the decompressor overwrites * most of the compressed data. There must be enough safety margin to * guarantee that the write position is always behind the read position. * * The safety margin for XZ with LZMA2 or BCJ+LZMA2 is calculated below. * Note that the margin with XZ is bigger than with Deflate (gzip)! * * The worst case for in-place decompression is that the beginning of * the file is compressed extremely well, and the rest of the file is * incompressible. Thus, we must look for worst-case expansion when the * compressor is encoding incompressible data. * * The structure of the .xz file in case of a compressed kernel is as follows. * Sizes (as bytes) of the fields are in parenthesis. * * Stream Header (12) * Block Header: * Block Header (8-12) * Compressed Data (N) * Block Padding (0-3) * CRC32 (4) * Index (8-20) * Stream Footer (12) * * Normally there is exactly one Block, but let's assume that there are * 2-4 Blocks just in case. Because Stream Header and also Block Header * of the first Block don't make the decompressor produce any uncompressed * data, we can ignore them from our calculations. Block Headers of possible * additional Blocks have to be taken into account still. With these * assumptions, it is safe to assume that the total header overhead is * less than 128 bytes. * * Compressed Data contains LZMA2 or BCJ+LZMA2 encoded data. Since BCJ * doesn't change the size of the data, it is enough to calculate the * safety margin for LZMA2. * * LZMA2 stores the data in chunks. Each chunk has a header whose size is * a maximum of 6 bytes, but to get round 2^n numbers, let's assume that * the maximum chunk header size is 8 bytes. After the chunk header, there * may be up to 64 KiB of actual payload in the chunk. Often the payload is * quite a bit smaller though; to be safe, let's assume that an average * chunk has only 32 KiB of payload. * * The maximum uncompressed size of the payload is 2 MiB. The minimum * uncompressed size of the payload is in practice never less than the * payload size itself. The LZMA2 format would allow uncompressed size * to be less than the payload size, but no sane compressor creates such * files. LZMA2 supports storing incompressible data in uncompressed form, * so there's never a need to create payloads whose uncompressed size is * smaller than the compressed size. * * The assumption, that the uncompressed size of the payload is never * smaller than the payload itself, is valid only when talking about * the payload as a whole. It is possible that the payload has parts where * the decompressor consumes more input than it produces output. Calculating * the worst case for this would be tricky. Instead of trying to do that, * let's simply make sure that the decompressor never overwrites any bytes * of the payload which it is currently reading. * * Now we have enough information to calculate the safety margin. 
We need * - 128 bytes for the .xz file format headers; * - 8 bytes per every 32 KiB of uncompressed size (one LZMA2 chunk header * per chunk, each chunk having average payload size of 32 KiB); and * - 64 KiB (biggest possible LZMA2 chunk payload size) to make sure that * the decompressor never overwrites anything from the LZMA2 chunk * payload it is currently reading. * * We get the following formula: * * safety_margin = 128 + uncompressed_size * 8 / 32768 + 65536 * = 128 + (uncompressed_size >> 12) + 65536 * * For comparison, according to arch/x86/boot/compressed/misc.c, the * equivalent formula for Deflate is this: * * safety_margin = 18 + (uncompressed_size >> 12) + 32768 * * Thus, when updating Deflate-only in-place kernel decompressor to * support XZ, the fixed overhead has to be increased from 18+32768 bytes * to 128+65536 bytes. */ /* * STATIC is defined to "static" if we are being built for kernel * decompression (pre-boot code). <linux/decompress/mm.h> will define * STATIC to empty if it wasn't already defined. Since we will need to * know later if we are being used for kernel decompression, we define * XZ_PREBOOT here. */ #ifdef STATIC # define XZ_PREBOOT #else #include <linux/decompress/unxz.h> #endif #ifdef __KERNEL__ # include <linux/decompress/mm.h> #endif #define XZ_EXTERN STATIC #ifndef XZ_PREBOOT # include <linux/slab.h> # include <linux/xz.h> #else /* * Use the internal CRC32 code instead of kernel's CRC32 module, which * is not available in early phase of booting. */ #define XZ_INTERNAL_CRC32 1 /* * For boot time use, we enable only the BCJ filter of the current * architecture or none if no BCJ filter is available for the architecture. */ #ifdef CONFIG_X86 # define XZ_DEC_X86 #endif #ifdef CONFIG_PPC # define XZ_DEC_POWERPC #endif #ifdef CONFIG_ARM # define XZ_DEC_ARM #endif #ifdef CONFIG_IA64 # define XZ_DEC_IA64 #endif #ifdef CONFIG_SPARC # define XZ_DEC_SPARC #endif /* * This will get the basic headers so that memeq() and others * can be defined. */ #include "xz/xz_private.h" /* * Replace the normal allocation functions with the versions from * <linux/decompress/mm.h>. vfree() needs to support vfree(NULL) * when XZ_DYNALLOC is used, but the pre-boot free() doesn't support it. * Workaround it here because the other decompressors don't need it. */ #undef kmalloc #undef kfree #undef vmalloc #undef vfree #define kmalloc(size, flags) malloc(size) #define kfree(ptr) free(ptr) #define vmalloc(size) malloc(size) #define vfree(ptr) do { if (ptr != NULL) free(ptr); } while (0) /* * FIXME: Not all basic memory functions are provided in architecture-specific * files (yet). We define our own versions here for now, but this should be * only a temporary solution. * * memeq and memzero are not used much and any remotely sane implementation * is fast enough. memcpy/memmove speed matters in multi-call mode, but * the kernel image is decompressed in single-call mode, in which only * memmove speed can matter and only if there is a lot of incompressible data * (LZMA2 stores incompressible chunks in uncompressed form). Thus, the * functions below should just be kept small; it's probably not worth * optimizing for speed. 
*/ #ifndef memeq static bool memeq(const void *a, const void *b, size_t size) { const uint8_t *x = a; const uint8_t *y = b; size_t i; for (i = 0; i < size; ++i) if (x[i] != y[i]) return false; return true; } #endif #ifndef memzero static void memzero(void *buf, size_t size) { uint8_t *b = buf; uint8_t *e = b + size; while (b != e) *b++ = '\0'; } #endif #ifndef memmove /* Not static to avoid a conflict with the prototype in the Linux headers. */ void *memmove(void *dest, const void *src, size_t size) { uint8_t *d = dest; const uint8_t *s = src; size_t i; if (d < s) { for (i = 0; i < size; ++i) d[i] = s[i]; } else if (d > s) { i = size; while (i-- > 0) d[i] = s[i]; } return dest; } #endif /* * Since we need memmove anyway, would use it as memcpy too. * Commented out for now to avoid breaking things. */ /* #ifndef memcpy # define memcpy memmove #endif */ #include "xz/xz_crc32.c" #include "xz/xz_dec_stream.c" #include "xz/xz_dec_lzma2.c" #include "xz/xz_dec_bcj.c" #endif /* XZ_PREBOOT */ /* Size of the input and output buffers in multi-call mode */ #define XZ_IOBUF_SIZE 4096 /* * This function implements the API defined in <linux/decompress/generic.h>. * * This wrapper will automatically choose single-call or multi-call mode * of the native XZ decoder API. The single-call mode can be used only when * both input and output buffers are available as a single chunk, i.e. when * fill() and flush() won't be used. */ STATIC int INIT unxz(unsigned char *in, long in_size, long (*fill)(void *dest, unsigned long size), long (*flush)(void *src, unsigned long size), unsigned char *out, long *in_used, void (*error)(char *x)) { struct xz_buf b; struct xz_dec *s; enum xz_ret ret; bool must_free_in = false; #if XZ_INTERNAL_CRC32 xz_crc32_init(); #endif if (in_used != NULL) *in_used = 0; if (fill == NULL && flush == NULL) s = xz_dec_init(XZ_SINGLE, 0); else s = xz_dec_init(XZ_DYNALLOC, (uint32_t)-1); if (s == NULL) goto error_alloc_state; if (flush == NULL) { b.out = out; b.out_size = (size_t)-1; } else { b.out_size = XZ_IOBUF_SIZE; b.out = malloc(XZ_IOBUF_SIZE); if (b.out == NULL) goto error_alloc_out; } if (in == NULL) { must_free_in = true; in = malloc(XZ_IOBUF_SIZE); if (in == NULL) goto error_alloc_in; } b.in = in; b.in_pos = 0; b.in_size = in_size; b.out_pos = 0; if (fill == NULL && flush == NULL) { ret = xz_dec_run(s, &b); } else { do { if (b.in_pos == b.in_size && fill != NULL) { if (in_used != NULL) *in_used += b.in_pos; b.in_pos = 0; in_size = fill(in, XZ_IOBUF_SIZE); if (in_size < 0) { /* * This isn't an optimal error code * but it probably isn't worth making * a new one either. */ ret = XZ_BUF_ERROR; break; } b.in_size = in_size; } ret = xz_dec_run(s, &b); if (flush != NULL && (b.out_pos == b.out_size || (ret != XZ_OK && b.out_pos > 0))) { /* * Setting ret here may hide an error * returned by xz_dec_run(), but probably * it's not too bad. */ if (flush(b.out, b.out_pos) != (long)b.out_pos) ret = XZ_BUF_ERROR; b.out_pos = 0; } } while (ret == XZ_OK); if (must_free_in) free(in); if (flush != NULL) free(b.out); } if (in_used != NULL) *in_used += b.in_pos; xz_dec_end(s); switch (ret) { case XZ_STREAM_END: return 0; case XZ_MEM_ERROR: /* This can occur only in multi-call mode. 
*/ error("XZ decompressor ran out of memory"); break; case XZ_FORMAT_ERROR: error("Input is not in the XZ format (wrong magic bytes)"); break; case XZ_OPTIONS_ERROR: error("Input was encoded with settings that are not " "supported by this XZ decoder"); break; case XZ_DATA_ERROR: case XZ_BUF_ERROR: error("XZ-compressed data is corrupt"); break; default: error("Bug in the XZ decompressor"); break; } return -1; error_alloc_in: if (flush != NULL) free(b.out); error_alloc_out: xz_dec_end(s); error_alloc_state: error("XZ decompressor ran out of memory"); return -1; } /* * This macro is used by architecture-specific files to decompress * the kernel image. */ #ifdef XZ_PREBOOT STATIC int INIT __decompress(unsigned char *buf, long len, long (*fill)(void*, unsigned long), long (*flush)(void*, unsigned long), unsigned char *out_buf, long olen, long *pos, void (*error)(char *x)) { return unxz(buf, len, fill, flush, out_buf, pos, error); } #endif
linux-master
lib/decompress_unxz.c
/* * Test cases for lib/string_helpers.c module. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/random.h> #include <linux/string.h> #include <linux/string_helpers.h> static __init bool test_string_check_buf(const char *name, unsigned int flags, char *in, size_t p, char *out_real, size_t q_real, char *out_test, size_t q_test) { if (q_real == q_test && !memcmp(out_test, out_real, q_test)) return true; pr_warn("Test '%s' failed: flags = %#x\n", name, flags); print_hex_dump(KERN_WARNING, "Input: ", DUMP_PREFIX_NONE, 16, 1, in, p, true); print_hex_dump(KERN_WARNING, "Expected: ", DUMP_PREFIX_NONE, 16, 1, out_test, q_test, true); print_hex_dump(KERN_WARNING, "Got: ", DUMP_PREFIX_NONE, 16, 1, out_real, q_real, true); return false; } struct test_string { const char *in; const char *out; unsigned int flags; }; static const struct test_string strings[] __initconst = { { .in = "\\f\\ \\n\\r\\t\\v", .out = "\f\\ \n\r\t\v", .flags = UNESCAPE_SPACE, }, { .in = "\\40\\1\\387\\0064\\05\\040\\8a\\110\\777", .out = " \001\00387\0064\005 \\8aH?7", .flags = UNESCAPE_OCTAL, }, { .in = "\\xv\\xa\\x2c\\xD\\x6f2", .out = "\\xv\n,\ro2", .flags = UNESCAPE_HEX, }, { .in = "\\h\\\\\\\"\\a\\e\\", .out = "\\h\\\"\a\e\\", .flags = UNESCAPE_SPECIAL, }, }; static void __init test_string_unescape(const char *name, unsigned int flags, bool inplace) { int q_real = 256; char *in = kmalloc(q_real, GFP_KERNEL); char *out_test = kmalloc(q_real, GFP_KERNEL); char *out_real = kmalloc(q_real, GFP_KERNEL); int i, p = 0, q_test = 0; if (!in || !out_test || !out_real) goto out; for (i = 0; i < ARRAY_SIZE(strings); i++) { const char *s = strings[i].in; int len = strlen(strings[i].in); /* Copy string to in buffer */ memcpy(&in[p], s, len); p += len; /* Copy expected result for given flags */ if (flags & strings[i].flags) { s = strings[i].out; len = strlen(strings[i].out); } memcpy(&out_test[q_test], s, len); q_test += len; } in[p++] = '\0'; /* Call string_unescape and compare result */ if (inplace) { memcpy(out_real, in, p); if (flags == UNESCAPE_ANY) q_real = string_unescape_any_inplace(out_real); else q_real = string_unescape_inplace(out_real, flags); } else if (flags == UNESCAPE_ANY) { q_real = string_unescape_any(in, out_real, q_real); } else { q_real = string_unescape(in, out_real, q_real, flags); } test_string_check_buf(name, flags, in, p - 1, out_real, q_real, out_test, q_test); out: kfree(out_real); kfree(out_test); kfree(in); } struct test_string_1 { const char *out; unsigned int flags; }; #define TEST_STRING_2_MAX_S1 32 struct test_string_2 { const char *in; struct test_string_1 s1[TEST_STRING_2_MAX_S1]; }; #define TEST_STRING_2_DICT_0 NULL static const struct test_string_2 escape0[] __initconst = {{ .in = "\f\\ \n\r\t\v", .s1 = {{ .out = "\\f\\ \\n\\r\\t\\v", .flags = ESCAPE_SPACE, },{ .out = "\\f\\134\\040\\n\\r\\t\\v", .flags = ESCAPE_SPACE | ESCAPE_OCTAL, },{ .out = "\\f\\x5c\\x20\\n\\r\\t\\v", .flags = ESCAPE_SPACE | ESCAPE_HEX, },{ /* terminator */ }} },{ .in = "\\h\\\"\a\e\\", .s1 = {{ .out = "\\\\h\\\\\\\"\\a\\e\\\\", .flags = ESCAPE_SPECIAL, },{ .out = "\\\\\\150\\\\\\\"\\a\\e\\\\", .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL, },{ .out = "\\\\\\x68\\\\\\\"\\a\\e\\\\", .flags = ESCAPE_SPECIAL | ESCAPE_HEX, },{ /* terminator */ }} },{ .in = "\eb \\C\007\"\x90\r]", .s1 = {{ .out = "\eb \\C\007\"\x90\\r]", .flags = ESCAPE_SPACE, },{ .out = "\\eb \\\\C\\a\\\"\x90\r]", .flags = ESCAPE_SPECIAL, },{ .out = "\\eb 
\\\\C\\a\\\"\x90\\r]", .flags = ESCAPE_SPACE | ESCAPE_SPECIAL, },{ .out = "\\033\\142\\040\\134\\103\\007\\042\\220\\015\\135", .flags = ESCAPE_OCTAL, },{ .out = "\\033\\142\\040\\134\\103\\007\\042\\220\\r\\135", .flags = ESCAPE_SPACE | ESCAPE_OCTAL, },{ .out = "\\e\\142\\040\\\\\\103\\a\\\"\\220\\015\\135", .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL, },{ .out = "\\e\\142\\040\\\\\\103\\a\\\"\\220\\r\\135", .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_OCTAL, },{ .out = "\eb \\C\007\"\x90\r]", .flags = ESCAPE_NP, },{ .out = "\eb \\C\007\"\x90\\r]", .flags = ESCAPE_SPACE | ESCAPE_NP, },{ .out = "\\eb \\C\\a\"\x90\r]", .flags = ESCAPE_SPECIAL | ESCAPE_NP, },{ .out = "\\eb \\C\\a\"\x90\\r]", .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NP, },{ .out = "\\033b \\C\\007\"\\220\\015]", .flags = ESCAPE_OCTAL | ESCAPE_NP, },{ .out = "\\033b \\C\\007\"\\220\\r]", .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NP, },{ .out = "\\eb \\C\\a\"\\220\\r]", .flags = ESCAPE_SPECIAL | ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NP, },{ .out = "\\x1bb \\C\\x07\"\\x90\\x0d]", .flags = ESCAPE_NP | ESCAPE_HEX, },{ /* terminator */ }} },{ .in = "\007 \eb\"\x90\xCF\r", .s1 = {{ .out = "\007 \eb\"\\220\\317\r", .flags = ESCAPE_OCTAL | ESCAPE_NA, },{ .out = "\007 \eb\"\\x90\\xcf\r", .flags = ESCAPE_HEX | ESCAPE_NA, },{ .out = "\007 \eb\"\x90\xCF\r", .flags = ESCAPE_NA, },{ /* terminator */ }} },{ /* terminator */ }}; #define TEST_STRING_2_DICT_1 "b\\ \t\r\xCF" static const struct test_string_2 escape1[] __initconst = {{ .in = "\f\\ \n\r\t\v", .s1 = {{ .out = "\f\\134\\040\n\\015\\011\v", .flags = ESCAPE_OCTAL, },{ .out = "\f\\x5c\\x20\n\\x0d\\x09\v", .flags = ESCAPE_HEX, },{ .out = "\f\\134\\040\n\\015\\011\v", .flags = ESCAPE_ANY | ESCAPE_APPEND, },{ .out = "\\014\\134\\040\\012\\015\\011\\013", .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NAP, },{ .out = "\\x0c\\x5c\\x20\\x0a\\x0d\\x09\\x0b", .flags = ESCAPE_HEX | ESCAPE_APPEND | ESCAPE_NAP, },{ .out = "\f\\134\\040\n\\015\\011\v", .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NA, },{ .out = "\f\\x5c\\x20\n\\x0d\\x09\v", .flags = ESCAPE_HEX | ESCAPE_APPEND | ESCAPE_NA, },{ /* terminator */ }} },{ .in = "\\h\\\"\a\xCF\e\\", .s1 = {{ .out = "\\134h\\134\"\a\\317\e\\134", .flags = ESCAPE_OCTAL, },{ .out = "\\134h\\134\"\a\\317\e\\134", .flags = ESCAPE_ANY | ESCAPE_APPEND, },{ .out = "\\134h\\134\"\\007\\317\\033\\134", .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NAP, },{ .out = "\\134h\\134\"\a\\317\e\\134", .flags = ESCAPE_OCTAL | ESCAPE_APPEND | ESCAPE_NA, },{ /* terminator */ }} },{ .in = "\eb \\C\007\"\x90\r]", .s1 = {{ .out = "\e\\142\\040\\134C\007\"\x90\\015]", .flags = ESCAPE_OCTAL, },{ /* terminator */ }} },{ .in = "\007 \eb\"\x90\xCF\r", .s1 = {{ .out = "\007 \eb\"\x90\xCF\r", .flags = ESCAPE_NA, },{ .out = "\007 \eb\"\x90\xCF\r", .flags = ESCAPE_SPACE | ESCAPE_NA, },{ .out = "\007 \eb\"\x90\xCF\r", .flags = ESCAPE_SPECIAL | ESCAPE_NA, },{ .out = "\007 \eb\"\x90\xCF\r", .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NA, },{ .out = "\007 \eb\"\x90\\317\r", .flags = ESCAPE_OCTAL | ESCAPE_NA, },{ .out = "\007 \eb\"\x90\\317\r", .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NA, },{ .out = "\007 \eb\"\x90\\317\r", .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL | ESCAPE_NA, },{ .out = "\007 \eb\"\x90\\317\r", .flags = ESCAPE_ANY | ESCAPE_NA, },{ .out = "\007 \eb\"\x90\\xcf\r", .flags = ESCAPE_HEX | ESCAPE_NA, },{ .out = "\007 \eb\"\x90\\xcf\r", .flags = ESCAPE_SPACE | ESCAPE_HEX | ESCAPE_NA, },{ .out = "\007 \eb\"\x90\\xcf\r", .flags = ESCAPE_SPECIAL | ESCAPE_HEX | 
ESCAPE_NA, },{ .out = "\007 \eb\"\x90\\xcf\r", .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NA, },{ /* terminator */ }} },{ .in = "\007 \eb\"\x90\xCF\r", .s1 = {{ .out = "\007 \eb\"\x90\xCF\r", .flags = ESCAPE_NAP, },{ .out = "\007 \eb\"\x90\xCF\\r", .flags = ESCAPE_SPACE | ESCAPE_NAP, },{ .out = "\007 \eb\"\x90\xCF\r", .flags = ESCAPE_SPECIAL | ESCAPE_NAP, },{ .out = "\007 \eb\"\x90\xCF\\r", .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NAP, },{ .out = "\007 \eb\"\x90\\317\\015", .flags = ESCAPE_OCTAL | ESCAPE_NAP, },{ .out = "\007 \eb\"\x90\\317\\r", .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NAP, },{ .out = "\007 \eb\"\x90\\317\\015", .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL | ESCAPE_NAP, },{ .out = "\007 \eb\"\x90\\317\r", .flags = ESCAPE_ANY | ESCAPE_NAP, },{ .out = "\007 \eb\"\x90\\xcf\\x0d", .flags = ESCAPE_HEX | ESCAPE_NAP, },{ .out = "\007 \eb\"\x90\\xcf\\r", .flags = ESCAPE_SPACE | ESCAPE_HEX | ESCAPE_NAP, },{ .out = "\007 \eb\"\x90\\xcf\\x0d", .flags = ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NAP, },{ .out = "\007 \eb\"\x90\\xcf\\r", .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_HEX | ESCAPE_NAP, },{ /* terminator */ }} },{ /* terminator */ }}; static const struct test_string strings_upper[] __initconst = { { .in = "abcdefgh1234567890test", .out = "ABCDEFGH1234567890TEST", }, { .in = "abCdeFgH1234567890TesT", .out = "ABCDEFGH1234567890TEST", }, }; static const struct test_string strings_lower[] __initconst = { { .in = "ABCDEFGH1234567890TEST", .out = "abcdefgh1234567890test", }, { .in = "abCdeFgH1234567890TesT", .out = "abcdefgh1234567890test", }, }; static __init const char *test_string_find_match(const struct test_string_2 *s2, unsigned int flags) { const struct test_string_1 *s1 = s2->s1; unsigned int i; if (!flags) return s2->in; /* Test cases are NULL-aware */ flags &= ~ESCAPE_NULL; /* ESCAPE_OCTAL has a higher priority */ if (flags & ESCAPE_OCTAL) flags &= ~ESCAPE_HEX; for (i = 0; i < TEST_STRING_2_MAX_S1 && s1->out; i++, s1++) if (s1->flags == flags) return s1->out; return NULL; } static __init void test_string_escape_overflow(const char *in, int p, unsigned int flags, const char *esc, int q_test, const char *name) { int q_real; q_real = string_escape_mem(in, p, NULL, 0, flags, esc); if (q_real != q_test) pr_warn("Test '%s' failed: flags = %#x, osz = 0, expected %d, got %d\n", name, flags, q_test, q_real); } static __init void test_string_escape(const char *name, const struct test_string_2 *s2, unsigned int flags, const char *esc) { size_t out_size = 512; char *out_test = kmalloc(out_size, GFP_KERNEL); char *out_real = kmalloc(out_size, GFP_KERNEL); char *in = kmalloc(256, GFP_KERNEL); int p = 0, q_test = 0; int q_real; if (!out_test || !out_real || !in) goto out; for (; s2->in; s2++) { const char *out; int len; /* NULL injection */ if (flags & ESCAPE_NULL) { in[p++] = '\0'; /* '\0' passes isascii() test */ if (flags & ESCAPE_NA && !(flags & ESCAPE_APPEND && esc)) { out_test[q_test++] = '\0'; } else { out_test[q_test++] = '\\'; out_test[q_test++] = '0'; } } /* Don't try strings that have no output */ out = test_string_find_match(s2, flags); if (!out) continue; /* Copy string to in buffer */ len = strlen(s2->in); memcpy(&in[p], s2->in, len); p += len; /* Copy expected result for given flags */ len = strlen(out); memcpy(&out_test[q_test], out, len); q_test += len; } q_real = string_escape_mem(in, p, out_real, out_size, flags, esc); test_string_check_buf(name, flags, in, p, out_real, q_real, out_test, q_test); test_string_escape_overflow(in, p, flags, esc, 
q_test, name); out: kfree(in); kfree(out_real); kfree(out_test); } #define string_get_size_maxbuf 16 #define test_string_get_size_one(size, blk_size, exp_result10, exp_result2) \ do { \ BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf); \ BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf); \ __test_string_get_size((size), (blk_size), (exp_result10), \ (exp_result2)); \ } while (0) static __init void test_string_get_size_check(const char *units, const char *exp, char *res, const u64 size, const u64 blk_size) { if (!memcmp(res, exp, strlen(exp) + 1)) return; res[string_get_size_maxbuf - 1] = '\0'; pr_warn("Test 'test_string_get_size' failed!\n"); pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %s)\n", size, blk_size, units); pr_warn("expected: '%s', got '%s'\n", exp, res); } static __init void __test_string_get_size(const u64 size, const u64 blk_size, const char *exp_result10, const char *exp_result2) { char buf10[string_get_size_maxbuf]; char buf2[string_get_size_maxbuf]; string_get_size(size, blk_size, STRING_UNITS_10, buf10, sizeof(buf10)); string_get_size(size, blk_size, STRING_UNITS_2, buf2, sizeof(buf2)); test_string_get_size_check("STRING_UNITS_10", exp_result10, buf10, size, blk_size); test_string_get_size_check("STRING_UNITS_2", exp_result2, buf2, size, blk_size); } static __init void test_string_get_size(void) { /* small values */ test_string_get_size_one(0, 512, "0 B", "0 B"); test_string_get_size_one(1, 512, "512 B", "512 B"); test_string_get_size_one(1100, 1, "1.10 kB", "1.07 KiB"); /* normal values */ test_string_get_size_one(16384, 512, "8.39 MB", "8.00 MiB"); test_string_get_size_one(500118192, 512, "256 GB", "238 GiB"); test_string_get_size_one(8192, 4096, "33.6 MB", "32.0 MiB"); /* weird block sizes */ test_string_get_size_one(3000, 1900, "5.70 MB", "5.44 MiB"); /* huge values */ test_string_get_size_one(U64_MAX, 4096, "75.6 ZB", "64.0 ZiB"); test_string_get_size_one(4096, U64_MAX, "75.6 ZB", "64.0 ZiB"); } static void __init test_string_upper_lower(void) { char *dst; int i; for (i = 0; i < ARRAY_SIZE(strings_upper); i++) { const char *s = strings_upper[i].in; int len = strlen(strings_upper[i].in) + 1; dst = kmalloc(len, GFP_KERNEL); if (!dst) return; string_upper(dst, s); if (memcmp(dst, strings_upper[i].out, len)) { pr_warn("Test 'string_upper' failed : expected %s, got %s!\n", strings_upper[i].out, dst); kfree(dst); return; } kfree(dst); } for (i = 0; i < ARRAY_SIZE(strings_lower); i++) { const char *s = strings_lower[i].in; int len = strlen(strings_lower[i].in) + 1; dst = kmalloc(len, GFP_KERNEL); if (!dst) return; string_lower(dst, s); if (memcmp(dst, strings_lower[i].out, len)) { pr_warn("Test 'string_lower failed : : expected %s, got %s!\n", strings_lower[i].out, dst); kfree(dst); return; } kfree(dst); } } static int __init test_string_helpers_init(void) { unsigned int i; pr_info("Running tests...\n"); for (i = 0; i < UNESCAPE_ALL_MASK + 1; i++) test_string_unescape("unescape", i, false); test_string_unescape("unescape inplace", get_random_u32_below(UNESCAPE_ALL_MASK + 1), true); /* Without dictionary */ for (i = 0; i < ESCAPE_ALL_MASK + 1; i++) test_string_escape("escape 0", escape0, i, TEST_STRING_2_DICT_0); /* With dictionary */ for (i = 0; i < ESCAPE_ALL_MASK + 1; i++) test_string_escape("escape 1", escape1, i, TEST_STRING_2_DICT_1); /* Test string_get_size() */ test_string_get_size(); /* Test string upper(), string_lower() */ test_string_upper_lower(); return -EINVAL; } module_init(test_string_helpers_init); 
MODULE_LICENSE("Dual BSD/GPL");
linux-master
lib/test-string_helpers.c
// SPDX-License-Identifier: GPL-2.0-or-later #define pr_fmt(fmt) "ref_tracker: " fmt #include <linux/export.h> #include <linux/list_sort.h> #include <linux/ref_tracker.h> #include <linux/slab.h> #include <linux/stacktrace.h> #include <linux/stackdepot.h> #define REF_TRACKER_STACK_ENTRIES 16 #define STACK_BUF_SIZE 1024 struct ref_tracker { struct list_head head; /* anchor into dir->list or dir->quarantine */ bool dead; depot_stack_handle_t alloc_stack_handle; depot_stack_handle_t free_stack_handle; }; struct ref_tracker_dir_stats { int total; int count; struct { depot_stack_handle_t stack_handle; unsigned int count; } stacks[]; }; static struct ref_tracker_dir_stats * ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit) { struct ref_tracker_dir_stats *stats; struct ref_tracker *tracker; stats = kmalloc(struct_size(stats, stacks, limit), GFP_NOWAIT | __GFP_NOWARN); if (!stats) return ERR_PTR(-ENOMEM); stats->total = 0; stats->count = 0; list_for_each_entry(tracker, &dir->list, head) { depot_stack_handle_t stack = tracker->alloc_stack_handle; int i; ++stats->total; for (i = 0; i < stats->count; ++i) if (stats->stacks[i].stack_handle == stack) break; if (i >= limit) continue; if (i >= stats->count) { stats->stacks[i].stack_handle = stack; stats->stacks[i].count = 0; ++stats->count; } ++stats->stacks[i].count; } return stats; } struct ostream { char *buf; int size, used; }; #define pr_ostream(stream, fmt, args...) \ ({ \ struct ostream *_s = (stream); \ \ if (!_s->buf) { \ pr_err(fmt, ##args); \ } else { \ int ret, len = _s->size - _s->used; \ ret = snprintf(_s->buf + _s->used, len, pr_fmt(fmt), ##args); \ _s->used += min(ret, len); \ } \ }) static void __ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir, unsigned int display_limit, struct ostream *s) { struct ref_tracker_dir_stats *stats; unsigned int i = 0, skipped; depot_stack_handle_t stack; char *sbuf; lockdep_assert_held(&dir->lock); if (list_empty(&dir->list)) return; stats = ref_tracker_get_stats(dir, display_limit); if (IS_ERR(stats)) { pr_ostream(s, "%s@%pK: couldn't get stats, error %pe\n", dir->name, dir, stats); return; } sbuf = kmalloc(STACK_BUF_SIZE, GFP_NOWAIT | __GFP_NOWARN); for (i = 0, skipped = stats->total; i < stats->count; ++i) { stack = stats->stacks[i].stack_handle; if (sbuf && !stack_depot_snprint(stack, sbuf, STACK_BUF_SIZE, 4)) sbuf[0] = 0; pr_ostream(s, "%s@%pK has %d/%d users at\n%s\n", dir->name, dir, stats->stacks[i].count, stats->total, sbuf); skipped -= stats->stacks[i].count; } if (skipped) pr_ostream(s, "%s@%pK skipped reports about %d/%d users.\n", dir->name, dir, skipped, stats->total); kfree(sbuf); kfree(stats); } void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir, unsigned int display_limit) { struct ostream os = {}; __ref_tracker_dir_pr_ostream(dir, display_limit, &os); } EXPORT_SYMBOL(ref_tracker_dir_print_locked); void ref_tracker_dir_print(struct ref_tracker_dir *dir, unsigned int display_limit) { unsigned long flags; spin_lock_irqsave(&dir->lock, flags); ref_tracker_dir_print_locked(dir, display_limit); spin_unlock_irqrestore(&dir->lock, flags); } EXPORT_SYMBOL(ref_tracker_dir_print); int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size) { struct ostream os = { .buf = buf, .size = size }; unsigned long flags; spin_lock_irqsave(&dir->lock, flags); __ref_tracker_dir_pr_ostream(dir, 16, &os); spin_unlock_irqrestore(&dir->lock, flags); return os.used; } EXPORT_SYMBOL(ref_tracker_dir_snprint); void ref_tracker_dir_exit(struct ref_tracker_dir 
*dir) { struct ref_tracker *tracker, *n; unsigned long flags; bool leak = false; dir->dead = true; spin_lock_irqsave(&dir->lock, flags); list_for_each_entry_safe(tracker, n, &dir->quarantine, head) { list_del(&tracker->head); kfree(tracker); dir->quarantine_avail++; } if (!list_empty(&dir->list)) { ref_tracker_dir_print_locked(dir, 16); leak = true; list_for_each_entry_safe(tracker, n, &dir->list, head) { list_del(&tracker->head); kfree(tracker); } } spin_unlock_irqrestore(&dir->lock, flags); WARN_ON_ONCE(leak); WARN_ON_ONCE(refcount_read(&dir->untracked) != 1); WARN_ON_ONCE(refcount_read(&dir->no_tracker) != 1); } EXPORT_SYMBOL(ref_tracker_dir_exit); int ref_tracker_alloc(struct ref_tracker_dir *dir, struct ref_tracker **trackerp, gfp_t gfp) { unsigned long entries[REF_TRACKER_STACK_ENTRIES]; struct ref_tracker *tracker; unsigned int nr_entries; gfp_t gfp_mask = gfp | __GFP_NOWARN; unsigned long flags; WARN_ON_ONCE(dir->dead); if (!trackerp) { refcount_inc(&dir->no_tracker); return 0; } if (gfp & __GFP_DIRECT_RECLAIM) gfp_mask |= __GFP_NOFAIL; *trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask); if (unlikely(!tracker)) { pr_err_once("memory allocation failure, unreliable refcount tracker.\n"); refcount_inc(&dir->untracked); return -ENOMEM; } nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1); tracker->alloc_stack_handle = stack_depot_save(entries, nr_entries, gfp); spin_lock_irqsave(&dir->lock, flags); list_add(&tracker->head, &dir->list); spin_unlock_irqrestore(&dir->lock, flags); return 0; } EXPORT_SYMBOL_GPL(ref_tracker_alloc); int ref_tracker_free(struct ref_tracker_dir *dir, struct ref_tracker **trackerp) { unsigned long entries[REF_TRACKER_STACK_ENTRIES]; depot_stack_handle_t stack_handle; struct ref_tracker *tracker; unsigned int nr_entries; unsigned long flags; WARN_ON_ONCE(dir->dead); if (!trackerp) { refcount_dec(&dir->no_tracker); return 0; } tracker = *trackerp; if (!tracker) { refcount_dec(&dir->untracked); return -EEXIST; } nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1); stack_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT | __GFP_NOWARN); spin_lock_irqsave(&dir->lock, flags); if (tracker->dead) { pr_err("reference already released.\n"); if (tracker->alloc_stack_handle) { pr_err("allocated in:\n"); stack_depot_print(tracker->alloc_stack_handle); } if (tracker->free_stack_handle) { pr_err("freed in:\n"); stack_depot_print(tracker->free_stack_handle); } spin_unlock_irqrestore(&dir->lock, flags); WARN_ON_ONCE(1); return -EINVAL; } tracker->dead = true; tracker->free_stack_handle = stack_handle; list_move_tail(&tracker->head, &dir->quarantine); if (!dir->quarantine_avail) { tracker = list_first_entry(&dir->quarantine, struct ref_tracker, head); list_del(&tracker->head); } else { dir->quarantine_avail--; tracker = NULL; } spin_unlock_irqrestore(&dir->lock, flags); kfree(tracker); return 0; } EXPORT_SYMBOL_GPL(ref_tracker_free);
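/*
 * Usage sketch for the ref_tracker API implemented above. "example_obj" and
 * its helpers are assumptions for illustration; the directory is assumed to
 * have already been set up with ref_tracker_dir_init() from
 * <linux/ref_tracker.h>.
 */
#include <linux/ref_tracker.h>
#include <linux/gfp.h>

struct example_obj {
	struct ref_tracker_dir dir;
};

static struct ref_tracker *example_take_ref(struct example_obj *obj)
{
	struct ref_tracker *tracker = NULL;

	/* Records the caller's stack trace against this reference */
	if (ref_tracker_alloc(&obj->dir, &tracker, GFP_KERNEL))
		return NULL;
	return tracker;
}

static void example_put_ref(struct example_obj *obj, struct ref_tracker *tracker)
{
	/* Quarantines the tracker so double releases are reported with stacks */
	ref_tracker_free(&obj->dir, &tracker);
}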
linux-master
lib/ref_tracker.c
// SPDX-License-Identifier: GPL-2.0 /* * Dynamic byte queue limits. See include/linux/dynamic_queue_limits.h * * Copyright (c) 2011, Tom Herbert <[email protected]> */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/dynamic_queue_limits.h> #include <linux/compiler.h> #include <linux/export.h> #define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0) #define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0) /* Records completed count and recalculates the queue limit */ void dql_completed(struct dql *dql, unsigned int count) { unsigned int inprogress, prev_inprogress, limit; unsigned int ovlimit, completed, num_queued; bool all_prev_completed; num_queued = READ_ONCE(dql->num_queued); /* Can't complete more than what's in queue */ BUG_ON(count > num_queued - dql->num_completed); completed = dql->num_completed + count; limit = dql->limit; ovlimit = POSDIFF(num_queued - dql->num_completed, limit); inprogress = num_queued - completed; prev_inprogress = dql->prev_num_queued - dql->num_completed; all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued); if ((ovlimit && !inprogress) || (dql->prev_ovlimit && all_prev_completed)) { /* * Queue considered starved if: * - The queue was over-limit in the last interval, * and there is no more data in the queue. * OR * - The queue was over-limit in the previous interval and * when enqueuing it was possible that all queued data * had been consumed. This covers the case when queue * may have becomes starved between completion processing * running and next time enqueue was scheduled. * * When queue is starved increase the limit by the amount * of bytes both sent and completed in the last interval, * plus any previous over-limit. */ limit += POSDIFF(completed, dql->prev_num_queued) + dql->prev_ovlimit; dql->slack_start_time = jiffies; dql->lowest_slack = UINT_MAX; } else if (inprogress && prev_inprogress && !all_prev_completed) { /* * Queue was not starved, check if the limit can be decreased. * A decrease is only considered if the queue has been busy in * the whole interval (the check above). * * If there is slack, the amount of excess data queued above * the amount needed to prevent starvation, the queue limit * can be decreased. To avoid hysteresis we consider the * minimum amount of slack found over several iterations of the * completion routine. */ unsigned int slack, slack_last_objs; /* * Slack is the maximum of * - The queue limit plus previous over-limit minus twice * the number of objects completed. Note that two times * number of completed bytes is a basis for an upper bound * of the limit. * - Portion of objects in the last queuing operation that * was not part of non-zero previous over-limit. That is * "round down" by non-overlimit portion of the last * queueing operation. */ slack = POSDIFF(limit + dql->prev_ovlimit, 2 * (completed - dql->num_completed)); slack_last_objs = dql->prev_ovlimit ? 
POSDIFF(dql->prev_last_obj_cnt, dql->prev_ovlimit) : 0; slack = max(slack, slack_last_objs); if (slack < dql->lowest_slack) dql->lowest_slack = slack; if (time_after(jiffies, dql->slack_start_time + dql->slack_hold_time)) { limit = POSDIFF(limit, dql->lowest_slack); dql->slack_start_time = jiffies; dql->lowest_slack = UINT_MAX; } } /* Enforce bounds on limit */ limit = clamp(limit, dql->min_limit, dql->max_limit); if (limit != dql->limit) { dql->limit = limit; ovlimit = 0; } dql->adj_limit = limit + completed; dql->prev_ovlimit = ovlimit; dql->prev_last_obj_cnt = dql->last_obj_cnt; dql->num_completed = completed; dql->prev_num_queued = num_queued; } EXPORT_SYMBOL(dql_completed); void dql_reset(struct dql *dql) { /* Reset all dynamic values */ dql->limit = 0; dql->num_queued = 0; dql->num_completed = 0; dql->last_obj_cnt = 0; dql->prev_num_queued = 0; dql->prev_last_obj_cnt = 0; dql->prev_ovlimit = 0; dql->lowest_slack = UINT_MAX; dql->slack_start_time = jiffies; } EXPORT_SYMBOL(dql_reset); void dql_init(struct dql *dql, unsigned int hold_time) { dql->max_limit = DQL_MAX_LIMIT; dql->min_limit = 0; dql->slack_hold_time = hold_time; dql_reset(dql); } EXPORT_SYMBOL(dql_init);
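/*
 * Usage sketch of the dql API from a hypothetical driver transmit path;
 * struct example_ring and the helpers below are assumptions for illustration.
 * dql_queued() and dql_avail() come from <linux/dynamic_queue_limits.h>.
 */
#include <linux/dynamic_queue_limits.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct example_ring {
	struct dql dql;
};

static void example_ring_setup(struct example_ring *ring)
{
	/* Allow the slack window to shrink the limit at most once per second */
	dql_init(&ring->dql, HZ);
}

static bool example_xmit(struct example_ring *ring, unsigned int bytes)
{
	/* Stop queuing once the dynamically computed limit is exhausted */
	if (dql_avail(&ring->dql) < 0)
		return false;

	dql_queued(&ring->dql, bytes);
	return true;
}

static void example_tx_clean(struct example_ring *ring, unsigned int bytes)
{
	/* Completion feeds the limit-adjustment logic in dql_completed() */
	dql_completed(&ring->dql, bytes);
}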
linux-master
lib/dynamic_queue_limits.c
// SPDX-License-Identifier: GPL-2.0-only /* * crc-itu-t.c */ #include <linux/types.h> #include <linux/module.h> #include <linux/crc-itu-t.h> /* CRC table for the CRC ITU-T V.41 0x1021 (x^16 + x^12 + x^5 + 1) */ const u16 crc_itu_t_table[256] = { 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0 }; EXPORT_SYMBOL(crc_itu_t_table); /** * crc_itu_t - Compute the CRC-ITU-T for the data buffer * * @crc: previous CRC value * @buffer: data pointer * @len: number of bytes in the buffer * * Returns the updated CRC value */ u16 crc_itu_t(u16 crc, const u8 *buffer, size_t len) { while (len--) crc = crc_itu_t_byte(crc, *buffer++); return crc; } EXPORT_SYMBOL(crc_itu_t); MODULE_DESCRIPTION("CRC ITU-T V.41 calculations"); MODULE_LICENSE("GPL");
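/*
 * Usage sketch for crc_itu_t(); the data bytes and the 0x0000 seed are
 * illustrative assumptions (some callers of this CRC seed with 0xFFFF).
 */
#include <linux/crc-itu-t.h>
#include <linux/printk.h>
#include <linux/types.h>

static void example_crc_itu_t(void)
{
	static const u8 data[] = { 0x31, 0x32, 0x33 };	/* "123" */
	u16 crc;

	/* Fold each byte into the running CRC via crc_itu_t_table[] */
	crc = crc_itu_t(0x0000, data, sizeof(data));
	pr_info("crc-itu-t: %#06x\n", crc);
}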
linux-master
lib/crc-itu-t.c
#include <linux/libfdt_env.h>
#include "../scripts/dtc/libfdt/fdt_strerror.c"
linux-master
lib/fdt_strerror.c
/* Lzma decompressor for Linux kernel. Shamelessly snarfed *from busybox 1.1.1 * *Linux kernel adaptation *Copyright (C) 2006 Alain < [email protected] > * *Based on small lzma deflate implementation/Small range coder *implementation for lzma. *Copyright (C) 2006 Aurelien Jacobs < [email protected] > * *Based on LzmaDecode.c from the LZMA SDK 4.22 (https://www.7-zip.org/) *Copyright (C) 1999-2005 Igor Pavlov * *Copyrights of the parts, see headers below. * * *This program is free software; you can redistribute it and/or *modify it under the terms of the GNU Lesser General Public *License as published by the Free Software Foundation; either *version 2.1 of the License, or (at your option) any later version. * *This program is distributed in the hope that it will be useful, *but WITHOUT ANY WARRANTY; without even the implied warranty of *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *Lesser General Public License for more details. * *You should have received a copy of the GNU Lesser General Public *License along with this library; if not, write to the Free Software *Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifdef STATIC #define PREBOOT #else #include <linux/decompress/unlzma.h> #endif /* STATIC */ #include <linux/decompress/mm.h> #define MIN(a, b) (((a) < (b)) ? (a) : (b)) static long long INIT read_int(unsigned char *ptr, int size) { int i; long long ret = 0; for (i = 0; i < size; i++) ret = (ret << 8) | ptr[size-i-1]; return ret; } #define ENDIAN_CONVERT(x) \ x = (typeof(x))read_int((unsigned char *)&x, sizeof(x)) /* Small range coder implementation for lzma. *Copyright (C) 2006 Aurelien Jacobs < [email protected] > * *Based on LzmaDecode.c from the LZMA SDK 4.22 (https://www.7-zip.org/) *Copyright (c) 1999-2005 Igor Pavlov */ #include <linux/compiler.h> #define LZMA_IOBUF_SIZE 0x10000 struct rc { long (*fill)(void*, unsigned long); uint8_t *ptr; uint8_t *buffer; uint8_t *buffer_end; long buffer_size; uint32_t code; uint32_t range; uint32_t bound; void (*error)(char *); }; #define RC_TOP_BITS 24 #define RC_MOVE_BITS 5 #define RC_MODEL_TOTAL_BITS 11 static long INIT nofill(void *buffer, unsigned long len) { return -1; } /* Called twice: once at startup and once in rc_normalize() */ static void INIT rc_read(struct rc *rc) { rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE); if (rc->buffer_size <= 0) rc->error("unexpected EOF"); rc->ptr = rc->buffer; rc->buffer_end = rc->buffer + rc->buffer_size; } /* Called once */ static inline void INIT rc_init(struct rc *rc, long (*fill)(void*, unsigned long), char *buffer, long buffer_size) { if (fill) rc->fill = fill; else rc->fill = nofill; rc->buffer = (uint8_t *)buffer; rc->buffer_size = buffer_size; rc->buffer_end = rc->buffer + rc->buffer_size; rc->ptr = rc->buffer; rc->code = 0; rc->range = 0xFFFFFFFF; } static inline void INIT rc_init_code(struct rc *rc) { int i; for (i = 0; i < 5; i++) { if (rc->ptr >= rc->buffer_end) rc_read(rc); rc->code = (rc->code << 8) | *rc->ptr++; } } /* Called twice, but one callsite is in inline'd rc_is_bit_0_helper() */ static void INIT rc_do_normalize(struct rc *rc) { if (rc->ptr >= rc->buffer_end) rc_read(rc); rc->range <<= 8; rc->code = (rc->code << 8) | *rc->ptr++; } static inline void INIT rc_normalize(struct rc *rc) { if (rc->range < (1 << RC_TOP_BITS)) rc_do_normalize(rc); } /* Called 9 times */ /* Why rc_is_bit_0_helper exists? 
*Because we want to always expose (rc->code < rc->bound) to optimizer */ static inline uint32_t INIT rc_is_bit_0_helper(struct rc *rc, uint16_t *p) { rc_normalize(rc); rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS); return rc->bound; } static inline int INIT rc_is_bit_0(struct rc *rc, uint16_t *p) { uint32_t t = rc_is_bit_0_helper(rc, p); return rc->code < t; } /* Called ~10 times, but very small, thus inlined */ static inline void INIT rc_update_bit_0(struct rc *rc, uint16_t *p) { rc->range = rc->bound; *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS; } static inline void INIT rc_update_bit_1(struct rc *rc, uint16_t *p) { rc->range -= rc->bound; rc->code -= rc->bound; *p -= *p >> RC_MOVE_BITS; } /* Called 4 times in unlzma loop */ static int INIT rc_get_bit(struct rc *rc, uint16_t *p, int *symbol) { if (rc_is_bit_0(rc, p)) { rc_update_bit_0(rc, p); *symbol *= 2; return 0; } else { rc_update_bit_1(rc, p); *symbol = *symbol * 2 + 1; return 1; } } /* Called once */ static inline int INIT rc_direct_bit(struct rc *rc) { rc_normalize(rc); rc->range >>= 1; if (rc->code >= rc->range) { rc->code -= rc->range; return 1; } return 0; } /* Called twice */ static inline void INIT rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol) { int i = num_levels; *symbol = 1; while (i--) rc_get_bit(rc, p + *symbol, symbol); *symbol -= 1 << num_levels; } /* * Small lzma deflate implementation. * Copyright (C) 2006 Aurelien Jacobs < [email protected] > * * Based on LzmaDecode.c from the LZMA SDK 4.22 (https://www.7-zip.org/) * Copyright (C) 1999-2005 Igor Pavlov */ struct lzma_header { uint8_t pos; uint32_t dict_size; uint64_t dst_size; } __attribute__ ((packed)) ; #define LZMA_BASE_SIZE 1846 #define LZMA_LIT_SIZE 768 #define LZMA_NUM_POS_BITS_MAX 4 #define LZMA_LEN_NUM_LOW_BITS 3 #define LZMA_LEN_NUM_MID_BITS 3 #define LZMA_LEN_NUM_HIGH_BITS 8 #define LZMA_LEN_CHOICE 0 #define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1) #define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1) #define LZMA_LEN_MID (LZMA_LEN_LOW \ + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS))) #define LZMA_LEN_HIGH (LZMA_LEN_MID \ +(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS))) #define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS)) #define LZMA_NUM_STATES 12 #define LZMA_NUM_LIT_STATES 7 #define LZMA_START_POS_MODEL_INDEX 4 #define LZMA_END_POS_MODEL_INDEX 14 #define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1)) #define LZMA_NUM_POS_SLOT_BITS 6 #define LZMA_NUM_LEN_TO_POS_STATES 4 #define LZMA_NUM_ALIGN_BITS 4 #define LZMA_MATCH_MIN_LEN 2 #define LZMA_IS_MATCH 0 #define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX)) #define LZMA_IS_REP_G0 (LZMA_IS_REP + LZMA_NUM_STATES) #define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES) #define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES) #define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES) #define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \ + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX)) #define LZMA_SPEC_POS (LZMA_POS_SLOT \ +(LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS)) #define LZMA_ALIGN (LZMA_SPEC_POS \ + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX) #define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS)) #define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS) #define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS) struct writer { uint8_t *buffer; uint8_t previous_byte; size_t buffer_pos; int bufsize; size_t global_pos; long (*flush)(void*, unsigned long); 
struct lzma_header *header; }; struct cstate { int state; uint32_t rep0, rep1, rep2, rep3; }; static inline size_t INIT get_pos(struct writer *wr) { return wr->global_pos + wr->buffer_pos; } static inline uint8_t INIT peek_old_byte(struct writer *wr, uint32_t offs) { if (!wr->flush) { int32_t pos; while (offs > wr->header->dict_size) offs -= wr->header->dict_size; pos = wr->buffer_pos - offs; return wr->buffer[pos]; } else { uint32_t pos = wr->buffer_pos - offs; while (pos >= wr->header->dict_size) pos += wr->header->dict_size; return wr->buffer[pos]; } } static inline int INIT write_byte(struct writer *wr, uint8_t byte) { wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte; if (wr->flush && wr->buffer_pos == wr->header->dict_size) { wr->buffer_pos = 0; wr->global_pos += wr->header->dict_size; if (wr->flush((char *)wr->buffer, wr->header->dict_size) != wr->header->dict_size) return -1; } return 0; } static inline int INIT copy_byte(struct writer *wr, uint32_t offs) { return write_byte(wr, peek_old_byte(wr, offs)); } static inline int INIT copy_bytes(struct writer *wr, uint32_t rep0, int len) { do { if (copy_byte(wr, rep0)) return -1; len--; } while (len != 0 && wr->buffer_pos < wr->header->dst_size); return len; } static inline int INIT process_bit0(struct writer *wr, struct rc *rc, struct cstate *cst, uint16_t *p, int pos_state, uint16_t *prob, int lc, uint32_t literal_pos_mask) { int mi = 1; rc_update_bit_0(rc, prob); prob = (p + LZMA_LITERAL + (LZMA_LIT_SIZE * (((get_pos(wr) & literal_pos_mask) << lc) + (wr->previous_byte >> (8 - lc)))) ); if (cst->state >= LZMA_NUM_LIT_STATES) { int match_byte = peek_old_byte(wr, cst->rep0); do { int bit; uint16_t *prob_lit; match_byte <<= 1; bit = match_byte & 0x100; prob_lit = prob + 0x100 + bit + mi; if (rc_get_bit(rc, prob_lit, &mi)) { if (!bit) break; } else { if (bit) break; } } while (mi < 0x100); } while (mi < 0x100) { uint16_t *prob_lit = prob + mi; rc_get_bit(rc, prob_lit, &mi); } if (cst->state < 4) cst->state = 0; else if (cst->state < 10) cst->state -= 3; else cst->state -= 6; return write_byte(wr, mi); } static inline int INIT process_bit1(struct writer *wr, struct rc *rc, struct cstate *cst, uint16_t *p, int pos_state, uint16_t *prob) { int offset; uint16_t *prob_len; int num_bits; int len; rc_update_bit_1(rc, prob); prob = p + LZMA_IS_REP + cst->state; if (rc_is_bit_0(rc, prob)) { rc_update_bit_0(rc, prob); cst->rep3 = cst->rep2; cst->rep2 = cst->rep1; cst->rep1 = cst->rep0; cst->state = cst->state < LZMA_NUM_LIT_STATES ? 0 : 3; prob = p + LZMA_LEN_CODER; } else { rc_update_bit_1(rc, prob); prob = p + LZMA_IS_REP_G0 + cst->state; if (rc_is_bit_0(rc, prob)) { rc_update_bit_0(rc, prob); prob = (p + LZMA_IS_REP_0_LONG + (cst->state << LZMA_NUM_POS_BITS_MAX) + pos_state); if (rc_is_bit_0(rc, prob)) { rc_update_bit_0(rc, prob); cst->state = cst->state < LZMA_NUM_LIT_STATES ? 9 : 11; return copy_byte(wr, cst->rep0); } else { rc_update_bit_1(rc, prob); } } else { uint32_t distance; rc_update_bit_1(rc, prob); prob = p + LZMA_IS_REP_G1 + cst->state; if (rc_is_bit_0(rc, prob)) { rc_update_bit_0(rc, prob); distance = cst->rep1; } else { rc_update_bit_1(rc, prob); prob = p + LZMA_IS_REP_G2 + cst->state; if (rc_is_bit_0(rc, prob)) { rc_update_bit_0(rc, prob); distance = cst->rep2; } else { rc_update_bit_1(rc, prob); distance = cst->rep3; cst->rep3 = cst->rep2; } cst->rep2 = cst->rep1; } cst->rep1 = cst->rep0; cst->rep0 = distance; } cst->state = cst->state < LZMA_NUM_LIT_STATES ? 
8 : 11; prob = p + LZMA_REP_LEN_CODER; } prob_len = prob + LZMA_LEN_CHOICE; if (rc_is_bit_0(rc, prob_len)) { rc_update_bit_0(rc, prob_len); prob_len = (prob + LZMA_LEN_LOW + (pos_state << LZMA_LEN_NUM_LOW_BITS)); offset = 0; num_bits = LZMA_LEN_NUM_LOW_BITS; } else { rc_update_bit_1(rc, prob_len); prob_len = prob + LZMA_LEN_CHOICE_2; if (rc_is_bit_0(rc, prob_len)) { rc_update_bit_0(rc, prob_len); prob_len = (prob + LZMA_LEN_MID + (pos_state << LZMA_LEN_NUM_MID_BITS)); offset = 1 << LZMA_LEN_NUM_LOW_BITS; num_bits = LZMA_LEN_NUM_MID_BITS; } else { rc_update_bit_1(rc, prob_len); prob_len = prob + LZMA_LEN_HIGH; offset = ((1 << LZMA_LEN_NUM_LOW_BITS) + (1 << LZMA_LEN_NUM_MID_BITS)); num_bits = LZMA_LEN_NUM_HIGH_BITS; } } rc_bit_tree_decode(rc, prob_len, num_bits, &len); len += offset; if (cst->state < 4) { int pos_slot; cst->state += LZMA_NUM_LIT_STATES; prob = p + LZMA_POS_SLOT + ((len < LZMA_NUM_LEN_TO_POS_STATES ? len : LZMA_NUM_LEN_TO_POS_STATES - 1) << LZMA_NUM_POS_SLOT_BITS); rc_bit_tree_decode(rc, prob, LZMA_NUM_POS_SLOT_BITS, &pos_slot); if (pos_slot >= LZMA_START_POS_MODEL_INDEX) { int i, mi; num_bits = (pos_slot >> 1) - 1; cst->rep0 = 2 | (pos_slot & 1); if (pos_slot < LZMA_END_POS_MODEL_INDEX) { cst->rep0 <<= num_bits; prob = p + LZMA_SPEC_POS + cst->rep0 - pos_slot - 1; } else { num_bits -= LZMA_NUM_ALIGN_BITS; while (num_bits--) cst->rep0 = (cst->rep0 << 1) | rc_direct_bit(rc); prob = p + LZMA_ALIGN; cst->rep0 <<= LZMA_NUM_ALIGN_BITS; num_bits = LZMA_NUM_ALIGN_BITS; } i = 1; mi = 1; while (num_bits--) { if (rc_get_bit(rc, prob + mi, &mi)) cst->rep0 |= i; i <<= 1; } } else cst->rep0 = pos_slot; if (++(cst->rep0) == 0) return 0; if (cst->rep0 > wr->header->dict_size || cst->rep0 > get_pos(wr)) return -1; } len += LZMA_MATCH_MIN_LEN; return copy_bytes(wr, cst->rep0, len); } STATIC inline int INIT unlzma(unsigned char *buf, long in_len, long (*fill)(void*, unsigned long), long (*flush)(void*, unsigned long), unsigned char *output, long *posp, void(*error)(char *x) ) { struct lzma_header header; int lc, pb, lp; uint32_t pos_state_mask; uint32_t literal_pos_mask; uint16_t *p; int num_probs; struct rc rc; int i, mi; struct writer wr; struct cstate cst; unsigned char *inbuf; int ret = -1; rc.error = error; if (buf) inbuf = buf; else inbuf = malloc(LZMA_IOBUF_SIZE); if (!inbuf) { error("Could not allocate input buffer"); goto exit_0; } cst.state = 0; cst.rep0 = cst.rep1 = cst.rep2 = cst.rep3 = 1; wr.header = &header; wr.flush = flush; wr.global_pos = 0; wr.previous_byte = 0; wr.buffer_pos = 0; rc_init(&rc, fill, inbuf, in_len); for (i = 0; i < sizeof(header); i++) { if (rc.ptr >= rc.buffer_end) rc_read(&rc); ((unsigned char *)&header)[i] = *rc.ptr++; } if (header.pos >= (9 * 5 * 5)) { error("bad header"); goto exit_1; } mi = 0; lc = header.pos; while (lc >= 9) { mi++; lc -= 9; } pb = 0; lp = mi; while (lp >= 5) { pb++; lp -= 5; } pos_state_mask = (1 << pb) - 1; literal_pos_mask = (1 << lp) - 1; ENDIAN_CONVERT(header.dict_size); ENDIAN_CONVERT(header.dst_size); if (header.dict_size == 0) header.dict_size = 1; if (output) wr.buffer = output; else { wr.bufsize = MIN(header.dst_size, header.dict_size); wr.buffer = large_malloc(wr.bufsize); } if (wr.buffer == NULL) goto exit_1; num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp)); p = (uint16_t *) large_malloc(num_probs * sizeof(*p)); if (p == NULL) goto exit_2; num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp)); for (i = 0; i < num_probs; i++) p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1; rc_init_code(&rc); while (get_pos(&wr) < 
header.dst_size) { int pos_state = get_pos(&wr) & pos_state_mask; uint16_t *prob = p + LZMA_IS_MATCH + (cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state; if (rc_is_bit_0(&rc, prob)) { if (process_bit0(&wr, &rc, &cst, p, pos_state, prob, lc, literal_pos_mask)) { error("LZMA data is corrupt"); goto exit_3; } } else { if (process_bit1(&wr, &rc, &cst, p, pos_state, prob)) { error("LZMA data is corrupt"); goto exit_3; } if (cst.rep0 == 0) break; } if (rc.buffer_size <= 0) goto exit_3; } if (posp) *posp = rc.ptr-rc.buffer; if (!wr.flush || wr.flush(wr.buffer, wr.buffer_pos) == wr.buffer_pos) ret = 0; exit_3: large_free(p); exit_2: if (!output) large_free(wr.buffer); exit_1: if (!buf) free(inbuf); exit_0: return ret; } #ifdef PREBOOT STATIC int INIT __decompress(unsigned char *buf, long in_len, long (*fill)(void*, unsigned long), long (*flush)(void*, unsigned long), unsigned char *output, long out_len, long *posp, void (*error)(char *x)) { return unlzma(buf, in_len - 4, fill, flush, output, posp, error); } #endif
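/*
 * Worked example of the LZMA properties-byte decoding that unlzma() performs
 * right after reading the header above. lzma_props_decode() is a stand-alone
 * illustrative helper, not a symbol from this file.
 */
#include <stdio.h>

static void lzma_props_decode(unsigned char pos, int *lc, int *lp, int *pb)
{
	/* pos = (pb * 5 + lp) * 9 + lc, valid only while pos < 9 * 5 * 5 */
	*lc = pos % 9;
	pos /= 9;
	*lp = pos % 5;
	*pb = pos / 5;
}

int main(void)
{
	int lc, lp, pb;

	/* 0x5D is the common default properties byte: lc=3, lp=0, pb=2 */
	lzma_props_decode(0x5D, &lc, &lp, &pb);
	printf("lc=%d lp=%d pb=%d\n", lc, lp, pb);
	return 0;
}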
linux-master
lib/decompress_unlzma.c
// SPDX-License-Identifier: GPL-2.0 /* * kernel userspace event delivery * * Copyright (C) 2004 Red Hat, Inc. All rights reserved. * Copyright (C) 2004 Novell, Inc. All rights reserved. * Copyright (C) 2004 IBM, Inc. All rights reserved. * * Authors: * Robert Love <[email protected]> * Kay Sievers <[email protected]> * Arjan van de Ven <[email protected]> * Greg Kroah-Hartman <[email protected]> */ #include <linux/spinlock.h> #include <linux/string.h> #include <linux/kobject.h> #include <linux/export.h> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/socket.h> #include <linux/skbuff.h> #include <linux/netlink.h> #include <linux/uidgid.h> #include <linux/uuid.h> #include <linux/ctype.h> #include <net/sock.h> #include <net/netlink.h> #include <net/net_namespace.h> u64 uevent_seqnum; #ifdef CONFIG_UEVENT_HELPER char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; #endif struct uevent_sock { struct list_head list; struct sock *sk; }; #ifdef CONFIG_NET static LIST_HEAD(uevent_sock_list); #endif /* This lock protects uevent_seqnum and uevent_sock_list */ static DEFINE_MUTEX(uevent_sock_mutex); /* the strings here must match the enum in include/linux/kobject.h */ static const char *kobject_actions[] = { [KOBJ_ADD] = "add", [KOBJ_REMOVE] = "remove", [KOBJ_CHANGE] = "change", [KOBJ_MOVE] = "move", [KOBJ_ONLINE] = "online", [KOBJ_OFFLINE] = "offline", [KOBJ_BIND] = "bind", [KOBJ_UNBIND] = "unbind", }; static int kobject_action_type(const char *buf, size_t count, enum kobject_action *type, const char **args) { enum kobject_action action; size_t count_first; const char *args_start; int ret = -EINVAL; if (count && (buf[count-1] == '\n' || buf[count-1] == '\0')) count--; if (!count) goto out; args_start = strnchr(buf, count, ' '); if (args_start) { count_first = args_start - buf; args_start = args_start + 1; } else count_first = count; for (action = 0; action < ARRAY_SIZE(kobject_actions); action++) { if (strncmp(kobject_actions[action], buf, count_first) != 0) continue; if (kobject_actions[action][count_first] != '\0') continue; if (args) *args = args_start; *type = action; ret = 0; break; } out: return ret; } static const char *action_arg_word_end(const char *buf, const char *buf_end, char delim) { const char *next = buf; while (next <= buf_end && *next != delim) if (!isalnum(*next++)) return NULL; if (next == buf) return NULL; return next; } static int kobject_action_args(const char *buf, size_t count, struct kobj_uevent_env **ret_env) { struct kobj_uevent_env *env = NULL; const char *next, *buf_end, *key; int key_len; int r = -EINVAL; if (count && (buf[count - 1] == '\n' || buf[count - 1] == '\0')) count--; if (!count) return -EINVAL; env = kzalloc(sizeof(*env), GFP_KERNEL); if (!env) return -ENOMEM; /* first arg is UUID */ if (count < UUID_STRING_LEN || !uuid_is_valid(buf) || add_uevent_var(env, "SYNTH_UUID=%.*s", UUID_STRING_LEN, buf)) goto out; /* * the rest are custom environment variables in KEY=VALUE * format with ' ' delimiter between each KEY=VALUE pair */ next = buf + UUID_STRING_LEN; buf_end = buf + count - 1; while (next <= buf_end) { if (*next != ' ') goto out; /* skip the ' ', key must follow */ key = ++next; if (key > buf_end) goto out; buf = next; next = action_arg_word_end(buf, buf_end, '='); if (!next || next > buf_end || *next != '=') goto out; key_len = next - buf; /* skip the '=', value must follow */ if (++next > buf_end) goto out; buf = next; next = action_arg_word_end(buf, buf_end, ' '); if (!next) goto out; if (add_uevent_var(env, 
"SYNTH_ARG_%.*s=%.*s", key_len, key, (int) (next - buf), buf)) goto out; } r = 0; out: if (r) kfree(env); else *ret_env = env; return r; } /** * kobject_synth_uevent - send synthetic uevent with arguments * * @kobj: struct kobject for which synthetic uevent is to be generated * @buf: buffer containing action type and action args, newline is ignored * @count: length of buffer * * Returns 0 if kobject_synthetic_uevent() is completed with success or the * corresponding error when it fails. */ int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count) { char *no_uuid_envp[] = { "SYNTH_UUID=0", NULL }; enum kobject_action action; const char *action_args; struct kobj_uevent_env *env; const char *msg = NULL, *devpath; int r; r = kobject_action_type(buf, count, &action, &action_args); if (r) { msg = "unknown uevent action string"; goto out; } if (!action_args) { r = kobject_uevent_env(kobj, action, no_uuid_envp); goto out; } r = kobject_action_args(action_args, count - (action_args - buf), &env); if (r == -EINVAL) { msg = "incorrect uevent action arguments"; goto out; } if (r) goto out; r = kobject_uevent_env(kobj, action, env->envp); kfree(env); out: if (r) { devpath = kobject_get_path(kobj, GFP_KERNEL); pr_warn("synth uevent: %s: %s\n", devpath ?: "unknown device", msg ?: "failed to send uevent"); kfree(devpath); } return r; } #ifdef CONFIG_UEVENT_HELPER static int kobj_usermode_filter(struct kobject *kobj) { const struct kobj_ns_type_operations *ops; ops = kobj_ns_ops(kobj); if (ops) { const void *init_ns, *ns; ns = kobj->ktype->namespace(kobj); init_ns = ops->initial_ns(); return ns != init_ns; } return 0; } static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem) { int buffer_size = sizeof(env->buf) - env->buflen; int len; len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size); if (len >= buffer_size) { pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n", buffer_size, len); return -ENOMEM; } env->argv[0] = uevent_helper; env->argv[1] = &env->buf[env->buflen]; env->argv[2] = NULL; env->buflen += len + 1; return 0; } static void cleanup_uevent_env(struct subprocess_info *info) { kfree(info->data); } #endif #ifdef CONFIG_NET static struct sk_buff *alloc_uevent_skb(struct kobj_uevent_env *env, const char *action_string, const char *devpath) { struct netlink_skb_parms *parms; struct sk_buff *skb = NULL; char *scratch; size_t len; /* allocate message with maximum possible size */ len = strlen(action_string) + strlen(devpath) + 2; skb = alloc_skb(len + env->buflen, GFP_KERNEL); if (!skb) return NULL; /* add header */ scratch = skb_put(skb, len); sprintf(scratch, "%s@%s", action_string, devpath); skb_put_data(skb, env->buf, env->buflen); parms = &NETLINK_CB(skb); parms->creds.uid = GLOBAL_ROOT_UID; parms->creds.gid = GLOBAL_ROOT_GID; parms->dst_group = 1; parms->portid = 0; return skb; } static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env, const char *action_string, const char *devpath) { struct sk_buff *skb = NULL; struct uevent_sock *ue_sk; int retval = 0; /* send netlink message */ list_for_each_entry(ue_sk, &uevent_sock_list, list) { struct sock *uevent_sock = ue_sk->sk; if (!netlink_has_listeners(uevent_sock, 1)) continue; if (!skb) { retval = -ENOMEM; skb = alloc_uevent_skb(env, action_string, devpath); if (!skb) continue; } retval = netlink_broadcast(uevent_sock, skb_get(skb), 0, 1, GFP_KERNEL); /* ENOBUFS should be handled in userspace */ if (retval == -ENOBUFS || retval == -ESRCH) retval = 0; } 
consume_skb(skb); return retval; } static int uevent_net_broadcast_tagged(struct sock *usk, struct kobj_uevent_env *env, const char *action_string, const char *devpath) { struct user_namespace *owning_user_ns = sock_net(usk)->user_ns; struct sk_buff *skb = NULL; int ret = 0; skb = alloc_uevent_skb(env, action_string, devpath); if (!skb) return -ENOMEM; /* fix credentials */ if (owning_user_ns != &init_user_ns) { struct netlink_skb_parms *parms = &NETLINK_CB(skb); kuid_t root_uid; kgid_t root_gid; /* fix uid */ root_uid = make_kuid(owning_user_ns, 0); if (uid_valid(root_uid)) parms->creds.uid = root_uid; /* fix gid */ root_gid = make_kgid(owning_user_ns, 0); if (gid_valid(root_gid)) parms->creds.gid = root_gid; } ret = netlink_broadcast(usk, skb, 0, 1, GFP_KERNEL); /* ENOBUFS should be handled in userspace */ if (ret == -ENOBUFS || ret == -ESRCH) ret = 0; return ret; } #endif static int kobject_uevent_net_broadcast(struct kobject *kobj, struct kobj_uevent_env *env, const char *action_string, const char *devpath) { int ret = 0; #ifdef CONFIG_NET const struct kobj_ns_type_operations *ops; const struct net *net = NULL; ops = kobj_ns_ops(kobj); if (!ops && kobj->kset) { struct kobject *ksobj = &kobj->kset->kobj; if (ksobj->parent != NULL) ops = kobj_ns_ops(ksobj->parent); } /* kobjects currently only carry network namespace tags and they * are the only tag relevant here since we want to decide which * network namespaces to broadcast the uevent into. */ if (ops && ops->netlink_ns && kobj->ktype->namespace) if (ops->type == KOBJ_NS_TYPE_NET) net = kobj->ktype->namespace(kobj); if (!net) ret = uevent_net_broadcast_untagged(env, action_string, devpath); else ret = uevent_net_broadcast_tagged(net->uevent_sock->sk, env, action_string, devpath); #endif return ret; } static void zap_modalias_env(struct kobj_uevent_env *env) { static const char modalias_prefix[] = "MODALIAS="; size_t len; int i, j; for (i = 0; i < env->envp_idx;) { if (strncmp(env->envp[i], modalias_prefix, sizeof(modalias_prefix) - 1)) { i++; continue; } len = strlen(env->envp[i]) + 1; if (i != env->envp_idx - 1) { memmove(env->envp[i], env->envp[i + 1], env->buflen - len); for (j = i; j < env->envp_idx - 1; j++) env->envp[j] = env->envp[j + 1] - len; } env->envp_idx--; env->buflen -= len; } } /** * kobject_uevent_env - send an uevent with environmental data * * @kobj: struct kobject that the action is happening to * @action: action that is happening * @envp_ext: pointer to environmental data * * Returns 0 if kobject_uevent_env() is completed with success or the * corresponding error when it fails. */ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, char *envp_ext[]) { struct kobj_uevent_env *env; const char *action_string = kobject_actions[action]; const char *devpath = NULL; const char *subsystem; struct kobject *top_kobj; struct kset *kset; const struct kset_uevent_ops *uevent_ops; int i = 0; int retval = 0; /* * Mark "remove" event done regardless of result, for some subsystems * do not want to re-trigger "remove" event via automatic cleanup. 
*/ if (action == KOBJ_REMOVE) kobj->state_remove_uevent_sent = 1; pr_debug("kobject: '%s' (%p): %s\n", kobject_name(kobj), kobj, __func__); /* search the kset we belong to */ top_kobj = kobj; while (!top_kobj->kset && top_kobj->parent) top_kobj = top_kobj->parent; if (!top_kobj->kset) { pr_debug("kobject: '%s' (%p): %s: attempted to send uevent " "without kset!\n", kobject_name(kobj), kobj, __func__); return -EINVAL; } kset = top_kobj->kset; uevent_ops = kset->uevent_ops; /* skip the event, if uevent_suppress is set*/ if (kobj->uevent_suppress) { pr_debug("kobject: '%s' (%p): %s: uevent_suppress " "caused the event to drop!\n", kobject_name(kobj), kobj, __func__); return 0; } /* skip the event, if the filter returns zero. */ if (uevent_ops && uevent_ops->filter) if (!uevent_ops->filter(kobj)) { pr_debug("kobject: '%s' (%p): %s: filter function " "caused the event to drop!\n", kobject_name(kobj), kobj, __func__); return 0; } /* originating subsystem */ if (uevent_ops && uevent_ops->name) subsystem = uevent_ops->name(kobj); else subsystem = kobject_name(&kset->kobj); if (!subsystem) { pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the " "event to drop!\n", kobject_name(kobj), kobj, __func__); return 0; } /* environment buffer */ env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL); if (!env) return -ENOMEM; /* complete object path */ devpath = kobject_get_path(kobj, GFP_KERNEL); if (!devpath) { retval = -ENOENT; goto exit; } /* default keys */ retval = add_uevent_var(env, "ACTION=%s", action_string); if (retval) goto exit; retval = add_uevent_var(env, "DEVPATH=%s", devpath); if (retval) goto exit; retval = add_uevent_var(env, "SUBSYSTEM=%s", subsystem); if (retval) goto exit; /* keys passed in from the caller */ if (envp_ext) { for (i = 0; envp_ext[i]; i++) { retval = add_uevent_var(env, "%s", envp_ext[i]); if (retval) goto exit; } } /* let the kset specific function add its stuff */ if (uevent_ops && uevent_ops->uevent) { retval = uevent_ops->uevent(kobj, env); if (retval) { pr_debug("kobject: '%s' (%p): %s: uevent() returned " "%d\n", kobject_name(kobj), kobj, __func__, retval); goto exit; } } switch (action) { case KOBJ_ADD: /* * Mark "add" event so we can make sure we deliver "remove" * event to userspace during automatic cleanup. If * the object did send an "add" event, "remove" will * automatically generated by the core, if not already done * by the caller. 
*/ kobj->state_add_uevent_sent = 1; break; case KOBJ_UNBIND: zap_modalias_env(env); break; default: break; } mutex_lock(&uevent_sock_mutex); /* we will send an event, so request a new sequence number */ retval = add_uevent_var(env, "SEQNUM=%llu", ++uevent_seqnum); if (retval) { mutex_unlock(&uevent_sock_mutex); goto exit; } retval = kobject_uevent_net_broadcast(kobj, env, action_string, devpath); mutex_unlock(&uevent_sock_mutex); #ifdef CONFIG_UEVENT_HELPER /* call uevent_helper, usually only enabled during early boot */ if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { struct subprocess_info *info; retval = add_uevent_var(env, "HOME=/"); if (retval) goto exit; retval = add_uevent_var(env, "PATH=/sbin:/bin:/usr/sbin:/usr/bin"); if (retval) goto exit; retval = init_uevent_argv(env, subsystem); if (retval) goto exit; retval = -ENOMEM; info = call_usermodehelper_setup(env->argv[0], env->argv, env->envp, GFP_KERNEL, NULL, cleanup_uevent_env, env); if (info) { retval = call_usermodehelper_exec(info, UMH_NO_WAIT); env = NULL; /* freed by cleanup_uevent_env */ } } #endif exit: kfree(devpath); kfree(env); return retval; } EXPORT_SYMBOL_GPL(kobject_uevent_env); /** * kobject_uevent - notify userspace by sending an uevent * * @kobj: struct kobject that the action is happening to * @action: action that is happening * * Returns 0 if kobject_uevent() is completed with success or the * corresponding error when it fails. */ int kobject_uevent(struct kobject *kobj, enum kobject_action action) { return kobject_uevent_env(kobj, action, NULL); } EXPORT_SYMBOL_GPL(kobject_uevent); /** * add_uevent_var - add key value string to the environment buffer * @env: environment buffer structure * @format: printf format for the key=value pair * * Returns 0 if environment variable was added successfully or -ENOMEM * if no space was available. */ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) 
{ va_list args; int len; if (env->envp_idx >= ARRAY_SIZE(env->envp)) { WARN(1, KERN_ERR "add_uevent_var: too many keys\n"); return -ENOMEM; } va_start(args, format); len = vsnprintf(&env->buf[env->buflen], sizeof(env->buf) - env->buflen, format, args); va_end(args); if (len >= (sizeof(env->buf) - env->buflen)) { WARN(1, KERN_ERR "add_uevent_var: buffer size too small\n"); return -ENOMEM; } env->envp[env->envp_idx++] = &env->buf[env->buflen]; env->buflen += len + 1; return 0; } EXPORT_SYMBOL_GPL(add_uevent_var); #if defined(CONFIG_NET) static int uevent_net_broadcast(struct sock *usk, struct sk_buff *skb, struct netlink_ext_ack *extack) { /* u64 to chars: 2^64 - 1 = 21 chars */ char buf[sizeof("SEQNUM=") + 21]; struct sk_buff *skbc; int ret; /* bump and prepare sequence number */ ret = snprintf(buf, sizeof(buf), "SEQNUM=%llu", ++uevent_seqnum); if (ret < 0 || (size_t)ret >= sizeof(buf)) return -ENOMEM; ret++; /* verify message does not overflow */ if ((skb->len + ret) > UEVENT_BUFFER_SIZE) { NL_SET_ERR_MSG(extack, "uevent message too big"); return -EINVAL; } /* copy skb and extend to accommodate sequence number */ skbc = skb_copy_expand(skb, 0, ret, GFP_KERNEL); if (!skbc) return -ENOMEM; /* append sequence number */ skb_put_data(skbc, buf, ret); /* remove msg header */ skb_pull(skbc, NLMSG_HDRLEN); /* set portid 0 to inform userspace message comes from kernel */ NETLINK_CB(skbc).portid = 0; NETLINK_CB(skbc).dst_group = 1; ret = netlink_broadcast(usk, skbc, 0, 1, GFP_KERNEL); /* ENOBUFS should be handled in userspace */ if (ret == -ENOBUFS || ret == -ESRCH) ret = 0; return ret; } static int uevent_net_rcv_skb(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net; int ret; if (!nlmsg_data(nlh)) return -EINVAL; /* * Verify that we are allowed to send messages to the target * network namespace. The caller must have CAP_SYS_ADMIN in the * owning user namespace of the target network namespace. */ net = sock_net(NETLINK_CB(skb).sk); if (!netlink_ns_capable(skb, net->user_ns, CAP_SYS_ADMIN)) { NL_SET_ERR_MSG(extack, "missing CAP_SYS_ADMIN capability"); return -EPERM; } mutex_lock(&uevent_sock_mutex); ret = uevent_net_broadcast(net->uevent_sock->sk, skb, extack); mutex_unlock(&uevent_sock_mutex); return ret; } static void uevent_net_rcv(struct sk_buff *skb) { netlink_rcv_skb(skb, &uevent_net_rcv_skb); } static int uevent_net_init(struct net *net) { struct uevent_sock *ue_sk; struct netlink_kernel_cfg cfg = { .groups = 1, .input = uevent_net_rcv, .flags = NL_CFG_F_NONROOT_RECV }; ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL); if (!ue_sk) return -ENOMEM; ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg); if (!ue_sk->sk) { pr_err("kobject_uevent: unable to create netlink socket!\n"); kfree(ue_sk); return -ENODEV; } net->uevent_sock = ue_sk; /* Restrict uevents to initial user namespace. 
*/ if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) { mutex_lock(&uevent_sock_mutex); list_add_tail(&ue_sk->list, &uevent_sock_list); mutex_unlock(&uevent_sock_mutex); } return 0; } static void uevent_net_exit(struct net *net) { struct uevent_sock *ue_sk = net->uevent_sock; if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) { mutex_lock(&uevent_sock_mutex); list_del(&ue_sk->list); mutex_unlock(&uevent_sock_mutex); } netlink_kernel_release(ue_sk->sk); kfree(ue_sk); } static struct pernet_operations uevent_net_ops = { .init = uevent_net_init, .exit = uevent_net_exit, }; static int __init kobject_uevent_init(void) { return register_pernet_subsys(&uevent_net_ops); } postcore_initcall(kobject_uevent_init); #endif
linux-master
lib/kobject_uevent.c
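A minimal usage sketch for the uevent API above, showing how a driver might emit a synthetic "change" event with extra environment variables; the helper name and the ERROR/REASON keys are hypothetical, not part of the file above.

#include <linux/device.h>
#include <linux/kobject.h>

/*
 * Hypothetical helper: report a condition change to userspace with two
 * extra environment variables on top of the default ACTION/DEVPATH/
 * SUBSYSTEM keys that kobject_uevent_env() adds itself.
 */
static void example_notify_overheat(struct device *dev)
{
	char *envp[] = { "ERROR=1", "REASON=overheat", NULL };

	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}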
// SPDX-License-Identifier: GPL-2.0-or-later /* Decoder for ASN.1 BER/DER/CER encoded bytestream * * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) */ #include <linux/export.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/asn1_decoder.h> #include <linux/asn1_ber_bytecode.h> static const unsigned char asn1_op_lengths[ASN1_OP__NR] = { /* OPC TAG JMP ACT */ [ASN1_OP_MATCH] = 1 + 1, [ASN1_OP_MATCH_OR_SKIP] = 1 + 1, [ASN1_OP_MATCH_ACT] = 1 + 1 + 1, [ASN1_OP_MATCH_ACT_OR_SKIP] = 1 + 1 + 1, [ASN1_OP_MATCH_JUMP] = 1 + 1 + 1, [ASN1_OP_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1, [ASN1_OP_MATCH_ANY] = 1, [ASN1_OP_MATCH_ANY_OR_SKIP] = 1, [ASN1_OP_MATCH_ANY_ACT] = 1 + 1, [ASN1_OP_MATCH_ANY_ACT_OR_SKIP] = 1 + 1, [ASN1_OP_COND_MATCH_OR_SKIP] = 1 + 1, [ASN1_OP_COND_MATCH_ACT_OR_SKIP] = 1 + 1 + 1, [ASN1_OP_COND_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1, [ASN1_OP_COND_MATCH_ANY] = 1, [ASN1_OP_COND_MATCH_ANY_OR_SKIP] = 1, [ASN1_OP_COND_MATCH_ANY_ACT] = 1 + 1, [ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP] = 1 + 1, [ASN1_OP_COND_FAIL] = 1, [ASN1_OP_COMPLETE] = 1, [ASN1_OP_ACT] = 1 + 1, [ASN1_OP_MAYBE_ACT] = 1 + 1, [ASN1_OP_RETURN] = 1, [ASN1_OP_END_SEQ] = 1, [ASN1_OP_END_SEQ_OF] = 1 + 1, [ASN1_OP_END_SET] = 1, [ASN1_OP_END_SET_OF] = 1 + 1, [ASN1_OP_END_SEQ_ACT] = 1 + 1, [ASN1_OP_END_SEQ_OF_ACT] = 1 + 1 + 1, [ASN1_OP_END_SET_ACT] = 1 + 1, [ASN1_OP_END_SET_OF_ACT] = 1 + 1 + 1, }; /* * Find the length of an indefinite length object * @data: The data buffer * @datalen: The end of the innermost containing element in the buffer * @_dp: The data parse cursor (updated before returning) * @_len: Where to return the size of the element. * @_errmsg: Where to return a pointer to an error message on error */ static int asn1_find_indefinite_length(const unsigned char *data, size_t datalen, size_t *_dp, size_t *_len, const char **_errmsg) { unsigned char tag, tmp; size_t dp = *_dp, len, n; int indef_level = 1; next_tag: if (unlikely(datalen - dp < 2)) { if (datalen == dp) goto missing_eoc; goto data_overrun_error; } /* Extract a tag from the data */ tag = data[dp++]; if (tag == ASN1_EOC) { /* It appears to be an EOC. 
*/ if (data[dp++] != 0) goto invalid_eoc; if (--indef_level <= 0) { *_len = dp - *_dp; *_dp = dp; return 0; } goto next_tag; } if (unlikely((tag & 0x1f) == ASN1_LONG_TAG)) { do { if (unlikely(datalen - dp < 2)) goto data_overrun_error; tmp = data[dp++]; } while (tmp & 0x80); } /* Extract the length */ len = data[dp++]; if (len <= 0x7f) goto check_length; if (unlikely(len == ASN1_INDEFINITE_LENGTH)) { /* Indefinite length */ if (unlikely((tag & ASN1_CONS_BIT) == ASN1_PRIM << 5)) goto indefinite_len_primitive; indef_level++; goto next_tag; } n = len - 0x80; if (unlikely(n > sizeof(len) - 1)) goto length_too_long; if (unlikely(n > datalen - dp)) goto data_overrun_error; len = 0; for (; n > 0; n--) { len <<= 8; len |= data[dp++]; } check_length: if (len > datalen - dp) goto data_overrun_error; dp += len; goto next_tag; length_too_long: *_errmsg = "Unsupported length"; goto error; indefinite_len_primitive: *_errmsg = "Indefinite len primitive not permitted"; goto error; invalid_eoc: *_errmsg = "Invalid length EOC"; goto error; data_overrun_error: *_errmsg = "Data overrun error"; goto error; missing_eoc: *_errmsg = "Missing EOC in indefinite len cons"; error: *_dp = dp; return -1; } /** * asn1_ber_decoder - Decoder BER/DER/CER ASN.1 according to pattern * @decoder: The decoder definition (produced by asn1_compiler) * @context: The caller's context (to be passed to the action functions) * @data: The encoded data * @datalen: The size of the encoded data * * Decode BER/DER/CER encoded ASN.1 data according to a bytecode pattern * produced by asn1_compiler. Action functions are called on marked tags to * allow the caller to retrieve significant data. * * LIMITATIONS: * * To keep down the amount of stack used by this function, the following limits * have been imposed: * * (1) This won't handle datalen > 65535 without increasing the size of the * cons stack elements and length_too_long checking. * * (2) The stack of constructed types is 10 deep. If the depth of non-leaf * constructed types exceeds this, the decode will fail. * * (3) The SET type (not the SET OF type) isn't really supported as tracking * what members of the set have been seen is a pain. */ int asn1_ber_decoder(const struct asn1_decoder *decoder, void *context, const unsigned char *data, size_t datalen) { const unsigned char *machine = decoder->machine; const asn1_action_t *actions = decoder->actions; size_t machlen = decoder->machlen; enum asn1_opcode op; unsigned char tag = 0, csp = 0, jsp = 0, optag = 0, hdr = 0; const char *errmsg; size_t pc = 0, dp = 0, tdp = 0, len = 0; int ret; unsigned char flags = 0; #define FLAG_INDEFINITE_LENGTH 0x01 #define FLAG_MATCHED 0x02 #define FLAG_LAST_MATCHED 0x04 /* Last tag matched */ #define FLAG_CONS 0x20 /* Corresponds to CONS bit in the opcode tag * - ie. whether or not we are going to parse * a compound type. */ #define NR_CONS_STACK 10 unsigned short cons_dp_stack[NR_CONS_STACK]; unsigned short cons_datalen_stack[NR_CONS_STACK]; unsigned char cons_hdrlen_stack[NR_CONS_STACK]; #define NR_JUMP_STACK 10 unsigned char jump_stack[NR_JUMP_STACK]; if (datalen > 65535) return -EMSGSIZE; next_op: pr_debug("next_op: pc=\e[32m%zu\e[m/%zu dp=\e[33m%zu\e[m/%zu C=%d J=%d\n", pc, machlen, dp, datalen, csp, jsp); if (unlikely(pc >= machlen)) goto machine_overrun_error; op = machine[pc]; if (unlikely(pc + asn1_op_lengths[op] > machlen)) goto machine_overrun_error; /* If this command is meant to match a tag, then do that before * evaluating the command. 
*/ if (op <= ASN1_OP__MATCHES_TAG) { unsigned char tmp; /* Skip conditional matches if possible */ if ((op & ASN1_OP_MATCH__COND && flags & FLAG_MATCHED) || (op & ASN1_OP_MATCH__SKIP && dp == datalen)) { flags &= ~FLAG_LAST_MATCHED; pc += asn1_op_lengths[op]; goto next_op; } flags = 0; hdr = 2; /* Extract a tag from the data */ if (unlikely(datalen - dp < 2)) goto data_overrun_error; tag = data[dp++]; if (unlikely((tag & 0x1f) == ASN1_LONG_TAG)) goto long_tag_not_supported; if (op & ASN1_OP_MATCH__ANY) { pr_debug("- any %02x\n", tag); } else { /* Extract the tag from the machine * - Either CONS or PRIM are permitted in the data if * CONS is not set in the op stream, otherwise CONS * is mandatory. */ optag = machine[pc + 1]; flags |= optag & FLAG_CONS; /* Determine whether the tag matched */ tmp = optag ^ tag; tmp &= ~(optag & ASN1_CONS_BIT); pr_debug("- match? %02x %02x %02x\n", tag, optag, tmp); if (tmp != 0) { /* All odd-numbered tags are MATCH_OR_SKIP. */ if (op & ASN1_OP_MATCH__SKIP) { pc += asn1_op_lengths[op]; dp--; goto next_op; } goto tag_mismatch; } } flags |= FLAG_MATCHED; len = data[dp++]; if (len > 0x7f) { if (unlikely(len == ASN1_INDEFINITE_LENGTH)) { /* Indefinite length */ if (unlikely(!(tag & ASN1_CONS_BIT))) goto indefinite_len_primitive; flags |= FLAG_INDEFINITE_LENGTH; if (unlikely(2 > datalen - dp)) goto data_overrun_error; } else { int n = len - 0x80; if (unlikely(n > 2)) goto length_too_long; if (unlikely(n > datalen - dp)) goto data_overrun_error; hdr += n; for (len = 0; n > 0; n--) { len <<= 8; len |= data[dp++]; } if (unlikely(len > datalen - dp)) goto data_overrun_error; } } else { if (unlikely(len > datalen - dp)) goto data_overrun_error; } if (flags & FLAG_CONS) { /* For expected compound forms, we stack the positions * of the start and end of the data. */ if (unlikely(csp >= NR_CONS_STACK)) goto cons_stack_overflow; cons_dp_stack[csp] = dp; cons_hdrlen_stack[csp] = hdr; if (!(flags & FLAG_INDEFINITE_LENGTH)) { cons_datalen_stack[csp] = datalen; datalen = dp + len; } else { cons_datalen_stack[csp] = 0; } csp++; } pr_debug("- TAG: %02x %zu%s\n", tag, len, flags & FLAG_CONS ? 
" CONS" : ""); tdp = dp; } /* Decide how to handle the operation */ switch (op) { case ASN1_OP_MATCH: case ASN1_OP_MATCH_OR_SKIP: case ASN1_OP_MATCH_ACT: case ASN1_OP_MATCH_ACT_OR_SKIP: case ASN1_OP_MATCH_ANY: case ASN1_OP_MATCH_ANY_OR_SKIP: case ASN1_OP_MATCH_ANY_ACT: case ASN1_OP_MATCH_ANY_ACT_OR_SKIP: case ASN1_OP_COND_MATCH_OR_SKIP: case ASN1_OP_COND_MATCH_ACT_OR_SKIP: case ASN1_OP_COND_MATCH_ANY: case ASN1_OP_COND_MATCH_ANY_OR_SKIP: case ASN1_OP_COND_MATCH_ANY_ACT: case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP: if (!(flags & FLAG_CONS)) { if (flags & FLAG_INDEFINITE_LENGTH) { size_t tmp = dp; ret = asn1_find_indefinite_length( data, datalen, &tmp, &len, &errmsg); if (ret < 0) goto error; } pr_debug("- LEAF: %zu\n", len); } if (op & ASN1_OP_MATCH__ACT) { unsigned char act; if (op & ASN1_OP_MATCH__ANY) act = machine[pc + 1]; else act = machine[pc + 2]; ret = actions[act](context, hdr, tag, data + dp, len); if (ret < 0) return ret; } if (!(flags & FLAG_CONS)) dp += len; pc += asn1_op_lengths[op]; goto next_op; case ASN1_OP_MATCH_JUMP: case ASN1_OP_MATCH_JUMP_OR_SKIP: case ASN1_OP_COND_MATCH_JUMP_OR_SKIP: pr_debug("- MATCH_JUMP\n"); if (unlikely(jsp == NR_JUMP_STACK)) goto jump_stack_overflow; jump_stack[jsp++] = pc + asn1_op_lengths[op]; pc = machine[pc + 2]; goto next_op; case ASN1_OP_COND_FAIL: if (unlikely(!(flags & FLAG_MATCHED))) goto tag_mismatch; pc += asn1_op_lengths[op]; goto next_op; case ASN1_OP_COMPLETE: if (unlikely(jsp != 0 || csp != 0)) { pr_err("ASN.1 decoder error: Stacks not empty at completion (%u, %u)\n", jsp, csp); return -EBADMSG; } return 0; case ASN1_OP_END_SET: case ASN1_OP_END_SET_ACT: if (unlikely(!(flags & FLAG_MATCHED))) goto tag_mismatch; fallthrough; case ASN1_OP_END_SEQ: case ASN1_OP_END_SET_OF: case ASN1_OP_END_SEQ_OF: case ASN1_OP_END_SEQ_ACT: case ASN1_OP_END_SET_OF_ACT: case ASN1_OP_END_SEQ_OF_ACT: if (unlikely(csp <= 0)) goto cons_stack_underflow; csp--; tdp = cons_dp_stack[csp]; hdr = cons_hdrlen_stack[csp]; len = datalen; datalen = cons_datalen_stack[csp]; pr_debug("- end cons t=%zu dp=%zu l=%zu/%zu\n", tdp, dp, len, datalen); if (datalen == 0) { /* Indefinite length - check for the EOC. 
*/ datalen = len; if (unlikely(datalen - dp < 2)) goto data_overrun_error; if (data[dp++] != 0) { if (op & ASN1_OP_END__OF) { dp--; csp++; pc = machine[pc + 1]; pr_debug("- continue\n"); goto next_op; } goto missing_eoc; } if (data[dp++] != 0) goto invalid_eoc; len = dp - tdp - 2; } else { if (dp < len && (op & ASN1_OP_END__OF)) { datalen = len; csp++; pc = machine[pc + 1]; pr_debug("- continue\n"); goto next_op; } if (dp != len) goto cons_length_error; len -= tdp; pr_debug("- cons len l=%zu d=%zu\n", len, dp - tdp); } if (op & ASN1_OP_END__ACT) { unsigned char act; if (op & ASN1_OP_END__OF) act = machine[pc + 2]; else act = machine[pc + 1]; ret = actions[act](context, hdr, 0, data + tdp, len); if (ret < 0) return ret; } pc += asn1_op_lengths[op]; goto next_op; case ASN1_OP_MAYBE_ACT: if (!(flags & FLAG_LAST_MATCHED)) { pc += asn1_op_lengths[op]; goto next_op; } fallthrough; case ASN1_OP_ACT: ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len); if (ret < 0) return ret; pc += asn1_op_lengths[op]; goto next_op; case ASN1_OP_RETURN: if (unlikely(jsp <= 0)) goto jump_stack_underflow; pc = jump_stack[--jsp]; flags |= FLAG_MATCHED | FLAG_LAST_MATCHED; goto next_op; default: break; } /* Shouldn't reach here */ pr_err("ASN.1 decoder error: Found reserved opcode (%u) pc=%zu\n", op, pc); return -EBADMSG; data_overrun_error: errmsg = "Data overrun error"; goto error; machine_overrun_error: errmsg = "Machine overrun error"; goto error; jump_stack_underflow: errmsg = "Jump stack underflow"; goto error; jump_stack_overflow: errmsg = "Jump stack overflow"; goto error; cons_stack_underflow: errmsg = "Cons stack underflow"; goto error; cons_stack_overflow: errmsg = "Cons stack overflow"; goto error; cons_length_error: errmsg = "Cons length error"; goto error; missing_eoc: errmsg = "Missing EOC in indefinite len cons"; goto error; invalid_eoc: errmsg = "Invalid length EOC"; goto error; length_too_long: errmsg = "Unsupported length"; goto error; indefinite_len_primitive: errmsg = "Indefinite len primitive not permitted"; goto error; tag_mismatch: errmsg = "Unexpected tag"; goto error; long_tag_not_supported: errmsg = "Long tag not supported"; error: pr_debug("\nASN1: %s [m=%zu d=%zu ot=%02x t=%02x l=%zu]\n", errmsg, pc, dp, optag, tag, len); return -EBADMSG; } EXPORT_SYMBOL_GPL(asn1_ber_decoder); MODULE_LICENSE("GPL");
linux-master
lib/asn1_decoder.c
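A sketch of the intended calling pattern for asn1_ber_decoder(), assuming a hypothetical grammar mycert.asn1 compiled by scripts/asn1_compiler into a generated header defining mycert_decoder; every mycert_* name here is invented for illustration.

#include <linux/asn1_decoder.h>
#include "mycert.asn1.h"	/* generated by scripts/asn1_compiler (hypothetical) */

struct mycert_parse_ctx {
	const void *serial;
	size_t serial_len;
};

/* Action routine named in the (hypothetical) grammar; must match asn1_action_t. */
int mycert_note_serial(void *context, size_t hdrlen, unsigned char tag,
		       const void *value, size_t vlen)
{
	struct mycert_parse_ctx *ctx = context;

	/* remember where the serial number sits inside the blob */
	ctx->serial = value;
	ctx->serial_len = vlen;
	return 0;
}

static int mycert_parse(const void *data, size_t datalen,
			struct mycert_parse_ctx *ctx)
{
	/* mycert_decoder is the bytecode table emitted by the compiler */
	return asn1_ber_decoder(&mycert_decoder, ctx, data, datalen);
}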
/* * xxHash - Extremely Fast Hash algorithm * Copyright (C) 2012-2016, Yann Collet. * * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. This program is dual-licensed; you may select * either version 2 of the GNU General Public License ("GPL") or BSD license * ("BSD"). * * You can contact the author at: * - xxHash homepage: https://cyan4973.github.io/xxHash/ * - xxHash source repository: https://github.com/Cyan4973/xxHash */ #include <asm/unaligned.h> #include <linux/errno.h> #include <linux/compiler.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <linux/xxhash.h> /*-************************************* * Macros **************************************/ #define xxh_rotl32(x, r) ((x << r) | (x >> (32 - r))) #define xxh_rotl64(x, r) ((x << r) | (x >> (64 - r))) #ifdef __LITTLE_ENDIAN # define XXH_CPU_LITTLE_ENDIAN 1 #else # define XXH_CPU_LITTLE_ENDIAN 0 #endif /*-************************************* * Constants **************************************/ static const uint32_t PRIME32_1 = 2654435761U; static const uint32_t PRIME32_2 = 2246822519U; static const uint32_t PRIME32_3 = 3266489917U; static const uint32_t PRIME32_4 = 668265263U; static const uint32_t PRIME32_5 = 374761393U; static const uint64_t PRIME64_1 = 11400714785074694791ULL; static const uint64_t PRIME64_2 = 14029467366897019727ULL; static const uint64_t PRIME64_3 = 1609587929392839161ULL; static const uint64_t PRIME64_4 = 9650029242287828579ULL; static const uint64_t PRIME64_5 = 2870177450012600261ULL; /*-************************** * Utils ***************************/ void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src) { memcpy(dst, src, sizeof(*dst)); } EXPORT_SYMBOL(xxh32_copy_state); void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src) { memcpy(dst, src, sizeof(*dst)); } EXPORT_SYMBOL(xxh64_copy_state); /*-*************************** * Simple Hash Functions ****************************/ static uint32_t xxh32_round(uint32_t seed, const uint32_t input) { seed += input * 
PRIME32_2; seed = xxh_rotl32(seed, 13); seed *= PRIME32_1; return seed; } uint32_t xxh32(const void *input, const size_t len, const uint32_t seed) { const uint8_t *p = (const uint8_t *)input; const uint8_t *b_end = p + len; uint32_t h32; if (len >= 16) { const uint8_t *const limit = b_end - 16; uint32_t v1 = seed + PRIME32_1 + PRIME32_2; uint32_t v2 = seed + PRIME32_2; uint32_t v3 = seed + 0; uint32_t v4 = seed - PRIME32_1; do { v1 = xxh32_round(v1, get_unaligned_le32(p)); p += 4; v2 = xxh32_round(v2, get_unaligned_le32(p)); p += 4; v3 = xxh32_round(v3, get_unaligned_le32(p)); p += 4; v4 = xxh32_round(v4, get_unaligned_le32(p)); p += 4; } while (p <= limit); h32 = xxh_rotl32(v1, 1) + xxh_rotl32(v2, 7) + xxh_rotl32(v3, 12) + xxh_rotl32(v4, 18); } else { h32 = seed + PRIME32_5; } h32 += (uint32_t)len; while (p + 4 <= b_end) { h32 += get_unaligned_le32(p) * PRIME32_3; h32 = xxh_rotl32(h32, 17) * PRIME32_4; p += 4; } while (p < b_end) { h32 += (*p) * PRIME32_5; h32 = xxh_rotl32(h32, 11) * PRIME32_1; p++; } h32 ^= h32 >> 15; h32 *= PRIME32_2; h32 ^= h32 >> 13; h32 *= PRIME32_3; h32 ^= h32 >> 16; return h32; } EXPORT_SYMBOL(xxh32); static uint64_t xxh64_round(uint64_t acc, const uint64_t input) { acc += input * PRIME64_2; acc = xxh_rotl64(acc, 31); acc *= PRIME64_1; return acc; } static uint64_t xxh64_merge_round(uint64_t acc, uint64_t val) { val = xxh64_round(0, val); acc ^= val; acc = acc * PRIME64_1 + PRIME64_4; return acc; } uint64_t xxh64(const void *input, const size_t len, const uint64_t seed) { const uint8_t *p = (const uint8_t *)input; const uint8_t *const b_end = p + len; uint64_t h64; if (len >= 32) { const uint8_t *const limit = b_end - 32; uint64_t v1 = seed + PRIME64_1 + PRIME64_2; uint64_t v2 = seed + PRIME64_2; uint64_t v3 = seed + 0; uint64_t v4 = seed - PRIME64_1; do { v1 = xxh64_round(v1, get_unaligned_le64(p)); p += 8; v2 = xxh64_round(v2, get_unaligned_le64(p)); p += 8; v3 = xxh64_round(v3, get_unaligned_le64(p)); p += 8; v4 = xxh64_round(v4, get_unaligned_le64(p)); p += 8; } while (p <= limit); h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) + xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18); h64 = xxh64_merge_round(h64, v1); h64 = xxh64_merge_round(h64, v2); h64 = xxh64_merge_round(h64, v3); h64 = xxh64_merge_round(h64, v4); } else { h64 = seed + PRIME64_5; } h64 += (uint64_t)len; while (p + 8 <= b_end) { const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p)); h64 ^= k1; h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4; p += 8; } if (p + 4 <= b_end) { h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1; h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; p += 4; } while (p < b_end) { h64 ^= (*p) * PRIME64_5; h64 = xxh_rotl64(h64, 11) * PRIME64_1; p++; } h64 ^= h64 >> 33; h64 *= PRIME64_2; h64 ^= h64 >> 29; h64 *= PRIME64_3; h64 ^= h64 >> 32; return h64; } EXPORT_SYMBOL(xxh64); /*-************************************************** * Advanced Hash Functions ***************************************************/ void xxh32_reset(struct xxh32_state *statePtr, const uint32_t seed) { /* use a local state for memcpy() to avoid strict-aliasing warnings */ struct xxh32_state state; memset(&state, 0, sizeof(state)); state.v1 = seed + PRIME32_1 + PRIME32_2; state.v2 = seed + PRIME32_2; state.v3 = seed + 0; state.v4 = seed - PRIME32_1; memcpy(statePtr, &state, sizeof(state)); } EXPORT_SYMBOL(xxh32_reset); void xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed) { /* use a local state for memcpy() to avoid strict-aliasing warnings */ struct xxh64_state state; memset(&state, 0, 
sizeof(state)); state.v1 = seed + PRIME64_1 + PRIME64_2; state.v2 = seed + PRIME64_2; state.v3 = seed + 0; state.v4 = seed - PRIME64_1; memcpy(statePtr, &state, sizeof(state)); } EXPORT_SYMBOL(xxh64_reset); int xxh32_update(struct xxh32_state *state, const void *input, const size_t len) { const uint8_t *p = (const uint8_t *)input; const uint8_t *const b_end = p + len; if (input == NULL) return -EINVAL; state->total_len_32 += (uint32_t)len; state->large_len |= (len >= 16) | (state->total_len_32 >= 16); if (state->memsize + len < 16) { /* fill in tmp buffer */ memcpy((uint8_t *)(state->mem32) + state->memsize, input, len); state->memsize += (uint32_t)len; return 0; } if (state->memsize) { /* some data left from previous update */ const uint32_t *p32 = state->mem32; memcpy((uint8_t *)(state->mem32) + state->memsize, input, 16 - state->memsize); state->v1 = xxh32_round(state->v1, get_unaligned_le32(p32)); p32++; state->v2 = xxh32_round(state->v2, get_unaligned_le32(p32)); p32++; state->v3 = xxh32_round(state->v3, get_unaligned_le32(p32)); p32++; state->v4 = xxh32_round(state->v4, get_unaligned_le32(p32)); p32++; p += 16-state->memsize; state->memsize = 0; } if (p <= b_end - 16) { const uint8_t *const limit = b_end - 16; uint32_t v1 = state->v1; uint32_t v2 = state->v2; uint32_t v3 = state->v3; uint32_t v4 = state->v4; do { v1 = xxh32_round(v1, get_unaligned_le32(p)); p += 4; v2 = xxh32_round(v2, get_unaligned_le32(p)); p += 4; v3 = xxh32_round(v3, get_unaligned_le32(p)); p += 4; v4 = xxh32_round(v4, get_unaligned_le32(p)); p += 4; } while (p <= limit); state->v1 = v1; state->v2 = v2; state->v3 = v3; state->v4 = v4; } if (p < b_end) { memcpy(state->mem32, p, (size_t)(b_end-p)); state->memsize = (uint32_t)(b_end-p); } return 0; } EXPORT_SYMBOL(xxh32_update); uint32_t xxh32_digest(const struct xxh32_state *state) { const uint8_t *p = (const uint8_t *)state->mem32; const uint8_t *const b_end = (const uint8_t *)(state->mem32) + state->memsize; uint32_t h32; if (state->large_len) { h32 = xxh_rotl32(state->v1, 1) + xxh_rotl32(state->v2, 7) + xxh_rotl32(state->v3, 12) + xxh_rotl32(state->v4, 18); } else { h32 = state->v3 /* == seed */ + PRIME32_5; } h32 += state->total_len_32; while (p + 4 <= b_end) { h32 += get_unaligned_le32(p) * PRIME32_3; h32 = xxh_rotl32(h32, 17) * PRIME32_4; p += 4; } while (p < b_end) { h32 += (*p) * PRIME32_5; h32 = xxh_rotl32(h32, 11) * PRIME32_1; p++; } h32 ^= h32 >> 15; h32 *= PRIME32_2; h32 ^= h32 >> 13; h32 *= PRIME32_3; h32 ^= h32 >> 16; return h32; } EXPORT_SYMBOL(xxh32_digest); int xxh64_update(struct xxh64_state *state, const void *input, const size_t len) { const uint8_t *p = (const uint8_t *)input; const uint8_t *const b_end = p + len; if (input == NULL) return -EINVAL; state->total_len += len; if (state->memsize + len < 32) { /* fill in tmp buffer */ memcpy(((uint8_t *)state->mem64) + state->memsize, input, len); state->memsize += (uint32_t)len; return 0; } if (state->memsize) { /* tmp buffer is full */ uint64_t *p64 = state->mem64; memcpy(((uint8_t *)p64) + state->memsize, input, 32 - state->memsize); state->v1 = xxh64_round(state->v1, get_unaligned_le64(p64)); p64++; state->v2 = xxh64_round(state->v2, get_unaligned_le64(p64)); p64++; state->v3 = xxh64_round(state->v3, get_unaligned_le64(p64)); p64++; state->v4 = xxh64_round(state->v4, get_unaligned_le64(p64)); p += 32 - state->memsize; state->memsize = 0; } if (p + 32 <= b_end) { const uint8_t *const limit = b_end - 32; uint64_t v1 = state->v1; uint64_t v2 = state->v2; uint64_t v3 = state->v3; uint64_t v4 = 
state->v4; do { v1 = xxh64_round(v1, get_unaligned_le64(p)); p += 8; v2 = xxh64_round(v2, get_unaligned_le64(p)); p += 8; v3 = xxh64_round(v3, get_unaligned_le64(p)); p += 8; v4 = xxh64_round(v4, get_unaligned_le64(p)); p += 8; } while (p <= limit); state->v1 = v1; state->v2 = v2; state->v3 = v3; state->v4 = v4; } if (p < b_end) { memcpy(state->mem64, p, (size_t)(b_end-p)); state->memsize = (uint32_t)(b_end - p); } return 0; } EXPORT_SYMBOL(xxh64_update); uint64_t xxh64_digest(const struct xxh64_state *state) { const uint8_t *p = (const uint8_t *)state->mem64; const uint8_t *const b_end = (const uint8_t *)state->mem64 + state->memsize; uint64_t h64; if (state->total_len >= 32) { const uint64_t v1 = state->v1; const uint64_t v2 = state->v2; const uint64_t v3 = state->v3; const uint64_t v4 = state->v4; h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) + xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18); h64 = xxh64_merge_round(h64, v1); h64 = xxh64_merge_round(h64, v2); h64 = xxh64_merge_round(h64, v3); h64 = xxh64_merge_round(h64, v4); } else { h64 = state->v3 + PRIME64_5; } h64 += (uint64_t)state->total_len; while (p + 8 <= b_end) { const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p)); h64 ^= k1; h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4; p += 8; } if (p + 4 <= b_end) { h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1; h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; p += 4; } while (p < b_end) { h64 ^= (*p) * PRIME64_5; h64 = xxh_rotl64(h64, 11) * PRIME64_1; p++; } h64 ^= h64 >> 33; h64 *= PRIME64_2; h64 ^= h64 >> 29; h64 *= PRIME64_3; h64 ^= h64 >> 32; return h64; } EXPORT_SYMBOL(xxh64_digest); MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("xxHash");
linux-master
lib/xxhash.c
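A short usage sketch for the one-shot and streaming xxh64 interfaces exported above; the helper names and the zero seed are arbitrary choices.

#include <linux/xxhash.h>

/* One-shot hashing of a contiguous buffer (seed 0 chosen arbitrarily). */
static u64 example_xxh64_oneshot(const void *buf, size_t len)
{
	return xxh64(buf, len, 0);
}

/* Streaming variant for data that arrives in two pieces. */
static int example_xxh64_stream(const void *a, size_t alen,
				const void *b, size_t blen, u64 *out)
{
	struct xxh64_state state;
	int err;

	xxh64_reset(&state, 0);
	err = xxh64_update(&state, a, alen);
	if (err)
		return err;
	err = xxh64_update(&state, b, blen);
	if (err)
		return err;
	*out = xxh64_digest(&state);
	return 0;
}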
// SPDX-License-Identifier: GPL-2.0-only /* * Kernel module for testing static keys. * * Copyright 2015 Akamai Technologies Inc. All Rights Reserved * * Authors: * Jason Baron <[email protected]> */ #include <linux/module.h> #include <linux/jump_label.h> /* old keys */ struct static_key old_true_key = STATIC_KEY_INIT_TRUE; struct static_key old_false_key = STATIC_KEY_INIT_FALSE; /* new api */ DEFINE_STATIC_KEY_TRUE(true_key); DEFINE_STATIC_KEY_FALSE(false_key); /* external */ extern struct static_key base_old_true_key; extern struct static_key base_inv_old_true_key; extern struct static_key base_old_false_key; extern struct static_key base_inv_old_false_key; /* new api */ extern struct static_key_true base_true_key; extern struct static_key_true base_inv_true_key; extern struct static_key_false base_false_key; extern struct static_key_false base_inv_false_key; struct test_key { bool init_state; struct static_key *key; bool (*test_key)(void); }; #define test_key_func(key, branch) \ static bool key ## _ ## branch(void) \ { \ return branch(&key); \ } static void invert_key(struct static_key *key) { if (static_key_enabled(key)) static_key_disable(key); else static_key_enable(key); } static void invert_keys(struct test_key *keys, int size) { struct static_key *previous = NULL; int i; for (i = 0; i < size; i++) { if (previous != keys[i].key) { invert_key(keys[i].key); previous = keys[i].key; } } } static int verify_keys(struct test_key *keys, int size, bool invert) { int i; bool ret, init; for (i = 0; i < size; i++) { ret = static_key_enabled(keys[i].key); init = keys[i].init_state; if (ret != (invert ? !init : init)) return -EINVAL; ret = keys[i].test_key(); if (static_key_enabled(keys[i].key)) { if (!ret) return -EINVAL; } else { if (ret) return -EINVAL; } } return 0; } test_key_func(old_true_key, static_key_true) test_key_func(old_false_key, static_key_false) test_key_func(true_key, static_branch_likely) test_key_func(true_key, static_branch_unlikely) test_key_func(false_key, static_branch_likely) test_key_func(false_key, static_branch_unlikely) test_key_func(base_old_true_key, static_key_true) test_key_func(base_inv_old_true_key, static_key_true) test_key_func(base_old_false_key, static_key_false) test_key_func(base_inv_old_false_key, static_key_false) test_key_func(base_true_key, static_branch_likely) test_key_func(base_true_key, static_branch_unlikely) test_key_func(base_inv_true_key, static_branch_likely) test_key_func(base_inv_true_key, static_branch_unlikely) test_key_func(base_false_key, static_branch_likely) test_key_func(base_false_key, static_branch_unlikely) test_key_func(base_inv_false_key, static_branch_likely) test_key_func(base_inv_false_key, static_branch_unlikely) static int __init test_static_key_init(void) { int ret; int size; struct test_key static_key_tests[] = { /* internal keys - old keys */ { .init_state = true, .key = &old_true_key, .test_key = &old_true_key_static_key_true, }, { .init_state = false, .key = &old_false_key, .test_key = &old_false_key_static_key_false, }, /* internal keys - new keys */ { .init_state = true, .key = &true_key.key, .test_key = &true_key_static_branch_likely, }, { .init_state = true, .key = &true_key.key, .test_key = &true_key_static_branch_unlikely, }, { .init_state = false, .key = &false_key.key, .test_key = &false_key_static_branch_likely, }, { .init_state = false, .key = &false_key.key, .test_key = &false_key_static_branch_unlikely, }, /* external keys - old keys */ { .init_state = true, .key = &base_old_true_key, .test_key = 
&base_old_true_key_static_key_true, }, { .init_state = false, .key = &base_inv_old_true_key, .test_key = &base_inv_old_true_key_static_key_true, }, { .init_state = false, .key = &base_old_false_key, .test_key = &base_old_false_key_static_key_false, }, { .init_state = true, .key = &base_inv_old_false_key, .test_key = &base_inv_old_false_key_static_key_false, }, /* external keys - new keys */ { .init_state = true, .key = &base_true_key.key, .test_key = &base_true_key_static_branch_likely, }, { .init_state = true, .key = &base_true_key.key, .test_key = &base_true_key_static_branch_unlikely, }, { .init_state = false, .key = &base_inv_true_key.key, .test_key = &base_inv_true_key_static_branch_likely, }, { .init_state = false, .key = &base_inv_true_key.key, .test_key = &base_inv_true_key_static_branch_unlikely, }, { .init_state = false, .key = &base_false_key.key, .test_key = &base_false_key_static_branch_likely, }, { .init_state = false, .key = &base_false_key.key, .test_key = &base_false_key_static_branch_unlikely, }, { .init_state = true, .key = &base_inv_false_key.key, .test_key = &base_inv_false_key_static_branch_likely, }, { .init_state = true, .key = &base_inv_false_key.key, .test_key = &base_inv_false_key_static_branch_unlikely, }, }; size = ARRAY_SIZE(static_key_tests); ret = verify_keys(static_key_tests, size, false); if (ret) goto out; invert_keys(static_key_tests, size); ret = verify_keys(static_key_tests, size, true); if (ret) goto out; invert_keys(static_key_tests, size); ret = verify_keys(static_key_tests, size, false); if (ret) goto out; return 0; out: return ret; } static void __exit test_static_key_exit(void) { } module_init(test_static_key_init); module_exit(test_static_key_exit); MODULE_AUTHOR("Jason Baron <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
lib/test_static_keys.c
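A sketch of the "new API" exercised by the test module above, as a driver might use it; the key name and the pr_info() hook are hypothetical.

#include <linux/jump_label.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Default-off key: the hook below is a no-op branch until the key is enabled. */
DEFINE_STATIC_KEY_FALSE(example_tracing_enabled);

static void example_fast_path(void)
{
	if (static_branch_unlikely(&example_tracing_enabled))
		pr_info("example: tracing hook hit\n");
}

/* Slow-path toggle; enable/disable patch every branch site of the key. */
static void example_set_tracing(bool on)
{
	if (on)
		static_branch_enable(&example_tracing_enabled);
	else
		static_branch_disable(&example_tracing_enabled);
}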
/*
 * Copyright (c) 2011 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/crc8.h>
#include <linux/printk.h>

/**
 * crc8_populate_msb - fill crc table for given polynomial in reverse bit order.
 *
 * @table: table to be filled.
 * @polynomial: polynomial for which table is to be filled.
 */
void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
{
	int i, j;
	const u8 msbit = 0x80;
	u8 t = msbit;

	table[0] = 0;

	for (i = 1; i < CRC8_TABLE_SIZE; i *= 2) {
		t = (t << 1) ^ (t & msbit ? polynomial : 0);
		for (j = 0; j < i; j++)
			table[i+j] = table[j] ^ t;
	}
}
EXPORT_SYMBOL(crc8_populate_msb);

/**
 * crc8_populate_lsb - fill crc table for given polynomial in regular bit order.
 *
 * @table: table to be filled.
 * @polynomial: polynomial for which table is to be filled.
 */
void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial)
{
	int i, j;
	u8 t = 1;

	table[0] = 0;

	for (i = (CRC8_TABLE_SIZE >> 1); i; i >>= 1) {
		t = (t >> 1) ^ (t & 1 ? polynomial : 0);
		for (j = 0; j < CRC8_TABLE_SIZE; j += 2*i)
			table[i+j] = table[j] ^ t;
	}
}
EXPORT_SYMBOL(crc8_populate_lsb);

/**
 * crc8 - calculate a crc8 over the given input data.
 *
 * @table: crc table used for calculation.
 * @pdata: pointer to data buffer.
 * @nbytes: number of bytes in data buffer.
 * @crc: previous returned crc8 value.
 */
u8 crc8(const u8 table[CRC8_TABLE_SIZE], const u8 *pdata, size_t nbytes, u8 crc)
{
	/* loop over the buffer data */
	while (nbytes-- > 0)
		crc = table[(crc ^ *pdata++) & 0xff];

	return crc;
}
EXPORT_SYMBOL(crc8);

MODULE_DESCRIPTION("CRC8 (by Williams, Ross N.) function");
MODULE_AUTHOR("Broadcom Corporation");
MODULE_LICENSE("Dual BSD/GPL");
linux-master
lib/crc8.c
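A usage sketch for the exported crc8 helpers; the polynomial (0x07) and the zero starting value are example choices, a real protocol dictates both.

#include <linux/crc8.h>

#define EXAMPLE_CRC8_POLY	0x07	/* x^8 + x^2 + x + 1, for illustration */

static u8 example_crc8_table[CRC8_TABLE_SIZE];

static void example_crc8_init(void)
{
	/* msb-first table; a protocol using lsb-first bit order would call _lsb */
	crc8_populate_msb(example_crc8_table, EXAMPLE_CRC8_POLY);
}

static u8 example_crc8(const u8 *buf, size_t len)
{
	/* 0 as the starting value; some specs mandate 0xff instead */
	return crc8(example_crc8_table, buf, len, 0);
}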
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <asm/unistd.h>

static unsigned dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};

static unsigned read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};

static unsigned write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};

static unsigned chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};

static unsigned signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};

int audit_classify_arch(int arch)
{
	if (audit_is_compat(arch))
		return 1;
	else
		return 0;
}

int audit_classify_syscall(int abi, unsigned syscall)
{
	if (audit_is_compat(abi))
		return audit_classify_compat_syscall(abi, syscall);

	switch(syscall) {
#ifdef __NR_open
	case __NR_open:
		return AUDITSC_OPEN;
#endif
#ifdef __NR_openat
	case __NR_openat:
		return AUDITSC_OPENAT;
#endif
#ifdef __NR_socketcall
	case __NR_socketcall:
		return AUDITSC_SOCKETCALL;
#endif
#ifdef __NR_execveat
	case __NR_execveat:
#endif
	case __NR_execve:
		return AUDITSC_EXECVE;
#ifdef __NR_openat2
	case __NR_openat2:
		return AUDITSC_OPENAT2;
#endif
	default:
		return AUDITSC_NATIVE;
	}
}

static int __init audit_classes_init(void)
{
#ifdef CONFIG_AUDIT_COMPAT_GENERIC
	audit_register_class(AUDIT_CLASS_WRITE_32, compat_write_class);
	audit_register_class(AUDIT_CLASS_READ_32, compat_read_class);
	audit_register_class(AUDIT_CLASS_DIR_WRITE_32, compat_dir_class);
	audit_register_class(AUDIT_CLASS_CHATTR_32, compat_chattr_class);
	audit_register_class(AUDIT_CLASS_SIGNAL_32, compat_signal_class);
#endif
	audit_register_class(AUDIT_CLASS_WRITE, write_class);
	audit_register_class(AUDIT_CLASS_READ, read_class);
	audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
	audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
	audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
	return 0;
}

__initcall(audit_classes_init);
linux-master
lib/audit.c
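A hedged sketch of how a caller might use audit_classify_syscall() to group the open-style syscalls for the current ABI; the helper itself is hypothetical and not part of the audit subsystem.

#include <linux/audit.h>
#include <linux/types.h>

/* Treat open/openat/openat2 as one class for filtering purposes. */
static bool example_is_open_class(int abi, unsigned int nr)
{
	switch (audit_classify_syscall(abi, nr)) {
	case AUDITSC_OPEN:
	case AUDITSC_OPENAT:
	case AUDITSC_OPENAT2:
		return true;
	default:
		return false;
	}
}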
// SPDX-License-Identifier: GPL-2.0 /* * Generate lookup table for the table-driven CRC64 calculation. * * gen_crc64table is executed in kernel build time and generates * lib/crc64table.h. This header is included by lib/crc64.c for * the table-driven CRC64 calculation. * * See lib/crc64.c for more information about which specification * and polynomial arithmetic that gen_crc64table.c follows to * generate the lookup table. * * Copyright 2018 SUSE Linux. * Author: Coly Li <[email protected]> */ #include <inttypes.h> #include <stdio.h> #define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL #define CRC64_ROCKSOFT_POLY 0x9A6C9329AC4BC9B5ULL static uint64_t crc64_table[256] = {0}; static uint64_t crc64_rocksoft_table[256] = {0}; static void generate_reflected_crc64_table(uint64_t table[256], uint64_t poly) { uint64_t i, j, c, crc; for (i = 0; i < 256; i++) { crc = 0ULL; c = i; for (j = 0; j < 8; j++) { if ((crc ^ (c >> j)) & 1) crc = (crc >> 1) ^ poly; else crc >>= 1; } table[i] = crc; } } static void generate_crc64_table(uint64_t table[256], uint64_t poly) { uint64_t i, j, c, crc; for (i = 0; i < 256; i++) { crc = 0; c = i << 56; for (j = 0; j < 8; j++) { if ((crc ^ c) & 0x8000000000000000ULL) crc = (crc << 1) ^ poly; else crc <<= 1; c <<= 1; } table[i] = crc; } } static void output_table(uint64_t table[256]) { int i; for (i = 0; i < 256; i++) { printf("\t0x%016" PRIx64 "ULL", table[i]); if (i & 0x1) printf(",\n"); else printf(", "); } printf("};\n"); } static void print_crc64_tables(void) { printf("/* this file is generated - do not edit */\n\n"); printf("#include <linux/types.h>\n"); printf("#include <linux/cache.h>\n\n"); printf("static const u64 ____cacheline_aligned crc64table[256] = {\n"); output_table(crc64_table); printf("\nstatic const u64 ____cacheline_aligned crc64rocksofttable[256] = {\n"); output_table(crc64_rocksoft_table); } int main(int argc, char *argv[]) { generate_crc64_table(crc64_table, CRC64_ECMA182_POLY); generate_reflected_crc64_table(crc64_rocksoft_table, CRC64_ROCKSOFT_POLY); print_crc64_tables(); return 0; }
linux-master
lib/gen_crc64table.c
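For context, a sketch (in the same userspace register as the generator) of how a table produced by generate_crc64_table() would typically be consumed, one byte per step; this illustrates the standard MSB-first table-driven update and is not a copy of lib/crc64.c.

#include <stdint.h>
#include <stddef.h>

/* Byte-at-a-time update using a non-reflected (ECMA-182 style) table. */
static uint64_t example_crc64_update(const uint64_t table[256], uint64_t crc,
				     const unsigned char *p, size_t len)
{
	while (len--)
		crc = table[(uint8_t)(crc >> 56) ^ *p++] ^ (crc << 8);
	return crc;
}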
// SPDX-License-Identifier: GPL-2.0-only /* ----------------------------------------------------------------------- * * * Copyright 2012 Intel Corporation; author H. Peter Anvin * * ----------------------------------------------------------------------- */ /* * earlycpio.c * * Find a specific cpio member; must precede any compressed content. * This is used to locate data items in the initramfs used by the * kernel itself during early boot (before the main initramfs is * decompressed.) It is the responsibility of the initramfs creator * to ensure that these items are uncompressed at the head of the * blob. Depending on the boot loader or package tool that may be a * separate file or part of the same file. */ #include <linux/earlycpio.h> #include <linux/kernel.h> #include <linux/string.h> enum cpio_fields { C_MAGIC, C_INO, C_MODE, C_UID, C_GID, C_NLINK, C_MTIME, C_FILESIZE, C_MAJ, C_MIN, C_RMAJ, C_RMIN, C_NAMESIZE, C_CHKSUM, C_NFIELDS }; /** * find_cpio_data - Search for files in an uncompressed cpio * @path: The directory to search for, including a slash at the end * @data: Pointer to the cpio archive or a header inside * @len: Remaining length of the cpio based on data pointer * @nextoff: When a matching file is found, this is the offset from the * beginning of the cpio to the beginning of the next file, not the * matching file itself. It can be used to iterate through the cpio * to find all files inside of a directory path. * * Return: &struct cpio_data containing the address, length and * filename (with the directory path cut off) of the found file. * If you search for a filename and not for files in a directory, * pass the absolute path of the filename in the cpio and make sure * the match returned an empty filename string. */ struct cpio_data find_cpio_data(const char *path, void *data, size_t len, long *nextoff) { const size_t cpio_header_len = 8*C_NFIELDS - 2; struct cpio_data cd = { NULL, 0, "" }; const char *p, *dptr, *nptr; unsigned int ch[C_NFIELDS], *chp, v; unsigned char c, x; size_t mypathsize = strlen(path); int i, j; p = data; while (len > cpio_header_len) { if (!*p) { /* All cpio headers need to be 4-byte aligned */ p += 4; len -= 4; continue; } j = 6; /* The magic field is only 6 characters */ chp = ch; for (i = C_NFIELDS; i; i--) { v = 0; while (j--) { v <<= 4; c = *p++; x = c - '0'; if (x < 10) { v += x; continue; } x = (c | 0x20) - 'a'; if (x < 6) { v += x + 10; continue; } goto quit; /* Invalid hexadecimal */ } *chp++ = v; j = 8; /* All other fields are 8 characters */ } if ((ch[C_MAGIC] - 0x070701) > 1) goto quit; /* Invalid magic */ len -= cpio_header_len; dptr = PTR_ALIGN(p + ch[C_NAMESIZE], 4); nptr = PTR_ALIGN(dptr + ch[C_FILESIZE], 4); if (nptr > p + len || dptr < p || nptr < dptr) goto quit; /* Buffer overrun */ if ((ch[C_MODE] & 0170000) == 0100000 && ch[C_NAMESIZE] >= mypathsize && !memcmp(p, path, mypathsize)) { if (nextoff) *nextoff = (long)nptr - (long)data; if (ch[C_NAMESIZE] - mypathsize >= MAX_CPIO_FILE_NAME) { pr_warn( "File %s exceeding MAX_CPIO_FILE_NAME [%d]\n", p, MAX_CPIO_FILE_NAME); } strscpy(cd.name, p + mypathsize, MAX_CPIO_FILE_NAME); cd.data = (void *)dptr; cd.size = ch[C_FILESIZE]; return cd; /* Found it! */ } len -= (nptr - p); p = nptr; } quit: return cd; }
linux-master
lib/earlycpio.c
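A sketch of the iteration pattern the find_cpio_data() kernel-doc describes, walking every file below a directory in an uncompressed cpio blob; the directory path and the scanning helper are made up.

#include <linux/earlycpio.h>
#include <linux/printk.h>

static void example_scan_cpio(void *blob, size_t len)
{
	static const char path[] = "kernel/firmware/";	/* hypothetical directory */
	struct cpio_data cd;
	long offset = 0;

	while (len > 0) {
		cd = find_cpio_data(path, blob, len, &offset);
		if (!cd.data)
			break;
		pr_info("cpio: %s, %zu bytes\n", cd.name, cd.size);
		/* nextoff is relative to 'blob', so advance past this file */
		blob += offset;
		len -= offset;
	}
}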
// SPDX-License-Identifier: GPL-2.0 #include <linux/export.h> #include <linux/lockref.h> #if USE_CMPXCHG_LOCKREF /* * Note that the "cmpxchg()" reloads the "old" value for the * failure case. */ #define CMPXCHG_LOOP(CODE, SUCCESS) do { \ int retry = 100; \ struct lockref old; \ BUILD_BUG_ON(sizeof(old) != 8); \ old.lock_count = READ_ONCE(lockref->lock_count); \ while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \ struct lockref new = old; \ CODE \ if (likely(try_cmpxchg64_relaxed(&lockref->lock_count, \ &old.lock_count, \ new.lock_count))) { \ SUCCESS; \ } \ if (!--retry) \ break; \ } \ } while (0) #else #define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0) #endif /** * lockref_get - Increments reference count unconditionally * @lockref: pointer to lockref structure * * This operation is only valid if you already hold a reference * to the object, so you know the count cannot be zero. */ void lockref_get(struct lockref *lockref) { CMPXCHG_LOOP( new.count++; , return; ); spin_lock(&lockref->lock); lockref->count++; spin_unlock(&lockref->lock); } EXPORT_SYMBOL(lockref_get); /** * lockref_get_not_zero - Increments count unless the count is 0 or dead * @lockref: pointer to lockref structure * Return: 1 if count updated successfully or 0 if count was zero */ int lockref_get_not_zero(struct lockref *lockref) { int retval; CMPXCHG_LOOP( new.count++; if (old.count <= 0) return 0; , return 1; ); spin_lock(&lockref->lock); retval = 0; if (lockref->count > 0) { lockref->count++; retval = 1; } spin_unlock(&lockref->lock); return retval; } EXPORT_SYMBOL(lockref_get_not_zero); /** * lockref_put_not_zero - Decrements count unless count <= 1 before decrement * @lockref: pointer to lockref structure * Return: 1 if count updated successfully or 0 if count would become zero */ int lockref_put_not_zero(struct lockref *lockref) { int retval; CMPXCHG_LOOP( new.count--; if (old.count <= 1) return 0; , return 1; ); spin_lock(&lockref->lock); retval = 0; if (lockref->count > 1) { lockref->count--; retval = 1; } spin_unlock(&lockref->lock); return retval; } EXPORT_SYMBOL(lockref_put_not_zero); /** * lockref_put_return - Decrement reference count if possible * @lockref: pointer to lockref structure * * Decrement the reference count and return the new value. * If the lockref was dead or locked, return an error. 
*/ int lockref_put_return(struct lockref *lockref) { CMPXCHG_LOOP( new.count--; if (old.count <= 0) return -1; , return new.count; ); return -1; } EXPORT_SYMBOL(lockref_put_return); /** * lockref_put_or_lock - decrements count unless count <= 1 before decrement * @lockref: pointer to lockref structure * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken */ int lockref_put_or_lock(struct lockref *lockref) { CMPXCHG_LOOP( new.count--; if (old.count <= 1) break; , return 1; ); spin_lock(&lockref->lock); if (lockref->count <= 1) return 0; lockref->count--; spin_unlock(&lockref->lock); return 1; } EXPORT_SYMBOL(lockref_put_or_lock); /** * lockref_mark_dead - mark lockref dead * @lockref: pointer to lockref structure */ void lockref_mark_dead(struct lockref *lockref) { assert_spin_locked(&lockref->lock); lockref->count = -128; } EXPORT_SYMBOL(lockref_mark_dead); /** * lockref_get_not_dead - Increments count unless the ref is dead * @lockref: pointer to lockref structure * Return: 1 if count updated successfully or 0 if lockref was dead */ int lockref_get_not_dead(struct lockref *lockref) { int retval; CMPXCHG_LOOP( new.count++; if (old.count < 0) return 0; , return 1; ); spin_lock(&lockref->lock); retval = 0; if (lockref->count >= 0) { lockref->count++; retval = 1; } spin_unlock(&lockref->lock); return retval; } EXPORT_SYMBOL(lockref_get_not_dead);
linux-master
lib/lockref.c
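/*
 * A minimal usage sketch (hypothetical; not part of lib/lockref.c): how a
 * refcounted object might pair the lockless cmpxchg fast paths with the
 * locked slow path.  "struct foo_obj", foo_get() and foo_put() are made-up
 * names for illustration only.
 */
#include <linux/lockref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo_obj {
        struct lockref ref;
        /* ... payload ... */
};

static bool foo_get(struct foo_obj *f)
{
        /* Fails only once the object has been marked dead (negative count). */
        return lockref_get_not_dead(&f->ref);
}

static void foo_put(struct foo_obj *f)
{
        /* Fast path: lockless decrement while count > 1; no lock taken. */
        if (lockref_put_or_lock(&f->ref))
                return;

        /*
         * Slow path: f->ref.lock is now held and count <= 1, so this is the
         * last reference.  Mark the object dead so concurrent getters fail,
         * then release it (real users typically defer the free, e.g. via RCU).
         */
        lockref_mark_dead(&f->ref);
        spin_unlock(&f->ref.lock);
        kfree(f);
}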
// SPDX-License-Identifier: GPL-2.0 #include <linux/buildid.h> #include <linux/cache.h> #include <linux/elf.h> #include <linux/kernel.h> #include <linux/pagemap.h> #define BUILD_ID 3 /* * Parse build id from the note segment. This logic can be shared between * 32-bit and 64-bit system, because Elf32_Nhdr and Elf64_Nhdr are * identical. */ static int parse_build_id_buf(unsigned char *build_id, __u32 *size, const void *note_start, Elf32_Word note_size) { Elf32_Word note_offs = 0, new_offs; while (note_offs + sizeof(Elf32_Nhdr) < note_size) { Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs); if (nhdr->n_type == BUILD_ID && nhdr->n_namesz == sizeof("GNU") && !strcmp((char *)(nhdr + 1), "GNU") && nhdr->n_descsz > 0 && nhdr->n_descsz <= BUILD_ID_SIZE_MAX) { memcpy(build_id, note_start + note_offs + ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr), nhdr->n_descsz); memset(build_id + nhdr->n_descsz, 0, BUILD_ID_SIZE_MAX - nhdr->n_descsz); if (size) *size = nhdr->n_descsz; return 0; } new_offs = note_offs + sizeof(Elf32_Nhdr) + ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4); if (new_offs <= note_offs) /* overflow */ break; note_offs = new_offs; } return -EINVAL; } static inline int parse_build_id(const void *page_addr, unsigned char *build_id, __u32 *size, const void *note_start, Elf32_Word note_size) { /* check for overflow */ if (note_start < page_addr || note_start + note_size < note_start) return -EINVAL; /* only supports note that fits in the first page */ if (note_start + note_size > page_addr + PAGE_SIZE) return -EINVAL; return parse_build_id_buf(build_id, size, note_start, note_size); } /* Parse build ID from 32-bit ELF */ static int get_build_id_32(const void *page_addr, unsigned char *build_id, __u32 *size) { Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr; Elf32_Phdr *phdr; int i; /* only supports phdr that fits in one page */ if (ehdr->e_phnum > (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr)) return -EINVAL; phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr)); for (i = 0; i < ehdr->e_phnum; ++i) { if (phdr[i].p_type == PT_NOTE && !parse_build_id(page_addr, build_id, size, page_addr + phdr[i].p_offset, phdr[i].p_filesz)) return 0; } return -EINVAL; } /* Parse build ID from 64-bit ELF */ static int get_build_id_64(const void *page_addr, unsigned char *build_id, __u32 *size) { Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr; Elf64_Phdr *phdr; int i; /* only supports phdr that fits in one page */ if (ehdr->e_phnum > (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr)) return -EINVAL; phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr)); for (i = 0; i < ehdr->e_phnum; ++i) { if (phdr[i].p_type == PT_NOTE && !parse_build_id(page_addr, build_id, size, page_addr + phdr[i].p_offset, phdr[i].p_filesz)) return 0; } return -EINVAL; } /* * Parse build ID of ELF file mapped to vma * @vma: vma object * @build_id: buffer to store build id, at least BUILD_ID_SIZE long * @size: returns actual build id size in case of success * * Return: 0 on success, -EINVAL otherwise */ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size) { Elf32_Ehdr *ehdr; struct page *page; void *page_addr; int ret; /* only works for page backed storage */ if (!vma->vm_file) return -EINVAL; page = find_get_page(vma->vm_file->f_mapping, 0); if (!page) return -EFAULT; /* page not mapped */ ret = -EINVAL; page_addr = kmap_atomic(page); ehdr = (Elf32_Ehdr *)page_addr; /* compare magic x7f "ELF" */ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) goto out; /* only support executable file and 
shared object file */ if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) goto out; if (ehdr->e_ident[EI_CLASS] == ELFCLASS32) ret = get_build_id_32(page_addr, build_id, size); else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) ret = get_build_id_64(page_addr, build_id, size); out: kunmap_atomic(page_addr); put_page(page); return ret; } /** * build_id_parse_buf - Get build ID from a buffer * @buf: ELF note section(s) to parse * @buf_size: Size of @buf in bytes * @build_id: Build ID parsed from @buf, at least BUILD_ID_SIZE_MAX long * * Return: 0 on success, -EINVAL otherwise */ int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size) { return parse_build_id_buf(build_id, NULL, buf, buf_size); } #if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE) unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init; /** * init_vmlinux_build_id - Compute and stash the running kernel's build ID */ void __init init_vmlinux_build_id(void) { extern const void __start_notes __weak; extern const void __stop_notes __weak; unsigned int size = &__stop_notes - &__start_notes; build_id_parse_buf(&__start_notes, vmlinux_build_id, size); } #endif
linux-master
lib/buildid.c
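/*
 * A minimal usage sketch (hypothetical; not part of lib/buildid.c): a caller
 * that already has an ELF note buffer in memory and wants the GNU build ID
 * out of it.  dump_build_id() is a made-up name; printing via "%20phN" is an
 * assumption that the raw-hex printk extension is acceptable here, sized to
 * the 20-byte BUILD_ID_SIZE_MAX.
 */
#include <linux/buildid.h>
#include <linux/printk.h>

static int dump_build_id(const void *notes, u32 notes_size)
{
        unsigned char id[BUILD_ID_SIZE_MAX];
        int ret;

        /* Unused trailing bytes of id[] are zero-filled by the parser. */
        ret = build_id_parse_buf(notes, id, notes_size);
        if (ret)
                return ret;

        pr_info("build id: %20phN\n", id);
        return 0;
}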
// SPDX-License-Identifier: GPL-2.0-only #define pr_fmt(fmt) "min_heap_test: " fmt /* * Test cases for the min max heap. */ #include <linux/log2.h> #include <linux/min_heap.h> #include <linux/module.h> #include <linux/printk.h> #include <linux/random.h> static __init bool less_than(const void *lhs, const void *rhs) { return *(int *)lhs < *(int *)rhs; } static __init bool greater_than(const void *lhs, const void *rhs) { return *(int *)lhs > *(int *)rhs; } static __init void swap_ints(void *lhs, void *rhs) { int temp = *(int *)lhs; *(int *)lhs = *(int *)rhs; *(int *)rhs = temp; } static __init int pop_verify_heap(bool min_heap, struct min_heap *heap, const struct min_heap_callbacks *funcs) { int *values = heap->data; int err = 0; int last; last = values[0]; min_heap_pop(heap, funcs); while (heap->nr > 0) { if (min_heap) { if (last > values[0]) { pr_err("error: expected %d <= %d\n", last, values[0]); err++; } } else { if (last < values[0]) { pr_err("error: expected %d >= %d\n", last, values[0]); err++; } } last = values[0]; min_heap_pop(heap, funcs); } return err; } static __init int test_heapify_all(bool min_heap) { int values[] = { 3, 1, 2, 4, 0x8000000, 0x7FFFFFF, 0, -3, -1, -2, -4, 0x8000000, 0x7FFFFFF }; struct min_heap heap = { .data = values, .nr = ARRAY_SIZE(values), .size = ARRAY_SIZE(values), }; struct min_heap_callbacks funcs = { .elem_size = sizeof(int), .less = min_heap ? less_than : greater_than, .swp = swap_ints, }; int i, err; /* Test with known set of values. */ min_heapify_all(&heap, &funcs); err = pop_verify_heap(min_heap, &heap, &funcs); /* Test with randomly generated values. */ heap.nr = ARRAY_SIZE(values); for (i = 0; i < heap.nr; i++) values[i] = get_random_u32(); min_heapify_all(&heap, &funcs); err += pop_verify_heap(min_heap, &heap, &funcs); return err; } static __init int test_heap_push(bool min_heap) { const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0, -3, -1, -2, -4, 0x80000000, 0x7FFFFFFF }; int values[ARRAY_SIZE(data)]; struct min_heap heap = { .data = values, .nr = 0, .size = ARRAY_SIZE(values), }; struct min_heap_callbacks funcs = { .elem_size = sizeof(int), .less = min_heap ? less_than : greater_than, .swp = swap_ints, }; int i, temp, err; /* Test with known set of values copied from data. */ for (i = 0; i < ARRAY_SIZE(data); i++) min_heap_push(&heap, &data[i], &funcs); err = pop_verify_heap(min_heap, &heap, &funcs); /* Test with randomly generated values. */ while (heap.nr < heap.size) { temp = get_random_u32(); min_heap_push(&heap, &temp, &funcs); } err += pop_verify_heap(min_heap, &heap, &funcs); return err; } static __init int test_heap_pop_push(bool min_heap) { const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0, -3, -1, -2, -4, 0x80000000, 0x7FFFFFFF }; int values[ARRAY_SIZE(data)]; struct min_heap heap = { .data = values, .nr = 0, .size = ARRAY_SIZE(values), }; struct min_heap_callbacks funcs = { .elem_size = sizeof(int), .less = min_heap ? less_than : greater_than, .swp = swap_ints, }; int i, temp, err; /* Fill values with data to pop and replace. */ temp = min_heap ? 0x80000000 : 0x7FFFFFFF; for (i = 0; i < ARRAY_SIZE(data); i++) min_heap_push(&heap, &temp, &funcs); /* Test with known set of values copied from data. */ for (i = 0; i < ARRAY_SIZE(data); i++) min_heap_pop_push(&heap, &data[i], &funcs); err = pop_verify_heap(min_heap, &heap, &funcs); heap.nr = 0; for (i = 0; i < ARRAY_SIZE(data); i++) min_heap_push(&heap, &temp, &funcs); /* Test with randomly generated values. 
*/ for (i = 0; i < ARRAY_SIZE(data); i++) { temp = get_random_u32(); min_heap_pop_push(&heap, &temp, &funcs); } err += pop_verify_heap(min_heap, &heap, &funcs); return err; } static int __init test_min_heap_init(void) { int err = 0; err += test_heapify_all(true); err += test_heapify_all(false); err += test_heap_push(true); err += test_heap_push(false); err += test_heap_pop_push(true); err += test_heap_pop_push(false); if (err) { pr_err("test failed with %d errors\n", err); return -EINVAL; } pr_info("test passed\n"); return 0; } module_init(test_min_heap_init); static void __exit test_min_heap_exit(void) { /* do nothing */ } module_exit(test_min_heap_exit); MODULE_LICENSE("GPL");
linux-master
lib/test_min_heap.c
// SPDX-License-Identifier: GPL-2.0-only #include <linux/init.h> #include <linux/scatterlist.h> #include <linux/mempool.h> #include <linux/slab.h> #define SG_MEMPOOL_NR ARRAY_SIZE(sg_pools) #define SG_MEMPOOL_SIZE 2 struct sg_pool { size_t size; char *name; struct kmem_cache *slab; mempool_t *pool; }; #define SP(x) { .size = x, "sgpool-" __stringify(x) } #if (SG_CHUNK_SIZE < 32) #error SG_CHUNK_SIZE is too small (must be 32 or greater) #endif static struct sg_pool sg_pools[] = { SP(8), SP(16), #if (SG_CHUNK_SIZE > 32) SP(32), #if (SG_CHUNK_SIZE > 64) SP(64), #if (SG_CHUNK_SIZE > 128) SP(128), #if (SG_CHUNK_SIZE > 256) #error SG_CHUNK_SIZE is too large (256 MAX) #endif #endif #endif #endif SP(SG_CHUNK_SIZE) }; #undef SP static inline unsigned int sg_pool_index(unsigned short nents) { unsigned int index; BUG_ON(nents > SG_CHUNK_SIZE); if (nents <= 8) index = 0; else index = get_count_order(nents) - 3; return index; } static void sg_pool_free(struct scatterlist *sgl, unsigned int nents) { struct sg_pool *sgp; sgp = sg_pools + sg_pool_index(nents); mempool_free(sgl, sgp->pool); } static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask) { struct sg_pool *sgp; sgp = sg_pools + sg_pool_index(nents); return mempool_alloc(sgp->pool, gfp_mask); } /** * sg_free_table_chained - Free a previously mapped sg table * @table: The sg table header to use * @nents_first_chunk: size of the first_chunk SGL passed to * sg_alloc_table_chained * * Description: * Free an sg table previously allocated and setup with * sg_alloc_table_chained(). * * @nents_first_chunk has to be same with that same parameter passed * to sg_alloc_table_chained(). * **/ void sg_free_table_chained(struct sg_table *table, unsigned nents_first_chunk) { if (table->orig_nents <= nents_first_chunk) return; if (nents_first_chunk == 1) nents_first_chunk = 0; __sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free, table->orig_nents); } EXPORT_SYMBOL_GPL(sg_free_table_chained); /** * sg_alloc_table_chained - Allocate and chain SGLs in an sg table * @table: The sg table header to use * @nents: Number of entries in sg list * @first_chunk: first SGL * @nents_first_chunk: number of the SGL of @first_chunk * * Description: * Allocate and chain SGLs in an sg table. If @nents@ is larger than * @nents_first_chunk a chained sg table will be setup. @first_chunk is * ignored if nents_first_chunk <= 1 because user expects the SGL points * non-chain SGL. 
* **/ int sg_alloc_table_chained(struct sg_table *table, int nents, struct scatterlist *first_chunk, unsigned nents_first_chunk) { int ret; BUG_ON(!nents); if (first_chunk && nents_first_chunk) { if (nents <= nents_first_chunk) { table->nents = table->orig_nents = nents; sg_init_table(table->sgl, nents); return 0; } } /* User supposes that the 1st SGL includes real entry */ if (nents_first_chunk <= 1) { first_chunk = NULL; nents_first_chunk = 0; } ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE, first_chunk, nents_first_chunk, GFP_ATOMIC, sg_pool_alloc); if (unlikely(ret)) sg_free_table_chained(table, nents_first_chunk); return ret; } EXPORT_SYMBOL_GPL(sg_alloc_table_chained); static __init int sg_pool_init(void) { int i; for (i = 0; i < SG_MEMPOOL_NR; i++) { struct sg_pool *sgp = sg_pools + i; int size = sgp->size * sizeof(struct scatterlist); sgp->slab = kmem_cache_create(sgp->name, size, 0, SLAB_HWCACHE_ALIGN, NULL); if (!sgp->slab) { printk(KERN_ERR "SG_POOL: can't init sg slab %s\n", sgp->name); goto cleanup_sdb; } sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, sgp->slab); if (!sgp->pool) { printk(KERN_ERR "SG_POOL: can't init sg mempool %s\n", sgp->name); goto cleanup_sdb; } } return 0; cleanup_sdb: for (i = 0; i < SG_MEMPOOL_NR; i++) { struct sg_pool *sgp = sg_pools + i; mempool_destroy(sgp->pool); kmem_cache_destroy(sgp->slab); } return -ENOMEM; } subsys_initcall(sg_pool_init);
linux-master
lib/sg_pool.c
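/*
 * A minimal usage sketch (hypothetical; not part of lib/sg_pool.c): a driver
 * request that keeps a small inline SGL and falls back to the chained,
 * mempool-backed SGLs for larger transfers.  The foo_* names and
 * FOO_INLINE_SG are made up; the pattern follows how SCSI/NVMe-style callers
 * use these helpers, where table.sgl points at the inline chunk before the
 * allocation call.
 */
#include <linux/scatterlist.h>
#include <linux/errno.h>

#define FOO_INLINE_SG   2

struct foo_req {
        struct sg_table table;
        struct scatterlist inline_sg[FOO_INLINE_SG];
};

static int foo_map_request(struct foo_req *req, int nents)
{
        req->table.sgl = req->inline_sg;
        if (sg_alloc_table_chained(&req->table, nents,
                                   req->table.sgl, FOO_INLINE_SG))
                return -ENOMEM;

        /* ... populate req->table.sgl and hand it to the hardware ... */
        return 0;
}

static void foo_unmap_request(struct foo_req *req)
{
        /* Must pass the same nents_first_chunk used at allocation time. */
        sg_free_table_chained(&req->table, FOO_INLINE_SG);
}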
// SPDX-License-Identifier: GPL-2.0-only /* * Kernel module for testing dynamic_debug * * Authors: * Jim Cromie <[email protected]> */ #define pr_fmt(fmt) "test_dd: " fmt #include <linux/module.h> /* run tests by reading or writing sysfs node: do_prints */ static void do_prints(void); /* device under test */ static int param_set_do_prints(const char *instr, const struct kernel_param *kp) { do_prints(); return 0; } static int param_get_do_prints(char *buffer, const struct kernel_param *kp) { do_prints(); return scnprintf(buffer, PAGE_SIZE, "did do_prints\n"); } static const struct kernel_param_ops param_ops_do_prints = { .set = param_set_do_prints, .get = param_get_do_prints, }; module_param_cb(do_prints, &param_ops_do_prints, NULL, 0600); /* * Using the CLASSMAP api: * - classmaps must have corresponding enum * - enum symbols must match/correlate with class-name strings in the map. * - base must equal enum's 1st value * - multiple maps must set their base to share the 0-30 class_id space !! * (build-bug-on tips welcome) * Additionally, here: * - tie together sysname, mapname, bitsname, flagsname */ #define DD_SYS_WRAP(_model, _flags) \ static unsigned long bits_##_model; \ static struct ddebug_class_param _flags##_model = { \ .bits = &bits_##_model, \ .flags = #_flags, \ .map = &map_##_model, \ }; \ module_param_cb(_flags##_##_model, &param_ops_dyndbg_classes, &_flags##_model, 0600) /* numeric input, independent bits */ enum cat_disjoint_bits { D2_CORE = 0, D2_DRIVER, D2_KMS, D2_PRIME, D2_ATOMIC, D2_VBL, D2_STATE, D2_LEASE, D2_DP, D2_DRMRES }; DECLARE_DYNDBG_CLASSMAP(map_disjoint_bits, DD_CLASS_TYPE_DISJOINT_BITS, 0, "D2_CORE", "D2_DRIVER", "D2_KMS", "D2_PRIME", "D2_ATOMIC", "D2_VBL", "D2_STATE", "D2_LEASE", "D2_DP", "D2_DRMRES"); DD_SYS_WRAP(disjoint_bits, p); DD_SYS_WRAP(disjoint_bits, T); /* symbolic input, independent bits */ enum cat_disjoint_names { LOW = 11, MID, HI }; DECLARE_DYNDBG_CLASSMAP(map_disjoint_names, DD_CLASS_TYPE_DISJOINT_NAMES, 10, "LOW", "MID", "HI"); DD_SYS_WRAP(disjoint_names, p); DD_SYS_WRAP(disjoint_names, T); /* numeric verbosity, V2 > V1 related */ enum cat_level_num { V0 = 14, V1, V2, V3, V4, V5, V6, V7 }; DECLARE_DYNDBG_CLASSMAP(map_level_num, DD_CLASS_TYPE_LEVEL_NUM, 14, "V0", "V1", "V2", "V3", "V4", "V5", "V6", "V7"); DD_SYS_WRAP(level_num, p); DD_SYS_WRAP(level_num, T); /* symbolic verbosity */ enum cat_level_names { L0 = 22, L1, L2, L3, L4, L5, L6, L7 }; DECLARE_DYNDBG_CLASSMAP(map_level_names, DD_CLASS_TYPE_LEVEL_NAMES, 22, "L0", "L1", "L2", "L3", "L4", "L5", "L6", "L7"); DD_SYS_WRAP(level_names, p); DD_SYS_WRAP(level_names, T); /* stand-in for all pr_debug etc */ #define prdbg(SYM) __pr_debug_cls(SYM, #SYM " msg\n") static void do_cats(void) { pr_debug("doing categories\n"); prdbg(LOW); prdbg(MID); prdbg(HI); prdbg(D2_CORE); prdbg(D2_DRIVER); prdbg(D2_KMS); prdbg(D2_PRIME); prdbg(D2_ATOMIC); prdbg(D2_VBL); prdbg(D2_STATE); prdbg(D2_LEASE); prdbg(D2_DP); prdbg(D2_DRMRES); } static void do_levels(void) { pr_debug("doing levels\n"); prdbg(V1); prdbg(V2); prdbg(V3); prdbg(V4); prdbg(V5); prdbg(V6); prdbg(V7); prdbg(L1); prdbg(L2); prdbg(L3); prdbg(L4); prdbg(L5); prdbg(L6); prdbg(L7); } static void do_prints(void) { do_cats(); do_levels(); } static int __init test_dynamic_debug_init(void) { pr_debug("init start\n"); do_prints(); pr_debug("init done\n"); return 0; } static void __exit test_dynamic_debug_exit(void) { pr_debug("exited\n"); } module_init(test_dynamic_debug_init); module_exit(test_dynamic_debug_exit); MODULE_AUTHOR("Jim Cromie <[email 
protected]>"); MODULE_LICENSE("GPL");
linux-master
lib/test_dynamic_debug.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/compiler.h> #include <linux/export.h> #include <linux/fault-inject-usercopy.h> #include <linux/kasan-checks.h> #include <linux/thread_info.h> #include <linux/uaccess.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/mm.h> #include <asm/byteorder.h> #include <asm/word-at-a-time.h> #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS #define IS_UNALIGNED(src, dst) 0 #else #define IS_UNALIGNED(src, dst) \ (((long) dst | (long) src) & (sizeof(long) - 1)) #endif /* * Do a strncpy, return length of string without final '\0'. * 'count' is the user-supplied count (return 'count' if we * hit it), 'max' is the address space maximum (and we return * -EFAULT if we hit it). */ static __always_inline long do_strncpy_from_user(char *dst, const char __user *src, unsigned long count, unsigned long max) { const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; unsigned long res = 0; if (IS_UNALIGNED(src, dst)) goto byte_at_a_time; while (max >= sizeof(unsigned long)) { unsigned long c, data, mask; /* Fall back to byte-at-a-time if we get a page fault */ unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time); /* * Note that we mask out the bytes following the NUL. This is * important to do because string oblivious code may read past * the NUL. For those routines, we don't want to give them * potentially random bytes after the NUL in `src`. * * One example of such code is BPF map keys. BPF treats map keys * as an opaque set of bytes. Without the post-NUL mask, any BPF * maps keyed by strings returned from strncpy_from_user() may * have multiple entries for semantically identical strings. */ if (has_zero(c, &data, &constants)) { data = prep_zero_mask(c, data, &constants); data = create_zero_mask(data); mask = zero_bytemask(data); *(unsigned long *)(dst+res) = c & mask; return res + find_zero(data); } *(unsigned long *)(dst+res) = c; res += sizeof(unsigned long); max -= sizeof(unsigned long); } byte_at_a_time: while (max) { char c; unsafe_get_user(c,src+res, efault); dst[res] = c; if (!c) return res; res++; max--; } /* * Uhhuh. We hit 'max'. But was that the user-specified maximum * too? If so, that's ok - we got as much as the user asked for. */ if (res >= count) return res; /* * Nope: we hit the address space limit, and we still had more * characters the caller would have wanted. That's an EFAULT. */ efault: return -EFAULT; } /** * strncpy_from_user: - Copy a NUL terminated string from userspace. * @dst: Destination address, in kernel space. This buffer must be at * least @count bytes long. * @src: Source address, in user space. * @count: Maximum number of bytes to copy, including the trailing NUL. * * Copies a NUL-terminated string from userspace to kernel space. * * On success, returns the length of the string (not including the trailing * NUL). * * If access to userspace fails, returns -EFAULT (some data may have been * copied). * * If @count is smaller than the length of the string, copies @count bytes * and returns @count. 
*/ long strncpy_from_user(char *dst, const char __user *src, long count) { unsigned long max_addr, src_addr; might_fault(); if (should_fail_usercopy()) return -EFAULT; if (unlikely(count <= 0)) return 0; max_addr = TASK_SIZE_MAX; src_addr = (unsigned long)untagged_addr(src); if (likely(src_addr < max_addr)) { unsigned long max = max_addr - src_addr; long retval; /* * Truncate 'max' to the user-specified limit, so that * we only have one limit we need to check in the loop */ if (max > count) max = count; kasan_check_write(dst, count); check_object_size(dst, count, false); if (user_read_access_begin(src, max)) { retval = do_strncpy_from_user(dst, src, count, max); user_read_access_end(); return retval; } } return -EFAULT; } EXPORT_SYMBOL(strncpy_from_user);
linux-master
lib/strncpy_from_user.c
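/*
 * A minimal usage sketch (hypothetical; not part of lib/strncpy_from_user.c):
 * the usual calling pattern for copying a short, NUL-terminated name out of
 * user space.  foo_set_name() is a made-up helper for illustration.
 */
#include <linux/uaccess.h>
#include <linux/errno.h>

static int foo_set_name(const char __user *uname)
{
        char name[32];
        long len;

        len = strncpy_from_user(name, uname, sizeof(name));
        if (len < 0)
                return len;                     /* -EFAULT */
        if (len == sizeof(name))
                return -ENAMETOOLONG;           /* no NUL within the buffer */

        /* name[] now holds a NUL-terminated string of length len. */
        return 0;
}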
// SPDX-License-Identifier: GPL-2.0 /* * Kernel module for testing 'strcat' family of functions. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <kunit/test.h> #include <linux/string.h> static volatile int unconst; static void strcat_test(struct kunit *test) { char dest[8]; /* Destination is terminated. */ memset(dest, 0, sizeof(dest)); KUNIT_EXPECT_EQ(test, strlen(dest), 0); /* Empty copy does nothing. */ KUNIT_EXPECT_TRUE(test, strcat(dest, "") == dest); KUNIT_EXPECT_STREQ(test, dest, ""); /* 4 characters copied in, stops at %NUL. */ KUNIT_EXPECT_TRUE(test, strcat(dest, "four\000123") == dest); KUNIT_EXPECT_STREQ(test, dest, "four"); KUNIT_EXPECT_EQ(test, dest[5], '\0'); /* 2 more characters copied in okay. */ KUNIT_EXPECT_TRUE(test, strcat(dest, "AB") == dest); KUNIT_EXPECT_STREQ(test, dest, "fourAB"); } static void strncat_test(struct kunit *test) { char dest[8]; /* Destination is terminated. */ memset(dest, 0, sizeof(dest)); KUNIT_EXPECT_EQ(test, strlen(dest), 0); /* Empty copy of size 0 does nothing. */ KUNIT_EXPECT_TRUE(test, strncat(dest, "", 0 + unconst) == dest); KUNIT_EXPECT_STREQ(test, dest, ""); /* Empty copy of size 1 does nothing too. */ KUNIT_EXPECT_TRUE(test, strncat(dest, "", 1 + unconst) == dest); KUNIT_EXPECT_STREQ(test, dest, ""); /* Copy of max 0 characters should do nothing. */ KUNIT_EXPECT_TRUE(test, strncat(dest, "asdf", 0 + unconst) == dest); KUNIT_EXPECT_STREQ(test, dest, ""); /* 4 characters copied in, even if max is 8. */ KUNIT_EXPECT_TRUE(test, strncat(dest, "four\000123", 8 + unconst) == dest); KUNIT_EXPECT_STREQ(test, dest, "four"); KUNIT_EXPECT_EQ(test, dest[5], '\0'); KUNIT_EXPECT_EQ(test, dest[6], '\0'); /* 2 characters copied in okay, 2 ignored. */ KUNIT_EXPECT_TRUE(test, strncat(dest, "ABCD", 2 + unconst) == dest); KUNIT_EXPECT_STREQ(test, dest, "fourAB"); } static void strlcat_test(struct kunit *test) { char dest[8] = ""; int len = sizeof(dest) + unconst; /* Destination is terminated. */ KUNIT_EXPECT_EQ(test, strlen(dest), 0); /* Empty copy is size 0. */ KUNIT_EXPECT_EQ(test, strlcat(dest, "", len), 0); KUNIT_EXPECT_STREQ(test, dest, ""); /* Size 1 should keep buffer terminated, report size of source only. */ KUNIT_EXPECT_EQ(test, strlcat(dest, "four", 1 + unconst), 4); KUNIT_EXPECT_STREQ(test, dest, ""); /* 4 characters copied in. */ KUNIT_EXPECT_EQ(test, strlcat(dest, "four", len), 4); KUNIT_EXPECT_STREQ(test, dest, "four"); /* 2 characters copied in okay, gets to 6 total. */ KUNIT_EXPECT_EQ(test, strlcat(dest, "AB", len), 6); KUNIT_EXPECT_STREQ(test, dest, "fourAB"); /* 2 characters ignored if max size (7) reached. */ KUNIT_EXPECT_EQ(test, strlcat(dest, "CD", 7 + unconst), 8); KUNIT_EXPECT_STREQ(test, dest, "fourAB"); /* 1 of 2 characters skipped, now at true max size. */ KUNIT_EXPECT_EQ(test, strlcat(dest, "EFG", len), 9); KUNIT_EXPECT_STREQ(test, dest, "fourABE"); /* Everything else ignored, now at full size. */ KUNIT_EXPECT_EQ(test, strlcat(dest, "1234", len), 11); KUNIT_EXPECT_STREQ(test, dest, "fourABE"); } static struct kunit_case strcat_test_cases[] = { KUNIT_CASE(strcat_test), KUNIT_CASE(strncat_test), KUNIT_CASE(strlcat_test), {} }; static struct kunit_suite strcat_test_suite = { .name = "strcat", .test_cases = strcat_test_cases, }; kunit_test_suite(strcat_test_suite); MODULE_LICENSE("GPL");
linux-master
lib/strcat_kunit.c
// SPDX-License-Identifier: GPL-2.0 /* * Extra Boot Config * Masami Hiramatsu <[email protected]> */ #ifdef __KERNEL__ #include <linux/bootconfig.h> #include <linux/bug.h> #include <linux/ctype.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/memblock.h> #include <linux/string.h> #ifdef CONFIG_BOOT_CONFIG_EMBED /* embedded_bootconfig_data is defined in bootconfig-data.S */ extern __visible const char embedded_bootconfig_data[]; extern __visible const char embedded_bootconfig_data_end[]; const char * __init xbc_get_embedded_bootconfig(size_t *size) { *size = embedded_bootconfig_data_end - embedded_bootconfig_data; return (*size) ? embedded_bootconfig_data : NULL; } #endif #else /* !__KERNEL__ */ /* * NOTE: This is only for tools/bootconfig, because tools/bootconfig will * run the parser sanity test. * This does NOT mean lib/bootconfig.c is available in the user space. * However, if you change this file, please make sure the tools/bootconfig * has no issue on building and running. */ #include <linux/bootconfig.h> #endif /* * Extra Boot Config (XBC) is given as tree-structured ascii text of * key-value pairs on memory. * xbc_parse() parses the text to build a simple tree. Each tree node is * simply a key word or a value. A key node may have a next key node or/and * a child node (both key and value). A value node may have a next value * node (for array). */ static struct xbc_node *xbc_nodes __initdata; static int xbc_node_num __initdata; static char *xbc_data __initdata; static size_t xbc_data_size __initdata; static struct xbc_node *last_parent __initdata; static const char *xbc_err_msg __initdata; static int xbc_err_pos __initdata; static int open_brace[XBC_DEPTH_MAX] __initdata; static int brace_index __initdata; #ifdef __KERNEL__ static inline void * __init xbc_alloc_mem(size_t size) { return memblock_alloc(size, SMP_CACHE_BYTES); } static inline void __init xbc_free_mem(void *addr, size_t size) { memblock_free(addr, size); } #else /* !__KERNEL__ */ static inline void *xbc_alloc_mem(size_t size) { return malloc(size); } static inline void xbc_free_mem(void *addr, size_t size) { free(addr); } #endif /** * xbc_get_info() - Get the information of loaded boot config * @node_size: A pointer to store the number of nodes. * @data_size: A pointer to store the size of bootconfig data. * * Get the number of used nodes in @node_size if it is not NULL, * and the size of bootconfig data in @data_size if it is not NULL. * Return 0 if the boot config is initialized, or return -ENODEV. */ int __init xbc_get_info(int *node_size, size_t *data_size) { if (!xbc_data) return -ENODEV; if (node_size) *node_size = xbc_node_num; if (data_size) *data_size = xbc_data_size; return 0; } static int __init xbc_parse_error(const char *msg, const char *p) { xbc_err_msg = msg; xbc_err_pos = (int)(p - xbc_data); return -EINVAL; } /** * xbc_root_node() - Get the root node of extended boot config * * Return the address of root node of extended boot config. If the * extended boot config is not initiized, return NULL. */ struct xbc_node * __init xbc_root_node(void) { if (unlikely(!xbc_data)) return NULL; return xbc_nodes; } /** * xbc_node_index() - Get the index of XBC node * @node: A target node of getting index. * * Return the index number of @node in XBC node list. */ int __init xbc_node_index(struct xbc_node *node) { return node - &xbc_nodes[0]; } /** * xbc_node_get_parent() - Get the parent XBC node * @node: An XBC node. * * Return the parent node of @node. 
If the node is top node of the tree, * return NULL. */ struct xbc_node * __init xbc_node_get_parent(struct xbc_node *node) { return node->parent == XBC_NODE_MAX ? NULL : &xbc_nodes[node->parent]; } /** * xbc_node_get_child() - Get the child XBC node * @node: An XBC node. * * Return the first child node of @node. If the node has no child, return * NULL. */ struct xbc_node * __init xbc_node_get_child(struct xbc_node *node) { return node->child ? &xbc_nodes[node->child] : NULL; } /** * xbc_node_get_next() - Get the next sibling XBC node * @node: An XBC node. * * Return the NEXT sibling node of @node. If the node has no next sibling, * return NULL. Note that even if this returns NULL, it doesn't mean @node * has no siblings. (You also has to check whether the parent's child node * is @node or not.) */ struct xbc_node * __init xbc_node_get_next(struct xbc_node *node) { return node->next ? &xbc_nodes[node->next] : NULL; } /** * xbc_node_get_data() - Get the data of XBC node * @node: An XBC node. * * Return the data (which is always a null terminated string) of @node. * If the node has invalid data, warn and return NULL. */ const char * __init xbc_node_get_data(struct xbc_node *node) { int offset = node->data & ~XBC_VALUE; if (WARN_ON(offset >= xbc_data_size)) return NULL; return xbc_data + offset; } static bool __init xbc_node_match_prefix(struct xbc_node *node, const char **prefix) { const char *p = xbc_node_get_data(node); int len = strlen(p); if (strncmp(*prefix, p, len)) return false; p = *prefix + len; if (*p == '.') p++; else if (*p != '\0') return false; *prefix = p; return true; } /** * xbc_node_find_subkey() - Find a subkey node which matches given key * @parent: An XBC node. * @key: A key string. * * Search a key node under @parent which matches @key. The @key can contain * several words jointed with '.'. If @parent is NULL, this searches the * node from whole tree. Return NULL if no node is matched. */ struct xbc_node * __init xbc_node_find_subkey(struct xbc_node *parent, const char *key) { struct xbc_node *node; if (parent) node = xbc_node_get_subkey(parent); else node = xbc_root_node(); while (node && xbc_node_is_key(node)) { if (!xbc_node_match_prefix(node, &key)) node = xbc_node_get_next(node); else if (*key != '\0') node = xbc_node_get_subkey(node); else break; } return node; } /** * xbc_node_find_value() - Find a value node which matches given key * @parent: An XBC node. * @key: A key string. * @vnode: A container pointer of found XBC node. * * Search a value node under @parent whose (parent) key node matches @key, * store it in *@vnode, and returns the value string. * The @key can contain several words jointed with '.'. If @parent is NULL, * this searches the node from whole tree. Return the value string if a * matched key found, return NULL if no node is matched. * Note that this returns 0-length string and stores NULL in *@vnode if the * key has no value. And also it will return the value of the first entry if * the value is an array. */ const char * __init xbc_node_find_value(struct xbc_node *parent, const char *key, struct xbc_node **vnode) { struct xbc_node *node = xbc_node_find_subkey(parent, key); if (!node || !xbc_node_is_key(node)) return NULL; node = xbc_node_get_child(node); if (node && !xbc_node_is_value(node)) return NULL; if (vnode) *vnode = node; return node ? xbc_node_get_data(node) : ""; } /** * xbc_node_compose_key_after() - Compose partial key string of the XBC node * @root: Root XBC node * @node: Target XBC node. * @buf: A buffer to store the key. 
* @size: The size of the @buf. * * Compose the partial key of the @node into @buf, which is starting right * after @root (@root is not included.) If @root is NULL, this returns full * key words of @node. * Returns the total length of the key stored in @buf. Returns -EINVAL * if @node is NULL or @root is not the ancestor of @node or @root is @node, * or returns -ERANGE if the key depth is deeper than max depth. * This is expected to be used with xbc_find_node() to list up all (child) * keys under given key. */ int __init xbc_node_compose_key_after(struct xbc_node *root, struct xbc_node *node, char *buf, size_t size) { uint16_t keys[XBC_DEPTH_MAX]; int depth = 0, ret = 0, total = 0; if (!node || node == root) return -EINVAL; if (xbc_node_is_value(node)) node = xbc_node_get_parent(node); while (node && node != root) { keys[depth++] = xbc_node_index(node); if (depth == XBC_DEPTH_MAX) return -ERANGE; node = xbc_node_get_parent(node); } if (!node && root) return -EINVAL; while (--depth >= 0) { node = xbc_nodes + keys[depth]; ret = snprintf(buf, size, "%s%s", xbc_node_get_data(node), depth ? "." : ""); if (ret < 0) return ret; if (ret > size) { size = 0; } else { size -= ret; buf += ret; } total += ret; } return total; } /** * xbc_node_find_next_leaf() - Find the next leaf node under given node * @root: An XBC root node * @node: An XBC node which starts from. * * Search the next leaf node (which means the terminal key node) of @node * under @root node (including @root node itself). * Return the next node or NULL if next leaf node is not found. */ struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root, struct xbc_node *node) { struct xbc_node *next; if (unlikely(!xbc_data)) return NULL; if (!node) { /* First try */ node = root; if (!node) node = xbc_nodes; } else { /* Leaf node may have a subkey */ next = xbc_node_get_subkey(node); if (next) { node = next; goto found; } if (node == root) /* @root was a leaf, no child node. */ return NULL; while (!node->next) { node = xbc_node_get_parent(node); if (node == root) return NULL; /* User passed a node which is not uder parent */ if (WARN_ON(!node)) return NULL; } node = xbc_node_get_next(node); } found: while (node && !xbc_node_is_leaf(node)) node = xbc_node_get_child(node); return node; } /** * xbc_node_find_next_key_value() - Find the next key-value pair nodes * @root: An XBC root node * @leaf: A container pointer of XBC node which starts from. * * Search the next leaf node (which means the terminal key node) of *@leaf * under @root node. Returns the value and update *@leaf if next leaf node * is found, or NULL if no next leaf node is found. * Note that this returns 0-length string if the key has no value, or * the value of the first entry if the value is an array. 
*/ const char * __init xbc_node_find_next_key_value(struct xbc_node *root, struct xbc_node **leaf) { /* tip must be passed */ if (WARN_ON(!leaf)) return NULL; *leaf = xbc_node_find_next_leaf(root, *leaf); if (!*leaf) return NULL; if ((*leaf)->child) return xbc_node_get_data(xbc_node_get_child(*leaf)); else return ""; /* No value key */ } /* XBC parse and tree build */ static int __init xbc_init_node(struct xbc_node *node, char *data, uint32_t flag) { unsigned long offset = data - xbc_data; if (WARN_ON(offset >= XBC_DATA_MAX)) return -EINVAL; node->data = (uint16_t)offset | flag; node->child = 0; node->next = 0; return 0; } static struct xbc_node * __init xbc_add_node(char *data, uint32_t flag) { struct xbc_node *node; if (xbc_node_num == XBC_NODE_MAX) return NULL; node = &xbc_nodes[xbc_node_num++]; if (xbc_init_node(node, data, flag) < 0) return NULL; return node; } static inline __init struct xbc_node *xbc_last_sibling(struct xbc_node *node) { while (node->next) node = xbc_node_get_next(node); return node; } static inline __init struct xbc_node *xbc_last_child(struct xbc_node *node) { while (node->child) node = xbc_node_get_child(node); return node; } static struct xbc_node * __init __xbc_add_sibling(char *data, uint32_t flag, bool head) { struct xbc_node *sib, *node = xbc_add_node(data, flag); if (node) { if (!last_parent) { /* Ignore @head in this case */ node->parent = XBC_NODE_MAX; sib = xbc_last_sibling(xbc_nodes); sib->next = xbc_node_index(node); } else { node->parent = xbc_node_index(last_parent); if (!last_parent->child || head) { node->next = last_parent->child; last_parent->child = xbc_node_index(node); } else { sib = xbc_node_get_child(last_parent); sib = xbc_last_sibling(sib); sib->next = xbc_node_index(node); } } } else xbc_parse_error("Too many nodes", data); return node; } static inline struct xbc_node * __init xbc_add_sibling(char *data, uint32_t flag) { return __xbc_add_sibling(data, flag, false); } static inline struct xbc_node * __init xbc_add_head_sibling(char *data, uint32_t flag) { return __xbc_add_sibling(data, flag, true); } static inline __init struct xbc_node *xbc_add_child(char *data, uint32_t flag) { struct xbc_node *node = xbc_add_sibling(data, flag); if (node) last_parent = node; return node; } static inline __init bool xbc_valid_keyword(char *key) { if (key[0] == '\0') return false; while (isalnum(*key) || *key == '-' || *key == '_') key++; return *key == '\0'; } static char *skip_comment(char *p) { char *ret; ret = strchr(p, '\n'); if (!ret) ret = p + strlen(p); else ret++; return ret; } static char *skip_spaces_until_newline(char *p) { while (isspace(*p) && *p != '\n') p++; return p; } static int __init __xbc_open_brace(char *p) { /* Push the last key as open brace */ open_brace[brace_index++] = xbc_node_index(last_parent); if (brace_index >= XBC_DEPTH_MAX) return xbc_parse_error("Exceed max depth of braces", p); return 0; } static int __init __xbc_close_brace(char *p) { brace_index--; if (!last_parent || brace_index < 0 || (open_brace[brace_index] != xbc_node_index(last_parent))) return xbc_parse_error("Unexpected closing brace", p); if (brace_index == 0) last_parent = NULL; else last_parent = &xbc_nodes[open_brace[brace_index - 1]]; return 0; } /* * Return delimiter or error, no node added. As same as lib/cmdline.c, * you can use " around spaces, but can't escape " for value. 
*/ static int __init __xbc_parse_value(char **__v, char **__n) { char *p, *v = *__v; int c, quotes = 0; v = skip_spaces(v); while (*v == '#') { v = skip_comment(v); v = skip_spaces(v); } if (*v == '"' || *v == '\'') { quotes = *v; v++; } p = v - 1; while ((c = *++p)) { if (!isprint(c) && !isspace(c)) return xbc_parse_error("Non printable value", p); if (quotes) { if (c != quotes) continue; quotes = 0; *p++ = '\0'; p = skip_spaces_until_newline(p); c = *p; if (c && !strchr(",;\n#}", c)) return xbc_parse_error("No value delimiter", p); if (*p) p++; break; } if (strchr(",;\n#}", c)) { *p++ = '\0'; v = strim(v); break; } } if (quotes) return xbc_parse_error("No closing quotes", p); if (c == '#') { p = skip_comment(p); c = '\n'; /* A comment must be treated as a newline */ } *__n = p; *__v = v; return c; } static int __init xbc_parse_array(char **__v) { struct xbc_node *node; char *next; int c = 0; if (last_parent->child) last_parent = xbc_node_get_child(last_parent); do { c = __xbc_parse_value(__v, &next); if (c < 0) return c; node = xbc_add_child(*__v, XBC_VALUE); if (!node) return -ENOMEM; *__v = next; } while (c == ','); node->child = 0; return c; } static inline __init struct xbc_node *find_match_node(struct xbc_node *node, char *k) { while (node) { if (!strcmp(xbc_node_get_data(node), k)) break; node = xbc_node_get_next(node); } return node; } static int __init __xbc_add_key(char *k) { struct xbc_node *node, *child; if (!xbc_valid_keyword(k)) return xbc_parse_error("Invalid keyword", k); if (unlikely(xbc_node_num == 0)) goto add_node; if (!last_parent) /* the first level */ node = find_match_node(xbc_nodes, k); else { child = xbc_node_get_child(last_parent); /* Since the value node is the first child, skip it. */ if (child && xbc_node_is_value(child)) child = xbc_node_get_next(child); node = find_match_node(child, k); } if (node) last_parent = node; else { add_node: node = xbc_add_child(k, XBC_KEY); if (!node) return -ENOMEM; } return 0; } static int __init __xbc_parse_keys(char *k) { char *p; int ret; k = strim(k); while ((p = strchr(k, '.'))) { *p++ = '\0'; ret = __xbc_add_key(k); if (ret) return ret; k = p; } return __xbc_add_key(k); } static int __init xbc_parse_kv(char **k, char *v, int op) { struct xbc_node *prev_parent = last_parent; struct xbc_node *child; char *next; int c, ret; ret = __xbc_parse_keys(*k); if (ret) return ret; c = __xbc_parse_value(&v, &next); if (c < 0) return c; child = xbc_node_get_child(last_parent); if (child && xbc_node_is_value(child)) { if (op == '=') return xbc_parse_error("Value is redefined", v); if (op == ':') { unsigned short nidx = child->next; xbc_init_node(child, v, XBC_VALUE); child->next = nidx; /* keep subkeys */ goto array; } /* op must be '+' */ last_parent = xbc_last_child(child); } /* The value node should always be the first child */ if (!xbc_add_head_sibling(v, XBC_VALUE)) return -ENOMEM; array: if (c == ',') { /* Array */ c = xbc_parse_array(&next); if (c < 0) return c; } last_parent = prev_parent; if (c == '}') { ret = __xbc_close_brace(next - 1); if (ret < 0) return ret; } *k = next; return 0; } static int __init xbc_parse_key(char **k, char *n) { struct xbc_node *prev_parent = last_parent; int ret; *k = strim(*k); if (**k != '\0') { ret = __xbc_parse_keys(*k); if (ret) return ret; last_parent = prev_parent; } *k = n; return 0; } static int __init xbc_open_brace(char **k, char *n) { int ret; ret = __xbc_parse_keys(*k); if (ret) return ret; *k = n; return __xbc_open_brace(n - 1); } static int __init xbc_close_brace(char **k, char *n) { 
int ret; ret = xbc_parse_key(k, n); if (ret) return ret; /* k is updated in xbc_parse_key() */ return __xbc_close_brace(n - 1); } static int __init xbc_verify_tree(void) { int i, depth, len, wlen; struct xbc_node *n, *m; /* Brace closing */ if (brace_index) { n = &xbc_nodes[open_brace[brace_index]]; return xbc_parse_error("Brace is not closed", xbc_node_get_data(n)); } /* Empty tree */ if (xbc_node_num == 0) { xbc_parse_error("Empty config", xbc_data); return -ENOENT; } for (i = 0; i < xbc_node_num; i++) { if (xbc_nodes[i].next > xbc_node_num) { return xbc_parse_error("No closing brace", xbc_node_get_data(xbc_nodes + i)); } } /* Key tree limitation check */ n = &xbc_nodes[0]; depth = 1; len = 0; while (n) { wlen = strlen(xbc_node_get_data(n)) + 1; len += wlen; if (len > XBC_KEYLEN_MAX) return xbc_parse_error("Too long key length", xbc_node_get_data(n)); m = xbc_node_get_child(n); if (m && xbc_node_is_key(m)) { n = m; depth++; if (depth > XBC_DEPTH_MAX) return xbc_parse_error("Too many key words", xbc_node_get_data(n)); continue; } len -= wlen; m = xbc_node_get_next(n); while (!m) { n = xbc_node_get_parent(n); if (!n) break; len -= strlen(xbc_node_get_data(n)) + 1; depth--; m = xbc_node_get_next(n); } n = m; } return 0; } /* Need to setup xbc_data and xbc_nodes before call this. */ static int __init xbc_parse_tree(void) { char *p, *q; int ret = 0, c; last_parent = NULL; p = xbc_data; do { q = strpbrk(p, "{}=+;:\n#"); if (!q) { p = skip_spaces(p); if (*p != '\0') ret = xbc_parse_error("No delimiter", p); break; } c = *q; *q++ = '\0'; switch (c) { case ':': case '+': if (*q++ != '=') { ret = xbc_parse_error(c == '+' ? "Wrong '+' operator" : "Wrong ':' operator", q - 2); break; } fallthrough; case '=': ret = xbc_parse_kv(&p, q, c); break; case '{': ret = xbc_open_brace(&p, q); break; case '#': q = skip_comment(q); fallthrough; case ';': case '\n': ret = xbc_parse_key(&p, q); break; case '}': ret = xbc_close_brace(&p, q); break; } } while (!ret); return ret; } /** * xbc_exit() - Clean up all parsed bootconfig * * This clears all data structures of parsed bootconfig on memory. * If you need to reuse xbc_init() with new boot config, you can * use this. */ void __init xbc_exit(void) { xbc_free_mem(xbc_data, xbc_data_size); xbc_data = NULL; xbc_data_size = 0; xbc_node_num = 0; xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX); xbc_nodes = NULL; brace_index = 0; } /** * xbc_init() - Parse given XBC file and build XBC internal tree * @data: The boot config text original data * @size: The size of @data * @emsg: A pointer of const char * to store the error message * @epos: A pointer of int to store the error position * * This parses the boot config text in @data. @size must be smaller * than XBC_DATA_MAX. * Return the number of stored nodes (>0) if succeeded, or -errno * if there is any error. * In error cases, @emsg will be updated with an error message and * @epos will be updated with the error position which is the byte offset * of @buf. If the error is not a parser error, @epos will be -1. */ int __init xbc_init(const char *data, size_t size, const char **emsg, int *epos) { int ret; if (epos) *epos = -1; if (xbc_data) { if (emsg) *emsg = "Bootconfig is already initialized"; return -EBUSY; } if (size > XBC_DATA_MAX || size == 0) { if (emsg) *emsg = size ? 
"Config data is too big" : "Config data is empty"; return -ERANGE; } xbc_data = xbc_alloc_mem(size + 1); if (!xbc_data) { if (emsg) *emsg = "Failed to allocate bootconfig data"; return -ENOMEM; } memcpy(xbc_data, data, size); xbc_data[size] = '\0'; xbc_data_size = size + 1; xbc_nodes = xbc_alloc_mem(sizeof(struct xbc_node) * XBC_NODE_MAX); if (!xbc_nodes) { if (emsg) *emsg = "Failed to allocate bootconfig nodes"; xbc_exit(); return -ENOMEM; } memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX); ret = xbc_parse_tree(); if (!ret) ret = xbc_verify_tree(); if (ret < 0) { if (epos) *epos = xbc_err_pos; if (emsg) *emsg = xbc_err_msg; xbc_exit(); } else ret = xbc_node_num; return ret; }
linux-master
lib/bootconfig.c
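/*
 * A minimal usage sketch (hypothetical; not part of lib/bootconfig.c): once
 * xbc_init() has parsed the bootconfig text, an early-boot consumer can look
 * up a single dotted key or walk every leaf.  foo_use_bootconfig() and the
 * "foo.mode" key are made up for illustration.
 */
#include <linux/bootconfig.h>
#include <linux/printk.h>

static int __init foo_use_bootconfig(void)
{
        struct xbc_node *leaf = NULL;
        const char *val;
        char key[XBC_KEYLEN_MAX];

        /* Value of one dotted key ("" if the key exists but has no value). */
        val = xbc_node_find_value(NULL, "foo.mode", NULL);
        if (val)
                pr_info("foo.mode = %s\n", val);

        /* Walk every leaf key/value pair in the whole tree. */
        while ((val = xbc_node_find_next_key_value(NULL, &leaf))) {
                if (xbc_node_compose_key_after(NULL, leaf, key, sizeof(key)) > 0)
                        pr_info("%s = %s\n", key, val);
        }
        return 0;
}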
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include <linux/io.h>

#include <asm/page.h>
#ifdef CONFIG_MIPS
#include <asm/bootinfo.h>
#endif

struct foo {
        unsigned int bar;
};

static struct foo *foo;

static int __init test_debug_virtual_init(void)
{
        phys_addr_t pa;
        void *va;

        va = (void *)VMALLOC_START;
        pa = virt_to_phys(va);

        pr_info("PA: %pa for VA: 0x%lx\n", &pa, (unsigned long)va);

        foo = kzalloc(sizeof(*foo), GFP_KERNEL);
        if (!foo)
                return -ENOMEM;

        pa = virt_to_phys(foo);
        va = foo;
        pr_info("PA: %pa for VA: 0x%lx\n", &pa, (unsigned long)va);

        return 0;
}
module_init(test_debug_virtual_init);

static void __exit test_debug_virtual_exit(void)
{
        kfree(foo);
}
module_exit(test_debug_virtual_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Test module for CONFIG_DEBUG_VIRTUAL");
linux-master
lib/test_debug_virtual.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/memory.h>

#include "notifier-error-inject.h"

static int priority;
module_param(priority, int, 0);
MODULE_PARM_DESC(priority, "specify memory notifier priority");

static struct notifier_err_inject memory_notifier_err_inject = {
        .actions = {
                { NOTIFIER_ERR_INJECT_ACTION(MEM_GOING_ONLINE) },
                { NOTIFIER_ERR_INJECT_ACTION(MEM_GOING_OFFLINE) },
                {}
        }
};

static struct dentry *dir;

static int err_inject_init(void)
{
        int err;

        dir = notifier_err_inject_init("memory", notifier_err_inject_dir,
                                        &memory_notifier_err_inject, priority);
        if (IS_ERR(dir))
                return PTR_ERR(dir);

        err = register_memory_notifier(&memory_notifier_err_inject.nb);
        if (err)
                debugfs_remove_recursive(dir);

        return err;
}

static void err_inject_exit(void)
{
        unregister_memory_notifier(&memory_notifier_err_inject.nb);
        debugfs_remove_recursive(dir);
}

module_init(err_inject_init);
module_exit(err_inject_exit);

MODULE_DESCRIPTION("memory notifier error injection module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Akinobu Mita <[email protected]>");
linux-master
lib/memory-notifier-error-inject.c
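/*
 * Usage note (a sketch, not from the module itself): with this module loaded,
 * the error to inject is set through debugfs before triggering the hotplug
 * operation.  The exact paths below are an assumption based on the shared
 * notifier-error-inject helpers creating
 * /sys/kernel/debug/notifier-error-inject/<name>/actions/<ACTION>/error:
 *
 *      # make MEM_GOING_OFFLINE fail with -EBUSY (-16)
 *      echo -16 > /sys/kernel/debug/notifier-error-inject/memory/actions/MEM_GOING_OFFLINE/error
 *      echo offline > /sys/devices/system/memory/memoryXX/state
 */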
// SPDX-License-Identifier: GPL-2.0 /* * KUnit test for the linear_ranges helper. * * Copyright (C) 2020, ROHM Semiconductors. * Author: Matti Vaittinen <[email protected]> */ #include <kunit/test.h> #include <linux/linear_range.h> /* First things first. I deeply dislike unit-tests. I have seen all the hell * breaking loose when people who think the unit tests are "the silver bullet" * to kill bugs get to decide how a company should implement testing strategy... * * Believe me, it may get _really_ ridiculous. It is tempting to think that * walking through all the possible execution branches will nail down 100% of * bugs. This may lead to ideas about demands to get certain % of "test * coverage" - measured as line coverage. And that is one of the worst things * you can do. * * Ask people to provide line coverage and they do. I've seen clever tools * which generate test cases to test the existing functions - and by default * these tools expect code to be correct and just generate checks which are * passing when ran against current code-base. Run this generator and you'll get * tests that do not test code is correct but just verify nothing changes. * Problem is that testing working code is pointless. And if it is not * working, your test must not assume it is working. You won't catch any bugs * by such tests. What you can do is to generate a huge amount of tests. * Especially if you were are asked to proivde 100% line-coverage x_x. So what * does these tests - which are not finding any bugs now - do? * * They add inertia to every future development. I think it was Terry Pratchet * who wrote someone having same impact as thick syrup has to chronometre. * Excessive amount of unit-tests have this effect to development. If you do * actually find _any_ bug from code in such environment and try fixing it... * ...chances are you also need to fix the test cases. In sunny day you fix one * test. But I've done refactoring which resulted 500+ broken tests (which had * really zero value other than proving to managers that we do do "quality")... * * After this being said - there are situations where UTs can be handy. If you * have algorithms which take some input and should produce output - then you * can implement few, carefully selected simple UT-cases which test this. I've * previously used this for example for netlink and device-tree data parsing * functions. Feed some data examples to functions and verify the output is as * expected. I am not covering all the cases but I will see the logic should be * working. * * Here we also do some minor testing. I don't want to go through all branches * or test more or less obvious things - but I want to see the main logic is * working. And I definitely don't want to add 500+ test cases that break when * some simple fix is done x_x. So - let's only add few, well selected tests * which ensure as much logic is good as possible. 
*/ /* * Test Range 1: * selectors: 2 3 4 5 6 * values (5): 10 20 30 40 50 * * Test Range 2: * selectors: 7 8 9 10 * values (4): 100 150 200 250 */ #define RANGE1_MIN 10 #define RANGE1_MIN_SEL 2 #define RANGE1_STEP 10 /* 2, 3, 4, 5, 6 */ static const unsigned int range1_sels[] = { RANGE1_MIN_SEL, RANGE1_MIN_SEL + 1, RANGE1_MIN_SEL + 2, RANGE1_MIN_SEL + 3, RANGE1_MIN_SEL + 4 }; /* 10, 20, 30, 40, 50 */ static const unsigned int range1_vals[] = { RANGE1_MIN, RANGE1_MIN + RANGE1_STEP, RANGE1_MIN + RANGE1_STEP * 2, RANGE1_MIN + RANGE1_STEP * 3, RANGE1_MIN + RANGE1_STEP * 4 }; #define RANGE2_MIN 100 #define RANGE2_MIN_SEL 7 #define RANGE2_STEP 50 /* 7, 8, 9, 10 */ static const unsigned int range2_sels[] = { RANGE2_MIN_SEL, RANGE2_MIN_SEL + 1, RANGE2_MIN_SEL + 2, RANGE2_MIN_SEL + 3 }; /* 100, 150, 200, 250 */ static const unsigned int range2_vals[] = { RANGE2_MIN, RANGE2_MIN + RANGE2_STEP, RANGE2_MIN + RANGE2_STEP * 2, RANGE2_MIN + RANGE2_STEP * 3 }; #define RANGE1_NUM_VALS (ARRAY_SIZE(range1_vals)) #define RANGE2_NUM_VALS (ARRAY_SIZE(range2_vals)) #define RANGE_NUM_VALS (RANGE1_NUM_VALS + RANGE2_NUM_VALS) #define RANGE1_MAX_SEL (RANGE1_MIN_SEL + RANGE1_NUM_VALS - 1) #define RANGE1_MAX_VAL (range1_vals[RANGE1_NUM_VALS - 1]) #define RANGE2_MAX_SEL (RANGE2_MIN_SEL + RANGE2_NUM_VALS - 1) #define RANGE2_MAX_VAL (range2_vals[RANGE2_NUM_VALS - 1]) #define SMALLEST_SEL RANGE1_MIN_SEL #define SMALLEST_VAL RANGE1_MIN static struct linear_range testr[] = { LINEAR_RANGE(RANGE1_MIN, RANGE1_MIN_SEL, RANGE1_MAX_SEL, RANGE1_STEP), LINEAR_RANGE(RANGE2_MIN, RANGE2_MIN_SEL, RANGE2_MAX_SEL, RANGE2_STEP), }; static void range_test_get_value(struct kunit *test) { int ret, i; unsigned int sel, val; for (i = 0; i < RANGE1_NUM_VALS; i++) { sel = range1_sels[i]; ret = linear_range_get_value_array(&testr[0], 2, sel, &val); KUNIT_EXPECT_EQ(test, 0, ret); KUNIT_EXPECT_EQ(test, val, range1_vals[i]); } for (i = 0; i < RANGE2_NUM_VALS; i++) { sel = range2_sels[i]; ret = linear_range_get_value_array(&testr[0], 2, sel, &val); KUNIT_EXPECT_EQ(test, 0, ret); KUNIT_EXPECT_EQ(test, val, range2_vals[i]); } ret = linear_range_get_value_array(&testr[0], 2, sel + 1, &val); KUNIT_EXPECT_NE(test, 0, ret); } static void range_test_get_selector_high(struct kunit *test) { int ret, i; unsigned int sel; bool found; for (i = 0; i < RANGE1_NUM_VALS; i++) { ret = linear_range_get_selector_high(&testr[0], range1_vals[i], &sel, &found); KUNIT_EXPECT_EQ(test, 0, ret); KUNIT_EXPECT_EQ(test, sel, range1_sels[i]); KUNIT_EXPECT_TRUE(test, found); } ret = linear_range_get_selector_high(&testr[0], RANGE1_MAX_VAL + 1, &sel, &found); KUNIT_EXPECT_LE(test, ret, 0); ret = linear_range_get_selector_high(&testr[0], RANGE1_MIN - 1, &sel, &found); KUNIT_EXPECT_EQ(test, 0, ret); KUNIT_EXPECT_FALSE(test, found); KUNIT_EXPECT_EQ(test, sel, range1_sels[0]); } static void range_test_get_value_amount(struct kunit *test) { int ret; ret = linear_range_values_in_range_array(&testr[0], 2); KUNIT_EXPECT_EQ(test, (int)RANGE_NUM_VALS, ret); } static void range_test_get_selector_low(struct kunit *test) { int i, ret; unsigned int sel; bool found; for (i = 0; i < RANGE1_NUM_VALS; i++) { ret = linear_range_get_selector_low_array(&testr[0], 2, range1_vals[i], &sel, &found); KUNIT_EXPECT_EQ(test, 0, ret); KUNIT_EXPECT_EQ(test, sel, range1_sels[i]); KUNIT_EXPECT_TRUE(test, found); } for (i = 0; i < RANGE2_NUM_VALS; i++) { ret = linear_range_get_selector_low_array(&testr[0], 2, range2_vals[i], &sel, &found); KUNIT_EXPECT_EQ(test, 0, ret); KUNIT_EXPECT_EQ(test, sel, range2_sels[i]); 
KUNIT_EXPECT_TRUE(test, found); } /* * Seek value greater than range max => get_selector_*_low should * return Ok - but set found to false as value is not in range */ ret = linear_range_get_selector_low_array(&testr[0], 2, range2_vals[RANGE2_NUM_VALS - 1] + 1, &sel, &found); KUNIT_EXPECT_EQ(test, 0, ret); KUNIT_EXPECT_EQ(test, sel, range2_sels[RANGE2_NUM_VALS - 1]); KUNIT_EXPECT_FALSE(test, found); } static struct kunit_case range_test_cases[] = { KUNIT_CASE(range_test_get_value_amount), KUNIT_CASE(range_test_get_selector_high), KUNIT_CASE(range_test_get_selector_low), KUNIT_CASE(range_test_get_value), {}, }; static struct kunit_suite range_test_module = { .name = "linear-ranges-test", .test_cases = range_test_cases, }; kunit_test_suites(&range_test_module); MODULE_LICENSE("GPL");
linux-master
lib/test_linear_ranges.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* */

#include <linux/export.h>
#include <linux/libgcc.h>

long long notrace __ashldi3(long long u, word_type b)
{
        DWunion uu, w;
        word_type bm;

        if (b == 0)
                return u;

        uu.ll = u;
        bm = 32 - b;

        if (bm <= 0) {
                w.s.low = 0;
                w.s.high = (unsigned int) uu.s.low << -bm;
        } else {
                const unsigned int carries = (unsigned int) uu.s.low >> bm;

                w.s.low = (unsigned int) uu.s.low << b;
                w.s.high = ((unsigned int) uu.s.high << b) | carries;
        }

        return w.ll;
}
EXPORT_SYMBOL(__ashldi3);
linux-master
lib/ashldi3.c
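/*
 * Worked example (illustrative only, not part of lib/ashldi3.c): on a 32-bit
 * machine, __ashldi3(0x0000000180000001LL, 4) splits the operand into
 * high = 0x00000001 and low = 0x80000001.  Since b = 4 < 32, bm = 28,
 * carries = low >> 28 = 0x8, the new high word is (0x1 << 4) | 0x8 = 0x18 and
 * the new low word is low << 4 = 0x00000010, giving 0x0000001800000010,
 * exactly what a native 64-bit "<< 4" would produce.  For b >= 32 the whole
 * low word shifts into the high word instead (the "bm <= 0" branch).
 */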
// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/ctype.h>
#include <linux/export.h>
#include <linux/hex.h>

bool mac_pton(const char *s, u8 *mac)
{
        size_t maxlen = 3 * ETH_ALEN - 1;
        int i;

        /* XX:XX:XX:XX:XX:XX */
        if (strnlen(s, maxlen) < maxlen)
                return false;

        /* Don't dirty result unless string is valid MAC. */
        for (i = 0; i < ETH_ALEN; i++) {
                if (!isxdigit(s[i * 3]) || !isxdigit(s[i * 3 + 1]))
                        return false;
                if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':')
                        return false;
        }
        for (i = 0; i < ETH_ALEN; i++) {
                mac[i] = (hex_to_bin(s[i * 3]) << 4) | hex_to_bin(s[i * 3 + 1]);
        }
        return true;
}
EXPORT_SYMBOL(mac_pton);
linux-master
lib/net_utils.c
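/*
 * A minimal usage sketch (hypothetical; not part of lib/net_utils.c): parsing
 * a MAC address supplied as a string, e.g. from a module parameter.
 * foo_parse_mac() is a made-up helper, and the include list is indicative
 * (the mac_pton() declaration is assumed to come in via the usual headers);
 * "%pM" is the standard printk format for MAC addresses.
 */
#include <linux/kernel.h>
#include <linux/if_ether.h>
#include <linux/errno.h>
#include <linux/printk.h>

static int foo_parse_mac(const char *str)
{
        u8 addr[ETH_ALEN];

        /* Accepts only the full "XX:XX:XX:XX:XX:XX" form. */
        if (!mac_pton(str, addr))
                return -EINVAL;

        pr_info("parsed MAC %pM\n", addr);
        return 0;
}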
// SPDX-License-Identifier: GPL-2.0-only
/* identifiers for device / performance-differentiated memory regions */
#include <linux/idr.h>
#include <linux/types.h>
#include <linux/memregion.h>

static DEFINE_IDA(memregion_ids);

int memregion_alloc(gfp_t gfp)
{
        return ida_alloc(&memregion_ids, gfp);
}
EXPORT_SYMBOL(memregion_alloc);

void memregion_free(int id)
{
        ida_free(&memregion_ids, id);
}
EXPORT_SYMBOL(memregion_free);
linux-master
lib/memregion.c
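/*
 * A minimal usage sketch (hypothetical; not part of lib/memregion.c): a
 * driver grabbing a unique "regionN"-style id for a device it is about to
 * register, and releasing it on teardown.  foo_create_region() is made up.
 */
#include <linux/memregion.h>
#include <linux/gfp.h>

static int foo_create_region(void)
{
        int id = memregion_alloc(GFP_KERNEL);

        if (id < 0)
                return id;

        /* ... e.g. name the device "region%d" with id and register it ... */

        memregion_free(id);
        return 0;
}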
// SPDX-License-Identifier: GPL-2.0 /* * base64.c - RFC4648-compliant base64 encoding * * Copyright (c) 2020 Hannes Reinecke, SUSE * * Based on the base64url routines from fs/crypto/fname.c * (which are using the URL-safe base64 encoding), * modified to use the standard coding table from RFC4648 section 4. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/export.h> #include <linux/string.h> #include <linux/base64.h> static const char base64_table[65] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; /** * base64_encode() - base64-encode some binary data * @src: the binary data to encode * @srclen: the length of @src in bytes * @dst: (output) the base64-encoded string. Not NUL-terminated. * * Encodes data using base64 encoding, i.e. the "Base 64 Encoding" specified * by RFC 4648, including the '='-padding. * * Return: the length of the resulting base64-encoded string in bytes. */ int base64_encode(const u8 *src, int srclen, char *dst) { u32 ac = 0; int bits = 0; int i; char *cp = dst; for (i = 0; i < srclen; i++) { ac = (ac << 8) | src[i]; bits += 8; do { bits -= 6; *cp++ = base64_table[(ac >> bits) & 0x3f]; } while (bits >= 6); } if (bits) { *cp++ = base64_table[(ac << (6 - bits)) & 0x3f]; bits -= 6; } while (bits < 0) { *cp++ = '='; bits += 2; } return cp - dst; } EXPORT_SYMBOL_GPL(base64_encode); /** * base64_decode() - base64-decode a string * @src: the string to decode. Doesn't need to be NUL-terminated. * @srclen: the length of @src in bytes * @dst: (output) the decoded binary data * * Decodes a string using base64 encoding, i.e. the "Base 64 Encoding" * specified by RFC 4648, including the '='-padding. * * This implementation hasn't been optimized for performance. * * Return: the length of the resulting decoded binary data in bytes, * or -1 if the string isn't a valid base64 string. */ int base64_decode(const char *src, int srclen, u8 *dst) { u32 ac = 0; int bits = 0; int i; u8 *bp = dst; for (i = 0; i < srclen; i++) { const char *p = strchr(base64_table, src[i]); if (src[i] == '=') { ac = (ac << 6); bits += 6; if (bits >= 8) bits -= 8; continue; } if (p == NULL || src[i] == 0) return -1; ac = (ac << 6) | (p - base64_table); bits += 6; if (bits >= 8) { bits -= 8; *bp++ = (u8)(ac >> bits); } } if (ac & ((1 << bits) - 1)) return -1; return bp - dst; } EXPORT_SYMBOL_GPL(base64_decode);
linux-master
lib/base64.c
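As a concrete check of the encoder above: the 3-byte input "foo" packs into four 6-bit groups 25, 38, 61 and 47, which index the RFC 4648 table as "Zm9v" with no '=' padding. A hypothetical in-kernel round-trip sketch:

/* Hypothetical round-trip demo; base64_demo() is not a real kernel function. */
#include <linux/base64.h>
#include <linux/printk.h>
#include <linux/string.h>
#include <linux/types.h>

static void base64_demo(void)
{
	const u8 msg[] = "foo";			/* 3 bytes => no '=' padding */
	char enc[8];
	u8 dec[8];
	int enc_len, dec_len;

	enc_len = base64_encode(msg, 3, enc);		/* 4, "Zm9v" */
	dec_len = base64_decode(enc, enc_len, dec);	/* 3, "foo" */

	pr_info("encoded \"%.*s\", decoded %d bytes, match=%d\n",
		enc_len, enc, dec_len, !memcmp(dec, msg, 3));
}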
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* **************************************************************************** * * "DHRYSTONE" Benchmark Program * ----------------------------- * * Version: C, Version 2.1 * * File: dhry_1.c (part 2 of 3) * * Date: May 25, 1988 * * Author: Reinhold P. Weicker * **************************************************************************** */ #include "dhry.h" #include <linux/ktime.h> #include <linux/slab.h> #include <linux/string.h> /* Global Variables: */ int Int_Glob; char Ch_1_Glob; static Rec_Pointer Ptr_Glob, Next_Ptr_Glob; static Boolean Bool_Glob; static char Ch_2_Glob; static int Arr_1_Glob[50]; static int Arr_2_Glob[50][50]; static void Proc_3(Rec_Pointer *Ptr_Ref_Par) /******************/ /* executed once */ /* Ptr_Ref_Par becomes Ptr_Glob */ { if (Ptr_Glob) { /* then, executed */ *Ptr_Ref_Par = Ptr_Glob->Ptr_Comp; } Proc_7(10, Int_Glob, &Ptr_Glob->variant.var_1.Int_Comp); } /* Proc_3 */ static void Proc_1(Rec_Pointer Ptr_Val_Par) /******************/ /* executed once */ { Rec_Pointer Next_Record = Ptr_Val_Par->Ptr_Comp; /* == Ptr_Glob_Next */ /* Local variable, initialized with Ptr_Val_Par->Ptr_Comp, */ /* corresponds to "rename" in Ada, "with" in Pascal */ *Ptr_Val_Par->Ptr_Comp = *Ptr_Glob; Ptr_Val_Par->variant.var_1.Int_Comp = 5; Next_Record->variant.var_1.Int_Comp = Ptr_Val_Par->variant.var_1.Int_Comp; Next_Record->Ptr_Comp = Ptr_Val_Par->Ptr_Comp; Proc_3(&Next_Record->Ptr_Comp); /* Ptr_Val_Par->Ptr_Comp->Ptr_Comp == Ptr_Glob->Ptr_Comp */ if (Next_Record->Discr == Ident_1) { /* then, executed */ Next_Record->variant.var_1.Int_Comp = 6; Proc_6(Ptr_Val_Par->variant.var_1.Enum_Comp, &Next_Record->variant.var_1.Enum_Comp); Next_Record->Ptr_Comp = Ptr_Glob->Ptr_Comp; Proc_7(Next_Record->variant.var_1.Int_Comp, 10, &Next_Record->variant.var_1.Int_Comp); } else { /* not executed */ *Ptr_Val_Par = *Ptr_Val_Par->Ptr_Comp; } } /* Proc_1 */ static void Proc_2(One_Fifty *Int_Par_Ref) /******************/ /* executed once */ /* *Int_Par_Ref == 1, becomes 4 */ { One_Fifty Int_Loc; Enumeration Enum_Loc; Int_Loc = *Int_Par_Ref + 10; do { /* executed once */ if (Ch_1_Glob == 'A') { /* then, executed */ Int_Loc -= 1; *Int_Par_Ref = Int_Loc - Int_Glob; Enum_Loc = Ident_1; } /* if */ } while (Enum_Loc != Ident_1); /* true */ } /* Proc_2 */ static void Proc_4(void) /*******/ /* executed once */ { Boolean Bool_Loc; Bool_Loc = Ch_1_Glob == 'A'; Bool_Glob = Bool_Loc | Bool_Glob; Ch_2_Glob = 'B'; } /* Proc_4 */ static void Proc_5(void) /*******/ /* executed once */ { Ch_1_Glob = 'A'; Bool_Glob = false; } /* Proc_5 */ int dhry(int n) /*****/ /* main program, corresponds to procedures */ /* Main and Proc_0 in the Ada version */ { One_Fifty Int_1_Loc; One_Fifty Int_2_Loc; One_Fifty Int_3_Loc; char Ch_Index; Enumeration Enum_Loc; Str_30 Str_1_Loc; Str_30 Str_2_Loc; int Run_Index; int Number_Of_Runs; ktime_t Begin_Time, End_Time; u32 User_Time; /* Initializations */ Next_Ptr_Glob = (Rec_Pointer)kzalloc(sizeof(Rec_Type), GFP_ATOMIC); if (!Next_Ptr_Glob) return -ENOMEM; Ptr_Glob = (Rec_Pointer)kzalloc(sizeof(Rec_Type), GFP_ATOMIC); if (!Ptr_Glob) { kfree(Next_Ptr_Glob); return -ENOMEM; } Ptr_Glob->Ptr_Comp = Next_Ptr_Glob; Ptr_Glob->Discr = Ident_1; Ptr_Glob->variant.var_1.Enum_Comp = Ident_3; Ptr_Glob->variant.var_1.Int_Comp = 40; strcpy(Ptr_Glob->variant.var_1.Str_Comp, "DHRYSTONE PROGRAM, SOME STRING"); strcpy(Str_1_Loc, "DHRYSTONE PROGRAM, 1'ST STRING"); Arr_2_Glob[8][7] = 10; /* Was missing in published program. 
Without this statement, */ /* Arr_2_Glob[8][7] would have an undefined value. */ /* Warning: With 16-Bit processors and Number_Of_Runs > 32000, */ /* overflow may occur for this array element. */ pr_debug("Dhrystone Benchmark, Version 2.1 (Language: C)\n"); Number_Of_Runs = n; pr_debug("Execution starts, %d runs through Dhrystone\n", Number_Of_Runs); /***************/ /* Start timer */ /***************/ Begin_Time = ktime_get(); for (Run_Index = 1; Run_Index <= Number_Of_Runs; ++Run_Index) { Proc_5(); Proc_4(); /* Ch_1_Glob == 'A', Ch_2_Glob == 'B', Bool_Glob == true */ Int_1_Loc = 2; Int_2_Loc = 3; strcpy(Str_2_Loc, "DHRYSTONE PROGRAM, 2'ND STRING"); Enum_Loc = Ident_2; Bool_Glob = !Func_2(Str_1_Loc, Str_2_Loc); /* Bool_Glob == 1 */ while (Int_1_Loc < Int_2_Loc) { /* loop body executed once */ Int_3_Loc = 5 * Int_1_Loc - Int_2_Loc; /* Int_3_Loc == 7 */ Proc_7(Int_1_Loc, Int_2_Loc, &Int_3_Loc); /* Int_3_Loc == 7 */ Int_1_Loc += 1; } /* while */ /* Int_1_Loc == 3, Int_2_Loc == 3, Int_3_Loc == 7 */ Proc_8(Arr_1_Glob, Arr_2_Glob, Int_1_Loc, Int_3_Loc); /* Int_Glob == 5 */ Proc_1(Ptr_Glob); for (Ch_Index = 'A'; Ch_Index <= Ch_2_Glob; ++Ch_Index) { /* loop body executed twice */ if (Enum_Loc == Func_1(Ch_Index, 'C')) { /* then, not executed */ Proc_6(Ident_1, &Enum_Loc); strcpy(Str_2_Loc, "DHRYSTONE PROGRAM, 3'RD STRING"); Int_2_Loc = Run_Index; Int_Glob = Run_Index; } } /* Int_1_Loc == 3, Int_2_Loc == 3, Int_3_Loc == 7 */ Int_2_Loc = Int_2_Loc * Int_1_Loc; Int_1_Loc = Int_2_Loc / Int_3_Loc; Int_2_Loc = 7 * (Int_2_Loc - Int_3_Loc) - Int_1_Loc; /* Int_1_Loc == 1, Int_2_Loc == 13, Int_3_Loc == 7 */ Proc_2(&Int_1_Loc); /* Int_1_Loc == 5 */ } /* loop "for Run_Index" */ /**************/ /* Stop timer */ /**************/ End_Time = ktime_get(); #define dhry_assert_int_eq(val, expected) \ if (val != expected) \ pr_err("%s: %d (FAIL, expected %d)\n", #val, val, \ expected); \ else \ pr_debug("%s: %d (OK)\n", #val, val) #define dhry_assert_char_eq(val, expected) \ if (val != expected) \ pr_err("%s: %c (FAIL, expected %c)\n", #val, val, \ expected); \ else \ pr_debug("%s: %c (OK)\n", #val, val) #define dhry_assert_string_eq(val, expected) \ if (strcmp(val, expected)) \ pr_err("%s: %s (FAIL, expected %s)\n", #val, val, \ expected); \ else \ pr_debug("%s: %s (OK)\n", #val, val) pr_debug("Execution ends\n"); pr_debug("Final values of the variables used in the benchmark:\n"); dhry_assert_int_eq(Int_Glob, 5); dhry_assert_int_eq(Bool_Glob, 1); dhry_assert_char_eq(Ch_1_Glob, 'A'); dhry_assert_char_eq(Ch_2_Glob, 'B'); dhry_assert_int_eq(Arr_1_Glob[8], 7); dhry_assert_int_eq(Arr_2_Glob[8][7], Number_Of_Runs + 10); pr_debug("Ptr_Comp: %px\n", Ptr_Glob->Ptr_Comp); dhry_assert_int_eq(Ptr_Glob->Discr, 0); dhry_assert_int_eq(Ptr_Glob->variant.var_1.Enum_Comp, 2); dhry_assert_int_eq(Ptr_Glob->variant.var_1.Int_Comp, 17); dhry_assert_string_eq(Ptr_Glob->variant.var_1.Str_Comp, "DHRYSTONE PROGRAM, SOME STRING"); if (Next_Ptr_Glob->Ptr_Comp != Ptr_Glob->Ptr_Comp) pr_err("Next_Ptr_Glob->Ptr_Comp: %px (expected %px)\n", Next_Ptr_Glob->Ptr_Comp, Ptr_Glob->Ptr_Comp); else pr_debug("Next_Ptr_Glob->Ptr_Comp: %px\n", Next_Ptr_Glob->Ptr_Comp); dhry_assert_int_eq(Next_Ptr_Glob->Discr, 0); dhry_assert_int_eq(Next_Ptr_Glob->variant.var_1.Enum_Comp, 1); dhry_assert_int_eq(Next_Ptr_Glob->variant.var_1.Int_Comp, 18); dhry_assert_string_eq(Next_Ptr_Glob->variant.var_1.Str_Comp, "DHRYSTONE PROGRAM, SOME STRING"); dhry_assert_int_eq(Int_1_Loc, 5); dhry_assert_int_eq(Int_2_Loc, 13); dhry_assert_int_eq(Int_3_Loc, 7); 
dhry_assert_int_eq(Enum_Loc, 1); dhry_assert_string_eq(Str_1_Loc, "DHRYSTONE PROGRAM, 1'ST STRING"); dhry_assert_string_eq(Str_2_Loc, "DHRYSTONE PROGRAM, 2'ND STRING"); User_Time = ktime_to_ms(ktime_sub(End_Time, Begin_Time)); kfree(Ptr_Glob); kfree(Next_Ptr_Glob); /* Measurements should last at least 2 seconds */ if (User_Time < 2 * MSEC_PER_SEC) return -EAGAIN; return div_u64(mul_u32_u32(MSEC_PER_SEC, Number_Of_Runs), User_Time); }
linux-master
lib/dhry_1.c
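dhry() returns -EAGAIN when a run finishes in under the required 2 seconds, -ENOMEM if the record allocations fail, and otherwise the number of Dhrystones per second. A hypothetical caller sketch along those lines; the doubling loop, the starting iteration count, the local "dhry.h" include (assuming the caller sits next to it in lib/), and the conventional VAX 11/780 divisor of 1757 used to derive DMIPS are all assumptions of this example, not something defined in dhry_1.c:

/* Hypothetical caller of dhry(); not the in-tree benchmark runner. */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include "dhry.h"

static int __init dhry_demo_init(void)
{
	int iterations = 1000;
	int ret;

	/* keep doubling until a run lasts long enough to be measured */
	while ((ret = dhry(iterations)) == -EAGAIN)
		iterations *= 2;
	if (ret < 0)
		return ret;		/* e.g. -ENOMEM from the allocations */
	pr_info("%d iterations: %d Dhrystones/s, ~%d DMIPS\n",
		iterations, ret, ret / 1757);
	return 0;
}
module_init(dhry_demo_init);
MODULE_LICENSE("GPL");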
// SPDX-License-Identifier: GPL-2.0-only /* * Unified UUID/GUID definition * * Copyright (C) 2009, 2016 Intel Corp. * Huang Ying <[email protected]> */ #include <linux/kernel.h> #include <linux/ctype.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/uuid.h> #include <linux/random.h> const guid_t guid_null; EXPORT_SYMBOL(guid_null); const uuid_t uuid_null; EXPORT_SYMBOL(uuid_null); const u8 guid_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15}; const u8 uuid_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}; /** * generate_random_uuid - generate a random UUID * @uuid: where to put the generated UUID * * Random UUID interface * * Used to create a Boot ID or a filesystem UUID/GUID, but can be * useful for other kernel drivers. */ void generate_random_uuid(unsigned char uuid[16]) { get_random_bytes(uuid, 16); /* Set UUID version to 4 --- truly random generation */ uuid[6] = (uuid[6] & 0x0F) | 0x40; /* Set the UUID variant to DCE */ uuid[8] = (uuid[8] & 0x3F) | 0x80; } EXPORT_SYMBOL(generate_random_uuid); void generate_random_guid(unsigned char guid[16]) { get_random_bytes(guid, 16); /* Set GUID version to 4 --- truly random generation */ guid[7] = (guid[7] & 0x0F) | 0x40; /* Set the GUID variant to DCE */ guid[8] = (guid[8] & 0x3F) | 0x80; } EXPORT_SYMBOL(generate_random_guid); static void __uuid_gen_common(__u8 b[16]) { get_random_bytes(b, 16); /* reversion 0b10 */ b[8] = (b[8] & 0x3F) | 0x80; } void guid_gen(guid_t *lu) { __uuid_gen_common(lu->b); /* version 4 : random generation */ lu->b[7] = (lu->b[7] & 0x0F) | 0x40; } EXPORT_SYMBOL_GPL(guid_gen); void uuid_gen(uuid_t *bu) { __uuid_gen_common(bu->b); /* version 4 : random generation */ bu->b[6] = (bu->b[6] & 0x0F) | 0x40; } EXPORT_SYMBOL_GPL(uuid_gen); /** * uuid_is_valid - checks if a UUID string is valid * @uuid: UUID string to check * * Description: * It checks if the UUID string is following the format: * xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx * * where x is a hex digit. * * Return: true if input is valid UUID string. */ bool uuid_is_valid(const char *uuid) { unsigned int i; for (i = 0; i < UUID_STRING_LEN; i++) { if (i == 8 || i == 13 || i == 18 || i == 23) { if (uuid[i] != '-') return false; } else if (!isxdigit(uuid[i])) { return false; } } return true; } EXPORT_SYMBOL(uuid_is_valid); static int __uuid_parse(const char *uuid, __u8 b[16], const u8 ei[16]) { static const u8 si[16] = {0,2,4,6,9,11,14,16,19,21,24,26,28,30,32,34}; unsigned int i; if (!uuid_is_valid(uuid)) return -EINVAL; for (i = 0; i < 16; i++) { int hi = hex_to_bin(uuid[si[i] + 0]); int lo = hex_to_bin(uuid[si[i] + 1]); b[ei[i]] = (hi << 4) | lo; } return 0; } int guid_parse(const char *uuid, guid_t *u) { return __uuid_parse(uuid, u->b, guid_index); } EXPORT_SYMBOL(guid_parse); int uuid_parse(const char *uuid, uuid_t *u) { return __uuid_parse(uuid, u->b, uuid_index); } EXPORT_SYMBOL(uuid_parse);
linux-master
lib/uuid.c
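__uuid_parse() reads the textual byte pairs left to right and stores pair i at b[ei[i]], so uuid_parse() keeps string order while guid_parse() stores the first three fields little-endian. A small standalone userspace illustration of that swizzle (the harness itself is hypothetical):

/* Hypothetical userspace illustration of the guid_index byte swizzle. */
#include <stdio.h>

static const unsigned char guid_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};

int main(void)
{
	/* bytes of "00112233-4455-6677-8899-aabbccddeeff" in string order */
	const unsigned char text_order[16] = {
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
		0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
	};
	unsigned char guid[16];
	int i;

	/* pair i is stored at b[ei[i]], exactly as in __uuid_parse() */
	for (i = 0; i < 16; i++)
		guid[guid_index[i]] = text_order[i];

	for (i = 0; i < 16; i++)
		printf("%02x", guid[i]);
	printf("\n");	/* prints 33221100554477668899aabbccddeeff */
	return 0;
}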
// SPDX-License-Identifier: GPL-2.0-only /* * Generic polynomial calculation using integer coefficients. * * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC * * Authors: * Maxim Kaurkin <[email protected]> * Serge Semin <[email protected]> * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/polynomial.h> /* * Originally this was part of drivers/hwmon/bt1-pvt.c. * There the following conversion is used and should serve as an example here: * * The original translation formulae of the temperature (in degrees of Celsius) * to PVT data and vice-versa are following: * * N = 1.8322e-8*(T^4) + 2.343e-5*(T^3) + 8.7018e-3*(T^2) + 3.9269*(T^1) + * 1.7204e2 * T = -1.6743e-11*(N^4) + 8.1542e-8*(N^3) + -1.8201e-4*(N^2) + * 3.1020e-1*(N^1) - 4.838e1 * * where T = [-48.380, 147.438]C and N = [0, 1023]. * * They must be accordingly altered to be suitable for the integer arithmetics. * The technique is called 'factor redistribution', which just makes sure the * multiplications and divisions are made so to have a result of the operations * within the integer numbers limit. In addition we need to translate the * formulae to accept millidegrees of Celsius. Here what they look like after * the alterations: * * N = (18322e-20*(T^4) + 2343e-13*(T^3) + 87018e-9*(T^2) + 39269e-3*T + * 17204e2) / 1e4 * T = -16743e-12*(D^4) + 81542e-9*(D^3) - 182010e-6*(D^2) + 310200e-3*D - * 48380 * where T = [-48380, 147438] mC and N = [0, 1023]. * * static const struct polynomial poly_temp_to_N = { * .total_divider = 10000, * .terms = { * {4, 18322, 10000, 10000}, * {3, 2343, 10000, 10}, * {2, 87018, 10000, 10}, * {1, 39269, 1000, 1}, * {0, 1720400, 1, 1} * } * }; * * static const struct polynomial poly_N_to_temp = { * .total_divider = 1, * .terms = { * {4, -16743, 1000, 1}, * {3, 81542, 1000, 1}, * {2, -182010, 1000, 1}, * {1, 310200, 1000, 1}, * {0, -48380, 1, 1} * } * }; */ /** * polynomial_calc - calculate a polynomial using integer arithmetic * * @poly: pointer to the descriptor of the polynomial * @data: input value of the polynimal * * Calculate the result of a polynomial using only integer arithmetic. For * this to work without too much loss of precision the coefficients has to * be altered. This is called factor redistribution. * * Returns the result of the polynomial calculation. */ long polynomial_calc(const struct polynomial *poly, long data) { const struct polynomial_term *term = poly->terms; long total_divider = poly->total_divider ?: 1; long tmp, ret = 0; int deg; /* * Here is the polynomial calculation function, which performs the * redistributed terms calculations. It's pretty straightforward. * We walk over each degree term up to the free one, and perform * the redistributed multiplication of the term coefficient, its * divider (as for the rationale fraction representation), data * power and the rational fraction divider leftover. Then all of * this is collected in a total sum variable, which value is * normalized by the total divider before being returned. */ do { tmp = term->coef; for (deg = 0; deg < term->deg; ++deg) tmp = mult_frac(tmp, data, term->divider); ret += tmp / term->divider_leftover; } while ((term++)->deg); return ret / total_divider; } EXPORT_SYMBOL_GPL(polynomial_calc); MODULE_DESCRIPTION("Generic polynomial calculations"); MODULE_LICENSE("GPL");
linux-master
lib/polynomial.c
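Below is a standalone userspace sketch of the same redistributed evaluation, using the poly_N_to_temp terms quoted in the comment block above. At N = 0 it reproduces the documented lower bound of -48380 mC exactly, and at N = 1023 it lands near the documented 147438 mC upper bound. The harness and helper names are illustrative, and plain 64-bit multiplication stands in for the kernel's overflow-avoiding mult_frac():

/* Hypothetical userspace sketch of the redistributed-term evaluation. */
#include <stdio.h>

struct term { int deg; long coef; long divider; long divider_leftover; };

static const struct term poly_N_to_temp[] = {
	{ 4,  -16743, 1000, 1 },
	{ 3,   81542, 1000, 1 },
	{ 2, -182010, 1000, 1 },
	{ 1,  310200, 1000, 1 },
	{ 0,  -48380,    1, 1 },
};

static long poly_calc(const struct term *t, int nterms, long total_divider,
		      long data)
{
	long ret = 0;
	int i, d;

	for (i = 0; i < nterms; i++) {
		long long tmp = t[i].coef;

		/* same factor redistribution as polynomial_calc() above */
		for (d = 0; d < t[i].deg; d++)
			tmp = tmp * data / t[i].divider;
		ret += tmp / t[i].divider_leftover;
	}
	return ret / total_divider;
}

int main(void)
{
	/* N = 0 maps to exactly -48380 mC, the documented lower bound */
	printf("T(0)    = %ld mC\n", poly_calc(poly_N_to_temp, 5, 1, 0));
	/* N = 1023 lands near the documented upper bound of 147438 mC */
	printf("T(1023) = %ld mC\n", poly_calc(poly_N_to_temp, 5, 1, 1023));
	return 0;
}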
/* * Generic binary BCH encoding/decoding library * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Copyright © 2011 Parrot S.A. * * Author: Ivan Djelic <[email protected]> * * Description: * * This library provides runtime configurable encoding/decoding of binary * Bose-Chaudhuri-Hocquenghem (BCH) codes. * * Call bch_init to get a pointer to a newly allocated bch_control structure for * the given m (Galois field order), t (error correction capability) and * (optional) primitive polynomial parameters. * * Call bch_encode to compute and store ecc parity bytes to a given buffer. * Call bch_decode to detect and locate errors in received data. * * On systems supporting hw BCH features, intermediate results may be provided * to bch_decode in order to skip certain steps. See bch_decode() documentation * for details. * * Option CONFIG_BCH_CONST_PARAMS can be used to force fixed values of * parameters m and t; thus allowing extra compiler optimizations and providing * better (up to 2x) encoding performance. Using this option makes sense when * (m,t) are fixed and known in advance, e.g. when using BCH error correction * on a particular NAND flash device. * * Algorithmic details: * * Encoding is performed by processing 32 input bits in parallel, using 4 * remainder lookup tables. * * The final stage of decoding involves the following internal steps: * a. Syndrome computation * b. Error locator polynomial computation using Berlekamp-Massey algorithm * c. Error locator root finding (by far the most expensive step) * * In this implementation, step c is not performed using the usual Chien search. * Instead, an alternative approach described in [1] is used. It consists in * factoring the error locator polynomial using the Berlekamp Trace algorithm * (BTA) down to a certain degree (4), after which ad hoc low-degree polynomial * solving techniques [2] are used. The resulting algorithm, called BTZ, yields * much better performance than Chien search for usual (m,t) values (typically * m >= 13, t < 32, see [1]). * * [1] B. Biswas, V. Herbert. Efficient root finding of polynomials over fields * of characteristic 2, in: Western European Workshop on Research in Cryptology * - WEWoRC 2009, Graz, Austria, LNCS, Springer, July 2009, to appear. * [2] [Zin96] V.A. Zinoviev. On the solution of equations of degree 10 over * finite fields GF(2^q). In Rapport de recherche INRIA no 2829, 1996. 
*/ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/bitops.h> #include <linux/bitrev.h> #include <asm/byteorder.h> #include <linux/bch.h> #if defined(CONFIG_BCH_CONST_PARAMS) #define GF_M(_p) (CONFIG_BCH_CONST_M) #define GF_T(_p) (CONFIG_BCH_CONST_T) #define GF_N(_p) ((1 << (CONFIG_BCH_CONST_M))-1) #define BCH_MAX_M (CONFIG_BCH_CONST_M) #define BCH_MAX_T (CONFIG_BCH_CONST_T) #else #define GF_M(_p) ((_p)->m) #define GF_T(_p) ((_p)->t) #define GF_N(_p) ((_p)->n) #define BCH_MAX_M 15 /* 2KB */ #define BCH_MAX_T 64 /* 64 bit correction */ #endif #define BCH_ECC_WORDS(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 32) #define BCH_ECC_BYTES(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 8) #define BCH_ECC_MAX_WORDS DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 32) #ifndef dbg #define dbg(_fmt, args...) do {} while (0) #endif /* * represent a polynomial over GF(2^m) */ struct gf_poly { unsigned int deg; /* polynomial degree */ unsigned int c[]; /* polynomial terms */ }; /* given its degree, compute a polynomial size in bytes */ #define GF_POLY_SZ(_d) (sizeof(struct gf_poly)+((_d)+1)*sizeof(unsigned int)) /* polynomial of degree 1 */ struct gf_poly_deg1 { struct gf_poly poly; unsigned int c[2]; }; static u8 swap_bits(struct bch_control *bch, u8 in) { if (!bch->swap_bits) return in; return bitrev8(in); } /* * same as bch_encode(), but process input data one byte at a time */ static void bch_encode_unaligned(struct bch_control *bch, const unsigned char *data, unsigned int len, uint32_t *ecc) { int i; const uint32_t *p; const int l = BCH_ECC_WORDS(bch)-1; while (len--) { u8 tmp = swap_bits(bch, *data++); p = bch->mod8_tab + (l+1)*(((ecc[0] >> 24)^(tmp)) & 0xff); for (i = 0; i < l; i++) ecc[i] = ((ecc[i] << 8)|(ecc[i+1] >> 24))^(*p++); ecc[l] = (ecc[l] << 8)^(*p); } } /* * convert ecc bytes to aligned, zero-padded 32-bit ecc words */ static void load_ecc8(struct bch_control *bch, uint32_t *dst, const uint8_t *src) { uint8_t pad[4] = {0, 0, 0, 0}; unsigned int i, nwords = BCH_ECC_WORDS(bch)-1; for (i = 0; i < nwords; i++, src += 4) dst[i] = ((u32)swap_bits(bch, src[0]) << 24) | ((u32)swap_bits(bch, src[1]) << 16) | ((u32)swap_bits(bch, src[2]) << 8) | swap_bits(bch, src[3]); memcpy(pad, src, BCH_ECC_BYTES(bch)-4*nwords); dst[nwords] = ((u32)swap_bits(bch, pad[0]) << 24) | ((u32)swap_bits(bch, pad[1]) << 16) | ((u32)swap_bits(bch, pad[2]) << 8) | swap_bits(bch, pad[3]); } /* * convert 32-bit ecc words to ecc bytes */ static void store_ecc8(struct bch_control *bch, uint8_t *dst, const uint32_t *src) { uint8_t pad[4]; unsigned int i, nwords = BCH_ECC_WORDS(bch)-1; for (i = 0; i < nwords; i++) { *dst++ = swap_bits(bch, src[i] >> 24); *dst++ = swap_bits(bch, src[i] >> 16); *dst++ = swap_bits(bch, src[i] >> 8); *dst++ = swap_bits(bch, src[i]); } pad[0] = swap_bits(bch, src[nwords] >> 24); pad[1] = swap_bits(bch, src[nwords] >> 16); pad[2] = swap_bits(bch, src[nwords] >> 8); pad[3] = swap_bits(bch, src[nwords]); memcpy(dst, pad, BCH_ECC_BYTES(bch)-4*nwords); } /** * bch_encode - calculate BCH ecc parity of data * @bch: BCH control structure * @data: data to encode * @len: data length in bytes * @ecc: ecc parity data, must be initialized by caller * * The @ecc parity array is used both as input and output parameter, in order to * allow incremental computations. It should be of the size indicated by member * @ecc_bytes of @bch, and should be initialized to 0 before the first call. 
* * The exact number of computed ecc parity bits is given by member @ecc_bits of * @bch; it may be less than m*t for large values of t. */ void bch_encode(struct bch_control *bch, const uint8_t *data, unsigned int len, uint8_t *ecc) { const unsigned int l = BCH_ECC_WORDS(bch)-1; unsigned int i, mlen; unsigned long m; uint32_t w, r[BCH_ECC_MAX_WORDS]; const size_t r_bytes = BCH_ECC_WORDS(bch) * sizeof(*r); const uint32_t * const tab0 = bch->mod8_tab; const uint32_t * const tab1 = tab0 + 256*(l+1); const uint32_t * const tab2 = tab1 + 256*(l+1); const uint32_t * const tab3 = tab2 + 256*(l+1); const uint32_t *pdata, *p0, *p1, *p2, *p3; if (WARN_ON(r_bytes > sizeof(r))) return; if (ecc) { /* load ecc parity bytes into internal 32-bit buffer */ load_ecc8(bch, bch->ecc_buf, ecc); } else { memset(bch->ecc_buf, 0, r_bytes); } /* process first unaligned data bytes */ m = ((unsigned long)data) & 3; if (m) { mlen = (len < (4-m)) ? len : 4-m; bch_encode_unaligned(bch, data, mlen, bch->ecc_buf); data += mlen; len -= mlen; } /* process 32-bit aligned data words */ pdata = (uint32_t *)data; mlen = len/4; data += 4*mlen; len -= 4*mlen; memcpy(r, bch->ecc_buf, r_bytes); /* * split each 32-bit word into 4 polynomials of weight 8 as follows: * * 31 ...24 23 ...16 15 ... 8 7 ... 0 * xxxxxxxx yyyyyyyy zzzzzzzz tttttttt * tttttttt mod g = r0 (precomputed) * zzzzzzzz 00000000 mod g = r1 (precomputed) * yyyyyyyy 00000000 00000000 mod g = r2 (precomputed) * xxxxxxxx 00000000 00000000 00000000 mod g = r3 (precomputed) * xxxxxxxx yyyyyyyy zzzzzzzz tttttttt mod g = r0^r1^r2^r3 */ while (mlen--) { /* input data is read in big-endian format */ w = cpu_to_be32(*pdata++); if (bch->swap_bits) w = (u32)swap_bits(bch, w) | ((u32)swap_bits(bch, w >> 8) << 8) | ((u32)swap_bits(bch, w >> 16) << 16) | ((u32)swap_bits(bch, w >> 24) << 24); w ^= r[0]; p0 = tab0 + (l+1)*((w >> 0) & 0xff); p1 = tab1 + (l+1)*((w >> 8) & 0xff); p2 = tab2 + (l+1)*((w >> 16) & 0xff); p3 = tab3 + (l+1)*((w >> 24) & 0xff); for (i = 0; i < l; i++) r[i] = r[i+1]^p0[i]^p1[i]^p2[i]^p3[i]; r[l] = p0[l]^p1[l]^p2[l]^p3[l]; } memcpy(bch->ecc_buf, r, r_bytes); /* process last unaligned bytes */ if (len) bch_encode_unaligned(bch, data, len, bch->ecc_buf); /* store ecc parity bytes into original parity buffer */ if (ecc) store_ecc8(bch, ecc, bch->ecc_buf); } EXPORT_SYMBOL_GPL(bch_encode); static inline int modulo(struct bch_control *bch, unsigned int v) { const unsigned int n = GF_N(bch); while (v >= n) { v -= n; v = (v & n) + (v >> GF_M(bch)); } return v; } /* * shorter and faster modulo function, only works when v < 2N. */ static inline int mod_s(struct bch_control *bch, unsigned int v) { const unsigned int n = GF_N(bch); return (v < n) ? v : v-n; } static inline int deg(unsigned int poly) { /* polynomial degree is the most-significant bit index */ return fls(poly)-1; } static inline int parity(unsigned int x) { /* * public domain code snippet, lifted from * http://www-graphics.stanford.edu/~seander/bithacks.html */ x ^= x >> 1; x ^= x >> 2; x = (x & 0x11111111U) * 0x11111111U; return (x >> 28) & 1; } /* Galois field basic operations: multiply, divide, inverse, etc. */ static inline unsigned int gf_mul(struct bch_control *bch, unsigned int a, unsigned int b) { return (a && b) ? bch->a_pow_tab[mod_s(bch, bch->a_log_tab[a]+ bch->a_log_tab[b])] : 0; } static inline unsigned int gf_sqr(struct bch_control *bch, unsigned int a) { return a ? 
bch->a_pow_tab[mod_s(bch, 2*bch->a_log_tab[a])] : 0; } static inline unsigned int gf_div(struct bch_control *bch, unsigned int a, unsigned int b) { return a ? bch->a_pow_tab[mod_s(bch, bch->a_log_tab[a]+ GF_N(bch)-bch->a_log_tab[b])] : 0; } static inline unsigned int gf_inv(struct bch_control *bch, unsigned int a) { return bch->a_pow_tab[GF_N(bch)-bch->a_log_tab[a]]; } static inline unsigned int a_pow(struct bch_control *bch, int i) { return bch->a_pow_tab[modulo(bch, i)]; } static inline int a_log(struct bch_control *bch, unsigned int x) { return bch->a_log_tab[x]; } static inline int a_ilog(struct bch_control *bch, unsigned int x) { return mod_s(bch, GF_N(bch)-bch->a_log_tab[x]); } /* * compute 2t syndromes of ecc polynomial, i.e. ecc(a^j) for j=1..2t */ static void compute_syndromes(struct bch_control *bch, uint32_t *ecc, unsigned int *syn) { int i, j, s; unsigned int m; uint32_t poly; const int t = GF_T(bch); s = bch->ecc_bits; /* make sure extra bits in last ecc word are cleared */ m = ((unsigned int)s) & 31; if (m) ecc[s/32] &= ~((1u << (32-m))-1); memset(syn, 0, 2*t*sizeof(*syn)); /* compute v(a^j) for j=1 .. 2t-1 */ do { poly = *ecc++; s -= 32; while (poly) { i = deg(poly); for (j = 0; j < 2*t; j += 2) syn[j] ^= a_pow(bch, (j+1)*(i+s)); poly ^= (1 << i); } } while (s > 0); /* v(a^(2j)) = v(a^j)^2 */ for (j = 0; j < t; j++) syn[2*j+1] = gf_sqr(bch, syn[j]); } static void gf_poly_copy(struct gf_poly *dst, struct gf_poly *src) { memcpy(dst, src, GF_POLY_SZ(src->deg)); } static int compute_error_locator_polynomial(struct bch_control *bch, const unsigned int *syn) { const unsigned int t = GF_T(bch); const unsigned int n = GF_N(bch); unsigned int i, j, tmp, l, pd = 1, d = syn[0]; struct gf_poly *elp = bch->elp; struct gf_poly *pelp = bch->poly_2t[0]; struct gf_poly *elp_copy = bch->poly_2t[1]; int k, pp = -1; memset(pelp, 0, GF_POLY_SZ(2*t)); memset(elp, 0, GF_POLY_SZ(2*t)); pelp->deg = 0; pelp->c[0] = 1; elp->deg = 0; elp->c[0] = 1; /* use simplified binary Berlekamp-Massey algorithm */ for (i = 0; (i < t) && (elp->deg <= t); i++) { if (d) { k = 2*i-pp; gf_poly_copy(elp_copy, elp); /* e[i+1](X) = e[i](X)+di*dp^-1*X^2(i-p)*e[p](X) */ tmp = a_log(bch, d)+n-a_log(bch, pd); for (j = 0; j <= pelp->deg; j++) { if (pelp->c[j]) { l = a_log(bch, pelp->c[j]); elp->c[j+k] ^= a_pow(bch, tmp+l); } } /* compute l[i+1] = max(l[i]->c[l[p]+2*(i-p]) */ tmp = pelp->deg+k; if (tmp > elp->deg) { elp->deg = tmp; gf_poly_copy(pelp, elp_copy); pd = d; pp = 2*i; } } /* di+1 = S(2i+3)+elp[i+1].1*S(2i+2)+...+elp[i+1].lS(2i+3-l) */ if (i < t-1) { d = syn[2*i+2]; for (j = 1; j <= elp->deg; j++) d ^= gf_mul(bch, elp->c[j], syn[2*i+2-j]); } } dbg("elp=%s\n", gf_poly_str(elp)); return (elp->deg > t) ? 
-1 : (int)elp->deg; } /* * solve a m x m linear system in GF(2) with an expected number of solutions, * and return the number of found solutions */ static int solve_linear_system(struct bch_control *bch, unsigned int *rows, unsigned int *sol, int nsol) { const int m = GF_M(bch); unsigned int tmp, mask; int rem, c, r, p, k, param[BCH_MAX_M]; k = 0; mask = 1 << m; /* Gaussian elimination */ for (c = 0; c < m; c++) { rem = 0; p = c-k; /* find suitable row for elimination */ for (r = p; r < m; r++) { if (rows[r] & mask) { if (r != p) { tmp = rows[r]; rows[r] = rows[p]; rows[p] = tmp; } rem = r+1; break; } } if (rem) { /* perform elimination on remaining rows */ tmp = rows[p]; for (r = rem; r < m; r++) { if (rows[r] & mask) rows[r] ^= tmp; } } else { /* elimination not needed, store defective row index */ param[k++] = c; } mask >>= 1; } /* rewrite system, inserting fake parameter rows */ if (k > 0) { p = k; for (r = m-1; r >= 0; r--) { if ((r > m-1-k) && rows[r]) /* system has no solution */ return 0; rows[r] = (p && (r == param[p-1])) ? p--, 1u << (m-r) : rows[r-p]; } } if (nsol != (1 << k)) /* unexpected number of solutions */ return 0; for (p = 0; p < nsol; p++) { /* set parameters for p-th solution */ for (c = 0; c < k; c++) rows[param[c]] = (rows[param[c]] & ~1)|((p >> c) & 1); /* compute unique solution */ tmp = 0; for (r = m-1; r >= 0; r--) { mask = rows[r] & (tmp|1); tmp |= parity(mask) << (m-r); } sol[p] = tmp >> 1; } return nsol; } /* * this function builds and solves a linear system for finding roots of a degree * 4 affine monic polynomial X^4+aX^2+bX+c over GF(2^m). */ static int find_affine4_roots(struct bch_control *bch, unsigned int a, unsigned int b, unsigned int c, unsigned int *roots) { int i, j, k; const int m = GF_M(bch); unsigned int mask = 0xff, t, rows[16] = {0,}; j = a_log(bch, b); k = a_log(bch, a); rows[0] = c; /* build linear system to solve X^4+aX^2+bX+c = 0 */ for (i = 0; i < m; i++) { rows[i+1] = bch->a_pow_tab[4*i]^ (a ? bch->a_pow_tab[mod_s(bch, k)] : 0)^ (b ? bch->a_pow_tab[mod_s(bch, j)] : 0); j++; k += 2; } /* * transpose 16x16 matrix before passing it to linear solver * warning: this code assumes m < 16 */ for (j = 8; j != 0; j >>= 1, mask ^= (mask << j)) { for (k = 0; k < 16; k = (k+j+1) & ~j) { t = ((rows[k] >> j)^rows[k+j]) & mask; rows[k] ^= (t << j); rows[k+j] ^= t; } } return solve_linear_system(bch, rows, roots, 4); } /* * compute root r of a degree 1 polynomial over GF(2^m) (returned as log(1/r)) */ static int find_poly_deg1_roots(struct bch_control *bch, struct gf_poly *poly, unsigned int *roots) { int n = 0; if (poly->c[0]) /* poly[X] = bX+c with c!=0, root=c/b */ roots[n++] = mod_s(bch, GF_N(bch)-bch->a_log_tab[poly->c[0]]+ bch->a_log_tab[poly->c[1]]); return n; } /* * compute roots of a degree 2 polynomial over GF(2^m) */ static int find_poly_deg2_roots(struct bch_control *bch, struct gf_poly *poly, unsigned int *roots) { int n = 0, i, l0, l1, l2; unsigned int u, v, r; if (poly->c[0] && poly->c[1]) { l0 = bch->a_log_tab[poly->c[0]]; l1 = bch->a_log_tab[poly->c[1]]; l2 = bch->a_log_tab[poly->c[2]]; /* using z=a/bX, transform aX^2+bX+c into z^2+z+u (u=ac/b^2) */ u = a_pow(bch, l0+l2+2*(GF_N(bch)-l1)); /* * let u = sum(li.a^i) i=0..m-1; then compute r = sum(li.xi): * r^2+r = sum(li.(xi^2+xi)) = sum(li.(a^i+Tr(a^i).a^k)) = * u + sum(li.Tr(a^i).a^k) = u+a^k.Tr(sum(li.a^i)) = u+a^k.Tr(u) * i.e. 
r and r+1 are roots iff Tr(u)=0 */ r = 0; v = u; while (v) { i = deg(v); r ^= bch->xi_tab[i]; v ^= (1 << i); } /* verify root */ if ((gf_sqr(bch, r)^r) == u) { /* reverse z=a/bX transformation and compute log(1/r) */ roots[n++] = modulo(bch, 2*GF_N(bch)-l1- bch->a_log_tab[r]+l2); roots[n++] = modulo(bch, 2*GF_N(bch)-l1- bch->a_log_tab[r^1]+l2); } } return n; } /* * compute roots of a degree 3 polynomial over GF(2^m) */ static int find_poly_deg3_roots(struct bch_control *bch, struct gf_poly *poly, unsigned int *roots) { int i, n = 0; unsigned int a, b, c, a2, b2, c2, e3, tmp[4]; if (poly->c[0]) { /* transform polynomial into monic X^3 + a2X^2 + b2X + c2 */ e3 = poly->c[3]; c2 = gf_div(bch, poly->c[0], e3); b2 = gf_div(bch, poly->c[1], e3); a2 = gf_div(bch, poly->c[2], e3); /* (X+a2)(X^3+a2X^2+b2X+c2) = X^4+aX^2+bX+c (affine) */ c = gf_mul(bch, a2, c2); /* c = a2c2 */ b = gf_mul(bch, a2, b2)^c2; /* b = a2b2 + c2 */ a = gf_sqr(bch, a2)^b2; /* a = a2^2 + b2 */ /* find the 4 roots of this affine polynomial */ if (find_affine4_roots(bch, a, b, c, tmp) == 4) { /* remove a2 from final list of roots */ for (i = 0; i < 4; i++) { if (tmp[i] != a2) roots[n++] = a_ilog(bch, tmp[i]); } } } return n; } /* * compute roots of a degree 4 polynomial over GF(2^m) */ static int find_poly_deg4_roots(struct bch_control *bch, struct gf_poly *poly, unsigned int *roots) { int i, l, n = 0; unsigned int a, b, c, d, e = 0, f, a2, b2, c2, e4; if (poly->c[0] == 0) return 0; /* transform polynomial into monic X^4 + aX^3 + bX^2 + cX + d */ e4 = poly->c[4]; d = gf_div(bch, poly->c[0], e4); c = gf_div(bch, poly->c[1], e4); b = gf_div(bch, poly->c[2], e4); a = gf_div(bch, poly->c[3], e4); /* use Y=1/X transformation to get an affine polynomial */ if (a) { /* first, eliminate cX by using z=X+e with ae^2+c=0 */ if (c) { /* compute e such that e^2 = c/a */ f = gf_div(bch, c, a); l = a_log(bch, f); l += (l & 1) ? GF_N(bch) : 0; e = a_pow(bch, l/2); /* * use transformation z=X+e: * z^4+e^4 + a(z^3+ez^2+e^2z+e^3) + b(z^2+e^2) +cz+ce+d * z^4 + az^3 + (ae+b)z^2 + (ae^2+c)z+e^4+be^2+ae^3+ce+d * z^4 + az^3 + (ae+b)z^2 + e^4+be^2+d * z^4 + az^3 + b'z^2 + d' */ d = a_pow(bch, 2*l)^gf_mul(bch, b, f)^d; b = gf_mul(bch, a, e)^b; } /* now, use Y=1/X to get Y^4 + b/dY^2 + a/dY + 1/d */ if (d == 0) /* assume all roots have multiplicity 1 */ return 0; c2 = gf_inv(bch, d); b2 = gf_div(bch, a, d); a2 = gf_div(bch, b, d); } else { /* polynomial is already affine */ c2 = d; b2 = c; a2 = b; } /* find the 4 roots of this affine polynomial */ if (find_affine4_roots(bch, a2, b2, c2, roots) == 4) { for (i = 0; i < 4; i++) { /* post-process roots (reverse transformations) */ f = a ? gf_inv(bch, roots[i]) : roots[i]; roots[i] = a_ilog(bch, f^e); } n = 4; } return n; } /* * build monic, log-based representation of a polynomial */ static void gf_poly_logrep(struct bch_control *bch, const struct gf_poly *a, int *rep) { int i, d = a->deg, l = GF_N(bch)-a_log(bch, a->c[a->deg]); /* represent 0 values with -1; warning, rep[d] is not set to 1 */ for (i = 0; i < d; i++) rep[i] = a->c[i] ? 
mod_s(bch, a_log(bch, a->c[i])+l) : -1; } /* * compute polynomial Euclidean division remainder in GF(2^m)[X] */ static void gf_poly_mod(struct bch_control *bch, struct gf_poly *a, const struct gf_poly *b, int *rep) { int la, p, m; unsigned int i, j, *c = a->c; const unsigned int d = b->deg; if (a->deg < d) return; /* reuse or compute log representation of denominator */ if (!rep) { rep = bch->cache; gf_poly_logrep(bch, b, rep); } for (j = a->deg; j >= d; j--) { if (c[j]) { la = a_log(bch, c[j]); p = j-d; for (i = 0; i < d; i++, p++) { m = rep[i]; if (m >= 0) c[p] ^= bch->a_pow_tab[mod_s(bch, m+la)]; } } } a->deg = d-1; while (!c[a->deg] && a->deg) a->deg--; } /* * compute polynomial Euclidean division quotient in GF(2^m)[X] */ static void gf_poly_div(struct bch_control *bch, struct gf_poly *a, const struct gf_poly *b, struct gf_poly *q) { if (a->deg >= b->deg) { q->deg = a->deg-b->deg; /* compute a mod b (modifies a) */ gf_poly_mod(bch, a, b, NULL); /* quotient is stored in upper part of polynomial a */ memcpy(q->c, &a->c[b->deg], (1+q->deg)*sizeof(unsigned int)); } else { q->deg = 0; q->c[0] = 0; } } /* * compute polynomial GCD (Greatest Common Divisor) in GF(2^m)[X] */ static struct gf_poly *gf_poly_gcd(struct bch_control *bch, struct gf_poly *a, struct gf_poly *b) { struct gf_poly *tmp; dbg("gcd(%s,%s)=", gf_poly_str(a), gf_poly_str(b)); if (a->deg < b->deg) { tmp = b; b = a; a = tmp; } while (b->deg > 0) { gf_poly_mod(bch, a, b, NULL); tmp = b; b = a; a = tmp; } dbg("%s\n", gf_poly_str(a)); return a; } /* * Given a polynomial f and an integer k, compute Tr(a^kX) mod f * This is used in Berlekamp Trace algorithm for splitting polynomials */ static void compute_trace_bk_mod(struct bch_control *bch, int k, const struct gf_poly *f, struct gf_poly *z, struct gf_poly *out) { const int m = GF_M(bch); int i, j; /* z contains z^2j mod f */ z->deg = 1; z->c[0] = 0; z->c[1] = bch->a_pow_tab[k]; out->deg = 0; memset(out, 0, GF_POLY_SZ(f->deg)); /* compute f log representation only once */ gf_poly_logrep(bch, f, bch->cache); for (i = 0; i < m; i++) { /* add a^(k*2^i)(z^(2^i) mod f) and compute (z^(2^i) mod f)^2 */ for (j = z->deg; j >= 0; j--) { out->c[j] ^= z->c[j]; z->c[2*j] = gf_sqr(bch, z->c[j]); z->c[2*j+1] = 0; } if (z->deg > out->deg) out->deg = z->deg; if (i < m-1) { z->deg *= 2; /* z^(2(i+1)) mod f = (z^(2^i) mod f)^2 mod f */ gf_poly_mod(bch, z, f, bch->cache); } } while (!out->c[out->deg] && out->deg) out->deg--; dbg("Tr(a^%d.X) mod f = %s\n", k, gf_poly_str(out)); } /* * factor a polynomial using Berlekamp Trace algorithm (BTA) */ static void factor_polynomial(struct bch_control *bch, int k, struct gf_poly *f, struct gf_poly **g, struct gf_poly **h) { struct gf_poly *f2 = bch->poly_2t[0]; struct gf_poly *q = bch->poly_2t[1]; struct gf_poly *tk = bch->poly_2t[2]; struct gf_poly *z = bch->poly_2t[3]; struct gf_poly *gcd; dbg("factoring %s...\n", gf_poly_str(f)); *g = f; *h = NULL; /* tk = Tr(a^k.X) mod f */ compute_trace_bk_mod(bch, k, f, z, tk); if (tk->deg > 0) { /* compute g = gcd(f, tk) (destructive operation) */ gf_poly_copy(f2, f); gcd = gf_poly_gcd(bch, f2, tk); if (gcd->deg < f->deg) { /* compute h=f/gcd(f,tk); this will modify f and q */ gf_poly_div(bch, f, gcd, q); /* store g and h in-place (clobbering f) */ *h = &((struct gf_poly_deg1 *)f)[gcd->deg].poly; gf_poly_copy(*g, gcd); gf_poly_copy(*h, q); } } } /* * find roots of a polynomial, using BTZ algorithm; see the beginning of this * file for details */ static int find_poly_roots(struct bch_control *bch, unsigned int k, struct 
gf_poly *poly, unsigned int *roots) { int cnt; struct gf_poly *f1, *f2; switch (poly->deg) { /* handle low degree polynomials with ad hoc techniques */ case 1: cnt = find_poly_deg1_roots(bch, poly, roots); break; case 2: cnt = find_poly_deg2_roots(bch, poly, roots); break; case 3: cnt = find_poly_deg3_roots(bch, poly, roots); break; case 4: cnt = find_poly_deg4_roots(bch, poly, roots); break; default: /* factor polynomial using Berlekamp Trace Algorithm (BTA) */ cnt = 0; if (poly->deg && (k <= GF_M(bch))) { factor_polynomial(bch, k, poly, &f1, &f2); if (f1) cnt += find_poly_roots(bch, k+1, f1, roots); if (f2) cnt += find_poly_roots(bch, k+1, f2, roots+cnt); } break; } return cnt; } #if defined(USE_CHIEN_SEARCH) /* * exhaustive root search (Chien) implementation - not used, included only for * reference/comparison tests */ static int chien_search(struct bch_control *bch, unsigned int len, struct gf_poly *p, unsigned int *roots) { int m; unsigned int i, j, syn, syn0, count = 0; const unsigned int k = 8*len+bch->ecc_bits; /* use a log-based representation of polynomial */ gf_poly_logrep(bch, p, bch->cache); bch->cache[p->deg] = 0; syn0 = gf_div(bch, p->c[0], p->c[p->deg]); for (i = GF_N(bch)-k+1; i <= GF_N(bch); i++) { /* compute elp(a^i) */ for (j = 1, syn = syn0; j <= p->deg; j++) { m = bch->cache[j]; if (m >= 0) syn ^= a_pow(bch, m+j*i); } if (syn == 0) { roots[count++] = GF_N(bch)-i; if (count == p->deg) break; } } return (count == p->deg) ? count : 0; } #define find_poly_roots(_p, _k, _elp, _loc) chien_search(_p, len, _elp, _loc) #endif /* USE_CHIEN_SEARCH */ /** * bch_decode - decode received codeword and find bit error locations * @bch: BCH control structure * @data: received data, ignored if @calc_ecc is provided * @len: data length in bytes, must always be provided * @recv_ecc: received ecc, if NULL then assume it was XORed in @calc_ecc * @calc_ecc: calculated ecc, if NULL then calc_ecc is computed from @data * @syn: hw computed syndrome data (if NULL, syndrome is calculated) * @errloc: output array of error locations * * Returns: * The number of errors found, or -EBADMSG if decoding failed, or -EINVAL if * invalid parameters were provided * * Depending on the available hw BCH support and the need to compute @calc_ecc * separately (using bch_encode()), this function should be called with one of * the following parameter configurations - * * by providing @data and @recv_ecc only: * bch_decode(@bch, @data, @len, @recv_ecc, NULL, NULL, @errloc) * * by providing @recv_ecc and @calc_ecc: * bch_decode(@bch, NULL, @len, @recv_ecc, @calc_ecc, NULL, @errloc) * * by providing ecc = recv_ecc XOR calc_ecc: * bch_decode(@bch, NULL, @len, NULL, ecc, NULL, @errloc) * * by providing syndrome results @syn: * bch_decode(@bch, NULL, @len, NULL, NULL, @syn, @errloc) * * Once bch_decode() has successfully returned with a positive value, error * locations returned in array @errloc should be interpreted as follows - * * if (errloc[n] >= 8*len), then n-th error is located in ecc (no need for * data correction) * * if (errloc[n] < 8*len), then n-th error is located in data and can be * corrected with statement data[errloc[n]/8] ^= 1 << (errloc[n] % 8); * * Note that this function does not perform any data correction by itself, it * merely indicates error locations. 
*/ int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len, const uint8_t *recv_ecc, const uint8_t *calc_ecc, const unsigned int *syn, unsigned int *errloc) { const unsigned int ecc_words = BCH_ECC_WORDS(bch); unsigned int nbits; int i, err, nroots; uint32_t sum; /* sanity check: make sure data length can be handled */ if (8*len > (bch->n-bch->ecc_bits)) return -EINVAL; /* if caller does not provide syndromes, compute them */ if (!syn) { if (!calc_ecc) { /* compute received data ecc into an internal buffer */ if (!data || !recv_ecc) return -EINVAL; bch_encode(bch, data, len, NULL); } else { /* load provided calculated ecc */ load_ecc8(bch, bch->ecc_buf, calc_ecc); } /* load received ecc or assume it was XORed in calc_ecc */ if (recv_ecc) { load_ecc8(bch, bch->ecc_buf2, recv_ecc); /* XOR received and calculated ecc */ for (i = 0, sum = 0; i < (int)ecc_words; i++) { bch->ecc_buf[i] ^= bch->ecc_buf2[i]; sum |= bch->ecc_buf[i]; } if (!sum) /* no error found */ return 0; } compute_syndromes(bch, bch->ecc_buf, bch->syn); syn = bch->syn; } err = compute_error_locator_polynomial(bch, syn); if (err > 0) { nroots = find_poly_roots(bch, 1, bch->elp, errloc); if (err != nroots) err = -1; } if (err > 0) { /* post-process raw error locations for easier correction */ nbits = (len*8)+bch->ecc_bits; for (i = 0; i < err; i++) { if (errloc[i] >= nbits) { err = -1; break; } errloc[i] = nbits-1-errloc[i]; if (!bch->swap_bits) errloc[i] = (errloc[i] & ~7) | (7-(errloc[i] & 7)); } } return (err >= 0) ? err : -EBADMSG; } EXPORT_SYMBOL_GPL(bch_decode); /* * generate Galois field lookup tables */ static int build_gf_tables(struct bch_control *bch, unsigned int poly) { unsigned int i, x = 1; const unsigned int k = 1 << deg(poly); /* primitive polynomial must be of degree m */ if (k != (1u << GF_M(bch))) return -1; for (i = 0; i < GF_N(bch); i++) { bch->a_pow_tab[i] = x; bch->a_log_tab[x] = i; if (i && (x == 1)) /* polynomial is not primitive (a^i=1 with 0<i<2^m-1) */ return -1; x <<= 1; if (x & k) x ^= poly; } bch->a_pow_tab[GF_N(bch)] = 1; bch->a_log_tab[0] = 0; return 0; } /* * compute generator polynomial remainder tables for fast encoding */ static void build_mod8_tables(struct bch_control *bch, const uint32_t *g) { int i, j, b, d; uint32_t data, hi, lo, *tab; const int l = BCH_ECC_WORDS(bch); const int plen = DIV_ROUND_UP(bch->ecc_bits+1, 32); const int ecclen = DIV_ROUND_UP(bch->ecc_bits, 32); memset(bch->mod8_tab, 0, 4*256*l*sizeof(*bch->mod8_tab)); for (i = 0; i < 256; i++) { /* p(X)=i is a small polynomial of weight <= 8 */ for (b = 0; b < 4; b++) { /* we want to compute (p(X).X^(8*b+deg(g))) mod g(X) */ tab = bch->mod8_tab + (b*256+i)*l; data = i << (8*b); while (data) { d = deg(data); /* subtract X^d.g(X) from p(X).X^(8*b+deg(g)) */ data ^= g[0] >> (31-d); for (j = 0; j < ecclen; j++) { hi = (d < 31) ? g[j] << (d+1) : 0; lo = (j+1 < plen) ? g[j+1] >> (31-d) : 0; tab[j] ^= hi|lo; } } } } } /* * build a base for factoring degree 2 polynomials */ static int build_deg2_base(struct bch_control *bch) { const int m = GF_M(bch); int i, j, r; unsigned int sum, x, y, remaining, ak = 0, xi[BCH_MAX_M]; /* find k s.t. 
Tr(a^k) = 1 and 0 <= k < m */ for (i = 0; i < m; i++) { for (j = 0, sum = 0; j < m; j++) sum ^= a_pow(bch, i*(1 << j)); if (sum) { ak = bch->a_pow_tab[i]; break; } } /* find xi, i=0..m-1 such that xi^2+xi = a^i+Tr(a^i).a^k */ remaining = m; memset(xi, 0, sizeof(xi)); for (x = 0; (x <= GF_N(bch)) && remaining; x++) { y = gf_sqr(bch, x)^x; for (i = 0; i < 2; i++) { r = a_log(bch, y); if (y && (r < m) && !xi[r]) { bch->xi_tab[r] = x; xi[r] = 1; remaining--; dbg("x%d = %x\n", r, x); break; } y ^= ak; } } /* should not happen but check anyway */ return remaining ? -1 : 0; } static void *bch_alloc(size_t size, int *err) { void *ptr; ptr = kmalloc(size, GFP_KERNEL); if (ptr == NULL) *err = 1; return ptr; } /* * compute generator polynomial for given (m,t) parameters. */ static uint32_t *compute_generator_polynomial(struct bch_control *bch) { const unsigned int m = GF_M(bch); const unsigned int t = GF_T(bch); int n, err = 0; unsigned int i, j, nbits, r, word, *roots; struct gf_poly *g; uint32_t *genpoly; g = bch_alloc(GF_POLY_SZ(m*t), &err); roots = bch_alloc((bch->n+1)*sizeof(*roots), &err); genpoly = bch_alloc(DIV_ROUND_UP(m*t+1, 32)*sizeof(*genpoly), &err); if (err) { kfree(genpoly); genpoly = NULL; goto finish; } /* enumerate all roots of g(X) */ memset(roots , 0, (bch->n+1)*sizeof(*roots)); for (i = 0; i < t; i++) { for (j = 0, r = 2*i+1; j < m; j++) { roots[r] = 1; r = mod_s(bch, 2*r); } } /* build generator polynomial g(X) */ g->deg = 0; g->c[0] = 1; for (i = 0; i < GF_N(bch); i++) { if (roots[i]) { /* multiply g(X) by (X+root) */ r = bch->a_pow_tab[i]; g->c[g->deg+1] = 1; for (j = g->deg; j > 0; j--) g->c[j] = gf_mul(bch, g->c[j], r)^g->c[j-1]; g->c[0] = gf_mul(bch, g->c[0], r); g->deg++; } } /* store left-justified binary representation of g(X) */ n = g->deg+1; i = 0; while (n > 0) { nbits = (n > 32) ? 32 : n; for (j = 0, word = 0; j < nbits; j++) { if (g->c[n-1-j]) word |= 1u << (31-j); } genpoly[i++] = word; n -= nbits; } bch->ecc_bits = g->deg; finish: kfree(g); kfree(roots); return genpoly; } /** * bch_init - initialize a BCH encoder/decoder * @m: Galois field order, should be in the range 5-15 * @t: maximum error correction capability, in bits * @prim_poly: user-provided primitive polynomial (or 0 to use default) * @swap_bits: swap bits within data and syndrome bytes * * Returns: * a newly allocated BCH control structure if successful, NULL otherwise * * This initialization can take some time, as lookup tables are built for fast * encoding/decoding; make sure not to call this function from a time critical * path. Usually, bch_init() should be called on module/driver init and * bch_free() should be called to release memory on exit. * * You may provide your own primitive polynomial of degree @m in argument * @prim_poly, or let bch_init() use its default polynomial. * * Once bch_init() has successfully returned a pointer to a newly allocated * BCH control structure, ecc length in bytes is given by member @ecc_bytes of * the structure. 
*/ struct bch_control *bch_init(int m, int t, unsigned int prim_poly, bool swap_bits) { int err = 0; unsigned int i, words; uint32_t *genpoly; struct bch_control *bch = NULL; const int min_m = 5; /* default primitive polynomials */ static const unsigned int prim_poly_tab[] = { 0x25, 0x43, 0x83, 0x11d, 0x211, 0x409, 0x805, 0x1053, 0x201b, 0x402b, 0x8003, }; #if defined(CONFIG_BCH_CONST_PARAMS) if ((m != (CONFIG_BCH_CONST_M)) || (t != (CONFIG_BCH_CONST_T))) { printk(KERN_ERR "bch encoder/decoder was configured to support " "parameters m=%d, t=%d only!\n", CONFIG_BCH_CONST_M, CONFIG_BCH_CONST_T); goto fail; } #endif if ((m < min_m) || (m > BCH_MAX_M)) /* * values of m greater than 15 are not currently supported; * supporting m > 15 would require changing table base type * (uint16_t) and a small patch in matrix transposition */ goto fail; if (t > BCH_MAX_T) /* * we can support larger than 64 bits if necessary, at the * cost of higher stack usage. */ goto fail; /* sanity checks */ if ((t < 1) || (m*t >= ((1 << m)-1))) /* invalid t value */ goto fail; /* select a primitive polynomial for generating GF(2^m) */ if (prim_poly == 0) prim_poly = prim_poly_tab[m-min_m]; bch = kzalloc(sizeof(*bch), GFP_KERNEL); if (bch == NULL) goto fail; bch->m = m; bch->t = t; bch->n = (1 << m)-1; words = DIV_ROUND_UP(m*t, 32); bch->ecc_bytes = DIV_ROUND_UP(m*t, 8); bch->a_pow_tab = bch_alloc((1+bch->n)*sizeof(*bch->a_pow_tab), &err); bch->a_log_tab = bch_alloc((1+bch->n)*sizeof(*bch->a_log_tab), &err); bch->mod8_tab = bch_alloc(words*1024*sizeof(*bch->mod8_tab), &err); bch->ecc_buf = bch_alloc(words*sizeof(*bch->ecc_buf), &err); bch->ecc_buf2 = bch_alloc(words*sizeof(*bch->ecc_buf2), &err); bch->xi_tab = bch_alloc(m*sizeof(*bch->xi_tab), &err); bch->syn = bch_alloc(2*t*sizeof(*bch->syn), &err); bch->cache = bch_alloc(2*t*sizeof(*bch->cache), &err); bch->elp = bch_alloc((t+1)*sizeof(struct gf_poly_deg1), &err); bch->swap_bits = swap_bits; for (i = 0; i < ARRAY_SIZE(bch->poly_2t); i++) bch->poly_2t[i] = bch_alloc(GF_POLY_SZ(2*t), &err); if (err) goto fail; err = build_gf_tables(bch, prim_poly); if (err) goto fail; /* use generator polynomial for computing encoding tables */ genpoly = compute_generator_polynomial(bch); if (genpoly == NULL) goto fail; build_mod8_tables(bch, genpoly); kfree(genpoly); err = build_deg2_base(bch); if (err) goto fail; return bch; fail: bch_free(bch); return NULL; } EXPORT_SYMBOL_GPL(bch_init); /** * bch_free - free the BCH control structure * @bch: BCH control structure to release */ void bch_free(struct bch_control *bch) { unsigned int i; if (bch) { kfree(bch->a_pow_tab); kfree(bch->a_log_tab); kfree(bch->mod8_tab); kfree(bch->ecc_buf); kfree(bch->ecc_buf2); kfree(bch->xi_tab); kfree(bch->syn); kfree(bch->cache); kfree(bch->elp); for (i = 0; i < ARRAY_SIZE(bch->poly_2t); i++) kfree(bch->poly_2t[i]); kfree(bch); } } EXPORT_SYMBOL_GPL(bch_free); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ivan Djelic <[email protected]>"); MODULE_DESCRIPTION("Binary BCH encoder/decoder");
linux-master
lib/bch.c
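A hypothetical usage sketch assembled from the bch_init()/bch_encode()/bch_decode() kerneldoc above: protect a 512-byte buffer with an (m=13, t=4) code, flip two bits, and apply the documented correction statement. The function name, buffer sizes and chosen (m, t) are illustrative only:

/* Hypothetical demo; bch_demo() is not a real kernel function. */
#include <linux/bch.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

static int bch_demo(void)
{
	const unsigned int len = 512;
	struct bch_control *bch;
	unsigned int *errloc;
	u8 *data, *ecc;
	int i, nerr, ret = 0;

	bch = bch_init(13, 4, 0, false);	/* 0 => default primitive poly */
	if (!bch)
		return -EINVAL;

	data = kzalloc(len, GFP_KERNEL);
	ecc = kzalloc(bch->ecc_bytes, GFP_KERNEL);	/* ecc must start zeroed */
	errloc = kcalloc(bch->t, sizeof(*errloc), GFP_KERNEL);
	if (!data || !ecc || !errloc) {
		ret = -ENOMEM;
		goto out;
	}

	memset(data, 0xa5, len);
	bch_encode(bch, data, len, ecc);

	data[100] ^= 0x01;			/* inject two bit errors */
	data[200] ^= 0x80;

	nerr = bch_decode(bch, data, len, ecc, NULL, NULL, errloc);
	for (i = 0; i < nerr; i++) {
		/* correction statement quoted from the bch_decode() kerneldoc */
		if (errloc[i] < 8 * len)
			data[errloc[i] / 8] ^= 1 << (errloc[i] % 8);
	}
	ret = (nerr == 2) ? 0 : -EBADMSG;
out:
	kfree(errloc);
	kfree(ecc);
	kfree(data);
	bch_free(bch);
	return ret;
}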
// SPDX-License-Identifier: GPL-2.0 // error-inject.c: Function-level error injection table #include <linux/error-injection.h> #include <linux/debugfs.h> #include <linux/kallsyms.h> #include <linux/kprobes.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/slab.h> #include <asm/sections.h> /* Whitelist of symbols that can be overridden for error injection. */ static LIST_HEAD(error_injection_list); static DEFINE_MUTEX(ei_mutex); struct ei_entry { struct list_head list; unsigned long start_addr; unsigned long end_addr; int etype; void *priv; }; bool within_error_injection_list(unsigned long addr) { struct ei_entry *ent; bool ret = false; mutex_lock(&ei_mutex); list_for_each_entry(ent, &error_injection_list, list) { if (addr >= ent->start_addr && addr < ent->end_addr) { ret = true; break; } } mutex_unlock(&ei_mutex); return ret; } int get_injectable_error_type(unsigned long addr) { struct ei_entry *ent; int ei_type = -EINVAL; mutex_lock(&ei_mutex); list_for_each_entry(ent, &error_injection_list, list) { if (addr >= ent->start_addr && addr < ent->end_addr) { ei_type = ent->etype; break; } } mutex_unlock(&ei_mutex); return ei_type; } /* * Lookup and populate the error_injection_list. * * For safety reasons we only allow certain functions to be overridden with * bpf_error_injection, so we need to populate the list of the symbols that have * been marked as safe for overriding. */ static void populate_error_injection_list(struct error_injection_entry *start, struct error_injection_entry *end, void *priv) { struct error_injection_entry *iter; struct ei_entry *ent; unsigned long entry, offset = 0, size = 0; mutex_lock(&ei_mutex); for (iter = start; iter < end; iter++) { entry = (unsigned long)dereference_symbol_descriptor((void *)iter->addr); if (!kernel_text_address(entry) || !kallsyms_lookup_size_offset(entry, &size, &offset)) { pr_err("Failed to find error inject entry at %p\n", (void *)entry); continue; } ent = kmalloc(sizeof(*ent), GFP_KERNEL); if (!ent) break; ent->start_addr = entry; ent->end_addr = entry + size; ent->etype = iter->etype; ent->priv = priv; INIT_LIST_HEAD(&ent->list); list_add_tail(&ent->list, &error_injection_list); } mutex_unlock(&ei_mutex); } /* Markers of the _error_inject_whitelist section */ extern struct error_injection_entry __start_error_injection_whitelist[]; extern struct error_injection_entry __stop_error_injection_whitelist[]; static void __init populate_kernel_ei_list(void) { populate_error_injection_list(__start_error_injection_whitelist, __stop_error_injection_whitelist, NULL); } #ifdef CONFIG_MODULES static void module_load_ei_list(struct module *mod) { if (!mod->num_ei_funcs) return; populate_error_injection_list(mod->ei_funcs, mod->ei_funcs + mod->num_ei_funcs, mod); } static void module_unload_ei_list(struct module *mod) { struct ei_entry *ent, *n; if (!mod->num_ei_funcs) return; mutex_lock(&ei_mutex); list_for_each_entry_safe(ent, n, &error_injection_list, list) { if (ent->priv == mod) { list_del_init(&ent->list); kfree(ent); } } mutex_unlock(&ei_mutex); } /* Module notifier call back, checking error injection table on the module */ static int ei_module_callback(struct notifier_block *nb, unsigned long val, void *data) { struct module *mod = data; if (val == MODULE_STATE_COMING) module_load_ei_list(mod); else if (val == MODULE_STATE_GOING) module_unload_ei_list(mod); return NOTIFY_DONE; } static struct notifier_block ei_module_nb = { .notifier_call = ei_module_callback, .priority = 0 }; static __init int 
module_ei_init(void) { return register_module_notifier(&ei_module_nb); } #else /* !CONFIG_MODULES */ #define module_ei_init() (0) #endif /* * error_injection/whitelist -- shows which functions can be overridden for * error injection. */ static void *ei_seq_start(struct seq_file *m, loff_t *pos) { mutex_lock(&ei_mutex); return seq_list_start(&error_injection_list, *pos); } static void ei_seq_stop(struct seq_file *m, void *v) { mutex_unlock(&ei_mutex); } static void *ei_seq_next(struct seq_file *m, void *v, loff_t *pos) { return seq_list_next(v, &error_injection_list, pos); } static const char *error_type_string(int etype) { switch (etype) { case EI_ETYPE_NULL: return "NULL"; case EI_ETYPE_ERRNO: return "ERRNO"; case EI_ETYPE_ERRNO_NULL: return "ERRNO_NULL"; case EI_ETYPE_TRUE: return "TRUE"; default: return "(unknown)"; } } static int ei_seq_show(struct seq_file *m, void *v) { struct ei_entry *ent = list_entry(v, struct ei_entry, list); seq_printf(m, "%ps\t%s\n", (void *)ent->start_addr, error_type_string(ent->etype)); return 0; } static const struct seq_operations ei_sops = { .start = ei_seq_start, .next = ei_seq_next, .stop = ei_seq_stop, .show = ei_seq_show, }; DEFINE_SEQ_ATTRIBUTE(ei); static int __init ei_debugfs_init(void) { struct dentry *dir, *file; dir = debugfs_create_dir("error_injection", NULL); file = debugfs_create_file("list", 0444, dir, NULL, &ei_fops); if (!file) { debugfs_remove(dir); return -ENOMEM; } return 0; } static int __init init_error_injection(void) { populate_kernel_ei_list(); if (!module_ei_init()) ei_debugfs_init(); return 0; } late_initcall(init_error_injection);
linux-master
lib/error-inject.c
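Functions land on the whitelist managed by this file via the ALLOW_ERROR_INJECTION() annotation, which emits a struct error_injection_entry into the _error_injection_whitelist section walked by populate_error_injection_list(). A hypothetical module sketch (the demo function is invented, and <linux/error-injection.h> is assumed to provide the macro):

/* Hypothetical demo; demo_open_resource() is invented for illustration. */
#include <linux/errno.h>
#include <linux/error-injection.h>
#include <linux/module.h>

static noinline int demo_open_resource(int id)
{
	if (id < 0)
		return -EINVAL;
	return 0;
}
/* etype ERRNO: an injected return value is interpreted as a -errno */
ALLOW_ERROR_INJECTION(demo_open_resource, ERRNO);

static int __init ei_demo_init(void)
{
	return demo_open_resource(1);
}
module_init(ei_demo_init);
MODULE_LICENSE("GPL");

Once such a module is loaded, the entry appears with type ERRNO in debugfs under error_injection/list, the view implemented by ei_seq_show() above.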
// SPDX-License-Identifier: GPL-2.0 /* * Test module to generate lockups */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/sched/signal.h> #include <linux/sched/clock.h> #include <linux/cpu.h> #include <linux/nmi.h> #include <linux/mm.h> #include <linux/uaccess.h> #include <linux/file.h> static unsigned int time_secs; module_param(time_secs, uint, 0600); MODULE_PARM_DESC(time_secs, "lockup time in seconds, default 0"); static unsigned int time_nsecs; module_param(time_nsecs, uint, 0600); MODULE_PARM_DESC(time_nsecs, "nanoseconds part of lockup time, default 0"); static unsigned int cooldown_secs; module_param(cooldown_secs, uint, 0600); MODULE_PARM_DESC(cooldown_secs, "cooldown time between iterations in seconds, default 0"); static unsigned int cooldown_nsecs; module_param(cooldown_nsecs, uint, 0600); MODULE_PARM_DESC(cooldown_nsecs, "nanoseconds part of cooldown, default 0"); static unsigned int iterations = 1; module_param(iterations, uint, 0600); MODULE_PARM_DESC(iterations, "lockup iterations, default 1"); static bool all_cpus; module_param(all_cpus, bool, 0400); MODULE_PARM_DESC(all_cpus, "trigger lockup at all cpus at once"); static int wait_state; static char *state = "R"; module_param(state, charp, 0400); MODULE_PARM_DESC(state, "wait in 'R' running (default), 'D' uninterruptible, 'K' killable, 'S' interruptible state"); static bool use_hrtimer; module_param(use_hrtimer, bool, 0400); MODULE_PARM_DESC(use_hrtimer, "use high-resolution timer for sleeping"); static bool iowait; module_param(iowait, bool, 0400); MODULE_PARM_DESC(iowait, "account sleep time as iowait"); static bool lock_read; module_param(lock_read, bool, 0400); MODULE_PARM_DESC(lock_read, "lock read-write locks for read"); static bool lock_single; module_param(lock_single, bool, 0400); MODULE_PARM_DESC(lock_single, "acquire locks only at one cpu"); static bool reacquire_locks; module_param(reacquire_locks, bool, 0400); MODULE_PARM_DESC(reacquire_locks, "release and reacquire locks/irq/preempt between iterations"); static bool touch_softlockup; module_param(touch_softlockup, bool, 0600); MODULE_PARM_DESC(touch_softlockup, "touch soft-lockup watchdog between iterations"); static bool touch_hardlockup; module_param(touch_hardlockup, bool, 0600); MODULE_PARM_DESC(touch_hardlockup, "touch hard-lockup watchdog between iterations"); static bool call_cond_resched; module_param(call_cond_resched, bool, 0600); MODULE_PARM_DESC(call_cond_resched, "call cond_resched() between iterations"); static bool measure_lock_wait; module_param(measure_lock_wait, bool, 0400); MODULE_PARM_DESC(measure_lock_wait, "measure lock wait time"); static unsigned long lock_wait_threshold = ULONG_MAX; module_param(lock_wait_threshold, ulong, 0400); MODULE_PARM_DESC(lock_wait_threshold, "print lock wait time longer than this in nanoseconds, default off"); static bool test_disable_irq; module_param_named(disable_irq, test_disable_irq, bool, 0400); MODULE_PARM_DESC(disable_irq, "disable interrupts: generate hard-lockups"); static bool disable_softirq; module_param(disable_softirq, bool, 0400); MODULE_PARM_DESC(disable_softirq, "disable bottom-half irq handlers"); static bool disable_preempt; module_param(disable_preempt, bool, 0400); MODULE_PARM_DESC(disable_preempt, "disable preemption: generate soft-lockups"); static bool lock_rcu; module_param(lock_rcu, bool, 0400); MODULE_PARM_DESC(lock_rcu, "grab rcu_read_lock: generate rcu stalls"); 
static bool lock_mmap_sem; module_param(lock_mmap_sem, bool, 0400); MODULE_PARM_DESC(lock_mmap_sem, "lock mm->mmap_lock: block procfs interfaces"); static unsigned long lock_rwsem_ptr; module_param_unsafe(lock_rwsem_ptr, ulong, 0400); MODULE_PARM_DESC(lock_rwsem_ptr, "lock rw_semaphore at address"); static unsigned long lock_mutex_ptr; module_param_unsafe(lock_mutex_ptr, ulong, 0400); MODULE_PARM_DESC(lock_mutex_ptr, "lock mutex at address"); static unsigned long lock_spinlock_ptr; module_param_unsafe(lock_spinlock_ptr, ulong, 0400); MODULE_PARM_DESC(lock_spinlock_ptr, "lock spinlock at address"); static unsigned long lock_rwlock_ptr; module_param_unsafe(lock_rwlock_ptr, ulong, 0400); MODULE_PARM_DESC(lock_rwlock_ptr, "lock rwlock at address"); static unsigned int alloc_pages_nr; module_param_unsafe(alloc_pages_nr, uint, 0600); MODULE_PARM_DESC(alloc_pages_nr, "allocate and free pages under locks"); static unsigned int alloc_pages_order; module_param(alloc_pages_order, uint, 0400); MODULE_PARM_DESC(alloc_pages_order, "page order to allocate"); static gfp_t alloc_pages_gfp = GFP_KERNEL; module_param_unsafe(alloc_pages_gfp, uint, 0400); MODULE_PARM_DESC(alloc_pages_gfp, "allocate pages with this gfp_mask, default GFP_KERNEL"); static bool alloc_pages_atomic; module_param(alloc_pages_atomic, bool, 0400); MODULE_PARM_DESC(alloc_pages_atomic, "allocate pages with GFP_ATOMIC"); static bool reallocate_pages; module_param(reallocate_pages, bool, 0400); MODULE_PARM_DESC(reallocate_pages, "free and allocate pages between iterations"); struct file *test_file; static struct inode *test_inode; static char test_file_path[256]; module_param_string(file_path, test_file_path, sizeof(test_file_path), 0400); MODULE_PARM_DESC(file_path, "file path to test"); static bool test_lock_inode; module_param_named(lock_inode, test_lock_inode, bool, 0400); MODULE_PARM_DESC(lock_inode, "lock file -> inode -> i_rwsem"); static bool test_lock_mapping; module_param_named(lock_mapping, test_lock_mapping, bool, 0400); MODULE_PARM_DESC(lock_mapping, "lock file -> mapping -> i_mmap_rwsem"); static bool test_lock_sb_umount; module_param_named(lock_sb_umount, test_lock_sb_umount, bool, 0400); MODULE_PARM_DESC(lock_sb_umount, "lock file -> sb -> s_umount"); static atomic_t alloc_pages_failed = ATOMIC_INIT(0); static atomic64_t max_lock_wait = ATOMIC64_INIT(0); static struct task_struct *main_task; static int master_cpu; static void test_lock(bool master, bool verbose) { u64 wait_start; if (measure_lock_wait) wait_start = local_clock(); if (lock_mutex_ptr && master) { if (verbose) pr_notice("lock mutex %ps\n", (void *)lock_mutex_ptr); mutex_lock((struct mutex *)lock_mutex_ptr); } if (lock_rwsem_ptr && master) { if (verbose) pr_notice("lock rw_semaphore %ps\n", (void *)lock_rwsem_ptr); if (lock_read) down_read((struct rw_semaphore *)lock_rwsem_ptr); else down_write((struct rw_semaphore *)lock_rwsem_ptr); } if (lock_mmap_sem && master) { if (verbose) pr_notice("lock mmap_lock pid=%d\n", main_task->pid); if (lock_read) mmap_read_lock(main_task->mm); else mmap_write_lock(main_task->mm); } if (test_disable_irq) local_irq_disable(); if (disable_softirq) local_bh_disable(); if (disable_preempt) preempt_disable(); if (lock_rcu) rcu_read_lock(); if (lock_spinlock_ptr && master) { if (verbose) pr_notice("lock spinlock %ps\n", (void *)lock_spinlock_ptr); spin_lock((spinlock_t *)lock_spinlock_ptr); } if (lock_rwlock_ptr && master) { if (verbose) pr_notice("lock rwlock %ps\n", (void *)lock_rwlock_ptr); if (lock_read) read_lock((rwlock_t 
*)lock_rwlock_ptr); else write_lock((rwlock_t *)lock_rwlock_ptr); } if (measure_lock_wait) { s64 cur_wait = local_clock() - wait_start; s64 max_wait = atomic64_read(&max_lock_wait); do { if (cur_wait < max_wait) break; max_wait = atomic64_cmpxchg(&max_lock_wait, max_wait, cur_wait); } while (max_wait != cur_wait); if (cur_wait > lock_wait_threshold) pr_notice_ratelimited("lock wait %lld ns\n", cur_wait); } } static void test_unlock(bool master, bool verbose) { if (lock_rwlock_ptr && master) { if (lock_read) read_unlock((rwlock_t *)lock_rwlock_ptr); else write_unlock((rwlock_t *)lock_rwlock_ptr); if (verbose) pr_notice("unlock rwlock %ps\n", (void *)lock_rwlock_ptr); } if (lock_spinlock_ptr && master) { spin_unlock((spinlock_t *)lock_spinlock_ptr); if (verbose) pr_notice("unlock spinlock %ps\n", (void *)lock_spinlock_ptr); } if (lock_rcu) rcu_read_unlock(); if (disable_preempt) preempt_enable(); if (disable_softirq) local_bh_enable(); if (test_disable_irq) local_irq_enable(); if (lock_mmap_sem && master) { if (lock_read) mmap_read_unlock(main_task->mm); else mmap_write_unlock(main_task->mm); if (verbose) pr_notice("unlock mmap_lock pid=%d\n", main_task->pid); } if (lock_rwsem_ptr && master) { if (lock_read) up_read((struct rw_semaphore *)lock_rwsem_ptr); else up_write((struct rw_semaphore *)lock_rwsem_ptr); if (verbose) pr_notice("unlock rw_semaphore %ps\n", (void *)lock_rwsem_ptr); } if (lock_mutex_ptr && master) { mutex_unlock((struct mutex *)lock_mutex_ptr); if (verbose) pr_notice("unlock mutex %ps\n", (void *)lock_mutex_ptr); } } static void test_alloc_pages(struct list_head *pages) { struct page *page; unsigned int i; for (i = 0; i < alloc_pages_nr; i++) { page = alloc_pages(alloc_pages_gfp, alloc_pages_order); if (!page) { atomic_inc(&alloc_pages_failed); break; } list_add(&page->lru, pages); } } static void test_free_pages(struct list_head *pages) { struct page *page, *next; list_for_each_entry_safe(page, next, pages, lru) __free_pages(page, alloc_pages_order); INIT_LIST_HEAD(pages); } static void test_wait(unsigned int secs, unsigned int nsecs) { if (wait_state == TASK_RUNNING) { if (secs) mdelay(secs * MSEC_PER_SEC); if (nsecs) ndelay(nsecs); return; } __set_current_state(wait_state); if (use_hrtimer) { ktime_t time; time = ns_to_ktime((u64)secs * NSEC_PER_SEC + nsecs); schedule_hrtimeout(&time, HRTIMER_MODE_REL); } else { schedule_timeout(secs * HZ + nsecs_to_jiffies(nsecs)); } } static void test_lockup(bool master) { u64 lockup_start = local_clock(); unsigned int iter = 0; LIST_HEAD(pages); pr_notice("Start on CPU%d\n", raw_smp_processor_id()); test_lock(master, true); test_alloc_pages(&pages); while (iter++ < iterations && !signal_pending(main_task)) { if (iowait) current->in_iowait = 1; test_wait(time_secs, time_nsecs); if (iowait) current->in_iowait = 0; if (reallocate_pages) test_free_pages(&pages); if (reacquire_locks) test_unlock(master, false); if (touch_softlockup) touch_softlockup_watchdog(); if (touch_hardlockup) touch_nmi_watchdog(); if (call_cond_resched) cond_resched(); test_wait(cooldown_secs, cooldown_nsecs); if (reacquire_locks) test_lock(master, false); if (reallocate_pages) test_alloc_pages(&pages); } pr_notice("Finish on CPU%d in %lld ns\n", raw_smp_processor_id(), local_clock() - lockup_start); test_free_pages(&pages); test_unlock(master, true); } static DEFINE_PER_CPU(struct work_struct, test_works); static void test_work_fn(struct work_struct *work) { test_lockup(!lock_single || work == per_cpu_ptr(&test_works, master_cpu)); } static bool 
test_kernel_ptr(unsigned long addr, int size) { void *ptr = (void *)addr; char buf; if (!addr) return false; /* should be at least readable kernel address */ if (!IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) && (access_ok((void __user *)ptr, 1) || access_ok((void __user *)ptr + size - 1, 1))) { pr_err("user space ptr invalid in kernel: %#lx\n", addr); return true; } if (get_kernel_nofault(buf, ptr) || get_kernel_nofault(buf, ptr + size - 1)) { pr_err("invalid kernel ptr: %#lx\n", addr); return true; } return false; } static bool __maybe_unused test_magic(unsigned long addr, int offset, unsigned int expected) { void *ptr = (void *)addr + offset; unsigned int magic = 0; if (!addr) return false; if (get_kernel_nofault(magic, ptr) || magic != expected) { pr_err("invalid magic at %#lx + %#x = %#x, expected %#x\n", addr, offset, magic, expected); return true; } return false; } static int __init test_lockup_init(void) { u64 test_start = local_clock(); main_task = current; switch (state[0]) { case 'S': wait_state = TASK_INTERRUPTIBLE; break; case 'D': wait_state = TASK_UNINTERRUPTIBLE; break; case 'K': wait_state = TASK_KILLABLE; break; case 'R': wait_state = TASK_RUNNING; break; default: pr_err("unknown state=%s\n", state); return -EINVAL; } if (alloc_pages_atomic) alloc_pages_gfp = GFP_ATOMIC; if (test_kernel_ptr(lock_spinlock_ptr, sizeof(spinlock_t)) || test_kernel_ptr(lock_rwlock_ptr, sizeof(rwlock_t)) || test_kernel_ptr(lock_mutex_ptr, sizeof(struct mutex)) || test_kernel_ptr(lock_rwsem_ptr, sizeof(struct rw_semaphore))) return -EINVAL; #ifdef CONFIG_DEBUG_SPINLOCK #ifdef CONFIG_PREEMPT_RT if (test_magic(lock_spinlock_ptr, offsetof(spinlock_t, lock.wait_lock.magic), SPINLOCK_MAGIC) || test_magic(lock_rwlock_ptr, offsetof(rwlock_t, rwbase.rtmutex.wait_lock.magic), SPINLOCK_MAGIC) || test_magic(lock_mutex_ptr, offsetof(struct mutex, rtmutex.wait_lock.magic), SPINLOCK_MAGIC) || test_magic(lock_rwsem_ptr, offsetof(struct rw_semaphore, rwbase.rtmutex.wait_lock.magic), SPINLOCK_MAGIC)) return -EINVAL; #else if (test_magic(lock_spinlock_ptr, offsetof(spinlock_t, rlock.magic), SPINLOCK_MAGIC) || test_magic(lock_rwlock_ptr, offsetof(rwlock_t, magic), RWLOCK_MAGIC) || test_magic(lock_mutex_ptr, offsetof(struct mutex, wait_lock.magic), SPINLOCK_MAGIC) || test_magic(lock_rwsem_ptr, offsetof(struct rw_semaphore, wait_lock.magic), SPINLOCK_MAGIC)) return -EINVAL; #endif #endif if ((wait_state != TASK_RUNNING || (call_cond_resched && !reacquire_locks) || (alloc_pages_nr && gfpflags_allow_blocking(alloc_pages_gfp))) && (test_disable_irq || disable_softirq || disable_preempt || lock_rcu || lock_spinlock_ptr || lock_rwlock_ptr)) { pr_err("refuse to sleep in atomic context\n"); return -EINVAL; } if (lock_mmap_sem && !main_task->mm) { pr_err("no mm to lock mmap_lock\n"); return -EINVAL; } if (test_file_path[0]) { test_file = filp_open(test_file_path, O_RDONLY, 0); if (IS_ERR(test_file)) { pr_err("failed to open %s: %ld\n", test_file_path, PTR_ERR(test_file)); return PTR_ERR(test_file); } test_inode = file_inode(test_file); } else if (test_lock_inode || test_lock_mapping || test_lock_sb_umount) { pr_err("no file to lock\n"); return -EINVAL; } if (test_lock_inode && test_inode) lock_rwsem_ptr = (unsigned long)&test_inode->i_rwsem; if (test_lock_mapping && test_file && test_file->f_mapping) lock_rwsem_ptr = (unsigned long)&test_file->f_mapping->i_mmap_rwsem; if (test_lock_sb_umount && test_inode) lock_rwsem_ptr = (unsigned long)&test_inode->i_sb->s_umount; pr_notice("START pid=%d time=%u +%u ns cooldown=%u +%u 
ns iterations=%u state=%s %s%s%s%s%s%s%s%s%s%s%s\n", main_task->pid, time_secs, time_nsecs, cooldown_secs, cooldown_nsecs, iterations, state, all_cpus ? "all_cpus " : "", iowait ? "iowait " : "", test_disable_irq ? "disable_irq " : "", disable_softirq ? "disable_softirq " : "", disable_preempt ? "disable_preempt " : "", lock_rcu ? "lock_rcu " : "", lock_read ? "lock_read " : "", touch_softlockup ? "touch_softlockup " : "", touch_hardlockup ? "touch_hardlockup " : "", call_cond_resched ? "call_cond_resched " : "", reacquire_locks ? "reacquire_locks " : ""); if (alloc_pages_nr) pr_notice("ALLOCATE PAGES nr=%u order=%u gfp=%pGg %s\n", alloc_pages_nr, alloc_pages_order, &alloc_pages_gfp, reallocate_pages ? "reallocate_pages " : ""); if (all_cpus) { unsigned int cpu; cpus_read_lock(); preempt_disable(); master_cpu = smp_processor_id(); for_each_online_cpu(cpu) { INIT_WORK(per_cpu_ptr(&test_works, cpu), test_work_fn); queue_work_on(cpu, system_highpri_wq, per_cpu_ptr(&test_works, cpu)); } preempt_enable(); for_each_online_cpu(cpu) flush_work(per_cpu_ptr(&test_works, cpu)); cpus_read_unlock(); } else { test_lockup(true); } if (measure_lock_wait) pr_notice("Maximum lock wait: %lld ns\n", atomic64_read(&max_lock_wait)); if (alloc_pages_nr) pr_notice("Page allocation failed %u times\n", atomic_read(&alloc_pages_failed)); pr_notice("FINISH in %llu ns\n", local_clock() - test_start); if (test_file) fput(test_file); if (signal_pending(main_task)) return -EINTR; return -EAGAIN; } module_init(test_lockup_init); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Konstantin Khlebnikov <[email protected]>"); MODULE_DESCRIPTION("Test module to generate lockups");
linux-master
lib/test_lockup.c
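A minimal sketch of the pattern test_lockup exercises with disable_preempt=Y state=R time_secs=N: disable preemption and busy-wait past the soft-lockup watchdog threshold. The module and parameter names below are made up for illustration, and such a module should only ever be loaded in a throwaway VM.

/* Hypothetical one-shot module, not part of the tree: the disable_preempt=Y,
 * state=R flavour of the lockup test above, stripped to its core.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/preempt.h>
#include <linux/smp.h>

static unsigned int spin_secs = 30;			/* made-up parameter name */
module_param(spin_secs, uint, 0400);
MODULE_PARM_DESC(spin_secs, "seconds to busy-wait with preemption disabled");

static int __init mini_lockup_init(void)
{
	pr_notice("spinning %u s on CPU%d with preemption off\n",
		  spin_secs, raw_smp_processor_id());
	preempt_disable();		/* like disable_preempt=Y */
	mdelay(spin_secs * 1000UL);	/* like state=R: busy wait, no scheduling */
	preempt_enable();
	return -EAGAIN;			/* one-shot, never stays loaded */
}
module_init(mini_lockup_init);
MODULE_LICENSE("GPL");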
// SPDX-License-Identifier: GPL-2.0-or-later OR copyleft-next-0.3.1 /* * proc sysctl test driver * * Copyright (C) 2017 Luis R. Rodriguez <[email protected]> */ /* * This module provides an interface to the proc sysctl interfaces. This * driver requires CONFIG_PROC_SYSCTL. It will not normally be loaded by the * system unless explicitly requested by name. You can also build this driver * into your kernel. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/printk.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/async.h> #include <linux/delay.h> #include <linux/vmalloc.h> static int i_zero; static int i_one_hundred = 100; static int match_int_ok = 1; static struct { struct ctl_table_header *test_h_setup_node; struct ctl_table_header *test_h_mnt; struct ctl_table_header *test_h_mnterror; } sysctl_test_headers; struct test_sysctl_data { int int_0001; int int_0002; int int_0003[4]; int boot_int; unsigned int uint_0001; char string_0001[65]; #define SYSCTL_TEST_BITMAP_SIZE 65536 unsigned long *bitmap_0001; }; static struct test_sysctl_data test_data = { .int_0001 = 60, .int_0002 = 1, .int_0003[0] = 0, .int_0003[1] = 1, .int_0003[2] = 2, .int_0003[3] = 3, .boot_int = 0, .uint_0001 = 314, .string_0001 = "(none)", }; /* These are all under /proc/sys/debug/test_sysctl/ */ static struct ctl_table test_table[] = { { .procname = "int_0001", .data = &test_data.int_0001, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &i_zero, .extra2 = &i_one_hundred, }, { .procname = "int_0002", .data = &test_data.int_0002, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "int_0003", .data = &test_data.int_0003, .maxlen = sizeof(test_data.int_0003), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "match_int", .data = &match_int_ok, .maxlen = sizeof(match_int_ok), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "boot_int", .data = &test_data.boot_int, .maxlen = sizeof(test_data.boot_int), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, { .procname = "uint_0001", .data = &test_data.uint_0001, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_douintvec, }, { .procname = "string_0001", .data = &test_data.string_0001, .maxlen = sizeof(test_data.string_0001), .mode = 0644, .proc_handler = proc_dostring, }, { .procname = "bitmap_0001", .data = &test_data.bitmap_0001, .maxlen = SYSCTL_TEST_BITMAP_SIZE, .mode = 0644, .proc_handler = proc_do_large_bitmap, }, { } }; static void test_sysctl_calc_match_int_ok(void) { int i; struct { int defined; int wanted; } match_int[] = { {.defined = *(int *)SYSCTL_ZERO, .wanted = 0}, {.defined = *(int *)SYSCTL_ONE, .wanted = 1}, {.defined = *(int *)SYSCTL_TWO, .wanted = 2}, {.defined = *(int *)SYSCTL_THREE, .wanted = 3}, {.defined = *(int *)SYSCTL_FOUR, .wanted = 4}, {.defined = *(int *)SYSCTL_ONE_HUNDRED, .wanted = 100}, {.defined = *(int *)SYSCTL_TWO_HUNDRED, .wanted = 200}, {.defined = *(int *)SYSCTL_ONE_THOUSAND, .wanted = 1000}, {.defined = *(int *)SYSCTL_THREE_THOUSAND, .wanted = 3000}, {.defined = *(int *)SYSCTL_INT_MAX, .wanted = INT_MAX}, {.defined = *(int *)SYSCTL_MAXOLDUID, .wanted = 65535}, {.defined = *(int *)SYSCTL_NEG_ONE, .wanted = -1}, }; for (i = 0; i < ARRAY_SIZE(match_int); i++) if (match_int[i].defined != match_int[i].wanted) match_int_ok = 0; } static int 
test_sysctl_setup_node_tests(void) { test_sysctl_calc_match_int_ok(); test_data.bitmap_0001 = kzalloc(SYSCTL_TEST_BITMAP_SIZE/8, GFP_KERNEL); if (!test_data.bitmap_0001) return -ENOMEM; sysctl_test_headers.test_h_setup_node = register_sysctl("debug/test_sysctl", test_table); if (!sysctl_test_headers.test_h_setup_node) { kfree(test_data.bitmap_0001); return -ENOMEM; } return 0; } /* Used to test that unregister actually removes the directory */ static struct ctl_table test_table_unregister[] = { { .procname = "unregister_error", .data = &test_data.int_0001, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, }, {} }; static int test_sysctl_run_unregister_nested(void) { struct ctl_table_header *unregister; unregister = register_sysctl("debug/test_sysctl/unregister_error", test_table_unregister); if (!unregister) return -ENOMEM; unregister_sysctl_table(unregister); return 0; } static int test_sysctl_run_register_mount_point(void) { sysctl_test_headers.test_h_mnt = register_sysctl_mount_point("debug/test_sysctl/mnt"); if (!sysctl_test_headers.test_h_mnt) return -ENOMEM; sysctl_test_headers.test_h_mnterror = register_sysctl("debug/test_sysctl/mnt/mnt_error", test_table_unregister); /* * Don't check the result.: * If it fails (expected behavior), return 0. * If successful (missbehavior of register mount point), we want to see * mnt_error when we run the sysctl test script */ return 0; } static int __init test_sysctl_init(void) { int err; err = test_sysctl_setup_node_tests(); if (err) goto out; err = test_sysctl_run_unregister_nested(); if (err) goto out; err = test_sysctl_run_register_mount_point(); out: return err; } module_init(test_sysctl_init); static void __exit test_sysctl_exit(void) { kfree(test_data.bitmap_0001); if (sysctl_test_headers.test_h_setup_node) unregister_sysctl_table(sysctl_test_headers.test_h_setup_node); if (sysctl_test_headers.test_h_mnt) unregister_sysctl_table(sysctl_test_headers.test_h_mnt); if (sysctl_test_headers.test_h_mnterror) unregister_sysctl_table(sysctl_test_headers.test_h_mnterror); } module_exit(test_sysctl_exit); MODULE_AUTHOR("Luis R. Rodriguez <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
lib/test_sysctl.c
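A minimal sketch of the same ctl_table + register_sysctl() pattern used above, reduced to a single clamped integer. The "debug/example" path and all identifiers are hypothetical.

/* Hypothetical module, not part of the tree: exposes one integer, clamped to
 * [0, 100], at /proc/sys/debug/example/answer.
 */
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/errno.h>

static int answer = 42;
static int answer_min;
static int answer_max = 100;
static struct ctl_table_header *example_hdr;

static struct ctl_table example_table[] = {
	{
		.procname	= "answer",
		.data		= &answer,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &answer_min,
		.extra2		= &answer_max,
	},
	{ }
};

static int __init example_sysctl_init(void)
{
	example_hdr = register_sysctl("debug/example", example_table);
	return example_hdr ? 0 : -ENOMEM;
}

static void __exit example_sysctl_exit(void)
{
	unregister_sysctl_table(example_hdr);
}

module_init(example_sysctl_init);
module_exit(example_sysctl_exit);
MODULE_LICENSE("GPL");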
/* * lib/test_parman.c - Test module for parman * Copyright (c) 2017 Mellanox Technologies. All rights reserved. * Copyright (c) 2017 Jiri Pirko <[email protected]> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/bitops.h> #include <linux/err.h> #include <linux/random.h> #include <linux/parman.h> #define TEST_PARMAN_PRIO_SHIFT 7 /* defines number of prios for testing */ #define TEST_PARMAN_PRIO_COUNT BIT(TEST_PARMAN_PRIO_SHIFT) #define TEST_PARMAN_PRIO_MASK (TEST_PARMAN_PRIO_COUNT - 1) #define TEST_PARMAN_ITEM_SHIFT 13 /* defines a total number * of items for testing */ #define TEST_PARMAN_ITEM_COUNT BIT(TEST_PARMAN_ITEM_SHIFT) #define TEST_PARMAN_ITEM_MASK (TEST_PARMAN_ITEM_COUNT - 1) #define TEST_PARMAN_BASE_SHIFT 8 #define TEST_PARMAN_BASE_COUNT BIT(TEST_PARMAN_BASE_SHIFT) #define TEST_PARMAN_RESIZE_STEP_SHIFT 7 #define TEST_PARMAN_RESIZE_STEP_COUNT BIT(TEST_PARMAN_RESIZE_STEP_SHIFT) #define TEST_PARMAN_BULK_MAX_SHIFT (2 + TEST_PARMAN_RESIZE_STEP_SHIFT) #define TEST_PARMAN_BULK_MAX_COUNT BIT(TEST_PARMAN_BULK_MAX_SHIFT) #define TEST_PARMAN_BULK_MAX_MASK (TEST_PARMAN_BULK_MAX_COUNT - 1) #define TEST_PARMAN_RUN_BUDGET (TEST_PARMAN_ITEM_COUNT * 256) struct test_parman_prio { struct parman_prio parman_prio; unsigned long priority; }; struct test_parman_item { struct parman_item parman_item; struct test_parman_prio *prio; bool used; }; struct test_parman { struct parman *parman; struct test_parman_item **prio_array; unsigned long prio_array_limit; struct test_parman_prio prios[TEST_PARMAN_PRIO_COUNT]; struct test_parman_item items[TEST_PARMAN_ITEM_COUNT]; struct rnd_state rnd; unsigned long run_budget; unsigned long bulk_budget; bool bulk_noop; unsigned int used_items; }; #define ITEM_PTRS_SIZE(count) (sizeof(struct test_parman_item *) * (count)) static int test_parman_resize(void *priv, unsigned long new_count) { struct test_parman *test_parman = priv; struct test_parman_item **prio_array; unsigned long old_count; prio_array = krealloc(test_parman->prio_array, ITEM_PTRS_SIZE(new_count), GFP_KERNEL); if (new_count == 0) return 0; if (!prio_array) return -ENOMEM; old_count = test_parman->prio_array_limit; if (new_count > old_count) memset(&prio_array[old_count], 0, ITEM_PTRS_SIZE(new_count - old_count)); test_parman->prio_array = prio_array; test_parman->prio_array_limit = new_count; return 0; } static void test_parman_move(void *priv, unsigned long from_index, unsigned long to_index, unsigned long count) { struct test_parman *test_parman = priv; struct test_parman_item **prio_array = test_parman->prio_array; memmove(&prio_array[to_index], &prio_array[from_index], ITEM_PTRS_SIZE(count)); memset(&prio_array[from_index], 0, ITEM_PTRS_SIZE(count)); } static const struct parman_ops test_parman_lsort_ops = { .base_count = TEST_PARMAN_BASE_COUNT, .resize_step = TEST_PARMAN_RESIZE_STEP_COUNT, .resize = test_parman_resize, .move = test_parman_move, .algo = PARMAN_ALGO_TYPE_LSORT, }; static void test_parman_rnd_init(struct test_parman *test_parman) { prandom_seed_state(&test_parman->rnd, 3141592653589793238ULL); } static u32 test_parman_rnd_get(struct test_parman *test_parman) { return prandom_u32_state(&test_parman->rnd); } static unsigned long test_parman_priority_gen(struct test_parman *test_parman) { unsigned long priority; int i; again: priority = test_parman_rnd_get(test_parman); if (priority == 0) goto again; for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) { struct test_parman_prio *prio = &test_parman->prios[i]; if (prio->priority == 0) break; if (prio->priority == priority) goto again; } return priority; } static void test_parman_prios_init(struct test_parman 
*test_parman) { int i; for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) { struct test_parman_prio *prio = &test_parman->prios[i]; /* Assign random uniqueue priority to each prio structure */ prio->priority = test_parman_priority_gen(test_parman); parman_prio_init(test_parman->parman, &prio->parman_prio, prio->priority); } } static void test_parman_prios_fini(struct test_parman *test_parman) { int i; for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) { struct test_parman_prio *prio = &test_parman->prios[i]; parman_prio_fini(&prio->parman_prio); } } static void test_parman_items_init(struct test_parman *test_parman) { int i; for (i = 0; i < TEST_PARMAN_ITEM_COUNT; i++) { struct test_parman_item *item = &test_parman->items[i]; unsigned int prio_index = test_parman_rnd_get(test_parman) & TEST_PARMAN_PRIO_MASK; /* Assign random prio to each item structure */ item->prio = &test_parman->prios[prio_index]; } } static void test_parman_items_fini(struct test_parman *test_parman) { int i; for (i = 0; i < TEST_PARMAN_ITEM_COUNT; i++) { struct test_parman_item *item = &test_parman->items[i]; if (!item->used) continue; parman_item_remove(test_parman->parman, &item->prio->parman_prio, &item->parman_item); } } static struct test_parman *test_parman_create(const struct parman_ops *ops) { struct test_parman *test_parman; int err; test_parman = kzalloc(sizeof(*test_parman), GFP_KERNEL); if (!test_parman) return ERR_PTR(-ENOMEM); err = test_parman_resize(test_parman, TEST_PARMAN_BASE_COUNT); if (err) goto err_resize; test_parman->parman = parman_create(ops, test_parman); if (!test_parman->parman) { err = -ENOMEM; goto err_parman_create; } test_parman_rnd_init(test_parman); test_parman_prios_init(test_parman); test_parman_items_init(test_parman); test_parman->run_budget = TEST_PARMAN_RUN_BUDGET; return test_parman; err_parman_create: test_parman_resize(test_parman, 0); err_resize: kfree(test_parman); return ERR_PTR(err); } static void test_parman_destroy(struct test_parman *test_parman) { test_parman_items_fini(test_parman); test_parman_prios_fini(test_parman); parman_destroy(test_parman->parman); test_parman_resize(test_parman, 0); kfree(test_parman); } static bool test_parman_run_check_budgets(struct test_parman *test_parman) { if (test_parman->run_budget-- == 0) return false; if (test_parman->bulk_budget-- != 0) return true; test_parman->bulk_budget = test_parman_rnd_get(test_parman) & TEST_PARMAN_BULK_MAX_MASK; test_parman->bulk_noop = test_parman_rnd_get(test_parman) & 1; return true; } static int test_parman_run(struct test_parman *test_parman) { unsigned int i = test_parman_rnd_get(test_parman); int err; while (test_parman_run_check_budgets(test_parman)) { unsigned int item_index = i++ & TEST_PARMAN_ITEM_MASK; struct test_parman_item *item = &test_parman->items[item_index]; if (test_parman->bulk_noop) continue; if (!item->used) { err = parman_item_add(test_parman->parman, &item->prio->parman_prio, &item->parman_item); if (err) return err; test_parman->prio_array[item->parman_item.index] = item; test_parman->used_items++; } else { test_parman->prio_array[item->parman_item.index] = NULL; parman_item_remove(test_parman->parman, &item->prio->parman_prio, &item->parman_item); test_parman->used_items--; } item->used = !item->used; } return 0; } static int test_parman_check_array(struct test_parman *test_parman, bool gaps_allowed) { unsigned int last_unused_items = 0; unsigned long last_priority = 0; unsigned int used_items = 0; int i; if (test_parman->prio_array_limit < TEST_PARMAN_BASE_COUNT) { pr_err("Array limit is 
lower than the base count (%lu < %lu)\n", test_parman->prio_array_limit, TEST_PARMAN_BASE_COUNT); return -EINVAL; } for (i = 0; i < test_parman->prio_array_limit; i++) { struct test_parman_item *item = test_parman->prio_array[i]; if (!item) { last_unused_items++; continue; } if (last_unused_items && !gaps_allowed) { pr_err("Gap found in array even though they are forbidden\n"); return -EINVAL; } last_unused_items = 0; used_items++; if (item->prio->priority < last_priority) { pr_err("Item belongs under higher priority then the last one (current: %lu, previous: %lu)\n", item->prio->priority, last_priority); return -EINVAL; } last_priority = item->prio->priority; if (item->parman_item.index != i) { pr_err("Item has different index in compare to where it actually is (%lu != %d)\n", item->parman_item.index, i); return -EINVAL; } } if (used_items != test_parman->used_items) { pr_err("Number of used items in array does not match (%u != %u)\n", used_items, test_parman->used_items); return -EINVAL; } if (last_unused_items >= TEST_PARMAN_RESIZE_STEP_COUNT) { pr_err("Number of unused item at the end of array is bigger than resize step (%u >= %lu)\n", last_unused_items, TEST_PARMAN_RESIZE_STEP_COUNT); return -EINVAL; } pr_info("Priority array check successful\n"); return 0; } static int test_parman_lsort(void) { struct test_parman *test_parman; int err; test_parman = test_parman_create(&test_parman_lsort_ops); if (IS_ERR(test_parman)) return PTR_ERR(test_parman); err = test_parman_run(test_parman); if (err) goto out; err = test_parman_check_array(test_parman, false); if (err) goto out; out: test_parman_destroy(test_parman); return err; } static int __init test_parman_init(void) { return test_parman_lsort(); } static void __exit test_parman_exit(void) { } module_init(test_parman_init); module_exit(test_parman_exit); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Jiri Pirko <[email protected]>"); MODULE_DESCRIPTION("Test module for parman");
linux-master
lib/test_parman.c
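A minimal sketch of a parman consumer, mirroring the callback wiring of the test above: a priv-owned pointer array kept in sync through the resize/move ops, one priority band and one item. All names are hypothetical, the move callback is simplified (no zeroing of vacated slots), and the module is deliberately one-shot.

/* Hypothetical module, not part of the tree: smallest useful parman setup. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/parman.h>

struct mini_parman {
	struct parman *parman;
	void **slots;		/* stand-in for a device table kept in priv */
	unsigned long limit;
};

static int mini_resize(void *priv, unsigned long new_count)
{
	struct mini_parman *mp = priv;
	void **slots;

	slots = krealloc(mp->slots, sizeof(*slots) * new_count, GFP_KERNEL);
	if (!slots)
		return -ENOMEM;
	mp->slots = slots;
	mp->limit = new_count;
	return 0;
}

static void mini_move(void *priv, unsigned long from, unsigned long to,
		      unsigned long count)
{
	struct mini_parman *mp = priv;

	memmove(&mp->slots[to], &mp->slots[from], sizeof(void *) * count);
}

static const struct parman_ops mini_ops = {
	.base_count	= 16,
	.resize_step	= 16,
	.resize		= mini_resize,
	.move		= mini_move,
	.algo		= PARMAN_ALGO_TYPE_LSORT,
};

static int __init mini_parman_init(void)
{
	static struct parman_prio prio;
	static struct parman_item item;
	struct mini_parman *mp;
	int err;

	mp = kzalloc(sizeof(*mp), GFP_KERNEL);
	if (!mp)
		return -ENOMEM;

	/* Like test_parman_create(): pre-size priv storage to base_count. */
	err = mini_resize(mp, mini_ops.base_count);
	if (err)
		goto err_free;

	mp->parman = parman_create(&mini_ops, mp);
	if (!mp->parman) {
		err = -ENOMEM;
		goto err_free_slots;
	}

	parman_prio_init(mp->parman, &prio, 10);	/* arbitrary priority */
	err = parman_item_add(mp->parman, &prio, &item);
	if (!err) {
		mp->slots[item.index] = &item;		/* record placement */
		pr_info("parman placed item at index %lu\n", item.index);
		mp->slots[item.index] = NULL;
		parman_item_remove(mp->parman, &prio, &item);
	}
	parman_prio_fini(&prio);
	parman_destroy(mp->parman);
err_free_slots:
	kfree(mp->slots);
err_free:
	kfree(mp);
	return err ? err : -EAGAIN;	/* one-shot demo, don't stay loaded */
}
module_init(mini_parman_init);
MODULE_LICENSE("Dual BSD/GPL");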
// SPDX-License-Identifier: GPL-2.0-only /* * crc16.c */ #include <linux/types.h> #include <linux/module.h> #include <linux/crc16.h> /** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */ u16 const crc16_table[256] = { 0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241, 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440, 0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40, 0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841, 0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40, 0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41, 0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641, 0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040, 0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240, 0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441, 0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41, 0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840, 0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41, 0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40, 0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640, 0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041, 0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240, 0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441, 0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41, 0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840, 0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41, 0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40, 0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640, 0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041, 0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241, 0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440, 0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40, 0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841, 0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40, 0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41, 0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641, 0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040 }; EXPORT_SYMBOL(crc16_table); /** * crc16 - compute the CRC-16 for the data buffer * @crc: previous CRC value * @buffer: data pointer * @len: number of bytes in the buffer * * Returns the updated CRC value. */ u16 crc16(u16 crc, u8 const *buffer, size_t len) { while (len--) crc = crc16_byte(crc, *buffer++); return crc; } EXPORT_SYMBOL(crc16); MODULE_DESCRIPTION("CRC16 calculations"); MODULE_LICENSE("GPL");
linux-master
lib/crc16.c
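The table above corresponds to the reflected CRC-16 with polynomial 0x8005 (0xA001 in reflected form). Below is a small userspace sketch of the equivalent bit-by-bit computation, assuming the usual CRC-16/ARC parameters (initial value 0, no final XOR); the string "123456789" is commonly quoted as producing 0xBB3D with these parameters.

/* Userspace sketch, not kernel code: bitwise equivalent of the table-driven
 * crc16() above. Build with: cc -std=c99 -o crc16_demo crc16_demo.c
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint16_t crc16_bitwise(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	}
	return crc;
}

int main(void)
{
	const char *s = "123456789";

	printf("0x%04X\n", crc16_bitwise(0, (const uint8_t *)s, strlen(s)));
	return 0;
}

In-kernel users simply select CRC16 in Kconfig and call crc16() with the exported table doing the per-byte work.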
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel module for testing static keys.
 *
 * Copyright 2015 Akamai Technologies Inc. All Rights Reserved
 *
 * Authors:
 *      Jason Baron <[email protected]>
 */
#include <linux/module.h>
#include <linux/jump_label.h>

/* old keys */
struct static_key base_old_true_key = STATIC_KEY_INIT_TRUE;
EXPORT_SYMBOL_GPL(base_old_true_key);
struct static_key base_inv_old_true_key = STATIC_KEY_INIT_TRUE;
EXPORT_SYMBOL_GPL(base_inv_old_true_key);
struct static_key base_old_false_key = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(base_old_false_key);
struct static_key base_inv_old_false_key = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(base_inv_old_false_key);

/* new keys */
DEFINE_STATIC_KEY_TRUE(base_true_key);
EXPORT_SYMBOL_GPL(base_true_key);
DEFINE_STATIC_KEY_TRUE(base_inv_true_key);
EXPORT_SYMBOL_GPL(base_inv_true_key);
DEFINE_STATIC_KEY_FALSE(base_false_key);
EXPORT_SYMBOL_GPL(base_false_key);
DEFINE_STATIC_KEY_FALSE(base_inv_false_key);
EXPORT_SYMBOL_GPL(base_inv_false_key);

static void invert_key(struct static_key *key)
{
	if (static_key_enabled(key))
		static_key_disable(key);
	else
		static_key_enable(key);
}

static int __init test_static_key_base_init(void)
{
	invert_key(&base_inv_old_true_key);
	invert_key(&base_inv_old_false_key);
	invert_key(&base_inv_true_key.key);
	invert_key(&base_inv_false_key.key);
	return 0;
}

static void __exit test_static_key_base_exit(void)
{
}

module_init(test_static_key_base_init);
module_exit(test_static_key_base_exit);

MODULE_AUTHOR("Jason Baron <[email protected]>");
MODULE_LICENSE("GPL");
linux-master
lib/test_static_key_base.c
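A sketch of the consumer-side pattern these exported keys are meant to exercise, using the standard static-branch helpers from <linux/jump_label.h>; the key and function names are hypothetical.

/* Hypothetical module, not part of the tree: typical static-branch usage.
 * The fast path compiles to a patched no-op/jump instead of a load-and-test.
 */
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(debug_tracing);	/* default: branch not taken */

static void do_work(void)
{
	if (static_branch_unlikely(&debug_tracing))
		pr_info("slow path: tracing enabled\n");
	/* normal fast path continues here */
}

static int __init sb_example_init(void)
{
	do_work();				/* fast path */
	static_branch_enable(&debug_tracing);	/* patch the branch in */
	do_work();				/* now hits the pr_info() */
	static_branch_disable(&debug_tracing);
	return 0;
}
module_init(sb_example_init);
MODULE_LICENSE("GPL");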
// SPDX-License-Identifier: GPL-2.0 #include <linux/err.h> #include <linux/pci.h> #include <linux/io.h> #include <linux/gfp.h> #include <linux/export.h> #include <linux/of_address.h> enum devm_ioremap_type { DEVM_IOREMAP = 0, DEVM_IOREMAP_UC, DEVM_IOREMAP_WC, DEVM_IOREMAP_NP, }; void devm_ioremap_release(struct device *dev, void *res) { iounmap(*(void __iomem **)res); } static int devm_ioremap_match(struct device *dev, void *res, void *match_data) { return *(void **)res == match_data; } static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset, resource_size_t size, enum devm_ioremap_type type) { void __iomem **ptr, *addr = NULL; ptr = devres_alloc_node(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL, dev_to_node(dev)); if (!ptr) return NULL; switch (type) { case DEVM_IOREMAP: addr = ioremap(offset, size); break; case DEVM_IOREMAP_UC: addr = ioremap_uc(offset, size); break; case DEVM_IOREMAP_WC: addr = ioremap_wc(offset, size); break; case DEVM_IOREMAP_NP: addr = ioremap_np(offset, size); break; } if (addr) { *ptr = addr; devres_add(dev, ptr); } else devres_free(ptr); return addr; } /** * devm_ioremap - Managed ioremap() * @dev: Generic device to remap IO address for * @offset: Resource address to map * @size: Size of map * * Managed ioremap(). Map is automatically unmapped on driver detach. */ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, resource_size_t size) { return __devm_ioremap(dev, offset, size, DEVM_IOREMAP); } EXPORT_SYMBOL(devm_ioremap); /** * devm_ioremap_uc - Managed ioremap_uc() * @dev: Generic device to remap IO address for * @offset: Resource address to map * @size: Size of map * * Managed ioremap_uc(). Map is automatically unmapped on driver detach. */ void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset, resource_size_t size) { return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC); } EXPORT_SYMBOL_GPL(devm_ioremap_uc); /** * devm_ioremap_wc - Managed ioremap_wc() * @dev: Generic device to remap IO address for * @offset: Resource address to map * @size: Size of map * * Managed ioremap_wc(). Map is automatically unmapped on driver detach. */ void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset, resource_size_t size) { return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC); } EXPORT_SYMBOL(devm_ioremap_wc); /** * devm_iounmap - Managed iounmap() * @dev: Generic device to unmap for * @addr: Address to unmap * * Managed iounmap(). @addr must have been mapped using devm_ioremap*(). 
*/ void devm_iounmap(struct device *dev, void __iomem *addr) { WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match, (__force void *)addr)); iounmap(addr); } EXPORT_SYMBOL(devm_iounmap); static void __iomem * __devm_ioremap_resource(struct device *dev, const struct resource *res, enum devm_ioremap_type type) { resource_size_t size; void __iomem *dest_ptr; char *pretty_name; BUG_ON(!dev); if (!res || resource_type(res) != IORESOURCE_MEM) { dev_err(dev, "invalid resource %pR\n", res); return IOMEM_ERR_PTR(-EINVAL); } if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED) type = DEVM_IOREMAP_NP; size = resource_size(res); if (res->name) pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev), res->name); else pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL); if (!pretty_name) { dev_err(dev, "can't generate pretty name for resource %pR\n", res); return IOMEM_ERR_PTR(-ENOMEM); } if (!devm_request_mem_region(dev, res->start, size, pretty_name)) { dev_err(dev, "can't request region for resource %pR\n", res); return IOMEM_ERR_PTR(-EBUSY); } dest_ptr = __devm_ioremap(dev, res->start, size, type); if (!dest_ptr) { dev_err(dev, "ioremap failed for resource %pR\n", res); devm_release_mem_region(dev, res->start, size); dest_ptr = IOMEM_ERR_PTR(-ENOMEM); } return dest_ptr; } /** * devm_ioremap_resource() - check, request region, and ioremap resource * @dev: generic device to handle the resource for * @res: resource to be handled * * Checks that a resource is a valid memory region, requests the memory * region and ioremaps it. All operations are managed and will be undone * on driver detach. * * Usage example: * * res = platform_get_resource(pdev, IORESOURCE_MEM, 0); * base = devm_ioremap_resource(&pdev->dev, res); * if (IS_ERR(base)) * return PTR_ERR(base); * * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code * on failure. */ void __iomem *devm_ioremap_resource(struct device *dev, const struct resource *res) { return __devm_ioremap_resource(dev, res, DEVM_IOREMAP); } EXPORT_SYMBOL(devm_ioremap_resource); /** * devm_ioremap_resource_wc() - write-combined variant of * devm_ioremap_resource() * @dev: generic device to handle the resource for * @res: resource to be handled * * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code * on failure. */ void __iomem *devm_ioremap_resource_wc(struct device *dev, const struct resource *res) { return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC); } /* * devm_of_iomap - Requests a resource and maps the memory mapped IO * for a given device_node managed by a given device * * Checks that a resource is a valid memory region, requests the memory * region and ioremaps it. All operations are managed and will be undone * on driver detach of the device. * * This is to be used when a device requests/maps resources described * by other device tree nodes (children or otherwise). * * @dev: The device "managing" the resource * @node: The device-tree node where the resource resides * @index: index of the MMIO range in the "reg" property * @size: Returns the size of the resource (pass NULL if not needed) * * Usage example: * * base = devm_of_iomap(&pdev->dev, node, 0, NULL); * if (IS_ERR(base)) * return PTR_ERR(base); * * Please Note: This is not a one-to-one replacement for of_iomap() because the * of_iomap() function does not track whether the region is already mapped. 
If * two drivers try to map the same memory, the of_iomap() function will succeed * but the devm_of_iomap() function will return -EBUSY. * * Return: a pointer to the requested and mapped memory or an ERR_PTR() encoded * error code on failure. */ void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index, resource_size_t *size) { struct resource res; if (of_address_to_resource(node, index, &res)) return IOMEM_ERR_PTR(-EINVAL); if (size) *size = resource_size(&res); return devm_ioremap_resource(dev, &res); } EXPORT_SYMBOL(devm_of_iomap); #ifdef CONFIG_HAS_IOPORT_MAP /* * Generic iomap devres */ static void devm_ioport_map_release(struct device *dev, void *res) { ioport_unmap(*(void __iomem **)res); } static int devm_ioport_map_match(struct device *dev, void *res, void *match_data) { return *(void **)res == match_data; } /** * devm_ioport_map - Managed ioport_map() * @dev: Generic device to map ioport for * @port: Port to map * @nr: Number of ports to map * * Managed ioport_map(). Map is automatically unmapped on driver * detach. * * Return: a pointer to the remapped memory or NULL on failure. */ void __iomem *devm_ioport_map(struct device *dev, unsigned long port, unsigned int nr) { void __iomem **ptr, *addr; ptr = devres_alloc_node(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL, dev_to_node(dev)); if (!ptr) return NULL; addr = ioport_map(port, nr); if (addr) { *ptr = addr; devres_add(dev, ptr); } else devres_free(ptr); return addr; } EXPORT_SYMBOL(devm_ioport_map); /** * devm_ioport_unmap - Managed ioport_unmap() * @dev: Generic device to unmap for * @addr: Address to unmap * * Managed ioport_unmap(). @addr must have been mapped using * devm_ioport_map(). */ void devm_ioport_unmap(struct device *dev, void __iomem *addr) { ioport_unmap(addr); WARN_ON(devres_destroy(dev, devm_ioport_map_release, devm_ioport_map_match, (__force void *)addr)); } EXPORT_SYMBOL(devm_ioport_unmap); #endif /* CONFIG_HAS_IOPORT_MAP */ #ifdef CONFIG_PCI /* * PCI iomap devres */ #define PCIM_IOMAP_MAX PCI_STD_NUM_BARS struct pcim_iomap_devres { void __iomem *table[PCIM_IOMAP_MAX]; }; static void pcim_iomap_release(struct device *gendev, void *res) { struct pci_dev *dev = to_pci_dev(gendev); struct pcim_iomap_devres *this = res; int i; for (i = 0; i < PCIM_IOMAP_MAX; i++) if (this->table[i]) pci_iounmap(dev, this->table[i]); } /** * pcim_iomap_table - access iomap allocation table * @pdev: PCI device to access iomap table for * * Access iomap allocation table for @dev. If iomap table doesn't * exist and @pdev is managed, it will be allocated. All iomaps * recorded in the iomap table are automatically unmapped on driver * detach. * * This function might sleep when the table is first allocated but can * be safely called without context and guaranteed to succeed once * allocated. */ void __iomem * const *pcim_iomap_table(struct pci_dev *pdev) { struct pcim_iomap_devres *dr, *new_dr; dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL); if (dr) return dr->table; new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL, dev_to_node(&pdev->dev)); if (!new_dr) return NULL; dr = devres_get(&pdev->dev, new_dr, NULL, NULL); return dr->table; } EXPORT_SYMBOL(pcim_iomap_table); /** * pcim_iomap - Managed pcim_iomap() * @pdev: PCI device to iomap for * @bar: BAR to iomap * @maxlen: Maximum length of iomap * * Managed pci_iomap(). Map is automatically unmapped on driver * detach. 
*/ void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen) { void __iomem **tbl; BUG_ON(bar >= PCIM_IOMAP_MAX); tbl = (void __iomem **)pcim_iomap_table(pdev); if (!tbl || tbl[bar]) /* duplicate mappings not allowed */ return NULL; tbl[bar] = pci_iomap(pdev, bar, maxlen); return tbl[bar]; } EXPORT_SYMBOL(pcim_iomap); /** * pcim_iounmap - Managed pci_iounmap() * @pdev: PCI device to iounmap for * @addr: Address to unmap * * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap(). */ void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr) { void __iomem **tbl; int i; pci_iounmap(pdev, addr); tbl = (void __iomem **)pcim_iomap_table(pdev); BUG_ON(!tbl); for (i = 0; i < PCIM_IOMAP_MAX; i++) if (tbl[i] == addr) { tbl[i] = NULL; return; } WARN_ON(1); } EXPORT_SYMBOL(pcim_iounmap); /** * pcim_iomap_regions - Request and iomap PCI BARs * @pdev: PCI device to map IO resources for * @mask: Mask of BARs to request and iomap * @name: Name used when requesting regions * * Request and iomap regions specified by @mask. */ int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name) { void __iomem * const *iomap; int i, rc; iomap = pcim_iomap_table(pdev); if (!iomap) return -ENOMEM; for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { unsigned long len; if (!(mask & (1 << i))) continue; rc = -EINVAL; len = pci_resource_len(pdev, i); if (!len) goto err_inval; rc = pci_request_region(pdev, i, name); if (rc) goto err_inval; rc = -ENOMEM; if (!pcim_iomap(pdev, i, 0)) goto err_region; } return 0; err_region: pci_release_region(pdev, i); err_inval: while (--i >= 0) { if (!(mask & (1 << i))) continue; pcim_iounmap(pdev, iomap[i]); pci_release_region(pdev, i); } return rc; } EXPORT_SYMBOL(pcim_iomap_regions); /** * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones * @pdev: PCI device to map IO resources for * @mask: Mask of BARs to iomap * @name: Name used when requesting regions * * Request all PCI BARs and iomap regions specified by @mask. */ int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask, const char *name) { int request_mask = ((1 << 6) - 1) & ~mask; int rc; rc = pci_request_selected_regions(pdev, request_mask, name); if (rc) return rc; rc = pcim_iomap_regions(pdev, mask, name); if (rc) pci_release_selected_regions(pdev, request_mask); return rc; } EXPORT_SYMBOL(pcim_iomap_regions_request_all); /** * pcim_iounmap_regions - Unmap and release PCI BARs * @pdev: PCI device to map IO resources for * @mask: Mask of BARs to unmap and release * * Unmap and release regions specified by @mask. */ void pcim_iounmap_regions(struct pci_dev *pdev, int mask) { void __iomem * const *iomap; int i; iomap = pcim_iomap_table(pdev); if (!iomap) return; for (i = 0; i < PCIM_IOMAP_MAX; i++) { if (!(mask & (1 << i))) continue; pcim_iounmap(pdev, iomap[i]); pci_release_region(pdev, i); } } EXPORT_SYMBOL(pcim_iounmap_regions); #endif /* CONFIG_PCI */ static void devm_arch_phys_ac_add_release(struct device *dev, void *res) { arch_phys_wc_del(*((int *)res)); } /** * devm_arch_phys_wc_add - Managed arch_phys_wc_add() * @dev: Managed device * @base: Memory base address * @size: Size of memory range * * Adds a WC MTRR using arch_phys_wc_add() and sets up a release callback. * See arch_phys_wc_add() for more information. 
*/ int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size) { int *mtrr; int ret; mtrr = devres_alloc_node(devm_arch_phys_ac_add_release, sizeof(*mtrr), GFP_KERNEL, dev_to_node(dev)); if (!mtrr) return -ENOMEM; ret = arch_phys_wc_add(base, size); if (ret < 0) { devres_free(mtrr); return ret; } *mtrr = ret; devres_add(dev, mtrr); return ret; } EXPORT_SYMBOL(devm_arch_phys_wc_add); struct arch_io_reserve_memtype_wc_devres { resource_size_t start; resource_size_t size; }; static void devm_arch_io_free_memtype_wc_release(struct device *dev, void *res) { const struct arch_io_reserve_memtype_wc_devres *this = res; arch_io_free_memtype_wc(this->start, this->size); } /** * devm_arch_io_reserve_memtype_wc - Managed arch_io_reserve_memtype_wc() * @dev: Managed device * @start: Memory base address * @size: Size of memory range * * Reserves a memory range with WC caching using arch_io_reserve_memtype_wc() * and sets up a release callback See arch_io_reserve_memtype_wc() for more * information. */ int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start, resource_size_t size) { struct arch_io_reserve_memtype_wc_devres *dr; int ret; dr = devres_alloc_node(devm_arch_io_free_memtype_wc_release, sizeof(*dr), GFP_KERNEL, dev_to_node(dev)); if (!dr) return -ENOMEM; ret = arch_io_reserve_memtype_wc(start, size); if (ret < 0) { devres_free(dr); return ret; } dr->start = start; dr->size = size; devres_add(dev, dr); return ret; } EXPORT_SYMBOL(devm_arch_io_reserve_memtype_wc);
linux-master
lib/devres.c
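A slightly fuller version of the probe() sequence shown in the devm_ioremap_resource() kernel-doc above, wrapped in a hypothetical platform driver; nothing needs to be unmapped or released by hand because devres undoes it on detach.

/* Hypothetical driver, not part of the tree. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	writel(0x1, base);	/* example register poke at offset 0 */
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		.name = "foo-example",	/* hypothetical */
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");

Newer drivers often collapse the first two calls into devm_platform_ioremap_resource(pdev, 0), which is equivalent for this purpose.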
// SPDX-License-Identifier: GPL-2.0-or-later OR copyleft-next-0.3.1 /* * kmod stress test driver * * Copyright (C) 2017 Luis R. Rodriguez <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* * This driver provides an interface to trigger and test the kernel's * module loader through a series of configurations and a few triggers. * To test this driver use the following script as root: * * tools/testing/selftests/kmod/kmod.sh --help */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/kmod.h> #include <linux/printk.h> #include <linux/kthread.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/device.h> #define TEST_START_NUM_THREADS 50 #define TEST_START_DRIVER "test_module" #define TEST_START_TEST_FS "xfs" #define TEST_START_TEST_CASE TEST_KMOD_DRIVER static bool force_init_test = false; module_param(force_init_test, bool_enable_only, 0644); MODULE_PARM_DESC(force_init_test, "Force kicking a test immediately after driver loads"); /* * For device allocation / registration */ static DEFINE_MUTEX(reg_dev_mutex); static LIST_HEAD(reg_test_devs); /* * num_test_devs actually represents the *next* ID of the next * device we will allow to create. */ static int num_test_devs; /** * enum kmod_test_case - linker table test case * @TEST_KMOD_DRIVER: stress tests request_module() * @TEST_KMOD_FS_TYPE: stress tests get_fs_type() * * If you add a test case, please be sure to review if you need to set * @need_mod_put for your tests case. */ enum kmod_test_case { __TEST_KMOD_INVALID = 0, TEST_KMOD_DRIVER, TEST_KMOD_FS_TYPE, __TEST_KMOD_MAX, }; struct test_config { char *test_driver; char *test_fs; unsigned int num_threads; enum kmod_test_case test_case; int test_result; }; struct kmod_test_device; /** * struct kmod_test_device_info - thread info * * @ret_sync: return value if request_module() is used, sync request for * @TEST_KMOD_DRIVER * @fs_sync: return value of get_fs_type() for @TEST_KMOD_FS_TYPE * @thread_idx: thread ID * @test_dev: test device test is being performed under * @need_mod_put: Some tests (get_fs_type() is one) requires putting the module * (module_put(fs_sync->owner)) when done, otherwise you will not be able * to unload the respective modules and re-test. We use this to keep * accounting of when we need this and to help out in case we need to * error out and deal with module_put() on error. 
*/ struct kmod_test_device_info { int ret_sync; struct file_system_type *fs_sync; struct task_struct *task_sync; unsigned int thread_idx; struct kmod_test_device *test_dev; bool need_mod_put; }; /** * struct kmod_test_device - test device to help test kmod * * @dev_idx: unique ID for test device * @config: configuration for the test * @misc_dev: we use a misc device under the hood * @dev: pointer to misc_dev's own struct device * @config_mutex: protects configuration of test * @trigger_mutex: the test trigger can only be fired once at a time * @thread_lock: protects @done count, and the @info per each thread * @done: number of threads which have completed or failed * @test_is_oom: when we run out of memory, use this to halt moving forward * @kthreads_done: completion used to signal when all work is done * @list: needed to be part of the reg_test_devs * @info: array of info for each thread */ struct kmod_test_device { int dev_idx; struct test_config config; struct miscdevice misc_dev; struct device *dev; struct mutex config_mutex; struct mutex trigger_mutex; struct mutex thread_mutex; unsigned int done; bool test_is_oom; struct completion kthreads_done; struct list_head list; struct kmod_test_device_info *info; }; static const char *test_case_str(enum kmod_test_case test_case) { switch (test_case) { case TEST_KMOD_DRIVER: return "TEST_KMOD_DRIVER"; case TEST_KMOD_FS_TYPE: return "TEST_KMOD_FS_TYPE"; default: return "invalid"; } } static struct miscdevice *dev_to_misc_dev(struct device *dev) { return dev_get_drvdata(dev); } static struct kmod_test_device *misc_dev_to_test_dev(struct miscdevice *misc_dev) { return container_of(misc_dev, struct kmod_test_device, misc_dev); } static struct kmod_test_device *dev_to_test_dev(struct device *dev) { struct miscdevice *misc_dev; misc_dev = dev_to_misc_dev(dev); return misc_dev_to_test_dev(misc_dev); } /* Must run with thread_mutex held */ static void kmod_test_done_check(struct kmod_test_device *test_dev, unsigned int idx) { struct test_config *config = &test_dev->config; test_dev->done++; dev_dbg(test_dev->dev, "Done thread count: %u\n", test_dev->done); if (test_dev->done == config->num_threads) { dev_info(test_dev->dev, "Done: %u threads have all run now\n", test_dev->done); dev_info(test_dev->dev, "Last thread to run: %u\n", idx); complete(&test_dev->kthreads_done); } } static void test_kmod_put_module(struct kmod_test_device_info *info) { struct kmod_test_device *test_dev = info->test_dev; struct test_config *config = &test_dev->config; if (!info->need_mod_put) return; switch (config->test_case) { case TEST_KMOD_DRIVER: break; case TEST_KMOD_FS_TYPE: if (info->fs_sync && info->fs_sync->owner) module_put(info->fs_sync->owner); break; default: BUG(); } info->need_mod_put = true; } static int run_request(void *data) { struct kmod_test_device_info *info = data; struct kmod_test_device *test_dev = info->test_dev; struct test_config *config = &test_dev->config; switch (config->test_case) { case TEST_KMOD_DRIVER: info->ret_sync = request_module("%s", config->test_driver); break; case TEST_KMOD_FS_TYPE: info->fs_sync = get_fs_type(config->test_fs); info->need_mod_put = true; break; default: /* __trigger_config_run() already checked for test sanity */ BUG(); return -EINVAL; } dev_dbg(test_dev->dev, "Ran thread %u\n", info->thread_idx); test_kmod_put_module(info); mutex_lock(&test_dev->thread_mutex); info->task_sync = NULL; kmod_test_done_check(test_dev, info->thread_idx); mutex_unlock(&test_dev->thread_mutex); return 0; } static int 
tally_work_test(struct kmod_test_device_info *info) { struct kmod_test_device *test_dev = info->test_dev; struct test_config *config = &test_dev->config; int err_ret = 0; switch (config->test_case) { case TEST_KMOD_DRIVER: /* * Only capture errors, if one is found that's * enough, for now. */ if (info->ret_sync != 0) err_ret = info->ret_sync; dev_info(test_dev->dev, "Sync thread %d return status: %d\n", info->thread_idx, info->ret_sync); break; case TEST_KMOD_FS_TYPE: /* For now we make this simple */ if (!info->fs_sync) err_ret = -EINVAL; dev_info(test_dev->dev, "Sync thread %u fs: %s\n", info->thread_idx, info->fs_sync ? config->test_fs : "NULL"); break; default: BUG(); } return err_ret; } /* * XXX: add result option to display if all errors did not match. * For now we just keep any error code if one was found. * * If this ran it means *all* tasks were created fine and we * are now just collecting results. * * Only propagate errors, do not override with a subsequent success case. */ static void tally_up_work(struct kmod_test_device *test_dev) { struct test_config *config = &test_dev->config; struct kmod_test_device_info *info; unsigned int idx; int err_ret = 0; int ret = 0; mutex_lock(&test_dev->thread_mutex); dev_info(test_dev->dev, "Results:\n"); for (idx=0; idx < config->num_threads; idx++) { info = &test_dev->info[idx]; ret = tally_work_test(info); if (ret) err_ret = ret; } /* * Note: request_module() returns 256 for a module not found even * though modprobe itself returns 1. */ config->test_result = err_ret; mutex_unlock(&test_dev->thread_mutex); } static int try_one_request(struct kmod_test_device *test_dev, unsigned int idx) { struct kmod_test_device_info *info = &test_dev->info[idx]; int fail_ret = -ENOMEM; mutex_lock(&test_dev->thread_mutex); info->thread_idx = idx; info->test_dev = test_dev; info->task_sync = kthread_run(run_request, info, "%s-%u", KBUILD_MODNAME, idx); if (!info->task_sync || IS_ERR(info->task_sync)) { test_dev->test_is_oom = true; dev_err(test_dev->dev, "Setting up thread %u failed\n", idx); info->task_sync = NULL; goto err_out; } else dev_dbg(test_dev->dev, "Kicked off thread %u\n", idx); mutex_unlock(&test_dev->thread_mutex); return 0; err_out: info->ret_sync = fail_ret; mutex_unlock(&test_dev->thread_mutex); return fail_ret; } static void test_dev_kmod_stop_tests(struct kmod_test_device *test_dev) { struct test_config *config = &test_dev->config; struct kmod_test_device_info *info; unsigned int i; dev_info(test_dev->dev, "Ending request_module() tests\n"); mutex_lock(&test_dev->thread_mutex); for (i=0; i < config->num_threads; i++) { info = &test_dev->info[i]; if (info->task_sync && !IS_ERR(info->task_sync)) { dev_info(test_dev->dev, "Stopping still-running thread %i\n", i); kthread_stop(info->task_sync); } /* * info->task_sync is well protected, it can only be * NULL or a pointer to a struct. If its NULL we either * never ran, or we did and we completed the work. Completed * tasks *always* put the module for us. This is a sanity * check -- just in case. */ if (info->task_sync && info->need_mod_put) test_kmod_put_module(info); } mutex_unlock(&test_dev->thread_mutex); } /* * Only wait *iff* we did not run into any errors during all of our thread * set up. If run into any issues we stop threads and just bail out with * an error to the trigger. This also means we don't need any tally work * for any threads which fail. 
*/ static int try_requests(struct kmod_test_device *test_dev) { struct test_config *config = &test_dev->config; unsigned int idx; int ret; bool any_error = false; for (idx=0; idx < config->num_threads; idx++) { if (test_dev->test_is_oom) { any_error = true; break; } ret = try_one_request(test_dev, idx); if (ret) { any_error = true; break; } } if (!any_error) { test_dev->test_is_oom = false; dev_info(test_dev->dev, "No errors were found while initializing threads\n"); wait_for_completion(&test_dev->kthreads_done); tally_up_work(test_dev); } else { test_dev->test_is_oom = true; dev_info(test_dev->dev, "At least one thread failed to start, stop all work\n"); test_dev_kmod_stop_tests(test_dev); return -ENOMEM; } return 0; } static int run_test_driver(struct kmod_test_device *test_dev) { struct test_config *config = &test_dev->config; dev_info(test_dev->dev, "Test case: %s (%u)\n", test_case_str(config->test_case), config->test_case); dev_info(test_dev->dev, "Test driver to load: %s\n", config->test_driver); dev_info(test_dev->dev, "Number of threads to run: %u\n", config->num_threads); dev_info(test_dev->dev, "Thread IDs will range from 0 - %u\n", config->num_threads - 1); return try_requests(test_dev); } static int run_test_fs_type(struct kmod_test_device *test_dev) { struct test_config *config = &test_dev->config; dev_info(test_dev->dev, "Test case: %s (%u)\n", test_case_str(config->test_case), config->test_case); dev_info(test_dev->dev, "Test filesystem to load: %s\n", config->test_fs); dev_info(test_dev->dev, "Number of threads to run: %u\n", config->num_threads); dev_info(test_dev->dev, "Thread IDs will range from 0 - %u\n", config->num_threads - 1); return try_requests(test_dev); } static ssize_t config_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kmod_test_device *test_dev = dev_to_test_dev(dev); struct test_config *config = &test_dev->config; int len = 0; mutex_lock(&test_dev->config_mutex); len += snprintf(buf, PAGE_SIZE, "Custom trigger configuration for: %s\n", dev_name(dev)); len += snprintf(buf+len, PAGE_SIZE - len, "Number of threads:\t%u\n", config->num_threads); len += snprintf(buf+len, PAGE_SIZE - len, "Test_case:\t%s (%u)\n", test_case_str(config->test_case), config->test_case); if (config->test_driver) len += snprintf(buf+len, PAGE_SIZE - len, "driver:\t%s\n", config->test_driver); else len += snprintf(buf+len, PAGE_SIZE - len, "driver:\tEMPTY\n"); if (config->test_fs) len += snprintf(buf+len, PAGE_SIZE - len, "fs:\t%s\n", config->test_fs); else len += snprintf(buf+len, PAGE_SIZE - len, "fs:\tEMPTY\n"); mutex_unlock(&test_dev->config_mutex); return len; } static DEVICE_ATTR_RO(config); /* * This ensures we don't allow kicking threads through if our configuration * is faulty. 
*/ static int __trigger_config_run(struct kmod_test_device *test_dev) { struct test_config *config = &test_dev->config; test_dev->done = 0; switch (config->test_case) { case TEST_KMOD_DRIVER: return run_test_driver(test_dev); case TEST_KMOD_FS_TYPE: return run_test_fs_type(test_dev); default: dev_warn(test_dev->dev, "Invalid test case requested: %u\n", config->test_case); return -EINVAL; } } static int trigger_config_run(struct kmod_test_device *test_dev) { struct test_config *config = &test_dev->config; int ret; mutex_lock(&test_dev->trigger_mutex); mutex_lock(&test_dev->config_mutex); ret = __trigger_config_run(test_dev); if (ret < 0) goto out; dev_info(test_dev->dev, "General test result: %d\n", config->test_result); /* * We must return 0 after a trigger even unless something went * wrong with the setup of the test. If the test setup went fine * then userspace must just check the result of config->test_result. * One issue with relying on the return from a call in the kernel * is if the kernel returns a positive value using this trigger * will not return the value to userspace, it would be lost. * * By not relying on capturing the return value of tests we are using * through the trigger it also us to run tests with set -e and only * fail when something went wrong with the driver upon trigger * requests. */ ret = 0; out: mutex_unlock(&test_dev->config_mutex); mutex_unlock(&test_dev->trigger_mutex); return ret; } static ssize_t trigger_config_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kmod_test_device *test_dev = dev_to_test_dev(dev); int ret; if (test_dev->test_is_oom) return -ENOMEM; /* For all intents and purposes we don't care what userspace * sent this trigger, we care only that we were triggered. * We treat the return value only for caputuring issues with * the test setup. At this point all the test variables should * have been allocated so typically this should never fail. */ ret = trigger_config_run(test_dev); if (unlikely(ret < 0)) goto out; /* * Note: any return > 0 will be treated as success * and the error value will not be available to userspace. * Do not rely on trying to send to userspace a test value * return value as positive return errors will be lost. */ if (WARN_ON(ret > 0)) return -EINVAL; ret = count; out: return ret; } static DEVICE_ATTR_WO(trigger_config); /* * XXX: move to kstrncpy() once merged. * * Users should use kfree_const() when freeing these. 
*/ static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp) { *dst = kstrndup(name, count, gfp); if (!*dst) return -ENOSPC; return count; } static int config_copy_test_driver_name(struct test_config *config, const char *name, size_t count) { return __kstrncpy(&config->test_driver, name, count, GFP_KERNEL); } static int config_copy_test_fs(struct test_config *config, const char *name, size_t count) { return __kstrncpy(&config->test_fs, name, count, GFP_KERNEL); } static void __kmod_config_free(struct test_config *config) { if (!config) return; kfree_const(config->test_driver); config->test_driver = NULL; kfree_const(config->test_fs); config->test_fs = NULL; } static void kmod_config_free(struct kmod_test_device *test_dev) { struct test_config *config; if (!test_dev) return; config = &test_dev->config; mutex_lock(&test_dev->config_mutex); __kmod_config_free(config); mutex_unlock(&test_dev->config_mutex); } static ssize_t config_test_driver_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kmod_test_device *test_dev = dev_to_test_dev(dev); struct test_config *config = &test_dev->config; int copied; mutex_lock(&test_dev->config_mutex); kfree_const(config->test_driver); config->test_driver = NULL; copied = config_copy_test_driver_name(config, buf, count); mutex_unlock(&test_dev->config_mutex); return copied; } /* * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE. */ static ssize_t config_test_show_str(struct mutex *config_mutex, char *dst, char *src) { int len; mutex_lock(config_mutex); len = snprintf(dst, PAGE_SIZE, "%s\n", src); mutex_unlock(config_mutex); return len; } static ssize_t config_test_driver_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kmod_test_device *test_dev = dev_to_test_dev(dev); struct test_config *config = &test_dev->config; return config_test_show_str(&test_dev->config_mutex, buf, config->test_driver); } static DEVICE_ATTR_RW(config_test_driver); static ssize_t config_test_fs_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kmod_test_device *test_dev = dev_to_test_dev(dev); struct test_config *config = &test_dev->config; int copied; mutex_lock(&test_dev->config_mutex); kfree_const(config->test_fs); config->test_fs = NULL; copied = config_copy_test_fs(config, buf, count); mutex_unlock(&test_dev->config_mutex); return copied; } static ssize_t config_test_fs_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kmod_test_device *test_dev = dev_to_test_dev(dev); struct test_config *config = &test_dev->config; return config_test_show_str(&test_dev->config_mutex, buf, config->test_fs); } static DEVICE_ATTR_RW(config_test_fs); static int trigger_config_run_type(struct kmod_test_device *test_dev, enum kmod_test_case test_case, const char *test_str) { int copied = 0; struct test_config *config = &test_dev->config; mutex_lock(&test_dev->config_mutex); switch (test_case) { case TEST_KMOD_DRIVER: kfree_const(config->test_driver); config->test_driver = NULL; copied = config_copy_test_driver_name(config, test_str, strlen(test_str)); break; case TEST_KMOD_FS_TYPE: kfree_const(config->test_fs); config->test_fs = NULL; copied = config_copy_test_fs(config, test_str, strlen(test_str)); break; default: mutex_unlock(&test_dev->config_mutex); return -EINVAL; } config->test_case = test_case; mutex_unlock(&test_dev->config_mutex); if (copied <= 0 || copied != strlen(test_str)) { test_dev->test_is_oom = true; return -ENOMEM; } 
test_dev->test_is_oom = false; return trigger_config_run(test_dev); } static void free_test_dev_info(struct kmod_test_device *test_dev) { vfree(test_dev->info); test_dev->info = NULL; } static int kmod_config_sync_info(struct kmod_test_device *test_dev) { struct test_config *config = &test_dev->config; free_test_dev_info(test_dev); test_dev->info = vzalloc(array_size(sizeof(struct kmod_test_device_info), config->num_threads)); if (!test_dev->info) return -ENOMEM; return 0; } /* * Old kernels may not have this, if you want to port this code to * test it on older kernels. */ #ifdef get_kmod_umh_limit static unsigned int kmod_init_test_thread_limit(void) { return get_kmod_umh_limit(); } #else static unsigned int kmod_init_test_thread_limit(void) { return TEST_START_NUM_THREADS; } #endif static int __kmod_config_init(struct kmod_test_device *test_dev) { struct test_config *config = &test_dev->config; int ret = -ENOMEM, copied; __kmod_config_free(config); copied = config_copy_test_driver_name(config, TEST_START_DRIVER, strlen(TEST_START_DRIVER)); if (copied != strlen(TEST_START_DRIVER)) goto err_out; copied = config_copy_test_fs(config, TEST_START_TEST_FS, strlen(TEST_START_TEST_FS)); if (copied != strlen(TEST_START_TEST_FS)) goto err_out; config->num_threads = kmod_init_test_thread_limit(); config->test_result = 0; config->test_case = TEST_START_TEST_CASE; ret = kmod_config_sync_info(test_dev); if (ret) goto err_out; test_dev->test_is_oom = false; return 0; err_out: test_dev->test_is_oom = true; WARN_ON(test_dev->test_is_oom); __kmod_config_free(config); return ret; } static ssize_t reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kmod_test_device *test_dev = dev_to_test_dev(dev); int ret; mutex_lock(&test_dev->trigger_mutex); mutex_lock(&test_dev->config_mutex); ret = __kmod_config_init(test_dev); if (ret < 0) { ret = -ENOMEM; dev_err(dev, "could not alloc settings for config trigger: %d\n", ret); goto out; } dev_info(dev, "reset\n"); ret = count; out: mutex_unlock(&test_dev->config_mutex); mutex_unlock(&test_dev->trigger_mutex); return ret; } static DEVICE_ATTR_WO(reset); static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev, const char *buf, size_t size, unsigned int *config, int (*test_sync)(struct kmod_test_device *test_dev)) { int ret; unsigned int val; unsigned int old_val; ret = kstrtouint(buf, 10, &val); if (ret) return ret; mutex_lock(&test_dev->config_mutex); old_val = *config; *(unsigned int *)config = val; ret = test_sync(test_dev); if (ret) { *(unsigned int *)config = old_val; ret = test_sync(test_dev); WARN_ON(ret); mutex_unlock(&test_dev->config_mutex); return -EINVAL; } mutex_unlock(&test_dev->config_mutex); /* Always return full write size even if we didn't consume all */ return size; } static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev, const char *buf, size_t size, unsigned int *config, unsigned int min, unsigned int max) { unsigned int val; int ret; ret = kstrtouint(buf, 10, &val); if (ret) return ret; if (val < min || val > max) return -EINVAL; mutex_lock(&test_dev->config_mutex); *config = val; mutex_unlock(&test_dev->config_mutex); /* Always return full write size even if we didn't consume all */ return size; } static int test_dev_config_update_int(struct kmod_test_device *test_dev, const char *buf, size_t size, int *config) { int val; int ret; ret = kstrtoint(buf, 10, &val); if (ret) return ret; mutex_lock(&test_dev->config_mutex); *config = val; 
mutex_unlock(&test_dev->config_mutex); /* Always return full write size even if we didn't consume all */ return size; } static ssize_t test_dev_config_show_int(struct kmod_test_device *test_dev, char *buf, int config) { int val; mutex_lock(&test_dev->config_mutex); val = config; mutex_unlock(&test_dev->config_mutex); return snprintf(buf, PAGE_SIZE, "%d\n", val); } static ssize_t test_dev_config_show_uint(struct kmod_test_device *test_dev, char *buf, unsigned int config) { unsigned int val; mutex_lock(&test_dev->config_mutex); val = config; mutex_unlock(&test_dev->config_mutex); return snprintf(buf, PAGE_SIZE, "%u\n", val); } static ssize_t test_result_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kmod_test_device *test_dev = dev_to_test_dev(dev); struct test_config *config = &test_dev->config; return test_dev_config_update_int(test_dev, buf, count, &config->test_result); } static ssize_t config_num_threads_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kmod_test_device *test_dev = dev_to_test_dev(dev); struct test_config *config = &test_dev->config; return test_dev_config_update_uint_sync(test_dev, buf, count, &config->num_threads, kmod_config_sync_info); } static ssize_t config_num_threads_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kmod_test_device *test_dev = dev_to_test_dev(dev); struct test_config *config = &test_dev->config; return test_dev_config_show_int(test_dev, buf, config->num_threads); } static DEVICE_ATTR_RW(config_num_threads); static ssize_t config_test_case_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kmod_test_device *test_dev = dev_to_test_dev(dev); struct test_config *config = &test_dev->config; return test_dev_config_update_uint_range(test_dev, buf, count, &config->test_case, __TEST_KMOD_INVALID + 1, __TEST_KMOD_MAX - 1); } static ssize_t config_test_case_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kmod_test_device *test_dev = dev_to_test_dev(dev); struct test_config *config = &test_dev->config; return test_dev_config_show_uint(test_dev, buf, config->test_case); } static DEVICE_ATTR_RW(config_test_case); static ssize_t test_result_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kmod_test_device *test_dev = dev_to_test_dev(dev); struct test_config *config = &test_dev->config; return test_dev_config_show_int(test_dev, buf, config->test_result); } static DEVICE_ATTR_RW(test_result); #define TEST_KMOD_DEV_ATTR(name) &dev_attr_##name.attr static struct attribute *test_dev_attrs[] = { TEST_KMOD_DEV_ATTR(trigger_config), TEST_KMOD_DEV_ATTR(config), TEST_KMOD_DEV_ATTR(reset), TEST_KMOD_DEV_ATTR(config_test_driver), TEST_KMOD_DEV_ATTR(config_test_fs), TEST_KMOD_DEV_ATTR(config_num_threads), TEST_KMOD_DEV_ATTR(config_test_case), TEST_KMOD_DEV_ATTR(test_result), NULL, }; ATTRIBUTE_GROUPS(test_dev); static int kmod_config_init(struct kmod_test_device *test_dev) { int ret; mutex_lock(&test_dev->config_mutex); ret = __kmod_config_init(test_dev); mutex_unlock(&test_dev->config_mutex); return ret; } static struct kmod_test_device *alloc_test_dev_kmod(int idx) { int ret; struct kmod_test_device *test_dev; struct miscdevice *misc_dev; test_dev = vzalloc(sizeof(struct kmod_test_device)); if (!test_dev) goto err_out; mutex_init(&test_dev->config_mutex); mutex_init(&test_dev->trigger_mutex); mutex_init(&test_dev->thread_mutex); 
init_completion(&test_dev->kthreads_done); ret = kmod_config_init(test_dev); if (ret < 0) { pr_err("Cannot alloc kmod_config_init()\n"); goto err_out_free; } test_dev->dev_idx = idx; misc_dev = &test_dev->misc_dev; misc_dev->minor = MISC_DYNAMIC_MINOR; misc_dev->name = kasprintf(GFP_KERNEL, "test_kmod%d", idx); if (!misc_dev->name) { pr_err("Cannot alloc misc_dev->name\n"); goto err_out_free_config; } misc_dev->groups = test_dev_groups; return test_dev; err_out_free_config: free_test_dev_info(test_dev); kmod_config_free(test_dev); err_out_free: vfree(test_dev); test_dev = NULL; err_out: return NULL; } static void free_test_dev_kmod(struct kmod_test_device *test_dev) { if (test_dev) { kfree_const(test_dev->misc_dev.name); test_dev->misc_dev.name = NULL; free_test_dev_info(test_dev); kmod_config_free(test_dev); vfree(test_dev); test_dev = NULL; } } static struct kmod_test_device *register_test_dev_kmod(void) { struct kmod_test_device *test_dev = NULL; int ret; mutex_lock(&reg_dev_mutex); /* int should suffice for number of devices, test for wrap */ if (num_test_devs + 1 == INT_MAX) { pr_err("reached limit of number of test devices\n"); goto out; } test_dev = alloc_test_dev_kmod(num_test_devs); if (!test_dev) goto out; ret = misc_register(&test_dev->misc_dev); if (ret) { pr_err("could not register misc device: %d\n", ret); free_test_dev_kmod(test_dev); test_dev = NULL; goto out; } test_dev->dev = test_dev->misc_dev.this_device; list_add_tail(&test_dev->list, &reg_test_devs); dev_info(test_dev->dev, "interface ready\n"); num_test_devs++; out: mutex_unlock(&reg_dev_mutex); return test_dev; } static int __init test_kmod_init(void) { struct kmod_test_device *test_dev; int ret; test_dev = register_test_dev_kmod(); if (!test_dev) { pr_err("Cannot add first test kmod device\n"); return -ENODEV; } /* * With some work we might be able to gracefully enable * testing with this driver built-in, for now this seems * rather risky. For those willing to try have at it, * and enable the below. Good luck! If that works, try * lowering the init level for more fun. */ if (force_init_test) { ret = trigger_config_run_type(test_dev, TEST_KMOD_DRIVER, "tun"); if (WARN_ON(ret)) return ret; ret = trigger_config_run_type(test_dev, TEST_KMOD_FS_TYPE, "btrfs"); if (WARN_ON(ret)) return ret; } return 0; } late_initcall(test_kmod_init); static void unregister_test_dev_kmod(struct kmod_test_device *test_dev) { mutex_lock(&test_dev->trigger_mutex); mutex_lock(&test_dev->config_mutex); test_dev_kmod_stop_tests(test_dev); dev_info(test_dev->dev, "removing interface\n"); misc_deregister(&test_dev->misc_dev); mutex_unlock(&test_dev->config_mutex); mutex_unlock(&test_dev->trigger_mutex); free_test_dev_kmod(test_dev); } static void __exit test_kmod_exit(void) { struct kmod_test_device *test_dev, *tmp; mutex_lock(&reg_dev_mutex); list_for_each_entry_safe(test_dev, tmp, &reg_test_devs, list) { list_del(&test_dev->list); unregister_test_dev_kmod(test_dev); } mutex_unlock(&reg_dev_mutex); } module_exit(test_kmod_exit); MODULE_AUTHOR("Luis R. Rodriguez <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
lib/test_kmod.c
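/*
 * Editor's illustrative sketch (not part of lib/test_kmod.c): a minimal
 * kernel-module example of the pattern the test above exercises -- several
 * kthreads issuing request_module() concurrently, with a completion used to
 * wait for all of them. All demo_* names are hypothetical; only the kernel
 * APIs used (kthread_run, request_module, completions, atomics) are real.
 */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/kmod.h>
#include <linux/completion.h>
#include <linux/atomic.h>

#define DEMO_NR_THREADS 4

static DECLARE_COMPLETION(demo_done);
static atomic_t demo_pending = ATOMIC_INIT(0);
static char demo_target[] = "tun";	/* arbitrary module name for the demo */

static int demo_thread(void *data)
{
	const char *mod = data;
	int ret = request_module("%s", mod);	/* may legitimately fail */

	pr_info("demo: request_module(%s) returned %d\n", mod, ret);
	if (atomic_dec_and_test(&demo_pending))
		complete(&demo_done);
	return 0;
}

static int __init demo_init(void)
{
	int i;

	atomic_set(&demo_pending, DEMO_NR_THREADS);
	for (i = 0; i < DEMO_NR_THREADS; i++) {
		struct task_struct *t;

		t = kthread_run(demo_thread, demo_target, "demo-kmod/%d", i);
		/* A failed spawn must still be accounted for. */
		if (IS_ERR(t) && atomic_dec_and_test(&demo_pending))
			complete(&demo_done);
	}
	wait_for_completion(&demo_done);
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void) { }
module_exit(demo_exit);

MODULE_LICENSE("GPL");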
// SPDX-License-Identifier: GPL-2.0 /* * Implement the default iomap interfaces * * (C) Copyright 2004 Linus Torvalds */ #include <linux/pci.h> #include <linux/io.h> #include <linux/export.h> #ifdef CONFIG_PCI /** * pci_iomap_range - create a virtual mapping cookie for a PCI BAR * @dev: PCI device that owns the BAR * @bar: BAR number * @offset: map memory at the given offset in BAR * @maxlen: max length of the memory to map * * Using this function you will get a __iomem address to your device BAR. * You can access it using ioread*() and iowrite*(). These functions hide * the details if this is a MMIO or PIO address space and will just do what * you expect from them in the correct way. * * @maxlen specifies the maximum length to map. If you want to get access to * the complete BAR from offset to the end, pass %0 here. * */ void __iomem *pci_iomap_range(struct pci_dev *dev, int bar, unsigned long offset, unsigned long maxlen) { resource_size_t start = pci_resource_start(dev, bar); resource_size_t len = pci_resource_len(dev, bar); unsigned long flags = pci_resource_flags(dev, bar); if (len <= offset || !start) return NULL; len -= offset; start += offset; if (maxlen && len > maxlen) len = maxlen; if (flags & IORESOURCE_IO) return __pci_ioport_map(dev, start, len); if (flags & IORESOURCE_MEM) return ioremap(start, len); /* What? */ return NULL; } EXPORT_SYMBOL(pci_iomap_range); /** * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR * @dev: PCI device that owns the BAR * @bar: BAR number * @offset: map memory at the given offset in BAR * @maxlen: max length of the memory to map * * Using this function you will get a __iomem address to your device BAR. * You can access it using ioread*() and iowrite*(). These functions hide * the details if this is a MMIO or PIO address space and will just do what * you expect from them in the correct way. When possible write combining * is used. * * @maxlen specifies the maximum length to map. If you want to get access to * the complete BAR from offset to the end, pass %0 here. * */ void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar, unsigned long offset, unsigned long maxlen) { resource_size_t start = pci_resource_start(dev, bar); resource_size_t len = pci_resource_len(dev, bar); unsigned long flags = pci_resource_flags(dev, bar); if (flags & IORESOURCE_IO) return NULL; if (len <= offset || !start) return NULL; len -= offset; start += offset; if (maxlen && len > maxlen) len = maxlen; if (flags & IORESOURCE_MEM) return ioremap_wc(start, len); /* What? */ return NULL; } EXPORT_SYMBOL_GPL(pci_iomap_wc_range); /** * pci_iomap - create a virtual mapping cookie for a PCI BAR * @dev: PCI device that owns the BAR * @bar: BAR number * @maxlen: length of the memory to map * * Using this function you will get a __iomem address to your device BAR. * You can access it using ioread*() and iowrite*(). These functions hide * the details if this is a MMIO or PIO address space and will just do what * you expect from them in the correct way. * * @maxlen specifies the maximum length to map. If you want to get access to * the complete BAR without checking for its length first, pass %0 here. 
* */ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) { return pci_iomap_range(dev, bar, 0, maxlen); } EXPORT_SYMBOL(pci_iomap); /** * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR * @dev: PCI device that owns the BAR * @bar: BAR number * @maxlen: length of the memory to map * * Using this function you will get a __iomem address to your device BAR. * You can access it using ioread*() and iowrite*(). These functions hide * the details if this is a MMIO or PIO address space and will just do what * you expect from them in the correct way. When possible write combining * is used. * * @maxlen specifies the maximum length to map. If you want to get access to * the complete BAR without checking for its length first, pass %0 here. * */ void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen) { return pci_iomap_wc_range(dev, bar, 0, maxlen); } EXPORT_SYMBOL_GPL(pci_iomap_wc); /* * pci_iounmap() somewhat illogically comes from lib/iomap.c for the * CONFIG_GENERIC_IOMAP case, because that's the code that knows about * the different IOMAP ranges. * * But if the architecture does not use the generic iomap code, and if * it has _not_ defined it's own private pci_iounmap function, we define * it here. * * NOTE! This default implementation assumes that if the architecture * support ioport mapping (HAS_IOPORT_MAP), the ioport mapping will * be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [, * and does not need unmapping with 'ioport_unmap()'. * * If you have different rules for your architecture, you need to * implement your own pci_iounmap() that knows the rules for where * and how IO vs MEM get mapped. * * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes * from legacy <asm-generic/io.h> header file behavior. In particular, * it would seem to make sense to do the iounmap(p) for the non-IO-space * case here regardless, but that's not what the old header file code * did. Probably incorrectly, but this is meant to be bug-for-bug * compatible. */ #if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP) void pci_iounmap(struct pci_dev *dev, void __iomem *p) { #ifdef ARCH_HAS_GENERIC_IOPORT_MAP uintptr_t start = (uintptr_t) PCI_IOBASE; uintptr_t addr = (uintptr_t) p; if (addr >= start && addr < start + IO_SPACE_LIMIT) return; iounmap(p); #endif } EXPORT_SYMBOL(pci_iounmap); #endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */ #endif /* CONFIG_PCI */
linux-master
lib/pci_iomap.c
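/*
 * Editor's illustrative sketch (not part of lib/pci_iomap.c): typical use of
 * pci_iomap()/pci_iounmap() from a driver probe path. The BAR number and
 * register offset are hypothetical; the PCI and ioread/iowrite calls are the
 * real APIs this file backs.
 */
#include <linux/pci.h>
#include <linux/io.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	u32 ver;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* Map all of BAR 0; a non-zero maxlen would cap the mapping. */
	regs = pci_iomap(pdev, 0, 0);
	if (!regs) {
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	ver = ioread32(regs + 0x00);	/* hypothetical version register */
	dev_info(&pdev->dev, "version 0x%08x\n", ver);

	pci_iounmap(pdev, regs);
	pci_disable_device(pdev);
	return 0;
}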
// SPDX-License-Identifier: GPL-2.0-or-later /* Function to determine if a thread group is single threaded or not * * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells ([email protected]) * - Derived from security/selinux/hooks.c */ #include <linux/sched/signal.h> #include <linux/sched/task.h> #include <linux/sched/mm.h> /* * Returns true if the task does not share ->mm with another thread/process. */ bool current_is_single_threaded(void) { struct task_struct *task = current; struct mm_struct *mm = task->mm; struct task_struct *p, *t; bool ret; if (atomic_read(&task->signal->live) != 1) return false; if (atomic_read(&mm->mm_users) == 1) return true; ret = false; rcu_read_lock(); for_each_process(p) { if (unlikely(p->flags & PF_KTHREAD)) continue; if (unlikely(p == task->group_leader)) continue; for_each_thread(p, t) { if (unlikely(t->mm == mm)) goto found; if (likely(t->mm)) break; /* * t->mm == NULL. Make sure next_thread/next_task * will see other CLONE_VM tasks which might be * forked before exiting. */ smp_rmb(); } } ret = true; found: rcu_read_unlock(); return ret; }
linux-master
lib/is_single_threaded.c
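/*
 * Editor's illustrative sketch (not part of lib/is_single_threaded.c): the
 * helper is typically used to refuse an operation that would be unsafe once
 * other threads share the current mm, e.g. switching per-process security
 * state. demo_set_mode() and demo_mode are hypothetical; the declaration is
 * assumed to come from <linux/sched/task.h>.
 */
#include <linux/sched/task.h>
#include <linux/errno.h>

static int demo_mode;

static int demo_set_mode(int new_mode)
{
	/* Only safe while no other thread can race with the switch. */
	if (!current_is_single_threaded())
		return -EBUSY;

	demo_mode = new_mode;
	return 0;
}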
// SPDX-License-Identifier: GPL-2.0-or-later /* * lib/textsearch.c Generic text search interface * * Authors: Thomas Graf <[email protected]> * Pablo Neira Ayuso <[email protected]> * * ========================================================================== */ /** * DOC: ts_intro * INTRODUCTION * * The textsearch infrastructure provides text searching facilities for * both linear and non-linear data. Individual search algorithms are * implemented in modules and chosen by the user. * * ARCHITECTURE * * .. code-block:: none * * User * +----------------+ * | finish()|<--------------(6)-----------------+ * |get_next_block()|<--------------(5)---------------+ | * | | Algorithm | | * | | +------------------------------+ * | | | init() find() destroy() | * | | +------------------------------+ * | | Core API ^ ^ ^ * | | +---------------+ (2) (4) (8) * | (1)|----->| prepare() |---+ | | * | (3)|----->| find()/next() |-----------+ | * | (7)|----->| destroy() |----------------------+ * +----------------+ +---------------+ * * (1) User configures a search by calling textsearch_prepare() specifying * the search parameters such as the pattern and algorithm name. * (2) Core requests the algorithm to allocate and initialize a search * configuration according to the specified parameters. * (3) User starts the search(es) by calling textsearch_find() or * textsearch_next() to fetch subsequent occurrences. A state variable * is provided to the algorithm to store persistent variables. * (4) Core eventually resets the search offset and forwards the find() * request to the algorithm. * (5) Algorithm calls get_next_block() provided by the user continuously * to fetch the data to be searched in block by block. * (6) Algorithm invokes finish() after the last call to get_next_block * to clean up any leftovers from get_next_block. (Optional) * (7) User destroys the configuration by calling textsearch_destroy(). * (8) Core notifies the algorithm to destroy algorithm specific * allocations. (Optional) * * USAGE * * Before a search can be performed, a configuration must be created * by calling textsearch_prepare() specifying the searching algorithm, * the pattern to look for and flags. As a flag, you can set TS_IGNORECASE * to perform case insensitive matching. But it might slow down * performance of algorithm, so you should use it at own your risk. * The returned configuration may then be used for an arbitrary * amount of times and even in parallel as long as a separate struct * ts_state variable is provided to every instance. * * The actual search is performed by either calling * textsearch_find_continuous() for linear data or by providing * an own get_next_block() implementation and * calling textsearch_find(). Both functions return * the position of the first occurrence of the pattern or UINT_MAX if * no match was found. Subsequent occurrences can be found by calling * textsearch_next() regardless of the linearity of the data. * * Once you're done using a configuration it must be given back via * textsearch_destroy. 
* * EXAMPLE:: * * int pos; * struct ts_config *conf; * struct ts_state state; * const char *pattern = "chicken"; * const char *example = "We dance the funky chicken"; * * conf = textsearch_prepare("kmp", pattern, strlen(pattern), * GFP_KERNEL, TS_AUTOLOAD); * if (IS_ERR(conf)) { * err = PTR_ERR(conf); * goto errout; * } * * pos = textsearch_find_continuous(conf, &state, example, strlen(example)); * if (pos != UINT_MAX) * panic("Oh my god, dancing chickens at %d\n", pos); * * textsearch_destroy(conf); */ /* ========================================================================== */ #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <linux/init.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/err.h> #include <linux/textsearch.h> #include <linux/slab.h> static LIST_HEAD(ts_ops); static DEFINE_SPINLOCK(ts_mod_lock); static inline struct ts_ops *lookup_ts_algo(const char *name) { struct ts_ops *o; rcu_read_lock(); list_for_each_entry_rcu(o, &ts_ops, list) { if (!strcmp(name, o->name)) { if (!try_module_get(o->owner)) o = NULL; rcu_read_unlock(); return o; } } rcu_read_unlock(); return NULL; } /** * textsearch_register - register a textsearch module * @ops: operations lookup table * * This function must be called by textsearch modules to announce * their presence. The specified &@ops must have %name set to a * unique identifier and the callbacks find(), init(), get_pattern(), * and get_pattern_len() must be implemented. * * Returns 0 or -EEXISTS if another module has already registered * with same name. */ int textsearch_register(struct ts_ops *ops) { int err = -EEXIST; struct ts_ops *o; if (ops->name == NULL || ops->find == NULL || ops->init == NULL || ops->get_pattern == NULL || ops->get_pattern_len == NULL) return -EINVAL; spin_lock(&ts_mod_lock); list_for_each_entry(o, &ts_ops, list) { if (!strcmp(ops->name, o->name)) goto errout; } list_add_tail_rcu(&ops->list, &ts_ops); err = 0; errout: spin_unlock(&ts_mod_lock); return err; } EXPORT_SYMBOL(textsearch_register); /** * textsearch_unregister - unregister a textsearch module * @ops: operations lookup table * * This function must be called by textsearch modules to announce * their disappearance for examples when the module gets unloaded. * The &ops parameter must be the same as the one during the * registration. * * Returns 0 on success or -ENOENT if no matching textsearch * registration was found. */ int textsearch_unregister(struct ts_ops *ops) { int err = 0; struct ts_ops *o; spin_lock(&ts_mod_lock); list_for_each_entry(o, &ts_ops, list) { if (o == ops) { list_del_rcu(&o->list); goto out; } } err = -ENOENT; out: spin_unlock(&ts_mod_lock); return err; } EXPORT_SYMBOL(textsearch_unregister); struct ts_linear_state { unsigned int len; const void *data; }; static unsigned int get_linear_data(unsigned int consumed, const u8 **dst, struct ts_config *conf, struct ts_state *state) { struct ts_linear_state *st = (struct ts_linear_state *) state->cb; if (likely(consumed < st->len)) { *dst = st->data + consumed; return st->len - consumed; } return 0; } /** * textsearch_find_continuous - search a pattern in continuous/linear data * @conf: search configuration * @state: search state * @data: data to search in * @len: length of data * * A simplified version of textsearch_find() for continuous/linear data. * Call textsearch_next() to retrieve subsequent matches. * * Returns the position of first occurrence of the pattern or * %UINT_MAX if no occurrence was found. 
*/ unsigned int textsearch_find_continuous(struct ts_config *conf, struct ts_state *state, const void *data, unsigned int len) { struct ts_linear_state *st = (struct ts_linear_state *) state->cb; conf->get_next_block = get_linear_data; st->data = data; st->len = len; return textsearch_find(conf, state); } EXPORT_SYMBOL(textsearch_find_continuous); /** * textsearch_prepare - Prepare a search * @algo: name of search algorithm * @pattern: pattern data * @len: length of pattern * @gfp_mask: allocation mask * @flags: search flags * * Looks up the search algorithm module and creates a new textsearch * configuration for the specified pattern. * * Note: The format of the pattern may not be compatible between * the various search algorithms. * * Returns a new textsearch configuration according to the specified * parameters or a ERR_PTR(). If a zero length pattern is passed, this * function returns EINVAL. */ struct ts_config *textsearch_prepare(const char *algo, const void *pattern, unsigned int len, gfp_t gfp_mask, int flags) { int err = -ENOENT; struct ts_config *conf; struct ts_ops *ops; if (len == 0) return ERR_PTR(-EINVAL); ops = lookup_ts_algo(algo); #ifdef CONFIG_MODULES /* * Why not always autoload you may ask. Some users are * in a situation where requesting a module may deadlock, * especially when the module is located on a NFS mount. */ if (ops == NULL && flags & TS_AUTOLOAD) { request_module("ts_%s", algo); ops = lookup_ts_algo(algo); } #endif if (ops == NULL) goto errout; conf = ops->init(pattern, len, gfp_mask, flags); if (IS_ERR(conf)) { err = PTR_ERR(conf); goto errout; } conf->ops = ops; return conf; errout: if (ops) module_put(ops->owner); return ERR_PTR(err); } EXPORT_SYMBOL(textsearch_prepare); /** * textsearch_destroy - destroy a search configuration * @conf: search configuration * * Releases all references of the configuration and frees * up the memory. */ void textsearch_destroy(struct ts_config *conf) { if (conf->ops) { if (conf->ops->destroy) conf->ops->destroy(conf); module_put(conf->ops->owner); } kfree(conf); } EXPORT_SYMBOL(textsearch_destroy);
linux-master
lib/textsearch.c
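/*
 * Editor's illustrative sketch (not part of lib/textsearch.c): the DOC block
 * above finds only the first match; this variant walks *all* matches in a
 * linear buffer with textsearch_next(). The pattern and buffer are arbitrary
 * demo values.
 */
#include <linux/textsearch.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/err.h>

static void demo_find_all(const char *haystack, unsigned int len)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "abc", 3, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return;

	for (pos = textsearch_find_continuous(conf, &state, haystack, len);
	     pos != UINT_MAX;
	     pos = textsearch_next(conf, &state))
		pr_info("match at offset %u\n", pos);

	textsearch_destroy(conf);
}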
// SPDX-License-Identifier: GPL-2.0-only #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/interval_tree.h> #include <linux/random.h> #include <linux/slab.h> #include <asm/timex.h> #define __param(type, name, init, msg) \ static type name = init; \ module_param(name, type, 0444); \ MODULE_PARM_DESC(name, msg); __param(int, nnodes, 100, "Number of nodes in the interval tree"); __param(int, perf_loops, 1000, "Number of iterations modifying the tree"); __param(int, nsearches, 100, "Number of searches to the interval tree"); __param(int, search_loops, 1000, "Number of iterations searching the tree"); __param(bool, search_all, false, "Searches will iterate all nodes in the tree"); __param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint"); static struct rb_root_cached root = RB_ROOT_CACHED; static struct interval_tree_node *nodes = NULL; static u32 *queries = NULL; static struct rnd_state rnd; static inline unsigned long search(struct rb_root_cached *root, unsigned long start, unsigned long last) { struct interval_tree_node *node; unsigned long results = 0; for (node = interval_tree_iter_first(root, start, last); node; node = interval_tree_iter_next(node, start, last)) results++; return results; } static void init(void) { int i; for (i = 0; i < nnodes; i++) { u32 b = (prandom_u32_state(&rnd) >> 4) % max_endpoint; u32 a = (prandom_u32_state(&rnd) >> 4) % b; nodes[i].start = a; nodes[i].last = b; } /* * Limit the search scope to what the user defined. * Otherwise we are merely measuring empty walks, * which is pointless. */ for (i = 0; i < nsearches; i++) queries[i] = (prandom_u32_state(&rnd) >> 4) % max_endpoint; } static int interval_tree_test_init(void) { int i, j; unsigned long results; cycles_t time1, time2, time; nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node), GFP_KERNEL); if (!nodes) return -ENOMEM; queries = kmalloc_array(nsearches, sizeof(int), GFP_KERNEL); if (!queries) { kfree(nodes); return -ENOMEM; } printk(KERN_ALERT "interval tree insert/remove"); prandom_seed_state(&rnd, 3141592653589793238ULL); init(); time1 = get_cycles(); for (i = 0; i < perf_loops; i++) { for (j = 0; j < nnodes; j++) interval_tree_insert(nodes + j, &root); for (j = 0; j < nnodes; j++) interval_tree_remove(nodes + j, &root); } time2 = get_cycles(); time = time2 - time1; time = div_u64(time, perf_loops); printk(" -> %llu cycles\n", (unsigned long long)time); printk(KERN_ALERT "interval tree search"); for (j = 0; j < nnodes; j++) interval_tree_insert(nodes + j, &root); time1 = get_cycles(); results = 0; for (i = 0; i < search_loops; i++) for (j = 0; j < nsearches; j++) { unsigned long start = search_all ? 0 : queries[j]; unsigned long last = search_all ? max_endpoint : queries[j]; results += search(&root, start, last); } time2 = get_cycles(); time = time2 - time1; time = div_u64(time, search_loops); results = div_u64(results, search_loops); printk(" -> %llu cycles (%lu results)\n", (unsigned long long)time, results); kfree(queries); kfree(nodes); return -EAGAIN; /* Fail will directly unload the module */ } static void interval_tree_test_exit(void) { printk(KERN_ALERT "test exit\n"); } module_init(interval_tree_test_init) module_exit(interval_tree_test_exit) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Michel Lespinasse"); MODULE_DESCRIPTION("Interval Tree test");
linux-master
lib/interval_tree_test.c
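/*
 * Editor's illustrative sketch (not part of lib/interval_tree_test.c): basic
 * use of the interval tree API the test above benchmarks. The ranges are
 * arbitrary demo values.
 */
#include <linux/interval_tree.h>
#include <linux/kernel.h>

static void demo_interval_tree(void)
{
	struct rb_root_cached root = RB_ROOT_CACHED;
	struct interval_tree_node nodes[2] = {
		{ .start = 10, .last = 19 },
		{ .start = 15, .last = 30 },
	};
	struct interval_tree_node *it;

	interval_tree_insert(&nodes[0], &root);
	interval_tree_insert(&nodes[1], &root);

	/* Walk every node overlapping [12, 16]; both demo nodes match. */
	for (it = interval_tree_iter_first(&root, 12, 16); it;
	     it = interval_tree_iter_next(it, 12, 16))
		pr_info("overlap: [%lu, %lu]\n", it->start, it->last);

	interval_tree_remove(&nodes[0], &root);
	interval_tree_remove(&nodes[1], &root);
}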
// SPDX-License-Identifier: GPL-2.0-only /* * Register read and write tracepoints * * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/ftrace.h> #include <linux/module.h> #include <asm-generic/io.h> #define CREATE_TRACE_POINTS #include <trace/events/rwmmio.h> #ifdef CONFIG_TRACE_MMIO_ACCESS void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr, unsigned long caller_addr, unsigned long caller_addr0) { trace_rwmmio_write(caller_addr, caller_addr0, val, width, addr); } EXPORT_SYMBOL_GPL(log_write_mmio); EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_write); void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr, unsigned long caller_addr, unsigned long caller_addr0) { trace_rwmmio_post_write(caller_addr, caller_addr0, val, width, addr); } EXPORT_SYMBOL_GPL(log_post_write_mmio); EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_post_write); void log_read_mmio(u8 width, const volatile void __iomem *addr, unsigned long caller_addr, unsigned long caller_addr0) { trace_rwmmio_read(caller_addr, caller_addr0, width, addr); } EXPORT_SYMBOL_GPL(log_read_mmio); EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_read); void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr, unsigned long caller_addr, unsigned long caller_addr0) { trace_rwmmio_post_read(caller_addr, caller_addr0, val, width, addr); } EXPORT_SYMBOL_GPL(log_post_read_mmio); EXPORT_TRACEPOINT_SYMBOL_GPL(rwmmio_post_read); #endif /* CONFIG_TRACE_MMIO_ACCESS */
linux-master
lib/trace_readwrite.c
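/*
 * Editor's illustrative sketch (not part of lib/trace_readwrite.c): with
 * CONFIG_TRACE_MMIO_ACCESS the generic accessors call these hooks around the
 * raw access, roughly as below. This is a simplified paraphrase of the
 * asm-generic/io.h wiring, not a copy of it.
 */
#include <linux/io.h>
#include <linux/instruction_pointer.h>

static inline void demo_traced_writel(u32 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
	__raw_writel((u32 __force)cpu_to_le32(value), addr);
	log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}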
#include <linux/libfdt_env.h>
#include "../scripts/dtc/libfdt/fdt_wip.c"
linux-master
lib/fdt_wip.c
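/*
 * Editor's illustrative sketch (not part of lib/fdt_wip.c): the "write in
 * place" libfdt helpers pulled in above patch a flattened device tree blob
 * without resizing it. The node path and property names are hypothetical.
 */
#include <linux/libfdt.h>

static void demo_fdt_patch(void *fdt_blob)
{
	int node = fdt_path_offset(fdt_blob, "/demo-node");
	fdt32_t val = cpu_to_fdt32(42);

	if (node < 0)
		return;

	/* Overwrite an existing same-sized property value in place. */
	fdt_setprop_inplace(fdt_blob, node, "demo-prop", &val, sizeof(val));

	/* Turn another property into a NOP without moving anything. */
	fdt_nop_property(fdt_blob, node, "obsolete-prop");
}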
// SPDX-License-Identifier: GPL-2.0 /* * Test module for stress and analyze performance of vmalloc allocator. * (C) 2018 Uladzislau Rezki (Sony) <[email protected]> */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/random.h> #include <linux/kthread.h> #include <linux/moduleparam.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/rwsem.h> #include <linux/mm.h> #include <linux/rcupdate.h> #include <linux/slab.h> #define __param(type, name, init, msg) \ static type name = init; \ module_param(name, type, 0444); \ MODULE_PARM_DESC(name, msg) \ __param(int, nr_threads, 0, "Number of workers to perform tests(min: 1 max: USHRT_MAX)"); __param(bool, sequential_test_order, false, "Use sequential stress tests order"); __param(int, test_repeat_count, 1, "Set test repeat counter"); __param(int, test_loop_count, 1000000, "Set test loop counter"); __param(int, nr_pages, 0, "Set number of pages for fix_size_alloc_test(default: 1)"); __param(bool, use_huge, false, "Use vmalloc_huge in fix_size_alloc_test"); __param(int, run_test_mask, INT_MAX, "Set tests specified in the mask.\n\n" "\t\tid: 1, name: fix_size_alloc_test\n" "\t\tid: 2, name: full_fit_alloc_test\n" "\t\tid: 4, name: long_busy_list_alloc_test\n" "\t\tid: 8, name: random_size_alloc_test\n" "\t\tid: 16, name: fix_align_alloc_test\n" "\t\tid: 32, name: random_size_align_alloc_test\n" "\t\tid: 64, name: align_shift_alloc_test\n" "\t\tid: 128, name: pcpu_alloc_test\n" "\t\tid: 256, name: kvfree_rcu_1_arg_vmalloc_test\n" "\t\tid: 512, name: kvfree_rcu_2_arg_vmalloc_test\n" "\t\tid: 1024, name: vm_map_ram_test\n" /* Add a new test case description here. */ ); /* * Read write semaphore for synchronization of setup * phase that is done in main thread and workers. */ static DECLARE_RWSEM(prepare_for_test_rwsem); /* * Completion tracking for worker threads. */ static DECLARE_COMPLETION(test_all_done_comp); static atomic_t test_n_undone = ATOMIC_INIT(0); static inline void test_report_one_done(void) { if (atomic_dec_and_test(&test_n_undone)) complete(&test_all_done_comp); } static int random_size_align_alloc_test(void) { unsigned long size, align; unsigned int rnd; void *ptr; int i; for (i = 0; i < test_loop_count; i++) { rnd = get_random_u8(); /* * Maximum 1024 pages, if PAGE_SIZE is 4096. */ align = 1 << (rnd % 23); /* * Maximum 10 pages. */ size = ((rnd % 10) + 1) * PAGE_SIZE; ptr = __vmalloc_node(size, align, GFP_KERNEL | __GFP_ZERO, 0, __builtin_return_address(0)); if (!ptr) return -1; vfree(ptr); } return 0; } /* * This test case is supposed to be failed. 
*/ static int align_shift_alloc_test(void) { unsigned long align; void *ptr; int i; for (i = 0; i < BITS_PER_LONG; i++) { align = ((unsigned long) 1) << i; ptr = __vmalloc_node(PAGE_SIZE, align, GFP_KERNEL|__GFP_ZERO, 0, __builtin_return_address(0)); if (!ptr) return -1; vfree(ptr); } return 0; } static int fix_align_alloc_test(void) { void *ptr; int i; for (i = 0; i < test_loop_count; i++) { ptr = __vmalloc_node(5 * PAGE_SIZE, THREAD_ALIGN << 1, GFP_KERNEL | __GFP_ZERO, 0, __builtin_return_address(0)); if (!ptr) return -1; vfree(ptr); } return 0; } static int random_size_alloc_test(void) { unsigned int n; void *p; int i; for (i = 0; i < test_loop_count; i++) { n = get_random_u32_inclusive(1, 100); p = vmalloc(n * PAGE_SIZE); if (!p) return -1; *((__u8 *)p) = 1; vfree(p); } return 0; } static int long_busy_list_alloc_test(void) { void *ptr_1, *ptr_2; void **ptr; int rv = -1; int i; ptr = vmalloc(sizeof(void *) * 15000); if (!ptr) return rv; for (i = 0; i < 15000; i++) ptr[i] = vmalloc(1 * PAGE_SIZE); for (i = 0; i < test_loop_count; i++) { ptr_1 = vmalloc(100 * PAGE_SIZE); if (!ptr_1) goto leave; ptr_2 = vmalloc(1 * PAGE_SIZE); if (!ptr_2) { vfree(ptr_1); goto leave; } *((__u8 *)ptr_1) = 0; *((__u8 *)ptr_2) = 1; vfree(ptr_1); vfree(ptr_2); } /* Success */ rv = 0; leave: for (i = 0; i < 15000; i++) vfree(ptr[i]); vfree(ptr); return rv; } static int full_fit_alloc_test(void) { void **ptr, **junk_ptr, *tmp; int junk_length; int rv = -1; int i; junk_length = fls(num_online_cpus()); junk_length *= (32 * 1024 * 1024 / PAGE_SIZE); ptr = vmalloc(sizeof(void *) * junk_length); if (!ptr) return rv; junk_ptr = vmalloc(sizeof(void *) * junk_length); if (!junk_ptr) { vfree(ptr); return rv; } for (i = 0; i < junk_length; i++) { ptr[i] = vmalloc(1 * PAGE_SIZE); junk_ptr[i] = vmalloc(1 * PAGE_SIZE); } for (i = 0; i < junk_length; i++) vfree(junk_ptr[i]); for (i = 0; i < test_loop_count; i++) { tmp = vmalloc(1 * PAGE_SIZE); if (!tmp) goto error; *((__u8 *)tmp) = 1; vfree(tmp); } /* Success */ rv = 0; error: for (i = 0; i < junk_length; i++) vfree(ptr[i]); vfree(ptr); vfree(junk_ptr); return rv; } static int fix_size_alloc_test(void) { void *ptr; int i; for (i = 0; i < test_loop_count; i++) { if (use_huge) ptr = vmalloc_huge((nr_pages > 0 ? nr_pages:1) * PAGE_SIZE, GFP_KERNEL); else ptr = vmalloc((nr_pages > 0 ? 
nr_pages:1) * PAGE_SIZE); if (!ptr) return -1; *((__u8 *)ptr) = 0; vfree(ptr); } return 0; } static int pcpu_alloc_test(void) { int rv = 0; #ifndef CONFIG_NEED_PER_CPU_KM void __percpu **pcpu; size_t size, align; int i; pcpu = vmalloc(sizeof(void __percpu *) * 35000); if (!pcpu) return -1; for (i = 0; i < 35000; i++) { size = get_random_u32_inclusive(1, PAGE_SIZE / 4); /* * Maximum PAGE_SIZE */ align = 1 << get_random_u32_inclusive(1, 11); pcpu[i] = __alloc_percpu(size, align); if (!pcpu[i]) rv = -1; } for (i = 0; i < 35000; i++) free_percpu(pcpu[i]); vfree(pcpu); #endif return rv; } struct test_kvfree_rcu { struct rcu_head rcu; unsigned char array[20]; }; static int kvfree_rcu_1_arg_vmalloc_test(void) { struct test_kvfree_rcu *p; int i; for (i = 0; i < test_loop_count; i++) { p = vmalloc(1 * PAGE_SIZE); if (!p) return -1; p->array[0] = 'a'; kvfree_rcu_mightsleep(p); } return 0; } static int kvfree_rcu_2_arg_vmalloc_test(void) { struct test_kvfree_rcu *p; int i; for (i = 0; i < test_loop_count; i++) { p = vmalloc(1 * PAGE_SIZE); if (!p) return -1; p->array[0] = 'a'; kvfree_rcu(p, rcu); } return 0; } static int vm_map_ram_test(void) { unsigned long nr_allocated; unsigned int map_nr_pages; unsigned char *v_ptr; struct page **pages; int i; map_nr_pages = nr_pages > 0 ? nr_pages:1; pages = kcalloc(map_nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) return -1; nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages); if (nr_allocated != map_nr_pages) goto cleanup; /* Run the test loop. */ for (i = 0; i < test_loop_count; i++) { v_ptr = vm_map_ram(pages, map_nr_pages, NUMA_NO_NODE); *v_ptr = 'a'; vm_unmap_ram(v_ptr, map_nr_pages); } cleanup: for (i = 0; i < nr_allocated; i++) __free_page(pages[i]); kfree(pages); /* 0 indicates success. */ return nr_allocated != map_nr_pages; } struct test_case_desc { const char *test_name; int (*test_func)(void); }; static struct test_case_desc test_case_array[] = { { "fix_size_alloc_test", fix_size_alloc_test }, { "full_fit_alloc_test", full_fit_alloc_test }, { "long_busy_list_alloc_test", long_busy_list_alloc_test }, { "random_size_alloc_test", random_size_alloc_test }, { "fix_align_alloc_test", fix_align_alloc_test }, { "random_size_align_alloc_test", random_size_align_alloc_test }, { "align_shift_alloc_test", align_shift_alloc_test }, { "pcpu_alloc_test", pcpu_alloc_test }, { "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test }, { "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test }, { "vm_map_ram_test", vm_map_ram_test }, /* Add a new test case here. */ }; struct test_case_data { int test_failed; int test_passed; u64 time; }; static struct test_driver { struct task_struct *task; struct test_case_data data[ARRAY_SIZE(test_case_array)]; unsigned long start; unsigned long stop; } *tdriver; static void shuffle_array(int *arr, int n) { int i, j; for (i = n - 1; i > 0; i--) { /* Cut the range. */ j = get_random_u32_below(i); /* Swap indexes. */ swap(arr[i], arr[j]); } } static int test_func(void *private) { struct test_driver *t = private; int random_array[ARRAY_SIZE(test_case_array)]; int index, i, j; ktime_t kt; u64 delta; for (i = 0; i < ARRAY_SIZE(test_case_array); i++) random_array[i] = i; if (!sequential_test_order) shuffle_array(random_array, ARRAY_SIZE(test_case_array)); /* * Block until initialization is done. */ down_read(&prepare_for_test_rwsem); t->start = get_cycles(); for (i = 0; i < ARRAY_SIZE(test_case_array); i++) { index = random_array[i]; /* * Skip tests if run_test_mask has been specified. 
*/ if (!((run_test_mask & (1 << index)) >> index)) continue; kt = ktime_get(); for (j = 0; j < test_repeat_count; j++) { if (!test_case_array[index].test_func()) t->data[index].test_passed++; else t->data[index].test_failed++; } /* * Take an average time that test took. */ delta = (u64) ktime_us_delta(ktime_get(), kt); do_div(delta, (u32) test_repeat_count); t->data[index].time = delta; } t->stop = get_cycles(); up_read(&prepare_for_test_rwsem); test_report_one_done(); /* * Wait for the kthread_stop() call. */ while (!kthread_should_stop()) msleep(10); return 0; } static int init_test_configurtion(void) { /* * A maximum number of workers is defined as hard-coded * value and set to USHRT_MAX. We add such gap just in * case and for potential heavy stressing. */ nr_threads = clamp(nr_threads, 1, (int) USHRT_MAX); /* Allocate the space for test instances. */ tdriver = kvcalloc(nr_threads, sizeof(*tdriver), GFP_KERNEL); if (tdriver == NULL) return -1; if (test_repeat_count <= 0) test_repeat_count = 1; if (test_loop_count <= 0) test_loop_count = 1; return 0; } static void do_concurrent_test(void) { int i, ret; /* * Set some basic configurations plus sanity check. */ ret = init_test_configurtion(); if (ret < 0) return; /* * Put on hold all workers. */ down_write(&prepare_for_test_rwsem); for (i = 0; i < nr_threads; i++) { struct test_driver *t = &tdriver[i]; t->task = kthread_run(test_func, t, "vmalloc_test/%d", i); if (!IS_ERR(t->task)) /* Success. */ atomic_inc(&test_n_undone); else pr_err("Failed to start %d kthread\n", i); } /* * Now let the workers do their job. */ up_write(&prepare_for_test_rwsem); /* * Sleep quiet until all workers are done with 1 second * interval. Since the test can take a lot of time we * can run into a stack trace of the hung task. That is * why we go with completion_timeout and HZ value. */ do { ret = wait_for_completion_timeout(&test_all_done_comp, HZ); } while (!ret); for (i = 0; i < nr_threads; i++) { struct test_driver *t = &tdriver[i]; int j; if (!IS_ERR(t->task)) kthread_stop(t->task); for (j = 0; j < ARRAY_SIZE(test_case_array); j++) { if (!((run_test_mask & (1 << j)) >> j)) continue; pr_info( "Summary: %s passed: %d failed: %d repeat: %d loops: %d avg: %llu usec\n", test_case_array[j].test_name, t->data[j].test_passed, t->data[j].test_failed, test_repeat_count, test_loop_count, t->data[j].time); } pr_info("All test took worker%d=%lu cycles\n", i, t->stop - t->start); } kvfree(tdriver); } static int vmalloc_test_init(void) { do_concurrent_test(); return -EAGAIN; /* Fail will directly unload the module */ } static void vmalloc_test_exit(void) { } module_init(vmalloc_test_init) module_exit(vmalloc_test_exit) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Uladzislau Rezki"); MODULE_DESCRIPTION("vmalloc test module");
linux-master
lib/test_vmalloc.c
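/*
 * Editor's illustrative sketch (not part of lib/test_vmalloc.c): the core of
 * what each test case above does -- time a tight vmalloc()/vfree() loop and
 * report the average cost per iteration. The loop count is an arbitrary
 * demo value.
 */
#include <linux/vmalloc.h>
#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/kernel.h>

static void demo_time_vmalloc(void)
{
	unsigned int i, loops = 10000;
	ktime_t kt = ktime_get();
	u64 avg_us;

	for (i = 0; i < loops; i++) {
		void *p = vmalloc(PAGE_SIZE);

		if (!p)
			break;
		*(u8 *)p = 1;	/* touch the allocation, as the tests above do */
		vfree(p);
	}

	avg_us = ktime_us_delta(ktime_get(), kt);
	do_div(avg_us, loops);
	pr_info("vmalloc/vfree avg: %llu usec\n", avg_us);
}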
// SPDX-License-Identifier: GPL-2.0-only /* * Kernel module for testing copy_to/from_user infrastructure. * * Copyright 2013 Google Inc. All Rights Reserved * * Authors: * Kees Cook <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/mman.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> /* * Several 32-bit architectures support 64-bit {get,put}_user() calls. * As there doesn't appear to be anything that can safely determine * their capability at compile-time, we just have to opt-out certain archs. */ #if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \ !defined(CONFIG_M68K) && \ !defined(CONFIG_MICROBLAZE) && \ !defined(CONFIG_NIOS2) && \ !defined(CONFIG_PPC32) && \ !defined(CONFIG_SUPERH)) # define TEST_U64 #endif #define test(condition, msg, ...) \ ({ \ int cond = (condition); \ if (cond) \ pr_warn("[%d] " msg "\n", __LINE__, ##__VA_ARGS__); \ cond; \ }) static bool is_zeroed(void *from, size_t size) { return memchr_inv(from, 0x0, size) == NULL; } static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size) { int ret = 0; size_t start, end, i, zero_start, zero_end; if (test(size < 2 * PAGE_SIZE, "buffer too small")) return -EINVAL; /* * We want to cross a page boundary to exercise the code more * effectively. We also don't want to make the size we scan too large, * otherwise the test can take a long time and cause soft lockups. So * scan a 1024 byte region across the page boundary. */ size = 1024; start = PAGE_SIZE - (size / 2); kmem += start; umem += start; zero_start = size / 4; zero_end = size - zero_start; /* * We conduct a series of check_nonzero_user() tests on a block of * memory with the following byte-pattern (trying every possible * [start,end] pair): * * [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ] * * And we verify that check_nonzero_user() acts identically to * memchr_inv(). */ memset(kmem, 0x0, size); for (i = 1; i < zero_start; i += 2) kmem[i] = 0xff; for (i = zero_end; i < size; i += 2) kmem[i] = 0xff; ret |= test(copy_to_user(umem, kmem, size), "legitimate copy_to_user failed"); for (start = 0; start <= size; start++) { for (end = start; end <= size; end++) { size_t len = end - start; int retval = check_zeroed_user(umem + start, len); int expected = is_zeroed(kmem + start, len); ret |= test(retval != expected, "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)", retval, expected, start, end); } } return ret; } static int test_copy_struct_from_user(char *kmem, char __user *umem, size_t size) { int ret = 0; char *umem_src = NULL, *expected = NULL; size_t ksize, usize; umem_src = kmalloc(size, GFP_KERNEL); ret = test(umem_src == NULL, "kmalloc failed"); if (ret) goto out_free; expected = kmalloc(size, GFP_KERNEL); ret = test(expected == NULL, "kmalloc failed"); if (ret) goto out_free; /* Fill umem with a fixed byte pattern. */ memset(umem_src, 0x3e, size); ret |= test(copy_to_user(umem, umem_src, size), "legitimate copy_to_user failed"); /* Check basic case -- (usize == ksize). */ ksize = size; usize = size; memcpy(expected, umem_src, ksize); memset(kmem, 0x0, size); ret |= test(copy_struct_from_user(kmem, ksize, umem, usize), "copy_struct_from_user(usize == ksize) failed"); ret |= test(memcmp(kmem, expected, ksize), "copy_struct_from_user(usize == ksize) gives unexpected copy"); /* Old userspace case -- (usize < ksize). 
*/ ksize = size; usize = size / 2; memcpy(expected, umem_src, usize); memset(expected + usize, 0x0, ksize - usize); memset(kmem, 0x0, size); ret |= test(copy_struct_from_user(kmem, ksize, umem, usize), "copy_struct_from_user(usize < ksize) failed"); ret |= test(memcmp(kmem, expected, ksize), "copy_struct_from_user(usize < ksize) gives unexpected copy"); /* New userspace (-E2BIG) case -- (usize > ksize). */ ksize = size / 2; usize = size; memset(kmem, 0x0, size); ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG, "copy_struct_from_user(usize > ksize) didn't give E2BIG"); /* New userspace (success) case -- (usize > ksize). */ ksize = size / 2; usize = size; memcpy(expected, umem_src, ksize); ret |= test(clear_user(umem + ksize, usize - ksize), "legitimate clear_user failed"); memset(kmem, 0x0, size); ret |= test(copy_struct_from_user(kmem, ksize, umem, usize), "copy_struct_from_user(usize > ksize) failed"); ret |= test(memcmp(kmem, expected, ksize), "copy_struct_from_user(usize > ksize) gives unexpected copy"); out_free: kfree(expected); kfree(umem_src); return ret; } static int __init test_user_copy_init(void) { int ret = 0; char *kmem; char __user *usermem; char *bad_usermem; unsigned long user_addr; u8 val_u8; u16 val_u16; u32 val_u32; #ifdef TEST_U64 u64 val_u64; #endif kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL); if (!kmem) return -ENOMEM; user_addr = vm_mmap(NULL, 0, PAGE_SIZE * 2, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE, 0); if (user_addr >= (unsigned long)(TASK_SIZE)) { pr_warn("Failed to allocate user memory\n"); kfree(kmem); return -ENOMEM; } usermem = (char __user *)user_addr; bad_usermem = (char *)user_addr; /* * Legitimate usage: none of these copies should fail. */ memset(kmem, 0x3a, PAGE_SIZE * 2); ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE), "legitimate copy_to_user failed"); memset(kmem, 0x0, PAGE_SIZE); ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE), "legitimate copy_from_user failed"); ret |= test(memcmp(kmem, kmem + PAGE_SIZE, PAGE_SIZE), "legitimate usercopy failed to copy data"); #define test_legit(size, check) \ do { \ val_##size = check; \ ret |= test(put_user(val_##size, (size __user *)usermem), \ "legitimate put_user (" #size ") failed"); \ val_##size = 0; \ ret |= test(get_user(val_##size, (size __user *)usermem), \ "legitimate get_user (" #size ") failed"); \ ret |= test(val_##size != check, \ "legitimate get_user (" #size ") failed to do copy"); \ if (val_##size != check) { \ pr_info("0x%llx != 0x%llx\n", \ (unsigned long long)val_##size, \ (unsigned long long)check); \ } \ } while (0) test_legit(u8, 0x5a); test_legit(u16, 0x5a5b); test_legit(u32, 0x5a5b5c5d); #ifdef TEST_U64 test_legit(u64, 0x5a5b5c5d6a6b6c6d); #endif #undef test_legit /* Test usage of check_nonzero_user(). */ ret |= test_check_nonzero_user(kmem, usermem, 2 * PAGE_SIZE); /* Test usage of copy_struct_from_user(). */ ret |= test_copy_struct_from_user(kmem, usermem, 2 * PAGE_SIZE); /* * Invalid usage: none of these copies should succeed. */ /* Prepare kernel memory with check values. */ memset(kmem, 0x5a, PAGE_SIZE); memset(kmem + PAGE_SIZE, 0, PAGE_SIZE); /* Reject kernel-to-kernel copies through copy_from_user(). */ ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE), PAGE_SIZE), "illegal all-kernel copy_from_user passed"); /* Destination half of buffer should have been zeroed. 
*/ ret |= test(memcmp(kmem + PAGE_SIZE, kmem, PAGE_SIZE), "zeroing failure for illegal all-kernel copy_from_user"); #if 0 /* * When running with SMAP/PAN/etc, this will Oops the kernel * due to the zeroing of userspace memory on failure. This needs * to be tested in LKDTM instead, since this test module does not * expect to explode. */ ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem, PAGE_SIZE), "illegal reversed copy_from_user passed"); #endif ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE, PAGE_SIZE), "illegal all-kernel copy_to_user passed"); ret |= test(!copy_to_user((char __user *)kmem, bad_usermem, PAGE_SIZE), "illegal reversed copy_to_user passed"); #define test_illegal(size, check) \ do { \ val_##size = (check); \ ret |= test(!get_user(val_##size, (size __user *)kmem), \ "illegal get_user (" #size ") passed"); \ ret |= test(val_##size != (size)0, \ "zeroing failure for illegal get_user (" #size ")"); \ if (val_##size != (size)0) { \ pr_info("0x%llx != 0\n", \ (unsigned long long)val_##size); \ } \ ret |= test(!put_user(val_##size, (size __user *)kmem), \ "illegal put_user (" #size ") passed"); \ } while (0) test_illegal(u8, 0x5a); test_illegal(u16, 0x5a5b); test_illegal(u32, 0x5a5b5c5d); #ifdef TEST_U64 test_illegal(u64, 0x5a5b5c5d6a6b6c6d); #endif #undef test_illegal vm_munmap(user_addr, PAGE_SIZE * 2); kfree(kmem); if (ret == 0) { pr_info("tests passed.\n"); return 0; } return -EINVAL; } module_init(test_user_copy_init); static void __exit test_user_copy_exit(void) { pr_info("unloaded.\n"); } module_exit(test_user_copy_exit); MODULE_AUTHOR("Kees Cook <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
lib/test_user_copy.c
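The copy_struct_from_user() cases in test_user_copy.c above mirror how extensible syscalls and ioctls consume versioned structs. The following is a hedged sketch of that calling pattern only — struct foo_args, foo_set() and do_foo_set() are hypothetical names, not part of the file above — where the caller passes its own struct size as ksize and the user-supplied size as usize:

/* Hypothetical sketch, not part of the kernel sources above. */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct foo_args {
	__u32 flags;
	__u32 pad;
	__u64 addr;
	/* newer versions of the interface may append fields here */
};

long do_foo_set(u32 flags, u64 addr);	/* hypothetical helper, declared elsewhere */

static long foo_set(void __user *uarg, size_t usize)
{
	struct foo_args args;
	int err;

	/*
	 * Zero-fills the tail when usize < sizeof(args); returns -E2BIG only
	 * if userspace passed a larger struct whose trailing bytes are not
	 * all zero -- the same four cases test_copy_struct_from_user() above
	 * walks through.
	 */
	err = copy_struct_from_user(&args, sizeof(args), uarg, usize);
	if (err)
		return err;

	return do_foo_set(args.flags, args.addr);
}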
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Intel Corporation * Author: Johannes Berg <[email protected]> */ #include <linux/types.h> #include <linux/slab.h> #include <linux/logic_iomem.h> #include <asm/io.h> struct logic_iomem_region { const struct resource *res; const struct logic_iomem_region_ops *ops; struct list_head list; }; struct logic_iomem_area { const struct logic_iomem_ops *ops; void *priv; }; #define AREA_SHIFT 24 #define MAX_AREA_SIZE (1 << AREA_SHIFT) #define MAX_AREAS ((1U << 31) / MAX_AREA_SIZE) #define AREA_BITS ((MAX_AREAS - 1) << AREA_SHIFT) #define AREA_MASK (MAX_AREA_SIZE - 1) #ifdef CONFIG_64BIT #define IOREMAP_BIAS 0xDEAD000000000000UL #define IOREMAP_MASK 0xFFFFFFFF00000000UL #else #define IOREMAP_BIAS 0x80000000UL #define IOREMAP_MASK 0x80000000UL #endif static DEFINE_MUTEX(regions_mtx); static LIST_HEAD(regions_list); static struct logic_iomem_area mapped_areas[MAX_AREAS]; int logic_iomem_add_region(struct resource *resource, const struct logic_iomem_region_ops *ops) { struct logic_iomem_region *rreg; int err; if (WARN_ON(!resource || !ops)) return -EINVAL; if (WARN_ON((resource->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM)) return -EINVAL; rreg = kzalloc(sizeof(*rreg), GFP_KERNEL); if (!rreg) return -ENOMEM; err = request_resource(&iomem_resource, resource); if (err) { kfree(rreg); return -ENOMEM; } mutex_lock(&regions_mtx); rreg->res = resource; rreg->ops = ops; list_add_tail(&rreg->list, &regions_list); mutex_unlock(&regions_mtx); return 0; } EXPORT_SYMBOL(logic_iomem_add_region); #ifndef CONFIG_INDIRECT_IOMEM_FALLBACK static void __iomem *real_ioremap(phys_addr_t offset, size_t size) { WARN(1, "invalid ioremap(0x%llx, 0x%zx)\n", (unsigned long long)offset, size); return NULL; } static void real_iounmap(volatile void __iomem *addr) { WARN(1, "invalid iounmap for addr 0x%llx\n", (unsigned long long)(uintptr_t __force)addr); } #endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */ void __iomem *ioremap(phys_addr_t offset, size_t size) { void __iomem *ret = NULL; struct logic_iomem_region *rreg, *found = NULL; int i; mutex_lock(&regions_mtx); list_for_each_entry(rreg, &regions_list, list) { if (rreg->res->start > offset) continue; if (rreg->res->end < offset + size - 1) continue; found = rreg; break; } if (!found) goto out; for (i = 0; i < MAX_AREAS; i++) { long offs; if (mapped_areas[i].ops) continue; offs = rreg->ops->map(offset - found->res->start, size, &mapped_areas[i].ops, &mapped_areas[i].priv); if (offs < 0) { mapped_areas[i].ops = NULL; break; } if (WARN_ON(!mapped_areas[i].ops)) { mapped_areas[i].ops = NULL; break; } ret = (void __iomem *)(IOREMAP_BIAS + (i << AREA_SHIFT) + offs); break; } out: mutex_unlock(&regions_mtx); if (ret) return ret; return real_ioremap(offset, size); } EXPORT_SYMBOL(ioremap); static inline struct logic_iomem_area * get_area(const volatile void __iomem *addr) { unsigned long a = (unsigned long)addr; unsigned int idx; if (WARN_ON((a & IOREMAP_MASK) != IOREMAP_BIAS)) return NULL; idx = (a & AREA_BITS) >> AREA_SHIFT; if (mapped_areas[idx].ops) return &mapped_areas[idx]; return NULL; } void iounmap(volatile void __iomem *addr) { struct logic_iomem_area *area = get_area(addr); if (!area) { real_iounmap(addr); return; } if (area->ops->unmap) area->ops->unmap(area->priv); mutex_lock(&regions_mtx); area->ops = NULL; area->priv = NULL; mutex_unlock(&regions_mtx); } EXPORT_SYMBOL(iounmap); #ifndef CONFIG_INDIRECT_IOMEM_FALLBACK #define MAKE_FALLBACK(op, sz) \ static u##sz real_raw_read ## op(const volatile void __iomem *addr) \ 
{ \ WARN(1, "Invalid read" #op " at address %llx\n", \ (unsigned long long)(uintptr_t __force)addr); \ return (u ## sz)~0ULL; \ } \ \ static void real_raw_write ## op(u ## sz val, \ volatile void __iomem *addr) \ { \ WARN(1, "Invalid writeq" #op " of 0x%llx at address %llx\n", \ (unsigned long long)val, \ (unsigned long long)(uintptr_t __force)addr);\ } \ MAKE_FALLBACK(b, 8); MAKE_FALLBACK(w, 16); MAKE_FALLBACK(l, 32); #ifdef CONFIG_64BIT MAKE_FALLBACK(q, 64); #endif static void real_memset_io(volatile void __iomem *addr, int value, size_t size) { WARN(1, "Invalid memset_io at address 0x%llx\n", (unsigned long long)(uintptr_t __force)addr); } static void real_memcpy_fromio(void *buffer, const volatile void __iomem *addr, size_t size) { WARN(1, "Invalid memcpy_fromio at address 0x%llx\n", (unsigned long long)(uintptr_t __force)addr); memset(buffer, 0xff, size); } static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size) { WARN(1, "Invalid memcpy_toio at address 0x%llx\n", (unsigned long long)(uintptr_t __force)addr); } #endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */ #define MAKE_OP(op, sz) \ u##sz __raw_read ## op(const volatile void __iomem *addr) \ { \ struct logic_iomem_area *area = get_area(addr); \ \ if (!area) \ return real_raw_read ## op(addr); \ \ return (u ## sz) area->ops->read(area->priv, \ (unsigned long)addr & AREA_MASK,\ sz / 8); \ } \ EXPORT_SYMBOL(__raw_read ## op); \ \ void __raw_write ## op(u ## sz val, volatile void __iomem *addr) \ { \ struct logic_iomem_area *area = get_area(addr); \ \ if (!area) { \ real_raw_write ## op(val, addr); \ return; \ } \ \ area->ops->write(area->priv, \ (unsigned long)addr & AREA_MASK, \ sz / 8, val); \ } \ EXPORT_SYMBOL(__raw_write ## op) MAKE_OP(b, 8); MAKE_OP(w, 16); MAKE_OP(l, 32); #ifdef CONFIG_64BIT MAKE_OP(q, 64); #endif void memset_io(volatile void __iomem *addr, int value, size_t size) { struct logic_iomem_area *area = get_area(addr); unsigned long offs, start; if (!area) { real_memset_io(addr, value, size); return; } start = (unsigned long)addr & AREA_MASK; if (area->ops->set) { area->ops->set(area->priv, start, value, size); return; } for (offs = 0; offs < size; offs++) area->ops->write(area->priv, start + offs, 1, value); } EXPORT_SYMBOL(memset_io); void memcpy_fromio(void *buffer, const volatile void __iomem *addr, size_t size) { struct logic_iomem_area *area = get_area(addr); u8 *buf = buffer; unsigned long offs, start; if (!area) { real_memcpy_fromio(buffer, addr, size); return; } start = (unsigned long)addr & AREA_MASK; if (area->ops->copy_from) { area->ops->copy_from(area->priv, buffer, start, size); return; } for (offs = 0; offs < size; offs++) buf[offs] = area->ops->read(area->priv, start + offs, 1); } EXPORT_SYMBOL(memcpy_fromio); void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size) { struct logic_iomem_area *area = get_area(addr); const u8 *buf = buffer; unsigned long offs, start; if (!area) { real_memcpy_toio(addr, buffer, size); return; } start = (unsigned long)addr & AREA_MASK; if (area->ops->copy_to) { area->ops->copy_to(area->priv, start, buffer, size); return; } for (offs = 0; offs < size; offs++) area->ops->write(area->priv, start + offs, 1, buf[offs]); } EXPORT_SYMBOL(memcpy_toio);
linux-master
lib/logic_iomem.c
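For the registration side that the ioremap() implementation above dispatches to, here is a hedged sketch: the region, buffer, and callback names are hypothetical, and the callback prototypes are inferred from how logic_iomem.c invokes them (include/linux/logic_iomem.h is the authoritative reference).

/* Hypothetical sketch, not part of the kernel sources above. */
#include <linux/ioport.h>
#include <linux/logic_iomem.h>
#include <linux/string.h>

static u8 fake_regs[4096];

/* Naive little-endian accessors backing the emulated MMIO window. */
static unsigned long fake_read(void *priv, unsigned int offset, int size)
{
	unsigned long val = 0;

	memcpy(&val, fake_regs + offset, size);
	return val;
}

static void fake_write(void *priv, unsigned int offset, int size,
		       unsigned long val)
{
	memcpy(fake_regs + offset, &val, size);
}

static const struct logic_iomem_ops fake_ops = {
	.read	= fake_read,
	.write	= fake_write,
};

static long fake_map(unsigned long offset, size_t size,
		     const struct logic_iomem_ops **ops, void **priv)
{
	*ops = &fake_ops;
	*priv = NULL;
	return offset;	/* offset of this mapping within the area */
}

static const struct logic_iomem_region_ops fake_region_ops = {
	.map = fake_map,
};

static struct resource fake_res = {
	.name	= "fake-mmio",
	.start	= 0x10000000,
	.end	= 0x10000fff,
	.flags	= IORESOURCE_MEM,
};

/* Called from a hypothetical init path; an ioremap() of this range then
 * routes readl()/writel() through fake_read()/fake_write() above. */
static int fake_mmio_init(void)
{
	return logic_iomem_add_region(&fake_res, &fake_region_ops);
}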
#include <linux/libfdt_env.h>
#include "../scripts/dtc/libfdt/fdt_rw.c"
linux-master
lib/fdt_rw.c
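This two-line wrapper is the pattern lib/ uses to borrow libfdt translation units from scripts/dtc: set up the kernel's libfdt environment, then include the upstream source directly. The other wrappers follow the same shape; for example, a read-only-API counterpart would look like the sketch below (file name shown for illustration):

/* Sibling wrapper, same pattern (e.g. lib/fdt_ro.c). */
#include <linux/libfdt_env.h>
#include "../scripts/dtc/libfdt/fdt_ro.c"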
// SPDX-License-Identifier: GPL-2.0 OR MIT /* * Test cases for arithmetic overflow checks. See: * "Running tests with kunit_tool" at Documentation/dev-tools/kunit/start.rst * ./tools/testing/kunit/kunit.py run overflow [--raw_output] */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <kunit/test.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/overflow.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/vmalloc.h> #define SKIP(cond, reason) do { \ if (cond) { \ kunit_skip(test, reason); \ return; \ } \ } while (0) /* * Clang 11 and earlier generate unwanted libcalls for signed output * on unsigned input. */ #if defined(CONFIG_CC_IS_CLANG) && __clang_major__ <= 11 # define SKIP_SIGN_MISMATCH(t) SKIP(t, "Clang 11 unwanted libcalls") #else # define SKIP_SIGN_MISMATCH(t) do { } while (0) #endif /* * Clang 13 and earlier generate unwanted libcalls for 64-bit tests on * 32-bit hosts. */ #if defined(CONFIG_CC_IS_CLANG) && __clang_major__ <= 13 && \ BITS_PER_LONG != 64 # define SKIP_64_ON_32(t) SKIP(t, "Clang 13 unwanted libcalls") #else # define SKIP_64_ON_32(t) do { } while (0) #endif #define DEFINE_TEST_ARRAY_TYPED(t1, t2, t) \ static const struct test_ ## t1 ## _ ## t2 ## __ ## t { \ t1 a; \ t2 b; \ t sum, diff, prod; \ bool s_of, d_of, p_of; \ } t1 ## _ ## t2 ## __ ## t ## _tests[] #define DEFINE_TEST_ARRAY(t) DEFINE_TEST_ARRAY_TYPED(t, t, t) DEFINE_TEST_ARRAY(u8) = { {0, 0, 0, 0, 0, false, false, false}, {1, 1, 2, 0, 1, false, false, false}, {0, 1, 1, U8_MAX, 0, false, true, false}, {1, 0, 1, 1, 0, false, false, false}, {0, U8_MAX, U8_MAX, 1, 0, false, true, false}, {U8_MAX, 0, U8_MAX, U8_MAX, 0, false, false, false}, {1, U8_MAX, 0, 2, U8_MAX, true, true, false}, {U8_MAX, 1, 0, U8_MAX-1, U8_MAX, true, false, false}, {U8_MAX, U8_MAX, U8_MAX-1, 0, 1, true, false, true}, {U8_MAX, U8_MAX-1, U8_MAX-2, 1, 2, true, false, true}, {U8_MAX-1, U8_MAX, U8_MAX-2, U8_MAX, 2, true, true, true}, {1U << 3, 1U << 3, 1U << 4, 0, 1U << 6, false, false, false}, {1U << 4, 1U << 4, 1U << 5, 0, 0, false, false, true}, {1U << 4, 1U << 3, 3*(1U << 3), 1U << 3, 1U << 7, false, false, false}, {1U << 7, 1U << 7, 0, 0, 0, true, false, true}, {48, 32, 80, 16, 0, false, false, true}, {128, 128, 0, 0, 0, true, false, true}, {123, 234, 101, 145, 110, true, true, true}, }; DEFINE_TEST_ARRAY(u16) = { {0, 0, 0, 0, 0, false, false, false}, {1, 1, 2, 0, 1, false, false, false}, {0, 1, 1, U16_MAX, 0, false, true, false}, {1, 0, 1, 1, 0, false, false, false}, {0, U16_MAX, U16_MAX, 1, 0, false, true, false}, {U16_MAX, 0, U16_MAX, U16_MAX, 0, false, false, false}, {1, U16_MAX, 0, 2, U16_MAX, true, true, false}, {U16_MAX, 1, 0, U16_MAX-1, U16_MAX, true, false, false}, {U16_MAX, U16_MAX, U16_MAX-1, 0, 1, true, false, true}, {U16_MAX, U16_MAX-1, U16_MAX-2, 1, 2, true, false, true}, {U16_MAX-1, U16_MAX, U16_MAX-2, U16_MAX, 2, true, true, true}, {1U << 7, 1U << 7, 1U << 8, 0, 1U << 14, false, false, false}, {1U << 8, 1U << 8, 1U << 9, 0, 0, false, false, true}, {1U << 8, 1U << 7, 3*(1U << 7), 1U << 7, 1U << 15, false, false, false}, {1U << 15, 1U << 15, 0, 0, 0, true, false, true}, {123, 234, 357, 65425, 28782, false, true, false}, {1234, 2345, 3579, 64425, 10146, false, true, true}, }; DEFINE_TEST_ARRAY(u32) = { {0, 0, 0, 0, 0, false, false, false}, {1, 1, 2, 0, 1, false, false, false}, {0, 1, 1, U32_MAX, 0, false, true, false}, {1, 0, 1, 1, 0, false, false, false}, {0, U32_MAX, U32_MAX, 1, 0, false, true, false}, {U32_MAX, 0, U32_MAX, U32_MAX, 0, 
false, false, false}, {1, U32_MAX, 0, 2, U32_MAX, true, true, false}, {U32_MAX, 1, 0, U32_MAX-1, U32_MAX, true, false, false}, {U32_MAX, U32_MAX, U32_MAX-1, 0, 1, true, false, true}, {U32_MAX, U32_MAX-1, U32_MAX-2, 1, 2, true, false, true}, {U32_MAX-1, U32_MAX, U32_MAX-2, U32_MAX, 2, true, true, true}, {1U << 15, 1U << 15, 1U << 16, 0, 1U << 30, false, false, false}, {1U << 16, 1U << 16, 1U << 17, 0, 0, false, false, true}, {1U << 16, 1U << 15, 3*(1U << 15), 1U << 15, 1U << 31, false, false, false}, {1U << 31, 1U << 31, 0, 0, 0, true, false, true}, {-2U, 1U, -1U, -3U, -2U, false, false, false}, {-4U, 5U, 1U, -9U, -20U, true, false, true}, }; DEFINE_TEST_ARRAY(u64) = { {0, 0, 0, 0, 0, false, false, false}, {1, 1, 2, 0, 1, false, false, false}, {0, 1, 1, U64_MAX, 0, false, true, false}, {1, 0, 1, 1, 0, false, false, false}, {0, U64_MAX, U64_MAX, 1, 0, false, true, false}, {U64_MAX, 0, U64_MAX, U64_MAX, 0, false, false, false}, {1, U64_MAX, 0, 2, U64_MAX, true, true, false}, {U64_MAX, 1, 0, U64_MAX-1, U64_MAX, true, false, false}, {U64_MAX, U64_MAX, U64_MAX-1, 0, 1, true, false, true}, {U64_MAX, U64_MAX-1, U64_MAX-2, 1, 2, true, false, true}, {U64_MAX-1, U64_MAX, U64_MAX-2, U64_MAX, 2, true, true, true}, {1ULL << 31, 1ULL << 31, 1ULL << 32, 0, 1ULL << 62, false, false, false}, {1ULL << 32, 1ULL << 32, 1ULL << 33, 0, 0, false, false, true}, {1ULL << 32, 1ULL << 31, 3*(1ULL << 31), 1ULL << 31, 1ULL << 63, false, false, false}, {1ULL << 63, 1ULL << 63, 0, 0, 0, true, false, true}, {1000000000ULL /* 10^9 */, 10000000000ULL /* 10^10 */, 11000000000ULL, 18446744064709551616ULL, 10000000000000000000ULL, false, true, false}, {-15ULL, 10ULL, -5ULL, -25ULL, -150ULL, false, false, true}, }; DEFINE_TEST_ARRAY(s8) = { {0, 0, 0, 0, 0, false, false, false}, {0, S8_MAX, S8_MAX, -S8_MAX, 0, false, false, false}, {S8_MAX, 0, S8_MAX, S8_MAX, 0, false, false, false}, {0, S8_MIN, S8_MIN, S8_MIN, 0, false, true, false}, {S8_MIN, 0, S8_MIN, S8_MIN, 0, false, false, false}, {-1, S8_MIN, S8_MAX, S8_MAX, S8_MIN, true, false, true}, {S8_MIN, -1, S8_MAX, -S8_MAX, S8_MIN, true, false, true}, {-1, S8_MAX, S8_MAX-1, S8_MIN, -S8_MAX, false, false, false}, {S8_MAX, -1, S8_MAX-1, S8_MIN, -S8_MAX, false, true, false}, {-1, -S8_MAX, S8_MIN, S8_MAX-1, S8_MAX, false, false, false}, {-S8_MAX, -1, S8_MIN, S8_MIN+2, S8_MAX, false, false, false}, {1, S8_MIN, -S8_MAX, -S8_MAX, S8_MIN, false, true, false}, {S8_MIN, 1, -S8_MAX, S8_MAX, S8_MIN, false, true, false}, {1, S8_MAX, S8_MIN, S8_MIN+2, S8_MAX, true, false, false}, {S8_MAX, 1, S8_MIN, S8_MAX-1, S8_MAX, true, false, false}, {S8_MIN, S8_MIN, 0, 0, 0, true, false, true}, {S8_MAX, S8_MAX, -2, 0, 1, true, false, true}, {-4, -32, -36, 28, -128, false, false, true}, {-4, 32, 28, -36, -128, false, false, false}, }; DEFINE_TEST_ARRAY(s16) = { {0, 0, 0, 0, 0, false, false, false}, {0, S16_MAX, S16_MAX, -S16_MAX, 0, false, false, false}, {S16_MAX, 0, S16_MAX, S16_MAX, 0, false, false, false}, {0, S16_MIN, S16_MIN, S16_MIN, 0, false, true, false}, {S16_MIN, 0, S16_MIN, S16_MIN, 0, false, false, false}, {-1, S16_MIN, S16_MAX, S16_MAX, S16_MIN, true, false, true}, {S16_MIN, -1, S16_MAX, -S16_MAX, S16_MIN, true, false, true}, {-1, S16_MAX, S16_MAX-1, S16_MIN, -S16_MAX, false, false, false}, {S16_MAX, -1, S16_MAX-1, S16_MIN, -S16_MAX, false, true, false}, {-1, -S16_MAX, S16_MIN, S16_MAX-1, S16_MAX, false, false, false}, {-S16_MAX, -1, S16_MIN, S16_MIN+2, S16_MAX, false, false, false}, {1, S16_MIN, -S16_MAX, -S16_MAX, S16_MIN, false, true, false}, {S16_MIN, 1, -S16_MAX, S16_MAX, S16_MIN, false, 
true, false}, {1, S16_MAX, S16_MIN, S16_MIN+2, S16_MAX, true, false, false}, {S16_MAX, 1, S16_MIN, S16_MAX-1, S16_MAX, true, false, false}, {S16_MIN, S16_MIN, 0, 0, 0, true, false, true}, {S16_MAX, S16_MAX, -2, 0, 1, true, false, true}, }; DEFINE_TEST_ARRAY(s32) = { {0, 0, 0, 0, 0, false, false, false}, {0, S32_MAX, S32_MAX, -S32_MAX, 0, false, false, false}, {S32_MAX, 0, S32_MAX, S32_MAX, 0, false, false, false}, {0, S32_MIN, S32_MIN, S32_MIN, 0, false, true, false}, {S32_MIN, 0, S32_MIN, S32_MIN, 0, false, false, false}, {-1, S32_MIN, S32_MAX, S32_MAX, S32_MIN, true, false, true}, {S32_MIN, -1, S32_MAX, -S32_MAX, S32_MIN, true, false, true}, {-1, S32_MAX, S32_MAX-1, S32_MIN, -S32_MAX, false, false, false}, {S32_MAX, -1, S32_MAX-1, S32_MIN, -S32_MAX, false, true, false}, {-1, -S32_MAX, S32_MIN, S32_MAX-1, S32_MAX, false, false, false}, {-S32_MAX, -1, S32_MIN, S32_MIN+2, S32_MAX, false, false, false}, {1, S32_MIN, -S32_MAX, -S32_MAX, S32_MIN, false, true, false}, {S32_MIN, 1, -S32_MAX, S32_MAX, S32_MIN, false, true, false}, {1, S32_MAX, S32_MIN, S32_MIN+2, S32_MAX, true, false, false}, {S32_MAX, 1, S32_MIN, S32_MAX-1, S32_MAX, true, false, false}, {S32_MIN, S32_MIN, 0, 0, 0, true, false, true}, {S32_MAX, S32_MAX, -2, 0, 1, true, false, true}, }; DEFINE_TEST_ARRAY(s64) = { {0, 0, 0, 0, 0, false, false, false}, {0, S64_MAX, S64_MAX, -S64_MAX, 0, false, false, false}, {S64_MAX, 0, S64_MAX, S64_MAX, 0, false, false, false}, {0, S64_MIN, S64_MIN, S64_MIN, 0, false, true, false}, {S64_MIN, 0, S64_MIN, S64_MIN, 0, false, false, false}, {-1, S64_MIN, S64_MAX, S64_MAX, S64_MIN, true, false, true}, {S64_MIN, -1, S64_MAX, -S64_MAX, S64_MIN, true, false, true}, {-1, S64_MAX, S64_MAX-1, S64_MIN, -S64_MAX, false, false, false}, {S64_MAX, -1, S64_MAX-1, S64_MIN, -S64_MAX, false, true, false}, {-1, -S64_MAX, S64_MIN, S64_MAX-1, S64_MAX, false, false, false}, {-S64_MAX, -1, S64_MIN, S64_MIN+2, S64_MAX, false, false, false}, {1, S64_MIN, -S64_MAX, -S64_MAX, S64_MIN, false, true, false}, {S64_MIN, 1, -S64_MAX, S64_MAX, S64_MIN, false, true, false}, {1, S64_MAX, S64_MIN, S64_MIN+2, S64_MAX, true, false, false}, {S64_MAX, 1, S64_MIN, S64_MAX-1, S64_MAX, true, false, false}, {S64_MIN, S64_MIN, 0, 0, 0, true, false, true}, {S64_MAX, S64_MAX, -2, 0, 1, true, false, true}, {-1, -1, -2, 0, 1, false, false, false}, {-1, -128, -129, 127, 128, false, false, false}, {-128, -1, -129, -127, 128, false, false, false}, {0, -S64_MAX, -S64_MAX, S64_MAX, 0, false, false, false}, }; #define check_one_op(t, fmt, op, sym, a, b, r, of) do { \ int _a_orig = a, _a_bump = a + 1; \ int _b_orig = b, _b_bump = b + 1; \ bool _of; \ t _r; \ \ _of = check_ ## op ## _overflow(a, b, &_r); \ KUNIT_EXPECT_EQ_MSG(test, _of, of, \ "expected "fmt" "sym" "fmt" to%s overflow (type %s)\n", \ a, b, of ? "" : " not", #t); \ KUNIT_EXPECT_EQ_MSG(test, _r, r, \ "expected "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \ a, b, r, _r, #t); \ /* Check for internal macro side-effects. 
*/ \ _of = check_ ## op ## _overflow(_a_orig++, _b_orig++, &_r); \ KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, "Unexpected " #op " macro side-effect!\n"); \ KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, "Unexpected " #op " macro side-effect!\n"); \ } while (0) #define DEFINE_TEST_FUNC_TYPED(n, t, fmt) \ static void do_test_ ## n(struct kunit *test, const struct test_ ## n *p) \ { \ check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \ check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \ check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \ check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \ check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \ } \ \ static void n ## _overflow_test(struct kunit *test) { \ unsigned i; \ \ SKIP_64_ON_32(__same_type(t, u64)); \ SKIP_64_ON_32(__same_type(t, s64)); \ SKIP_SIGN_MISMATCH(__same_type(n ## _tests[0].a, u32) && \ __same_type(n ## _tests[0].b, u32) && \ __same_type(n ## _tests[0].sum, int)); \ \ for (i = 0; i < ARRAY_SIZE(n ## _tests); ++i) \ do_test_ ## n(test, &n ## _tests[i]); \ kunit_info(test, "%zu %s arithmetic tests finished\n", \ ARRAY_SIZE(n ## _tests), #n); \ } #define DEFINE_TEST_FUNC(t, fmt) \ DEFINE_TEST_FUNC_TYPED(t ## _ ## t ## __ ## t, t, fmt) DEFINE_TEST_FUNC(u8, "%d"); DEFINE_TEST_FUNC(s8, "%d"); DEFINE_TEST_FUNC(u16, "%d"); DEFINE_TEST_FUNC(s16, "%d"); DEFINE_TEST_FUNC(u32, "%u"); DEFINE_TEST_FUNC(s32, "%d"); DEFINE_TEST_FUNC(u64, "%llu"); DEFINE_TEST_FUNC(s64, "%lld"); DEFINE_TEST_ARRAY_TYPED(u32, u32, u8) = { {0, 0, 0, 0, 0, false, false, false}, {U8_MAX, 2, 1, U8_MAX - 2, U8_MAX - 1, true, false, true}, {U8_MAX + 1, 0, 0, 0, 0, true, true, false}, }; DEFINE_TEST_FUNC_TYPED(u32_u32__u8, u8, "%d"); DEFINE_TEST_ARRAY_TYPED(u32, u32, int) = { {0, 0, 0, 0, 0, false, false, false}, {U32_MAX, 0, -1, -1, 0, true, true, false}, }; DEFINE_TEST_FUNC_TYPED(u32_u32__int, int, "%d"); DEFINE_TEST_ARRAY_TYPED(u8, u8, int) = { {0, 0, 0, 0, 0, false, false, false}, {U8_MAX, U8_MAX, 2 * U8_MAX, 0, U8_MAX * U8_MAX, false, false, false}, {1, 2, 3, -1, 2, false, false, false}, }; DEFINE_TEST_FUNC_TYPED(u8_u8__int, int, "%d"); DEFINE_TEST_ARRAY_TYPED(int, int, u8) = { {0, 0, 0, 0, 0, false, false, false}, {1, 2, 3, U8_MAX, 2, false, true, false}, {-1, 0, U8_MAX, U8_MAX, 0, true, true, false}, }; DEFINE_TEST_FUNC_TYPED(int_int__u8, u8, "%d"); /* Args are: value, shift, type, expected result, overflow expected */ #define TEST_ONE_SHIFT(a, s, t, expect, of) do { \ typeof(a) __a = (a); \ typeof(s) __s = (s); \ t __e = (expect); \ t __d; \ bool __of = check_shl_overflow(__a, __s, &__d); \ if (__of != of) { \ KUNIT_EXPECT_EQ_MSG(test, __of, of, \ "expected (%s)(%s << %s) to%s overflow\n", \ #t, #a, #s, of ? "" : " not"); \ } else if (!__of && __d != __e) { \ KUNIT_EXPECT_EQ_MSG(test, __d, __e, \ "expected (%s)(%s << %s) == %s\n", \ #t, #a, #s, #expect); \ if ((t)-1 < 0) \ kunit_info(test, "got %lld\n", (s64)__d); \ else \ kunit_info(test, "got %llu\n", (u64)__d); \ } \ count++; \ } while (0) static void shift_sane_test(struct kunit *test) { int count = 0; /* Sane shifts. 
*/ TEST_ONE_SHIFT(1, 0, u8, 1 << 0, false); TEST_ONE_SHIFT(1, 4, u8, 1 << 4, false); TEST_ONE_SHIFT(1, 7, u8, 1 << 7, false); TEST_ONE_SHIFT(0xF, 4, u8, 0xF << 4, false); TEST_ONE_SHIFT(1, 0, u16, 1 << 0, false); TEST_ONE_SHIFT(1, 10, u16, 1 << 10, false); TEST_ONE_SHIFT(1, 15, u16, 1 << 15, false); TEST_ONE_SHIFT(0xFF, 8, u16, 0xFF << 8, false); TEST_ONE_SHIFT(1, 0, int, 1 << 0, false); TEST_ONE_SHIFT(1, 16, int, 1 << 16, false); TEST_ONE_SHIFT(1, 30, int, 1 << 30, false); TEST_ONE_SHIFT(1, 0, s32, 1 << 0, false); TEST_ONE_SHIFT(1, 16, s32, 1 << 16, false); TEST_ONE_SHIFT(1, 30, s32, 1 << 30, false); TEST_ONE_SHIFT(1, 0, unsigned int, 1U << 0, false); TEST_ONE_SHIFT(1, 20, unsigned int, 1U << 20, false); TEST_ONE_SHIFT(1, 31, unsigned int, 1U << 31, false); TEST_ONE_SHIFT(0xFFFFU, 16, unsigned int, 0xFFFFU << 16, false); TEST_ONE_SHIFT(1, 0, u32, 1U << 0, false); TEST_ONE_SHIFT(1, 20, u32, 1U << 20, false); TEST_ONE_SHIFT(1, 31, u32, 1U << 31, false); TEST_ONE_SHIFT(0xFFFFU, 16, u32, 0xFFFFU << 16, false); TEST_ONE_SHIFT(1, 0, u64, 1ULL << 0, false); TEST_ONE_SHIFT(1, 40, u64, 1ULL << 40, false); TEST_ONE_SHIFT(1, 63, u64, 1ULL << 63, false); TEST_ONE_SHIFT(0xFFFFFFFFULL, 32, u64, 0xFFFFFFFFULL << 32, false); /* Sane shift: start and end with 0, without a too-wide shift. */ TEST_ONE_SHIFT(0, 7, u8, 0, false); TEST_ONE_SHIFT(0, 15, u16, 0, false); TEST_ONE_SHIFT(0, 31, unsigned int, 0, false); TEST_ONE_SHIFT(0, 31, u32, 0, false); TEST_ONE_SHIFT(0, 63, u64, 0, false); /* Sane shift: start and end with 0, without reaching signed bit. */ TEST_ONE_SHIFT(0, 6, s8, 0, false); TEST_ONE_SHIFT(0, 14, s16, 0, false); TEST_ONE_SHIFT(0, 30, int, 0, false); TEST_ONE_SHIFT(0, 30, s32, 0, false); TEST_ONE_SHIFT(0, 62, s64, 0, false); kunit_info(test, "%d sane shift tests finished\n", count); } static void shift_overflow_test(struct kunit *test) { int count = 0; /* Overflow: shifted the bit off the end. */ TEST_ONE_SHIFT(1, 8, u8, 0, true); TEST_ONE_SHIFT(1, 16, u16, 0, true); TEST_ONE_SHIFT(1, 32, unsigned int, 0, true); TEST_ONE_SHIFT(1, 32, u32, 0, true); TEST_ONE_SHIFT(1, 64, u64, 0, true); /* Overflow: shifted into the signed bit. */ TEST_ONE_SHIFT(1, 7, s8, 0, true); TEST_ONE_SHIFT(1, 15, s16, 0, true); TEST_ONE_SHIFT(1, 31, int, 0, true); TEST_ONE_SHIFT(1, 31, s32, 0, true); TEST_ONE_SHIFT(1, 63, s64, 0, true); /* Overflow: high bit falls off unsigned types. */ /* 10010110 */ TEST_ONE_SHIFT(150, 1, u8, 0, true); /* 1000100010010110 */ TEST_ONE_SHIFT(34966, 1, u16, 0, true); /* 10000100000010001000100010010110 */ TEST_ONE_SHIFT(2215151766U, 1, u32, 0, true); TEST_ONE_SHIFT(2215151766U, 1, unsigned int, 0, true); /* 1000001000010000010000000100000010000100000010001000100010010110 */ TEST_ONE_SHIFT(9372061470395238550ULL, 1, u64, 0, true); /* Overflow: bit shifted into signed bit on signed types. */ /* 01001011 */ TEST_ONE_SHIFT(75, 1, s8, 0, true); /* 0100010001001011 */ TEST_ONE_SHIFT(17483, 1, s16, 0, true); /* 01000010000001000100010001001011 */ TEST_ONE_SHIFT(1107575883, 1, s32, 0, true); TEST_ONE_SHIFT(1107575883, 1, int, 0, true); /* 0100000100001000001000000010000001000010000001000100010001001011 */ TEST_ONE_SHIFT(4686030735197619275LL, 1, s64, 0, true); /* Overflow: bit shifted past signed bit on signed types. 
*/ /* 01001011 */ TEST_ONE_SHIFT(75, 2, s8, 0, true); /* 0100010001001011 */ TEST_ONE_SHIFT(17483, 2, s16, 0, true); /* 01000010000001000100010001001011 */ TEST_ONE_SHIFT(1107575883, 2, s32, 0, true); TEST_ONE_SHIFT(1107575883, 2, int, 0, true); /* 0100000100001000001000000010000001000010000001000100010001001011 */ TEST_ONE_SHIFT(4686030735197619275LL, 2, s64, 0, true); kunit_info(test, "%d overflow shift tests finished\n", count); } static void shift_truncate_test(struct kunit *test) { int count = 0; /* Overflow: values larger than destination type. */ TEST_ONE_SHIFT(0x100, 0, u8, 0, true); TEST_ONE_SHIFT(0xFF, 0, s8, 0, true); TEST_ONE_SHIFT(0x10000U, 0, u16, 0, true); TEST_ONE_SHIFT(0xFFFFU, 0, s16, 0, true); TEST_ONE_SHIFT(0x100000000ULL, 0, u32, 0, true); TEST_ONE_SHIFT(0x100000000ULL, 0, unsigned int, 0, true); TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, s32, 0, true); TEST_ONE_SHIFT(0xFFFFFFFFUL, 0, int, 0, true); TEST_ONE_SHIFT(0xFFFFFFFFFFFFFFFFULL, 0, s64, 0, true); /* Overflow: shifted at or beyond entire type's bit width. */ TEST_ONE_SHIFT(0, 8, u8, 0, true); TEST_ONE_SHIFT(0, 9, u8, 0, true); TEST_ONE_SHIFT(0, 8, s8, 0, true); TEST_ONE_SHIFT(0, 9, s8, 0, true); TEST_ONE_SHIFT(0, 16, u16, 0, true); TEST_ONE_SHIFT(0, 17, u16, 0, true); TEST_ONE_SHIFT(0, 16, s16, 0, true); TEST_ONE_SHIFT(0, 17, s16, 0, true); TEST_ONE_SHIFT(0, 32, u32, 0, true); TEST_ONE_SHIFT(0, 33, u32, 0, true); TEST_ONE_SHIFT(0, 32, int, 0, true); TEST_ONE_SHIFT(0, 33, int, 0, true); TEST_ONE_SHIFT(0, 32, s32, 0, true); TEST_ONE_SHIFT(0, 33, s32, 0, true); TEST_ONE_SHIFT(0, 64, u64, 0, true); TEST_ONE_SHIFT(0, 65, u64, 0, true); TEST_ONE_SHIFT(0, 64, s64, 0, true); TEST_ONE_SHIFT(0, 65, s64, 0, true); kunit_info(test, "%d truncate shift tests finished\n", count); } static void shift_nonsense_test(struct kunit *test) { int count = 0; /* Nonsense: negative initial value. */ TEST_ONE_SHIFT(-1, 0, s8, 0, true); TEST_ONE_SHIFT(-1, 0, u8, 0, true); TEST_ONE_SHIFT(-5, 0, s16, 0, true); TEST_ONE_SHIFT(-5, 0, u16, 0, true); TEST_ONE_SHIFT(-10, 0, int, 0, true); TEST_ONE_SHIFT(-10, 0, unsigned int, 0, true); TEST_ONE_SHIFT(-100, 0, s32, 0, true); TEST_ONE_SHIFT(-100, 0, u32, 0, true); TEST_ONE_SHIFT(-10000, 0, s64, 0, true); TEST_ONE_SHIFT(-10000, 0, u64, 0, true); /* Nonsense: negative shift values. */ TEST_ONE_SHIFT(0, -5, s8, 0, true); TEST_ONE_SHIFT(0, -5, u8, 0, true); TEST_ONE_SHIFT(0, -10, s16, 0, true); TEST_ONE_SHIFT(0, -10, u16, 0, true); TEST_ONE_SHIFT(0, -15, int, 0, true); TEST_ONE_SHIFT(0, -15, unsigned int, 0, true); TEST_ONE_SHIFT(0, -20, s32, 0, true); TEST_ONE_SHIFT(0, -20, u32, 0, true); TEST_ONE_SHIFT(0, -30, s64, 0, true); TEST_ONE_SHIFT(0, -30, u64, 0, true); /* * Corner case: for unsigned types, we fail when we've shifted * through the entire width of bits. For signed types, we might * want to match this behavior, but that would mean noticing if * we shift through all but the signed bit, and this is not * currently detected (but we'll notice an overflow into the * signed bit). So, for now, we will test this condition but * mark it as not expected to overflow. */ TEST_ONE_SHIFT(0, 7, s8, 0, false); TEST_ONE_SHIFT(0, 15, s16, 0, false); TEST_ONE_SHIFT(0, 31, int, 0, false); TEST_ONE_SHIFT(0, 31, s32, 0, false); TEST_ONE_SHIFT(0, 63, s64, 0, false); kunit_info(test, "%d nonsense shift tests finished\n", count); } #undef TEST_ONE_SHIFT /* * Deal with the various forms of allocator arguments. See comments above * the DEFINE_TEST_ALLOC() instances for mapping of the "bits". 
*/ #define alloc_GFP (GFP_KERNEL | __GFP_NOWARN) #define alloc010(alloc, arg, sz) alloc(sz, alloc_GFP) #define alloc011(alloc, arg, sz) alloc(sz, alloc_GFP, NUMA_NO_NODE) #define alloc000(alloc, arg, sz) alloc(sz) #define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE) #define alloc110(alloc, arg, sz) alloc(arg, sz, alloc_GFP) #define free0(free, arg, ptr) free(ptr) #define free1(free, arg, ptr) free(arg, ptr) /* Wrap around to 16K */ #define TEST_SIZE (5 * 4096) #define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\ static void test_ ## func (struct kunit *test, void *arg) \ { \ volatile size_t a = TEST_SIZE; \ volatile size_t b = (SIZE_MAX / TEST_SIZE) + 1; \ void *ptr; \ \ /* Tiny allocation test. */ \ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, 1);\ KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, \ #func " failed regular allocation?!\n"); \ free ## want_arg (free_func, arg, ptr); \ \ /* Wrapped allocation test. */ \ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \ a * b); \ KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, \ #func " unexpectedly failed bad wrapping?!\n"); \ free ## want_arg (free_func, arg, ptr); \ \ /* Saturated allocation test. */ \ ptr = alloc ## want_arg ## want_gfp ## want_node (func, arg, \ array_size(a, b)); \ if (ptr) { \ KUNIT_FAIL(test, #func " missed saturation!\n"); \ free ## want_arg (free_func, arg, ptr); \ } \ } /* * Allocator uses a trailing node argument --------+ (e.g. kmalloc_node()) * Allocator uses the gfp_t argument -----------+ | (e.g. kmalloc()) * Allocator uses a special leading argument + | | (e.g. devm_kmalloc()) * | | | */ DEFINE_TEST_ALLOC(kmalloc, kfree, 0, 1, 0); DEFINE_TEST_ALLOC(kmalloc_node, kfree, 0, 1, 1); DEFINE_TEST_ALLOC(kzalloc, kfree, 0, 1, 0); DEFINE_TEST_ALLOC(kzalloc_node, kfree, 0, 1, 1); DEFINE_TEST_ALLOC(__vmalloc, vfree, 0, 1, 0); DEFINE_TEST_ALLOC(kvmalloc, kvfree, 0, 1, 0); DEFINE_TEST_ALLOC(kvmalloc_node, kvfree, 0, 1, 1); DEFINE_TEST_ALLOC(kvzalloc, kvfree, 0, 1, 0); DEFINE_TEST_ALLOC(kvzalloc_node, kvfree, 0, 1, 1); DEFINE_TEST_ALLOC(devm_kmalloc, devm_kfree, 1, 1, 0); DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0); static void overflow_allocation_test(struct kunit *test) { const char device_name[] = "overflow-test"; struct device *dev; int count = 0; #define check_allocation_overflow(alloc) do { \ count++; \ test_ ## alloc(test, dev); \ } while (0) /* Create dummy device for devm_kmalloc()-family tests. */ dev = root_device_register(device_name); KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), "Cannot register test device\n"); check_allocation_overflow(kmalloc); check_allocation_overflow(kmalloc_node); check_allocation_overflow(kzalloc); check_allocation_overflow(kzalloc_node); check_allocation_overflow(__vmalloc); check_allocation_overflow(kvmalloc); check_allocation_overflow(kvmalloc_node); check_allocation_overflow(kvzalloc); check_allocation_overflow(kvzalloc_node); check_allocation_overflow(devm_kmalloc); check_allocation_overflow(devm_kzalloc); device_unregister(dev); kunit_info(test, "%d allocation overflow tests finished\n", count); #undef check_allocation_overflow } struct __test_flex_array { unsigned long flags; size_t count; unsigned long data[]; }; static void overflow_size_helpers_test(struct kunit *test) { /* Make sure struct_size() can be used in a constant expression. 
*/ u8 ce_array[struct_size_t(struct __test_flex_array, data, 55)]; struct __test_flex_array *obj; int count = 0; int var; volatile int unconst = 0; /* Verify constant expression against runtime version. */ var = 55; OPTIMIZER_HIDE_VAR(var); KUNIT_EXPECT_EQ(test, sizeof(ce_array), struct_size(obj, data, var)); #define check_one_size_helper(expected, func, args...) do { \ size_t _r = func(args); \ KUNIT_EXPECT_EQ_MSG(test, _r, expected, \ "expected " #func "(" #args ") to return %zu but got %zu instead\n", \ (size_t)(expected), _r); \ count++; \ } while (0) var = 4; check_one_size_helper(20, size_mul, var++, 5); check_one_size_helper(20, size_mul, 4, var++); check_one_size_helper(0, size_mul, 0, 3); check_one_size_helper(0, size_mul, 3, 0); check_one_size_helper(6, size_mul, 2, 3); check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 1); check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 3); check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, -3); var = 4; check_one_size_helper(9, size_add, var++, 5); check_one_size_helper(9, size_add, 4, var++); check_one_size_helper(9, size_add, 9, 0); check_one_size_helper(9, size_add, 0, 9); check_one_size_helper(5, size_add, 2, 3); check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 1); check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, 3); check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, -3); var = 4; check_one_size_helper(1, size_sub, var--, 3); check_one_size_helper(1, size_sub, 4, var--); check_one_size_helper(1, size_sub, 3, 2); check_one_size_helper(9, size_sub, 9, 0); check_one_size_helper(SIZE_MAX, size_sub, 9, -3); check_one_size_helper(SIZE_MAX, size_sub, 0, 9); check_one_size_helper(SIZE_MAX, size_sub, 2, 3); check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 0); check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 10); check_one_size_helper(SIZE_MAX, size_sub, 0, SIZE_MAX); check_one_size_helper(SIZE_MAX, size_sub, 14, SIZE_MAX); check_one_size_helper(SIZE_MAX - 2, size_sub, SIZE_MAX - 1, 1); check_one_size_helper(SIZE_MAX - 4, size_sub, SIZE_MAX - 1, 3); check_one_size_helper(1, size_sub, SIZE_MAX - 1, -3); var = 4; check_one_size_helper(4 * sizeof(*obj->data), flex_array_size, obj, data, var++); check_one_size_helper(5 * sizeof(*obj->data), flex_array_size, obj, data, var++); check_one_size_helper(0, flex_array_size, obj, data, 0 + unconst); check_one_size_helper(sizeof(*obj->data), flex_array_size, obj, data, 1 + unconst); check_one_size_helper(7 * sizeof(*obj->data), flex_array_size, obj, data, 7 + unconst); check_one_size_helper(SIZE_MAX, flex_array_size, obj, data, -1 + unconst); check_one_size_helper(SIZE_MAX, flex_array_size, obj, data, SIZE_MAX - 4 + unconst); var = 4; check_one_size_helper(sizeof(*obj) + (4 * sizeof(*obj->data)), struct_size, obj, data, var++); check_one_size_helper(sizeof(*obj) + (5 * sizeof(*obj->data)), struct_size, obj, data, var++); check_one_size_helper(sizeof(*obj), struct_size, obj, data, 0 + unconst); check_one_size_helper(sizeof(*obj) + sizeof(*obj->data), struct_size, obj, data, 1 + unconst); check_one_size_helper(SIZE_MAX, struct_size, obj, data, -3 + unconst); check_one_size_helper(SIZE_MAX, struct_size, obj, data, SIZE_MAX - 3 + unconst); kunit_info(test, "%d overflow size helper tests finished\n", count); #undef check_one_size_helper } static void overflows_type_test(struct kunit *test) { int count = 0; unsigned int var; #define __TEST_OVERFLOWS_TYPE(func, arg1, arg2, of) do { \ bool __of = func(arg1, arg2); \ KUNIT_EXPECT_EQ_MSG(test, __of, of, \ "expected " #func "(" #arg1 ", " #arg2 " to%s 
overflow\n",\ of ? "" : " not"); \ count++; \ } while (0) /* Args are: first type, second type, value, overflow expected */ #define TEST_OVERFLOWS_TYPE(__t1, __t2, v, of) do { \ __t1 t1 = (v); \ __t2 t2; \ __TEST_OVERFLOWS_TYPE(__overflows_type, t1, t2, of); \ __TEST_OVERFLOWS_TYPE(__overflows_type, t1, __t2, of); \ __TEST_OVERFLOWS_TYPE(__overflows_type_constexpr, t1, t2, of); \ __TEST_OVERFLOWS_TYPE(__overflows_type_constexpr, t1, __t2, of);\ } while (0) TEST_OVERFLOWS_TYPE(u8, u8, U8_MAX, false); TEST_OVERFLOWS_TYPE(u8, u16, U8_MAX, false); TEST_OVERFLOWS_TYPE(u8, s8, U8_MAX, true); TEST_OVERFLOWS_TYPE(u8, s8, S8_MAX, false); TEST_OVERFLOWS_TYPE(u8, s8, (u8)S8_MAX + 1, true); TEST_OVERFLOWS_TYPE(u8, s16, U8_MAX, false); TEST_OVERFLOWS_TYPE(s8, u8, S8_MAX, false); TEST_OVERFLOWS_TYPE(s8, u8, -1, true); TEST_OVERFLOWS_TYPE(s8, u8, S8_MIN, true); TEST_OVERFLOWS_TYPE(s8, u16, S8_MAX, false); TEST_OVERFLOWS_TYPE(s8, u16, -1, true); TEST_OVERFLOWS_TYPE(s8, u16, S8_MIN, true); TEST_OVERFLOWS_TYPE(s8, u32, S8_MAX, false); TEST_OVERFLOWS_TYPE(s8, u32, -1, true); TEST_OVERFLOWS_TYPE(s8, u32, S8_MIN, true); #if BITS_PER_LONG == 64 TEST_OVERFLOWS_TYPE(s8, u64, S8_MAX, false); TEST_OVERFLOWS_TYPE(s8, u64, -1, true); TEST_OVERFLOWS_TYPE(s8, u64, S8_MIN, true); #endif TEST_OVERFLOWS_TYPE(s8, s8, S8_MAX, false); TEST_OVERFLOWS_TYPE(s8, s8, S8_MIN, false); TEST_OVERFLOWS_TYPE(s8, s16, S8_MAX, false); TEST_OVERFLOWS_TYPE(s8, s16, S8_MIN, false); TEST_OVERFLOWS_TYPE(u16, u8, U8_MAX, false); TEST_OVERFLOWS_TYPE(u16, u8, (u16)U8_MAX + 1, true); TEST_OVERFLOWS_TYPE(u16, u8, U16_MAX, true); TEST_OVERFLOWS_TYPE(u16, s8, S8_MAX, false); TEST_OVERFLOWS_TYPE(u16, s8, (u16)S8_MAX + 1, true); TEST_OVERFLOWS_TYPE(u16, s8, U16_MAX, true); TEST_OVERFLOWS_TYPE(u16, s16, S16_MAX, false); TEST_OVERFLOWS_TYPE(u16, s16, (u16)S16_MAX + 1, true); TEST_OVERFLOWS_TYPE(u16, s16, U16_MAX, true); TEST_OVERFLOWS_TYPE(u16, u32, U16_MAX, false); TEST_OVERFLOWS_TYPE(u16, s32, U16_MAX, false); TEST_OVERFLOWS_TYPE(s16, u8, U8_MAX, false); TEST_OVERFLOWS_TYPE(s16, u8, (s16)U8_MAX + 1, true); TEST_OVERFLOWS_TYPE(s16, u8, -1, true); TEST_OVERFLOWS_TYPE(s16, u8, S16_MIN, true); TEST_OVERFLOWS_TYPE(s16, u16, S16_MAX, false); TEST_OVERFLOWS_TYPE(s16, u16, -1, true); TEST_OVERFLOWS_TYPE(s16, u16, S16_MIN, true); TEST_OVERFLOWS_TYPE(s16, u32, S16_MAX, false); TEST_OVERFLOWS_TYPE(s16, u32, -1, true); TEST_OVERFLOWS_TYPE(s16, u32, S16_MIN, true); #if BITS_PER_LONG == 64 TEST_OVERFLOWS_TYPE(s16, u64, S16_MAX, false); TEST_OVERFLOWS_TYPE(s16, u64, -1, true); TEST_OVERFLOWS_TYPE(s16, u64, S16_MIN, true); #endif TEST_OVERFLOWS_TYPE(s16, s8, S8_MAX, false); TEST_OVERFLOWS_TYPE(s16, s8, S8_MIN, false); TEST_OVERFLOWS_TYPE(s16, s8, (s16)S8_MAX + 1, true); TEST_OVERFLOWS_TYPE(s16, s8, (s16)S8_MIN - 1, true); TEST_OVERFLOWS_TYPE(s16, s8, S16_MAX, true); TEST_OVERFLOWS_TYPE(s16, s8, S16_MIN, true); TEST_OVERFLOWS_TYPE(s16, s16, S16_MAX, false); TEST_OVERFLOWS_TYPE(s16, s16, S16_MIN, false); TEST_OVERFLOWS_TYPE(s16, s32, S16_MAX, false); TEST_OVERFLOWS_TYPE(s16, s32, S16_MIN, false); TEST_OVERFLOWS_TYPE(u32, u8, U8_MAX, false); TEST_OVERFLOWS_TYPE(u32, u8, (u32)U8_MAX + 1, true); TEST_OVERFLOWS_TYPE(u32, u8, U32_MAX, true); TEST_OVERFLOWS_TYPE(u32, s8, S8_MAX, false); TEST_OVERFLOWS_TYPE(u32, s8, (u32)S8_MAX + 1, true); TEST_OVERFLOWS_TYPE(u32, s8, U32_MAX, true); TEST_OVERFLOWS_TYPE(u32, u16, U16_MAX, false); TEST_OVERFLOWS_TYPE(u32, u16, U16_MAX + 1, true); TEST_OVERFLOWS_TYPE(u32, u16, U32_MAX, true); TEST_OVERFLOWS_TYPE(u32, s16, S16_MAX, false); 
TEST_OVERFLOWS_TYPE(u32, s16, (u32)S16_MAX + 1, true); TEST_OVERFLOWS_TYPE(u32, s16, U32_MAX, true); TEST_OVERFLOWS_TYPE(u32, u32, U32_MAX, false); TEST_OVERFLOWS_TYPE(u32, s32, S32_MAX, false); TEST_OVERFLOWS_TYPE(u32, s32, U32_MAX, true); TEST_OVERFLOWS_TYPE(u32, s32, (u32)S32_MAX + 1, true); #if BITS_PER_LONG == 64 TEST_OVERFLOWS_TYPE(u32, u64, U32_MAX, false); TEST_OVERFLOWS_TYPE(u32, s64, U32_MAX, false); #endif TEST_OVERFLOWS_TYPE(s32, u8, U8_MAX, false); TEST_OVERFLOWS_TYPE(s32, u8, (s32)U8_MAX + 1, true); TEST_OVERFLOWS_TYPE(s32, u16, S32_MAX, true); TEST_OVERFLOWS_TYPE(s32, u8, -1, true); TEST_OVERFLOWS_TYPE(s32, u8, S32_MIN, true); TEST_OVERFLOWS_TYPE(s32, u16, U16_MAX, false); TEST_OVERFLOWS_TYPE(s32, u16, (s32)U16_MAX + 1, true); TEST_OVERFLOWS_TYPE(s32, u16, S32_MAX, true); TEST_OVERFLOWS_TYPE(s32, u16, -1, true); TEST_OVERFLOWS_TYPE(s32, u16, S32_MIN, true); TEST_OVERFLOWS_TYPE(s32, u32, S32_MAX, false); TEST_OVERFLOWS_TYPE(s32, u32, -1, true); TEST_OVERFLOWS_TYPE(s32, u32, S32_MIN, true); #if BITS_PER_LONG == 64 TEST_OVERFLOWS_TYPE(s32, u64, S32_MAX, false); TEST_OVERFLOWS_TYPE(s32, u64, -1, true); TEST_OVERFLOWS_TYPE(s32, u64, S32_MIN, true); #endif TEST_OVERFLOWS_TYPE(s32, s8, S8_MAX, false); TEST_OVERFLOWS_TYPE(s32, s8, S8_MIN, false); TEST_OVERFLOWS_TYPE(s32, s8, (s32)S8_MAX + 1, true); TEST_OVERFLOWS_TYPE(s32, s8, (s32)S8_MIN - 1, true); TEST_OVERFLOWS_TYPE(s32, s8, S32_MAX, true); TEST_OVERFLOWS_TYPE(s32, s8, S32_MIN, true); TEST_OVERFLOWS_TYPE(s32, s16, S16_MAX, false); TEST_OVERFLOWS_TYPE(s32, s16, S16_MIN, false); TEST_OVERFLOWS_TYPE(s32, s16, (s32)S16_MAX + 1, true); TEST_OVERFLOWS_TYPE(s32, s16, (s32)S16_MIN - 1, true); TEST_OVERFLOWS_TYPE(s32, s16, S32_MAX, true); TEST_OVERFLOWS_TYPE(s32, s16, S32_MIN, true); TEST_OVERFLOWS_TYPE(s32, s32, S32_MAX, false); TEST_OVERFLOWS_TYPE(s32, s32, S32_MIN, false); #if BITS_PER_LONG == 64 TEST_OVERFLOWS_TYPE(s32, s64, S32_MAX, false); TEST_OVERFLOWS_TYPE(s32, s64, S32_MIN, false); TEST_OVERFLOWS_TYPE(u64, u8, U64_MAX, true); TEST_OVERFLOWS_TYPE(u64, u8, U8_MAX, false); TEST_OVERFLOWS_TYPE(u64, u8, (u64)U8_MAX + 1, true); TEST_OVERFLOWS_TYPE(u64, u16, U64_MAX, true); TEST_OVERFLOWS_TYPE(u64, u16, U16_MAX, false); TEST_OVERFLOWS_TYPE(u64, u16, (u64)U16_MAX + 1, true); TEST_OVERFLOWS_TYPE(u64, u32, U64_MAX, true); TEST_OVERFLOWS_TYPE(u64, u32, U32_MAX, false); TEST_OVERFLOWS_TYPE(u64, u32, (u64)U32_MAX + 1, true); TEST_OVERFLOWS_TYPE(u64, u64, U64_MAX, false); TEST_OVERFLOWS_TYPE(u64, s8, S8_MAX, false); TEST_OVERFLOWS_TYPE(u64, s8, (u64)S8_MAX + 1, true); TEST_OVERFLOWS_TYPE(u64, s8, U64_MAX, true); TEST_OVERFLOWS_TYPE(u64, s16, S16_MAX, false); TEST_OVERFLOWS_TYPE(u64, s16, (u64)S16_MAX + 1, true); TEST_OVERFLOWS_TYPE(u64, s16, U64_MAX, true); TEST_OVERFLOWS_TYPE(u64, s32, S32_MAX, false); TEST_OVERFLOWS_TYPE(u64, s32, (u64)S32_MAX + 1, true); TEST_OVERFLOWS_TYPE(u64, s32, U64_MAX, true); TEST_OVERFLOWS_TYPE(u64, s64, S64_MAX, false); TEST_OVERFLOWS_TYPE(u64, s64, U64_MAX, true); TEST_OVERFLOWS_TYPE(u64, s64, (u64)S64_MAX + 1, true); TEST_OVERFLOWS_TYPE(s64, u8, S64_MAX, true); TEST_OVERFLOWS_TYPE(s64, u8, S64_MIN, true); TEST_OVERFLOWS_TYPE(s64, u8, -1, true); TEST_OVERFLOWS_TYPE(s64, u8, U8_MAX, false); TEST_OVERFLOWS_TYPE(s64, u8, (s64)U8_MAX + 1, true); TEST_OVERFLOWS_TYPE(s64, u16, S64_MAX, true); TEST_OVERFLOWS_TYPE(s64, u16, S64_MIN, true); TEST_OVERFLOWS_TYPE(s64, u16, -1, true); TEST_OVERFLOWS_TYPE(s64, u16, U16_MAX, false); TEST_OVERFLOWS_TYPE(s64, u16, (s64)U16_MAX + 1, true); TEST_OVERFLOWS_TYPE(s64, u32, S64_MAX, 
true); TEST_OVERFLOWS_TYPE(s64, u32, S64_MIN, true); TEST_OVERFLOWS_TYPE(s64, u32, -1, true); TEST_OVERFLOWS_TYPE(s64, u32, U32_MAX, false); TEST_OVERFLOWS_TYPE(s64, u32, (s64)U32_MAX + 1, true); TEST_OVERFLOWS_TYPE(s64, u64, S64_MAX, false); TEST_OVERFLOWS_TYPE(s64, u64, S64_MIN, true); TEST_OVERFLOWS_TYPE(s64, u64, -1, true); TEST_OVERFLOWS_TYPE(s64, s8, S8_MAX, false); TEST_OVERFLOWS_TYPE(s64, s8, S8_MIN, false); TEST_OVERFLOWS_TYPE(s64, s8, (s64)S8_MAX + 1, true); TEST_OVERFLOWS_TYPE(s64, s8, (s64)S8_MIN - 1, true); TEST_OVERFLOWS_TYPE(s64, s8, S64_MAX, true); TEST_OVERFLOWS_TYPE(s64, s16, S16_MAX, false); TEST_OVERFLOWS_TYPE(s64, s16, S16_MIN, false); TEST_OVERFLOWS_TYPE(s64, s16, (s64)S16_MAX + 1, true); TEST_OVERFLOWS_TYPE(s64, s16, (s64)S16_MIN - 1, true); TEST_OVERFLOWS_TYPE(s64, s16, S64_MAX, true); TEST_OVERFLOWS_TYPE(s64, s32, S32_MAX, false); TEST_OVERFLOWS_TYPE(s64, s32, S32_MIN, false); TEST_OVERFLOWS_TYPE(s64, s32, (s64)S32_MAX + 1, true); TEST_OVERFLOWS_TYPE(s64, s32, (s64)S32_MIN - 1, true); TEST_OVERFLOWS_TYPE(s64, s32, S64_MAX, true); TEST_OVERFLOWS_TYPE(s64, s64, S64_MAX, false); TEST_OVERFLOWS_TYPE(s64, s64, S64_MIN, false); #endif /* Check for macro side-effects. */ var = INT_MAX - 1; __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, false); __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, false); __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, true); var = INT_MAX - 1; __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, false); __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, false); __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, true); kunit_info(test, "%d overflows_type() tests finished\n", count); #undef TEST_OVERFLOWS_TYPE #undef __TEST_OVERFLOWS_TYPE } static void same_type_test(struct kunit *test) { int count = 0; int var; #define TEST_SAME_TYPE(t1, t2, same) do { \ typeof(t1) __t1h = type_max(t1); \ typeof(t1) __t1l = type_min(t1); \ typeof(t2) __t2h = type_max(t2); \ typeof(t2) __t2l = type_min(t2); \ KUNIT_EXPECT_EQ(test, true, __same_type(t1, __t1h)); \ KUNIT_EXPECT_EQ(test, true, __same_type(t1, __t1l)); \ KUNIT_EXPECT_EQ(test, true, __same_type(__t1h, t1)); \ KUNIT_EXPECT_EQ(test, true, __same_type(__t1l, t1)); \ KUNIT_EXPECT_EQ(test, true, __same_type(t2, __t2h)); \ KUNIT_EXPECT_EQ(test, true, __same_type(t2, __t2l)); \ KUNIT_EXPECT_EQ(test, true, __same_type(__t2h, t2)); \ KUNIT_EXPECT_EQ(test, true, __same_type(__t2l, t2)); \ KUNIT_EXPECT_EQ(test, same, __same_type(t1, t2)); \ KUNIT_EXPECT_EQ(test, same, __same_type(t2, __t1h)); \ KUNIT_EXPECT_EQ(test, same, __same_type(t2, __t1l)); \ KUNIT_EXPECT_EQ(test, same, __same_type(__t1h, t2)); \ KUNIT_EXPECT_EQ(test, same, __same_type(__t1l, t2)); \ KUNIT_EXPECT_EQ(test, same, __same_type(t1, __t2h)); \ KUNIT_EXPECT_EQ(test, same, __same_type(t1, __t2l)); \ KUNIT_EXPECT_EQ(test, same, __same_type(__t2h, t1)); \ KUNIT_EXPECT_EQ(test, same, __same_type(__t2l, t1)); \ } while (0) #if BITS_PER_LONG == 64 # define TEST_SAME_TYPE64(base, t, m) TEST_SAME_TYPE(base, t, m) #else # define TEST_SAME_TYPE64(base, t, m) do { } while (0) #endif #define TEST_TYPE_SETS(base, mu8, mu16, mu32, ms8, ms16, ms32, mu64, ms64) \ do { \ TEST_SAME_TYPE(base, u8, mu8); \ TEST_SAME_TYPE(base, u16, mu16); \ TEST_SAME_TYPE(base, u32, mu32); \ TEST_SAME_TYPE(base, s8, ms8); \ TEST_SAME_TYPE(base, s16, ms16); \ TEST_SAME_TYPE(base, s32, ms32); \ TEST_SAME_TYPE64(base, u64, mu64); \ TEST_SAME_TYPE64(base, s64, ms64); \ } while (0) TEST_TYPE_SETS(u8, true, false, false, false, false, false, false, false); TEST_TYPE_SETS(u16, 
false, true, false, false, false, false, false, false); TEST_TYPE_SETS(u32, false, false, true, false, false, false, false, false); TEST_TYPE_SETS(s8, false, false, false, true, false, false, false, false); TEST_TYPE_SETS(s16, false, false, false, false, true, false, false, false); TEST_TYPE_SETS(s32, false, false, false, false, false, true, false, false); #if BITS_PER_LONG == 64 TEST_TYPE_SETS(u64, false, false, false, false, false, false, true, false); TEST_TYPE_SETS(s64, false, false, false, false, false, false, false, true); #endif /* Check for macro side-effects. */ var = 4; KUNIT_EXPECT_EQ(test, var, 4); KUNIT_EXPECT_TRUE(test, __same_type(var++, int)); KUNIT_EXPECT_EQ(test, var, 4); KUNIT_EXPECT_TRUE(test, __same_type(int, var++)); KUNIT_EXPECT_EQ(test, var, 4); KUNIT_EXPECT_TRUE(test, __same_type(var++, var++)); KUNIT_EXPECT_EQ(test, var, 4); kunit_info(test, "%d __same_type() tests finished\n", count); #undef TEST_TYPE_SETS #undef TEST_SAME_TYPE64 #undef TEST_SAME_TYPE } static void castable_to_type_test(struct kunit *test) { int count = 0; #define TEST_CASTABLE_TO_TYPE(arg1, arg2, pass) do { \ bool __pass = castable_to_type(arg1, arg2); \ KUNIT_EXPECT_EQ_MSG(test, __pass, pass, \ "expected castable_to_type(" #arg1 ", " #arg2 ") to%s pass\n",\ pass ? "" : " not"); \ count++; \ } while (0) TEST_CASTABLE_TO_TYPE(16, u8, true); TEST_CASTABLE_TO_TYPE(16, u16, true); TEST_CASTABLE_TO_TYPE(16, u32, true); TEST_CASTABLE_TO_TYPE(16, s8, true); TEST_CASTABLE_TO_TYPE(16, s16, true); TEST_CASTABLE_TO_TYPE(16, s32, true); TEST_CASTABLE_TO_TYPE(-16, s8, true); TEST_CASTABLE_TO_TYPE(-16, s16, true); TEST_CASTABLE_TO_TYPE(-16, s32, true); #if BITS_PER_LONG == 64 TEST_CASTABLE_TO_TYPE(16, u64, true); TEST_CASTABLE_TO_TYPE(-16, s64, true); #endif #define TEST_CASTABLE_TO_TYPE_VAR(width) do { \ u ## width u ## width ## var = 0; \ s ## width s ## width ## var = 0; \ \ /* Constant expressions that fit types. */ \ TEST_CASTABLE_TO_TYPE(type_max(u ## width), u ## width, true); \ TEST_CASTABLE_TO_TYPE(type_min(u ## width), u ## width, true); \ TEST_CASTABLE_TO_TYPE(type_max(u ## width), u ## width ## var, true); \ TEST_CASTABLE_TO_TYPE(type_min(u ## width), u ## width ## var, true); \ TEST_CASTABLE_TO_TYPE(type_max(s ## width), s ## width, true); \ TEST_CASTABLE_TO_TYPE(type_min(s ## width), s ## width, true); \ TEST_CASTABLE_TO_TYPE(type_max(s ## width), s ## width ## var, true); \ TEST_CASTABLE_TO_TYPE(type_min(u ## width), s ## width ## var, true); \ /* Constant expressions that do not fit types. */ \ TEST_CASTABLE_TO_TYPE(type_max(u ## width), s ## width, false); \ TEST_CASTABLE_TO_TYPE(type_max(u ## width), s ## width ## var, false); \ TEST_CASTABLE_TO_TYPE(type_min(s ## width), u ## width, false); \ TEST_CASTABLE_TO_TYPE(type_min(s ## width), u ## width ## var, false); \ /* Non-constant expression with mismatched type. */ \ TEST_CASTABLE_TO_TYPE(s ## width ## var, u ## width, false); \ TEST_CASTABLE_TO_TYPE(u ## width ## var, s ## width, false); \ } while (0) #define TEST_CASTABLE_TO_TYPE_RANGE(width) do { \ unsigned long big = U ## width ## _MAX; \ signed long small = S ## width ## _MIN; \ u ## width u ## width ## var = 0; \ s ## width s ## width ## var = 0; \ \ /* Constant expression in range. */ \ TEST_CASTABLE_TO_TYPE(U ## width ## _MAX, u ## width, true); \ TEST_CASTABLE_TO_TYPE(U ## width ## _MAX, u ## width ## var, true); \ TEST_CASTABLE_TO_TYPE(S ## width ## _MIN, s ## width, true); \ TEST_CASTABLE_TO_TYPE(S ## width ## _MIN, s ## width ## var, true); \ /* Constant expression out of range. 
*/ \ TEST_CASTABLE_TO_TYPE((unsigned long)U ## width ## _MAX + 1, u ## width, false); \ TEST_CASTABLE_TO_TYPE((unsigned long)U ## width ## _MAX + 1, u ## width ## var, false); \ TEST_CASTABLE_TO_TYPE((signed long)S ## width ## _MIN - 1, s ## width, false); \ TEST_CASTABLE_TO_TYPE((signed long)S ## width ## _MIN - 1, s ## width ## var, false); \ /* Non-constant expression with mismatched type. */ \ TEST_CASTABLE_TO_TYPE(big, u ## width, false); \ TEST_CASTABLE_TO_TYPE(big, u ## width ## var, false); \ TEST_CASTABLE_TO_TYPE(small, s ## width, false); \ TEST_CASTABLE_TO_TYPE(small, s ## width ## var, false); \ } while (0) TEST_CASTABLE_TO_TYPE_VAR(8); TEST_CASTABLE_TO_TYPE_VAR(16); TEST_CASTABLE_TO_TYPE_VAR(32); #if BITS_PER_LONG == 64 TEST_CASTABLE_TO_TYPE_VAR(64); #endif TEST_CASTABLE_TO_TYPE_RANGE(8); TEST_CASTABLE_TO_TYPE_RANGE(16); #if BITS_PER_LONG == 64 TEST_CASTABLE_TO_TYPE_RANGE(32); #endif kunit_info(test, "%d castable_to_type() tests finished\n", count); #undef TEST_CASTABLE_TO_TYPE_RANGE #undef TEST_CASTABLE_TO_TYPE_VAR #undef TEST_CASTABLE_TO_TYPE } static struct kunit_case overflow_test_cases[] = { KUNIT_CASE(u8_u8__u8_overflow_test), KUNIT_CASE(s8_s8__s8_overflow_test), KUNIT_CASE(u16_u16__u16_overflow_test), KUNIT_CASE(s16_s16__s16_overflow_test), KUNIT_CASE(u32_u32__u32_overflow_test), KUNIT_CASE(s32_s32__s32_overflow_test), KUNIT_CASE(u64_u64__u64_overflow_test), KUNIT_CASE(s64_s64__s64_overflow_test), KUNIT_CASE(u32_u32__int_overflow_test), KUNIT_CASE(u32_u32__u8_overflow_test), KUNIT_CASE(u8_u8__int_overflow_test), KUNIT_CASE(int_int__u8_overflow_test), KUNIT_CASE(shift_sane_test), KUNIT_CASE(shift_overflow_test), KUNIT_CASE(shift_truncate_test), KUNIT_CASE(shift_nonsense_test), KUNIT_CASE(overflow_allocation_test), KUNIT_CASE(overflow_size_helpers_test), KUNIT_CASE(overflows_type_test), KUNIT_CASE(same_type_test), KUNIT_CASE(castable_to_type_test), {} }; static struct kunit_suite overflow_test_suite = { .name = "overflow", .test_cases = overflow_test_cases, }; kunit_test_suite(overflow_test_suite); MODULE_LICENSE("Dual MIT/GPL");
linux-master
lib/overflow_kunit.c
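The helpers exercised by overflow_kunit.c above are meant to replace open-coded size arithmetic in allocation and offset paths. The following is a hedged sketch of those calling patterns — struct item, struct item_table, and both functions are hypothetical names, not taken from the file above:

/* Hypothetical sketch, not part of the kernel sources above. */
#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct item {
	u64 key;
	u64 value;
};

struct item_table {
	u32 count;
	struct item entries[];
};

/* struct_size() saturates to SIZE_MAX on overflow, so an absurd count makes
 * kzalloc() fail cleanly instead of returning an undersized buffer. */
static struct item_table *alloc_table(u32 count)
{
	struct item_table *t;

	t = kzalloc(struct_size(t, entries, count), GFP_KERNEL);
	if (t)
		t->count = count;
	return t;
}

/* check_add_overflow() returns true when the sum wraps; *out still holds the
 * wrapped value, matching the (sum, s_of) columns in the test arrays above. */
static int advance_offset(u32 base, u32 step, u32 *out)
{
	if (check_add_overflow(base, step, out))
		return -ERANGE;
	return 0;
}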
// SPDX-License-Identifier: GPL-2.0+ /* * Test cases csum_partial and csum_fold */ #include <kunit/test.h> #include <asm/checksum.h> #define MAX_LEN 512 #define MAX_ALIGN 64 #define TEST_BUFLEN (MAX_LEN + MAX_ALIGN) /* Values for a little endian CPU. Byte swap each half on big endian CPU. */ static const u32 random_init_sum = 0x2847aab; static const u8 random_buf[] = { 0xac, 0xd7, 0x76, 0x69, 0x6e, 0xf2, 0x93, 0x2c, 0x1f, 0xe0, 0xde, 0x86, 0x8f, 0x54, 0x33, 0x90, 0x95, 0xbf, 0xff, 0xb9, 0xea, 0x62, 0x6e, 0xb5, 0xd3, 0x4f, 0xf5, 0x60, 0x50, 0x5c, 0xc7, 0xfa, 0x6d, 0x1a, 0xc7, 0xf0, 0xd2, 0x2c, 0x12, 0x3d, 0x88, 0xe3, 0x14, 0x21, 0xb1, 0x5e, 0x45, 0x31, 0xa2, 0x85, 0x36, 0x76, 0xba, 0xd8, 0xad, 0xbb, 0x9e, 0x49, 0x8f, 0xf7, 0xce, 0xea, 0xef, 0xca, 0x2c, 0x29, 0xf7, 0x15, 0x5c, 0x1d, 0x4d, 0x09, 0x1f, 0xe2, 0x14, 0x31, 0x8c, 0x07, 0x57, 0x23, 0x1f, 0x6f, 0x03, 0xe1, 0x93, 0x19, 0x53, 0x03, 0x45, 0x49, 0x9a, 0x3b, 0x8e, 0x0c, 0x12, 0x5d, 0x8a, 0xb8, 0x9b, 0x8c, 0x9a, 0x03, 0xe5, 0xa2, 0x43, 0xd2, 0x3b, 0x4e, 0x7e, 0x30, 0x3c, 0x22, 0x2d, 0xc5, 0xfc, 0x9e, 0xdb, 0xc6, 0xf9, 0x69, 0x12, 0x39, 0x1f, 0xa0, 0x11, 0x0c, 0x3f, 0xf5, 0x53, 0xc9, 0x30, 0xfb, 0xb0, 0xdd, 0x21, 0x1d, 0x34, 0xe2, 0x65, 0x30, 0xf1, 0xe8, 0x1b, 0xe7, 0x55, 0x0d, 0xeb, 0xbd, 0xcc, 0x9d, 0x24, 0xa4, 0xad, 0xa7, 0x93, 0x47, 0x19, 0x2e, 0xc4, 0x5c, 0x3b, 0xc7, 0x6d, 0x95, 0x0c, 0x47, 0x60, 0xaf, 0x5b, 0x47, 0xee, 0xdc, 0x31, 0x31, 0x14, 0x12, 0x7e, 0x9e, 0x45, 0xb1, 0xc1, 0x69, 0x4b, 0x84, 0xfc, 0x88, 0xc1, 0x9e, 0x46, 0xb4, 0xc2, 0x25, 0xc5, 0x6c, 0x4c, 0x22, 0x58, 0x5c, 0xbe, 0xff, 0xea, 0x88, 0x88, 0x7a, 0xcb, 0x1c, 0x5d, 0x63, 0xa1, 0xf2, 0x33, 0x0c, 0xa2, 0x16, 0x0b, 0x6e, 0x2b, 0x79, 0x58, 0xf7, 0xac, 0xd3, 0x6a, 0x3f, 0x81, 0x57, 0x48, 0x45, 0xe3, 0x7c, 0xdc, 0xd6, 0x34, 0x7e, 0xe6, 0x73, 0xfa, 0xcb, 0x31, 0x18, 0xa9, 0x0b, 0xee, 0x6b, 0x99, 0xb9, 0x2d, 0xde, 0x22, 0x0e, 0x71, 0x57, 0x0e, 0x9b, 0x11, 0xd1, 0x15, 0x41, 0xd0, 0x6b, 0x50, 0x8a, 0x23, 0x64, 0xe3, 0x9c, 0xb3, 0x55, 0x09, 0xe9, 0x32, 0x67, 0xf9, 0xe0, 0x73, 0xf1, 0x60, 0x66, 0x0b, 0x88, 0x79, 0x8d, 0x4b, 0x52, 0x83, 0x20, 0x26, 0x78, 0x49, 0x27, 0xe7, 0x3e, 0x29, 0xa8, 0x18, 0x82, 0x41, 0xdd, 0x1e, 0xcc, 0x3b, 0xc4, 0x65, 0xd1, 0x21, 0x40, 0x72, 0xb2, 0x87, 0x5e, 0x16, 0x10, 0x80, 0x3f, 0x4b, 0x58, 0x1c, 0xc2, 0x79, 0x20, 0xf0, 0xe0, 0x80, 0xd3, 0x52, 0xa5, 0x19, 0x6e, 0x47, 0x90, 0x08, 0xf5, 0x50, 0xe2, 0xd6, 0xae, 0xe9, 0x2e, 0xdc, 0xd5, 0xb4, 0x90, 0x1f, 0x79, 0x49, 0x82, 0x21, 0x84, 0xa0, 0xb5, 0x2f, 0xff, 0x30, 0x71, 0xed, 0x80, 0x68, 0xb1, 0x6d, 0xef, 0xf6, 0xcf, 0xb8, 0x41, 0x79, 0xf5, 0x01, 0xbc, 0x0c, 0x9b, 0x0e, 0x06, 0xf3, 0xb0, 0xbb, 0x97, 0xb8, 0xb1, 0xfd, 0x51, 0x4e, 0xef, 0x0a, 0x3d, 0x7a, 0x3d, 0xbd, 0x61, 0x00, 0xa2, 0xb3, 0xf0, 0x1d, 0x77, 0x7b, 0x6c, 0x01, 0x61, 0xa5, 0xa3, 0xdb, 0xd5, 0xd5, 0xf4, 0xb5, 0x28, 0x9f, 0x0a, 0xa3, 0x82, 0x5f, 0x4b, 0x40, 0x0f, 0x05, 0x0e, 0x78, 0xed, 0xbf, 0x17, 0xf6, 0x5a, 0x8a, 0x7d, 0xf9, 0x45, 0xc1, 0xd7, 0x1b, 0x9d, 0x6c, 0x07, 0x88, 0xf3, 0xbc, 0xf1, 0xea, 0x28, 0x1f, 0xb8, 0x7a, 0x60, 0x3c, 0xce, 0x3e, 0x50, 0xb2, 0x0b, 0xcf, 0xe5, 0x08, 0x1f, 0x48, 0x04, 0xf9, 0x35, 0x29, 0x15, 0xbe, 0x82, 0x96, 0xc2, 0x55, 0x04, 0x6c, 0x19, 0x45, 0x29, 0x0b, 0xb6, 0x49, 0x12, 0xfb, 0x8d, 0x1b, 0x75, 0x8b, 0xd9, 0x6a, 0x5c, 0xbe, 0x46, 0x2b, 0x41, 0xfe, 0x21, 0xad, 0x1f, 0x75, 0xe7, 0x90, 0x3d, 0xe1, 0xdf, 0x4b, 0xe1, 0x81, 0xe2, 0x17, 0x02, 0x7b, 0x58, 0x8b, 0x92, 0x1a, 0xac, 0x46, 0xdd, 0x2e, 0xce, 0x40, 0x09 }; /* Values for a little endian CPU. Byte swap on big endian CPU. 
*/ static const u16 expected_results[] = { 0x82d0, 0x8224, 0xab23, 0xaaad, 0x41ad, 0x413f, 0x4f3e, 0x4eab, 0x22ab, 0x228c, 0x428b, 0x41ad, 0xbbac, 0xbb1d, 0x671d, 0x66ea, 0xd6e9, 0xd654, 0x1754, 0x1655, 0x5d54, 0x5c6a, 0xfa69, 0xf9fb, 0x44fb, 0x4428, 0xf527, 0xf432, 0x9432, 0x93e2, 0x37e2, 0x371b, 0x3d1a, 0x3cad, 0x22ad, 0x21e6, 0x31e5, 0x3113, 0x0513, 0x0501, 0xc800, 0xc778, 0xe477, 0xe463, 0xc363, 0xc2b2, 0x64b2, 0x646d, 0x336d, 0x32cb, 0xadca, 0xad94, 0x3794, 0x36da, 0x5ed9, 0x5e2c, 0xa32b, 0xa28d, 0x598d, 0x58fe, 0x61fd, 0x612f, 0x772e, 0x763f, 0xac3e, 0xac12, 0x8312, 0x821b, 0x6d1b, 0x6cbf, 0x4fbf, 0x4f72, 0x4672, 0x4653, 0x6452, 0x643e, 0x333e, 0x32b2, 0x2bb2, 0x2b5b, 0x085b, 0x083c, 0x993b, 0x9938, 0xb837, 0xb7a4, 0x9ea4, 0x9e51, 0x9b51, 0x9b0c, 0x520c, 0x5172, 0x1672, 0x15e4, 0x09e4, 0x09d2, 0xacd1, 0xac47, 0xf446, 0xf3ab, 0x67ab, 0x6711, 0x6411, 0x632c, 0xc12b, 0xc0e8, 0xeee7, 0xeeac, 0xa0ac, 0xa02e, 0x702e, 0x6ff2, 0x4df2, 0x4dc5, 0x88c4, 0x87c8, 0xe9c7, 0xe8ec, 0x22ec, 0x21f3, 0xb8f2, 0xb8e0, 0x7fe0, 0x7fc1, 0xdfc0, 0xdfaf, 0xd3af, 0xd370, 0xde6f, 0xde1c, 0x151c, 0x14ec, 0x19eb, 0x193b, 0x3c3a, 0x3c19, 0x1f19, 0x1ee5, 0x3ce4, 0x3c7f, 0x0c7f, 0x0b8e, 0x238d, 0x2372, 0x3c71, 0x3c1c, 0x2f1c, 0x2e31, 0x7130, 0x7064, 0xd363, 0xd33f, 0x2f3f, 0x2e92, 0x8791, 0x86fe, 0x3ffe, 0x3fe5, 0x11e5, 0x1121, 0xb520, 0xb4e5, 0xede4, 0xed77, 0x5877, 0x586b, 0x116b, 0x110b, 0x620a, 0x61af, 0x1aaf, 0x19c1, 0x3dc0, 0x3d8f, 0x0c8f, 0x0c7b, 0xfa7a, 0xf9fc, 0x5bfc, 0x5bb7, 0xaab6, 0xa9f5, 0x40f5, 0x40aa, 0xbca9, 0xbbad, 0x33ad, 0x32ec, 0x94eb, 0x94a5, 0xe0a4, 0xdfe2, 0xbae2, 0xba1d, 0x4e1d, 0x4dd1, 0x2bd1, 0x2b79, 0xcf78, 0xceba, 0xcfb9, 0xcecf, 0x46cf, 0x4647, 0xcc46, 0xcb7b, 0xaf7b, 0xaf1e, 0x4c1e, 0x4b7d, 0x597c, 0x5949, 0x4d49, 0x4ca7, 0x36a7, 0x369c, 0xc89b, 0xc870, 0x4f70, 0x4f18, 0x5817, 0x576b, 0x846a, 0x8400, 0x4500, 0x447f, 0xed7e, 0xed36, 0xa836, 0xa753, 0x2b53, 0x2a77, 0x5476, 0x5442, 0xd641, 0xd55b, 0x625b, 0x6161, 0x9660, 0x962f, 0x7e2f, 0x7d86, 0x7286, 0x7198, 0x0698, 0x05ff, 0x4cfe, 0x4cd1, 0x6ed0, 0x6eae, 0x60ae, 0x603d, 0x093d, 0x092f, 0x6e2e, 0x6e1d, 0x9d1c, 0x9d07, 0x5c07, 0x5b37, 0xf036, 0xefe6, 0x65e6, 0x65c3, 0x01c3, 0x00e0, 0x64df, 0x642c, 0x0f2c, 0x0f23, 0x2622, 0x25f0, 0xbeef, 0xbdf6, 0xddf5, 0xdd82, 0xec81, 0xec21, 0x8621, 0x8616, 0xfe15, 0xfd9c, 0x709c, 0x7051, 0x1e51, 0x1dce, 0xfdcd, 0xfda7, 0x85a7, 0x855e, 0x5e5e, 0x5d77, 0x1f77, 0x1f4e, 0x774d, 0x7735, 0xf534, 0xf4f3, 0x17f3, 0x17d5, 0x4bd4, 0x4b99, 0x8798, 0x8733, 0xb632, 0xb611, 0x7611, 0x759f, 0xc39e, 0xc317, 0x6517, 0x6501, 0x5501, 0x5481, 0x1581, 0x1536, 0xbd35, 0xbd19, 0xfb18, 0xfa9f, 0xda9f, 0xd9af, 0xf9ae, 0xf92e, 0x262e, 0x25dc, 0x80db, 0x80c2, 0x12c2, 0x127b, 0x827a, 0x8272, 0x8d71, 0x8d21, 0xab20, 0xaa4a, 0xfc49, 0xfb60, 0xcd60, 0xcc84, 0xf783, 0xf6cf, 0x66cf, 0x66b0, 0xedaf, 0xed66, 0x6b66, 0x6b45, 0xe744, 0xe6a4, 0x31a4, 0x3175, 0x3274, 0x3244, 0xc143, 0xc056, 0x4056, 0x3fee, 0x8eed, 0x8e80, 0x9f7f, 0x9e89, 0xcf88, 0xced0, 0x8dd0, 0x8d57, 0x9856, 0x9855, 0xdc54, 0xdc48, 0x4148, 0x413a, 0x3b3a, 0x3a47, 0x8a46, 0x898b, 0xf28a, 0xf1d2, 0x40d2, 0x3fd5, 0xeed4, 0xee86, 0xff85, 0xff7b, 0xc27b, 0xc201, 0x8501, 0x8444, 0x2344, 0x2344, 0x8143, 0x8090, 0x908f, 0x9072, 0x1972, 0x18f7, 0xacf6, 0xacf5, 0x4bf5, 0x4b50, 0xa84f, 0xa774, 0xd273, 0xd19e, 0xdd9d, 0xdce8, 0xb4e8, 0xb449, 0xaa49, 0xa9a6, 0x27a6, 0x2747, 0xdc46, 0xdc06, 0xcd06, 0xcd01, 0xbf01, 0xbe89, 0xd188, 0xd0c9, 0xb9c9, 0xb8d3, 0x5ed3, 0x5e49, 0xe148, 0xe04f, 0x9b4f, 0x9a8e, 0xc38d, 0xc372, 0x2672, 0x2606, 0x1f06, 0x1e7e, 0x2b7d, 0x2ac1, 0x39c0, 0x38d6, 0x10d6, 
0x10b7, 0x58b6, 0x583c, 0xf83b, 0xf7ff, 0x29ff, 0x29c1, 0xd9c0, 0xd90e, 0xce0e, 0xcd3f, 0xe83e, 0xe836, 0xc936, 0xc8ee, 0xc4ee, 0xc3f5, 0x8ef5, 0x8ecc, 0x79cc, 0x790e, 0xf70d, 0xf677, 0x3477, 0x3422, 0x3022, 0x2fb6, 0x16b6, 0x1671, 0xed70, 0xed65, 0x3765, 0x371c, 0x251c, 0x2421, 0x9720, 0x9705, 0x2205, 0x217a, 0x4879, 0x480f, 0xec0e, 0xeb50, 0xa550, 0xa525, 0x6425, 0x6327, 0x4227, 0x417a, 0x227a, 0x2205, 0x3b04, 0x3a74, 0xfd73, 0xfc92, 0x1d92, 0x1d47, 0x3c46, 0x3bc5, 0x59c4, 0x59ad, 0x57ad, 0x5732, 0xff31, 0xfea6, 0x6ca6, 0x6c8c, 0xc08b, 0xc045, 0xe344, 0xe316, 0x1516, 0x14d6, }; /* Values for a little endian CPU. Byte swap each half on big endian CPU. */ static const u32 init_sums_no_overflow[] = { 0xffffffff, 0xfffffffb, 0xfffffbfb, 0xfffffbf7, 0xfffff7f7, 0xfffff7f3, 0xfffff3f3, 0xfffff3ef, 0xffffefef, 0xffffefeb, 0xffffebeb, 0xffffebe7, 0xffffe7e7, 0xffffe7e3, 0xffffe3e3, 0xffffe3df, 0xffffdfdf, 0xffffdfdb, 0xffffdbdb, 0xffffdbd7, 0xffffd7d7, 0xffffd7d3, 0xffffd3d3, 0xffffd3cf, 0xffffcfcf, 0xffffcfcb, 0xffffcbcb, 0xffffcbc7, 0xffffc7c7, 0xffffc7c3, 0xffffc3c3, 0xffffc3bf, 0xffffbfbf, 0xffffbfbb, 0xffffbbbb, 0xffffbbb7, 0xffffb7b7, 0xffffb7b3, 0xffffb3b3, 0xffffb3af, 0xffffafaf, 0xffffafab, 0xffffabab, 0xffffaba7, 0xffffa7a7, 0xffffa7a3, 0xffffa3a3, 0xffffa39f, 0xffff9f9f, 0xffff9f9b, 0xffff9b9b, 0xffff9b97, 0xffff9797, 0xffff9793, 0xffff9393, 0xffff938f, 0xffff8f8f, 0xffff8f8b, 0xffff8b8b, 0xffff8b87, 0xffff8787, 0xffff8783, 0xffff8383, 0xffff837f, 0xffff7f7f, 0xffff7f7b, 0xffff7b7b, 0xffff7b77, 0xffff7777, 0xffff7773, 0xffff7373, 0xffff736f, 0xffff6f6f, 0xffff6f6b, 0xffff6b6b, 0xffff6b67, 0xffff6767, 0xffff6763, 0xffff6363, 0xffff635f, 0xffff5f5f, 0xffff5f5b, 0xffff5b5b, 0xffff5b57, 0xffff5757, 0xffff5753, 0xffff5353, 0xffff534f, 0xffff4f4f, 0xffff4f4b, 0xffff4b4b, 0xffff4b47, 0xffff4747, 0xffff4743, 0xffff4343, 0xffff433f, 0xffff3f3f, 0xffff3f3b, 0xffff3b3b, 0xffff3b37, 0xffff3737, 0xffff3733, 0xffff3333, 0xffff332f, 0xffff2f2f, 0xffff2f2b, 0xffff2b2b, 0xffff2b27, 0xffff2727, 0xffff2723, 0xffff2323, 0xffff231f, 0xffff1f1f, 0xffff1f1b, 0xffff1b1b, 0xffff1b17, 0xffff1717, 0xffff1713, 0xffff1313, 0xffff130f, 0xffff0f0f, 0xffff0f0b, 0xffff0b0b, 0xffff0b07, 0xffff0707, 0xffff0703, 0xffff0303, 0xffff02ff, 0xfffffefe, 0xfffffefa, 0xfffffafa, 0xfffffaf6, 0xfffff6f6, 0xfffff6f2, 0xfffff2f2, 0xfffff2ee, 0xffffeeee, 0xffffeeea, 0xffffeaea, 0xffffeae6, 0xffffe6e6, 0xffffe6e2, 0xffffe2e2, 0xffffe2de, 0xffffdede, 0xffffdeda, 0xffffdada, 0xffffdad6, 0xffffd6d6, 0xffffd6d2, 0xffffd2d2, 0xffffd2ce, 0xffffcece, 0xffffceca, 0xffffcaca, 0xffffcac6, 0xffffc6c6, 0xffffc6c2, 0xffffc2c2, 0xffffc2be, 0xffffbebe, 0xffffbeba, 0xffffbaba, 0xffffbab6, 0xffffb6b6, 0xffffb6b2, 0xffffb2b2, 0xffffb2ae, 0xffffaeae, 0xffffaeaa, 0xffffaaaa, 0xffffaaa6, 0xffffa6a6, 0xffffa6a2, 0xffffa2a2, 0xffffa29e, 0xffff9e9e, 0xffff9e9a, 0xffff9a9a, 0xffff9a96, 0xffff9696, 0xffff9692, 0xffff9292, 0xffff928e, 0xffff8e8e, 0xffff8e8a, 0xffff8a8a, 0xffff8a86, 0xffff8686, 0xffff8682, 0xffff8282, 0xffff827e, 0xffff7e7e, 0xffff7e7a, 0xffff7a7a, 0xffff7a76, 0xffff7676, 0xffff7672, 0xffff7272, 0xffff726e, 0xffff6e6e, 0xffff6e6a, 0xffff6a6a, 0xffff6a66, 0xffff6666, 0xffff6662, 0xffff6262, 0xffff625e, 0xffff5e5e, 0xffff5e5a, 0xffff5a5a, 0xffff5a56, 0xffff5656, 0xffff5652, 0xffff5252, 0xffff524e, 0xffff4e4e, 0xffff4e4a, 0xffff4a4a, 0xffff4a46, 0xffff4646, 0xffff4642, 0xffff4242, 0xffff423e, 0xffff3e3e, 0xffff3e3a, 0xffff3a3a, 0xffff3a36, 0xffff3636, 0xffff3632, 0xffff3232, 0xffff322e, 0xffff2e2e, 0xffff2e2a, 0xffff2a2a, 0xffff2a26, 0xffff2626, 
0xffff2622, 0xffff2222, 0xffff221e, 0xffff1e1e, 0xffff1e1a, 0xffff1a1a, 0xffff1a16, 0xffff1616, 0xffff1612, 0xffff1212, 0xffff120e, 0xffff0e0e, 0xffff0e0a, 0xffff0a0a, 0xffff0a06, 0xffff0606, 0xffff0602, 0xffff0202, 0xffff01fe, 0xfffffdfd, 0xfffffdf9, 0xfffff9f9, 0xfffff9f5, 0xfffff5f5, 0xfffff5f1, 0xfffff1f1, 0xfffff1ed, 0xffffeded, 0xffffede9, 0xffffe9e9, 0xffffe9e5, 0xffffe5e5, 0xffffe5e1, 0xffffe1e1, 0xffffe1dd, 0xffffdddd, 0xffffddd9, 0xffffd9d9, 0xffffd9d5, 0xffffd5d5, 0xffffd5d1, 0xffffd1d1, 0xffffd1cd, 0xffffcdcd, 0xffffcdc9, 0xffffc9c9, 0xffffc9c5, 0xffffc5c5, 0xffffc5c1, 0xffffc1c1, 0xffffc1bd, 0xffffbdbd, 0xffffbdb9, 0xffffb9b9, 0xffffb9b5, 0xffffb5b5, 0xffffb5b1, 0xffffb1b1, 0xffffb1ad, 0xffffadad, 0xffffada9, 0xffffa9a9, 0xffffa9a5, 0xffffa5a5, 0xffffa5a1, 0xffffa1a1, 0xffffa19d, 0xffff9d9d, 0xffff9d99, 0xffff9999, 0xffff9995, 0xffff9595, 0xffff9591, 0xffff9191, 0xffff918d, 0xffff8d8d, 0xffff8d89, 0xffff8989, 0xffff8985, 0xffff8585, 0xffff8581, 0xffff8181, 0xffff817d, 0xffff7d7d, 0xffff7d79, 0xffff7979, 0xffff7975, 0xffff7575, 0xffff7571, 0xffff7171, 0xffff716d, 0xffff6d6d, 0xffff6d69, 0xffff6969, 0xffff6965, 0xffff6565, 0xffff6561, 0xffff6161, 0xffff615d, 0xffff5d5d, 0xffff5d59, 0xffff5959, 0xffff5955, 0xffff5555, 0xffff5551, 0xffff5151, 0xffff514d, 0xffff4d4d, 0xffff4d49, 0xffff4949, 0xffff4945, 0xffff4545, 0xffff4541, 0xffff4141, 0xffff413d, 0xffff3d3d, 0xffff3d39, 0xffff3939, 0xffff3935, 0xffff3535, 0xffff3531, 0xffff3131, 0xffff312d, 0xffff2d2d, 0xffff2d29, 0xffff2929, 0xffff2925, 0xffff2525, 0xffff2521, 0xffff2121, 0xffff211d, 0xffff1d1d, 0xffff1d19, 0xffff1919, 0xffff1915, 0xffff1515, 0xffff1511, 0xffff1111, 0xffff110d, 0xffff0d0d, 0xffff0d09, 0xffff0909, 0xffff0905, 0xffff0505, 0xffff0501, 0xffff0101, 0xffff00fd, 0xfffffcfc, 0xfffffcf8, 0xfffff8f8, 0xfffff8f4, 0xfffff4f4, 0xfffff4f0, 0xfffff0f0, 0xfffff0ec, 0xffffecec, 0xffffece8, 0xffffe8e8, 0xffffe8e4, 0xffffe4e4, 0xffffe4e0, 0xffffe0e0, 0xffffe0dc, 0xffffdcdc, 0xffffdcd8, 0xffffd8d8, 0xffffd8d4, 0xffffd4d4, 0xffffd4d0, 0xffffd0d0, 0xffffd0cc, 0xffffcccc, 0xffffccc8, 0xffffc8c8, 0xffffc8c4, 0xffffc4c4, 0xffffc4c0, 0xffffc0c0, 0xffffc0bc, 0xffffbcbc, 0xffffbcb8, 0xffffb8b8, 0xffffb8b4, 0xffffb4b4, 0xffffb4b0, 0xffffb0b0, 0xffffb0ac, 0xffffacac, 0xffffaca8, 0xffffa8a8, 0xffffa8a4, 0xffffa4a4, 0xffffa4a0, 0xffffa0a0, 0xffffa09c, 0xffff9c9c, 0xffff9c98, 0xffff9898, 0xffff9894, 0xffff9494, 0xffff9490, 0xffff9090, 0xffff908c, 0xffff8c8c, 0xffff8c88, 0xffff8888, 0xffff8884, 0xffff8484, 0xffff8480, 0xffff8080, 0xffff807c, 0xffff7c7c, 0xffff7c78, 0xffff7878, 0xffff7874, 0xffff7474, 0xffff7470, 0xffff7070, 0xffff706c, 0xffff6c6c, 0xffff6c68, 0xffff6868, 0xffff6864, 0xffff6464, 0xffff6460, 0xffff6060, 0xffff605c, 0xffff5c5c, 0xffff5c58, 0xffff5858, 0xffff5854, 0xffff5454, 0xffff5450, 0xffff5050, 0xffff504c, 0xffff4c4c, 0xffff4c48, 0xffff4848, 0xffff4844, 0xffff4444, 0xffff4440, 0xffff4040, 0xffff403c, 0xffff3c3c, 0xffff3c38, 0xffff3838, 0xffff3834, 0xffff3434, 0xffff3430, 0xffff3030, 0xffff302c, 0xffff2c2c, 0xffff2c28, 0xffff2828, 0xffff2824, 0xffff2424, 0xffff2420, 0xffff2020, 0xffff201c, 0xffff1c1c, 0xffff1c18, 0xffff1818, 0xffff1814, 0xffff1414, 0xffff1410, 0xffff1010, 0xffff100c, 0xffff0c0c, 0xffff0c08, 0xffff0808, 0xffff0804, 0xffff0404, 0xffff0400, 0xffff0000, 0xfffffffb, }; static u8 tmp_buf[TEST_BUFLEN]; #define full_csum(buff, len, sum) csum_fold(csum_partial(buff, len, sum)) #define CHECK_EQ(lhs, rhs) KUNIT_ASSERT_EQ(test, (__force u64)lhs, (__force u64)rhs) static __sum16 to_sum16(u16 x) { return (__force 
__sum16)le16_to_cpu((__force __le16)x); } /* This function swaps the bytes inside each half of a __wsum */ static __wsum to_wsum(u32 x) { u16 hi = le16_to_cpu((__force __le16)(x >> 16)); u16 lo = le16_to_cpu((__force __le16)x); return (__force __wsum)((hi << 16) | lo); } static void assert_setup_correct(struct kunit *test) { CHECK_EQ(sizeof(random_buf) / sizeof(random_buf[0]), MAX_LEN); CHECK_EQ(sizeof(expected_results) / sizeof(expected_results[0]), MAX_LEN); CHECK_EQ(sizeof(init_sums_no_overflow) / sizeof(init_sums_no_overflow[0]), MAX_LEN); } /* * Test with randomized input (pre determined random with known results). */ static void test_csum_fixed_random_inputs(struct kunit *test) { int len, align; __wsum sum; __sum16 result, expec; assert_setup_correct(test); for (align = 0; align < TEST_BUFLEN; ++align) { memcpy(&tmp_buf[align], random_buf, min(MAX_LEN, TEST_BUFLEN - align)); for (len = 0; len < MAX_LEN && (align + len) < TEST_BUFLEN; ++len) { /* * Test the precomputed random input. */ sum = to_wsum(random_init_sum); result = full_csum(&tmp_buf[align], len, sum); expec = to_sum16(expected_results[len]); CHECK_EQ(result, expec); } } } /* * All ones input test. If there are any missing carry operations, it fails. */ static void test_csum_all_carry_inputs(struct kunit *test) { int len, align; __wsum sum; __sum16 result, expec; assert_setup_correct(test); memset(tmp_buf, 0xff, TEST_BUFLEN); for (align = 0; align < TEST_BUFLEN; ++align) { for (len = 0; len < MAX_LEN && (align + len) < TEST_BUFLEN; ++len) { /* * All carries from input and initial sum. */ sum = to_wsum(0xffffffff); result = full_csum(&tmp_buf[align], len, sum); expec = to_sum16((len & 1) ? 0xff00 : 0); CHECK_EQ(result, expec); /* * All carries from input. */ sum = 0; result = full_csum(&tmp_buf[align], len, sum); if (len & 1) expec = to_sum16(0xff00); else if (len) expec = 0; else expec = to_sum16(0xffff); CHECK_EQ(result, expec); } } } /* * Test with input that alone doesn't cause any carries. By selecting the * maximum initial sum, this allows us to test that there are no carries * where there shouldn't be. */ static void test_csum_no_carry_inputs(struct kunit *test) { int len, align; __wsum sum; __sum16 result, expec; assert_setup_correct(test); memset(tmp_buf, 0x4, TEST_BUFLEN); for (align = 0; align < TEST_BUFLEN; ++align) { for (len = 0; len < MAX_LEN && (align + len) < TEST_BUFLEN; ++len) { /* * Expect no carries. */ sum = to_wsum(init_sums_no_overflow[len]); result = full_csum(&tmp_buf[align], len, sum); expec = 0; CHECK_EQ(result, expec); /* * Expect one carry. */ sum = to_wsum(init_sums_no_overflow[len] + 1); result = full_csum(&tmp_buf[align], len, sum); expec = to_sum16(len ? 0xfffe : 0xffff); CHECK_EQ(result, expec); } } } static struct kunit_case __refdata checksum_test_cases[] = { KUNIT_CASE(test_csum_fixed_random_inputs), KUNIT_CASE(test_csum_all_carry_inputs), KUNIT_CASE(test_csum_no_carry_inputs), {} }; static struct kunit_suite checksum_test_suite = { .name = "checksum", .test_cases = checksum_test_cases, }; kunit_test_suites(&checksum_test_suite); MODULE_AUTHOR("Noah Goldstein <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
lib/checksum_kunit.c
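/*
 * Illustrative sketch, not part of checksum_kunit.c: a plain user-space model
 * of what the test's full_csum() macro, csum_fold(csum_partial(buf, len, sum)),
 * computes -- the RFC 1071 ones'-complement sum folded to 16 bits.  The helper
 * names model_csum_partial() and model_csum_fold() are invented for this
 * example; the kernel's real csum_partial() is arch-specific, so treat this
 * only as a reading aid for the little-endian expectations above.
 */
#include <stdint.h>
#include <stddef.h>

static uint64_t model_csum_partial(const uint8_t *buf, size_t len, uint64_t sum)
{
	/* Accumulate 16-bit little-endian words; carries pile up in the high bits. */
	while (len > 1) {
		sum += (uint64_t)buf[0] | ((uint64_t)buf[1] << 8);
		buf += 2;
		len -= 2;
	}
	if (len)			/* trailing odd byte, taken as the low byte */
		sum += buf[0];
	return sum;
}

static uint16_t model_csum_fold(uint64_t sum)
{
	/* End-around carry until everything fits in 16 bits, then complement. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}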
// SPDX-License-Identifier: GPL-2.0 /* * This is a module to test the HMM (Heterogeneous Memory Management) * mirror and zone device private memory migration APIs of the kernel. * Userspace programs can register with the driver to mirror their own address * space and can use the device to read/write any valid virtual address. */ #include <linux/init.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/cdev.h> #include <linux/device.h> #include <linux/memremap.h> #include <linux/mutex.h> #include <linux/rwsem.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/delay.h> #include <linux/pagemap.h> #include <linux/hmm.h> #include <linux/vmalloc.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/sched/mm.h> #include <linux/platform_device.h> #include <linux/rmap.h> #include <linux/mmu_notifier.h> #include <linux/migrate.h> #include "test_hmm_uapi.h" #define DMIRROR_NDEVICES 4 #define DMIRROR_RANGE_FAULT_TIMEOUT 1000 #define DEVMEM_CHUNK_SIZE (256 * 1024 * 1024U) #define DEVMEM_CHUNKS_RESERVE 16 /* * For device_private pages, dpage is just a dummy struct page * representing a piece of device memory. dmirror_devmem_alloc_page * allocates a real system memory page as backing storage to fake a * real device. zone_device_data points to that backing page. But * for device_coherent memory, the struct page represents real * physical CPU-accessible memory that we can use directly. */ #define BACKING_PAGE(page) (is_device_private_page((page)) ? \ (page)->zone_device_data : (page)) static unsigned long spm_addr_dev0; module_param(spm_addr_dev0, long, 0644); MODULE_PARM_DESC(spm_addr_dev0, "Specify start address for SPM (special purpose memory) used for device 0. By setting this Coherent device type will be used. Make sure spm_addr_dev1 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE."); static unsigned long spm_addr_dev1; module_param(spm_addr_dev1, long, 0644); MODULE_PARM_DESC(spm_addr_dev1, "Specify start address for SPM (special purpose memory) used for device 1. By setting this Coherent device type will be used. Make sure spm_addr_dev0 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE."); static const struct dev_pagemap_ops dmirror_devmem_ops; static const struct mmu_interval_notifier_ops dmirror_min_ops; static dev_t dmirror_dev; struct dmirror_device; struct dmirror_bounce { void *ptr; unsigned long size; unsigned long addr; unsigned long cpages; }; #define DPT_XA_TAG_ATOMIC 1UL #define DPT_XA_TAG_WRITE 3UL /* * Data structure to track address ranges and register for mmu interval * notifier updates. */ struct dmirror_interval { struct mmu_interval_notifier notifier; struct dmirror *dmirror; }; /* * Data attached to the open device file. * Note that it might be shared after a fork(). */ struct dmirror { struct dmirror_device *mdevice; struct xarray pt; struct mmu_interval_notifier notifier; struct mutex mutex; }; /* * ZONE_DEVICE pages for migration and simulating device memory. */ struct dmirror_chunk { struct dev_pagemap pagemap; struct dmirror_device *mdevice; bool remove; }; /* * Per device data. 
*/ struct dmirror_device { struct cdev cdevice; unsigned int zone_device_type; struct device device; unsigned int devmem_capacity; unsigned int devmem_count; struct dmirror_chunk **devmem_chunks; struct mutex devmem_lock; /* protects the above */ unsigned long calloc; unsigned long cfree; struct page *free_pages; spinlock_t lock; /* protects the above */ }; static struct dmirror_device dmirror_devices[DMIRROR_NDEVICES]; static int dmirror_bounce_init(struct dmirror_bounce *bounce, unsigned long addr, unsigned long size) { bounce->addr = addr; bounce->size = size; bounce->cpages = 0; bounce->ptr = vmalloc(size); if (!bounce->ptr) return -ENOMEM; return 0; } static bool dmirror_is_private_zone(struct dmirror_device *mdevice) { return (mdevice->zone_device_type == HMM_DMIRROR_MEMORY_DEVICE_PRIVATE) ? true : false; } static enum migrate_vma_direction dmirror_select_device(struct dmirror *dmirror) { return (dmirror->mdevice->zone_device_type == HMM_DMIRROR_MEMORY_DEVICE_PRIVATE) ? MIGRATE_VMA_SELECT_DEVICE_PRIVATE : MIGRATE_VMA_SELECT_DEVICE_COHERENT; } static void dmirror_bounce_fini(struct dmirror_bounce *bounce) { vfree(bounce->ptr); } static int dmirror_fops_open(struct inode *inode, struct file *filp) { struct cdev *cdev = inode->i_cdev; struct dmirror *dmirror; int ret; /* Mirror this process address space */ dmirror = kzalloc(sizeof(*dmirror), GFP_KERNEL); if (dmirror == NULL) return -ENOMEM; dmirror->mdevice = container_of(cdev, struct dmirror_device, cdevice); mutex_init(&dmirror->mutex); xa_init(&dmirror->pt); ret = mmu_interval_notifier_insert(&dmirror->notifier, current->mm, 0, ULONG_MAX & PAGE_MASK, &dmirror_min_ops); if (ret) { kfree(dmirror); return ret; } filp->private_data = dmirror; return 0; } static int dmirror_fops_release(struct inode *inode, struct file *filp) { struct dmirror *dmirror = filp->private_data; mmu_interval_notifier_remove(&dmirror->notifier); xa_destroy(&dmirror->pt); kfree(dmirror); return 0; } static struct dmirror_chunk *dmirror_page_to_chunk(struct page *page) { return container_of(page->pgmap, struct dmirror_chunk, pagemap); } static struct dmirror_device *dmirror_page_to_device(struct page *page) { return dmirror_page_to_chunk(page)->mdevice; } static int dmirror_do_fault(struct dmirror *dmirror, struct hmm_range *range) { unsigned long *pfns = range->hmm_pfns; unsigned long pfn; for (pfn = (range->start >> PAGE_SHIFT); pfn < (range->end >> PAGE_SHIFT); pfn++, pfns++) { struct page *page; void *entry; /* * Since we asked for hmm_range_fault() to populate pages, * it shouldn't return an error entry on success. */ WARN_ON(*pfns & HMM_PFN_ERROR); WARN_ON(!(*pfns & HMM_PFN_VALID)); page = hmm_pfn_to_page(*pfns); WARN_ON(!page); entry = page; if (*pfns & HMM_PFN_WRITE) entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE); else if (WARN_ON(range->default_flags & HMM_PFN_WRITE)) return -EFAULT; entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC); if (xa_is_err(entry)) return xa_err(entry); } return 0; } static void dmirror_do_update(struct dmirror *dmirror, unsigned long start, unsigned long end) { unsigned long pfn; void *entry; /* * The XArray doesn't hold references to pages since it relies on * the mmu notifier to clear page pointers when they become stale. * Therefore, it is OK to just clear the entry. 
*/ xa_for_each_range(&dmirror->pt, pfn, entry, start >> PAGE_SHIFT, end >> PAGE_SHIFT) xa_erase(&dmirror->pt, pfn); } static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni, const struct mmu_notifier_range *range, unsigned long cur_seq) { struct dmirror *dmirror = container_of(mni, struct dmirror, notifier); /* * Ignore invalidation callbacks for device private pages since * the invalidation is handled as part of the migration process. */ if (range->event == MMU_NOTIFY_MIGRATE && range->owner == dmirror->mdevice) return true; if (mmu_notifier_range_blockable(range)) mutex_lock(&dmirror->mutex); else if (!mutex_trylock(&dmirror->mutex)) return false; mmu_interval_set_seq(mni, cur_seq); dmirror_do_update(dmirror, range->start, range->end); mutex_unlock(&dmirror->mutex); return true; } static const struct mmu_interval_notifier_ops dmirror_min_ops = { .invalidate = dmirror_interval_invalidate, }; static int dmirror_range_fault(struct dmirror *dmirror, struct hmm_range *range) { struct mm_struct *mm = dmirror->notifier.mm; unsigned long timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); int ret; while (true) { if (time_after(jiffies, timeout)) { ret = -EBUSY; goto out; } range->notifier_seq = mmu_interval_read_begin(range->notifier); mmap_read_lock(mm); ret = hmm_range_fault(range); mmap_read_unlock(mm); if (ret) { if (ret == -EBUSY) continue; goto out; } mutex_lock(&dmirror->mutex); if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) { mutex_unlock(&dmirror->mutex); continue; } break; } ret = dmirror_do_fault(dmirror, range); mutex_unlock(&dmirror->mutex); out: return ret; } static int dmirror_fault(struct dmirror *dmirror, unsigned long start, unsigned long end, bool write) { struct mm_struct *mm = dmirror->notifier.mm; unsigned long addr; unsigned long pfns[64]; struct hmm_range range = { .notifier = &dmirror->notifier, .hmm_pfns = pfns, .pfn_flags_mask = 0, .default_flags = HMM_PFN_REQ_FAULT | (write ? HMM_PFN_REQ_WRITE : 0), .dev_private_owner = dmirror->mdevice, }; int ret = 0; /* Since the mm is for the mirrored process, get a reference first. 
*/ if (!mmget_not_zero(mm)) return 0; for (addr = start; addr < end; addr = range.end) { range.start = addr; range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end); ret = dmirror_range_fault(dmirror, &range); if (ret) break; } mmput(mm); return ret; } static int dmirror_do_read(struct dmirror *dmirror, unsigned long start, unsigned long end, struct dmirror_bounce *bounce) { unsigned long pfn; void *ptr; ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK); for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) { void *entry; struct page *page; entry = xa_load(&dmirror->pt, pfn); page = xa_untag_pointer(entry); if (!page) return -ENOENT; memcpy_from_page(ptr, page, 0, PAGE_SIZE); ptr += PAGE_SIZE; bounce->cpages++; } return 0; } static int dmirror_read(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd) { struct dmirror_bounce bounce; unsigned long start, end; unsigned long size = cmd->npages << PAGE_SHIFT; int ret; start = cmd->addr; end = start + size; if (end < start) return -EINVAL; ret = dmirror_bounce_init(&bounce, start, size); if (ret) return ret; while (1) { mutex_lock(&dmirror->mutex); ret = dmirror_do_read(dmirror, start, end, &bounce); mutex_unlock(&dmirror->mutex); if (ret != -ENOENT) break; start = cmd->addr + (bounce.cpages << PAGE_SHIFT); ret = dmirror_fault(dmirror, start, end, false); if (ret) break; cmd->faults++; } if (ret == 0) { if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr, bounce.size)) ret = -EFAULT; } cmd->cpages = bounce.cpages; dmirror_bounce_fini(&bounce); return ret; } static int dmirror_do_write(struct dmirror *dmirror, unsigned long start, unsigned long end, struct dmirror_bounce *bounce) { unsigned long pfn; void *ptr; ptr = bounce->ptr + ((start - bounce->addr) & PAGE_MASK); for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) { void *entry; struct page *page; entry = xa_load(&dmirror->pt, pfn); page = xa_untag_pointer(entry); if (!page || xa_pointer_tag(entry) != DPT_XA_TAG_WRITE) return -ENOENT; memcpy_to_page(page, 0, ptr, PAGE_SIZE); ptr += PAGE_SIZE; bounce->cpages++; } return 0; } static int dmirror_write(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd) { struct dmirror_bounce bounce; unsigned long start, end; unsigned long size = cmd->npages << PAGE_SHIFT; int ret; start = cmd->addr; end = start + size; if (end < start) return -EINVAL; ret = dmirror_bounce_init(&bounce, start, size); if (ret) return ret; if (copy_from_user(bounce.ptr, u64_to_user_ptr(cmd->ptr), bounce.size)) { ret = -EFAULT; goto fini; } while (1) { mutex_lock(&dmirror->mutex); ret = dmirror_do_write(dmirror, start, end, &bounce); mutex_unlock(&dmirror->mutex); if (ret != -ENOENT) break; start = cmd->addr + (bounce.cpages << PAGE_SHIFT); ret = dmirror_fault(dmirror, start, end, true); if (ret) break; cmd->faults++; } fini: cmd->cpages = bounce.cpages; dmirror_bounce_fini(&bounce); return ret; } static int dmirror_allocate_chunk(struct dmirror_device *mdevice, struct page **ppage) { struct dmirror_chunk *devmem; struct resource *res = NULL; unsigned long pfn; unsigned long pfn_first; unsigned long pfn_last; void *ptr; int ret = -ENOMEM; devmem = kzalloc(sizeof(*devmem), GFP_KERNEL); if (!devmem) return ret; switch (mdevice->zone_device_type) { case HMM_DMIRROR_MEMORY_DEVICE_PRIVATE: res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE, "hmm_dmirror"); if (IS_ERR_OR_NULL(res)) goto err_devmem; devmem->pagemap.range.start = res->start; devmem->pagemap.range.end = res->end; devmem->pagemap.type = MEMORY_DEVICE_PRIVATE; break; 
case HMM_DMIRROR_MEMORY_DEVICE_COHERENT: devmem->pagemap.range.start = (MINOR(mdevice->cdevice.dev) - 2) ? spm_addr_dev0 : spm_addr_dev1; devmem->pagemap.range.end = devmem->pagemap.range.start + DEVMEM_CHUNK_SIZE - 1; devmem->pagemap.type = MEMORY_DEVICE_COHERENT; break; default: ret = -EINVAL; goto err_devmem; } devmem->pagemap.nr_range = 1; devmem->pagemap.ops = &dmirror_devmem_ops; devmem->pagemap.owner = mdevice; mutex_lock(&mdevice->devmem_lock); if (mdevice->devmem_count == mdevice->devmem_capacity) { struct dmirror_chunk **new_chunks; unsigned int new_capacity; new_capacity = mdevice->devmem_capacity + DEVMEM_CHUNKS_RESERVE; new_chunks = krealloc(mdevice->devmem_chunks, sizeof(new_chunks[0]) * new_capacity, GFP_KERNEL); if (!new_chunks) goto err_release; mdevice->devmem_capacity = new_capacity; mdevice->devmem_chunks = new_chunks; } ptr = memremap_pages(&devmem->pagemap, numa_node_id()); if (IS_ERR_OR_NULL(ptr)) { if (ptr) ret = PTR_ERR(ptr); else ret = -EFAULT; goto err_release; } devmem->mdevice = mdevice; pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT; pfn_last = pfn_first + (range_len(&devmem->pagemap.range) >> PAGE_SHIFT); mdevice->devmem_chunks[mdevice->devmem_count++] = devmem; mutex_unlock(&mdevice->devmem_lock); pr_info("added new %u MB chunk (total %u chunks, %u MB) PFNs [0x%lx 0x%lx)\n", DEVMEM_CHUNK_SIZE / (1024 * 1024), mdevice->devmem_count, mdevice->devmem_count * (DEVMEM_CHUNK_SIZE / (1024 * 1024)), pfn_first, pfn_last); spin_lock(&mdevice->lock); for (pfn = pfn_first; pfn < pfn_last; pfn++) { struct page *page = pfn_to_page(pfn); page->zone_device_data = mdevice->free_pages; mdevice->free_pages = page; } if (ppage) { *ppage = mdevice->free_pages; mdevice->free_pages = (*ppage)->zone_device_data; mdevice->calloc++; } spin_unlock(&mdevice->lock); return 0; err_release: mutex_unlock(&mdevice->devmem_lock); if (res && devmem->pagemap.type == MEMORY_DEVICE_PRIVATE) release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range)); err_devmem: kfree(devmem); return ret; } static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice) { struct page *dpage = NULL; struct page *rpage = NULL; /* * For ZONE_DEVICE private type, this is a fake device so we allocate * real system memory to store our device memory. * For ZONE_DEVICE coherent type we use the actual dpage to store the * data and ignore rpage. */ if (dmirror_is_private_zone(mdevice)) { rpage = alloc_page(GFP_HIGHUSER); if (!rpage) return NULL; } spin_lock(&mdevice->lock); if (mdevice->free_pages) { dpage = mdevice->free_pages; mdevice->free_pages = dpage->zone_device_data; mdevice->calloc++; spin_unlock(&mdevice->lock); } else { spin_unlock(&mdevice->lock); if (dmirror_allocate_chunk(mdevice, &dpage)) goto error; } zone_device_page_init(dpage); dpage->zone_device_data = rpage; return dpage; error: if (rpage) __free_page(rpage); return NULL; } static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args, struct dmirror *dmirror) { struct dmirror_device *mdevice = dmirror->mdevice; const unsigned long *src = args->src; unsigned long *dst = args->dst; unsigned long addr; for (addr = args->start; addr < args->end; addr += PAGE_SIZE, src++, dst++) { struct page *spage; struct page *dpage; struct page *rpage; if (!(*src & MIGRATE_PFN_MIGRATE)) continue; /* * Note that spage might be NULL which is OK since it is an * unallocated pte_none() or read-only zero page. 
*/ spage = migrate_pfn_to_page(*src); if (WARN(spage && is_zone_device_page(spage), "page already in device spage pfn: 0x%lx\n", page_to_pfn(spage))) continue; dpage = dmirror_devmem_alloc_page(mdevice); if (!dpage) continue; rpage = BACKING_PAGE(dpage); if (spage) copy_highpage(rpage, spage); else clear_highpage(rpage); /* * Normally, a device would use the page->zone_device_data to * point to the mirror but here we use it to hold the page for * the simulated device memory and that page holds the pointer * to the mirror. */ rpage->zone_device_data = dmirror; pr_debug("migrating from sys to dev pfn src: 0x%lx pfn dst: 0x%lx\n", page_to_pfn(spage), page_to_pfn(dpage)); *dst = migrate_pfn(page_to_pfn(dpage)); if ((*src & MIGRATE_PFN_WRITE) || (!spage && args->vma->vm_flags & VM_WRITE)) *dst |= MIGRATE_PFN_WRITE; } } static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start, unsigned long end) { unsigned long pfn; for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) { void *entry; entry = xa_load(&dmirror->pt, pfn); if (xa_pointer_tag(entry) == DPT_XA_TAG_ATOMIC) return -EPERM; } return 0; } static int dmirror_atomic_map(unsigned long start, unsigned long end, struct page **pages, struct dmirror *dmirror) { unsigned long pfn, mapped = 0; int i; /* Map the migrated pages into the device's page tables. */ mutex_lock(&dmirror->mutex); for (i = 0, pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, i++) { void *entry; if (!pages[i]) continue; entry = pages[i]; entry = xa_tag_pointer(entry, DPT_XA_TAG_ATOMIC); entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC); if (xa_is_err(entry)) { mutex_unlock(&dmirror->mutex); return xa_err(entry); } mapped++; } mutex_unlock(&dmirror->mutex); return mapped; } static int dmirror_migrate_finalize_and_map(struct migrate_vma *args, struct dmirror *dmirror) { unsigned long start = args->start; unsigned long end = args->end; const unsigned long *src = args->src; const unsigned long *dst = args->dst; unsigned long pfn; /* Map the migrated pages into the device's page tables. */ mutex_lock(&dmirror->mutex); for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++, src++, dst++) { struct page *dpage; void *entry; if (!(*src & MIGRATE_PFN_MIGRATE)) continue; dpage = migrate_pfn_to_page(*dst); if (!dpage) continue; entry = BACKING_PAGE(dpage); if (*dst & MIGRATE_PFN_WRITE) entry = xa_tag_pointer(entry, DPT_XA_TAG_WRITE); entry = xa_store(&dmirror->pt, pfn, entry, GFP_ATOMIC); if (xa_is_err(entry)) { mutex_unlock(&dmirror->mutex); return xa_err(entry); } } mutex_unlock(&dmirror->mutex); return 0; } static int dmirror_exclusive(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd) { unsigned long start, end, addr; unsigned long size = cmd->npages << PAGE_SHIFT; struct mm_struct *mm = dmirror->notifier.mm; struct page *pages[64]; struct dmirror_bounce bounce; unsigned long next; int ret; start = cmd->addr; end = start + size; if (end < start) return -EINVAL; /* Since the mm is for the mirrored process, get a reference first. */ if (!mmget_not_zero(mm)) return -EINVAL; mmap_read_lock(mm); for (addr = start; addr < end; addr = next) { unsigned long mapped = 0; int i; if (end < addr + (ARRAY_SIZE(pages) << PAGE_SHIFT)) next = end; else next = addr + (ARRAY_SIZE(pages) << PAGE_SHIFT); ret = make_device_exclusive_range(mm, addr, next, pages, NULL); /* * Do dmirror_atomic_map() iff all pages are marked for * exclusive access to avoid accessing uninitialized * fields of pages. 
*/ if (ret == (next - addr) >> PAGE_SHIFT) mapped = dmirror_atomic_map(addr, next, pages, dmirror); for (i = 0; i < ret; i++) { if (pages[i]) { unlock_page(pages[i]); put_page(pages[i]); } } if (addr + (mapped << PAGE_SHIFT) < next) { mmap_read_unlock(mm); mmput(mm); return -EBUSY; } } mmap_read_unlock(mm); mmput(mm); /* Return the migrated data for verification. */ ret = dmirror_bounce_init(&bounce, start, size); if (ret) return ret; mutex_lock(&dmirror->mutex); ret = dmirror_do_read(dmirror, start, end, &bounce); mutex_unlock(&dmirror->mutex); if (ret == 0) { if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr, bounce.size)) ret = -EFAULT; } cmd->cpages = bounce.cpages; dmirror_bounce_fini(&bounce); return ret; } static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args, struct dmirror *dmirror) { const unsigned long *src = args->src; unsigned long *dst = args->dst; unsigned long start = args->start; unsigned long end = args->end; unsigned long addr; for (addr = start; addr < end; addr += PAGE_SIZE, src++, dst++) { struct page *dpage, *spage; spage = migrate_pfn_to_page(*src); if (!spage || !(*src & MIGRATE_PFN_MIGRATE)) continue; if (WARN_ON(!is_device_private_page(spage) && !is_device_coherent_page(spage))) continue; spage = BACKING_PAGE(spage); dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr); if (!dpage) continue; pr_debug("migrating from dev to sys pfn src: 0x%lx pfn dst: 0x%lx\n", page_to_pfn(spage), page_to_pfn(dpage)); lock_page(dpage); xa_erase(&dmirror->pt, addr >> PAGE_SHIFT); copy_highpage(dpage, spage); *dst = migrate_pfn(page_to_pfn(dpage)); if (*src & MIGRATE_PFN_WRITE) *dst |= MIGRATE_PFN_WRITE; } return 0; } static unsigned long dmirror_successful_migrated_pages(struct migrate_vma *migrate) { unsigned long cpages = 0; unsigned long i; for (i = 0; i < migrate->npages; i++) { if (migrate->src[i] & MIGRATE_PFN_VALID && migrate->src[i] & MIGRATE_PFN_MIGRATE) cpages++; } return cpages; } static int dmirror_migrate_to_system(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd) { unsigned long start, end, addr; unsigned long size = cmd->npages << PAGE_SHIFT; struct mm_struct *mm = dmirror->notifier.mm; struct vm_area_struct *vma; unsigned long src_pfns[64] = { 0 }; unsigned long dst_pfns[64] = { 0 }; struct migrate_vma args = { 0 }; unsigned long next; int ret; start = cmd->addr; end = start + size; if (end < start) return -EINVAL; /* Since the mm is for the mirrored process, get a reference first. 
*/ if (!mmget_not_zero(mm)) return -EINVAL; cmd->cpages = 0; mmap_read_lock(mm); for (addr = start; addr < end; addr = next) { vma = vma_lookup(mm, addr); if (!vma || !(vma->vm_flags & VM_READ)) { ret = -EINVAL; goto out; } next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT)); if (next > vma->vm_end) next = vma->vm_end; args.vma = vma; args.src = src_pfns; args.dst = dst_pfns; args.start = addr; args.end = next; args.pgmap_owner = dmirror->mdevice; args.flags = dmirror_select_device(dmirror); ret = migrate_vma_setup(&args); if (ret) goto out; pr_debug("Migrating from device mem to sys mem\n"); dmirror_devmem_fault_alloc_and_copy(&args, dmirror); migrate_vma_pages(&args); cmd->cpages += dmirror_successful_migrated_pages(&args); migrate_vma_finalize(&args); } out: mmap_read_unlock(mm); mmput(mm); return ret; } static int dmirror_migrate_to_device(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd) { unsigned long start, end, addr; unsigned long size = cmd->npages << PAGE_SHIFT; struct mm_struct *mm = dmirror->notifier.mm; struct vm_area_struct *vma; unsigned long src_pfns[64] = { 0 }; unsigned long dst_pfns[64] = { 0 }; struct dmirror_bounce bounce; struct migrate_vma args = { 0 }; unsigned long next; int ret; start = cmd->addr; end = start + size; if (end < start) return -EINVAL; /* Since the mm is for the mirrored process, get a reference first. */ if (!mmget_not_zero(mm)) return -EINVAL; mmap_read_lock(mm); for (addr = start; addr < end; addr = next) { vma = vma_lookup(mm, addr); if (!vma || !(vma->vm_flags & VM_READ)) { ret = -EINVAL; goto out; } next = min(end, addr + (ARRAY_SIZE(src_pfns) << PAGE_SHIFT)); if (next > vma->vm_end) next = vma->vm_end; args.vma = vma; args.src = src_pfns; args.dst = dst_pfns; args.start = addr; args.end = next; args.pgmap_owner = dmirror->mdevice; args.flags = MIGRATE_VMA_SELECT_SYSTEM; ret = migrate_vma_setup(&args); if (ret) goto out; pr_debug("Migrating from sys mem to device mem\n"); dmirror_migrate_alloc_and_copy(&args, dmirror); migrate_vma_pages(&args); dmirror_migrate_finalize_and_map(&args, dmirror); migrate_vma_finalize(&args); } mmap_read_unlock(mm); mmput(mm); /* * Return the migrated data for verification. * Only for pages in device zone */ ret = dmirror_bounce_init(&bounce, start, size); if (ret) return ret; mutex_lock(&dmirror->mutex); ret = dmirror_do_read(dmirror, start, end, &bounce); mutex_unlock(&dmirror->mutex); if (ret == 0) { if (copy_to_user(u64_to_user_ptr(cmd->ptr), bounce.ptr, bounce.size)) ret = -EFAULT; } cmd->cpages = bounce.cpages; dmirror_bounce_fini(&bounce); return ret; out: mmap_read_unlock(mm); mmput(mm); return ret; } static void dmirror_mkentry(struct dmirror *dmirror, struct hmm_range *range, unsigned char *perm, unsigned long entry) { struct page *page; if (entry & HMM_PFN_ERROR) { *perm = HMM_DMIRROR_PROT_ERROR; return; } if (!(entry & HMM_PFN_VALID)) { *perm = HMM_DMIRROR_PROT_NONE; return; } page = hmm_pfn_to_page(entry); if (is_device_private_page(page)) { /* Is the page migrated to this device or some other? */ if (dmirror->mdevice == dmirror_page_to_device(page)) *perm = HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL; else *perm = HMM_DMIRROR_PROT_DEV_PRIVATE_REMOTE; } else if (is_device_coherent_page(page)) { /* Is the page migrated to this device or some other? 
*/ if (dmirror->mdevice == dmirror_page_to_device(page)) *perm = HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL; else *perm = HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE; } else if (is_zero_pfn(page_to_pfn(page))) *perm = HMM_DMIRROR_PROT_ZERO; else *perm = HMM_DMIRROR_PROT_NONE; if (entry & HMM_PFN_WRITE) *perm |= HMM_DMIRROR_PROT_WRITE; else *perm |= HMM_DMIRROR_PROT_READ; if (hmm_pfn_to_map_order(entry) + PAGE_SHIFT == PMD_SHIFT) *perm |= HMM_DMIRROR_PROT_PMD; else if (hmm_pfn_to_map_order(entry) + PAGE_SHIFT == PUD_SHIFT) *perm |= HMM_DMIRROR_PROT_PUD; } static bool dmirror_snapshot_invalidate(struct mmu_interval_notifier *mni, const struct mmu_notifier_range *range, unsigned long cur_seq) { struct dmirror_interval *dmi = container_of(mni, struct dmirror_interval, notifier); struct dmirror *dmirror = dmi->dmirror; if (mmu_notifier_range_blockable(range)) mutex_lock(&dmirror->mutex); else if (!mutex_trylock(&dmirror->mutex)) return false; /* * Snapshots only need to set the sequence number since any * invalidation in the interval invalidates the whole snapshot. */ mmu_interval_set_seq(mni, cur_seq); mutex_unlock(&dmirror->mutex); return true; } static const struct mmu_interval_notifier_ops dmirror_mrn_ops = { .invalidate = dmirror_snapshot_invalidate, }; static int dmirror_range_snapshot(struct dmirror *dmirror, struct hmm_range *range, unsigned char *perm) { struct mm_struct *mm = dmirror->notifier.mm; struct dmirror_interval notifier; unsigned long timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); unsigned long i; unsigned long n; int ret = 0; notifier.dmirror = dmirror; range->notifier = &notifier.notifier; ret = mmu_interval_notifier_insert(range->notifier, mm, range->start, range->end - range->start, &dmirror_mrn_ops); if (ret) return ret; while (true) { if (time_after(jiffies, timeout)) { ret = -EBUSY; goto out; } range->notifier_seq = mmu_interval_read_begin(range->notifier); mmap_read_lock(mm); ret = hmm_range_fault(range); mmap_read_unlock(mm); if (ret) { if (ret == -EBUSY) continue; goto out; } mutex_lock(&dmirror->mutex); if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) { mutex_unlock(&dmirror->mutex); continue; } break; } n = (range->end - range->start) >> PAGE_SHIFT; for (i = 0; i < n; i++) dmirror_mkentry(dmirror, range, perm + i, range->hmm_pfns[i]); mutex_unlock(&dmirror->mutex); out: mmu_interval_notifier_remove(range->notifier); return ret; } static int dmirror_snapshot(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd) { struct mm_struct *mm = dmirror->notifier.mm; unsigned long start, end; unsigned long size = cmd->npages << PAGE_SHIFT; unsigned long addr; unsigned long next; unsigned long pfns[64]; unsigned char perm[64]; char __user *uptr; struct hmm_range range = { .hmm_pfns = pfns, .dev_private_owner = dmirror->mdevice, }; int ret = 0; start = cmd->addr; end = start + size; if (end < start) return -EINVAL; /* Since the mm is for the mirrored process, get a reference first. */ if (!mmget_not_zero(mm)) return -EINVAL; /* * Register a temporary notifier to detect invalidations even if it * overlaps with other mmu_interval_notifiers. 
*/ uptr = u64_to_user_ptr(cmd->ptr); for (addr = start; addr < end; addr = next) { unsigned long n; next = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end); range.start = addr; range.end = next; ret = dmirror_range_snapshot(dmirror, &range, perm); if (ret) break; n = (range.end - range.start) >> PAGE_SHIFT; if (copy_to_user(uptr, perm, n)) { ret = -EFAULT; break; } cmd->cpages += n; uptr += n; } mmput(mm); return ret; } static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk) { unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT; unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT; unsigned long npages = end_pfn - start_pfn + 1; unsigned long i; unsigned long *src_pfns; unsigned long *dst_pfns; src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL); dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL); migrate_device_range(src_pfns, start_pfn, npages); for (i = 0; i < npages; i++) { struct page *dpage, *spage; spage = migrate_pfn_to_page(src_pfns[i]); if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) continue; if (WARN_ON(!is_device_private_page(spage) && !is_device_coherent_page(spage))) continue; spage = BACKING_PAGE(spage); dpage = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_NOFAIL); lock_page(dpage); copy_highpage(dpage, spage); dst_pfns[i] = migrate_pfn(page_to_pfn(dpage)); if (src_pfns[i] & MIGRATE_PFN_WRITE) dst_pfns[i] |= MIGRATE_PFN_WRITE; } migrate_device_pages(src_pfns, dst_pfns, npages); migrate_device_finalize(src_pfns, dst_pfns, npages); kfree(src_pfns); kfree(dst_pfns); } /* Removes free pages from the free list so they can't be re-allocated */ static void dmirror_remove_free_pages(struct dmirror_chunk *devmem) { struct dmirror_device *mdevice = devmem->mdevice; struct page *page; for (page = mdevice->free_pages; page; page = page->zone_device_data) if (dmirror_page_to_chunk(page) == devmem) mdevice->free_pages = page->zone_device_data; } static void dmirror_device_remove_chunks(struct dmirror_device *mdevice) { unsigned int i; mutex_lock(&mdevice->devmem_lock); if (mdevice->devmem_chunks) { for (i = 0; i < mdevice->devmem_count; i++) { struct dmirror_chunk *devmem = mdevice->devmem_chunks[i]; spin_lock(&mdevice->lock); devmem->remove = true; dmirror_remove_free_pages(devmem); spin_unlock(&mdevice->lock); dmirror_device_evict_chunk(devmem); memunmap_pages(&devmem->pagemap); if (devmem->pagemap.type == MEMORY_DEVICE_PRIVATE) release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range)); kfree(devmem); } mdevice->devmem_count = 0; mdevice->devmem_capacity = 0; mdevice->free_pages = NULL; kfree(mdevice->devmem_chunks); mdevice->devmem_chunks = NULL; } mutex_unlock(&mdevice->devmem_lock); } static long dmirror_fops_unlocked_ioctl(struct file *filp, unsigned int command, unsigned long arg) { void __user *uarg = (void __user *)arg; struct hmm_dmirror_cmd cmd; struct dmirror *dmirror; int ret; dmirror = filp->private_data; if (!dmirror) return -EINVAL; if (copy_from_user(&cmd, uarg, sizeof(cmd))) return -EFAULT; if (cmd.addr & ~PAGE_MASK) return -EINVAL; if (cmd.addr >= (cmd.addr + (cmd.npages << PAGE_SHIFT))) return -EINVAL; cmd.cpages = 0; cmd.faults = 0; switch (command) { case HMM_DMIRROR_READ: ret = dmirror_read(dmirror, &cmd); break; case HMM_DMIRROR_WRITE: ret = dmirror_write(dmirror, &cmd); break; case HMM_DMIRROR_MIGRATE_TO_DEV: ret = dmirror_migrate_to_device(dmirror, &cmd); break; case HMM_DMIRROR_MIGRATE_TO_SYS: ret = dmirror_migrate_to_system(dmirror, &cmd); break; case HMM_DMIRROR_EXCLUSIVE: ret = 
dmirror_exclusive(dmirror, &cmd); break; case HMM_DMIRROR_CHECK_EXCLUSIVE: ret = dmirror_check_atomic(dmirror, cmd.addr, cmd.addr + (cmd.npages << PAGE_SHIFT)); break; case HMM_DMIRROR_SNAPSHOT: ret = dmirror_snapshot(dmirror, &cmd); break; case HMM_DMIRROR_RELEASE: dmirror_device_remove_chunks(dmirror->mdevice); ret = 0; break; default: return -EINVAL; } if (ret) return ret; if (copy_to_user(uarg, &cmd, sizeof(cmd))) return -EFAULT; return 0; } static int dmirror_fops_mmap(struct file *file, struct vm_area_struct *vma) { unsigned long addr; for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) { struct page *page; int ret; page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) return -ENOMEM; ret = vm_insert_page(vma, addr, page); if (ret) { __free_page(page); return ret; } put_page(page); } return 0; } static const struct file_operations dmirror_fops = { .open = dmirror_fops_open, .release = dmirror_fops_release, .mmap = dmirror_fops_mmap, .unlocked_ioctl = dmirror_fops_unlocked_ioctl, .llseek = default_llseek, .owner = THIS_MODULE, }; static void dmirror_devmem_free(struct page *page) { struct page *rpage = BACKING_PAGE(page); struct dmirror_device *mdevice; if (rpage != page) __free_page(rpage); mdevice = dmirror_page_to_device(page); spin_lock(&mdevice->lock); /* Return page to our allocator if not freeing the chunk */ if (!dmirror_page_to_chunk(page)->remove) { mdevice->cfree++; page->zone_device_data = mdevice->free_pages; mdevice->free_pages = page; } spin_unlock(&mdevice->lock); } static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf) { struct migrate_vma args = { 0 }; unsigned long src_pfns = 0; unsigned long dst_pfns = 0; struct page *rpage; struct dmirror *dmirror; vm_fault_t ret; /* * Normally, a device would use the page->zone_device_data to point to * the mirror but here we use it to hold the page for the simulated * device memory and that page holds the pointer to the mirror. */ rpage = vmf->page->zone_device_data; dmirror = rpage->zone_device_data; /* FIXME demonstrate how we can adjust migrate range */ args.vma = vmf->vma; args.start = vmf->address; args.end = args.start + PAGE_SIZE; args.src = &src_pfns; args.dst = &dst_pfns; args.pgmap_owner = dmirror->mdevice; args.flags = dmirror_select_device(dmirror); args.fault_page = vmf->page; if (migrate_vma_setup(&args)) return VM_FAULT_SIGBUS; ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror); if (ret) return ret; migrate_vma_pages(&args); /* * No device finalize step is needed since * dmirror_devmem_fault_alloc_and_copy() will have already * invalidated the device page table. 
*/ migrate_vma_finalize(&args); return 0; } static const struct dev_pagemap_ops dmirror_devmem_ops = { .page_free = dmirror_devmem_free, .migrate_to_ram = dmirror_devmem_fault, }; static int dmirror_device_init(struct dmirror_device *mdevice, int id) { dev_t dev; int ret; dev = MKDEV(MAJOR(dmirror_dev), id); mutex_init(&mdevice->devmem_lock); spin_lock_init(&mdevice->lock); cdev_init(&mdevice->cdevice, &dmirror_fops); mdevice->cdevice.owner = THIS_MODULE; device_initialize(&mdevice->device); mdevice->device.devt = dev; ret = dev_set_name(&mdevice->device, "hmm_dmirror%u", id); if (ret) return ret; ret = cdev_device_add(&mdevice->cdevice, &mdevice->device); if (ret) return ret; /* Build a list of free ZONE_DEVICE struct pages */ return dmirror_allocate_chunk(mdevice, NULL); } static void dmirror_device_remove(struct dmirror_device *mdevice) { dmirror_device_remove_chunks(mdevice); cdev_device_del(&mdevice->cdevice, &mdevice->device); } static int __init hmm_dmirror_init(void) { int ret; int id = 0; int ndevices = 0; ret = alloc_chrdev_region(&dmirror_dev, 0, DMIRROR_NDEVICES, "HMM_DMIRROR"); if (ret) goto err_unreg; memset(dmirror_devices, 0, DMIRROR_NDEVICES * sizeof(dmirror_devices[0])); dmirror_devices[ndevices++].zone_device_type = HMM_DMIRROR_MEMORY_DEVICE_PRIVATE; dmirror_devices[ndevices++].zone_device_type = HMM_DMIRROR_MEMORY_DEVICE_PRIVATE; if (spm_addr_dev0 && spm_addr_dev1) { dmirror_devices[ndevices++].zone_device_type = HMM_DMIRROR_MEMORY_DEVICE_COHERENT; dmirror_devices[ndevices++].zone_device_type = HMM_DMIRROR_MEMORY_DEVICE_COHERENT; } for (id = 0; id < ndevices; id++) { ret = dmirror_device_init(dmirror_devices + id, id); if (ret) goto err_chrdev; } pr_info("HMM test module loaded. This is only for testing HMM.\n"); return 0; err_chrdev: while (--id >= 0) dmirror_device_remove(dmirror_devices + id); unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES); err_unreg: return ret; } static void __exit hmm_dmirror_exit(void) { int id; for (id = 0; id < DMIRROR_NDEVICES; id++) if (dmirror_devices[id].zone_device_type) dmirror_device_remove(dmirror_devices + id); unregister_chrdev_region(dmirror_dev, DMIRROR_NDEVICES); } module_init(hmm_dmirror_init); module_exit(hmm_dmirror_exit); MODULE_LICENSE("GPL");
linux-master
lib/test_hmm.c
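/*
 * Illustrative sketch, not from the kernel tree: roughly how a user-space
 * program could drive this driver, in the spirit of the in-tree HMM
 * selftests.  struct hmm_dmirror_cmd and the HMM_DMIRROR_* ioctl numbers
 * come from test_hmm_uapi.h (not reproduced here), and the
 * "/dev/hmm_dmirror0" node name is an assumption about how udev names the
 * character device registered above.  Error handling is trimmed for brevity.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "test_hmm_uapi.h"	/* struct hmm_dmirror_cmd, HMM_DMIRROR_* */

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	struct hmm_dmirror_cmd cmd;
	char *buf;
	int fd;

	fd = open("/dev/hmm_dmirror0", O_RDWR);	/* assumed node name */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Anonymous memory in *this* process; the driver mirrors our mm. */
	buf = aligned_alloc(page_size, page_size);
	memset(buf, 0xab, page_size);

	/* Ask the fake device to read one page back through its mirror. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.addr = (uintptr_t)buf;			/* must be page aligned */
	cmd.ptr = (uintptr_t)malloc(page_size);		/* bounce buffer for the copy */
	cmd.npages = 1;
	if (ioctl(fd, HMM_DMIRROR_READ, &cmd))
		perror("HMM_DMIRROR_READ");
	else
		printf("copied %llu page(s), %llu device fault(s)\n",
		       (unsigned long long)cmd.cpages,
		       (unsigned long long)cmd.faults);

	close(fd);
	return 0;
}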
// SPDX-License-Identifier: GPL-2.0 #include <linux/err.h> #include <linux/bug.h> #include <linux/atomic.h> #include <linux/errseq.h> #include <linux/log2.h> /* * An errseq_t is a way of recording errors in one place, and allowing any * number of "subscribers" to tell whether it has changed since a previous * point where it was sampled. * * It's implemented as an unsigned 32-bit value. The low order bits are * designated to hold an error code (between 0 and -MAX_ERRNO). The upper bits * are used as a counter. This is done with atomics instead of locking so that * these functions can be called from any context. * * The general idea is for consumers to sample an errseq_t value. That value * can later be used to tell whether any new errors have occurred since that * sampling was done. * * Note that there is a risk of collisions if new errors are being recorded * frequently, since we have so few bits to use as a counter. * * To mitigate this, one bit is used as a flag to tell whether the value has * been sampled since a new value was recorded. That allows us to avoid bumping * the counter if no one has sampled it since the last time an error was * recorded. * * A new errseq_t should always be zeroed out. A errseq_t value of all zeroes * is the special (but common) case where there has never been an error. An all * zero value thus serves as the "epoch" if one wishes to know whether there * has ever been an error set since it was first initialized. */ /* The low bits are designated for error code (max of MAX_ERRNO) */ #define ERRSEQ_SHIFT ilog2(MAX_ERRNO + 1) /* This bit is used as a flag to indicate whether the value has been seen */ #define ERRSEQ_SEEN (1 << ERRSEQ_SHIFT) /* The lowest bit of the counter */ #define ERRSEQ_CTR_INC (1 << (ERRSEQ_SHIFT + 1)) /** * errseq_set - set a errseq_t for later reporting * @eseq: errseq_t field that should be set * @err: error to set (must be between -1 and -MAX_ERRNO) * * This function sets the error in @eseq, and increments the sequence counter * if the last sequence was sampled at some point in the past. * * Any error set will always overwrite an existing error. * * Return: The previous value, primarily for debugging purposes. The * return value should not be used as a previously sampled value in later * calls as it will not have the SEEN flag set. */ errseq_t errseq_set(errseq_t *eseq, int err) { errseq_t cur, old; /* MAX_ERRNO must be able to serve as a mask */ BUILD_BUG_ON_NOT_POWER_OF_2(MAX_ERRNO + 1); /* * Ensure the error code actually fits where we want it to go. If it * doesn't then just throw a warning and don't record anything. We * also don't accept zero here as that would effectively clear a * previous error. */ old = READ_ONCE(*eseq); if (WARN(unlikely(err == 0 || (unsigned int)-err > MAX_ERRNO), "err = %d\n", err)) return old; for (;;) { errseq_t new; /* Clear out error bits and set new error */ new = (old & ~(MAX_ERRNO|ERRSEQ_SEEN)) | -err; /* Only increment if someone has looked at it */ if (old & ERRSEQ_SEEN) new += ERRSEQ_CTR_INC; /* If there would be no change, then call it done */ if (new == old) { cur = new; break; } /* Try to swap the new value into place */ cur = cmpxchg(eseq, old, new); /* * Call it success if we did the swap or someone else beat us * to it for the same value. */ if (likely(cur == old || cur == new)) break; /* Raced with an update, try again */ old = cur; } return cur; } EXPORT_SYMBOL(errseq_set); /** * errseq_sample() - Grab current errseq_t value. * @eseq: Pointer to errseq_t to be sampled. 
* * This function allows callers to initialise their errseq_t variable. * If the error has been "seen", new callers will not see an old error. * If there is an unseen error in @eseq, the caller of this function will * see it the next time it checks for an error. * * Context: Any context. * Return: The current errseq value. */ errseq_t errseq_sample(errseq_t *eseq) { errseq_t old = READ_ONCE(*eseq); /* If nobody has seen this error yet, then we can be the first. */ if (!(old & ERRSEQ_SEEN)) old = 0; return old; } EXPORT_SYMBOL(errseq_sample); /** * errseq_check() - Has an error occurred since a particular sample point? * @eseq: Pointer to errseq_t value to be checked. * @since: Previously-sampled errseq_t from which to check. * * Grab the value that eseq points to, and see if it has changed @since * the given value was sampled. The @since value is not advanced, so there * is no need to mark the value as seen. * * Return: The latest error set in the errseq_t or 0 if it hasn't changed. */ int errseq_check(errseq_t *eseq, errseq_t since) { errseq_t cur = READ_ONCE(*eseq); if (likely(cur == since)) return 0; return -(cur & MAX_ERRNO); } EXPORT_SYMBOL(errseq_check); /** * errseq_check_and_advance() - Check an errseq_t and advance to current value. * @eseq: Pointer to value being checked and reported. * @since: Pointer to previously-sampled errseq_t to check against and advance. * * Grab the eseq value, and see whether it matches the value that @since * points to. If it does, then just return 0. * * If it doesn't, then the value has changed. Set the "seen" flag, and try to * swap it into place as the new eseq value. Then, set that value as the new * "since" value, and return whatever the error portion is set to. * * Note that no locking is provided here for concurrent updates to the "since" * value. The caller must provide that if necessary. Because of this, callers * may want to do a lockless errseq_check before taking the lock and calling * this. * * Return: Negative errno if one has been stored, or 0 if no new error has * occurred. */ int errseq_check_and_advance(errseq_t *eseq, errseq_t *since) { int err = 0; errseq_t old, new; /* * Most callers will want to use the inline wrapper to check this, * so that the common case of no error is handled without needing * to take the lock that protects the "since" value. */ old = READ_ONCE(*eseq); if (old != *since) { /* * Set the flag and try to swap it into place if it has * changed. * * We don't care about the outcome of the swap here. If the * swap doesn't occur, then it has either been updated by a * writer who is altering the value in some way (updating * counter or resetting the error), or another reader who is * just setting the "seen" flag. Either outcome is OK, and we * can advance "since" and return an error based on what we * have. */ new = old | ERRSEQ_SEEN; if (new != old) cmpxchg(eseq, old, new); *since = new; err = -(new & MAX_ERRNO); } return err; } EXPORT_SYMBOL(errseq_check_and_advance);
linux-master
lib/errseq.c
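/*
 * Illustrative sketch, not part of errseq.c: the intended calling pattern,
 * modelled on how the VFS reports writeback errors.  Everything except the
 * errseq_* API itself (demo_object, demo_handle, the demo_* functions) is
 * invented for this example.
 */
#include <linux/errseq.h>
#include <linux/spinlock.h>

struct demo_object {
	errseq_t wb_err;	/* shared; written from any context */
};

struct demo_handle {
	struct demo_object *obj;
	errseq_t sampled;	/* private "since" cursor for this opener */
	spinlock_t lock;	/* protects ->sampled across checkers */
};

/* Writer side: record an error whenever an operation fails. */
static void demo_record_error(struct demo_object *obj, int err)
{
	errseq_set(&obj->wb_err, err);
}

/* Open time: sample the current state so old, seen errors aren't re-reported. */
static void demo_open(struct demo_handle *h, struct demo_object *obj)
{
	h->obj = obj;
	h->sampled = errseq_sample(&obj->wb_err);
	spin_lock_init(&h->lock);
}

/* fsync-like path: report anything new since our sample, then advance it. */
static int demo_sync(struct demo_handle *h)
{
	int err = 0;

	/* Cheap lockless test first, as the errseq comments suggest. */
	if (errseq_check(&h->obj->wb_err, h->sampled)) {
		spin_lock(&h->lock);
		err = errseq_check_and_advance(&h->obj->wb_err, &h->sampled);
		spin_unlock(&h->lock);
	}
	return err;
}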
// SPDX-License-Identifier: GPL-2.0-only /* * linux/lib/cmdline.c * Helper functions generally used for parsing kernel command line * and module options. * * Code and copyrights come from init/main.c and arch/i386/kernel/setup.c. * * GNU Indent formatting options for this file: -kr -i8 -npsl -pcs */ #include <linux/export.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ctype.h> /* * If a hyphen was found in get_option, this will handle the * range of numbers, M-N. This will expand the range and insert * the values[M, M+1, ..., N] into the ints array in get_options. */ static int get_range(char **str, int *pint, int n) { int x, inc_counter, upper_range; (*str)++; upper_range = simple_strtol((*str), NULL, 0); inc_counter = upper_range - *pint; for (x = *pint; n && x < upper_range; x++, n--) *pint++ = x; return inc_counter; } /** * get_option - Parse integer from an option string * @str: option string * @pint: (optional output) integer value parsed from @str * * Read an int from an option string; if available accept a subsequent * comma as well. * * When @pint is NULL the function can be used as a validator of * the current option in the string. * * Return values: * 0 - no int in string * 1 - int found, no subsequent comma * 2 - int found including a subsequent comma * 3 - hyphen found to denote a range * * Leading hyphen without integer is no integer case, but we consume it * for the sake of simplification. */ int get_option(char **str, int *pint) { char *cur = *str; int value; if (!cur || !(*cur)) return 0; if (*cur == '-') value = -simple_strtoull(++cur, str, 0); else value = simple_strtoull(cur, str, 0); if (pint) *pint = value; if (cur == *str) return 0; if (**str == ',') { (*str)++; return 2; } if (**str == '-') return 3; return 1; } EXPORT_SYMBOL(get_option); /** * get_options - Parse a string into a list of integers * @str: String to be parsed * @nints: size of integer array * @ints: integer array (must have room for at least one element) * * This function parses a string containing a comma-separated * list of integers, a hyphen-separated range of _positive_ integers, * or a combination of both. The parse halts when the array is * full, or when no more numbers can be retrieved from the * string. * * When @nints is 0, the function just validates the given @str and * returns the amount of parseable integers as described below. * * Returns: * * The first element is filled by the number of collected integers * in the range. The rest is what was parsed from the @str. * * Return value is the character in the string which caused * the parse to end (typically a null terminator, if @str is * completely parseable). */ char *get_options(const char *str, int nints, int *ints) { bool validate = (nints == 0); int res, i = 1; while (i < nints || validate) { int *pint = validate ? ints : ints + i; res = get_option((char **)&str, pint); if (res == 0) break; if (res == 3) { int n = validate ? 0 : nints - i; int range_nums; range_nums = get_range((char **)&str, pint, n); if (range_nums < 0) break; /* * Decrement the result by one to leave out the * last number in the range. The next iteration * will handle the upper number in the range */ i += (range_nums - 1); } i++; if (res == 1) break; } ints[0] = i - 1; return (char *)str; } EXPORT_SYMBOL(get_options); /** * memparse - parse a string with mem suffixes into a number * @ptr: Where parse begins * @retptr: (output) Optional pointer to next char after parse completes * * Parses a string into a number. 
The number stored at @ptr is * potentially suffixed with K, M, G, T, P, E. */ unsigned long long memparse(const char *ptr, char **retptr) { char *endptr; /* local pointer to end of parsed string */ unsigned long long ret = simple_strtoull(ptr, &endptr, 0); switch (*endptr) { case 'E': case 'e': ret <<= 10; fallthrough; case 'P': case 'p': ret <<= 10; fallthrough; case 'T': case 't': ret <<= 10; fallthrough; case 'G': case 'g': ret <<= 10; fallthrough; case 'M': case 'm': ret <<= 10; fallthrough; case 'K': case 'k': ret <<= 10; endptr++; fallthrough; default: break; } if (retptr) *retptr = endptr; return ret; } EXPORT_SYMBOL(memparse); /** * parse_option_str - Parse a string and check an option is set or not * @str: String to be parsed * @option: option name * * This function parses a string containing a comma-separated list of * strings like a=b,c. * * Return true if there's such option in the string, or return false. */ bool parse_option_str(const char *str, const char *option) { while (*str) { if (!strncmp(str, option, strlen(option))) { str += strlen(option); if (!*str || *str == ',') return true; } while (*str && *str != ',') str++; if (*str == ',') str++; } return false; } /* * Parse a string to get a param value pair. * You can use " around spaces, but can't escape ". * Hyphens and underscores equivalent in parameter names. */ char *next_arg(char *args, char **param, char **val) { unsigned int i, equals = 0; int in_quote = 0, quoted = 0; if (*args == '"') { args++; in_quote = 1; quoted = 1; } for (i = 0; args[i]; i++) { if (isspace(args[i]) && !in_quote) break; if (equals == 0) { if (args[i] == '=') equals = i; } if (args[i] == '"') in_quote = !in_quote; } *param = args; if (!equals) *val = NULL; else { args[equals] = '\0'; *val = args + equals + 1; /* Don't include quotes in value. */ if (**val == '"') { (*val)++; if (args[i-1] == '"') args[i-1] = '\0'; } } if (quoted && i > 0 && args[i-1] == '"') args[i-1] = '\0'; if (args[i]) { args[i] = '\0'; args += i + 1; } else args += i; /* Chew up trailing spaces. */ return skip_spaces(args); } EXPORT_SYMBOL(next_arg);
linux-master
lib/cmdline.c
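A minimal usage sketch for the parsers above, written as a hypothetical module init function. The module, the option strings and the array size are illustrative and not part of lib/cmdline.c; the declarations are assumed to be visible via <linux/kernel.h>.

// Hypothetical example, not part of the kernel tree.
#include <linux/kernel.h>
#include <linux/module.h>

static int __init cmdline_example_init(void)
{
        int ints[8];                    /* ints[0] receives the count of parsed integers */
        unsigned long long bytes;

        /* "1,3,5-7" -> ints[0] = 5, ints[1..5] = 1, 3, 5, 6, 7 */
        get_options("1,3,5-7", ARRAY_SIZE(ints), ints);
        pr_info("parsed %d integers, first is %d\n", ints[0], ints[1]);

        /* "64K" -> 64 << 10 = 65536 */
        bytes = memparse("64K", NULL);
        pr_info("64K is %llu bytes\n", bytes);

        return 0;
}
module_init(cmdline_example_init);
MODULE_LICENSE("GPL");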
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This module emits "Hello, world" on printk when loaded.
 *
 * It is designed to be used for basic evaluation of the module loading
 * subsystem (for example when validating module signing/verification). It
 * lacks any extra dependencies, and will not normally be loaded by the
 * system unless explicitly requested by name.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

static int __init test_module_init(void)
{
        pr_warn("Hello, world\n");

        return 0;
}
module_init(test_module_init);

static void __exit test_module_exit(void)
{
        pr_warn("Goodbye\n");
}
module_exit(test_module_exit);

MODULE_AUTHOR("Kees Cook <[email protected]>");
MODULE_LICENSE("GPL");
linux-master
lib/test_module.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * A generic implementation of binary search for the Linux kernel
 *
 * Copyright (C) 2008-2009 Ksplice, Inc.
 * Author: Tim Abbott <[email protected]>
 */

#include <linux/export.h>
#include <linux/bsearch.h>
#include <linux/kprobes.h>

/*
 * bsearch - binary search an array of elements
 * @key: pointer to item being searched for
 * @base: pointer to first element to search
 * @num: number of elements
 * @size: size of each element
 * @cmp: pointer to comparison function
 *
 * This function does a binary search on the given array. The
 * contents of the array should already be in ascending sorted order
 * under the provided comparison function.
 *
 * Note that the key need not have the same type as the elements in
 * the array, e.g. key could be a string and the comparison function
 * could compare the string with the struct's name field. However, if
 * the key and elements in the array are of the same type, you can use
 * the same comparison function for both sort() and bsearch().
 */
void *bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp)
{
        return __inline_bsearch(key, base, num, size, cmp);
}
EXPORT_SYMBOL(bsearch);
NOKPROBE_SYMBOL(bsearch);
linux-master
lib/bsearch.c
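A short sketch of calling bsearch() above with a caller-supplied comparison function over an ascending-sorted array of ints; the comparison callback and the wrapper name are illustrative.

// Hypothetical example, not part of the kernel tree.
#include <linux/bsearch.h>
#include <linux/kernel.h>

/* Comparison callback: @key is the searched-for item, @elt an array element. */
static int cmp_int(const void *key, const void *elt)
{
        int a = *(const int *)key;
        int b = *(const int *)elt;

        if (a < b)
                return -1;
        if (a > b)
                return 1;
        return 0;
}

/* Returns true if @needle occurs in the sorted array @sorted of @num ints. */
static bool contains_int(const int *sorted, size_t num, int needle)
{
        return bsearch(&needle, sorted, num, sizeof(*sorted), cmp_int) != NULL;
}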
// SPDX-License-Identifier: GPL-2.0 #include <linux/ucs2_string.h> #include <linux/module.h> /* Return the number of unicode characters in data */ unsigned long ucs2_strnlen(const ucs2_char_t *s, size_t maxlength) { unsigned long length = 0; while (*s++ != 0 && length < maxlength) length++; return length; } EXPORT_SYMBOL(ucs2_strnlen); unsigned long ucs2_strlen(const ucs2_char_t *s) { return ucs2_strnlen(s, ~0UL); } EXPORT_SYMBOL(ucs2_strlen); /* * Return the number of bytes is the length of this string * Note: this is NOT the same as the number of unicode characters */ unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength) { return ucs2_strnlen(data, maxlength/sizeof(ucs2_char_t)) * sizeof(ucs2_char_t); } EXPORT_SYMBOL(ucs2_strsize); int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len) { while (1) { if (len == 0) return 0; if (*a < *b) return -1; if (*a > *b) return 1; if (*a == 0) /* implies *b == 0 */ return 0; a++; b++; len--; } } EXPORT_SYMBOL(ucs2_strncmp); unsigned long ucs2_utf8size(const ucs2_char_t *src) { unsigned long i; unsigned long j = 0; for (i = 0; src[i]; i++) { u16 c = src[i]; if (c >= 0x800) j += 3; else if (c >= 0x80) j += 2; else j += 1; } return j; } EXPORT_SYMBOL(ucs2_utf8size); /* * copy at most maxlength bytes of whole utf8 characters to dest from the * ucs2 string src. * * The return value is the number of characters copied, not including the * final NUL character. */ unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength) { unsigned int i; unsigned long j = 0; unsigned long limit = ucs2_strnlen(src, maxlength); for (i = 0; maxlength && i < limit; i++) { u16 c = src[i]; if (c >= 0x800) { if (maxlength < 3) break; maxlength -= 3; dest[j++] = 0xe0 | (c & 0xf000) >> 12; dest[j++] = 0x80 | (c & 0x0fc0) >> 6; dest[j++] = 0x80 | (c & 0x003f); } else if (c >= 0x80) { if (maxlength < 2) break; maxlength -= 2; dest[j++] = 0xc0 | (c & 0x7c0) >> 6; dest[j++] = 0x80 | (c & 0x03f); } else { maxlength -= 1; dest[j++] = c & 0x7f; } } if (maxlength) dest[j] = '\0'; return j; } EXPORT_SYMBOL(ucs2_as_utf8); MODULE_LICENSE("GPL v2");
linux-master
lib/ucs2_string.c
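A sketch of converting a UCS-2 string to a freshly allocated UTF-8 string with the helpers above; the wrapper name and the use of kmalloc() are illustrative assumptions for a kernel context.

// Hypothetical example, not part of the kernel tree.
#include <linux/slab.h>
#include <linux/ucs2_string.h>

static char *ucs2_to_utf8_dup(const ucs2_char_t *src, gfp_t gfp)
{
        unsigned long len = ucs2_utf8size(src); /* bytes needed, excluding NUL */
        char *buf = kmalloc(len + 1, gfp);

        if (!buf)
                return NULL;

        /* With one spare byte left over, ucs2_as_utf8() NUL-terminates the result. */
        ucs2_as_utf8((u8 *)buf, src, len + 1);
        return buf;
}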
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* Copyright (C) 2016-2022 Jason A. Donenfeld <[email protected]>. All Rights Reserved. * * SipHash: a fast short-input PRF * https://131002.net/siphash/ * * This implementation is specifically for SipHash2-4 for a secure PRF * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for * hashtables. */ #include <linux/siphash.h> #include <asm/unaligned.h> #if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 #include <linux/dcache.h> #include <asm/word-at-a-time.h> #endif #define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3) #define PREAMBLE(len) \ u64 v0 = SIPHASH_CONST_0; \ u64 v1 = SIPHASH_CONST_1; \ u64 v2 = SIPHASH_CONST_2; \ u64 v3 = SIPHASH_CONST_3; \ u64 b = ((u64)(len)) << 56; \ v3 ^= key->key[1]; \ v2 ^= key->key[0]; \ v1 ^= key->key[1]; \ v0 ^= key->key[0]; #define POSTAMBLE \ v3 ^= b; \ SIPROUND; \ SIPROUND; \ v0 ^= b; \ v2 ^= 0xff; \ SIPROUND; \ SIPROUND; \ SIPROUND; \ SIPROUND; \ return (v0 ^ v1) ^ (v2 ^ v3); #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); const u8 left = len & (sizeof(u64) - 1); u64 m; PREAMBLE(len) for (; data != end; data += sizeof(u64)) { m = le64_to_cpup(data); v3 ^= m; SIPROUND; SIPROUND; v0 ^= m; } #if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 if (left) b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & bytemask_from_count(left))); #else switch (left) { case 7: b |= ((u64)end[6]) << 48; fallthrough; case 6: b |= ((u64)end[5]) << 40; fallthrough; case 5: b |= ((u64)end[4]) << 32; fallthrough; case 4: b |= le32_to_cpup(data); break; case 3: b |= ((u64)end[2]) << 16; fallthrough; case 2: b |= le16_to_cpup(data); break; case 1: b |= end[0]; } #endif POSTAMBLE } EXPORT_SYMBOL(__siphash_aligned); #endif u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); const u8 left = len & (sizeof(u64) - 1); u64 m; PREAMBLE(len) for (; data != end; data += sizeof(u64)) { m = get_unaligned_le64(data); v3 ^= m; SIPROUND; SIPROUND; v0 ^= m; } #if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 if (left) b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & bytemask_from_count(left))); #else switch (left) { case 7: b |= ((u64)end[6]) << 48; fallthrough; case 6: b |= ((u64)end[5]) << 40; fallthrough; case 5: b |= ((u64)end[4]) << 32; fallthrough; case 4: b |= get_unaligned_le32(end); break; case 3: b |= ((u64)end[2]) << 16; fallthrough; case 2: b |= get_unaligned_le16(end); break; case 1: b |= end[0]; } #endif POSTAMBLE } EXPORT_SYMBOL(__siphash_unaligned); /** * siphash_1u64 - compute 64-bit siphash PRF value of a u64 * @first: first u64 * @key: the siphash key */ u64 siphash_1u64(const u64 first, const siphash_key_t *key) { PREAMBLE(8) v3 ^= first; SIPROUND; SIPROUND; v0 ^= first; POSTAMBLE } EXPORT_SYMBOL(siphash_1u64); /** * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64 * @first: first u64 * @second: second u64 * @key: the siphash key */ u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key) { PREAMBLE(16) v3 ^= first; SIPROUND; SIPROUND; v0 ^= first; v3 ^= second; SIPROUND; SIPROUND; v0 ^= second; POSTAMBLE } EXPORT_SYMBOL(siphash_2u64); /** * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64 * @first: first u64 * @second: second u64 * @third: third u64 * @key: the siphash key */ u64 siphash_3u64(const 
u64 first, const u64 second, const u64 third, const siphash_key_t *key) { PREAMBLE(24) v3 ^= first; SIPROUND; SIPROUND; v0 ^= first; v3 ^= second; SIPROUND; SIPROUND; v0 ^= second; v3 ^= third; SIPROUND; SIPROUND; v0 ^= third; POSTAMBLE } EXPORT_SYMBOL(siphash_3u64); /** * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64 * @first: first u64 * @second: second u64 * @third: third u64 * @forth: forth u64 * @key: the siphash key */ u64 siphash_4u64(const u64 first, const u64 second, const u64 third, const u64 forth, const siphash_key_t *key) { PREAMBLE(32) v3 ^= first; SIPROUND; SIPROUND; v0 ^= first; v3 ^= second; SIPROUND; SIPROUND; v0 ^= second; v3 ^= third; SIPROUND; SIPROUND; v0 ^= third; v3 ^= forth; SIPROUND; SIPROUND; v0 ^= forth; POSTAMBLE } EXPORT_SYMBOL(siphash_4u64); u64 siphash_1u32(const u32 first, const siphash_key_t *key) { PREAMBLE(4) b |= first; POSTAMBLE } EXPORT_SYMBOL(siphash_1u32); u64 siphash_3u32(const u32 first, const u32 second, const u32 third, const siphash_key_t *key) { u64 combined = (u64)second << 32 | first; PREAMBLE(12) v3 ^= combined; SIPROUND; SIPROUND; v0 ^= combined; b |= third; POSTAMBLE } EXPORT_SYMBOL(siphash_3u32); #if BITS_PER_LONG == 64 /* Note that on 64-bit, we make HalfSipHash1-3 actually be SipHash1-3, for * performance reasons. On 32-bit, below, we actually implement HalfSipHash1-3. */ #define HSIPROUND SIPROUND #define HPREAMBLE(len) PREAMBLE(len) #define HPOSTAMBLE \ v3 ^= b; \ HSIPROUND; \ v0 ^= b; \ v2 ^= 0xff; \ HSIPROUND; \ HSIPROUND; \ HSIPROUND; \ return (v0 ^ v1) ^ (v2 ^ v3); #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); const u8 left = len & (sizeof(u64) - 1); u64 m; HPREAMBLE(len) for (; data != end; data += sizeof(u64)) { m = le64_to_cpup(data); v3 ^= m; HSIPROUND; v0 ^= m; } #if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 if (left) b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & bytemask_from_count(left))); #else switch (left) { case 7: b |= ((u64)end[6]) << 48; fallthrough; case 6: b |= ((u64)end[5]) << 40; fallthrough; case 5: b |= ((u64)end[4]) << 32; fallthrough; case 4: b |= le32_to_cpup(data); break; case 3: b |= ((u64)end[2]) << 16; fallthrough; case 2: b |= le16_to_cpup(data); break; case 1: b |= end[0]; } #endif HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_aligned); #endif u32 __hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u64)); const u8 left = len & (sizeof(u64) - 1); u64 m; HPREAMBLE(len) for (; data != end; data += sizeof(u64)) { m = get_unaligned_le64(data); v3 ^= m; HSIPROUND; v0 ^= m; } #if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64 if (left) b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) & bytemask_from_count(left))); #else switch (left) { case 7: b |= ((u64)end[6]) << 48; fallthrough; case 6: b |= ((u64)end[5]) << 40; fallthrough; case 5: b |= ((u64)end[4]) << 32; fallthrough; case 4: b |= get_unaligned_le32(end); break; case 3: b |= ((u64)end[2]) << 16; fallthrough; case 2: b |= get_unaligned_le16(end); break; case 1: b |= end[0]; } #endif HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_unaligned); /** * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32 * @first: first u32 * @key: the hsiphash key */ u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key) { HPREAMBLE(4) b |= first; HPOSTAMBLE } EXPORT_SYMBOL(hsiphash_1u32); /** * hsiphash_2u32 
- compute 32-bit hsiphash PRF value of 2 u32 * @first: first u32 * @second: second u32 * @key: the hsiphash key */ u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key) { u64 combined = (u64)second << 32 | first; HPREAMBLE(8) v3 ^= combined; HSIPROUND; v0 ^= combined; HPOSTAMBLE } EXPORT_SYMBOL(hsiphash_2u32); /** * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32 * @first: first u32 * @second: second u32 * @third: third u32 * @key: the hsiphash key */ u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third, const hsiphash_key_t *key) { u64 combined = (u64)second << 32 | first; HPREAMBLE(12) v3 ^= combined; HSIPROUND; v0 ^= combined; b |= third; HPOSTAMBLE } EXPORT_SYMBOL(hsiphash_3u32); /** * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32 * @first: first u32 * @second: second u32 * @third: third u32 * @forth: forth u32 * @key: the hsiphash key */ u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third, const u32 forth, const hsiphash_key_t *key) { u64 combined = (u64)second << 32 | first; HPREAMBLE(16) v3 ^= combined; HSIPROUND; v0 ^= combined; combined = (u64)forth << 32 | third; v3 ^= combined; HSIPROUND; v0 ^= combined; HPOSTAMBLE } EXPORT_SYMBOL(hsiphash_4u32); #else #define HSIPROUND HSIPHASH_PERMUTATION(v0, v1, v2, v3) #define HPREAMBLE(len) \ u32 v0 = HSIPHASH_CONST_0; \ u32 v1 = HSIPHASH_CONST_1; \ u32 v2 = HSIPHASH_CONST_2; \ u32 v3 = HSIPHASH_CONST_3; \ u32 b = ((u32)(len)) << 24; \ v3 ^= key->key[1]; \ v2 ^= key->key[0]; \ v1 ^= key->key[1]; \ v0 ^= key->key[0]; #define HPOSTAMBLE \ v3 ^= b; \ HSIPROUND; \ v0 ^= b; \ v2 ^= 0xff; \ HSIPROUND; \ HSIPROUND; \ HSIPROUND; \ return v1 ^ v3; #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u32)); const u8 left = len & (sizeof(u32) - 1); u32 m; HPREAMBLE(len) for (; data != end; data += sizeof(u32)) { m = le32_to_cpup(data); v3 ^= m; HSIPROUND; v0 ^= m; } switch (left) { case 3: b |= ((u32)end[2]) << 16; fallthrough; case 2: b |= le16_to_cpup(data); break; case 1: b |= end[0]; } HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_aligned); #endif u32 __hsiphash_unaligned(const void *data, size_t len, const hsiphash_key_t *key) { const u8 *end = data + len - (len % sizeof(u32)); const u8 left = len & (sizeof(u32) - 1); u32 m; HPREAMBLE(len) for (; data != end; data += sizeof(u32)) { m = get_unaligned_le32(data); v3 ^= m; HSIPROUND; v0 ^= m; } switch (left) { case 3: b |= ((u32)end[2]) << 16; fallthrough; case 2: b |= get_unaligned_le16(end); break; case 1: b |= end[0]; } HPOSTAMBLE } EXPORT_SYMBOL(__hsiphash_unaligned); /** * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32 * @first: first u32 * @key: the hsiphash key */ u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key) { HPREAMBLE(4) v3 ^= first; HSIPROUND; v0 ^= first; HPOSTAMBLE } EXPORT_SYMBOL(hsiphash_1u32); /** * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32 * @first: first u32 * @second: second u32 * @key: the hsiphash key */ u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key) { HPREAMBLE(8) v3 ^= first; HSIPROUND; v0 ^= first; v3 ^= second; HSIPROUND; v0 ^= second; HPOSTAMBLE } EXPORT_SYMBOL(hsiphash_2u32); /** * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32 * @first: first u32 * @second: second u32 * @third: third u32 * @key: the hsiphash key */ u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third, const 
hsiphash_key_t *key) { HPREAMBLE(12) v3 ^= first; HSIPROUND; v0 ^= first; v3 ^= second; HSIPROUND; v0 ^= second; v3 ^= third; HSIPROUND; v0 ^= third; HPOSTAMBLE } EXPORT_SYMBOL(hsiphash_3u32); /** * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32 * @first: first u32 * @second: second u32 * @third: third u32 * @forth: forth u32 * @key: the hsiphash key */ u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third, const u32 forth, const hsiphash_key_t *key) { HPREAMBLE(16) v3 ^= first; HSIPROUND; v0 ^= first; v3 ^= second; HSIPROUND; v0 ^= second; v3 ^= third; HSIPROUND; v0 ^= third; v3 ^= forth; HSIPROUND; v0 ^= forth; HPOSTAMBLE } EXPORT_SYMBOL(hsiphash_4u32); #endif
linux-master
lib/siphash.c
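A sketch of keyed hashing with the helpers above, assuming the siphash() wrapper declared in <linux/siphash.h>; the key object and function names are illustrative, and the key would normally be generated once at init time and kept secret.

// Hypothetical example, not part of the kernel tree.
#include <linux/random.h>
#include <linux/siphash.h>

static siphash_key_t example_key;

static void example_key_init(void)
{
        get_random_bytes(&example_key, sizeof(example_key));
}

/* Hash a pair of 64-bit identifiers into a 64-bit PRF value. */
static u64 example_hash_pair(u64 a, u64 b)
{
        return siphash_2u64(a, b, &example_key);
}

/* Hash an arbitrary buffer; dispatches to the aligned/unaligned variants above. */
static u64 example_hash_buf(const void *data, size_t len)
{
        return siphash(data, len, &example_key);
}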
#include <linux/libfdt_env.h>
#include "../scripts/dtc/libfdt/fdt_addresses.c"
linux-master
lib/fdt_addresses.c
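The wrapper above pulls libfdt's fdt_addresses.c into the kernel build, which implements helpers such as fdt_address_cells() and fdt_size_cells(). A small sketch of querying them on a flattened device tree blob follows; the function name is illustrative and handling of negative error codes is left to the caller.

// Hypothetical example, not part of the kernel tree.
#include <linux/libfdt.h>
#include <linux/printk.h>

static void example_report_cells(const void *fdt, int nodeoffset)
{
        int ac = fdt_address_cells(fdt, nodeoffset);    /* #address-cells of the node */
        int sc = fdt_size_cells(fdt, nodeoffset);       /* #size-cells of the node */

        pr_info("node %d: #address-cells=%d #size-cells=%d\n", nodeoffset, ac, sc);
}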
// SPDX-License-Identifier: GPL-2.0
/*
 * Normal 64-bit CRC calculation.
 *
 * This is a basic crc64 implementation following ECMA-182 specification,
 * which can be found from,
 * https://www.ecma-international.org/publications/standards/Ecma-182.htm
 *
 * Dr. Ross N. Williams has a great document to introduce the idea of CRC
 * algorithm, here the CRC64 code is also inspired by the table-driven
 * algorithm and detail example from this paper. This paper can be found
 * from,
 * http://www.ross.net/crc/download/crc_v3.txt
 *
 * crc64table[256] is the lookup table of a table-driven 64-bit CRC
 * calculation, which is generated by gen_crc64table.c in kernel build
 * time. The polynomial of crc64 arithmetic is from ECMA-182 specification
 * as well, which is defined as,
 *
 * x^64 + x^62 + x^57 + x^55 + x^54 + x^53 + x^52 + x^47 + x^46 + x^45 +
 * x^40 + x^39 + x^38 + x^37 + x^35 + x^33 + x^32 + x^31 + x^29 + x^27 +
 * x^24 + x^23 + x^22 + x^21 + x^19 + x^17 + x^13 + x^12 + x^10 + x^9 +
 * x^7 + x^4 + x + 1
 *
 * crc64rocksoft[256] table is from the Rocksoft specification polynomial
 * defined as,
 *
 * x^64 + x^63 + x^61 + x^59 + x^58 + x^56 + x^55 + x^52 + x^49 + x^48 + x^47 +
 * x^46 + x^44 + x^41 + x^37 + x^36 + x^34 + x^32 + x^31 + x^28 + x^26 + x^23 +
 * x^22 + x^19 + x^16 + x^13 + x^12 + x^10 + x^9 + x^6 + x^4 + x^3 + 1
 *
 * Copyright 2018 SUSE Linux.
 * Author: Coly Li <[email protected]>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/crc64.h>
#include "crc64table.h"

MODULE_DESCRIPTION("CRC64 calculations");
MODULE_LICENSE("GPL v2");

/**
 * crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
 * @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
 *       or the previous crc64 value if computing incrementally.
 * @p: pointer to buffer over which CRC64 is run
 * @len: length of buffer @p
 */
u64 __pure crc64_be(u64 crc, const void *p, size_t len)
{
        size_t i, t;
        const unsigned char *_p = p;

        for (i = 0; i < len; i++) {
                t = ((crc >> 56) ^ (*_p++)) & 0xFF;
                crc = crc64table[t] ^ (crc << 8);
        }

        return crc;
}
EXPORT_SYMBOL_GPL(crc64_be);

/**
 * crc64_rocksoft_generic - Calculate bitwise Rocksoft CRC64
 * @crc: seed value for computation. 0 for a new CRC calculation, or the
 *       previous crc64 value if computing incrementally.
 * @p: pointer to buffer over which CRC64 is run
 * @len: length of buffer @p
 */
u64 __pure crc64_rocksoft_generic(u64 crc, const void *p, size_t len)
{
        const unsigned char *_p = p;
        size_t i;

        crc = ~crc;

        for (i = 0; i < len; i++)
                crc = (crc >> 8) ^ crc64rocksofttable[(crc & 0xff) ^ *_p++];

        return ~crc;
}
EXPORT_SYMBOL_GPL(crc64_rocksoft_generic);
linux-master
lib/crc64.c
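A sketch of computing an ECMA-182 CRC64 over a buffer in chunks with crc64_be() above, feeding each result back in as the seed exactly as the kerneldoc describes; the chunk size and the wrapper name are illustrative.

// Hypothetical example, not part of the kernel tree.
#include <linux/crc64.h>
#include <linux/kernel.h>

static u64 example_crc64(const u8 *data, size_t len)
{
        u64 crc = 0;    /* 0 (or (u64)~0) starts a new calculation */

        while (len) {
                size_t chunk = min_t(size_t, len, 256);

                crc = crc64_be(crc, data, chunk);       /* incremental update */
                data += chunk;
                len -= chunk;
        }
        return crc;
}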
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2001 Momchil Velikov * Portions Copyright (C) 2001 Christoph Hellwig * Copyright (C) 2005 SGI, Christoph Lameter * Copyright (C) 2006 Nick Piggin * Copyright (C) 2012 Konstantin Khlebnikov * Copyright (C) 2016 Intel, Matthew Wilcox * Copyright (C) 2016 Intel, Ross Zwisler */ #include <linux/bitmap.h> #include <linux/bitops.h> #include <linux/bug.h> #include <linux/cpu.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/idr.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/kmemleak.h> #include <linux/percpu.h> #include <linux/preempt.h> /* in_interrupt() */ #include <linux/radix-tree.h> #include <linux/rcupdate.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/xarray.h> #include "radix-tree.h" /* * Radix tree node cache. */ struct kmem_cache *radix_tree_node_cachep; /* * The radix tree is variable-height, so an insert operation not only has * to build the branch to its corresponding item, it also has to build the * branch to existing items if the size has to be increased (by * radix_tree_extend). * * The worst case is a zero height tree with just a single item at index 0, * and then inserting an item at index ULONG_MAX. This requires 2 new branches * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared. * Hence: */ #define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1) /* * The IDR does not have to be as high as the radix tree since it uses * signed integers, not unsigned longs. */ #define IDR_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(int) - 1) #define IDR_MAX_PATH (DIV_ROUND_UP(IDR_INDEX_BITS, \ RADIX_TREE_MAP_SHIFT)) #define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1) /* * Per-cpu pool of preloaded nodes */ DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { .lock = INIT_LOCAL_LOCK(lock), }; EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads); static inline struct radix_tree_node *entry_to_node(void *ptr) { return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE); } static inline void *node_to_entry(void *ptr) { return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE); } #define RADIX_TREE_RETRY XA_RETRY_ENTRY static inline unsigned long get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot) { return parent ? 
slot - parent->slots : 0; } static unsigned int radix_tree_descend(const struct radix_tree_node *parent, struct radix_tree_node **nodep, unsigned long index) { unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK; void __rcu **entry = rcu_dereference_raw(parent->slots[offset]); *nodep = (void *)entry; return offset; } static inline gfp_t root_gfp_mask(const struct radix_tree_root *root) { return root->xa_flags & (__GFP_BITS_MASK & ~GFP_ZONEMASK); } static inline void tag_set(struct radix_tree_node *node, unsigned int tag, int offset) { __set_bit(offset, node->tags[tag]); } static inline void tag_clear(struct radix_tree_node *node, unsigned int tag, int offset) { __clear_bit(offset, node->tags[tag]); } static inline int tag_get(const struct radix_tree_node *node, unsigned int tag, int offset) { return test_bit(offset, node->tags[tag]); } static inline void root_tag_set(struct radix_tree_root *root, unsigned tag) { root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT)); } static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag) { root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT)); } static inline void root_tag_clear_all(struct radix_tree_root *root) { root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1); } static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag) { return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT)); } static inline unsigned root_tags_get(const struct radix_tree_root *root) { return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT; } static inline bool is_idr(const struct radix_tree_root *root) { return !!(root->xa_flags & ROOT_IS_IDR); } /* * Returns 1 if any slot in the node has this tag set. * Otherwise returns 0. */ static inline int any_tag_set(const struct radix_tree_node *node, unsigned int tag) { unsigned idx; for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { if (node->tags[tag][idx]) return 1; } return 0; } static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag) { bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE); } /** * radix_tree_find_next_bit - find the next set bit in a memory region * * @node: where to begin the search * @tag: the tag index * @offset: the bitnumber to start searching at * * Unrollable variant of find_next_bit() for constant size arrays. * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero. * Returns next bit offset, or size if nothing found. 
*/ static __always_inline unsigned long radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag, unsigned long offset) { const unsigned long *addr = node->tags[tag]; if (offset < RADIX_TREE_MAP_SIZE) { unsigned long tmp; addr += offset / BITS_PER_LONG; tmp = *addr >> (offset % BITS_PER_LONG); if (tmp) return __ffs(tmp) + offset; offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1); while (offset < RADIX_TREE_MAP_SIZE) { tmp = *++addr; if (tmp) return __ffs(tmp) + offset; offset += BITS_PER_LONG; } } return RADIX_TREE_MAP_SIZE; } static unsigned int iter_offset(const struct radix_tree_iter *iter) { return iter->index & RADIX_TREE_MAP_MASK; } /* * The maximum index which can be stored in a radix tree */ static inline unsigned long shift_maxindex(unsigned int shift) { return (RADIX_TREE_MAP_SIZE << shift) - 1; } static inline unsigned long node_maxindex(const struct radix_tree_node *node) { return shift_maxindex(node->shift); } static unsigned long next_index(unsigned long index, const struct radix_tree_node *node, unsigned long offset) { return (index & ~node_maxindex(node)) + (offset << node->shift); } /* * This assumes that the caller has performed appropriate preallocation, and * that the caller has pinned this thread of control to the current CPU. */ static struct radix_tree_node * radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent, struct radix_tree_root *root, unsigned int shift, unsigned int offset, unsigned int count, unsigned int nr_values) { struct radix_tree_node *ret = NULL; /* * Preload code isn't irq safe and it doesn't make sense to use * preloading during an interrupt anyway as all the allocations have * to be atomic. So just do normal allocation when in interrupt. */ if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) { struct radix_tree_preload *rtp; /* * Even if the caller has preloaded, try to allocate from the * cache first for the new node to get accounted to the memory * cgroup. */ ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask | __GFP_NOWARN); if (ret) goto out; /* * Provided the caller has preloaded here, we will always * succeed in getting a node here (and never reach * kmem_cache_alloc) */ rtp = this_cpu_ptr(&radix_tree_preloads); if (rtp->nr) { ret = rtp->nodes; rtp->nodes = ret->parent; rtp->nr--; } /* * Update the allocation stack trace as this is more useful * for debugging. */ kmemleak_update_trace(ret); goto out; } ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); out: BUG_ON(radix_tree_is_internal_node(ret)); if (ret) { ret->shift = shift; ret->offset = offset; ret->count = count; ret->nr_values = nr_values; ret->parent = parent; ret->array = root; } return ret; } void radix_tree_node_rcu_free(struct rcu_head *head) { struct radix_tree_node *node = container_of(head, struct radix_tree_node, rcu_head); /* * Must only free zeroed nodes into the slab. We can be left with * non-NULL entries by radix_tree_free_nodes, so clear the entries * and tags here. */ memset(node->slots, 0, sizeof(node->slots)); memset(node->tags, 0, sizeof(node->tags)); INIT_LIST_HEAD(&node->private_list); kmem_cache_free(radix_tree_node_cachep, node); } static inline void radix_tree_node_free(struct radix_tree_node *node) { call_rcu(&node->rcu_head, radix_tree_node_rcu_free); } /* * Load up this CPU's radix_tree_node buffer with sufficient objects to * ensure that the addition of a single element in the tree cannot fail. On * success, return zero, with preemption disabled. 
On error, return -ENOMEM * with preemption not disabled. * * To make use of this facility, the radix tree must be initialised without * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE(). */ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr) { struct radix_tree_preload *rtp; struct radix_tree_node *node; int ret = -ENOMEM; /* * Nodes preloaded by one cgroup can be used by another cgroup, so * they should never be accounted to any particular memory cgroup. */ gfp_mask &= ~__GFP_ACCOUNT; local_lock(&radix_tree_preloads.lock); rtp = this_cpu_ptr(&radix_tree_preloads); while (rtp->nr < nr) { local_unlock(&radix_tree_preloads.lock); node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); if (node == NULL) goto out; local_lock(&radix_tree_preloads.lock); rtp = this_cpu_ptr(&radix_tree_preloads); if (rtp->nr < nr) { node->parent = rtp->nodes; rtp->nodes = node; rtp->nr++; } else { kmem_cache_free(radix_tree_node_cachep, node); } } ret = 0; out: return ret; } /* * Load up this CPU's radix_tree_node buffer with sufficient objects to * ensure that the addition of a single element in the tree cannot fail. On * success, return zero, with preemption disabled. On error, return -ENOMEM * with preemption not disabled. * * To make use of this facility, the radix tree must be initialised without * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE(). */ int radix_tree_preload(gfp_t gfp_mask) { /* Warn on non-sensical use... */ WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask)); return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); } EXPORT_SYMBOL(radix_tree_preload); /* * The same as above function, except we don't guarantee preloading happens. * We do it, if we decide it helps. On success, return zero with preemption * disabled. On error, return -ENOMEM with preemption not disabled. */ int radix_tree_maybe_preload(gfp_t gfp_mask) { if (gfpflags_allow_blocking(gfp_mask)) return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); /* Preloading doesn't help anything with this gfp mask, skip it */ local_lock(&radix_tree_preloads.lock); return 0; } EXPORT_SYMBOL(radix_tree_maybe_preload); static unsigned radix_tree_load_root(const struct radix_tree_root *root, struct radix_tree_node **nodep, unsigned long *maxindex) { struct radix_tree_node *node = rcu_dereference_raw(root->xa_head); *nodep = node; if (likely(radix_tree_is_internal_node(node))) { node = entry_to_node(node); *maxindex = node_maxindex(node); return node->shift + RADIX_TREE_MAP_SHIFT; } *maxindex = 0; return 0; } /* * Extend a radix tree so it can store key @index. */ static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp, unsigned long index, unsigned int shift) { void *entry; unsigned int maxshift; int tag; /* Figure out what the shift should be. 
*/ maxshift = shift; while (index > shift_maxindex(maxshift)) maxshift += RADIX_TREE_MAP_SHIFT; entry = rcu_dereference_raw(root->xa_head); if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE))) goto out; do { struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL, root, shift, 0, 1, 0); if (!node) return -ENOMEM; if (is_idr(root)) { all_tag_set(node, IDR_FREE); if (!root_tag_get(root, IDR_FREE)) { tag_clear(node, IDR_FREE, 0); root_tag_set(root, IDR_FREE); } } else { /* Propagate the aggregated tag info to the new child */ for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { if (root_tag_get(root, tag)) tag_set(node, tag, 0); } } BUG_ON(shift > BITS_PER_LONG); if (radix_tree_is_internal_node(entry)) { entry_to_node(entry)->parent = node; } else if (xa_is_value(entry)) { /* Moving a value entry root->xa_head to a node */ node->nr_values = 1; } /* * entry was already in the radix tree, so we do not need * rcu_assign_pointer here */ node->slots[0] = (void __rcu *)entry; entry = node_to_entry(node); rcu_assign_pointer(root->xa_head, entry); shift += RADIX_TREE_MAP_SHIFT; } while (shift <= maxshift); out: return maxshift + RADIX_TREE_MAP_SHIFT; } /** * radix_tree_shrink - shrink radix tree to minimum height * @root: radix tree root */ static inline bool radix_tree_shrink(struct radix_tree_root *root) { bool shrunk = false; for (;;) { struct radix_tree_node *node = rcu_dereference_raw(root->xa_head); struct radix_tree_node *child; if (!radix_tree_is_internal_node(node)) break; node = entry_to_node(node); /* * The candidate node has more than one child, or its child * is not at the leftmost slot, we cannot shrink. */ if (node->count != 1) break; child = rcu_dereference_raw(node->slots[0]); if (!child) break; /* * For an IDR, we must not shrink entry 0 into the root in * case somebody calls idr_replace() with a pointer that * appears to be an internal entry */ if (!node->shift && is_idr(root)) break; if (radix_tree_is_internal_node(child)) entry_to_node(child)->parent = NULL; /* * We don't need rcu_assign_pointer(), since we are simply * moving the node from one part of the tree to another: if it * was safe to dereference the old pointer to it * (node->slots[0]), it will be safe to dereference the new * one (root->xa_head) as far as dependent read barriers go. */ root->xa_head = (void __rcu *)child; if (is_idr(root) && !tag_get(node, IDR_FREE, 0)) root_tag_clear(root, IDR_FREE); /* * We have a dilemma here. The node's slot[0] must not be * NULLed in case there are concurrent lookups expecting to * find the item. However if this was a bottom-level node, * then it may be subject to the slot pointer being visible * to callers dereferencing it. If item corresponding to * slot[0] is subsequently deleted, these callers would expect * their slot to become empty sooner or later. * * For example, lockless pagecache will look up a slot, deref * the page pointer, and if the page has 0 refcount it means it * was concurrently deleted from pagecache so try the deref * again. Fortunately there is already a requirement for logic * to retry the entire slot lookup -- the indirect pointer * problem (replacing direct root node with an indirect pointer * also results in a stale slot). So tag the slot as indirect * to force callers to retry. 
*/ node->count = 0; if (!radix_tree_is_internal_node(child)) { node->slots[0] = (void __rcu *)RADIX_TREE_RETRY; } WARN_ON_ONCE(!list_empty(&node->private_list)); radix_tree_node_free(node); shrunk = true; } return shrunk; } static bool delete_node(struct radix_tree_root *root, struct radix_tree_node *node) { bool deleted = false; do { struct radix_tree_node *parent; if (node->count) { if (node_to_entry(node) == rcu_dereference_raw(root->xa_head)) deleted |= radix_tree_shrink(root); return deleted; } parent = node->parent; if (parent) { parent->slots[node->offset] = NULL; parent->count--; } else { /* * Shouldn't the tags already have all been cleared * by the caller? */ if (!is_idr(root)) root_tag_clear_all(root); root->xa_head = NULL; } WARN_ON_ONCE(!list_empty(&node->private_list)); radix_tree_node_free(node); deleted = true; node = parent; } while (node); return deleted; } /** * __radix_tree_create - create a slot in a radix tree * @root: radix tree root * @index: index key * @nodep: returns node * @slotp: returns slot * * Create, if necessary, and return the node and slot for an item * at position @index in the radix tree @root. * * Until there is more than one item in the tree, no nodes are * allocated and @root->xa_head is used as a direct slot instead of * pointing to a node, in which case *@nodep will be NULL. * * Returns -ENOMEM, or 0 for success. */ static int __radix_tree_create(struct radix_tree_root *root, unsigned long index, struct radix_tree_node **nodep, void __rcu ***slotp) { struct radix_tree_node *node = NULL, *child; void __rcu **slot = (void __rcu **)&root->xa_head; unsigned long maxindex; unsigned int shift, offset = 0; unsigned long max = index; gfp_t gfp = root_gfp_mask(root); shift = radix_tree_load_root(root, &child, &maxindex); /* Make sure the tree is high enough. */ if (max > maxindex) { int error = radix_tree_extend(root, gfp, max, shift); if (error < 0) return error; shift = error; child = rcu_dereference_raw(root->xa_head); } while (shift > 0) { shift -= RADIX_TREE_MAP_SHIFT; if (child == NULL) { /* Have to add a child node. */ child = radix_tree_node_alloc(gfp, node, root, shift, offset, 0, 0); if (!child) return -ENOMEM; rcu_assign_pointer(*slot, node_to_entry(child)); if (node) node->count++; } else if (!radix_tree_is_internal_node(child)) break; /* Go a level down */ node = entry_to_node(child); offset = radix_tree_descend(node, &child, index); slot = &node->slots[offset]; } if (nodep) *nodep = node; if (slotp) *slotp = slot; return 0; } /* * Free any nodes below this node. The tree is presumed to not need * shrinking, and any user data in the tree is presumed to not need a * destructor called on it. If we need to add a destructor, we can * add that functionality later. Note that we may not clear tags or * slots from the tree as an RCU walker may still have a pointer into * this subtree. We could replace the entries with RADIX_TREE_RETRY, * but we'll still have to clear those in rcu_free. 
*/ static void radix_tree_free_nodes(struct radix_tree_node *node) { unsigned offset = 0; struct radix_tree_node *child = entry_to_node(node); for (;;) { void *entry = rcu_dereference_raw(child->slots[offset]); if (xa_is_node(entry) && child->shift) { child = entry_to_node(entry); offset = 0; continue; } offset++; while (offset == RADIX_TREE_MAP_SIZE) { struct radix_tree_node *old = child; offset = child->offset + 1; child = child->parent; WARN_ON_ONCE(!list_empty(&old->private_list)); radix_tree_node_free(old); if (old == entry_to_node(node)) return; } } } static inline int insert_entries(struct radix_tree_node *node, void __rcu **slot, void *item) { if (*slot) return -EEXIST; rcu_assign_pointer(*slot, item); if (node) { node->count++; if (xa_is_value(item)) node->nr_values++; } return 1; } /** * radix_tree_insert - insert into a radix tree * @root: radix tree root * @index: index key * @item: item to insert * * Insert an item into the radix tree at position @index. */ int radix_tree_insert(struct radix_tree_root *root, unsigned long index, void *item) { struct radix_tree_node *node; void __rcu **slot; int error; BUG_ON(radix_tree_is_internal_node(item)); error = __radix_tree_create(root, index, &node, &slot); if (error) return error; error = insert_entries(node, slot, item); if (error < 0) return error; if (node) { unsigned offset = get_slot_offset(node, slot); BUG_ON(tag_get(node, 0, offset)); BUG_ON(tag_get(node, 1, offset)); BUG_ON(tag_get(node, 2, offset)); } else { BUG_ON(root_tags_get(root)); } return 0; } EXPORT_SYMBOL(radix_tree_insert); /** * __radix_tree_lookup - lookup an item in a radix tree * @root: radix tree root * @index: index key * @nodep: returns node * @slotp: returns slot * * Lookup and return the item at position @index in the radix * tree @root. * * Until there is more than one item in the tree, no nodes are * allocated and @root->xa_head is used as a direct slot instead of * pointing to a node, in which case *@nodep will be NULL. */ void *__radix_tree_lookup(const struct radix_tree_root *root, unsigned long index, struct radix_tree_node **nodep, void __rcu ***slotp) { struct radix_tree_node *node, *parent; unsigned long maxindex; void __rcu **slot; restart: parent = NULL; slot = (void __rcu **)&root->xa_head; radix_tree_load_root(root, &node, &maxindex); if (index > maxindex) return NULL; while (radix_tree_is_internal_node(node)) { unsigned offset; parent = entry_to_node(node); offset = radix_tree_descend(parent, &node, index); slot = parent->slots + offset; if (node == RADIX_TREE_RETRY) goto restart; if (parent->shift == 0) break; } if (nodep) *nodep = parent; if (slotp) *slotp = slot; return node; } /** * radix_tree_lookup_slot - lookup a slot in a radix tree * @root: radix tree root * @index: index key * * Returns: the slot corresponding to the position @index in the * radix tree @root. This is useful for update-if-exists operations. * * This function can be called under rcu_read_lock iff the slot is not * modified by radix_tree_replace_slot, otherwise it must be called * exclusive from other writers. Any dereference of the slot must be done * using radix_tree_deref_slot. 
*/ void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root, unsigned long index) { void __rcu **slot; if (!__radix_tree_lookup(root, index, NULL, &slot)) return NULL; return slot; } EXPORT_SYMBOL(radix_tree_lookup_slot); /** * radix_tree_lookup - perform lookup operation on a radix tree * @root: radix tree root * @index: index key * * Lookup the item at the position @index in the radix tree @root. * * This function can be called under rcu_read_lock, however the caller * must manage lifetimes of leaf nodes (eg. RCU may also be used to free * them safely). No RCU barriers are required to access or modify the * returned item, however. */ void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index) { return __radix_tree_lookup(root, index, NULL, NULL); } EXPORT_SYMBOL(radix_tree_lookup); static void replace_slot(void __rcu **slot, void *item, struct radix_tree_node *node, int count, int values) { if (node && (count || values)) { node->count += count; node->nr_values += values; } rcu_assign_pointer(*slot, item); } static bool node_tag_get(const struct radix_tree_root *root, const struct radix_tree_node *node, unsigned int tag, unsigned int offset) { if (node) return tag_get(node, tag, offset); return root_tag_get(root, tag); } /* * IDR users want to be able to store NULL in the tree, so if the slot isn't * free, don't adjust the count, even if it's transitioning between NULL and * non-NULL. For the IDA, we mark slots as being IDR_FREE while they still * have empty bits, but it only stores NULL in slots when they're being * deleted. */ static int calculate_count(struct radix_tree_root *root, struct radix_tree_node *node, void __rcu **slot, void *item, void *old) { if (is_idr(root)) { unsigned offset = get_slot_offset(node, slot); bool free = node_tag_get(root, node, IDR_FREE, offset); if (!free) return 0; if (!old) return 1; } return !!item - !!old; } /** * __radix_tree_replace - replace item in a slot * @root: radix tree root * @node: pointer to tree node * @slot: pointer to slot in @node * @item: new item to store in the slot. * * For use with __radix_tree_lookup(). Caller must hold tree write locked * across slot lookup and replacement. */ void __radix_tree_replace(struct radix_tree_root *root, struct radix_tree_node *node, void __rcu **slot, void *item) { void *old = rcu_dereference_raw(*slot); int values = !!xa_is_value(item) - !!xa_is_value(old); int count = calculate_count(root, node, slot, item, old); /* * This function supports replacing value entries and * deleting entries, but that needs accounting against the * node unless the slot is root->xa_head. */ WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) && (count || values)); replace_slot(slot, item, node, count, values); if (!node) return; delete_node(root, node); } /** * radix_tree_replace_slot - replace item in a slot * @root: radix tree root * @slot: pointer to slot * @item: new item to store in the slot. * * For use with radix_tree_lookup_slot() and * radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked * across slot lookup and replacement. * * NOTE: This cannot be used to switch between non-entries (empty slots), * regular entries, and value entries, as that requires accounting * inside the radix tree node. When switching from one type of entry or * deleting, use __radix_tree_lookup() and __radix_tree_replace() or * radix_tree_iter_replace(). 
*/ void radix_tree_replace_slot(struct radix_tree_root *root, void __rcu **slot, void *item) { __radix_tree_replace(root, NULL, slot, item); } EXPORT_SYMBOL(radix_tree_replace_slot); /** * radix_tree_iter_replace - replace item in a slot * @root: radix tree root * @iter: iterator state * @slot: pointer to slot * @item: new item to store in the slot. * * For use with radix_tree_for_each_slot(). * Caller must hold tree write locked. */ void radix_tree_iter_replace(struct radix_tree_root *root, const struct radix_tree_iter *iter, void __rcu **slot, void *item) { __radix_tree_replace(root, iter->node, slot, item); } static void node_tag_set(struct radix_tree_root *root, struct radix_tree_node *node, unsigned int tag, unsigned int offset) { while (node) { if (tag_get(node, tag, offset)) return; tag_set(node, tag, offset); offset = node->offset; node = node->parent; } if (!root_tag_get(root, tag)) root_tag_set(root, tag); } /** * radix_tree_tag_set - set a tag on a radix tree node * @root: radix tree root * @index: index key * @tag: tag index * * Set the search tag (which must be < RADIX_TREE_MAX_TAGS) * corresponding to @index in the radix tree. From * the root all the way down to the leaf node. * * Returns the address of the tagged item. Setting a tag on a not-present * item is a bug. */ void *radix_tree_tag_set(struct radix_tree_root *root, unsigned long index, unsigned int tag) { struct radix_tree_node *node, *parent; unsigned long maxindex; radix_tree_load_root(root, &node, &maxindex); BUG_ON(index > maxindex); while (radix_tree_is_internal_node(node)) { unsigned offset; parent = entry_to_node(node); offset = radix_tree_descend(parent, &node, index); BUG_ON(!node); if (!tag_get(parent, tag, offset)) tag_set(parent, tag, offset); } /* set the root's tag bit */ if (!root_tag_get(root, tag)) root_tag_set(root, tag); return node; } EXPORT_SYMBOL(radix_tree_tag_set); static void node_tag_clear(struct radix_tree_root *root, struct radix_tree_node *node, unsigned int tag, unsigned int offset) { while (node) { if (!tag_get(node, tag, offset)) return; tag_clear(node, tag, offset); if (any_tag_set(node, tag)) return; offset = node->offset; node = node->parent; } /* clear the root's tag bit */ if (root_tag_get(root, tag)) root_tag_clear(root, tag); } /** * radix_tree_tag_clear - clear a tag on a radix tree node * @root: radix tree root * @index: index key * @tag: tag index * * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS) * corresponding to @index in the radix tree. If this causes * the leaf node to have no tags set then clear the tag in the * next-to-leaf node, etc. * * Returns the address of the tagged item on success, else NULL. ie: * has the same return value and semantics as radix_tree_lookup(). 
*/ void *radix_tree_tag_clear(struct radix_tree_root *root, unsigned long index, unsigned int tag) { struct radix_tree_node *node, *parent; unsigned long maxindex; int offset = 0; radix_tree_load_root(root, &node, &maxindex); if (index > maxindex) return NULL; parent = NULL; while (radix_tree_is_internal_node(node)) { parent = entry_to_node(node); offset = radix_tree_descend(parent, &node, index); } if (node) node_tag_clear(root, parent, tag, offset); return node; } EXPORT_SYMBOL(radix_tree_tag_clear); /** * radix_tree_iter_tag_clear - clear a tag on the current iterator entry * @root: radix tree root * @iter: iterator state * @tag: tag to clear */ void radix_tree_iter_tag_clear(struct radix_tree_root *root, const struct radix_tree_iter *iter, unsigned int tag) { node_tag_clear(root, iter->node, tag, iter_offset(iter)); } /** * radix_tree_tag_get - get a tag on a radix tree node * @root: radix tree root * @index: index key * @tag: tag index (< RADIX_TREE_MAX_TAGS) * * Return values: * * 0: tag not present or not set * 1: tag set * * Note that the return value of this function may not be relied on, even if * the RCU lock is held, unless tag modification and node deletion are excluded * from concurrency. */ int radix_tree_tag_get(const struct radix_tree_root *root, unsigned long index, unsigned int tag) { struct radix_tree_node *node, *parent; unsigned long maxindex; if (!root_tag_get(root, tag)) return 0; radix_tree_load_root(root, &node, &maxindex); if (index > maxindex) return 0; while (radix_tree_is_internal_node(node)) { unsigned offset; parent = entry_to_node(node); offset = radix_tree_descend(parent, &node, index); if (!tag_get(parent, tag, offset)) return 0; if (node == RADIX_TREE_RETRY) break; } return 1; } EXPORT_SYMBOL(radix_tree_tag_get); /* Construct iter->tags bit-mask from node->tags[tag] array */ static void set_iter_tags(struct radix_tree_iter *iter, struct radix_tree_node *node, unsigned offset, unsigned tag) { unsigned tag_long = offset / BITS_PER_LONG; unsigned tag_bit = offset % BITS_PER_LONG; if (!node) { iter->tags = 1; return; } iter->tags = node->tags[tag][tag_long] >> tag_bit; /* This never happens if RADIX_TREE_TAG_LONGS == 1 */ if (tag_long < RADIX_TREE_TAG_LONGS - 1) { /* Pick tags from next element */ if (tag_bit) iter->tags |= node->tags[tag][tag_long + 1] << (BITS_PER_LONG - tag_bit); /* Clip chunk size, here only BITS_PER_LONG tags */ iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG); } } void __rcu **radix_tree_iter_resume(void __rcu **slot, struct radix_tree_iter *iter) { iter->index = __radix_tree_iter_add(iter, 1); iter->next_index = iter->index; iter->tags = 0; return NULL; } EXPORT_SYMBOL(radix_tree_iter_resume); /** * radix_tree_next_chunk - find next chunk of slots for iteration * * @root: radix tree root * @iter: iterator state * @flags: RADIX_TREE_ITER_* flags and tag index * Returns: pointer to chunk first slot, or NULL if iteration is over */ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root, struct radix_tree_iter *iter, unsigned flags) { unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; struct radix_tree_node *node, *child; unsigned long index, offset, maxindex; if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag)) return NULL; /* * Catch next_index overflow after ~0UL. iter->index never overflows * during iterating; it can be zero only at the beginning. * And we cannot overflow iter->next_index in a single step, * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG. 
* * This condition also used by radix_tree_next_slot() to stop * contiguous iterating, and forbid switching to the next chunk. */ index = iter->next_index; if (!index && iter->index) return NULL; restart: radix_tree_load_root(root, &child, &maxindex); if (index > maxindex) return NULL; if (!child) return NULL; if (!radix_tree_is_internal_node(child)) { /* Single-slot tree */ iter->index = index; iter->next_index = maxindex + 1; iter->tags = 1; iter->node = NULL; return (void __rcu **)&root->xa_head; } do { node = entry_to_node(child); offset = radix_tree_descend(node, &child, index); if ((flags & RADIX_TREE_ITER_TAGGED) ? !tag_get(node, tag, offset) : !child) { /* Hole detected */ if (flags & RADIX_TREE_ITER_CONTIG) return NULL; if (flags & RADIX_TREE_ITER_TAGGED) offset = radix_tree_find_next_bit(node, tag, offset + 1); else while (++offset < RADIX_TREE_MAP_SIZE) { void *slot = rcu_dereference_raw( node->slots[offset]); if (slot) break; } index &= ~node_maxindex(node); index += offset << node->shift; /* Overflow after ~0UL */ if (!index) return NULL; if (offset == RADIX_TREE_MAP_SIZE) goto restart; child = rcu_dereference_raw(node->slots[offset]); } if (!child) goto restart; if (child == RADIX_TREE_RETRY) break; } while (node->shift && radix_tree_is_internal_node(child)); /* Update the iterator state */ iter->index = (index &~ node_maxindex(node)) | offset; iter->next_index = (index | node_maxindex(node)) + 1; iter->node = node; if (flags & RADIX_TREE_ITER_TAGGED) set_iter_tags(iter, node, offset, tag); return node->slots + offset; } EXPORT_SYMBOL(radix_tree_next_chunk); /** * radix_tree_gang_lookup - perform multiple lookup on a radix tree * @root: radix tree root * @results: where the results of the lookup are placed * @first_index: start the lookup from this key * @max_items: place up to this many items at *results * * Performs an index-ascending scan of the tree for present items. Places * them at *@results and returns the number of items which were placed at * *@results. * * The implementation is naive. * * Like radix_tree_lookup, radix_tree_gang_lookup may be called under * rcu_read_lock. In this case, rather than the returned results being * an atomic snapshot of the tree at a single point in time, the * semantics of an RCU protected gang lookup are as though multiple * radix_tree_lookups have been issued in individual locks, and results * stored in 'results'. */ unsigned int radix_tree_gang_lookup(const struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items) { struct radix_tree_iter iter; void __rcu **slot; unsigned int ret = 0; if (unlikely(!max_items)) return 0; radix_tree_for_each_slot(slot, root, &iter, first_index) { results[ret] = rcu_dereference_raw(*slot); if (!results[ret]) continue; if (radix_tree_is_internal_node(results[ret])) { slot = radix_tree_iter_retry(&iter); continue; } if (++ret == max_items) break; } return ret; } EXPORT_SYMBOL(radix_tree_gang_lookup); /** * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree * based on a tag * @root: radix tree root * @results: where the results of the lookup are placed * @first_index: start the lookup from this key * @max_items: place up to this many items at *results * @tag: the tag index (< RADIX_TREE_MAX_TAGS) * * Performs an index-ascending scan of the tree for present items which * have the tag indexed by @tag set. Places the items at *@results and * returns the number of items which were placed at *@results. 
*/ unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items, unsigned int tag) { struct radix_tree_iter iter; void __rcu **slot; unsigned int ret = 0; if (unlikely(!max_items)) return 0; radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { results[ret] = rcu_dereference_raw(*slot); if (!results[ret]) continue; if (radix_tree_is_internal_node(results[ret])) { slot = radix_tree_iter_retry(&iter); continue; } if (++ret == max_items) break; } return ret; } EXPORT_SYMBOL(radix_tree_gang_lookup_tag); /** * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a * radix tree based on a tag * @root: radix tree root * @results: where the results of the lookup are placed * @first_index: start the lookup from this key * @max_items: place up to this many items at *results * @tag: the tag index (< RADIX_TREE_MAX_TAGS) * * Performs an index-ascending scan of the tree for present items which * have the tag indexed by @tag set. Places the slots at *@results and * returns the number of slots which were placed at *@results. */ unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root, void __rcu ***results, unsigned long first_index, unsigned int max_items, unsigned int tag) { struct radix_tree_iter iter; void __rcu **slot; unsigned int ret = 0; if (unlikely(!max_items)) return 0; radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { results[ret] = slot; if (++ret == max_items) break; } return ret; } EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); static bool __radix_tree_delete(struct radix_tree_root *root, struct radix_tree_node *node, void __rcu **slot) { void *old = rcu_dereference_raw(*slot); int values = xa_is_value(old) ? -1 : 0; unsigned offset = get_slot_offset(node, slot); int tag; if (is_idr(root)) node_tag_set(root, node, IDR_FREE, offset); else for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) node_tag_clear(root, node, tag, offset); replace_slot(slot, NULL, node, -1, values); return node && delete_node(root, node); } /** * radix_tree_iter_delete - delete the entry at this iterator position * @root: radix tree root * @iter: iterator state * @slot: pointer to slot * * Delete the entry at the position currently pointed to by the iterator. * This may result in the current node being freed; if it is, the iterator * is advanced so that it will not reference the freed memory. This * function may be called without any locking if there are no other threads * which can access this tree. */ void radix_tree_iter_delete(struct radix_tree_root *root, struct radix_tree_iter *iter, void __rcu **slot) { if (__radix_tree_delete(root, iter->node, slot)) iter->index = iter->next_index; } EXPORT_SYMBOL(radix_tree_iter_delete); /** * radix_tree_delete_item - delete an item from a radix tree * @root: radix tree root * @index: index key * @item: expected item * * Remove @item at @index from the radix tree rooted at @root. * * Return: the deleted entry, or %NULL if it was not present * or the entry at the given @index was not @item. 
*/ void *radix_tree_delete_item(struct radix_tree_root *root, unsigned long index, void *item) { struct radix_tree_node *node = NULL; void __rcu **slot = NULL; void *entry; entry = __radix_tree_lookup(root, index, &node, &slot); if (!slot) return NULL; if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE, get_slot_offset(node, slot)))) return NULL; if (item && entry != item) return NULL; __radix_tree_delete(root, node, slot); return entry; } EXPORT_SYMBOL(radix_tree_delete_item); /** * radix_tree_delete - delete an entry from a radix tree * @root: radix tree root * @index: index key * * Remove the entry at @index from the radix tree rooted at @root. * * Return: The deleted entry, or %NULL if it was not present. */ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) { return radix_tree_delete_item(root, index, NULL); } EXPORT_SYMBOL(radix_tree_delete); /** * radix_tree_tagged - test whether any items in the tree are tagged * @root: radix tree root * @tag: tag to test */ int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag) { return root_tag_get(root, tag); } EXPORT_SYMBOL(radix_tree_tagged); /** * idr_preload - preload for idr_alloc() * @gfp_mask: allocation mask to use for preloading * * Preallocate memory to use for the next call to idr_alloc(). This function * returns with preemption disabled. It will be enabled by idr_preload_end(). */ void idr_preload(gfp_t gfp_mask) { if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE)) local_lock(&radix_tree_preloads.lock); } EXPORT_SYMBOL(idr_preload); void __rcu **idr_get_free(struct radix_tree_root *root, struct radix_tree_iter *iter, gfp_t gfp, unsigned long max) { struct radix_tree_node *node = NULL, *child; void __rcu **slot = (void __rcu **)&root->xa_head; unsigned long maxindex, start = iter->next_index; unsigned int shift, offset = 0; grow: shift = radix_tree_load_root(root, &child, &maxindex); if (!radix_tree_tagged(root, IDR_FREE)) start = max(start, maxindex + 1); if (start > max) return ERR_PTR(-ENOSPC); if (start > maxindex) { int error = radix_tree_extend(root, gfp, start, shift); if (error < 0) return ERR_PTR(error); shift = error; child = rcu_dereference_raw(root->xa_head); } if (start == 0 && shift == 0) shift = RADIX_TREE_MAP_SHIFT; while (shift) { shift -= RADIX_TREE_MAP_SHIFT; if (child == NULL) { /* Have to add a child node. */ child = radix_tree_node_alloc(gfp, node, root, shift, offset, 0, 0); if (!child) return ERR_PTR(-ENOMEM); all_tag_set(child, IDR_FREE); rcu_assign_pointer(*slot, node_to_entry(child)); if (node) node->count++; } else if (!radix_tree_is_internal_node(child)) break; node = entry_to_node(child); offset = radix_tree_descend(node, &child, start); if (!tag_get(node, IDR_FREE, offset)) { offset = radix_tree_find_next_bit(node, IDR_FREE, offset + 1); start = next_index(start, node, offset); if (start > max || start == 0) return ERR_PTR(-ENOSPC); while (offset == RADIX_TREE_MAP_SIZE) { offset = node->offset + 1; node = node->parent; if (!node) goto grow; shift = node->shift; } child = rcu_dereference_raw(node->slots[offset]); } slot = &node->slots[offset]; } iter->index = start; if (node) iter->next_index = 1 + min(max, (start | node_maxindex(node))); else iter->next_index = 1; iter->node = node; set_iter_tags(iter, node, offset, IDR_FREE); return slot; } /** * idr_destroy - release all internal memory from an IDR * @idr: idr handle * * After this function is called, the IDR is empty, and may be reused or * the data structure containing it may be freed. 
* * A typical clean-up sequence for objects stored in an idr tree will use * idr_for_each() to free all objects, if necessary, then idr_destroy() to * free the memory used to keep track of those objects. */ void idr_destroy(struct idr *idr) { struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head); if (radix_tree_is_internal_node(node)) radix_tree_free_nodes(node); idr->idr_rt.xa_head = NULL; root_tag_set(&idr->idr_rt, IDR_FREE); } EXPORT_SYMBOL(idr_destroy); static void radix_tree_node_ctor(void *arg) { struct radix_tree_node *node = arg; memset(node, 0, sizeof(*node)); INIT_LIST_HEAD(&node->private_list); } static int radix_tree_cpu_dead(unsigned int cpu) { struct radix_tree_preload *rtp; struct radix_tree_node *node; /* Free per-cpu pool of preloaded nodes */ rtp = &per_cpu(radix_tree_preloads, cpu); while (rtp->nr) { node = rtp->nodes; rtp->nodes = node->parent; kmem_cache_free(radix_tree_node_cachep, node); rtp->nr--; } return 0; } void __init radix_tree_init(void) { int ret; BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32); BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK); BUILD_BUG_ON(XA_CHUNK_SIZE > 255); radix_tree_node_cachep = kmem_cache_create("radix_tree_node", sizeof(struct radix_tree_node), 0, SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, radix_tree_node_ctor); ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead", NULL, radix_tree_cpu_dead); WARN_ON(ret < 0); }
linux-master
lib/radix-tree.c
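A minimal usage sketch of the tagged-lookup path documented above, assuming a caller that serialises its own modifications. The tree name and helper function are hypothetical and do not appear in lib/radix-tree.c.

/*
 * Hypothetical sketch (not from lib/radix-tree.c): insert two entries,
 * tag them with tag 0, then collect them via radix_tree_gang_lookup_tag().
 * Real callers must serialise modifications with their own lock.
 */
#include <linux/radix-tree.h>
#include <linux/kernel.h>
#include <linux/printk.h>

static RADIX_TREE(demo_tree, GFP_KERNEL);	/* "demo_tree" is illustrative */

static void demo_tagged_lookup(void *a, void *b)
{
	void *results[4];
	unsigned int i, n;

	if (radix_tree_insert(&demo_tree, 10, a) ||
	    radix_tree_insert(&demo_tree, 20, b))
		return;				/* allocation failed */

	radix_tree_tag_set(&demo_tree, 10, 0);
	radix_tree_tag_set(&demo_tree, 20, 0);

	n = radix_tree_gang_lookup_tag(&demo_tree, results, 0,
				       ARRAY_SIZE(results), 0);
	for (i = 0; i < n; i++)
		pr_info("tagged entry %u at %p\n", i, results[i]);
}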
// SPDX-License-Identifier: GPL-2.0-only
/*
 * lib/debug_locks.c
 *
 * Generic place for common debugging facilities for various locks:
 * spinlocks, rwlocks, mutexes and rwsems.
 *
 * Started by Ingo Molnar:
 *
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <[email protected]>
 */
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>

/*
 * We want to turn all lock-debugging facilities on/off at once,
 * via a global flag. The reason is that once a single bug has been
 * detected and reported, there might be cascade of followup bugs
 * that would just muddy the log. So we report the first one and
 * shut up after that.
 */
int debug_locks __read_mostly = 1;
EXPORT_SYMBOL_GPL(debug_locks);

/*
 * The locking-testsuite uses <debug_locks_silent> to get a
 * 'silent failure': nothing is printed to the console when
 * a locking bug is detected.
 */
int debug_locks_silent __read_mostly;
EXPORT_SYMBOL_GPL(debug_locks_silent);

/*
 * Generic 'turn off all lock debugging' function:
 */
int debug_locks_off(void)
{
	if (debug_locks && __debug_locks_off()) {
		if (!debug_locks_silent) {
			console_verbose();
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_locks_off);
linux-master
lib/debug_locks.c
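As a hedged illustration of how the global flag above is typically consumed, a hypothetical lock-debug check might look like the sketch below; the function name and message are invented and are not part of lib/debug_locks.c.

/*
 * Hypothetical sketch: report only the first detected locking bug and
 * honour debug_locks_silent, exactly as debug_locks_off() arranges.
 */
#include <linux/debug_locks.h>
#include <linux/printk.h>

static void demo_report_bad_unlock(const char *name)
{
	/* Returns 1 only for the first, non-silent failure. */
	if (!debug_locks_off())
		return;

	pr_err("BUG: bad unlock balance detected on %s\n", name);
}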
// SPDX-License-Identifier: GPL-2.0-only /* * Test cases for bitmap API. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/bitmap.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/printk.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/uaccess.h> #include "../tools/testing/selftests/kselftest_module.h" #define EXP1_IN_BITS (sizeof(exp1) * 8) KSTM_MODULE_GLOBALS(); static char pbl_buffer[PAGE_SIZE] __initdata; static char print_buf[PAGE_SIZE * 2] __initdata; static const unsigned long exp1[] __initconst = { BITMAP_FROM_U64(1), BITMAP_FROM_U64(2), BITMAP_FROM_U64(0x0000ffff), BITMAP_FROM_U64(0xffff0000), BITMAP_FROM_U64(0x55555555), BITMAP_FROM_U64(0xaaaaaaaa), BITMAP_FROM_U64(0x11111111), BITMAP_FROM_U64(0x22222222), BITMAP_FROM_U64(0xffffffff), BITMAP_FROM_U64(0xfffffffe), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0xffffffff77777777ULL), BITMAP_FROM_U64(0), BITMAP_FROM_U64(0x00008000), BITMAP_FROM_U64(0x80000000), }; static const unsigned long exp2[] __initconst = { BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0xffffffff77777777ULL), }; /* Fibonacci sequence */ static const unsigned long exp2_to_exp3_mask[] __initconst = { BITMAP_FROM_U64(0x008000020020212eULL), }; /* exp3_0_1 = (exp2[0] & ~exp2_to_exp3_mask) | (exp2[1] & exp2_to_exp3_mask) */ static const unsigned long exp3_0_1[] __initconst = { BITMAP_FROM_U64(0x33b3333311313137ULL), }; /* exp3_1_0 = (exp2[1] & ~exp2_to_exp3_mask) | (exp2[0] & exp2_to_exp3_mask) */ static const unsigned long exp3_1_0[] __initconst = { BITMAP_FROM_U64(0xff7fffff77575751ULL), }; static bool __init __check_eq_uint(const char *srcfile, unsigned int line, const unsigned int exp_uint, unsigned int x) { if (exp_uint != x) { pr_err("[%s:%u] expected %u, got %u\n", srcfile, line, exp_uint, x); return false; } return true; } static bool __init __check_eq_bitmap(const char *srcfile, unsigned int line, const unsigned long *exp_bmap, const unsigned long *bmap, unsigned int nbits) { if (!bitmap_equal(exp_bmap, bmap, nbits)) { pr_warn("[%s:%u] bitmaps contents differ: expected \"%*pbl\", got \"%*pbl\"\n", srcfile, line, nbits, exp_bmap, nbits, bmap); return false; } return true; } static bool __init __check_eq_pbl(const char *srcfile, unsigned int line, const char *expected_pbl, const unsigned long *bitmap, unsigned int nbits) { snprintf(pbl_buffer, sizeof(pbl_buffer), "%*pbl", nbits, bitmap); if (strcmp(expected_pbl, pbl_buffer)) { pr_warn("[%s:%u] expected \"%s\", got \"%s\"\n", srcfile, line, expected_pbl, pbl_buffer); return false; } return true; } static bool __init __check_eq_u32_array(const char *srcfile, unsigned int line, const u32 *exp_arr, unsigned int exp_len, const u32 *arr, unsigned int len) __used; static bool __init __check_eq_u32_array(const char *srcfile, unsigned int line, const u32 *exp_arr, unsigned int exp_len, const u32 *arr, unsigned int len) { if (exp_len != len) { pr_warn("[%s:%u] array length differ: expected %u, got %u\n", srcfile, line, exp_len, len); return false; } if (memcmp(exp_arr, arr, len*sizeof(*arr))) { pr_warn("[%s:%u] array contents differ\n", srcfile, line); print_hex_dump(KERN_WARNING, " exp: ", DUMP_PREFIX_OFFSET, 32, 4, exp_arr, exp_len*sizeof(*exp_arr), false); print_hex_dump(KERN_WARNING, " got: ", DUMP_PREFIX_OFFSET, 32, 4, arr, len*sizeof(*arr), false); return false; } return true; } static bool __init __check_eq_clump8(const char *srcfile, unsigned int line, const unsigned int offset, const unsigned int size, const 
unsigned char *const clump_exp, const unsigned long *const clump) { unsigned long exp; if (offset >= size) { pr_warn("[%s:%u] bit offset for clump out-of-bounds: expected less than %u, got %u\n", srcfile, line, size, offset); return false; } exp = clump_exp[offset / 8]; if (!exp) { pr_warn("[%s:%u] bit offset for zero clump: expected nonzero clump, got bit offset %u with clump value 0", srcfile, line, offset); return false; } if (*clump != exp) { pr_warn("[%s:%u] expected clump value of 0x%lX, got clump value of 0x%lX", srcfile, line, exp, *clump); return false; } return true; } static bool __init __check_eq_str(const char *srcfile, unsigned int line, const char *exp_str, const char *str, unsigned int len) { bool eq; eq = strncmp(exp_str, str, len) == 0; if (!eq) pr_err("[%s:%u] expected %s, got %s\n", srcfile, line, exp_str, str); return eq; } #define __expect_eq(suffix, ...) \ ({ \ int result = 0; \ total_tests++; \ if (!__check_eq_ ## suffix(__FILE__, __LINE__, \ ##__VA_ARGS__)) { \ failed_tests++; \ result = 1; \ } \ result; \ }) #define expect_eq_uint(...) __expect_eq(uint, ##__VA_ARGS__) #define expect_eq_bitmap(...) __expect_eq(bitmap, ##__VA_ARGS__) #define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__) #define expect_eq_u32_array(...) __expect_eq(u32_array, ##__VA_ARGS__) #define expect_eq_clump8(...) __expect_eq(clump8, ##__VA_ARGS__) #define expect_eq_str(...) __expect_eq(str, ##__VA_ARGS__) static void __init test_zero_clear(void) { DECLARE_BITMAP(bmap, 1024); /* Known way to set all bits */ memset(bmap, 0xff, 128); expect_eq_pbl("0-22", bmap, 23); expect_eq_pbl("0-1023", bmap, 1024); /* single-word bitmaps */ bitmap_clear(bmap, 0, 9); expect_eq_pbl("9-1023", bmap, 1024); bitmap_zero(bmap, 35); expect_eq_pbl("64-1023", bmap, 1024); /* cross boundaries operations */ bitmap_clear(bmap, 79, 19); expect_eq_pbl("64-78,98-1023", bmap, 1024); bitmap_zero(bmap, 115); expect_eq_pbl("128-1023", bmap, 1024); /* Zeroing entire area */ bitmap_zero(bmap, 1024); expect_eq_pbl("", bmap, 1024); } static void __init test_find_nth_bit(void) { unsigned long b, bit, cnt = 0; DECLARE_BITMAP(bmap, 64 * 3); bitmap_zero(bmap, 64 * 3); __set_bit(10, bmap); __set_bit(20, bmap); __set_bit(30, bmap); __set_bit(40, bmap); __set_bit(50, bmap); __set_bit(60, bmap); __set_bit(80, bmap); __set_bit(123, bmap); expect_eq_uint(10, find_nth_bit(bmap, 64 * 3, 0)); expect_eq_uint(20, find_nth_bit(bmap, 64 * 3, 1)); expect_eq_uint(30, find_nth_bit(bmap, 64 * 3, 2)); expect_eq_uint(40, find_nth_bit(bmap, 64 * 3, 3)); expect_eq_uint(50, find_nth_bit(bmap, 64 * 3, 4)); expect_eq_uint(60, find_nth_bit(bmap, 64 * 3, 5)); expect_eq_uint(80, find_nth_bit(bmap, 64 * 3, 6)); expect_eq_uint(123, find_nth_bit(bmap, 64 * 3, 7)); expect_eq_uint(64 * 3, find_nth_bit(bmap, 64 * 3, 8)); expect_eq_uint(10, find_nth_bit(bmap, 64 * 3 - 1, 0)); expect_eq_uint(20, find_nth_bit(bmap, 64 * 3 - 1, 1)); expect_eq_uint(30, find_nth_bit(bmap, 64 * 3 - 1, 2)); expect_eq_uint(40, find_nth_bit(bmap, 64 * 3 - 1, 3)); expect_eq_uint(50, find_nth_bit(bmap, 64 * 3 - 1, 4)); expect_eq_uint(60, find_nth_bit(bmap, 64 * 3 - 1, 5)); expect_eq_uint(80, find_nth_bit(bmap, 64 * 3 - 1, 6)); expect_eq_uint(123, find_nth_bit(bmap, 64 * 3 - 1, 7)); expect_eq_uint(64 * 3 - 1, find_nth_bit(bmap, 64 * 3 - 1, 8)); for_each_set_bit(bit, exp1, EXP1_IN_BITS) { b = find_nth_bit(exp1, EXP1_IN_BITS, cnt++); expect_eq_uint(b, bit); } } static void __init test_fill_set(void) { DECLARE_BITMAP(bmap, 1024); /* Known way to clear all bits */ memset(bmap, 0x00, 128); 
expect_eq_pbl("", bmap, 23); expect_eq_pbl("", bmap, 1024); /* single-word bitmaps */ bitmap_set(bmap, 0, 9); expect_eq_pbl("0-8", bmap, 1024); bitmap_fill(bmap, 35); expect_eq_pbl("0-63", bmap, 1024); /* cross boundaries operations */ bitmap_set(bmap, 79, 19); expect_eq_pbl("0-63,79-97", bmap, 1024); bitmap_fill(bmap, 115); expect_eq_pbl("0-127", bmap, 1024); /* Zeroing entire area */ bitmap_fill(bmap, 1024); expect_eq_pbl("0-1023", bmap, 1024); } static void __init test_copy(void) { DECLARE_BITMAP(bmap1, 1024); DECLARE_BITMAP(bmap2, 1024); bitmap_zero(bmap1, 1024); bitmap_zero(bmap2, 1024); /* single-word bitmaps */ bitmap_set(bmap1, 0, 19); bitmap_copy(bmap2, bmap1, 23); expect_eq_pbl("0-18", bmap2, 1024); bitmap_set(bmap2, 0, 23); bitmap_copy(bmap2, bmap1, 23); expect_eq_pbl("0-18", bmap2, 1024); /* multi-word bitmaps */ bitmap_set(bmap1, 0, 109); bitmap_copy(bmap2, bmap1, 1024); expect_eq_pbl("0-108", bmap2, 1024); bitmap_fill(bmap2, 1024); bitmap_copy(bmap2, bmap1, 1024); expect_eq_pbl("0-108", bmap2, 1024); /* the following tests assume a 32- or 64-bit arch (even 128b * if we care) */ bitmap_fill(bmap2, 1024); bitmap_copy(bmap2, bmap1, 109); /* ... but 0-padded til word length */ expect_eq_pbl("0-108,128-1023", bmap2, 1024); bitmap_fill(bmap2, 1024); bitmap_copy(bmap2, bmap1, 97); /* ... but aligned on word length */ expect_eq_pbl("0-108,128-1023", bmap2, 1024); } #define EXP2_IN_BITS (sizeof(exp2) * 8) static void __init test_replace(void) { unsigned int nbits = 64; unsigned int nlongs = DIV_ROUND_UP(nbits, BITS_PER_LONG); DECLARE_BITMAP(bmap, 1024); BUILD_BUG_ON(EXP2_IN_BITS < nbits * 2); bitmap_zero(bmap, 1024); bitmap_replace(bmap, &exp2[0 * nlongs], &exp2[1 * nlongs], exp2_to_exp3_mask, nbits); expect_eq_bitmap(bmap, exp3_0_1, nbits); bitmap_zero(bmap, 1024); bitmap_replace(bmap, &exp2[1 * nlongs], &exp2[0 * nlongs], exp2_to_exp3_mask, nbits); expect_eq_bitmap(bmap, exp3_1_0, nbits); bitmap_fill(bmap, 1024); bitmap_replace(bmap, &exp2[0 * nlongs], &exp2[1 * nlongs], exp2_to_exp3_mask, nbits); expect_eq_bitmap(bmap, exp3_0_1, nbits); bitmap_fill(bmap, 1024); bitmap_replace(bmap, &exp2[1 * nlongs], &exp2[0 * nlongs], exp2_to_exp3_mask, nbits); expect_eq_bitmap(bmap, exp3_1_0, nbits); } #define PARSE_TIME 0x1 #define NO_LEN 0x2 struct test_bitmap_parselist{ const int errno; const char *in; const unsigned long *expected; const int nbits; const int flags; }; static const struct test_bitmap_parselist parselist_tests[] __initconst = { #define step (sizeof(u64) / sizeof(unsigned long)) {0, "0", &exp1[0], 8, 0}, {0, "1", &exp1[1 * step], 8, 0}, {0, "0-15", &exp1[2 * step], 32, 0}, {0, "16-31", &exp1[3 * step], 32, 0}, {0, "0-31:1/2", &exp1[4 * step], 32, 0}, {0, "1-31:1/2", &exp1[5 * step], 32, 0}, {0, "0-31:1/4", &exp1[6 * step], 32, 0}, {0, "1-31:1/4", &exp1[7 * step], 32, 0}, {0, "0-31:4/4", &exp1[8 * step], 32, 0}, {0, "1-31:4/4", &exp1[9 * step], 32, 0}, {0, "0-31:1/4,32-63:2/4", &exp1[10 * step], 64, 0}, {0, "0-31:3/4,32-63:4/4", &exp1[11 * step], 64, 0}, {0, " ,, 0-31:3/4 ,, 32-63:4/4 ,, ", &exp1[11 * step], 64, 0}, {0, "0-31:1/4,32-63:2/4,64-95:3/4,96-127:4/4", exp2, 128, 0}, {0, "0-2047:128/256", NULL, 2048, PARSE_TIME}, {0, "", &exp1[12 * step], 8, 0}, {0, "\n", &exp1[12 * step], 8, 0}, {0, ",, ,, , , ,", &exp1[12 * step], 8, 0}, {0, " , ,, , , ", &exp1[12 * step], 8, 0}, {0, " , ,, , , \n", &exp1[12 * step], 8, 0}, {0, "0-0", &exp1[0], 32, 0}, {0, "1-1", &exp1[1 * step], 32, 0}, {0, "15-15", &exp1[13 * step], 32, 0}, {0, "31-31", &exp1[14 * step], 32, 0}, {0, "0-0:0/1", 
&exp1[12 * step], 32, 0}, {0, "0-0:1/1", &exp1[0], 32, 0}, {0, "0-0:1/31", &exp1[0], 32, 0}, {0, "0-0:31/31", &exp1[0], 32, 0}, {0, "1-1:1/1", &exp1[1 * step], 32, 0}, {0, "0-15:16/31", &exp1[2 * step], 32, 0}, {0, "15-15:1/2", &exp1[13 * step], 32, 0}, {0, "15-15:31/31", &exp1[13 * step], 32, 0}, {0, "15-31:1/31", &exp1[13 * step], 32, 0}, {0, "16-31:16/31", &exp1[3 * step], 32, 0}, {0, "31-31:31/31", &exp1[14 * step], 32, 0}, {0, "N-N", &exp1[14 * step], 32, 0}, {0, "0-0:1/N", &exp1[0], 32, 0}, {0, "0-0:N/N", &exp1[0], 32, 0}, {0, "0-15:16/N", &exp1[2 * step], 32, 0}, {0, "15-15:N/N", &exp1[13 * step], 32, 0}, {0, "15-N:1/N", &exp1[13 * step], 32, 0}, {0, "16-N:16/N", &exp1[3 * step], 32, 0}, {0, "N-N:N/N", &exp1[14 * step], 32, 0}, {0, "0-N:1/3,1-N:1/3,2-N:1/3", &exp1[8 * step], 32, 0}, {0, "0-31:1/3,1-31:1/3,2-31:1/3", &exp1[8 * step], 32, 0}, {0, "1-10:8/12,8-31:24/29,0-31:0/3", &exp1[9 * step], 32, 0}, {0, "all", &exp1[8 * step], 32, 0}, {0, "0, 1, all, ", &exp1[8 * step], 32, 0}, {0, "all:1/2", &exp1[4 * step], 32, 0}, {0, "ALL:1/2", &exp1[4 * step], 32, 0}, {-EINVAL, "al", NULL, 8, 0}, {-EINVAL, "alll", NULL, 8, 0}, {-EINVAL, "-1", NULL, 8, 0}, {-EINVAL, "-0", NULL, 8, 0}, {-EINVAL, "10-1", NULL, 8, 0}, {-ERANGE, "8-8", NULL, 8, 0}, {-ERANGE, "0-31", NULL, 8, 0}, {-EINVAL, "0-31:", NULL, 32, 0}, {-EINVAL, "0-31:0", NULL, 32, 0}, {-EINVAL, "0-31:0/", NULL, 32, 0}, {-EINVAL, "0-31:0/0", NULL, 32, 0}, {-EINVAL, "0-31:1/0", NULL, 32, 0}, {-EINVAL, "0-31:10/1", NULL, 32, 0}, {-EOVERFLOW, "0-98765432123456789:10/1", NULL, 8, 0}, {-EINVAL, "a-31", NULL, 8, 0}, {-EINVAL, "0-a1", NULL, 8, 0}, {-EINVAL, "a-31:10/1", NULL, 8, 0}, {-EINVAL, "0-31:a/1", NULL, 8, 0}, {-EINVAL, "0-\n", NULL, 8, 0}, }; static void __init test_bitmap_parselist(void) { int i; int err; ktime_t time; DECLARE_BITMAP(bmap, 2048); for (i = 0; i < ARRAY_SIZE(parselist_tests); i++) { #define ptest parselist_tests[i] time = ktime_get(); err = bitmap_parselist(ptest.in, bmap, ptest.nbits); time = ktime_get() - time; if (err != ptest.errno) { pr_err("parselist: %d: input is %s, errno is %d, expected %d\n", i, ptest.in, err, ptest.errno); failed_tests++; continue; } if (!err && ptest.expected && !__bitmap_equal(bmap, ptest.expected, ptest.nbits)) { pr_err("parselist: %d: input is %s, result is 0x%lx, expected 0x%lx\n", i, ptest.in, bmap[0], *ptest.expected); failed_tests++; continue; } if (ptest.flags & PARSE_TIME) pr_err("parselist: %d: input is '%s' OK, Time: %llu\n", i, ptest.in, time); #undef ptest } } static void __init test_bitmap_printlist(void) { unsigned long *bmap = kmalloc(PAGE_SIZE, GFP_KERNEL); char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL); char expected[256]; int ret, slen; ktime_t time; if (!buf || !bmap) goto out; memset(bmap, -1, PAGE_SIZE); slen = snprintf(expected, 256, "0-%ld", PAGE_SIZE * 8 - 1); if (slen < 0) goto out; time = ktime_get(); ret = bitmap_print_to_pagebuf(true, buf, bmap, PAGE_SIZE * 8); time = ktime_get() - time; if (ret != slen + 1) { pr_err("bitmap_print_to_pagebuf: result is %d, expected %d\n", ret, slen); failed_tests++; goto out; } if (strncmp(buf, expected, slen)) { pr_err("bitmap_print_to_pagebuf: result is %s, expected %s\n", buf, expected); failed_tests++; goto out; } pr_err("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time); out: kfree(buf); kfree(bmap); } static const unsigned long parse_test[] __initconst = { BITMAP_FROM_U64(0), BITMAP_FROM_U64(1), BITMAP_FROM_U64(0xdeadbeef), BITMAP_FROM_U64(0x100000000ULL), }; static const unsigned long parse_test2[] __initconst = 
{ BITMAP_FROM_U64(0x100000000ULL), BITMAP_FROM_U64(0xdeadbeef), BITMAP_FROM_U64(0x100000000ULL), BITMAP_FROM_U64(0xbaadf00ddeadbeef), BITMAP_FROM_U64(0x100000000ULL), BITMAP_FROM_U64(0x0badf00ddeadbeef), }; static const struct test_bitmap_parselist parse_tests[] __initconst = { {0, "", &parse_test[0 * step], 32, 0}, {0, " ", &parse_test[0 * step], 32, 0}, {0, "0", &parse_test[0 * step], 32, 0}, {0, "0\n", &parse_test[0 * step], 32, 0}, {0, "1", &parse_test[1 * step], 32, 0}, {0, "deadbeef", &parse_test[2 * step], 32, 0}, {0, "1,0", &parse_test[3 * step], 33, 0}, {0, "deadbeef,\n,0,1", &parse_test[2 * step], 96, 0}, {0, "deadbeef,1,0", &parse_test2[0 * 2 * step], 96, 0}, {0, "baadf00d,deadbeef,1,0", &parse_test2[1 * 2 * step], 128, 0}, {0, "badf00d,deadbeef,1,0", &parse_test2[2 * 2 * step], 124, 0}, {0, "badf00d,deadbeef,1,0", &parse_test2[2 * 2 * step], 124, NO_LEN}, {0, " badf00d,deadbeef,1,0 ", &parse_test2[2 * 2 * step], 124, 0}, {0, " , badf00d,deadbeef,1,0 , ", &parse_test2[2 * 2 * step], 124, 0}, {0, " , badf00d, ,, ,,deadbeef,1,0 , ", &parse_test2[2 * 2 * step], 124, 0}, {-EINVAL, "goodfood,deadbeef,1,0", NULL, 128, 0}, {-EOVERFLOW, "3,0", NULL, 33, 0}, {-EOVERFLOW, "123badf00d,deadbeef,1,0", NULL, 128, 0}, {-EOVERFLOW, "badf00d,deadbeef,1,0", NULL, 90, 0}, {-EOVERFLOW, "fbadf00d,deadbeef,1,0", NULL, 95, 0}, {-EOVERFLOW, "badf00d,deadbeef,1,0", NULL, 100, 0}, #undef step }; static void __init test_bitmap_parse(void) { int i; int err; ktime_t time; DECLARE_BITMAP(bmap, 2048); for (i = 0; i < ARRAY_SIZE(parse_tests); i++) { struct test_bitmap_parselist test = parse_tests[i]; size_t len = test.flags & NO_LEN ? UINT_MAX : strlen(test.in); time = ktime_get(); err = bitmap_parse(test.in, len, bmap, test.nbits); time = ktime_get() - time; if (err != test.errno) { pr_err("parse: %d: input is %s, errno is %d, expected %d\n", i, test.in, err, test.errno); failed_tests++; continue; } if (!err && test.expected && !__bitmap_equal(bmap, test.expected, test.nbits)) { pr_err("parse: %d: input is %s, result is 0x%lx, expected 0x%lx\n", i, test.in, bmap[0], *test.expected); failed_tests++; continue; } if (test.flags & PARSE_TIME) pr_err("parse: %d: input is '%s' OK, Time: %llu\n", i, test.in, time); } } static void __init test_bitmap_arr32(void) { unsigned int nbits, next_bit; u32 arr[EXP1_IN_BITS / 32]; DECLARE_BITMAP(bmap2, EXP1_IN_BITS); memset(arr, 0xa5, sizeof(arr)); for (nbits = 0; nbits < EXP1_IN_BITS; ++nbits) { bitmap_to_arr32(arr, exp1, nbits); bitmap_from_arr32(bmap2, arr, nbits); expect_eq_bitmap(bmap2, exp1, nbits); next_bit = find_next_bit(bmap2, round_up(nbits, BITS_PER_LONG), nbits); if (next_bit < round_up(nbits, BITS_PER_LONG)) { pr_err("bitmap_copy_arr32(nbits == %d:" " tail is not safely cleared: %d\n", nbits, next_bit); failed_tests++; } if (nbits < EXP1_IN_BITS - 32) expect_eq_uint(arr[DIV_ROUND_UP(nbits, 32)], 0xa5a5a5a5); } } static void __init test_bitmap_arr64(void) { unsigned int nbits, next_bit; u64 arr[EXP1_IN_BITS / 64]; DECLARE_BITMAP(bmap2, EXP1_IN_BITS); memset(arr, 0xa5, sizeof(arr)); for (nbits = 0; nbits < EXP1_IN_BITS; ++nbits) { memset(bmap2, 0xff, sizeof(arr)); bitmap_to_arr64(arr, exp1, nbits); bitmap_from_arr64(bmap2, arr, nbits); expect_eq_bitmap(bmap2, exp1, nbits); next_bit = find_next_bit(bmap2, round_up(nbits, BITS_PER_LONG), nbits); if (next_bit < round_up(nbits, BITS_PER_LONG)) { pr_err("bitmap_copy_arr64(nbits == %d:" " tail is not safely cleared: %d\n", nbits, next_bit); failed_tests++; } if ((nbits % 64) && (arr[(nbits - 1) / 64] & 
~GENMASK_ULL((nbits - 1) % 64, 0))) { pr_err("bitmap_to_arr64(nbits == %d): tail is not safely cleared: 0x%016llx (must be 0x%016llx)\n", nbits, arr[(nbits - 1) / 64], GENMASK_ULL((nbits - 1) % 64, 0)); failed_tests++; } if (nbits < EXP1_IN_BITS - 64) expect_eq_uint(arr[DIV_ROUND_UP(nbits, 64)], 0xa5a5a5a5); } } static void noinline __init test_mem_optimisations(void) { DECLARE_BITMAP(bmap1, 1024); DECLARE_BITMAP(bmap2, 1024); unsigned int start, nbits; for (start = 0; start < 1024; start += 8) { for (nbits = 0; nbits < 1024 - start; nbits += 8) { memset(bmap1, 0x5a, sizeof(bmap1)); memset(bmap2, 0x5a, sizeof(bmap2)); bitmap_set(bmap1, start, nbits); __bitmap_set(bmap2, start, nbits); if (!bitmap_equal(bmap1, bmap2, 1024)) { printk("set not equal %d %d\n", start, nbits); failed_tests++; } if (!__bitmap_equal(bmap1, bmap2, 1024)) { printk("set not __equal %d %d\n", start, nbits); failed_tests++; } bitmap_clear(bmap1, start, nbits); __bitmap_clear(bmap2, start, nbits); if (!bitmap_equal(bmap1, bmap2, 1024)) { printk("clear not equal %d %d\n", start, nbits); failed_tests++; } if (!__bitmap_equal(bmap1, bmap2, 1024)) { printk("clear not __equal %d %d\n", start, nbits); failed_tests++; } } } } static const unsigned char clump_exp[] __initconst = { 0x01, /* 1 bit set */ 0x02, /* non-edge 1 bit set */ 0x00, /* zero bits set */ 0x38, /* 3 bits set across 4-bit boundary */ 0x38, /* Repeated clump */ 0x0F, /* 4 bits set */ 0xFF, /* all bits set */ 0x05, /* non-adjacent 2 bits set */ }; static void __init test_for_each_set_clump8(void) { #define CLUMP_EXP_NUMBITS 64 DECLARE_BITMAP(bits, CLUMP_EXP_NUMBITS); unsigned int start; unsigned long clump; /* set bitmap to test case */ bitmap_zero(bits, CLUMP_EXP_NUMBITS); bitmap_set(bits, 0, 1); /* 0x01 */ bitmap_set(bits, 9, 1); /* 0x02 */ bitmap_set(bits, 27, 3); /* 0x28 */ bitmap_set(bits, 35, 3); /* 0x28 */ bitmap_set(bits, 40, 4); /* 0x0F */ bitmap_set(bits, 48, 8); /* 0xFF */ bitmap_set(bits, 56, 1); /* 0x05 - part 1 */ bitmap_set(bits, 58, 1); /* 0x05 - part 2 */ for_each_set_clump8(start, clump, bits, CLUMP_EXP_NUMBITS) expect_eq_clump8(start, CLUMP_EXP_NUMBITS, clump_exp, &clump); } static void __init test_for_each_set_bit_wrap(void) { DECLARE_BITMAP(orig, 500); DECLARE_BITMAP(copy, 500); unsigned int wr, bit; bitmap_zero(orig, 500); /* Set individual bits */ for (bit = 0; bit < 500; bit += 10) bitmap_set(orig, bit, 1); /* Set range of bits */ bitmap_set(orig, 100, 50); for (wr = 0; wr < 500; wr++) { bitmap_zero(copy, 500); for_each_set_bit_wrap(bit, orig, 500, wr) bitmap_set(copy, bit, 1); expect_eq_bitmap(orig, copy, 500); } } static void __init test_for_each_set_bit(void) { DECLARE_BITMAP(orig, 500); DECLARE_BITMAP(copy, 500); unsigned int bit; bitmap_zero(orig, 500); bitmap_zero(copy, 500); /* Set individual bits */ for (bit = 0; bit < 500; bit += 10) bitmap_set(orig, bit, 1); /* Set range of bits */ bitmap_set(orig, 100, 50); for_each_set_bit(bit, orig, 500) bitmap_set(copy, bit, 1); expect_eq_bitmap(orig, copy, 500); } static void __init test_for_each_set_bit_from(void) { DECLARE_BITMAP(orig, 500); DECLARE_BITMAP(copy, 500); unsigned int wr, bit; bitmap_zero(orig, 500); /* Set individual bits */ for (bit = 0; bit < 500; bit += 10) bitmap_set(orig, bit, 1); /* Set range of bits */ bitmap_set(orig, 100, 50); for (wr = 0; wr < 500; wr++) { DECLARE_BITMAP(tmp, 500); bitmap_zero(copy, 500); bit = wr; for_each_set_bit_from(bit, orig, 500) bitmap_set(copy, bit, 1); bitmap_copy(tmp, orig, 500); bitmap_clear(tmp, 0, wr); expect_eq_bitmap(tmp, copy, 
500); } } static void __init test_for_each_clear_bit(void) { DECLARE_BITMAP(orig, 500); DECLARE_BITMAP(copy, 500); unsigned int bit; bitmap_fill(orig, 500); bitmap_fill(copy, 500); /* Set individual bits */ for (bit = 0; bit < 500; bit += 10) bitmap_clear(orig, bit, 1); /* Set range of bits */ bitmap_clear(orig, 100, 50); for_each_clear_bit(bit, orig, 500) bitmap_clear(copy, bit, 1); expect_eq_bitmap(orig, copy, 500); } static void __init test_for_each_clear_bit_from(void) { DECLARE_BITMAP(orig, 500); DECLARE_BITMAP(copy, 500); unsigned int wr, bit; bitmap_fill(orig, 500); /* Set individual bits */ for (bit = 0; bit < 500; bit += 10) bitmap_clear(orig, bit, 1); /* Set range of bits */ bitmap_clear(orig, 100, 50); for (wr = 0; wr < 500; wr++) { DECLARE_BITMAP(tmp, 500); bitmap_fill(copy, 500); bit = wr; for_each_clear_bit_from(bit, orig, 500) bitmap_clear(copy, bit, 1); bitmap_copy(tmp, orig, 500); bitmap_set(tmp, 0, wr); expect_eq_bitmap(tmp, copy, 500); } } static void __init test_for_each_set_bitrange(void) { DECLARE_BITMAP(orig, 500); DECLARE_BITMAP(copy, 500); unsigned int s, e; bitmap_zero(orig, 500); bitmap_zero(copy, 500); /* Set individual bits */ for (s = 0; s < 500; s += 10) bitmap_set(orig, s, 1); /* Set range of bits */ bitmap_set(orig, 100, 50); for_each_set_bitrange(s, e, orig, 500) bitmap_set(copy, s, e-s); expect_eq_bitmap(orig, copy, 500); } static void __init test_for_each_clear_bitrange(void) { DECLARE_BITMAP(orig, 500); DECLARE_BITMAP(copy, 500); unsigned int s, e; bitmap_fill(orig, 500); bitmap_fill(copy, 500); /* Set individual bits */ for (s = 0; s < 500; s += 10) bitmap_clear(orig, s, 1); /* Set range of bits */ bitmap_clear(orig, 100, 50); for_each_clear_bitrange(s, e, orig, 500) bitmap_clear(copy, s, e-s); expect_eq_bitmap(orig, copy, 500); } static void __init test_for_each_set_bitrange_from(void) { DECLARE_BITMAP(orig, 500); DECLARE_BITMAP(copy, 500); unsigned int wr, s, e; bitmap_zero(orig, 500); /* Set individual bits */ for (s = 0; s < 500; s += 10) bitmap_set(orig, s, 1); /* Set range of bits */ bitmap_set(orig, 100, 50); for (wr = 0; wr < 500; wr++) { DECLARE_BITMAP(tmp, 500); bitmap_zero(copy, 500); s = wr; for_each_set_bitrange_from(s, e, orig, 500) bitmap_set(copy, s, e - s); bitmap_copy(tmp, orig, 500); bitmap_clear(tmp, 0, wr); expect_eq_bitmap(tmp, copy, 500); } } static void __init test_for_each_clear_bitrange_from(void) { DECLARE_BITMAP(orig, 500); DECLARE_BITMAP(copy, 500); unsigned int wr, s, e; bitmap_fill(orig, 500); /* Set individual bits */ for (s = 0; s < 500; s += 10) bitmap_clear(orig, s, 1); /* Set range of bits */ bitmap_set(orig, 100, 50); for (wr = 0; wr < 500; wr++) { DECLARE_BITMAP(tmp, 500); bitmap_fill(copy, 500); s = wr; for_each_clear_bitrange_from(s, e, orig, 500) bitmap_clear(copy, s, e - s); bitmap_copy(tmp, orig, 500); bitmap_set(tmp, 0, wr); expect_eq_bitmap(tmp, copy, 500); } } struct test_bitmap_cut { unsigned int first; unsigned int cut; unsigned int nbits; unsigned long in[4]; unsigned long expected[4]; }; static struct test_bitmap_cut test_cut[] = { { 0, 0, 8, { 0x0000000aUL, }, { 0x0000000aUL, }, }, { 0, 0, 32, { 0xdadadeadUL, }, { 0xdadadeadUL, }, }, { 0, 3, 8, { 0x000000aaUL, }, { 0x00000015UL, }, }, { 3, 3, 8, { 0x000000aaUL, }, { 0x00000012UL, }, }, { 0, 1, 32, { 0xa5a5a5a5UL, }, { 0x52d2d2d2UL, }, }, { 0, 8, 32, { 0xdeadc0deUL, }, { 0x00deadc0UL, }, }, { 1, 1, 32, { 0x5a5a5a5aUL, }, { 0x2d2d2d2cUL, }, }, { 0, 15, 32, { 0xa5a5a5a5UL, }, { 0x00014b4bUL, }, }, { 0, 16, 32, { 0xa5a5a5a5UL, }, { 0x0000a5a5UL, }, }, { 
15, 15, 32, { 0xa5a5a5a5UL, }, { 0x000125a5UL, }, }, { 15, 16, 32, { 0xa5a5a5a5UL, }, { 0x0000a5a5UL, }, }, { 16, 15, 32, { 0xa5a5a5a5UL, }, { 0x0001a5a5UL, }, }, { BITS_PER_LONG, BITS_PER_LONG, BITS_PER_LONG, { 0xa5a5a5a5UL, 0xa5a5a5a5UL, }, { 0xa5a5a5a5UL, 0xa5a5a5a5UL, }, }, { 1, BITS_PER_LONG - 1, BITS_PER_LONG, { 0xa5a5a5a5UL, 0xa5a5a5a5UL, }, { 0x00000001UL, 0x00000001UL, }, }, { 0, BITS_PER_LONG * 2, BITS_PER_LONG * 2 + 1, { 0xa5a5a5a5UL, 0x00000001UL, 0x00000001UL, 0x00000001UL }, { 0x00000001UL, }, }, { 16, BITS_PER_LONG * 2 + 1, BITS_PER_LONG * 2 + 1 + 16, { 0x0000ffffUL, 0x5a5a5a5aUL, 0x5a5a5a5aUL, 0x5a5a5a5aUL }, { 0x2d2dffffUL, }, }, }; static void __init test_bitmap_cut(void) { unsigned long b[5], *in = &b[1], *out = &b[0]; /* Partial overlap */ int i; for (i = 0; i < ARRAY_SIZE(test_cut); i++) { struct test_bitmap_cut *t = &test_cut[i]; memcpy(in, t->in, sizeof(t->in)); bitmap_cut(out, in, t->first, t->cut, t->nbits); expect_eq_bitmap(t->expected, out, t->nbits); } } struct test_bitmap_print { const unsigned long *bitmap; unsigned long nbits; const char *mask; const char *list; }; static const unsigned long small_bitmap[] __initconst = { BITMAP_FROM_U64(0x3333333311111111ULL), }; static const char small_mask[] __initconst = "33333333,11111111\n"; static const char small_list[] __initconst = "0,4,8,12,16,20,24,28,32-33,36-37,40-41,44-45,48-49,52-53,56-57,60-61\n"; static const unsigned long large_bitmap[] __initconst = { BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL), }; static const char large_mask[] __initconst = "33333333,11111111,33333333,11111111," "33333333,11111111,33333333,11111111," "33333333,11111111,33333333,11111111," "33333333,11111111,33333333,11111111," "33333333,11111111,33333333,11111111," "33333333,11111111,33333333,11111111," "33333333,11111111,33333333,11111111," "33333333,11111111,33333333,11111111," "33333333,11111111,33333333,11111111," "33333333,11111111,33333333,11111111," "33333333,11111111,33333333,11111111," 
"33333333,11111111,33333333,11111111," "33333333,11111111,33333333,11111111," "33333333,11111111,33333333,11111111," "33333333,11111111,33333333,11111111," "33333333,11111111,33333333,11111111," "33333333,11111111,33333333,11111111," "33333333,11111111,33333333,11111111," "33333333,11111111,33333333,11111111," "33333333,11111111,33333333,11111111\n"; static const char large_list[] __initconst = /* more than 4KB */ "0,4,8,12,16,20,24,28,32-33,36-37,40-41,44-45,48-49,52-53,56-57,60-61,64,68,72,76,80,84,88,92,96-97,100-101,104-1" "05,108-109,112-113,116-117,120-121,124-125,128,132,136,140,144,148,152,156,160-161,164-165,168-169,172-173,176-1" "77,180-181,184-185,188-189,192,196,200,204,208,212,216,220,224-225,228-229,232-233,236-237,240-241,244-245,248-2" "49,252-253,256,260,264,268,272,276,280,284,288-289,292-293,296-297,300-301,304-305,308-309,312-313,316-317,320,3" "24,328,332,336,340,344,348,352-353,356-357,360-361,364-365,368-369,372-373,376-377,380-381,384,388,392,396,400,4" "04,408,412,416-417,420-421,424-425,428-429,432-433,436-437,440-441,444-445,448,452,456,460,464,468,472,476,480-4" "81,484-485,488-489,492-493,496-497,500-501,504-505,508-509,512,516,520,524,528,532,536,540,544-545,548-549,552-5" "53,556-557,560-561,564-565,568-569,572-573,576,580,584,588,592,596,600,604,608-609,612-613,616-617,620-621,624-6" "25,628-629,632-633,636-637,640,644,648,652,656,660,664,668,672-673,676-677,680-681,684-685,688-689,692-693,696-6" "97,700-701,704,708,712,716,720,724,728,732,736-737,740-741,744-745,748-749,752-753,756-757,760-761,764-765,768,7" "72,776,780,784,788,792,796,800-801,804-805,808-809,812-813,816-817,820-821,824-825,828-829,832,836,840,844,848,8" "52,856,860,864-865,868-869,872-873,876-877,880-881,884-885,888-889,892-893,896,900,904,908,912,916,920,924,928-9" "29,932-933,936-937,940-941,944-945,948-949,952-953,956-957,960,964,968,972,976,980,984,988,992-993,996-997,1000-" "1001,1004-1005,1008-1009,1012-1013,1016-1017,1020-1021,1024,1028,1032,1036,1040,1044,1048,1052,1056-1057,1060-10" "61,1064-1065,1068-1069,1072-1073,1076-1077,1080-1081,1084-1085,1088,1092,1096,1100,1104,1108,1112,1116,1120-1121" ",1124-1125,1128-1129,1132-1133,1136-1137,1140-1141,1144-1145,1148-1149,1152,1156,1160,1164,1168,1172,1176,1180,1" "184-1185,1188-1189,1192-1193,1196-1197,1200-1201,1204-1205,1208-1209,1212-1213,1216,1220,1224,1228,1232,1236,124" "0,1244,1248-1249,1252-1253,1256-1257,1260-1261,1264-1265,1268-1269,1272-1273,1276-1277,1280,1284,1288,1292,1296," "1300,1304,1308,1312-1313,1316-1317,1320-1321,1324-1325,1328-1329,1332-1333,1336-1337,1340-1341,1344,1348,1352,13" "56,1360,1364,1368,1372,1376-1377,1380-1381,1384-1385,1388-1389,1392-1393,1396-1397,1400-1401,1404-1405,1408,1412" ",1416,1420,1424,1428,1432,1436,1440-1441,1444-1445,1448-1449,1452-1453,1456-1457,1460-1461,1464-1465,1468-1469,1" "472,1476,1480,1484,1488,1492,1496,1500,1504-1505,1508-1509,1512-1513,1516-1517,1520-1521,1524-1525,1528-1529,153" "2-1533,1536,1540,1544,1548,1552,1556,1560,1564,1568-1569,1572-1573,1576-1577,1580-1581,1584-1585,1588-1589,1592-" "1593,1596-1597,1600,1604,1608,1612,1616,1620,1624,1628,1632-1633,1636-1637,1640-1641,1644-1645,1648-1649,1652-16" "53,1656-1657,1660-1661,1664,1668,1672,1676,1680,1684,1688,1692,1696-1697,1700-1701,1704-1705,1708-1709,1712-1713" ",1716-1717,1720-1721,1724-1725,1728,1732,1736,1740,1744,1748,1752,1756,1760-1761,1764-1765,1768-1769,1772-1773,1" "776-1777,1780-1781,1784-1785,1788-1789,1792,1796,1800,1804,1808,1812,1816,1820,1824-1825,1828-1829,1832-1833,183" 
"6-1837,1840-1841,1844-1845,1848-1849,1852-1853,1856,1860,1864,1868,1872,1876,1880,1884,1888-1889,1892-1893,1896-" "1897,1900-1901,1904-1905,1908-1909,1912-1913,1916-1917,1920,1924,1928,1932,1936,1940,1944,1948,1952-1953,1956-19" "57,1960-1961,1964-1965,1968-1969,1972-1973,1976-1977,1980-1981,1984,1988,1992,1996,2000,2004,2008,2012,2016-2017" ",2020-2021,2024-2025,2028-2029,2032-2033,2036-2037,2040-2041,2044-2045,2048,2052,2056,2060,2064,2068,2072,2076,2" "080-2081,2084-2085,2088-2089,2092-2093,2096-2097,2100-2101,2104-2105,2108-2109,2112,2116,2120,2124,2128,2132,213" "6,2140,2144-2145,2148-2149,2152-2153,2156-2157,2160-2161,2164-2165,2168-2169,2172-2173,2176,2180,2184,2188,2192," "2196,2200,2204,2208-2209,2212-2213,2216-2217,2220-2221,2224-2225,2228-2229,2232-2233,2236-2237,2240,2244,2248,22" "52,2256,2260,2264,2268,2272-2273,2276-2277,2280-2281,2284-2285,2288-2289,2292-2293,2296-2297,2300-2301,2304,2308" ",2312,2316,2320,2324,2328,2332,2336-2337,2340-2341,2344-2345,2348-2349,2352-2353,2356-2357,2360-2361,2364-2365,2" "368,2372,2376,2380,2384,2388,2392,2396,2400-2401,2404-2405,2408-2409,2412-2413,2416-2417,2420-2421,2424-2425,242" "8-2429,2432,2436,2440,2444,2448,2452,2456,2460,2464-2465,2468-2469,2472-2473,2476-2477,2480-2481,2484-2485,2488-" "2489,2492-2493,2496,2500,2504,2508,2512,2516,2520,2524,2528-2529,2532-2533,2536-2537,2540-2541,2544-2545,2548-25" "49,2552-2553,2556-2557\n"; static const struct test_bitmap_print test_print[] __initconst = { { small_bitmap, sizeof(small_bitmap) * BITS_PER_BYTE, small_mask, small_list }, { large_bitmap, sizeof(large_bitmap) * BITS_PER_BYTE, large_mask, large_list }, }; static void __init test_bitmap_print_buf(void) { int i; for (i = 0; i < ARRAY_SIZE(test_print); i++) { const struct test_bitmap_print *t = &test_print[i]; int n; n = bitmap_print_bitmask_to_buf(print_buf, t->bitmap, t->nbits, 0, 2 * PAGE_SIZE); expect_eq_uint(strlen(t->mask) + 1, n); expect_eq_str(t->mask, print_buf, n); n = bitmap_print_list_to_buf(print_buf, t->bitmap, t->nbits, 0, 2 * PAGE_SIZE); expect_eq_uint(strlen(t->list) + 1, n); expect_eq_str(t->list, print_buf, n); /* test by non-zero offset */ if (strlen(t->list) > PAGE_SIZE) { n = bitmap_print_list_to_buf(print_buf, t->bitmap, t->nbits, PAGE_SIZE, PAGE_SIZE); expect_eq_uint(strlen(t->list) + 1 - PAGE_SIZE, n); expect_eq_str(t->list + PAGE_SIZE, print_buf, n); } } } /* * FIXME: Clang breaks compile-time evaluations when KASAN and GCOV are enabled. * To workaround it, GCOV is force-disabled in Makefile for this configuration. */ static void __init test_bitmap_const_eval(void) { DECLARE_BITMAP(bitmap, BITS_PER_LONG); unsigned long initvar = BIT(2); unsigned long bitopvar = 0; unsigned long var = 0; int res; /* * Compilers must be able to optimize all of those to compile-time * constants on any supported optimization level (-O2, -Os) and any * architecture. Otherwise, trigger a build bug. * The whole function gets optimized out then, there's nothing to do * in runtime. */ /* * Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }`. * Clang on s390 optimizes bitops at compile-time as intended, but at * the same time stops treating @bitmap and @bitopvar as compile-time * constants after regular test_bit() is executed, thus triggering the * build bugs below. So, call const_test_bit() there directly until * the compiler is fixed. 
*/ bitmap_clear(bitmap, 0, BITS_PER_LONG); if (!test_bit(7, bitmap)) bitmap_set(bitmap, 5, 2); /* Equals to `unsigned long bitopvar = BIT(20)` */ __change_bit(31, &bitopvar); bitmap_shift_right(&bitopvar, &bitopvar, 11, BITS_PER_LONG); /* Equals to `unsigned long var = BIT(25)` */ var |= BIT(25); if (var & BIT(0)) var ^= GENMASK(9, 6); /* __const_hweight<32|64>(GENMASK(6, 5)) == 2 */ res = bitmap_weight(bitmap, 20); BUILD_BUG_ON(!__builtin_constant_p(res)); BUILD_BUG_ON(res != 2); /* !(BIT(31) & BIT(18)) == 1 */ res = !test_bit(18, &bitopvar); BUILD_BUG_ON(!__builtin_constant_p(res)); BUILD_BUG_ON(!res); /* BIT(2) & GENMASK(14, 8) == 0 */ res = initvar & GENMASK(14, 8); BUILD_BUG_ON(!__builtin_constant_p(res)); BUILD_BUG_ON(res); /* ~BIT(25) */ BUILD_BUG_ON(!__builtin_constant_p(~var)); BUILD_BUG_ON(~var != ~BIT(25)); } static void __init selftest(void) { test_zero_clear(); test_fill_set(); test_copy(); test_replace(); test_bitmap_arr32(); test_bitmap_arr64(); test_bitmap_parse(); test_bitmap_parselist(); test_bitmap_printlist(); test_mem_optimisations(); test_bitmap_cut(); test_bitmap_print_buf(); test_bitmap_const_eval(); test_find_nth_bit(); test_for_each_set_bit(); test_for_each_set_bit_from(); test_for_each_clear_bit(); test_for_each_clear_bit_from(); test_for_each_set_bitrange(); test_for_each_clear_bitrange(); test_for_each_set_bitrange_from(); test_for_each_clear_bitrange_from(); test_for_each_set_clump8(); test_for_each_set_bit_wrap(); } KSTM_MODULE_LOADERS(test_bitmap); MODULE_AUTHOR("david decotigny <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
lib/test_bitmap.c
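A short, hypothetical example of the bitmap API that the selftest above exercises: parse a region list, set an extra range, and print it back with the %*pbl specifier. None of these names appear in lib/test_bitmap.c.

/* Hypothetical sketch: round-trip a region list through the bitmap API. */
#include <linux/bitmap.h>
#include <linux/printk.h>

static void demo_bitmap_roundtrip(void)
{
	DECLARE_BITMAP(map, 64);

	bitmap_zero(map, 64);
	if (bitmap_parselist("0-3,8,32-35", map, 64))
		return;				/* parse error */

	bitmap_set(map, 60, 4);			/* set bits 60..63 */
	pr_info("map: %*pbl\n", 64, map);	/* prints "0-3,8,32-35,60-63" */
}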
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* Copyright (C) 2016-2022 Jason A. Donenfeld <[email protected]>. All Rights Reserved. * * Test cases for siphash.c * * SipHash: a fast short-input PRF * https://131002.net/siphash/ * * This implementation is specifically for SipHash2-4 for a secure PRF * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for * hashtables. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <kunit/test.h> #include <linux/siphash.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/module.h> /* Test vectors taken from reference source available at: * https://github.com/veorq/SipHash */ static const siphash_key_t test_key_siphash = {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }}; static const u64 test_vectors_siphash[64] = { 0x726fdb47dd0e0e31ULL, 0x74f839c593dc67fdULL, 0x0d6c8009d9a94f5aULL, 0x85676696d7fb7e2dULL, 0xcf2794e0277187b7ULL, 0x18765564cd99a68dULL, 0xcbc9466e58fee3ceULL, 0xab0200f58b01d137ULL, 0x93f5f5799a932462ULL, 0x9e0082df0ba9e4b0ULL, 0x7a5dbbc594ddb9f3ULL, 0xf4b32f46226bada7ULL, 0x751e8fbc860ee5fbULL, 0x14ea5627c0843d90ULL, 0xf723ca908e7af2eeULL, 0xa129ca6149be45e5ULL, 0x3f2acc7f57c29bdbULL, 0x699ae9f52cbe4794ULL, 0x4bc1b3f0968dd39cULL, 0xbb6dc91da77961bdULL, 0xbed65cf21aa2ee98ULL, 0xd0f2cbb02e3b67c7ULL, 0x93536795e3a33e88ULL, 0xa80c038ccd5ccec8ULL, 0xb8ad50c6f649af94ULL, 0xbce192de8a85b8eaULL, 0x17d835b85bbb15f3ULL, 0x2f2e6163076bcfadULL, 0xde4daaaca71dc9a5ULL, 0xa6a2506687956571ULL, 0xad87a3535c49ef28ULL, 0x32d892fad841c342ULL, 0x7127512f72f27cceULL, 0xa7f32346f95978e3ULL, 0x12e0b01abb051238ULL, 0x15e034d40fa197aeULL, 0x314dffbe0815a3b4ULL, 0x027990f029623981ULL, 0xcadcd4e59ef40c4dULL, 0x9abfd8766a33735cULL, 0x0e3ea96b5304a7d0ULL, 0xad0c42d6fc585992ULL, 0x187306c89bc215a9ULL, 0xd4a60abcf3792b95ULL, 0xf935451de4f21df2ULL, 0xa9538f0419755787ULL, 0xdb9acddff56ca510ULL, 0xd06c98cd5c0975ebULL, 0xe612a3cb9ecba951ULL, 0xc766e62cfcadaf96ULL, 0xee64435a9752fe72ULL, 0xa192d576b245165aULL, 0x0a8787bf8ecb74b2ULL, 0x81b3e73d20b49b6fULL, 0x7fa8220ba3b2eceaULL, 0x245731c13ca42499ULL, 0xb78dbfaf3a8d83bdULL, 0xea1ad565322a1a0bULL, 0x60e61c23a3795013ULL, 0x6606d7e446282b93ULL, 0x6ca4ecb15c5f91e1ULL, 0x9f626da15c9625f3ULL, 0xe51b38608ef25f57ULL, 0x958a324ceb064572ULL }; #if BITS_PER_LONG == 64 static const hsiphash_key_t test_key_hsiphash = {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }}; static const u32 test_vectors_hsiphash[64] = { 0x050fc4dcU, 0x7d57ca93U, 0x4dc7d44dU, 0xe7ddf7fbU, 0x88d38328U, 0x49533b67U, 0xc59f22a7U, 0x9bb11140U, 0x8d299a8eU, 0x6c063de4U, 0x92ff097fU, 0xf94dc352U, 0x57b4d9a2U, 0x1229ffa7U, 0xc0f95d34U, 0x2a519956U, 0x7d908b66U, 0x63dbd80cU, 0xb473e63eU, 0x8d297d1cU, 0xa6cce040U, 0x2b45f844U, 0xa320872eU, 0xdae6c123U, 0x67349c8cU, 0x705b0979U, 0xca9913a5U, 0x4ade3b35U, 0xef6cd00dU, 0x4ab1e1f4U, 0x43c5e663U, 0x8c21d1bcU, 0x16a7b60dU, 0x7a8ff9bfU, 0x1f2a753eU, 0xbf186b91U, 0xada26206U, 0xa3c33057U, 0xae3a36a1U, 0x7b108392U, 0x99e41531U, 0x3f1ad944U, 0xc8138825U, 0xc28949a6U, 0xfaf8876bU, 0x9f042196U, 0x68b1d623U, 0x8b5114fdU, 0xdf074c46U, 0x12cc86b3U, 0x0a52098fU, 0x9d292f9aU, 0xa2f41f12U, 0x43a71ed0U, 0x73f0bce6U, 0x70a7e980U, 0x243c6d75U, 0xfdb71513U, 0xa67d8a08U, 0xb7e8f148U, 0xf7a644eeU, 0x0f1837f2U, 0x4b6694e0U, 0xb7bbb3a8U }; #else static const hsiphash_key_t test_key_hsiphash = {{ 0x03020100U, 0x07060504U }}; static const u32 test_vectors_hsiphash[64] = { 0x5814c896U, 0xe7e864caU, 0xbc4b0e30U, 0x01539939U, 0x7e059ea6U, 0x88e3d89bU, 0xa0080b65U, 0x9d38d9d6U, 
0x577999b1U, 0xc839caedU, 0xe4fa32cfU, 0x959246eeU, 0x6b28096cU, 0x66dd9cd6U, 0x16658a7cU, 0xd0257b04U, 0x8b31d501U, 0x2b1cd04bU, 0x06712339U, 0x522aca67U, 0x911bb605U, 0x90a65f0eU, 0xf826ef7bU, 0x62512debU, 0x57150ad7U, 0x5d473507U, 0x1ec47442U, 0xab64afd3U, 0x0a4100d0U, 0x6d2ce652U, 0x2331b6a3U, 0x08d8791aU, 0xbc6dda8dU, 0xe0f6c934U, 0xb0652033U, 0x9b9851ccU, 0x7c46fb7fU, 0x732ba8cbU, 0xf142997aU, 0xfcc9aa1bU, 0x05327eb2U, 0xe110131cU, 0xf9e5e7c0U, 0xa7d708a6U, 0x11795ab1U, 0x65671619U, 0x9f5fff91U, 0xd89c5267U, 0x007783ebU, 0x95766243U, 0xab639262U, 0x9c7e1390U, 0xc368dda6U, 0x38ddc455U, 0xfa13d379U, 0x979ea4e8U, 0x53ecd77eU, 0x2ee80657U, 0x33dbb66aU, 0xae3f0577U, 0x88b4c4ccU, 0x3e7f480bU, 0x74c1ebf8U, 0x87178304U }; #endif #define chk(hash, vector, fmt...) \ KUNIT_EXPECT_EQ_MSG(test, hash, vector, fmt) static void siphash_test(struct kunit *test) { u8 in[64] __aligned(SIPHASH_ALIGNMENT); u8 in_unaligned[65] __aligned(SIPHASH_ALIGNMENT); u8 i; for (i = 0; i < 64; ++i) { in[i] = i; in_unaligned[i + 1] = i; chk(siphash(in, i, &test_key_siphash), test_vectors_siphash[i], "siphash self-test aligned %u: FAIL", i + 1); chk(siphash(in_unaligned + 1, i, &test_key_siphash), test_vectors_siphash[i], "siphash self-test unaligned %u: FAIL", i + 1); chk(hsiphash(in, i, &test_key_hsiphash), test_vectors_hsiphash[i], "hsiphash self-test aligned %u: FAIL", i + 1); chk(hsiphash(in_unaligned + 1, i, &test_key_hsiphash), test_vectors_hsiphash[i], "hsiphash self-test unaligned %u: FAIL", i + 1); } chk(siphash_1u64(0x0706050403020100ULL, &test_key_siphash), test_vectors_siphash[8], "siphash self-test 1u64: FAIL"); chk(siphash_2u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL, &test_key_siphash), test_vectors_siphash[16], "siphash self-test 2u64: FAIL"); chk(siphash_3u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL, 0x1716151413121110ULL, &test_key_siphash), test_vectors_siphash[24], "siphash self-test 3u64: FAIL"); chk(siphash_4u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL, 0x1716151413121110ULL, 0x1f1e1d1c1b1a1918ULL, &test_key_siphash), test_vectors_siphash[32], "siphash self-test 4u64: FAIL"); chk(siphash_1u32(0x03020100U, &test_key_siphash), test_vectors_siphash[4], "siphash self-test 1u32: FAIL"); chk(siphash_2u32(0x03020100U, 0x07060504U, &test_key_siphash), test_vectors_siphash[8], "siphash self-test 2u32: FAIL"); chk(siphash_3u32(0x03020100U, 0x07060504U, 0x0b0a0908U, &test_key_siphash), test_vectors_siphash[12], "siphash self-test 3u32: FAIL"); chk(siphash_4u32(0x03020100U, 0x07060504U, 0x0b0a0908U, 0x0f0e0d0cU, &test_key_siphash), test_vectors_siphash[16], "siphash self-test 4u32: FAIL"); chk(hsiphash_1u32(0x03020100U, &test_key_hsiphash), test_vectors_hsiphash[4], "hsiphash self-test 1u32: FAIL"); chk(hsiphash_2u32(0x03020100U, 0x07060504U, &test_key_hsiphash), test_vectors_hsiphash[8], "hsiphash self-test 2u32: FAIL"); chk(hsiphash_3u32(0x03020100U, 0x07060504U, 0x0b0a0908U, &test_key_hsiphash), test_vectors_hsiphash[12], "hsiphash self-test 3u32: FAIL"); chk(hsiphash_4u32(0x03020100U, 0x07060504U, 0x0b0a0908U, 0x0f0e0d0cU, &test_key_hsiphash), test_vectors_hsiphash[16], "hsiphash self-test 4u32: FAIL"); } static struct kunit_case siphash_test_cases[] = { KUNIT_CASE(siphash_test), {} }; static struct kunit_suite siphash_test_suite = { .name = "siphash", .test_cases = siphash_test_cases, }; kunit_test_suite(siphash_test_suite); MODULE_AUTHOR("Jason A. Donenfeld <[email protected]>"); MODULE_LICENSE("Dual BSD/GPL");
linux-master
lib/siphash_kunit.c
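For context, a hypothetical caller of the siphash API validated by the KUnit suite above might hash a pair of 64-bit values as in the sketch below; the key variable and helper functions are illustrative only.

/* Hypothetical sketch: a keyed 64-bit hash of two words via siphash_2u64(). */
#include <linux/siphash.h>
#include <linux/random.h>

static siphash_key_t demo_key __read_mostly;	/* illustrative key */

static void demo_key_init(void)
{
	get_random_bytes(&demo_key, sizeof(demo_key));
}

static u64 demo_hash_pair(u64 a, u64 b)
{
	return siphash_2u64(a, b, &demo_key);
}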
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>

#include "notifier-error-inject.h"

static int priority;
module_param(priority, int, 0);
MODULE_PARM_DESC(priority, "specify PM notifier priority");

static struct notifier_err_inject pm_notifier_err_inject = {
	.actions = {
		{ NOTIFIER_ERR_INJECT_ACTION(PM_HIBERNATION_PREPARE) },
		{ NOTIFIER_ERR_INJECT_ACTION(PM_SUSPEND_PREPARE) },
		{ NOTIFIER_ERR_INJECT_ACTION(PM_RESTORE_PREPARE) },
		{}
	}
};

static struct dentry *dir;

static int err_inject_init(void)
{
	int err;

	dir = notifier_err_inject_init("pm", notifier_err_inject_dir,
					&pm_notifier_err_inject, priority);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	err = register_pm_notifier(&pm_notifier_err_inject.nb);
	if (err)
		debugfs_remove_recursive(dir);

	return err;
}

static void err_inject_exit(void)
{
	unregister_pm_notifier(&pm_notifier_err_inject.nb);
	debugfs_remove_recursive(dir);
}

module_init(err_inject_init);
module_exit(err_inject_exit);

MODULE_DESCRIPTION("PM notifier error injection module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Akinobu Mita <[email protected]>");
linux-master
lib/pm-notifier-error-inject.c
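The module above injects errors into PM notifier callbacks; a hypothetical consumer of that notifier chain, registering its own callback and vetoing suspend while busy, could look like this sketch (all names invented).

/* Hypothetical sketch: a PM notifier that refuses suspend while busy. */
#include <linux/suspend.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/types.h>

static bool demo_busy;			/* illustrative device state */

static int demo_pm_notify(struct notifier_block *nb,
			  unsigned long action, void *data)
{
	if (action == PM_SUSPEND_PREPARE && demo_busy)
		return NOTIFY_BAD;	/* veto the transition */

	return NOTIFY_DONE;
}

static struct notifier_block demo_pm_nb = {
	.notifier_call	= demo_pm_notify,
};

static int __init demo_pm_init(void)
{
	return register_pm_notifier(&demo_pm_nb);
}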
// SPDX-License-Identifier: GPL-2.0-only /* * Resizable, Scalable, Concurrent Hash Table * * Copyright (c) 2015 Herbert Xu <[email protected]> * Copyright (c) 2014-2015 Thomas Graf <[email protected]> * Copyright (c) 2008-2014 Patrick McHardy <[email protected]> * * Code partially derived from nft_hash * Rewritten with rehash code from br_multicast plus single list * pointer as suggested by Josh Triplett */ #include <linux/atomic.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/log2.h> #include <linux/sched.h> #include <linux/rculist.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/jhash.h> #include <linux/random.h> #include <linux/rhashtable.h> #include <linux/err.h> #include <linux/export.h> #define HASH_DEFAULT_SIZE 64UL #define HASH_MIN_SIZE 4U union nested_table { union nested_table __rcu *table; struct rhash_lock_head __rcu *bucket; }; static u32 head_hashfn(struct rhashtable *ht, const struct bucket_table *tbl, const struct rhash_head *he) { return rht_head_hashfn(ht, tbl, he, ht->p); } #ifdef CONFIG_PROVE_LOCKING #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT)) int lockdep_rht_mutex_is_held(struct rhashtable *ht) { return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1; } EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) { if (!debug_locks) return 1; if (unlikely(tbl->nest)) return 1; return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]); } EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); #else #define ASSERT_RHT_MUTEX(HT) #endif static inline union nested_table *nested_table_top( const struct bucket_table *tbl) { /* The top-level bucket entry does not need RCU protection * because it's set at the same time as tbl->nest. */ return (void *)rcu_dereference_protected(tbl->buckets[0], 1); } static void nested_table_free(union nested_table *ntbl, unsigned int size) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); const unsigned int len = 1 << shift; unsigned int i; ntbl = rcu_dereference_protected(ntbl->table, 1); if (!ntbl) return; if (size > len) { size >>= shift; for (i = 0; i < len; i++) nested_table_free(ntbl + i, size); } kfree(ntbl); } static void nested_bucket_table_free(const struct bucket_table *tbl) { unsigned int size = tbl->size >> tbl->nest; unsigned int len = 1 << tbl->nest; union nested_table *ntbl; unsigned int i; ntbl = nested_table_top(tbl); for (i = 0; i < len; i++) nested_table_free(ntbl + i, size); kfree(ntbl); } static void bucket_table_free(const struct bucket_table *tbl) { if (tbl->nest) nested_bucket_table_free(tbl); kvfree(tbl); } static void bucket_table_free_rcu(struct rcu_head *head) { bucket_table_free(container_of(head, struct bucket_table, rcu)); } static union nested_table *nested_table_alloc(struct rhashtable *ht, union nested_table __rcu **prev, bool leaf) { union nested_table *ntbl; int i; ntbl = rcu_dereference(*prev); if (ntbl) return ntbl; ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC); if (ntbl && leaf) { for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++) INIT_RHT_NULLS_HEAD(ntbl[i].bucket); } if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL) return ntbl; /* Raced with another thread. 
*/ kfree(ntbl); return rcu_dereference(*prev); } static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht, size_t nbuckets, gfp_t gfp) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); struct bucket_table *tbl; size_t size; if (nbuckets < (1 << (shift + 1))) return NULL; size = sizeof(*tbl) + sizeof(tbl->buckets[0]); tbl = kzalloc(size, gfp); if (!tbl) return NULL; if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets, false)) { kfree(tbl); return NULL; } tbl->nest = (ilog2(nbuckets) - 1) % shift + 1; return tbl; } static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, size_t nbuckets, gfp_t gfp) { struct bucket_table *tbl = NULL; size_t size; int i; static struct lock_class_key __key; tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp); size = nbuckets; if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) { tbl = nested_bucket_table_alloc(ht, nbuckets, gfp); nbuckets = 0; } if (tbl == NULL) return NULL; lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0); tbl->size = size; rcu_head_init(&tbl->rcu); INIT_LIST_HEAD(&tbl->walkers); tbl->hash_rnd = get_random_u32(); for (i = 0; i < nbuckets; i++) INIT_RHT_NULLS_HEAD(tbl->buckets[i]); return tbl; } static struct bucket_table *rhashtable_last_table(struct rhashtable *ht, struct bucket_table *tbl) { struct bucket_table *new_tbl; do { new_tbl = tbl; tbl = rht_dereference_rcu(tbl->future_tbl, ht); } while (tbl); return new_tbl; } static int rhashtable_rehash_one(struct rhashtable *ht, struct rhash_lock_head __rcu **bkt, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl); int err = -EAGAIN; struct rhash_head *head, *next, *entry; struct rhash_head __rcu **pprev = NULL; unsigned int new_hash; unsigned long flags; if (new_tbl->nest) goto out; err = -ENOENT; rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash), old_tbl, old_hash) { err = 0; next = rht_dereference_bucket(entry->next, old_tbl, old_hash); if (rht_is_a_nulls(next)) break; pprev = &entry->next; } if (err) goto out; new_hash = head_hashfn(ht, new_tbl, entry); flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING); head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash); RCU_INIT_POINTER(entry->next, head); rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags); if (pprev) rcu_assign_pointer(*pprev, next); else /* Need to preserved the bit lock. */ rht_assign_locked(bkt, next); out: return err; } static int rhashtable_rehash_chain(struct rhashtable *ht, unsigned int old_hash) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash); unsigned long flags; int err; if (!bkt) return 0; flags = rht_lock(old_tbl, bkt); while (!(err = rhashtable_rehash_one(ht, bkt, old_hash))) ; if (err == -ENOENT) err = 0; rht_unlock(old_tbl, bkt, flags); return err; } static int rhashtable_rehash_attach(struct rhashtable *ht, struct bucket_table *old_tbl, struct bucket_table *new_tbl) { /* Make insertions go into the new, empty table right away. Deletions * and lookups will be attempted in both tables until we synchronize. * As cmpxchg() provides strong barriers, we do not need * rcu_assign_pointer(). 
*/ if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL, new_tbl) != NULL) return -EEXIST; return 0; } static int rhashtable_rehash_table(struct rhashtable *ht) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); struct bucket_table *new_tbl; struct rhashtable_walker *walker; unsigned int old_hash; int err; new_tbl = rht_dereference(old_tbl->future_tbl, ht); if (!new_tbl) return 0; for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { err = rhashtable_rehash_chain(ht, old_hash); if (err) return err; cond_resched(); } /* Publish the new table pointer. */ rcu_assign_pointer(ht->tbl, new_tbl); spin_lock(&ht->lock); list_for_each_entry(walker, &old_tbl->walkers, list) walker->tbl = NULL; /* Wait for readers. All new readers will see the new * table, and thus no references to the old table will * remain. * We do this inside the locked region so that * rhashtable_walk_stop() can use rcu_head_after_call_rcu() * to check if it should not re-link the table. */ call_rcu(&old_tbl->rcu, bucket_table_free_rcu); spin_unlock(&ht->lock); return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0; } static int rhashtable_rehash_alloc(struct rhashtable *ht, struct bucket_table *old_tbl, unsigned int size) { struct bucket_table *new_tbl; int err; ASSERT_RHT_MUTEX(ht); new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL); if (new_tbl == NULL) return -ENOMEM; err = rhashtable_rehash_attach(ht, old_tbl, new_tbl); if (err) bucket_table_free(new_tbl); return err; } /** * rhashtable_shrink - Shrink hash table while allowing concurrent lookups * @ht: the hash table to shrink * * This function shrinks the hash table to fit, i.e., the smallest * size would not cause it to expand right away automatically. * * The caller must ensure that no concurrent resizing occurs by holding * ht->mutex. * * The caller must ensure that no concurrent table mutations take place. * It is however valid to have concurrent lookups if they are RCU protected. * * It is valid to have concurrent insertions and deletions protected by per * bucket locks or concurrent RCU protected lookups and traversals. 
*/ static int rhashtable_shrink(struct rhashtable *ht) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); unsigned int nelems = atomic_read(&ht->nelems); unsigned int size = 0; if (nelems) size = roundup_pow_of_two(nelems * 3 / 2); if (size < ht->p.min_size) size = ht->p.min_size; if (old_tbl->size <= size) return 0; if (rht_dereference(old_tbl->future_tbl, ht)) return -EEXIST; return rhashtable_rehash_alloc(ht, old_tbl, size); } static void rht_deferred_worker(struct work_struct *work) { struct rhashtable *ht; struct bucket_table *tbl; int err = 0; ht = container_of(work, struct rhashtable, run_work); mutex_lock(&ht->mutex); tbl = rht_dereference(ht->tbl, ht); tbl = rhashtable_last_table(ht, tbl); if (rht_grow_above_75(ht, tbl)) err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2); else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl)) err = rhashtable_shrink(ht); else if (tbl->nest) err = rhashtable_rehash_alloc(ht, tbl, tbl->size); if (!err || err == -EEXIST) { int nerr; nerr = rhashtable_rehash_table(ht); err = err ?: nerr; } mutex_unlock(&ht->mutex); if (err) schedule_work(&ht->run_work); } static int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl) { struct bucket_table *old_tbl; struct bucket_table *new_tbl; unsigned int size; int err; old_tbl = rht_dereference_rcu(ht->tbl, ht); size = tbl->size; err = -EBUSY; if (rht_grow_above_75(ht, tbl)) size *= 2; /* Do not schedule more than one rehash */ else if (old_tbl != tbl) goto fail; err = -ENOMEM; new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN); if (new_tbl == NULL) goto fail; err = rhashtable_rehash_attach(ht, tbl, new_tbl); if (err) { bucket_table_free(new_tbl); if (err == -EEXIST) err = 0; } else schedule_work(&ht->run_work); return err; fail: /* Do not fail the insert if someone else did a rehash. */ if (likely(rcu_access_pointer(tbl->future_tbl))) return 0; /* Schedule async rehash to retry allocation in process context. */ if (err == -ENOMEM) schedule_work(&ht->run_work); return err; } static void *rhashtable_lookup_one(struct rhashtable *ht, struct rhash_lock_head __rcu **bkt, struct bucket_table *tbl, unsigned int hash, const void *key, struct rhash_head *obj) { struct rhashtable_compare_arg arg = { .ht = ht, .key = key, }; struct rhash_head __rcu **pprev = NULL; struct rhash_head *head; int elasticity; elasticity = RHT_ELASTICITY; rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) { struct rhlist_head *list; struct rhlist_head *plist; elasticity--; if (!key || (ht->p.obj_cmpfn ? 
ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) : rhashtable_compare(&arg, rht_obj(ht, head)))) { pprev = &head->next; continue; } if (!ht->rhlist) return rht_obj(ht, head); list = container_of(obj, struct rhlist_head, rhead); plist = container_of(head, struct rhlist_head, rhead); RCU_INIT_POINTER(list->next, plist); head = rht_dereference_bucket(head->next, tbl, hash); RCU_INIT_POINTER(list->rhead.next, head); if (pprev) rcu_assign_pointer(*pprev, obj); else /* Need to preserve the bit lock */ rht_assign_locked(bkt, obj); return NULL; } if (elasticity <= 0) return ERR_PTR(-EAGAIN); return ERR_PTR(-ENOENT); } static struct bucket_table *rhashtable_insert_one( struct rhashtable *ht, struct rhash_lock_head __rcu **bkt, struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj, void *data) { struct bucket_table *new_tbl; struct rhash_head *head; if (!IS_ERR_OR_NULL(data)) return ERR_PTR(-EEXIST); if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT) return ERR_CAST(data); new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); if (new_tbl) return new_tbl; if (PTR_ERR(data) != -ENOENT) return ERR_CAST(data); if (unlikely(rht_grow_above_max(ht, tbl))) return ERR_PTR(-E2BIG); if (unlikely(rht_grow_above_100(ht, tbl))) return ERR_PTR(-EAGAIN); head = rht_ptr(bkt, tbl, hash); RCU_INIT_POINTER(obj->next, head); if (ht->rhlist) { struct rhlist_head *list; list = container_of(obj, struct rhlist_head, rhead); RCU_INIT_POINTER(list->next, NULL); } /* bkt is always the head of the list, so it holds * the lock, which we need to preserve */ rht_assign_locked(bkt, obj); atomic_inc(&ht->nelems); if (rht_grow_above_75(ht, tbl)) schedule_work(&ht->run_work); return NULL; } static void *rhashtable_try_insert(struct rhashtable *ht, const void *key, struct rhash_head *obj) { struct bucket_table *new_tbl; struct bucket_table *tbl; struct rhash_lock_head __rcu **bkt; unsigned long flags; unsigned int hash; void *data; new_tbl = rcu_dereference(ht->tbl); do { tbl = new_tbl; hash = rht_head_hashfn(ht, tbl, obj, ht->p); if (rcu_access_pointer(tbl->future_tbl)) /* Failure is OK */ bkt = rht_bucket_var(tbl, hash); else bkt = rht_bucket_insert(ht, tbl, hash); if (bkt == NULL) { new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); data = ERR_PTR(-EAGAIN); } else { flags = rht_lock(tbl, bkt); data = rhashtable_lookup_one(ht, bkt, tbl, hash, key, obj); new_tbl = rhashtable_insert_one(ht, bkt, tbl, hash, obj, data); if (PTR_ERR(new_tbl) != -EEXIST) data = ERR_CAST(new_tbl); rht_unlock(tbl, bkt, flags); } } while (!IS_ERR_OR_NULL(new_tbl)); if (PTR_ERR(data) == -EAGAIN) data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?: -EAGAIN); return data; } void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, struct rhash_head *obj) { void *data; do { rcu_read_lock(); data = rhashtable_try_insert(ht, key, obj); rcu_read_unlock(); } while (PTR_ERR(data) == -EAGAIN); return data; } EXPORT_SYMBOL_GPL(rhashtable_insert_slow); /** * rhashtable_walk_enter - Initialise an iterator * @ht: Table to walk over * @iter: Hash table Iterator * * This function prepares a hash table walk. * * Note that if you restart a walk after rhashtable_walk_stop you * may see the same object twice. Also, you may miss objects if * there are removals in between rhashtable_walk_stop and the next * call to rhashtable_walk_start. * * For a completely stable walk you should construct your own data * structure outside the hash table. 
* * This function may be called from any process context, including * non-preemptable context, but cannot be called from softirq or * hardirq context. * * You must call rhashtable_walk_exit after this function returns. */ void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter) { iter->ht = ht; iter->p = NULL; iter->slot = 0; iter->skip = 0; iter->end_of_table = 0; spin_lock(&ht->lock); iter->walker.tbl = rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock)); list_add(&iter->walker.list, &iter->walker.tbl->walkers); spin_unlock(&ht->lock); } EXPORT_SYMBOL_GPL(rhashtable_walk_enter); /** * rhashtable_walk_exit - Free an iterator * @iter: Hash table Iterator * * This function frees resources allocated by rhashtable_walk_enter. */ void rhashtable_walk_exit(struct rhashtable_iter *iter) { spin_lock(&iter->ht->lock); if (iter->walker.tbl) list_del(&iter->walker.list); spin_unlock(&iter->ht->lock); } EXPORT_SYMBOL_GPL(rhashtable_walk_exit); /** * rhashtable_walk_start_check - Start a hash table walk * @iter: Hash table iterator * * Start a hash table walk at the current iterator position. Note that we take * the RCU lock in all cases including when we return an error. So you must * always call rhashtable_walk_stop to clean up. * * Returns zero if successful. * * Returns -EAGAIN if resize event occurred. Note that the iterator * will rewind back to the beginning and you may use it immediately * by calling rhashtable_walk_next. * * rhashtable_walk_start is defined as an inline variant that returns * void. This is preferred in cases where the caller would ignore * resize events and always continue. */ int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU) { struct rhashtable *ht = iter->ht; bool rhlist = ht->rhlist; rcu_read_lock(); spin_lock(&ht->lock); if (iter->walker.tbl) list_del(&iter->walker.list); spin_unlock(&ht->lock); if (iter->end_of_table) return 0; if (!iter->walker.tbl) { iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht); iter->slot = 0; iter->skip = 0; return -EAGAIN; } if (iter->p && !rhlist) { /* * We need to validate that 'p' is still in the table, and * if so, update 'skip' */ struct rhash_head *p; int skip = 0; rht_for_each_rcu(p, iter->walker.tbl, iter->slot) { skip++; if (p == iter->p) { iter->skip = skip; goto found; } } iter->p = NULL; } else if (iter->p && rhlist) { /* Need to validate that 'list' is still in the table, and * if so, update 'skip' and 'p'. */ struct rhash_head *p; struct rhlist_head *list; int skip = 0; rht_for_each_rcu(p, iter->walker.tbl, iter->slot) { for (list = container_of(p, struct rhlist_head, rhead); list; list = rcu_dereference(list->next)) { skip++; if (list == iter->list) { iter->p = p; iter->skip = skip; goto found; } } } iter->p = NULL; } found: return 0; } EXPORT_SYMBOL_GPL(rhashtable_walk_start_check); /** * __rhashtable_walk_find_next - Find the next element in a table (or the first * one in case of a new walk). * * @iter: Hash table iterator * * Returns the found object or NULL when the end of the table is reached. * * Returns -EAGAIN if resize event occurred. 
*/ static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter) { struct bucket_table *tbl = iter->walker.tbl; struct rhlist_head *list = iter->list; struct rhashtable *ht = iter->ht; struct rhash_head *p = iter->p; bool rhlist = ht->rhlist; if (!tbl) return NULL; for (; iter->slot < tbl->size; iter->slot++) { int skip = iter->skip; rht_for_each_rcu(p, tbl, iter->slot) { if (rhlist) { list = container_of(p, struct rhlist_head, rhead); do { if (!skip) goto next; skip--; list = rcu_dereference(list->next); } while (list); continue; } if (!skip) break; skip--; } next: if (!rht_is_a_nulls(p)) { iter->skip++; iter->p = p; iter->list = list; return rht_obj(ht, rhlist ? &list->rhead : p); } iter->skip = 0; } iter->p = NULL; /* Ensure we see any new tables. */ smp_rmb(); iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht); if (iter->walker.tbl) { iter->slot = 0; iter->skip = 0; return ERR_PTR(-EAGAIN); } else { iter->end_of_table = true; } return NULL; } /** * rhashtable_walk_next - Return the next object and advance the iterator * @iter: Hash table iterator * * Note that you must call rhashtable_walk_stop when you are finished * with the walk. * * Returns the next object or NULL when the end of the table is reached. * * Returns -EAGAIN if resize event occurred. Note that the iterator * will rewind back to the beginning and you may continue to use it. */ void *rhashtable_walk_next(struct rhashtable_iter *iter) { struct rhlist_head *list = iter->list; struct rhashtable *ht = iter->ht; struct rhash_head *p = iter->p; bool rhlist = ht->rhlist; if (p) { if (!rhlist || !(list = rcu_dereference(list->next))) { p = rcu_dereference(p->next); list = container_of(p, struct rhlist_head, rhead); } if (!rht_is_a_nulls(p)) { iter->skip++; iter->p = p; iter->list = list; return rht_obj(ht, rhlist ? &list->rhead : p); } /* At the end of this slot, switch to next one and then find * next entry from that point. */ iter->skip = 0; iter->slot++; } return __rhashtable_walk_find_next(iter); } EXPORT_SYMBOL_GPL(rhashtable_walk_next); /** * rhashtable_walk_peek - Return the next object but don't advance the iterator * @iter: Hash table iterator * * Returns the next object or NULL when the end of the table is reached. * * Returns -EAGAIN if resize event occurred. Note that the iterator * will rewind back to the beginning and you may continue to use it. */ void *rhashtable_walk_peek(struct rhashtable_iter *iter) { struct rhlist_head *list = iter->list; struct rhashtable *ht = iter->ht; struct rhash_head *p = iter->p; if (p) return rht_obj(ht, ht->rhlist ? &list->rhead : p); /* No object found in current iter, find next one in the table. */ if (iter->skip) { /* A nonzero skip value points to the next entry in the table * beyond that last one that was found. Decrement skip so * we find the current value. __rhashtable_walk_find_next * will restore the original value of skip assuming that * the table hasn't changed. */ iter->skip--; } return __rhashtable_walk_find_next(iter); } EXPORT_SYMBOL_GPL(rhashtable_walk_peek); /** * rhashtable_walk_stop - Finish a hash table walk * @iter: Hash table iterator * * Finish a hash table walk. Does not reset the iterator to the start of the * hash table. */ void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU) { struct rhashtable *ht; struct bucket_table *tbl = iter->walker.tbl; if (!tbl) goto out; ht = iter->ht; spin_lock(&ht->lock); if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu)) /* This bucket table is being freed, don't re-link it. 
*/ iter->walker.tbl = NULL; else list_add(&iter->walker.list, &tbl->walkers); spin_unlock(&ht->lock); out: rcu_read_unlock(); } EXPORT_SYMBOL_GPL(rhashtable_walk_stop); static size_t rounded_hashtable_size(const struct rhashtable_params *params) { size_t retsize; if (params->nelem_hint) retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3), (unsigned long)params->min_size); else retsize = max(HASH_DEFAULT_SIZE, (unsigned long)params->min_size); return retsize; } static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) { return jhash2(key, length, seed); } /** * rhashtable_init - initialize a new hash table * @ht: hash table to be initialized * @params: configuration parameters * * Initializes a new hash table based on the provided configuration * parameters. A table can be configured either with a variable or * fixed length key: * * Configuration Example 1: Fixed length keys * struct test_obj { * int key; * void * my_member; * struct rhash_head node; * }; * * struct rhashtable_params params = { * .head_offset = offsetof(struct test_obj, node), * .key_offset = offsetof(struct test_obj, key), * .key_len = sizeof(int), * .hashfn = jhash, * }; * * Configuration Example 2: Variable length keys * struct test_obj { * [...] * struct rhash_head node; * }; * * u32 my_hash_fn(const void *data, u32 len, u32 seed) * { * struct test_obj *obj = data; * * return [... hash ...]; * } * * struct rhashtable_params params = { * .head_offset = offsetof(struct test_obj, node), * .hashfn = jhash, * .obj_hashfn = my_hash_fn, * }; */ int rhashtable_init(struct rhashtable *ht, const struct rhashtable_params *params) { struct bucket_table *tbl; size_t size; if ((!params->key_len && !params->obj_hashfn) || (params->obj_hashfn && !params->obj_cmpfn)) return -EINVAL; memset(ht, 0, sizeof(*ht)); mutex_init(&ht->mutex); spin_lock_init(&ht->lock); memcpy(&ht->p, params, sizeof(*params)); if (params->min_size) ht->p.min_size = roundup_pow_of_two(params->min_size); /* Cap total entries at 2^31 to avoid nelems overflow. */ ht->max_elems = 1u << 31; if (params->max_size) { ht->p.max_size = rounddown_pow_of_two(params->max_size); if (ht->p.max_size < ht->max_elems / 2) ht->max_elems = ht->p.max_size * 2; } ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE); size = rounded_hashtable_size(&ht->p); ht->key_len = ht->p.key_len; if (!params->hashfn) { ht->p.hashfn = jhash; if (!(ht->key_len & (sizeof(u32) - 1))) { ht->key_len /= sizeof(u32); ht->p.hashfn = rhashtable_jhash2; } } /* * This is api initialization and thus we need to guarantee the * initial rhashtable allocation. Upon failure, retry with the * smallest possible size with __GFP_NOFAIL semantics. */ tbl = bucket_table_alloc(ht, size, GFP_KERNEL); if (unlikely(tbl == NULL)) { size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE); tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL); } atomic_set(&ht->nelems, 0); RCU_INIT_POINTER(ht->tbl, tbl); INIT_WORK(&ht->run_work, rht_deferred_worker); return 0; } EXPORT_SYMBOL_GPL(rhashtable_init); /** * rhltable_init - initialize a new hash list table * @hlt: hash list table to be initialized * @params: configuration parameters * * Initializes a new hash list table. * * See documentation for rhashtable_init. 
*/ int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params) { int err; err = rhashtable_init(&hlt->ht, params); hlt->ht.rhlist = true; return err; } EXPORT_SYMBOL_GPL(rhltable_init); static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj, void (*free_fn)(void *ptr, void *arg), void *arg) { struct rhlist_head *list; if (!ht->rhlist) { free_fn(rht_obj(ht, obj), arg); return; } list = container_of(obj, struct rhlist_head, rhead); do { obj = &list->rhead; list = rht_dereference(list->next, ht); free_fn(rht_obj(ht, obj), arg); } while (list); } /** * rhashtable_free_and_destroy - free elements and destroy hash table * @ht: the hash table to destroy * @free_fn: callback to release resources of element * @arg: pointer passed to free_fn * * Stops an eventual async resize. If defined, invokes free_fn for each * element to release resources. Please note that RCU protected * readers may still be accessing the elements. Releasing of resources * must occur in a compatible manner. Then frees the bucket array. * * This function will eventually sleep to wait for an async resize * to complete. The caller is responsible for ensuring that no further * write operations occur in parallel. */ void rhashtable_free_and_destroy(struct rhashtable *ht, void (*free_fn)(void *ptr, void *arg), void *arg) { struct bucket_table *tbl, *next_tbl; unsigned int i; cancel_work_sync(&ht->run_work); mutex_lock(&ht->mutex); tbl = rht_dereference(ht->tbl, ht); restart: if (free_fn) { for (i = 0; i < tbl->size; i++) { struct rhash_head *pos, *next; cond_resched(); for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)), next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL; !rht_is_a_nulls(pos); pos = next, next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL) rhashtable_free_one(ht, pos, free_fn, arg); } } next_tbl = rht_dereference(tbl->future_tbl, ht); bucket_table_free(tbl); if (next_tbl) { tbl = next_tbl; goto restart; } mutex_unlock(&ht->mutex); } EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy); void rhashtable_destroy(struct rhashtable *ht) { return rhashtable_free_and_destroy(ht, NULL, NULL); } EXPORT_SYMBOL_GPL(rhashtable_destroy); struct rhash_lock_head __rcu **__rht_bucket_nested( const struct bucket_table *tbl, unsigned int hash) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); unsigned int index = hash & ((1 << tbl->nest) - 1); unsigned int size = tbl->size >> tbl->nest; unsigned int subhash = hash; union nested_table *ntbl; ntbl = nested_table_top(tbl); ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash); subhash >>= tbl->nest; while (ntbl && size > (1 << shift)) { index = subhash & ((1 << shift) - 1); ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash); size >>= shift; subhash >>= shift; } if (!ntbl) return NULL; return &ntbl[subhash].bucket; } EXPORT_SYMBOL_GPL(__rht_bucket_nested); struct rhash_lock_head __rcu **rht_bucket_nested( const struct bucket_table *tbl, unsigned int hash) { static struct rhash_lock_head __rcu *rhnull; if (!rhnull) INIT_RHT_NULLS_HEAD(rhnull); return __rht_bucket_nested(tbl, hash) ?: &rhnull; } EXPORT_SYMBOL_GPL(rht_bucket_nested); struct rhash_lock_head __rcu **rht_bucket_nested_insert( struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); unsigned int index = hash & ((1 << tbl->nest) - 1); unsigned int size = tbl->size >> tbl->nest; union nested_table *ntbl; ntbl = nested_table_top(tbl); hash >>= tbl->nest; ntbl = 
nested_table_alloc(ht, &ntbl[index].table, size <= (1 << shift)); while (ntbl && size > (1 << shift)) { index = hash & ((1 << shift) - 1); size >>= shift; hash >>= shift; ntbl = nested_table_alloc(ht, &ntbl[index].table, size <= (1 << shift)); } if (!ntbl) return NULL; return &ntbl[hash].bucket; } EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
linux-master
lib/rhashtable.c
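The rhashtable API above is easiest to follow end to end in a small caller. The sketch below is illustrative only and not taken from the kernel tree: struct demo_obj and all demo_* names are invented; only the public helpers from <linux/rhashtable.h> (rhashtable_init, rhashtable_insert_fast, rhashtable_lookup_fast, rhashtable_remove_fast, rhashtable_destroy) are real API.

/*
 * Illustrative sketch, not part of lib/rhashtable.c: a minimal user keyed
 * by a u32. "demo_obj" and "demo_params" are hypothetical names.
 */
#include <linux/rhashtable.h>
#include <linux/slab.h>

struct demo_obj {
	u32 key;
	struct rhash_head node;	/* linkage owned by the hash table */
};

static const struct rhashtable_params demo_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct demo_obj, key),
	.head_offset	= offsetof(struct demo_obj, node),
	.automatic_shrinking = true,
};

static int demo_rhashtable(void)
{
	struct rhashtable ht;
	struct demo_obj *obj, *found;
	int err;

	err = rhashtable_init(&ht, &demo_params);
	if (err)
		return err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		rhashtable_destroy(&ht);
		return -ENOMEM;
	}
	obj->key = 42;

	/* Insert, look the object back up by key, then remove it. */
	err = rhashtable_insert_fast(&ht, &obj->node, demo_params);
	if (!err) {
		found = rhashtable_lookup_fast(&ht, &obj->key, demo_params);
		WARN_ON(found != obj);
		rhashtable_remove_fast(&ht, &obj->node, demo_params);
	}

	/* With concurrent RCU readers, a real user would kfree_rcu() here. */
	kfree(obj);
	rhashtable_destroy(&ht);
	return err;
}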
// SPDX-License-Identifier: GPL-2.0-only /* * UBSAN error reporting functions * * Copyright (c) 2014 Samsung Electronics Co., Ltd. * Author: Andrey Ryabinin <[email protected]> */ #include <linux/bitops.h> #include <linux/bug.h> #include <linux/ctype.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/uaccess.h> #include <linux/ubsan.h> #include <kunit/test-bug.h> #include "ubsan.h" #ifdef CONFIG_UBSAN_TRAP /* * Only include matches for UBSAN checks that are actually compiled in. * The mappings of struct SanitizerKind (the -fsanitize=xxx args) to * enum SanitizerHandler (the traps) in Clang is in clang/lib/CodeGen/. */ const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type) { switch (check_type) { #ifdef CONFIG_UBSAN_BOUNDS /* * SanitizerKind::ArrayBounds and SanitizerKind::LocalBounds * emit SanitizerHandler::OutOfBounds. */ case ubsan_out_of_bounds: return "UBSAN: array index out of bounds"; #endif #ifdef CONFIG_UBSAN_SHIFT /* * SanitizerKind::ShiftBase and SanitizerKind::ShiftExponent * emit SanitizerHandler::ShiftOutOfBounds. */ case ubsan_shift_out_of_bounds: return "UBSAN: shift out of bounds"; #endif #ifdef CONFIG_UBSAN_DIV_ZERO /* * SanitizerKind::IntegerDivideByZero emits * SanitizerHandler::DivremOverflow. */ case ubsan_divrem_overflow: return "UBSAN: divide/remainder overflow"; #endif #ifdef CONFIG_UBSAN_UNREACHABLE /* * SanitizerKind::Unreachable emits * SanitizerHandler::BuiltinUnreachable. */ case ubsan_builtin_unreachable: return "UBSAN: unreachable code"; #endif #if defined(CONFIG_UBSAN_BOOL) || defined(CONFIG_UBSAN_ENUM) /* * SanitizerKind::Bool and SanitizerKind::Enum emit * SanitizerHandler::LoadInvalidValue. */ case ubsan_load_invalid_value: return "UBSAN: loading invalid value"; #endif #ifdef CONFIG_UBSAN_ALIGNMENT /* * SanitizerKind::Alignment emits SanitizerHandler::TypeMismatch * or SanitizerHandler::AlignmentAssumption. 
*/ case ubsan_alignment_assumption: return "UBSAN: alignment assumption"; case ubsan_type_mismatch: return "UBSAN: type mismatch"; #endif default: return "UBSAN: unrecognized failure code"; } } #else static const char * const type_check_kinds[] = { "load of", "store to", "reference binding to", "member access within", "member call on", "constructor call on", "downcast of", "downcast of" }; #define REPORTED_BIT 31 #if (BITS_PER_LONG == 64) && defined(__BIG_ENDIAN) #define COLUMN_MASK (~(1U << REPORTED_BIT)) #define LINE_MASK (~0U) #else #define COLUMN_MASK (~0U) #define LINE_MASK (~(1U << REPORTED_BIT)) #endif #define VALUE_LENGTH 40 static bool was_reported(struct source_location *location) { return test_and_set_bit(REPORTED_BIT, &location->reported); } static bool suppress_report(struct source_location *loc) { return current->in_ubsan || was_reported(loc); } static bool type_is_int(struct type_descriptor *type) { return type->type_kind == type_kind_int; } static bool type_is_signed(struct type_descriptor *type) { WARN_ON(!type_is_int(type)); return type->type_info & 1; } static unsigned type_bit_width(struct type_descriptor *type) { return 1 << (type->type_info >> 1); } static bool is_inline_int(struct type_descriptor *type) { unsigned inline_bits = sizeof(unsigned long)*8; unsigned bits = type_bit_width(type); WARN_ON(!type_is_int(type)); return bits <= inline_bits; } static s_max get_signed_val(struct type_descriptor *type, void *val) { if (is_inline_int(type)) { unsigned extra_bits = sizeof(s_max)*8 - type_bit_width(type); unsigned long ulong_val = (unsigned long)val; return ((s_max)ulong_val) << extra_bits >> extra_bits; } if (type_bit_width(type) == 64) return *(s64 *)val; return *(s_max *)val; } static bool val_is_negative(struct type_descriptor *type, void *val) { return type_is_signed(type) && get_signed_val(type, val) < 0; } static u_max get_unsigned_val(struct type_descriptor *type, void *val) { if (is_inline_int(type)) return (unsigned long)val; if (type_bit_width(type) == 64) return *(u64 *)val; return *(u_max *)val; } static void val_to_string(char *str, size_t size, struct type_descriptor *type, void *value) { if (type_is_int(type)) { if (type_bit_width(type) == 128) { #if defined(CONFIG_ARCH_SUPPORTS_INT128) u_max val = get_unsigned_val(type, value); scnprintf(str, size, "0x%08x%08x%08x%08x", (u32)(val >> 96), (u32)(val >> 64), (u32)(val >> 32), (u32)(val)); #else WARN_ON(1); #endif } else if (type_is_signed(type)) { scnprintf(str, size, "%lld", (s64)get_signed_val(type, value)); } else { scnprintf(str, size, "%llu", (u64)get_unsigned_val(type, value)); } } } static void ubsan_prologue(struct source_location *loc, const char *reason) { current->in_ubsan++; pr_err("========================================" "========================================\n"); pr_err("UBSAN: %s in %s:%d:%d\n", reason, loc->file_name, loc->line & LINE_MASK, loc->column & COLUMN_MASK); kunit_fail_current_test("%s in %s", reason, loc->file_name); } static void ubsan_epilogue(void) { dump_stack(); pr_err("========================================" "========================================\n"); current->in_ubsan--; check_panic_on_warn("UBSAN"); } void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs) { struct overflow_data *data = _data; char rhs_val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; ubsan_prologue(&data->location, "division-overflow"); val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs); if (type_is_signed(data->type) && 
get_signed_val(data->type, rhs) == -1) pr_err("division of %s by -1 cannot be represented in type %s\n", rhs_val_str, data->type->type_name); else pr_err("division by zero\n"); ubsan_epilogue(); } EXPORT_SYMBOL(__ubsan_handle_divrem_overflow); static void handle_null_ptr_deref(struct type_mismatch_data_common *data) { if (suppress_report(data->location)) return; ubsan_prologue(data->location, "null-ptr-deref"); pr_err("%s null pointer of type %s\n", type_check_kinds[data->type_check_kind], data->type->type_name); ubsan_epilogue(); } static void handle_misaligned_access(struct type_mismatch_data_common *data, unsigned long ptr) { if (suppress_report(data->location)) return; ubsan_prologue(data->location, "misaligned-access"); pr_err("%s misaligned address %p for type %s\n", type_check_kinds[data->type_check_kind], (void *)ptr, data->type->type_name); pr_err("which requires %ld byte alignment\n", data->alignment); ubsan_epilogue(); } static void handle_object_size_mismatch(struct type_mismatch_data_common *data, unsigned long ptr) { if (suppress_report(data->location)) return; ubsan_prologue(data->location, "object-size-mismatch"); pr_err("%s address %p with insufficient space\n", type_check_kinds[data->type_check_kind], (void *) ptr); pr_err("for an object of type %s\n", data->type->type_name); ubsan_epilogue(); } static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data, unsigned long ptr) { unsigned long flags = user_access_save(); if (!ptr) handle_null_ptr_deref(data); else if (data->alignment && !IS_ALIGNED(ptr, data->alignment)) handle_misaligned_access(data, ptr); else handle_object_size_mismatch(data, ptr); user_access_restore(flags); } void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr) { struct type_mismatch_data_common common_data = { .location = &data->location, .type = data->type, .alignment = data->alignment, .type_check_kind = data->type_check_kind }; ubsan_type_mismatch_common(&common_data, (unsigned long)ptr); } EXPORT_SYMBOL(__ubsan_handle_type_mismatch); void __ubsan_handle_type_mismatch_v1(void *_data, void *ptr) { struct type_mismatch_data_v1 *data = _data; struct type_mismatch_data_common common_data = { .location = &data->location, .type = data->type, .alignment = 1UL << data->log_alignment, .type_check_kind = data->type_check_kind }; ubsan_type_mismatch_common(&common_data, (unsigned long)ptr); } EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1); void __ubsan_handle_out_of_bounds(void *_data, void *index) { struct out_of_bounds_data *data = _data; char index_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; ubsan_prologue(&data->location, "array-index-out-of-bounds"); val_to_string(index_str, sizeof(index_str), data->index_type, index); pr_err("index %s is out of range for type %s\n", index_str, data->array_type->type_name); ubsan_epilogue(); } EXPORT_SYMBOL(__ubsan_handle_out_of_bounds); void __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs) { struct shift_out_of_bounds_data *data = _data; struct type_descriptor *rhs_type = data->rhs_type; struct type_descriptor *lhs_type = data->lhs_type; char rhs_str[VALUE_LENGTH]; char lhs_str[VALUE_LENGTH]; unsigned long ua_flags = user_access_save(); if (suppress_report(&data->location)) goto out; ubsan_prologue(&data->location, "shift-out-of-bounds"); val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs); val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs); if (val_is_negative(rhs_type, rhs)) pr_err("shift exponent %s is negative\n", rhs_str); 
else if (get_unsigned_val(rhs_type, rhs) >= type_bit_width(lhs_type)) pr_err("shift exponent %s is too large for %u-bit type %s\n", rhs_str, type_bit_width(lhs_type), lhs_type->type_name); else if (val_is_negative(lhs_type, lhs)) pr_err("left shift of negative value %s\n", lhs_str); else pr_err("left shift of %s by %s places cannot be" " represented in type %s\n", lhs_str, rhs_str, lhs_type->type_name); ubsan_epilogue(); out: user_access_restore(ua_flags); } EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); void __ubsan_handle_builtin_unreachable(void *_data) { struct unreachable_data *data = _data; ubsan_prologue(&data->location, "unreachable"); pr_err("calling __builtin_unreachable()\n"); ubsan_epilogue(); panic("can't return from __builtin_unreachable()"); } EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable); void __ubsan_handle_load_invalid_value(void *_data, void *val) { struct invalid_value_data *data = _data; char val_str[VALUE_LENGTH]; unsigned long ua_flags = user_access_save(); if (suppress_report(&data->location)) goto out; ubsan_prologue(&data->location, "invalid-load"); val_to_string(val_str, sizeof(val_str), data->type, val); pr_err("load of value %s is not a valid value for type %s\n", val_str, data->type->type_name); ubsan_epilogue(); out: user_access_restore(ua_flags); } EXPORT_SYMBOL(__ubsan_handle_load_invalid_value); void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr, unsigned long align, unsigned long offset) { struct alignment_assumption_data *data = _data; unsigned long real_ptr; if (suppress_report(&data->location)) return; ubsan_prologue(&data->location, "alignment-assumption"); if (offset) pr_err("assumption of %lu byte alignment (with offset of %lu byte) for pointer of type %s failed", align, offset, data->type->type_name); else pr_err("assumption of %lu byte alignment for pointer of type %s failed", align, data->type->type_name); real_ptr = ptr - offset; pr_err("%saddress is %lu aligned, misalignment offset is %lu bytes", offset ? "offset " : "", BIT(real_ptr ? __ffs(real_ptr) : 0), real_ptr & (align - 1)); ubsan_epilogue(); } EXPORT_SYMBOL(__ubsan_handle_alignment_assumption); #endif /* !CONFIG_UBSAN_TRAP */
linux-master
lib/ubsan.c
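The handlers in lib/ubsan.c are only ever called by compiler-inserted instrumentation, not by hand. As a rough illustration (not from the kernel tree, function names invented), the sketch below shows the kind of operations that, when built with CONFIG_UBSAN_SHIFT and CONFIG_UBSAN_BOUNDS, would reach __ubsan_handle_shift_out_of_bounds() and __ubsan_handle_out_of_bounds().

/*
 * Illustrative sketch, not part of lib/ubsan.c: code patterns that UBSAN
 * instrumentation reports on. The __ubsan_handle_*() calls are inserted
 * by the compiler; nothing here calls them directly.
 */
static int ubsan_demo_shift(int val, unsigned int shift)
{
	/*
	 * If shift >= 32, an instrumented build calls
	 * __ubsan_handle_shift_out_of_bounds() before the operation.
	 */
	return val << shift;
}

static int ubsan_demo_index(int idx)
{
	int table[4] = { 0, 1, 2, 3 };

	/*
	 * An idx outside [0, 3] trips __ubsan_handle_out_of_bounds()
	 * when CONFIG_UBSAN_BOUNDS is enabled.
	 */
	return table[idx];
}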
// SPDX-License-Identifier: GPL-2.0-only /* * Basic general purpose allocator for managing special purpose * memory, for example, memory that is not managed by the regular * kmalloc/kfree interface. Uses for this includes on-device special * memory, uncached memory etc. * * It is safe to use the allocator in NMI handlers and other special * unblockable contexts that could otherwise deadlock on locks. This * is implemented by using atomic operations and retries on any * conflicts. The disadvantage is that there may be livelocks in * extreme cases. For better scalability, one allocator can be used * for each CPU. * * The lockless operation only works if there is enough memory * available. If new memory is added to the pool a lock has to be * still taken. So any user relying on locklessness has to ensure * that sufficient memory is preallocated. * * The basic atomic operation of this allocator is cmpxchg on long. * On architectures that don't have NMI-safe cmpxchg implementation, * the allocator can NOT be used in NMI handler. So code uses the * allocator in NMI handler should depend on * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. * * Copyright 2005 (C) Jes Sorensen <[email protected]> */ #include <linux/slab.h> #include <linux/export.h> #include <linux/bitmap.h> #include <linux/rculist.h> #include <linux/interrupt.h> #include <linux/genalloc.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/vmalloc.h> static inline size_t chunk_size(const struct gen_pool_chunk *chunk) { return chunk->end_addr - chunk->start_addr + 1; } static inline int set_bits_ll(unsigned long *addr, unsigned long mask_to_set) { unsigned long val = READ_ONCE(*addr); do { if (val & mask_to_set) return -EBUSY; cpu_relax(); } while (!try_cmpxchg(addr, &val, val | mask_to_set)); return 0; } static inline int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear) { unsigned long val = READ_ONCE(*addr); do { if ((val & mask_to_clear) != mask_to_clear) return -EBUSY; cpu_relax(); } while (!try_cmpxchg(addr, &val, val & ~mask_to_clear)); return 0; } /* * bitmap_set_ll - set the specified number of bits at the specified position * @map: pointer to a bitmap * @start: a bit position in @map * @nr: number of bits to set * * Set @nr bits start from @start in @map lock-lessly. Several users * can set/clear the same bitmap simultaneously without lock. If two * users set the same bit, one user will return remain bits, otherwise * return 0. */ static unsigned long bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr) { unsigned long *p = map + BIT_WORD(start); const unsigned long size = start + nr; int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); while (nr >= bits_to_set) { if (set_bits_ll(p, mask_to_set)) return nr; nr -= bits_to_set; bits_to_set = BITS_PER_LONG; mask_to_set = ~0UL; p++; } if (nr) { mask_to_set &= BITMAP_LAST_WORD_MASK(size); if (set_bits_ll(p, mask_to_set)) return nr; } return 0; } /* * bitmap_clear_ll - clear the specified number of bits at the specified position * @map: pointer to a bitmap * @start: a bit position in @map * @nr: number of bits to set * * Clear @nr bits start from @start in @map lock-lessly. Several users * can set/clear the same bitmap simultaneously without lock. If two * users clear the same bit, one user will return remain bits, * otherwise return 0. 
*/ static unsigned long bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr) { unsigned long *p = map + BIT_WORD(start); const unsigned long size = start + nr; int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); while (nr >= bits_to_clear) { if (clear_bits_ll(p, mask_to_clear)) return nr; nr -= bits_to_clear; bits_to_clear = BITS_PER_LONG; mask_to_clear = ~0UL; p++; } if (nr) { mask_to_clear &= BITMAP_LAST_WORD_MASK(size); if (clear_bits_ll(p, mask_to_clear)) return nr; } return 0; } /** * gen_pool_create - create a new special memory pool * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents * @nid: node id of the node the pool structure should be allocated on, or -1 * * Create a new special memory pool that can be used to manage special purpose * memory not managed by the regular kmalloc/kfree interface. */ struct gen_pool *gen_pool_create(int min_alloc_order, int nid) { struct gen_pool *pool; pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid); if (pool != NULL) { spin_lock_init(&pool->lock); INIT_LIST_HEAD(&pool->chunks); pool->min_alloc_order = min_alloc_order; pool->algo = gen_pool_first_fit; pool->data = NULL; pool->name = NULL; } return pool; } EXPORT_SYMBOL(gen_pool_create); /** * gen_pool_add_owner- add a new chunk of special memory to the pool * @pool: pool to add new memory chunk to * @virt: virtual starting address of memory chunk to add to pool * @phys: physical starting address of memory chunk to add to pool * @size: size in bytes of the memory chunk to add to pool * @nid: node id of the node the chunk structure and bitmap should be * allocated on, or -1 * @owner: private data the publisher would like to recall at alloc time * * Add a new chunk of special memory to the specified pool. * * Returns 0 on success or a -ve errno on failure. */ int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys, size_t size, int nid, void *owner) { struct gen_pool_chunk *chunk; unsigned long nbits = size >> pool->min_alloc_order; unsigned long nbytes = sizeof(struct gen_pool_chunk) + BITS_TO_LONGS(nbits) * sizeof(long); chunk = vzalloc_node(nbytes, nid); if (unlikely(chunk == NULL)) return -ENOMEM; chunk->phys_addr = phys; chunk->start_addr = virt; chunk->end_addr = virt + size - 1; chunk->owner = owner; atomic_long_set(&chunk->avail, size); spin_lock(&pool->lock); list_add_rcu(&chunk->next_chunk, &pool->chunks); spin_unlock(&pool->lock); return 0; } EXPORT_SYMBOL(gen_pool_add_owner); /** * gen_pool_virt_to_phys - return the physical address of memory * @pool: pool to allocate from * @addr: starting address of memory * * Returns the physical address on success, or -1 on error. */ phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr) { struct gen_pool_chunk *chunk; phys_addr_t paddr = -1; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { if (addr >= chunk->start_addr && addr <= chunk->end_addr) { paddr = chunk->phys_addr + (addr - chunk->start_addr); break; } } rcu_read_unlock(); return paddr; } EXPORT_SYMBOL(gen_pool_virt_to_phys); /** * gen_pool_destroy - destroy a special memory pool * @pool: pool to destroy * * Destroy the specified special memory pool. Verifies that there are no * outstanding allocations. 
*/ void gen_pool_destroy(struct gen_pool *pool) { struct list_head *_chunk, *_next_chunk; struct gen_pool_chunk *chunk; int order = pool->min_alloc_order; unsigned long bit, end_bit; list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); list_del(&chunk->next_chunk); end_bit = chunk_size(chunk) >> order; bit = find_first_bit(chunk->bits, end_bit); BUG_ON(bit < end_bit); vfree(chunk); } kfree_const(pool->name); kfree(pool); } EXPORT_SYMBOL(gen_pool_destroy); /** * gen_pool_alloc_algo_owner - allocate special memory from the pool * @pool: pool to allocate from * @size: number of bytes to allocate from the pool * @algo: algorithm passed from caller * @data: data passed to algorithm * @owner: optionally retrieve the chunk owner * * Allocate the requested number of bytes from the specified pool. * Uses the pool allocation function (with first-fit algorithm by default). * Can not be used in NMI handler on architectures without * NMI-safe cmpxchg implementation. */ unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size, genpool_algo_t algo, void *data, void **owner) { struct gen_pool_chunk *chunk; unsigned long addr = 0; int order = pool->min_alloc_order; unsigned long nbits, start_bit, end_bit, remain; #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG BUG_ON(in_nmi()); #endif if (owner) *owner = NULL; if (size == 0) return 0; nbits = (size + (1UL << order) - 1) >> order; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { if (size > atomic_long_read(&chunk->avail)) continue; start_bit = 0; end_bit = chunk_size(chunk) >> order; retry: start_bit = algo(chunk->bits, end_bit, start_bit, nbits, data, pool, chunk->start_addr); if (start_bit >= end_bit) continue; remain = bitmap_set_ll(chunk->bits, start_bit, nbits); if (remain) { remain = bitmap_clear_ll(chunk->bits, start_bit, nbits - remain); BUG_ON(remain); goto retry; } addr = chunk->start_addr + ((unsigned long)start_bit << order); size = nbits << order; atomic_long_sub(size, &chunk->avail); if (owner) *owner = chunk->owner; break; } rcu_read_unlock(); return addr; } EXPORT_SYMBOL(gen_pool_alloc_algo_owner); /** * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage * @pool: pool to allocate from * @size: number of bytes to allocate from the pool * @dma: dma-view physical address return value. Use %NULL if unneeded. * * Allocate the requested number of bytes from the specified pool. * Uses the pool allocation function (with first-fit algorithm by default). * Can not be used in NMI handler on architectures without * NMI-safe cmpxchg implementation. * * Return: virtual address of the allocated memory, or %NULL on failure */ void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma) { return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data); } EXPORT_SYMBOL(gen_pool_dma_alloc); /** * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA * usage with the given pool algorithm * @pool: pool to allocate from * @size: number of bytes to allocate from the pool * @dma: DMA-view physical address return value. Use %NULL if unneeded. * @algo: algorithm passed from caller * @data: data passed to algorithm * * Allocate the requested number of bytes from the specified pool. Uses the * given pool allocation function. Can not be used in NMI handler on * architectures without NMI-safe cmpxchg implementation. 
* * Return: virtual address of the allocated memory, or %NULL on failure */ void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size, dma_addr_t *dma, genpool_algo_t algo, void *data) { unsigned long vaddr; if (!pool) return NULL; vaddr = gen_pool_alloc_algo(pool, size, algo, data); if (!vaddr) return NULL; if (dma) *dma = gen_pool_virt_to_phys(pool, vaddr); return (void *)vaddr; } EXPORT_SYMBOL(gen_pool_dma_alloc_algo); /** * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA * usage with the given alignment * @pool: pool to allocate from * @size: number of bytes to allocate from the pool * @dma: DMA-view physical address return value. Use %NULL if unneeded. * @align: alignment in bytes for starting address * * Allocate the requested number bytes from the specified pool, with the given * alignment restriction. Can not be used in NMI handler on architectures * without NMI-safe cmpxchg implementation. * * Return: virtual address of the allocated memory, or %NULL on failure */ void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size, dma_addr_t *dma, int align) { struct genpool_data_align data = { .align = align }; return gen_pool_dma_alloc_algo(pool, size, dma, gen_pool_first_fit_align, &data); } EXPORT_SYMBOL(gen_pool_dma_alloc_align); /** * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for * DMA usage * @pool: pool to allocate from * @size: number of bytes to allocate from the pool * @dma: dma-view physical address return value. Use %NULL if unneeded. * * Allocate the requested number of zeroed bytes from the specified pool. * Uses the pool allocation function (with first-fit algorithm by default). * Can not be used in NMI handler on architectures without * NMI-safe cmpxchg implementation. * * Return: virtual address of the allocated zeroed memory, or %NULL on failure */ void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma) { return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data); } EXPORT_SYMBOL(gen_pool_dma_zalloc); /** * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for * DMA usage with the given pool algorithm * @pool: pool to allocate from * @size: number of bytes to allocate from the pool * @dma: DMA-view physical address return value. Use %NULL if unneeded. * @algo: algorithm passed from caller * @data: data passed to algorithm * * Allocate the requested number of zeroed bytes from the specified pool. Uses * the given pool allocation function. Can not be used in NMI handler on * architectures without NMI-safe cmpxchg implementation. * * Return: virtual address of the allocated zeroed memory, or %NULL on failure */ void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size, dma_addr_t *dma, genpool_algo_t algo, void *data) { void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data); if (vaddr) memset(vaddr, 0, size); return vaddr; } EXPORT_SYMBOL(gen_pool_dma_zalloc_algo); /** * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for * DMA usage with the given alignment * @pool: pool to allocate from * @size: number of bytes to allocate from the pool * @dma: DMA-view physical address return value. Use %NULL if unneeded. * @align: alignment in bytes for starting address * * Allocate the requested number of zeroed bytes from the specified pool, * with the given alignment restriction. Can not be used in NMI handler on * architectures without NMI-safe cmpxchg implementation. 
* * Return: virtual address of the allocated zeroed memory, or %NULL on failure */ void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size, dma_addr_t *dma, int align) { struct genpool_data_align data = { .align = align }; return gen_pool_dma_zalloc_algo(pool, size, dma, gen_pool_first_fit_align, &data); } EXPORT_SYMBOL(gen_pool_dma_zalloc_align); /** * gen_pool_free_owner - free allocated special memory back to the pool * @pool: pool to free to * @addr: starting address of memory to free back to pool * @size: size in bytes of memory to free * @owner: private data stashed at gen_pool_add() time * * Free previously allocated special memory back to the specified * pool. Can not be used in NMI handler on architectures without * NMI-safe cmpxchg implementation. */ void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size, void **owner) { struct gen_pool_chunk *chunk; int order = pool->min_alloc_order; unsigned long start_bit, nbits, remain; #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG BUG_ON(in_nmi()); #endif if (owner) *owner = NULL; nbits = (size + (1UL << order) - 1) >> order; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { if (addr >= chunk->start_addr && addr <= chunk->end_addr) { BUG_ON(addr + size - 1 > chunk->end_addr); start_bit = (addr - chunk->start_addr) >> order; remain = bitmap_clear_ll(chunk->bits, start_bit, nbits); BUG_ON(remain); size = nbits << order; atomic_long_add(size, &chunk->avail); if (owner) *owner = chunk->owner; rcu_read_unlock(); return; } } rcu_read_unlock(); BUG(); } EXPORT_SYMBOL(gen_pool_free_owner); /** * gen_pool_for_each_chunk - call func for every chunk of generic memory pool * @pool: the generic memory pool * @func: func to call * @data: additional data used by @func * * Call @func for every chunk of generic memory pool. The @func is * called with rcu_read_lock held. */ void gen_pool_for_each_chunk(struct gen_pool *pool, void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data), void *data) { struct gen_pool_chunk *chunk; rcu_read_lock(); list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) func(pool, chunk, data); rcu_read_unlock(); } EXPORT_SYMBOL(gen_pool_for_each_chunk); /** * gen_pool_has_addr - checks if an address falls within the range of a pool * @pool: the generic memory pool * @start: start address * @size: size of the region * * Check if the range of addresses falls within the specified pool. Returns * true if the entire range is contained in the pool and false otherwise. */ bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start, size_t size) { bool found = false; unsigned long end = start + size - 1; struct gen_pool_chunk *chunk; rcu_read_lock(); list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { if (start >= chunk->start_addr && start <= chunk->end_addr) { if (end <= chunk->end_addr) { found = true; break; } } } rcu_read_unlock(); return found; } EXPORT_SYMBOL(gen_pool_has_addr); /** * gen_pool_avail - get available free space of the pool * @pool: pool to get available free space * * Return available free space of the specified pool. 
*/ size_t gen_pool_avail(struct gen_pool *pool) { struct gen_pool_chunk *chunk; size_t avail = 0; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) avail += atomic_long_read(&chunk->avail); rcu_read_unlock(); return avail; } EXPORT_SYMBOL_GPL(gen_pool_avail); /** * gen_pool_size - get size in bytes of memory managed by the pool * @pool: pool to get size * * Return size in bytes of memory managed by the pool. */ size_t gen_pool_size(struct gen_pool *pool) { struct gen_pool_chunk *chunk; size_t size = 0; rcu_read_lock(); list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) size += chunk_size(chunk); rcu_read_unlock(); return size; } EXPORT_SYMBOL_GPL(gen_pool_size); /** * gen_pool_set_algo - set the allocation algorithm * @pool: pool to change allocation algorithm * @algo: custom algorithm function * @data: additional data used by @algo * * Call @algo for each memory allocation in the pool. * If @algo is NULL use gen_pool_first_fit as default * memory allocation function. */ void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data) { rcu_read_lock(); pool->algo = algo; if (!pool->algo) pool->algo = gen_pool_first_fit; pool->data = data; rcu_read_unlock(); } EXPORT_SYMBOL(gen_pool_set_algo); /** * gen_pool_first_fit - find the first available region * of memory matching the size requirement (no alignment constraint) * @map: The address to base the search on * @size: The bitmap size in bits * @start: The bitnumber to start searching at * @nr: The number of zeroed bits we're looking for * @data: additional data - unused * @pool: pool to find the fit region memory from * @start_addr: not used in this function */ unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, struct gen_pool *pool, unsigned long start_addr) { return bitmap_find_next_zero_area(map, size, start, nr, 0); } EXPORT_SYMBOL(gen_pool_first_fit); /** * gen_pool_first_fit_align - find the first available region * of memory matching the size requirement (alignment constraint) * @map: The address to base the search on * @size: The bitmap size in bits * @start: The bitnumber to start searching at * @nr: The number of zeroed bits we're looking for * @data: data for alignment * @pool: pool to get order from * @start_addr: start addr of alloction chunk */ unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, struct gen_pool *pool, unsigned long start_addr) { struct genpool_data_align *alignment; unsigned long align_mask, align_off; int order; alignment = data; order = pool->min_alloc_order; align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1; align_off = (start_addr & (alignment->align - 1)) >> order; return bitmap_find_next_zero_area_off(map, size, start, nr, align_mask, align_off); } EXPORT_SYMBOL(gen_pool_first_fit_align); /** * gen_pool_fixed_alloc - reserve a specific region * @map: The address to base the search on * @size: The bitmap size in bits * @start: The bitnumber to start searching at * @nr: The number of zeroed bits we're looking for * @data: data for alignment * @pool: pool to get order from * @start_addr: not used in this function */ unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, struct gen_pool *pool, unsigned long start_addr) { struct genpool_data_fixed *fixed_data; int order; unsigned long offset_bit; unsigned long start_bit; 
fixed_data = data; order = pool->min_alloc_order; offset_bit = fixed_data->offset >> order; if (WARN_ON(fixed_data->offset & ((1UL << order) - 1))) return size; start_bit = bitmap_find_next_zero_area(map, size, start + offset_bit, nr, 0); if (start_bit != offset_bit) start_bit = size; return start_bit; } EXPORT_SYMBOL(gen_pool_fixed_alloc); /** * gen_pool_first_fit_order_align - find the first available region * of memory matching the size requirement. The region will be aligned * to the order of the size specified. * @map: The address to base the search on * @size: The bitmap size in bits * @start: The bitnumber to start searching at * @nr: The number of zeroed bits we're looking for * @data: additional data - unused * @pool: pool to find the fit region memory from * @start_addr: not used in this function */ unsigned long gen_pool_first_fit_order_align(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, struct gen_pool *pool, unsigned long start_addr) { unsigned long align_mask = roundup_pow_of_two(nr) - 1; return bitmap_find_next_zero_area(map, size, start, nr, align_mask); } EXPORT_SYMBOL(gen_pool_first_fit_order_align); /** * gen_pool_best_fit - find the best fitting region of memory * matching the size requirement (no alignment constraint) * @map: The address to base the search on * @size: The bitmap size in bits * @start: The bitnumber to start searching at * @nr: The number of zeroed bits we're looking for * @data: additional data - unused * @pool: pool to find the fit region memory from * @start_addr: not used in this function * * Iterate over the bitmap to find the smallest free region * which we can allocate the memory. */ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data, struct gen_pool *pool, unsigned long start_addr) { unsigned long start_bit = size; unsigned long len = size + 1; unsigned long index; index = bitmap_find_next_zero_area(map, size, start, nr, 0); while (index < size) { unsigned long next_bit = find_next_bit(map, size, index + nr); if ((next_bit - index) < len) { len = next_bit - index; start_bit = index; if (len == nr) return start_bit; } index = bitmap_find_next_zero_area(map, size, next_bit + 1, nr, 0); } return start_bit; } EXPORT_SYMBOL(gen_pool_best_fit); static void devm_gen_pool_release(struct device *dev, void *res) { gen_pool_destroy(*(struct gen_pool **)res); } static int devm_gen_pool_match(struct device *dev, void *res, void *data) { struct gen_pool **p = res; /* NULL data matches only a pool without an assigned name */ if (!data && !(*p)->name) return 1; if (!data || !(*p)->name) return 0; return !strcmp((*p)->name, data); } /** * gen_pool_get - Obtain the gen_pool (if any) for a device * @dev: device to retrieve the gen_pool from * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device * * Returns the gen_pool for the device if one is present, or NULL. 
*/ struct gen_pool *gen_pool_get(struct device *dev, const char *name) { struct gen_pool **p; p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match, (void *)name); if (!p) return NULL; return *p; } EXPORT_SYMBOL_GPL(gen_pool_get); /** * devm_gen_pool_create - managed gen_pool_create * @dev: device that provides the gen_pool * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device * * Create a new special memory pool that can be used to manage special purpose * memory not managed by the regular kmalloc/kfree interface. The pool will be * automatically destroyed by the device management code. */ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order, int nid, const char *name) { struct gen_pool **ptr, *pool; const char *pool_name = NULL; /* Check that genpool to be created is uniquely addressed on device */ if (gen_pool_get(dev, name)) return ERR_PTR(-EINVAL); if (name) { pool_name = kstrdup_const(name, GFP_KERNEL); if (!pool_name) return ERR_PTR(-ENOMEM); } ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) goto free_pool_name; pool = gen_pool_create(min_alloc_order, nid); if (!pool) goto free_devres; *ptr = pool; pool->name = pool_name; devres_add(dev, ptr); return pool; free_devres: devres_free(ptr); free_pool_name: kfree_const(pool_name); return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL(devm_gen_pool_create); #ifdef CONFIG_OF /** * of_gen_pool_get - find a pool by phandle property * @np: device node * @propname: property name containing phandle(s) * @index: index into the phandle array * * Returns the pool that contains the chunk starting at the physical * address of the device tree node pointed at by the phandle property, * or NULL if not found. */ struct gen_pool *of_gen_pool_get(struct device_node *np, const char *propname, int index) { struct platform_device *pdev; struct device_node *np_pool, *parent; const char *name = NULL; struct gen_pool *pool = NULL; np_pool = of_parse_phandle(np, propname, index); if (!np_pool) return NULL; pdev = of_find_device_by_node(np_pool); if (!pdev) { /* Check if named gen_pool is created by parent node device */ parent = of_get_parent(np_pool); pdev = of_find_device_by_node(parent); of_node_put(parent); of_property_read_string(np_pool, "label", &name); if (!name) name = of_node_full_name(np_pool); } if (pdev) pool = gen_pool_get(&pdev->dev, name); of_node_put(np_pool); return pool; } EXPORT_SYMBOL_GPL(of_gen_pool_get); #endif /* CONFIG_OF */
linux-master
lib/genalloc.c
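A small caller makes the gen_pool flow above concrete. The sketch below is illustrative only (not from the kernel tree): sram_virt, sram_phys and sram_size stand for a region the caller has already mapped by other means, and only the gen_pool_*() calls are real API.

/*
 * Illustrative sketch, not part of lib/genalloc.c: carve a pre-mapped SRAM
 * region into a gen_pool and allocate 256 bytes from it. The sram_* values
 * are hypothetical inputs.
 */
#include <linux/genalloc.h>
#include <linux/kernel.h>

static int demo_genalloc(unsigned long sram_virt, phys_addr_t sram_phys,
			 size_t sram_size)
{
	struct gen_pool *pool;
	unsigned long chunk;
	phys_addr_t phys;
	int err;

	/* Minimum allocation granularity: 2^6 = 64 bytes per bitmap bit. */
	pool = gen_pool_create(6, -1);
	if (!pool)
		return -ENOMEM;

	err = gen_pool_add_virt(pool, sram_virt, sram_phys, sram_size, -1);
	if (err) {
		gen_pool_destroy(pool);
		return err;
	}

	chunk = gen_pool_alloc(pool, 256);
	if (!chunk) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	/* gen_pool_virt_to_phys() gives the physical/DMA view of the chunk. */
	phys = gen_pool_virt_to_phys(pool, chunk);
	pr_info("demo: 256 bytes at phys %pa\n", &phys);

	/* All allocations must be returned before the pool is destroyed. */
	gen_pool_free(pool, chunk, 256);
	gen_pool_destroy(pool);
	return 0;
}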
// SPDX-License-Identifier: GPL-2.0-only #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include "notifier-error-inject.h" static int priority; module_param(priority, int, 0); MODULE_PARM_DESC(priority, "specify OF reconfig notifier priority"); static struct notifier_err_inject reconfig_err_inject = { .actions = { { NOTIFIER_ERR_INJECT_ACTION(OF_RECONFIG_ATTACH_NODE) }, { NOTIFIER_ERR_INJECT_ACTION(OF_RECONFIG_DETACH_NODE) }, { NOTIFIER_ERR_INJECT_ACTION(OF_RECONFIG_ADD_PROPERTY) }, { NOTIFIER_ERR_INJECT_ACTION(OF_RECONFIG_REMOVE_PROPERTY) }, { NOTIFIER_ERR_INJECT_ACTION(OF_RECONFIG_UPDATE_PROPERTY) }, {} } }; static struct dentry *dir; static int err_inject_init(void) { int err; dir = notifier_err_inject_init("OF-reconfig", notifier_err_inject_dir, &reconfig_err_inject, priority); if (IS_ERR(dir)) return PTR_ERR(dir); err = of_reconfig_notifier_register(&reconfig_err_inject.nb); if (err) debugfs_remove_recursive(dir); return err; } static void err_inject_exit(void) { of_reconfig_notifier_unregister(&reconfig_err_inject.nb); debugfs_remove_recursive(dir); } module_init(err_inject_init); module_exit(err_inject_exit); MODULE_DESCRIPTION("OF reconfig notifier error injection module"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Akinobu Mita <[email protected]>");
linux-master
lib/of-reconfig-notifier-error-inject.c
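For context, the module above injects errors into the OF reconfig notifier chain. The sketch below (illustrative, not from the kernel tree) shows a minimal notifier of the kind that sits on that same chain; the demo_* names are invented, while of_reconfig_notifier_register() and the OF_RECONFIG_* actions are the same ones used above.

/*
 * Illustrative sketch, not part of the module above: a basic OF reconfig
 * notifier. The demo_* identifiers are hypothetical.
 */
#include <linux/of.h>
#include <linux/notifier.h>

static int demo_of_reconfig_cb(struct notifier_block *nb,
			       unsigned long action, void *arg)
{
	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
	case OF_RECONFIG_DETACH_NODE:
		/* React to device tree nodes coming and going. */
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block demo_of_reconfig_nb = {
	.notifier_call = demo_of_reconfig_cb,
};

static int demo_of_reconfig_init(void)
{
	/* Joins the chain that the error-injection module above targets. */
	return of_reconfig_notifier_register(&demo_of_reconfig_nb);
}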
// SPDX-License-Identifier: GPL-2.0-or-later /* * Testsuite for atomic64_t functions * * Copyright © 2010 Luca Barbieri */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/bug.h> #include <linux/kernel.h> #include <linux/atomic.h> #include <linux/module.h> #ifdef CONFIG_X86 #include <asm/cpufeature.h> /* for boot_cpu_has below */ #endif #define TEST(bit, op, c_op, val) \ do { \ atomic##bit##_set(&v, v0); \ r = v0; \ atomic##bit##_##op(val, &v); \ r c_op val; \ WARN(atomic##bit##_read(&v) != r, "%Lx != %Lx\n", \ (unsigned long long)atomic##bit##_read(&v), \ (unsigned long long)r); \ } while (0) /* * Test for a atomic operation family, * @test should be a macro accepting parameters (bit, op, ...) */ #define FAMILY_TEST(test, bit, op, args...) \ do { \ test(bit, op, ##args); \ test(bit, op##_acquire, ##args); \ test(bit, op##_release, ##args); \ test(bit, op##_relaxed, ##args); \ } while (0) #define TEST_RETURN(bit, op, c_op, val) \ do { \ atomic##bit##_set(&v, v0); \ r = v0; \ r c_op val; \ BUG_ON(atomic##bit##_##op(val, &v) != r); \ BUG_ON(atomic##bit##_read(&v) != r); \ } while (0) #define TEST_FETCH(bit, op, c_op, val) \ do { \ atomic##bit##_set(&v, v0); \ r = v0; \ r c_op val; \ BUG_ON(atomic##bit##_##op(val, &v) != v0); \ BUG_ON(atomic##bit##_read(&v) != r); \ } while (0) #define RETURN_FAMILY_TEST(bit, op, c_op, val) \ do { \ FAMILY_TEST(TEST_RETURN, bit, op, c_op, val); \ } while (0) #define FETCH_FAMILY_TEST(bit, op, c_op, val) \ do { \ FAMILY_TEST(TEST_FETCH, bit, op, c_op, val); \ } while (0) #define TEST_ARGS(bit, op, init, ret, expect, args...) \ do { \ atomic##bit##_set(&v, init); \ BUG_ON(atomic##bit##_##op(&v, ##args) != ret); \ BUG_ON(atomic##bit##_read(&v) != expect); \ } while (0) #define XCHG_FAMILY_TEST(bit, init, new) \ do { \ FAMILY_TEST(TEST_ARGS, bit, xchg, init, init, new, new); \ } while (0) #define CMPXCHG_FAMILY_TEST(bit, init, new, wrong) \ do { \ FAMILY_TEST(TEST_ARGS, bit, cmpxchg, \ init, init, new, init, new); \ FAMILY_TEST(TEST_ARGS, bit, cmpxchg, \ init, init, init, wrong, new); \ } while (0) #define INC_RETURN_FAMILY_TEST(bit, i) \ do { \ FAMILY_TEST(TEST_ARGS, bit, inc_return, \ i, (i) + one, (i) + one); \ } while (0) #define DEC_RETURN_FAMILY_TEST(bit, i) \ do { \ FAMILY_TEST(TEST_ARGS, bit, dec_return, \ i, (i) - one, (i) - one); \ } while (0) static __init void test_atomic(void) { int v0 = 0xaaa31337; int v1 = 0xdeadbeef; int onestwos = 0x11112222; int one = 1; atomic_t v; int r; TEST(, add, +=, onestwos); TEST(, add, +=, -one); TEST(, sub, -=, onestwos); TEST(, sub, -=, -one); TEST(, or, |=, v1); TEST(, and, &=, v1); TEST(, xor, ^=, v1); TEST(, andnot, &= ~, v1); RETURN_FAMILY_TEST(, add_return, +=, onestwos); RETURN_FAMILY_TEST(, add_return, +=, -one); RETURN_FAMILY_TEST(, sub_return, -=, onestwos); RETURN_FAMILY_TEST(, sub_return, -=, -one); FETCH_FAMILY_TEST(, fetch_add, +=, onestwos); FETCH_FAMILY_TEST(, fetch_add, +=, -one); FETCH_FAMILY_TEST(, fetch_sub, -=, onestwos); FETCH_FAMILY_TEST(, fetch_sub, -=, -one); FETCH_FAMILY_TEST(, fetch_or, |=, v1); FETCH_FAMILY_TEST(, fetch_and, &=, v1); FETCH_FAMILY_TEST(, fetch_andnot, &= ~, v1); FETCH_FAMILY_TEST(, fetch_xor, ^=, v1); INC_RETURN_FAMILY_TEST(, v0); DEC_RETURN_FAMILY_TEST(, v0); XCHG_FAMILY_TEST(, v0, v1); CMPXCHG_FAMILY_TEST(, v0, v1, onestwos); } #define INIT(c) do { atomic64_set(&v, c); r = c; } while (0) static __init void test_atomic64(void) { long long v0 = 0xaaa31337c001d00dLL; long long v1 = 0xdeadbeefdeafcafeLL; long long v2 = 0xfaceabadf00df001LL; 
long long v3 = 0x8000000000000000LL; long long onestwos = 0x1111111122222222LL; long long one = 1LL; int r_int; atomic64_t v = ATOMIC64_INIT(v0); long long r = v0; BUG_ON(v.counter != r); atomic64_set(&v, v1); r = v1; BUG_ON(v.counter != r); BUG_ON(atomic64_read(&v) != r); TEST(64, add, +=, onestwos); TEST(64, add, +=, -one); TEST(64, sub, -=, onestwos); TEST(64, sub, -=, -one); TEST(64, or, |=, v1); TEST(64, and, &=, v1); TEST(64, xor, ^=, v1); TEST(64, andnot, &= ~, v1); RETURN_FAMILY_TEST(64, add_return, +=, onestwos); RETURN_FAMILY_TEST(64, add_return, +=, -one); RETURN_FAMILY_TEST(64, sub_return, -=, onestwos); RETURN_FAMILY_TEST(64, sub_return, -=, -one); FETCH_FAMILY_TEST(64, fetch_add, +=, onestwos); FETCH_FAMILY_TEST(64, fetch_add, +=, -one); FETCH_FAMILY_TEST(64, fetch_sub, -=, onestwos); FETCH_FAMILY_TEST(64, fetch_sub, -=, -one); FETCH_FAMILY_TEST(64, fetch_or, |=, v1); FETCH_FAMILY_TEST(64, fetch_and, &=, v1); FETCH_FAMILY_TEST(64, fetch_andnot, &= ~, v1); FETCH_FAMILY_TEST(64, fetch_xor, ^=, v1); INIT(v0); atomic64_inc(&v); r += one; BUG_ON(v.counter != r); INIT(v0); atomic64_dec(&v); r -= one; BUG_ON(v.counter != r); INC_RETURN_FAMILY_TEST(64, v0); DEC_RETURN_FAMILY_TEST(64, v0); XCHG_FAMILY_TEST(64, v0, v1); CMPXCHG_FAMILY_TEST(64, v0, v1, v2); INIT(v0); BUG_ON(atomic64_add_unless(&v, one, v0)); BUG_ON(v.counter != r); INIT(v0); BUG_ON(!atomic64_add_unless(&v, one, v1)); r += one; BUG_ON(v.counter != r); INIT(onestwos); BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1)); r -= one; BUG_ON(v.counter != r); INIT(0); BUG_ON(atomic64_dec_if_positive(&v) != -one); BUG_ON(v.counter != r); INIT(-one); BUG_ON(atomic64_dec_if_positive(&v) != (-one - one)); BUG_ON(v.counter != r); INIT(onestwos); BUG_ON(!atomic64_inc_not_zero(&v)); r += one; BUG_ON(v.counter != r); INIT(0); BUG_ON(atomic64_inc_not_zero(&v)); BUG_ON(v.counter != r); INIT(-one); BUG_ON(!atomic64_inc_not_zero(&v)); r += one; BUG_ON(v.counter != r); /* Confirm the return value fits in an int, even if the value doesn't */ INIT(v3); r_int = atomic64_inc_not_zero(&v); BUG_ON(!r_int); } static __init int test_atomics_init(void) { test_atomic(); test_atomic64(); #ifdef CONFIG_X86 pr_info("passed for %s platform %s CX8 and %s SSE\n", #ifdef CONFIG_X86_64 "x86-64", #elif defined(CONFIG_X86_CMPXCHG64) "i586+", #else "i386+", #endif boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without", boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without"); #else pr_info("passed\n"); #endif return 0; } static __exit void test_atomics_exit(void) {} module_init(test_atomics_init); module_exit(test_atomics_exit); MODULE_LICENSE("GPL");
linux-master
lib/atomic64_test.c
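The FAMILY_TEST()/TEST_RETURN() macros above pack four checks into each invocation. As a rough hand expansion (illustrative only, reusing the constants from test_atomic64(); only the plain and _acquire variants are written out, the _release and _relaxed ones follow the same shape), RETURN_FAMILY_TEST(64, add_return, +=, onestwos) boils down to:

#include <linux/atomic.h>
#include <linux/bug.h>

static __init void example_expanded_add_return(void)
{
	long long v0 = 0xaaa31337c001d00dLL;
	long long onestwos = 0x1111111122222222LL;
	atomic64_t v;
	long long r;

	/* TEST_RETURN(64, add_return, +=, onestwos) */
	atomic64_set(&v, v0);
	r = v0;
	r += onestwos;
	BUG_ON(atomic64_add_return(onestwos, &v) != r);
	BUG_ON(atomic64_read(&v) != r);

	/* TEST_RETURN(64, add_return_acquire, +=, onestwos) */
	atomic64_set(&v, v0);
	r = v0;
	r += onestwos;
	BUG_ON(atomic64_add_return_acquire(onestwos, &v) != r);
	BUG_ON(atomic64_read(&v) != r);
}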
// SPDX-License-Identifier: GPL-2.0-or-later /* * lib/ts_fsm.c A naive finite state machine text search approach * * Authors: Thomas Graf <[email protected]> * * ========================================================================== * * A finite state machine consists of n states (struct ts_fsm_token) * representing the pattern as a finite automaton. The data is read * sequentially on an octet basis. Every state token specifies the number * of recurrences and the type of value accepted which can be either a * specific character or ctype based set of characters. The available * type of recurrences include 1, (0|1), [0 n], and [1 n]. * * The algorithm differs between strict/non-strict mode specifying * whether the pattern has to start at the first octet. Strict mode * is enabled by default and can be disabled by inserting * TS_FSM_HEAD_IGNORE as the first token in the chain. * * The runtime performance of the algorithm should be around O(n), * however while in strict mode the average runtime can be better. */ #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/textsearch.h> #include <linux/textsearch_fsm.h> struct ts_fsm { unsigned int ntokens; struct ts_fsm_token tokens[]; }; /* other values derived from ctype.h */ #define _A 0x100 /* ascii */ #define _W 0x200 /* wildcard */ /* Map to _ctype flags and some magic numbers */ static const u16 token_map[TS_FSM_TYPE_MAX+1] = { [TS_FSM_SPECIFIC] = 0, [TS_FSM_WILDCARD] = _W, [TS_FSM_CNTRL] = _C, [TS_FSM_LOWER] = _L, [TS_FSM_UPPER] = _U, [TS_FSM_PUNCT] = _P, [TS_FSM_SPACE] = _S, [TS_FSM_DIGIT] = _D, [TS_FSM_XDIGIT] = _D | _X, [TS_FSM_ALPHA] = _U | _L, [TS_FSM_ALNUM] = _U | _L | _D, [TS_FSM_PRINT] = _P | _U | _L | _D | _SP, [TS_FSM_GRAPH] = _P | _U | _L | _D, [TS_FSM_ASCII] = _A, }; static const u16 token_lookup_tbl[256] = { _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 0- 3 */ _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 4- 7 */ _W|_A|_C, _W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C|_S, /* 8- 11 */ _W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C, _W|_A|_C, /* 12- 15 */ _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 16- 19 */ _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 20- 23 */ _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 24- 27 */ _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 28- 31 */ _W|_A|_S|_SP, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 32- 35 */ _W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 36- 39 */ _W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 40- 43 */ _W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 44- 47 */ _W|_A|_D, _W|_A|_D, _W|_A|_D, _W|_A|_D, /* 48- 51 */ _W|_A|_D, _W|_A|_D, _W|_A|_D, _W|_A|_D, /* 52- 55 */ _W|_A|_D, _W|_A|_D, _W|_A|_P, _W|_A|_P, /* 56- 59 */ _W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 60- 63 */ _W|_A|_P, _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U|_X, /* 64- 67 */ _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U, /* 68- 71 */ _W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 72- 75 */ _W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 76- 79 */ _W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 80- 83 */ _W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 84- 87 */ _W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_P, /* 88- 91 */ _W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 92- 95 */ _W|_A|_P, _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L|_X, /* 96- 99 */ _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L, /* 100-103 */ _W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 104-107 */ _W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 108-111 */ _W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 112-115 */ _W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 116-119 */ _W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_P, /* 
120-123 */ _W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_C, /* 124-127 */ _W, _W, _W, _W, /* 128-131 */ _W, _W, _W, _W, /* 132-135 */ _W, _W, _W, _W, /* 136-139 */ _W, _W, _W, _W, /* 140-143 */ _W, _W, _W, _W, /* 144-147 */ _W, _W, _W, _W, /* 148-151 */ _W, _W, _W, _W, /* 152-155 */ _W, _W, _W, _W, /* 156-159 */ _W|_S|_SP, _W|_P, _W|_P, _W|_P, /* 160-163 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 164-167 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 168-171 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 172-175 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 176-179 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 180-183 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 184-187 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 188-191 */ _W|_U, _W|_U, _W|_U, _W|_U, /* 192-195 */ _W|_U, _W|_U, _W|_U, _W|_U, /* 196-199 */ _W|_U, _W|_U, _W|_U, _W|_U, /* 200-203 */ _W|_U, _W|_U, _W|_U, _W|_U, /* 204-207 */ _W|_U, _W|_U, _W|_U, _W|_U, /* 208-211 */ _W|_U, _W|_U, _W|_U, _W|_P, /* 212-215 */ _W|_U, _W|_U, _W|_U, _W|_U, /* 216-219 */ _W|_U, _W|_U, _W|_U, _W|_L, /* 220-223 */ _W|_L, _W|_L, _W|_L, _W|_L, /* 224-227 */ _W|_L, _W|_L, _W|_L, _W|_L, /* 228-231 */ _W|_L, _W|_L, _W|_L, _W|_L, /* 232-235 */ _W|_L, _W|_L, _W|_L, _W|_L, /* 236-239 */ _W|_L, _W|_L, _W|_L, _W|_L, /* 240-243 */ _W|_L, _W|_L, _W|_L, _W|_P, /* 244-247 */ _W|_L, _W|_L, _W|_L, _W|_L, /* 248-251 */ _W|_L, _W|_L, _W|_L, _W|_L}; /* 252-255 */ static inline int match_token(struct ts_fsm_token *t, u8 d) { if (t->type) return (token_lookup_tbl[d] & t->type) != 0; else return t->value == d; } static unsigned int fsm_find(struct ts_config *conf, struct ts_state *state) { struct ts_fsm *fsm = ts_config_priv(conf); struct ts_fsm_token *cur = NULL, *next; unsigned int match_start, block_idx = 0, tok_idx; unsigned block_len = 0, strict, consumed = state->offset; const u8 *data; #define GET_NEXT_BLOCK() \ ({ consumed += block_idx; \ block_idx = 0; \ block_len = conf->get_next_block(consumed, &data, conf, state); }) #define TOKEN_MISMATCH() \ do { \ if (strict) \ goto no_match; \ block_idx++; \ goto startover; \ } while(0) #define end_of_data() unlikely(block_idx >= block_len && !GET_NEXT_BLOCK()) if (end_of_data()) goto no_match; strict = fsm->tokens[0].recur != TS_FSM_HEAD_IGNORE; startover: match_start = consumed + block_idx; for (tok_idx = 0; tok_idx < fsm->ntokens; tok_idx++) { cur = &fsm->tokens[tok_idx]; if (likely(tok_idx < (fsm->ntokens - 1))) next = &fsm->tokens[tok_idx + 1]; else next = NULL; switch (cur->recur) { case TS_FSM_SINGLE: if (end_of_data()) goto no_match; if (!match_token(cur, data[block_idx])) TOKEN_MISMATCH(); break; case TS_FSM_PERHAPS: if (end_of_data() || !match_token(cur, data[block_idx])) continue; break; case TS_FSM_MULTI: if (end_of_data()) goto no_match; if (!match_token(cur, data[block_idx])) TOKEN_MISMATCH(); block_idx++; fallthrough; case TS_FSM_ANY: if (next == NULL) goto found_match; if (end_of_data()) continue; while (!match_token(next, data[block_idx])) { if (!match_token(cur, data[block_idx])) TOKEN_MISMATCH(); block_idx++; if (end_of_data()) goto no_match; } continue; /* * Optimization: Prefer small local loop over jumping * back and forth until garbage at head is munched. */ case TS_FSM_HEAD_IGNORE: if (end_of_data()) continue; while (!match_token(next, data[block_idx])) { /* * Special case, don't start over upon * a mismatch, give the user the * chance to specify the type of data * allowed to be ignored. 
*/ if (!match_token(cur, data[block_idx])) goto no_match; block_idx++; if (end_of_data()) goto no_match; } match_start = consumed + block_idx; continue; } block_idx++; } if (end_of_data()) goto found_match; no_match: return UINT_MAX; found_match: state->offset = consumed + block_idx; return match_start; } static struct ts_config *fsm_init(const void *pattern, unsigned int len, gfp_t gfp_mask, int flags) { int i, err = -EINVAL; struct ts_config *conf; struct ts_fsm *fsm; struct ts_fsm_token *tokens = (struct ts_fsm_token *) pattern; unsigned int ntokens = len / sizeof(*tokens); size_t priv_size = sizeof(*fsm) + len; if (len % sizeof(struct ts_fsm_token) || ntokens < 1) goto errout; if (flags & TS_IGNORECASE) goto errout; for (i = 0; i < ntokens; i++) { struct ts_fsm_token *t = &tokens[i]; if (t->type > TS_FSM_TYPE_MAX || t->recur > TS_FSM_RECUR_MAX) goto errout; if (t->recur == TS_FSM_HEAD_IGNORE && (i != 0 || i == (ntokens - 1))) goto errout; } conf = alloc_ts_config(priv_size, gfp_mask); if (IS_ERR(conf)) return conf; conf->flags = flags; fsm = ts_config_priv(conf); fsm->ntokens = ntokens; memcpy(fsm->tokens, pattern, len); for (i = 0; i < fsm->ntokens; i++) { struct ts_fsm_token *t = &fsm->tokens[i]; t->type = token_map[t->type]; } return conf; errout: return ERR_PTR(err); } static void *fsm_get_pattern(struct ts_config *conf) { struct ts_fsm *fsm = ts_config_priv(conf); return fsm->tokens; } static unsigned int fsm_get_pattern_len(struct ts_config *conf) { struct ts_fsm *fsm = ts_config_priv(conf); return fsm->ntokens * sizeof(struct ts_fsm_token); } static struct ts_ops fsm_ops = { .name = "fsm", .find = fsm_find, .init = fsm_init, .get_pattern = fsm_get_pattern, .get_pattern_len = fsm_get_pattern_len, .owner = THIS_MODULE, .list = LIST_HEAD_INIT(fsm_ops.list) }; static int __init init_fsm(void) { return textsearch_register(&fsm_ops); } static void __exit exit_fsm(void) { textsearch_unregister(&fsm_ops); } MODULE_LICENSE("GPL"); module_init(init_fsm); module_exit(exit_fsm);
linux-master
lib/ts_fsm.c
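Callers reach this backend through the generic textsearch API rather than calling fsm_find() directly. A minimal sketch (the ts_fsm_token field names are assumed from linux/textsearch_fsm.h, and the two-token pattern below is made up) that skips leading non-digits and reports the offset of the first run of digits:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/textsearch.h>
#include <linux/textsearch_fsm.h>

static int example_find_digits(const u8 *data, unsigned int len)
{
	struct ts_fsm_token tokens[] = {
		/* non-strict mode: ignore any leading bytes ... */
		{ .type = TS_FSM_WILDCARD, .recur = TS_FSM_HEAD_IGNORE },
		/* ... until one or more digits are seen */
		{ .type = TS_FSM_DIGIT,    .recur = TS_FSM_MULTI },
	};
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("fsm", tokens, sizeof(tokens),
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	pos = textsearch_find_continuous(conf, &state, data, len);
	textsearch_destroy(conf);

	return pos == UINT_MAX ? -ENOENT : pos;
}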
#include <linux/export.h> #include <linux/generic-radix-tree.h> #include <linux/gfp.h> #include <linux/kmemleak.h> #define GENRADIX_ARY (PAGE_SIZE / sizeof(struct genradix_node *)) #define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY) struct genradix_node { union { /* Interior node: */ struct genradix_node *children[GENRADIX_ARY]; /* Leaf: */ u8 data[PAGE_SIZE]; }; }; static inline int genradix_depth_shift(unsigned depth) { return PAGE_SHIFT + GENRADIX_ARY_SHIFT * depth; } /* * Returns size (of data, in bytes) that a tree of a given depth holds: */ static inline size_t genradix_depth_size(unsigned depth) { return 1UL << genradix_depth_shift(depth); } /* depth that's needed for a genradix that can address up to ULONG_MAX: */ #define GENRADIX_MAX_DEPTH \ DIV_ROUND_UP(BITS_PER_LONG - PAGE_SHIFT, GENRADIX_ARY_SHIFT) #define GENRADIX_DEPTH_MASK \ ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1)) static inline unsigned genradix_root_to_depth(struct genradix_root *r) { return (unsigned long) r & GENRADIX_DEPTH_MASK; } static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r) { return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK); } /* * Returns pointer to the specified byte @offset within @radix, or NULL if not * allocated */ void *__genradix_ptr(struct __genradix *radix, size_t offset) { struct genradix_root *r = READ_ONCE(radix->root); struct genradix_node *n = genradix_root_to_node(r); unsigned level = genradix_root_to_depth(r); if (ilog2(offset) >= genradix_depth_shift(level)) return NULL; while (1) { if (!n) return NULL; if (!level) break; level--; n = n->children[offset >> genradix_depth_shift(level)]; offset &= genradix_depth_size(level) - 1; } return &n->data[offset]; } EXPORT_SYMBOL(__genradix_ptr); static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask) { struct genradix_node *node; node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO); /* * We're using pages (not slab allocations) directly for kernel data * structures, so we need to explicitly inform kmemleak of them in order * to avoid false positive memory leak reports. */ kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask); return node; } static inline void genradix_free_node(struct genradix_node *node) { kmemleak_free(node); free_page((unsigned long)node); } /* * Returns pointer to the specified byte @offset within @radix, allocating it if * necessary - newly allocated slots are always zeroed out: */ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset, gfp_t gfp_mask) { struct genradix_root *v = READ_ONCE(radix->root); struct genradix_node *n, *new_node = NULL; unsigned level; /* Increase tree depth if necessary: */ while (1) { struct genradix_root *r = v, *new_root; n = genradix_root_to_node(r); level = genradix_root_to_depth(r); if (n && ilog2(offset) < genradix_depth_shift(level)) break; if (!new_node) { new_node = genradix_alloc_node(gfp_mask); if (!new_node) return NULL; } new_node->children[0] = n; new_root = ((struct genradix_root *) ((unsigned long) new_node | (n ? 
level + 1 : 0))); if ((v = cmpxchg_release(&radix->root, r, new_root)) == r) { v = new_root; new_node = NULL; } } while (level--) { struct genradix_node **p = &n->children[offset >> genradix_depth_shift(level)]; offset &= genradix_depth_size(level) - 1; n = READ_ONCE(*p); if (!n) { if (!new_node) { new_node = genradix_alloc_node(gfp_mask); if (!new_node) return NULL; } if (!(n = cmpxchg_release(p, NULL, new_node))) swap(n, new_node); } } if (new_node) genradix_free_node(new_node); return &n->data[offset]; } EXPORT_SYMBOL(__genradix_ptr_alloc); void *__genradix_iter_peek(struct genradix_iter *iter, struct __genradix *radix, size_t objs_per_page) { struct genradix_root *r; struct genradix_node *n; unsigned level, i; restart: r = READ_ONCE(radix->root); if (!r) return NULL; n = genradix_root_to_node(r); level = genradix_root_to_depth(r); if (ilog2(iter->offset) >= genradix_depth_shift(level)) return NULL; while (level) { level--; i = (iter->offset >> genradix_depth_shift(level)) & (GENRADIX_ARY - 1); while (!n->children[i]) { i++; iter->offset = round_down(iter->offset + genradix_depth_size(level), genradix_depth_size(level)); iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page; if (i == GENRADIX_ARY) goto restart; } n = n->children[i]; } return &n->data[iter->offset & (PAGE_SIZE - 1)]; } EXPORT_SYMBOL(__genradix_iter_peek); static void genradix_free_recurse(struct genradix_node *n, unsigned level) { if (level) { unsigned i; for (i = 0; i < GENRADIX_ARY; i++) if (n->children[i]) genradix_free_recurse(n->children[i], level - 1); } genradix_free_node(n); } int __genradix_prealloc(struct __genradix *radix, size_t size, gfp_t gfp_mask) { size_t offset; for (offset = 0; offset < size; offset += PAGE_SIZE) if (!__genradix_ptr_alloc(radix, offset, gfp_mask)) return -ENOMEM; return 0; } EXPORT_SYMBOL(__genradix_prealloc); void __genradix_free(struct __genradix *radix) { struct genradix_root *r = xchg(&radix->root, NULL); genradix_free_recurse(genradix_root_to_node(r), genradix_root_to_depth(r)); } EXPORT_SYMBOL(__genradix_free);
linux-master
lib/generic-radix-tree.c
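The __genradix_*() exports above sit behind type-safe wrapper macros in linux/generic-radix-tree.h. A minimal sketch of typical use (the GENRADIX()/genradix_*() names are assumed from that header; struct foo and the index are made up):

#include <linux/generic-radix-tree.h>
#include <linux/gfp.h>

struct foo {
	u64	id;
	u32	flags;
};

static GENRADIX(struct foo) foo_table;

static int example_genradix(void)
{
	struct foo *p;

	genradix_init(&foo_table);

	/* Allocates (and zeroes) the backing page for index 1000 on first use. */
	p = genradix_ptr_alloc(&foo_table, 1000, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->id = 1000;

	/* Plain lookups return NULL for slots that were never allocated. */
	p = genradix_ptr(&foo_table, 1000);

	genradix_free(&foo_table);
	return 0;
}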
// SPDX-License-Identifier: GPL-2.0-or-later
/* */

#include <linux/export.h>
#include <linux/libgcc.h>

long long notrace __ashrdi3(long long u, word_type b)
{
	DWunion uu, w;
	word_type bm;

	if (b == 0)
		return u;

	uu.ll = u;
	bm = 32 - b;

	if (bm <= 0) {
		/* w.s.high = 1..1 or 0..0 */
		w.s.high = uu.s.high >> 31;
		w.s.low = uu.s.high >> -bm;
	} else {
		const unsigned int carries = (unsigned int) uu.s.high << bm;

		w.s.high = uu.s.high >> b;
		w.s.low = ((unsigned int) uu.s.low >> b) | carries;
	}

	return w.ll;
}
EXPORT_SYMBOL(__ashrdi3);
linux-master
lib/ashrdi3.c
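To make the bm <= 0 branch concrete: for u = INT64_MIN and b = 32, uu.s.high is 0x80000000, so w.s.high becomes the sign fill 0xffffffff and w.s.low becomes uu.s.high >> 0 = 0x80000000, i.e. the same 0xffffffff80000000 a native arithmetic shift would produce. Below is a small userspace check that mirrors the algorithm with an explicit high/low split (it relies on arithmetic right shift of signed values, exactly as the kernel helper does, and is not part of the file above):

#include <assert.h>
#include <stdint.h>

static long long ashrdi3_ref(long long u, int b)
{
	int32_t hi = (int32_t)(u >> 32);	/* uu.s.high */
	uint32_t lo = (uint32_t)u;		/* uu.s.low  */
	int bm = 32 - b;
	int32_t whi;
	uint32_t wlo;

	if (b == 0)
		return u;

	if (bm <= 0) {
		whi = hi >> 31;			/* sign fill: 1..1 or 0..0 */
		wlo = (uint32_t)(hi >> -bm);
	} else {
		whi = hi >> b;
		wlo = (lo >> b) | ((uint32_t)hi << bm);
	}

	return (long long)(((uint64_t)(uint32_t)whi << 32) | wlo);
}

int main(void)
{
	assert(ashrdi3_ref(INT64_MIN, 32) == (INT64_MIN >> 32));
	assert(ashrdi3_ref(-4096LL, 5) == (-4096LL >> 5));
	assert(ashrdi3_ref(0x12345678abcdLL, 0) == 0x12345678abcdLL);
	return 0;
}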
// SPDX-License-Identifier: GPL-2.0-or-later /* lru_cache.c This file is part of DRBD by Philipp Reisner and Lars Ellenberg. Copyright (C) 2003-2008, LINBIT Information Technologies GmbH. Copyright (C) 2003-2008, Philipp Reisner <[email protected]>. Copyright (C) 2003-2008, Lars Ellenberg <[email protected]>. */ #include <linux/module.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/string.h> /* for memset */ #include <linux/seq_file.h> /* for seq_printf */ #include <linux/lru_cache.h> MODULE_AUTHOR("Philipp Reisner <[email protected]>, " "Lars Ellenberg <[email protected]>"); MODULE_DESCRIPTION("lru_cache - Track sets of hot objects"); MODULE_LICENSE("GPL"); /* this is developers aid only. * it catches concurrent access (lack of locking on the users part) */ #define PARANOIA_ENTRY() do { \ BUG_ON(!lc); \ BUG_ON(!lc->nr_elements); \ BUG_ON(test_and_set_bit(__LC_PARANOIA, &lc->flags)); \ } while (0) #define RETURN(x...) do { \ clear_bit_unlock(__LC_PARANOIA, &lc->flags); \ return x ; } while (0) /* BUG() if e is not one of the elements tracked by lc */ #define PARANOIA_LC_ELEMENT(lc, e) do { \ struct lru_cache *lc_ = (lc); \ struct lc_element *e_ = (e); \ unsigned i = e_->lc_index; \ BUG_ON(i >= lc_->nr_elements); \ BUG_ON(lc_->lc_element[i] != e_); } while (0) /* We need to atomically * - try to grab the lock (set LC_LOCKED) * - only if there is no pending transaction * (neither LC_DIRTY nor LC_STARVING is set) * Because of PARANOIA_ENTRY() above abusing lc->flags as well, * it is not sufficient to just say * return 0 == cmpxchg(&lc->flags, 0, LC_LOCKED); */ int lc_try_lock(struct lru_cache *lc) { unsigned long val; do { val = cmpxchg(&lc->flags, 0, LC_LOCKED); } while (unlikely (val == LC_PARANOIA)); /* Spin until no-one is inside a PARANOIA_ENTRY()/RETURN() section. */ return 0 == val; } /** * lc_create - prepares to track objects in an active set * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details * @cache: cache root pointer * @max_pending_changes: maximum changes to accumulate until a transaction is required * @e_count: number of elements allowed to be active simultaneously * @e_size: size of the tracked objects * @e_off: offset to the &struct lc_element member in a tracked object * * Returns a pointer to a newly initialized struct lru_cache on success, * or NULL on (allocation) failure. */ struct lru_cache *lc_create(const char *name, struct kmem_cache *cache, unsigned max_pending_changes, unsigned e_count, size_t e_size, size_t e_off) { struct hlist_head *slot = NULL; struct lc_element **element = NULL; struct lru_cache *lc; struct lc_element *e; unsigned cache_obj_size = kmem_cache_size(cache); unsigned i; WARN_ON(cache_obj_size < e_size); if (cache_obj_size < e_size) return NULL; /* e_count too big; would probably fail the allocation below anyways. * for typical use cases, e_count should be few thousand at most. 
*/ if (e_count > LC_MAX_ACTIVE) return NULL; slot = kcalloc(e_count, sizeof(struct hlist_head), GFP_KERNEL); if (!slot) goto out_fail; element = kcalloc(e_count, sizeof(struct lc_element *), GFP_KERNEL); if (!element) goto out_fail; lc = kzalloc(sizeof(*lc), GFP_KERNEL); if (!lc) goto out_fail; INIT_LIST_HEAD(&lc->in_use); INIT_LIST_HEAD(&lc->lru); INIT_LIST_HEAD(&lc->free); INIT_LIST_HEAD(&lc->to_be_changed); lc->name = name; lc->element_size = e_size; lc->element_off = e_off; lc->nr_elements = e_count; lc->max_pending_changes = max_pending_changes; lc->lc_cache = cache; lc->lc_element = element; lc->lc_slot = slot; /* preallocate all objects */ for (i = 0; i < e_count; i++) { void *p = kmem_cache_alloc(cache, GFP_KERNEL); if (!p) break; memset(p, 0, lc->element_size); e = p + e_off; e->lc_index = i; e->lc_number = LC_FREE; e->lc_new_number = LC_FREE; list_add(&e->list, &lc->free); element[i] = e; } if (i == e_count) return lc; /* else: could not allocate all elements, give up */ while (i) { void *p = element[--i]; kmem_cache_free(cache, p - e_off); } kfree(lc); out_fail: kfree(element); kfree(slot); return NULL; } static void lc_free_by_index(struct lru_cache *lc, unsigned i) { void *p = lc->lc_element[i]; WARN_ON(!p); if (p) { p -= lc->element_off; kmem_cache_free(lc->lc_cache, p); } } /** * lc_destroy - frees memory allocated by lc_create() * @lc: the lru cache to destroy */ void lc_destroy(struct lru_cache *lc) { unsigned i; if (!lc) return; for (i = 0; i < lc->nr_elements; i++) lc_free_by_index(lc, i); kfree(lc->lc_element); kfree(lc->lc_slot); kfree(lc); } /** * lc_reset - does a full reset for @lc and the hash table slots. * @lc: the lru cache to operate on * * It is roughly the equivalent of re-allocating a fresh lru_cache object, * basically a short cut to lc_destroy(lc); lc = lc_create(...); */ void lc_reset(struct lru_cache *lc) { unsigned i; INIT_LIST_HEAD(&lc->in_use); INIT_LIST_HEAD(&lc->lru); INIT_LIST_HEAD(&lc->free); INIT_LIST_HEAD(&lc->to_be_changed); lc->used = 0; lc->hits = 0; lc->misses = 0; lc->starving = 0; lc->locked = 0; lc->changed = 0; lc->pending_changes = 0; lc->flags = 0; memset(lc->lc_slot, 0, sizeof(struct hlist_head) * lc->nr_elements); for (i = 0; i < lc->nr_elements; i++) { struct lc_element *e = lc->lc_element[i]; void *p = e; p -= lc->element_off; memset(p, 0, lc->element_size); /* re-init it */ e->lc_index = i; e->lc_number = LC_FREE; e->lc_new_number = LC_FREE; list_add(&e->list, &lc->free); } } /** * lc_seq_printf_stats - print stats about @lc into @seq * @seq: the seq_file to print into * @lc: the lru cache to print statistics of */ void lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc) { /* NOTE: * total calls to lc_get are * (starving + hits + misses) * misses include "locked" count (update from an other thread in * progress) and "changed", when this in fact lead to an successful * update of the cache. 
*/ seq_printf(seq, "\t%s: used:%u/%u hits:%lu misses:%lu starving:%lu locked:%lu changed:%lu\n", lc->name, lc->used, lc->nr_elements, lc->hits, lc->misses, lc->starving, lc->locked, lc->changed); } static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr) { return lc->lc_slot + (enr % lc->nr_elements); } static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr, bool include_changing) { struct lc_element *e; BUG_ON(!lc); BUG_ON(!lc->nr_elements); hlist_for_each_entry(e, lc_hash_slot(lc, enr), colision) { /* "about to be changed" elements, pending transaction commit, * are hashed by their "new number". "Normal" elements have * lc_number == lc_new_number. */ if (e->lc_new_number != enr) continue; if (e->lc_new_number == e->lc_number || include_changing) return e; break; } return NULL; } /** * lc_find - find element by label, if present in the hash table * @lc: The lru_cache object * @enr: element number * * Returns the pointer to an element, if the element with the requested * "label" or element number is present in the hash table, * or NULL if not found. Does not change the refcnt. * Ignores elements that are "about to be used", i.e. not yet in the active * set, but still pending transaction commit. */ struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr) { return __lc_find(lc, enr, 0); } /** * lc_is_used - find element by label * @lc: The lru_cache object * @enr: element number * * Returns true, if the element with the requested "label" or element number is * present in the hash table, and is used (refcnt > 0). * Also finds elements that are not _currently_ used but only "about to be * used", i.e. on the "to_be_changed" list, pending transaction commit. */ bool lc_is_used(struct lru_cache *lc, unsigned int enr) { struct lc_element *e = __lc_find(lc, enr, 1); return e && e->refcnt; } /** * lc_del - removes an element from the cache * @lc: The lru_cache object * @e: The element to remove * * @e must be unused (refcnt == 0). Moves @e from "lru" to "free" list, * sets @e->enr to %LC_FREE. */ void lc_del(struct lru_cache *lc, struct lc_element *e) { PARANOIA_ENTRY(); PARANOIA_LC_ELEMENT(lc, e); BUG_ON(e->refcnt); e->lc_number = e->lc_new_number = LC_FREE; hlist_del_init(&e->colision); list_move(&e->list, &lc->free); RETURN(); } static struct lc_element *lc_prepare_for_change(struct lru_cache *lc, unsigned new_number) { struct list_head *n; struct lc_element *e; if (!list_empty(&lc->free)) n = lc->free.next; else if (!list_empty(&lc->lru)) n = lc->lru.prev; else return NULL; e = list_entry(n, struct lc_element, list); PARANOIA_LC_ELEMENT(lc, e); e->lc_new_number = new_number; if (!hlist_unhashed(&e->colision)) __hlist_del(&e->colision); hlist_add_head(&e->colision, lc_hash_slot(lc, new_number)); list_move(&e->list, &lc->to_be_changed); return e; } static int lc_unused_element_available(struct lru_cache *lc) { if (!list_empty(&lc->free)) return 1; /* something on the free list */ if (!list_empty(&lc->lru)) return 1; /* something to evict */ return 0; } /* used as internal flags to __lc_get */ enum { LC_GET_MAY_CHANGE = 1, LC_GET_MAY_USE_UNCOMMITTED = 2, }; static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsigned int flags) { struct lc_element *e; PARANOIA_ENTRY(); if (test_bit(__LC_STARVING, &lc->flags)) { ++lc->starving; RETURN(NULL); } e = __lc_find(lc, enr, 1); /* if lc_new_number != lc_number, * this enr is currently being pulled in already, * and will be available once the pending transaction * has been committed. 
*/ if (e) { if (e->lc_new_number != e->lc_number) { /* It has been found above, but on the "to_be_changed" * list, not yet committed. Don't pull it in twice, * wait for the transaction, then try again... */ if (!(flags & LC_GET_MAY_USE_UNCOMMITTED)) RETURN(NULL); /* ... unless the caller is aware of the implications, * probably preparing a cumulative transaction. */ ++e->refcnt; ++lc->hits; RETURN(e); } /* else: lc_new_number == lc_number; a real hit. */ ++lc->hits; if (e->refcnt++ == 0) lc->used++; list_move(&e->list, &lc->in_use); /* Not evictable... */ RETURN(e); } /* e == NULL */ ++lc->misses; if (!(flags & LC_GET_MAY_CHANGE)) RETURN(NULL); /* To avoid races with lc_try_lock(), first, mark us dirty * (using test_and_set_bit, as it implies memory barriers), ... */ test_and_set_bit(__LC_DIRTY, &lc->flags); /* ... only then check if it is locked anyways. If lc_unlock clears * the dirty bit again, that's not a problem, we will come here again. */ if (test_bit(__LC_LOCKED, &lc->flags)) { ++lc->locked; RETURN(NULL); } /* In case there is nothing available and we can not kick out * the LRU element, we have to wait ... */ if (!lc_unused_element_available(lc)) { set_bit(__LC_STARVING, &lc->flags); RETURN(NULL); } /* It was not present in the active set. We are going to recycle an * unused (or even "free") element, but we won't accumulate more than * max_pending_changes changes. */ if (lc->pending_changes >= lc->max_pending_changes) RETURN(NULL); e = lc_prepare_for_change(lc, enr); BUG_ON(!e); clear_bit(__LC_STARVING, &lc->flags); BUG_ON(++e->refcnt != 1); lc->used++; lc->pending_changes++; RETURN(e); } /** * lc_get - get element by label, maybe change the active set * @lc: the lru cache to operate on * @enr: the label to look up * * Finds an element in the cache, increases its usage count, * "touches" and returns it. * * In case the requested number is not present, it needs to be added to the * cache. Therefore it is possible that an other element becomes evicted from * the cache. In either case, the user is notified so he is able to e.g. keep * a persistent log of the cache changes, and therefore the objects in use. * * Return values: * NULL * The cache was marked %LC_STARVING, * or the requested label was not in the active set * and a changing transaction is still pending (@lc was marked %LC_DIRTY). * Or no unused or free element could be recycled (@lc will be marked as * %LC_STARVING, blocking further lc_get() operations). * * pointer to the element with the REQUESTED element number. * In this case, it can be used right away * * pointer to an UNUSED element with some different element number, * where that different number may also be %LC_FREE. * * In this case, the cache is marked %LC_DIRTY, * so lc_try_lock() will no longer succeed. * The returned element pointer is moved to the "to_be_changed" list, * and registered with the new element number on the hash collision chains, * so it is possible to pick it up from lc_is_used(). * Up to "max_pending_changes" (see lc_create()) can be accumulated. * The user now should do whatever housekeeping is necessary, * typically serialize on lc_try_lock_for_transaction(), then call * lc_committed(lc) and lc_unlock(), to finish the change. * * NOTE: The user needs to check the lc_number on EACH use, so he recognizes * any cache set change. 
*/ struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr) { return __lc_get(lc, enr, LC_GET_MAY_CHANGE); } /** * lc_get_cumulative - like lc_get; also finds to-be-changed elements * @lc: the lru cache to operate on * @enr: the label to look up * * Unlike lc_get this also returns the element for @enr, if it is belonging to * a pending transaction, so the return values are like for lc_get(), * plus: * * pointer to an element already on the "to_be_changed" list. * In this case, the cache was already marked %LC_DIRTY. * * Caller needs to make sure that the pending transaction is completed, * before proceeding to actually use this element. */ struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr) { return __lc_get(lc, enr, LC_GET_MAY_CHANGE|LC_GET_MAY_USE_UNCOMMITTED); } /** * lc_try_get - get element by label, if present; do not change the active set * @lc: the lru cache to operate on * @enr: the label to look up * * Finds an element in the cache, increases its usage count, * "touches" and returns it. * * Return values: * NULL * The cache was marked %LC_STARVING, * or the requested label was not in the active set * * pointer to the element with the REQUESTED element number. * In this case, it can be used right away */ struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr) { return __lc_get(lc, enr, 0); } /** * lc_committed - tell @lc that pending changes have been recorded * @lc: the lru cache to operate on * * User is expected to serialize on explicit lc_try_lock_for_transaction() * before the transaction is started, and later needs to lc_unlock() explicitly * as well. */ void lc_committed(struct lru_cache *lc) { struct lc_element *e, *tmp; PARANOIA_ENTRY(); list_for_each_entry_safe(e, tmp, &lc->to_be_changed, list) { /* count number of changes, not number of transactions */ ++lc->changed; e->lc_number = e->lc_new_number; list_move(&e->list, &lc->in_use); } lc->pending_changes = 0; RETURN(); } /** * lc_put - give up refcnt of @e * @lc: the lru cache to operate on * @e: the element to put * * If refcnt reaches zero, the element is moved to the lru list, * and a %LC_STARVING (if set) is cleared. * Returns the new (post-decrement) refcnt. */ unsigned int lc_put(struct lru_cache *lc, struct lc_element *e) { PARANOIA_ENTRY(); PARANOIA_LC_ELEMENT(lc, e); BUG_ON(e->refcnt == 0); BUG_ON(e->lc_number != e->lc_new_number); if (--e->refcnt == 0) { /* move it to the front of LRU. */ list_move(&e->list, &lc->lru); lc->used--; clear_bit_unlock(__LC_STARVING, &lc->flags); } RETURN(e->refcnt); } /** * lc_element_by_index * @lc: the lru cache to operate on * @i: the index of the element to return */ struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i) { BUG_ON(i >= lc->nr_elements); BUG_ON(lc->lc_element[i] == NULL); BUG_ON(lc->lc_element[i]->lc_index != i); return lc->lc_element[i]; } /** * lc_seq_dump_details - Dump a complete LRU cache to seq in textual form. * @lc: the lru cache to operate on * @seq: the &struct seq_file pointer to seq_printf into * @utext: user supplied additional "heading" or other info * @detail: function pointer the user may provide to dump further details * of the object the lc_element is embedded in. May be NULL. * Note: a leading space ' ' and trailing newline '\n' is implied. 
*/ void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext, void (*detail) (struct seq_file *, struct lc_element *)) { unsigned int nr_elements = lc->nr_elements; struct lc_element *e; int i; seq_printf(seq, "\tnn: lc_number (new nr) refcnt %s\n ", utext); for (i = 0; i < nr_elements; i++) { e = lc_element_by_index(lc, i); if (e->lc_number != e->lc_new_number) seq_printf(seq, "\t%5d: %6d %8d %6d ", i, e->lc_number, e->lc_new_number, e->refcnt); else seq_printf(seq, "\t%5d: %6d %-8s %6d ", i, e->lc_number, "-\"-", e->refcnt); if (detail) detail(seq, e); seq_putc(seq, '\n'); } } EXPORT_SYMBOL(lc_create); EXPORT_SYMBOL(lc_reset); EXPORT_SYMBOL(lc_destroy); EXPORT_SYMBOL(lc_del); EXPORT_SYMBOL(lc_try_get); EXPORT_SYMBOL(lc_find); EXPORT_SYMBOL(lc_get); EXPORT_SYMBOL(lc_put); EXPORT_SYMBOL(lc_committed); EXPORT_SYMBOL(lc_element_by_index); EXPORT_SYMBOL(lc_seq_printf_stats); EXPORT_SYMBOL(lc_seq_dump_details); EXPORT_SYMBOL(lc_try_lock); EXPORT_SYMBOL(lc_is_used); EXPORT_SYMBOL(lc_get_cumulative);
linux-master
lib/lru_cache.c
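The long lc_get() comment above describes the transaction protocol abstractly; condensed into caller-side code it looks roughly like the sketch below (modelled on DRBD's activity log; lc_try_lock_for_transaction() and lc_unlock() are assumed to be the small helpers from linux/lru_cache.h, and the persistence step is only described in a comment):

#include <linux/lru_cache.h>

/*
 * Sketch only: a real user must also handle the NULL cases by waiting and
 * retrying, and must wait for its transaction write to complete before
 * treating the element as part of the active set.
 */
static struct lc_element *activate_extent(struct lru_cache *lc, unsigned int enr)
{
	struct lc_element *e = lc_get(lc, enr);

	if (!e)
		return NULL;	/* starving, locked, or too many pending changes */

	if (e->lc_number != enr && lc_try_lock_for_transaction(lc)) {
		/*
		 * e was recycled for us and sits on the to_be_changed list;
		 * persist the pending changes here (e.g. write the new
		 * element numbers to an on-disk journal), then commit:
		 */
		lc_committed(lc);
		lc_unlock(lc);
	}

	return e;	/* caller drops the reference with lc_put(lc, e) */
}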
// SPDX-License-Identifier: GPL-2.0 /* * Test cases for memcat_p() in lib/memcat_p.c */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/string.h> #include <linux/slab.h> #include <linux/module.h> struct test_struct { int num; unsigned int magic; }; #define MAGIC 0xf00ff00f /* Size of each of the NULL-terminated input arrays */ #define INPUT_MAX 128 /* Expected number of non-NULL elements in the output array */ #define EXPECT (INPUT_MAX * 2 - 2) static int __init test_memcat_p_init(void) { struct test_struct **in0, **in1, **out, **p; int err = -ENOMEM, i, r, total = 0; in0 = kcalloc(INPUT_MAX, sizeof(*in0), GFP_KERNEL); if (!in0) return err; in1 = kcalloc(INPUT_MAX, sizeof(*in1), GFP_KERNEL); if (!in1) goto err_free_in0; for (i = 0, r = 1; i < INPUT_MAX - 1; i++) { in0[i] = kmalloc(sizeof(**in0), GFP_KERNEL); if (!in0[i]) goto err_free_elements; in1[i] = kmalloc(sizeof(**in1), GFP_KERNEL); if (!in1[i]) { kfree(in0[i]); goto err_free_elements; } /* lifted from test_sort.c */ r = (r * 725861) % 6599; in0[i]->num = r; in1[i]->num = -r; in0[i]->magic = MAGIC; in1[i]->magic = MAGIC; } in0[i] = in1[i] = NULL; out = memcat_p(in0, in1); if (!out) goto err_free_all_elements; err = -EINVAL; for (i = 0, p = out; *p && (i < INPUT_MAX * 2 - 1); p++, i++) { total += (*p)->num; if ((*p)->magic != MAGIC) { pr_err("test failed: wrong magic at %d: %u\n", i, (*p)->magic); goto err_free_out; } } if (total) { pr_err("test failed: expected zero total, got %d\n", total); goto err_free_out; } if (i != EXPECT) { pr_err("test failed: expected output size %d, got %d\n", EXPECT, i); goto err_free_out; } for (i = 0; i < INPUT_MAX - 1; i++) if (out[i] != in0[i] || out[i + INPUT_MAX - 1] != in1[i]) { pr_err("test failed: wrong element order at %d\n", i); goto err_free_out; } err = 0; pr_info("test passed\n"); err_free_out: kfree(out); err_free_all_elements: i = INPUT_MAX; err_free_elements: for (i--; i >= 0; i--) { kfree(in1[i]); kfree(in0[i]); } kfree(in1); err_free_in0: kfree(in0); return err; } static void __exit test_memcat_p_exit(void) { } module_init(test_memcat_p_init); module_exit(test_memcat_p_exit); MODULE_LICENSE("GPL");
linux-master
lib/test_memcat_p.c
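Outside of this self-test, memcat_p() (a macro from linux/string.h wrapping __memcat_p()) simply takes two NULL-terminated arrays of same-typed pointers and returns a newly allocated, NULL-terminated concatenation. A minimal sketch (struct item and the counting loop are made up for illustration):

#include <linux/slab.h>
#include <linux/string.h>

struct item {
	int val;
};

static int example_memcat_p(struct item **a, struct item **b)
{
	struct item **all, **p;
	int n = 0;

	/* a and b must each end with a NULL sentinel. */
	all = memcat_p(a, b);
	if (!all)
		return -ENOMEM;

	for (p = all; *p; p++)
		n++;	/* elements of a first, then elements of b */

	kfree(all);	/* only the pointer array is allocated, not the elements */
	return n;
}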
// SPDX-License-Identifier: GPL-2.0-only /* * lib/parser.c - simple parser for mount, etc. options. */ #include <linux/ctype.h> #include <linux/types.h> #include <linux/export.h> #include <linux/kstrtox.h> #include <linux/parser.h> #include <linux/slab.h> #include <linux/string.h> /* * max size needed by different bases to express U64 * HEX: "0xFFFFFFFFFFFFFFFF" --> 18 * DEC: "18446744073709551615" --> 20 * OCT: "01777777777777777777777" --> 23 * pick the max one to define NUMBER_BUF_LEN */ #define NUMBER_BUF_LEN 24 /** * match_one - Determines if a string matches a simple pattern * @s: the string to examine for presence of the pattern * @p: the string containing the pattern * @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match * locations. * * Description: Determines if the pattern @p is present in string @s. Can only * match extremely simple token=arg style patterns. If the pattern is found, * the location(s) of the arguments will be returned in the @args array. */ static int match_one(char *s, const char *p, substring_t args[]) { char *meta; int argc = 0; if (!p) return 1; while(1) { int len = -1; meta = strchr(p, '%'); if (!meta) return strcmp(p, s) == 0; if (strncmp(p, s, meta-p)) return 0; s += meta - p; p = meta + 1; if (isdigit(*p)) len = simple_strtoul(p, (char **) &p, 10); else if (*p == '%') { if (*s++ != '%') return 0; p++; continue; } if (argc >= MAX_OPT_ARGS) return 0; args[argc].from = s; switch (*p++) { case 's': { size_t str_len = strlen(s); if (str_len == 0) return 0; if (len == -1 || len > str_len) len = str_len; args[argc].to = s + len; break; } case 'd': simple_strtol(s, &args[argc].to, 0); goto num; case 'u': simple_strtoul(s, &args[argc].to, 0); goto num; case 'o': simple_strtoul(s, &args[argc].to, 8); goto num; case 'x': simple_strtoul(s, &args[argc].to, 16); num: if (args[argc].to == args[argc].from) return 0; break; default: return 0; } s = args[argc].to; argc++; } } /** * match_token - Find a token (and optional args) in a string * @s: the string to examine for token/argument pairs * @table: match_table_t describing the set of allowed option tokens and the * arguments that may be associated with them. Must be terminated with a * &struct match_token whose pattern is set to the NULL pointer. * @args: array of %MAX_OPT_ARGS &substring_t elements. Used to return match * locations. * * Description: Detects which if any of a set of token strings has been passed * to it. Tokens can include up to %MAX_OPT_ARGS instances of basic c-style * format identifiers which will be taken into account when matching the * tokens, and whose locations will be returned in the @args array. */ int match_token(char *s, const match_table_t table, substring_t args[]) { const struct match_token *p; for (p = table; !match_one(s, p->pattern, args) ; p++) ; return p->token; } EXPORT_SYMBOL(match_token); /** * match_number - scan a number in the given base from a substring_t * @s: substring to be scanned * @result: resulting integer on success * @base: base to use when converting string * * Description: Given a &substring_t and a base, attempts to parse the substring * as a number in that base. * * Return: On success, sets @result to the integer represented by the * string and returns 0. Returns -EINVAL or -ERANGE on failure. 
*/ static int match_number(substring_t *s, int *result, int base) { char *endp; char buf[NUMBER_BUF_LEN]; int ret; long val; if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN) return -ERANGE; ret = 0; val = simple_strtol(buf, &endp, base); if (endp == buf) ret = -EINVAL; else if (val < (long)INT_MIN || val > (long)INT_MAX) ret = -ERANGE; else *result = (int) val; return ret; } /** * match_u64int - scan a number in the given base from a substring_t * @s: substring to be scanned * @result: resulting u64 on success * @base: base to use when converting string * * Description: Given a &substring_t and a base, attempts to parse the substring * as a number in that base. * * Return: On success, sets @result to the integer represented by the * string and returns 0. Returns -EINVAL or -ERANGE on failure. */ static int match_u64int(substring_t *s, u64 *result, int base) { char buf[NUMBER_BUF_LEN]; int ret; u64 val; if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN) return -ERANGE; ret = kstrtoull(buf, base, &val); if (!ret) *result = val; return ret; } /** * match_int - scan a decimal representation of an integer from a substring_t * @s: substring_t to be scanned * @result: resulting integer on success * * Description: Attempts to parse the &substring_t @s as a decimal integer. * * Return: On success, sets @result to the integer represented by the string * and returns 0. Returns -EINVAL or -ERANGE on failure. */ int match_int(substring_t *s, int *result) { return match_number(s, result, 0); } EXPORT_SYMBOL(match_int); /** * match_uint - scan a decimal representation of an integer from a substring_t * @s: substring_t to be scanned * @result: resulting integer on success * * Description: Attempts to parse the &substring_t @s as a decimal integer. * * Return: On success, sets @result to the integer represented by the string * and returns 0. Returns -EINVAL or -ERANGE on failure. */ int match_uint(substring_t *s, unsigned int *result) { char buf[NUMBER_BUF_LEN]; if (match_strlcpy(buf, s, NUMBER_BUF_LEN) >= NUMBER_BUF_LEN) return -ERANGE; return kstrtouint(buf, 10, result); } EXPORT_SYMBOL(match_uint); /** * match_u64 - scan a decimal representation of a u64 from * a substring_t * @s: substring_t to be scanned * @result: resulting unsigned long long on success * * Description: Attempts to parse the &substring_t @s as a long decimal * integer. * * Return: On success, sets @result to the integer represented by the string * and returns 0. Returns -EINVAL or -ERANGE on failure. */ int match_u64(substring_t *s, u64 *result) { return match_u64int(s, result, 0); } EXPORT_SYMBOL(match_u64); /** * match_octal - scan an octal representation of an integer from a substring_t * @s: substring_t to be scanned * @result: resulting integer on success * * Description: Attempts to parse the &substring_t @s as an octal integer. * * Return: On success, sets @result to the integer represented by the string * and returns 0. Returns -EINVAL or -ERANGE on failure. */ int match_octal(substring_t *s, int *result) { return match_number(s, result, 8); } EXPORT_SYMBOL(match_octal); /** * match_hex - scan a hex representation of an integer from a substring_t * @s: substring_t to be scanned * @result: resulting integer on success * * Description: Attempts to parse the &substring_t @s as a hexadecimal integer. * * Return: On success, sets @result to the integer represented by the string * and returns 0. Returns -EINVAL or -ERANGE on failure. 
*/ int match_hex(substring_t *s, int *result) { return match_number(s, result, 16); } EXPORT_SYMBOL(match_hex); /** * match_wildcard - parse if a string matches given wildcard pattern * @pattern: wildcard pattern * @str: the string to be parsed * * Description: Parse the string @str to check if matches wildcard * pattern @pattern. The pattern may contain two types of wildcards: * '*' - matches zero or more characters * '?' - matches one character * * Return: If the @str matches the @pattern, return true, else return false. */ bool match_wildcard(const char *pattern, const char *str) { const char *s = str; const char *p = pattern; bool star = false; while (*s) { switch (*p) { case '?': s++; p++; break; case '*': star = true; str = s; if (!*++p) return true; pattern = p; break; default: if (*s == *p) { s++; p++; } else { if (!star) return false; str++; s = str; p = pattern; } break; } } if (*p == '*') ++p; return !*p; } EXPORT_SYMBOL(match_wildcard); /** * match_strlcpy - Copy the characters from a substring_t to a sized buffer * @dest: where to copy to * @src: &substring_t to copy * @size: size of destination buffer * * Description: Copy the characters in &substring_t @src to the * c-style string @dest. Copy no more than @size - 1 characters, plus * the terminating NUL. * * Return: length of @src. */ size_t match_strlcpy(char *dest, const substring_t *src, size_t size) { size_t ret = src->to - src->from; if (size) { size_t len = ret >= size ? size - 1 : ret; memcpy(dest, src->from, len); dest[len] = '\0'; } return ret; } EXPORT_SYMBOL(match_strlcpy); /** * match_strdup - allocate a new string with the contents of a substring_t * @s: &substring_t to copy * * Description: Allocates and returns a string filled with the contents of * the &substring_t @s. The caller is responsible for freeing the returned * string with kfree(). * * Return: the address of the newly allocated NUL-terminated string or * %NULL on error. */ char *match_strdup(const substring_t *s) { return kmemdup_nul(s->from, s->to - s->from, GFP_KERNEL); } EXPORT_SYMBOL(match_strdup);
linux-master
lib/parser.c
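The usual consumer of match_token() is an option-string loop of the kind filesystems run at mount time. A small illustrative sketch (the option names and the Opt_* enum are invented for the example):

#include <linux/errno.h>
#include <linux/parser.h>
#include <linux/string.h>

enum { Opt_uid, Opt_ro, Opt_err };

static const match_table_t example_tokens = {
	{ Opt_uid,	"uid=%u" },
	{ Opt_ro,	"ro" },
	{ Opt_err,	NULL }
};

static int example_parse(char *options, unsigned int *uid, bool *ro)
{
	substring_t args[MAX_OPT_ARGS];
	char *p;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;

		token = match_token(p, example_tokens, args);
		switch (token) {
		case Opt_uid:
			/* args[0] marks the span matched by %u */
			if (match_uint(&args[0], uid))
				return -EINVAL;
			break;
		case Opt_ro:
			*ro = true;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}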
/* * Test cases for lib/hexdump.c module. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/random.h> #include <linux/string.h> static const unsigned char data_b[] = { '\xbe', '\x32', '\xdb', '\x7b', '\x0a', '\x18', '\x93', '\xb2', /* 00 - 07 */ '\x70', '\xba', '\xc4', '\x24', '\x7d', '\x83', '\x34', '\x9b', /* 08 - 0f */ '\xa6', '\x9c', '\x31', '\xad', '\x9c', '\x0f', '\xac', '\xe9', /* 10 - 17 */ '\x4c', '\xd1', '\x19', '\x99', '\x43', '\xb1', '\xaf', '\x0c', /* 18 - 1f */ }; static const unsigned char data_a[] = ".2.{....p..$}.4...1.....L...C..."; static const char * const test_data_1[] __initconst = { "be", "32", "db", "7b", "0a", "18", "93", "b2", "70", "ba", "c4", "24", "7d", "83", "34", "9b", "a6", "9c", "31", "ad", "9c", "0f", "ac", "e9", "4c", "d1", "19", "99", "43", "b1", "af", "0c", }; static const char * const test_data_2_le[] __initconst = { "32be", "7bdb", "180a", "b293", "ba70", "24c4", "837d", "9b34", "9ca6", "ad31", "0f9c", "e9ac", "d14c", "9919", "b143", "0caf", }; static const char * const test_data_2_be[] __initconst = { "be32", "db7b", "0a18", "93b2", "70ba", "c424", "7d83", "349b", "a69c", "31ad", "9c0f", "ace9", "4cd1", "1999", "43b1", "af0c", }; static const char * const test_data_4_le[] __initconst = { "7bdb32be", "b293180a", "24c4ba70", "9b34837d", "ad319ca6", "e9ac0f9c", "9919d14c", "0cafb143", }; static const char * const test_data_4_be[] __initconst = { "be32db7b", "0a1893b2", "70bac424", "7d83349b", "a69c31ad", "9c0face9", "4cd11999", "43b1af0c", }; static const char * const test_data_8_le[] __initconst = { "b293180a7bdb32be", "9b34837d24c4ba70", "e9ac0f9cad319ca6", "0cafb1439919d14c", }; static const char * const test_data_8_be[] __initconst = { "be32db7b0a1893b2", "70bac4247d83349b", "a69c31ad9c0face9", "4cd1199943b1af0c", }; #define FILL_CHAR '#' static unsigned total_tests __initdata; static unsigned failed_tests __initdata; static void __init test_hexdump_prepare_test(size_t len, int rowsize, int groupsize, char *test, size_t testlen, bool ascii) { char *p; const char * const *result; size_t l = len; int gs = groupsize, rs = rowsize; unsigned int i; const bool is_be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); if (rs != 16 && rs != 32) rs = 16; if (l > rs) l = rs; if (!is_power_of_2(gs) || gs > 8 || (len % gs != 0)) gs = 1; if (gs == 8) result = is_be ? test_data_8_be : test_data_8_le; else if (gs == 4) result = is_be ? test_data_4_be : test_data_4_le; else if (gs == 2) result = is_be ? 
test_data_2_be : test_data_2_le; else result = test_data_1; /* hex dump */ p = test; for (i = 0; i < l / gs; i++) { const char *q = *result++; size_t amount = strlen(q); memcpy(p, q, amount); p += amount; *p++ = ' '; } if (i) p--; /* ASCII part */ if (ascii) { do { *p++ = ' '; } while (p < test + rs * 2 + rs / gs + 1); strncpy(p, data_a, l); p += l; } *p = '\0'; } #define TEST_HEXDUMP_BUF_SIZE (32 * 3 + 2 + 32 + 1) static void __init test_hexdump(size_t len, int rowsize, int groupsize, bool ascii) { char test[TEST_HEXDUMP_BUF_SIZE]; char real[TEST_HEXDUMP_BUF_SIZE]; total_tests++; memset(real, FILL_CHAR, sizeof(real)); hex_dump_to_buffer(data_b, len, rowsize, groupsize, real, sizeof(real), ascii); memset(test, FILL_CHAR, sizeof(test)); test_hexdump_prepare_test(len, rowsize, groupsize, test, sizeof(test), ascii); if (memcmp(test, real, TEST_HEXDUMP_BUF_SIZE)) { pr_err("Len: %zu row: %d group: %d\n", len, rowsize, groupsize); pr_err("Result: '%s'\n", real); pr_err("Expect: '%s'\n", test); failed_tests++; } } static void __init test_hexdump_set(int rowsize, bool ascii) { size_t d = min_t(size_t, sizeof(data_b), rowsize); size_t len = get_random_u32_inclusive(1, d); test_hexdump(len, rowsize, 4, ascii); test_hexdump(len, rowsize, 2, ascii); test_hexdump(len, rowsize, 8, ascii); test_hexdump(len, rowsize, 1, ascii); } static void __init test_hexdump_overflow(size_t buflen, size_t len, int rowsize, int groupsize, bool ascii) { char test[TEST_HEXDUMP_BUF_SIZE]; char buf[TEST_HEXDUMP_BUF_SIZE]; int rs = rowsize, gs = groupsize; int ae, he, e, f, r; bool a; total_tests++; memset(buf, FILL_CHAR, sizeof(buf)); r = hex_dump_to_buffer(data_b, len, rs, gs, buf, buflen, ascii); /* * Caller must provide the data length multiple of groupsize. The * calculations below are made with that assumption in mind. */ ae = rs * 2 /* hex */ + rs / gs /* spaces */ + 1 /* space */ + len /* ascii */; he = (gs * 2 /* hex */ + 1 /* space */) * len / gs - 1 /* no trailing space */; if (ascii) e = ae; else e = he; f = min_t(int, e + 1, buflen); if (buflen) { test_hexdump_prepare_test(len, rs, gs, test, sizeof(test), ascii); test[f - 1] = '\0'; } memset(test + f, FILL_CHAR, sizeof(test) - f); a = r == e && !memcmp(test, buf, TEST_HEXDUMP_BUF_SIZE); buf[sizeof(buf) - 1] = '\0'; if (!a) { pr_err("Len: %zu buflen: %zu strlen: %zu\n", len, buflen, strnlen(buf, sizeof(buf))); pr_err("Result: %d '%s'\n", r, buf); pr_err("Expect: %d '%s'\n", e, test); failed_tests++; } } static void __init test_hexdump_overflow_set(size_t buflen, bool ascii) { unsigned int i = 0; int rs = get_random_u32_inclusive(1, 2) * 16; do { int gs = 1 << i; size_t len = get_random_u32_below(rs) + gs; test_hexdump_overflow(buflen, rounddown(len, gs), rs, gs, ascii); } while (i++ < 3); } static int __init test_hexdump_init(void) { unsigned int i; int rowsize; rowsize = get_random_u32_inclusive(1, 2) * 16; for (i = 0; i < 16; i++) test_hexdump_set(rowsize, false); rowsize = get_random_u32_inclusive(1, 2) * 16; for (i = 0; i < 16; i++) test_hexdump_set(rowsize, true); for (i = 0; i <= TEST_HEXDUMP_BUF_SIZE; i++) test_hexdump_overflow_set(i, false); for (i = 0; i <= TEST_HEXDUMP_BUF_SIZE; i++) test_hexdump_overflow_set(i, true); if (failed_tests == 0) pr_info("all %u tests passed\n", total_tests); else pr_err("failed %u out of %u tests\n", failed_tests, total_tests); return failed_tests ? 
-EINVAL : 0; } module_init(test_hexdump_init); static void __exit test_hexdump_exit(void) { /* do nothing */ } module_exit(test_hexdump_exit); MODULE_AUTHOR("Andy Shevchenko <[email protected]>"); MODULE_LICENSE("Dual BSD/GPL");
linux-master
lib/test_hexdump.c
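What the test pins down is the output contract of hex_dump_to_buffer() (declared in linux/printk.h). A small sketch of direct use, printing 16 bytes per line with single-byte grouping and the trailing ASCII column (the buffer size follows the same rowsize*3 + 2 + rowsize + 1 formula as TEST_HEXDUMP_BUF_SIZE above); for one-off dumps, print_hex_dump() is usually the shorter route:

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/types.h>

static void example_dump(const void *buf, size_t len)
{
	char line[16 * 3 + 2 + 16 + 1];	/* hex + gap + ascii + NUL */
	size_t i;

	for (i = 0; i < len; i += 16) {
		hex_dump_to_buffer((const u8 *)buf + i,
				   min_t(size_t, len - i, 16),
				   16, 1, line, sizeof(line), true);
		pr_info("%04zx: %s\n", i, line);
	}
}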
// SPDX-License-Identifier: GPL-2.0 /* * lib/minmax.c: windowed min/max tracker * * Kathleen Nichols' algorithm for tracking the minimum (or maximum) * value of a data stream over some fixed time interval. (E.g., * the minimum RTT over the past five minutes.) It uses constant * space and constant time per update yet almost always delivers * the same minimum as an implementation that has to keep all the * data in the window. * * The algorithm keeps track of the best, 2nd best & 3rd best min * values, maintaining an invariant that the measurement time of * the n'th best >= n-1'th best. It also makes sure that the three * values are widely separated in the time window since that bounds * the worse case error when that data is monotonically increasing * over the window. * * Upon getting a new min, we can forget everything earlier because * it has no value - the new min is <= everything else in the window * by definition and it's the most recent. So we restart fresh on * every new min and overwrites 2nd & 3rd choices. The same property * holds for 2nd & 3rd best. */ #include <linux/module.h> #include <linux/win_minmax.h> /* As time advances, update the 1st, 2nd, and 3rd choices. */ static u32 minmax_subwin_update(struct minmax *m, u32 win, const struct minmax_sample *val) { u32 dt = val->t - m->s[0].t; if (unlikely(dt > win)) { /* * Passed entire window without a new val so make 2nd * choice the new val & 3rd choice the new 2nd choice. * we may have to iterate this since our 2nd choice * may also be outside the window (we checked on entry * that the third choice was in the window). */ m->s[0] = m->s[1]; m->s[1] = m->s[2]; m->s[2] = *val; if (unlikely(val->t - m->s[0].t > win)) { m->s[0] = m->s[1]; m->s[1] = m->s[2]; m->s[2] = *val; } } else if (unlikely(m->s[1].t == m->s[0].t) && dt > win/4) { /* * We've passed a quarter of the window without a new val * so take a 2nd choice from the 2nd quarter of the window. */ m->s[2] = m->s[1] = *val; } else if (unlikely(m->s[2].t == m->s[1].t) && dt > win/2) { /* * We've passed half the window without finding a new val * so take a 3rd choice from the last half of the window */ m->s[2] = *val; } return m->s[0].v; } /* Check if new measurement updates the 1st, 2nd or 3rd choice max. */ u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas) { struct minmax_sample val = { .t = t, .v = meas }; if (unlikely(val.v >= m->s[0].v) || /* found new max? */ unlikely(val.t - m->s[2].t > win)) /* nothing left in window? */ return minmax_reset(m, t, meas); /* forget earlier samples */ if (unlikely(val.v >= m->s[1].v)) m->s[2] = m->s[1] = val; else if (unlikely(val.v >= m->s[2].v)) m->s[2] = val; return minmax_subwin_update(m, win, &val); } EXPORT_SYMBOL(minmax_running_max); /* Check if new measurement updates the 1st, 2nd or 3rd choice min. */ u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas) { struct minmax_sample val = { .t = t, .v = meas }; if (unlikely(val.v <= m->s[0].v) || /* found new min? */ unlikely(val.t - m->s[2].t > win)) /* nothing left in window? */ return minmax_reset(m, t, meas); /* forget earlier samples */ if (unlikely(val.v <= m->s[1].v)) m->s[2] = m->s[1] = val; else if (unlikely(val.v <= m->s[2].v)) m->s[2] = val; return minmax_subwin_update(m, win, &val); }
linux-master
lib/win_minmax.c
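Typical use mirrors how TCP BBR tracks its minimum RTT: feed each new measurement in with the current 32-bit timestamp and read the windowed result back with minmax_get(). A minimal sketch (minmax_reset() and minmax_get() are the small inlines from linux/win_minmax.h; the millisecond clock and the 10 second window are arbitrary choices for the example):

#include <linux/jiffies.h>
#include <linux/win_minmax.h>

static struct minmax min_rtt;

static void rtt_init(u32 first_rtt_us)
{
	/* Seed all three choices with the first sample. */
	minmax_reset(&min_rtt, jiffies_to_msecs(jiffies), first_rtt_us);
}

static void rtt_sample(u32 rtt_us)
{
	/* Track the smallest RTT seen over a sliding 10 second window. */
	minmax_running_min(&min_rtt, 10 * 1000, jiffies_to_msecs(jiffies), rtt_us);
}

static u32 current_min_rtt(void)
{
	return minmax_get(&min_rtt);	/* best (1st choice) sample in the window */
}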
// SPDX-License-Identifier: GPL-2.0 #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> typedef void(*test_ubsan_fp)(void); #define UBSAN_TEST(config, ...) do { \ pr_info("%s " __VA_ARGS__ "%s(%s=%s)\n", __func__, \ sizeof(" " __VA_ARGS__) > 2 ? " " : "", \ #config, IS_ENABLED(config) ? "y" : "n"); \ } while (0) static void test_ubsan_divrem_overflow(void) { volatile int val = 16; volatile int val2 = 0; UBSAN_TEST(CONFIG_UBSAN_DIV_ZERO); val /= val2; } static void test_ubsan_shift_out_of_bounds(void) { volatile int neg = -1, wrap = 4; int val1 = 10; int val2 = INT_MAX; UBSAN_TEST(CONFIG_UBSAN_SHIFT, "negative exponent"); val1 <<= neg; UBSAN_TEST(CONFIG_UBSAN_SHIFT, "left overflow"); val2 <<= wrap; } static void test_ubsan_out_of_bounds(void) { volatile int i = 4, j = 5, k = -1; volatile char above[4] = { }; /* Protect surrounding memory. */ volatile int arr[4]; volatile char below[4] = { }; /* Protect surrounding memory. */ above[0] = below[0]; UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "above"); arr[j] = i; UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "below"); arr[k] = i; } enum ubsan_test_enum { UBSAN_TEST_ZERO = 0, UBSAN_TEST_ONE, UBSAN_TEST_MAX, }; static void test_ubsan_load_invalid_value(void) { volatile char *dst, *src; bool val, val2, *ptr; enum ubsan_test_enum eval, eval2, *eptr; unsigned char c = 0xff; UBSAN_TEST(CONFIG_UBSAN_BOOL, "bool"); dst = (char *)&val; src = &c; *dst = *src; ptr = &val2; val2 = val; UBSAN_TEST(CONFIG_UBSAN_ENUM, "enum"); dst = (char *)&eval; src = &c; *dst = *src; eptr = &eval2; eval2 = eval; } static void test_ubsan_misaligned_access(void) { volatile char arr[5] __aligned(4) = {1, 2, 3, 4, 5}; volatile int *ptr, val = 6; UBSAN_TEST(CONFIG_UBSAN_ALIGNMENT); ptr = (int *)(arr + 1); *ptr = val; } static const test_ubsan_fp test_ubsan_array[] = { test_ubsan_shift_out_of_bounds, test_ubsan_out_of_bounds, test_ubsan_load_invalid_value, test_ubsan_misaligned_access, }; /* Excluded because they Oops the module. */ static const test_ubsan_fp skip_ubsan_array[] = { test_ubsan_divrem_overflow, }; static int __init test_ubsan_init(void) { unsigned int i; for (i = 0; i < ARRAY_SIZE(test_ubsan_array); i++) test_ubsan_array[i](); return 0; } module_init(test_ubsan_init); static void __exit test_ubsan_exit(void) { /* do nothing */ } module_exit(test_ubsan_exit); MODULE_AUTHOR("Jinbum Park <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
lib/test_ubsan.c
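The module above only produces reports when the kernel is built with the matching CONFIG_UBSAN_* options and the module is loaded, at which point test_ubsan_init() runs each check. As a rough userspace analogue (purely illustrative, not part of the kernel test), the same two shift problems can be reproduced by compiling a small program with UBSan enabled; the file name shift_demo.c is made up.

/* shift_demo.c - build with: cc -fsanitize=undefined shift_demo.c */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	volatile int neg = -1, wrap = 4;
	volatile int val1 = 10, val2 = INT_MAX;

	val1 <<= neg;	/* shift by a negative exponent: undefined */
	val2 <<= wrap;	/* signed left shift that overflows: undefined */

	printf("%d %d\n", val1, val2);
	return 0;
}

With a recent GCC or Clang this should emit a runtime diagnostic for each shift, the userspace counterpart of the reports that test_ubsan_shift_out_of_bounds() provokes in the kernel.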
// SPDX-License-Identifier: GPL-2.0+ /* * test_xarray.c: Test the XArray API * Copyright (c) 2017-2018 Microsoft Corporation * Copyright (c) 2019-2020 Oracle * Author: Matthew Wilcox <[email protected]> */ #include <linux/xarray.h> #include <linux/module.h> static unsigned int tests_run; static unsigned int tests_passed; static const unsigned int order_limit = IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1; #ifndef XA_DEBUG # ifdef __KERNEL__ void xa_dump(const struct xarray *xa) { } # endif #undef XA_BUG_ON #define XA_BUG_ON(xa, x) do { \ tests_run++; \ if (x) { \ printk("BUG at %s:%d\n", __func__, __LINE__); \ xa_dump(xa); \ dump_stack(); \ } else { \ tests_passed++; \ } \ } while (0) #endif static void *xa_mk_index(unsigned long index) { return xa_mk_value(index & LONG_MAX); } static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp) { return xa_store(xa, index, xa_mk_index(index), gfp); } static void xa_insert_index(struct xarray *xa, unsigned long index) { XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index), GFP_KERNEL) != 0); } static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) { u32 id; XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b, gfp) != 0); XA_BUG_ON(xa, id != index); } static void xa_erase_index(struct xarray *xa, unsigned long index) { XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index)); XA_BUG_ON(xa, xa_load(xa, index) != NULL); } /* * If anyone needs this, please move it to xarray.c. We have no current * users outside the test suite because all current multislot users want * to use the advanced API. */ static void *xa_store_order(struct xarray *xa, unsigned long index, unsigned order, void *entry, gfp_t gfp) { XA_STATE_ORDER(xas, xa, index, order); void *curr; do { xas_lock(&xas); curr = xas_store(&xas, entry); xas_unlock(&xas); } while (xas_nomem(&xas, gfp)); return curr; } static noinline void check_xa_err(struct xarray *xa) { XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0); XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0); #ifndef __KERNEL__ /* The kernel does not fail GFP_NOWAIT allocations */ XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM); XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM); #endif XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0); XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0); XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0); // kills the test-suite :-( // XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL); } static noinline void check_xas_retry(struct xarray *xa) { XA_STATE(xas, xa, 0); void *entry; xa_store_index(xa, 0, GFP_KERNEL); xa_store_index(xa, 1, GFP_KERNEL); rcu_read_lock(); XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0)); xa_erase_index(xa, 1); XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas))); XA_BUG_ON(xa, xas_retry(&xas, NULL)); XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0))); xas_reset(&xas); XA_BUG_ON(xa, xas.xa_node != XAS_RESTART); XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0)); XA_BUG_ON(xa, xas.xa_node != NULL); rcu_read_unlock(); XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL); rcu_read_lock(); XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas))); xas.xa_node = XAS_RESTART; XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0)); rcu_read_unlock(); /* Make sure we can iterate through retry entries */ xas_lock(&xas); xas_set(&xas, 0); xas_store(&xas, XA_RETRY_ENTRY); xas_set(&xas, 1); xas_store(&xas, XA_RETRY_ENTRY); 
xas_set(&xas, 0); xas_for_each(&xas, entry, ULONG_MAX) { xas_store(&xas, xa_mk_index(xas.xa_index)); } xas_unlock(&xas); xa_erase_index(xa, 0); xa_erase_index(xa, 1); } static noinline void check_xa_load(struct xarray *xa) { unsigned long i, j; for (i = 0; i < 1024; i++) { for (j = 0; j < 1024; j++) { void *entry = xa_load(xa, j); if (j < i) XA_BUG_ON(xa, xa_to_value(entry) != j); else XA_BUG_ON(xa, entry); } XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL); } for (i = 0; i < 1024; i++) { for (j = 0; j < 1024; j++) { void *entry = xa_load(xa, j); if (j >= i) XA_BUG_ON(xa, xa_to_value(entry) != j); else XA_BUG_ON(xa, entry); } xa_erase_index(xa, i); } XA_BUG_ON(xa, !xa_empty(xa)); } static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) { unsigned int order; unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1; /* NULL elements have no marks set */ XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); xa_set_mark(xa, index, XA_MARK_0); XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); /* Storing a pointer will not make a mark appear */ XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL); XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); xa_set_mark(xa, index, XA_MARK_0); XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0)); /* Setting one mark will not set another mark */ XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0)); XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1)); /* Storing NULL clears marks, and they can't be set again */ xa_erase_index(xa, index); XA_BUG_ON(xa, !xa_empty(xa)); XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); xa_set_mark(xa, index, XA_MARK_0); XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); /* * Storing a multi-index entry over entries with marks gives the * entire entry the union of the marks */ BUG_ON((index % 4) != 0); for (order = 2; order < max_order; order++) { unsigned long base = round_down(index, 1UL << order); unsigned long next = base + (1UL << order); unsigned long i; XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL)); xa_set_mark(xa, index + 1, XA_MARK_0); XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL)); xa_set_mark(xa, index + 2, XA_MARK_2); XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL)); xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); for (i = base; i < next; i++) { XA_STATE(xas, xa, i); unsigned int seen = 0; void *entry; XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1)); XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2)); /* We should see two elements in the array */ rcu_read_lock(); xas_for_each(&xas, entry, ULONG_MAX) seen++; rcu_read_unlock(); XA_BUG_ON(xa, seen != 2); /* One of which is marked */ xas_set(&xas, 0); seen = 0; rcu_read_lock(); xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) seen++; rcu_read_unlock(); XA_BUG_ON(xa, seen != 1); } XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0)); XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1)); XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2)); xa_erase_index(xa, index); xa_erase_index(xa, next); XA_BUG_ON(xa, !xa_empty(xa)); } XA_BUG_ON(xa, !xa_empty(xa)); } static noinline void check_xa_mark_2(struct xarray *xa) { XA_STATE(xas, xa, 0); unsigned long index; unsigned int count = 0; void *entry; xa_store_index(xa, 0, GFP_KERNEL); xa_set_mark(xa, 0, XA_MARK_0); xas_lock(&xas); xas_load(&xas); xas_init_marks(&xas); xas_unlock(&xas); XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0); for (index = 3500; index < 4500; index++) { xa_store_index(xa, index, GFP_KERNEL); 
xa_set_mark(xa, index, XA_MARK_0); } xas_reset(&xas); rcu_read_lock(); xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) count++; rcu_read_unlock(); XA_BUG_ON(xa, count != 1000); xas_lock(&xas); xas_for_each(&xas, entry, ULONG_MAX) { xas_init_marks(&xas); XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0)); XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0)); } xas_unlock(&xas); xa_destroy(xa); } static noinline void check_xa_mark_3(struct xarray *xa) { #ifdef CONFIG_XARRAY_MULTI XA_STATE(xas, xa, 0x41); void *entry; int count = 0; xa_store_order(xa, 0x40, 2, xa_mk_index(0x40), GFP_KERNEL); xa_set_mark(xa, 0x41, XA_MARK_0); rcu_read_lock(); xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) { count++; XA_BUG_ON(xa, entry != xa_mk_index(0x40)); } XA_BUG_ON(xa, count != 1); rcu_read_unlock(); xa_destroy(xa); #endif } static noinline void check_xa_mark(struct xarray *xa) { unsigned long index; for (index = 0; index < 16384; index += 4) check_xa_mark_1(xa, index); check_xa_mark_2(xa); check_xa_mark_3(xa); } static noinline void check_xa_shrink(struct xarray *xa) { XA_STATE(xas, xa, 1); struct xa_node *node; unsigned int order; unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1; XA_BUG_ON(xa, !xa_empty(xa)); XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL); XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL); /* * Check that erasing the entry at 1 shrinks the tree and properly * marks the node as being deleted. */ xas_lock(&xas); XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1)); node = xas.xa_node; XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0)); XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1)); XA_BUG_ON(xa, xa_load(xa, 1) != NULL); XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS); XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY); XA_BUG_ON(xa, xas_load(&xas) != NULL); xas_unlock(&xas); XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); xa_erase_index(xa, 0); XA_BUG_ON(xa, !xa_empty(xa)); for (order = 0; order < max_order; order++) { unsigned long max = (1UL << order) - 1; xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL); XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0)); XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL); rcu_read_lock(); node = xa_head(xa); rcu_read_unlock(); XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) != NULL); rcu_read_lock(); XA_BUG_ON(xa, xa_head(xa) == node); rcu_read_unlock(); XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL); xa_erase_index(xa, ULONG_MAX); XA_BUG_ON(xa, xa->xa_head != node); xa_erase_index(xa, 0); } } static noinline void check_insert(struct xarray *xa) { unsigned long i; for (i = 0; i < 1024; i++) { xa_insert_index(xa, i); XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL); XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL); xa_erase_index(xa, i); } for (i = 10; i < BITS_PER_LONG; i++) { xa_insert_index(xa, 1UL << i); XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL); XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL); xa_erase_index(xa, 1UL << i); xa_insert_index(xa, (1UL << i) - 1); XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL); XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL); xa_erase_index(xa, (1UL << i) - 1); } xa_insert_index(xa, ~0UL); XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL); XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL); xa_erase_index(xa, ~0UL); XA_BUG_ON(xa, !xa_empty(xa)); } static noinline void check_cmpxchg(struct xarray *xa) { void *FIVE = xa_mk_value(5); void *SIX = xa_mk_value(6); void *LOTS = xa_mk_value(12345678); XA_BUG_ON(xa, !xa_empty(xa)); XA_BUG_ON(xa, xa_store_index(xa, 12345678, 
GFP_KERNEL) != NULL); XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY); XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS); XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS); XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE); XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL); XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL); XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY); XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE); XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY); xa_erase_index(xa, 12345678); xa_erase_index(xa, 5); XA_BUG_ON(xa, !xa_empty(xa)); } static noinline void check_reserve(struct xarray *xa) { void *entry; unsigned long index; int count; /* An array with a reserved entry is not empty */ XA_BUG_ON(xa, !xa_empty(xa)); XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_empty(xa)); XA_BUG_ON(xa, xa_load(xa, 12345678)); xa_release(xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); /* Releasing a used entry does nothing */ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL); xa_release(xa, 12345678); xa_erase_index(xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); /* cmpxchg sees a reserved entry as ZERO */ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY, xa_mk_value(12345678), GFP_NOWAIT) != NULL); xa_release(xa, 12345678); xa_erase_index(xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); /* xa_insert treats it as busy */ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != -EBUSY); XA_BUG_ON(xa, xa_empty(xa)); XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL); XA_BUG_ON(xa, !xa_empty(xa)); /* Can iterate through a reserved entry */ xa_store_index(xa, 5, GFP_KERNEL); XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0); xa_store_index(xa, 7, GFP_KERNEL); count = 0; xa_for_each(xa, index, entry) { XA_BUG_ON(xa, index != 5 && index != 7); count++; } XA_BUG_ON(xa, count != 2); /* If we free a reserved entry, we should be able to allocate it */ if (xa->xa_flags & XA_FLAGS_ALLOC) { u32 id; XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8), XA_LIMIT(5, 10), GFP_KERNEL) != 0); XA_BUG_ON(xa, id != 8); xa_release(xa, 6); XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6), XA_LIMIT(5, 10), GFP_KERNEL) != 0); XA_BUG_ON(xa, id != 6); } xa_destroy(xa); } static noinline void check_xas_erase(struct xarray *xa) { XA_STATE(xas, xa, 0); void *entry; unsigned long i, j; for (i = 0; i < 200; i++) { for (j = i; j < 2 * i + 17; j++) { xas_set(&xas, j); do { xas_lock(&xas); xas_store(&xas, xa_mk_index(j)); xas_unlock(&xas); } while (xas_nomem(&xas, GFP_KERNEL)); } xas_set(&xas, ULONG_MAX); do { xas_lock(&xas); xas_store(&xas, xa_mk_value(0)); xas_unlock(&xas); } while (xas_nomem(&xas, GFP_KERNEL)); xas_lock(&xas); xas_store(&xas, NULL); xas_set(&xas, 0); j = i; xas_for_each(&xas, entry, ULONG_MAX) { XA_BUG_ON(xa, entry != xa_mk_index(j)); xas_store(&xas, NULL); j++; } xas_unlock(&xas); XA_BUG_ON(xa, !xa_empty(xa)); } } #ifdef CONFIG_XARRAY_MULTI static noinline void check_multi_store_1(struct xarray *xa, unsigned long index, unsigned int order) { XA_STATE(xas, xa, index); unsigned long min = index & ~((1UL << order) - 1); unsigned long max = min + (1UL << order); xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); XA_BUG_ON(xa, xa_load(xa, min) 
!= xa_mk_index(index)); XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index)); XA_BUG_ON(xa, xa_load(xa, max) != NULL); XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); xas_lock(&xas); XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index)); xas_unlock(&xas); XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min)); XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min)); XA_BUG_ON(xa, xa_load(xa, max) != NULL); XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); xa_erase_index(xa, min); XA_BUG_ON(xa, !xa_empty(xa)); } static noinline void check_multi_store_2(struct xarray *xa, unsigned long index, unsigned int order) { XA_STATE(xas, xa, index); xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL); xas_lock(&xas); XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0)); XA_BUG_ON(xa, xas.xa_index != index); XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1)); xas_unlock(&xas); XA_BUG_ON(xa, !xa_empty(xa)); } static noinline void check_multi_store_3(struct xarray *xa, unsigned long index, unsigned int order) { XA_STATE(xas, xa, 0); void *entry; int n = 0; xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); xas_lock(&xas); xas_for_each(&xas, entry, ULONG_MAX) { XA_BUG_ON(xa, entry != xa_mk_index(index)); n++; } XA_BUG_ON(xa, n != 1); xas_set(&xas, index + 1); xas_for_each(&xas, entry, ULONG_MAX) { XA_BUG_ON(xa, entry != xa_mk_index(index)); n++; } XA_BUG_ON(xa, n != 2); xas_unlock(&xas); xa_destroy(xa); } #endif static noinline void check_multi_store(struct xarray *xa) { #ifdef CONFIG_XARRAY_MULTI unsigned long i, j, k; unsigned int max_order = (sizeof(long) == 4) ? 30 : 60; /* Loading from any position returns the same value */ xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL); XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0)); XA_BUG_ON(xa, xa_load(xa, 2) != NULL); rcu_read_lock(); XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2); XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2); rcu_read_unlock(); /* Storing adjacent to the value does not alter the value */ xa_store(xa, 3, xa, GFP_KERNEL); XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0)); XA_BUG_ON(xa, xa_load(xa, 2) != NULL); rcu_read_lock(); XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3); XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2); rcu_read_unlock(); /* Overwriting multiple indexes works */ xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL); XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1)); XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1)); XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1)); XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1)); XA_BUG_ON(xa, xa_load(xa, 4) != NULL); rcu_read_lock(); XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4); XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4); rcu_read_unlock(); /* We can erase multiple values with a single store */ xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL); XA_BUG_ON(xa, !xa_empty(xa)); /* Even when the first slot is empty but the others aren't */ xa_store_index(xa, 1, GFP_KERNEL); xa_store_index(xa, 2, GFP_KERNEL); xa_store_order(xa, 0, 2, NULL, GFP_KERNEL); XA_BUG_ON(xa, !xa_empty(xa)); for (i = 0; i < max_order; i++) { for (j = 0; j < max_order; j++) { xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL); xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL); for (k = 0; k < max_order; k++) { void *entry = xa_load(xa, (1UL << k) - 1); if ((i < k) && (j < k)) XA_BUG_ON(xa, entry != NULL); else XA_BUG_ON(xa, 
entry != xa_mk_index(j)); } xa_erase(xa, 0); XA_BUG_ON(xa, !xa_empty(xa)); } } for (i = 0; i < 20; i++) { check_multi_store_1(xa, 200, i); check_multi_store_1(xa, 0, i); check_multi_store_1(xa, (1UL << i) + 1, i); } check_multi_store_2(xa, 4095, 9); for (i = 1; i < 20; i++) { check_multi_store_3(xa, 0, i); check_multi_store_3(xa, 1UL << i, i); } #endif } static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base) { int i; u32 id; XA_BUG_ON(xa, !xa_empty(xa)); /* An empty array should assign %base to the first alloc */ xa_alloc_index(xa, base, GFP_KERNEL); /* Erasing it should make the array empty again */ xa_erase_index(xa, base); XA_BUG_ON(xa, !xa_empty(xa)); /* And it should assign %base again */ xa_alloc_index(xa, base, GFP_KERNEL); /* Allocating and then erasing a lot should not lose base */ for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++) xa_alloc_index(xa, i, GFP_KERNEL); for (i = base; i < 2 * XA_CHUNK_SIZE; i++) xa_erase_index(xa, i); xa_alloc_index(xa, base, GFP_KERNEL); /* Destroying the array should do the same as erasing */ xa_destroy(xa); /* And it should assign %base again */ xa_alloc_index(xa, base, GFP_KERNEL); /* The next assigned ID should be base+1 */ xa_alloc_index(xa, base + 1, GFP_KERNEL); xa_erase_index(xa, base + 1); /* Storing a value should mark it used */ xa_store_index(xa, base + 1, GFP_KERNEL); xa_alloc_index(xa, base + 2, GFP_KERNEL); /* If we then erase base, it should be free */ xa_erase_index(xa, base); xa_alloc_index(xa, base, GFP_KERNEL); xa_erase_index(xa, base + 1); xa_erase_index(xa, base + 2); for (i = 1; i < 5000; i++) { xa_alloc_index(xa, base + i, GFP_KERNEL); } xa_destroy(xa); /* Check that we fail properly at the limit of allocation */ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1), XA_LIMIT(UINT_MAX - 1, UINT_MAX), GFP_KERNEL) != 0); XA_BUG_ON(xa, id != 0xfffffffeU); XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX), XA_LIMIT(UINT_MAX - 1, UINT_MAX), GFP_KERNEL) != 0); XA_BUG_ON(xa, id != 0xffffffffU); id = 3; XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0), XA_LIMIT(UINT_MAX - 1, UINT_MAX), GFP_KERNEL) != -EBUSY); XA_BUG_ON(xa, id != 3); xa_destroy(xa); XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), GFP_KERNEL) != -EBUSY); XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), GFP_KERNEL) != -EBUSY); xa_erase_index(xa, 3); XA_BUG_ON(xa, !xa_empty(xa)); } static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base) { unsigned int i, id; unsigned long index; void *entry; /* Allocate and free a NULL and check xa_empty() behaves */ XA_BUG_ON(xa, !xa_empty(xa)); XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0); XA_BUG_ON(xa, id != base); XA_BUG_ON(xa, xa_empty(xa)); XA_BUG_ON(xa, xa_erase(xa, id) != NULL); XA_BUG_ON(xa, !xa_empty(xa)); /* Ditto, but check destroy instead of erase */ XA_BUG_ON(xa, !xa_empty(xa)); XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0); XA_BUG_ON(xa, id != base); XA_BUG_ON(xa, xa_empty(xa)); xa_destroy(xa); XA_BUG_ON(xa, !xa_empty(xa)); for (i = base; i < base + 10; i++) { XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0); XA_BUG_ON(xa, id != i); } XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL); XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL); XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4)); XA_BUG_ON(xa, xa_erase(xa, 5) != NULL); XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, 
GFP_KERNEL) != 0); XA_BUG_ON(xa, id != 5); xa_for_each(xa, index, entry) { xa_erase_index(xa, index); } for (i = base; i < base + 9; i++) { XA_BUG_ON(xa, xa_erase(xa, i) != NULL); XA_BUG_ON(xa, xa_empty(xa)); } XA_BUG_ON(xa, xa_erase(xa, 8) != NULL); XA_BUG_ON(xa, xa_empty(xa)); XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL); XA_BUG_ON(xa, !xa_empty(xa)); xa_destroy(xa); } static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) { struct xa_limit limit = XA_LIMIT(1, 0x3fff); u32 next = 0; unsigned int i, id; unsigned long index; void *entry; XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit, &next, GFP_KERNEL) != 0); XA_BUG_ON(xa, id != 1); next = 0x3ffd; XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit, &next, GFP_KERNEL) != 0); XA_BUG_ON(xa, id != 0x3ffd); xa_erase_index(xa, 0x3ffd); xa_erase_index(xa, 1); XA_BUG_ON(xa, !xa_empty(xa)); for (i = 0x3ffe; i < 0x4003; i++) { if (i < 0x4000) entry = xa_mk_index(i); else entry = xa_mk_index(i - 0x3fff); XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit, &next, GFP_KERNEL) != (id == 1)); XA_BUG_ON(xa, xa_mk_index(id) != entry); } /* Check wrap-around is handled correctly */ if (base != 0) xa_erase_index(xa, base); xa_erase_index(xa, base + 1); next = UINT_MAX; XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX), xa_limit_32b, &next, GFP_KERNEL) != 0); XA_BUG_ON(xa, id != UINT_MAX); XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base), xa_limit_32b, &next, GFP_KERNEL) != 1); XA_BUG_ON(xa, id != base); XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1), xa_limit_32b, &next, GFP_KERNEL) != 0); XA_BUG_ON(xa, id != base + 1); xa_for_each(xa, index, entry) xa_erase_index(xa, index); XA_BUG_ON(xa, !xa_empty(xa)); } static DEFINE_XARRAY_ALLOC(xa0); static DEFINE_XARRAY_ALLOC1(xa1); static noinline void check_xa_alloc(void) { check_xa_alloc_1(&xa0, 0); check_xa_alloc_1(&xa1, 1); check_xa_alloc_2(&xa0, 0); check_xa_alloc_2(&xa1, 1); check_xa_alloc_3(&xa0, 0); check_xa_alloc_3(&xa1, 1); } static noinline void __check_store_iter(struct xarray *xa, unsigned long start, unsigned int order, unsigned int present) { XA_STATE_ORDER(xas, xa, start, order); void *entry; unsigned int count = 0; retry: xas_lock(&xas); xas_for_each_conflict(&xas, entry) { XA_BUG_ON(xa, !xa_is_value(entry)); XA_BUG_ON(xa, entry < xa_mk_index(start)); XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1)); count++; } xas_store(&xas, xa_mk_index(start)); xas_unlock(&xas); if (xas_nomem(&xas, GFP_KERNEL)) { count = 0; goto retry; } XA_BUG_ON(xa, xas_error(&xas)); XA_BUG_ON(xa, count != present); XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start)); XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) != xa_mk_index(start)); xa_erase_index(xa, start); } static noinline void check_store_iter(struct xarray *xa) { unsigned int i, j; unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
20 : 1; for (i = 0; i < max_order; i++) { unsigned int min = 1 << i; unsigned int max = (2 << i) - 1; __check_store_iter(xa, 0, i, 0); XA_BUG_ON(xa, !xa_empty(xa)); __check_store_iter(xa, min, i, 0); XA_BUG_ON(xa, !xa_empty(xa)); xa_store_index(xa, min, GFP_KERNEL); __check_store_iter(xa, min, i, 1); XA_BUG_ON(xa, !xa_empty(xa)); xa_store_index(xa, max, GFP_KERNEL); __check_store_iter(xa, min, i, 1); XA_BUG_ON(xa, !xa_empty(xa)); for (j = 0; j < min; j++) xa_store_index(xa, j, GFP_KERNEL); __check_store_iter(xa, 0, i, min); XA_BUG_ON(xa, !xa_empty(xa)); for (j = 0; j < min; j++) xa_store_index(xa, min + j, GFP_KERNEL); __check_store_iter(xa, min, i, min); XA_BUG_ON(xa, !xa_empty(xa)); } #ifdef CONFIG_XARRAY_MULTI xa_store_index(xa, 63, GFP_KERNEL); xa_store_index(xa, 65, GFP_KERNEL); __check_store_iter(xa, 64, 2, 1); xa_erase_index(xa, 63); #endif XA_BUG_ON(xa, !xa_empty(xa)); } static noinline void check_multi_find_1(struct xarray *xa, unsigned order) { #ifdef CONFIG_XARRAY_MULTI unsigned long multi = 3 << order; unsigned long next = 4 << order; unsigned long index; xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL); XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL); XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL); index = 0; XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) != xa_mk_value(multi)); XA_BUG_ON(xa, index != multi); index = multi + 1; XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) != xa_mk_value(multi)); XA_BUG_ON(xa, (index < multi) || (index >= next)); XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) != xa_mk_value(next)); XA_BUG_ON(xa, index != next); XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL); XA_BUG_ON(xa, index != next); xa_erase_index(xa, multi); xa_erase_index(xa, next); xa_erase_index(xa, next + 1); XA_BUG_ON(xa, !xa_empty(xa)); #endif } static noinline void check_multi_find_2(struct xarray *xa) { unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
10 : 1; unsigned int i, j; void *entry; for (i = 0; i < max_order; i++) { unsigned long index = 1UL << i; for (j = 0; j < index; j++) { XA_STATE(xas, xa, j + index); xa_store_index(xa, index - 1, GFP_KERNEL); xa_store_order(xa, index, i, xa_mk_index(index), GFP_KERNEL); rcu_read_lock(); xas_for_each(&xas, entry, ULONG_MAX) { xa_erase_index(xa, index); } rcu_read_unlock(); xa_erase_index(xa, index - 1); XA_BUG_ON(xa, !xa_empty(xa)); } } } static noinline void check_multi_find_3(struct xarray *xa) { unsigned int order; for (order = 5; order < order_limit; order++) { unsigned long index = 1UL << (order - 5); XA_BUG_ON(xa, !xa_empty(xa)); xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL); XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT)); xa_erase_index(xa, 0); } } static noinline void check_find_1(struct xarray *xa) { unsigned long i, j, k; XA_BUG_ON(xa, !xa_empty(xa)); /* * Check xa_find with all pairs between 0 and 99 inclusive, * starting at every index between 0 and 99 */ for (i = 0; i < 100; i++) { XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL); xa_set_mark(xa, i, XA_MARK_0); for (j = 0; j < i; j++) { XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) != NULL); xa_set_mark(xa, j, XA_MARK_0); for (k = 0; k < 100; k++) { unsigned long index = k; void *entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); if (k <= j) XA_BUG_ON(xa, index != j); else if (k <= i) XA_BUG_ON(xa, index != i); else XA_BUG_ON(xa, entry != NULL); index = k; entry = xa_find(xa, &index, ULONG_MAX, XA_MARK_0); if (k <= j) XA_BUG_ON(xa, index != j); else if (k <= i) XA_BUG_ON(xa, index != i); else XA_BUG_ON(xa, entry != NULL); } xa_erase_index(xa, j); XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0)); XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); } xa_erase_index(xa, i); XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0)); } XA_BUG_ON(xa, !xa_empty(xa)); } static noinline void check_find_2(struct xarray *xa) { void *entry; unsigned long i, j, index; xa_for_each(xa, index, entry) { XA_BUG_ON(xa, true); } for (i = 0; i < 1024; i++) { xa_store_index(xa, index, GFP_KERNEL); j = 0; xa_for_each(xa, index, entry) { XA_BUG_ON(xa, xa_mk_index(index) != entry); XA_BUG_ON(xa, index != j++); } } xa_destroy(xa); } static noinline void check_find_3(struct xarray *xa) { XA_STATE(xas, xa, 0); unsigned long i, j, k; void *entry; for (i = 0; i < 100; i++) { for (j = 0; j < 100; j++) { rcu_read_lock(); for (k = 0; k < 100; k++) { xas_set(&xas, j); xas_for_each_marked(&xas, entry, k, XA_MARK_0) ; if (j > k) XA_BUG_ON(xa, xas.xa_node != XAS_RESTART); } rcu_read_unlock(); } xa_store_index(xa, i, GFP_KERNEL); xa_set_mark(xa, i, XA_MARK_0); } xa_destroy(xa); } static noinline void check_find_4(struct xarray *xa) { unsigned long index = 0; void *entry; xa_store_index(xa, ULONG_MAX, GFP_KERNEL); entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT); XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX)); entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT); XA_BUG_ON(xa, entry); xa_erase_index(xa, ULONG_MAX); } static noinline void check_find(struct xarray *xa) { unsigned i; check_find_1(xa); check_find_2(xa); check_find_3(xa); check_find_4(xa); for (i = 2; i < 10; i++) check_multi_find_1(xa, i); check_multi_find_2(xa); check_multi_find_3(xa); } /* See find_swap_entry() in mm/shmem.c */ static noinline unsigned long xa_find_entry(struct xarray *xa, void *item) { XA_STATE(xas, xa, 0); unsigned int checked = 0; void *entry; rcu_read_lock(); xas_for_each(&xas, entry, ULONG_MAX) { if (xas_retry(&xas, entry)) continue; if (entry == 
item) break; checked++; if ((checked % 4) != 0) continue; xas_pause(&xas); } rcu_read_unlock(); return entry ? xas.xa_index : -1; } static noinline void check_find_entry(struct xarray *xa) { #ifdef CONFIG_XARRAY_MULTI unsigned int order; unsigned long offset, index; for (order = 0; order < 20; order++) { for (offset = 0; offset < (1UL << (order + 3)); offset += (1UL << order)) { for (index = 0; index < (1UL << (order + 5)); index += (1UL << order)) { xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); XA_BUG_ON(xa, xa_load(xa, index) != xa_mk_index(index)); XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(index)) != index); } XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); xa_destroy(xa); } } #endif XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); xa_store_index(xa, ULONG_MAX, GFP_KERNEL); XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1); xa_erase_index(xa, ULONG_MAX); XA_BUG_ON(xa, !xa_empty(xa)); } static noinline void check_pause(struct xarray *xa) { XA_STATE(xas, xa, 0); void *entry; unsigned int order; unsigned long index = 1; unsigned int count = 0; for (order = 0; order < order_limit; order++) { XA_BUG_ON(xa, xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL)); index += 1UL << order; } rcu_read_lock(); xas_for_each(&xas, entry, ULONG_MAX) { XA_BUG_ON(xa, entry != xa_mk_index(1UL << count)); count++; } rcu_read_unlock(); XA_BUG_ON(xa, count != order_limit); count = 0; xas_set(&xas, 0); rcu_read_lock(); xas_for_each(&xas, entry, ULONG_MAX) { XA_BUG_ON(xa, entry != xa_mk_index(1UL << count)); count++; xas_pause(&xas); } rcu_read_unlock(); XA_BUG_ON(xa, count != order_limit); xa_destroy(xa); } static noinline void check_move_tiny(struct xarray *xa) { XA_STATE(xas, xa, 0); XA_BUG_ON(xa, !xa_empty(xa)); rcu_read_lock(); XA_BUG_ON(xa, xas_next(&xas) != NULL); XA_BUG_ON(xa, xas_next(&xas) != NULL); rcu_read_unlock(); xa_store_index(xa, 0, GFP_KERNEL); rcu_read_lock(); xas_set(&xas, 0); XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0)); XA_BUG_ON(xa, xas_next(&xas) != NULL); xas_set(&xas, 0); XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0)); XA_BUG_ON(xa, xas_prev(&xas) != NULL); rcu_read_unlock(); xa_erase_index(xa, 0); XA_BUG_ON(xa, !xa_empty(xa)); } static noinline void check_move_max(struct xarray *xa) { XA_STATE(xas, xa, 0); xa_store_index(xa, ULONG_MAX, GFP_KERNEL); rcu_read_lock(); XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX)); XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL); rcu_read_unlock(); xas_set(&xas, 0); rcu_read_lock(); XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX)); xas_pause(&xas); XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL); rcu_read_unlock(); xa_erase_index(xa, ULONG_MAX); XA_BUG_ON(xa, !xa_empty(xa)); } static noinline void check_move_small(struct xarray *xa, unsigned long idx) { XA_STATE(xas, xa, 0); unsigned long i; xa_store_index(xa, 0, GFP_KERNEL); xa_store_index(xa, idx, GFP_KERNEL); rcu_read_lock(); for (i = 0; i < idx * 4; i++) { void *entry = xas_next(&xas); if (i <= idx) XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); XA_BUG_ON(xa, xas.xa_index != i); if (i == 0 || i == idx) XA_BUG_ON(xa, entry != xa_mk_index(i)); else XA_BUG_ON(xa, entry != NULL); } xas_next(&xas); XA_BUG_ON(xa, xas.xa_index != i); do { void *entry = xas_prev(&xas); i--; if (i <= idx) XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); XA_BUG_ON(xa, xas.xa_index != i); if (i == 0 || i == idx) XA_BUG_ON(xa, entry != xa_mk_index(i)); else XA_BUG_ON(xa, entry != NULL); } while (i > 0); 
xas_set(&xas, ULONG_MAX); XA_BUG_ON(xa, xas_next(&xas) != NULL); XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0)); XA_BUG_ON(xa, xas.xa_index != 0); XA_BUG_ON(xa, xas_prev(&xas) != NULL); XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); rcu_read_unlock(); xa_erase_index(xa, 0); xa_erase_index(xa, idx); XA_BUG_ON(xa, !xa_empty(xa)); } static noinline void check_move(struct xarray *xa) { XA_STATE(xas, xa, (1 << 16) - 1); unsigned long i; for (i = 0; i < (1 << 16); i++) XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL); rcu_read_lock(); do { void *entry = xas_prev(&xas); i--; XA_BUG_ON(xa, entry != xa_mk_index(i)); XA_BUG_ON(xa, i != xas.xa_index); } while (i != 0); XA_BUG_ON(xa, xas_prev(&xas) != NULL); XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); do { void *entry = xas_next(&xas); XA_BUG_ON(xa, entry != xa_mk_index(i)); XA_BUG_ON(xa, i != xas.xa_index); i++; } while (i < (1 << 16)); rcu_read_unlock(); for (i = (1 << 8); i < (1 << 15); i++) xa_erase_index(xa, i); i = xas.xa_index; rcu_read_lock(); do { void *entry = xas_prev(&xas); i--; if ((i < (1 << 8)) || (i >= (1 << 15))) XA_BUG_ON(xa, entry != xa_mk_index(i)); else XA_BUG_ON(xa, entry != NULL); XA_BUG_ON(xa, i != xas.xa_index); } while (i != 0); XA_BUG_ON(xa, xas_prev(&xas) != NULL); XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); do { void *entry = xas_next(&xas); if ((i < (1 << 8)) || (i >= (1 << 15))) XA_BUG_ON(xa, entry != xa_mk_index(i)); else XA_BUG_ON(xa, entry != NULL); XA_BUG_ON(xa, i != xas.xa_index); i++; } while (i < (1 << 16)); rcu_read_unlock(); xa_destroy(xa); check_move_tiny(xa); check_move_max(xa); for (i = 0; i < 16; i++) check_move_small(xa, 1UL << i); for (i = 2; i < 16; i++) check_move_small(xa, (1UL << i) - 1); } static noinline void xa_store_many_order(struct xarray *xa, unsigned long index, unsigned order) { XA_STATE_ORDER(xas, xa, index, order); unsigned int i = 0; do { xas_lock(&xas); XA_BUG_ON(xa, xas_find_conflict(&xas)); xas_create_range(&xas); if (xas_error(&xas)) goto unlock; for (i = 0; i < (1U << order); i++) { XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i))); xas_next(&xas); } unlock: xas_unlock(&xas); } while (xas_nomem(&xas, GFP_KERNEL)); XA_BUG_ON(xa, xas_error(&xas)); } static noinline void check_create_range_1(struct xarray *xa, unsigned long index, unsigned order) { unsigned long i; xa_store_many_order(xa, index, order); for (i = index; i < index + (1UL << order); i++) xa_erase_index(xa, i); XA_BUG_ON(xa, !xa_empty(xa)); } static noinline void check_create_range_2(struct xarray *xa, unsigned order) { unsigned long i; unsigned long nr = 1UL << order; for (i = 0; i < nr * nr; i += nr) xa_store_many_order(xa, i, order); for (i = 0; i < nr * nr; i++) xa_erase_index(xa, i); XA_BUG_ON(xa, !xa_empty(xa)); } static noinline void check_create_range_3(void) { XA_STATE(xas, NULL, 0); xas_set_err(&xas, -EEXIST); xas_create_range(&xas); XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST); } static noinline void check_create_range_4(struct xarray *xa, unsigned long index, unsigned order) { XA_STATE_ORDER(xas, xa, index, order); unsigned long base = xas.xa_index; unsigned long i = 0; xa_store_index(xa, index, GFP_KERNEL); do { xas_lock(&xas); xas_create_range(&xas); if (xas_error(&xas)) goto unlock; for (i = 0; i < (1UL << order); i++) { void *old = xas_store(&xas, xa_mk_index(base + i)); if (xas.xa_index == index) XA_BUG_ON(xa, old != xa_mk_index(base + i)); else XA_BUG_ON(xa, old != NULL); xas_next(&xas); } unlock: xas_unlock(&xas); } while (xas_nomem(&xas, GFP_KERNEL)); 
XA_BUG_ON(xa, xas_error(&xas)); for (i = base; i < base + (1UL << order); i++) xa_erase_index(xa, i); XA_BUG_ON(xa, !xa_empty(xa)); } static noinline void check_create_range_5(struct xarray *xa, unsigned long index, unsigned int order) { XA_STATE_ORDER(xas, xa, index, order); unsigned int i; xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); for (i = 0; i < order + 10; i++) { do { xas_lock(&xas); xas_create_range(&xas); xas_unlock(&xas); } while (xas_nomem(&xas, GFP_KERNEL)); } xa_destroy(xa); } static noinline void check_create_range(struct xarray *xa) { unsigned int order; unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1; for (order = 0; order < max_order; order++) { check_create_range_1(xa, 0, order); check_create_range_1(xa, 1U << order, order); check_create_range_1(xa, 2U << order, order); check_create_range_1(xa, 3U << order, order); check_create_range_1(xa, 1U << 24, order); if (order < 10) check_create_range_2(xa, order); check_create_range_4(xa, 0, order); check_create_range_4(xa, 1U << order, order); check_create_range_4(xa, 2U << order, order); check_create_range_4(xa, 3U << order, order); check_create_range_4(xa, 1U << 24, order); check_create_range_4(xa, 1, order); check_create_range_4(xa, (1U << order) + 1, order); check_create_range_4(xa, (2U << order) + 1, order); check_create_range_4(xa, (2U << order) - 1, order); check_create_range_4(xa, (3U << order) + 1, order); check_create_range_4(xa, (3U << order) - 1, order); check_create_range_4(xa, (1U << 24) + 1, order); check_create_range_5(xa, 0, order); check_create_range_5(xa, (1U << order), order); } check_create_range_3(); } static noinline void __check_store_range(struct xarray *xa, unsigned long first, unsigned long last) { #ifdef CONFIG_XARRAY_MULTI xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL); XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first)); XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first)); XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL); XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL); xa_store_range(xa, first, last, NULL, GFP_KERNEL); #endif XA_BUG_ON(xa, !xa_empty(xa)); } static noinline void check_store_range(struct xarray *xa) { unsigned long i, j; for (i = 0; i < 128; i++) { for (j = i; j < 128; j++) { __check_store_range(xa, i, j); __check_store_range(xa, 128 + i, 128 + j); __check_store_range(xa, 4095 + i, 4095 + j); __check_store_range(xa, 4096 + i, 4096 + j); __check_store_range(xa, 123456 + i, 123456 + j); __check_store_range(xa, (1 << 24) + i, (1 << 24) + j); } } } #ifdef CONFIG_XARRAY_MULTI static void check_split_1(struct xarray *xa, unsigned long index, unsigned int order, unsigned int new_order) { XA_STATE_ORDER(xas, xa, index, new_order); unsigned int i; xa_store_order(xa, index, order, xa, GFP_KERNEL); xas_split_alloc(&xas, xa, order, GFP_KERNEL); xas_lock(&xas); xas_split(&xas, xa, order); for (i = 0; i < (1 << order); i += (1 << new_order)) __xa_store(xa, index + i, xa_mk_index(index + i), 0); xas_unlock(&xas); for (i = 0; i < (1 << order); i++) { unsigned int val = index + (i & ~((1 << new_order) - 1)); XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val)); } xa_set_mark(xa, index, XA_MARK_0); XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0)); xa_destroy(xa); } static noinline void check_split(struct xarray *xa) { unsigned int order, new_order; XA_BUG_ON(xa, !xa_empty(xa)); for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) { for (new_order = 0; new_order < order; new_order++) { check_split_1(xa, 0, order, new_order); 
check_split_1(xa, 1UL << order, order, new_order); check_split_1(xa, 3UL << order, order, new_order); } } } #else static void check_split(struct xarray *xa) { } #endif static void check_align_1(struct xarray *xa, char *name) { int i; unsigned int id; unsigned long index; void *entry; for (i = 0; i < 8; i++) { XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b, GFP_KERNEL) != 0); XA_BUG_ON(xa, id != i); } xa_for_each(xa, index, entry) XA_BUG_ON(xa, xa_is_err(entry)); xa_destroy(xa); } /* * We should always be able to store without allocating memory after * reserving a slot. */ static void check_align_2(struct xarray *xa, char *name) { int i; XA_BUG_ON(xa, !xa_empty(xa)); for (i = 0; i < 8; i++) { XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL); xa_erase(xa, 0); } for (i = 0; i < 8; i++) { XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL); xa_erase(xa, 0); } XA_BUG_ON(xa, !xa_empty(xa)); } static noinline void check_align(struct xarray *xa) { char name[] = "Motorola 68000"; check_align_1(xa, name); check_align_1(xa, name + 1); check_align_1(xa, name + 2); check_align_1(xa, name + 3); check_align_2(xa, name); } static LIST_HEAD(shadow_nodes); static void test_update_node(struct xa_node *node) { if (node->count && node->count == node->nr_values) { if (list_empty(&node->private_list)) list_add(&shadow_nodes, &node->private_list); } else { if (!list_empty(&node->private_list)) list_del_init(&node->private_list); } } static noinline void shadow_remove(struct xarray *xa) { struct xa_node *node; xa_lock(xa); while ((node = list_first_entry_or_null(&shadow_nodes, struct xa_node, private_list))) { XA_BUG_ON(xa, node->array != xa); list_del_init(&node->private_list); xa_delete_node(node, test_update_node); } xa_unlock(xa); } static noinline void check_workingset(struct xarray *xa, unsigned long index) { XA_STATE(xas, xa, index); xas_set_update(&xas, test_update_node); do { xas_lock(&xas); xas_store(&xas, xa_mk_value(0)); xas_next(&xas); xas_store(&xas, xa_mk_value(1)); xas_unlock(&xas); } while (xas_nomem(&xas, GFP_KERNEL)); XA_BUG_ON(xa, list_empty(&shadow_nodes)); xas_lock(&xas); xas_next(&xas); xas_store(&xas, &xas); XA_BUG_ON(xa, !list_empty(&shadow_nodes)); xas_store(&xas, xa_mk_value(2)); xas_unlock(&xas); XA_BUG_ON(xa, list_empty(&shadow_nodes)); shadow_remove(xa); XA_BUG_ON(xa, !list_empty(&shadow_nodes)); XA_BUG_ON(xa, !xa_empty(xa)); } /* * Check that the pointer / value / sibling entries are accounted the * way we expect them to be. */ static noinline void check_account(struct xarray *xa) { #ifdef CONFIG_XARRAY_MULTI unsigned int order; for (order = 1; order < 12; order++) { XA_STATE(xas, xa, 1 << order); xa_store_order(xa, 0, order, xa, GFP_KERNEL); rcu_read_lock(); xas_load(&xas); XA_BUG_ON(xa, xas.xa_node->count == 0); XA_BUG_ON(xa, xas.xa_node->count > (1 << order)); XA_BUG_ON(xa, xas.xa_node->nr_values != 0); rcu_read_unlock(); xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order), GFP_KERNEL); XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2); xa_erase(xa, 1 << order); XA_BUG_ON(xa, xas.xa_node->nr_values != 0); xa_erase(xa, 0); XA_BUG_ON(xa, !xa_empty(xa)); } #endif } static noinline void check_get_order(struct xarray *xa) { unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
20 : 1; unsigned int order; unsigned long i, j; for (i = 0; i < 3; i++) XA_BUG_ON(xa, xa_get_order(xa, i) != 0); for (order = 0; order < max_order; order++) { for (i = 0; i < 10; i++) { xa_store_order(xa, i << order, order, xa_mk_index(i << order), GFP_KERNEL); for (j = i << order; j < (i + 1) << order; j++) XA_BUG_ON(xa, xa_get_order(xa, j) != order); xa_erase(xa, i << order); } } } static noinline void check_destroy(struct xarray *xa) { unsigned long index; XA_BUG_ON(xa, !xa_empty(xa)); /* Destroying an empty array is a no-op */ xa_destroy(xa); XA_BUG_ON(xa, !xa_empty(xa)); /* Destroying an array with a single entry */ for (index = 0; index < 1000; index++) { xa_store_index(xa, index, GFP_KERNEL); XA_BUG_ON(xa, xa_empty(xa)); xa_destroy(xa); XA_BUG_ON(xa, !xa_empty(xa)); } /* Destroying an array with a single entry at ULONG_MAX */ xa_store(xa, ULONG_MAX, xa, GFP_KERNEL); XA_BUG_ON(xa, xa_empty(xa)); xa_destroy(xa); XA_BUG_ON(xa, !xa_empty(xa)); #ifdef CONFIG_XARRAY_MULTI /* Destroying an array with a multi-index entry */ xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL); XA_BUG_ON(xa, xa_empty(xa)); xa_destroy(xa); XA_BUG_ON(xa, !xa_empty(xa)); #endif } static DEFINE_XARRAY(array); static int xarray_checks(void) { check_xa_err(&array); check_xas_retry(&array); check_xa_load(&array); check_xa_mark(&array); check_xa_shrink(&array); check_xas_erase(&array); check_insert(&array); check_cmpxchg(&array); check_reserve(&array); check_reserve(&xa0); check_multi_store(&array); check_get_order(&array); check_xa_alloc(); check_find(&array); check_find_entry(&array); check_pause(&array); check_account(&array); check_destroy(&array); check_move(&array); check_create_range(&array); check_store_range(&array); check_store_iter(&array); check_align(&xa0); check_split(&array); check_workingset(&array, 0); check_workingset(&array, 64); check_workingset(&array, 4096); printk("XArray: %u of %u tests passed\n", tests_passed, tests_run); return (tests_run == tests_passed) ? 0 : -EINVAL; } static void xarray_exit(void) { } module_init(xarray_checks); module_exit(xarray_exit); MODULE_AUTHOR("Matthew Wilcox <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
lib/test_xarray.c
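The check_* functions above mostly drive the advanced xas_* interface; for orientation, here is a hedged sketch of the plain xa_* API that the same operations map onto. The names example_xa, example_store_load() and example_drain() are invented; the sketch assumes the documented behaviour that xa_store() returns the previous entry at that index, or an xa_err()-encoded error.

#include <linux/kernel.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(example_xa);

/*
 * Store an object at a caller-chosen index and read it back.
 * The pointer must be word-aligned (e.g. from kmalloc()).
 */
static int example_store_load(unsigned long index, void *object)
{
	void *old = xa_store(&example_xa, index, object, GFP_KERNEL);

	if (xa_is_err(old))
		return xa_err(old);

	WARN_ON(xa_load(&example_xa, index) != object);
	return 0;
}

/* Walk every present entry and drop it. */
static void example_drain(void)
{
	unsigned long index;
	void *entry;

	xa_for_each(&example_xa, index, entry)
		xa_erase(&example_xa, index);
}

Unlike the xas_* calls in the test, which require the caller to take xas_lock()/xas_unlock() explicitly, the xa_store()/xa_erase() wrappers above take the array's internal spinlock themselves.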
// SPDX-License-Identifier: GPL-2.0 /* * Generic infrastructure for lifetime debugging of objects. * * Copyright (C) 2008, Thomas Gleixner <[email protected]> */ #define pr_fmt(fmt) "ODEBUG: " fmt #include <linux/debugobjects.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/seq_file.h> #include <linux/debugfs.h> #include <linux/slab.h> #include <linux/hash.h> #include <linux/kmemleak.h> #include <linux/cpu.h> #define ODEBUG_HASH_BITS 14 #define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS) #define ODEBUG_POOL_SIZE 1024 #define ODEBUG_POOL_MIN_LEVEL 256 #define ODEBUG_POOL_PERCPU_SIZE 64 #define ODEBUG_BATCH_SIZE 16 #define ODEBUG_CHUNK_SHIFT PAGE_SHIFT #define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT) #define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1)) /* * We limit the freeing of debug objects via workqueue at a maximum * frequency of 10Hz and about 1024 objects for each freeing operation. * So it is freeing at most 10k debug objects per second. */ #define ODEBUG_FREE_WORK_MAX 1024 #define ODEBUG_FREE_WORK_DELAY DIV_ROUND_UP(HZ, 10) struct debug_bucket { struct hlist_head list; raw_spinlock_t lock; }; /* * Debug object percpu free list * Access is protected by disabling irq */ struct debug_percpu_free { struct hlist_head free_objs; int obj_free; }; static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool); static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata; static DEFINE_RAW_SPINLOCK(pool_lock); static HLIST_HEAD(obj_pool); static HLIST_HEAD(obj_to_free); /* * Because of the presence of percpu free pools, obj_pool_free will * under-count those in the percpu free pools. Similarly, obj_pool_used * will over-count those in the percpu free pools. Adjustments will be * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used * can be off. */ static int obj_pool_min_free = ODEBUG_POOL_SIZE; static int obj_pool_free = ODEBUG_POOL_SIZE; static int obj_pool_used; static int obj_pool_max_used; static bool obj_freeing; /* The number of objs on the global free list */ static int obj_nr_tofree; static int debug_objects_maxchain __read_mostly; static int __maybe_unused debug_objects_maxchecked __read_mostly; static int debug_objects_fixups __read_mostly; static int debug_objects_warnings __read_mostly; static int debug_objects_enabled __read_mostly = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT; static int debug_objects_pool_size __read_mostly = ODEBUG_POOL_SIZE; static int debug_objects_pool_min_level __read_mostly = ODEBUG_POOL_MIN_LEVEL; static const struct debug_obj_descr *descr_test __read_mostly; static struct kmem_cache *obj_cache __read_mostly; /* * Track numbers of kmem_cache_alloc()/free() calls done. 
*/ static int debug_objects_allocated; static int debug_objects_freed; static void free_obj_work(struct work_struct *work); static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work); static int __init enable_object_debug(char *str) { debug_objects_enabled = 1; return 0; } static int __init disable_object_debug(char *str) { debug_objects_enabled = 0; return 0; } early_param("debug_objects", enable_object_debug); early_param("no_debug_objects", disable_object_debug); static const char *obj_states[ODEBUG_STATE_MAX] = { [ODEBUG_STATE_NONE] = "none", [ODEBUG_STATE_INIT] = "initialized", [ODEBUG_STATE_INACTIVE] = "inactive", [ODEBUG_STATE_ACTIVE] = "active", [ODEBUG_STATE_DESTROYED] = "destroyed", [ODEBUG_STATE_NOTAVAILABLE] = "not available", }; static void fill_pool(void) { gfp_t gfp = __GFP_HIGH | __GFP_NOWARN; struct debug_obj *obj; unsigned long flags; if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level)) return; /* * Reuse objs from the global free list; they will be reinitialized * when allocating. * * Both obj_nr_tofree and obj_pool_free are checked locklessly; the * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical * sections. */ while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) { raw_spin_lock_irqsave(&pool_lock, flags); /* * Recheck with the lock held as the worker thread might have * won the race and freed the global free list already. */ while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) { obj = hlist_entry(obj_to_free.first, typeof(*obj), node); hlist_del(&obj->node); WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1); hlist_add_head(&obj->node, &obj_pool); WRITE_ONCE(obj_pool_free, obj_pool_free + 1); } raw_spin_unlock_irqrestore(&pool_lock, flags); } if (unlikely(!obj_cache)) return; while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) { struct debug_obj *new[ODEBUG_BATCH_SIZE]; int cnt; for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) { new[cnt] = kmem_cache_zalloc(obj_cache, gfp); if (!new[cnt]) break; } if (!cnt) return; raw_spin_lock_irqsave(&pool_lock, flags); while (cnt) { hlist_add_head(&new[--cnt]->node, &obj_pool); debug_objects_allocated++; WRITE_ONCE(obj_pool_free, obj_pool_free + 1); } raw_spin_unlock_irqrestore(&pool_lock, flags); } } /* * Lookup an object in the hash bucket. */ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b) { struct debug_obj *obj; int cnt = 0; hlist_for_each_entry(obj, &b->list, node) { cnt++; if (obj->object == addr) return obj; } if (cnt > debug_objects_maxchain) debug_objects_maxchain = cnt; return NULL; } /* * Allocate a new object from the hlist */ static struct debug_obj *__alloc_object(struct hlist_head *list) { struct debug_obj *obj = NULL; if (list->first) { obj = hlist_entry(list->first, typeof(*obj), node); hlist_del(&obj->node); } return obj; } static struct debug_obj * alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr) { struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool); struct debug_obj *obj; if (likely(obj_cache)) { obj = __alloc_object(&percpu_pool->free_objs); if (obj) { percpu_pool->obj_free--; goto init_obj; } } raw_spin_lock(&pool_lock); obj = __alloc_object(&obj_pool); if (obj) { obj_pool_used++; WRITE_ONCE(obj_pool_free, obj_pool_free - 1); /* * Looking ahead, allocate one batch of debug objects and * put them into the percpu free pool. 
*/ if (likely(obj_cache)) { int i; for (i = 0; i < ODEBUG_BATCH_SIZE; i++) { struct debug_obj *obj2; obj2 = __alloc_object(&obj_pool); if (!obj2) break; hlist_add_head(&obj2->node, &percpu_pool->free_objs); percpu_pool->obj_free++; obj_pool_used++; WRITE_ONCE(obj_pool_free, obj_pool_free - 1); } } if (obj_pool_used > obj_pool_max_used) obj_pool_max_used = obj_pool_used; if (obj_pool_free < obj_pool_min_free) obj_pool_min_free = obj_pool_free; } raw_spin_unlock(&pool_lock); init_obj: if (obj) { obj->object = addr; obj->descr = descr; obj->state = ODEBUG_STATE_NONE; obj->astate = 0; hlist_add_head(&obj->node, &b->list); } return obj; } /* * workqueue function to free objects. * * To reduce contention on the global pool_lock, the actual freeing of * debug objects will be delayed if the pool_lock is busy. */ static void free_obj_work(struct work_struct *work) { struct hlist_node *tmp; struct debug_obj *obj; unsigned long flags; HLIST_HEAD(tofree); WRITE_ONCE(obj_freeing, false); if (!raw_spin_trylock_irqsave(&pool_lock, flags)) return; if (obj_pool_free >= debug_objects_pool_size) goto free_objs; /* * The objs on the pool list might be allocated before the work is * run, so recheck if pool list it full or not, if not fill pool * list from the global free list. As it is likely that a workload * may be gearing up to use more and more objects, don't free any * of them until the next round. */ while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) { obj = hlist_entry(obj_to_free.first, typeof(*obj), node); hlist_del(&obj->node); hlist_add_head(&obj->node, &obj_pool); WRITE_ONCE(obj_pool_free, obj_pool_free + 1); WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1); } raw_spin_unlock_irqrestore(&pool_lock, flags); return; free_objs: /* * Pool list is already full and there are still objs on the free * list. Move remaining free objs to a temporary list to free the * memory outside the pool_lock held region. */ if (obj_nr_tofree) { hlist_move_list(&obj_to_free, &tofree); debug_objects_freed += obj_nr_tofree; WRITE_ONCE(obj_nr_tofree, 0); } raw_spin_unlock_irqrestore(&pool_lock, flags); hlist_for_each_entry_safe(obj, tmp, &tofree, node) { hlist_del(&obj->node); kmem_cache_free(obj_cache, obj); } } static void __free_object(struct debug_obj *obj) { struct debug_obj *objs[ODEBUG_BATCH_SIZE]; struct debug_percpu_free *percpu_pool; int lookahead_count = 0; unsigned long flags; bool work; local_irq_save(flags); if (!obj_cache) goto free_to_obj_pool; /* * Try to free it into the percpu pool first. */ percpu_pool = this_cpu_ptr(&percpu_obj_pool); if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) { hlist_add_head(&obj->node, &percpu_pool->free_objs); percpu_pool->obj_free++; local_irq_restore(flags); return; } /* * As the percpu pool is full, look ahead and pull out a batch * of objects from the percpu pool and free them as well. 
*/ for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) { objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs); if (!objs[lookahead_count]) break; percpu_pool->obj_free--; } free_to_obj_pool: raw_spin_lock(&pool_lock); work = (obj_pool_free > debug_objects_pool_size) && obj_cache && (obj_nr_tofree < ODEBUG_FREE_WORK_MAX); obj_pool_used--; if (work) { WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1); hlist_add_head(&obj->node, &obj_to_free); if (lookahead_count) { WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count); obj_pool_used -= lookahead_count; while (lookahead_count) { hlist_add_head(&objs[--lookahead_count]->node, &obj_to_free); } } if ((obj_pool_free > debug_objects_pool_size) && (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) { int i; /* * Free one more batch of objects from obj_pool. */ for (i = 0; i < ODEBUG_BATCH_SIZE; i++) { obj = __alloc_object(&obj_pool); hlist_add_head(&obj->node, &obj_to_free); WRITE_ONCE(obj_pool_free, obj_pool_free - 1); WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1); } } } else { WRITE_ONCE(obj_pool_free, obj_pool_free + 1); hlist_add_head(&obj->node, &obj_pool); if (lookahead_count) { WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count); obj_pool_used -= lookahead_count; while (lookahead_count) { hlist_add_head(&objs[--lookahead_count]->node, &obj_pool); } } } raw_spin_unlock(&pool_lock); local_irq_restore(flags); } /* * Put the object back into the pool and schedule work to free objects * if necessary. */ static void free_object(struct debug_obj *obj) { __free_object(obj); if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) { WRITE_ONCE(obj_freeing, true); schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY); } } #ifdef CONFIG_HOTPLUG_CPU static int object_cpu_offline(unsigned int cpu) { struct debug_percpu_free *percpu_pool; struct hlist_node *tmp; struct debug_obj *obj; unsigned long flags; /* Remote access is safe as the CPU is dead already */ percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu); hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) { hlist_del(&obj->node); kmem_cache_free(obj_cache, obj); } raw_spin_lock_irqsave(&pool_lock, flags); obj_pool_used -= percpu_pool->obj_free; debug_objects_freed += percpu_pool->obj_free; raw_spin_unlock_irqrestore(&pool_lock, flags); percpu_pool->obj_free = 0; return 0; } #endif /* * We run out of memory. That means we probably have tons of objects * allocated. */ static void debug_objects_oom(void) { struct debug_bucket *db = obj_hash; struct hlist_node *tmp; HLIST_HEAD(freelist); struct debug_obj *obj; unsigned long flags; int i; pr_warn("Out of memory. ODEBUG disabled\n"); for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { raw_spin_lock_irqsave(&db->lock, flags); hlist_move_list(&db->list, &freelist); raw_spin_unlock_irqrestore(&db->lock, flags); /* Now free them */ hlist_for_each_entry_safe(obj, tmp, &freelist, node) { hlist_del(&obj->node); free_object(obj); } } } /* * We use the pfn of the address for the hash. That way we can check * for freed objects simply by checking the affected bucket. 
*/ static struct debug_bucket *get_bucket(unsigned long addr) { unsigned long hash; hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS); return &obj_hash[hash]; } static void debug_print_object(struct debug_obj *obj, char *msg) { const struct debug_obj_descr *descr = obj->descr; static int limit; /* * Don't report if lookup_object_or_alloc() by the current thread * failed because lookup_object_or_alloc()/debug_objects_oom() by a * concurrent thread turned off debug_objects_enabled and cleared * the hash buckets. */ if (!debug_objects_enabled) return; if (limit < 5 && descr != descr_test) { void *hint = descr->debug_hint ? descr->debug_hint(obj->object) : NULL; limit++; WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) " "object: %p object type: %s hint: %pS\n", msg, obj_states[obj->state], obj->astate, obj->object, descr->name, hint); } debug_objects_warnings++; } /* * Try to repair the damage, so we have a better chance to get useful * debug output. */ static bool debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state), void * addr, enum debug_obj_state state) { if (fixup && fixup(addr, state)) { debug_objects_fixups++; return true; } return false; } static void debug_object_is_on_stack(void *addr, int onstack) { int is_on_stack; static int limit; if (limit > 4) return; is_on_stack = object_is_on_stack(addr); if (is_on_stack == onstack) return; limit++; if (is_on_stack) pr_warn("object %p is on stack %p, but NOT annotated.\n", addr, task_stack_page(current)); else pr_warn("object %p is NOT on stack %p, but annotated.\n", addr, task_stack_page(current)); WARN_ON(1); } static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr, bool onstack, bool alloc_ifstatic) { struct debug_obj *obj = lookup_object(addr, b); enum debug_obj_state state = ODEBUG_STATE_NONE; if (likely(obj)) return obj; /* * debug_object_init() unconditionally allocates untracked * objects. It does not matter whether it is a static object or * not. * * debug_object_assert_init() and debug_object_activate() allow * allocation only if the descriptor callback confirms that the * object is static and considered initialized. For non-static * objects the allocation needs to be done from the fixup callback. */ if (unlikely(alloc_ifstatic)) { if (!descr->is_static_object || !descr->is_static_object(addr)) return ERR_PTR(-ENOENT); /* Statically allocated objects are considered initialized */ state = ODEBUG_STATE_INIT; } obj = alloc_object(addr, b, descr); if (likely(obj)) { obj->state = state; debug_object_is_on_stack(addr, onstack); return obj; } /* Out of memory. Do the cleanup outside of the locked region */ debug_objects_enabled = 0; return NULL; } static void debug_objects_fill_pool(void) { /* * On RT enabled kernels the pool refill must happen in preemptible * context -- for !RT kernels we rely on the fact that spinlock_t and * raw_spinlock_t are basically the same type and this lock-type * inversion works just fine. */ if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) { /* * Annotate away the spinlock_t inside raw_spinlock_t warning * by temporarily raising the wait-type to WAIT_SLEEP, matching * the preemptible() condition above. 
*/ static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP); lock_map_acquire_try(&fill_pool_map); fill_pool(); lock_map_release(&fill_pool_map); } } static void __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack) { enum debug_obj_state state; struct debug_bucket *db; struct debug_obj *obj; unsigned long flags; debug_objects_fill_pool(); db = get_bucket((unsigned long) addr); raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object_or_alloc(addr, db, descr, onstack, false); if (unlikely(!obj)) { raw_spin_unlock_irqrestore(&db->lock, flags); debug_objects_oom(); return; } switch (obj->state) { case ODEBUG_STATE_NONE: case ODEBUG_STATE_INIT: case ODEBUG_STATE_INACTIVE: obj->state = ODEBUG_STATE_INIT; break; case ODEBUG_STATE_ACTIVE: state = obj->state; raw_spin_unlock_irqrestore(&db->lock, flags); debug_print_object(obj, "init"); debug_object_fixup(descr->fixup_init, addr, state); return; case ODEBUG_STATE_DESTROYED: raw_spin_unlock_irqrestore(&db->lock, flags); debug_print_object(obj, "init"); return; default: break; } raw_spin_unlock_irqrestore(&db->lock, flags); } /** * debug_object_init - debug checks when an object is initialized * @addr: address of the object * @descr: pointer to an object specific debug description structure */ void debug_object_init(void *addr, const struct debug_obj_descr *descr) { if (!debug_objects_enabled) return; __debug_object_init(addr, descr, 0); } EXPORT_SYMBOL_GPL(debug_object_init); /** * debug_object_init_on_stack - debug checks when an object on stack is * initialized * @addr: address of the object * @descr: pointer to an object specific debug description structure */ void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr) { if (!debug_objects_enabled) return; __debug_object_init(addr, descr, 1); } EXPORT_SYMBOL_GPL(debug_object_init_on_stack); /** * debug_object_activate - debug checks when an object is activated * @addr: address of the object * @descr: pointer to an object specific debug description structure * Returns 0 for success, -EINVAL for check failed. */ int debug_object_activate(void *addr, const struct debug_obj_descr *descr) { struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr }; enum debug_obj_state state; struct debug_bucket *db; struct debug_obj *obj; unsigned long flags; int ret; if (!debug_objects_enabled) return 0; debug_objects_fill_pool(); db = get_bucket((unsigned long) addr); raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object_or_alloc(addr, db, descr, false, true); if (likely(!IS_ERR_OR_NULL(obj))) { bool print_object = false; switch (obj->state) { case ODEBUG_STATE_INIT: case ODEBUG_STATE_INACTIVE: obj->state = ODEBUG_STATE_ACTIVE; ret = 0; break; case ODEBUG_STATE_ACTIVE: state = obj->state; raw_spin_unlock_irqrestore(&db->lock, flags); debug_print_object(obj, "activate"); ret = debug_object_fixup(descr->fixup_activate, addr, state); return ret ? 0 : -EINVAL; case ODEBUG_STATE_DESTROYED: print_object = true; ret = -EINVAL; break; default: ret = 0; break; } raw_spin_unlock_irqrestore(&db->lock, flags); if (print_object) debug_print_object(obj, "activate"); return ret; } raw_spin_unlock_irqrestore(&db->lock, flags); /* If NULL the allocation has hit OOM */ if (!obj) { debug_objects_oom(); return 0; } /* Object is neither static nor tracked. It's not initialized */ debug_print_object(&o, "activate"); ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE); return ret ? 
0 : -EINVAL; } EXPORT_SYMBOL_GPL(debug_object_activate); /** * debug_object_deactivate - debug checks when an object is deactivated * @addr: address of the object * @descr: pointer to an object specific debug description structure */ void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr) { struct debug_bucket *db; struct debug_obj *obj; unsigned long flags; bool print_object = false; if (!debug_objects_enabled) return; db = get_bucket((unsigned long) addr); raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); if (obj) { switch (obj->state) { case ODEBUG_STATE_INIT: case ODEBUG_STATE_INACTIVE: case ODEBUG_STATE_ACTIVE: if (!obj->astate) obj->state = ODEBUG_STATE_INACTIVE; else print_object = true; break; case ODEBUG_STATE_DESTROYED: print_object = true; break; default: break; } } raw_spin_unlock_irqrestore(&db->lock, flags); if (!obj) { struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr }; debug_print_object(&o, "deactivate"); } else if (print_object) { debug_print_object(obj, "deactivate"); } } EXPORT_SYMBOL_GPL(debug_object_deactivate); /** * debug_object_destroy - debug checks when an object is destroyed * @addr: address of the object * @descr: pointer to an object specific debug description structure */ void debug_object_destroy(void *addr, const struct debug_obj_descr *descr) { enum debug_obj_state state; struct debug_bucket *db; struct debug_obj *obj; unsigned long flags; bool print_object = false; if (!debug_objects_enabled) return; db = get_bucket((unsigned long) addr); raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); if (!obj) goto out_unlock; switch (obj->state) { case ODEBUG_STATE_NONE: case ODEBUG_STATE_INIT: case ODEBUG_STATE_INACTIVE: obj->state = ODEBUG_STATE_DESTROYED; break; case ODEBUG_STATE_ACTIVE: state = obj->state; raw_spin_unlock_irqrestore(&db->lock, flags); debug_print_object(obj, "destroy"); debug_object_fixup(descr->fixup_destroy, addr, state); return; case ODEBUG_STATE_DESTROYED: print_object = true; break; default: break; } out_unlock: raw_spin_unlock_irqrestore(&db->lock, flags); if (print_object) debug_print_object(obj, "destroy"); } EXPORT_SYMBOL_GPL(debug_object_destroy); /** * debug_object_free - debug checks when an object is freed * @addr: address of the object * @descr: pointer to an object specific debug description structure */ void debug_object_free(void *addr, const struct debug_obj_descr *descr) { enum debug_obj_state state; struct debug_bucket *db; struct debug_obj *obj; unsigned long flags; if (!debug_objects_enabled) return; db = get_bucket((unsigned long) addr); raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); if (!obj) goto out_unlock; switch (obj->state) { case ODEBUG_STATE_ACTIVE: state = obj->state; raw_spin_unlock_irqrestore(&db->lock, flags); debug_print_object(obj, "free"); debug_object_fixup(descr->fixup_free, addr, state); return; default: hlist_del(&obj->node); raw_spin_unlock_irqrestore(&db->lock, flags); free_object(obj); return; } out_unlock: raw_spin_unlock_irqrestore(&db->lock, flags); } EXPORT_SYMBOL_GPL(debug_object_free); /** * debug_object_assert_init - debug checks when object should be init-ed * @addr: address of the object * @descr: pointer to an object specific debug description structure */ void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr) { struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr }; struct debug_bucket *db; struct 
debug_obj *obj; unsigned long flags; if (!debug_objects_enabled) return; debug_objects_fill_pool(); db = get_bucket((unsigned long) addr); raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object_or_alloc(addr, db, descr, false, true); raw_spin_unlock_irqrestore(&db->lock, flags); if (likely(!IS_ERR_OR_NULL(obj))) return; /* If NULL the allocation has hit OOM */ if (!obj) { debug_objects_oom(); return; } /* Object is neither tracked nor static. It's not initialized. */ debug_print_object(&o, "assert_init"); debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE); } EXPORT_SYMBOL_GPL(debug_object_assert_init); /** * debug_object_active_state - debug checks object usage state machine * @addr: address of the object * @descr: pointer to an object specific debug description structure * @expect: expected state * @next: state to move to if expected state is found */ void debug_object_active_state(void *addr, const struct debug_obj_descr *descr, unsigned int expect, unsigned int next) { struct debug_bucket *db; struct debug_obj *obj; unsigned long flags; bool print_object = false; if (!debug_objects_enabled) return; db = get_bucket((unsigned long) addr); raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); if (obj) { switch (obj->state) { case ODEBUG_STATE_ACTIVE: if (obj->astate == expect) obj->astate = next; else print_object = true; break; default: print_object = true; break; } } raw_spin_unlock_irqrestore(&db->lock, flags); if (!obj) { struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr }; debug_print_object(&o, "active_state"); } else if (print_object) { debug_print_object(obj, "active_state"); } } EXPORT_SYMBOL_GPL(debug_object_active_state); #ifdef CONFIG_DEBUG_OBJECTS_FREE static void __debug_check_no_obj_freed(const void *address, unsigned long size) { unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; const struct debug_obj_descr *descr; enum debug_obj_state state; struct debug_bucket *db; struct hlist_node *tmp; struct debug_obj *obj; int cnt, objs_checked = 0; saddr = (unsigned long) address; eaddr = saddr + size; paddr = saddr & ODEBUG_CHUNK_MASK; chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1)); chunks >>= ODEBUG_CHUNK_SHIFT; for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) { db = get_bucket(paddr); repeat: cnt = 0; raw_spin_lock_irqsave(&db->lock, flags); hlist_for_each_entry_safe(obj, tmp, &db->list, node) { cnt++; oaddr = (unsigned long) obj->object; if (oaddr < saddr || oaddr >= eaddr) continue; switch (obj->state) { case ODEBUG_STATE_ACTIVE: descr = obj->descr; state = obj->state; raw_spin_unlock_irqrestore(&db->lock, flags); debug_print_object(obj, "free"); debug_object_fixup(descr->fixup_free, (void *) oaddr, state); goto repeat; default: hlist_del(&obj->node); __free_object(obj); break; } } raw_spin_unlock_irqrestore(&db->lock, flags); if (cnt > debug_objects_maxchain) debug_objects_maxchain = cnt; objs_checked += cnt; } if (objs_checked > debug_objects_maxchecked) debug_objects_maxchecked = objs_checked; /* Schedule work to actually kmem_cache_free() objects */ if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) { WRITE_ONCE(obj_freeing, true); schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY); } } void debug_check_no_obj_freed(const void *address, unsigned long size) { if (debug_objects_enabled) __debug_check_no_obj_freed(address, size); } #endif #ifdef CONFIG_DEBUG_FS static int debug_stats_show(struct seq_file *m, void *v) { int cpu, obj_percpu_free = 0; 
for_each_possible_cpu(cpu) obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu); seq_printf(m, "max_chain :%d\n", debug_objects_maxchain); seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked); seq_printf(m, "warnings :%d\n", debug_objects_warnings); seq_printf(m, "fixups :%d\n", debug_objects_fixups); seq_printf(m, "pool_free :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free); seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free); seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free); seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free); seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used); seq_printf(m, "on_free_list :%d\n", READ_ONCE(obj_nr_tofree)); seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated); seq_printf(m, "objs_freed :%d\n", debug_objects_freed); return 0; } DEFINE_SHOW_ATTRIBUTE(debug_stats); static int __init debug_objects_init_debugfs(void) { struct dentry *dbgdir; if (!debug_objects_enabled) return 0; dbgdir = debugfs_create_dir("debug_objects", NULL); debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops); return 0; } __initcall(debug_objects_init_debugfs); #else static inline void debug_objects_init_debugfs(void) { } #endif #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST /* Random data structure for the self test */ struct self_test { unsigned long dummy1[6]; int static_init; unsigned long dummy2[3]; }; static __initconst const struct debug_obj_descr descr_type_test; static bool __init is_static_object(void *addr) { struct self_test *obj = addr; return obj->static_init; } /* * fixup_init is called when: * - an active object is initialized */ static bool __init fixup_init(void *addr, enum debug_obj_state state) { struct self_test *obj = addr; switch (state) { case ODEBUG_STATE_ACTIVE: debug_object_deactivate(obj, &descr_type_test); debug_object_init(obj, &descr_type_test); return true; default: return false; } } /* * fixup_activate is called when: * - an active object is activated * - an unknown non-static object is activated */ static bool __init fixup_activate(void *addr, enum debug_obj_state state) { struct self_test *obj = addr; switch (state) { case ODEBUG_STATE_NOTAVAILABLE: return true; case ODEBUG_STATE_ACTIVE: debug_object_deactivate(obj, &descr_type_test); debug_object_activate(obj, &descr_type_test); return true; default: return false; } } /* * fixup_destroy is called when: * - an active object is destroyed */ static bool __init fixup_destroy(void *addr, enum debug_obj_state state) { struct self_test *obj = addr; switch (state) { case ODEBUG_STATE_ACTIVE: debug_object_deactivate(obj, &descr_type_test); debug_object_destroy(obj, &descr_type_test); return true; default: return false; } } /* * fixup_free is called when: * - an active object is freed */ static bool __init fixup_free(void *addr, enum debug_obj_state state) { struct self_test *obj = addr; switch (state) { case ODEBUG_STATE_ACTIVE: debug_object_deactivate(obj, &descr_type_test); debug_object_free(obj, &descr_type_test); return true; default: return false; } } static int __init check_results(void *addr, enum debug_obj_state state, int fixups, int warnings) { struct debug_bucket *db; struct debug_obj *obj; unsigned long flags; int res = -EINVAL; db = get_bucket((unsigned long) addr); raw_spin_lock_irqsave(&db->lock, flags); obj = lookup_object(addr, db); if (!obj && state != ODEBUG_STATE_NONE) { WARN(1, KERN_ERR "ODEBUG: selftest object not found\n"); goto out; } if (obj && obj->state != state) { WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n", 
obj->state, state); goto out; } if (fixups != debug_objects_fixups) { WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n", fixups, debug_objects_fixups); goto out; } if (warnings != debug_objects_warnings) { WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n", warnings, debug_objects_warnings); goto out; } res = 0; out: raw_spin_unlock_irqrestore(&db->lock, flags); if (res) debug_objects_enabled = 0; return res; } static __initconst const struct debug_obj_descr descr_type_test = { .name = "selftest", .is_static_object = is_static_object, .fixup_init = fixup_init, .fixup_activate = fixup_activate, .fixup_destroy = fixup_destroy, .fixup_free = fixup_free, }; static __initdata struct self_test obj = { .static_init = 0 }; static void __init debug_objects_selftest(void) { int fixups, oldfixups, warnings, oldwarnings; unsigned long flags; local_irq_save(flags); fixups = oldfixups = debug_objects_fixups; warnings = oldwarnings = debug_objects_warnings; descr_test = &descr_type_test; debug_object_init(&obj, &descr_type_test); if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings)) goto out; debug_object_activate(&obj, &descr_type_test); if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings)) goto out; debug_object_activate(&obj, &descr_type_test); if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings)) goto out; debug_object_deactivate(&obj, &descr_type_test); if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings)) goto out; debug_object_destroy(&obj, &descr_type_test); if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings)) goto out; debug_object_init(&obj, &descr_type_test); if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings)) goto out; debug_object_activate(&obj, &descr_type_test); if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings)) goto out; debug_object_deactivate(&obj, &descr_type_test); if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings)) goto out; debug_object_free(&obj, &descr_type_test); if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings)) goto out; obj.static_init = 1; debug_object_activate(&obj, &descr_type_test); if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings)) goto out; debug_object_init(&obj, &descr_type_test); if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings)) goto out; debug_object_free(&obj, &descr_type_test); if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings)) goto out; #ifdef CONFIG_DEBUG_OBJECTS_FREE debug_object_init(&obj, &descr_type_test); if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings)) goto out; debug_object_activate(&obj, &descr_type_test); if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings)) goto out; __debug_check_no_obj_freed(&obj, sizeof(obj)); if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings)) goto out; #endif pr_info("selftest passed\n"); out: debug_objects_fixups = oldfixups; debug_objects_warnings = oldwarnings; descr_test = NULL; local_irq_restore(flags); } #else static inline void debug_objects_selftest(void) { } #endif /* * Called during early boot to initialize the hash buckets and link * the static object pool objects into the poll list. After this call * the object tracker is fully operational. 
*/ void __init debug_objects_early_init(void) { int i; for (i = 0; i < ODEBUG_HASH_SIZE; i++) raw_spin_lock_init(&obj_hash[i].lock); for (i = 0; i < ODEBUG_POOL_SIZE; i++) hlist_add_head(&obj_static_pool[i].node, &obj_pool); } /* * Convert the statically allocated objects to dynamic ones: */ static int __init debug_objects_replace_static_objects(void) { struct debug_bucket *db = obj_hash; struct hlist_node *tmp; struct debug_obj *obj, *new; HLIST_HEAD(objects); int i, cnt = 0; for (i = 0; i < ODEBUG_POOL_SIZE; i++) { obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL); if (!obj) goto free; hlist_add_head(&obj->node, &objects); } debug_objects_allocated += i; /* * debug_objects_mem_init() is now called early that only one CPU is up * and interrupts have been disabled, so it is safe to replace the * active object references. */ /* Remove the statically allocated objects from the pool */ hlist_for_each_entry_safe(obj, tmp, &obj_pool, node) hlist_del(&obj->node); /* Move the allocated objects to the pool */ hlist_move_list(&objects, &obj_pool); /* Replace the active object references */ for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { hlist_move_list(&db->list, &objects); hlist_for_each_entry(obj, &objects, node) { new = hlist_entry(obj_pool.first, typeof(*obj), node); hlist_del(&new->node); /* copy object data */ *new = *obj; hlist_add_head(&new->node, &db->list); cnt++; } } pr_debug("%d of %d active objects replaced\n", cnt, obj_pool_used); return 0; free: hlist_for_each_entry_safe(obj, tmp, &objects, node) { hlist_del(&obj->node); kmem_cache_free(obj_cache, obj); } return -ENOMEM; } /* * Called after the kmem_caches are functional to setup a dedicated * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag * prevents that the debug code is called on kmem_cache_free() for the * debug tracker objects to avoid recursive calls. */ void __init debug_objects_mem_init(void) { int cpu, extras; if (!debug_objects_enabled) return; /* * Initialize the percpu object pools * * Initialization is not strictly necessary, but was done for * completeness. */ for_each_possible_cpu(cpu) INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu)); obj_cache = kmem_cache_create("debug_objects_cache", sizeof (struct debug_obj), 0, SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL); if (!obj_cache || debug_objects_replace_static_objects()) { debug_objects_enabled = 0; kmem_cache_destroy(obj_cache); pr_warn("out of memory.\n"); return; } else debug_objects_selftest(); #ifdef CONFIG_HOTPLUG_CPU cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL, object_cpu_offline); #endif /* * Increase the thresholds for allocating and freeing objects * according to the number of possible CPUs available in the system. */ extras = num_possible_cpus() * ODEBUG_BATCH_SIZE; debug_objects_pool_size += extras; debug_objects_pool_min_level += extras; }
linux-master
lib/debugobjects.c
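The debugobjects code above exposes its tracking hooks through debug_object_init()/activate()/deactivate()/free() and a per-type struct debug_obj_descr. The sketch below (not part of the kernel tree) shows how a hypothetical "struct foo" user might wire those hooks up, modeled on the selftest in the same file; the foo type, its descriptor and foo_fixup_free() are invented names, and only the debugobjects API calls come from the code above.

/* Hypothetical example; only the debugobjects calls are real kernel API. */
#include <linux/debugobjects.h>
#include <linux/slab.h>

struct foo {
	int running;
};

static const struct debug_obj_descr foo_debug_descr;

/* fixup_free is called when an object that is still active is freed. */
static bool foo_fixup_free(void *addr, enum debug_obj_state state)
{
	struct foo *f = addr;

	if (state == ODEBUG_STATE_ACTIVE) {
		debug_object_deactivate(f, &foo_debug_descr);
		debug_object_free(f, &foo_debug_descr);
		return true;
	}
	return false;
}

static const struct debug_obj_descr foo_debug_descr = {
	.name		= "foo",
	.fixup_free	= foo_fixup_free,
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		debug_object_init(f, &foo_debug_descr);
	return f;
}

static void foo_start(struct foo *f)
{
	debug_object_activate(f, &foo_debug_descr);	/* warns on double start */
	f->running = 1;
}

static void foo_stop(struct foo *f)
{
	f->running = 0;
	debug_object_deactivate(f, &foo_debug_descr);
}

static void foo_free(struct foo *f)
{
	/* If still active, this triggers foo_fixup_free() before the release. */
	debug_object_free(f, &foo_debug_descr);
	kfree(f);
}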
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2011 Nokia Corporation * Copyright (C) 2011 Intel Corporation * * Author: * Dmitry Kasatkin <[email protected]> * <[email protected]> * * File: sign.c * implements signature (RSA) verification * pkcs decoding is based on LibTomCrypt code */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/err.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/key.h> #include <linux/crypto.h> #include <crypto/hash.h> #include <crypto/sha1.h> #include <keys/user-type.h> #include <linux/mpi.h> #include <linux/digsig.h> static struct crypto_shash *shash; static const char *pkcs_1_v1_5_decode_emsa(const unsigned char *msg, unsigned long msglen, unsigned long modulus_bitlen, unsigned long *outlen) { unsigned long modulus_len, ps_len, i; modulus_len = (modulus_bitlen >> 3) + (modulus_bitlen & 7 ? 1 : 0); /* test message size */ if ((msglen > modulus_len) || (modulus_len < 11)) return NULL; /* separate encoded message */ if (msg[0] != 0x00 || msg[1] != 0x01) return NULL; for (i = 2; i < modulus_len - 1; i++) if (msg[i] != 0xFF) break; /* separator check */ if (msg[i] != 0) /* There was no octet with hexadecimal value 0x00 to separate ps from m. */ return NULL; ps_len = i - 2; *outlen = (msglen - (2 + ps_len + 1)); return msg + 2 + ps_len + 1; } /* * RSA Signature verification with public key */ static int digsig_verify_rsa(struct key *key, const char *sig, int siglen, const char *h, int hlen) { int err = -EINVAL; unsigned long len; unsigned long mlen, mblen; unsigned nret, l; int head, i; unsigned char *out1 = NULL; const char *m; MPI in = NULL, res = NULL, pkey[2]; uint8_t *p, *datap; const uint8_t *endp; const struct user_key_payload *ukp; struct pubkey_hdr *pkh; down_read(&key->sem); ukp = user_key_payload_locked(key); if (!ukp) { /* key was revoked before we acquired its semaphore */ err = -EKEYREVOKED; goto err1; } if (ukp->datalen < sizeof(*pkh)) goto err1; pkh = (struct pubkey_hdr *)ukp->data; if (pkh->version != 1) goto err1; if (pkh->algo != PUBKEY_ALGO_RSA) goto err1; if (pkh->nmpi != 2) goto err1; datap = pkh->mpi; endp = ukp->data + ukp->datalen; for (i = 0; i < pkh->nmpi; i++) { unsigned int remaining = endp - datap; pkey[i] = mpi_read_from_buffer(datap, &remaining); if (IS_ERR(pkey[i])) { err = PTR_ERR(pkey[i]); goto err; } datap += remaining; } mblen = mpi_get_nbits(pkey[0]); mlen = DIV_ROUND_UP(mblen, 8); if (mlen == 0) { err = -EINVAL; goto err; } err = -ENOMEM; out1 = kzalloc(mlen, GFP_KERNEL); if (!out1) goto err; nret = siglen; in = mpi_read_from_buffer(sig, &nret); if (IS_ERR(in)) { err = PTR_ERR(in); goto err; } res = mpi_alloc(mpi_get_nlimbs(in) * 2); if (!res) goto err; err = mpi_powm(res, in, pkey[1], pkey[0]); if (err) goto err; if (mpi_get_nlimbs(res) * BYTES_PER_MPI_LIMB > mlen) { err = -EINVAL; goto err; } p = mpi_get_buffer(res, &l, NULL); if (!p) { err = -EINVAL; goto err; } len = mlen; head = len - l; memset(out1, 0, head); memcpy(out1 + head, p, l); kfree(p); m = pkcs_1_v1_5_decode_emsa(out1, len, mblen, &len); if (!m || len != hlen || memcmp(m, h, hlen)) err = -EINVAL; err: mpi_free(in); mpi_free(res); kfree(out1); while (--i >= 0) mpi_free(pkey[i]); err1: up_read(&key->sem); return err; } /** * digsig_verify() - digital signature verification with public key * @keyring: keyring to search key in * @sig: digital signature * @siglen: length of the signature * @data: data * @datalen: length of the data * * Returns 0 on success, -EINVAL otherwise * * Verifies data integrity against digital 
signature. * Currently only RSA is supported. * Normally hash of the content is used as a data for this function. * */ int digsig_verify(struct key *keyring, const char *sig, int siglen, const char *data, int datalen) { int err = -ENOMEM; struct signature_hdr *sh = (struct signature_hdr *)sig; struct shash_desc *desc = NULL; unsigned char hash[SHA1_DIGEST_SIZE]; struct key *key; char name[20]; if (siglen < sizeof(*sh) + 2) return -EINVAL; if (sh->algo != PUBKEY_ALGO_RSA) return -ENOTSUPP; sprintf(name, "%llX", __be64_to_cpup((uint64_t *)sh->keyid)); if (keyring) { /* search in specific keyring */ key_ref_t kref; kref = keyring_search(make_key_ref(keyring, 1UL), &key_type_user, name, true); if (IS_ERR(kref)) key = ERR_CAST(kref); else key = key_ref_to_ptr(kref); } else { key = request_key(&key_type_user, name, NULL); } if (IS_ERR(key)) { pr_err("key not found, id: %s\n", name); return PTR_ERR(key); } desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash), GFP_KERNEL); if (!desc) goto err; desc->tfm = shash; crypto_shash_init(desc); crypto_shash_update(desc, data, datalen); crypto_shash_update(desc, sig, sizeof(*sh)); crypto_shash_final(desc, hash); kfree(desc); /* pass signature mpis address */ err = digsig_verify_rsa(key, sig + sizeof(*sh), siglen - sizeof(*sh), hash, sizeof(hash)); err: key_put(key); return err ? -EINVAL : 0; } EXPORT_SYMBOL_GPL(digsig_verify); static int __init digsig_init(void) { shash = crypto_alloc_shash("sha1", 0, 0); if (IS_ERR(shash)) { pr_err("shash allocation failed\n"); return PTR_ERR(shash); } return 0; } static void __exit digsig_cleanup(void) { crypto_free_shash(shash); } module_init(digsig_init); module_exit(digsig_cleanup); MODULE_LICENSE("GPL");
linux-master
lib/digsig.c
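digsig_verify() above takes a keyring, a signature blob that starts with a struct signature_hdr, and the data to check, which per its kernel-doc is normally a hash of the real content. A minimal hypothetical caller, assuming the keyring, digest and signature buffers are produced elsewhere, might look like this:

/*
 * Hypothetical caller; only digsig_verify() and its prototype come from
 * the file above.
 */
#include <linux/digsig.h>
#include <linux/key.h>
#include <linux/printk.h>

static int example_check_sig(struct key *keyring,
			     const char *digest, int digest_len,
			     const char *sig, int sig_len)
{
	int ret;

	/*
	 * "sig" must begin with a struct signature_hdr; "digest" plays the
	 * role of the data argument, normally a hash of the actual content.
	 */
	ret = digsig_verify(keyring, sig, sig_len, digest, digest_len);
	if (ret)
		pr_warn("digsig: verification failed (%d)\n", ret);

	return ret;	/* 0 on success, negative error otherwise */
}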
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* **************************************************************************** * * "DHRYSTONE" Benchmark Program * ----------------------------- * * Version: C, Version 2.1 * * File: dhry_2.c (part 3 of 3) * * Date: May 25, 1988 * * Author: Reinhold P. Weicker * **************************************************************************** */ #include "dhry.h" #include <linux/string.h> static Boolean Func_3(Enumeration Enum_Par_Val) /***************************/ /* executed once */ /* Enum_Par_Val == Ident_3 */ { Enumeration Enum_Loc; Enum_Loc = Enum_Par_Val; if (Enum_Loc == Ident_3) { /* then, executed */ return true; } else { /* not executed */ return false; } } /* Func_3 */ void Proc_6(Enumeration Enum_Val_Par, Enumeration *Enum_Ref_Par) /*********************************/ /* executed once */ /* Enum_Val_Par == Ident_3, Enum_Ref_Par becomes Ident_2 */ { *Enum_Ref_Par = Enum_Val_Par; if (!Func_3(Enum_Val_Par)) { /* then, not executed */ *Enum_Ref_Par = Ident_4; } switch (Enum_Val_Par) { case Ident_1: *Enum_Ref_Par = Ident_1; break; case Ident_2: if (Int_Glob > 100) { /* then */ *Enum_Ref_Par = Ident_1; } else { *Enum_Ref_Par = Ident_4; } break; case Ident_3: /* executed */ *Enum_Ref_Par = Ident_2; break; case Ident_4: break; case Ident_5: *Enum_Ref_Par = Ident_3; break; } /* switch */ } /* Proc_6 */ void Proc_7(One_Fifty Int_1_Par_Val, One_Fifty Int_2_Par_Val, One_Fifty *Int_Par_Ref) /**********************************************/ /* executed three times */ /* first call: Int_1_Par_Val == 2, Int_2_Par_Val == 3, */ /* Int_Par_Ref becomes 7 */ /* second call: Int_1_Par_Val == 10, Int_2_Par_Val == 5, */ /* Int_Par_Ref becomes 17 */ /* third call: Int_1_Par_Val == 6, Int_2_Par_Val == 10, */ /* Int_Par_Ref becomes 18 */ { One_Fifty Int_Loc; Int_Loc = Int_1_Par_Val + 2; *Int_Par_Ref = Int_2_Par_Val + Int_Loc; } /* Proc_7 */ void Proc_8(Arr_1_Dim Arr_1_Par_Ref, Arr_2_Dim Arr_2_Par_Ref, int Int_1_Par_Val, int Int_2_Par_Val) /*********************************************************************/ /* executed once */ /* Int_Par_Val_1 == 3 */ /* Int_Par_Val_2 == 7 */ { One_Fifty Int_Index; One_Fifty Int_Loc; Int_Loc = Int_1_Par_Val + 5; Arr_1_Par_Ref[Int_Loc] = Int_2_Par_Val; Arr_1_Par_Ref[Int_Loc+1] = Arr_1_Par_Ref[Int_Loc]; Arr_1_Par_Ref[Int_Loc+30] = Int_Loc; for (Int_Index = Int_Loc; Int_Index <= Int_Loc+1; ++Int_Index) Arr_2_Par_Ref[Int_Loc][Int_Index] = Int_Loc; Arr_2_Par_Ref[Int_Loc][Int_Loc-1] += 1; Arr_2_Par_Ref[Int_Loc+20][Int_Loc] = Arr_1_Par_Ref[Int_Loc]; Int_Glob = 5; } /* Proc_8 */ Enumeration Func_1(Capital_Letter Ch_1_Par_Val, Capital_Letter Ch_2_Par_Val) /*************************************************/ /* executed three times */ /* first call: Ch_1_Par_Val == 'H', Ch_2_Par_Val == 'R' */ /* second call: Ch_1_Par_Val == 'A', Ch_2_Par_Val == 'C' */ /* third call: Ch_1_Par_Val == 'B', Ch_2_Par_Val == 'C' */ { Capital_Letter Ch_1_Loc; Capital_Letter Ch_2_Loc; Ch_1_Loc = Ch_1_Par_Val; Ch_2_Loc = Ch_1_Loc; if (Ch_2_Loc != Ch_2_Par_Val) { /* then, executed */ return Ident_1; } else { /* not executed */ Ch_1_Glob = Ch_1_Loc; return Ident_2; } } /* Func_1 */ Boolean Func_2(Str_30 Str_1_Par_Ref, Str_30 Str_2_Par_Ref) /*************************************************/ /* executed once */ /* Str_1_Par_Ref == "DHRYSTONE PROGRAM, 1'ST STRING" */ /* Str_2_Par_Ref == "DHRYSTONE PROGRAM, 2'ND STRING" */ { One_Thirty Int_Loc; Capital_Letter Ch_Loc; Int_Loc = 2; while (Int_Loc <= 2) { /* loop body executed once */ if 
(Func_1(Str_1_Par_Ref[Int_Loc], Str_2_Par_Ref[Int_Loc+1]) == Ident_1) { /* then, executed */ Ch_Loc = 'A'; Int_Loc += 1; } } /* if, while */ if (Ch_Loc >= 'W' && Ch_Loc < 'Z') { /* then, not executed */ Int_Loc = 7; } if (Ch_Loc == 'R') { /* then, not executed */ return true; } else { /* executed */ if (strcmp(Str_1_Par_Ref, Str_2_Par_Ref) > 0) { /* then, not executed */ Int_Loc += 7; Int_Glob = Int_Loc; return true; } else { /* executed */ return false; } } /* if Ch_Loc */ } /* Func_2 */
linux-master
lib/dhry_2.c
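The routines above are driven by dhry_1.c, which also defines the Int_Glob/Ch_1_Glob globals they touch. The small illustrative fragment below, assuming dhry.h provides the usual Dhrystone 2.1 typedefs and prototypes (One_Fifty, Str_30, Proc_7(), Func_2()), simply restates the call contracts spelled out in the comments:

/*
 * Illustrative fragment only; in the kernel the real driver is dhry_1.c.
 * The typedefs (One_Fifty, Str_30) are assumed to come from dhry.h.
 */
#include "dhry.h"
#include <linux/string.h>

static void dhry_2_example(void)
{
	One_Fifty Int_Ref = 0;
	Str_30 Str_1_Loc;
	Str_30 Str_2_Loc;

	/* Mirrors the "first call" comment on Proc_7(): 2 + 2 + 3 == 7 */
	Proc_7(2, 3, &Int_Ref);

	strcpy(Str_1_Loc, "DHRYSTONE PROGRAM, 1'ST STRING");
	strcpy(Str_2_Loc, "DHRYSTONE PROGRAM, 2'ND STRING");

	/* Per the comments in Func_2(), this comparison returns false */
	if (Func_2(Str_1_Loc, Str_2_Loc))
		Int_Ref = 0;
}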
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /* Copyright (c) 2018 Mellanox Technologies. All rights reserved */ #include <linux/module.h> #include <linux/slab.h> #include <linux/rhashtable.h> #include <linux/idr.h> #include <linux/list.h> #include <linux/sort.h> #include <linux/objagg.h> #define CREATE_TRACE_POINTS #include <trace/events/objagg.h> struct objagg_hints { struct rhashtable node_ht; struct rhashtable_params ht_params; struct list_head node_list; unsigned int node_count; unsigned int root_count; unsigned int refcount; const struct objagg_ops *ops; }; struct objagg_hints_node { struct rhash_head ht_node; /* member of objagg_hints->node_ht */ struct list_head list; /* member of objagg_hints->node_list */ struct objagg_hints_node *parent; unsigned int root_id; struct objagg_obj_stats_info stats_info; unsigned long obj[]; }; static struct objagg_hints_node * objagg_hints_lookup(struct objagg_hints *objagg_hints, void *obj) { if (!objagg_hints) return NULL; return rhashtable_lookup_fast(&objagg_hints->node_ht, obj, objagg_hints->ht_params); } struct objagg { const struct objagg_ops *ops; void *priv; struct rhashtable obj_ht; struct rhashtable_params ht_params; struct list_head obj_list; unsigned int obj_count; struct ida root_ida; struct objagg_hints *hints; }; struct objagg_obj { struct rhash_head ht_node; /* member of objagg->obj_ht */ struct list_head list; /* member of objagg->obj_list */ struct objagg_obj *parent; /* if the object is nested, this * holds pointer to parent, otherwise NULL */ union { void *delta_priv; /* user delta private */ void *root_priv; /* user root private */ }; unsigned int root_id; unsigned int refcount; /* counts number of users of this object * including nested objects */ struct objagg_obj_stats stats; unsigned long obj[]; }; static unsigned int objagg_obj_ref_inc(struct objagg_obj *objagg_obj) { return ++objagg_obj->refcount; } static unsigned int objagg_obj_ref_dec(struct objagg_obj *objagg_obj) { return --objagg_obj->refcount; } static void objagg_obj_stats_inc(struct objagg_obj *objagg_obj) { objagg_obj->stats.user_count++; objagg_obj->stats.delta_user_count++; if (objagg_obj->parent) objagg_obj->parent->stats.delta_user_count++; } static void objagg_obj_stats_dec(struct objagg_obj *objagg_obj) { objagg_obj->stats.user_count--; objagg_obj->stats.delta_user_count--; if (objagg_obj->parent) objagg_obj->parent->stats.delta_user_count--; } static bool objagg_obj_is_root(const struct objagg_obj *objagg_obj) { /* Nesting is not supported, so we can use ->parent * to figure out if the object is root. */ return !objagg_obj->parent; } /** * objagg_obj_root_priv - obtains root private for an object * @objagg_obj: objagg object instance * * Note: all locking must be provided by the caller. * * Either the object is root itself when the private is returned * directly, or the parent is root and its private is returned * instead. * * Returns a user private root pointer. */ const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj) { if (objagg_obj_is_root(objagg_obj)) return objagg_obj->root_priv; WARN_ON(!objagg_obj_is_root(objagg_obj->parent)); return objagg_obj->parent->root_priv; } EXPORT_SYMBOL(objagg_obj_root_priv); /** * objagg_obj_delta_priv - obtains delta private for an object * @objagg_obj: objagg object instance * * Note: all locking must be provided by the caller. * * Returns user private delta pointer or NULL in case the passed * object is root. 
*/ const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj) { if (objagg_obj_is_root(objagg_obj)) return NULL; return objagg_obj->delta_priv; } EXPORT_SYMBOL(objagg_obj_delta_priv); /** * objagg_obj_raw - obtains object user private pointer * @objagg_obj: objagg object instance * * Note: all locking must be provided by the caller. * * Returns user private pointer as was passed to objagg_obj_get() by "obj" arg. */ const void *objagg_obj_raw(const struct objagg_obj *objagg_obj) { return objagg_obj->obj; } EXPORT_SYMBOL(objagg_obj_raw); static struct objagg_obj *objagg_obj_lookup(struct objagg *objagg, void *obj) { return rhashtable_lookup_fast(&objagg->obj_ht, obj, objagg->ht_params); } static int objagg_obj_parent_assign(struct objagg *objagg, struct objagg_obj *objagg_obj, struct objagg_obj *parent, bool take_parent_ref) { void *delta_priv; delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj, objagg_obj->obj); if (IS_ERR(delta_priv)) return PTR_ERR(delta_priv); /* User returned a delta private, that means that * our object can be aggregated into the parent. */ objagg_obj->parent = parent; objagg_obj->delta_priv = delta_priv; if (take_parent_ref) objagg_obj_ref_inc(objagg_obj->parent); trace_objagg_obj_parent_assign(objagg, objagg_obj, parent, parent->refcount); return 0; } static int objagg_obj_parent_lookup_assign(struct objagg *objagg, struct objagg_obj *objagg_obj) { struct objagg_obj *objagg_obj_cur; int err; list_for_each_entry(objagg_obj_cur, &objagg->obj_list, list) { /* Nesting is not supported. In case the object * is not root, it cannot be assigned as parent. */ if (!objagg_obj_is_root(objagg_obj_cur)) continue; err = objagg_obj_parent_assign(objagg, objagg_obj, objagg_obj_cur, true); if (!err) return 0; } return -ENOENT; } static void __objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj); static void objagg_obj_parent_unassign(struct objagg *objagg, struct objagg_obj *objagg_obj) { trace_objagg_obj_parent_unassign(objagg, objagg_obj, objagg_obj->parent, objagg_obj->parent->refcount); objagg->ops->delta_destroy(objagg->priv, objagg_obj->delta_priv); __objagg_obj_put(objagg, objagg_obj->parent); } static int objagg_obj_root_id_alloc(struct objagg *objagg, struct objagg_obj *objagg_obj, struct objagg_hints_node *hnode) { unsigned int min, max; int root_id; /* In case there are no hints available, the root id is invalid. */ if (!objagg->hints) { objagg_obj->root_id = OBJAGG_OBJ_ROOT_ID_INVALID; return 0; } if (hnode) { min = hnode->root_id; max = hnode->root_id; } else { /* For objects with no hint, start after the last * hinted root_id. 
*/ min = objagg->hints->root_count; max = ~0; } root_id = ida_alloc_range(&objagg->root_ida, min, max, GFP_KERNEL); if (root_id < 0) return root_id; objagg_obj->root_id = root_id; return 0; } static void objagg_obj_root_id_free(struct objagg *objagg, struct objagg_obj *objagg_obj) { if (!objagg->hints) return; ida_free(&objagg->root_ida, objagg_obj->root_id); } static int objagg_obj_root_create(struct objagg *objagg, struct objagg_obj *objagg_obj, struct objagg_hints_node *hnode) { int err; err = objagg_obj_root_id_alloc(objagg, objagg_obj, hnode); if (err) return err; objagg_obj->root_priv = objagg->ops->root_create(objagg->priv, objagg_obj->obj, objagg_obj->root_id); if (IS_ERR(objagg_obj->root_priv)) { err = PTR_ERR(objagg_obj->root_priv); goto err_root_create; } trace_objagg_obj_root_create(objagg, objagg_obj); return 0; err_root_create: objagg_obj_root_id_free(objagg, objagg_obj); return err; } static void objagg_obj_root_destroy(struct objagg *objagg, struct objagg_obj *objagg_obj) { trace_objagg_obj_root_destroy(objagg, objagg_obj); objagg->ops->root_destroy(objagg->priv, objagg_obj->root_priv); objagg_obj_root_id_free(objagg, objagg_obj); } static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj); static int objagg_obj_init_with_hints(struct objagg *objagg, struct objagg_obj *objagg_obj, bool *hint_found) { struct objagg_hints_node *hnode; struct objagg_obj *parent; int err; hnode = objagg_hints_lookup(objagg->hints, objagg_obj->obj); if (!hnode) { *hint_found = false; return 0; } *hint_found = true; if (!hnode->parent) return objagg_obj_root_create(objagg, objagg_obj, hnode); parent = __objagg_obj_get(objagg, hnode->parent->obj); if (IS_ERR(parent)) return PTR_ERR(parent); err = objagg_obj_parent_assign(objagg, objagg_obj, parent, false); if (err) { *hint_found = false; err = 0; goto err_parent_assign; } return 0; err_parent_assign: objagg_obj_put(objagg, parent); return err; } static int objagg_obj_init(struct objagg *objagg, struct objagg_obj *objagg_obj) { bool hint_found; int err; /* First, try to use hints if they are available and * if they provide result. */ err = objagg_obj_init_with_hints(objagg, objagg_obj, &hint_found); if (err) return err; if (hint_found) return 0; /* Try to find if the object can be aggregated under an existing one. */ err = objagg_obj_parent_lookup_assign(objagg, objagg_obj); if (!err) return 0; /* If aggregation is not possible, make the object a root. 
*/ return objagg_obj_root_create(objagg, objagg_obj, NULL); } static void objagg_obj_fini(struct objagg *objagg, struct objagg_obj *objagg_obj) { if (!objagg_obj_is_root(objagg_obj)) objagg_obj_parent_unassign(objagg, objagg_obj); else objagg_obj_root_destroy(objagg, objagg_obj); } static struct objagg_obj *objagg_obj_create(struct objagg *objagg, void *obj) { struct objagg_obj *objagg_obj; int err; objagg_obj = kzalloc(sizeof(*objagg_obj) + objagg->ops->obj_size, GFP_KERNEL); if (!objagg_obj) return ERR_PTR(-ENOMEM); objagg_obj_ref_inc(objagg_obj); memcpy(objagg_obj->obj, obj, objagg->ops->obj_size); err = objagg_obj_init(objagg, objagg_obj); if (err) goto err_obj_init; err = rhashtable_insert_fast(&objagg->obj_ht, &objagg_obj->ht_node, objagg->ht_params); if (err) goto err_ht_insert; list_add(&objagg_obj->list, &objagg->obj_list); objagg->obj_count++; trace_objagg_obj_create(objagg, objagg_obj); return objagg_obj; err_ht_insert: objagg_obj_fini(objagg, objagg_obj); err_obj_init: kfree(objagg_obj); return ERR_PTR(err); } static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj) { struct objagg_obj *objagg_obj; /* First, try to find the object exactly as user passed it, * perhaps it is already in use. */ objagg_obj = objagg_obj_lookup(objagg, obj); if (objagg_obj) { objagg_obj_ref_inc(objagg_obj); return objagg_obj; } return objagg_obj_create(objagg, obj); } /** * objagg_obj_get - gets an object within objagg instance * @objagg: objagg instance * @obj: user-specific private object pointer * * Note: all locking must be provided by the caller. * * Size of the "obj" memory is specified in "objagg->ops". * * There are 3 main options this function wraps: * 1) The object according to "obj" already exist. In that case * the reference counter is incrementes and the object is returned. * 2) The object does not exist, but it can be aggregated within * another object. In that case, user ops->delta_create() is called * to obtain delta data and a new object is created with returned * user-delta private pointer. * 3) The object does not exist and cannot be aggregated into * any of the existing objects. In that case, user ops->root_create() * is called to create the root and a new object is created with * returned user-root private pointer. * * Returns a pointer to objagg object instance in case of success, * otherwise it returns pointer error using ERR_PTR macro. */ struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj) { struct objagg_obj *objagg_obj; objagg_obj = __objagg_obj_get(objagg, obj); if (IS_ERR(objagg_obj)) return objagg_obj; objagg_obj_stats_inc(objagg_obj); trace_objagg_obj_get(objagg, objagg_obj, objagg_obj->refcount); return objagg_obj; } EXPORT_SYMBOL(objagg_obj_get); static void objagg_obj_destroy(struct objagg *objagg, struct objagg_obj *objagg_obj) { trace_objagg_obj_destroy(objagg, objagg_obj); --objagg->obj_count; list_del(&objagg_obj->list); rhashtable_remove_fast(&objagg->obj_ht, &objagg_obj->ht_node, objagg->ht_params); objagg_obj_fini(objagg, objagg_obj); kfree(objagg_obj); } static void __objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj) { if (!objagg_obj_ref_dec(objagg_obj)) objagg_obj_destroy(objagg, objagg_obj); } /** * objagg_obj_put - puts an object within objagg instance * @objagg: objagg instance * @objagg_obj: objagg object instance * * Note: all locking must be provided by the caller. * * Symmetric to objagg_obj_get(). 
*/ void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj) { trace_objagg_obj_put(objagg, objagg_obj, objagg_obj->refcount); objagg_obj_stats_dec(objagg_obj); __objagg_obj_put(objagg, objagg_obj); } EXPORT_SYMBOL(objagg_obj_put); /** * objagg_create - creates a new objagg instance * @ops: user-specific callbacks * @objagg_hints: hints, can be NULL * @priv: pointer to a private data passed to the ops * * Note: all locking must be provided by the caller. * * The purpose of the library is to provide an infrastructure to * aggregate user-specified objects. Library does not care about the type * of the object. User fills-up ops which take care of the specific * user object manipulation. * * As a very stupid example, consider integer numbers. For example * number 8 as a root object. That can aggregate number 9 with delta 1, * number 10 with delta 2, etc. This example is implemented as * a part of a testing module in test_objagg.c file. * * Each objagg instance contains multiple trees. Each tree node is * represented by "an object". In the current implementation there can be * only roots and leafs nodes. Leaf nodes are called deltas. * But in general, this can be easily extended for intermediate nodes. * In that extension, a delta would be associated with all non-root * nodes. * * Returns a pointer to newly created objagg instance in case of success, * otherwise it returns pointer error using ERR_PTR macro. */ struct objagg *objagg_create(const struct objagg_ops *ops, struct objagg_hints *objagg_hints, void *priv) { struct objagg *objagg; int err; if (WARN_ON(!ops || !ops->root_create || !ops->root_destroy || !ops->delta_check || !ops->delta_create || !ops->delta_destroy)) return ERR_PTR(-EINVAL); objagg = kzalloc(sizeof(*objagg), GFP_KERNEL); if (!objagg) return ERR_PTR(-ENOMEM); objagg->ops = ops; if (objagg_hints) { objagg->hints = objagg_hints; objagg_hints->refcount++; } objagg->priv = priv; INIT_LIST_HEAD(&objagg->obj_list); objagg->ht_params.key_len = ops->obj_size; objagg->ht_params.key_offset = offsetof(struct objagg_obj, obj); objagg->ht_params.head_offset = offsetof(struct objagg_obj, ht_node); err = rhashtable_init(&objagg->obj_ht, &objagg->ht_params); if (err) goto err_rhashtable_init; ida_init(&objagg->root_ida); trace_objagg_create(objagg); return objagg; err_rhashtable_init: kfree(objagg); return ERR_PTR(err); } EXPORT_SYMBOL(objagg_create); /** * objagg_destroy - destroys a new objagg instance * @objagg: objagg instance * * Note: all locking must be provided by the caller. */ void objagg_destroy(struct objagg *objagg) { trace_objagg_destroy(objagg); ida_destroy(&objagg->root_ida); WARN_ON(!list_empty(&objagg->obj_list)); rhashtable_destroy(&objagg->obj_ht); if (objagg->hints) objagg_hints_put(objagg->hints); kfree(objagg); } EXPORT_SYMBOL(objagg_destroy); static int objagg_stats_info_sort_cmp_func(const void *a, const void *b) { const struct objagg_obj_stats_info *stats_info1 = a; const struct objagg_obj_stats_info *stats_info2 = b; if (stats_info1->is_root != stats_info2->is_root) return stats_info2->is_root - stats_info1->is_root; if (stats_info1->stats.delta_user_count != stats_info2->stats.delta_user_count) return stats_info2->stats.delta_user_count - stats_info1->stats.delta_user_count; return stats_info2->stats.user_count - stats_info1->stats.user_count; } /** * objagg_stats_get - obtains stats of the objagg instance * @objagg: objagg instance * * Note: all locking must be provided by the caller. 
* * The returned structure contains statistics of all object * currently in use, ordered by following rules: * 1) Root objects are always on lower indexes than the rest. * 2) Objects with higher delta user count are always on lower * indexes. * 3) In case more objects have the same delta user count, * the objects are ordered by user count. * * Returns a pointer to stats instance in case of success, * otherwise it returns pointer error using ERR_PTR macro. */ const struct objagg_stats *objagg_stats_get(struct objagg *objagg) { struct objagg_stats *objagg_stats; struct objagg_obj *objagg_obj; int i; objagg_stats = kzalloc(struct_size(objagg_stats, stats_info, objagg->obj_count), GFP_KERNEL); if (!objagg_stats) return ERR_PTR(-ENOMEM); i = 0; list_for_each_entry(objagg_obj, &objagg->obj_list, list) { memcpy(&objagg_stats->stats_info[i].stats, &objagg_obj->stats, sizeof(objagg_stats->stats_info[0].stats)); objagg_stats->stats_info[i].objagg_obj = objagg_obj; objagg_stats->stats_info[i].is_root = objagg_obj_is_root(objagg_obj); if (objagg_stats->stats_info[i].is_root) objagg_stats->root_count++; i++; } objagg_stats->stats_info_count = i; sort(objagg_stats->stats_info, objagg_stats->stats_info_count, sizeof(struct objagg_obj_stats_info), objagg_stats_info_sort_cmp_func, NULL); return objagg_stats; } EXPORT_SYMBOL(objagg_stats_get); /** * objagg_stats_put - puts stats of the objagg instance * @objagg_stats: objagg instance stats * * Note: all locking must be provided by the caller. */ void objagg_stats_put(const struct objagg_stats *objagg_stats) { kfree(objagg_stats); } EXPORT_SYMBOL(objagg_stats_put); static struct objagg_hints_node * objagg_hints_node_create(struct objagg_hints *objagg_hints, struct objagg_obj *objagg_obj, size_t obj_size, struct objagg_hints_node *parent_hnode) { unsigned int user_count = objagg_obj->stats.user_count; struct objagg_hints_node *hnode; int err; hnode = kzalloc(sizeof(*hnode) + obj_size, GFP_KERNEL); if (!hnode) return ERR_PTR(-ENOMEM); memcpy(hnode->obj, &objagg_obj->obj, obj_size); hnode->stats_info.stats.user_count = user_count; hnode->stats_info.stats.delta_user_count = user_count; if (parent_hnode) { parent_hnode->stats_info.stats.delta_user_count += user_count; } else { hnode->root_id = objagg_hints->root_count++; hnode->stats_info.is_root = true; } hnode->stats_info.objagg_obj = objagg_obj; err = rhashtable_insert_fast(&objagg_hints->node_ht, &hnode->ht_node, objagg_hints->ht_params); if (err) goto err_ht_insert; list_add(&hnode->list, &objagg_hints->node_list); hnode->parent = parent_hnode; objagg_hints->node_count++; return hnode; err_ht_insert: kfree(hnode); return ERR_PTR(err); } static void objagg_hints_flush(struct objagg_hints *objagg_hints) { struct objagg_hints_node *hnode, *tmp; list_for_each_entry_safe(hnode, tmp, &objagg_hints->node_list, list) { list_del(&hnode->list); rhashtable_remove_fast(&objagg_hints->node_ht, &hnode->ht_node, objagg_hints->ht_params); kfree(hnode); } } struct objagg_tmp_node { struct objagg_obj *objagg_obj; bool crossed_out; }; struct objagg_tmp_graph { struct objagg_tmp_node *nodes; unsigned long nodes_count; unsigned long *edges; }; static int objagg_tmp_graph_edge_index(struct objagg_tmp_graph *graph, int parent_index, int index) { return index * graph->nodes_count + parent_index; } static void objagg_tmp_graph_edge_set(struct objagg_tmp_graph *graph, int parent_index, int index) { int edge_index = objagg_tmp_graph_edge_index(graph, index, parent_index); __set_bit(edge_index, graph->edges); } static bool 
objagg_tmp_graph_is_edge(struct objagg_tmp_graph *graph, int parent_index, int index) { int edge_index = objagg_tmp_graph_edge_index(graph, index, parent_index); return test_bit(edge_index, graph->edges); } static unsigned int objagg_tmp_graph_node_weight(struct objagg_tmp_graph *graph, unsigned int index) { struct objagg_tmp_node *node = &graph->nodes[index]; unsigned int weight = node->objagg_obj->stats.user_count; int j; /* Node weight is sum of node users and all other nodes users * that this node can represent with delta. */ for (j = 0; j < graph->nodes_count; j++) { if (!objagg_tmp_graph_is_edge(graph, index, j)) continue; node = &graph->nodes[j]; if (node->crossed_out) continue; weight += node->objagg_obj->stats.user_count; } return weight; } static int objagg_tmp_graph_node_max_weight(struct objagg_tmp_graph *graph) { struct objagg_tmp_node *node; unsigned int max_weight = 0; unsigned int weight; int max_index = -1; int i; for (i = 0; i < graph->nodes_count; i++) { node = &graph->nodes[i]; if (node->crossed_out) continue; weight = objagg_tmp_graph_node_weight(graph, i); if (weight >= max_weight) { max_weight = weight; max_index = i; } } return max_index; } static struct objagg_tmp_graph *objagg_tmp_graph_create(struct objagg *objagg) { unsigned int nodes_count = objagg->obj_count; struct objagg_tmp_graph *graph; struct objagg_tmp_node *node; struct objagg_tmp_node *pnode; struct objagg_obj *objagg_obj; int i, j; graph = kzalloc(sizeof(*graph), GFP_KERNEL); if (!graph) return NULL; graph->nodes = kcalloc(nodes_count, sizeof(*graph->nodes), GFP_KERNEL); if (!graph->nodes) goto err_nodes_alloc; graph->nodes_count = nodes_count; graph->edges = bitmap_zalloc(nodes_count * nodes_count, GFP_KERNEL); if (!graph->edges) goto err_edges_alloc; i = 0; list_for_each_entry(objagg_obj, &objagg->obj_list, list) { node = &graph->nodes[i++]; node->objagg_obj = objagg_obj; } /* Assemble a temporary graph. Insert edge X->Y in case Y can be * in delta of X. */ for (i = 0; i < nodes_count; i++) { for (j = 0; j < nodes_count; j++) { if (i == j) continue; pnode = &graph->nodes[i]; node = &graph->nodes[j]; if (objagg->ops->delta_check(objagg->priv, pnode->objagg_obj->obj, node->objagg_obj->obj)) { objagg_tmp_graph_edge_set(graph, i, j); } } } return graph; err_edges_alloc: kfree(graph->nodes); err_nodes_alloc: kfree(graph); return NULL; } static void objagg_tmp_graph_destroy(struct objagg_tmp_graph *graph) { bitmap_free(graph->edges); kfree(graph->nodes); kfree(graph); } static int objagg_opt_simple_greedy_fillup_hints(struct objagg_hints *objagg_hints, struct objagg *objagg) { struct objagg_hints_node *hnode, *parent_hnode; struct objagg_tmp_graph *graph; struct objagg_tmp_node *node; int index; int j; int err; graph = objagg_tmp_graph_create(objagg); if (!graph) return -ENOMEM; /* Find the nodes from the ones that can accommodate most users * and cross them out of the graph. Save them to the hint list. 
*/ while ((index = objagg_tmp_graph_node_max_weight(graph)) != -1) { node = &graph->nodes[index]; node->crossed_out = true; hnode = objagg_hints_node_create(objagg_hints, node->objagg_obj, objagg->ops->obj_size, NULL); if (IS_ERR(hnode)) { err = PTR_ERR(hnode); goto out; } parent_hnode = hnode; for (j = 0; j < graph->nodes_count; j++) { if (!objagg_tmp_graph_is_edge(graph, index, j)) continue; node = &graph->nodes[j]; if (node->crossed_out) continue; node->crossed_out = true; hnode = objagg_hints_node_create(objagg_hints, node->objagg_obj, objagg->ops->obj_size, parent_hnode); if (IS_ERR(hnode)) { err = PTR_ERR(hnode); goto out; } } } err = 0; out: objagg_tmp_graph_destroy(graph); return err; } struct objagg_opt_algo { int (*fillup_hints)(struct objagg_hints *objagg_hints, struct objagg *objagg); }; static const struct objagg_opt_algo objagg_opt_simple_greedy = { .fillup_hints = objagg_opt_simple_greedy_fillup_hints, }; static const struct objagg_opt_algo *objagg_opt_algos[] = { [OBJAGG_OPT_ALGO_SIMPLE_GREEDY] = &objagg_opt_simple_greedy, }; static int objagg_hints_obj_cmp(struct rhashtable_compare_arg *arg, const void *obj) { struct rhashtable *ht = arg->ht; struct objagg_hints *objagg_hints = container_of(ht, struct objagg_hints, node_ht); const struct objagg_ops *ops = objagg_hints->ops; const char *ptr = obj; ptr += ht->p.key_offset; return ops->hints_obj_cmp ? ops->hints_obj_cmp(ptr, arg->key) : memcmp(ptr, arg->key, ht->p.key_len); } /** * objagg_hints_get - obtains hints instance * @objagg: objagg instance * @opt_algo_type: type of hints finding algorithm * * Note: all locking must be provided by the caller. * * According to the algo type, the existing objects of objagg instance * are going to be went-through to assemble an optimal tree. We call this * tree hints. These hints can be later on used for creation of * a new objagg instance. There, the future object creations are going * to be consulted with these hints in order to find out, where exactly * the new object should be put as a root or delta. * * Returns a pointer to hints instance in case of success, * otherwise it returns pointer error using ERR_PTR macro. */ struct objagg_hints *objagg_hints_get(struct objagg *objagg, enum objagg_opt_algo_type opt_algo_type) { const struct objagg_opt_algo *algo = objagg_opt_algos[opt_algo_type]; struct objagg_hints *objagg_hints; int err; objagg_hints = kzalloc(sizeof(*objagg_hints), GFP_KERNEL); if (!objagg_hints) return ERR_PTR(-ENOMEM); objagg_hints->ops = objagg->ops; objagg_hints->refcount = 1; INIT_LIST_HEAD(&objagg_hints->node_list); objagg_hints->ht_params.key_len = objagg->ops->obj_size; objagg_hints->ht_params.key_offset = offsetof(struct objagg_hints_node, obj); objagg_hints->ht_params.head_offset = offsetof(struct objagg_hints_node, ht_node); objagg_hints->ht_params.obj_cmpfn = objagg_hints_obj_cmp; err = rhashtable_init(&objagg_hints->node_ht, &objagg_hints->ht_params); if (err) goto err_rhashtable_init; err = algo->fillup_hints(objagg_hints, objagg); if (err) goto err_fillup_hints; if (WARN_ON(objagg_hints->node_count != objagg->obj_count)) { err = -EINVAL; goto err_node_count_check; } return objagg_hints; err_node_count_check: err_fillup_hints: objagg_hints_flush(objagg_hints); rhashtable_destroy(&objagg_hints->node_ht); err_rhashtable_init: kfree(objagg_hints); return ERR_PTR(err); } EXPORT_SYMBOL(objagg_hints_get); /** * objagg_hints_put - puts hints instance * @objagg_hints: objagg hints instance * * Note: all locking must be provided by the caller. 
*/ void objagg_hints_put(struct objagg_hints *objagg_hints) { if (--objagg_hints->refcount) return; objagg_hints_flush(objagg_hints); rhashtable_destroy(&objagg_hints->node_ht); kfree(objagg_hints); } EXPORT_SYMBOL(objagg_hints_put); /** * objagg_hints_stats_get - obtains stats of the hints instance * @objagg_hints: hints instance * * Note: all locking must be provided by the caller. * * The returned structure contains statistics of all objects * currently in use, ordered by following rules: * 1) Root objects are always on lower indexes than the rest. * 2) Objects with higher delta user count are always on lower * indexes. * 3) In case multiple objects have the same delta user count, * the objects are ordered by user count. * * Returns a pointer to stats instance in case of success, * otherwise it returns pointer error using ERR_PTR macro. */ const struct objagg_stats * objagg_hints_stats_get(struct objagg_hints *objagg_hints) { struct objagg_stats *objagg_stats; struct objagg_hints_node *hnode; int i; objagg_stats = kzalloc(struct_size(objagg_stats, stats_info, objagg_hints->node_count), GFP_KERNEL); if (!objagg_stats) return ERR_PTR(-ENOMEM); i = 0; list_for_each_entry(hnode, &objagg_hints->node_list, list) { memcpy(&objagg_stats->stats_info[i], &hnode->stats_info, sizeof(objagg_stats->stats_info[0])); if (objagg_stats->stats_info[i].is_root) objagg_stats->root_count++; i++; } objagg_stats->stats_info_count = i; sort(objagg_stats->stats_info, objagg_stats->stats_info_count, sizeof(struct objagg_obj_stats_info), objagg_stats_info_sort_cmp_func, NULL); return objagg_stats; } EXPORT_SYMBOL(objagg_hints_stats_get); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Jiri Pirko <[email protected]>"); MODULE_DESCRIPTION("Object aggregation manager");
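/*
 * Editor's note: the sketch below is NOT part of upstream lib/objagg.c.
 * It is a hedged illustration, guarded by "#if 0", of how a caller might
 * consume the hints API defined above: derive hints from a "warmed up"
 * objagg instance with the simple greedy algorithm, inspect the ordered
 * stats, and hand the hints back for reuse with a fresh instance. The
 * helper name example_get_and_dump_hints() is hypothetical;
 * objagg_stats_put() is assumed to be the matching release helper
 * exported by this library.
 */
#if 0	/* illustration only, not compiled */
static struct objagg_hints *example_get_and_dump_hints(struct objagg *objagg)
{
	const struct objagg_stats *stats;
	struct objagg_hints *hints;
	unsigned int i;

	/* Build the hint tree using the simple greedy algorithm above. */
	hints = objagg_hints_get(objagg, OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
	if (IS_ERR(hints))
		return hints;

	/* Stats come back ordered: roots first, then by delta user count. */
	stats = objagg_hints_stats_get(hints);
	if (!IS_ERR(stats)) {
		for (i = 0; i < stats->stats_info_count; i++)
			pr_debug("hint %u: %s\n", i,
				 stats->stats_info[i].is_root ? "root" : "delta");
		objagg_stats_put(stats);	/* assumed release helper */
	}

	/*
	 * The caller would typically pass the returned hints to a new
	 * objagg instance and later drop them with objagg_hints_put().
	 */
	return hints;
}
#endif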
linux-master
lib/objagg.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/export.h> #include <linux/uaccess.h> #include <linux/mm.h> #include <linux/bitops.h> #include <asm/word-at-a-time.h> /* * Do a strnlen, return length of string *with* final '\0'. * 'count' is the user-supplied count, while 'max' is the * address space maximum. * * Return 0 for exceptions (which includes hitting the address * space maximum), or 'count+1' if hitting the user-supplied * maximum count. * * NOTE! We can sometimes overshoot the user-supplied maximum * if it fits in a aligned 'long'. The caller needs to check * the return value against "> max". */ static __always_inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max) { const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; unsigned long align, res = 0; unsigned long c; /* * Do everything aligned. But that means that we * need to also expand the maximum.. */ align = (sizeof(unsigned long) - 1) & (unsigned long)src; src -= align; max += align; unsafe_get_user(c, (unsigned long __user *)src, efault); c |= aligned_byte_mask(align); for (;;) { unsigned long data; if (has_zero(c, &data, &constants)) { data = prep_zero_mask(c, data, &constants); data = create_zero_mask(data); return res + find_zero(data) + 1 - align; } res += sizeof(unsigned long); /* We already handled 'unsigned long' bytes. Did we do it all ? */ if (unlikely(max <= sizeof(unsigned long))) break; max -= sizeof(unsigned long); unsafe_get_user(c, (unsigned long __user *)(src+res), efault); } res -= align; /* * Uhhuh. We hit 'max'. But was that the user-specified maximum * too? If so, return the marker for "too long". */ if (res >= count) return count+1; /* * Nope: we hit the address space limit, and we still had more * characters the caller would have wanted. That's 0. */ efault: return 0; } /** * strnlen_user: - Get the size of a user string INCLUDING final NUL. * @str: The string to measure. * @count: Maximum count (including NUL character) * * Context: User context only. This function may sleep if pagefaults are * enabled. * * Get the size of a NUL-terminated string in user space. * * Returns the size of the string INCLUDING the terminating NUL. * If the string is too long, returns a number larger than @count. User * has to check the return value against "> count". * On exception (or invalid count), returns 0. * * NOTE! You should basically never use this function. There is * almost never any valid case for using the length of a user space * string, since the string can be changed at any time by other * threads. Use "strncpy_from_user()" instead to get a stable copy * of the string. */ long strnlen_user(const char __user *str, long count) { unsigned long max_addr, src_addr; if (unlikely(count <= 0)) return 0; max_addr = TASK_SIZE_MAX; src_addr = (unsigned long)untagged_addr(str); if (likely(src_addr < max_addr)) { unsigned long max = max_addr - src_addr; long retval; /* * Truncate 'max' to the user-specified limit, so that * we only have one limit we need to check in the loop */ if (max > count) max = count; if (user_read_access_begin(str, max)) { retval = do_strnlen_user(str, count, max); user_read_access_end(); return retval; } } return 0; } EXPORT_SYMBOL(strnlen_user);
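/*
 * Editor's note: the sketch below is NOT part of upstream
 * lib/strnlen_user.c. It is a hedged illustration, guarded by "#if 0",
 * of the "> count" check described in the strnlen_user() kerneldoc above,
 * followed by the strncpy_from_user() copy that the same NOTE recommends
 * for obtaining a stable snapshot of the user string. The helper name
 * example_copy_user_name() and its buffer sizing are hypothetical.
 */
#if 0	/* illustration only, not compiled */
static int example_copy_user_name(char *dst, long dst_size,
				  const char __user *uname)
{
	long len;

	if (dst_size < 1)
		return -EINVAL;

	/* Size check only; the user string can still change afterwards. */
	len = strnlen_user(uname, dst_size);
	if (len == 0)
		return -EFAULT;		/* fault or invalid count */
	if (len > dst_size)
		return -ENAMETOOLONG;	/* no NUL within dst_size bytes */

	/* Copy and terminate; this is the stable snapshot to work with. */
	len = strncpy_from_user(dst, uname, dst_size - 1);
	if (len < 0)
		return len;
	dst[len] = '\0';
	return 0;
}
#endif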
linux-master
lib/strnlen_user.c