// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/lib/crc-ccitt.c
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc-ccitt.h>
/*
* This mysterious table is just the CRC of each possible byte. It can be
* computed using the standard bit-at-a-time methods. The polynomial can
* be seen in entry 128, 0x8408. This corresponds to x^0 + x^5 + x^12.
* Add the implicit x^16, and you have the standard CRC-CCITT.
*/
u16 const crc_ccitt_table[256] = {
0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
};
EXPORT_SYMBOL(crc_ccitt_table);
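/*
 * Illustrative sketch: how one entry of crc_ccitt_table[] above can be
 * derived bit-at-a-time from the reflected polynomial 0x8408, as the comment
 * above describes. For example, a byte value of 1 yields 0x1189 and a byte
 * value of 128 yields 0x8408, matching the table. The helper name is
 * hypothetical and is not used by this file.
 */
static u16 __maybe_unused crc_ccitt_table_entry(u8 byte)
{
        u16 crc = byte;
        int bit;

        for (bit = 0; bit < 8; bit++)
                crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;

        return crc;
}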
/*
* Similar table to calculate CRC16 variant known as CRC-CCITT-FALSE
* Reflected bits order, does not augment final value.
*/
u16 const crc_ccitt_false_table[256] = {
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
};
EXPORT_SYMBOL(crc_ccitt_false_table);
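/*
 * Illustrative sketch: the CRC-CCITT-FALSE table above is built the same way
 * but most-significant-bit first from the polynomial 0x1021; for example, a
 * byte value of 1 yields 0x1021, the second entry of the table. The helper
 * name is hypothetical and is not used by this file.
 */
static u16 __maybe_unused crc_ccitt_false_table_entry(u8 byte)
{
        u16 crc = (u16)byte << 8;
        int bit;

        for (bit = 0; bit < 8; bit++)
                crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021 : crc << 1;

        return crc;
}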
/**
* crc_ccitt - recompute the CRC (CRC-CCITT variant) for the data
* buffer
* @crc: previous CRC value
* @buffer: data pointer
* @len: number of bytes in the buffer
*/
u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
{
while (len--)
crc = crc_ccitt_byte(crc, *buffer++);
return crc;
}
EXPORT_SYMBOL(crc_ccitt);
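/*
 * Illustrative sketch: because @crc takes the previous value, a CRC can be
 * accumulated across non-contiguous fragments. The fragment names and helper
 * are hypothetical; 0xffff is used here as the seed, though the appropriate
 * initial value depends on the protocol (PPP's FCS, for instance, starts at
 * 0xffff).
 */
static u16 __maybe_unused crc_ccitt_two_fragments(const u8 *frag1, size_t len1,
                                                  const u8 *frag2, size_t len2)
{
        u16 crc = 0xffff;

        crc = crc_ccitt(crc, frag1, len1);
        crc = crc_ccitt(crc, frag2, len2);

        return crc;
}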
/**
* crc_ccitt_false - recompute the CRC (CRC-CCITT-FALSE variant)
* for the data buffer
* @crc: previous CRC value
* @buffer: data pointer
* @len: number of bytes in the buffer
*/
u16 crc_ccitt_false(u16 crc, u8 const *buffer, size_t len)
{
while (len--)
crc = crc_ccitt_false_byte(crc, *buffer++);
return crc;
}
EXPORT_SYMBOL(crc_ccitt_false);
MODULE_DESCRIPTION("CRC-CCITT calculations");
MODULE_LICENSE("GPL");
/* ==== end of lib/crc-ccitt.c (repo: linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* This module provides an interface to trigger and test firmware loading.
*
* It is designed to be used for basic evaluation of the firmware loading
* subsystem (for example when validating firmware verification). It lacks
* any extra dependencies, and will not normally be loaded by the system
* unless explicitly requested by name.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/kstrtox.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/efi_embedded_fw.h>
MODULE_IMPORT_NS(TEST_FIRMWARE);
#define TEST_FIRMWARE_NAME "test-firmware.bin"
#define TEST_FIRMWARE_NUM_REQS 4
#define TEST_FIRMWARE_BUF_SIZE SZ_1K
#define TEST_UPLOAD_MAX_SIZE SZ_2K
#define TEST_UPLOAD_BLK_SIZE 37 /* Avoid powers of two in testing */
static DEFINE_MUTEX(test_fw_mutex);
static const struct firmware *test_firmware;
static LIST_HEAD(test_upload_list);
struct test_batched_req {
u8 idx;
int rc;
bool sent;
const struct firmware *fw;
const char *name;
const char *fw_buf;
struct completion completion;
struct task_struct *task;
struct device *dev;
};
/**
* struct test_config - configuration used by the different test triggers
*
* @name: the name of the firmware file to look for
* @into_buf: if true, request_firmware_into_buf() will be used instead of
* request_firmware() when the corresponding trigger runs.
* @buf_size: size of buf to allocate when into_buf is true
* @file_offset: file offset to request when calling request_firmware_into_buf
* @partial: partial read opt when calling request_firmware_into_buf
* @sync_direct: if true, request_firmware_direct() will be used instead of
* request_firmware() when the sync trigger runs.
* @send_uevent: whether or not to send a uevent for async requests
* @num_requests: number of requests to try per test case. This is trigger
* specific.
* @reqs: stores all requests information
* @read_fw_idx: index of the request thread whose firmware results we
* read through the read_fw trigger.
* @upload_name: firmware name to be used with upload_read sysfs node
* @test_result: a test may use this to collect the result from the call
* of the request_firmware*() calls used in their tests. In order of
* priority we always keep first any setup error. If no setup errors were
* found then we move on to the first error encountered while running the
* API. Note that for async calls this typically will be a successful
* result (0) unless of course you've used bogus parameters, or the system
* is out of memory. In the async case the callback is expected to do a
* bit more homework to figure out what happened; unfortunately, the only
* information passed on error today is the fact that no firmware was
* found, so we can only assume -ENOENT on async calls if the firmware is
* NULL.
*
* Errors you can expect:
*
* API specific:
*
* 0: success for sync, for async it means request was sent
* -EINVAL: invalid parameters or request
* -ENOENT: files not found
*
* System environment:
*
* -ENOMEM: memory pressure on system
* -ENODEV: out of number of devices to test
* -EINVAL: an unexpected error has occurred
* @req_firmware: if @sync_direct is true this is set to
* request_firmware_direct(), otherwise request_firmware()
*/
struct test_config {
char *name;
bool into_buf;
size_t buf_size;
size_t file_offset;
bool partial;
bool sync_direct;
bool send_uevent;
u8 num_requests;
u8 read_fw_idx;
char *upload_name;
/*
* These below don't belong here, but we'll move them once we create
* a struct fw_test_device and stuff the misc_dev under there later.
*/
struct test_batched_req *reqs;
int test_result;
int (*req_firmware)(const struct firmware **fw, const char *name,
struct device *device);
};
struct upload_inject_err {
const char *prog;
enum fw_upload_err err_code;
};
struct test_firmware_upload {
char *name;
struct list_head node;
char *buf;
size_t size;
bool cancel_request;
struct upload_inject_err inject;
struct fw_upload *fwl;
};
static struct test_config *test_fw_config;
static struct test_firmware_upload *upload_lookup_name(const char *name)
{
struct test_firmware_upload *tst;
list_for_each_entry(tst, &test_upload_list, node)
if (strncmp(name, tst->name, strlen(tst->name)) == 0)
return tst;
return NULL;
}
static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
size_t size, loff_t *offset)
{
ssize_t rc = 0;
mutex_lock(&test_fw_mutex);
if (test_firmware)
rc = simple_read_from_buffer(buf, size, offset,
test_firmware->data,
test_firmware->size);
mutex_unlock(&test_fw_mutex);
return rc;
}
static const struct file_operations test_fw_fops = {
.owner = THIS_MODULE,
.read = test_fw_misc_read,
};
static void __test_release_all_firmware(void)
{
struct test_batched_req *req;
u8 i;
if (!test_fw_config->reqs)
return;
for (i = 0; i < test_fw_config->num_requests; i++) {
req = &test_fw_config->reqs[i];
if (req->fw) {
if (req->fw_buf) {
kfree_const(req->fw_buf);
req->fw_buf = NULL;
}
release_firmware(req->fw);
req->fw = NULL;
}
}
vfree(test_fw_config->reqs);
test_fw_config->reqs = NULL;
}
static void test_release_all_firmware(void)
{
mutex_lock(&test_fw_mutex);
__test_release_all_firmware();
mutex_unlock(&test_fw_mutex);
}
static void __test_firmware_config_free(void)
{
__test_release_all_firmware();
kfree_const(test_fw_config->name);
test_fw_config->name = NULL;
}
/*
* XXX: move to kstrncpy() once merged.
*
* Users should use kfree_const() when freeing these.
*/
static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
{
*dst = kstrndup(name, count, gfp);
if (!*dst)
return -ENOMEM;
return count;
}
static int __test_firmware_config_init(void)
{
int ret;
ret = __kstrncpy(&test_fw_config->name, TEST_FIRMWARE_NAME,
strlen(TEST_FIRMWARE_NAME), GFP_KERNEL);
if (ret < 0)
goto out;
test_fw_config->num_requests = TEST_FIRMWARE_NUM_REQS;
test_fw_config->send_uevent = true;
test_fw_config->into_buf = false;
test_fw_config->buf_size = TEST_FIRMWARE_BUF_SIZE;
test_fw_config->file_offset = 0;
test_fw_config->partial = false;
test_fw_config->sync_direct = false;
test_fw_config->req_firmware = request_firmware;
test_fw_config->test_result = 0;
test_fw_config->reqs = NULL;
test_fw_config->upload_name = NULL;
return 0;
out:
__test_firmware_config_free();
return ret;
}
static ssize_t reset_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
mutex_lock(&test_fw_mutex);
__test_firmware_config_free();
ret = __test_firmware_config_init();
if (ret < 0) {
ret = -ENOMEM;
pr_err("could not alloc settings for config trigger: %d\n",
ret);
goto out;
}
pr_info("reset\n");
ret = count;
out:
mutex_unlock(&test_fw_mutex);
return ret;
}
static DEVICE_ATTR_WO(reset);
static ssize_t config_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int len = 0;
mutex_lock(&test_fw_mutex);
len += scnprintf(buf, PAGE_SIZE - len,
"Custom trigger configuration for: %s\n",
dev_name(dev));
if (test_fw_config->name)
len += scnprintf(buf + len, PAGE_SIZE - len,
"name:\t%s\n",
test_fw_config->name);
else
len += scnprintf(buf + len, PAGE_SIZE - len,
"name:\tEMPTY\n");
len += scnprintf(buf + len, PAGE_SIZE - len,
"num_requests:\t%u\n", test_fw_config->num_requests);
len += scnprintf(buf + len, PAGE_SIZE - len,
"send_uevent:\t\t%s\n",
test_fw_config->send_uevent ?
"FW_ACTION_UEVENT" :
"FW_ACTION_NOUEVENT");
len += scnprintf(buf + len, PAGE_SIZE - len,
"into_buf:\t\t%s\n",
test_fw_config->into_buf ? "true" : "false");
len += scnprintf(buf + len, PAGE_SIZE - len,
"buf_size:\t%zu\n", test_fw_config->buf_size);
len += scnprintf(buf + len, PAGE_SIZE - len,
"file_offset:\t%zu\n", test_fw_config->file_offset);
len += scnprintf(buf + len, PAGE_SIZE - len,
"partial:\t\t%s\n",
test_fw_config->partial ? "true" : "false");
len += scnprintf(buf + len, PAGE_SIZE - len,
"sync_direct:\t\t%s\n",
test_fw_config->sync_direct ? "true" : "false");
len += scnprintf(buf + len, PAGE_SIZE - len,
"read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
if (test_fw_config->upload_name)
len += scnprintf(buf + len, PAGE_SIZE - len,
"upload_name:\t%s\n",
test_fw_config->upload_name);
else
len += scnprintf(buf + len, PAGE_SIZE - len,
"upload_name:\tEMPTY\n");
mutex_unlock(&test_fw_mutex);
return len;
}
static DEVICE_ATTR_RO(config);
static ssize_t config_name_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
mutex_lock(&test_fw_mutex);
kfree_const(test_fw_config->name);
ret = __kstrncpy(&test_fw_config->name, buf, count, GFP_KERNEL);
mutex_unlock(&test_fw_mutex);
return ret;
}
/*
* As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
*/
static ssize_t config_test_show_str(char *dst,
char *src)
{
int len;
mutex_lock(&test_fw_mutex);
len = snprintf(dst, PAGE_SIZE, "%s\n", src);
mutex_unlock(&test_fw_mutex);
return len;
}
static inline int __test_dev_config_update_bool(const char *buf, size_t size,
bool *cfg)
{
int ret;
if (kstrtobool(buf, cfg) < 0)
ret = -EINVAL;
else
ret = size;
return ret;
}
static int test_dev_config_update_bool(const char *buf, size_t size,
bool *cfg)
{
int ret;
mutex_lock(&test_fw_mutex);
ret = __test_dev_config_update_bool(buf, size, cfg);
mutex_unlock(&test_fw_mutex);
return ret;
}
static ssize_t test_dev_config_show_bool(char *buf, bool val)
{
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
static int __test_dev_config_update_size_t(
const char *buf,
size_t size,
size_t *cfg)
{
int ret;
long new;
ret = kstrtol(buf, 10, &new);
if (ret)
return ret;
*(size_t *)cfg = new;
/* Always return full write size even if we didn't consume all */
return size;
}
static ssize_t test_dev_config_show_size_t(char *buf, size_t val)
{
return snprintf(buf, PAGE_SIZE, "%zu\n", val);
}
static ssize_t test_dev_config_show_int(char *buf, int val)
{
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
static int __test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
{
u8 val;
int ret;
ret = kstrtou8(buf, 10, &val);
if (ret)
return ret;
*(u8 *)cfg = val;
/* Always return full write size even if we didn't consume all */
return size;
}
static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
{
int ret;
mutex_lock(&test_fw_mutex);
ret = __test_dev_config_update_u8(buf, size, cfg);
mutex_unlock(&test_fw_mutex);
return ret;
}
static ssize_t test_dev_config_show_u8(char *buf, u8 val)
{
return snprintf(buf, PAGE_SIZE, "%u\n", val);
}
static ssize_t config_name_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return config_test_show_str(buf, test_fw_config->name);
}
static DEVICE_ATTR_RW(config_name);
static ssize_t config_upload_name_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct test_firmware_upload *tst;
int ret = count;
mutex_lock(&test_fw_mutex);
tst = upload_lookup_name(buf);
if (tst)
test_fw_config->upload_name = tst->name;
else
ret = -EINVAL;
mutex_unlock(&test_fw_mutex);
return ret;
}
static ssize_t config_upload_name_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return config_test_show_str(buf, test_fw_config->upload_name);
}
static DEVICE_ATTR_RW(config_upload_name);
static ssize_t config_num_requests_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int rc;
mutex_lock(&test_fw_mutex);
if (test_fw_config->reqs) {
pr_err("Must call release_all_firmware prior to changing config\n");
rc = -EINVAL;
mutex_unlock(&test_fw_mutex);
goto out;
}
rc = __test_dev_config_update_u8(buf, count,
&test_fw_config->num_requests);
mutex_unlock(&test_fw_mutex);
out:
return rc;
}
static ssize_t config_num_requests_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return test_dev_config_show_u8(buf, test_fw_config->num_requests);
}
static DEVICE_ATTR_RW(config_num_requests);
static ssize_t config_into_buf_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return test_dev_config_update_bool(buf,
count,
&test_fw_config->into_buf);
}
static ssize_t config_into_buf_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return test_dev_config_show_bool(buf, test_fw_config->into_buf);
}
static DEVICE_ATTR_RW(config_into_buf);
static ssize_t config_buf_size_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int rc;
mutex_lock(&test_fw_mutex);
if (test_fw_config->reqs) {
pr_err("Must call release_all_firmware prior to changing config\n");
rc = -EINVAL;
mutex_unlock(&test_fw_mutex);
goto out;
}
rc = __test_dev_config_update_size_t(buf, count,
&test_fw_config->buf_size);
mutex_unlock(&test_fw_mutex);
out:
return rc;
}
static ssize_t config_buf_size_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return test_dev_config_show_size_t(buf, test_fw_config->buf_size);
}
static DEVICE_ATTR_RW(config_buf_size);
static ssize_t config_file_offset_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int rc;
mutex_lock(&test_fw_mutex);
if (test_fw_config->reqs) {
pr_err("Must call release_all_firmware prior to changing config\n");
rc = -EINVAL;
mutex_unlock(&test_fw_mutex);
goto out;
}
rc = __test_dev_config_update_size_t(buf, count,
&test_fw_config->file_offset);
mutex_unlock(&test_fw_mutex);
out:
return rc;
}
static ssize_t config_file_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return test_dev_config_show_size_t(buf, test_fw_config->file_offset);
}
static DEVICE_ATTR_RW(config_file_offset);
static ssize_t config_partial_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return test_dev_config_update_bool(buf,
count,
&test_fw_config->partial);
}
static ssize_t config_partial_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return test_dev_config_show_bool(buf, test_fw_config->partial);
}
static DEVICE_ATTR_RW(config_partial);
static ssize_t config_sync_direct_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int rc = test_dev_config_update_bool(buf, count,
&test_fw_config->sync_direct);
if (rc == count)
test_fw_config->req_firmware = test_fw_config->sync_direct ?
request_firmware_direct :
request_firmware;
return rc;
}
static ssize_t config_sync_direct_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return test_dev_config_show_bool(buf, test_fw_config->sync_direct);
}
static DEVICE_ATTR_RW(config_sync_direct);
static ssize_t config_send_uevent_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return test_dev_config_update_bool(buf, count,
&test_fw_config->send_uevent);
}
static ssize_t config_send_uevent_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return test_dev_config_show_bool(buf, test_fw_config->send_uevent);
}
static DEVICE_ATTR_RW(config_send_uevent);
static ssize_t config_read_fw_idx_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return test_dev_config_update_u8(buf, count,
&test_fw_config->read_fw_idx);
}
static ssize_t config_read_fw_idx_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return test_dev_config_show_u8(buf, test_fw_config->read_fw_idx);
}
static DEVICE_ATTR_RW(config_read_fw_idx);
static ssize_t trigger_request_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int rc;
char *name;
name = kstrndup(buf, count, GFP_KERNEL);
if (!name)
return -ENOMEM;
pr_info("loading '%s'\n", name);
mutex_lock(&test_fw_mutex);
release_firmware(test_firmware);
if (test_fw_config->reqs)
__test_release_all_firmware();
test_firmware = NULL;
rc = request_firmware(&test_firmware, name, dev);
if (rc) {
pr_info("load of '%s' failed: %d\n", name, rc);
goto out;
}
pr_info("loaded: %zu\n", test_firmware->size);
rc = count;
out:
mutex_unlock(&test_fw_mutex);
kfree(name);
return rc;
}
static DEVICE_ATTR_WO(trigger_request);
#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
extern struct list_head efi_embedded_fw_list;
extern bool efi_embedded_fw_checked;
static ssize_t trigger_request_platform_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
static const u8 test_data[] = {
0x55, 0xaa, 0x55, 0xaa, 0x01, 0x02, 0x03, 0x04,
0x55, 0xaa, 0x55, 0xaa, 0x05, 0x06, 0x07, 0x08,
0x55, 0xaa, 0x55, 0xaa, 0x10, 0x20, 0x30, 0x40,
0x55, 0xaa, 0x55, 0xaa, 0x50, 0x60, 0x70, 0x80
};
struct efi_embedded_fw efi_embedded_fw;
const struct firmware *firmware = NULL;
bool saved_efi_embedded_fw_checked;
char *name;
int rc;
name = kstrndup(buf, count, GFP_KERNEL);
if (!name)
return -ENOMEM;
pr_info("inserting test platform fw '%s'\n", name);
efi_embedded_fw.name = name;
efi_embedded_fw.data = (void *)test_data;
efi_embedded_fw.length = sizeof(test_data);
list_add(&efi_embedded_fw.list, &efi_embedded_fw_list);
saved_efi_embedded_fw_checked = efi_embedded_fw_checked;
efi_embedded_fw_checked = true;
pr_info("loading '%s'\n", name);
rc = firmware_request_platform(&firmware, name, dev);
if (rc) {
pr_info("load of '%s' failed: %d\n", name, rc);
goto out;
}
if (firmware->size != sizeof(test_data) ||
memcmp(firmware->data, test_data, sizeof(test_data)) != 0) {
pr_info("firmware contents mismatch for '%s'\n", name);
rc = -EINVAL;
goto out;
}
pr_info("loaded: %zu\n", firmware->size);
rc = count;
out:
efi_embedded_fw_checked = saved_efi_embedded_fw_checked;
release_firmware(firmware);
list_del(&efi_embedded_fw.list);
kfree(name);
return rc;
}
static DEVICE_ATTR_WO(trigger_request_platform);
#endif
static DECLARE_COMPLETION(async_fw_done);
static void trigger_async_request_cb(const struct firmware *fw, void *context)
{
test_firmware = fw;
complete(&async_fw_done);
}
static ssize_t trigger_async_request_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int rc;
char *name;
name = kstrndup(buf, count, GFP_KERNEL);
if (!name)
return -ENOMEM;
pr_info("loading '%s'\n", name);
mutex_lock(&test_fw_mutex);
release_firmware(test_firmware);
test_firmware = NULL;
if (test_fw_config->reqs)
__test_release_all_firmware();
rc = request_firmware_nowait(THIS_MODULE, 1, name, dev, GFP_KERNEL,
NULL, trigger_async_request_cb);
if (rc) {
pr_info("async load of '%s' failed: %d\n", name, rc);
kfree(name);
goto out;
}
/* Free 'name' ASAP, to test for race conditions */
kfree(name);
wait_for_completion(&async_fw_done);
if (test_firmware) {
pr_info("loaded: %zu\n", test_firmware->size);
rc = count;
} else {
pr_err("failed to async load firmware\n");
rc = -ENOMEM;
}
out:
mutex_unlock(&test_fw_mutex);
return rc;
}
static DEVICE_ATTR_WO(trigger_async_request);
static ssize_t trigger_custom_fallback_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int rc;
char *name;
name = kstrndup(buf, count, GFP_KERNEL);
if (!name)
return -ENOMEM;
pr_info("loading '%s' using custom fallback mechanism\n", name);
mutex_lock(&test_fw_mutex);
release_firmware(test_firmware);
if (test_fw_config->reqs)
__test_release_all_firmware();
test_firmware = NULL;
rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOUEVENT, name,
dev, GFP_KERNEL, NULL,
trigger_async_request_cb);
if (rc) {
pr_info("async load of '%s' failed: %d\n", name, rc);
kfree(name);
goto out;
}
/* Free 'name' ASAP, to test for race conditions */
kfree(name);
wait_for_completion(&async_fw_done);
if (test_firmware) {
pr_info("loaded: %zu\n", test_firmware->size);
rc = count;
} else {
pr_err("failed to async load firmware\n");
rc = -ENODEV;
}
out:
mutex_unlock(&test_fw_mutex);
return rc;
}
static DEVICE_ATTR_WO(trigger_custom_fallback);
static int test_fw_run_batch_request(void *data)
{
struct test_batched_req *req = data;
if (!req) {
test_fw_config->test_result = -EINVAL;
return -EINVAL;
}
if (test_fw_config->into_buf) {
void *test_buf;
test_buf = kzalloc(TEST_FIRMWARE_BUF_SIZE, GFP_KERNEL);
if (!test_buf)
return -ENOMEM;
if (test_fw_config->partial)
req->rc = request_partial_firmware_into_buf
(&req->fw,
req->name,
req->dev,
test_buf,
test_fw_config->buf_size,
test_fw_config->file_offset);
else
req->rc = request_firmware_into_buf
(&req->fw,
req->name,
req->dev,
test_buf,
test_fw_config->buf_size);
if (!req->fw)
kfree(test_buf);
else
req->fw_buf = test_buf;
} else {
req->rc = test_fw_config->req_firmware(&req->fw,
req->name,
req->dev);
}
if (req->rc) {
pr_info("#%u: batched sync load failed: %d\n",
req->idx, req->rc);
if (!test_fw_config->test_result)
test_fw_config->test_result = req->rc;
} else if (req->fw) {
req->sent = true;
pr_info("#%u: batched sync loaded %zu\n",
req->idx, req->fw->size);
}
complete(&req->completion);
req->task = NULL;
return 0;
}
/*
* We use a kthread as otherwise the kernel serializes all our sync requests
* and we would not be able to mimic batched requests on a sync call. Batched
* requests on a sync call can for instance happen on a device driver when
* multiple cards are used and firmware loading happens outside of probe.
*/
static ssize_t trigger_batched_requests_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct test_batched_req *req;
int rc;
u8 i;
mutex_lock(&test_fw_mutex);
if (test_fw_config->reqs) {
rc = -EBUSY;
goto out_bail;
}
test_fw_config->reqs =
vzalloc(array3_size(sizeof(struct test_batched_req),
test_fw_config->num_requests, 2));
if (!test_fw_config->reqs) {
rc = -ENOMEM;
goto out_unlock;
}
pr_info("batched sync firmware loading '%s' %u times\n",
test_fw_config->name, test_fw_config->num_requests);
for (i = 0; i < test_fw_config->num_requests; i++) {
req = &test_fw_config->reqs[i];
req->fw = NULL;
req->idx = i;
req->name = test_fw_config->name;
req->fw_buf = NULL;
req->dev = dev;
init_completion(&req->completion);
req->task = kthread_run(test_fw_run_batch_request, req,
"%s-%u", KBUILD_MODNAME, req->idx);
if (!req->task || IS_ERR(req->task)) {
pr_err("Setting up thread %u failed\n", req->idx);
req->task = NULL;
rc = -ENOMEM;
goto out_bail;
}
}
rc = count;
/*
* We require an explicit release in order to delay the call to
* release_firmware() and so improve our chances of forcing a batched
* request. If we called release_firmware() right away, a successful
* firmware request might miss the opportunity to become a batched
* request.
*/
out_bail:
for (i = 0; i < test_fw_config->num_requests; i++) {
req = &test_fw_config->reqs[i];
if (req->task || req->sent)
wait_for_completion(&req->completion);
}
/* Override any worker error if we had a general setup error */
if (rc < 0)
test_fw_config->test_result = rc;
out_unlock:
mutex_unlock(&test_fw_mutex);
return rc;
}
static DEVICE_ATTR_WO(trigger_batched_requests);
/*
* We wait for each callback to return with the lock held, no need to lock here
*/
static void trigger_batched_cb(const struct firmware *fw, void *context)
{
struct test_batched_req *req = context;
if (!req) {
test_fw_config->test_result = -EINVAL;
return;
}
/* forces *some* batched requests to queue up */
if (!req->idx)
ssleep(2);
req->fw = fw;
/*
* Unfortunately the firmware API gives us nothing other than a null FW
* if the firmware was not found on async requests. Best we can do is
* just assume -ENOENT. A better API would pass the actual return
* value to the callback.
*/
if (!fw && !test_fw_config->test_result)
test_fw_config->test_result = -ENOENT;
complete(&req->completion);
}
static
ssize_t trigger_batched_requests_async_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct test_batched_req *req;
bool send_uevent;
int rc;
u8 i;
mutex_lock(&test_fw_mutex);
if (test_fw_config->reqs) {
rc = -EBUSY;
goto out_bail;
}
test_fw_config->reqs =
vzalloc(array3_size(sizeof(struct test_batched_req),
test_fw_config->num_requests, 2));
if (!test_fw_config->reqs) {
rc = -ENOMEM;
goto out;
}
pr_info("batched loading '%s' custom fallback mechanism %u times\n",
test_fw_config->name, test_fw_config->num_requests);
send_uevent = test_fw_config->send_uevent ? FW_ACTION_UEVENT :
FW_ACTION_NOUEVENT;
for (i = 0; i < test_fw_config->num_requests; i++) {
req = &test_fw_config->reqs[i];
req->name = test_fw_config->name;
req->fw_buf = NULL;
req->fw = NULL;
req->idx = i;
init_completion(&req->completion);
rc = request_firmware_nowait(THIS_MODULE, send_uevent,
req->name,
dev, GFP_KERNEL, req,
trigger_batched_cb);
if (rc) {
pr_info("#%u: batched async load failed setup: %d\n",
i, rc);
req->rc = rc;
goto out_bail;
} else
req->sent = true;
}
rc = count;
out_bail:
/*
* We require an explicit release in order to delay the call to
* release_firmware() and so improve our chances of forcing a batched
* request. If we called release_firmware() right away, a successful
* firmware request might miss the opportunity to become a batched
* request.
*/
for (i = 0; i < test_fw_config->num_requests; i++) {
req = &test_fw_config->reqs[i];
if (req->sent)
wait_for_completion(&req->completion);
}
/* Override any worker error if we had a general setup error */
if (rc < 0)
test_fw_config->test_result = rc;
out:
mutex_unlock(&test_fw_mutex);
return rc;
}
static DEVICE_ATTR_WO(trigger_batched_requests_async);
static void upload_release(struct test_firmware_upload *tst)
{
firmware_upload_unregister(tst->fwl);
kfree(tst->buf);
kfree(tst->name);
kfree(tst);
}
static void upload_release_all(void)
{
struct test_firmware_upload *tst, *tmp;
list_for_each_entry_safe(tst, tmp, &test_upload_list, node) {
list_del(&tst->node);
upload_release(tst);
}
test_fw_config->upload_name = NULL;
}
/*
* This table is replicated from .../firmware_loader/sysfs_upload.c
* and needs to be kept in sync.
*/
static const char * const fw_upload_err_str[] = {
[FW_UPLOAD_ERR_NONE] = "none",
[FW_UPLOAD_ERR_HW_ERROR] = "hw-error",
[FW_UPLOAD_ERR_TIMEOUT] = "timeout",
[FW_UPLOAD_ERR_CANCELED] = "user-abort",
[FW_UPLOAD_ERR_BUSY] = "device-busy",
[FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size",
[FW_UPLOAD_ERR_RW_ERROR] = "read-write-error",
[FW_UPLOAD_ERR_WEAROUT] = "flash-wearout",
};
static void upload_err_inject_error(struct test_firmware_upload *tst,
const u8 *p, const char *prog)
{
enum fw_upload_err err;
for (err = FW_UPLOAD_ERR_NONE + 1; err < FW_UPLOAD_ERR_MAX; err++) {
if (strncmp(p, fw_upload_err_str[err],
strlen(fw_upload_err_str[err])) == 0) {
tst->inject.prog = prog;
tst->inject.err_code = err;
return;
}
}
}
static void upload_err_inject_prog(struct test_firmware_upload *tst,
const u8 *p)
{
static const char * const progs[] = {
"preparing:", "transferring:", "programming:"
};
int i;
for (i = 0; i < ARRAY_SIZE(progs); i++) {
if (strncmp(p, progs[i], strlen(progs[i])) == 0) {
upload_err_inject_error(tst, p + strlen(progs[i]),
progs[i]);
return;
}
}
}
#define FIVE_MINUTES_MS (5 * 60 * 1000)
static enum fw_upload_err
fw_upload_wait_on_cancel(struct test_firmware_upload *tst)
{
int ms_delay;
for (ms_delay = 0; ms_delay < FIVE_MINUTES_MS; ms_delay += 100) {
msleep(100);
if (tst->cancel_request)
return FW_UPLOAD_ERR_CANCELED;
}
return FW_UPLOAD_ERR_NONE;
}
static enum fw_upload_err test_fw_upload_prepare(struct fw_upload *fwl,
const u8 *data, u32 size)
{
struct test_firmware_upload *tst = fwl->dd_handle;
enum fw_upload_err ret = FW_UPLOAD_ERR_NONE;
const char *progress = "preparing:";
tst->cancel_request = false;
if (!size || size > TEST_UPLOAD_MAX_SIZE) {
ret = FW_UPLOAD_ERR_INVALID_SIZE;
goto err_out;
}
if (strncmp(data, "inject:", strlen("inject:")) == 0)
upload_err_inject_prog(tst, data + strlen("inject:"));
memset(tst->buf, 0, TEST_UPLOAD_MAX_SIZE);
tst->size = size;
if (tst->inject.err_code == FW_UPLOAD_ERR_NONE ||
strncmp(tst->inject.prog, progress, strlen(progress)) != 0)
return FW_UPLOAD_ERR_NONE;
if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED)
ret = fw_upload_wait_on_cancel(tst);
else
ret = tst->inject.err_code;
err_out:
/*
* The cleanup op only executes if the prepare op succeeds.
* If the prepare op fails, it must do its own clean-up.
*/
tst->inject.err_code = FW_UPLOAD_ERR_NONE;
tst->inject.prog = NULL;
return ret;
}
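/*
 * Illustrative sketch: the error-injection directive parsed above is a plain
 * string prefix of the uploaded data of the form "inject:<phase>:<error>".
 * The hypothetical payload below would make the write phase report
 * FW_UPLOAD_ERR_TIMEOUT; the directive is simply the start of the data and
 * is copied along with the remaining bytes.
 */
static const u8 __maybe_unused example_inject_payload[] =
        "inject:transferring:timeout"   /* directive matched by the helpers above */
        "\x01\x02\x03\x04";             /* rest of the (hypothetical) upload data */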
static enum fw_upload_err test_fw_upload_write(struct fw_upload *fwl,
const u8 *data, u32 offset,
u32 size, u32 *written)
{
struct test_firmware_upload *tst = fwl->dd_handle;
const char *progress = "transferring:";
u32 blk_size;
if (tst->cancel_request)
return FW_UPLOAD_ERR_CANCELED;
blk_size = min_t(u32, TEST_UPLOAD_BLK_SIZE, size);
memcpy(tst->buf + offset, data + offset, blk_size);
*written = blk_size;
if (tst->inject.err_code == FW_UPLOAD_ERR_NONE ||
strncmp(tst->inject.prog, progress, strlen(progress)) != 0)
return FW_UPLOAD_ERR_NONE;
if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED)
return fw_upload_wait_on_cancel(tst);
return tst->inject.err_code;
}
static enum fw_upload_err test_fw_upload_complete(struct fw_upload *fwl)
{
struct test_firmware_upload *tst = fwl->dd_handle;
const char *progress = "programming:";
if (tst->cancel_request)
return FW_UPLOAD_ERR_CANCELED;
if (tst->inject.err_code == FW_UPLOAD_ERR_NONE ||
strncmp(tst->inject.prog, progress, strlen(progress)) != 0)
return FW_UPLOAD_ERR_NONE;
if (tst->inject.err_code == FW_UPLOAD_ERR_CANCELED)
return fw_upload_wait_on_cancel(tst);
return tst->inject.err_code;
}
static void test_fw_upload_cancel(struct fw_upload *fwl)
{
struct test_firmware_upload *tst = fwl->dd_handle;
tst->cancel_request = true;
}
static void test_fw_cleanup(struct fw_upload *fwl)
{
struct test_firmware_upload *tst = fwl->dd_handle;
tst->inject.err_code = FW_UPLOAD_ERR_NONE;
tst->inject.prog = NULL;
}
static const struct fw_upload_ops upload_test_ops = {
.prepare = test_fw_upload_prepare,
.write = test_fw_upload_write,
.poll_complete = test_fw_upload_complete,
.cancel = test_fw_upload_cancel,
.cleanup = test_fw_cleanup
};
static ssize_t upload_register_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct test_firmware_upload *tst;
struct fw_upload *fwl;
char *name;
int ret;
name = kstrndup(buf, count, GFP_KERNEL);
if (!name)
return -ENOMEM;
mutex_lock(&test_fw_mutex);
tst = upload_lookup_name(name);
if (tst) {
ret = -EEXIST;
goto free_name;
}
tst = kzalloc(sizeof(*tst), GFP_KERNEL);
if (!tst) {
ret = -ENOMEM;
goto free_name;
}
tst->name = name;
tst->buf = kzalloc(TEST_UPLOAD_MAX_SIZE, GFP_KERNEL);
if (!tst->buf) {
ret = -ENOMEM;
goto free_tst;
}
fwl = firmware_upload_register(THIS_MODULE, dev, tst->name,
&upload_test_ops, tst);
if (IS_ERR(fwl)) {
ret = PTR_ERR(fwl);
goto free_buf;
}
tst->fwl = fwl;
list_add_tail(&tst->node, &test_upload_list);
mutex_unlock(&test_fw_mutex);
return count;
free_buf:
kfree(tst->buf);
free_tst:
kfree(tst);
free_name:
mutex_unlock(&test_fw_mutex);
kfree(name);
return ret;
}
static DEVICE_ATTR_WO(upload_register);
static ssize_t upload_unregister_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct test_firmware_upload *tst;
int ret = count;
mutex_lock(&test_fw_mutex);
tst = upload_lookup_name(buf);
if (!tst) {
ret = -EINVAL;
goto out;
}
if (test_fw_config->upload_name == tst->name)
test_fw_config->upload_name = NULL;
list_del(&tst->node);
upload_release(tst);
out:
mutex_unlock(&test_fw_mutex);
return ret;
}
static DEVICE_ATTR_WO(upload_unregister);
static ssize_t test_result_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return test_dev_config_show_int(buf, test_fw_config->test_result);
}
static DEVICE_ATTR_RO(test_result);
static ssize_t release_all_firmware_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
test_release_all_firmware();
return count;
}
static DEVICE_ATTR_WO(release_all_firmware);
static ssize_t read_firmware_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct test_batched_req *req;
u8 idx;
ssize_t rc = 0;
mutex_lock(&test_fw_mutex);
idx = test_fw_config->read_fw_idx;
if (idx >= test_fw_config->num_requests) {
rc = -ERANGE;
goto out;
}
if (!test_fw_config->reqs) {
rc = -EINVAL;
goto out;
}
req = &test_fw_config->reqs[idx];
if (!req->fw) {
pr_err("#%u: failed to async load firmware\n", idx);
rc = -ENOENT;
goto out;
}
pr_info("#%u: loaded %zu\n", idx, req->fw->size);
if (req->fw->size > PAGE_SIZE) {
pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
rc = -EINVAL;
goto out;
}
memcpy(buf, req->fw->data, req->fw->size);
rc = req->fw->size;
out:
mutex_unlock(&test_fw_mutex);
return rc;
}
static DEVICE_ATTR_RO(read_firmware);
static ssize_t upload_read_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct test_firmware_upload *tst = NULL;
struct test_firmware_upload *tst_iter;
int ret = -EINVAL;
if (!test_fw_config->upload_name) {
pr_err("Set config_upload_name before using upload_read\n");
return -EINVAL;
}
mutex_lock(&test_fw_mutex);
list_for_each_entry(tst_iter, &test_upload_list, node)
if (tst_iter->name == test_fw_config->upload_name) {
tst = tst_iter;
break;
}
if (!tst) {
pr_err("Firmware name not found: %s\n",
test_fw_config->upload_name);
goto out;
}
if (tst->size > PAGE_SIZE) {
pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
goto out;
}
memcpy(buf, tst->buf, tst->size);
ret = tst->size;
out:
mutex_unlock(&test_fw_mutex);
return ret;
}
static DEVICE_ATTR_RO(upload_read);
#define TEST_FW_DEV_ATTR(name) &dev_attr_##name.attr
static struct attribute *test_dev_attrs[] = {
TEST_FW_DEV_ATTR(reset),
TEST_FW_DEV_ATTR(config),
TEST_FW_DEV_ATTR(config_name),
TEST_FW_DEV_ATTR(config_num_requests),
TEST_FW_DEV_ATTR(config_into_buf),
TEST_FW_DEV_ATTR(config_buf_size),
TEST_FW_DEV_ATTR(config_file_offset),
TEST_FW_DEV_ATTR(config_partial),
TEST_FW_DEV_ATTR(config_sync_direct),
TEST_FW_DEV_ATTR(config_send_uevent),
TEST_FW_DEV_ATTR(config_read_fw_idx),
TEST_FW_DEV_ATTR(config_upload_name),
/* These don't use the config at all - they could be ported! */
TEST_FW_DEV_ATTR(trigger_request),
TEST_FW_DEV_ATTR(trigger_async_request),
TEST_FW_DEV_ATTR(trigger_custom_fallback),
#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
TEST_FW_DEV_ATTR(trigger_request_platform),
#endif
/* These use the config and can use the test_result */
TEST_FW_DEV_ATTR(trigger_batched_requests),
TEST_FW_DEV_ATTR(trigger_batched_requests_async),
TEST_FW_DEV_ATTR(release_all_firmware),
TEST_FW_DEV_ATTR(test_result),
TEST_FW_DEV_ATTR(read_firmware),
TEST_FW_DEV_ATTR(upload_read),
TEST_FW_DEV_ATTR(upload_register),
TEST_FW_DEV_ATTR(upload_unregister),
NULL,
};
ATTRIBUTE_GROUPS(test_dev);
static struct miscdevice test_fw_misc_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "test_firmware",
.fops = &test_fw_fops,
.groups = test_dev_groups,
};
static int __init test_firmware_init(void)
{
int rc;
test_fw_config = kzalloc(sizeof(struct test_config), GFP_KERNEL);
if (!test_fw_config)
return -ENOMEM;
rc = __test_firmware_config_init();
if (rc) {
kfree(test_fw_config);
pr_err("could not init firmware test config: %d\n", rc);
return rc;
}
rc = misc_register(&test_fw_misc_device);
if (rc) {
__test_firmware_config_free();
kfree(test_fw_config);
pr_err("could not register misc device: %d\n", rc);
return rc;
}
pr_warn("interface ready\n");
return 0;
}
module_init(test_firmware_init);
static void __exit test_firmware_exit(void)
{
mutex_lock(&test_fw_mutex);
release_firmware(test_firmware);
misc_deregister(&test_fw_misc_device);
upload_release_all();
__test_firmware_config_free();
kfree(test_fw_config);
mutex_unlock(&test_fw_mutex);
pr_warn("removed interface\n");
}
module_exit(test_firmware_exit);
MODULE_AUTHOR("Kees Cook <[email protected]>");
MODULE_LICENSE("GPL");
/* ==== end of lib/test_firmware.c (repo: linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* lib/ts_bm.c Boyer-Moore text search implementation
*
* Authors: Pablo Neira Ayuso <[email protected]>
*
* ==========================================================================
*
* Implements Boyer-Moore string matching algorithm:
*
* [1] A Fast String Searching Algorithm, R.S. Boyer and Moore.
* Communications of the Association for Computing Machinery,
* 20(10), 1977, pp. 762-772.
* https://www.cs.utexas.edu/users/moore/publications/fstrpos.pdf
*
* [2] Handbook of Exact String Matching Algorithms, Thierry Lecroq, 2004
* http://www-igm.univ-mlv.fr/~lecroq/string/string.pdf
*
* Note: Since Boyer-Moore (BM) searches for matches from right to left,
* it is still possible that a match could be spread over multiple
* blocks, in which case this algorithm won't find it.
*
* If you need to ensure that such a thing can never happen, use the
* Knuth-Morris-Pratt (KMP) implementation instead. In conclusion, choose
* the proper string search algorithm depending on your setting.
*
* Say you're using the textsearch infrastructure for filtering, NIDS or
* any similar security focused purpose, then go KMP. Otherwise, if you
* really care about performance, say you're classifying packets to apply
* Quality of Service (QoS) policies, and you don't mind possibly missing
* matches spread over multiple fragments, then go BM.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/textsearch.h>
/* Alphabet size, use ASCII */
#define ASIZE 256
#if 0
#define DEBUGP printk
#else
#define DEBUGP(args, format...)
#endif
struct ts_bm
{
u8 * pattern;
unsigned int patlen;
unsigned int bad_shift[ASIZE];
unsigned int good_shift[];
};
static unsigned int matchpat(const u8 *pattern, unsigned int patlen,
const u8 *text, bool icase)
{
unsigned int i;
for (i = 0; i < patlen; i++) {
u8 t = *(text-i);
if (icase)
t = toupper(t);
if (t != *(pattern-i))
break;
}
return i;
}
static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
{
struct ts_bm *bm = ts_config_priv(conf);
unsigned int i, text_len, consumed = state->offset;
const u8 *text;
int bs;
const u8 icase = conf->flags & TS_IGNORECASE;
for (;;) {
int shift = bm->patlen - 1;
text_len = conf->get_next_block(consumed, &text, conf, state);
if (unlikely(text_len == 0))
break;
while (shift < text_len) {
DEBUGP("Searching in position %d (%c)\n",
shift, text[shift]);
i = matchpat(&bm->pattern[bm->patlen-1], bm->patlen,
&text[shift], icase);
if (i == bm->patlen) {
/* London calling... */
DEBUGP("found!\n");
return consumed + (shift-(bm->patlen-1));
}
bs = bm->bad_shift[text[shift-i]];
/* Now jumping to... */
shift = max_t(int, shift-i+bs, shift+bm->good_shift[i]);
}
consumed += text_len;
}
return UINT_MAX;
}
static int subpattern(u8 *pattern, int i, int j, int g)
{
int x = i+g-1, y = j+g-1, ret = 0;
while(pattern[x--] == pattern[y--]) {
if (y < 0) {
ret = 1;
break;
}
if (--g == 0) {
ret = pattern[i-1] != pattern[j-1];
break;
}
}
return ret;
}
static void compute_prefix_tbl(struct ts_bm *bm, int flags)
{
int i, j, g;
for (i = 0; i < ASIZE; i++)
bm->bad_shift[i] = bm->patlen;
for (i = 0; i < bm->patlen - 1; i++) {
bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i;
if (flags & TS_IGNORECASE)
bm->bad_shift[tolower(bm->pattern[i])]
= bm->patlen - 1 - i;
}
/* Compute the good shift array, used to match recurrences
* of a subpattern */
bm->good_shift[0] = 1;
for (i = 1; i < bm->patlen; i++)
bm->good_shift[i] = bm->patlen;
for (i = bm->patlen-1, g = 1; i > 0; g++, i--) {
for (j = i-1; j >= 1-g ; j--)
if (subpattern(bm->pattern, i, j, g)) {
bm->good_shift[g] = bm->patlen-j-g;
break;
}
}
}
static struct ts_config *bm_init(const void *pattern, unsigned int len,
gfp_t gfp_mask, int flags)
{
struct ts_config *conf;
struct ts_bm *bm;
int i;
unsigned int prefix_tbl_len = len * sizeof(unsigned int);
size_t priv_size = sizeof(*bm) + len + prefix_tbl_len;
conf = alloc_ts_config(priv_size, gfp_mask);
if (IS_ERR(conf))
return conf;
conf->flags = flags;
bm = ts_config_priv(conf);
bm->patlen = len;
bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
if (flags & TS_IGNORECASE)
for (i = 0; i < len; i++)
bm->pattern[i] = toupper(((u8 *)pattern)[i]);
else
memcpy(bm->pattern, pattern, len);
compute_prefix_tbl(bm, flags);
return conf;
}
static void *bm_get_pattern(struct ts_config *conf)
{
struct ts_bm *bm = ts_config_priv(conf);
return bm->pattern;
}
static unsigned int bm_get_pattern_len(struct ts_config *conf)
{
struct ts_bm *bm = ts_config_priv(conf);
return bm->patlen;
}
static struct ts_ops bm_ops = {
.name = "bm",
.find = bm_find,
.init = bm_init,
.get_pattern = bm_get_pattern,
.get_pattern_len = bm_get_pattern_len,
.owner = THIS_MODULE,
.list = LIST_HEAD_INIT(bm_ops.list)
};
static int __init init_bm(void)
{
return textsearch_register(&bm_ops);
}
static void __exit exit_bm(void)
{
textsearch_unregister(&bm_ops);
}
MODULE_LICENSE("GPL");
module_init(init_bm);
module_exit(exit_bm);
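/*
 * Illustrative sketch: how a caller selects this algorithm through the
 * textsearch API, as discussed in the header comment above. The pattern,
 * haystack and function name are hypothetical.
 */
static int __maybe_unused example_bm_search(const u8 *haystack, unsigned int len)
{
        struct ts_config *conf;
        struct ts_state state;
        unsigned int pos;

        conf = textsearch_prepare("bm", "needle", 6, GFP_KERNEL, TS_AUTOLOAD);
        if (IS_ERR(conf))
                return PTR_ERR(conf);

        pos = textsearch_find_continuous(conf, &state, haystack, len);
        textsearch_destroy(conf);

        return pos == UINT_MAX ? -ENOENT : (int)pos;
}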
/* ==== end of lib/ts_bm.c (repo: linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* cpu_rmap.c: CPU affinity reverse-map support
* Copyright 2011 Solarflare Communications Inc.
*/
#include <linux/cpu_rmap.h>
#include <linux/interrupt.h>
#include <linux/export.h>
/*
* These functions maintain a mapping from CPUs to some ordered set of
* objects with CPU affinities. This can be seen as a reverse-map of
* CPU affinity. However, we do not assume that the object affinities
* cover all CPUs in the system. For those CPUs not directly covered
* by object affinities, we attempt to find a nearest object based on
* CPU topology.
*/
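/*
 * Illustrative sketch: building a reverse-map for a set of objects and then
 * looking up the nearest object for a given CPU, using the API declared in
 * <linux/cpu_rmap.h> and defined below. The object array, affinity masks and
 * function name are hypothetical.
 */
static void * __maybe_unused example_nearest_obj(void **objs,
                                                 const struct cpumask *masks,
                                                 unsigned int nobjs,
                                                 unsigned int cpu)
{
        struct cpu_rmap *rmap;
        unsigned int i;
        void *nearest;
        int index;

        rmap = alloc_cpu_rmap(nobjs, GFP_KERNEL);
        if (!rmap)
                return NULL;

        for (i = 0; i < nobjs; i++) {
                index = cpu_rmap_add(rmap, objs[i]);
                if (index >= 0)
                        cpu_rmap_update(rmap, index, &masks[i]);
        }

        /* The nearest known object for @cpu, per the mapping built above */
        nearest = rmap->obj[rmap->near[cpu].index];

        cpu_rmap_put(rmap);
        return nearest;
}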
/**
* alloc_cpu_rmap - allocate CPU affinity reverse-map
* @size: Number of objects to be mapped
* @flags: Allocation flags e.g. %GFP_KERNEL
*/
struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
{
struct cpu_rmap *rmap;
unsigned int cpu;
size_t obj_offset;
/* This is a silly number of objects, and we use u16 indices. */
if (size > 0xffff)
return NULL;
/* Offset of object pointer array from base structure */
obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]),
sizeof(void *));
rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags);
if (!rmap)
return NULL;
kref_init(&rmap->refcount);
rmap->obj = (void **)((char *)rmap + obj_offset);
/* Initially assign CPUs to objects on a rota, since we have
* no idea where the objects are. Use infinite distance, so
* any object with known distance is preferable. Include the
* CPUs that are not present/online, since we definitely want
* any newly-hotplugged CPUs to have some object assigned.
*/
for_each_possible_cpu(cpu) {
rmap->near[cpu].index = cpu % size;
rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
}
rmap->size = size;
return rmap;
}
EXPORT_SYMBOL(alloc_cpu_rmap);
/**
* cpu_rmap_release - internal reclaiming helper called from kref_put
* @ref: kref to struct cpu_rmap
*/
static void cpu_rmap_release(struct kref *ref)
{
struct cpu_rmap *rmap = container_of(ref, struct cpu_rmap, refcount);
kfree(rmap);
}
/**
* cpu_rmap_get - internal helper to get new ref on a cpu_rmap
* @rmap: reverse-map allocated with alloc_cpu_rmap()
*/
static inline void cpu_rmap_get(struct cpu_rmap *rmap)
{
kref_get(&rmap->refcount);
}
/**
* cpu_rmap_put - release ref on a cpu_rmap
* @rmap: reverse-map allocated with alloc_cpu_rmap()
*/
int cpu_rmap_put(struct cpu_rmap *rmap)
{
return kref_put(&rmap->refcount, cpu_rmap_release);
}
EXPORT_SYMBOL(cpu_rmap_put);
/* Reevaluate nearest object for given CPU, comparing with the given
* neighbours at the given distance.
*/
static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,
const struct cpumask *mask, u16 dist)
{
int neigh;
for_each_cpu(neigh, mask) {
if (rmap->near[cpu].dist > dist &&
rmap->near[neigh].dist <= dist) {
rmap->near[cpu].index = rmap->near[neigh].index;
rmap->near[cpu].dist = dist;
return true;
}
}
return false;
}
#ifdef DEBUG
static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
{
unsigned index;
unsigned int cpu;
pr_info("cpu_rmap %p, %s:\n", rmap, prefix);
for_each_possible_cpu(cpu) {
index = rmap->near[cpu].index;
pr_info("cpu %d -> obj %u (distance %u)\n",
cpu, index, rmap->near[cpu].dist);
}
}
#else
static inline void
debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
{
}
#endif
static int get_free_index(struct cpu_rmap *rmap)
{
int i;
for (i = 0; i < rmap->size; i++)
if (!rmap->obj[i])
return i;
return -ENOSPC;
}
/**
* cpu_rmap_add - add object to a rmap
* @rmap: CPU rmap allocated with alloc_cpu_rmap()
* @obj: Object to add to rmap
*
* Return index of object or -ENOSPC if no free entry was found
*/
int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
{
int index = get_free_index(rmap);
if (index < 0)
return index;
rmap->obj[index] = obj;
return index;
}
EXPORT_SYMBOL(cpu_rmap_add);
/**
* cpu_rmap_update - update CPU rmap following a change of object affinity
* @rmap: CPU rmap to update
* @index: Index of object whose affinity changed
* @affinity: New CPU affinity of object
*/
int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
const struct cpumask *affinity)
{
cpumask_var_t update_mask;
unsigned int cpu;
if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))
return -ENOMEM;
/* Invalidate distance for all CPUs for which this used to be
* the nearest object. Mark those CPUs for update.
*/
for_each_online_cpu(cpu) {
if (rmap->near[cpu].index == index) {
rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
cpumask_set_cpu(cpu, update_mask);
}
}
debug_print_rmap(rmap, "after invalidating old distances");
/* Set distance to 0 for all CPUs in the new affinity mask.
* Mark all CPUs within their NUMA nodes for update.
*/
for_each_cpu(cpu, affinity) {
rmap->near[cpu].index = index;
rmap->near[cpu].dist = 0;
cpumask_or(update_mask, update_mask,
cpumask_of_node(cpu_to_node(cpu)));
}
debug_print_rmap(rmap, "after updating neighbours");
/* Update distances based on topology */
for_each_cpu(cpu, update_mask) {
if (cpu_rmap_copy_neigh(rmap, cpu,
topology_sibling_cpumask(cpu), 1))
continue;
if (cpu_rmap_copy_neigh(rmap, cpu,
topology_core_cpumask(cpu), 2))
continue;
if (cpu_rmap_copy_neigh(rmap, cpu,
cpumask_of_node(cpu_to_node(cpu)), 3))
continue;
/* We could continue into NUMA node distances, but for now
* we give up.
*/
}
debug_print_rmap(rmap, "after copying neighbours");
free_cpumask_var(update_mask);
return 0;
}
EXPORT_SYMBOL(cpu_rmap_update);
/* Glue between IRQ affinity notifiers and CPU rmaps */
struct irq_glue {
struct irq_affinity_notify notify;
struct cpu_rmap *rmap;
u16 index;
};
/**
* free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
* @rmap: Reverse-map allocated with alloc_irq_cpu_rmap(), or %NULL
*
* Must be called in process context, before freeing the IRQs.
*/
void free_irq_cpu_rmap(struct cpu_rmap *rmap)
{
struct irq_glue *glue;
u16 index;
if (!rmap)
return;
for (index = 0; index < rmap->size; index++) {
glue = rmap->obj[index];
if (glue)
irq_set_affinity_notifier(glue->notify.irq, NULL);
}
cpu_rmap_put(rmap);
}
EXPORT_SYMBOL(free_irq_cpu_rmap);
/**
* irq_cpu_rmap_notify - callback for IRQ subsystem when IRQ affinity updated
* @notify: struct irq_affinity_notify passed by irq/manage.c
* @mask: cpu mask for new SMP affinity
*
* This is executed in workqueue context.
*/
static void
irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
{
struct irq_glue *glue =
container_of(notify, struct irq_glue, notify);
int rc;
rc = cpu_rmap_update(glue->rmap, glue->index, mask);
if (rc)
pr_warn("irq_cpu_rmap_notify: update failed: %d\n", rc);
}
/**
* irq_cpu_rmap_release - reclaiming callback for IRQ subsystem
* @ref: kref to struct irq_affinity_notify passed by irq/manage.c
*/
static void irq_cpu_rmap_release(struct kref *ref)
{
struct irq_glue *glue =
container_of(ref, struct irq_glue, notify.kref);
glue->rmap->obj[glue->index] = NULL;
cpu_rmap_put(glue->rmap);
kfree(glue);
}
/**
* irq_cpu_rmap_remove - remove an IRQ from a CPU affinity reverse-map
* @rmap: The reverse-map
* @irq: The IRQ number
*/
int irq_cpu_rmap_remove(struct cpu_rmap *rmap, int irq)
{
return irq_set_affinity_notifier(irq, NULL);
}
EXPORT_SYMBOL(irq_cpu_rmap_remove);
/**
* irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map
* @rmap: The reverse-map
* @irq: The IRQ number
*
* This adds an IRQ affinity notifier that will update the reverse-map
* automatically.
*
* Must be called in process context, after the IRQ is allocated but
* before it is bound with request_irq().
*/
int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
{
struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL);
int rc;
if (!glue)
return -ENOMEM;
glue->notify.notify = irq_cpu_rmap_notify;
glue->notify.release = irq_cpu_rmap_release;
glue->rmap = rmap;
cpu_rmap_get(rmap);
rc = cpu_rmap_add(rmap, glue);
if (rc < 0)
goto err_add;
glue->index = rc;
rc = irq_set_affinity_notifier(irq, &glue->notify);
if (rc)
goto err_set;
return rc;
err_set:
rmap->obj[glue->index] = NULL;
err_add:
cpu_rmap_put(glue->rmap);
kfree(glue);
return rc;
}
EXPORT_SYMBOL(irq_cpu_rmap_add);
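/*
 * Illustrative sketch: the call ordering described in the kerneldoc above,
 * for a driver with several IRQs. The IRQ array and function name are
 * hypothetical and error handling is abbreviated.
 */
static struct cpu_rmap * __maybe_unused example_irq_rmap(const int *irqs,
                                                         unsigned int nirqs)
{
        struct cpu_rmap *rmap;
        unsigned int i;

        rmap = alloc_cpu_rmap(nirqs, GFP_KERNEL);
        if (!rmap)
                return NULL;

        /* After the IRQs are allocated, but before request_irq() binds them */
        for (i = 0; i < nirqs; i++) {
                if (irq_cpu_rmap_add(rmap, irqs[i])) {
                        free_irq_cpu_rmap(rmap);
                        return NULL;
                }
        }

        return rmap;
}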
/* ==== end of lib/cpu_rmap.c (repo: linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-or-later
/* Generic associative array implementation.
*
* See Documentation/core-api/assoc_array.rst for information.
*
* Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
//#define DEBUG
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/assoc_array_priv.h>
/*
* Iterate over an associative array. The caller must hold the RCU read lock
* or better.
*/
static int assoc_array_subtree_iterate(const struct assoc_array_ptr *root,
const struct assoc_array_ptr *stop,
int (*iterator)(const void *leaf,
void *iterator_data),
void *iterator_data)
{
const struct assoc_array_shortcut *shortcut;
const struct assoc_array_node *node;
const struct assoc_array_ptr *cursor, *ptr, *parent;
unsigned long has_meta;
int slot, ret;
cursor = root;
begin_node:
if (assoc_array_ptr_is_shortcut(cursor)) {
/* Descend through a shortcut */
shortcut = assoc_array_ptr_to_shortcut(cursor);
cursor = READ_ONCE(shortcut->next_node); /* Address dependency. */
}
node = assoc_array_ptr_to_node(cursor);
slot = 0;
/* We perform two passes of each node.
*
* The first pass does all the leaves in this node. This means we
* don't miss any leaves if the node is split up by insertion whilst
* we're iterating over the branches rooted here (we may, however, see
* some leaves twice).
*/
has_meta = 0;
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
has_meta |= (unsigned long)ptr;
if (ptr && assoc_array_ptr_is_leaf(ptr)) {
/* We need a barrier between the read of the pointer and the
* read of the data it points to, which is supplied by the
* above READ_ONCE().
*/
/* Invoke the callback */
ret = iterator(assoc_array_ptr_to_leaf(ptr),
iterator_data);
if (ret)
return ret;
}
}
/* The second pass attends to all the metadata pointers. If we follow
* one of these we may find that we don't come back here, but rather go
* back to a replacement node with the leaves in a different layout.
*
* We are guaranteed to make progress, however, as the slot number for
* a particular portion of the key space cannot change - and we
* continue at the back pointer + 1.
*/
if (!(has_meta & ASSOC_ARRAY_PTR_META_TYPE))
goto finished_node;
slot = 0;
continue_node:
node = assoc_array_ptr_to_node(cursor);
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
if (assoc_array_ptr_is_meta(ptr)) {
cursor = ptr;
goto begin_node;
}
}
finished_node:
/* Move up to the parent (may need to skip back over a shortcut) */
parent = READ_ONCE(node->back_pointer); /* Address dependency. */
slot = node->parent_slot;
if (parent == stop)
return 0;
if (assoc_array_ptr_is_shortcut(parent)) {
shortcut = assoc_array_ptr_to_shortcut(parent);
cursor = parent;
parent = READ_ONCE(shortcut->back_pointer); /* Address dependency. */
slot = shortcut->parent_slot;
if (parent == stop)
return 0;
}
/* Ascend to next slot in parent node */
cursor = parent;
slot++;
goto continue_node;
}
/**
* assoc_array_iterate - Pass all objects in the array to a callback
* @array: The array to iterate over.
* @iterator: The callback function.
* @iterator_data: Private data for the callback function.
*
* Iterate over all the objects in an associative array. Each one will be
* presented to the iterator function.
*
* If the array is being modified concurrently with the iteration then it is
* possible that some objects in the array will be passed to the iterator
* callback more than once - though every object should be passed at least
* once. If this is undesirable then the caller must lock against modification
* for the duration of this function.
*
* The function will return 0 if no objects were in the array or else it will
* return the result of the last iterator function called. Iteration stops
* immediately if any call to the iteration function results in a non-zero
* return.
*
* The caller should hold the RCU read lock or better if concurrent
* modification is possible.
*/
int assoc_array_iterate(const struct assoc_array *array,
int (*iterator)(const void *object,
void *iterator_data),
void *iterator_data)
{
struct assoc_array_ptr *root = READ_ONCE(array->root); /* Address dependency. */
if (!root)
return 0;
return assoc_array_subtree_iterate(root, NULL, iterator, iterator_data);
}
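/*
 * Illustrative sketch (not part of the original file): a minimal iterator
 * callback for assoc_array_iterate().  The my_counter structure and the
 * my_array name are hypothetical:
 *
 *	struct my_counter { size_t n; };
 *
 *	static int my_count_one(const void *object, void *iterator_data)
 *	{
 *		struct my_counter *c = iterator_data;
 *
 *		c->n++;
 *		return 0;	(returning non-zero stops the iteration)
 *	}
 *
 *	struct my_counter count = { 0 };
 *
 *	rcu_read_lock();
 *	assoc_array_iterate(&my_array, my_count_one, &count);
 *	rcu_read_unlock();
 */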
enum assoc_array_walk_status {
assoc_array_walk_tree_empty,
assoc_array_walk_found_terminal_node,
assoc_array_walk_found_wrong_shortcut,
};
struct assoc_array_walk_result {
struct {
struct assoc_array_node *node; /* Node in which leaf might be found */
int level;
int slot;
} terminal_node;
struct {
struct assoc_array_shortcut *shortcut;
int level;
int sc_level;
unsigned long sc_segments;
unsigned long dissimilarity;
} wrong_shortcut;
};
/*
* Navigate through the internal tree looking for the closest node to the key.
*/
static enum assoc_array_walk_status
assoc_array_walk(const struct assoc_array *array,
const struct assoc_array_ops *ops,
const void *index_key,
struct assoc_array_walk_result *result)
{
struct assoc_array_shortcut *shortcut;
struct assoc_array_node *node;
struct assoc_array_ptr *cursor, *ptr;
unsigned long sc_segments, dissimilarity;
unsigned long segments;
int level, sc_level, next_sc_level;
int slot;
pr_devel("-->%s()\n", __func__);
cursor = READ_ONCE(array->root); /* Address dependency. */
if (!cursor)
return assoc_array_walk_tree_empty;
level = 0;
/* Use segments from the key for the new leaf to navigate through the
* internal tree, skipping through nodes and shortcuts that are on
* route to the destination. Eventually we'll come to a slot that is
* either empty or contains a leaf at which point we've found a node in
* which the leaf we're looking for might be found or into which it
* should be inserted.
*/
jumped:
segments = ops->get_key_chunk(index_key, level);
pr_devel("segments[%d]: %lx\n", level, segments);
if (assoc_array_ptr_is_shortcut(cursor))
goto follow_shortcut;
consider_node:
node = assoc_array_ptr_to_node(cursor);
slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
slot &= ASSOC_ARRAY_FAN_MASK;
ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
pr_devel("consider slot %x [ix=%d type=%lu]\n",
slot, level, (unsigned long)ptr & 3);
if (!assoc_array_ptr_is_meta(ptr)) {
/* The node doesn't have a node/shortcut pointer in the slot
* corresponding to the index key that we have to follow.
*/
result->terminal_node.node = node;
result->terminal_node.level = level;
result->terminal_node.slot = slot;
pr_devel("<--%s() = terminal_node\n", __func__);
return assoc_array_walk_found_terminal_node;
}
if (assoc_array_ptr_is_node(ptr)) {
/* There is a pointer to a node in the slot corresponding to
* this index key segment, so we need to follow it.
*/
cursor = ptr;
level += ASSOC_ARRAY_LEVEL_STEP;
if ((level & ASSOC_ARRAY_KEY_CHUNK_MASK) != 0)
goto consider_node;
goto jumped;
}
/* There is a shortcut in the slot corresponding to the index key
* segment. We follow the shortcut if its partial index key matches
* this leaf's. Otherwise we need to split the shortcut.
*/
cursor = ptr;
follow_shortcut:
shortcut = assoc_array_ptr_to_shortcut(cursor);
pr_devel("shortcut to %d\n", shortcut->skip_to_level);
sc_level = level + ASSOC_ARRAY_LEVEL_STEP;
BUG_ON(sc_level > shortcut->skip_to_level);
do {
/* Check the leaf against the shortcut's index key a word at a
* time, trimming the final word (the shortcut stores the index
* key completely from the root to the shortcut's target).
*/
if ((sc_level & ASSOC_ARRAY_KEY_CHUNK_MASK) == 0)
segments = ops->get_key_chunk(index_key, sc_level);
sc_segments = shortcut->index_key[sc_level >> ASSOC_ARRAY_KEY_CHUNK_SHIFT];
dissimilarity = segments ^ sc_segments;
if (round_up(sc_level, ASSOC_ARRAY_KEY_CHUNK_SIZE) > shortcut->skip_to_level) {
/* Trim segments that are beyond the shortcut */
int shift = shortcut->skip_to_level & ASSOC_ARRAY_KEY_CHUNK_MASK;
dissimilarity &= ~(ULONG_MAX << shift);
next_sc_level = shortcut->skip_to_level;
} else {
next_sc_level = sc_level + ASSOC_ARRAY_KEY_CHUNK_SIZE;
next_sc_level = round_down(next_sc_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
}
if (dissimilarity != 0) {
/* This shortcut points elsewhere */
result->wrong_shortcut.shortcut = shortcut;
result->wrong_shortcut.level = level;
result->wrong_shortcut.sc_level = sc_level;
result->wrong_shortcut.sc_segments = sc_segments;
result->wrong_shortcut.dissimilarity = dissimilarity;
return assoc_array_walk_found_wrong_shortcut;
}
sc_level = next_sc_level;
} while (sc_level < shortcut->skip_to_level);
/* The shortcut matches the leaf's index to this point. */
cursor = READ_ONCE(shortcut->next_node); /* Address dependency. */
if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) {
level = sc_level;
goto jumped;
} else {
level = sc_level;
goto consider_node;
}
}
/**
* assoc_array_find - Find an object by index key
* @array: The associative array to search.
* @ops: The operations to use.
* @index_key: The key to the object.
*
* Find an object in an associative array by walking through the internal tree
* to the node that should contain the object and then searching the leaves
* there. NULL is returned if the requested object was not found in the array.
*
* The caller must hold the RCU read lock or better.
*/
void *assoc_array_find(const struct assoc_array *array,
const struct assoc_array_ops *ops,
const void *index_key)
{
struct assoc_array_walk_result result;
const struct assoc_array_node *node;
const struct assoc_array_ptr *ptr;
const void *leaf;
int slot;
if (assoc_array_walk(array, ops, index_key, &result) !=
assoc_array_walk_found_terminal_node)
return NULL;
node = result.terminal_node.node;
	/* If the target key is available to us, it has to be pointed to by
* the terminal node.
*/
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
if (ptr && assoc_array_ptr_is_leaf(ptr)) {
/* We need a barrier between the read of the pointer
* and dereferencing the pointer - but only if we are
* actually going to dereference it.
*/
leaf = assoc_array_ptr_to_leaf(ptr);
if (ops->compare_object(leaf, index_key))
return (void *)leaf;
}
}
return NULL;
}
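/*
 * Illustrative sketch (not part of the original file): a lookup under the
 * RCU read lock.  my_array, my_ops and my_key are caller-provided; the ops
 * table must implement the methods described in
 * Documentation/core-api/assoc_array.rst:
 *
 *	void *obj;
 *
 *	rcu_read_lock();
 *	obj = assoc_array_find(&my_array, &my_ops, &my_key);
 *	if (obj)
 *		my_object_get(obj);	(hypothetical: pin it before unlock)
 *	rcu_read_unlock();
 */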
/*
* Destructively iterate over an associative array. The caller must prevent
* other simultaneous accesses.
*/
static void assoc_array_destroy_subtree(struct assoc_array_ptr *root,
const struct assoc_array_ops *ops)
{
struct assoc_array_shortcut *shortcut;
struct assoc_array_node *node;
struct assoc_array_ptr *cursor, *parent = NULL;
int slot = -1;
pr_devel("-->%s()\n", __func__);
cursor = root;
if (!cursor) {
pr_devel("empty\n");
return;
}
move_to_meta:
if (assoc_array_ptr_is_shortcut(cursor)) {
/* Descend through a shortcut */
pr_devel("[%d] shortcut\n", slot);
BUG_ON(!assoc_array_ptr_is_shortcut(cursor));
shortcut = assoc_array_ptr_to_shortcut(cursor);
BUG_ON(shortcut->back_pointer != parent);
BUG_ON(slot != -1 && shortcut->parent_slot != slot);
parent = cursor;
cursor = shortcut->next_node;
slot = -1;
BUG_ON(!assoc_array_ptr_is_node(cursor));
}
pr_devel("[%d] node\n", slot);
node = assoc_array_ptr_to_node(cursor);
BUG_ON(node->back_pointer != parent);
BUG_ON(slot != -1 && node->parent_slot != slot);
slot = 0;
continue_node:
pr_devel("Node %p [back=%p]\n", node, node->back_pointer);
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
struct assoc_array_ptr *ptr = node->slots[slot];
if (!ptr)
continue;
if (assoc_array_ptr_is_meta(ptr)) {
parent = cursor;
cursor = ptr;
goto move_to_meta;
}
if (ops) {
pr_devel("[%d] free leaf\n", slot);
ops->free_object(assoc_array_ptr_to_leaf(ptr));
}
}
parent = node->back_pointer;
slot = node->parent_slot;
pr_devel("free node\n");
kfree(node);
if (!parent)
return; /* Done */
/* Move back up to the parent (may need to free a shortcut on
* the way up) */
if (assoc_array_ptr_is_shortcut(parent)) {
shortcut = assoc_array_ptr_to_shortcut(parent);
BUG_ON(shortcut->next_node != cursor);
cursor = parent;
parent = shortcut->back_pointer;
slot = shortcut->parent_slot;
pr_devel("free shortcut\n");
kfree(shortcut);
if (!parent)
return;
BUG_ON(!assoc_array_ptr_is_node(parent));
}
/* Ascend to next slot in parent node */
pr_devel("ascend to %p[%d]\n", parent, slot);
cursor = parent;
node = assoc_array_ptr_to_node(cursor);
slot++;
goto continue_node;
}
/**
* assoc_array_destroy - Destroy an associative array
* @array: The array to destroy.
* @ops: The operations to use.
*
* Discard all metadata and free all objects in an associative array. The
* array will be empty and ready to use again upon completion. This function
* cannot fail.
*
* The caller must prevent all other accesses whilst this takes place as no
* attempt is made to adjust pointers gracefully to permit RCU readlock-holding
* accesses to continue. On the other hand, no memory allocation is required.
*/
void assoc_array_destroy(struct assoc_array *array,
const struct assoc_array_ops *ops)
{
assoc_array_destroy_subtree(array->root, ops);
array->root = NULL;
}
/*
* Handle insertion into an empty tree.
*/
static bool assoc_array_insert_in_empty_tree(struct assoc_array_edit *edit)
{
struct assoc_array_node *new_n0;
pr_devel("-->%s()\n", __func__);
new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
if (!new_n0)
return false;
edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
edit->leaf_p = &new_n0->slots[0];
edit->adjust_count_on = new_n0;
edit->set[0].ptr = &edit->array->root;
edit->set[0].to = assoc_array_node_to_ptr(new_n0);
pr_devel("<--%s() = ok [no root]\n", __func__);
return true;
}
/*
* Handle insertion into a terminal node.
*/
static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
const struct assoc_array_ops *ops,
const void *index_key,
struct assoc_array_walk_result *result)
{
struct assoc_array_shortcut *shortcut, *new_s0;
struct assoc_array_node *node, *new_n0, *new_n1, *side;
struct assoc_array_ptr *ptr;
unsigned long dissimilarity, base_seg, blank;
size_t keylen;
bool have_meta;
int level, diff;
int slot, next_slot, free_slot, i, j;
node = result->terminal_node.node;
level = result->terminal_node.level;
edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = result->terminal_node.slot;
pr_devel("-->%s()\n", __func__);
/* We arrived at a node which doesn't have an onward node or shortcut
* pointer that we have to follow. This means that (a) the leaf we
* want must go here (either by insertion or replacement) or (b) we
* need to split this node and insert in one of the fragments.
*/
free_slot = -1;
/* Firstly, we have to check the leaves in this node to see if there's
* a matching one we should replace in place.
*/
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
if (!ptr) {
free_slot = i;
continue;
}
if (assoc_array_ptr_is_leaf(ptr) &&
ops->compare_object(assoc_array_ptr_to_leaf(ptr),
index_key)) {
pr_devel("replace in slot %d\n", i);
edit->leaf_p = &node->slots[i];
edit->dead_leaf = node->slots[i];
pr_devel("<--%s() = ok [replace]\n", __func__);
return true;
}
}
/* If there is a free slot in this node then we can just insert the
* leaf here.
*/
if (free_slot >= 0) {
pr_devel("insert in free slot %d\n", free_slot);
edit->leaf_p = &node->slots[free_slot];
edit->adjust_count_on = node;
pr_devel("<--%s() = ok [insert]\n", __func__);
return true;
}
/* The node has no spare slots - so we're either going to have to split
* it or insert another node before it.
*
* Whatever, we're going to need at least two new nodes - so allocate
* those now. We may also need a new shortcut, but we deal with that
* when we need it.
*/
new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
if (!new_n0)
return false;
edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
new_n1 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
if (!new_n1)
return false;
edit->new_meta[1] = assoc_array_node_to_ptr(new_n1);
/* We need to find out how similar the leaves are. */
pr_devel("no spare slots\n");
have_meta = false;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
if (assoc_array_ptr_is_meta(ptr)) {
edit->segment_cache[i] = 0xff;
have_meta = true;
continue;
}
base_seg = ops->get_object_key_chunk(
assoc_array_ptr_to_leaf(ptr), level);
base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
}
if (have_meta) {
pr_devel("have meta\n");
goto split_node;
}
/* The node contains only leaves */
dissimilarity = 0;
base_seg = edit->segment_cache[0];
for (i = 1; i < ASSOC_ARRAY_FAN_OUT; i++)
dissimilarity |= edit->segment_cache[i] ^ base_seg;
pr_devel("only leaves; dissimilarity=%lx\n", dissimilarity);
if ((dissimilarity & ASSOC_ARRAY_FAN_MASK) == 0) {
/* The old leaves all cluster in the same slot. We will need
		 * to insert a shortcut if the new leaf wants to cluster with them.
*/
if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
goto all_leaves_cluster_together;
/* Otherwise all the old leaves cluster in the same slot, but
* the new leaf wants to go into a different slot - so we
* create a new node (n0) to hold the new leaf and a pointer to
* a new node (n1) holding all the old leaves.
*
* This can be done by falling through to the node splitting
* path.
*/
pr_devel("present leaves cluster but not new leaf\n");
}
split_node:
pr_devel("split node\n");
/* We need to split the current node. The node must contain anything
* from a single leaf (in the one leaf case, this leaf will cluster
* with the new leaf) and the rest meta-pointers, to all leaves, some
* of which may cluster.
*
* It won't contain the case in which all the current leaves plus the
* new leaves want to cluster in the same slot.
*
* We need to expel at least two leaves out of a set consisting of the
* leaves in the node and the new leaf. The current meta pointers can
* just be copied as they shouldn't cluster with any of the leaves.
*
* We need a new node (n0) to replace the current one and a new node to
* take the expelled nodes (n1).
*/
edit->set[0].to = assoc_array_node_to_ptr(new_n0);
new_n0->back_pointer = node->back_pointer;
new_n0->parent_slot = node->parent_slot;
new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
new_n1->parent_slot = -1; /* Need to calculate this */
do_split_node:
pr_devel("do_split_node\n");
new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
new_n1->nr_leaves_on_branch = 0;
/* Begin by finding two matching leaves. There have to be at least two
* that match - even if there are meta pointers - because any leaf that
* would match a slot with a meta pointer in it must be somewhere
* behind that meta pointer and cannot be here. Further, given N
* remaining leaf slots, we now have N+1 leaves to go in them.
*/
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
slot = edit->segment_cache[i];
if (slot != 0xff)
for (j = i + 1; j < ASSOC_ARRAY_FAN_OUT + 1; j++)
if (edit->segment_cache[j] == slot)
goto found_slot_for_multiple_occupancy;
}
found_slot_for_multiple_occupancy:
pr_devel("same slot: %x %x [%02x]\n", i, j, slot);
BUG_ON(i >= ASSOC_ARRAY_FAN_OUT);
BUG_ON(j >= ASSOC_ARRAY_FAN_OUT + 1);
BUG_ON(slot >= ASSOC_ARRAY_FAN_OUT);
new_n1->parent_slot = slot;
/* Metadata pointers cannot change slot */
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
if (assoc_array_ptr_is_meta(node->slots[i]))
new_n0->slots[i] = node->slots[i];
else
new_n0->slots[i] = NULL;
BUG_ON(new_n0->slots[slot] != NULL);
new_n0->slots[slot] = assoc_array_node_to_ptr(new_n1);
/* Filter the leaf pointers between the new nodes */
free_slot = -1;
next_slot = 0;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
if (assoc_array_ptr_is_meta(node->slots[i]))
continue;
if (edit->segment_cache[i] == slot) {
new_n1->slots[next_slot++] = node->slots[i];
new_n1->nr_leaves_on_branch++;
} else {
do {
free_slot++;
} while (new_n0->slots[free_slot] != NULL);
new_n0->slots[free_slot] = node->slots[i];
}
}
pr_devel("filtered: f=%x n=%x\n", free_slot, next_slot);
if (edit->segment_cache[ASSOC_ARRAY_FAN_OUT] != slot) {
do {
free_slot++;
} while (new_n0->slots[free_slot] != NULL);
edit->leaf_p = &new_n0->slots[free_slot];
edit->adjust_count_on = new_n0;
} else {
edit->leaf_p = &new_n1->slots[next_slot++];
edit->adjust_count_on = new_n1;
}
BUG_ON(next_slot <= 1);
edit->set_backpointers_to = assoc_array_node_to_ptr(new_n0);
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
if (edit->segment_cache[i] == 0xff) {
ptr = node->slots[i];
BUG_ON(assoc_array_ptr_is_leaf(ptr));
if (assoc_array_ptr_is_node(ptr)) {
side = assoc_array_ptr_to_node(ptr);
edit->set_backpointers[i] = &side->back_pointer;
} else {
shortcut = assoc_array_ptr_to_shortcut(ptr);
edit->set_backpointers[i] = &shortcut->back_pointer;
}
}
}
ptr = node->back_pointer;
if (!ptr)
edit->set[0].ptr = &edit->array->root;
else if (assoc_array_ptr_is_node(ptr))
edit->set[0].ptr = &assoc_array_ptr_to_node(ptr)->slots[node->parent_slot];
else
edit->set[0].ptr = &assoc_array_ptr_to_shortcut(ptr)->next_node;
edit->excised_meta[0] = assoc_array_node_to_ptr(node);
pr_devel("<--%s() = ok [split node]\n", __func__);
return true;
all_leaves_cluster_together:
/* All the leaves, new and old, want to cluster together in this node
* in the same slot, so we have to replace this node with a shortcut to
* skip over the identical parts of the key and then place a pair of
* nodes, one inside the other, at the end of the shortcut and
* distribute the keys between them.
*
* Firstly we need to work out where the leaves start diverging as a
* bit position into their keys so that we know how big the shortcut
* needs to be.
*
* We only need to make a single pass of N of the N+1 leaves because if
* any keys differ between themselves at bit X then at least one of
* them must also differ with the base key at bit X or before.
*/
pr_devel("all leaves cluster together\n");
diff = INT_MAX;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]),
index_key);
if (x < diff) {
BUG_ON(x < 0);
diff = x;
}
}
BUG_ON(diff == INT_MAX);
BUG_ON(diff < level + ASSOC_ARRAY_LEVEL_STEP);
keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s0 = kzalloc(struct_size(new_s0, index_key, keylen), GFP_KERNEL);
if (!new_s0)
return false;
edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s0);
edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
new_s0->back_pointer = node->back_pointer;
new_s0->parent_slot = node->parent_slot;
new_s0->next_node = assoc_array_node_to_ptr(new_n0);
new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
new_n0->parent_slot = 0;
new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
new_n1->parent_slot = -1; /* Need to calculate this */
new_s0->skip_to_level = level = diff & ~ASSOC_ARRAY_LEVEL_STEP_MASK;
pr_devel("skip_to_level = %d [diff %d]\n", level, diff);
BUG_ON(level <= 0);
for (i = 0; i < keylen; i++)
new_s0->index_key[i] =
ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) {
blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
new_s0->index_key[keylen - 1] &= ~blank;
}
/* This now reduces to a node splitting exercise for which we'll need
* to regenerate the disparity table.
*/
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
base_seg = ops->get_object_key_chunk(assoc_array_ptr_to_leaf(ptr),
level);
base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
edit->segment_cache[i] = base_seg & ASSOC_ARRAY_FAN_MASK;
}
base_seg = ops->get_key_chunk(index_key, level);
base_seg >>= level & ASSOC_ARRAY_KEY_CHUNK_MASK;
edit->segment_cache[ASSOC_ARRAY_FAN_OUT] = base_seg & ASSOC_ARRAY_FAN_MASK;
goto do_split_node;
}
/*
* Handle insertion into the middle of a shortcut.
*/
static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
const struct assoc_array_ops *ops,
struct assoc_array_walk_result *result)
{
struct assoc_array_shortcut *shortcut, *new_s0, *new_s1;
struct assoc_array_node *node, *new_n0, *side;
unsigned long sc_segments, dissimilarity, blank;
size_t keylen;
int level, sc_level, diff;
int sc_slot;
shortcut = result->wrong_shortcut.shortcut;
level = result->wrong_shortcut.level;
sc_level = result->wrong_shortcut.sc_level;
sc_segments = result->wrong_shortcut.sc_segments;
dissimilarity = result->wrong_shortcut.dissimilarity;
pr_devel("-->%s(ix=%d dis=%lx scix=%d)\n",
__func__, level, dissimilarity, sc_level);
/* We need to split a shortcut and insert a node between the two
* pieces. Zero-length pieces will be dispensed with entirely.
*
* First of all, we need to find out in which level the first
* difference was.
*/
diff = __ffs(dissimilarity);
diff &= ~ASSOC_ARRAY_LEVEL_STEP_MASK;
diff += sc_level & ~ASSOC_ARRAY_KEY_CHUNK_MASK;
pr_devel("diff=%d\n", diff);
if (!shortcut->back_pointer) {
edit->set[0].ptr = &edit->array->root;
} else if (assoc_array_ptr_is_node(shortcut->back_pointer)) {
node = assoc_array_ptr_to_node(shortcut->back_pointer);
edit->set[0].ptr = &node->slots[shortcut->parent_slot];
} else {
BUG();
}
edit->excised_meta[0] = assoc_array_shortcut_to_ptr(shortcut);
/* Create a new node now since we're going to need it anyway */
new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
if (!new_n0)
return false;
edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
edit->adjust_count_on = new_n0;
/* Insert a new shortcut before the new node if this segment isn't of
* zero length - otherwise we just connect the new node directly to the
* parent.
*/
level += ASSOC_ARRAY_LEVEL_STEP;
if (diff > level) {
pr_devel("pre-shortcut %d...%d\n", level, diff);
keylen = round_up(diff, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s0 = kzalloc(struct_size(new_s0, index_key, keylen),
GFP_KERNEL);
if (!new_s0)
return false;
edit->new_meta[1] = assoc_array_shortcut_to_ptr(new_s0);
edit->set[0].to = assoc_array_shortcut_to_ptr(new_s0);
new_s0->back_pointer = shortcut->back_pointer;
new_s0->parent_slot = shortcut->parent_slot;
new_s0->next_node = assoc_array_node_to_ptr(new_n0);
new_s0->skip_to_level = diff;
new_n0->back_pointer = assoc_array_shortcut_to_ptr(new_s0);
new_n0->parent_slot = 0;
memcpy(new_s0->index_key, shortcut->index_key,
flex_array_size(new_s0, index_key, keylen));
blank = ULONG_MAX << (diff & ASSOC_ARRAY_KEY_CHUNK_MASK);
pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, diff, blank);
new_s0->index_key[keylen - 1] &= ~blank;
} else {
pr_devel("no pre-shortcut\n");
edit->set[0].to = assoc_array_node_to_ptr(new_n0);
new_n0->back_pointer = shortcut->back_pointer;
new_n0->parent_slot = shortcut->parent_slot;
}
side = assoc_array_ptr_to_node(shortcut->next_node);
new_n0->nr_leaves_on_branch = side->nr_leaves_on_branch;
/* We need to know which slot in the new node is going to take a
* metadata pointer.
*/
sc_slot = sc_segments >> (diff & ASSOC_ARRAY_KEY_CHUNK_MASK);
sc_slot &= ASSOC_ARRAY_FAN_MASK;
pr_devel("new slot %lx >> %d -> %d\n",
sc_segments, diff & ASSOC_ARRAY_KEY_CHUNK_MASK, sc_slot);
/* Determine whether we need to follow the new node with a replacement
* for the current shortcut. We could in theory reuse the current
* shortcut if its parent slot number doesn't change - but that's a
* 1-in-16 chance so not worth expending the code upon.
*/
level = diff + ASSOC_ARRAY_LEVEL_STEP;
if (level < shortcut->skip_to_level) {
pr_devel("post-shortcut %d...%d\n", level, shortcut->skip_to_level);
keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s1 = kzalloc(struct_size(new_s1, index_key, keylen),
GFP_KERNEL);
if (!new_s1)
return false;
edit->new_meta[2] = assoc_array_shortcut_to_ptr(new_s1);
new_s1->back_pointer = assoc_array_node_to_ptr(new_n0);
new_s1->parent_slot = sc_slot;
new_s1->next_node = shortcut->next_node;
new_s1->skip_to_level = shortcut->skip_to_level;
new_n0->slots[sc_slot] = assoc_array_shortcut_to_ptr(new_s1);
memcpy(new_s1->index_key, shortcut->index_key,
flex_array_size(new_s1, index_key, keylen));
edit->set[1].ptr = &side->back_pointer;
edit->set[1].to = assoc_array_shortcut_to_ptr(new_s1);
} else {
pr_devel("no post-shortcut\n");
/* We don't have to replace the pointed-to node as long as we
* use memory barriers to make sure the parent slot number is
* changed before the back pointer (the parent slot number is
* irrelevant to the old parent shortcut).
*/
new_n0->slots[sc_slot] = shortcut->next_node;
edit->set_parent_slot[0].p = &side->parent_slot;
edit->set_parent_slot[0].to = sc_slot;
edit->set[1].ptr = &side->back_pointer;
edit->set[1].to = assoc_array_node_to_ptr(new_n0);
}
/* Install the new leaf in a spare slot in the new node. */
if (sc_slot == 0)
edit->leaf_p = &new_n0->slots[1];
else
edit->leaf_p = &new_n0->slots[0];
pr_devel("<--%s() = ok [split shortcut]\n", __func__);
	return true;
}
/**
* assoc_array_insert - Script insertion of an object into an associative array
* @array: The array to insert into.
* @ops: The operations to use.
* @index_key: The key to insert at.
* @object: The object to insert.
*
* Precalculate and preallocate a script for the insertion or replacement of an
* object in an associative array. This results in an edit script that can
* either be applied or cancelled.
*
* The function returns a pointer to an edit script or -ENOMEM.
*
* The caller should lock against other modifications and must continue to hold
* the lock until assoc_array_apply_edit() has been called.
*
* Accesses to the tree may take place concurrently with this function,
* provided they hold the RCU read lock.
*/
struct assoc_array_edit *assoc_array_insert(struct assoc_array *array,
const struct assoc_array_ops *ops,
const void *index_key,
void *object)
{
struct assoc_array_walk_result result;
struct assoc_array_edit *edit;
pr_devel("-->%s()\n", __func__);
/* The leaf pointer we're given must not have the bottom bit set as we
* use those for type-marking the pointer. NULL pointers are also not
* allowed as they indicate an empty slot but we have to allow them
* here as they can be updated later.
*/
BUG_ON(assoc_array_ptr_is_meta(object));
edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
if (!edit)
return ERR_PTR(-ENOMEM);
edit->array = array;
edit->ops = ops;
edit->leaf = assoc_array_leaf_to_ptr(object);
edit->adjust_count_by = 1;
switch (assoc_array_walk(array, ops, index_key, &result)) {
case assoc_array_walk_tree_empty:
/* Allocate a root node if there isn't one yet */
if (!assoc_array_insert_in_empty_tree(edit))
goto enomem;
return edit;
case assoc_array_walk_found_terminal_node:
/* We found a node that doesn't have a node/shortcut pointer in
* the slot corresponding to the index key that we have to
* follow.
*/
if (!assoc_array_insert_into_terminal_node(edit, ops, index_key,
&result))
goto enomem;
return edit;
case assoc_array_walk_found_wrong_shortcut:
/* We found a shortcut that didn't match our key in a slot we
* needed to follow.
*/
if (!assoc_array_insert_mid_shortcut(edit, ops, &result))
goto enomem;
return edit;
}
enomem:
/* Clean up after an out of memory error */
pr_devel("enomem\n");
assoc_array_cancel_edit(edit);
return ERR_PTR(-ENOMEM);
}
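/*
 * Illustrative sketch (not part of the original file): the usual two-phase
 * insertion pattern, assuming my_array, my_ops, my_key and my_lock are
 * caller state.  The lock must be held from scripting the edit until it
 * has been applied:
 *
 *	struct assoc_array_edit *edit;
 *
 *	mutex_lock(&my_lock);
 *	edit = assoc_array_insert(&my_array, &my_ops, &my_key, object);
 *	if (IS_ERR(edit)) {
 *		mutex_unlock(&my_lock);
 *		return PTR_ERR(edit);
 *	}
 *	assoc_array_apply_edit(edit);	(cannot fail; cleanup happens via RCU)
 *	mutex_unlock(&my_lock);
 */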
/**
* assoc_array_insert_set_object - Set the new object pointer in an edit script
* @edit: The edit script to modify.
* @object: The object pointer to set.
*
* Change the object to be inserted in an edit script. The object pointed to
* by the old object is not freed. This must be done prior to applying the
* script.
*/
void assoc_array_insert_set_object(struct assoc_array_edit *edit, void *object)
{
BUG_ON(!object);
edit->leaf = assoc_array_leaf_to_ptr(object);
}
struct assoc_array_delete_collapse_context {
struct assoc_array_node *node;
const void *skip_leaf;
int slot;
};
/*
* Subtree collapse to node iterator.
*/
static int assoc_array_delete_collapse_iterator(const void *leaf,
void *iterator_data)
{
struct assoc_array_delete_collapse_context *collapse = iterator_data;
if (leaf == collapse->skip_leaf)
return 0;
BUG_ON(collapse->slot >= ASSOC_ARRAY_FAN_OUT);
collapse->node->slots[collapse->slot++] = assoc_array_leaf_to_ptr(leaf);
return 0;
}
/**
* assoc_array_delete - Script deletion of an object from an associative array
* @array: The array to search.
* @ops: The operations to use.
* @index_key: The key to the object.
*
* Precalculate and preallocate a script for the deletion of an object from an
* associative array. This results in an edit script that can either be
* applied or cancelled.
*
* The function returns a pointer to an edit script if the object was found,
* NULL if the object was not found or -ENOMEM.
*
* The caller should lock against other modifications and must continue to hold
* the lock until assoc_array_apply_edit() has been called.
*
* Accesses to the tree may take place concurrently with this function,
* provided they hold the RCU read lock.
*/
struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
const struct assoc_array_ops *ops,
const void *index_key)
{
struct assoc_array_delete_collapse_context collapse;
struct assoc_array_walk_result result;
struct assoc_array_node *node, *new_n0;
struct assoc_array_edit *edit;
struct assoc_array_ptr *ptr;
bool has_meta;
int slot, i;
pr_devel("-->%s()\n", __func__);
edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
if (!edit)
return ERR_PTR(-ENOMEM);
edit->array = array;
edit->ops = ops;
edit->adjust_count_by = -1;
switch (assoc_array_walk(array, ops, index_key, &result)) {
case assoc_array_walk_found_terminal_node:
/* We found a node that should contain the leaf we've been
* asked to remove - *if* it's in the tree.
*/
pr_devel("terminal_node\n");
node = result.terminal_node.node;
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
ptr = node->slots[slot];
if (ptr &&
assoc_array_ptr_is_leaf(ptr) &&
ops->compare_object(assoc_array_ptr_to_leaf(ptr),
index_key))
goto found_leaf;
}
fallthrough;
case assoc_array_walk_tree_empty:
case assoc_array_walk_found_wrong_shortcut:
default:
assoc_array_cancel_edit(edit);
pr_devel("not found\n");
return NULL;
}
found_leaf:
BUG_ON(array->nr_leaves_on_tree <= 0);
/* In the simplest form of deletion we just clear the slot and release
* the leaf after a suitable interval.
*/
edit->dead_leaf = node->slots[slot];
edit->set[0].ptr = &node->slots[slot];
edit->set[0].to = NULL;
edit->adjust_count_on = node;
/* If that concludes erasure of the last leaf, then delete the entire
* internal array.
*/
if (array->nr_leaves_on_tree == 1) {
edit->set[1].ptr = &array->root;
edit->set[1].to = NULL;
edit->adjust_count_on = NULL;
edit->excised_subtree = array->root;
pr_devel("all gone\n");
return edit;
}
/* However, we'd also like to clear up some metadata blocks if we
* possibly can.
*
* We go for a simple algorithm of: if this node has FAN_OUT or fewer
* leaves in it, then attempt to collapse it - and attempt to
* recursively collapse up the tree.
*
* We could also try and collapse in partially filled subtrees to take
* up space in this node.
*/
if (node->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) {
struct assoc_array_node *parent, *grandparent;
struct assoc_array_ptr *ptr;
/* First of all, we need to know if this node has metadata so
* that we don't try collapsing if all the leaves are already
* here.
*/
has_meta = false;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
ptr = node->slots[i];
if (assoc_array_ptr_is_meta(ptr)) {
has_meta = true;
break;
}
}
pr_devel("leaves: %ld [m=%d]\n",
node->nr_leaves_on_branch - 1, has_meta);
/* Look further up the tree to see if we can collapse this node
* into a more proximal node too.
*/
parent = node;
collapse_up:
pr_devel("collapse subtree: %ld\n", parent->nr_leaves_on_branch);
ptr = parent->back_pointer;
if (!ptr)
goto do_collapse;
if (assoc_array_ptr_is_shortcut(ptr)) {
struct assoc_array_shortcut *s = assoc_array_ptr_to_shortcut(ptr);
ptr = s->back_pointer;
if (!ptr)
goto do_collapse;
}
grandparent = assoc_array_ptr_to_node(ptr);
if (grandparent->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT + 1) {
parent = grandparent;
goto collapse_up;
}
do_collapse:
/* There's no point collapsing if the original node has no meta
* pointers to discard and if we didn't merge into one of that
* node's ancestry.
*/
if (has_meta || parent != node) {
node = parent;
/* Create a new node to collapse into */
new_n0 = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
if (!new_n0)
goto enomem;
edit->new_meta[0] = assoc_array_node_to_ptr(new_n0);
new_n0->back_pointer = node->back_pointer;
new_n0->parent_slot = node->parent_slot;
new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
edit->adjust_count_on = new_n0;
collapse.node = new_n0;
collapse.skip_leaf = assoc_array_ptr_to_leaf(edit->dead_leaf);
collapse.slot = 0;
assoc_array_subtree_iterate(assoc_array_node_to_ptr(node),
node->back_pointer,
assoc_array_delete_collapse_iterator,
&collapse);
pr_devel("collapsed %d,%lu\n", collapse.slot, new_n0->nr_leaves_on_branch);
BUG_ON(collapse.slot != new_n0->nr_leaves_on_branch - 1);
if (!node->back_pointer) {
edit->set[1].ptr = &array->root;
} else if (assoc_array_ptr_is_leaf(node->back_pointer)) {
BUG();
} else if (assoc_array_ptr_is_node(node->back_pointer)) {
struct assoc_array_node *p =
assoc_array_ptr_to_node(node->back_pointer);
edit->set[1].ptr = &p->slots[node->parent_slot];
} else if (assoc_array_ptr_is_shortcut(node->back_pointer)) {
struct assoc_array_shortcut *s =
assoc_array_ptr_to_shortcut(node->back_pointer);
edit->set[1].ptr = &s->next_node;
}
edit->set[1].to = assoc_array_node_to_ptr(new_n0);
edit->excised_subtree = assoc_array_node_to_ptr(node);
}
}
return edit;
enomem:
/* Clean up after an out of memory error */
pr_devel("enomem\n");
assoc_array_cancel_edit(edit);
return ERR_PTR(-ENOMEM);
}
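/*
 * Illustrative sketch (not part of the original file): deletion follows the
 * same edit-script pattern, with NULL meaning "object not found".  Once the
 * edit is applied, the dead object is handed to ops->free_object() from the
 * RCU callback below:
 *
 *	mutex_lock(&my_lock);
 *	edit = assoc_array_delete(&my_array, &my_ops, &my_key);
 *	if (IS_ERR(edit)) {
 *		mutex_unlock(&my_lock);
 *		return PTR_ERR(edit);
 *	}
 *	if (edit)
 *		assoc_array_apply_edit(edit);
 *	mutex_unlock(&my_lock);
 */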
/**
* assoc_array_clear - Script deletion of all objects from an associative array
* @array: The array to clear.
* @ops: The operations to use.
*
* Precalculate and preallocate a script for the deletion of all the objects
* from an associative array. This results in an edit script that can either
* be applied or cancelled.
*
* The function returns a pointer to an edit script if there are objects to be
* deleted, NULL if there are no objects in the array or -ENOMEM.
*
* The caller should lock against other modifications and must continue to hold
* the lock until assoc_array_apply_edit() has been called.
*
* Accesses to the tree may take place concurrently with this function,
* provided they hold the RCU read lock.
*/
struct assoc_array_edit *assoc_array_clear(struct assoc_array *array,
const struct assoc_array_ops *ops)
{
struct assoc_array_edit *edit;
pr_devel("-->%s()\n", __func__);
if (!array->root)
return NULL;
edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
if (!edit)
return ERR_PTR(-ENOMEM);
edit->array = array;
edit->ops = ops;
edit->set[1].ptr = &array->root;
edit->set[1].to = NULL;
edit->excised_subtree = array->root;
edit->ops_for_excised_subtree = ops;
pr_devel("all gone\n");
return edit;
}
/*
* Handle the deferred destruction after an applied edit.
*/
static void assoc_array_rcu_cleanup(struct rcu_head *head)
{
struct assoc_array_edit *edit =
container_of(head, struct assoc_array_edit, rcu);
int i;
pr_devel("-->%s()\n", __func__);
if (edit->dead_leaf)
edit->ops->free_object(assoc_array_ptr_to_leaf(edit->dead_leaf));
for (i = 0; i < ARRAY_SIZE(edit->excised_meta); i++)
if (edit->excised_meta[i])
kfree(assoc_array_ptr_to_node(edit->excised_meta[i]));
if (edit->excised_subtree) {
BUG_ON(assoc_array_ptr_is_leaf(edit->excised_subtree));
if (assoc_array_ptr_is_node(edit->excised_subtree)) {
struct assoc_array_node *n =
assoc_array_ptr_to_node(edit->excised_subtree);
n->back_pointer = NULL;
} else {
struct assoc_array_shortcut *s =
assoc_array_ptr_to_shortcut(edit->excised_subtree);
s->back_pointer = NULL;
}
assoc_array_destroy_subtree(edit->excised_subtree,
edit->ops_for_excised_subtree);
}
kfree(edit);
}
/**
* assoc_array_apply_edit - Apply an edit script to an associative array
* @edit: The script to apply.
*
* Apply an edit script to an associative array to effect an insertion,
* deletion or clearance. As the edit script includes preallocated memory,
* this is guaranteed not to fail.
*
* The edit script, dead objects and dead metadata will be scheduled for
* destruction after an RCU grace period to permit those doing read-only
* accesses on the array to continue to do so under the RCU read lock whilst
* the edit is taking place.
*/
void assoc_array_apply_edit(struct assoc_array_edit *edit)
{
struct assoc_array_shortcut *shortcut;
struct assoc_array_node *node;
struct assoc_array_ptr *ptr;
int i;
pr_devel("-->%s()\n", __func__);
smp_wmb();
if (edit->leaf_p)
*edit->leaf_p = edit->leaf;
smp_wmb();
for (i = 0; i < ARRAY_SIZE(edit->set_parent_slot); i++)
if (edit->set_parent_slot[i].p)
*edit->set_parent_slot[i].p = edit->set_parent_slot[i].to;
smp_wmb();
for (i = 0; i < ARRAY_SIZE(edit->set_backpointers); i++)
if (edit->set_backpointers[i])
*edit->set_backpointers[i] = edit->set_backpointers_to;
smp_wmb();
for (i = 0; i < ARRAY_SIZE(edit->set); i++)
if (edit->set[i].ptr)
*edit->set[i].ptr = edit->set[i].to;
if (edit->array->root == NULL) {
edit->array->nr_leaves_on_tree = 0;
} else if (edit->adjust_count_on) {
node = edit->adjust_count_on;
for (;;) {
node->nr_leaves_on_branch += edit->adjust_count_by;
ptr = node->back_pointer;
if (!ptr)
break;
if (assoc_array_ptr_is_shortcut(ptr)) {
shortcut = assoc_array_ptr_to_shortcut(ptr);
ptr = shortcut->back_pointer;
if (!ptr)
break;
}
BUG_ON(!assoc_array_ptr_is_node(ptr));
node = assoc_array_ptr_to_node(ptr);
}
edit->array->nr_leaves_on_tree += edit->adjust_count_by;
}
call_rcu(&edit->rcu, assoc_array_rcu_cleanup);
}
/**
* assoc_array_cancel_edit - Discard an edit script.
* @edit: The script to discard.
*
* Free an edit script and all the preallocated data it holds without making
* any changes to the associative array it was intended for.
*
* NOTE! In the case of an insertion script, this does _not_ release the leaf
* that was to be inserted. That is left to the caller.
*/
void assoc_array_cancel_edit(struct assoc_array_edit *edit)
{
struct assoc_array_ptr *ptr;
int i;
pr_devel("-->%s()\n", __func__);
/* Clean up after an out of memory error */
for (i = 0; i < ARRAY_SIZE(edit->new_meta); i++) {
ptr = edit->new_meta[i];
if (ptr) {
if (assoc_array_ptr_is_node(ptr))
kfree(assoc_array_ptr_to_node(ptr));
else
kfree(assoc_array_ptr_to_shortcut(ptr));
}
}
kfree(edit);
}
/**
* assoc_array_gc - Garbage collect an associative array.
* @array: The array to clean.
* @ops: The operations to use.
* @iterator: A callback function to pass judgement on each object.
* @iterator_data: Private data for the callback function.
*
* Collect garbage from an associative array and pack down the internal tree to
* save memory.
*
* The iterator function is asked to pass judgement upon each object in the
 * array. If it returns false, the object is discarded and if it returns true,
* the object is kept. If it returns true, it must increment the object's
* usage count (or whatever it needs to do to retain it) before returning.
*
* This function returns 0 if successful or -ENOMEM if out of memory. In the
* latter case, the array is not changed.
*
* The caller should lock against other modifications and must continue to hold
* the lock until assoc_array_apply_edit() has been called.
*
* Accesses to the tree may take place concurrently with this function,
* provided they hold the RCU read lock.
*/
int assoc_array_gc(struct assoc_array *array,
const struct assoc_array_ops *ops,
bool (*iterator)(void *object, void *iterator_data),
void *iterator_data)
{
struct assoc_array_shortcut *shortcut, *new_s;
struct assoc_array_node *node, *new_n;
struct assoc_array_edit *edit;
struct assoc_array_ptr *cursor, *ptr;
struct assoc_array_ptr *new_root, *new_parent, **new_ptr_pp;
unsigned long nr_leaves_on_tree;
bool retained;
int keylen, slot, nr_free, next_slot, i;
pr_devel("-->%s()\n", __func__);
if (!array->root)
return 0;
edit = kzalloc(sizeof(struct assoc_array_edit), GFP_KERNEL);
if (!edit)
return -ENOMEM;
edit->array = array;
edit->ops = ops;
edit->ops_for_excised_subtree = ops;
edit->set[0].ptr = &array->root;
edit->excised_subtree = array->root;
new_root = new_parent = NULL;
new_ptr_pp = &new_root;
cursor = array->root;
descend:
/* If this point is a shortcut, then we need to duplicate it and
* advance the target cursor.
*/
if (assoc_array_ptr_is_shortcut(cursor)) {
shortcut = assoc_array_ptr_to_shortcut(cursor);
keylen = round_up(shortcut->skip_to_level, ASSOC_ARRAY_KEY_CHUNK_SIZE);
keylen >>= ASSOC_ARRAY_KEY_CHUNK_SHIFT;
new_s = kmalloc(struct_size(new_s, index_key, keylen),
GFP_KERNEL);
if (!new_s)
goto enomem;
pr_devel("dup shortcut %p -> %p\n", shortcut, new_s);
memcpy(new_s, shortcut, struct_size(new_s, index_key, keylen));
new_s->back_pointer = new_parent;
new_s->parent_slot = shortcut->parent_slot;
*new_ptr_pp = new_parent = assoc_array_shortcut_to_ptr(new_s);
new_ptr_pp = &new_s->next_node;
cursor = shortcut->next_node;
}
/* Duplicate the node at this position */
node = assoc_array_ptr_to_node(cursor);
new_n = kzalloc(sizeof(struct assoc_array_node), GFP_KERNEL);
if (!new_n)
goto enomem;
pr_devel("dup node %p -> %p\n", node, new_n);
new_n->back_pointer = new_parent;
new_n->parent_slot = node->parent_slot;
*new_ptr_pp = new_parent = assoc_array_node_to_ptr(new_n);
new_ptr_pp = NULL;
slot = 0;
continue_node:
/* Filter across any leaves and gc any subtrees */
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
ptr = node->slots[slot];
if (!ptr)
continue;
if (assoc_array_ptr_is_leaf(ptr)) {
if (iterator(assoc_array_ptr_to_leaf(ptr),
iterator_data))
/* The iterator will have done any reference
* counting on the object for us.
*/
new_n->slots[slot] = ptr;
continue;
}
new_ptr_pp = &new_n->slots[slot];
cursor = ptr;
goto descend;
}
retry_compress:
pr_devel("-- compress node %p --\n", new_n);
/* Count up the number of empty slots in this node and work out the
* subtree leaf count.
*/
new_n->nr_leaves_on_branch = 0;
nr_free = 0;
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
ptr = new_n->slots[slot];
if (!ptr)
nr_free++;
else if (assoc_array_ptr_is_leaf(ptr))
new_n->nr_leaves_on_branch++;
}
pr_devel("free=%d, leaves=%lu\n", nr_free, new_n->nr_leaves_on_branch);
/* See what we can fold in */
retained = false;
next_slot = 0;
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
struct assoc_array_shortcut *s;
struct assoc_array_node *child;
ptr = new_n->slots[slot];
if (!ptr || assoc_array_ptr_is_leaf(ptr))
continue;
s = NULL;
if (assoc_array_ptr_is_shortcut(ptr)) {
s = assoc_array_ptr_to_shortcut(ptr);
ptr = s->next_node;
}
child = assoc_array_ptr_to_node(ptr);
new_n->nr_leaves_on_branch += child->nr_leaves_on_branch;
if (child->nr_leaves_on_branch <= nr_free + 1) {
/* Fold the child node into this one */
pr_devel("[%d] fold node %lu/%d [nx %d]\n",
slot, child->nr_leaves_on_branch, nr_free + 1,
next_slot);
/* We would already have reaped an intervening shortcut
* on the way back up the tree.
*/
BUG_ON(s);
new_n->slots[slot] = NULL;
nr_free++;
if (slot < next_slot)
next_slot = slot;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
struct assoc_array_ptr *p = child->slots[i];
if (!p)
continue;
BUG_ON(assoc_array_ptr_is_meta(p));
while (new_n->slots[next_slot])
next_slot++;
BUG_ON(next_slot >= ASSOC_ARRAY_FAN_OUT);
new_n->slots[next_slot++] = p;
nr_free--;
}
kfree(child);
} else {
pr_devel("[%d] retain node %lu/%d [nx %d]\n",
slot, child->nr_leaves_on_branch, nr_free + 1,
next_slot);
retained = true;
}
}
if (retained && new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
pr_devel("internal nodes remain despite enough space, retrying\n");
goto retry_compress;
}
pr_devel("after: %lu\n", new_n->nr_leaves_on_branch);
nr_leaves_on_tree = new_n->nr_leaves_on_branch;
/* Excise this node if it is singly occupied by a shortcut */
if (nr_free == ASSOC_ARRAY_FAN_OUT - 1) {
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++)
if ((ptr = new_n->slots[slot]))
break;
if (assoc_array_ptr_is_meta(ptr) &&
assoc_array_ptr_is_shortcut(ptr)) {
pr_devel("excise node %p with 1 shortcut\n", new_n);
new_s = assoc_array_ptr_to_shortcut(ptr);
new_parent = new_n->back_pointer;
slot = new_n->parent_slot;
kfree(new_n);
if (!new_parent) {
new_s->back_pointer = NULL;
new_s->parent_slot = 0;
new_root = ptr;
goto gc_complete;
}
if (assoc_array_ptr_is_shortcut(new_parent)) {
/* We can discard any preceding shortcut also */
struct assoc_array_shortcut *s =
assoc_array_ptr_to_shortcut(new_parent);
pr_devel("excise preceding shortcut\n");
new_parent = new_s->back_pointer = s->back_pointer;
slot = new_s->parent_slot = s->parent_slot;
kfree(s);
if (!new_parent) {
new_s->back_pointer = NULL;
new_s->parent_slot = 0;
new_root = ptr;
goto gc_complete;
}
}
new_s->back_pointer = new_parent;
new_s->parent_slot = slot;
new_n = assoc_array_ptr_to_node(new_parent);
new_n->slots[slot] = ptr;
goto ascend_old_tree;
}
}
/* Excise any shortcuts we might encounter that point to nodes that
* only contain leaves.
*/
ptr = new_n->back_pointer;
if (!ptr)
goto gc_complete;
if (assoc_array_ptr_is_shortcut(ptr)) {
new_s = assoc_array_ptr_to_shortcut(ptr);
new_parent = new_s->back_pointer;
slot = new_s->parent_slot;
if (new_n->nr_leaves_on_branch <= ASSOC_ARRAY_FAN_OUT) {
struct assoc_array_node *n;
pr_devel("excise shortcut\n");
new_n->back_pointer = new_parent;
new_n->parent_slot = slot;
kfree(new_s);
if (!new_parent) {
new_root = assoc_array_node_to_ptr(new_n);
goto gc_complete;
}
n = assoc_array_ptr_to_node(new_parent);
n->slots[slot] = assoc_array_node_to_ptr(new_n);
}
} else {
new_parent = ptr;
}
new_n = assoc_array_ptr_to_node(new_parent);
ascend_old_tree:
ptr = node->back_pointer;
if (assoc_array_ptr_is_shortcut(ptr)) {
shortcut = assoc_array_ptr_to_shortcut(ptr);
slot = shortcut->parent_slot;
cursor = shortcut->back_pointer;
if (!cursor)
goto gc_complete;
} else {
slot = node->parent_slot;
cursor = ptr;
}
BUG_ON(!cursor);
node = assoc_array_ptr_to_node(cursor);
slot++;
goto continue_node;
gc_complete:
edit->set[0].to = new_root;
assoc_array_apply_edit(edit);
array->nr_leaves_on_tree = nr_leaves_on_tree;
return 0;
enomem:
pr_devel("enomem\n");
assoc_array_destroy_subtree(new_root, edit->ops);
kfree(edit);
return -ENOMEM;
}
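/*
 * Illustrative sketch (not part of the original file): a GC iterator that
 * keeps only live objects.  my_object_live()/my_object_get() are
 * hypothetical; note that the iterator itself must take the reference on
 * anything it decides to keep:
 *
 *	static bool my_gc_one(void *object, void *iterator_data)
 *	{
 *		if (!my_object_live(object))
 *			return false;	(discard)
 *		my_object_get(object);
 *		return true;		(keep)
 *	}
 *
 *	mutex_lock(&my_lock);
 *	ret = assoc_array_gc(&my_array, &my_ops, my_gc_one, NULL);
 *	mutex_unlock(&my_lock);
 */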
| linux-master | lib/assoc_array.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* LZO decompressor for the Linux kernel. Code borrowed from the lzo
* implementation by Markus Franz Xaver Johannes Oberhumer.
*
* Linux kernel adaptation:
* Copyright (C) 2009
* Albin Tonnerre, Free Electrons <[email protected]>
*
* Original code:
* Copyright (C) 1996-2005 Markus Franz Xaver Johannes Oberhumer
* All Rights Reserved.
*
* Markus F.X.J. Oberhumer
* <[email protected]>
* http://www.oberhumer.com/opensource/lzop/
*/
#ifdef STATIC
#define PREBOOT
#include "lzo/lzo1x_decompress_safe.c"
#else
#include <linux/decompress/unlzo.h>
#endif
#include <linux/types.h>
#include <linux/lzo.h>
#include <linux/decompress/mm.h>
#include <linux/compiler.h>
#include <asm/unaligned.h>
static const unsigned char lzop_magic[] = {
0x89, 0x4c, 0x5a, 0x4f, 0x00, 0x0d, 0x0a, 0x1a, 0x0a };
#define LZO_BLOCK_SIZE (256*1024l)
#define HEADER_HAS_FILTER 0x00000800L
#define HEADER_SIZE_MIN (9 + 7 + 4 + 8 + 1 + 4)
#define HEADER_SIZE_MAX (9 + 7 + 1 + 8 + 8 + 4 + 1 + 255 + 4)
STATIC inline long INIT parse_header(u8 *input, long *skip, long in_len)
{
int l;
u8 *parse = input;
u8 *end = input + in_len;
u16 version;
/*
* Check that there's enough input to possibly have a valid header.
* Then it is possible to parse several fields until the minimum
* size may have been used.
*/
if (in_len < HEADER_SIZE_MIN)
return 0;
	/* read magic: first 9 bytes */
for (l = 0; l < 9; l++) {
if (*parse++ != lzop_magic[l])
return 0;
}
	/* get version (2 bytes), skip library version (2),
* 'need to be extracted' version (2) and
* method (1) */
version = get_unaligned_be16(parse);
parse += 7;
if (version >= 0x0940)
parse++;
if (get_unaligned_be32(parse) & HEADER_HAS_FILTER)
parse += 8; /* flags + filter info */
else
parse += 4; /* flags */
/*
* At least mode, mtime_low, filename length, and checksum must
* be left to be parsed. If also mtime_high is present, it's OK
* because the next input buffer check is after reading the
* filename length.
*/
if (end - parse < 8 + 1 + 4)
return 0;
/* skip mode and mtime_low */
parse += 8;
if (version >= 0x0940)
parse += 4; /* skip mtime_high */
l = *parse++;
/* don't care about the file name, and skip checksum */
if (end - parse < l + 4)
return 0;
parse += l + 4;
*skip = parse - input;
return 1;
}
STATIC int INIT unlzo(u8 *input, long in_len,
long (*fill)(void *, unsigned long),
long (*flush)(void *, unsigned long),
u8 *output, long *posp,
void (*error) (char *x))
{
u8 r = 0;
long skip = 0;
u32 src_len, dst_len;
size_t tmp;
u8 *in_buf, *in_buf_save, *out_buf;
int ret = -1;
if (output) {
out_buf = output;
} else if (!flush) {
error("NULL output pointer and no flush function provided");
goto exit;
} else {
out_buf = malloc(LZO_BLOCK_SIZE);
if (!out_buf) {
error("Could not allocate output buffer");
goto exit;
}
}
if (input && fill) {
error("Both input pointer and fill function provided, don't know what to do");
goto exit_1;
} else if (input) {
in_buf = input;
} else if (!fill) {
error("NULL input pointer and missing fill function");
goto exit_1;
} else {
in_buf = malloc(lzo1x_worst_compress(LZO_BLOCK_SIZE));
if (!in_buf) {
error("Could not allocate input buffer");
goto exit_1;
}
}
in_buf_save = in_buf;
if (posp)
*posp = 0;
if (fill) {
/*
* Start from in_buf + HEADER_SIZE_MAX to make it possible
* to use memcpy() to copy the unused data to the beginning
* of the buffer. This way memmove() isn't needed which
* is missing from pre-boot environments of most archs.
*/
in_buf += HEADER_SIZE_MAX;
in_len = fill(in_buf, HEADER_SIZE_MAX);
}
if (!parse_header(in_buf, &skip, in_len)) {
error("invalid header");
goto exit_2;
}
in_buf += skip;
in_len -= skip;
if (fill) {
/* Move the unused data to the beginning of the buffer. */
memcpy(in_buf_save, in_buf, in_len);
in_buf = in_buf_save;
}
if (posp)
*posp = skip;
for (;;) {
/* read uncompressed block size */
if (fill && in_len < 4) {
skip = fill(in_buf + in_len, 4 - in_len);
if (skip > 0)
in_len += skip;
}
if (in_len < 4) {
error("file corrupted");
goto exit_2;
}
dst_len = get_unaligned_be32(in_buf);
in_buf += 4;
in_len -= 4;
/* exit if last block */
if (dst_len == 0) {
if (posp)
*posp += 4;
break;
}
if (dst_len > LZO_BLOCK_SIZE) {
error("dest len longer than block size");
goto exit_2;
}
/* read compressed block size, and skip block checksum info */
if (fill && in_len < 8) {
skip = fill(in_buf + in_len, 8 - in_len);
if (skip > 0)
in_len += skip;
}
if (in_len < 8) {
error("file corrupted");
goto exit_2;
}
src_len = get_unaligned_be32(in_buf);
in_buf += 8;
in_len -= 8;
if (src_len <= 0 || src_len > dst_len) {
error("file corrupted");
goto exit_2;
}
/* decompress */
if (fill && in_len < src_len) {
skip = fill(in_buf + in_len, src_len - in_len);
if (skip > 0)
in_len += skip;
}
if (in_len < src_len) {
error("file corrupted");
goto exit_2;
}
tmp = dst_len;
/* When the input data is not compressed at all,
* lzo1x_decompress_safe will fail, so call memcpy()
* instead */
if (unlikely(dst_len == src_len))
memcpy(out_buf, in_buf, src_len);
else {
r = lzo1x_decompress_safe((u8 *) in_buf, src_len,
out_buf, &tmp);
if (r != LZO_E_OK || dst_len != tmp) {
error("Compressed data violation");
goto exit_2;
}
}
if (flush && flush(out_buf, dst_len) != dst_len)
goto exit_2;
if (output)
out_buf += dst_len;
if (posp)
*posp += src_len + 12;
in_buf += src_len;
in_len -= src_len;
if (fill) {
/*
* If there happens to still be unused data left in
* in_buf, move it to the beginning of the buffer.
* Use a loop to avoid memmove() dependency.
*/
if (in_len > 0)
for (skip = 0; skip < in_len; ++skip)
in_buf_save[skip] = in_buf[skip];
in_buf = in_buf_save;
}
}
ret = 0;
exit_2:
if (!input)
free(in_buf_save);
exit_1:
if (!output)
free(out_buf);
exit:
return ret;
}
#ifdef PREBOOT
STATIC int INIT __decompress(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *out_buf, long olen,
long *pos,
void (*error)(char *x))
{
return unlzo(buf, len, fill, flush, out_buf, pos, error);
}
#endif
| linux-master | lib/decompress_unlzo.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
/*
* This is an implementation of the notion of "decrement a
* reference count, and return locked if it decremented to zero".
*
* NOTE NOTE NOTE! This is _not_ equivalent to
*
* if (atomic_dec_and_test(&atomic)) {
* spin_lock(&lock);
* return 1;
* }
* return 0;
*
* because the spin-lock and the decrement must be
* "atomic".
*/
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
if (atomic_add_unless(atomic, -1, 1))
return 0;
/* Otherwise do it the slow way */
spin_lock(lock);
if (atomic_dec_and_test(atomic))
return 1;
spin_unlock(lock);
return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock);
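/*
 * Illustrative sketch (not part of the original file): the classic use via
 * the atomic_dec_and_lock() wrapper is dropping a reference and, only when
 * it was the last one, unlinking and freeing the object under the lock that
 * protects the containing list.  my_obj and my_list_lock are hypothetical:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &my_list_lock)) {
 *		list_del(&obj->list);
 *		spin_unlock(&my_list_lock);
 *		kfree(obj);
 *	}
 */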
int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
unsigned long *flags)
{
/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
if (atomic_add_unless(atomic, -1, 1))
return 0;
/* Otherwise do it the slow way */
spin_lock_irqsave(lock, *flags);
if (atomic_dec_and_test(atomic))
return 1;
spin_unlock_irqrestore(lock, *flags);
return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
{
/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
if (atomic_add_unless(atomic, -1, 1))
return 0;
/* Otherwise do it the slow way */
raw_spin_lock(lock);
if (atomic_dec_and_test(atomic))
return 1;
raw_spin_unlock(lock);
return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_raw_lock);
int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
unsigned long *flags)
{
/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
if (atomic_add_unless(atomic, -1, 1))
return 0;
/* Otherwise do it the slow way */
raw_spin_lock_irqsave(lock, *flags);
if (atomic_dec_and_test(atomic))
return 1;
raw_spin_unlock_irqrestore(lock, *flags);
return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_raw_lock_irqsave);
| linux-master | lib/dec_and_lock.c |
// SPDX-License-Identifier: GPL-2.0
#ifdef STATIC
#define PREBOOT
/* Pre-boot environment: included */
/* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots
 * of errors about console_printk etc... on ARM */
#define _LINUX_KERNEL_H
#include "zlib_inflate/inftrees.c"
#include "zlib_inflate/inffast.c"
#include "zlib_inflate/inflate.c"
#ifdef CONFIG_ZLIB_DFLTCC
#include "zlib_dfltcc/dfltcc.c"
#include "zlib_dfltcc/dfltcc_inflate.c"
#endif
#else /* STATIC */
/* initramfs et al: linked */
#include <linux/zutil.h>
#include "zlib_inflate/inftrees.h"
#include "zlib_inflate/inffast.h"
#include "zlib_inflate/inflate.h"
#include "zlib_inflate/infutil.h"
#include <linux/decompress/inflate.h>
#endif /* STATIC */
#include <linux/decompress/mm.h>
#define GZIP_IOBUF_SIZE (16*1024)
static long INIT nofill(void *buffer, unsigned long len)
{
return -1;
}
/* Included from initramfs et al code */
static int INIT __gunzip(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *out_buf, long out_len,
long *pos,
void(*error)(char *x)) {
u8 *zbuf;
struct z_stream_s *strm;
int rc;
rc = -1;
if (flush) {
out_len = 0x8000; /* 32 K */
out_buf = malloc(out_len);
} else {
if (!out_len)
out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
}
if (!out_buf) {
error("Out of memory while allocating output buffer");
goto gunzip_nomem1;
}
if (buf)
zbuf = buf;
else {
zbuf = malloc(GZIP_IOBUF_SIZE);
len = 0;
}
if (!zbuf) {
error("Out of memory while allocating input buffer");
goto gunzip_nomem2;
}
strm = malloc(sizeof(*strm));
if (strm == NULL) {
error("Out of memory while allocating z_stream");
goto gunzip_nomem3;
}
strm->workspace = malloc(flush ? zlib_inflate_workspacesize() :
#ifdef CONFIG_ZLIB_DFLTCC
/* Always allocate the full workspace for DFLTCC */
zlib_inflate_workspacesize());
#else
sizeof(struct inflate_state));
#endif
if (strm->workspace == NULL) {
error("Out of memory while allocating workspace");
goto gunzip_nomem4;
}
if (!fill)
fill = nofill;
if (len == 0)
len = fill(zbuf, GZIP_IOBUF_SIZE);
/* verify the gzip header */
if (len < 10 ||
zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 0x08) {
if (pos)
*pos = 0;
error("Not a gzip file");
goto gunzip_5;
}
/* skip over gzip header (1f,8b,08... 10 bytes total +
* possible asciz filename)
*/
strm->next_in = zbuf + 10;
strm->avail_in = len - 10;
/* skip over asciz filename */
if (zbuf[3] & 0x8) {
do {
/*
* If the filename doesn't fit into the buffer,
* the file is very probably corrupt. Don't try
* to read more data.
*/
if (strm->avail_in == 0) {
error("header error");
goto gunzip_5;
}
--strm->avail_in;
} while (*strm->next_in++);
}
strm->next_out = out_buf;
strm->avail_out = out_len;
rc = zlib_inflateInit2(strm, -MAX_WBITS);
#ifdef CONFIG_ZLIB_DFLTCC
/* Always keep the window for DFLTCC */
#else
if (!flush) {
WS(strm)->inflate_state.wsize = 0;
WS(strm)->inflate_state.window = NULL;
}
#endif
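/*
 * Main decompression loop: refill the input buffer via fill() whenever it
 * runs dry, inflate into out_buf, and, when a flush() callback is given,
 * hand each produced chunk to it before reusing out_buf.
 */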
while (rc == Z_OK) {
if (strm->avail_in == 0) {
/* TODO: handle case where both pos and fill are set */
len = fill(zbuf, GZIP_IOBUF_SIZE);
if (len < 0) {
rc = -1;
error("read error");
break;
}
strm->next_in = zbuf;
strm->avail_in = len;
}
rc = zlib_inflate(strm, 0);
/* Write any data generated */
if (flush && strm->next_out > out_buf) {
long l = strm->next_out - out_buf;
if (l != flush(out_buf, l)) {
rc = -1;
error("write error");
break;
}
strm->next_out = out_buf;
strm->avail_out = out_len;
}
/* after Z_FINISH, only Z_STREAM_END is "we unpacked it all" */
if (rc == Z_STREAM_END) {
rc = 0;
break;
} else if (rc != Z_OK) {
error("uncompression error");
rc = -1;
}
}
zlib_inflateEnd(strm);
if (pos)
/* add + 8 to skip over trailer */
*pos = strm->next_in - zbuf+8;
gunzip_5:
free(strm->workspace);
gunzip_nomem4:
free(strm);
gunzip_nomem3:
if (!buf)
free(zbuf);
gunzip_nomem2:
if (flush)
free(out_buf);
gunzip_nomem1:
return rc; /* returns Z_OK (0) if successful */
}
#ifndef PREBOOT
STATIC int INIT gunzip(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *out_buf,
long *pos,
void (*error)(char *x))
{
return __gunzip(buf, len, fill, flush, out_buf, 0, pos, error);
}
#else
STATIC int INIT __decompress(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *out_buf, long out_len,
long *pos,
void (*error)(char *x))
{
return __gunzip(buf, len, fill, flush, out_buf, out_len, pos, error);
}
#endif
| linux-master | lib/decompress_inflate.c |
/*
* Extracted from glob.c
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/glob.h>
#include <linux/printk.h>
/* Boot with "glob.verbose=1" to show successful tests, too */
static bool verbose = false;
module_param(verbose, bool, 0);
struct glob_test {
char const *pat, *str;
bool expected;
};
static bool __pure __init test(char const *pat, char const *str, bool expected)
{
bool match = glob_match(pat, str);
bool success = match == expected;
/* Can't get string literals into a particular section, so... */
static char const msg_error[] __initconst =
KERN_ERR "glob: \"%s\" vs. \"%s\": %s *** ERROR ***\n";
static char const msg_ok[] __initconst =
KERN_DEBUG "glob: \"%s\" vs. \"%s\": %s OK\n";
static char const mismatch[] __initconst = "mismatch";
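/*
 * "mismatch" + 3 points at "match", so mismatch + 3*match below selects
 * the right word for the message without a second string literal.
 */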
char const *message;
if (!success)
message = msg_error;
else if (verbose)
message = msg_ok;
else
return success;
printk(message, pat, str, mismatch + 3*match);
return success;
}
/*
* The tests are all jammed together in one array to make it simpler
* to place that array in the .init.rodata section. The obvious
* "array of structures containing char *" has no way to force the
* pointed-to strings to be in a particular section.
*
* Anyway, a test consists of:
* 1. Expected glob_match result: '1' or '0'.
* 2. Pattern to match: null-terminated string
* 3. String to match against: null-terminated string
*
* The list of tests is terminated with a final '\0' instead of
* a glob_match result character.
*/
static char const glob_tests[] __initconst =
/* Some basic tests */
"1" "a\0" "a\0"
"0" "a\0" "b\0"
"0" "a\0" "aa\0"
"0" "a\0" "\0"
"1" "\0" "\0"
"0" "\0" "a\0"
/* Simple character class tests */
"1" "[a]\0" "a\0"
"0" "[a]\0" "b\0"
"0" "[!a]\0" "a\0"
"1" "[!a]\0" "b\0"
"1" "[ab]\0" "a\0"
"1" "[ab]\0" "b\0"
"0" "[ab]\0" "c\0"
"1" "[!ab]\0" "c\0"
"1" "[a-c]\0" "b\0"
"0" "[a-c]\0" "d\0"
/* Corner cases in character class parsing */
"1" "[a-c-e-g]\0" "-\0"
"0" "[a-c-e-g]\0" "d\0"
"1" "[a-c-e-g]\0" "f\0"
"1" "[]a-ceg-ik[]\0" "a\0"
"1" "[]a-ceg-ik[]\0" "]\0"
"1" "[]a-ceg-ik[]\0" "[\0"
"1" "[]a-ceg-ik[]\0" "h\0"
"0" "[]a-ceg-ik[]\0" "f\0"
"0" "[!]a-ceg-ik[]\0" "h\0"
"0" "[!]a-ceg-ik[]\0" "]\0"
"1" "[!]a-ceg-ik[]\0" "f\0"
/* Simple wild cards */
"1" "?\0" "a\0"
"0" "?\0" "aa\0"
"0" "??\0" "a\0"
"1" "?x?\0" "axb\0"
"0" "?x?\0" "abx\0"
"0" "?x?\0" "xab\0"
/* Asterisk wild cards (backtracking) */
"0" "*??\0" "a\0"
"1" "*??\0" "ab\0"
"1" "*??\0" "abc\0"
"1" "*??\0" "abcd\0"
"0" "??*\0" "a\0"
"1" "??*\0" "ab\0"
"1" "??*\0" "abc\0"
"1" "??*\0" "abcd\0"
"0" "?*?\0" "a\0"
"1" "?*?\0" "ab\0"
"1" "?*?\0" "abc\0"
"1" "?*?\0" "abcd\0"
"1" "*b\0" "b\0"
"1" "*b\0" "ab\0"
"0" "*b\0" "ba\0"
"1" "*b\0" "bb\0"
"1" "*b\0" "abb\0"
"1" "*b\0" "bab\0"
"1" "*bc\0" "abbc\0"
"1" "*bc\0" "bc\0"
"1" "*bc\0" "bbc\0"
"1" "*bc\0" "bcbc\0"
/* Multiple asterisks (complex backtracking) */
"1" "*ac*\0" "abacadaeafag\0"
"1" "*ac*ae*ag*\0" "abacadaeafag\0"
"1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0"
"0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0"
"1" "*abcd*\0" "abcabcabcabcdefg\0"
"1" "*ab*cd*\0" "abcabcabcabcdefg\0"
"1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0"
"0" "*abcd*\0" "abcabcabcabcefg\0"
"0" "*ab*cd*\0" "abcabcabcabcefg\0";
static int __init glob_init(void)
{
unsigned successes = 0;
unsigned n = 0;
char const *p = glob_tests;
static char const message[] __initconst =
KERN_INFO "glob: %u self-tests passed, %u failed\n";
/*
* Tests are jammed together in a string. The first byte is '1'
* or '0' to indicate the expected outcome, or '\0' to indicate the
* end of the tests. Then come two null-terminated strings: the
* pattern and the string to match it against.
*/
while (*p) {
bool expected = *p++ & 1;
char const *pat = p;
p += strlen(p) + 1;
successes += test(pat, p, expected);
p += strlen(p) + 1;
n++;
}
n -= successes;
printk(message, successes, n);
/* What's the errno for "kernel bug detected"? Guess... */
return n ? -ECANCELED : 0;
}
/* We need a dummy exit function to allow unload */
static void __exit glob_fini(void) { }
module_init(glob_init);
module_exit(glob_fini);
MODULE_DESCRIPTION("glob(7) matching tests");
MODULE_LICENSE("Dual MIT/GPL");
| linux-master | lib/globtest.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* lib/ts_kmp.c Knuth-Morris-Pratt text search implementation
*
* Authors: Thomas Graf <[email protected]>
*
* ==========================================================================
*
* Implements a linear-time string-matching algorithm due to Knuth,
* Morris, and Pratt [1]. Their algorithm avoids the explicit
* computation of the transition function DELTA altogether. Its
* matching time is O(n), for n being length(text), using just an
* auxiliary function PI[1..m], for m being length(pattern),
* precomputed from the pattern in time O(m). The array PI allows
* the transition function DELTA to be computed efficiently
* "on the fly" as needed. Roughly speaking, for any state
* "q" = 0,1,...,m and any character "a" in SIGMA, the value
* PI["q"] contains the information that is independent of "a" and
* is needed to compute DELTA("q", "a") [2]. Since the array PI
* has only m entries, whereas DELTA has O(m|SIGMA|) entries, we
* save a factor of |SIGMA| in the preprocessing time by computing
* PI rather than DELTA.
*
* [1] Cormen, Leiserson, Rivest, Stein
* Introduction to Algorithms, 2nd Edition, MIT Press
* [2] See finite automaton theory
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/textsearch.h>
struct ts_kmp
{
u8 * pattern;
unsigned int pattern_len;
unsigned int prefix_tbl[];
};
static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
{
struct ts_kmp *kmp = ts_config_priv(conf);
unsigned int i, q = 0, text_len, consumed = state->offset;
const u8 *text;
const int icase = conf->flags & TS_IGNORECASE;
for (;;) {
text_len = conf->get_next_block(consumed, &text, conf, state);
if (unlikely(text_len == 0))
break;
for (i = 0; i < text_len; i++) {
while (q > 0 && kmp->pattern[q]
!= (icase ? toupper(text[i]) : text[i]))
q = kmp->prefix_tbl[q - 1];
if (kmp->pattern[q]
== (icase ? toupper(text[i]) : text[i]))
q++;
if (unlikely(q == kmp->pattern_len)) {
state->offset = consumed + i + 1;
return state->offset - kmp->pattern_len;
}
}
consumed += text_len;
}
return UINT_MAX;
}
static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len,
unsigned int *prefix_tbl, int flags)
{
unsigned int k, q;
const u8 icase = flags & TS_IGNORECASE;
for (k = 0, q = 1; q < len; q++) {
while (k > 0 && (icase ? toupper(pattern[k]) : pattern[k])
!= (icase ? toupper(pattern[q]) : pattern[q]))
k = prefix_tbl[k-1];
if ((icase ? toupper(pattern[k]) : pattern[k])
== (icase ? toupper(pattern[q]) : pattern[q]))
k++;
prefix_tbl[q] = k;
}
}
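/*
 * Worked example (not part of the original file): for the pattern
 * "ababaca" the loop above computes prefix_tbl[1..6] = {0, 1, 2, 3, 0, 1};
 * prefix_tbl[q] is the length of the longest proper prefix of
 * pattern[0..q] that is also a suffix of it, which is the state kmp_find()
 * falls back to on a mismatch.  (prefix_tbl[0] is not written by the loop.)
 */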
static struct ts_config *kmp_init(const void *pattern, unsigned int len,
gfp_t gfp_mask, int flags)
{
struct ts_config *conf;
struct ts_kmp *kmp;
int i;
unsigned int prefix_tbl_len = len * sizeof(unsigned int);
size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len;
conf = alloc_ts_config(priv_size, gfp_mask);
if (IS_ERR(conf))
return conf;
conf->flags = flags;
kmp = ts_config_priv(conf);
kmp->pattern_len = len;
compute_prefix_tbl(pattern, len, kmp->prefix_tbl, flags);
kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len;
if (flags & TS_IGNORECASE)
for (i = 0; i < len; i++)
kmp->pattern[i] = toupper(((u8 *)pattern)[i]);
else
memcpy(kmp->pattern, pattern, len);
return conf;
}
static void *kmp_get_pattern(struct ts_config *conf)
{
struct ts_kmp *kmp = ts_config_priv(conf);
return kmp->pattern;
}
static unsigned int kmp_get_pattern_len(struct ts_config *conf)
{
struct ts_kmp *kmp = ts_config_priv(conf);
return kmp->pattern_len;
}
static struct ts_ops kmp_ops = {
.name = "kmp",
.find = kmp_find,
.init = kmp_init,
.get_pattern = kmp_get_pattern,
.get_pattern_len = kmp_get_pattern_len,
.owner = THIS_MODULE,
.list = LIST_HEAD_INIT(kmp_ops.list)
};
static int __init init_kmp(void)
{
return textsearch_register(&kmp_ops);
}
static void __exit exit_kmp(void)
{
textsearch_unregister(&kmp_ops);
}
MODULE_LICENSE("GPL");
module_init(init_kmp);
module_exit(exit_kmp);
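/*
 * Hypothetical usage sketch (not part of this file): searching a flat
 * buffer with the "kmp" algorithm through the generic textsearch API.
 * The function and parameter names are illustrative only.
 */
static inline unsigned int demo_kmp_search(const void *haystack,
					   unsigned int haystack_len,
					   const void *needle,
					   unsigned int needle_len)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", needle, needle_len, GFP_KERNEL, 0);
	if (IS_ERR(conf))
		return UINT_MAX;

	/* Returns the offset of the first match, or UINT_MAX if none. */
	pos = textsearch_find_continuous(conf, &state, haystack, haystack_len);
	textsearch_destroy(conf);
	return pos;
}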
| linux-master | lib/ts_kmp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* T10 Data Integrity Field CRC16 calculation
*
* Copyright (c) 2007 Oracle Corporation. All rights reserved.
* Written by Martin K. Petersen <[email protected]>
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc-t10dif.h>
#include <linux/err.h>
#include <linux/init.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <linux/static_key.h>
#include <linux/notifier.h>
static struct crypto_shash __rcu *crct10dif_tfm;
static DEFINE_STATIC_KEY_TRUE(crct10dif_fallback);
static DEFINE_MUTEX(crc_t10dif_mutex);
static struct work_struct crct10dif_rehash_work;
static int crc_t10dif_notify(struct notifier_block *self, unsigned long val, void *data)
{
struct crypto_alg *alg = data;
if (val != CRYPTO_MSG_ALG_LOADED ||
strcmp(alg->cra_name, CRC_T10DIF_STRING))
return NOTIFY_DONE;
schedule_work(&crct10dif_rehash_work);
return NOTIFY_OK;
}
static void crc_t10dif_rehash(struct work_struct *work)
{
struct crypto_shash *new, *old;
mutex_lock(&crc_t10dif_mutex);
old = rcu_dereference_protected(crct10dif_tfm,
lockdep_is_held(&crc_t10dif_mutex));
new = crypto_alloc_shash(CRC_T10DIF_STRING, 0, 0);
if (IS_ERR(new)) {
mutex_unlock(&crc_t10dif_mutex);
return;
}
rcu_assign_pointer(crct10dif_tfm, new);
mutex_unlock(&crc_t10dif_mutex);
if (old) {
synchronize_rcu();
crypto_free_shash(old);
} else {
static_branch_disable(&crct10dif_fallback);
}
}
static struct notifier_block crc_t10dif_nb = {
.notifier_call = crc_t10dif_notify,
};
__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
{
struct {
struct shash_desc shash;
__u16 crc;
} desc;
int err;
if (static_branch_unlikely(&crct10dif_fallback))
return crc_t10dif_generic(crc, buffer, len);
rcu_read_lock();
desc.shash.tfm = rcu_dereference(crct10dif_tfm);
desc.crc = crc;
err = crypto_shash_update(&desc.shash, buffer, len);
rcu_read_unlock();
BUG_ON(err);
return desc.crc;
}
EXPORT_SYMBOL(crc_t10dif_update);
__u16 crc_t10dif(const unsigned char *buffer, size_t len)
{
return crc_t10dif_update(0, buffer, len);
}
EXPORT_SYMBOL(crc_t10dif);
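/*
 * Hypothetical usage sketch (not part of this file): computing the CRC of
 * data split across two fragments, either by chaining crc_t10dif_update()
 * or by starting with the one-shot helper.  Names are illustrative only.
 */
static inline __u16 demo_crc_of_fragments(const unsigned char *a, size_t a_len,
					  const unsigned char *b, size_t b_len)
{
	__u16 crc = crc_t10dif(a, a_len);	/* same as crc_t10dif_update(0, a, a_len) */

	return crc_t10dif_update(crc, b, b_len);
}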
static int __init crc_t10dif_mod_init(void)
{
INIT_WORK(&crct10dif_rehash_work, crc_t10dif_rehash);
crypto_register_notifier(&crc_t10dif_nb);
crc_t10dif_rehash(&crct10dif_rehash_work);
return 0;
}
static void __exit crc_t10dif_mod_fini(void)
{
crypto_unregister_notifier(&crc_t10dif_nb);
cancel_work_sync(&crct10dif_rehash_work);
crypto_free_shash(rcu_dereference_protected(crct10dif_tfm, 1));
}
module_init(crc_t10dif_mod_init);
module_exit(crc_t10dif_mod_fini);
static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp)
{
struct crypto_shash *tfm;
int len;
if (static_branch_unlikely(&crct10dif_fallback))
return sprintf(buffer, "fallback\n");
rcu_read_lock();
tfm = rcu_dereference(crct10dif_tfm);
len = snprintf(buffer, PAGE_SIZE, "%s\n",
crypto_shash_driver_name(tfm));
rcu_read_unlock();
return len;
}
module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0444);
MODULE_DESCRIPTION("T10 DIF CRC calculation (library API)");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crct10dif");
| linux-master | lib/crc-t10dif.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
* Author: Gabriele Paoloni <[email protected]>
* Author: Zhichang Yuan <[email protected]>
* Author: John Garry <[email protected]>
*/
#define pr_fmt(fmt) "LOGIC PIO: " fmt
#include <linux/of.h>
#include <linux/io.h>
#include <linux/logic_pio.h>
#include <linux/mm.h>
#include <linux/rculist.h>
#include <linux/sizes.h>
#include <linux/slab.h>
/* The unique hardware address list */
static LIST_HEAD(io_range_list);
static DEFINE_MUTEX(io_range_mutex);
/**
* logic_pio_register_range - register logical PIO range for a host
* @new_range: pointer to the IO range to be registered.
*
* Returns 0 on success, the error code in case of failure.
* If the range already exists, -EEXIST will be returned, which should be
* considered a success.
*
* Register a new IO range node in the IO range list.
*/
int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
{
struct logic_pio_hwaddr *range;
resource_size_t start;
resource_size_t end;
resource_size_t mmio_end = 0;
resource_size_t iio_sz = MMIO_UPPER_LIMIT;
int ret = 0;
if (!new_range || !new_range->fwnode || !new_range->size ||
(new_range->flags == LOGIC_PIO_INDIRECT && !new_range->ops))
return -EINVAL;
start = new_range->hw_start;
end = new_range->hw_start + new_range->size;
mutex_lock(&io_range_mutex);
list_for_each_entry(range, &io_range_list, list) {
if (range->fwnode == new_range->fwnode) {
/* range already there */
ret = -EEXIST;
goto end_register;
}
if (range->flags == LOGIC_PIO_CPU_MMIO &&
new_range->flags == LOGIC_PIO_CPU_MMIO) {
/* for MMIO ranges we need to check for overlap */
if (start >= range->hw_start + range->size ||
end < range->hw_start) {
mmio_end = range->io_start + range->size;
} else {
ret = -EFAULT;
goto end_register;
}
} else if (range->flags == LOGIC_PIO_INDIRECT &&
new_range->flags == LOGIC_PIO_INDIRECT) {
iio_sz += range->size;
}
}
/* range not registered yet, check for available space */
if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
/* if it's too big check if 64K space can be reserved */
if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
ret = -E2BIG;
goto end_register;
}
new_range->size = SZ_64K;
pr_warn("Requested IO range too big, new size set to 64K\n");
}
new_range->io_start = mmio_end;
} else if (new_range->flags == LOGIC_PIO_INDIRECT) {
if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
ret = -E2BIG;
goto end_register;
}
new_range->io_start = iio_sz;
} else {
/* invalid flag */
ret = -EINVAL;
goto end_register;
}
list_add_tail_rcu(&new_range->list, &io_range_list);
end_register:
mutex_unlock(&io_range_mutex);
return ret;
}
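/*
 * Hypothetical usage sketch (not part of this file): a host driver filling
 * in a range descriptor for an indirect (ops-based) I/O window before
 * registering it.  The descriptor, fwnode and ops are supplied by the
 * caller and are illustrative only.
 */
static inline int demo_register_indirect_range(struct logic_pio_hwaddr *range,
					       struct fwnode_handle *fwnode,
					       struct logic_pio_host_ops *ops,
					       void *hostdata,
					       resource_size_t size)
{
	range->fwnode = fwnode;
	range->size = size;
	range->flags = LOGIC_PIO_INDIRECT;
	range->ops = ops;
	range->hostdata = hostdata;

	/* io_start is assigned by the core on successful registration. */
	return logic_pio_register_range(range);
}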
/**
* logic_pio_unregister_range - unregister a logical PIO range for a host
* @range: pointer to the IO range which has been already registered.
*
* Unregister a previously-registered IO range node.
*/
void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
{
mutex_lock(&io_range_mutex);
list_del_rcu(&range->list);
mutex_unlock(&io_range_mutex);
synchronize_rcu();
}
/**
* find_io_range_by_fwnode - find logical PIO range for given FW node
* @fwnode: FW node handle associated with logical PIO range
*
* Returns pointer to node on success, NULL otherwise.
*
* Traverse the io_range_list to find the registered node for @fwnode.
*/
struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
{
struct logic_pio_hwaddr *range, *found_range = NULL;
rcu_read_lock();
list_for_each_entry_rcu(range, &io_range_list, list) {
if (range->fwnode == fwnode) {
found_range = range;
break;
}
}
rcu_read_unlock();
return found_range;
}
/* Return a registered range given an input PIO token */
static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
{
struct logic_pio_hwaddr *range, *found_range = NULL;
rcu_read_lock();
list_for_each_entry_rcu(range, &io_range_list, list) {
if (in_range(pio, range->io_start, range->size)) {
found_range = range;
break;
}
}
rcu_read_unlock();
if (!found_range)
pr_err("PIO entry token 0x%lx invalid\n", pio);
return found_range;
}
/**
* logic_pio_to_hwaddr - translate logical PIO to HW address
* @pio: logical PIO value
*
* Returns HW address if valid, ~0 otherwise.
*
* Translate the input logical PIO to the corresponding hardware address.
* The input PIO should be unique in the whole logical PIO space.
*/
resource_size_t logic_pio_to_hwaddr(unsigned long pio)
{
struct logic_pio_hwaddr *range;
range = find_io_range(pio);
if (range)
return range->hw_start + pio - range->io_start;
return (resource_size_t)~0;
}
/**
* logic_pio_trans_hwaddr - translate HW address to logical PIO
* @fwnode: FW node reference for the host
* @addr: Host-relative HW address
* @size: size to translate
*
* Returns Logical PIO value if successful, ~0UL otherwise
*/
unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
resource_size_t addr, resource_size_t size)
{
struct logic_pio_hwaddr *range;
range = find_io_range_by_fwnode(fwnode);
if (!range || range->flags == LOGIC_PIO_CPU_MMIO) {
pr_err("IO range not found or invalid\n");
return ~0UL;
}
if (range->size < size) {
pr_err("resource size %pa cannot fit in IO range size %pa\n",
&size, &range->size);
return ~0UL;
}
return addr - range->hw_start + range->io_start;
}
unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
{
struct logic_pio_hwaddr *range;
rcu_read_lock();
list_for_each_entry_rcu(range, &io_range_list, list) {
if (range->flags != LOGIC_PIO_CPU_MMIO)
continue;
if (in_range(addr, range->hw_start, range->size)) {
unsigned long cpuaddr;
cpuaddr = addr - range->hw_start + range->io_start;
rcu_read_unlock();
return cpuaddr;
}
}
rcu_read_unlock();
pr_err("addr %pa not registered in io_range_list\n", &addr);
return ~0UL;
}
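/*
 * The accessors generated below dispatch on the logical PIO token: tokens
 * below MMIO_UPPER_LIMIT use the direct _in/_out helpers (CPU-mapped I/O
 * space), while tokens in [MMIO_UPPER_LIMIT, IO_SPACE_LIMIT) are routed to
 * the registered host's indirect ops.
 */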
#if defined(CONFIG_INDIRECT_PIO) && defined(PCI_IOBASE)
#define BUILD_LOGIC_IO(bwl, type) \
type logic_in##bwl(unsigned long addr) \
{ \
type ret = (type)~0; \
\
if (addr < MMIO_UPPER_LIMIT) { \
ret = _in##bwl(addr); \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
if (entry) \
ret = entry->ops->in(entry->hostdata, \
addr, sizeof(type)); \
else \
WARN_ON_ONCE(1); \
} \
return ret; \
} \
\
void logic_out##bwl(type value, unsigned long addr) \
{ \
if (addr < MMIO_UPPER_LIMIT) { \
_out##bwl(value, addr); \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
if (entry) \
entry->ops->out(entry->hostdata, \
addr, value, sizeof(type)); \
else \
WARN_ON_ONCE(1); \
} \
} \
\
void logic_ins##bwl(unsigned long addr, void *buffer, \
unsigned int count) \
{ \
if (addr < MMIO_UPPER_LIMIT) { \
reads##bwl(PCI_IOBASE + addr, buffer, count); \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
if (entry) \
entry->ops->ins(entry->hostdata, \
addr, buffer, sizeof(type), count); \
else \
WARN_ON_ONCE(1); \
} \
\
} \
\
void logic_outs##bwl(unsigned long addr, const void *buffer, \
unsigned int count) \
{ \
if (addr < MMIO_UPPER_LIMIT) { \
writes##bwl(PCI_IOBASE + addr, buffer, count); \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
if (entry) \
entry->ops->outs(entry->hostdata, \
addr, buffer, sizeof(type), count); \
else \
WARN_ON_ONCE(1); \
} \
}
BUILD_LOGIC_IO(b, u8)
EXPORT_SYMBOL(logic_inb);
EXPORT_SYMBOL(logic_insb);
EXPORT_SYMBOL(logic_outb);
EXPORT_SYMBOL(logic_outsb);
BUILD_LOGIC_IO(w, u16)
EXPORT_SYMBOL(logic_inw);
EXPORT_SYMBOL(logic_insw);
EXPORT_SYMBOL(logic_outw);
EXPORT_SYMBOL(logic_outsw);
BUILD_LOGIC_IO(l, u32)
EXPORT_SYMBOL(logic_inl);
EXPORT_SYMBOL(logic_insl);
EXPORT_SYMBOL(logic_outl);
EXPORT_SYMBOL(logic_outsl);
#endif /* CONFIG_INDIRECT_PIO && PCI_IOBASE */
| linux-master | lib/logic_pio.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Generic Timer-queue
*
* Manages a simple queue of timers, ordered by expiration time.
* Uses rbtrees for quick list adds and expiration.
*
* NOTE: All of the following functions need to be serialized
* to avoid races. No locking is done by this library code.
*/
#include <linux/bug.h>
#include <linux/timerqueue.h>
#include <linux/rbtree.h>
#include <linux/export.h>
#define __node_2_tq(_n) \
rb_entry((_n), struct timerqueue_node, node)
static inline bool __timerqueue_less(struct rb_node *a, const struct rb_node *b)
{
return __node_2_tq(a)->expires < __node_2_tq(b)->expires;
}
/**
* timerqueue_add - Adds timer to timerqueue.
*
* @head: head of timerqueue
* @node: timer node to be added
*
* Adds the timer node to the timerqueue, sorted by the node's expires
* value. Returns true if the newly added timer is the first expiring timer in
* the queue.
*/
bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
{
/* Make sure we don't add nodes that are already added */
WARN_ON_ONCE(!RB_EMPTY_NODE(&node->node));
return rb_add_cached(&node->node, &head->rb_root, __timerqueue_less);
}
EXPORT_SYMBOL_GPL(timerqueue_add);
/**
* timerqueue_del - Removes a timer from the timerqueue.
*
* @head: head of timerqueue
* @node: timer node to be removed
*
* Removes the timer node from the timerqueue. Returns true if the queue is
* not empty after the remove.
*/
bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
{
WARN_ON_ONCE(RB_EMPTY_NODE(&node->node));
rb_erase_cached(&node->node, &head->rb_root);
RB_CLEAR_NODE(&node->node);
return !RB_EMPTY_ROOT(&head->rb_root.rb_root);
}
EXPORT_SYMBOL_GPL(timerqueue_del);
/**
* timerqueue_iterate_next - Returns the timer after the provided timer
*
* @node: Pointer to a timer.
*
* Provides the timer that is after the given node. This is used, when
* necessary, to iterate through the list of timers in a timer list
* without modifying the list.
*/
struct timerqueue_node *timerqueue_iterate_next(struct timerqueue_node *node)
{
struct rb_node *next;
if (!node)
return NULL;
next = rb_next(&node->node);
if (!next)
return NULL;
return container_of(next, struct timerqueue_node, node);
}
EXPORT_SYMBOL_GPL(timerqueue_iterate_next);
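/*
 * Hypothetical usage sketch (not part of this file): queueing a timer and
 * checking whether it became the new head.  The caller provides its own
 * serialization (see the NOTE at the top of this file); all names below
 * are illustrative only.
 */
static inline bool demo_queue_timer(struct timerqueue_head *head,
				    struct timerqueue_node *node,
				    ktime_t expires)
{
	timerqueue_init(node);
	node->expires = expires;

	/* Returns true when @node is now the earliest-expiring entry. */
	return timerqueue_add(head, node);
}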
| linux-master | lib/timerqueue.c |
// SPDX-License-Identifier: GPL-2.0
/*
* NMI backtrace support
*
* Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King,
* with the following header:
*
* HW NMI watchdog support
*
* started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
*
* Arch specific calls to support NMI watchdog
*
* Bits copied from original nmi.c file
*/
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/sched/debug.h>
#ifdef arch_trigger_cpumask_backtrace
/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
/* "in progress" flag of arch_trigger_cpumask_backtrace */
static unsigned long backtrace_flag;
/*
* When raise() is called it will be passed a pointer to the
* backtrace_mask. Architectures that call nmi_cpu_backtrace()
* directly from their raise() functions may rely on the mask
* they are passed being updated as a side effect of this call.
*/
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
int exclude_cpu,
void (*raise)(cpumask_t *mask))
{
int i, this_cpu = get_cpu();
if (test_and_set_bit(0, &backtrace_flag)) {
/*
* If there is already a trigger_all_cpu_backtrace() in progress
* (backtrace_flag == 1), don't output double cpu dump infos.
*/
put_cpu();
return;
}
cpumask_copy(to_cpumask(backtrace_mask), mask);
if (exclude_cpu != -1)
cpumask_clear_cpu(exclude_cpu, to_cpumask(backtrace_mask));
/*
* Don't try to send an NMI to this cpu; it may work on some
* architectures, but on others it may not, and we'll get
* information at least as useful just by doing a dump_stack() here.
* Note that nmi_cpu_backtrace(NULL) will clear the cpu bit.
*/
if (cpumask_test_cpu(this_cpu, to_cpumask(backtrace_mask)))
nmi_cpu_backtrace(NULL);
if (!cpumask_empty(to_cpumask(backtrace_mask))) {
pr_info("Sending NMI from CPU %d to CPUs %*pbl:\n",
this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));
nmi_backtrace_stall_snap(to_cpumask(backtrace_mask));
raise(to_cpumask(backtrace_mask));
}
/* Wait for up to 10 seconds for all CPUs to do the backtrace */
for (i = 0; i < 10 * 1000; i++) {
if (cpumask_empty(to_cpumask(backtrace_mask)))
break;
mdelay(1);
touch_softlockup_watchdog();
}
nmi_backtrace_stall_check(to_cpumask(backtrace_mask));
/*
* Force flush any remote buffers that might be stuck in IRQ context
* and therefore could not run their irq_work.
*/
printk_trigger_flush();
clear_bit_unlock(0, &backtrace_flag);
put_cpu();
}
// Dump stacks even for idle CPUs.
static bool backtrace_idle;
module_param(backtrace_idle, bool, 0644);
bool nmi_cpu_backtrace(struct pt_regs *regs)
{
int cpu = smp_processor_id();
unsigned long flags;
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
/*
* Allow nested NMI backtraces while serializing
* against other CPUs.
*/
printk_cpu_sync_get_irqsave(flags);
if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
cpu, (void *)instruction_pointer(regs));
} else {
pr_warn("NMI backtrace for cpu %d\n", cpu);
if (regs)
show_regs(regs);
else
dump_stack();
}
printk_cpu_sync_put_irqrestore(flags);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return true;
}
return false;
}
NOKPROBE_SYMBOL(nmi_cpu_backtrace);
#endif
| linux-master | lib/nmi_backtrace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This is a maximally equidistributed combined Tausworthe generator
* based on code from GNU Scientific Library 1.5 (30 Jun 2004)
*
* lfsr113 version:
*
* x_n = (s1_n ^ s2_n ^ s3_n ^ s4_n)
*
* s1_{n+1} = (((s1_n & 4294967294) << 18) ^ (((s1_n << 6) ^ s1_n) >> 13))
* s2_{n+1} = (((s2_n & 4294967288) << 2) ^ (((s2_n << 2) ^ s2_n) >> 27))
* s3_{n+1} = (((s3_n & 4294967280) << 7) ^ (((s3_n << 13) ^ s3_n) >> 21))
* s4_{n+1} = (((s4_n & 4294967168) << 13) ^ (((s4_n << 3) ^ s4_n) >> 12))
*
* The period of this generator is about 2^113 (see erratum paper).
*
* From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
* Generators", Mathematics of Computation, 65, 213 (1996), 203--213:
* http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
* ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
*
* There is an erratum in the paper "Tables of Maximally Equidistributed
* Combined LFSR Generators", Mathematics of Computation, 68, 225 (1999),
* 261--269: http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
*
* ... the k_j most significant bits of z_j must be non-zero,
* for each j. (Note: this restriction also applies to the
* computer code given in [4], but was mistakenly not mentioned
* in that paper.)
*
* This affects the seeding procedure by imposing the requirement
* s1 > 1, s2 > 7, s3 > 15, s4 > 127.
*/
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
/**
* prandom_u32_state - seeded pseudo-random number generator.
* @state: pointer to state structure holding seeded state.
*
* This is used for pseudo-randomness with no outside seeding.
* For more random results, use get_random_u32().
*/
u32 prandom_u32_state(struct rnd_state *state)
{
#define TAUSWORTHE(s, a, b, c, d) ((s & c) << d) ^ (((s << a) ^ s) >> b)
state->s1 = TAUSWORTHE(state->s1, 6U, 13U, 4294967294U, 18U);
state->s2 = TAUSWORTHE(state->s2, 2U, 27U, 4294967288U, 2U);
state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U, 7U);
state->s4 = TAUSWORTHE(state->s4, 3U, 12U, 4294967168U, 13U);
return (state->s1 ^ state->s2 ^ state->s3 ^ state->s4);
}
EXPORT_SYMBOL(prandom_u32_state);
/**
* prandom_bytes_state - get the requested number of pseudo-random bytes
*
* @state: pointer to state structure holding seeded state.
* @buf: where to copy the pseudo-random bytes to
* @bytes: the requested number of bytes
*
* This is used for pseudo-randomness with no outside seeding.
* For more random results, use get_random_bytes().
*/
void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
{
u8 *ptr = buf;
while (bytes >= sizeof(u32)) {
put_unaligned(prandom_u32_state(state), (u32 *) ptr);
ptr += sizeof(u32);
bytes -= sizeof(u32);
}
if (bytes > 0) {
u32 rem = prandom_u32_state(state);
do {
*ptr++ = (u8) rem;
bytes--;
rem >>= BITS_PER_BYTE;
} while (bytes > 0);
}
}
EXPORT_SYMBOL(prandom_bytes_state);
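/*
 * Hypothetical usage sketch (not part of this file): a reproducible,
 * locally-seeded stream, e.g. for a self-test that must replay the same
 * sequence.  It assumes prandom_seed_state() from <linux/prandom.h> is
 * visible here; the seed value and function name are illustrative only.
 */
static inline u32 demo_reproducible_u32(u64 seed)
{
	struct rnd_state state;

	prandom_seed_state(&state, seed);
	return prandom_u32_state(&state);
}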
static void prandom_warmup(struct rnd_state *state)
{
/* Calling RNG ten times to satisfy recurrence condition */
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
prandom_u32_state(state);
}
void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
{
int i;
for_each_possible_cpu(i) {
struct rnd_state *state = per_cpu_ptr(pcpu_state, i);
u32 seeds[4];
get_random_bytes(&seeds, sizeof(seeds));
state->s1 = __seed(seeds[0], 2U);
state->s2 = __seed(seeds[1], 8U);
state->s3 = __seed(seeds[2], 16U);
state->s4 = __seed(seeds[3], 128U);
prandom_warmup(state);
}
}
EXPORT_SYMBOL(prandom_seed_full_state);
#ifdef CONFIG_RANDOM32_SELFTEST
static struct prandom_test1 {
u32 seed;
u32 result;
} test1[] = {
{ 1U, 3484351685U },
{ 2U, 2623130059U },
{ 3U, 3125133893U },
{ 4U, 984847254U },
};
static struct prandom_test2 {
u32 seed;
u32 iteration;
u32 result;
} test2[] = {
/* Test cases against taus113 from GSL library. */
{ 931557656U, 959U, 2975593782U },
{ 1339693295U, 876U, 3887776532U },
{ 1545556285U, 961U, 1615538833U },
{ 601730776U, 723U, 1776162651U },
{ 1027516047U, 687U, 511983079U },
{ 416526298U, 700U, 916156552U },
{ 1395522032U, 652U, 2222063676U },
{ 366221443U, 617U, 2992857763U },
{ 1539836965U, 714U, 3783265725U },
{ 556206671U, 994U, 799626459U },
{ 684907218U, 799U, 367789491U },
{ 2121230701U, 931U, 2115467001U },
{ 1668516451U, 644U, 3620590685U },
{ 768046066U, 883U, 2034077390U },
{ 1989159136U, 833U, 1195767305U },
{ 536585145U, 996U, 3577259204U },
{ 1008129373U, 642U, 1478080776U },
{ 1740775604U, 939U, 1264980372U },
{ 1967883163U, 508U, 10734624U },
{ 1923019697U, 730U, 3821419629U },
{ 442079932U, 560U, 3440032343U },
{ 1961302714U, 845U, 841962572U },
{ 2030205964U, 962U, 1325144227U },
{ 1160407529U, 507U, 240940858U },
{ 635482502U, 779U, 4200489746U },
{ 1252788931U, 699U, 867195434U },
{ 1961817131U, 719U, 668237657U },
{ 1071468216U, 983U, 917876630U },
{ 1281848367U, 932U, 1003100039U },
{ 582537119U, 780U, 1127273778U },
{ 1973672777U, 853U, 1071368872U },
{ 1896756996U, 762U, 1127851055U },
{ 847917054U, 500U, 1717499075U },
{ 1240520510U, 951U, 2849576657U },
{ 1685071682U, 567U, 1961810396U },
{ 1516232129U, 557U, 3173877U },
{ 1208118903U, 612U, 1613145022U },
{ 1817269927U, 693U, 4279122573U },
{ 1510091701U, 717U, 638191229U },
{ 365916850U, 807U, 600424314U },
{ 399324359U, 702U, 1803598116U },
{ 1318480274U, 779U, 2074237022U },
{ 697758115U, 840U, 1483639402U },
{ 1696507773U, 840U, 577415447U },
{ 2081979121U, 981U, 3041486449U },
{ 955646687U, 742U, 3846494357U },
{ 1250683506U, 749U, 836419859U },
{ 595003102U, 534U, 366794109U },
{ 47485338U, 558U, 3521120834U },
{ 619433479U, 610U, 3991783875U },
{ 704096520U, 518U, 4139493852U },
{ 1712224984U, 606U, 2393312003U },
{ 1318233152U, 922U, 3880361134U },
{ 855572992U, 761U, 1472974787U },
{ 64721421U, 703U, 683860550U },
{ 678931758U, 840U, 380616043U },
{ 692711973U, 778U, 1382361947U },
{ 677703619U, 530U, 2826914161U },
{ 92393223U, 586U, 1522128471U },
{ 1222592920U, 743U, 3466726667U },
{ 358288986U, 695U, 1091956998U },
{ 1935056945U, 958U, 514864477U },
{ 735675993U, 990U, 1294239989U },
{ 1560089402U, 897U, 2238551287U },
{ 70616361U, 829U, 22483098U },
{ 368234700U, 731U, 2913875084U },
{ 20221190U, 879U, 1564152970U },
{ 539444654U, 682U, 1835141259U },
{ 1314987297U, 840U, 1801114136U },
{ 2019295544U, 645U, 3286438930U },
{ 469023838U, 716U, 1637918202U },
{ 1843754496U, 653U, 2562092152U },
{ 400672036U, 809U, 4264212785U },
{ 404722249U, 965U, 2704116999U },
{ 600702209U, 758U, 584979986U },
{ 519953954U, 667U, 2574436237U },
{ 1658071126U, 694U, 2214569490U },
{ 420480037U, 749U, 3430010866U },
{ 690103647U, 969U, 3700758083U },
{ 1029424799U, 937U, 3787746841U },
{ 2012608669U, 506U, 3362628973U },
{ 1535432887U, 998U, 42610943U },
{ 1330635533U, 857U, 3040806504U },
{ 1223800550U, 539U, 3954229517U },
{ 1322411537U, 680U, 3223250324U },
{ 1877847898U, 945U, 2915147143U },
{ 1646356099U, 874U, 965988280U },
{ 805687536U, 744U, 4032277920U },
{ 1948093210U, 633U, 1346597684U },
{ 392609744U, 783U, 1636083295U },
{ 690241304U, 770U, 1201031298U },
{ 1360302965U, 696U, 1665394461U },
{ 1220090946U, 780U, 1316922812U },
{ 447092251U, 500U, 3438743375U },
{ 1613868791U, 592U, 828546883U },
{ 523430951U, 548U, 2552392304U },
{ 726692899U, 810U, 1656872867U },
{ 1364340021U, 836U, 3710513486U },
{ 1986257729U, 931U, 935013962U },
{ 407983964U, 921U, 728767059U },
};
static void prandom_state_selftest_seed(struct rnd_state *state, u32 seed)
{
#define LCG(x) ((x) * 69069U) /* super-duper LCG */
state->s1 = __seed(LCG(seed), 2U);
state->s2 = __seed(LCG(state->s1), 8U);
state->s3 = __seed(LCG(state->s2), 16U);
state->s4 = __seed(LCG(state->s3), 128U);
}
static int __init prandom_state_selftest(void)
{
int i, j, errors = 0, runs = 0;
bool error = false;
for (i = 0; i < ARRAY_SIZE(test1); i++) {
struct rnd_state state;
prandom_state_selftest_seed(&state, test1[i].seed);
prandom_warmup(&state);
if (test1[i].result != prandom_u32_state(&state))
error = true;
}
if (error)
pr_warn("prandom: seed boundary self test failed\n");
else
pr_info("prandom: seed boundary self test passed\n");
for (i = 0; i < ARRAY_SIZE(test2); i++) {
struct rnd_state state;
prandom_state_selftest_seed(&state, test2[i].seed);
prandom_warmup(&state);
for (j = 0; j < test2[i].iteration - 1; j++)
prandom_u32_state(&state);
if (test2[i].result != prandom_u32_state(&state))
errors++;
runs++;
cond_resched();
}
if (errors)
pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
else
pr_info("prandom: %d self tests passed\n", runs);
return 0;
}
core_initcall(prandom_state_selftest);
#endif
| linux-master | lib/random32.c |
// SPDX-License-Identifier: GPL-2.0
#define DEBG(x)
#define DEBG1(x)
/* inflate.c -- Not copyrighted 1992 by Mark Adler
version c10p1, 10 January 1993 */
/*
* Adapted for booting Linux by Hannu Savolainen 1993
* based on gzip-1.0.3
*
* Nicolas Pitre <[email protected]>, 1999/04/14 :
* Little mods for all variables to reside either in rodata or bss segments
* by marking constant variables with 'const' and initializing all the others
* at run-time only. This allows for the kernel uncompressor to run
* directly from Flash or ROM memory on embedded systems.
*/
/*
Inflate deflated (PKZIP's method 8 compressed) data. The compression
method searches for as much of the current string of bytes (up to a
length of 258) in the previous 32 K bytes. If it doesn't find any
matches (of at least length 3), it codes the next byte. Otherwise, it
codes the length of the matched string and its distance backwards from
the current position. There is a single Huffman code that codes both
single bytes (called "literals") and match lengths. A second Huffman
code codes the distance information, which follows a length code. Each
length or distance code actually represents a base value and a number
of "extra" (sometimes zero) bits to get to add to the base value. At
the end of each deflated block is a special end-of-block (EOB) literal/
length code. The decoding process is basically: get a literal/length
code; if EOB then done; if a literal, emit the decoded byte; if a
length then get the distance and emit the referred-to bytes from the
sliding window of previously emitted data.
There are (currently) three kinds of inflate blocks: stored, fixed, and
dynamic. The compressor deals with some chunk of data at a time, and
decides which method to use on a chunk-by-chunk basis. A chunk might
typically be 32 K or 64 K. If the chunk is incompressible, then the
"stored" method is used. In this case, the bytes are simply stored as
is, eight bits per byte, with none of the above coding. The bytes are
preceded by a count, since there is no longer an EOB code.
If the data is compressible, then either the fixed or dynamic methods
are used. In the dynamic method, the compressed data is preceded by
an encoding of the literal/length and distance Huffman codes that are
to be used to decode this block. The representation is itself Huffman
coded, and so is preceded by a description of that code. These code
descriptions take up a little space, and so for small blocks, there is
a predefined set of codes, called the fixed codes. The fixed method is
used if the block codes up smaller that way (usually for quite small
chunks), otherwise the dynamic method is used. In the latter case, the
codes are customized to the probabilities in the current block, and so
can code it much better than the pre-determined fixed codes.
The Huffman codes themselves are decoded using a multi-level table
lookup, in order to maximize the speed of decoding plus the speed of
building the decoding tables. See the comments below that precede the
lbits and dbits tuning parameters.
*/
/*
Notes beyond the 1.93a appnote.txt:
1. Distance pointers never point before the beginning of the output
stream.
2. Distance pointers can point back across blocks, up to 32k away.
3. There is an implied maximum of 7 bits for the bit length table and
15 bits for the actual data.
4. If only one code exists, then it is encoded using one bit. (Zero
would be more efficient, but perhaps a little confusing.) If two
codes exist, they are coded using one bit each (0 and 1).
5. There is no way of sending zero distance codes--a dummy must be
sent if there are none. (History: a pre 2.0 version of PKZIP would
store blocks with no distance codes, but this was discovered to be
too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
zero distance codes, which is sent as one code of zero bits in
length.
6. There are up to 286 literal/length codes. Code 256 represents the
end-of-block. Note however that the static length tree defines
288 codes just to fill out the Huffman codes. Codes 286 and 287
cannot be used though, since there is no length base or extra bits
defined for them. Similarly, there are up to 30 distance codes.
However, static trees define 32 codes (all 5 bits) to fill out the
Huffman codes, but the last two had better not show up in the data.
7. Unzip can check dynamic Huffman blocks for complete code sets.
The exception is that a single code would not be complete (see #4).
8. The five bits following the block type is really the number of
literal codes sent minus 257.
9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
(1+6+6). Therefore, to output three times the length, you output
three codes (1+1+1), whereas to output four times the same length,
you only need two codes (1+3). Hmm.
10. In the tree reconstruction algorithm, Code = Code + Increment
only if BitLength(i) is not zero. (Pretty obvious.)
11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
12. Note: length code 284 can represent 227-258, but length code 285
really is 258. The last length deserves its own, short code
since it gets used a lot in very redundant files. The length
258 is special since 258 - 3 (the min match length) is 255.
13. The literal/length and distance code bit lengths are read as a
single stream of lengths. It is possible (and advantageous) for
a repeat code (16, 17, or 18) to go across the boundary between
the two sets of lengths.
*/
#include <linux/compiler.h>
#ifdef NO_INFLATE_MALLOC
#include <linux/slab.h>
#endif
#ifdef RCSID
static char rcsid[] = "#Id: inflate.c,v 0.14 1993/06/10 13:27:04 jloup Exp #";
#endif
#ifndef STATIC
#if defined(STDC_HEADERS) || defined(HAVE_STDLIB_H)
# include <sys/types.h>
# include <stdlib.h>
#endif
#include "gzip.h"
#define STATIC
#endif /* !STATIC */
#ifndef INIT
#define INIT
#endif
#define slide window
/* Huffman code lookup table entry--this entry is four bytes for machines
that have 16-bit pointers (e.g. PC's in the small or medium model).
Valid extra bits are 0..13. e == 15 is EOB (end of block), e == 16
means that v is a literal, 16 < e < 32 means that v is a pointer to
the next table, which codes e - 16 bits, and lastly e == 99 indicates
an unused code. If a code with e == 99 is looked up, this implies an
error in the data. */
struct huft {
uch e; /* number of extra bits or operation */
uch b; /* number of bits in this code or subcode */
union {
ush n; /* literal, length base, or distance base */
struct huft *t; /* pointer to next level of table */
} v;
};
/* Function prototypes */
STATIC int INIT huft_build OF((unsigned *, unsigned, unsigned,
const ush *, const ush *, struct huft **, int *));
STATIC int INIT huft_free OF((struct huft *));
STATIC int INIT inflate_codes OF((struct huft *, struct huft *, int, int));
STATIC int INIT inflate_stored OF((void));
STATIC int INIT inflate_fixed OF((void));
STATIC int INIT inflate_dynamic OF((void));
STATIC int INIT inflate_block OF((int *));
STATIC int INIT inflate OF((void));
/* The inflate algorithm uses a sliding 32 K byte window on the uncompressed
stream to find repeated byte strings. This is implemented here as a
circular buffer. The index is updated simply by incrementing and then
ANDing with 0x7fff (32K-1). */
/* It is left to other modules to supply the 32 K area. It is assumed
to be usable as if it were declared "uch slide[32768];" or as just
"uch *slide;" and then malloc'ed in the latter case. The definition
must be in unzip.h, included above. */
/* unsigned wp; current position in slide */
#define wp outcnt
#define flush_output(w) (wp=(w),flush_window())
/* Tables for deflate from PKZIP's appnote.txt. */
static const unsigned border[] = { /* Order of the bit length code lengths */
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static const ush cplens[] = { /* Copy lengths for literal codes 257..285 */
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
/* note: see note #13 above about the 258 in this list. */
static const ush cplext[] = { /* Extra bits for literal codes 257..285 */
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */
static const ush cpdist[] = { /* Copy offsets for distance codes 0..29 */
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
8193, 12289, 16385, 24577};
static const ush cpdext[] = { /* Extra bits for distance codes */
0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
12, 12, 13, 13};
/* Macros for inflate() bit peeking and grabbing.
The usage is:
NEEDBITS(j)
x = b & mask_bits[j];
DUMPBITS(j)
where NEEDBITS makes sure that b has at least j bits in it, and
DUMPBITS removes the bits from b. The macros use the variable k
for the number of bits in b. Normally, b and k are register
variables for speed, and are initialized at the beginning of a
routine that uses these macros from a global bit buffer and count.
If we assume that EOB will be the longest code, then we will never
ask for bits with NEEDBITS that are beyond the end of the stream.
So, NEEDBITS should not read any more bytes than are needed to
meet the request. Then no bytes need to be "returned" to the buffer
at the end of the last block.
However, this assumption is not true for fixed blocks--the EOB code
is 7 bits, but the other literal/length codes can be 8 or 9 bits.
(The EOB code is shorter than other codes because fixed blocks are
generally short. So, while a block always has an EOB, many other
literal/length codes have a significantly lower probability of
showing up at all.) However, by making the first table have a
lookup of seven bits, the EOB code will be found in that first
lookup, and so will not require that too many bits be pulled from
the stream.
*/
STATIC ulg bb; /* bit buffer */
STATIC unsigned bk; /* bits in bit buffer */
STATIC const ush mask_bits[] = {
0x0000,
0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
};
#define NEXTBYTE() ({ int v = get_byte(); if (v < 0) goto underrun; (uch)v; })
#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}}
#define DUMPBITS(n) {b>>=(n);k-=(n);}
#ifndef NO_INFLATE_MALLOC
/* A trivial malloc implementation, adapted from
* malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
*/
static unsigned long malloc_ptr;
static int malloc_count;
static void *malloc(int size)
{
void *p;
if (size < 0)
error("Malloc error");
if (!malloc_ptr)
malloc_ptr = free_mem_ptr;
malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */
p = (void *)malloc_ptr;
malloc_ptr += size;
if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
error("Out of memory");
malloc_count++;
return p;
}
static void free(void *where)
{
malloc_count--;
if (!malloc_count)
malloc_ptr = free_mem_ptr;
}
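/*
 * The pair above is a simple bump allocator over the region starting at
 * free_mem_ptr (bounded by free_mem_end_ptr when that is set): malloc()
 * only advances malloc_ptr, and free() rewinds the arena only once every
 * outstanding allocation has been released.
 */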
#else
#define malloc(a) kmalloc(a, GFP_KERNEL)
#define free(a) kfree(a)
#endif
/*
Huffman code decoding is performed using a multi-level table lookup.
The fastest way to decode is to simply build a lookup table whose
size is determined by the longest code. However, the time it takes
to build this table can also be a factor if the data being decoded
is not very long. The most common codes are necessarily the
shortest codes, so those codes dominate the decoding time, and hence
the speed. The idea is you can have a shorter table that decodes the
shorter, more probable codes, and then point to subsidiary tables for
the longer codes. The time it costs to decode the longer codes is
then traded against the time it takes to make longer tables.
The results of this trade are in the variables lbits and dbits
below. lbits is the number of bits the first level table for literal/
length codes can decode in one step, and dbits is the same thing for
the distance codes. Subsequent tables are also less than or equal to
those sizes. These values may be adjusted either when all of the
codes are shorter than that, in which case the longest code length in
bits is used, or when the shortest code is *longer* than the requested
table size, in which case the length of the shortest code in bits is
used.
There are two different values for the two tables, since they code a
different number of possibilities each. The literal/length table
codes 286 possible values, or in a flat code, a little over eight
bits. The distance table codes 30 possible values, or a little less
than five bits, flat. The optimum values for speed end up being
about one bit more than those, so lbits is 8+1 and dbits is 5+1.
The optimum values may differ though from machine to machine, and
possibly even between compilers. Your mileage may vary.
*/
STATIC const int lbits = 9; /* bits in base literal/length lookup table */
STATIC const int dbits = 6; /* bits in base distance lookup table */
/* If BMAX needs to be larger than 16, then h and x[] should be ulg. */
#define BMAX 16 /* maximum bit length of any code (16 for explode) */
#define N_MAX 288 /* maximum number of codes in any set */
STATIC unsigned hufts; /* track memory usage */
STATIC int INIT huft_build(
unsigned *b, /* code lengths in bits (all assumed <= BMAX) */
unsigned n, /* number of codes (assumed <= N_MAX) */
unsigned s, /* number of simple-valued codes (0..s-1) */
const ush *d, /* list of base values for non-simple codes */
const ush *e, /* list of extra bits for non-simple codes */
struct huft **t, /* result: starting table */
int *m /* maximum lookup bits, returns actual */
)
/* Given a list of code lengths and a maximum table size, make a set of
tables to decode that set of codes. Return zero on success, one if
the given code set is incomplete (the tables are still built in this
case), two if the input is invalid (all zero length codes or an
oversubscribed set of lengths), and three if not enough memory. */
{
unsigned a; /* counter for codes of length k */
unsigned f; /* i repeats in table every f entries */
int g; /* maximum code length */
int h; /* table level */
register unsigned i; /* counter, current code */
register unsigned j; /* counter */
register int k; /* number of bits in current code */
int l; /* bits per table (returned in m) */
register unsigned *p; /* pointer into c[], b[], or v[] */
register struct huft *q; /* points to current table */
struct huft r; /* table entry for structure assignment */
register int w; /* bits before this table == (l * h) */
unsigned *xp; /* pointer into x */
int y; /* number of dummy codes added */
unsigned z; /* number of entries in current table */
struct {
unsigned c[BMAX+1]; /* bit length count table */
struct huft *u[BMAX]; /* table stack */
unsigned v[N_MAX]; /* values in order of bit length */
unsigned x[BMAX+1]; /* bit offsets, then code stack */
} *stk;
unsigned *c, *v, *x;
struct huft **u;
int ret;
DEBG("huft1 ");
stk = malloc(sizeof(*stk));
if (stk == NULL)
return 3; /* out of memory */
c = stk->c;
v = stk->v;
x = stk->x;
u = stk->u;
/* Generate counts for each bit length */
memzero(stk->c, sizeof(stk->c));
p = b; i = n;
do {
Tracecv(*p, (stderr, (n-i >= ' ' && n-i <= '~' ? "%c %d\n" : "0x%x %d\n"),
n-i, *p));
c[*p]++; /* assume all entries <= BMAX */
p++; /* Can't combine with above line (Solaris bug) */
} while (--i);
if (c[0] == n) /* null input--all zero length codes */
{
*t = (struct huft *)NULL;
*m = 0;
ret = 2;
goto out;
}
DEBG("huft2 ");
/* Find minimum and maximum length, bound *m by those */
l = *m;
for (j = 1; j <= BMAX; j++)
if (c[j])
break;
k = j; /* minimum code length */
if ((unsigned)l < j)
l = j;
for (i = BMAX; i; i--)
if (c[i])
break;
g = i; /* maximum code length */
if ((unsigned)l > i)
l = i;
*m = l;
DEBG("huft3 ");
/* Adjust last length count to fill out codes, if needed */
for (y = 1 << j; j < i; j++, y <<= 1)
if ((y -= c[j]) < 0) {
ret = 2; /* bad input: more codes than bits */
goto out;
}
if ((y -= c[i]) < 0) {
ret = 2;
goto out;
}
c[i] += y;
DEBG("huft4 ");
/* Generate starting offsets into the value table for each length */
x[1] = j = 0;
p = c + 1; xp = x + 2;
while (--i) { /* note that i == g from above */
*xp++ = (j += *p++);
}
DEBG("huft5 ");
/* Make a table of values in order of bit lengths */
p = b; i = 0;
do {
if ((j = *p++) != 0)
v[x[j]++] = i;
} while (++i < n);
n = x[g]; /* set n to length of v */
DEBG("h6 ");
/* Generate the Huffman codes and for each, make the table entries */
x[0] = i = 0; /* first Huffman code is zero */
p = v; /* grab values in bit order */
h = -1; /* no tables yet--level -1 */
w = -l; /* bits decoded == (l * h) */
u[0] = (struct huft *)NULL; /* just to keep compilers happy */
q = (struct huft *)NULL; /* ditto */
z = 0; /* ditto */
DEBG("h6a ");
/* go through the bit lengths (k already is bits in shortest code) */
for (; k <= g; k++)
{
DEBG("h6b ");
a = c[k];
while (a--)
{
DEBG("h6b1 ");
/* here i is the Huffman code of length k bits for value *p */
/* make tables up to required level */
while (k > w + l)
{
DEBG1("1 ");
h++;
w += l; /* previous table always l bits */
/* compute minimum size table less than or equal to l bits */
z = (z = g - w) > (unsigned)l ? l : z; /* upper limit on table size */
if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
{ /* too few codes for k-w bit table */
DEBG1("2 ");
f -= a + 1; /* deduct codes from patterns left */
xp = c + k;
if (j < z)
while (++j < z) /* try smaller tables up to z bits */
{
if ((f <<= 1) <= *++xp)
break; /* enough codes to use up j bits */
f -= *xp; /* else deduct codes from patterns */
}
}
DEBG1("3 ");
z = 1 << j; /* table entries for j-bit table */
/* allocate and link in new table */
if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) ==
(struct huft *)NULL)
{
if (h)
huft_free(u[0]);
ret = 3; /* not enough memory */
goto out;
}
DEBG1("4 ");
hufts += z + 1; /* track memory usage */
*t = q + 1; /* link to list for huft_free() */
*(t = &(q->v.t)) = (struct huft *)NULL;
u[h] = ++q; /* table starts after link */
DEBG1("5 ");
/* connect to last table, if there is one */
if (h)
{
x[h] = i; /* save pattern for backing up */
r.b = (uch)l; /* bits to dump before this table */
r.e = (uch)(16 + j); /* bits in this table */
r.v.t = q; /* pointer to this table */
j = i >> (w - l); /* (get around Turbo C bug) */
u[h-1][j] = r; /* connect to last table */
}
DEBG1("6 ");
}
DEBG("h6c ");
/* set up table entry in r */
r.b = (uch)(k - w);
if (p >= v + n)
r.e = 99; /* out of values--invalid code */
else if (*p < s)
{
r.e = (uch)(*p < 256 ? 16 : 15); /* 256 is end-of-block code */
r.v.n = (ush)(*p); /* simple code is just the value */
p++; /* one compiler does not like *p++ */
}
else
{
r.e = (uch)e[*p - s]; /* non-simple--look up in lists */
r.v.n = d[*p++ - s];
}
DEBG("h6d ");
/* fill code-like entries with r */
f = 1 << (k - w);
for (j = i >> w; j < z; j += f)
q[j] = r;
/* backwards increment the k-bit code i */
for (j = 1 << (k - 1); i & j; j >>= 1)
i ^= j;
i ^= j;
/* backup over finished tables */
while ((i & ((1 << w) - 1)) != x[h])
{
h--; /* don't need to update q */
w -= l;
}
DEBG("h6e ");
}
DEBG("h6f ");
}
DEBG("huft7 ");
/* Return true (1) if we were given an incomplete table */
ret = y != 0 && g != 1;
out:
free(stk);
return ret;
}
STATIC int INIT huft_free(
struct huft *t /* table to free */
)
/* Free the malloc'ed tables built by huft_build(), which makes a linked
list of the tables it made, with the links in a dummy first entry of
each table. */
{
register struct huft *p, *q;
/* Go through linked list, freeing from the malloced (t[-1]) address. */
p = t;
while (p != (struct huft *)NULL)
{
q = (--p)->v.t;
free((char*)p);
p = q;
}
return 0;
}
STATIC int INIT inflate_codes(
struct huft *tl, /* literal/length decoder tables */
struct huft *td, /* distance decoder tables */
int bl, /* number of bits decoded by tl[] */
int bd /* number of bits decoded by td[] */
)
/* inflate (decompress) the codes in a deflated (compressed) block.
Return an error code or zero if it all goes ok. */
{
register unsigned e; /* table entry flag/number of extra bits */
unsigned n, d; /* length and index for copy */
unsigned w; /* current window position */
struct huft *t; /* pointer to table entry */
unsigned ml, md; /* masks for bl and bd bits */
register ulg b; /* bit buffer */
register unsigned k; /* number of bits in bit buffer */
/* make local copies of globals */
b = bb; /* initialize bit buffer */
k = bk;
w = wp; /* initialize window position */
/* inflate the coded data */
ml = mask_bits[bl]; /* precompute masks for speed */
md = mask_bits[bd];
for (;;) /* do until end of block */
{
NEEDBITS((unsigned)bl)
if ((e = (t = tl + ((unsigned)b & ml))->e) > 16)
do {
if (e == 99)
return 1;
DUMPBITS(t->b)
e -= 16;
NEEDBITS(e)
} while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16);
DUMPBITS(t->b)
if (e == 16) /* then it's a literal */
{
slide[w++] = (uch)t->v.n;
Tracevv((stderr, "%c", slide[w-1]));
if (w == WSIZE)
{
flush_output(w);
w = 0;
}
}
else /* it's an EOB or a length */
{
/* exit if end of block */
if (e == 15)
break;
/* get length of block to copy */
NEEDBITS(e)
n = t->v.n + ((unsigned)b & mask_bits[e]);
DUMPBITS(e);
/* decode distance of block to copy */
NEEDBITS((unsigned)bd)
if ((e = (t = td + ((unsigned)b & md))->e) > 16)
do {
if (e == 99)
return 1;
DUMPBITS(t->b)
e -= 16;
NEEDBITS(e)
} while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16);
DUMPBITS(t->b)
NEEDBITS(e)
d = w - t->v.n - ((unsigned)b & mask_bits[e]);
DUMPBITS(e)
Tracevv((stderr,"\\[%d,%d]", w-d, n));
/* do the copy */
do {
n -= (e = (e = WSIZE - ((d &= WSIZE-1) > w ? d : w)) > n ? n : e);
#if !defined(NOMEMCPY) && !defined(DEBUG)
if (w - d >= e) /* (this test assumes unsigned comparison) */
{
memcpy(slide + w, slide + d, e);
w += e;
d += e;
}
else /* do it slow to avoid memcpy() overlap */
#endif /* !NOMEMCPY */
do {
slide[w++] = slide[d++];
Tracevv((stderr, "%c", slide[w-1]));
} while (--e);
if (w == WSIZE)
{
flush_output(w);
w = 0;
}
} while (n);
}
}
/* restore the globals from the locals */
wp = w; /* restore global window pointer */
bb = b; /* restore global bit buffer */
bk = k;
/* done */
return 0;
underrun:
return 4; /* Input underrun */
}
STATIC int INIT inflate_stored(void)
/* "decompress" an inflated type 0 (stored) block. */
{
unsigned n; /* number of bytes in block */
unsigned w; /* current window position */
register ulg b; /* bit buffer */
register unsigned k; /* number of bits in bit buffer */
DEBG("<stor");
/* make local copies of globals */
b = bb; /* initialize bit buffer */
k = bk;
w = wp; /* initialize window position */
/* go to byte boundary */
n = k & 7;
DUMPBITS(n);
/* get the length and its complement */
NEEDBITS(16)
n = ((unsigned)b & 0xffff);
DUMPBITS(16)
NEEDBITS(16)
if (n != (unsigned)((~b) & 0xffff))
return 1; /* error in compressed data */
DUMPBITS(16)
/* read and output the compressed data */
while (n--)
{
NEEDBITS(8)
slide[w++] = (uch)b;
if (w == WSIZE)
{
flush_output(w);
w = 0;
}
DUMPBITS(8)
}
/* restore the globals from the locals */
wp = w; /* restore global window pointer */
bb = b; /* restore global bit buffer */
bk = k;
DEBG(">");
return 0;
underrun:
return 4; /* Input underrun */
}
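/*
 * Illustrative note (not part of the original source): per RFC 1951 a
 * stored block, once the stream is byte-aligned, carries LEN and NLEN as
 * 16-bit little-endian fields where NLEN is the one's complement of LEN;
 * that is exactly the complement check performed above before copying.
 */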
/*
* We use `noinline' here to prevent gcc-3.5 from using too much stack space
*/
STATIC int noinline INIT inflate_fixed(void)
/* decompress an inflated type 1 (fixed Huffman codes) block. We should
either replace this with a custom decoder, or at least precompute the
Huffman tables. */
{
int i; /* temporary variable */
struct huft *tl; /* literal/length code table */
struct huft *td; /* distance code table */
int bl; /* lookup bits for tl */
int bd; /* lookup bits for td */
unsigned *l; /* length list for huft_build */
DEBG("<fix");
l = malloc(sizeof(*l) * 288);
if (l == NULL)
return 3; /* out of memory */
/* set up literal table */
for (i = 0; i < 144; i++)
l[i] = 8;
for (; i < 256; i++)
l[i] = 9;
for (; i < 280; i++)
l[i] = 7;
for (; i < 288; i++) /* make a complete, but wrong code set */
l[i] = 8;
bl = 7;
if ((i = huft_build(l, 288, 257, cplens, cplext, &tl, &bl)) != 0) {
free(l);
return i;
}
/* set up distance table */
for (i = 0; i < 30; i++) /* make an incomplete code set */
l[i] = 5;
bd = 5;
if ((i = huft_build(l, 30, 0, cpdist, cpdext, &td, &bd)) > 1)
{
huft_free(tl);
free(l);
DEBG(">");
return i;
}
/* decompress until an end-of-block code */
if (inflate_codes(tl, td, bl, bd)) {
free(l);
return 1;
}
/* free the decoding tables, return */
free(l);
huft_free(tl);
huft_free(td);
return 0;
}
/*
* We use `noinline' here to prevent gcc-3.5 from using too much stack space
*/
STATIC int noinline INIT inflate_dynamic(void)
/* decompress an inflated type 2 (dynamic Huffman codes) block. */
{
int i; /* temporary variables */
unsigned j;
unsigned l; /* last length */
unsigned m; /* mask for bit lengths table */
unsigned n; /* number of lengths to get */
struct huft *tl; /* literal/length code table */
struct huft *td; /* distance code table */
int bl; /* lookup bits for tl */
int bd; /* lookup bits for td */
unsigned nb; /* number of bit length codes */
unsigned nl; /* number of literal/length codes */
unsigned nd; /* number of distance codes */
unsigned *ll; /* literal/length and distance code lengths */
register ulg b; /* bit buffer */
register unsigned k; /* number of bits in bit buffer */
int ret;
DEBG("<dyn");
#ifdef PKZIP_BUG_WORKAROUND
ll = malloc(sizeof(*ll) * (288+32)); /* literal/length and distance code lengths */
#else
ll = malloc(sizeof(*ll) * (286+30)); /* literal/length and distance code lengths */
#endif
if (ll == NULL)
return 1;
/* make local bit buffer */
b = bb;
k = bk;
/* read in table lengths */
NEEDBITS(5)
nl = 257 + ((unsigned)b & 0x1f); /* number of literal/length codes */
DUMPBITS(5)
NEEDBITS(5)
nd = 1 + ((unsigned)b & 0x1f); /* number of distance codes */
DUMPBITS(5)
NEEDBITS(4)
nb = 4 + ((unsigned)b & 0xf); /* number of bit length codes */
DUMPBITS(4)
#ifdef PKZIP_BUG_WORKAROUND
if (nl > 288 || nd > 32)
#else
if (nl > 286 || nd > 30)
#endif
{
ret = 1; /* bad lengths */
goto out;
}
DEBG("dyn1 ");
/* read in bit-length-code lengths */
for (j = 0; j < nb; j++)
{
NEEDBITS(3)
ll[border[j]] = (unsigned)b & 7;
DUMPBITS(3)
}
for (; j < 19; j++)
ll[border[j]] = 0;
DEBG("dyn2 ");
/* build decoding table for trees--single level, 7 bit lookup */
bl = 7;
if ((i = huft_build(ll, 19, 19, NULL, NULL, &tl, &bl)) != 0)
{
if (i == 1)
huft_free(tl);
ret = i; /* incomplete code set */
goto out;
}
DEBG("dyn3 ");
/* read in literal and distance code lengths */
n = nl + nd;
m = mask_bits[bl];
i = l = 0;
while ((unsigned)i < n)
{
NEEDBITS((unsigned)bl)
j = (td = tl + ((unsigned)b & m))->b;
DUMPBITS(j)
j = td->v.n;
if (j < 16) /* length of code in bits (0..15) */
ll[i++] = l = j; /* save last length in l */
else if (j == 16) /* repeat last length 3 to 6 times */
{
NEEDBITS(2)
j = 3 + ((unsigned)b & 3);
DUMPBITS(2)
if ((unsigned)i + j > n) {
ret = 1;
goto out;
}
while (j--)
ll[i++] = l;
}
else if (j == 17) /* 3 to 10 zero length codes */
{
NEEDBITS(3)
j = 3 + ((unsigned)b & 7);
DUMPBITS(3)
if ((unsigned)i + j > n) {
ret = 1;
goto out;
}
while (j--)
ll[i++] = 0;
l = 0;
}
else /* j == 18: 11 to 138 zero length codes */
{
NEEDBITS(7)
j = 11 + ((unsigned)b & 0x7f);
DUMPBITS(7)
if ((unsigned)i + j > n) {
ret = 1;
goto out;
}
while (j--)
ll[i++] = 0;
l = 0;
}
}
DEBG("dyn4 ");
/* free decoding table for trees */
huft_free(tl);
DEBG("dyn5 ");
/* restore the global bit buffer */
bb = b;
bk = k;
DEBG("dyn5a ");
/* build the decoding tables for literal/length and distance codes */
bl = lbits;
if ((i = huft_build(ll, nl, 257, cplens, cplext, &tl, &bl)) != 0)
{
DEBG("dyn5b ");
if (i == 1) {
error("incomplete literal tree");
huft_free(tl);
}
ret = i; /* incomplete code set */
goto out;
}
DEBG("dyn5c ");
bd = dbits;
if ((i = huft_build(ll + nl, nd, 0, cpdist, cpdext, &td, &bd)) != 0)
{
DEBG("dyn5d ");
if (i == 1) {
error("incomplete distance tree");
#ifdef PKZIP_BUG_WORKAROUND
i = 0;
}
#else
huft_free(td);
}
huft_free(tl);
ret = i; /* incomplete code set */
goto out;
#endif
}
DEBG("dyn6 ");
/* decompress until an end-of-block code */
if (inflate_codes(tl, td, bl, bd)) {
ret = 1;
goto out;
}
DEBG("dyn7 ");
/* free the decoding tables, return */
huft_free(tl);
huft_free(td);
DEBG(">");
ret = 0;
out:
free(ll);
return ret;
underrun:
ret = 4; /* Input underrun */
goto out;
}
STATIC int INIT inflate_block(
int *e /* last block flag */
)
/* decompress an inflated block */
{
unsigned t; /* block type */
register ulg b; /* bit buffer */
register unsigned k; /* number of bits in bit buffer */
DEBG("<blk");
/* make local bit buffer */
b = bb;
k = bk;
/* read in last block bit */
NEEDBITS(1)
*e = (int)b & 1;
DUMPBITS(1)
/* read in block type */
NEEDBITS(2)
t = (unsigned)b & 3;
DUMPBITS(2)
/* restore the global bit buffer */
bb = b;
bk = k;
/* inflate that block type */
if (t == 2)
return inflate_dynamic();
if (t == 0)
return inflate_stored();
if (t == 1)
return inflate_fixed();
DEBG(">");
/* bad block type */
return 2;
underrun:
return 4; /* Input underrun */
}
STATIC int INIT inflate(void)
/* decompress an inflated entry */
{
int e; /* last block flag */
int r; /* result code */
unsigned h; /* maximum struct huft's malloc'ed */
/* initialize window, bit buffer */
wp = 0;
bk = 0;
bb = 0;
/* decompress until the last block */
h = 0;
do {
hufts = 0;
#ifdef ARCH_HAS_DECOMP_WDOG
arch_decomp_wdog();
#endif
r = inflate_block(&e);
if (r)
return r;
if (hufts > h)
h = hufts;
} while (!e);
/* Undo too much lookahead. The next read will be byte aligned so we
* can discard unused bits in the last meaningful byte.
*/
while (bk >= 8) {
bk -= 8;
inptr--;
}
/* flush out slide */
flush_output(wp);
/* return success */
#ifdef DEBUG
fprintf(stderr, "<%u> ", h);
#endif /* DEBUG */
return 0;
}
/**********************************************************************
*
* The following are support routines for inflate.c
*
**********************************************************************/
static ulg crc_32_tab[256];
static ulg crc; /* initialized in makecrc() so it'll reside in bss */
#define CRC_VALUE (crc ^ 0xffffffffUL)
/*
* Code to compute the CRC-32 table. Borrowed from
* gzip-1.0.3/makecrc.c.
*/
static void INIT
makecrc(void)
{
/* Not copyrighted 1990 Mark Adler */
unsigned long c; /* crc shift register */
unsigned long e; /* polynomial exclusive-or pattern */
int i; /* counter for all possible eight bit values */
int k; /* byte being shifted into crc apparatus */
/* terms of polynomial defining this crc (except x^32): */
static const int p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26};
/* Make exclusive-or pattern from polynomial */
e = 0;
for (i = 0; i < sizeof(p)/sizeof(int); i++)
e |= 1L << (31 - p[i]);
crc_32_tab[0] = 0;
for (i = 1; i < 256; i++)
{
c = 0;
for (k = i | 256; k != 1; k >>= 1)
{
c = c & 1 ? (c >> 1) ^ e : c >> 1;
if (k & 1)
c ^= e;
}
crc_32_tab[i] = c;
}
/* this is initialized here so this code could reside in ROM */
crc = (ulg)0xffffffffUL; /* shift register contents */
}
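/*
 * Illustrative sketch only (not part of the original source): the usual
 * way a reflected CRC-32 table like crc_32_tab is consumed, one byte at a
 * time.  The decompressor's real update routine lives elsewhere; the name
 * crc32_update_sketch is hypothetical.
 */
static ulg INIT crc32_update_sketch(ulg c, const uch *buf, unsigned n)
{
	/* c holds the current shift-register contents (starts at 0xffffffff) */
	while (n--)
		c = crc_32_tab[(c ^ *buf++) & 0xff] ^ (c >> 8);
	return c;	/* caller applies the final xor, see CRC_VALUE */
}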
/* gzip flag byte */
#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */
#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
#define COMMENT 0x10 /* bit 4 set: file comment present */
#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
#define RESERVED 0xC0 /* bit 6,7: reserved */
/*
* Do the uncompression!
*/
static int INIT gunzip(void)
{
uch flags;
unsigned char magic[2]; /* magic header */
char method;
ulg orig_crc = 0; /* original crc */
ulg orig_len = 0; /* original uncompressed length */
int res;
magic[0] = NEXTBYTE();
magic[1] = NEXTBYTE();
method = NEXTBYTE();
if (magic[0] != 037 ||
((magic[1] != 0213) && (magic[1] != 0236))) {
error("bad gzip magic numbers");
return -1;
}
/* We only support method #8, DEFLATED */
if (method != 8) {
error("internal error, invalid method");
return -1;
}
flags = (uch)get_byte();
if ((flags & ENCRYPTED) != 0) {
error("Input is encrypted");
return -1;
}
if ((flags & CONTINUATION) != 0) {
error("Multi part input");
return -1;
}
if ((flags & RESERVED) != 0) {
error("Input has invalid flags");
return -1;
}
NEXTBYTE(); /* Get timestamp */
NEXTBYTE();
NEXTBYTE();
NEXTBYTE();
(void)NEXTBYTE(); /* Ignore extra flags for the moment */
(void)NEXTBYTE(); /* Ignore OS type for the moment */
if ((flags & EXTRA_FIELD) != 0) {
unsigned len = (unsigned)NEXTBYTE();
len |= ((unsigned)NEXTBYTE())<<8;
while (len--) (void)NEXTBYTE();
}
/* Discard original file name if present */
if ((flags & ORIG_NAME) != 0) {
/* Discard the old name */
while (NEXTBYTE() != 0) /* null */ ;
}
/* Discard file comment if any */
if ((flags & COMMENT) != 0) {
while (NEXTBYTE() != 0) /* null */ ;
}
/* Decompress */
if ((res = inflate())) {
switch (res) {
case 0:
break;
case 1:
error("invalid compressed format (err=1)");
break;
case 2:
error("invalid compressed format (err=2)");
break;
case 3:
error("out of memory");
break;
case 4:
error("out of input data");
break;
default:
error("invalid compressed format (other)");
}
return -1;
}
/* Get the crc and original length */
/* crc32 (see algorithm.doc)
* uncompressed input size modulo 2^32
*/
orig_crc = (ulg) NEXTBYTE();
orig_crc |= (ulg) NEXTBYTE() << 8;
orig_crc |= (ulg) NEXTBYTE() << 16;
orig_crc |= (ulg) NEXTBYTE() << 24;
orig_len = (ulg) NEXTBYTE();
orig_len |= (ulg) NEXTBYTE() << 8;
orig_len |= (ulg) NEXTBYTE() << 16;
orig_len |= (ulg) NEXTBYTE() << 24;
/* Validate decompression */
if (orig_crc != CRC_VALUE) {
error("crc error");
return -1;
}
if (orig_len != bytes_out) {
error("length error");
return -1;
}
return 0;
underrun: /* NEXTBYTE() goto's here if needed */
error("out of input data");
return -1;
}
| linux-master | lib/inflate.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bcd.h>
#include <linux/export.h>
unsigned _bcd2bin(unsigned char val)
{
return (val & 0x0f) + (val >> 4) * 10;
}
EXPORT_SYMBOL(_bcd2bin);
unsigned char _bin2bcd(unsigned val)
{
return ((val / 10) << 4) + val % 10;
}
EXPORT_SYMBOL(_bin2bcd);
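/*
 * Illustrative usage only (not part of the original file): RTC-style
 * drivers round-trip register values through the helpers above, e.g.
 * bcd2bin(0x42) == 42 and bin2bcd(59) == 0x59.  The helper below is a
 * hypothetical self-check written purely for demonstration.
 */
static inline bool bcd_roundtrip_sketch(void)
{
	/* 0x42 is the BCD encoding of decimal 42; 59 encodes as 0x59 */
	return _bcd2bin(0x42) == 42 && _bin2bcd(59) == 0x59;
}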
| linux-master | lib/bcd.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>
/**
* cpumask_next_wrap - helper to implement for_each_cpu_wrap
* @n: the cpu prior to the place to search
* @mask: the cpumask pointer
* @start: the start point of the iteration
* @wrap: assume @n crossing @start terminates the iteration
*
* Returns >= nr_cpu_ids on completion
*
* Note: the @wrap argument is required for the start condition when
* we cannot assume @start is set in @mask.
*/
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
unsigned int next;
again:
next = cpumask_next(n, mask);
if (wrap && n < start && next >= start) {
return nr_cpumask_bits;
} else if (next >= nr_cpumask_bits) {
wrap = true;
n = -1;
goto again;
}
return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
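/*
 * Illustrative sketch only (not part of the original file): how a
 * for_each_cpu_wrap()-style walk drives the helper above -- the first call
 * passes wrap == false, later calls pass wrap == true so the scan stops
 * once it wraps back past @start.  The function name is hypothetical.
 */
static inline void visit_online_from_sketch(int start)
{
	unsigned int cpu;

	for (cpu = cpumask_next_wrap(start - 1, cpu_online_mask, start, false);
	     cpu < nr_cpu_ids;
	     cpu = cpumask_next_wrap(cpu, cpu_online_mask, start, true)) {
		/* visit @cpu here */
	}
}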
/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
* alloc_cpumask_var_node - allocate a struct cpumask on a given node
* @mask: pointer to cpumask_var_t where the cpumask is returned
* @flags: GFP_ flags
* @node: memory node from which to allocate or %NUMA_NO_NODE
*
* Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
* a nop returning a constant 1 (in <linux/cpumask.h>)
* Returns TRUE if memory allocation succeeded, FALSE otherwise.
*
* In addition, mask will be NULL if this fails. Note that gcc is
* usually smart enough to know that mask can never be NULL if
* CONFIG_CPUMASK_OFFSTACK=n, so it eliminates that code in that
* case too.
*/
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
*mask = kmalloc_node(cpumask_size(), flags, node);
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
if (!*mask) {
printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
dump_stack();
}
#endif
return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);
/**
* alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
* @mask: pointer to cpumask_var_t where the cpumask is returned
*
* Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
* a nop (in <linux/cpumask.h>).
* Either returns an allocated (zero-filled) cpumask, or causes the
* system to panic.
*/
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
if (!*mask)
panic("%s: Failed to allocate %u bytes\n", __func__,
cpumask_size());
}
/**
* free_cpumask_var - frees memory allocated for a struct cpumask.
* @mask: cpumask to free
*
* This is safe on a NULL mask.
*/
void free_cpumask_var(cpumask_var_t mask)
{
kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);
/**
* free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
* @mask: cpumask to free
*/
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
memblock_free(mask, cpumask_size());
}
#endif
/**
* cpumask_local_spread - select the i'th cpu based on NUMA distances
* @i: index number
* @node: local numa_node
*
* Returns online CPU according to a numa aware policy; local cpus are returned
* first, followed by non-local ones, then it wraps around.
*
* For those who want to enumerate all CPUs based on their NUMA distances,
* i.e. call this function in a loop, like:
*
* for (i = 0; i < num_online_cpus(); i++) {
* cpu = cpumask_local_spread(i, node);
* do_something(cpu);
* }
*
* There's a better alternative based on for_each()-like iterators:
*
* for_each_numa_hop_mask(mask, node) {
* for_each_cpu_andnot(cpu, mask, prev)
* do_something(cpu);
* prev = mask;
* }
*
* It's more verbose than the loop above, but simpler in complexity terms:
* iterator-based enumeration is O(sched_domains_numa_levels * nr_cpu_ids),
* while cpumask_local_spread(), when called for each cpu, is
* O(sched_domains_numa_levels * nr_cpu_ids * log(nr_cpu_ids)).
*/
unsigned int cpumask_local_spread(unsigned int i, int node)
{
unsigned int cpu;
/* Wrap: we always want a cpu. */
i %= num_online_cpus();
cpu = (node == NUMA_NO_NODE) ?
cpumask_nth(i, cpu_online_mask) :
sched_numa_find_nth_cpu(cpu_online_mask, i, node);
WARN_ON(cpu >= nr_cpu_ids);
return cpu;
}
EXPORT_SYMBOL(cpumask_local_spread);
static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
/**
* cpumask_any_and_distribute - Return an arbitrary cpu within src1p & src2p.
* @src1p: first &cpumask for intersection
* @src2p: second &cpumask for intersection
*
* Iterated calls using the same srcp1 and srcp2 will be distributed within
* their intersection.
*
* Returns >= nr_cpu_ids if the intersection is empty.
*/
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p)
{
unsigned int next, prev;
/* NOTE: our first selection will skip 0. */
prev = __this_cpu_read(distribute_cpu_mask_prev);
next = find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
nr_cpumask_bits, prev + 1);
if (next < nr_cpu_ids)
__this_cpu_write(distribute_cpu_mask_prev, next);
return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
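/*
 * Illustrative sketch only (not part of the original file): spreading a
 * batch of work items over the CPUs in the intersection of a caller mask
 * and the online mask, one distributed pick per item.  The function name
 * is hypothetical.
 */
static inline void spread_items_sketch(const struct cpumask *allowed, int nr_items)
{
	while (nr_items--) {
		unsigned int cpu = cpumask_any_and_distribute(allowed, cpu_online_mask);

		if (cpu >= nr_cpu_ids)
			break;	/* intersection is empty */
		/* queue one item on @cpu here */
	}
}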
unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
unsigned int next, prev;
/* NOTE: our first selection will skip 0. */
prev = __this_cpu_read(distribute_cpu_mask_prev);
next = find_next_bit_wrap(cpumask_bits(srcp), nr_cpumask_bits, prev + 1);
if (next < nr_cpu_ids)
__this_cpu_write(distribute_cpu_mask_prev, next);
return next;
}
EXPORT_SYMBOL(cpumask_any_distribute);
| linux-master | lib/cpumask.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test cases for sscanf facility.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "../tools/testing/selftests/kselftest_module.h"
#define BUF_SIZE 1024
KSTM_MODULE_GLOBALS();
static char *test_buffer __initdata;
static char *fmt_buffer __initdata;
static struct rnd_state rnd_state __initdata;
typedef int (*check_fn)(const void *check_data, const char *string,
const char *fmt, int n_args, va_list ap);
static void __scanf(4, 6) __init
_test(check_fn fn, const void *check_data, const char *string, const char *fmt,
int n_args, ...)
{
va_list ap, ap_copy;
int ret;
total_tests++;
va_start(ap, n_args);
va_copy(ap_copy, ap);
ret = vsscanf(string, fmt, ap_copy);
va_end(ap_copy);
if (ret != n_args) {
pr_warn("vsscanf(\"%s\", \"%s\", ...) returned %d expected %d\n",
string, fmt, ret, n_args);
goto fail;
}
ret = (*fn)(check_data, string, fmt, n_args, ap);
if (ret)
goto fail;
va_end(ap);
return;
fail:
failed_tests++;
va_end(ap);
}
#define _check_numbers_template(arg_fmt, expect, str, fmt, n_args, ap) \
do { \
pr_debug("\"%s\", \"%s\" ->\n", str, fmt); \
for (; n_args > 0; n_args--, expect++) { \
typeof(*expect) got = *va_arg(ap, typeof(expect)); \
pr_debug("\t" arg_fmt "\n", got); \
if (got != *expect) { \
pr_warn("vsscanf(\"%s\", \"%s\", ...) expected " arg_fmt " got " arg_fmt "\n", \
str, fmt, *expect, got); \
return 1; \
} \
} \
return 0; \
} while (0)
static int __init check_ull(const void *check_data, const char *string,
const char *fmt, int n_args, va_list ap)
{
const unsigned long long *pval = check_data;
_check_numbers_template("%llu", pval, string, fmt, n_args, ap);
}
static int __init check_ll(const void *check_data, const char *string,
const char *fmt, int n_args, va_list ap)
{
const long long *pval = check_data;
_check_numbers_template("%lld", pval, string, fmt, n_args, ap);
}
static int __init check_ulong(const void *check_data, const char *string,
const char *fmt, int n_args, va_list ap)
{
const unsigned long *pval = check_data;
_check_numbers_template("%lu", pval, string, fmt, n_args, ap);
}
static int __init check_long(const void *check_data, const char *string,
const char *fmt, int n_args, va_list ap)
{
const long *pval = check_data;
_check_numbers_template("%ld", pval, string, fmt, n_args, ap);
}
static int __init check_uint(const void *check_data, const char *string,
const char *fmt, int n_args, va_list ap)
{
const unsigned int *pval = check_data;
_check_numbers_template("%u", pval, string, fmt, n_args, ap);
}
static int __init check_int(const void *check_data, const char *string,
const char *fmt, int n_args, va_list ap)
{
const int *pval = check_data;
_check_numbers_template("%d", pval, string, fmt, n_args, ap);
}
static int __init check_ushort(const void *check_data, const char *string,
const char *fmt, int n_args, va_list ap)
{
const unsigned short *pval = check_data;
_check_numbers_template("%hu", pval, string, fmt, n_args, ap);
}
static int __init check_short(const void *check_data, const char *string,
const char *fmt, int n_args, va_list ap)
{
const short *pval = check_data;
_check_numbers_template("%hd", pval, string, fmt, n_args, ap);
}
static int __init check_uchar(const void *check_data, const char *string,
const char *fmt, int n_args, va_list ap)
{
const unsigned char *pval = check_data;
_check_numbers_template("%hhu", pval, string, fmt, n_args, ap);
}
static int __init check_char(const void *check_data, const char *string,
const char *fmt, int n_args, va_list ap)
{
const signed char *pval = check_data;
_check_numbers_template("%hhd", pval, string, fmt, n_args, ap);
}
/* Selection of interesting numbers to test, copied from test-kstrtox.c */
static const unsigned long long numbers[] __initconst = {
0x0ULL,
0x1ULL,
0x7fULL,
0x80ULL,
0x81ULL,
0xffULL,
0x100ULL,
0x101ULL,
0x7fffULL,
0x8000ULL,
0x8001ULL,
0xffffULL,
0x10000ULL,
0x10001ULL,
0x7fffffffULL,
0x80000000ULL,
0x80000001ULL,
0xffffffffULL,
0x100000000ULL,
0x100000001ULL,
0x7fffffffffffffffULL,
0x8000000000000000ULL,
0x8000000000000001ULL,
0xfffffffffffffffeULL,
0xffffffffffffffffULL,
};
#define value_representable_in_type(T, val) \
(is_signed_type(T) \
? ((long long)(val) >= type_min(T)) && ((long long)(val) <= type_max(T)) \
: ((unsigned long long)(val) <= type_max(T)))
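/*
 * Illustrative note (not part of the original file): for a 16-bit short,
 * value_representable_in_type(short, 0x7fff) is true while
 * value_representable_in_type(short, 0x8000) is false, so the test loops
 * below only generate strings whose values round-trip through the scanned
 * type.
 */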
#define test_one_number(T, gen_fmt, scan_fmt, val, fn) \
do { \
const T expect_val = (T)(val); \
T result = ~expect_val; /* should be overwritten */ \
\
snprintf(test_buffer, BUF_SIZE, gen_fmt, expect_val); \
_test(fn, &expect_val, test_buffer, "%" scan_fmt, 1, &result); \
} while (0)
#define simple_numbers_loop(T, gen_fmt, scan_fmt, fn) \
do { \
int i; \
\
for (i = 0; i < ARRAY_SIZE(numbers); i++) { \
if (value_representable_in_type(T, numbers[i])) \
test_one_number(T, gen_fmt, scan_fmt, \
numbers[i], fn); \
\
if (value_representable_in_type(T, -numbers[i])) \
test_one_number(T, gen_fmt, scan_fmt, \
-numbers[i], fn); \
} \
} while (0)
static void __init numbers_simple(void)
{
simple_numbers_loop(unsigned long long, "%llu", "llu", check_ull);
simple_numbers_loop(long long, "%lld", "lld", check_ll);
simple_numbers_loop(long long, "%lld", "lli", check_ll);
simple_numbers_loop(unsigned long long, "%llx", "llx", check_ull);
simple_numbers_loop(long long, "%llx", "llx", check_ll);
simple_numbers_loop(long long, "0x%llx", "lli", check_ll);
simple_numbers_loop(unsigned long long, "0x%llx", "llx", check_ull);
simple_numbers_loop(long long, "0x%llx", "llx", check_ll);
simple_numbers_loop(unsigned long, "%lu", "lu", check_ulong);
simple_numbers_loop(long, "%ld", "ld", check_long);
simple_numbers_loop(long, "%ld", "li", check_long);
simple_numbers_loop(unsigned long, "%lx", "lx", check_ulong);
simple_numbers_loop(long, "%lx", "lx", check_long);
simple_numbers_loop(long, "0x%lx", "li", check_long);
simple_numbers_loop(unsigned long, "0x%lx", "lx", check_ulong);
simple_numbers_loop(long, "0x%lx", "lx", check_long);
simple_numbers_loop(unsigned int, "%u", "u", check_uint);
simple_numbers_loop(int, "%d", "d", check_int);
simple_numbers_loop(int, "%d", "i", check_int);
simple_numbers_loop(unsigned int, "%x", "x", check_uint);
simple_numbers_loop(int, "%x", "x", check_int);
simple_numbers_loop(int, "0x%x", "i", check_int);
simple_numbers_loop(unsigned int, "0x%x", "x", check_uint);
simple_numbers_loop(int, "0x%x", "x", check_int);
simple_numbers_loop(unsigned short, "%hu", "hu", check_ushort);
simple_numbers_loop(short, "%hd", "hd", check_short);
simple_numbers_loop(short, "%hd", "hi", check_short);
simple_numbers_loop(unsigned short, "%hx", "hx", check_ushort);
simple_numbers_loop(short, "%hx", "hx", check_short);
simple_numbers_loop(short, "0x%hx", "hi", check_short);
simple_numbers_loop(unsigned short, "0x%hx", "hx", check_ushort);
simple_numbers_loop(short, "0x%hx", "hx", check_short);
simple_numbers_loop(unsigned char, "%hhu", "hhu", check_uchar);
simple_numbers_loop(signed char, "%hhd", "hhd", check_char);
simple_numbers_loop(signed char, "%hhd", "hhi", check_char);
simple_numbers_loop(unsigned char, "%hhx", "hhx", check_uchar);
simple_numbers_loop(signed char, "%hhx", "hhx", check_char);
simple_numbers_loop(signed char, "0x%hhx", "hhi", check_char);
simple_numbers_loop(unsigned char, "0x%hhx", "hhx", check_uchar);
simple_numbers_loop(signed char, "0x%hhx", "hhx", check_char);
}
/*
* This gives a better variety of number "lengths" in a small sample than
* the raw prandom*() functions (Not mathematically rigorous!!).
* Variability of length and value is more important than perfect randomness.
*/
static u32 __init next_test_random(u32 max_bits)
{
u32 n_bits = hweight32(prandom_u32_state(&rnd_state)) % (max_bits + 1);
return prandom_u32_state(&rnd_state) & GENMASK(n_bits, 0);
}
static unsigned long long __init next_test_random_ull(void)
{
u32 rand1 = prandom_u32_state(&rnd_state);
u32 n_bits = (hweight32(rand1) * 3) % 64;
u64 val = (u64)prandom_u32_state(&rnd_state) * rand1;
return val & GENMASK_ULL(n_bits, 0);
}
#define random_for_type(T) \
((T)(sizeof(T) <= sizeof(u32) \
? next_test_random(BITS_PER_TYPE(T)) \
: next_test_random_ull()))
/*
* Define a pattern of negative and positive numbers to ensure we get
* some of both within the small number of samples in a test string.
*/
#define NEGATIVES_PATTERN 0x3246 /* 00110010 01000110 */
#define fill_random_array(arr) \
do { \
unsigned int neg_pattern = NEGATIVES_PATTERN; \
int i; \
\
for (i = 0; i < ARRAY_SIZE(arr); i++, neg_pattern >>= 1) { \
(arr)[i] = random_for_type(typeof((arr)[0])); \
if (is_signed_type(typeof((arr)[0])) && (neg_pattern & 1)) \
(arr)[i] = -(arr)[i]; \
} \
} while (0)
/*
* Convenience wrapper around snprintf() to append at buf_pos in buf,
* updating buf_pos and returning the number of characters appended.
* On error buf_pos is not changed and return value is 0.
*/
static int __init __printf(4, 5)
append_fmt(char *buf, int *buf_pos, int buf_len, const char *val_fmt, ...)
{
va_list ap;
int field_len;
va_start(ap, val_fmt);
field_len = vsnprintf(buf + *buf_pos, buf_len - *buf_pos, val_fmt, ap);
va_end(ap);
if (field_len < 0)
field_len = 0;
*buf_pos += field_len;
return field_len;
}
/*
* Convenience function to append the field delimiter string
* to both the value string and format string buffers.
*/
static void __init append_delim(char *str_buf, int *str_buf_pos, int str_buf_len,
char *fmt_buf, int *fmt_buf_pos, int fmt_buf_len,
const char *delim_str)
{
append_fmt(str_buf, str_buf_pos, str_buf_len, delim_str);
append_fmt(fmt_buf, fmt_buf_pos, fmt_buf_len, delim_str);
}
#define test_array_8(fn, check_data, string, fmt, arr) \
do { \
BUILD_BUG_ON(ARRAY_SIZE(arr) != 8); \
_test(fn, check_data, string, fmt, 8, \
&(arr)[0], &(arr)[1], &(arr)[2], &(arr)[3], \
&(arr)[4], &(arr)[5], &(arr)[6], &(arr)[7]); \
} while (0)
#define numbers_list_8(T, gen_fmt, field_sep, scan_fmt, fn) \
do { \
int i, pos = 0, fmt_pos = 0; \
T expect[8], result[8]; \
\
fill_random_array(expect); \
\
for (i = 0; i < ARRAY_SIZE(expect); i++) { \
if (i != 0) \
append_delim(test_buffer, &pos, BUF_SIZE, \
fmt_buffer, &fmt_pos, BUF_SIZE, \
field_sep); \
\
append_fmt(test_buffer, &pos, BUF_SIZE, gen_fmt, expect[i]); \
append_fmt(fmt_buffer, &fmt_pos, BUF_SIZE, "%%%s", scan_fmt); \
} \
\
test_array_8(fn, expect, test_buffer, fmt_buffer, result); \
} while (0)
#define numbers_list_fix_width(T, gen_fmt, field_sep, width, scan_fmt, fn) \
do { \
char full_fmt[16]; \
\
snprintf(full_fmt, sizeof(full_fmt), "%u%s", width, scan_fmt); \
numbers_list_8(T, gen_fmt, field_sep, full_fmt, fn); \
} while (0)
#define numbers_list_val_width(T, gen_fmt, field_sep, scan_fmt, fn) \
do { \
int i, val_len, pos = 0, fmt_pos = 0; \
T expect[8], result[8]; \
\
fill_random_array(expect); \
\
for (i = 0; i < ARRAY_SIZE(expect); i++) { \
if (i != 0) \
append_delim(test_buffer, &pos, BUF_SIZE, \
fmt_buffer, &fmt_pos, BUF_SIZE, field_sep);\
\
val_len = append_fmt(test_buffer, &pos, BUF_SIZE, gen_fmt, \
expect[i]); \
append_fmt(fmt_buffer, &fmt_pos, BUF_SIZE, \
"%%%u%s", val_len, scan_fmt); \
} \
\
test_array_8(fn, expect, test_buffer, fmt_buffer, result); \
} while (0)
static void __init numbers_list_ll(const char *delim)
{
numbers_list_8(unsigned long long, "%llu", delim, "llu", check_ull);
numbers_list_8(long long, "%lld", delim, "lld", check_ll);
numbers_list_8(long long, "%lld", delim, "lli", check_ll);
numbers_list_8(unsigned long long, "%llx", delim, "llx", check_ull);
numbers_list_8(unsigned long long, "0x%llx", delim, "llx", check_ull);
numbers_list_8(long long, "0x%llx", delim, "lli", check_ll);
}
static void __init numbers_list_l(const char *delim)
{
numbers_list_8(unsigned long, "%lu", delim, "lu", check_ulong);
numbers_list_8(long, "%ld", delim, "ld", check_long);
numbers_list_8(long, "%ld", delim, "li", check_long);
numbers_list_8(unsigned long, "%lx", delim, "lx", check_ulong);
numbers_list_8(unsigned long, "0x%lx", delim, "lx", check_ulong);
numbers_list_8(long, "0x%lx", delim, "li", check_long);
}
static void __init numbers_list_d(const char *delim)
{
numbers_list_8(unsigned int, "%u", delim, "u", check_uint);
numbers_list_8(int, "%d", delim, "d", check_int);
numbers_list_8(int, "%d", delim, "i", check_int);
numbers_list_8(unsigned int, "%x", delim, "x", check_uint);
numbers_list_8(unsigned int, "0x%x", delim, "x", check_uint);
numbers_list_8(int, "0x%x", delim, "i", check_int);
}
static void __init numbers_list_h(const char *delim)
{
numbers_list_8(unsigned short, "%hu", delim, "hu", check_ushort);
numbers_list_8(short, "%hd", delim, "hd", check_short);
numbers_list_8(short, "%hd", delim, "hi", check_short);
numbers_list_8(unsigned short, "%hx", delim, "hx", check_ushort);
numbers_list_8(unsigned short, "0x%hx", delim, "hx", check_ushort);
numbers_list_8(short, "0x%hx", delim, "hi", check_short);
}
static void __init numbers_list_hh(const char *delim)
{
numbers_list_8(unsigned char, "%hhu", delim, "hhu", check_uchar);
numbers_list_8(signed char, "%hhd", delim, "hhd", check_char);
numbers_list_8(signed char, "%hhd", delim, "hhi", check_char);
numbers_list_8(unsigned char, "%hhx", delim, "hhx", check_uchar);
numbers_list_8(unsigned char, "0x%hhx", delim, "hhx", check_uchar);
numbers_list_8(signed char, "0x%hhx", delim, "hhi", check_char);
}
static void __init numbers_list(const char *delim)
{
numbers_list_ll(delim);
numbers_list_l(delim);
numbers_list_d(delim);
numbers_list_h(delim);
numbers_list_hh(delim);
}
static void __init numbers_list_field_width_ll(const char *delim)
{
numbers_list_fix_width(unsigned long long, "%llu", delim, 20, "llu", check_ull);
numbers_list_fix_width(long long, "%lld", delim, 20, "lld", check_ll);
numbers_list_fix_width(long long, "%lld", delim, 20, "lli", check_ll);
numbers_list_fix_width(unsigned long long, "%llx", delim, 16, "llx", check_ull);
numbers_list_fix_width(unsigned long long, "0x%llx", delim, 18, "llx", check_ull);
numbers_list_fix_width(long long, "0x%llx", delim, 18, "lli", check_ll);
}
static void __init numbers_list_field_width_l(const char *delim)
{
#if BITS_PER_LONG == 64
numbers_list_fix_width(unsigned long, "%lu", delim, 20, "lu", check_ulong);
numbers_list_fix_width(long, "%ld", delim, 20, "ld", check_long);
numbers_list_fix_width(long, "%ld", delim, 20, "li", check_long);
numbers_list_fix_width(unsigned long, "%lx", delim, 16, "lx", check_ulong);
numbers_list_fix_width(unsigned long, "0x%lx", delim, 18, "lx", check_ulong);
numbers_list_fix_width(long, "0x%lx", delim, 18, "li", check_long);
#else
numbers_list_fix_width(unsigned long, "%lu", delim, 10, "lu", check_ulong);
numbers_list_fix_width(long, "%ld", delim, 11, "ld", check_long);
numbers_list_fix_width(long, "%ld", delim, 11, "li", check_long);
numbers_list_fix_width(unsigned long, "%lx", delim, 8, "lx", check_ulong);
numbers_list_fix_width(unsigned long, "0x%lx", delim, 10, "lx", check_ulong);
numbers_list_fix_width(long, "0x%lx", delim, 10, "li", check_long);
#endif
}
static void __init numbers_list_field_width_d(const char *delim)
{
numbers_list_fix_width(unsigned int, "%u", delim, 10, "u", check_uint);
numbers_list_fix_width(int, "%d", delim, 11, "d", check_int);
numbers_list_fix_width(int, "%d", delim, 11, "i", check_int);
numbers_list_fix_width(unsigned int, "%x", delim, 8, "x", check_uint);
numbers_list_fix_width(unsigned int, "0x%x", delim, 10, "x", check_uint);
numbers_list_fix_width(int, "0x%x", delim, 10, "i", check_int);
}
static void __init numbers_list_field_width_h(const char *delim)
{
numbers_list_fix_width(unsigned short, "%hu", delim, 5, "hu", check_ushort);
numbers_list_fix_width(short, "%hd", delim, 6, "hd", check_short);
numbers_list_fix_width(short, "%hd", delim, 6, "hi", check_short);
numbers_list_fix_width(unsigned short, "%hx", delim, 4, "hx", check_ushort);
numbers_list_fix_width(unsigned short, "0x%hx", delim, 6, "hx", check_ushort);
numbers_list_fix_width(short, "0x%hx", delim, 6, "hi", check_short);
}
static void __init numbers_list_field_width_hh(const char *delim)
{
numbers_list_fix_width(unsigned char, "%hhu", delim, 3, "hhu", check_uchar);
numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhd", check_char);
numbers_list_fix_width(signed char, "%hhd", delim, 4, "hhi", check_char);
numbers_list_fix_width(unsigned char, "%hhx", delim, 2, "hhx", check_uchar);
numbers_list_fix_width(unsigned char, "0x%hhx", delim, 4, "hhx", check_uchar);
numbers_list_fix_width(signed char, "0x%hhx", delim, 4, "hhi", check_char);
}
/*
* List of numbers separated by delim. Each field width specifier is the
* maximum possible digits for the given type and base.
*/
static void __init numbers_list_field_width_typemax(const char *delim)
{
numbers_list_field_width_ll(delim);
numbers_list_field_width_l(delim);
numbers_list_field_width_d(delim);
numbers_list_field_width_h(delim);
numbers_list_field_width_hh(delim);
}
static void __init numbers_list_field_width_val_ll(const char *delim)
{
numbers_list_val_width(unsigned long long, "%llu", delim, "llu", check_ull);
numbers_list_val_width(long long, "%lld", delim, "lld", check_ll);
numbers_list_val_width(long long, "%lld", delim, "lli", check_ll);
numbers_list_val_width(unsigned long long, "%llx", delim, "llx", check_ull);
numbers_list_val_width(unsigned long long, "0x%llx", delim, "llx", check_ull);
numbers_list_val_width(long long, "0x%llx", delim, "lli", check_ll);
}
static void __init numbers_list_field_width_val_l(const char *delim)
{
numbers_list_val_width(unsigned long, "%lu", delim, "lu", check_ulong);
numbers_list_val_width(long, "%ld", delim, "ld", check_long);
numbers_list_val_width(long, "%ld", delim, "li", check_long);
numbers_list_val_width(unsigned long, "%lx", delim, "lx", check_ulong);
numbers_list_val_width(unsigned long, "0x%lx", delim, "lx", check_ulong);
numbers_list_val_width(long, "0x%lx", delim, "li", check_long);
}
static void __init numbers_list_field_width_val_d(const char *delim)
{
numbers_list_val_width(unsigned int, "%u", delim, "u", check_uint);
numbers_list_val_width(int, "%d", delim, "d", check_int);
numbers_list_val_width(int, "%d", delim, "i", check_int);
numbers_list_val_width(unsigned int, "%x", delim, "x", check_uint);
numbers_list_val_width(unsigned int, "0x%x", delim, "x", check_uint);
numbers_list_val_width(int, "0x%x", delim, "i", check_int);
}
static void __init numbers_list_field_width_val_h(const char *delim)
{
numbers_list_val_width(unsigned short, "%hu", delim, "hu", check_ushort);
numbers_list_val_width(short, "%hd", delim, "hd", check_short);
numbers_list_val_width(short, "%hd", delim, "hi", check_short);
numbers_list_val_width(unsigned short, "%hx", delim, "hx", check_ushort);
numbers_list_val_width(unsigned short, "0x%hx", delim, "hx", check_ushort);
numbers_list_val_width(short, "0x%hx", delim, "hi", check_short);
}
static void __init numbers_list_field_width_val_hh(const char *delim)
{
numbers_list_val_width(unsigned char, "%hhu", delim, "hhu", check_uchar);
numbers_list_val_width(signed char, "%hhd", delim, "hhd", check_char);
numbers_list_val_width(signed char, "%hhd", delim, "hhi", check_char);
numbers_list_val_width(unsigned char, "%hhx", delim, "hhx", check_uchar);
numbers_list_val_width(unsigned char, "0x%hhx", delim, "hhx", check_uchar);
numbers_list_val_width(signed char, "0x%hhx", delim, "hhi", check_char);
}
/*
* List of numbers separated by delim. Each field width specifier is the
* exact length of the corresponding value digits in the string being scanned.
*/
static void __init numbers_list_field_width_val_width(const char *delim)
{
numbers_list_field_width_val_ll(delim);
numbers_list_field_width_val_l(delim);
numbers_list_field_width_val_d(delim);
numbers_list_field_width_val_h(delim);
numbers_list_field_width_val_hh(delim);
}
/*
* Slice a continuous string of digits without field delimiters, containing
* numbers of varying length, using the field width to extract each group
* of digits. For example, the hex values c0,3,bf01,303 would have a
* string representation of "c03bf01303" and can be extracted with "%2x%1x%4x%3x".
*/
static void __init numbers_slice(void)
{
numbers_list_field_width_val_width("");
}
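/*
 * Illustrative sketch only (not part of the original file): the slicing
 * described above written out directly.  With no delimiters in the input,
 * the field widths alone decide where each value ends; the function name
 * is hypothetical and exists purely for demonstration.
 */
static void __init numbers_slice_example_sketch(void)
{
	unsigned int a, b, c, d;

	/* expect a == 0xc0, b == 0x3, c == 0xbf01, d == 0x303 */
	if (sscanf("c03bf01303", "%2x%1x%4x%3x", &a, &b, &c, &d) != 4)
		pr_warn("slice example did not convert 4 fields\n");
}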
#define test_number_prefix(T, str, scan_fmt, expect0, expect1, n_args, fn) \
do { \
const T expect[2] = { expect0, expect1 }; \
T result[2] = { (T)~expect[0], (T)~expect[1] }; \
\
_test(fn, &expect, str, scan_fmt, n_args, &result[0], &result[1]); \
} while (0)
/*
* Number prefix is >= field width.
* Expected behaviour is derived from testing userland sscanf.
*/
static void __init numbers_prefix_overflow(void)
{
/*
* Negative decimal with a field of width 1, should quit scanning
* and return 0.
*/
test_number_prefix(long long, "-1 1", "%1lld %lld", 0, 0, 0, check_ll);
test_number_prefix(long, "-1 1", "%1ld %ld", 0, 0, 0, check_long);
test_number_prefix(int, "-1 1", "%1d %d", 0, 0, 0, check_int);
test_number_prefix(short, "-1 1", "%1hd %hd", 0, 0, 0, check_short);
test_number_prefix(signed char, "-1 1", "%1hhd %hhd", 0, 0, 0, check_char);
test_number_prefix(long long, "-1 1", "%1lli %lli", 0, 0, 0, check_ll);
test_number_prefix(long, "-1 1", "%1li %li", 0, 0, 0, check_long);
test_number_prefix(int, "-1 1", "%1i %i", 0, 0, 0, check_int);
test_number_prefix(short, "-1 1", "%1hi %hi", 0, 0, 0, check_short);
test_number_prefix(signed char, "-1 1", "%1hhi %hhi", 0, 0, 0, check_char);
/*
* 0x prefix in a field of width 1: 0 is a valid digit so should
* convert. Next field scan starts at the 'x' which isn't a digit so
* scan quits with one field converted.
*/
test_number_prefix(unsigned long long, "0xA7", "%1llx%llx", 0, 0, 1, check_ull);
test_number_prefix(unsigned long, "0xA7", "%1lx%lx", 0, 0, 1, check_ulong);
test_number_prefix(unsigned int, "0xA7", "%1x%x", 0, 0, 1, check_uint);
test_number_prefix(unsigned short, "0xA7", "%1hx%hx", 0, 0, 1, check_ushort);
test_number_prefix(unsigned char, "0xA7", "%1hhx%hhx", 0, 0, 1, check_uchar);
test_number_prefix(long long, "0xA7", "%1lli%llx", 0, 0, 1, check_ll);
test_number_prefix(long, "0xA7", "%1li%lx", 0, 0, 1, check_long);
test_number_prefix(int, "0xA7", "%1i%x", 0, 0, 1, check_int);
test_number_prefix(short, "0xA7", "%1hi%hx", 0, 0, 1, check_short);
test_number_prefix(char, "0xA7", "%1hhi%hhx", 0, 0, 1, check_char);
/*
* 0x prefix in a field of width 2 using %x conversion: first field
* converts to 0. Next field scan starts at the character after "0x".
* Both fields will convert.
*/
test_number_prefix(unsigned long long, "0xA7", "%2llx%llx", 0, 0xa7, 2, check_ull);
test_number_prefix(unsigned long, "0xA7", "%2lx%lx", 0, 0xa7, 2, check_ulong);
test_number_prefix(unsigned int, "0xA7", "%2x%x", 0, 0xa7, 2, check_uint);
test_number_prefix(unsigned short, "0xA7", "%2hx%hx", 0, 0xa7, 2, check_ushort);
test_number_prefix(unsigned char, "0xA7", "%2hhx%hhx", 0, 0xa7, 2, check_uchar);
/*
* 0x prefix in a field of width 2 using %i conversion: first field
* converts to 0. Next field scan starts at the character after "0x",
* which will convert if can be interpreted as decimal but will fail
* if it contains any hex digits (since no 0x prefix).
*/
test_number_prefix(long long, "0x67", "%2lli%lli", 0, 67, 2, check_ll);
test_number_prefix(long, "0x67", "%2li%li", 0, 67, 2, check_long);
test_number_prefix(int, "0x67", "%2i%i", 0, 67, 2, check_int);
test_number_prefix(short, "0x67", "%2hi%hi", 0, 67, 2, check_short);
test_number_prefix(char, "0x67", "%2hhi%hhi", 0, 67, 2, check_char);
test_number_prefix(long long, "0xA7", "%2lli%lli", 0, 0, 1, check_ll);
test_number_prefix(long, "0xA7", "%2li%li", 0, 0, 1, check_long);
test_number_prefix(int, "0xA7", "%2i%i", 0, 0, 1, check_int);
test_number_prefix(short, "0xA7", "%2hi%hi", 0, 0, 1, check_short);
test_number_prefix(char, "0xA7", "%2hhi%hhi", 0, 0, 1, check_char);
}
#define _test_simple_strtoxx(T, fn, gen_fmt, expect, base) \
do { \
T got; \
char *endp; \
int len; \
bool fail = false; \
\
total_tests++; \
len = snprintf(test_buffer, BUF_SIZE, gen_fmt, expect); \
got = (fn)(test_buffer, &endp, base); \
pr_debug(#fn "(\"%s\", %d) -> " gen_fmt "\n", test_buffer, base, got); \
if (got != (expect)) { \
fail = true; \
pr_warn(#fn "(\"%s\", %d): got " gen_fmt " expected " gen_fmt "\n", \
test_buffer, base, got, expect); \
} else if (endp != test_buffer + len) { \
fail = true; \
pr_warn(#fn "(\"%s\", %d) startp=0x%px got endp=0x%px expected 0x%px\n", \
test_buffer, base, test_buffer, \
test_buffer + len, endp); \
} \
\
if (fail) \
failed_tests++; \
} while (0)
#define test_simple_strtoxx(T, fn, gen_fmt, base) \
do { \
int i; \
\
for (i = 0; i < ARRAY_SIZE(numbers); i++) { \
_test_simple_strtoxx(T, fn, gen_fmt, (T)numbers[i], base); \
\
if (is_signed_type(T)) \
_test_simple_strtoxx(T, fn, gen_fmt, \
-(T)numbers[i], base); \
} \
} while (0)
static void __init test_simple_strtoull(void)
{
test_simple_strtoxx(unsigned long long, simple_strtoull, "%llu", 10);
test_simple_strtoxx(unsigned long long, simple_strtoull, "%llu", 0);
test_simple_strtoxx(unsigned long long, simple_strtoull, "%llx", 16);
test_simple_strtoxx(unsigned long long, simple_strtoull, "0x%llx", 16);
test_simple_strtoxx(unsigned long long, simple_strtoull, "0x%llx", 0);
}
static void __init test_simple_strtoll(void)
{
test_simple_strtoxx(long long, simple_strtoll, "%lld", 10);
test_simple_strtoxx(long long, simple_strtoll, "%lld", 0);
test_simple_strtoxx(long long, simple_strtoll, "%llx", 16);
test_simple_strtoxx(long long, simple_strtoll, "0x%llx", 16);
test_simple_strtoxx(long long, simple_strtoll, "0x%llx", 0);
}
static void __init test_simple_strtoul(void)
{
test_simple_strtoxx(unsigned long, simple_strtoul, "%lu", 10);
test_simple_strtoxx(unsigned long, simple_strtoul, "%lu", 0);
test_simple_strtoxx(unsigned long, simple_strtoul, "%lx", 16);
test_simple_strtoxx(unsigned long, simple_strtoul, "0x%lx", 16);
test_simple_strtoxx(unsigned long, simple_strtoul, "0x%lx", 0);
}
static void __init test_simple_strtol(void)
{
test_simple_strtoxx(long, simple_strtol, "%ld", 10);
test_simple_strtoxx(long, simple_strtol, "%ld", 0);
test_simple_strtoxx(long, simple_strtol, "%lx", 16);
test_simple_strtoxx(long, simple_strtol, "0x%lx", 16);
test_simple_strtoxx(long, simple_strtol, "0x%lx", 0);
}
/* Selection of common delimiters/separators between numbers in a string. */
static const char * const number_delimiters[] __initconst = {
" ", ":", ",", "-", "/",
};
static void __init test_numbers(void)
{
int i;
/* String containing only one number. */
numbers_simple();
/* String with multiple numbers separated by delimiter. */
for (i = 0; i < ARRAY_SIZE(number_delimiters); i++) {
numbers_list(number_delimiters[i]);
/* Field width may be longer than actual field digits. */
numbers_list_field_width_typemax(number_delimiters[i]);
/* Each field width exactly length of actual field digits. */
numbers_list_field_width_val_width(number_delimiters[i]);
}
/* Slice continuous sequence of digits using field widths. */
numbers_slice();
numbers_prefix_overflow();
}
static void __init selftest(void)
{
test_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
if (!test_buffer)
return;
fmt_buffer = kmalloc(BUF_SIZE, GFP_KERNEL);
if (!fmt_buffer) {
kfree(test_buffer);
return;
}
prandom_seed_state(&rnd_state, 3141592653589793238ULL);
test_numbers();
test_simple_strtoull();
test_simple_strtoll();
test_simple_strtoul();
test_simple_strtol();
kfree(fmt_buffer);
kfree(test_buffer);
}
KSTM_MODULE_LOADERS(test_scanf);
MODULE_AUTHOR("Richard Fitzgerald <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | lib/test_scanf.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Kernel module for testing 'strscpy' family of functions.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <kunit/test.h>
#include <linux/string.h>
/*
* tc() - Run a specific test case.
* @src: Source string, argument to strscpy_pad()
* @count: Size of destination buffer, argument to strscpy_pad()
* @expected: Expected return value from call to strscpy_pad()
* @terminator: 1 if there should be a terminating null byte, 0 otherwise.
* @chars: Number of characters from the src string expected to be
* written to the dst buffer.
* @pad: Number of pad characters expected (in the tail of dst buffer).
* (@pad does not include the null terminator byte.)
*
* Calls strscpy_pad() and verifies the return value and state of the
* destination buffer after the call returns.
*/
static void tc(struct kunit *test, char *src, int count, int expected,
int chars, int terminator, int pad)
{
int nr_bytes_poison;
int max_expected;
int max_count;
int written;
char buf[6];
int index, i;
const char POISON = 'z';
KUNIT_ASSERT_TRUE_MSG(test, src != NULL,
"null source string not supported");
memset(buf, POISON, sizeof(buf));
/* Future proofing test suite, validate args */
max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */
max_expected = count - 1; /* Space for the null */
KUNIT_ASSERT_LE_MSG(test, count, max_count,
"count (%d) is too big (%d) ... aborting", count, max_count);
KUNIT_EXPECT_LE_MSG(test, expected, max_expected,
"expected (%d) is bigger than can possibly be returned (%d)",
expected, max_expected);
written = strscpy_pad(buf, src, count);
KUNIT_ASSERT_EQ(test, written, expected);
if (count && written == -E2BIG) {
KUNIT_ASSERT_EQ_MSG(test, 0, strncmp(buf, src, count - 1),
"buffer state invalid for -E2BIG");
KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
"too big string is not null terminated correctly");
}
for (i = 0; i < chars; i++)
KUNIT_ASSERT_EQ_MSG(test, buf[i], src[i],
"buf[i]==%c != src[i]==%c", buf[i], src[i]);
if (terminator)
KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
"string is not null terminated correctly");
for (i = 0; i < pad; i++) {
index = chars + terminator + i;
KUNIT_ASSERT_EQ_MSG(test, buf[index], '\0',
"padding missing at index: %d", i);
}
nr_bytes_poison = sizeof(buf) - chars - terminator - pad;
for (i = 0; i < nr_bytes_poison; i++) {
index = sizeof(buf) - 1 - i; /* Check from the end back */
KUNIT_ASSERT_EQ_MSG(test, buf[index], POISON,
"poison value missing at index: %d", i);
}
}
static void strscpy_test(struct kunit *test)
{
char dest[8];
/*
* tc() uses a destination buffer of size 6 and needs at
* least 2 characters spare (one for null and one to check for
* overflow). This means we should only call tc() with
* strings up to a maximum of 4 characters long and 'count'
* should not exceed 4. To test with longer strings increase
* the buffer size in tc().
*/
/* tc(test, src, count, expected, chars, terminator, pad) */
tc(test, "a", 0, -E2BIG, 0, 0, 0);
tc(test, "", 0, -E2BIG, 0, 0, 0);
tc(test, "a", 1, -E2BIG, 0, 1, 0);
tc(test, "", 1, 0, 0, 1, 0);
tc(test, "ab", 2, -E2BIG, 1, 1, 0);
tc(test, "a", 2, 1, 1, 1, 0);
tc(test, "", 2, 0, 0, 1, 1);
tc(test, "abc", 3, -E2BIG, 2, 1, 0);
tc(test, "ab", 3, 2, 2, 1, 0);
tc(test, "a", 3, 1, 1, 1, 1);
tc(test, "", 3, 0, 0, 1, 2);
tc(test, "abcd", 4, -E2BIG, 3, 1, 0);
tc(test, "abc", 4, 3, 3, 1, 0);
tc(test, "ab", 4, 2, 2, 1, 1);
tc(test, "a", 4, 1, 1, 1, 2);
tc(test, "", 4, 0, 0, 1, 3);
/* Compile-time-known source strings. */
KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0);
KUNIT_EXPECT_EQ(test, strscpy(dest, "", 3), 0);
KUNIT_EXPECT_EQ(test, strscpy(dest, "", 1), 0);
KUNIT_EXPECT_EQ(test, strscpy(dest, "", 0), -E2BIG);
KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", ARRAY_SIZE(dest)), 5);
KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 3), -E2BIG);
KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 1), -E2BIG);
KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 0), -E2BIG);
KUNIT_EXPECT_EQ(test, strscpy(dest, "This is too long", ARRAY_SIZE(dest)), -E2BIG);
}
static struct kunit_case strscpy_test_cases[] = {
KUNIT_CASE(strscpy_test),
{}
};
static struct kunit_suite strscpy_test_suite = {
.name = "strscpy",
.test_cases = strscpy_test_cases,
};
kunit_test_suite(strscpy_test_suite);
MODULE_AUTHOR("Tobin C. Harding <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | lib/strscpy_kunit.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* ./tools/testing/kunit/kunit.py run is_signed_type [--raw_output]
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <kunit/test.h>
#include <linux/compiler.h>
enum unsigned_enum {
constant_a = 3,
};
enum signed_enum {
constant_b = -1,
constant_c = 2,
};
static void is_signed_type_test(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, is_signed_type(bool), false);
KUNIT_EXPECT_EQ(test, is_signed_type(signed char), true);
KUNIT_EXPECT_EQ(test, is_signed_type(unsigned char), false);
KUNIT_EXPECT_EQ(test, is_signed_type(char), false);
KUNIT_EXPECT_EQ(test, is_signed_type(int), true);
KUNIT_EXPECT_EQ(test, is_signed_type(unsigned int), false);
KUNIT_EXPECT_EQ(test, is_signed_type(long), true);
KUNIT_EXPECT_EQ(test, is_signed_type(unsigned long), false);
KUNIT_EXPECT_EQ(test, is_signed_type(long long), true);
KUNIT_EXPECT_EQ(test, is_signed_type(unsigned long long), false);
KUNIT_EXPECT_EQ(test, is_signed_type(enum unsigned_enum), false);
KUNIT_EXPECT_EQ(test, is_signed_type(enum signed_enum), true);
KUNIT_EXPECT_EQ(test, is_signed_type(void *), false);
KUNIT_EXPECT_EQ(test, is_signed_type(const char *), false);
}
static struct kunit_case is_signed_type_test_cases[] = {
KUNIT_CASE(is_signed_type_test),
{}
};
static struct kunit_suite is_signed_type_test_suite = {
.name = "is_signed_type",
.test_cases = is_signed_type_test_cases,
};
kunit_test_suite(is_signed_type_test_suite);
MODULE_LICENSE("Dual MIT/GPL");
| linux-master | lib/is_signed_type_kunit.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test cases for SL[AOU]B/page initialization at alloc/free time.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#define GARBAGE_INT (0x09A7BA9E)
#define GARBAGE_BYTE (0x9E)
#define REPORT_FAILURES_IN_FN() \
do { \
if (failures) \
pr_info("%s failed %d out of %d times\n", \
__func__, failures, num_tests); \
else \
pr_info("all %d tests in %s passed\n", \
num_tests, __func__); \
} while (0)
/* Calculate the number of uninitialized bytes in the buffer. */
static int __init count_nonzero_bytes(void *ptr, size_t size)
{
int i, ret = 0;
unsigned char *p = (unsigned char *)ptr;
for (i = 0; i < size; i++)
if (p[i])
ret++;
return ret;
}
/* Fill a buffer with garbage, skipping |skip| first bytes. */
static void __init fill_with_garbage_skip(void *ptr, int size, size_t skip)
{
unsigned int *p = (unsigned int *)((char *)ptr + skip);
int i = 0;
WARN_ON(skip > size);
size -= skip;
while (size >= sizeof(*p)) {
p[i] = GARBAGE_INT;
i++;
size -= sizeof(*p);
}
if (size)
memset(&p[i], GARBAGE_BYTE, size);
}
static void __init fill_with_garbage(void *ptr, size_t size)
{
fill_with_garbage_skip(ptr, size, 0);
}
static int __init do_alloc_pages_order(int order, int *total_failures)
{
struct page *page;
void *buf;
size_t size = PAGE_SIZE << order;
page = alloc_pages(GFP_KERNEL, order);
if (!page)
goto err;
buf = page_address(page);
fill_with_garbage(buf, size);
__free_pages(page, order);
page = alloc_pages(GFP_KERNEL, order);
if (!page)
goto err;
buf = page_address(page);
if (count_nonzero_bytes(buf, size))
(*total_failures)++;
fill_with_garbage(buf, size);
__free_pages(page, order);
return 1;
err:
(*total_failures)++;
return 1;
}
/* Test the page allocator by calling alloc_pages with different orders. */
static int __init test_pages(int *total_failures)
{
int failures = 0, num_tests = 0;
int i;
for (i = 0; i <= MAX_ORDER; i++)
num_tests += do_alloc_pages_order(i, &failures);
REPORT_FAILURES_IN_FN();
*total_failures += failures;
return num_tests;
}
/* Test kmalloc() with given parameters. */
static int __init do_kmalloc_size(size_t size, int *total_failures)
{
void *buf;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
goto err;
fill_with_garbage(buf, size);
kfree(buf);
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
goto err;
if (count_nonzero_bytes(buf, size))
(*total_failures)++;
fill_with_garbage(buf, size);
kfree(buf);
return 1;
err:
(*total_failures)++;
return 1;
}
/* Test vmalloc() with given parameters. */
static int __init do_vmalloc_size(size_t size, int *total_failures)
{
void *buf;
buf = vmalloc(size);
if (!buf)
goto err;
fill_with_garbage(buf, size);
vfree(buf);
buf = vmalloc(size);
if (!buf)
goto err;
if (count_nonzero_bytes(buf, size))
(*total_failures)++;
fill_with_garbage(buf, size);
vfree(buf);
return 1;
err:
(*total_failures)++;
return 1;
}
/* Test kmalloc()/vmalloc() by allocating objects of different sizes. */
static int __init test_kvmalloc(int *total_failures)
{
int failures = 0, num_tests = 0;
int i, size;
for (i = 0; i < 20; i++) {
size = 1 << i;
num_tests += do_kmalloc_size(size, &failures);
num_tests += do_vmalloc_size(size, &failures);
}
REPORT_FAILURES_IN_FN();
*total_failures += failures;
return num_tests;
}
#define CTOR_BYTES (sizeof(unsigned int))
#define CTOR_PATTERN (0x41414141)
/* Initialize the first 4 bytes of the object. */
static void test_ctor(void *obj)
{
*(unsigned int *)obj = CTOR_PATTERN;
}
/*
* Check the invariants for the buffer allocated from a slab cache.
* If the cache has a test constructor, the first 4 bytes of the object must
* always remain equal to CTOR_PATTERN.
* If the cache isn't an RCU-typesafe one, or if the allocation is done with
* __GFP_ZERO, then the object contents must be zeroed after allocation.
* If the cache is an RCU-typesafe one, the object contents must never be
* zeroed after the first use. This is checked by memcmp() in
* do_kmem_cache_size().
*/
static bool __init check_buf(void *buf, int size, bool want_ctor,
bool want_rcu, bool want_zero)
{
int bytes;
bool fail = false;
bytes = count_nonzero_bytes(buf, size);
WARN_ON(want_ctor && want_zero);
if (want_zero)
return bytes;
if (want_ctor) {
if (*(unsigned int *)buf != CTOR_PATTERN)
fail = 1;
} else {
if (bytes)
fail = !want_rcu;
}
return fail;
}
#define BULK_SIZE 100
static void *bulk_array[BULK_SIZE];
/*
* Test kmem_cache with given parameters:
* want_ctor - use a constructor;
* want_rcu - use SLAB_TYPESAFE_BY_RCU;
* want_zero - use __GFP_ZERO.
*/
static int __init do_kmem_cache_size(size_t size, bool want_ctor,
bool want_rcu, bool want_zero,
int *total_failures)
{
struct kmem_cache *c;
int iter;
bool fail = false;
gfp_t alloc_mask = GFP_KERNEL | (want_zero ? __GFP_ZERO : 0);
void *buf, *buf_copy;
c = kmem_cache_create("test_cache", size, 1,
want_rcu ? SLAB_TYPESAFE_BY_RCU : 0,
want_ctor ? test_ctor : NULL);
for (iter = 0; iter < 10; iter++) {
/* Do a test of bulk allocations */
if (!want_rcu && !want_ctor) {
int ret;
ret = kmem_cache_alloc_bulk(c, alloc_mask, BULK_SIZE, bulk_array);
if (!ret) {
fail = true;
} else {
int i;
for (i = 0; i < ret; i++)
fail |= check_buf(bulk_array[i], size, want_ctor, want_rcu, want_zero);
kmem_cache_free_bulk(c, ret, bulk_array);
}
}
buf = kmem_cache_alloc(c, alloc_mask);
/* Check that buf is zeroed, if it must be. */
fail |= check_buf(buf, size, want_ctor, want_rcu, want_zero);
fill_with_garbage_skip(buf, size, want_ctor ? CTOR_BYTES : 0);
if (!want_rcu) {
kmem_cache_free(c, buf);
continue;
}
/*
* If this is an RCU cache, use a critical section to ensure we
* can touch objects after they're freed.
*/
rcu_read_lock();
/*
* Copy the buffer to check that it's not wiped on
* free().
*/
buf_copy = kmalloc(size, GFP_ATOMIC);
if (buf_copy)
memcpy(buf_copy, buf, size);
kmem_cache_free(c, buf);
/*
* Check that |buf| is intact after kmem_cache_free().
* |want_zero| is false, because we wrote garbage to
* the buffer already.
*/
fail |= check_buf(buf, size, want_ctor, want_rcu,
false);
if (buf_copy) {
fail |= (bool)memcmp(buf, buf_copy, size);
kfree(buf_copy);
}
rcu_read_unlock();
}
kmem_cache_destroy(c);
*total_failures += fail;
return 1;
}
/*
* Check that the data written to an RCU-allocated object survives
* reallocation.
*/
static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures)
{
struct kmem_cache *c;
void *buf, *buf_contents, *saved_ptr;
void **used_objects;
int i, iter, maxiter = 1024;
bool fail = false;
c = kmem_cache_create("test_cache", size, size, SLAB_TYPESAFE_BY_RCU,
NULL);
buf = kmem_cache_alloc(c, GFP_KERNEL);
if (!buf)
goto out;
saved_ptr = buf;
fill_with_garbage(buf, size);
buf_contents = kmalloc(size, GFP_KERNEL);
if (!buf_contents) {
kmem_cache_free(c, buf);
goto out;
}
used_objects = kmalloc_array(maxiter, sizeof(void *), GFP_KERNEL);
if (!used_objects) {
kmem_cache_free(c, buf);
kfree(buf_contents);
goto out;
}
memcpy(buf_contents, buf, size);
kmem_cache_free(c, buf);
/*
* Run for a fixed number of iterations. If we never hit saved_ptr,
* assume the test passes.
*/
for (iter = 0; iter < maxiter; iter++) {
buf = kmem_cache_alloc(c, GFP_KERNEL);
used_objects[iter] = buf;
if (buf == saved_ptr) {
fail = memcmp(buf_contents, buf, size);
for (i = 0; i <= iter; i++)
kmem_cache_free(c, used_objects[i]);
goto free_out;
}
}
for (iter = 0; iter < maxiter; iter++)
kmem_cache_free(c, used_objects[iter]);
free_out:
kfree(buf_contents);
kfree(used_objects);
out:
kmem_cache_destroy(c);
*total_failures += fail;
return 1;
}
static int __init do_kmem_cache_size_bulk(int size, int *total_failures)
{
struct kmem_cache *c;
int i, iter, maxiter = 1024;
int num, bytes;
bool fail = false;
void *objects[10];
c = kmem_cache_create("test_cache", size, size, 0, NULL);
for (iter = 0; (iter < maxiter) && !fail; iter++) {
num = kmem_cache_alloc_bulk(c, GFP_KERNEL, ARRAY_SIZE(objects),
objects);
for (i = 0; i < num; i++) {
bytes = count_nonzero_bytes(objects[i], size);
if (bytes)
fail = true;
fill_with_garbage(objects[i], size);
}
if (num)
kmem_cache_free_bulk(c, num, objects);
}
kmem_cache_destroy(c);
*total_failures += fail;
return 1;
}
/*
* Test kmem_cache allocation by creating caches of different sizes, with and
* without constructors, with and without SLAB_TYPESAFE_BY_RCU.
*/
static int __init test_kmemcache(int *total_failures)
{
int failures = 0, num_tests = 0;
int i, flags, size;
bool ctor, rcu, zero;
for (i = 0; i < 10; i++) {
size = 8 << i;
for (flags = 0; flags < 8; flags++) {
ctor = flags & 1;
rcu = flags & 2;
zero = flags & 4;
if (ctor & zero)
continue;
num_tests += do_kmem_cache_size(size, ctor, rcu, zero,
&failures);
}
num_tests += do_kmem_cache_size_bulk(size, &failures);
}
REPORT_FAILURES_IN_FN();
*total_failures += failures;
return num_tests;
}
/* Test the behavior of SLAB_TYPESAFE_BY_RCU caches of different sizes. */
static int __init test_rcu_persistent(int *total_failures)
{
int failures = 0, num_tests = 0;
int i, size;
for (i = 0; i < 10; i++) {
size = 8 << i;
num_tests += do_kmem_cache_rcu_persistent(size, &failures);
}
REPORT_FAILURES_IN_FN();
*total_failures += failures;
return num_tests;
}
/*
* Run the tests. Each test function returns the number of executed tests and
* updates |failures| with the number of failed tests.
*/
static int __init test_meminit_init(void)
{
int failures = 0, num_tests = 0;
num_tests += test_pages(&failures);
num_tests += test_kvmalloc(&failures);
num_tests += test_kmemcache(&failures);
num_tests += test_rcu_persistent(&failures);
if (failures == 0)
pr_info("all %d tests passed!\n", num_tests);
else
pr_info("failures: %d out of %d\n", failures, num_tests);
return failures ? -EINVAL : 0;
}
module_init(test_meminit_init);
MODULE_LICENSE("GPL");
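/*
 * Illustrative sketch added by the editor (not part of the original test):
 * the constructor invariant that check_buf() verifies above, in isolation.
 * The cache name, EXAMPLE_PATTERN and example_ctor() are invented for this
 * sketch; the slab calls themselves are the regular kmem_cache API.
 */
#define EXAMPLE_PATTERN 0x41414141

static void example_ctor(void *obj)
{
	/* Runs when the object is first set up, not on every allocation. */
	*(unsigned int *)obj = EXAMPLE_PATTERN;
}

static int __init example_ctor_invariant(void)
{
	struct kmem_cache *c;
	unsigned int *obj;
	int ret = 0;

	c = kmem_cache_create("example_cache", 64, 0, 0, example_ctor);
	if (!c)
		return -ENOMEM;
	obj = kmem_cache_alloc(c, GFP_KERNEL);
	if (obj) {
		/* The first word must still carry the constructor pattern. */
		if (*obj != EXAMPLE_PATTERN)
			ret = -EINVAL;
		kmem_cache_free(c, obj);
	}
	kmem_cache_destroy(c);
	return ret;
}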
| linux-master | lib/test_meminit.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Test cases for functions and macros in bits.h
*/
#include <kunit/test.h>
#include <linux/bits.h>
static void genmask_test(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, 1ul, GENMASK(0, 0));
KUNIT_EXPECT_EQ(test, 3ul, GENMASK(1, 0));
KUNIT_EXPECT_EQ(test, 6ul, GENMASK(2, 1));
KUNIT_EXPECT_EQ(test, 0xFFFFFFFFul, GENMASK(31, 0));
#ifdef TEST_GENMASK_FAILURES
/* these should fail compilation */
GENMASK(0, 1);
GENMASK(0, 10);
GENMASK(9, 10);
#endif
}
static void genmask_ull_test(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, 1ull, GENMASK_ULL(0, 0));
KUNIT_EXPECT_EQ(test, 3ull, GENMASK_ULL(1, 0));
KUNIT_EXPECT_EQ(test, 0x000000ffffe00000ull, GENMASK_ULL(39, 21));
KUNIT_EXPECT_EQ(test, 0xffffffffffffffffull, GENMASK_ULL(63, 0));
#ifdef TEST_GENMASK_FAILURES
/* these should fail compilation */
GENMASK_ULL(0, 1);
GENMASK_ULL(0, 10);
GENMASK_ULL(9, 10);
#endif
}
static void genmask_input_check_test(struct kunit *test)
{
unsigned int x, y;
int z, w;
/* Unknown input */
KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(x, 0));
KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(0, x));
KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(x, y));
KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(z, 0));
KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(0, z));
KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(z, w));
/* Valid input */
KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(1, 1));
KUNIT_EXPECT_EQ(test, 0, GENMASK_INPUT_CHECK(39, 21));
}
static struct kunit_case bits_test_cases[] = {
KUNIT_CASE(genmask_test),
KUNIT_CASE(genmask_ull_test),
KUNIT_CASE(genmask_input_check_test),
{}
};
static struct kunit_suite bits_test_suite = {
.name = "bits-test",
.test_cases = bits_test_cases,
};
kunit_test_suite(bits_test_suite);
MODULE_LICENSE("GPL");
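/*
 * Illustrative sketch added by the editor (not part of the original test):
 * the typical way GENMASK() is consumed - defining a register field mask
 * and pulling the field out with FIELD_GET(). EXAMPLE_FIELD_MASK and
 * example_get_field() are invented names.
 */
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/types.h>

#define EXAMPLE_FIELD_MASK	GENMASK(11, 4)	/* bits 11..4, i.e. 0xff0 */

static inline u8 example_get_field(u32 reg)
{
	/* FIELD_GET() masks the value and shifts the field down to bit 0. */
	return FIELD_GET(EXAMPLE_FIELD_MASK, reg);
}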
| linux-master | lib/test_bits.c |
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
/* Allocate an array of spinlocks to be accessed by a hash. Two arguments
 * determine the number of elements in the array: max_size gives the
 * maximum number of locks to allocate and cpu_mult gives the number of
 * locks per CPU. size - 1 is returned in *locks_mask for use as a hash
 * mask, so the resulting size should be a power of 2.
*/
int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
size_t max_size, unsigned int cpu_mult, gfp_t gfp,
const char *name, struct lock_class_key *key)
{
spinlock_t *tlocks = NULL;
unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
unsigned int nr_pcpus = 2;
#else
unsigned int nr_pcpus = num_possible_cpus();
#endif
if (cpu_mult) {
nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
size = min_t(unsigned int, nr_pcpus * cpu_mult, max_size);
} else {
size = max_size;
}
if (sizeof(spinlock_t) != 0) {
tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);
if (!tlocks)
return -ENOMEM;
for (i = 0; i < size; i++) {
spin_lock_init(&tlocks[i]);
lockdep_init_map(&tlocks[i].dep_map, name, key, 0);
}
}
*locks = tlocks;
*locks_mask = size - 1;
return 0;
}
EXPORT_SYMBOL(__alloc_bucket_spinlocks);
void free_bucket_spinlocks(spinlock_t *locks)
{
kvfree(locks);
}
EXPORT_SYMBOL(free_bucket_spinlocks);
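/*
 * Illustrative sketch added by the editor (not part of the original file):
 * the intended usage pattern - hash a key, then take the spinlock selected
 * by the returned mask. my_locks, my_mask, my_locks_init() and
 * my_bucket_update() are invented names; alloc_bucket_spinlocks() is
 * assumed to be the wrapper macro from <linux/spinlock.h> that supplies
 * the lock name and lockdep key for the function above.
 */
#include <linux/spinlock.h>
#include <linux/jhash.h>

static spinlock_t *my_locks;
static unsigned int my_mask;

static int __init my_locks_init(void)
{
	/* At most 1024 locks, 32 per possible CPU. */
	return alloc_bucket_spinlocks(&my_locks, &my_mask, 1024, 32,
				      GFP_KERNEL);
}

static void my_bucket_update(u32 key)
{
	spinlock_t *lock = &my_locks[jhash_1word(key, 0) & my_mask];

	spin_lock(lock);
	/* ... modify the bucket protected by this lock ... */
	spin_unlock(lock);
}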
| linux-master | lib/bucket_locks.c |
// SPDX-License-Identifier: GPL-2.0
/*
* lib/smp_processor_id.c
*
* DEBUG_PREEMPT variant of smp_processor_id().
*/
#include <linux/export.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
noinstr static
unsigned int check_preemption_disabled(const char *what1, const char *what2)
{
int this_cpu = raw_smp_processor_id();
if (likely(preempt_count()))
goto out;
if (irqs_disabled())
goto out;
if (is_percpu_thread())
goto out;
#ifdef CONFIG_SMP
if (current->migration_disabled)
goto out;
#endif
/*
* It is valid to assume CPU-locality during early bootup:
*/
if (system_state < SYSTEM_SCHEDULING)
goto out;
/*
* Avoid recursion:
*/
preempt_disable_notrace();
instrumentation_begin();
if (!printk_ratelimit())
goto out_enable;
printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
what1, what2, preempt_count() - 1, current->comm, current->pid);
printk("caller is %pS\n", __builtin_return_address(0));
dump_stack();
out_enable:
instrumentation_end();
preempt_enable_no_resched_notrace();
out:
return this_cpu;
}
noinstr unsigned int debug_smp_processor_id(void)
{
return check_preemption_disabled("smp_processor_id", "");
}
EXPORT_SYMBOL(debug_smp_processor_id);
noinstr void __this_cpu_preempt_check(const char *op)
{
check_preemption_disabled("__this_cpu_", op);
}
EXPORT_SYMBOL(__this_cpu_preempt_check);
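/*
 * Illustrative sketch added by the editor (not part of the original file):
 * one pattern that passes the check above - pin the task with get_cpu() so
 * the CPU number stays meaningful, instead of calling smp_processor_id()
 * from preemptible context. example_counter and example_count_on_this_cpu()
 * are invented names.
 */
#include <linux/smp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_counter);

static void example_count_on_this_cpu(void)
{
	int cpu = get_cpu();		/* disables preemption */

	per_cpu(example_counter, cpu)++;
	put_cpu();			/* re-enables preemption */
}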
| linux-master | lib/smp_processor_id.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/types.h>
#include <linux/audit_arch.h>
#include <asm/unistd32.h>
unsigned compat_dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
unsigned compat_read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
unsigned compat_write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
unsigned compat_chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
unsigned compat_signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
int audit_classify_compat_syscall(int abi, unsigned syscall)
{
switch (syscall) {
#ifdef __NR_open
case __NR_open:
return AUDITSC_OPEN;
#endif
#ifdef __NR_openat
case __NR_openat:
return AUDITSC_OPENAT;
#endif
#ifdef __NR_socketcall
case __NR_socketcall:
return AUDITSC_SOCKETCALL;
#endif
case __NR_execve:
return AUDITSC_EXECVE;
#ifdef __NR_openat2
case __NR_openat2:
return AUDITSC_OPENAT2;
#endif
default:
return AUDITSC_COMPAT;
}
}
| linux-master | lib/compat_audit.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Test cases for using floating point operations inside a kernel module.
*
* This tests kernel_fpu_begin() and kernel_fpu_end() functions, especially
* when userland has modified the floating point control registers. The kernel
* state might depend on the state set by the userland thread that was active
* before a syscall.
*
* To facilitate the test, this module registers file
* /sys/kernel/debug/selftest_helpers/test_fpu, which when read causes a
* sequence of floating point operations. If the operations fail, either the
* read returns error status or the kernel crashes.
* If the operations succeed, the read returns "1\n".
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <asm/fpu/api.h>
static int test_fpu(void)
{
/*
* This sequence of operations tests that rounding mode is
* to nearest and that denormal numbers are supported.
* Volatile variables are used to avoid compiler optimizing
* the calculations away.
*/
volatile double a, b, c, d, e, f, g;
a = 4.0;
b = 1e-15;
c = 1e-310;
/* Sets precision flag */
d = a + b;
/* Result depends on rounding mode */
e = a + b / 2;
/* Denormal and very large values */
f = b / c;
/* Depends on denormal support */
g = a + c * f;
if (d > a && e > a && g > a)
return 0;
else
return -EINVAL;
}
static int test_fpu_get(void *data, u64 *val)
{
int status = -EINVAL;
kernel_fpu_begin();
status = test_fpu();
kernel_fpu_end();
*val = 1;
return status;
}
DEFINE_DEBUGFS_ATTRIBUTE(test_fpu_fops, test_fpu_get, NULL, "%lld\n");
static struct dentry *selftest_dir;
static int __init test_fpu_init(void)
{
selftest_dir = debugfs_create_dir("selftest_helpers", NULL);
if (!selftest_dir)
return -ENOMEM;
debugfs_create_file_unsafe("test_fpu", 0444, selftest_dir, NULL,
&test_fpu_fops);
return 0;
}
static void __exit test_fpu_exit(void)
{
debugfs_remove(selftest_dir);
}
module_init(test_fpu_init);
module_exit(test_fpu_exit);
MODULE_LICENSE("GPL");
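/*
 * Illustrative sketch added by the editor (not part of the original test):
 * the general shape of kernel-mode FP usage that this test exercises. All
 * FP math must sit between kernel_fpu_begin() and kernel_fpu_end() and must
 * not sleep in between; the header is the same x86 <asm/fpu/api.h> the test
 * uses. example_scale() is an invented helper.
 */
static int example_scale(int value)
{
	volatile double x;
	int result;

	kernel_fpu_begin();
	x = (double)value * 1.5;
	result = (int)x;
	kernel_fpu_end();

	return result;
}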
| linux-master | lib/test_fpu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* rcuref - A scalable reference count implementation for RCU managed objects
*
* rcuref is provided to replace open coded reference count implementations
 * based on atomic_t. It explicitly protects RCU managed objects which can
* be visible even after the last reference has been dropped and the object
* is heading towards destruction.
*
* A common usage pattern is:
*
* get()
* rcu_read_lock();
* p = get_ptr();
* if (p && !atomic_inc_not_zero(&p->refcnt))
* p = NULL;
* rcu_read_unlock();
* return p;
*
* put()
 *	if (!atomic_dec_return(&p->refcnt)) {
 *		remove_ptr(p);
 *		kfree_rcu(p, rcu);
* }
*
* atomic_inc_not_zero() is implemented with a try_cmpxchg() loop which has
* O(N^2) behaviour under contention with N concurrent operations.
*
* rcuref uses atomic_add_negative_relaxed() for the fast path, which scales
* better under contention.
*
* Why not refcount?
* =================
*
* In principle it should be possible to make refcount use the rcuref
* scheme, but the destruction race described below cannot be prevented
* unless the protected object is RCU managed.
*
* Theory of operation
* ===================
*
* rcuref uses an unsigned integer reference counter. As long as the
* counter value is greater than or equal to RCUREF_ONEREF and not larger
* than RCUREF_MAXREF the reference is alive:
*
 *   ONEREF       MAXREF       SATURATED    RELEASED     DEAD         NOREF
 *   0x00000000   0x7FFFFFFF   0xA0000000   0xC0000000   0xE0000000   0xFFFFFFFF
 *
 *   0x00000000 - 0x7FFFFFFF   valid zone       (ONEREF ... MAXREF)
 *   0x80000000 - 0xBFFFFFFF   saturation zone  (centered on SATURATED)
 *   0xC0000000 - 0xFFFFFFFF   dead zone        (RELEASED ... NOREF, centered on DEAD)
*
* The get() and put() operations do unconditional increments and
* decrements. The result is checked after the operation. This optimizes
* for the fast path.
*
* If the reference count is saturated or dead, then the increments and
* decrements are not harmful as the reference count still stays in the
 * respective zones and is always set back to SATURATED or DEAD, respectively. The
* zones have room for 2^28 racing operations in each direction, which
* makes it practically impossible to escape the zones.
*
* Once the last reference is dropped the reference count becomes
* RCUREF_NOREF which forces rcuref_put() into the slowpath operation. The
* slowpath then tries to set the reference count from RCUREF_NOREF to
* RCUREF_DEAD via a cmpxchg(). This opens a small window where a
* concurrent rcuref_get() can acquire the reference count and bring it
* back to RCUREF_ONEREF or even drop the reference again and mark it DEAD.
*
* If the cmpxchg() succeeds then a concurrent rcuref_get() will result in
* DEAD + 1, which is inside the dead zone. If that happens the reference
* count is put back to DEAD.
*
* The actual race is possible due to the unconditional increment and
* decrements in rcuref_get() and rcuref_put():
*
* T1 T2
* get() put()
* if (atomic_add_negative(-1, &ref->refcnt))
* succeeds-> atomic_cmpxchg(&ref->refcnt, NOREF, DEAD);
*
* atomic_add_negative(1, &ref->refcnt); <- Elevates refcount to DEAD + 1
*
* As the result of T1's add is negative, the get() goes into the slow path
* and observes refcnt being in the dead zone which makes the operation fail.
*
* Possible critical states:
*
* Context Counter References Operation
* T1 0 1 init()
* T2 1 2 get()
* T1 0 1 put()
* T2 -1 0 put() tries to mark dead
* T1 0 1 get()
* T2 0 1 put() mark dead fails
* T1 -1 0 put() tries to mark dead
* T1 DEAD 0 put() mark dead succeeds
* T2 DEAD+1 0 get() fails and puts it back to DEAD
*
* Of course there are more complex scenarios, but the above illustrates
* the working principle. The rest is left to the imagination of the
* reader.
*
* Deconstruction race
* ===================
*
* The release operation must be protected by prohibiting a grace period in
* order to prevent a possible use after free:
*
* T1 T2
* put() get()
* // ref->refcnt = ONEREF
* if (!atomic_add_negative(-1, &ref->refcnt))
* return false; <- Not taken
*
* // ref->refcnt == NOREF
* --> preemption
* // Elevates ref->refcnt to ONEREF
* if (!atomic_add_negative(1, &ref->refcnt))
* return true; <- taken
*
* if (put(&p->ref)) { <-- Succeeds
* remove_pointer(p);
* kfree_rcu(p, rcu);
* }
*
* RCU grace period ends, object is freed
*
* atomic_cmpxchg(&ref->refcnt, NOREF, DEAD); <- UAF
*
* This is prevented by disabling preemption around the put() operation as
* that's in most kernel configurations cheaper than a rcu_read_lock() /
* rcu_read_unlock() pair and in many cases even a NOOP. In any case it
* prevents the grace period which keeps the object alive until all put()
* operations complete.
*
* Saturation protection
* =====================
*
* The reference count has a saturation limit RCUREF_MAXREF (INT_MAX).
 * Once this is exceeded the reference count becomes stale by setting it
* to RCUREF_SATURATED, which will cause a memory leak, but it prevents
* wrap arounds which obviously cause worse problems than a memory
* leak. When saturation is reached a warning is emitted.
*
* Race conditions
* ===============
*
* All reference count increment/decrement operations are unconditional and
* only verified after the fact. This optimizes for the good case and takes
* the occasional race vs. a dead or already saturated refcount into
 * account. The saturation and dead zones are large enough to accommodate
* for that.
*
* Memory ordering
* ===============
*
* Memory ordering rules are slightly relaxed wrt regular atomic_t functions
* and provide only what is strictly required for refcounts.
*
* The increments are fully relaxed; these will not provide ordering. The
* rationale is that whatever is used to obtain the object to increase the
* reference count on will provide the ordering. For locked data
 * structures, it's the lock acquire; for RCU/lockless data structures it's
* the dependent load.
*
* rcuref_get() provides a control dependency ordering future stores which
* ensures that the object is not modified when acquiring a reference
* fails.
*
* rcuref_put() provides release order, i.e. all prior loads and stores
* will be issued before. It also provides a control dependency ordering
* against the subsequent destruction of the object.
*
* If rcuref_put() successfully dropped the last reference and marked the
* object DEAD it also provides acquire ordering.
*/
#include <linux/export.h>
#include <linux/rcuref.h>
/**
* rcuref_get_slowpath - Slowpath of rcuref_get()
* @ref: Pointer to the reference count
*
* Invoked when the reference count is outside of the valid zone.
*
* Return:
* False if the reference count was already marked dead
*
* True if the reference count is saturated, which prevents the
* object from being deconstructed ever.
*/
bool rcuref_get_slowpath(rcuref_t *ref)
{
unsigned int cnt = atomic_read(&ref->refcnt);
/*
* If the reference count was already marked dead, undo the
* increment so it stays in the middle of the dead zone and return
* fail.
*/
if (cnt >= RCUREF_RELEASED) {
atomic_set(&ref->refcnt, RCUREF_DEAD);
return false;
}
/*
* If it was saturated, warn and mark it so. In case the increment
* was already on a saturated value restore the saturation
* marker. This keeps it in the middle of the saturation zone and
* prevents the reference count from overflowing. This leaks the
* object memory, but prevents the obvious reference count overflow
* damage.
*/
if (WARN_ONCE(cnt > RCUREF_MAXREF, "rcuref saturated - leaking memory"))
atomic_set(&ref->refcnt, RCUREF_SATURATED);
return true;
}
EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
/**
* rcuref_put_slowpath - Slowpath of __rcuref_put()
* @ref: Pointer to the reference count
*
* Invoked when the reference count is outside of the valid zone.
*
* Return:
* True if this was the last reference with no future references
* possible. This signals the caller that it can safely schedule the
* object, which is protected by the reference counter, for
* deconstruction.
*
* False if there are still active references or the put() raced
* with a concurrent get()/put() pair. Caller is not allowed to
* deconstruct the protected object.
*/
bool rcuref_put_slowpath(rcuref_t *ref)
{
unsigned int cnt = atomic_read(&ref->refcnt);
/* Did this drop the last reference? */
if (likely(cnt == RCUREF_NOREF)) {
/*
* Carefully try to set the reference count to RCUREF_DEAD.
*
* This can fail if a concurrent get() operation has
* elevated it again or the corresponding put() even marked
* it dead already. Both are valid situations and do not
* require a retry. If this fails the caller is not
* allowed to deconstruct the object.
*/
if (atomic_cmpxchg_release(&ref->refcnt, RCUREF_NOREF, RCUREF_DEAD) != RCUREF_NOREF)
return false;
/*
* The caller can safely schedule the object for
* deconstruction. Provide acquire ordering.
*/
smp_acquire__after_ctrl_dep();
return true;
}
/*
* If the reference count was already in the dead zone, then this
* put() operation is imbalanced. Warn, put the reference count back to
* DEAD and tell the caller to not deconstruct the object.
*/
if (WARN_ONCE(cnt >= RCUREF_RELEASED, "rcuref - imbalanced put()")) {
atomic_set(&ref->refcnt, RCUREF_DEAD);
return false;
}
/*
* This is a put() operation on a saturated refcount. Restore the
* mean saturation value and tell the caller to not deconstruct the
* object.
*/
if (cnt > RCUREF_MAXREF)
atomic_set(&ref->refcnt, RCUREF_SATURATED);
return false;
}
EXPORT_SYMBOL_GPL(rcuref_put_slowpath);
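/*
 * Illustrative sketch added by the editor (not part of the original file):
 * the lookup/release pattern from the comment at the top of this file,
 * rewritten with the rcuref API. struct obj, the_obj, obj_get() and
 * obj_put() are invented names, and the object is assumed to have been
 * published with its count initialized via rcuref_init(&p->refcnt, 1).
 */
#include <linux/rcuref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	rcuref_t refcnt;
	struct rcu_head rcu;
	/* ... payload ... */
};

static struct obj __rcu *the_obj;

static struct obj *obj_get(void)
{
	struct obj *p;

	rcu_read_lock();
	p = rcu_dereference(the_obj);
	/* rcuref_get() fails once the count has entered the dead zone. */
	if (p && !rcuref_get(&p->refcnt))
		p = NULL;
	rcu_read_unlock();
	return p;
}

static void obj_put(struct obj *p)
{
	/* True only for the final put; unpublish and free via RCU. */
	if (rcuref_put(&p->refcnt)) {
		RCU_INIT_POINTER(the_obj, NULL);
		kfree_rcu(p, rcu);
	}
}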
| linux-master | lib/rcuref.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <kunit/test.h>
#include <linux/kernel.h>
#include <linux/list_sort.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/random.h>
/*
* The pattern of set bits in the list length determines which cases
* are hit in list_sort().
*/
#define TEST_LIST_LEN (512+128+2) /* not including head */
#define TEST_POISON1 0xDEADBEEF
#define TEST_POISON2 0xA324354C
struct debug_el {
unsigned int poison1;
struct list_head list;
unsigned int poison2;
int value;
unsigned int serial;
};
static void check(struct kunit *test, struct debug_el *ela, struct debug_el *elb)
{
struct debug_el **elts = test->priv;
KUNIT_EXPECT_LT_MSG(test, ela->serial, (unsigned int)TEST_LIST_LEN, "incorrect serial");
KUNIT_EXPECT_LT_MSG(test, elb->serial, (unsigned int)TEST_LIST_LEN, "incorrect serial");
KUNIT_EXPECT_PTR_EQ_MSG(test, elts[ela->serial], ela, "phantom element");
KUNIT_EXPECT_PTR_EQ_MSG(test, elts[elb->serial], elb, "phantom element");
KUNIT_EXPECT_EQ_MSG(test, ela->poison1, TEST_POISON1, "bad poison");
KUNIT_EXPECT_EQ_MSG(test, ela->poison2, TEST_POISON2, "bad poison");
KUNIT_EXPECT_EQ_MSG(test, elb->poison1, TEST_POISON1, "bad poison");
KUNIT_EXPECT_EQ_MSG(test, elb->poison2, TEST_POISON2, "bad poison");
}
/* `priv` is the test pointer so check() can fail the test if the list is invalid. */
static int cmp(void *priv, const struct list_head *a, const struct list_head *b)
{
struct debug_el *ela, *elb;
ela = container_of(a, struct debug_el, list);
elb = container_of(b, struct debug_el, list);
check(priv, ela, elb);
return ela->value - elb->value;
}
static void list_sort_test(struct kunit *test)
{
int i, count = 1;
struct debug_el *el, **elts;
struct list_head *cur;
LIST_HEAD(head);
elts = kunit_kcalloc(test, TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elts);
test->priv = elts;
for (i = 0; i < TEST_LIST_LEN; i++) {
el = kunit_kmalloc(test, sizeof(*el), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, el);
/* force some equivalencies */
el->value = get_random_u32_below(TEST_LIST_LEN / 3);
el->serial = i;
el->poison1 = TEST_POISON1;
el->poison2 = TEST_POISON2;
elts[i] = el;
list_add_tail(&el->list, &head);
}
list_sort(test, &head, cmp);
for (cur = head.next; cur->next != &head; cur = cur->next) {
struct debug_el *el1;
int cmp_result;
KUNIT_ASSERT_PTR_EQ_MSG(test, cur->next->prev, cur,
"list is corrupted");
cmp_result = cmp(test, cur, cur->next);
KUNIT_ASSERT_LE_MSG(test, cmp_result, 0, "list is not sorted");
el = container_of(cur, struct debug_el, list);
el1 = container_of(cur->next, struct debug_el, list);
if (cmp_result == 0) {
KUNIT_ASSERT_LE_MSG(test, el->serial, el1->serial,
"order of equivalent elements not preserved");
}
check(test, el, el1);
count++;
}
KUNIT_EXPECT_PTR_EQ_MSG(test, head.prev, cur, "list is corrupted");
KUNIT_EXPECT_EQ_MSG(test, count, TEST_LIST_LEN,
"list length changed after sorting!");
}
static struct kunit_case list_sort_cases[] = {
KUNIT_CASE(list_sort_test),
{}
};
static struct kunit_suite list_sort_suite = {
.name = "list_sort",
.test_cases = list_sort_cases,
};
kunit_test_suites(&list_sort_suite);
MODULE_LICENSE("GPL");
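/*
 * Illustrative sketch added by the editor (not part of the original test):
 * the minimal shape of a list_sort() caller, without the poison/serial
 * bookkeeping the test adds. struct item, item_cmp() and sort_items() are
 * invented names.
 */
#include <linux/list.h>
#include <linux/list_sort.h>

struct item {
	struct list_head node;
	int key;
};

static int item_cmp(void *priv, const struct list_head *a,
		    const struct list_head *b)
{
	const struct item *ia = container_of(a, struct item, node);
	const struct item *ib = container_of(b, struct item, node);

	/* <0, 0, >0 like memcmp(); equal elements keep their input order. */
	return ia->key - ib->key;
}

static void sort_items(struct list_head *head)
{
	/* priv is passed through to the compare callback; unused here. */
	list_sort(NULL, head, item_cmp);
}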
| linux-master | lib/test_list_sort.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/once.h>
#include <linux/random.h>
#include <linux/module.h>
struct once_work {
struct work_struct work;
struct static_key_true *key;
struct module *module;
};
static void once_deferred(struct work_struct *w)
{
struct once_work *work;
work = container_of(w, struct once_work, work);
BUG_ON(!static_key_enabled(work->key));
static_branch_disable(work->key);
module_put(work->module);
kfree(work);
}
static void once_disable_jump(struct static_key_true *key, struct module *mod)
{
struct once_work *w;
w = kmalloc(sizeof(*w), GFP_ATOMIC);
if (!w)
return;
INIT_WORK(&w->work, once_deferred);
w->key = key;
w->module = mod;
__module_get(mod);
schedule_work(&w->work);
}
static DEFINE_SPINLOCK(once_lock);
bool __do_once_start(bool *done, unsigned long *flags)
__acquires(once_lock)
{
spin_lock_irqsave(&once_lock, *flags);
if (*done) {
spin_unlock_irqrestore(&once_lock, *flags);
/* Keep sparse happy by restoring an even lock count on
* this lock. In case we return here, we don't call into
* __do_once_done but return early in the DO_ONCE() macro.
*/
__acquire(once_lock);
return false;
}
return true;
}
EXPORT_SYMBOL(__do_once_start);
void __do_once_done(bool *done, struct static_key_true *once_key,
unsigned long *flags, struct module *mod)
__releases(once_lock)
{
*done = true;
spin_unlock_irqrestore(&once_lock, *flags);
once_disable_jump(once_key, mod);
}
EXPORT_SYMBOL(__do_once_done);
static DEFINE_MUTEX(once_mutex);
bool __do_once_sleepable_start(bool *done)
__acquires(once_mutex)
{
mutex_lock(&once_mutex);
if (*done) {
mutex_unlock(&once_mutex);
/* Keep sparse happy by restoring an even lock count on
* this mutex. In case we return here, we don't call into
* __do_once_done but return early in the DO_ONCE_SLEEPABLE() macro.
*/
__acquire(once_mutex);
return false;
}
return true;
}
EXPORT_SYMBOL(__do_once_sleepable_start);
void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
struct module *mod)
__releases(once_mutex)
{
*done = true;
mutex_unlock(&once_mutex);
once_disable_jump(once_key, mod);
}
EXPORT_SYMBOL(__do_once_sleepable_done);
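/*
 * Illustrative sketch added by the editor (not part of the original file):
 * what the machinery above is used for. DO_ONCE()-based wrappers run their
 * function a single time and then patch the static branch off, so later
 * calls are nearly free. example_hash_secret and example_keyed_hash() are
 * invented names; get_random_once() is the stock wrapper from
 * <linux/once.h>.
 */
#include <linux/once.h>
#include <linux/random.h>
#include <linux/jhash.h>

static u32 example_hash_secret;

static u32 example_keyed_hash(u32 val)
{
	/* The first caller seeds the secret; everyone else skips this. */
	get_random_once(&example_hash_secret, sizeof(example_hash_secret));
	return jhash_1word(val, example_hash_secret);
}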
| linux-master | lib/once.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test cases for <linux/hash.h> and <linux/stringhash.h>
* This just verifies that various ways of computing a hash
* produce the same thing and, for cases where a k-bit hash
* value is requested, is of the requested size.
*
* We fill a buffer with a 255-byte null-terminated string,
* and use both full_name_hash() and hashlen_string() to hash the
* substrings from i to j, where 0 <= i < j < 256.
*
* The returned values are used to check that __hash_32() and
* __hash_32_generic() compute the same thing. Likewise hash_32()
* and hash_64().
*/
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/stringhash.h>
#include <kunit/test.h>
/* 32-bit XORSHIFT generator. Seed must not be zero. */
static u32 __attribute_const__
xorshift(u32 seed)
{
seed ^= seed << 13;
seed ^= seed >> 17;
seed ^= seed << 5;
return seed;
}
/* Given a non-zero x, returns a non-zero byte. */
static u8 __attribute_const__
mod255(u32 x)
{
x = (x & 0xffff) + (x >> 16); /* 1 <= x <= 0x1fffe */
x = (x & 0xff) + (x >> 8); /* 1 <= x <= 0x2fd */
x = (x & 0xff) + (x >> 8); /* 1 <= x <= 0x100 */
x = (x & 0xff) + (x >> 8); /* 1 <= x <= 0xff */
return x;
}
/* Fill the buffer with non-zero bytes. */
static void fill_buf(char *buf, size_t len, u32 seed)
{
size_t i;
for (i = 0; i < len; i++) {
seed = xorshift(seed);
buf[i] = mod255(seed);
}
}
/* Holds most testing variables for the int test. */
struct test_hash_params {
/* Pointer to integer to be hashed. */
unsigned long long *h64;
/* Low 32-bits of integer to be hashed. */
u32 h0;
/* Arch-specific hash result. */
u32 h1;
/* Generic hash result. */
u32 h2;
/* ORed hashes of given size (in bits). */
u32 (*hash_or)[33];
};
#ifdef HAVE_ARCH__HASH_32
static void
test_int__hash_32(struct kunit *test, struct test_hash_params *params)
{
params->hash_or[1][0] |= params->h2 = __hash_32_generic(params->h0);
#if HAVE_ARCH__HASH_32 == 1
KUNIT_EXPECT_EQ_MSG(test, params->h1, params->h2,
"__hash_32(%#x) = %#x != __hash_32_generic() = %#x",
params->h0, params->h1, params->h2);
#endif
}
#endif
#ifdef HAVE_ARCH_HASH_64
static void
test_int_hash_64(struct kunit *test, struct test_hash_params *params, u32 const *m, int *k)
{
params->h2 = hash_64_generic(*params->h64, *k);
#if HAVE_ARCH_HASH_64 == 1
KUNIT_EXPECT_EQ_MSG(test, params->h1, params->h2,
"hash_64(%#llx, %d) = %#x != hash_64_generic() = %#x",
*params->h64, *k, params->h1, params->h2);
#else
KUNIT_EXPECT_LE_MSG(test, params->h1, params->h2,
"hash_64_generic(%#llx, %d) = %#x > %#x",
*params->h64, *k, params->h1, *m);
#endif
}
#endif
/*
* Test the various integer hash functions. h64 (or its low-order bits)
* is the integer to hash. hash_or accumulates the OR of the hash values,
* which are later checked to see that they cover all the requested bits.
*
* Because these functions (as opposed to the string hashes) are all
* inline, the code being tested is actually in the module, and you can
* recompile and re-test the module without rebooting.
*/
static void
test_int_hash(struct kunit *test, unsigned long long h64, u32 hash_or[2][33])
{
int k;
struct test_hash_params params = { &h64, (u32)h64, 0, 0, hash_or };
/* Test __hash32 */
hash_or[0][0] |= params.h1 = __hash_32(params.h0);
#ifdef HAVE_ARCH__HASH_32
test_int__hash_32(test, ¶ms);
#endif
/* Test k = 1..32 bits */
for (k = 1; k <= 32; k++) {
u32 const m = ((u32)2 << (k-1)) - 1; /* Low k bits set */
/* Test hash_32 */
hash_or[0][k] |= params.h1 = hash_32(params.h0, k);
KUNIT_EXPECT_LE_MSG(test, params.h1, m,
"hash_32(%#x, %d) = %#x > %#x",
params.h0, k, params.h1, m);
/* Test hash_64 */
hash_or[1][k] |= params.h1 = hash_64(h64, k);
KUNIT_EXPECT_LE_MSG(test, params.h1, m,
"hash_64(%#llx, %d) = %#x > %#x",
h64, k, params.h1, m);
#ifdef HAVE_ARCH_HASH_64
test_int_hash_64(test, ¶ms, &m, &k);
#endif
}
}
#define SIZE 256 /* Run time is cubic in SIZE */
static void test_string_or(struct kunit *test)
{
char buf[SIZE+1];
u32 string_or = 0;
int i, j;
fill_buf(buf, SIZE, 1);
/* Test every possible non-empty substring in the buffer. */
for (j = SIZE; j > 0; --j) {
buf[j] = '\0';
for (i = 0; i <= j; i++) {
u32 h0 = full_name_hash(buf+i, buf+i, j-i);
string_or |= h0;
} /* i */
} /* j */
/* The OR of all the hash values should cover all the bits */
KUNIT_EXPECT_EQ_MSG(test, string_or, -1u,
"OR of all string hash results = %#x != %#x",
string_or, -1u);
}
static void test_hash_or(struct kunit *test)
{
char buf[SIZE+1];
u32 hash_or[2][33] = { { 0, } };
unsigned long long h64 = 0;
int i, j;
fill_buf(buf, SIZE, 1);
/* Test every possible non-empty substring in the buffer. */
for (j = SIZE; j > 0; --j) {
buf[j] = '\0';
for (i = 0; i <= j; i++) {
u64 hashlen = hashlen_string(buf+i, buf+i);
u32 h0 = full_name_hash(buf+i, buf+i, j-i);
/* Check that hashlen_string gets the length right */
KUNIT_EXPECT_EQ_MSG(test, hashlen_len(hashlen), j-i,
"hashlen_string(%d..%d) returned length %u, expected %d",
i, j, hashlen_len(hashlen), j-i);
/* Check that the hashes match */
KUNIT_EXPECT_EQ_MSG(test, hashlen_hash(hashlen), h0,
"hashlen_string(%d..%d) = %08x != full_name_hash() = %08x",
i, j, hashlen_hash(hashlen), h0);
h64 = h64 << 32 | h0; /* For use with hash_64 */
test_int_hash(test, h64, hash_or);
} /* i */
} /* j */
KUNIT_EXPECT_EQ_MSG(test, hash_or[0][0], -1u,
"OR of all __hash_32 results = %#x != %#x",
hash_or[0][0], -1u);
#ifdef HAVE_ARCH__HASH_32
#if HAVE_ARCH__HASH_32 != 1 /* Test is pointless if results match */
KUNIT_EXPECT_EQ_MSG(test, hash_or[1][0], -1u,
"OR of all __hash_32_generic results = %#x != %#x",
hash_or[1][0], -1u);
#endif
#endif
/* Likewise for all the i-bit hash values */
for (i = 1; i <= 32; i++) {
u32 const m = ((u32)2 << (i-1)) - 1; /* Low i bits set */
KUNIT_EXPECT_EQ_MSG(test, hash_or[0][i], m,
"OR of all hash_32(%d) results = %#x (%#x expected)",
i, hash_or[0][i], m);
KUNIT_EXPECT_EQ_MSG(test, hash_or[1][i], m,
"OR of all hash_64(%d) results = %#x (%#x expected)",
i, hash_or[1][i], m);
}
}
static struct kunit_case hash_test_cases[] __refdata = {
KUNIT_CASE(test_string_or),
KUNIT_CASE(test_hash_or),
{}
};
static struct kunit_suite hash_test_suite = {
.name = "hash",
.test_cases = hash_test_cases,
};
kunit_test_suite(hash_test_suite);
MODULE_LICENSE("GPL");
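/*
 * Illustrative sketch added by the editor (not part of the original test):
 * the property the k-bit checks above rely on - hash_long()/hash_32()
 * fold a value into exactly 'bits' bits, which makes the result directly
 * usable as a bucket index. EXAMPLE_HASH_BITS and example_bucket() are
 * invented names.
 */
#include <linux/hash.h>

#define EXAMPLE_HASH_BITS	7	/* 128 buckets */

static inline unsigned int example_bucket(unsigned long key)
{
	/* Guaranteed to be < (1 << EXAMPLE_HASH_BITS). */
	return hash_long(key, EXAMPLE_HASH_BITS);
}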
| linux-master | lib/test_hash.c |
/*
* Copyright 2006, Red Hat, Inc., Dave Jones
* Released under the General Public License (GPL).
*
* This file contains the linked list validation and error reporting for
* LIST_HARDENED and DEBUG_LIST.
*/
#include <linux/export.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/rculist.h>
/*
* Check that the data structures for the list manipulations are reasonably
* valid. Failures here indicate memory corruption (and possibly an exploit
* attempt).
*/
__list_valid_slowpath
bool __list_add_valid_or_report(struct list_head *new, struct list_head *prev,
struct list_head *next)
{
if (CHECK_DATA_CORRUPTION(prev == NULL,
"list_add corruption. prev is NULL.\n") ||
CHECK_DATA_CORRUPTION(next == NULL,
"list_add corruption. next is NULL.\n") ||
CHECK_DATA_CORRUPTION(next->prev != prev,
"list_add corruption. next->prev should be prev (%px), but was %px. (next=%px).\n",
prev, next->prev, next) ||
CHECK_DATA_CORRUPTION(prev->next != next,
"list_add corruption. prev->next should be next (%px), but was %px. (prev=%px).\n",
next, prev->next, prev) ||
CHECK_DATA_CORRUPTION(new == prev || new == next,
"list_add double add: new=%px, prev=%px, next=%px.\n",
new, prev, next))
return false;
return true;
}
EXPORT_SYMBOL(__list_add_valid_or_report);
__list_valid_slowpath
bool __list_del_entry_valid_or_report(struct list_head *entry)
{
struct list_head *prev, *next;
prev = entry->prev;
next = entry->next;
if (CHECK_DATA_CORRUPTION(next == NULL,
"list_del corruption, %px->next is NULL\n", entry) ||
CHECK_DATA_CORRUPTION(prev == NULL,
"list_del corruption, %px->prev is NULL\n", entry) ||
CHECK_DATA_CORRUPTION(next == LIST_POISON1,
"list_del corruption, %px->next is LIST_POISON1 (%px)\n",
entry, LIST_POISON1) ||
CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
"list_del corruption, %px->prev is LIST_POISON2 (%px)\n",
entry, LIST_POISON2) ||
CHECK_DATA_CORRUPTION(prev->next != entry,
"list_del corruption. prev->next should be %px, but was %px. (prev=%px)\n",
entry, prev->next, prev) ||
CHECK_DATA_CORRUPTION(next->prev != entry,
"list_del corruption. next->prev should be %px, but was %px. (next=%px)\n",
entry, next->prev, next))
return false;
return true;
}
EXPORT_SYMBOL(__list_del_entry_valid_or_report);
| linux-master | lib/list_debug.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 Intel Corporation
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>
/* a tiny module only meant to test
*
* set/clear_bit
* get_count_order/long
*/
/* use an enum because that's the most common BITMAP usage */
enum bitops_fun {
BITOPS_4 = 4,
BITOPS_7 = 7,
BITOPS_11 = 11,
BITOPS_31 = 31,
BITOPS_88 = 88,
BITOPS_LAST = 255,
BITOPS_LENGTH = 256
};
static DECLARE_BITMAP(g_bitmap, BITOPS_LENGTH);
static unsigned int order_comb[][2] = {
{0x00000003, 2},
{0x00000004, 2},
{0x00001fff, 13},
{0x00002000, 13},
{0x50000000, 31},
{0x80000000, 31},
{0x80003000, 32},
};
#ifdef CONFIG_64BIT
static unsigned long order_comb_long[][2] = {
{0x0000000300000000, 34},
{0x0000000400000000, 34},
{0x00001fff00000000, 45},
{0x0000200000000000, 45},
{0x5000000000000000, 63},
{0x8000000000000000, 63},
{0x8000300000000000, 64},
};
#endif
static int __init test_bitops_startup(void)
{
int i, bit_set;
pr_info("Starting bitops test\n");
set_bit(BITOPS_4, g_bitmap);
set_bit(BITOPS_7, g_bitmap);
set_bit(BITOPS_11, g_bitmap);
set_bit(BITOPS_31, g_bitmap);
set_bit(BITOPS_88, g_bitmap);
for (i = 0; i < ARRAY_SIZE(order_comb); i++) {
if (order_comb[i][1] != get_count_order(order_comb[i][0]))
pr_warn("get_count_order wrong for %x\n",
order_comb[i][0]);
}
for (i = 0; i < ARRAY_SIZE(order_comb); i++) {
if (order_comb[i][1] != get_count_order_long(order_comb[i][0]))
pr_warn("get_count_order_long wrong for %x\n",
order_comb[i][0]);
}
#ifdef CONFIG_64BIT
for (i = 0; i < ARRAY_SIZE(order_comb_long); i++) {
if (order_comb_long[i][1] !=
get_count_order_long(order_comb_long[i][0]))
pr_warn("get_count_order_long wrong for %lx\n",
order_comb_long[i][0]);
}
#endif
barrier();
clear_bit(BITOPS_4, g_bitmap);
clear_bit(BITOPS_7, g_bitmap);
clear_bit(BITOPS_11, g_bitmap);
clear_bit(BITOPS_31, g_bitmap);
clear_bit(BITOPS_88, g_bitmap);
bit_set = find_first_bit(g_bitmap, BITOPS_LAST);
if (bit_set != BITOPS_LAST)
pr_err("ERROR: FOUND SET BIT %d\n", bit_set);
pr_info("Completed bitops test\n");
return 0;
}
static void __exit test_bitops_unstartup(void)
{
}
module_init(test_bitops_startup);
module_exit(test_bitops_unstartup);
MODULE_AUTHOR("Jesse Brandeburg <[email protected]>, Wei Yang <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Bit testing module");
| linux-master | lib/test_bitops.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/bitops.h>
#include <asm/types.h>
/**
 * hweightN - returns the Hamming weight of an N-bit word
* @x: the word to weigh
*
* The Hamming Weight of a number is the total number of bits set in it.
*/
unsigned int __sw_hweight32(unsigned int w)
{
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x55555555;
w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
w = (w + (w >> 4)) & 0x0f0f0f0f;
return (w * 0x01010101) >> 24;
#else
unsigned int res = w - ((w >> 1) & 0x55555555);
res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
res = (res + (res >> 4)) & 0x0F0F0F0F;
res = res + (res >> 8);
return (res + (res >> 16)) & 0x000000FF;
#endif
}
EXPORT_SYMBOL(__sw_hweight32);
unsigned int __sw_hweight16(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x5555);
res = (res & 0x3333) + ((res >> 2) & 0x3333);
res = (res + (res >> 4)) & 0x0F0F;
return (res + (res >> 8)) & 0x00FF;
}
EXPORT_SYMBOL(__sw_hweight16);
unsigned int __sw_hweight8(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x55);
res = (res & 0x33) + ((res >> 2) & 0x33);
return (res + (res >> 4)) & 0x0F;
}
EXPORT_SYMBOL(__sw_hweight8);
unsigned long __sw_hweight64(__u64 w)
{
#if BITS_PER_LONG == 32
return __sw_hweight32((unsigned int)(w >> 32)) +
__sw_hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x5555555555555555ul;
w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
return (w * 0x0101010101010101ul) >> 56;
#else
__u64 res = w - ((w >> 1) & 0x5555555555555555ul);
res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
res = res + (res >> 8);
res = res + (res >> 16);
return (res + (res >> 32)) & 0x00000000000000FFul;
#endif
#endif
}
EXPORT_SYMBOL(__sw_hweight64);
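/*
 * Worked example added by the editor (not part of the original file),
 * tracing the parallel-add trick in __sw_hweight8() for w = 0xb5
 * (binary 10110101, which has 5 bits set):
 *
 *   res = w - ((w >> 1) & 0x55)              -> 0x65: each 2-bit field holds
 *                                                the popcount of that bit pair
 *   res = (res & 0x33) + ((res >> 2) & 0x33) -> 0x32: each nibble holds the
 *                                                popcount of that nibble (3, 2)
 *   (res + (res >> 4)) & 0x0f                -> 0x05: the two nibble counts summed
 *
 * example_check_hweight8() is an invented helper that just restates the
 * expected result.
 */
static inline bool example_check_hweight8(void)
{
	return __sw_hweight8(0xb5) == 5;
}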
| linux-master | lib/hweight.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/interval_tree.h>
#include <linux/interval_tree_generic.h>
#include <linux/compiler.h>
#include <linux/export.h>
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)
INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
unsigned long, __subtree_last,
START, LAST,, interval_tree)
EXPORT_SYMBOL_GPL(interval_tree_insert);
EXPORT_SYMBOL_GPL(interval_tree_remove);
EXPORT_SYMBOL_GPL(interval_tree_iter_first);
EXPORT_SYMBOL_GPL(interval_tree_iter_next);
#ifdef CONFIG_INTERVAL_TREE_SPAN_ITER
/*
* Roll nodes[1] into nodes[0] by advancing nodes[1] to the end of a contiguous
 * span of nodes. This makes nodes[0]->last the end of that contiguous span of
 * used indexes that started at the original nodes[1]->start. nodes[1] is now the
* first node starting the next used span. A hole span is between nodes[0]->last
* and nodes[1]->start. nodes[1] must be !NULL.
*/
static void
interval_tree_span_iter_next_gap(struct interval_tree_span_iter *state)
{
struct interval_tree_node *cur = state->nodes[1];
state->nodes[0] = cur;
do {
if (cur->last > state->nodes[0]->last)
state->nodes[0] = cur;
cur = interval_tree_iter_next(cur, state->first_index,
state->last_index);
} while (cur && (state->nodes[0]->last >= cur->start ||
state->nodes[0]->last + 1 == cur->start));
state->nodes[1] = cur;
}
void interval_tree_span_iter_first(struct interval_tree_span_iter *iter,
struct rb_root_cached *itree,
unsigned long first_index,
unsigned long last_index)
{
iter->first_index = first_index;
iter->last_index = last_index;
iter->nodes[0] = NULL;
iter->nodes[1] =
interval_tree_iter_first(itree, first_index, last_index);
if (!iter->nodes[1]) {
/* No nodes intersect the span, whole span is hole */
iter->start_hole = first_index;
iter->last_hole = last_index;
iter->is_hole = 1;
return;
}
if (iter->nodes[1]->start > first_index) {
/* Leading hole on first iteration */
iter->start_hole = first_index;
iter->last_hole = iter->nodes[1]->start - 1;
iter->is_hole = 1;
interval_tree_span_iter_next_gap(iter);
return;
}
	/* Starting inside a used span */
iter->start_used = first_index;
iter->is_hole = 0;
interval_tree_span_iter_next_gap(iter);
iter->last_used = iter->nodes[0]->last;
if (iter->last_used >= last_index) {
iter->last_used = last_index;
iter->nodes[0] = NULL;
iter->nodes[1] = NULL;
}
}
EXPORT_SYMBOL_GPL(interval_tree_span_iter_first);
void interval_tree_span_iter_next(struct interval_tree_span_iter *iter)
{
if (!iter->nodes[0] && !iter->nodes[1]) {
iter->is_hole = -1;
return;
}
if (iter->is_hole) {
iter->start_used = iter->last_hole + 1;
iter->last_used = iter->nodes[0]->last;
if (iter->last_used >= iter->last_index) {
iter->last_used = iter->last_index;
iter->nodes[0] = NULL;
iter->nodes[1] = NULL;
}
iter->is_hole = 0;
return;
}
if (!iter->nodes[1]) {
/* Trailing hole */
iter->start_hole = iter->nodes[0]->last + 1;
iter->last_hole = iter->last_index;
iter->nodes[0] = NULL;
iter->is_hole = 1;
return;
}
/* must have both nodes[0] and [1], interior hole */
iter->start_hole = iter->nodes[0]->last + 1;
iter->last_hole = iter->nodes[1]->start - 1;
iter->is_hole = 1;
interval_tree_span_iter_next_gap(iter);
}
EXPORT_SYMBOL_GPL(interval_tree_span_iter_next);
/*
* Advance the iterator index to a specific position. The returned used/hole is
* updated to start at new_index. This is faster than calling
* interval_tree_span_iter_first() as it can avoid full searches in several
* cases where the iterator is already set.
*/
void interval_tree_span_iter_advance(struct interval_tree_span_iter *iter,
struct rb_root_cached *itree,
unsigned long new_index)
{
if (iter->is_hole == -1)
return;
iter->first_index = new_index;
if (new_index > iter->last_index) {
iter->is_hole = -1;
return;
}
/* Rely on the union aliasing hole/used */
if (iter->start_hole <= new_index && new_index <= iter->last_hole) {
iter->start_hole = new_index;
return;
}
if (new_index == iter->last_hole + 1)
interval_tree_span_iter_next(iter);
else
interval_tree_span_iter_first(iter, itree, new_index,
iter->last_index);
}
EXPORT_SYMBOL_GPL(interval_tree_span_iter_advance);
#endif
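/*
 * Illustrative sketch added by the editor (not part of the original file):
 * walking the used/hole spans of a tree with the iterator above (requires
 * CONFIG_INTERVAL_TREE_SPAN_ITER). The loop tests is_hole == -1 directly,
 * which is the "done" encoding set by interval_tree_span_iter_next();
 * count_used_indexes() is an invented name.
 */
#include <linux/interval_tree.h>

static unsigned long count_used_indexes(struct rb_root_cached *itree,
					unsigned long first, unsigned long last)
{
	struct interval_tree_span_iter iter;
	unsigned long used = 0;

	for (interval_tree_span_iter_first(&iter, itree, first, last);
	     iter.is_hole != -1;
	     interval_tree_span_iter_next(&iter)) {
		if (!iter.is_hole)
			used += iter.last_used - iter.start_used + 1;
	}
	return used;
}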
| linux-master | lib/interval_tree.c |
// SPDX-License-Identifier: GPL-2.0
/*
Generic support for BUG()
This respects the following config options:
CONFIG_BUG - emit BUG traps. Nothing happens without this.
CONFIG_GENERIC_BUG - enable this code.
CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit relative pointers for bug_addr and file
CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG
CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
(though they're generally always on).
CONFIG_GENERIC_BUG is set by each architecture using this code.
To use this, your architecture must:
1. Set up the config options:
- Enable CONFIG_GENERIC_BUG if CONFIG_BUG
2. Implement BUG (and optionally BUG_ON, WARN, WARN_ON)
- Define HAVE_ARCH_BUG
- Implement BUG() to generate a faulting instruction
- NOTE: struct bug_entry does not have "file" or "line" entries
when CONFIG_DEBUG_BUGVERBOSE is not enabled, so you must generate
the values accordingly.
3. Implement the trap
- In the illegal instruction trap handler (typically), verify
that the fault was in kernel mode, and call report_bug()
- report_bug() will return whether it was a false alarm, a warning,
or an actual bug.
- You must implement the is_valid_bugaddr(bugaddr) callback which
returns true if the eip is a real kernel address, and it points
to the expected BUG trap instruction.
Jeremy Fitzhardinge <[email protected]> 2006
*/
#define pr_fmt(fmt) fmt
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/ftrace.h>
#include <linux/context_tracking.h>
extern struct bug_entry __start___bug_table[], __stop___bug_table[];
static inline unsigned long bug_addr(const struct bug_entry *bug)
{
#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
return (unsigned long)&bug->bug_addr_disp + bug->bug_addr_disp;
#else
return bug->bug_addr;
#endif
}
#ifdef CONFIG_MODULES
/* Updates are protected by module mutex */
static LIST_HEAD(module_bug_list);
static struct bug_entry *module_find_bug(unsigned long bugaddr)
{
struct module *mod;
struct bug_entry *bug = NULL;
rcu_read_lock_sched();
list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
unsigned i;
bug = mod->bug_table;
for (i = 0; i < mod->num_bugs; ++i, ++bug)
if (bugaddr == bug_addr(bug))
goto out;
}
bug = NULL;
out:
rcu_read_unlock_sched();
return bug;
}
void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *mod)
{
char *secstrings;
unsigned int i;
mod->bug_table = NULL;
mod->num_bugs = 0;
/* Find the __bug_table section, if present */
secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (i = 1; i < hdr->e_shnum; i++) {
if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
continue;
mod->bug_table = (void *) sechdrs[i].sh_addr;
mod->num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
break;
}
/*
* Strictly speaking this should have a spinlock to protect against
* traversals, but since we only traverse on BUG()s, a spinlock
* could potentially lead to deadlock and thus be counter-productive.
* Thus, this uses RCU to safely manipulate the bug list, since BUG
* must run in non-interruptive state.
*/
list_add_rcu(&mod->bug_list, &module_bug_list);
}
void module_bug_cleanup(struct module *mod)
{
list_del_rcu(&mod->bug_list);
}
#else
static inline struct bug_entry *module_find_bug(unsigned long bugaddr)
{
return NULL;
}
#endif
void bug_get_file_line(struct bug_entry *bug, const char **file,
unsigned int *line)
{
#ifdef CONFIG_DEBUG_BUGVERBOSE
#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
*file = (const char *)&bug->file_disp + bug->file_disp;
#else
*file = bug->file;
#endif
*line = bug->line;
#else
*file = NULL;
*line = 0;
#endif
}
struct bug_entry *find_bug(unsigned long bugaddr)
{
struct bug_entry *bug;
for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
if (bugaddr == bug_addr(bug))
return bug;
return module_find_bug(bugaddr);
}
static enum bug_trap_type __report_bug(unsigned long bugaddr, struct pt_regs *regs)
{
struct bug_entry *bug;
const char *file;
unsigned line, warning, once, done;
if (!is_valid_bugaddr(bugaddr))
return BUG_TRAP_TYPE_NONE;
bug = find_bug(bugaddr);
if (!bug)
return BUG_TRAP_TYPE_NONE;
disable_trace_on_warning();
bug_get_file_line(bug, &file, &line);
warning = (bug->flags & BUGFLAG_WARNING) != 0;
once = (bug->flags & BUGFLAG_ONCE) != 0;
done = (bug->flags & BUGFLAG_DONE) != 0;
if (warning && once) {
if (done)
return BUG_TRAP_TYPE_WARN;
/*
* Since this is the only store, concurrency is not an issue.
*/
bug->flags |= BUGFLAG_DONE;
}
/*
* BUG() and WARN_ON() families don't print a custom debug message
* before triggering the exception handler, so we must add the
* "cut here" line now. WARN() issues its own "cut here" before the
* extra debugging message it writes before triggering the handler.
*/
if ((bug->flags & BUGFLAG_NO_CUT_HERE) == 0)
printk(KERN_DEFAULT CUT_HERE);
if (warning) {
/* this is a WARN_ON rather than BUG/BUG_ON */
__warn(file, line, (void *)bugaddr, BUG_GET_TAINT(bug), regs,
NULL);
return BUG_TRAP_TYPE_WARN;
}
if (file)
pr_crit("kernel BUG at %s:%u!\n", file, line);
else
pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
(void *)bugaddr);
return BUG_TRAP_TYPE_BUG;
}
enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
{
enum bug_trap_type ret;
bool rcu = false;
rcu = warn_rcu_enter();
ret = __report_bug(bugaddr, regs);
warn_rcu_exit(rcu);
return ret;
}
static void clear_once_table(struct bug_entry *start, struct bug_entry *end)
{
struct bug_entry *bug;
for (bug = start; bug < end; bug++)
bug->flags &= ~BUGFLAG_DONE;
}
void generic_bug_clear_once(void)
{
#ifdef CONFIG_MODULES
struct module *mod;
rcu_read_lock_sched();
list_for_each_entry_rcu(mod, &module_bug_list, bug_list)
clear_once_table(mod->bug_table,
mod->bug_table + mod->num_bugs);
rcu_read_unlock_sched();
#endif
clear_once_table(__start___bug_table, __stop___bug_table);
}
| linux-master | lib/bug.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include "../include/linux/crc32poly.h"
#include "../include/generated/autoconf.h"
#include "crc32defs.h"
#include <inttypes.h>
#define ENTRIES_PER_LINE 4
#if CRC_LE_BITS > 8
# define LE_TABLE_ROWS (CRC_LE_BITS/8)
# define LE_TABLE_SIZE 256
#else
# define LE_TABLE_ROWS 1
# define LE_TABLE_SIZE (1 << CRC_LE_BITS)
#endif
#if CRC_BE_BITS > 8
# define BE_TABLE_ROWS (CRC_BE_BITS/8)
# define BE_TABLE_SIZE 256
#else
# define BE_TABLE_ROWS 1
# define BE_TABLE_SIZE (1 << CRC_BE_BITS)
#endif
static uint32_t crc32table_le[LE_TABLE_ROWS][256];
static uint32_t crc32table_be[BE_TABLE_ROWS][256];
static uint32_t crc32ctable_le[LE_TABLE_ROWS][256];
/**
* crc32init_le() - allocate and initialize LE table data
*
* crc is the crc of the byte i; other entries are filled in based on the
* fact that crctable[i^j] = crctable[i] ^ crctable[j].
*
*/
static void crc32init_le_generic(const uint32_t polynomial,
uint32_t (*tab)[256])
{
unsigned i, j;
uint32_t crc = 1;
tab[0][0] = 0;
for (i = LE_TABLE_SIZE >> 1; i; i >>= 1) {
crc = (crc >> 1) ^ ((crc & 1) ? polynomial : 0);
for (j = 0; j < LE_TABLE_SIZE; j += 2 * i)
tab[0][i + j] = crc ^ tab[0][j];
}
for (i = 0; i < LE_TABLE_SIZE; i++) {
crc = tab[0][i];
for (j = 1; j < LE_TABLE_ROWS; j++) {
crc = tab[0][crc & 0xff] ^ (crc >> 8);
tab[j][i] = crc;
}
}
}
static void crc32init_le(void)
{
crc32init_le_generic(CRC32_POLY_LE, crc32table_le);
}
static void crc32cinit_le(void)
{
crc32init_le_generic(CRC32C_POLY_LE, crc32ctable_le);
}
/**
* crc32init_be() - allocate and initialize BE table data
*/
static void crc32init_be(void)
{
unsigned i, j;
uint32_t crc = 0x80000000;
crc32table_be[0][0] = 0;
for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
crc = (crc << 1) ^ ((crc & 0x80000000) ? CRC32_POLY_BE : 0);
for (j = 0; j < i; j++)
crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
}
for (i = 0; i < BE_TABLE_SIZE; i++) {
crc = crc32table_be[0][i];
for (j = 1; j < BE_TABLE_ROWS; j++) {
crc = crc32table_be[0][(crc >> 24) & 0xff] ^ (crc << 8);
crc32table_be[j][i] = crc;
}
}
}
static void output_table(uint32_t (*table)[256], int rows, int len, char *trans)
{
int i, j;
for (j = 0 ; j < rows; j++) {
printf("{");
for (i = 0; i < len - 1; i++) {
if (i % ENTRIES_PER_LINE == 0)
printf("\n");
printf("%s(0x%8.8xL), ", trans, table[j][i]);
}
printf("%s(0x%8.8xL)},\n", trans, table[j][len - 1]);
}
}
int main(int argc, char** argv)
{
printf("/* this file is generated - do not edit */\n\n");
if (CRC_LE_BITS > 1) {
crc32init_le();
printf("static const u32 ____cacheline_aligned "
"crc32table_le[%d][%d] = {",
LE_TABLE_ROWS, LE_TABLE_SIZE);
output_table(crc32table_le, LE_TABLE_ROWS,
LE_TABLE_SIZE, "tole");
printf("};\n");
}
if (CRC_BE_BITS > 1) {
crc32init_be();
printf("static const u32 ____cacheline_aligned "
"crc32table_be[%d][%d] = {",
BE_TABLE_ROWS, BE_TABLE_SIZE);
		output_table(crc32table_be, BE_TABLE_ROWS,
BE_TABLE_SIZE, "tobe");
printf("};\n");
}
if (CRC_LE_BITS > 1) {
crc32cinit_le();
printf("static const u32 ____cacheline_aligned "
"crc32ctable_le[%d][%d] = {",
LE_TABLE_ROWS, LE_TABLE_SIZE);
output_table(crc32ctable_le, LE_TABLE_ROWS,
LE_TABLE_SIZE, "tole");
printf("};\n");
}
return 0;
}
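/*
 * Illustrative sketch added by the editor (not part of the generator): how
 * the first row of the LE table emitted above is consumed. This is the
 * classic byte-at-a-time update for a reflected CRC; the kernel's real
 * crc32_le() additionally has multi-row (slice-by-N) variants selected by
 * CRC_LE_BITS. example_crc32_le_bytewise() is an invented name.
 */
static uint32_t example_crc32_le_bytewise(uint32_t crc,
					  const unsigned char *p, size_t len)
{
	while (len--)
		crc = crc32table_le[0][(crc ^ *p++) & 0xff] ^ (crc >> 8);
	return crc;
}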
| linux-master | lib/gen_crc32table.c |
// SPDX-License-Identifier: GPL-2.0
/*
* decompress.c
*
* Detect the decompression method based on magic number
*/
#include <linux/decompress/generic.h>
#include <linux/decompress/bunzip2.h>
#include <linux/decompress/unlzma.h>
#include <linux/decompress/unxz.h>
#include <linux/decompress/inflate.h>
#include <linux/decompress/unlzo.h>
#include <linux/decompress/unlz4.h>
#include <linux/decompress/unzstd.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/printk.h>
#ifndef CONFIG_DECOMPRESS_GZIP
# define gunzip NULL
#endif
#ifndef CONFIG_DECOMPRESS_BZIP2
# define bunzip2 NULL
#endif
#ifndef CONFIG_DECOMPRESS_LZMA
# define unlzma NULL
#endif
#ifndef CONFIG_DECOMPRESS_XZ
# define unxz NULL
#endif
#ifndef CONFIG_DECOMPRESS_LZO
# define unlzo NULL
#endif
#ifndef CONFIG_DECOMPRESS_LZ4
# define unlz4 NULL
#endif
#ifndef CONFIG_DECOMPRESS_ZSTD
# define unzstd NULL
#endif
struct compress_format {
unsigned char magic[2];
const char *name;
decompress_fn decompressor;
};
static const struct compress_format compressed_formats[] __initconst = {
{ {0x1f, 0x8b}, "gzip", gunzip },
{ {0x1f, 0x9e}, "gzip", gunzip },
{ {0x42, 0x5a}, "bzip2", bunzip2 },
{ {0x5d, 0x00}, "lzma", unlzma },
{ {0xfd, 0x37}, "xz", unxz },
{ {0x89, 0x4c}, "lzo", unlzo },
{ {0x02, 0x21}, "lz4", unlz4 },
{ {0x28, 0xb5}, "zstd", unzstd },
{ {0, 0}, NULL, NULL }
};
decompress_fn __init decompress_method(const unsigned char *inbuf, long len,
const char **name)
{
const struct compress_format *cf;
if (len < 2) {
if (name)
*name = NULL;
return NULL; /* Need at least this much... */
}
pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]);
for (cf = compressed_formats; cf->name; cf++) {
if (!memcmp(inbuf, cf->magic, 2))
break;
}
if (name)
*name = cf->name;
return cf->decompressor;
}
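/*
 * Illustrative sketch added by the editor (not part of the original file):
 * how callers such as the initramfs unpacking code typically use
 * decompress_method() - probe the magic bytes, report the detected format,
 * then invoke the returned decompressor. example_unpack() is an invented
 * name and the fill/flush/error callbacks a real caller must supply are
 * omitted here.
 */
static int __init example_unpack(unsigned char *buf, long len)
{
	const char *compress_name;
	decompress_fn decompressor;

	decompressor = decompress_method(buf, len, &compress_name);
	if (!decompressor)
		return -EINVAL;		/* unknown or uncompressed data */

	pr_debug("detected %s compressed data\n", compress_name);
	/*
	 * A real caller would now run decompressor(buf, len, ...) with its
	 * input/output callbacks and check the return value.
	 */
	return 0;
}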
| linux-master | lib/decompress.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Facebook
* Copyright (C) 2013-2014 Jens Axboe
*/
#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>
static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
unsigned depth = sb->depth;
sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
if (!sb->alloc_hint)
return -ENOMEM;
if (depth && !sb->round_robin) {
int i;
for_each_possible_cpu(i)
*per_cpu_ptr(sb->alloc_hint, i) = get_random_u32_below(depth);
}
return 0;
}
static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
unsigned int depth)
{
unsigned hint;
hint = this_cpu_read(*sb->alloc_hint);
if (unlikely(hint >= depth)) {
hint = depth ? get_random_u32_below(depth) : 0;
this_cpu_write(*sb->alloc_hint, hint);
}
return hint;
}
static inline void update_alloc_hint_after_get(struct sbitmap *sb,
unsigned int depth,
unsigned int hint,
unsigned int nr)
{
if (nr == -1) {
/* If the map is full, a hint won't do us much good. */
this_cpu_write(*sb->alloc_hint, 0);
} else if (nr == hint || unlikely(sb->round_robin)) {
/* Only update the hint if we used it. */
hint = nr + 1;
if (hint >= depth - 1)
hint = 0;
this_cpu_write(*sb->alloc_hint, hint);
}
}
/*
* See if we have deferred clears that we can batch move
*/
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
unsigned long mask;
if (!READ_ONCE(map->cleared))
return false;
/*
* First get a stable cleared mask, setting the old mask to 0.
*/
mask = xchg(&map->cleared, 0);
/*
* Now clear the masked bits in our free word
*/
atomic_long_andnot(mask, (atomic_long_t *)&map->word);
BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
return true;
}
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
gfp_t flags, int node, bool round_robin,
bool alloc_hint)
{
unsigned int bits_per_word;
if (shift < 0)
shift = sbitmap_calculate_shift(depth);
bits_per_word = 1U << shift;
if (bits_per_word > BITS_PER_LONG)
return -EINVAL;
sb->shift = shift;
sb->depth = depth;
sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
sb->round_robin = round_robin;
if (depth == 0) {
sb->map = NULL;
return 0;
}
if (alloc_hint) {
if (init_alloc_hint(sb, flags))
return -ENOMEM;
} else {
sb->alloc_hint = NULL;
}
sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
if (!sb->map) {
free_percpu(sb->alloc_hint);
return -ENOMEM;
}
return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
unsigned int bits_per_word = 1U << sb->shift;
unsigned int i;
for (i = 0; i < sb->map_nr; i++)
sbitmap_deferred_clear(&sb->map[i]);
sb->depth = depth;
sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
unsigned int hint, bool wrap)
{
int nr;
/* don't wrap if starting from 0 */
wrap = wrap && hint;
while (1) {
nr = find_next_zero_bit(word, depth, hint);
if (unlikely(nr >= depth)) {
/*
* We started with an offset, and we didn't reset the
* offset to 0 in a failure case, so start from 0 to
* exhaust the map.
*/
if (hint && wrap) {
hint = 0;
continue;
}
return -1;
}
if (!test_and_set_bit_lock(nr, word))
break;
hint = nr + 1;
if (hint >= depth - 1)
hint = 0;
}
return nr;
}
static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
unsigned int depth,
unsigned int alloc_hint,
bool wrap)
{
int nr;
do {
nr = __sbitmap_get_word(&map->word, depth,
alloc_hint, wrap);
if (nr != -1)
break;
if (!sbitmap_deferred_clear(map))
break;
} while (1);
return nr;
}
static int sbitmap_find_bit(struct sbitmap *sb,
unsigned int depth,
unsigned int index,
unsigned int alloc_hint,
bool wrap)
{
unsigned int i;
int nr = -1;
for (i = 0; i < sb->map_nr; i++) {
nr = sbitmap_find_bit_in_word(&sb->map[index],
min_t(unsigned int,
__map_depth(sb, index),
depth),
alloc_hint, wrap);
if (nr != -1) {
nr += index << sb->shift;
break;
}
/* Jump to next index. */
alloc_hint = 0;
if (++index >= sb->map_nr)
index = 0;
}
return nr;
}
static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
unsigned int index;
index = SB_NR_TO_INDEX(sb, alloc_hint);
/*
* Unless we're doing round robin tag allocation, just use the
* alloc_hint to find the right word index. No point in looping
* twice in find_next_zero_bit() for that case.
*/
if (sb->round_robin)
alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
else
alloc_hint = 0;
return sbitmap_find_bit(sb, UINT_MAX, index, alloc_hint,
!sb->round_robin);
}
int sbitmap_get(struct sbitmap *sb)
{
int nr;
unsigned int hint, depth;
if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
return -1;
depth = READ_ONCE(sb->depth);
hint = update_alloc_hint_before_get(sb, depth);
nr = __sbitmap_get(sb, hint);
update_alloc_hint_after_get(sb, depth, hint, nr);
return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
static int __sbitmap_get_shallow(struct sbitmap *sb,
unsigned int alloc_hint,
unsigned long shallow_depth)
{
unsigned int index;
index = SB_NR_TO_INDEX(sb, alloc_hint);
alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
return sbitmap_find_bit(sb, shallow_depth, index, alloc_hint, true);
}
int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
{
int nr;
unsigned int hint, depth;
if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
return -1;
depth = READ_ONCE(sb->depth);
hint = update_alloc_hint_before_get(sb, depth);
nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
update_alloc_hint_after_get(sb, depth, hint, nr);
return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
unsigned int i;
for (i = 0; i < sb->map_nr; i++) {
if (sb->map[i].word & ~sb->map[i].cleared)
return true;
}
return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
unsigned int i, weight = 0;
for (i = 0; i < sb->map_nr; i++) {
const struct sbitmap_word *word = &sb->map[i];
unsigned int word_depth = __map_depth(sb, i);
if (set)
weight += bitmap_weight(&word->word, word_depth);
else
weight += bitmap_weight(&word->cleared, word_depth);
}
return weight;
}
static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
return __sbitmap_weight(sb, false);
}
unsigned int sbitmap_weight(const struct sbitmap *sb)
{
return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
EXPORT_SYMBOL_GPL(sbitmap_weight);
void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
seq_printf(m, "depth=%u\n", sb->depth);
seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);
static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
if ((offset & 0xf) == 0) {
if (offset != 0)
seq_putc(m, '\n');
seq_printf(m, "%08x:", offset);
}
if ((offset & 0x1) == 0)
seq_putc(m, ' ');
seq_printf(m, "%02x", byte);
}
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
u8 byte = 0;
unsigned int byte_bits = 0;
unsigned int offset = 0;
int i;
for (i = 0; i < sb->map_nr; i++) {
unsigned long word = READ_ONCE(sb->map[i].word);
unsigned long cleared = READ_ONCE(sb->map[i].cleared);
unsigned int word_bits = __map_depth(sb, i);
word &= ~cleared;
while (word_bits > 0) {
unsigned int bits = min(8 - byte_bits, word_bits);
byte |= (word & (BIT(bits) - 1)) << byte_bits;
byte_bits += bits;
if (byte_bits == 8) {
emit_byte(m, offset, byte);
byte = 0;
byte_bits = 0;
offset++;
}
word >>= bits;
word_bits -= bits;
}
}
if (byte_bits) {
emit_byte(m, offset, byte);
offset++;
}
if (offset)
seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
unsigned int depth)
{
unsigned int wake_batch;
unsigned int shallow_depth;
/*
* For each batch, we wake up one queue. We need to make sure that our
* batch size is small enough that the full depth of the bitmap,
* potentially limited by a shallow depth, is enough to wake up all of
* the queues.
*
* Each full word of the bitmap has bits_per_word bits, and there might
* be a partial word. There are depth / bits_per_word full words and
* depth % bits_per_word bits left over. In bitwise arithmetic:
*
* bits_per_word = 1 << shift
* depth / bits_per_word = depth >> shift
* depth % bits_per_word = depth & ((1 << shift) - 1)
*
* Each word can be limited to sbq->min_shallow_depth bits.
*/
shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
depth = ((depth >> sbq->sb.shift) * shallow_depth +
min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
SBQ_WAKE_BATCH);
return wake_batch;
}
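/*
 * Worked example (illustrative numbers, assuming SBQ_WAIT_QUEUES == 8 and
 * SBQ_WAKE_BATCH == 8): with depth = 256, sb.shift = 6 (64 bits per word)
 * and min_shallow_depth = 16, each of the 256 >> 6 = 4 words contributes
 * at most 16 usable bits, so the effective depth is 4 * 16 = 64 and
 * wake_batch = clamp(64 / 8, 1, 8) = 8.
 */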
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
int shift, bool round_robin, gfp_t flags, int node)
{
int ret;
int i;
ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
round_robin, true);
if (ret)
return ret;
sbq->min_shallow_depth = UINT_MAX;
sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
atomic_set(&sbq->wake_index, 0);
atomic_set(&sbq->ws_active, 0);
atomic_set(&sbq->completion_cnt, 0);
atomic_set(&sbq->wakeup_cnt, 0);
sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
if (!sbq->ws) {
sbitmap_free(&sbq->sb);
return -ENOMEM;
}
for (i = 0; i < SBQ_WAIT_QUEUES; i++)
init_waitqueue_head(&sbq->ws[i].wait);
return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
unsigned int depth)
{
unsigned int wake_batch;
wake_batch = sbq_calc_wake_batch(sbq, depth);
if (sbq->wake_batch != wake_batch)
WRITE_ONCE(sbq->wake_batch, wake_batch);
}
void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
unsigned int users)
{
unsigned int wake_batch;
unsigned int depth = (sbq->sb.depth + users - 1) / users;
wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
1, SBQ_WAKE_BATCH);
WRITE_ONCE(sbq->wake_batch, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
sbitmap_queue_update_wake_batch(sbq, depth);
sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
unsigned int *offset)
{
struct sbitmap *sb = &sbq->sb;
unsigned int hint, depth;
unsigned long index, nr;
int i;
if (unlikely(sb->round_robin))
return 0;
depth = READ_ONCE(sb->depth);
hint = update_alloc_hint_before_get(sb, depth);
index = SB_NR_TO_INDEX(sb, hint);
for (i = 0; i < sb->map_nr; i++) {
struct sbitmap_word *map = &sb->map[index];
unsigned long get_mask;
unsigned int map_depth = __map_depth(sb, index);
sbitmap_deferred_clear(map);
if (map->word == (1UL << (map_depth - 1)) - 1)
goto next;
nr = find_first_zero_bit(&map->word, map_depth);
if (nr + nr_tags <= map_depth) {
atomic_long_t *ptr = (atomic_long_t *) &map->word;
unsigned long val;
get_mask = ((1UL << nr_tags) - 1) << nr;
val = READ_ONCE(map->word);
while (!atomic_long_try_cmpxchg(ptr, &val,
get_mask | val))
;
get_mask = (get_mask & ~val) >> nr;
if (get_mask) {
*offset = nr + (index << sb->shift);
update_alloc_hint_after_get(sb, depth, hint,
*offset + nr_tags - 1);
return get_mask;
}
}
next:
/* Jump to next index. */
if (++index >= sb->map_nr)
index = 0;
}
return 0;
}
int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
unsigned int shallow_depth)
{
WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);
return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow);
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
unsigned int min_shallow_depth)
{
sbq->min_shallow_depth = min_shallow_depth;
sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
int i, wake_index, woken;
if (!atomic_read(&sbq->ws_active))
return;
wake_index = atomic_read(&sbq->wake_index);
for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
struct sbq_wait_state *ws = &sbq->ws[wake_index];
/*
* Advance the index before checking the current queue.
* It improves fairness, by ensuring the queue doesn't
* need to be fully emptied before trying to wake up
* from the next one.
*/
wake_index = sbq_index_inc(wake_index);
if (waitqueue_active(&ws->wait)) {
woken = wake_up_nr(&ws->wait, nr);
if (woken == nr)
break;
nr -= woken;
}
}
if (wake_index != atomic_read(&sbq->wake_index))
atomic_set(&sbq->wake_index, wake_index);
}
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
unsigned int wake_batch = READ_ONCE(sbq->wake_batch);
unsigned int wakeups;
if (!atomic_read(&sbq->ws_active))
return;
atomic_add(nr, &sbq->completion_cnt);
wakeups = atomic_read(&sbq->wakeup_cnt);
do {
if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch)
return;
} while (!atomic_try_cmpxchg(&sbq->wakeup_cnt,
&wakeups, wakeups + wake_batch));
__sbitmap_queue_wake_up(sbq, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
{
if (likely(!sb->round_robin && tag < sb->depth))
data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
}
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
int *tags, int nr_tags)
{
struct sbitmap *sb = &sbq->sb;
unsigned long *addr = NULL;
unsigned long mask = 0;
int i;
smp_mb__before_atomic();
for (i = 0; i < nr_tags; i++) {
const int tag = tags[i] - offset;
unsigned long *this_addr;
/* since we're clearing a batch, skip the deferred map */
this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
if (!addr) {
addr = this_addr;
} else if (addr != this_addr) {
atomic_long_andnot(mask, (atomic_long_t *) addr);
mask = 0;
addr = this_addr;
}
mask |= (1UL << SB_NR_TO_BIT(sb, tag));
}
if (mask)
atomic_long_andnot(mask, (atomic_long_t *) addr);
smp_mb__after_atomic();
sbitmap_queue_wake_up(sbq, nr_tags);
sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
tags[nr_tags - 1] - offset);
}
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
unsigned int cpu)
{
/*
* Once the cleared bit is set, the bit may be re-allocated.
*
* This barrier orders READ/WRITE accesses to the associated instance
* (such as a blk_mq request) against setting this bit, to avoid racing
* with re-allocation; its pair is the memory barrier implied in
* __sbitmap_get_word().
*
* One invariant is that the cleared bit has to be zero while the bit
* is in use.
*/
smp_mb__before_atomic();
sbitmap_deferred_clear_bit(&sbq->sb, nr);
/*
* Pairs with the memory barrier in set_current_state() to ensure the
* proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
* and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
* waiter. See the comment on waitqueue_active().
*/
smp_mb__after_atomic();
sbitmap_queue_wake_up(sbq, 1);
sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
int i, wake_index;
/*
* Pairs with the memory barrier in set_current_state() like in
* sbitmap_queue_wake_up().
*/
smp_mb();
wake_index = atomic_read(&sbq->wake_index);
for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
struct sbq_wait_state *ws = &sbq->ws[wake_index];
if (waitqueue_active(&ws->wait))
wake_up(&ws->wait);
wake_index = sbq_index_inc(wake_index);
}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
bool first;
int i;
sbitmap_show(&sbq->sb, m);
seq_puts(m, "alloc_hint={");
first = true;
for_each_possible_cpu(i) {
if (!first)
seq_puts(m, ", ");
first = false;
seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
}
seq_puts(m, "}\n");
seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));
seq_puts(m, "ws={\n");
for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
struct sbq_wait_state *ws = &sbq->ws[i];
seq_printf(m, "\t{.wait=%s},\n",
waitqueue_active(&ws->wait) ? "active" : "inactive");
}
seq_puts(m, "}\n");
seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
struct sbq_wait_state *ws,
struct sbq_wait *sbq_wait)
{
if (!sbq_wait->sbq) {
sbq_wait->sbq = sbq;
atomic_inc(&sbq->ws_active);
add_wait_queue(&ws->wait, &sbq_wait->wait);
}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
list_del_init(&sbq_wait->wait.entry);
if (sbq_wait->sbq) {
atomic_dec(&sbq_wait->sbq->ws_active);
sbq_wait->sbq = NULL;
}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
struct sbq_wait_state *ws,
struct sbq_wait *sbq_wait, int state)
{
if (!sbq_wait->sbq) {
atomic_inc(&sbq->ws_active);
sbq_wait->sbq = sbq;
}
prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
struct sbq_wait *sbq_wait)
{
finish_wait(&ws->wait, &sbq_wait->wait);
if (sbq_wait->sbq) {
atomic_dec(&sbq->ws_active);
sbq_wait->sbq = NULL;
}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
| linux-master | lib/sbitmap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/lib/ctype.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/ctype.h>
#include <linux/compiler.h>
#include <linux/export.h>
const unsigned char _ctype[] = {
_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
EXPORT_SYMBOL(_ctype);
| linux-master | lib/ctype.c |
/*
* lib/parman.c - Manager for linear priority array areas
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Jiri Pirko <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/parman.h>
struct parman_algo {
int (*item_add)(struct parman *parman, struct parman_prio *prio,
struct parman_item *item);
void (*item_remove)(struct parman *parman, struct parman_prio *prio,
struct parman_item *item);
};
struct parman {
const struct parman_ops *ops;
void *priv;
const struct parman_algo *algo;
unsigned long count;
unsigned long limit_count;
struct list_head prio_list;
};
static int parman_enlarge(struct parman *parman)
{
unsigned long new_count = parman->limit_count +
parman->ops->resize_step;
int err;
err = parman->ops->resize(parman->priv, new_count);
if (err)
return err;
parman->limit_count = new_count;
return 0;
}
static int parman_shrink(struct parman *parman)
{
unsigned long new_count = parman->limit_count -
parman->ops->resize_step;
int err;
if (new_count < parman->ops->base_count)
return 0;
err = parman->ops->resize(parman->priv, new_count);
if (err)
return err;
parman->limit_count = new_count;
return 0;
}
static bool parman_prio_used(struct parman_prio *prio)
{
return !list_empty(&prio->item_list);
}
static struct parman_item *parman_prio_first_item(struct parman_prio *prio)
{
return list_first_entry(&prio->item_list,
typeof(struct parman_item), list);
}
static unsigned long parman_prio_first_index(struct parman_prio *prio)
{
return parman_prio_first_item(prio)->index;
}
static struct parman_item *parman_prio_last_item(struct parman_prio *prio)
{
return list_last_entry(&prio->item_list,
typeof(struct parman_item), list);
}
static unsigned long parman_prio_last_index(struct parman_prio *prio)
{
return parman_prio_last_item(prio)->index;
}
static unsigned long parman_lsort_new_index_find(struct parman *parman,
struct parman_prio *prio)
{
list_for_each_entry_from_reverse(prio, &parman->prio_list, list) {
if (!parman_prio_used(prio))
continue;
return parman_prio_last_index(prio) + 1;
}
return 0;
}
static void __parman_prio_move(struct parman *parman, struct parman_prio *prio,
struct parman_item *item, unsigned long to_index,
unsigned long count)
{
parman->ops->move(parman->priv, item->index, to_index, count);
}
static void parman_prio_shift_down(struct parman *parman,
struct parman_prio *prio)
{
struct parman_item *item;
unsigned long to_index;
if (!parman_prio_used(prio))
return;
item = parman_prio_first_item(prio);
to_index = parman_prio_last_index(prio) + 1;
__parman_prio_move(parman, prio, item, to_index, 1);
list_move_tail(&item->list, &prio->item_list);
item->index = to_index;
}
static void parman_prio_shift_up(struct parman *parman,
struct parman_prio *prio)
{
struct parman_item *item;
unsigned long to_index;
if (!parman_prio_used(prio))
return;
item = parman_prio_last_item(prio);
to_index = parman_prio_first_index(prio) - 1;
__parman_prio_move(parman, prio, item, to_index, 1);
list_move(&item->list, &prio->item_list);
item->index = to_index;
}
static void parman_prio_item_remove(struct parman *parman,
struct parman_prio *prio,
struct parman_item *item)
{
struct parman_item *last_item;
unsigned long to_index;
last_item = parman_prio_last_item(prio);
if (last_item == item) {
list_del(&item->list);
return;
}
to_index = item->index;
__parman_prio_move(parman, prio, last_item, to_index, 1);
list_del(&last_item->list);
list_replace(&item->list, &last_item->list);
last_item->index = to_index;
}
static int parman_lsort_item_add(struct parman *parman,
struct parman_prio *prio,
struct parman_item *item)
{
struct parman_prio *prio2;
unsigned long new_index;
int err;
if (parman->count + 1 > parman->limit_count) {
err = parman_enlarge(parman);
if (err)
return err;
}
new_index = parman_lsort_new_index_find(parman, prio);
list_for_each_entry_reverse(prio2, &parman->prio_list, list) {
if (prio2 == prio)
break;
parman_prio_shift_down(parman, prio2);
}
item->index = new_index;
list_add_tail(&item->list, &prio->item_list);
parman->count++;
return 0;
}
static void parman_lsort_item_remove(struct parman *parman,
struct parman_prio *prio,
struct parman_item *item)
{
parman_prio_item_remove(parman, prio, item);
list_for_each_entry_continue(prio, &parman->prio_list, list)
parman_prio_shift_up(parman, prio);
parman->count--;
if (parman->limit_count - parman->count >= parman->ops->resize_step)
parman_shrink(parman);
}
static const struct parman_algo parman_lsort = {
.item_add = parman_lsort_item_add,
.item_remove = parman_lsort_item_remove,
};
static const struct parman_algo *parman_algos[] = {
&parman_lsort,
};
/**
* parman_create - creates a new parman instance
* @ops: caller-specific callbacks
* @priv: pointer to a private data passed to the ops
*
* Note: all locking must be provided by the caller.
*
* Each parman instance manages an array area with chunks of entries
* with the same priority. Consider the following example:
*
* item 1 with prio 10
* item 2 with prio 10
* item 3 with prio 10
* item 4 with prio 20
* item 5 with prio 20
* item 6 with prio 30
* item 7 with prio 30
* item 8 with prio 30
*
* In this example, there are 3 priority chunks. The order of the priorities
* matters; however, the order of items within a single priority chunk does not
* matter. So the same array could be ordered as follows:
*
* item 2 with prio 10
* item 3 with prio 10
* item 1 with prio 10
* item 5 with prio 20
* item 4 with prio 20
* item 7 with prio 30
* item 8 with prio 30
* item 6 with prio 30
*
* The goal of parman is to maintain the priority ordering. The caller
* provides @ops with callbacks parman uses to move the items
* and resize the array area.
*
* Returns a pointer to the newly created parman instance on success,
* otherwise it returns NULL.
*/
struct parman *parman_create(const struct parman_ops *ops, void *priv)
{
struct parman *parman;
parman = kzalloc(sizeof(*parman), GFP_KERNEL);
if (!parman)
return NULL;
INIT_LIST_HEAD(&parman->prio_list);
parman->ops = ops;
parman->priv = priv;
parman->limit_count = ops->base_count;
parman->algo = parman_algos[ops->algo];
return parman;
}
EXPORT_SYMBOL(parman_create);
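/*
 * Usage sketch (hypothetical caller, not part of this file; the my_*
 * names and the numbers are made up):
 *
 *     static const struct parman_ops my_parman_ops = {
 *             .base_count     = 16,
 *             .resize_step    = 16,
 *             .resize         = my_resize,
 *             .move           = my_move,
 *             .algo           = PARMAN_ALGO_TYPE_LSORT,
 *     };
 *
 *     parman = parman_create(&my_parman_ops, priv);
 *     parman_prio_init(parman, &prio, 10);
 *     err = parman_item_add(parman, &prio, &item);
 *
 * After a successful parman_item_add(), item.index is the slot the caller
 * should use in its backing array.
 */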
/**
* parman_destroy - destroys existing parman instance
* @parman: parman instance
*
* Note: all locking must be provided by the caller.
*/
void parman_destroy(struct parman *parman)
{
WARN_ON(!list_empty(&parman->prio_list));
kfree(parman);
}
EXPORT_SYMBOL(parman_destroy);
/**
* parman_prio_init - initializes a parman priority chunk
* @parman: parman instance
* @prio: parman prio structure to be initialized
* @priority: desired priority of the chunk
*
* Note: all locking must be provided by the caller.
*
* Before the caller can add an item with a certain priority, it has to
* initialize a priority chunk for it using this function.
*/
void parman_prio_init(struct parman *parman, struct parman_prio *prio,
unsigned long priority)
{
struct parman_prio *prio2;
struct list_head *pos;
INIT_LIST_HEAD(&prio->item_list);
prio->priority = priority;
/* Position inside the list according to priority */
list_for_each(pos, &parman->prio_list) {
prio2 = list_entry(pos, typeof(*prio2), list);
if (prio2->priority > prio->priority)
break;
}
list_add_tail(&prio->list, pos);
}
EXPORT_SYMBOL(parman_prio_init);
/**
* parman_prio_fini - finalizes use of parman priority chunk
* @prio: parman prio structure
*
* Note: all locking must be provided by the caller.
*/
void parman_prio_fini(struct parman_prio *prio)
{
WARN_ON(parman_prio_used(prio));
list_del(&prio->list);
}
EXPORT_SYMBOL(parman_prio_fini);
/**
* parman_item_add - adds a parman item under defined priority
* @parman: parman instance
* @prio: parman prio instance to add the item to
* @item: parman item instance
*
* Note: all locking must be provided by the caller.
*
* Adds the item to the array managed by the parman instance under the
* specified priority.
*
* Returns 0 in case of success, or a negative number to indicate an error.
*/
int parman_item_add(struct parman *parman, struct parman_prio *prio,
struct parman_item *item)
{
return parman->algo->item_add(parman, prio, item);
}
EXPORT_SYMBOL(parman_item_add);
/**
* parman_item_remove - deletes parman item
* @parman: parman instance
* @prio: parman prio instance to delete the item from
* @item: parman item instance
*
* Note: all locking must be provided by the caller.
*/
void parman_item_remove(struct parman *parman, struct parman_prio *prio,
struct parman_item *item)
{
parman->algo->item_remove(parman, prio, item);
}
EXPORT_SYMBOL(parman_item_remove);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <[email protected]>");
MODULE_DESCRIPTION("Priority-based array manager");
| linux-master | lib/parman.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*/
#include <linux/export.h>
#include <linux/libgcc.h>
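/*
 * libgcc-style comparison helper: the return value is the comparison
 * result biased by one, i.e. 0 if a < b, 1 if a == b and 2 if a > b.
 */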
word_type notrace __cmpdi2(long long a, long long b)
{
const DWunion au = {
.ll = a
};
const DWunion bu = {
.ll = b
};
if (au.s.high < bu.s.high)
return 0;
else if (au.s.high > bu.s.high)
return 2;
if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
return 0;
else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
return 2;
return 1;
}
EXPORT_SYMBOL(__cmpdi2);
| linux-master | lib/cmpdi2.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/bitmap.h>
/**
* memweight - count the total number of bits set in memory area
* @ptr: pointer to the start of the area
* @bytes: the size of the area
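*
* Return: the total number of bits set in the area.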
*/
size_t memweight(const void *ptr, size_t bytes)
{
size_t ret = 0;
size_t longs;
const unsigned char *bitmap = ptr;
for (; bytes > 0 && ((unsigned long)bitmap) % sizeof(long);
bytes--, bitmap++)
ret += hweight8(*bitmap);
longs = bytes / sizeof(long);
if (longs) {
BUG_ON(longs >= INT_MAX / BITS_PER_LONG);
ret += bitmap_weight((unsigned long *)bitmap,
longs * BITS_PER_LONG);
bytes -= longs * sizeof(long);
bitmap += longs * sizeof(long);
}
/*
* The reason that this last loop is distinct from the preceding
* bitmap_weight() call is to compute 1-bits in the last region smaller
* than sizeof(long) properly on big-endian systems.
*/
for (; bytes > 0; bytes--, bitmap++)
ret += hweight8(*bitmap);
return ret;
}
EXPORT_SYMBOL(memweight);
| linux-master | lib/memweight.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* bit search implementation
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*
* Copyright (C) 2008 IBM Corporation
* 'find_last_bit' is written by Rusty Russell <[email protected]>
* (Inspired by David Howell's find_next_bit implementation)
*
* Rewritten by Yury Norov <[email protected]> to decrease
* size and improve performance, 2015.
*/
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/swab.h>
/*
* Common helper for find_bit() function family
* @FETCH: The expression that fetches and pre-processes each word of bitmap(s)
* @MUNGE: The expression that post-processes a word containing found bit (may be empty)
* @size: The bitmap size in bits
*/
#define FIND_FIRST_BIT(FETCH, MUNGE, size) \
({ \
unsigned long idx, val, sz = (size); \
\
for (idx = 0; idx * BITS_PER_LONG < sz; idx++) { \
val = (FETCH); \
if (val) { \
sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(val)), sz); \
break; \
} \
} \
\
sz; \
})
/*
* Common helper for find_next_bit() function family
* @FETCH: The expression that fetches and pre-processes each word of bitmap(s)
* @MUNGE: The expression that post-processes a word containing found bit (may be empty)
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
*/
#define FIND_NEXT_BIT(FETCH, MUNGE, size, start) \
({ \
unsigned long mask, idx, tmp, sz = (size), __start = (start); \
\
if (unlikely(__start >= sz)) \
goto out; \
\
mask = MUNGE(BITMAP_FIRST_WORD_MASK(__start)); \
idx = __start / BITS_PER_LONG; \
\
for (tmp = (FETCH) & mask; !tmp; tmp = (FETCH)) { \
if ((idx + 1) * BITS_PER_LONG >= sz) \
goto out; \
idx++; \
} \
\
sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(tmp)), sz); \
out: \
sz; \
})
#define FIND_NTH_BIT(FETCH, size, num) \
({ \
unsigned long sz = (size), nr = (num), idx, w, tmp; \
\
for (idx = 0; (idx + 1) * BITS_PER_LONG <= sz; idx++) { \
if (idx * BITS_PER_LONG + nr >= sz) \
goto out; \
\
tmp = (FETCH); \
w = hweight_long(tmp); \
if (w > nr) \
goto found; \
\
nr -= w; \
} \
\
if (sz % BITS_PER_LONG) \
tmp = (FETCH) & BITMAP_LAST_WORD_MASK(sz); \
found: \
sz = min(idx * BITS_PER_LONG + fns(tmp, nr), sz); \
out: \
sz; \
})
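/*
 * Illustrative example: with size = BITS_PER_LONG, a first word of 0b10110
 * (bits 1, 2 and 4 set) and num = 2, the word's weight 3 exceeds 2, so
 * fns() returns the position of the third (0-based second) set bit and the
 * macro evaluates to 4.
 */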
#ifndef find_first_bit
/*
* Find the first set bit in a memory region.
*/
unsigned long _find_first_bit(const unsigned long *addr, unsigned long size)
{
return FIND_FIRST_BIT(addr[idx], /* nop */, size);
}
EXPORT_SYMBOL(_find_first_bit);
#endif
#ifndef find_first_and_bit
/*
* Find the first set bit in two memory regions.
*/
unsigned long _find_first_and_bit(const unsigned long *addr1,
const unsigned long *addr2,
unsigned long size)
{
return FIND_FIRST_BIT(addr1[idx] & addr2[idx], /* nop */, size);
}
EXPORT_SYMBOL(_find_first_and_bit);
#endif
#ifndef find_first_zero_bit
/*
* Find the first cleared bit in a memory region.
*/
unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
return FIND_FIRST_BIT(~addr[idx], /* nop */, size);
}
EXPORT_SYMBOL(_find_first_zero_bit);
#endif
#ifndef find_next_bit
unsigned long _find_next_bit(const unsigned long *addr, unsigned long nbits, unsigned long start)
{
return FIND_NEXT_BIT(addr[idx], /* nop */, nbits, start);
}
EXPORT_SYMBOL(_find_next_bit);
#endif
unsigned long __find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)
{
return FIND_NTH_BIT(addr[idx], size, n);
}
EXPORT_SYMBOL(__find_nth_bit);
unsigned long __find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long size, unsigned long n)
{
return FIND_NTH_BIT(addr1[idx] & addr2[idx], size, n);
}
EXPORT_SYMBOL(__find_nth_and_bit);
unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long size, unsigned long n)
{
return FIND_NTH_BIT(addr1[idx] & ~addr2[idx], size, n);
}
EXPORT_SYMBOL(__find_nth_andnot_bit);
unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1,
const unsigned long *addr2,
const unsigned long *addr3,
unsigned long size, unsigned long n)
{
return FIND_NTH_BIT(addr1[idx] & addr2[idx] & ~addr3[idx], size, n);
}
EXPORT_SYMBOL(__find_nth_and_andnot_bit);
#ifndef find_next_and_bit
unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long nbits, unsigned long start)
{
return FIND_NEXT_BIT(addr1[idx] & addr2[idx], /* nop */, nbits, start);
}
EXPORT_SYMBOL(_find_next_and_bit);
#endif
#ifndef find_next_andnot_bit
unsigned long _find_next_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long nbits, unsigned long start)
{
return FIND_NEXT_BIT(addr1[idx] & ~addr2[idx], /* nop */, nbits, start);
}
EXPORT_SYMBOL(_find_next_andnot_bit);
#endif
#ifndef find_next_or_bit
unsigned long _find_next_or_bit(const unsigned long *addr1, const unsigned long *addr2,
unsigned long nbits, unsigned long start)
{
return FIND_NEXT_BIT(addr1[idx] | addr2[idx], /* nop */, nbits, start);
}
EXPORT_SYMBOL(_find_next_or_bit);
#endif
#ifndef find_next_zero_bit
unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
unsigned long start)
{
return FIND_NEXT_BIT(~addr[idx], /* nop */, nbits, start);
}
EXPORT_SYMBOL(_find_next_zero_bit);
#endif
#ifndef find_last_bit
unsigned long _find_last_bit(const unsigned long *addr, unsigned long size)
{
if (size) {
unsigned long val = BITMAP_LAST_WORD_MASK(size);
unsigned long idx = (size-1) / BITS_PER_LONG;
do {
val &= addr[idx];
if (val)
return idx * BITS_PER_LONG + __fls(val);
val = ~0ul;
} while (idx--);
}
return size;
}
EXPORT_SYMBOL(_find_last_bit);
#endif
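/*
 * find_next_clump8() scans for the next 8-bit aligned group ("clump") that
 * has a set bit at or after @offset. Illustrative example (made-up values):
 * if the first set bit at or after @offset is bit 11, the return value is
 * rounded down to 8 and *clump receives the 8-bit group covering bits 8-15.
 */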
unsigned long find_next_clump8(unsigned long *clump, const unsigned long *addr,
unsigned long size, unsigned long offset)
{
offset = find_next_bit(addr, size, offset);
if (offset == size)
return size;
offset = round_down(offset, 8);
*clump = bitmap_get_value8(addr, offset);
return offset;
}
EXPORT_SYMBOL(find_next_clump8);
#ifdef __BIG_ENDIAN
#ifndef find_first_zero_bit_le
/*
* Find the first cleared bit in an LE memory region.
*/
unsigned long _find_first_zero_bit_le(const unsigned long *addr, unsigned long size)
{
return FIND_FIRST_BIT(~addr[idx], swab, size);
}
EXPORT_SYMBOL(_find_first_zero_bit_le);
#endif
#ifndef find_next_zero_bit_le
unsigned long _find_next_zero_bit_le(const unsigned long *addr,
unsigned long size, unsigned long offset)
{
return FIND_NEXT_BIT(~addr[idx], swab, size, offset);
}
EXPORT_SYMBOL(_find_next_zero_bit_le);
#endif
#ifndef find_next_bit_le
unsigned long _find_next_bit_le(const unsigned long *addr,
unsigned long size, unsigned long offset)
{
return FIND_NEXT_BIT(addr[idx], swab, size, offset);
}
EXPORT_SYMBOL(_find_next_bit_le);
#endif
#endif /* __BIG_ENDIAN */
| linux-master | lib/find_bit.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>
/**
* idr_alloc_u32() - Allocate an ID.
* @idr: IDR handle.
* @ptr: Pointer to be associated with the new ID.
* @nextid: Pointer to an ID.
* @max: The maximum ID to allocate (inclusive).
* @gfp: Memory allocation flags.
*
* Allocates an unused ID in the range specified by @nextid and @max.
* Note that @max is inclusive whereas the @end parameter to idr_alloc()
* is exclusive. The new ID is assigned to @nextid before the pointer
* is inserted into the IDR, so if @nextid points into the object pointed
* to by @ptr, a concurrent lookup will not find an uninitialised ID.
*
* The caller should provide their own locking to ensure that two
* concurrent modifications to the IDR are not possible. Read-only
* accesses to the IDR may be done under the RCU read lock or may
* exclude simultaneous writers.
*
* Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed,
* or -ENOSPC if no free IDs could be found. If an error occurred,
* @nextid is unchanged.
*/
int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
unsigned long max, gfp_t gfp)
{
struct radix_tree_iter iter;
void __rcu **slot;
unsigned int base = idr->idr_base;
unsigned int id = *nextid;
if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
idr->idr_rt.xa_flags |= IDR_RT_MARKER;
id = (id < base) ? 0 : id - base;
radix_tree_iter_init(&iter, id);
slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
if (IS_ERR(slot))
return PTR_ERR(slot);
*nextid = iter.index + base;
/* there is a memory barrier inside radix_tree_iter_replace() */
radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
return 0;
}
EXPORT_SYMBOL_GPL(idr_alloc_u32);
/**
* idr_alloc() - Allocate an ID.
* @idr: IDR handle.
* @ptr: Pointer to be associated with the new ID.
* @start: The minimum ID (inclusive).
* @end: The maximum ID (exclusive).
* @gfp: Memory allocation flags.
*
* Allocates an unused ID in the range specified by @start and @end. If
* @end is <= 0, it is treated as one larger than %INT_MAX. This allows
* callers to use @start + N as @end as long as N is within integer range.
*
* The caller should provide their own locking to ensure that two
* concurrent modifications to the IDR are not possible. Read-only
* accesses to the IDR may be done under the RCU read lock or may
* exclude simultaneous writers.
*
* Return: The newly allocated ID, -ENOMEM if memory allocation failed,
* or -ENOSPC if no free IDs could be found.
*/
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
u32 id = start;
int ret;
if (WARN_ON_ONCE(start < 0))
return -EINVAL;
ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
if (ret)
return ret;
return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
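/*
 * Usage sketch (hypothetical caller, not part of this file; my_idr,
 * my_lock and obj are made up):
 *
 *     static DEFINE_IDR(my_idr);
 *
 *     spin_lock(&my_lock);
 *     id = idr_alloc(&my_idr, obj, 1, 0, GFP_NOWAIT);
 *     spin_unlock(&my_lock);
 *     if (id < 0)
 *             return id;
 *     obj->id = id;
 *
 * Passing 0 as @end allocates from the full [1, INT_MAX] range here.
 */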
/**
* idr_alloc_cyclic() - Allocate an ID cyclically.
* @idr: IDR handle.
* @ptr: Pointer to be associated with the new ID.
* @start: The minimum ID (inclusive).
* @end: The maximum ID (exclusive).
* @gfp: Memory allocation flags.
*
* Allocates an unused ID in the range specified by @start and @end. If
* @end is <= 0, it is treated as one larger than %INT_MAX. This allows
* callers to use @start + N as @end as long as N is within integer range.
* The search for an unused ID will start at the last ID allocated and will
* wrap around to @start if no free IDs are found before reaching @end.
*
* The caller should provide their own locking to ensure that two
* concurrent modifications to the IDR are not possible. Read-only
* accesses to the IDR may be done under the RCU read lock or may
* exclude simultaneous writers.
*
* Return: The newly allocated ID, -ENOMEM if memory allocation failed,
* or -ENOSPC if no free IDs could be found.
*/
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
u32 id = idr->idr_next;
int err, max = end > 0 ? end - 1 : INT_MAX;
if ((int)id < start)
id = start;
err = idr_alloc_u32(idr, ptr, &id, max, gfp);
if ((err == -ENOSPC) && (id > start)) {
id = start;
err = idr_alloc_u32(idr, ptr, &id, max, gfp);
}
if (err)
return err;
idr->idr_next = id + 1;
return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
/**
* idr_remove() - Remove an ID from the IDR.
* @idr: IDR handle.
* @id: Pointer ID.
*
* Removes this ID from the IDR. If the ID was not previously in the IDR,
* this function returns %NULL.
*
* Since this function modifies the IDR, the caller should provide their
* own locking to ensure that concurrent modification of the same IDR is
* not possible.
*
* Return: The pointer formerly associated with this ID.
*/
void *idr_remove(struct idr *idr, unsigned long id)
{
return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL);
}
EXPORT_SYMBOL_GPL(idr_remove);
/**
* idr_find() - Return pointer for given ID.
* @idr: IDR handle.
* @id: Pointer ID.
*
* Looks up the pointer associated with this ID. A %NULL pointer may
* indicate that @id is not allocated or that the %NULL pointer was
* associated with this ID.
*
* This function can be called under rcu_read_lock(), given that the leaf
* pointers' lifetimes are correctly managed.
*
* Return: The pointer associated with this ID.
*/
void *idr_find(const struct idr *idr, unsigned long id)
{
return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base);
}
EXPORT_SYMBOL_GPL(idr_find);
/**
* idr_for_each() - Iterate through all stored pointers.
* @idr: IDR handle.
* @fn: Function to be called for each pointer.
* @data: Data passed to callback function.
*
* The callback function will be called for each entry in @idr, passing
* the ID, the entry and @data.
*
* If @fn returns anything other than %0, the iteration stops and that
* value is returned from this function.
*
* idr_for_each() can be called concurrently with idr_alloc() and
* idr_remove() if protected by RCU. Newly added entries may not be
* seen and deleted entries may be seen, but adding and removing entries
* will not cause other entries to be skipped, nor spurious ones to be seen.
*/
int idr_for_each(const struct idr *idr,
int (*fn)(int id, void *p, void *data), void *data)
{
struct radix_tree_iter iter;
void __rcu **slot;
int base = idr->idr_base;
radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
int ret;
unsigned long id = iter.index + base;
if (WARN_ON_ONCE(id > INT_MAX))
break;
ret = fn(id, rcu_dereference_raw(*slot), data);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL(idr_for_each);
/**
* idr_get_next_ul() - Find next populated entry.
* @idr: IDR handle.
* @nextid: Pointer to an ID.
*
* Returns the next populated entry in the tree with an ID greater than
* or equal to the value pointed to by @nextid. On exit, @nextid is updated
* to the ID of the found value. To use in a loop, the value pointed to by
* nextid must be incremented by the user.
*/
void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
{
struct radix_tree_iter iter;
void __rcu **slot;
void *entry = NULL;
unsigned long base = idr->idr_base;
unsigned long id = *nextid;
id = (id < base) ? 0 : id - base;
radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) {
entry = rcu_dereference_raw(*slot);
if (!entry)
continue;
if (!xa_is_internal(entry))
break;
if (slot != &idr->idr_rt.xa_head && !xa_is_retry(entry))
break;
slot = radix_tree_iter_retry(&iter);
}
if (!slot)
return NULL;
*nextid = iter.index + base;
return entry;
}
EXPORT_SYMBOL(idr_get_next_ul);
/**
* idr_get_next() - Find next populated entry.
* @idr: IDR handle.
* @nextid: Pointer to an ID.
*
* Returns the next populated entry in the tree with an ID greater than
* or equal to the value pointed to by @nextid. On exit, @nextid is updated
* to the ID of the found value. To use in a loop, the value pointed to by
* nextid must be incremented by the user.
*/
void *idr_get_next(struct idr *idr, int *nextid)
{
unsigned long id = *nextid;
void *entry = idr_get_next_ul(idr, &id);
if (WARN_ON_ONCE(id > INT_MAX))
return NULL;
*nextid = id;
return entry;
}
EXPORT_SYMBOL(idr_get_next);
/**
* idr_replace() - replace pointer for given ID.
* @idr: IDR handle.
* @ptr: New pointer to associate with the ID.
* @id: ID to change.
*
* Replace the pointer registered with an ID and return the old value.
* This function can be called under the RCU read lock concurrently with
* idr_alloc() and idr_remove() (as long as the ID being removed is not
* the one being replaced!).
*
* Returns: the old value on success. %-ENOENT indicates that @id was not
* found. %-EINVAL indicates that @ptr was not valid.
*/
void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
{
struct radix_tree_node *node;
void __rcu **slot = NULL;
void *entry;
id -= idr->idr_base;
entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
return ERR_PTR(-ENOENT);
__radix_tree_replace(&idr->idr_rt, node, slot, ptr);
return entry;
}
EXPORT_SYMBOL(idr_replace);
/**
* DOC: IDA description
*
* The IDA is an ID allocator which does not provide the ability to
* associate an ID with a pointer. As such, it only needs to store one
* bit per ID, and so is more space efficient than an IDR. To use an IDA,
* define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
* then initialise it using ida_init()). To allocate a new ID, call
* ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
* To free an ID, call ida_free().
*
* ida_destroy() can be used to dispose of an IDA without needing to
* free the individual IDs in it. You can use ida_is_empty() to find
* out whether the IDA has any IDs currently allocated.
*
* The IDA handles its own locking. It is safe to call any of the IDA
* functions without synchronisation in your code.
*
* IDs are currently limited to the range [0-INT_MAX]. If this is an awkward
* limitation, it should be quite straightforward to raise the maximum.
*/
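/*
 * Usage sketch (hypothetical caller, not part of this file; my_ida is a
 * made-up name):
 *
 *     static DEFINE_IDA(my_ida);
 *
 *     int id = ida_alloc(&my_ida, GFP_KERNEL);
 *
 *     if (id < 0)
 *             return id;
 *     ...
 *     ida_free(&my_ida, id);
 *
 * ida_alloc_range(&my_ida, 16, 127, GFP_KERNEL) would instead restrict the
 * allocated ID to [16, 127] inclusive.
 */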
/*
* Developer's notes:
*
* The IDA uses the functionality provided by the XArray to store bitmaps in
* each entry. The XA_FREE_MARK is only cleared when all bits in the bitmap
* have been set.
*
* I considered telling the XArray that each slot is an order-10 node
* and indexing by bit number, but the XArray can't allow a single multi-index
* entry in the head, which would significantly increase memory consumption
* for the IDA. So instead we divide the index by the number of bits in the
* leaf bitmap before doing a radix tree lookup.
*
* As an optimisation, if there are only a few low bits set in any given
* leaf, instead of allocating a 128-byte bitmap, we store the bits
* as a value entry. Value entries never have the XA_FREE_MARK cleared
* because we can always convert them into a bitmap entry.
*
* It would be possible to optimise further; once we've run out of a
* single 128-byte bitmap, we currently switch to a 576-byte node, put
* the 128-byte bitmap in the first entry and then start allocating extra
* 128-byte entries. We could instead use the 512 bytes of the node's
* data as a bitmap before moving to that scheme. I do not believe this
* is a worthwhile optimisation; Rasmus Villemoes surveyed the current
* users of the IDA and almost none of them use more than 1024 entries.
* Those that do use more than the 8192 IDs that the 512 bytes would
* provide.
*
* The IDA always uses a lock to alloc/free. If we add a 'test_bit'
* equivalent, it will still need locking. Going to RCU lookup would require
* using RCU to free bitmaps, and that's not trivial without embedding an
* RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
* bitmap, which is excessive.
*/
/**
* ida_alloc_range() - Allocate an unused ID.
* @ida: IDA handle.
* @min: Lowest ID to allocate.
* @max: Highest ID to allocate.
* @gfp: Memory allocation flags.
*
* Allocate an ID between @min and @max, inclusive. The allocated ID will
* not exceed %INT_MAX, even if @max is larger.
*
* Context: Any context. It is safe to call this function without
* locking in your code.
* Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
* or %-ENOSPC if there are no free IDs.
*/
int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
gfp_t gfp)
{
XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
unsigned bit = min % IDA_BITMAP_BITS;
unsigned long flags;
struct ida_bitmap *bitmap, *alloc = NULL;
if ((int)min < 0)
return -ENOSPC;
if ((int)max < 0)
max = INT_MAX;
retry:
xas_lock_irqsave(&xas, flags);
next:
bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
if (xas.xa_index > min / IDA_BITMAP_BITS)
bit = 0;
if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
goto nospc;
if (xa_is_value(bitmap)) {
unsigned long tmp = xa_to_value(bitmap);
if (bit < BITS_PER_XA_VALUE) {
bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
goto nospc;
if (bit < BITS_PER_XA_VALUE) {
tmp |= 1UL << bit;
xas_store(&xas, xa_mk_value(tmp));
goto out;
}
}
bitmap = alloc;
if (!bitmap)
bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
if (!bitmap)
goto alloc;
bitmap->bitmap[0] = tmp;
xas_store(&xas, bitmap);
if (xas_error(&xas)) {
bitmap->bitmap[0] = 0;
goto out;
}
}
if (bitmap) {
bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit);
if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
goto nospc;
if (bit == IDA_BITMAP_BITS)
goto next;
__set_bit(bit, bitmap->bitmap);
if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
xas_clear_mark(&xas, XA_FREE_MARK);
} else {
if (bit < BITS_PER_XA_VALUE) {
bitmap = xa_mk_value(1UL << bit);
} else {
bitmap = alloc;
if (!bitmap)
bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
if (!bitmap)
goto alloc;
__set_bit(bit, bitmap->bitmap);
}
xas_store(&xas, bitmap);
}
out:
xas_unlock_irqrestore(&xas, flags);
if (xas_nomem(&xas, gfp)) {
xas.xa_index = min / IDA_BITMAP_BITS;
bit = min % IDA_BITMAP_BITS;
goto retry;
}
if (bitmap != alloc)
kfree(alloc);
if (xas_error(&xas))
return xas_error(&xas);
return xas.xa_index * IDA_BITMAP_BITS + bit;
alloc:
xas_unlock_irqrestore(&xas, flags);
alloc = kzalloc(sizeof(*bitmap), gfp);
if (!alloc)
return -ENOMEM;
xas_set(&xas, min / IDA_BITMAP_BITS);
bit = min % IDA_BITMAP_BITS;
goto retry;
nospc:
xas_unlock_irqrestore(&xas, flags);
kfree(alloc);
return -ENOSPC;
}
EXPORT_SYMBOL(ida_alloc_range);
/**
* ida_free() - Release an allocated ID.
* @ida: IDA handle.
* @id: Previously allocated ID.
*
* Context: Any context. It is safe to call this function without
* locking in your code.
*/
void ida_free(struct ida *ida, unsigned int id)
{
XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS);
unsigned bit = id % IDA_BITMAP_BITS;
struct ida_bitmap *bitmap;
unsigned long flags;
if ((int)id < 0)
return;
xas_lock_irqsave(&xas, flags);
bitmap = xas_load(&xas);
if (xa_is_value(bitmap)) {
unsigned long v = xa_to_value(bitmap);
if (bit >= BITS_PER_XA_VALUE)
goto err;
if (!(v & (1UL << bit)))
goto err;
v &= ~(1UL << bit);
if (!v)
goto delete;
xas_store(&xas, xa_mk_value(v));
} else {
if (!test_bit(bit, bitmap->bitmap))
goto err;
__clear_bit(bit, bitmap->bitmap);
xas_set_mark(&xas, XA_FREE_MARK);
if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
kfree(bitmap);
delete:
xas_store(&xas, NULL);
}
}
xas_unlock_irqrestore(&xas, flags);
return;
err:
xas_unlock_irqrestore(&xas, flags);
WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_free);
/**
* ida_destroy() - Free all IDs.
* @ida: IDA handle.
*
* Calling this function frees all IDs and releases all resources used
* by an IDA. When this call returns, the IDA is empty and can be reused
* or freed. If the IDA is already empty, there is no need to call this
* function.
*
* Context: Any context. It is safe to call this function without
* locking in your code.
*/
void ida_destroy(struct ida *ida)
{
XA_STATE(xas, &ida->xa, 0);
struct ida_bitmap *bitmap;
unsigned long flags;
xas_lock_irqsave(&xas, flags);
xas_for_each(&xas, bitmap, ULONG_MAX) {
if (!xa_is_value(bitmap))
kfree(bitmap);
xas_store(&xas, NULL);
}
xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(ida_destroy);
#ifndef __KERNEL__
extern void xa_dump_index(unsigned long index, unsigned int shift);
#define IDA_CHUNK_SHIFT ilog2(IDA_BITMAP_BITS)
static void ida_dump_entry(void *entry, unsigned long index)
{
unsigned long i;
if (!entry)
return;
if (xa_is_node(entry)) {
struct xa_node *node = xa_to_node(entry);
unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
XA_CHUNK_SHIFT;
xa_dump_index(index * IDA_BITMAP_BITS, shift);
xa_dump_node(node);
for (i = 0; i < XA_CHUNK_SIZE; i++)
ida_dump_entry(node->slots[i],
index | (i << node->shift));
} else if (xa_is_value(entry)) {
xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG));
pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry);
} else {
struct ida_bitmap *bitmap = entry;
xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT);
pr_cont("bitmap: %p data", bitmap);
for (i = 0; i < IDA_BITMAP_LONGS; i++)
pr_cont(" %lx", bitmap->bitmap[i]);
pr_cont("\n");
}
}
static void ida_dump(struct ida *ida)
{
struct xarray *xa = &ida->xa;
pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head,
xa->xa_flags >> ROOT_TAG_SHIFT);
ida_dump_entry(xa->xa_head, 0);
}
#endif
| linux-master | lib/idr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*/
#include <linux/export.h>
#include <linux/libgcc.h>
#define W_TYPE_SIZE 32
#define __ll_B ((unsigned long) 1 << (W_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((unsigned long) (t) & (__ll_B - 1))
#define __ll_highpart(t) ((unsigned long) (t) >> (W_TYPE_SIZE / 2))
/* If we still don't have umul_ppmm, define it using plain C. */
#if !defined(umul_ppmm)
#define umul_ppmm(w1, w0, u, v) \
do { \
unsigned long __x0, __x1, __x2, __x3; \
unsigned short __ul, __vl, __uh, __vh; \
\
__ul = __ll_lowpart(u); \
__uh = __ll_highpart(u); \
__vl = __ll_lowpart(v); \
__vh = __ll_highpart(v); \
\
__x0 = (unsigned long) __ul * __vl; \
__x1 = (unsigned long) __ul * __vh; \
__x2 = (unsigned long) __uh * __vl; \
__x3 = (unsigned long) __uh * __vh; \
\
__x1 += __ll_highpart(__x0); /* this can't give carry */\
__x1 += __x2; /* but this indeed can */ \
if (__x1 < __x2) /* did we get it? */ \
__x3 += __ll_B; /* yes, add it in the proper pos */ \
\
(w1) = __x3 + __ll_highpart(__x1); \
(w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\
} while (0)
#endif
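/*
 * The macro above is schoolbook multiplication on 16-bit halves: with
 * u = 2^16 * uh + ul and v = 2^16 * vh + vl,
 *
 *     u * v = 2^32 * (uh * vh) + 2^16 * (uh * vl + ul * vh) + ul * vl
 *
 * The only subtlety is the carry out of the middle sum __x1 + __x2, which
 * is worth 2^16 in the high word and is folded in as __ll_B.
 */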
#if !defined(__umulsidi3)
#define __umulsidi3(u, v) ({ \
DWunion __w; \
umul_ppmm(__w.s.high, __w.s.low, u, v); \
__w.ll; \
})
#endif
long long notrace __muldi3(long long u, long long v)
{
const DWunion uu = {.ll = u};
const DWunion vv = {.ll = v};
DWunion w = {.ll = __umulsidi3(uu.s.low, vv.s.low)};
w.s.high += ((unsigned long) uu.s.low * (unsigned long) vv.s.high
+ (unsigned long) uu.s.high * (unsigned long) vv.s.low);
return w.ll;
}
EXPORT_SYMBOL(__muldi3);
| linux-master | lib/muldi3.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Stack depot - a stack trace storage that avoids duplication.
*
* Internally, stack depot maintains a hash table of unique stacktraces. The
* stack traces themselves are stored contiguously one after another in a set
* of separate page allocations.
*
* Author: Alexander Potapenko <[email protected]>
* Copyright (C) 2016 Google, Inc.
*
* Based on the code by Dmitry Chernenkov.
*/
#define pr_fmt(fmt) "stackdepot: " fmt
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>
#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)
#define DEPOT_VALID_BITS 1
#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
#define DEPOT_STACK_ALIGN 4
#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_VALID_BITS - \
DEPOT_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
#define DEPOT_POOLS_CAP 8192
#define DEPOT_MAX_POOLS \
(((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
(1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
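/*
 * Editor's worked example (assuming 4 KB pages, i.e. PAGE_SHIFT == 12): a pool
 * spans 1 << (12 + 2) == 16 KB, DEPOT_OFFSET_BITS is 12 + 2 - 4 == 10, and one
 * offset unit is 1 << DEPOT_STACK_ALIGN == 16 bytes, so 2^10 * 16 bytes covers
 * exactly one pool. The handle bits left over after one valid bit and
 * STACK_DEPOT_EXTRA_BITS index the pool array.
 */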
/* Compact structure that stores a reference to a stack. */
union handle_parts {
depot_stack_handle_t handle;
struct {
u32 pool_index : DEPOT_POOL_INDEX_BITS;
u32 offset : DEPOT_OFFSET_BITS;
u32 valid : DEPOT_VALID_BITS;
u32 extra : STACK_DEPOT_EXTRA_BITS;
};
};
struct stack_record {
struct stack_record *next; /* Link in the hash table */
u32 hash; /* Hash in the hash table */
u32 size; /* Number of stored frames */
union handle_parts handle;
unsigned long entries[]; /* Variable-sized array of frames */
};
static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;
/* Use one hash table bucket per 16 KB of memory. */
#define STACK_HASH_TABLE_SCALE 14
/* Limit the number of buckets between 4K and 1M. */
#define STACK_BUCKET_NUMBER_ORDER_MIN 12
#define STACK_BUCKET_NUMBER_ORDER_MAX 20
/* Initial seed for jhash2. */
#define STACK_HASH_SEED 0x9747b28c
/* Hash table of pointers to stored stack traces. */
static struct stack_record **stack_table;
/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
static unsigned int stack_bucket_number_order;
/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;
/* Array of memory regions that store stack traces. */
static void *stack_pools[DEPOT_MAX_POOLS];
/* Currently used pool in stack_pools. */
static int pool_index;
/* Offset to the unused space in the currently used pool. */
static size_t pool_offset;
/* Lock that protects the variables above. */
static DEFINE_RAW_SPINLOCK(pool_lock);
/*
* Stack depot tries to keep an extra pool allocated even before it runs out
* of space in the currently used pool.
* This flag marks that this next extra pool needs to be allocated and
* initialized. It has the value 0 when either the next pool is not yet
* initialized or the limit on the number of pools is reached.
*/
static int next_pool_required = 1;
static int __init disable_stack_depot(char *str)
{
int ret;
ret = kstrtobool(str, &stack_depot_disabled);
if (!ret && stack_depot_disabled) {
pr_info("disabled\n");
stack_table = NULL;
}
return 0;
}
early_param("stack_depot_disable", disable_stack_depot);
void __init stack_depot_request_early_init(void)
{
/* Too late to request early init now. */
WARN_ON(__stack_depot_early_init_passed);
__stack_depot_early_init_requested = true;
}
/* Allocates a hash table via memblock. Can only be used during early boot. */
int __init stack_depot_early_init(void)
{
unsigned long entries = 0;
/* This function must be called only once, from mm_init(). */
if (WARN_ON(__stack_depot_early_init_passed))
return 0;
__stack_depot_early_init_passed = true;
/*
* If KASAN is enabled, use the maximum order: KASAN is frequently used
* in fuzzing scenarios, which leads to a large number of different
* stack traces being stored in stack depot.
*/
if (kasan_enabled() && !stack_bucket_number_order)
stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;
if (!__stack_depot_early_init_requested || stack_depot_disabled)
return 0;
/*
* If stack_bucket_number_order is not set, leave entries as 0 to rely
* on the automatic calculations performed by alloc_large_system_hash.
*/
if (stack_bucket_number_order)
entries = 1UL << stack_bucket_number_order;
pr_info("allocating hash table via alloc_large_system_hash\n");
stack_table = alloc_large_system_hash("stackdepot",
sizeof(struct stack_record *),
entries,
STACK_HASH_TABLE_SCALE,
HASH_EARLY | HASH_ZERO,
NULL,
&stack_hash_mask,
1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
if (!stack_table) {
pr_err("hash table allocation failed, disabling\n");
stack_depot_disabled = true;
return -ENOMEM;
}
return 0;
}
/* Allocates a hash table via kvcalloc. Can be used after boot. */
int stack_depot_init(void)
{
static DEFINE_MUTEX(stack_depot_init_mutex);
unsigned long entries;
int ret = 0;
mutex_lock(&stack_depot_init_mutex);
if (stack_depot_disabled || stack_table)
goto out_unlock;
/*
* Similarly to stack_depot_early_init, use stack_bucket_number_order
* if assigned, and rely on automatic scaling otherwise.
*/
if (stack_bucket_number_order) {
entries = 1UL << stack_bucket_number_order;
} else {
int scale = STACK_HASH_TABLE_SCALE;
entries = nr_free_buffer_pages();
entries = roundup_pow_of_two(entries);
if (scale > PAGE_SHIFT)
entries >>= (scale - PAGE_SHIFT);
else
entries <<= (PAGE_SHIFT - scale);
}
if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;
pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
if (!stack_table) {
pr_err("hash table allocation failed, disabling\n");
stack_depot_disabled = true;
ret = -ENOMEM;
goto out_unlock;
}
stack_hash_mask = entries - 1;
out_unlock:
mutex_unlock(&stack_depot_init_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(stack_depot_init);
/* Uses preallocated memory to initialize a new stack depot pool. */
static void depot_init_pool(void **prealloc)
{
/*
* If the next pool is already initialized or the maximum number of
* pools is reached, do not use the preallocated memory.
* smp_load_acquire() here pairs with smp_store_release() below and
* in depot_alloc_stack().
*/
if (!smp_load_acquire(&next_pool_required))
return;
/* Check if the current pool is not yet allocated. */
if (stack_pools[pool_index] == NULL) {
/* Use the preallocated memory for the current pool. */
stack_pools[pool_index] = *prealloc;
*prealloc = NULL;
} else {
/*
* Otherwise, use the preallocated memory for the next pool
* as long as we do not exceed the maximum number of pools.
*/
if (pool_index + 1 < DEPOT_MAX_POOLS) {
stack_pools[pool_index + 1] = *prealloc;
*prealloc = NULL;
}
/*
* At this point, either the next pool is initialized or the
* maximum number of pools is reached. In either case, take
* note that initializing another pool is not required.
* This smp_store_release pairs with smp_load_acquire() above
* and in stack_depot_save().
*/
smp_store_release(&next_pool_required, 0);
}
}
/* Allocates a new stack in a stack depot pool. */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
struct stack_record *stack;
size_t required_size = struct_size(stack, entries, size);
required_size = ALIGN(required_size, 1 << DEPOT_STACK_ALIGN);
/* Check if there is not enough space in the current pool. */
if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
/* Bail out if we reached the pool limit. */
if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
WARN_ONCE(1, "Stack depot reached limit capacity");
return NULL;
}
/*
* Move on to the next pool.
* WRITE_ONCE pairs with potential concurrent read in
* stack_depot_fetch().
*/
WRITE_ONCE(pool_index, pool_index + 1);
pool_offset = 0;
/*
* If the maximum number of pools is not reached, take note
* that the next pool needs to be initialized.
* smp_store_release() here pairs with smp_load_acquire() in
* stack_depot_save() and depot_init_pool().
*/
if (pool_index + 1 < DEPOT_MAX_POOLS)
smp_store_release(&next_pool_required, 1);
}
/* Assign the preallocated memory to a pool if required. */
if (*prealloc)
depot_init_pool(prealloc);
/* Check if we have a pool to save the stack trace. */
if (stack_pools[pool_index] == NULL)
return NULL;
/* Save the stack trace. */
stack = stack_pools[pool_index] + pool_offset;
stack->hash = hash;
stack->size = size;
stack->handle.pool_index = pool_index;
stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
stack->handle.valid = 1;
stack->handle.extra = 0;
memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
pool_offset += required_size;
/*
* Let KMSAN know the stored stack record is initialized. This shall
* prevent false positive reports if instrumented code accesses it.
*/
kmsan_unpoison_memory(stack, required_size);
return stack;
}
/* Calculates the hash for a stack. */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
return jhash2((u32 *)entries,
array_size(size, sizeof(*entries)) / sizeof(u32),
STACK_HASH_SEED);
}
/*
* Non-instrumented version of memcmp().
* Does not check the lexicographical order, only the equality.
*/
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
unsigned int n)
{
for ( ; n-- ; u1++, u2++) {
if (*u1 != *u2)
return 1;
}
return 0;
}
/* Finds a stack in a bucket of the hash table. */
static inline struct stack_record *find_stack(struct stack_record *bucket,
unsigned long *entries, int size,
u32 hash)
{
struct stack_record *found;
for (found = bucket; found; found = found->next) {
if (found->hash == hash &&
found->size == size &&
!stackdepot_memcmp(entries, found->entries, size))
return found;
}
return NULL;
}
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t alloc_flags, bool can_alloc)
{
struct stack_record *found = NULL, **bucket;
union handle_parts retval = { .handle = 0 };
struct page *page = NULL;
void *prealloc = NULL;
unsigned long flags;
u32 hash;
/*
* If this stack trace is from an interrupt, including anything before
* interrupt entry usually leads to unbounded stack depot growth.
*
* Since use of filter_irq_stacks() is a requirement to ensure stack
* depot can efficiently deduplicate interrupt stacks, always
* call filter_irq_stacks() to simplify all callers' use of stack depot.
*/
nr_entries = filter_irq_stacks(entries, nr_entries);
if (unlikely(nr_entries == 0) || stack_depot_disabled)
goto fast_exit;
hash = hash_stack(entries, nr_entries);
bucket = &stack_table[hash & stack_hash_mask];
/*
* Fast path: look the stack trace up without locking.
* The smp_load_acquire() here pairs with smp_store_release() to
* |bucket| below.
*/
found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
if (found)
goto exit;
/*
* Check if another stack pool needs to be initialized. If so, allocate
* the memory now - we won't be able to do that under the lock.
*
* The smp_load_acquire() here pairs with smp_store_release() to
* |next_pool_required| in depot_alloc_stack() and depot_init_pool().
*/
if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
/*
* Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic
* contexts and I/O.
*/
alloc_flags &= ~GFP_ZONEMASK;
alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
alloc_flags |= __GFP_NOWARN;
page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
if (page)
prealloc = page_address(page);
}
raw_spin_lock_irqsave(&pool_lock, flags);
found = find_stack(*bucket, entries, nr_entries, hash);
if (!found) {
struct stack_record *new =
depot_alloc_stack(entries, nr_entries, hash, &prealloc);
if (new) {
new->next = *bucket;
/*
* This smp_store_release() pairs with
* smp_load_acquire() from |bucket| above.
*/
smp_store_release(bucket, new);
found = new;
}
} else if (prealloc) {
/*
* Stack depot already contains this stack trace, but let's
* keep the preallocated memory for the future.
*/
depot_init_pool(&prealloc);
}
raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
if (prealloc) {
/* Stack depot didn't use this memory, free it. */
free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
}
if (found)
retval.handle = found->handle.handle;
fast_exit:
return retval.handle;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t alloc_flags)
{
return __stack_depot_save(entries, nr_entries, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
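/*
 * Editor's usage sketch (not part of the original file): a typical caller
 * captures its own stack with stack_trace_save() from <linux/stacktrace.h>
 * and stores it in the depot; identical stacks come back as the same handle.
 */
#if 0 /* illustrative only */
static depot_stack_handle_t record_current_stack(gfp_t gfp)
{
	unsigned long entries[16];
	unsigned int nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

	return stack_depot_save(entries, nr, gfp);
}
#endif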
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
{
union handle_parts parts = { .handle = handle };
/*
* READ_ONCE pairs with potential concurrent write in
* depot_alloc_stack.
*/
int pool_index_cached = READ_ONCE(pool_index);
void *pool;
size_t offset = parts.offset << DEPOT_STACK_ALIGN;
struct stack_record *stack;
*entries = NULL;
/*
* Let KMSAN know *entries is initialized. This shall prevent false
* positive reports if instrumented code accesses it.
*/
kmsan_unpoison_memory(entries, sizeof(*entries));
if (!handle)
return 0;
if (parts.pool_index > pool_index_cached) {
WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
parts.pool_index, pool_index_cached, handle);
return 0;
}
pool = stack_pools[parts.pool_index];
if (!pool)
return 0;
stack = pool + offset;
*entries = stack->entries;
return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
void stack_depot_print(depot_stack_handle_t stack)
{
unsigned long *entries;
unsigned int nr_entries;
nr_entries = stack_depot_fetch(stack, &entries);
if (nr_entries > 0)
stack_trace_print(entries, nr_entries, 0);
}
EXPORT_SYMBOL_GPL(stack_depot_print);
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
int spaces)
{
unsigned long *entries;
unsigned int nr_entries;
nr_entries = stack_depot_fetch(handle, &entries);
return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
spaces) : 0;
}
EXPORT_SYMBOL_GPL(stack_depot_snprint);
depot_stack_handle_t __must_check stack_depot_set_extra_bits(
depot_stack_handle_t handle, unsigned int extra_bits)
{
union handle_parts parts = { .handle = handle };
/* Don't set extra bits on empty handles. */
if (!handle)
return 0;
parts.extra = extra_bits;
return parts.handle;
}
EXPORT_SYMBOL(stack_depot_set_extra_bits);
unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
{
union handle_parts parts = { .handle = handle };
return parts.extra;
}
EXPORT_SYMBOL(stack_depot_get_extra_bits);
| linux-master | lib/stackdepot.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Test cases for bitfield helpers.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <kunit/test.h>
#include <linux/bitfield.h>
#define CHECK_ENC_GET_U(tp, v, field, res) do { \
{ \
u##tp _res; \
\
_res = u##tp##_encode_bits(v, field); \
KUNIT_ASSERT_FALSE_MSG(context, _res != res, \
"u" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != " #res "\n", \
(u64)_res); \
KUNIT_ASSERT_FALSE(context, \
u##tp##_get_bits(_res, field) != v); \
} \
} while (0)
#define CHECK_ENC_GET_LE(tp, v, field, res) do { \
{ \
__le##tp _res; \
\
_res = le##tp##_encode_bits(v, field); \
KUNIT_ASSERT_FALSE_MSG(context, \
_res != cpu_to_le##tp(res), \
"le" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx",\
(u64)le##tp##_to_cpu(_res), \
(u64)(res)); \
KUNIT_ASSERT_FALSE(context, \
le##tp##_get_bits(_res, field) != v);\
} \
} while (0)
#define CHECK_ENC_GET_BE(tp, v, field, res) do { \
{ \
__be##tp _res; \
\
_res = be##tp##_encode_bits(v, field); \
KUNIT_ASSERT_FALSE_MSG(context, \
_res != cpu_to_be##tp(res), \
"be" #tp "_encode_bits(" #v ", " #field ") is 0x%llx != 0x%llx", \
(u64)be##tp##_to_cpu(_res), \
(u64)(res)); \
KUNIT_ASSERT_FALSE(context, \
be##tp##_get_bits(_res, field) != v);\
} \
} while (0)
#define CHECK_ENC_GET(tp, v, field, res) do { \
CHECK_ENC_GET_U(tp, v, field, res); \
CHECK_ENC_GET_LE(tp, v, field, res); \
CHECK_ENC_GET_BE(tp, v, field, res); \
} while (0)
static void __init test_bitfields_constants(struct kunit *context)
{
/*
* NOTE
* This whole function compiles (or at least should, if everything
* is going according to plan) to nothing after optimisation.
*/
CHECK_ENC_GET(16, 1, 0x000f, 0x0001);
CHECK_ENC_GET(16, 3, 0x00f0, 0x0030);
CHECK_ENC_GET(16, 5, 0x0f00, 0x0500);
CHECK_ENC_GET(16, 7, 0xf000, 0x7000);
CHECK_ENC_GET(16, 14, 0x000f, 0x000e);
CHECK_ENC_GET(16, 15, 0x00f0, 0x00f0);
CHECK_ENC_GET_U(8, 1, 0x0f, 0x01);
CHECK_ENC_GET_U(8, 3, 0xf0, 0x30);
CHECK_ENC_GET_U(8, 14, 0x0f, 0x0e);
CHECK_ENC_GET_U(8, 15, 0xf0, 0xf0);
CHECK_ENC_GET(32, 1, 0x00000f00, 0x00000100);
CHECK_ENC_GET(32, 3, 0x0000f000, 0x00003000);
CHECK_ENC_GET(32, 5, 0x000f0000, 0x00050000);
CHECK_ENC_GET(32, 7, 0x00f00000, 0x00700000);
CHECK_ENC_GET(32, 14, 0x0f000000, 0x0e000000);
CHECK_ENC_GET(32, 15, 0xf0000000, 0xf0000000);
CHECK_ENC_GET(64, 1, 0x00000f0000000000ull, 0x0000010000000000ull);
CHECK_ENC_GET(64, 3, 0x0000f00000000000ull, 0x0000300000000000ull);
CHECK_ENC_GET(64, 5, 0x000f000000000000ull, 0x0005000000000000ull);
CHECK_ENC_GET(64, 7, 0x00f0000000000000ull, 0x0070000000000000ull);
CHECK_ENC_GET(64, 14, 0x0f00000000000000ull, 0x0e00000000000000ull);
CHECK_ENC_GET(64, 15, 0xf000000000000000ull, 0xf000000000000000ull);
}
#define CHECK(tp, mask) do { \
u64 v; \
\
for (v = 0; v < 1 << hweight32(mask); v++) \
KUNIT_ASSERT_FALSE(context, \
tp##_encode_bits(v, mask) != v << __ffs64(mask));\
} while (0)
static void __init test_bitfields_variables(struct kunit *context)
{
CHECK(u8, 0x0f);
CHECK(u8, 0xf0);
CHECK(u8, 0x38);
CHECK(u16, 0x0038);
CHECK(u16, 0x0380);
CHECK(u16, 0x3800);
CHECK(u16, 0x8000);
CHECK(u32, 0x80000000);
CHECK(u32, 0x7f000000);
CHECK(u32, 0x07e00000);
CHECK(u32, 0x00018000);
CHECK(u64, 0x8000000000000000ull);
CHECK(u64, 0x7f00000000000000ull);
CHECK(u64, 0x0001800000000000ull);
CHECK(u64, 0x0000000080000000ull);
CHECK(u64, 0x000000007f000000ull);
CHECK(u64, 0x0000000018000000ull);
CHECK(u64, 0x0000001f8000000ull);
}
#ifdef TEST_BITFIELD_COMPILE
static void __init test_bitfields_compile(struct kunit *context)
{
/* these should fail compilation */
CHECK_ENC_GET(16, 16, 0x0f00, 0x1000);
u32_encode_bits(7, 0x06000000);
/* this should at least give a warning */
u16_encode_bits(0, 0x60000);
}
#endif
static struct kunit_case __refdata bitfields_test_cases[] = {
KUNIT_CASE(test_bitfields_constants),
KUNIT_CASE(test_bitfields_variables),
{}
};
static struct kunit_suite bitfields_test_suite = {
.name = "bitfields",
.test_cases = bitfields_test_cases,
};
kunit_test_suites(&bitfields_test_suite);
MODULE_AUTHOR("Johannes Berg <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | lib/bitfield_kunit.c |
/* Small bzip2 deflate implementation, by Rob Landley ([email protected]).
Based on bzip2 decompression code by Julian R Seward ([email protected]),
which also acknowledges contributions by Mike Burrows, David Wheeler,
Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten,
Robert Sedgewick, and Jon L. Bentley.
This code is licensed under the LGPLv2:
LGPL (http://www.gnu.org/copyleft/lgpl.html)
*/
/*
Size and speed optimizations by Manuel Novoa III ([email protected]).
More efficient reading of Huffman codes, a streamlined read_bunzip()
function, and various other tweaks. In (limited) tests, approximately
20% faster than bzcat on x86 and about 10% faster on arm.
Note that about 2/3 of the time is spent in read_bunzip() reversing
the Burrows-Wheeler transformation. Much of that time is delay
resulting from cache misses.
I would ask that anyone benefiting from this work, especially those
using it in commercial products, consider making a donation to my local
non-profit hospice organization in the name of the woman I loved, who
passed away Feb. 12, 2003.
In memory of Toni W. Hagan
Hospice of Acadiana, Inc.
2600 Johnston St., Suite 200
Lafayette, LA 70503-3240
Phone (337) 232-1234 or 1-800-738-2226
Fax (337) 232-1297
https://www.hospiceacadiana.com/
Manuel
*/
/*
Made it fit for running in Linux Kernel by Alain Knaff ([email protected])
*/
#ifdef STATIC
#define PREBOOT
#else
#include <linux/decompress/bunzip2.h>
#endif /* STATIC */
#include <linux/decompress/mm.h>
#include <linux/crc32poly.h>
#ifndef INT_MAX
#define INT_MAX 0x7fffffff
#endif
/* Constants for Huffman coding */
#define MAX_GROUPS 6
#define GROUP_SIZE 50 /* 64 would have been more efficient */
#define MAX_HUFCODE_BITS 20 /* Longest Huffman code allowed */
#define MAX_SYMBOLS 258 /* 256 literals + RUNA + RUNB */
#define SYMBOL_RUNA 0
#define SYMBOL_RUNB 1
/* Status return values */
#define RETVAL_OK 0
#define RETVAL_LAST_BLOCK (-1)
#define RETVAL_NOT_BZIP_DATA (-2)
#define RETVAL_UNEXPECTED_INPUT_EOF (-3)
#define RETVAL_UNEXPECTED_OUTPUT_EOF (-4)
#define RETVAL_DATA_ERROR (-5)
#define RETVAL_OUT_OF_MEMORY (-6)
#define RETVAL_OBSOLETE_INPUT (-7)
/* Other housekeeping constants */
#define BZIP2_IOBUF_SIZE 4096
/* This is what we know about each Huffman coding group */
struct group_data {
/* We have an extra slot at the end of limit[] for a sentinel value. */
int limit[MAX_HUFCODE_BITS+1];
int base[MAX_HUFCODE_BITS];
int permute[MAX_SYMBOLS];
int minLen, maxLen;
};
/* Structure holding all the housekeeping data, including IO buffers and
memory that persists between calls to bunzip */
struct bunzip_data {
/* State for interrupting output loop */
int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent;
/* I/O tracking data (file handles, buffers, positions, etc.) */
long (*fill)(void*, unsigned long);
long inbufCount, inbufPos /*, outbufPos*/;
unsigned char *inbuf /*,*outbuf*/;
unsigned int inbufBitCount, inbufBits;
/* The CRC values stored in the block header and calculated from the
data */
unsigned int crc32Table[256], headerCRC, totalCRC, writeCRC;
/* Intermediate buffer and its size (in bytes) */
unsigned int *dbuf, dbufSize;
/* These things are a bit too big to go on the stack */
unsigned char selectors[32768]; /* nSelectors = 15 bits */
struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */
int io_error; /* non-zero if we have IO error */
int byteCount[256];
unsigned char symToByte[256], mtfSymbol[256];
};
/* Return the next nnn bits of input. All reads from the compressed input
are done through this function. All reads are big endian */
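/* Editor's note: "big endian" here means most-significant bit first; if the
   next unread input byte is 0xb1 (binary 1011 0001), get_bits(bd, 4) returns
   0xb and a second get_bits(bd, 4) returns 0x1. */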
static unsigned int INIT get_bits(struct bunzip_data *bd, char bits_wanted)
{
unsigned int bits = 0;
/* If we need to get more data from the byte buffer, do so.
(Loop getting one byte at a time to enforce endianness and avoid
unaligned access.) */
while (bd->inbufBitCount < bits_wanted) {
/* If we need to read more data from file into byte buffer, do
so */
if (bd->inbufPos == bd->inbufCount) {
if (bd->io_error)
return 0;
bd->inbufCount = bd->fill(bd->inbuf, BZIP2_IOBUF_SIZE);
if (bd->inbufCount <= 0) {
bd->io_error = RETVAL_UNEXPECTED_INPUT_EOF;
return 0;
}
bd->inbufPos = 0;
}
/* Avoid 32-bit overflow (dump bit buffer to top of output) */
if (bd->inbufBitCount >= 24) {
bits = bd->inbufBits&((1 << bd->inbufBitCount)-1);
bits_wanted -= bd->inbufBitCount;
bits <<= bits_wanted;
bd->inbufBitCount = 0;
}
/* Grab next 8 bits of input from buffer. */
bd->inbufBits = (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
bd->inbufBitCount += 8;
}
/* Calculate result */
bd->inbufBitCount -= bits_wanted;
bits |= (bd->inbufBits >> bd->inbufBitCount)&((1 << bits_wanted)-1);
return bits;
}
/* Unpacks the next block and sets up for the inverse burrows-wheeler step. */
static int INIT get_next_block(struct bunzip_data *bd)
{
struct group_data *hufGroup = NULL;
int *base = NULL;
int *limit = NULL;
int dbufCount, nextSym, dbufSize, groupCount, selector,
i, j, k, t, runPos, symCount, symTotal, nSelectors, *byteCount;
unsigned char uc, *symToByte, *mtfSymbol, *selectors;
unsigned int *dbuf, origPtr;
dbuf = bd->dbuf;
dbufSize = bd->dbufSize;
selectors = bd->selectors;
byteCount = bd->byteCount;
symToByte = bd->symToByte;
mtfSymbol = bd->mtfSymbol;
/* Read in header signature and CRC, then validate signature.
(last block signature means CRC is for whole file, return now) */
i = get_bits(bd, 24);
j = get_bits(bd, 24);
bd->headerCRC = get_bits(bd, 32);
if ((i == 0x177245) && (j == 0x385090))
return RETVAL_LAST_BLOCK;
if ((i != 0x314159) || (j != 0x265359))
return RETVAL_NOT_BZIP_DATA;
/* We can add support for blockRandomised if anybody complains.
There was some code for this in busybox 1.0.0-pre3, but nobody ever
noticed that it didn't actually work. */
if (get_bits(bd, 1))
return RETVAL_OBSOLETE_INPUT;
origPtr = get_bits(bd, 24);
if (origPtr >= dbufSize)
return RETVAL_DATA_ERROR;
/* mapping table: if some byte values are never used (encoding things
like ascii text), the compression code removes the gaps to have fewer
symbols to deal with, and writes a sparse bitfield indicating which
values were present. We make a translation table to convert the
symbols back to the corresponding bytes. */
t = get_bits(bd, 16);
symTotal = 0;
for (i = 0; i < 16; i++) {
if (t&(1 << (15-i))) {
k = get_bits(bd, 16);
for (j = 0; j < 16; j++)
if (k&(1 << (15-j)))
symToByte[symTotal++] = (16*i)+j;
}
}
/* How many different Huffman coding groups does this block use? */
groupCount = get_bits(bd, 3);
if (groupCount < 2 || groupCount > MAX_GROUPS)
return RETVAL_DATA_ERROR;
/* nSelectors: Every GROUP_SIZE many symbols we select a new
Huffman coding group. Read in the group selector list,
which is stored as MTF encoded bit runs. (MTF = Move To
Front, as each value is used it's moved to the start of the
list.) */
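/* Editor's worked example of the MTF decode below: with groupCount == 3 and
   mtfSymbol == {0, 1, 2}, the bit run "110" gives j == 2, so selector 2 is
   recorded and the table is reordered to {2, 0, 1}. */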
nSelectors = get_bits(bd, 15);
if (!nSelectors)
return RETVAL_DATA_ERROR;
for (i = 0; i < groupCount; i++)
mtfSymbol[i] = i;
for (i = 0; i < nSelectors; i++) {
/* Get next value */
for (j = 0; get_bits(bd, 1); j++)
if (j >= groupCount)
return RETVAL_DATA_ERROR;
/* Decode MTF to get the next selector */
uc = mtfSymbol[j];
for (; j; j--)
mtfSymbol[j] = mtfSymbol[j-1];
mtfSymbol[0] = selectors[i] = uc;
}
/* Read the Huffman coding tables for each group, which code
for symTotal literal symbols, plus two run symbols (RUNA,
RUNB) */
symCount = symTotal+2;
for (j = 0; j < groupCount; j++) {
unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
int minLen, maxLen, pp;
/* Read Huffman code lengths for each symbol. They're
stored in a way similar to mtf; record a starting
value for the first symbol, and an offset from the
previous value for every symbol after that.
(Subtracting 1 before the loop and then adding it
back at the end is an optimization that makes the
test inside the loop simpler: symbol length 0
becomes negative, so an unsigned inequality catches
it.) */
t = get_bits(bd, 5)-1;
for (i = 0; i < symCount; i++) {
for (;;) {
if (((unsigned)t) > (MAX_HUFCODE_BITS-1))
return RETVAL_DATA_ERROR;
/* If first bit is 0, stop. Else
second bit indicates whether to
increment or decrement the value.
Optimization: grab 2 bits and unget
the second if the first was 0. */
k = get_bits(bd, 2);
if (k < 2) {
bd->inbufBitCount++;
break;
}
/* Add one if second bit 1, else
* subtract 1. Avoids if/else */
t += (((k+1)&2)-1);
}
/* Correct for the initial -1, to get the
* final symbol length */
length[i] = t+1;
}
/* Find largest and smallest lengths in this group */
minLen = maxLen = length[0];
for (i = 1; i < symCount; i++) {
if (length[i] > maxLen)
maxLen = length[i];
else if (length[i] < minLen)
minLen = length[i];
}
/* Calculate permute[], base[], and limit[] tables from
* length[].
*
* permute[] is the lookup table for converting
* Huffman coded symbols into decoded symbols. base[]
* is the amount to subtract from the value of a
* Huffman symbol of a given length when using
* permute[].
*
* limit[] indicates the largest numerical value a
* symbol with a given number of bits can have. This
* is how the Huffman codes can vary in length: each
* code with a value > limit[length] needs another
* bit.
*/
hufGroup = bd->groups+j;
hufGroup->minLen = minLen;
hufGroup->maxLen = maxLen;
/* Note that minLen can't be smaller than 1, so we
adjust the base and limit array pointers so we're
not always wasting the first entry. We do this
again when using them (during symbol decoding).*/
base = hufGroup->base-1;
limit = hufGroup->limit-1;
/* Calculate permute[]. Concurrently, initialize
* temp[] and limit[]. */
pp = 0;
for (i = minLen; i <= maxLen; i++) {
temp[i] = limit[i] = 0;
for (t = 0; t < symCount; t++)
if (length[t] == i)
hufGroup->permute[pp++] = t;
}
/* Count symbols coded for at each bit length */
for (i = 0; i < symCount; i++)
temp[length[i]]++;
/* Calculate limit[] (the largest symbol-coding value
*at each bit length, which is (previous limit <<
*1)+symbols at this level), and base[] (number of
*symbols to ignore at each bit length, which is limit
*minus the cumulative count of symbols coded for
*already). */
pp = t = 0;
for (i = minLen; i < maxLen; i++) {
pp += temp[i];
/* We read the largest possible symbol size
and then unget bits after determining how
many we need, and those extra bits could be
set to anything. (They're noise from
future symbols.) At each level we're
really only interested in the first few
bits, so here we set all the trailing
to-be-ignored bits to 1 so they don't
affect the value > limit[length]
comparison. */
limit[i] = (pp << (maxLen - i)) - 1;
pp <<= 1;
base[i+1] = pp-(t += temp[i]);
}
limit[maxLen+1] = INT_MAX; /* Sentinel value for
* reading next sym. */
limit[maxLen] = pp+temp[maxLen]-1;
base[minLen] = 0;
}
/* We've finished reading and digesting the block header. Now
read this block's Huffman coded symbols from the file and
undo the Huffman coding and run length encoding, saving the
result into dbuf[dbufCount++] = uc */
/* Initialize symbol occurrence counters and symbol Move To
* Front table */
for (i = 0; i < 256; i++) {
byteCount[i] = 0;
mtfSymbol[i] = (unsigned char)i;
}
/* Loop through compressed symbols. */
runPos = dbufCount = symCount = selector = 0;
for (;;) {
/* Determine which Huffman coding group to use. */
if (!(symCount--)) {
symCount = GROUP_SIZE-1;
if (selector >= nSelectors)
return RETVAL_DATA_ERROR;
hufGroup = bd->groups+selectors[selector++];
base = hufGroup->base-1;
limit = hufGroup->limit-1;
}
/* Read next Huffman-coded symbol. */
/* Note: It is far cheaper to read maxLen bits and
back up than it is to read minLen bits and then an
additional bit at a time, testing as we go.
Because there is a trailing last block (with file
CRC), there is no danger of the overread causing an
unexpected EOF for a valid compressed file. As a
further optimization, we do the read inline
(falling back to a call to get_bits if the buffer
runs dry). The following (up to got_huff_bits:) is
equivalent to j = get_bits(bd, hufGroup->maxLen);
*/
while (bd->inbufBitCount < hufGroup->maxLen) {
if (bd->inbufPos == bd->inbufCount) {
j = get_bits(bd, hufGroup->maxLen);
goto got_huff_bits;
}
bd->inbufBits =
(bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
bd->inbufBitCount += 8;
}
bd->inbufBitCount -= hufGroup->maxLen;
j = (bd->inbufBits >> bd->inbufBitCount)&
((1 << hufGroup->maxLen)-1);
got_huff_bits:
/* Figure how many bits are in next symbol and
* unget extras */
i = hufGroup->minLen;
while (j > limit[i])
++i;
bd->inbufBitCount += (hufGroup->maxLen - i);
/* Huffman decode value to get nextSym (with bounds checking) */
if ((i > hufGroup->maxLen)
|| (((unsigned)(j = (j>>(hufGroup->maxLen-i))-base[i]))
>= MAX_SYMBOLS))
return RETVAL_DATA_ERROR;
nextSym = hufGroup->permute[j];
/* We have now decoded the symbol, which indicates
either a new literal byte, or a repeated run of the
most recent literal byte. First, check if nextSym
indicates a repeated run, and if so loop collecting
how many times to repeat the last literal. */
if (((unsigned)nextSym) <= SYMBOL_RUNB) { /* RUNA or RUNB */
/* If this is the start of a new run, zero out
* counter */
if (!runPos) {
runPos = 1;
t = 0;
}
/* Neat trick that saves 1 symbol: instead of
or-ing 0 or 1 at each bit position, add 1
or 2 instead. For example, 1011 is 1 << 0
+ 1 << 1 + 2 << 2. 1010 is 2 << 0 + 2 << 1
+ 1 << 2. You can make any bit pattern
that way using 1 less symbol than the basic
or 0/1 method (except all bits 0, which
would use no symbols, but a run of length 0
doesn't mean anything in this context).
Thus space is saved. */
t += (runPos << nextSym);
/* +runPos if RUNA; +2*runPos if RUNB */
runPos <<= 1;
continue;
}
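/* Editor's worked example of the run coding above: the symbol sequence
   RUNB, RUNA decodes to t = 2*1 + 1*2 = 4 copies, and RUNA, RUNA to
   t = 1*1 + 1*2 = 3 (bijective base 2, least significant digit first). */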
/* When we hit the first non-run symbol after a run,
we now know how many times to repeat the last
literal, so append that many copies to our buffer
of decoded symbols (dbuf) now. (The last literal
used is the one at the head of the mtfSymbol
array.) */
if (runPos) {
runPos = 0;
if (dbufCount+t >= dbufSize)
return RETVAL_DATA_ERROR;
uc = symToByte[mtfSymbol[0]];
byteCount[uc] += t;
while (t--)
dbuf[dbufCount++] = uc;
}
/* Is this the terminating symbol? */
if (nextSym > symTotal)
break;
/* At this point, nextSym indicates a new literal
character. Subtract one to get the position in the
MTF array at which this literal is currently to be
found. (Note that the result can't be -1 or 0,
because 0 and 1 are RUNA and RUNB. But another
instance of the first symbol in the mtf array,
position 0, would have been handled as part of a
run above. Therefore 1 unused mtf position minus 2
non-literal nextSym values equals -1.) */
if (dbufCount >= dbufSize)
return RETVAL_DATA_ERROR;
i = nextSym - 1;
uc = mtfSymbol[i];
/* Adjust the MTF array. Since we typically expect to
*move only a small number of symbols, and are bound
*by 256 in any case, using memmove here would
*typically be bigger and slower due to function call
*overhead and other assorted setup costs. */
do {
mtfSymbol[i] = mtfSymbol[i-1];
} while (--i);
mtfSymbol[0] = uc;
uc = symToByte[uc];
/* We have our literal byte. Save it into dbuf. */
byteCount[uc]++;
dbuf[dbufCount++] = (unsigned int)uc;
}
/* At this point, we've read all the Huffman-coded symbols
(and repeated runs) for this block from the input stream,
and decoded them into the intermediate buffer. There are
dbufCount many decoded bytes in dbuf[]. Now undo the
Burrows-Wheeler transform on dbuf. See
http://dogma.net/markn/articles/bwt/bwt.htm
*/
/* Turn byteCount into cumulative occurrence counts of 0 to n-1. */
j = 0;
for (i = 0; i < 256; i++) {
k = j+byteCount[i];
byteCount[i] = j;
j = k;
}
/* Figure out what order dbuf would be in if we sorted it. */
for (i = 0; i < dbufCount; i++) {
uc = (unsigned char)(dbuf[i] & 0xff);
dbuf[byteCount[uc]] |= (i << 8);
byteCount[uc]++;
}
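/* Editor's note: after this pass each dbuf entry keeps its decoded byte in
   the low 8 bits and, in the upper bits, a link that read_bunzip() follows
   (pos = dbuf[pos]) to emit the output in the right order. */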
/* Decode first byte by hand to initialize "previous" byte.
Note that it doesn't get output, and if the first three
characters are identical it doesn't qualify as a run (hence
writeRunCountdown = 5). */
if (dbufCount) {
if (origPtr >= dbufCount)
return RETVAL_DATA_ERROR;
bd->writePos = dbuf[origPtr];
bd->writeCurrent = (unsigned char)(bd->writePos&0xff);
bd->writePos >>= 8;
bd->writeRunCountdown = 5;
}
bd->writeCount = dbufCount;
return RETVAL_OK;
}
/* Undo burrows-wheeler transform on intermediate buffer to produce output.
If start_bunzip was initialized with out_fd =-1, then up to len bytes of
data are written to outbuf. Return value is number of bytes written or
error (all errors are negative numbers). If out_fd!=-1, outbuf and len
are ignored, data is written to out_fd and return is RETVAL_OK or error.
*/
static int INIT read_bunzip(struct bunzip_data *bd, char *outbuf, int len)
{
const unsigned int *dbuf;
int pos, xcurrent, previous, gotcount;
/* If last read was short due to end of file, return last block now */
if (bd->writeCount < 0)
return bd->writeCount;
gotcount = 0;
dbuf = bd->dbuf;
pos = bd->writePos;
xcurrent = bd->writeCurrent;
/* We will always have pending decoded data to write into the output
buffer unless this is the very first call (in which case we haven't
Huffman-decoded a block into the intermediate buffer yet). */
if (bd->writeCopies) {
/* Inside the loop, writeCopies means extra copies (beyond 1) */
--bd->writeCopies;
/* Loop outputting bytes */
for (;;) {
/* If the output buffer is full, snapshot
* state and return */
if (gotcount >= len) {
bd->writePos = pos;
bd->writeCurrent = xcurrent;
bd->writeCopies++;
return len;
}
/* Write next byte into output buffer, updating CRC */
outbuf[gotcount++] = xcurrent;
bd->writeCRC = (((bd->writeCRC) << 8)
^bd->crc32Table[((bd->writeCRC) >> 24)
^xcurrent]);
/* Loop now if we're outputting multiple
* copies of this byte */
if (bd->writeCopies) {
--bd->writeCopies;
continue;
}
decode_next_byte:
if (!bd->writeCount--)
break;
/* Follow sequence vector to undo
* Burrows-Wheeler transform */
previous = xcurrent;
pos = dbuf[pos];
xcurrent = pos&0xff;
pos >>= 8;
/* After 3 consecutive copies of the same
byte, the 4th is a repeat count. We count
down from 4 instead of counting up because
testing for non-zero is faster */
if (--bd->writeRunCountdown) {
if (xcurrent != previous)
bd->writeRunCountdown = 4;
} else {
/* We have a repeated run, this byte
* indicates the count */
bd->writeCopies = xcurrent;
xcurrent = previous;
bd->writeRunCountdown = 5;
/* Sometimes there are just 3 bytes
* (run length 0) */
if (!bd->writeCopies)
goto decode_next_byte;
/* Subtract the 1 copy we'd output
* anyway to get extras */
--bd->writeCopies;
}
}
/* Decompression of this block completed successfully */
bd->writeCRC = ~bd->writeCRC;
bd->totalCRC = ((bd->totalCRC << 1) |
(bd->totalCRC >> 31)) ^ bd->writeCRC;
/* If this block had a CRC error, force file level CRC error. */
if (bd->writeCRC != bd->headerCRC) {
bd->totalCRC = bd->headerCRC+1;
return RETVAL_LAST_BLOCK;
}
}
/* Refill the intermediate buffer by Huffman-decoding next
* block of input */
/* (previous is just a convenient unused temp variable here) */
previous = get_next_block(bd);
if (previous) {
bd->writeCount = previous;
return (previous != RETVAL_LAST_BLOCK) ? previous : gotcount;
}
bd->writeCRC = 0xffffffffUL;
pos = bd->writePos;
xcurrent = bd->writeCurrent;
goto decode_next_byte;
}
static long INIT nofill(void *buf, unsigned long len)
{
return -1;
}
/* Allocate the structure, read file header. If in_fd ==-1, inbuf must contain
a complete bunzip file (len bytes long). If in_fd!=-1, inbuf and len are
ignored, and data is read from file handle into temporary buffer. */
static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
long (*fill)(void*, unsigned long))
{
struct bunzip_data *bd;
unsigned int i, j, c;
const unsigned int BZh0 =
(((unsigned int)'B') << 24)+(((unsigned int)'Z') << 16)
+(((unsigned int)'h') << 8)+(unsigned int)'0';
/* Figure out how much data to allocate */
i = sizeof(struct bunzip_data);
/* Allocate bunzip_data. Most fields initialize to zero. */
bd = *bdp = malloc(i);
if (!bd)
return RETVAL_OUT_OF_MEMORY;
memset(bd, 0, sizeof(struct bunzip_data));
/* Setup input buffer */
bd->inbuf = inbuf;
bd->inbufCount = len;
if (fill != NULL)
bd->fill = fill;
else
bd->fill = nofill;
/* Init the CRC32 table (big endian) */
for (i = 0; i < 256; i++) {
c = i << 24;
for (j = 8; j; j--)
c = c&0x80000000 ? (c << 1)^(CRC32_POLY_BE) : (c << 1);
bd->crc32Table[i] = c;
}
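/* Editor's note: this is the textbook bit-at-a-time construction of a
   big-endian (MSB-first) CRC-32 table; entry i is the CRC of the single
   byte i placed in the top byte of the shift register. */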
/* Ensure that file starts with "BZh['1'-'9']." */
i = get_bits(bd, 32);
if (((unsigned int)(i-BZh0-1)) >= 9)
return RETVAL_NOT_BZIP_DATA;
/* Fourth byte (ascii '1'-'9') indicates block size in units of 100k of
uncompressed data. Allocate intermediate buffer for block. */
bd->dbufSize = 100000*(i-BZh0);
bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
if (!bd->dbuf)
return RETVAL_OUT_OF_MEMORY;
return RETVAL_OK;
}
/* Example usage: decompress src_fd to dst_fd. (Stops at end of bzip2 data,
not end of file.) */
STATIC int INIT bunzip2(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *outbuf,
long *pos,
void(*error)(char *x))
{
struct bunzip_data *bd;
int i = -1;
unsigned char *inbuf;
if (flush)
outbuf = malloc(BZIP2_IOBUF_SIZE);
if (!outbuf) {
error("Could not allocate output buffer");
return RETVAL_OUT_OF_MEMORY;
}
if (buf)
inbuf = buf;
else
inbuf = malloc(BZIP2_IOBUF_SIZE);
if (!inbuf) {
error("Could not allocate input buffer");
i = RETVAL_OUT_OF_MEMORY;
goto exit_0;
}
i = start_bunzip(&bd, inbuf, len, fill);
if (!i) {
for (;;) {
i = read_bunzip(bd, outbuf, BZIP2_IOBUF_SIZE);
if (i <= 0)
break;
if (!flush)
outbuf += i;
else
if (i != flush(outbuf, i)) {
i = RETVAL_UNEXPECTED_OUTPUT_EOF;
break;
}
}
}
/* Check CRC and release memory */
if (i == RETVAL_LAST_BLOCK) {
if (bd->headerCRC != bd->totalCRC)
error("Data integrity error when decompressing.");
else
i = RETVAL_OK;
} else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) {
error("Compressed file ends unexpectedly");
}
if (!bd)
goto exit_1;
if (bd->dbuf)
large_free(bd->dbuf);
if (pos)
*pos = bd->inbufPos;
free(bd);
exit_1:
if (!buf)
free(inbuf);
exit_0:
if (flush)
free(outbuf);
return i;
}
#ifdef PREBOOT
STATIC int INIT __decompress(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *outbuf, long olen,
long *pos,
void (*error)(char *x))
{
return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error);
}
#endif
| linux-master | lib/decompress_bunzip2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* lib/bitmap.c
* Helper functions for bitmap.h.
*/
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include "kstrtox.h"
/**
* DOC: bitmap introduction
*
* bitmaps provide an array of bits, implemented using an
* array of unsigned longs. The number of valid bits in a
* given bitmap does _not_ need to be an exact multiple of
* BITS_PER_LONG.
*
* The possible unused bits in the last, partially used word
* of a bitmap are 'don't care'. The implementation makes
* no particular effort to keep them zero. It ensures that
* their value will not affect the results of any operation.
* The bitmap operations that return Boolean (bitmap_empty,
* for example) or scalar (bitmap_weight, for example) results
* carefully filter out these unused bits from impacting their
* results.
*
* The byte ordering of bitmaps is more natural on little
* endian architectures. See the big-endian headers
* include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
* for the best explanations of this ordering.
*/
bool __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] != bitmap2[k])
return false;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
return false;
return true;
}
EXPORT_SYMBOL(__bitmap_equal);
bool __bitmap_or_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2,
const unsigned long *bitmap3,
unsigned int bits)
{
unsigned int k, lim = bits / BITS_PER_LONG;
unsigned long tmp;
for (k = 0; k < lim; ++k) {
if ((bitmap1[k] | bitmap2[k]) != bitmap3[k])
return false;
}
if (!(bits % BITS_PER_LONG))
return true;
tmp = (bitmap1[k] | bitmap2[k]) ^ bitmap3[k];
return (tmp & BITMAP_LAST_WORD_MASK(bits)) == 0;
}
void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits)
{
unsigned int k, lim = BITS_TO_LONGS(bits);
for (k = 0; k < lim; ++k)
dst[k] = ~src[k];
}
EXPORT_SYMBOL(__bitmap_complement);
/**
* __bitmap_shift_right - logical right shift of the bits in a bitmap
* @dst : destination bitmap
* @src : source bitmap
* @shift : shift by this many bits
* @nbits : bitmap size, in bits
*
* Shifting right (dividing) means moving bits in the MS -> LS bit
* direction. Zeros are fed into the vacated MS positions and the
* LS bits shifted off the bottom are lost.
*/
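/*
 * Editor's example: for an 8-bit bitmap holding 0b10110100 and @shift == 2,
 * the result is 0b00101101; bit 2 of @src becomes bit 0 of @dst and the two
 * vacated top bits are filled with zeros.
 */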
void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned shift, unsigned nbits)
{
unsigned k, lim = BITS_TO_LONGS(nbits);
unsigned off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
unsigned long mask = BITMAP_LAST_WORD_MASK(nbits);
for (k = 0; off + k < lim; ++k) {
unsigned long upper, lower;
/*
* If shift is not word aligned, take lower rem bits of
* word above and make them the top rem bits of result.
*/
if (!rem || off + k + 1 >= lim)
upper = 0;
else {
upper = src[off + k + 1];
if (off + k + 1 == lim - 1)
upper &= mask;
upper <<= (BITS_PER_LONG - rem);
}
lower = src[off + k];
if (off + k == lim - 1)
lower &= mask;
lower >>= rem;
dst[k] = lower | upper;
}
if (off)
memset(&dst[lim - off], 0, off*sizeof(unsigned long));
}
EXPORT_SYMBOL(__bitmap_shift_right);
/**
* __bitmap_shift_left - logical left shift of the bits in a bitmap
* @dst : destination bitmap
* @src : source bitmap
* @shift : shift by this many bits
* @nbits : bitmap size, in bits
*
* Shifting left (multiplying) means moving bits in the LS -> MS
* direction. Zeros are fed into the vacated LS bit positions
* and those MS bits shifted off the top are lost.
*/
void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits)
{
int k;
unsigned int lim = BITS_TO_LONGS(nbits);
unsigned int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
for (k = lim - off - 1; k >= 0; --k) {
unsigned long upper, lower;
/*
* If shift is not word aligned, take upper rem bits of
* word below and make them the bottom rem bits of result.
*/
if (rem && k > 0)
lower = src[k - 1] >> (BITS_PER_LONG - rem);
else
lower = 0;
upper = src[k] << rem;
dst[k + off] = lower | upper;
}
if (off)
memset(dst, 0, off*sizeof(unsigned long));
}
EXPORT_SYMBOL(__bitmap_shift_left);
/**
* bitmap_cut() - remove bit region from bitmap and right shift remaining bits
* @dst: destination bitmap, might overlap with src
* @src: source bitmap
* @first: start bit of region to be removed
* @cut: number of bits to remove
* @nbits: bitmap size, in bits
*
* Set the n-th bit of @dst iff the n-th bit of @src is set and
* n is less than @first, or the m-th bit of @src is set for any
* m such that @first <= n < nbits, and m = n + @cut.
*
* In pictures, example for a big-endian 32-bit architecture:
*
* The @src bitmap is::
*
* 31 63
* | |
* 10000000 11000001 11110010 00010101 10000000 11000001 01110010 00010101
* | | | |
* 16 14 0 32
*
* if @cut is 3, and @first is 14, bits 14-16 in @src are cut and @dst is::
*
* 31 63
* | |
* 10110000 00011000 00110010 00010101 00010000 00011000 00101110 01000010
* | | |
* 14 (bit 17 0 32
* from @src)
*
* Note that @dst and @src might overlap partially or entirely.
*
* This is implemented in the obvious way, with a shift and carry
* step for each moved bit. Optimisation is left as an exercise
* for the compiler.
*/
void bitmap_cut(unsigned long *dst, const unsigned long *src,
unsigned int first, unsigned int cut, unsigned int nbits)
{
unsigned int len = BITS_TO_LONGS(nbits);
unsigned long keep = 0, carry;
int i;
if (first % BITS_PER_LONG) {
keep = src[first / BITS_PER_LONG] &
(~0UL >> (BITS_PER_LONG - first % BITS_PER_LONG));
}
memmove(dst, src, len * sizeof(*dst));
while (cut--) {
for (i = first / BITS_PER_LONG; i < len; i++) {
if (i < len - 1)
carry = dst[i + 1] & 1UL;
else
carry = 0;
dst[i] = (dst[i] >> 1) | (carry << (BITS_PER_LONG - 1));
}
}
dst[first / BITS_PER_LONG] &= ~0UL << (first % BITS_PER_LONG);
dst[first / BITS_PER_LONG] |= keep;
}
EXPORT_SYMBOL(bitmap_cut);
bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k;
unsigned int lim = bits/BITS_PER_LONG;
unsigned long result = 0;
for (k = 0; k < lim; k++)
result |= (dst[k] = bitmap1[k] & bitmap2[k]);
if (bits % BITS_PER_LONG)
result |= (dst[k] = bitmap1[k] & bitmap2[k] &
BITMAP_LAST_WORD_MASK(bits));
return result != 0;
}
EXPORT_SYMBOL(__bitmap_and);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k;
unsigned int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] | bitmap2[k];
}
EXPORT_SYMBOL(__bitmap_or);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k;
unsigned int nr = BITS_TO_LONGS(bits);
for (k = 0; k < nr; k++)
dst[k] = bitmap1[k] ^ bitmap2[k];
}
EXPORT_SYMBOL(__bitmap_xor);
bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k;
unsigned int lim = bits/BITS_PER_LONG;
unsigned long result = 0;
for (k = 0; k < lim; k++)
result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
if (bits % BITS_PER_LONG)
result |= (dst[k] = bitmap1[k] & ~bitmap2[k] &
BITMAP_LAST_WORD_MASK(bits));
return result != 0;
}
EXPORT_SYMBOL(__bitmap_andnot);
void __bitmap_replace(unsigned long *dst,
const unsigned long *old, const unsigned long *new,
const unsigned long *mask, unsigned int nbits)
{
unsigned int k;
unsigned int nr = BITS_TO_LONGS(nbits);
for (k = 0; k < nr; k++)
dst[k] = (old[k] & ~mask[k]) | (new[k] & mask[k]);
}
EXPORT_SYMBOL(__bitmap_replace);
bool __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & bitmap2[k])
return true;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
return true;
return false;
}
EXPORT_SYMBOL(__bitmap_intersects);
bool __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & ~bitmap2[k])
return false;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
return false;
return true;
}
EXPORT_SYMBOL(__bitmap_subset);
#define BITMAP_WEIGHT(FETCH, bits) \
({ \
unsigned int __bits = (bits), idx, w = 0; \
\
for (idx = 0; idx < __bits / BITS_PER_LONG; idx++) \
w += hweight_long(FETCH); \
\
if (__bits % BITS_PER_LONG) \
w += hweight_long((FETCH) & BITMAP_LAST_WORD_MASK(__bits)); \
\
w; \
})
unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
{
return BITMAP_WEIGHT(bitmap[idx], bits);
}
EXPORT_SYMBOL(__bitmap_weight);
unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
return BITMAP_WEIGHT(bitmap1[idx] & bitmap2[idx], bits);
}
EXPORT_SYMBOL(__bitmap_weight_and);
void __bitmap_set(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
const unsigned int size = start + len;
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
while (len - bits_to_set >= 0) {
*p |= mask_to_set;
len -= bits_to_set;
bits_to_set = BITS_PER_LONG;
mask_to_set = ~0UL;
p++;
}
if (len) {
mask_to_set &= BITMAP_LAST_WORD_MASK(size);
*p |= mask_to_set;
}
}
EXPORT_SYMBOL(__bitmap_set);
void __bitmap_clear(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
const unsigned int size = start + len;
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
while (len - bits_to_clear >= 0) {
*p &= ~mask_to_clear;
len -= bits_to_clear;
bits_to_clear = BITS_PER_LONG;
mask_to_clear = ~0UL;
p++;
}
if (len) {
mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
*p &= ~mask_to_clear;
}
}
EXPORT_SYMBOL(__bitmap_clear);
/**
* bitmap_find_next_zero_area_off - find a contiguous aligned zero area
* @map: The address to base the search on
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
* @align_mask: Alignment mask for zero area
* @align_offset: Alignment offset for zero area.
*
* The @align_mask should be one less than a power of 2; the effect is that
* the bit offset of all zero areas this function finds plus @align_offset
* is a multiple of that power of 2.
*/
unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
unsigned long align_mask,
unsigned long align_offset)
{
unsigned long index, end, i;
again:
index = find_next_zero_bit(map, size, start);
/* Align allocation */
index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset;
end = index + nr;
if (end > size)
return end;
i = find_next_bit(map, end, index);
if (i < end) {
start = i + 1;
goto again;
}
return index;
}
EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
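/*
 * Editor's usage sketch (not part of the original file): reserve four clear
 * bits whose starting offset is a multiple of four; @align_mask is one less
 * than that power of two, as described above.
 */
#if 0 /* illustrative only */
static unsigned long find_aligned_quad(unsigned long *map, unsigned long size)
{
	unsigned long start = bitmap_find_next_zero_area_off(map, size, 0, 4, 3, 0);

	if (start >= size)
		return size;	/* no suitable region */
	bitmap_set(map, start, 4);
	return start;
}
#endif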
/*
* Bitmap printing & parsing functions: first version by Nadia Yvette Chambers,
* second version by Paul Jackson, third by Joe Korty.
*/
/**
* bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap
*
* @ubuf: pointer to user buffer containing string.
* @ulen: buffer size in bytes. If string is smaller than this
* then it must be terminated with a \0.
* @maskp: pointer to bitmap array that will contain result.
* @nmaskbits: size of bitmap, in bits.
*/
int bitmap_parse_user(const char __user *ubuf,
unsigned int ulen, unsigned long *maskp,
int nmaskbits)
{
char *buf;
int ret;
buf = memdup_user_nul(ubuf, ulen);
if (IS_ERR(buf))
return PTR_ERR(buf);
ret = bitmap_parse(buf, UINT_MAX, maskp, nmaskbits);
kfree(buf);
return ret;
}
EXPORT_SYMBOL(bitmap_parse_user);
/**
* bitmap_print_to_pagebuf - convert bitmap to list or hex format ASCII string
* @list: indicates whether the bitmap must be list
* @buf: page aligned buffer into which string is placed
* @maskp: pointer to bitmap to convert
* @nmaskbits: size of bitmap, in bits
*
* Output format is a comma-separated list of decimal numbers and
* ranges if list is specified or hex digits grouped into comma-separated
* sets of 8 digits/set. Returns the number of characters written to buf.
*
* It is assumed that @buf is a pointer into a PAGE_SIZE, page-aligned
* area and that sufficient storage remains at @buf to accommodate the
* bitmap_print_to_pagebuf() output. Returns the number of characters
* actually printed to @buf, excluding terminating '\0'.
*/
int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
int nmaskbits)
{
ptrdiff_t len = PAGE_SIZE - offset_in_page(buf);
return list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
}
EXPORT_SYMBOL(bitmap_print_to_pagebuf);
/**
* bitmap_print_to_buf - convert bitmap to list or hex format ASCII string
* @list: indicates whether the bitmap must be list
* true: print in decimal list format
* false: print in hexadecimal bitmask format
* @buf: buffer into which string is placed
* @maskp: pointer to bitmap to convert
* @nmaskbits: size of bitmap, in bits
* @off: offset into the formatted string from which copying into @buf starts
* @count: the maximum number of bytes to print
*/
static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp,
int nmaskbits, loff_t off, size_t count)
{
const char *fmt = list ? "%*pbl\n" : "%*pb\n";
ssize_t size;
void *data;
data = kasprintf(GFP_KERNEL, fmt, nmaskbits, maskp);
if (!data)
return -ENOMEM;
size = memory_read_from_buffer(buf, count, &off, data, strlen(data) + 1);
kfree(data);
return size;
}
/**
* bitmap_print_bitmask_to_buf - convert bitmap to hex bitmask format ASCII string
* @buf: buffer into which string is placed
* @maskp: pointer to bitmap to convert
* @nmaskbits: size of bitmap, in bits
* @off: offset into the formatted string from which copying into @buf starts
* @count: the maximum number of bytes to print
*
* The bitmap_print_to_pagebuf() is used indirectly via its cpumap wrapper
* cpumap_print_to_pagebuf() or directly by drivers to export hexadecimal
* bitmask and decimal list to userspace by sysfs ABI.
* Drivers might be using a normal attribute for this kind of ABI. A
* normal attribute typically has show entry as below::
*
* static ssize_t example_attribute_show(struct device *dev,
* struct device_attribute *attr, char *buf)
* {
* ...
* return bitmap_print_to_pagebuf(true, buf, &mask, nr_trig_max);
* }
*
* show entry of attribute has no offset and count parameters and this
* means the file is limited to one page only.
* bitmap_print_to_pagebuf() API works terribly well for this kind of
* normal attribute with buf parameter and without offset, count::
*
* bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
* int nmaskbits)
* {
* }
*
* The problem is that once we have a large bitmap, the printed bitmask or
* list can exceed one page. Especially for a list, it could be as complex
* as 0,3,5,7,9,... and we have no simple way to know its exact size.
* It turns out bin_attribute is a way to break this limit. bin_attribute
* has show entry as below::
*
* static ssize_t
* example_bin_attribute_show(struct file *filp, struct kobject *kobj,
* struct bin_attribute *attr, char *buf,
* loff_t offset, size_t count)
* {
* ...
* }
*
 * With the new offset and count parameters, the sysfs ABI can support
 * file sizes of more than one page. For example, offset could be >= 4096.
 * bitmap_print_bitmask_to_buf() and bitmap_print_list_to_buf(), with their
 * cpumap wrappers cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf(),
 * let such drivers support large bitmasks and lists once they move to
 * bin_attribute. As a result, the corresponding parameters, such as off and
 * count, have to be passed from the bin_attribute show entry to this API.
*
 * The role of cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf()
 * is similar to that of cpumap_print_to_pagebuf(); the difference is that
 * bitmap_print_to_pagebuf() mainly serves sysfs attributes under the
 * assumption that the destination buffer is exactly one page and never more.
 * cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf(), on the other
 * hand, mainly serve bin_attribute, which is not limited to exactly one page
 * and can therefore break the size limit of the converted decimal list and
 * hexadecimal bitmask.
*
* WARNING!
*
* This function is not a replacement for sprintf() or bitmap_print_to_pagebuf().
* It is intended to workaround sysfs limitations discussed above and should be
* used carefully in general case for the following reasons:
*
 * - Time complexity is O(nbits^2/count), compared to O(nbits) for snprintf().
 * - Memory complexity is O(nbits), compared to O(1) for snprintf().
* - @off and @count are NOT offset and number of bits to print.
* - If printing part of bitmap as list, the resulting string is not a correct
* list representation of bitmap. Particularly, some bits within or out of
* related interval may be erroneously set or unset. The format of the string
* may be broken, so bitmap_parselist-like parser may fail parsing it.
 * - If printing the whole bitmap as a list by parts, the user must order the
 * calls so that the offset is incremented linearly.
 * - If printing the whole bitmap as a list by parts, the user must keep the
 * bitmap unchanged between the very first and very last call. Otherwise the
 * concatenated result may be incorrect, and its format may be broken.
*
* Returns the number of characters actually printed to @buf
*/
int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
int nmaskbits, loff_t off, size_t count)
{
return bitmap_print_to_buf(false, buf, maskp, nmaskbits, off, count);
}
EXPORT_SYMBOL(bitmap_print_bitmask_to_buf);
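/*
 * Illustrative sketch (assumptions: a driver-private 'example_mask' bitmap of
 * 'example_nbits' bits): a bin_attribute read callback only needs to forward
 * its offset and count, so the exported file can be larger than one page.
 *
 *        static ssize_t example_mask_read(struct file *file, struct kobject *kobj,
 *                                         struct bin_attribute *attr, char *buf,
 *                                         loff_t off, size_t count)
 *        {
 *                return bitmap_print_bitmask_to_buf(buf, example_mask,
 *                                                   example_nbits, off, count);
 *        }
 */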
/**
* bitmap_print_list_to_buf - convert bitmap to decimal list format ASCII string
* @buf: buffer into which string is placed
* @maskp: pointer to bitmap to convert
* @nmaskbits: size of bitmap, in bits
 * @off: offset into the formatted string from which we copy to @buf
* @count: the maximum number of bytes to print
*
 * Everything is the same as in bitmap_print_bitmask_to_buf() above except
 * the print format.
*/
int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
int nmaskbits, loff_t off, size_t count)
{
return bitmap_print_to_buf(true, buf, maskp, nmaskbits, off, count);
}
EXPORT_SYMBOL(bitmap_print_list_to_buf);
/*
* Region 9-38:4/10 describes the following bitmap structure:
 *  0        9  12    18                  38          N
 *  .........****......****......****..................
 *           ^  ^     ^                   ^           ^
 *       start off group_len             end        nbits
*/
struct region {
unsigned int start;
unsigned int off;
unsigned int group_len;
unsigned int end;
unsigned int nbits;
};
static void bitmap_set_region(const struct region *r, unsigned long *bitmap)
{
unsigned int start;
for (start = r->start; start <= r->end; start += r->group_len)
bitmap_set(bitmap, start, min(r->end - start + 1, r->off));
}
static int bitmap_check_region(const struct region *r)
{
if (r->start > r->end || r->group_len == 0 || r->off > r->group_len)
return -EINVAL;
if (r->end >= r->nbits)
return -ERANGE;
return 0;
}
static const char *bitmap_getnum(const char *str, unsigned int *num,
unsigned int lastbit)
{
unsigned long long n;
unsigned int len;
if (str[0] == 'N') {
*num = lastbit;
return str + 1;
}
len = _parse_integer(str, 10, &n);
if (!len)
return ERR_PTR(-EINVAL);
if (len & KSTRTOX_OVERFLOW || n != (unsigned int)n)
return ERR_PTR(-EOVERFLOW);
*num = n;
return str + len;
}
static inline bool end_of_str(char c)
{
return c == '\0' || c == '\n';
}
static inline bool __end_of_region(char c)
{
return isspace(c) || c == ',';
}
static inline bool end_of_region(char c)
{
return __end_of_region(c) || end_of_str(c);
}
/*
* The format allows commas and whitespaces at the beginning
* of the region.
*/
static const char *bitmap_find_region(const char *str)
{
while (__end_of_region(*str))
str++;
return end_of_str(*str) ? NULL : str;
}
static const char *bitmap_find_region_reverse(const char *start, const char *end)
{
while (start <= end && __end_of_region(*end))
end--;
return end;
}
static const char *bitmap_parse_region(const char *str, struct region *r)
{
unsigned int lastbit = r->nbits - 1;
if (!strncasecmp(str, "all", 3)) {
r->start = 0;
r->end = lastbit;
str += 3;
goto check_pattern;
}
str = bitmap_getnum(str, &r->start, lastbit);
if (IS_ERR(str))
return str;
if (end_of_region(*str))
goto no_end;
if (*str != '-')
return ERR_PTR(-EINVAL);
str = bitmap_getnum(str + 1, &r->end, lastbit);
if (IS_ERR(str))
return str;
check_pattern:
if (end_of_region(*str))
goto no_pattern;
if (*str != ':')
return ERR_PTR(-EINVAL);
str = bitmap_getnum(str + 1, &r->off, lastbit);
if (IS_ERR(str))
return str;
if (*str != '/')
return ERR_PTR(-EINVAL);
return bitmap_getnum(str + 1, &r->group_len, lastbit);
no_end:
r->end = r->start;
no_pattern:
r->off = r->end + 1;
r->group_len = r->end + 1;
return end_of_str(*str) ? NULL : str;
}
/**
* bitmap_parselist - convert list format ASCII string to bitmap
* @buf: read user string from this buffer; must be terminated
* with a \0 or \n.
* @maskp: write resulting mask here
* @nmaskbits: number of bits in mask to be written
*
* Input format is a comma-separated list of decimal numbers and
* ranges. Consecutively set bits are shown as two hyphen-separated
* decimal numbers, the smallest and largest bit numbers set in
* the range.
 * Optionally each range can be postfixed to denote that only parts of it
 * should be set. The range will be divided into groups of a specific size,
 * and from each group only the defined number of bits will be used.
 * Syntax: range:used_size/group_size
* Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769
* The value 'N' can be used as a dynamically substituted token for the
 * maximum allowed value, i.e. (nmaskbits - 1). Keep in mind that it is
* dynamic, so if system changes cause the bitmap width to change, such
* as more cores in a CPU list, then any ranges using N will also change.
*
* Returns: 0 on success, -errno on invalid input strings. Error values:
*
* - ``-EINVAL``: wrong region format
* - ``-EINVAL``: invalid character in string
* - ``-ERANGE``: bit number specified too large for mask
* - ``-EOVERFLOW``: integer overflow in the input parameters
*/
int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits)
{
struct region r;
long ret;
r.nbits = nmaskbits;
bitmap_zero(maskp, r.nbits);
while (buf) {
buf = bitmap_find_region(buf);
if (buf == NULL)
return 0;
buf = bitmap_parse_region(buf, &r);
if (IS_ERR(buf))
return PTR_ERR(buf);
ret = bitmap_check_region(&r);
if (ret)
return ret;
bitmap_set_region(&r, maskp);
}
return 0;
}
EXPORT_SYMBOL(bitmap_parselist);
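/*
 * Illustrative sketch of the list format described above, using the example
 * string from the comment (a plausible caller, not taken from real code):
 *
 *        DECLARE_BITMAP(mask, 1024);
 *        int err = bitmap_parselist("0-1023:2/256", mask, 1024);
 *        // on success, exactly bits 0,1,256,257,512,513,768,769 are set
 */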
/**
* bitmap_parselist_user() - convert user buffer's list format ASCII
* string to bitmap
*
* @ubuf: pointer to user buffer containing string.
* @ulen: buffer size in bytes. If string is smaller than this
* then it must be terminated with a \0.
* @maskp: pointer to bitmap array that will contain result.
* @nmaskbits: size of bitmap, in bits.
*
* Wrapper for bitmap_parselist(), providing it with user buffer.
*/
int bitmap_parselist_user(const char __user *ubuf,
unsigned int ulen, unsigned long *maskp,
int nmaskbits)
{
char *buf;
int ret;
buf = memdup_user_nul(ubuf, ulen);
if (IS_ERR(buf))
return PTR_ERR(buf);
ret = bitmap_parselist(buf, maskp, nmaskbits);
kfree(buf);
return ret;
}
EXPORT_SYMBOL(bitmap_parselist_user);
static const char *bitmap_get_x32_reverse(const char *start,
const char *end, u32 *num)
{
u32 ret = 0;
int c, i;
for (i = 0; i < 32; i += 4) {
c = hex_to_bin(*end--);
if (c < 0)
return ERR_PTR(-EINVAL);
ret |= c << i;
if (start > end || __end_of_region(*end))
goto out;
}
if (hex_to_bin(*end--) >= 0)
return ERR_PTR(-EOVERFLOW);
out:
*num = ret;
return end;
}
/**
* bitmap_parse - convert an ASCII hex string into a bitmap.
* @start: pointer to buffer containing string.
* @buflen: buffer size in bytes. If string is smaller than this
* then it must be terminated with a \0 or \n. In that case,
* UINT_MAX may be provided instead of string length.
* @maskp: pointer to bitmap array that will contain result.
* @nmaskbits: size of bitmap, in bits.
*
* Commas group hex digits into chunks. Each chunk defines exactly 32
* bits of the resultant bitmask. No chunk may specify a value larger
* than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
* then leading 0-bits are prepended. %-EINVAL is returned for illegal
* characters. Grouping such as "1,,5", ",44", "," or "" is allowed.
* Leading, embedded and trailing whitespace accepted.
*/
int bitmap_parse(const char *start, unsigned int buflen,
unsigned long *maskp, int nmaskbits)
{
const char *end = strnchrnul(start, buflen, '\n') - 1;
int chunks = BITS_TO_U32(nmaskbits);
u32 *bitmap = (u32 *)maskp;
int unset_bit;
int chunk;
for (chunk = 0; ; chunk++) {
end = bitmap_find_region_reverse(start, end);
if (start > end)
break;
if (!chunks--)
return -EOVERFLOW;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
end = bitmap_get_x32_reverse(start, end, &bitmap[chunk ^ 1]);
#else
end = bitmap_get_x32_reverse(start, end, &bitmap[chunk]);
#endif
if (IS_ERR(end))
return PTR_ERR(end);
}
unset_bit = (BITS_TO_U32(nmaskbits) - chunks) * 32;
if (unset_bit < nmaskbits) {
bitmap_clear(maskp, unset_bit, nmaskbits - unset_bit);
return 0;
}
if (find_next_bit(maskp, unset_bit, nmaskbits) != unset_bit)
return -EOVERFLOW;
return 0;
}
EXPORT_SYMBOL(bitmap_parse);
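/*
 * Illustrative sketch of the chunk format described above (hypothetical
 * caller): the rightmost chunk fills bits 0-31, the next chunk to the left
 * fills bits 32-63, and so on.
 *
 *        DECLARE_BITMAP(mask, 64);
 *        int err = bitmap_parse("1,ffffffff", UINT_MAX, mask, 64);
 *        // on success, bits 0-31 come from 0xffffffff and bit 32 from 0x1
 */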
/**
* bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
* @buf: pointer to a bitmap
* @pos: a bit position in @buf (0 <= @pos < @nbits)
* @nbits: number of valid bit positions in @buf
*
* Map the bit at position @pos in @buf (of length @nbits) to the
* ordinal of which set bit it is. If it is not set or if @pos
* is not a valid bit position, map to -1.
*
* If for example, just bits 4 through 7 are set in @buf, then @pos
* values 4 through 7 will get mapped to 0 through 3, respectively,
* and other @pos values will get mapped to -1. When @pos value 7
* gets mapped to (returns) @ord value 3 in this example, that means
* that bit 7 is the 3rd (starting with 0th) set bit in @buf.
*
 * The bit positions 0 through @nbits - 1 are valid positions in @buf.
*/
static int bitmap_pos_to_ord(const unsigned long *buf, unsigned int pos, unsigned int nbits)
{
if (pos >= nbits || !test_bit(pos, buf))
return -1;
return bitmap_weight(buf, pos);
}
/**
* bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
* @dst: remapped result
* @src: subset to be remapped
* @old: defines domain of map
* @new: defines range of map
* @nbits: number of bits in each of these bitmaps
*
* Let @old and @new define a mapping of bit positions, such that
* whatever position is held by the n-th set bit in @old is mapped
* to the n-th set bit in @new. In the more general case, allowing
* for the possibility that the weight 'w' of @new is less than the
* weight of @old, map the position of the n-th set bit in @old to
* the position of the m-th set bit in @new, where m == n % w.
*
* If either of the @old and @new bitmaps are empty, or if @src and
* @dst point to the same location, then this routine copies @src
* to @dst.
*
* The positions of unset bits in @old are mapped to themselves
 * (the identity map).
*
* Apply the above specified mapping to @src, placing the result in
* @dst, clearing any bits previously set in @dst.
*
 * For example, let's say that @old has bits 4 through 7 set, and
 * @new has bits 12 through 15 set. This defines the mapping of bit
 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
 * bit positions unchanged. So if, say, @src comes into this routine
 * with bits 1, 5 and 7 set, then @dst should leave with bits 1,
 * 13 and 15 set.
*/
void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new,
unsigned int nbits)
{
unsigned int oldbit, w;
if (dst == src) /* following doesn't handle inplace remaps */
return;
bitmap_zero(dst, nbits);
w = bitmap_weight(new, nbits);
for_each_set_bit(oldbit, src, nbits) {
int n = bitmap_pos_to_ord(old, oldbit, nbits);
if (n < 0 || w == 0)
set_bit(oldbit, dst); /* identity map */
else
set_bit(find_nth_bit(new, nbits, n % w), dst);
}
}
EXPORT_SYMBOL(bitmap_remap);
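/*
 * Illustrative sketch reproducing the example from the comment above
 * (a plausible caller, not taken from real code):
 *
 *        DECLARE_BITMAP(old, 16);  DECLARE_BITMAP(new, 16);
 *        DECLARE_BITMAP(src, 16);  DECLARE_BITMAP(dst, 16);
 *        bitmap_zero(old, 16);  bitmap_set(old, 4, 4);    // bits 4-7
 *        bitmap_zero(new, 16);  bitmap_set(new, 12, 4);   // bits 12-15
 *        bitmap_zero(src, 16);
 *        set_bit(1, src);  set_bit(5, src);  set_bit(7, src);
 *        bitmap_remap(dst, src, old, new, 16);
 *        // dst now has bits 1, 13 and 15 set
 */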
/**
* bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
* @oldbit: bit position to be mapped
* @old: defines domain of map
* @new: defines range of map
* @bits: number of bits in each of these bitmaps
*
* Let @old and @new define a mapping of bit positions, such that
* whatever position is held by the n-th set bit in @old is mapped
* to the n-th set bit in @new. In the more general case, allowing
* for the possibility that the weight 'w' of @new is less than the
* weight of @old, map the position of the n-th set bit in @old to
* the position of the m-th set bit in @new, where m == n % w.
*
* The positions of unset bits in @old are mapped to themselves
 * (the identity map).
*
* Apply the above specified mapping to bit position @oldbit, returning
* the new bit position.
*
 * For example, let's say that @old has bits 4 through 7 set, and
 * @new has bits 12 through 15 set. This defines the mapping of bit
 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
 * bit positions unchanged. So if, say, @oldbit is 5, then this routine
 * returns 13.
*/
int bitmap_bitremap(int oldbit, const unsigned long *old,
const unsigned long *new, int bits)
{
int w = bitmap_weight(new, bits);
int n = bitmap_pos_to_ord(old, oldbit, bits);
if (n < 0 || w == 0)
return oldbit;
else
return find_nth_bit(new, bits, n % w);
}
EXPORT_SYMBOL(bitmap_bitremap);
#ifdef CONFIG_NUMA
/**
* bitmap_onto - translate one bitmap relative to another
* @dst: resulting translated bitmap
* @orig: original untranslated bitmap
* @relmap: bitmap relative to which translated
* @bits: number of bits in each of these bitmaps
*
* Set the n-th bit of @dst iff there exists some m such that the
* n-th bit of @relmap is set, the m-th bit of @orig is set, and
* the n-th bit of @relmap is also the m-th _set_ bit of @relmap.
 * (If you understood the previous sentence the first time you
* read it, you're overqualified for your current job.)
*
* In other words, @orig is mapped onto (surjectively) @dst,
* using the map { <n, m> | the n-th bit of @relmap is the
* m-th set bit of @relmap }.
*
* Any set bits in @orig above bit number W, where W is the
 * weight of (number of set bits in) @relmap, are mapped nowhere.
* In particular, if for all bits m set in @orig, m >= W, then
* @dst will end up empty. In situations where the possibility
* of such an empty result is not desired, one way to avoid it is
* to use the bitmap_fold() operator, below, to first fold the
* @orig bitmap over itself so that all its set bits x are in the
* range 0 <= x < W. The bitmap_fold() operator does this by
* setting the bit (m % W) in @dst, for each bit (m) set in @orig.
*
* Example [1] for bitmap_onto():
* Let's say @relmap has bits 30-39 set, and @orig has bits
* 1, 3, 5, 7, 9 and 11 set. Then on return from this routine,
* @dst will have bits 31, 33, 35, 37 and 39 set.
*
* When bit 0 is set in @orig, it means turn on the bit in
* @dst corresponding to whatever is the first bit (if any)
* that is turned on in @relmap. Since bit 0 was off in the
* above example, we leave off that bit (bit 30) in @dst.
*
* When bit 1 is set in @orig (as in the above example), it
* means turn on the bit in @dst corresponding to whatever
* is the second bit that is turned on in @relmap. The second
* bit in @relmap that was turned on in the above example was
* bit 31, so we turned on bit 31 in @dst.
*
* Similarly, we turned on bits 33, 35, 37 and 39 in @dst,
* because they were the 4th, 6th, 8th and 10th set bits
* set in @relmap, and the 4th, 6th, 8th and 10th bits of
* @orig (i.e. bits 3, 5, 7 and 9) were also set.
*
* When bit 11 is set in @orig, it means turn on the bit in
* @dst corresponding to whatever is the twelfth bit that is
* turned on in @relmap. In the above example, there were
 * only ten bits turned on in @relmap (30..39), so the fact that
 * bit 11 was set in @orig had no effect on @dst.
*
* Example [2] for bitmap_fold() + bitmap_onto():
* Let's say @relmap has these ten bits set::
*
* 40 41 42 43 45 48 53 61 74 95
*
* (for the curious, that's 40 plus the first ten terms of the
* Fibonacci sequence.)
*
 * Further, let's say we use the following code, invoking
 * bitmap_fold() and then bitmap_onto(), as suggested above to
* avoid the possibility of an empty @dst result::
*
* unsigned long *tmp; // a temporary bitmap's bits
*
* bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits);
* bitmap_onto(dst, tmp, relmap, bits);
*
* Then this table shows what various values of @dst would be, for
* various @orig's. I list the zero-based positions of each set bit.
* The tmp column shows the intermediate result, as computed by
* using bitmap_fold() to fold the @orig bitmap modulo ten
* (the weight of @relmap):
*
 * =============== ============== =================
 * @orig           tmp            @dst
 * 0               0              40
 * 1               1              41
 * 9               9              95
 * 10              0              40 [#f1]_
 * 1 3 5 7         1 3 5 7        41 43 48 61
 * 0 1 2 3 4       0 1 2 3 4      40 41 42 43 45
 * 0 9 18 27       0 9 8 7        40 61 74 95
 * 0 10 20 30      0              40
 * 0 11 22 33      0 1 2 3        40 41 42 43
 * 0 12 24 36      0 2 4 6        40 42 45 53
 * 78 102 211      1 2 8          41 42 74 [#f1]_
 * =============== ============== =================
*
* .. [#f1]
*
* For these marked lines, if we hadn't first done bitmap_fold()
* into tmp, then the @dst result would have been empty.
*
* If either of @orig or @relmap is empty (no set bits), then @dst
* will be returned empty.
*
* If (as explained above) the only set bits in @orig are in positions
* m where m >= W, (where W is the weight of @relmap) then @dst will
* once again be returned empty.
*
* All bits in @dst not set by the above rule are cleared.
*/
void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, unsigned int bits)
{
unsigned int n, m; /* same meaning as in above comment */
if (dst == orig) /* following doesn't handle inplace mappings */
return;
bitmap_zero(dst, bits);
/*
* The following code is a more efficient, but less
* obvious, equivalent to the loop:
* for (m = 0; m < bitmap_weight(relmap, bits); m++) {
* n = find_nth_bit(orig, bits, m);
* if (test_bit(m, orig))
* set_bit(n, dst);
* }
*/
m = 0;
for_each_set_bit(n, relmap, bits) {
/* m == bitmap_pos_to_ord(relmap, n, bits) */
if (test_bit(m, orig))
set_bit(n, dst);
m++;
}
}
/**
* bitmap_fold - fold larger bitmap into smaller, modulo specified size
* @dst: resulting smaller bitmap
* @orig: original larger bitmap
* @sz: specified size
* @nbits: number of bits in each of these bitmaps
*
* For each bit oldbit in @orig, set bit oldbit mod @sz in @dst.
* Clear all other bits in @dst. See further the comment and
* Example [2] for bitmap_onto() for why and how to use this.
*/
void bitmap_fold(unsigned long *dst, const unsigned long *orig,
unsigned int sz, unsigned int nbits)
{
unsigned int oldbit;
if (dst == orig) /* following doesn't handle inplace mappings */
return;
bitmap_zero(dst, nbits);
for_each_set_bit(oldbit, orig, nbits)
set_bit(oldbit % sz, dst);
}
#endif /* CONFIG_NUMA */
/*
* Common code for bitmap_*_region() routines.
* bitmap: array of unsigned longs corresponding to the bitmap
* pos: the beginning of the region
* order: region size (log base 2 of number of bits)
* reg_op: operation(s) to perform on that region of bitmap
*
* Can set, verify and/or release a region of bits in a bitmap,
* depending on which combination of REG_OP_* flag bits is set.
*
* A region of a bitmap is a sequence of bits in the bitmap, of
* some size '1 << order' (a power of two), aligned to that same
* '1 << order' power of two.
*
* Returns 1 if REG_OP_ISFREE succeeds (region is all zero bits).
 * Returns 0 in all other cases and for all other reg_op values.
*/
enum {
REG_OP_ISFREE, /* true if region is all zero bits */
REG_OP_ALLOC, /* set all bits in region */
REG_OP_RELEASE, /* clear all bits in region */
};
static int __reg_op(unsigned long *bitmap, unsigned int pos, int order, int reg_op)
{
int nbits_reg; /* number of bits in region */
int index; /* index first long of region in bitmap */
int offset; /* bit offset region in bitmap[index] */
int nlongs_reg; /* num longs spanned by region in bitmap */
int nbitsinlong; /* num bits of region in each spanned long */
unsigned long mask; /* bitmask for one long of region */
int i; /* scans bitmap by longs */
int ret = 0; /* return value */
/*
* Either nlongs_reg == 1 (for small orders that fit in one long)
* or (offset == 0 && mask == ~0UL) (for larger multiword orders.)
*/
nbits_reg = 1 << order;
index = pos / BITS_PER_LONG;
offset = pos - (index * BITS_PER_LONG);
nlongs_reg = BITS_TO_LONGS(nbits_reg);
nbitsinlong = min(nbits_reg, BITS_PER_LONG);
/*
* Can't do "mask = (1UL << nbitsinlong) - 1", as that
* overflows if nbitsinlong == BITS_PER_LONG.
*/
mask = (1UL << (nbitsinlong - 1));
mask += mask - 1;
mask <<= offset;
switch (reg_op) {
case REG_OP_ISFREE:
for (i = 0; i < nlongs_reg; i++) {
if (bitmap[index + i] & mask)
goto done;
}
ret = 1; /* all bits in region free (zero) */
break;
case REG_OP_ALLOC:
for (i = 0; i < nlongs_reg; i++)
bitmap[index + i] |= mask;
break;
case REG_OP_RELEASE:
for (i = 0; i < nlongs_reg; i++)
bitmap[index + i] &= ~mask;
break;
}
done:
return ret;
}
/**
* bitmap_find_free_region - find a contiguous aligned mem region
* @bitmap: array of unsigned longs corresponding to the bitmap
* @bits: number of bits in the bitmap
* @order: region size (log base 2 of number of bits) to find
*
* Find a region of free (zero) bits in a @bitmap of @bits bits and
* allocate them (set them to one). Only consider regions of length
* a power (@order) of two, aligned to that power of two, which
* makes the search algorithm much faster.
*
* Return the bit offset in bitmap of the allocated region,
* or -errno on failure.
*/
int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
{
unsigned int pos, end; /* scans bitmap by regions of size order */
for (pos = 0 ; (end = pos + (1U << order)) <= bits; pos = end) {
if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
continue;
__reg_op(bitmap, pos, order, REG_OP_ALLOC);
return pos;
}
return -ENOMEM;
}
EXPORT_SYMBOL(bitmap_find_free_region);
/**
* bitmap_release_region - release allocated bitmap region
* @bitmap: array of unsigned longs corresponding to the bitmap
* @pos: beginning of bit region to release
* @order: region size (log base 2 of number of bits) to release
*
 * This is the complement to bitmap_find_free_region() and releases
* the found region (by clearing it in the bitmap).
*
* No return value.
*/
void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
{
__reg_op(bitmap, pos, order, REG_OP_RELEASE);
}
EXPORT_SYMBOL(bitmap_release_region);
/**
* bitmap_allocate_region - allocate bitmap region
* @bitmap: array of unsigned longs corresponding to the bitmap
* @pos: beginning of bit region to allocate
* @order: region size (log base 2 of number of bits) to allocate
*
* Allocate (set bits in) a specified region of a bitmap.
*
* Return 0 on success, or %-EBUSY if specified region wasn't
* free (not all bits were zero).
*/
int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
{
if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
return -EBUSY;
return __reg_op(bitmap, pos, order, REG_OP_ALLOC);
}
EXPORT_SYMBOL(bitmap_allocate_region);
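/*
 * Illustrative sketch of the region API (hypothetical use as a small ID
 * allocator handing out naturally aligned blocks of 1 << 2 = 4 bits):
 *
 *        DECLARE_BITMAP(ids, 128);
 *        bitmap_zero(ids, 128);
 *        int pos = bitmap_find_free_region(ids, 128, 2);  // mark 4 bits busy
 *        if (pos >= 0)
 *                bitmap_release_region(ids, pos, 2);       // free them again
 */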
/**
* bitmap_copy_le - copy a bitmap, putting the bits into little-endian order.
* @dst: destination buffer
* @src: bitmap to copy
* @nbits: number of bits in the bitmap
*
* Require nbits % BITS_PER_LONG == 0.
*/
#ifdef __BIG_ENDIAN
void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
unsigned int i;
for (i = 0; i < nbits/BITS_PER_LONG; i++) {
if (BITS_PER_LONG == 64)
dst[i] = cpu_to_le64(src[i]);
else
dst[i] = cpu_to_le32(src[i]);
}
}
EXPORT_SYMBOL(bitmap_copy_le);
#endif
unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
{
return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
flags);
}
EXPORT_SYMBOL(bitmap_alloc);
unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
{
return bitmap_alloc(nbits, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(bitmap_zalloc);
unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node)
{
return kmalloc_array_node(BITS_TO_LONGS(nbits), sizeof(unsigned long),
flags, node);
}
EXPORT_SYMBOL(bitmap_alloc_node);
unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node)
{
return bitmap_alloc_node(nbits, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(bitmap_zalloc_node);
void bitmap_free(const unsigned long *bitmap)
{
kfree(bitmap);
}
EXPORT_SYMBOL(bitmap_free);
static void devm_bitmap_free(void *data)
{
unsigned long *bitmap = data;
bitmap_free(bitmap);
}
unsigned long *devm_bitmap_alloc(struct device *dev,
unsigned int nbits, gfp_t flags)
{
unsigned long *bitmap;
int ret;
bitmap = bitmap_alloc(nbits, flags);
if (!bitmap)
return NULL;
ret = devm_add_action_or_reset(dev, devm_bitmap_free, bitmap);
if (ret)
return NULL;
return bitmap;
}
EXPORT_SYMBOL_GPL(devm_bitmap_alloc);
unsigned long *devm_bitmap_zalloc(struct device *dev,
unsigned int nbits, gfp_t flags)
{
return devm_bitmap_alloc(dev, nbits, flags | __GFP_ZERO);
}
EXPORT_SYMBOL_GPL(devm_bitmap_zalloc);
#if BITS_PER_LONG == 64
/**
* bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap
* @bitmap: array of unsigned longs, the destination bitmap
* @buf: array of u32 (in host byte order), the source bitmap
* @nbits: number of bits in @bitmap
*/
void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits)
{
unsigned int i, halfwords;
halfwords = DIV_ROUND_UP(nbits, 32);
for (i = 0; i < halfwords; i++) {
bitmap[i/2] = (unsigned long) buf[i];
if (++i < halfwords)
bitmap[i/2] |= ((unsigned long) buf[i]) << 32;
}
/* Clear tail bits in last word beyond nbits. */
if (nbits % BITS_PER_LONG)
bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits);
}
EXPORT_SYMBOL(bitmap_from_arr32);
/**
* bitmap_to_arr32 - copy the contents of bitmap to a u32 array of bits
* @buf: array of u32 (in host byte order), the dest bitmap
* @bitmap: array of unsigned longs, the source bitmap
* @nbits: number of bits in @bitmap
*/
void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits)
{
unsigned int i, halfwords;
halfwords = DIV_ROUND_UP(nbits, 32);
for (i = 0; i < halfwords; i++) {
buf[i] = (u32) (bitmap[i/2] & UINT_MAX);
if (++i < halfwords)
buf[i] = (u32) (bitmap[i/2] >> 32);
}
/* Clear tail bits in last element of array beyond nbits. */
if (nbits % BITS_PER_LONG)
buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31));
}
EXPORT_SYMBOL(bitmap_to_arr32);
#endif
#if BITS_PER_LONG == 32
/**
* bitmap_from_arr64 - copy the contents of u64 array of bits to bitmap
* @bitmap: array of unsigned longs, the destination bitmap
* @buf: array of u64 (in host byte order), the source bitmap
* @nbits: number of bits in @bitmap
*/
void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits)
{
int n;
for (n = nbits; n > 0; n -= 64) {
u64 val = *buf++;
*bitmap++ = val;
if (n > 32)
*bitmap++ = val >> 32;
}
/*
* Clear tail bits in the last word beyond nbits.
*
* Negative index is OK because here we point to the word next
* to the last word of the bitmap, except for nbits == 0, which
* is tested implicitly.
*/
if (nbits % BITS_PER_LONG)
bitmap[-1] &= BITMAP_LAST_WORD_MASK(nbits);
}
EXPORT_SYMBOL(bitmap_from_arr64);
/**
* bitmap_to_arr64 - copy the contents of bitmap to a u64 array of bits
* @buf: array of u64 (in host byte order), the dest bitmap
* @bitmap: array of unsigned longs, the source bitmap
* @nbits: number of bits in @bitmap
*/
void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits)
{
const unsigned long *end = bitmap + BITS_TO_LONGS(nbits);
while (bitmap < end) {
*buf = *bitmap++;
if (bitmap < end)
*buf |= (u64)(*bitmap++) << 32;
buf++;
}
/* Clear tail bits in the last element of array beyond nbits. */
if (nbits % 64)
buf[-1] &= GENMASK_ULL((nbits - 1) % 64, 0);
}
EXPORT_SYMBOL(bitmap_to_arr64);
#endif
| linux-master | lib/bitmap.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* KUnit tests for cpumask.
*
* Author: Sander Vanheule <[email protected]>
*/
#include <kunit/test.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#define MASK_MSG(m) \
"%s contains %sCPUs %*pbl", #m, (cpumask_weight(m) ? "" : "no "), \
nr_cpumask_bits, cpumask_bits(m)
#define EXPECT_FOR_EACH_CPU_EQ(test, mask) \
do { \
const cpumask_t *m = (mask); \
int mask_weight = cpumask_weight(m); \
int cpu, iter = 0; \
for_each_cpu(cpu, m) \
iter++; \
KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(mask)); \
} while (0)
#define EXPECT_FOR_EACH_CPU_OP_EQ(test, op, mask1, mask2) \
do { \
const cpumask_t *m1 = (mask1); \
const cpumask_t *m2 = (mask2); \
int weight; \
int cpu, iter = 0; \
cpumask_##op(&mask_tmp, m1, m2); \
weight = cpumask_weight(&mask_tmp); \
for_each_cpu_##op(cpu, mask1, mask2) \
iter++; \
KUNIT_EXPECT_EQ((test), weight, iter); \
} while (0)
#define EXPECT_FOR_EACH_CPU_WRAP_EQ(test, mask) \
do { \
const cpumask_t *m = (mask); \
int mask_weight = cpumask_weight(m); \
int cpu, iter = 0; \
for_each_cpu_wrap(cpu, m, nr_cpu_ids / 2) \
iter++; \
KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(mask)); \
} while (0)
#define EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, name) \
do { \
int mask_weight = num_##name##_cpus(); \
int cpu, iter = 0; \
for_each_##name##_cpu(cpu) \
iter++; \
KUNIT_EXPECT_EQ_MSG((test), mask_weight, iter, MASK_MSG(cpu_##name##_mask)); \
} while (0)
static cpumask_t mask_empty;
static cpumask_t mask_all;
static cpumask_t mask_tmp;
static void test_cpumask_weight(struct kunit *test)
{
KUNIT_EXPECT_TRUE_MSG(test, cpumask_empty(&mask_empty), MASK_MSG(&mask_empty));
KUNIT_EXPECT_TRUE_MSG(test, cpumask_full(&mask_all), MASK_MSG(&mask_all));
KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_weight(&mask_empty), MASK_MSG(&mask_empty));
KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(cpu_possible_mask),
MASK_MSG(cpu_possible_mask));
KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids, cpumask_weight(&mask_all), MASK_MSG(&mask_all));
}
static void test_cpumask_first(struct kunit *test)
{
KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first(&mask_empty), MASK_MSG(&mask_empty));
KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_first(cpu_possible_mask), MASK_MSG(cpu_possible_mask));
KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_first_zero(&mask_empty), MASK_MSG(&mask_empty));
KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_first_zero(cpu_possible_mask),
MASK_MSG(cpu_possible_mask));
}
static void test_cpumask_last(struct kunit *test)
{
KUNIT_EXPECT_LE_MSG(test, nr_cpumask_bits, cpumask_last(&mask_empty),
MASK_MSG(&mask_empty));
KUNIT_EXPECT_EQ_MSG(test, nr_cpu_ids - 1, cpumask_last(cpu_possible_mask),
MASK_MSG(cpu_possible_mask));
}
static void test_cpumask_next(struct kunit *test)
{
KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_next_zero(-1, &mask_empty), MASK_MSG(&mask_empty));
KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next_zero(-1, cpu_possible_mask),
MASK_MSG(cpu_possible_mask));
KUNIT_EXPECT_LE_MSG(test, nr_cpu_ids, cpumask_next(-1, &mask_empty),
MASK_MSG(&mask_empty));
KUNIT_EXPECT_EQ_MSG(test, 0, cpumask_next(-1, cpu_possible_mask),
MASK_MSG(cpu_possible_mask));
}
static void test_cpumask_iterators(struct kunit *test)
{
EXPECT_FOR_EACH_CPU_EQ(test, &mask_empty);
EXPECT_FOR_EACH_CPU_WRAP_EQ(test, &mask_empty);
EXPECT_FOR_EACH_CPU_OP_EQ(test, and, &mask_empty, &mask_empty);
EXPECT_FOR_EACH_CPU_OP_EQ(test, and, cpu_possible_mask, &mask_empty);
EXPECT_FOR_EACH_CPU_OP_EQ(test, andnot, &mask_empty, &mask_empty);
EXPECT_FOR_EACH_CPU_EQ(test, cpu_possible_mask);
EXPECT_FOR_EACH_CPU_WRAP_EQ(test, cpu_possible_mask);
EXPECT_FOR_EACH_CPU_OP_EQ(test, and, cpu_possible_mask, cpu_possible_mask);
EXPECT_FOR_EACH_CPU_OP_EQ(test, andnot, cpu_possible_mask, &mask_empty);
}
static void test_cpumask_iterators_builtin(struct kunit *test)
{
EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, possible);
/* Ensure the dynamic masks are stable while running the tests */
cpu_hotplug_disable();
EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, online);
EXPECT_FOR_EACH_CPU_BUILTIN_EQ(test, present);
cpu_hotplug_enable();
}
static int test_cpumask_init(struct kunit *test)
{
cpumask_clear(&mask_empty);
cpumask_setall(&mask_all);
return 0;
}
static struct kunit_case test_cpumask_cases[] = {
KUNIT_CASE(test_cpumask_weight),
KUNIT_CASE(test_cpumask_first),
KUNIT_CASE(test_cpumask_last),
KUNIT_CASE(test_cpumask_next),
KUNIT_CASE(test_cpumask_iterators),
KUNIT_CASE(test_cpumask_iterators_builtin),
{}
};
static struct kunit_suite test_cpumask_suite = {
.name = "cpumask",
.init = test_cpumask_init,
.test_cases = test_cpumask_cases,
};
kunit_test_suite(test_cpumask_suite);
MODULE_LICENSE("GPL");
| linux-master | lib/cpumask_kunit.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Floating proportions with flexible aging period
*
* Copyright (C) 2011, SUSE, Jan Kara <[email protected]>
*
* The goal of this code is: Given different types of event, measure proportion
* of each type of event over time. The proportions are measured with
* exponentially decaying history to give smooth transitions. A formula
* expressing proportion of event of type 'j' is:
*
* p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
*
* Where x_{i,j} is j's number of events in i-th last time period and x_i is
* total number of events in i-th last time period.
*
* Note that p_{j}'s are normalised, i.e.
*
* \Sum_{j} p_{j} = 1,
*
* This formula can be straightforwardly computed by maintaining denominator
* (let's call it 'd') and for each event type its numerator (let's call it
* 'n_j'). When an event of type 'j' happens, we simply need to do:
* n_j++; d++;
*
* When a new period is declared, we could do:
* d /= 2
* for each j
* n_j /= 2
*
 * To avoid iterating over all event types, we instead shift the numerator of
 * event j lazily, when someone asks for the proportion of event j or when
 * event j occurs. This can be trivially implemented by remembering the last
 * period in which something happened with the proportion of type j.
*/
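/*
 * A small worked example of the formula above (illustrative numbers only):
 * with two event types and per-period counts (x_{i,0}, x_{i,1}) of (3, 1)
 * in the last period and (1, 1) in the period before that,
 *   p_0 = (3/2 + 1/4) / (4/2 + 2/4) = 1.75 / 2.5 = 0.7
 *   p_1 = (1/2 + 1/4) / (4/2 + 2/4) = 0.75 / 2.5 = 0.3
 * and the proportions indeed sum to 1.
 */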
#include <linux/flex_proportions.h>
int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
int err;
p->period = 0;
/* Use 1 to avoid dealing with periods with 0 events... */
err = percpu_counter_init(&p->events, 1, gfp);
if (err)
return err;
seqcount_init(&p->sequence);
return 0;
}
void fprop_global_destroy(struct fprop_global *p)
{
percpu_counter_destroy(&p->events);
}
/*
 * Declare @periods new periods. It is up to the caller to make sure period
* transitions cannot happen in parallel.
*
* The function returns true if the proportions are still defined and false
* if aging zeroed out all events. This can be used to detect whether declaring
* further periods has any effect.
*/
bool fprop_new_period(struct fprop_global *p, int periods)
{
s64 events = percpu_counter_sum(&p->events);
/*
* Don't do anything if there are no events.
*/
if (events <= 1)
return false;
preempt_disable_nested();
write_seqcount_begin(&p->sequence);
if (periods < 64)
events -= events >> periods;
/* Use addition to avoid losing events happening between sum and set */
percpu_counter_add(&p->events, -events);
p->period += periods;
write_seqcount_end(&p->sequence);
preempt_enable_nested();
return true;
}
/*
* ---- SINGLE ----
*/
int fprop_local_init_single(struct fprop_local_single *pl)
{
pl->events = 0;
pl->period = 0;
raw_spin_lock_init(&pl->lock);
return 0;
}
void fprop_local_destroy_single(struct fprop_local_single *pl)
{
}
static void fprop_reflect_period_single(struct fprop_global *p,
struct fprop_local_single *pl)
{
unsigned int period = p->period;
unsigned long flags;
/* Fast path - period didn't change */
if (pl->period == period)
return;
raw_spin_lock_irqsave(&pl->lock, flags);
/* Someone updated pl->period while we were spinning? */
if (pl->period >= period) {
raw_spin_unlock_irqrestore(&pl->lock, flags);
return;
}
/* Aging zeroed our fraction? */
if (period - pl->period < BITS_PER_LONG)
pl->events >>= period - pl->period;
else
pl->events = 0;
pl->period = period;
raw_spin_unlock_irqrestore(&pl->lock, flags);
}
/* Event of type pl happened */
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
fprop_reflect_period_single(p, pl);
pl->events++;
percpu_counter_add(&p->events, 1);
}
/* Return fraction of events of type pl */
void fprop_fraction_single(struct fprop_global *p,
struct fprop_local_single *pl,
unsigned long *numerator, unsigned long *denominator)
{
unsigned int seq;
s64 num, den;
do {
seq = read_seqcount_begin(&p->sequence);
fprop_reflect_period_single(p, pl);
num = pl->events;
den = percpu_counter_read_positive(&p->events);
} while (read_seqcount_retry(&p->sequence, seq));
/*
* Make fraction <= 1 and denominator > 0 even in presence of percpu
* counter errors
*/
if (den <= num) {
if (num)
den = num;
else
den = 1;
}
*denominator = den;
*numerator = num;
}
/*
* ---- PERCPU ----
*/
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
int err;
err = percpu_counter_init(&pl->events, 0, gfp);
if (err)
return err;
pl->period = 0;
raw_spin_lock_init(&pl->lock);
return 0;
}
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
percpu_counter_destroy(&pl->events);
}
static void fprop_reflect_period_percpu(struct fprop_global *p,
struct fprop_local_percpu *pl)
{
unsigned int period = p->period;
unsigned long flags;
/* Fast path - period didn't change */
if (pl->period == period)
return;
raw_spin_lock_irqsave(&pl->lock, flags);
/* Someone updated pl->period while we were spinning? */
if (pl->period >= period) {
raw_spin_unlock_irqrestore(&pl->lock, flags);
return;
}
/* Aging zeroed our fraction? */
if (period - pl->period < BITS_PER_LONG) {
s64 val = percpu_counter_read(&pl->events);
if (val < (nr_cpu_ids * PROP_BATCH))
val = percpu_counter_sum(&pl->events);
percpu_counter_add_batch(&pl->events,
-val + (val >> (period-pl->period)), PROP_BATCH);
} else
percpu_counter_set(&pl->events, 0);
pl->period = period;
raw_spin_unlock_irqrestore(&pl->lock, flags);
}
/* Event of type pl happened */
void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
long nr)
{
fprop_reflect_period_percpu(p, pl);
percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
percpu_counter_add(&p->events, nr);
}
void fprop_fraction_percpu(struct fprop_global *p,
struct fprop_local_percpu *pl,
unsigned long *numerator, unsigned long *denominator)
{
unsigned int seq;
s64 num, den;
do {
seq = read_seqcount_begin(&p->sequence);
fprop_reflect_period_percpu(p, pl);
num = percpu_counter_read_positive(&pl->events);
den = percpu_counter_read_positive(&p->events);
} while (read_seqcount_retry(&p->sequence, seq));
/*
* Make fraction <= 1 and denominator > 0 even in presence of percpu
* counter errors
*/
if (den <= num) {
if (num)
den = num;
else
den = 1;
}
*denominator = den;
*numerator = num;
}
/*
* Like __fprop_add_percpu() except that event is counted only if the given
* type has fraction smaller than @max_frac/FPROP_FRAC_BASE
*/
void __fprop_add_percpu_max(struct fprop_global *p,
struct fprop_local_percpu *pl, int max_frac, long nr)
{
if (unlikely(max_frac < FPROP_FRAC_BASE)) {
unsigned long numerator, denominator;
s64 tmp;
fprop_fraction_percpu(p, pl, &numerator, &denominator);
/* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
tmp = (u64)denominator * max_frac -
((u64)numerator << FPROP_FRAC_SHIFT);
if (tmp < 0) {
/* Maximum fraction already exceeded? */
return;
} else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
/* Add just enough for the fraction to saturate */
nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
FPROP_FRAC_BASE - max_frac);
}
}
__fprop_add_percpu(p, pl, nr);
}
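/*
 * Worked example of the clamping above (illustrative numbers only): with
 * FPROP_FRAC_BASE == 1024, max_frac == 512 (i.e. 1/2), numerator == 30,
 * denominator == 100 and nr == 50, tmp = 100*512 - 30*1024 = 20480 and
 * nr*(1024 - 512) = 25600 > tmp, so nr is clamped to 20480/512 = 40;
 * afterwards (30+40)/(100+40) == 1/2, exactly the maximum fraction.
 */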
| linux-master | lib/flex_proportions.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>
/* covers ubuf and kbuf alike */
#define iterate_buf(i, n, base, len, off, __p, STEP) { \
size_t __maybe_unused off = 0; \
len = n; \
base = __p + i->iov_offset; \
len -= (STEP); \
i->iov_offset += len; \
n = len; \
}
/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) { \
size_t off = 0; \
size_t skip = i->iov_offset; \
do { \
len = min(n, __p->iov_len - skip); \
if (likely(len)) { \
base = __p->iov_base + skip; \
len -= (STEP); \
off += len; \
skip += len; \
n -= len; \
if (skip < __p->iov_len) \
break; \
} \
__p++; \
skip = 0; \
} while (n); \
i->iov_offset = skip; \
n = off; \
}
#define iterate_bvec(i, n, base, len, off, p, STEP) { \
size_t off = 0; \
unsigned skip = i->iov_offset; \
while (n) { \
unsigned offset = p->bv_offset + skip; \
unsigned left; \
void *kaddr = kmap_local_page(p->bv_page + \
offset / PAGE_SIZE); \
base = kaddr + offset % PAGE_SIZE; \
len = min(min(n, (size_t)(p->bv_len - skip)), \
(size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
left = (STEP); \
kunmap_local(kaddr); \
len -= left; \
off += len; \
skip += len; \
if (skip == p->bv_len) { \
skip = 0; \
p++; \
} \
n -= len; \
if (left) \
break; \
} \
i->iov_offset = skip; \
n = off; \
}
#define iterate_xarray(i, n, base, len, __off, STEP) { \
__label__ __out; \
size_t __off = 0; \
struct folio *folio; \
loff_t start = i->xarray_start + i->iov_offset; \
pgoff_t index = start / PAGE_SIZE; \
XA_STATE(xas, i->xarray, index); \
\
len = PAGE_SIZE - offset_in_page(start); \
rcu_read_lock(); \
xas_for_each(&xas, folio, ULONG_MAX) { \
unsigned left; \
size_t offset; \
if (xas_retry(&xas, folio)) \
continue; \
if (WARN_ON(xa_is_value(folio))) \
break; \
if (WARN_ON(folio_test_hugetlb(folio))) \
break; \
offset = offset_in_folio(folio, start + __off); \
while (offset < folio_size(folio)) { \
base = kmap_local_folio(folio, offset); \
len = min(n, len); \
left = (STEP); \
kunmap_local(base); \
len -= left; \
__off += len; \
n -= len; \
if (left || n == 0) \
goto __out; \
offset += len; \
len = PAGE_SIZE; \
} \
} \
__out: \
rcu_read_unlock(); \
i->iov_offset += __off; \
n = __off; \
}
#define __iterate_and_advance(i, n, base, len, off, I, K) { \
if (unlikely(i->count < n)) \
n = i->count; \
if (likely(n)) { \
if (likely(iter_is_ubuf(i))) { \
void __user *base; \
size_t len; \
iterate_buf(i, n, base, len, off, \
i->ubuf, (I)) \
} else if (likely(iter_is_iovec(i))) { \
const struct iovec *iov = iter_iov(i); \
void __user *base; \
size_t len; \
iterate_iovec(i, n, base, len, off, \
iov, (I)) \
i->nr_segs -= iov - iter_iov(i); \
i->__iov = iov; \
} else if (iov_iter_is_bvec(i)) { \
const struct bio_vec *bvec = i->bvec; \
void *base; \
size_t len; \
iterate_bvec(i, n, base, len, off, \
bvec, (K)) \
i->nr_segs -= bvec - i->bvec; \
i->bvec = bvec; \
} else if (iov_iter_is_kvec(i)) { \
const struct kvec *kvec = i->kvec; \
void *base; \
size_t len; \
iterate_iovec(i, n, base, len, off, \
kvec, (K)) \
i->nr_segs -= kvec - i->kvec; \
i->kvec = kvec; \
} else if (iov_iter_is_xarray(i)) { \
void *base; \
size_t len; \
iterate_xarray(i, n, base, len, off, \
(K)) \
} \
i->count -= n; \
} \
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
static int copyout(void __user *to, const void *from, size_t n)
{
if (should_fail_usercopy())
return n;
if (access_ok(to, n)) {
instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
}
return n;
}
static int copyout_nofault(void __user *to, const void *from, size_t n)
{
long res;
if (should_fail_usercopy())
return n;
res = copy_to_user_nofault(to, from, n);
return res < 0 ? n : res;
}
static int copyin(void *to, const void __user *from, size_t n)
{
size_t res = n;
if (should_fail_usercopy())
return n;
if (access_ok(from, n)) {
instrument_copy_from_user_before(to, from, n);
res = raw_copy_from_user(to, from, n);
instrument_copy_from_user_after(to, from, n, res);
}
return res;
}
/*
* fault_in_iov_iter_readable - fault in iov iterator for reading
* @i: iterator
* @size: maximum length
*
* Fault in one or more iovecs of the given iov_iter, to a maximum length of
* @size. For each iovec, fault in each page that constitutes the iovec.
*
* Returns the number of bytes not faulted in (like copy_to_user() and
* copy_from_user()).
*
* Always returns 0 for non-userspace iterators.
*/
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
if (iter_is_ubuf(i)) {
size_t n = min(size, iov_iter_count(i));
n -= fault_in_readable(i->ubuf + i->iov_offset, n);
return size - n;
} else if (iter_is_iovec(i)) {
size_t count = min(size, iov_iter_count(i));
const struct iovec *p;
size_t skip;
size -= count;
for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
size_t len = min(count, p->iov_len - skip);
size_t ret;
if (unlikely(!len))
continue;
ret = fault_in_readable(p->iov_base + skip, len);
count -= len - ret;
if (ret)
break;
}
return count + size;
}
return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);
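/*
 * Illustrative sketch of the usual calling pattern (as in buffered write
 * paths): prefault the source pages before entering a section that must not
 * take page faults, and bail out if nothing at all could be faulted in.
 *
 *        if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes))
 *                return -EFAULT;         // no byte of 'bytes' is accessible
 */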
/*
* fault_in_iov_iter_writeable - fault in iov iterator for writing
* @i: iterator
* @size: maximum length
*
* Faults in the iterator using get_user_pages(), i.e., without triggering
* hardware page faults. This is primarily useful when we already know that
* some or all of the pages in @i aren't in memory.
*
* Returns the number of bytes not faulted in, like copy_to_user() and
* copy_from_user().
*
* Always returns 0 for non-user-space iterators.
*/
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
if (iter_is_ubuf(i)) {
size_t n = min(size, iov_iter_count(i));
n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
return size - n;
} else if (iter_is_iovec(i)) {
size_t count = min(size, iov_iter_count(i));
const struct iovec *p;
size_t skip;
size -= count;
for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
size_t len = min(count, p->iov_len - skip);
size_t ret;
if (unlikely(!len))
continue;
ret = fault_in_safe_writeable(p->iov_base + skip, len);
count -= len - ret;
if (ret)
break;
}
return count + size;
}
return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);
void iov_iter_init(struct iov_iter *i, unsigned int direction,
const struct iovec *iov, unsigned long nr_segs,
size_t count)
{
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter) {
.iter_type = ITER_IOVEC,
.copy_mc = false,
.nofault = false,
.user_backed = true,
.data_source = direction,
.__iov = iov,
.nr_segs = nr_segs,
.iov_offset = 0,
.count = count
};
}
EXPORT_SYMBOL(iov_iter_init);
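/*
 * Illustrative sketch (hypothetical read-style path; 'ubuf', 'kbuf' and 'len'
 * are assumed names): wrap a single user-space buffer and copy kernel data
 * into it.
 *
 *        struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *        struct iov_iter iter;
 *        iov_iter_init(&iter, READ, &iov, 1, len);
 *        size_t copied = copy_to_iter(kbuf, len, &iter);
 */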
static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
__wsum sum, size_t off)
{
__wsum next = csum_partial_copy_nocheck(from, to, len);
return csum_block_add(sum, next, off);
}
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
if (WARN_ON_ONCE(i->data_source))
return 0;
if (user_backed_iter(i))
might_fault();
iterate_and_advance(i, bytes, base, len, off,
copyout(base, addr + off, len),
memcpy(base, addr + off, len)
)
return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
if (access_ok(to, n)) {
instrument_copy_to_user(to, from, n);
n = copy_mc_to_user((__force void *) to, from, n);
}
return n;
}
/**
* _copy_mc_to_iter - copy to iter with source memory error exception handling
* @addr: source kernel address
* @bytes: total transfer length
* @i: destination iterator
*
* The pmem driver deploys this for the dax operation
* (dax_copy_to_iter()) for dax reads (bypass page-cache and the
* block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
* successfully copied.
*
 * The main differences between this and the typical _copy_to_iter() are:
*
* * Typical tail/residue handling after a fault retries the copy
* byte-by-byte until the fault happens again. Re-triggering machine
* checks is potentially fatal so the implementation uses source
* alignment and poison alignment assumptions to avoid re-triggering
* hardware exceptions.
*
* * ITER_KVEC and ITER_BVEC can return short copies. Compare to
* copy_to_iter() where only ITER_IOVEC attempts might return a short copy.
*
* Return: number of bytes copied (may be %0)
*/
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
if (WARN_ON_ONCE(i->data_source))
return 0;
if (user_backed_iter(i))
might_fault();
__iterate_and_advance(i, bytes, base, len, off,
copyout_mc(base, addr + off, len),
copy_mc_to_kernel(base, addr + off, len)
)
return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
static void *memcpy_from_iter(struct iov_iter *i, void *to, const void *from,
size_t size)
{
if (iov_iter_is_copy_mc(i))
return (void *)copy_mc_to_kernel(to, from, size);
return memcpy(to, from, size);
}
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
if (WARN_ON_ONCE(!i->data_source))
return 0;
if (user_backed_iter(i))
might_fault();
iterate_and_advance(i, bytes, base, len, off,
copyin(addr + off, base, len),
memcpy_from_iter(i, addr + off, base, len)
)
return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
if (WARN_ON_ONCE(!i->data_source))
return 0;
iterate_and_advance(i, bytes, base, len, off,
__copy_from_user_inatomic_nocache(addr + off, base, len),
memcpy(addr + off, base, len)
)
return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
* _copy_from_iter_flushcache - write destination through cpu cache
* @addr: destination kernel address
* @bytes: total transfer length
* @i: source iterator
*
* The pmem driver arranges for filesystem-dax to use this facility via
* dax_copy_from_iter() for ensuring that writes to persistent memory
* are flushed through the CPU cache. It is differentiated from
* _copy_from_iter_nocache() in that guarantees all data is flushed for
* all iterator types. The _copy_from_iter_nocache() only attempts to
* bypass the cache for the ITER_IOVEC case, and on some archs may use
* instructions that strand dirty-data in the cache.
*
* Return: number of bytes copied (may be %0)
*/
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
if (WARN_ON_ONCE(!i->data_source))
return 0;
iterate_and_advance(i, bytes, base, len, off,
__copy_from_user_flushcache(addr + off, base, len),
memcpy_flushcache(addr + off, base, len)
)
return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
struct page *head;
size_t v = n + offset;
/*
* The general case needs to access the page order in order
* to compute the page size.
* However, we mostly deal with order-0 pages and thus can
* avoid a possible cache line miss for requests that fit all
* page orders.
*/
if (n <= v && v <= PAGE_SIZE)
return true;
head = compound_head(page);
v += (page - head) << PAGE_SHIFT;
if (WARN_ON(n > v || v > page_size(head)))
return false;
return true;
}
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
size_t res = 0;
if (!page_copy_sane(page, offset, bytes))
return 0;
if (WARN_ON_ONCE(i->data_source))
return 0;
page += offset / PAGE_SIZE; // first subpage
offset %= PAGE_SIZE;
while (1) {
void *kaddr = kmap_local_page(page);
size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
n = _copy_to_iter(kaddr + offset, n, i);
kunmap_local(kaddr);
res += n;
bytes -= n;
if (!bytes || !n)
break;
offset += n;
if (offset == PAGE_SIZE) {
page++;
offset = 0;
}
}
return res;
}
EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes,
struct iov_iter *i)
{
size_t res = 0;
if (!page_copy_sane(page, offset, bytes))
return 0;
if (WARN_ON_ONCE(i->data_source))
return 0;
page += offset / PAGE_SIZE; // first subpage
offset %= PAGE_SIZE;
while (1) {
void *kaddr = kmap_local_page(page);
size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
iterate_and_advance(i, n, base, len, off,
copyout_nofault(base, kaddr + offset + off, len),
memcpy(base, kaddr + offset + off, len)
)
kunmap_local(kaddr);
res += n;
bytes -= n;
if (!bytes || !n)
break;
offset += n;
if (offset == PAGE_SIZE) {
page++;
offset = 0;
}
}
return res;
}
EXPORT_SYMBOL(copy_page_to_iter_nofault);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
size_t res = 0;
if (!page_copy_sane(page, offset, bytes))
return 0;
page += offset / PAGE_SIZE; // first subpage
offset %= PAGE_SIZE;
while (1) {
void *kaddr = kmap_local_page(page);
size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
n = _copy_from_iter(kaddr + offset, n, i);
kunmap_local(kaddr);
res += n;
bytes -= n;
if (!bytes || !n)
break;
offset += n;
if (offset == PAGE_SIZE) {
page++;
offset = 0;
}
}
return res;
}
EXPORT_SYMBOL(copy_page_from_iter);
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
iterate_and_advance(i, bytes, base, len, count,
clear_user(base, len),
memset(base, 0, len)
)
return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
size_t bytes, struct iov_iter *i)
{
size_t n, copied = 0;
if (!page_copy_sane(page, offset, bytes))
return 0;
if (WARN_ON_ONCE(!i->data_source))
return 0;
do {
char *p;
n = bytes - copied;
if (PageHighMem(page)) {
page += offset / PAGE_SIZE;
offset %= PAGE_SIZE;
n = min_t(size_t, n, PAGE_SIZE - offset);
}
p = kmap_atomic(page) + offset;
iterate_and_advance(i, n, base, len, off,
copyin(p + off, base, len),
memcpy_from_iter(i, p + off, base, len)
)
kunmap_atomic(p);
copied += n;
offset += n;
} while (PageHighMem(page) && copied != bytes && n > 0);
return copied;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);
static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
const struct bio_vec *bvec, *end;
if (!i->count)
return;
i->count -= size;
size += i->iov_offset;
for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
if (likely(size < bvec->bv_len))
break;
size -= bvec->bv_len;
}
i->iov_offset = size;
i->nr_segs -= bvec - i->bvec;
i->bvec = bvec;
}
static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
const struct iovec *iov, *end;
if (!i->count)
return;
i->count -= size;
size += i->iov_offset; // from beginning of current segment
for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
if (likely(size < iov->iov_len))
break;
size -= iov->iov_len;
}
i->iov_offset = size;
i->nr_segs -= iov - iter_iov(i);
i->__iov = iov;
}
void iov_iter_advance(struct iov_iter *i, size_t size)
{
if (unlikely(i->count < size))
size = i->count;
if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
i->iov_offset += size;
i->count -= size;
} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
/* iovec and kvec have identical layouts */
iov_iter_iovec_advance(i, size);
} else if (iov_iter_is_bvec(i)) {
iov_iter_bvec_advance(i, size);
} else if (iov_iter_is_discard(i)) {
i->count -= size;
}
}
EXPORT_SYMBOL(iov_iter_advance);
void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
if (!unroll)
return;
if (WARN_ON(unroll > MAX_RW_COUNT))
return;
i->count += unroll;
if (unlikely(iov_iter_is_discard(i)))
return;
if (unroll <= i->iov_offset) {
i->iov_offset -= unroll;
return;
}
unroll -= i->iov_offset;
if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
BUG(); /* We should never go beyond the start of the specified
* range since we might then be straying into pages that
* aren't pinned.
*/
} else if (iov_iter_is_bvec(i)) {
const struct bio_vec *bvec = i->bvec;
while (1) {
size_t n = (--bvec)->bv_len;
i->nr_segs++;
if (unroll <= n) {
i->bvec = bvec;
i->iov_offset = n - unroll;
return;
}
unroll -= n;
}
} else { /* same logics for iovec and kvec */
const struct iovec *iov = iter_iov(i);
while (1) {
size_t n = (--iov)->iov_len;
i->nr_segs++;
if (unroll <= n) {
i->__iov = iov;
i->iov_offset = n - unroll;
return;
}
unroll -= n;
}
}
}
EXPORT_SYMBOL(iov_iter_revert);
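/*
 * Editor's illustrative sketch (not part of the original file): the usual
 * pairing of the two calls above. Copy helpers advance the iterator
 * implicitly; a caller that then fails hands the consumed bytes back with
 * iov_iter_revert() so the submitter can retry. Names are hypothetical.
 */
static void example_undo_partial_copy(struct iov_iter *iter, size_t copied,
				      int err)
{
	if (err)
		iov_iter_revert(iter, copied);	/* give the bytes back */
	/* on success, leave the iterator advanced past what was consumed */
}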
/*
* Return the count of just the current iov_iter segment.
*/
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
if (i->nr_segs > 1) {
if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
if (iov_iter_is_bvec(i))
return min(i->count, i->bvec->bv_len - i->iov_offset);
}
return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
const struct kvec *kvec, unsigned long nr_segs,
size_t count)
{
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter){
.iter_type = ITER_KVEC,
.copy_mc = false,
.data_source = direction,
.kvec = kvec,
.nr_segs = nr_segs,
.iov_offset = 0,
.count = count
};
}
EXPORT_SYMBOL(iov_iter_kvec);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
const struct bio_vec *bvec, unsigned long nr_segs,
size_t count)
{
WARN_ON(direction & ~(READ | WRITE));
*i = (struct iov_iter){
.iter_type = ITER_BVEC,
.copy_mc = false,
.data_source = direction,
.bvec = bvec,
.nr_segs = nr_segs,
.iov_offset = 0,
.count = count
};
}
EXPORT_SYMBOL(iov_iter_bvec);
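/*
 * Editor's illustrative sketch (not part of the original file): describing a
 * set of pages as an ITER_BVEC destination before handing it to a read path.
 * bvec_set_page() is assumed to come from <linux/bvec.h>; all names here are
 * hypothetical.
 */
static void example_init_bvec_iter(struct iov_iter *iter, struct bio_vec *bv,
				   struct page **pages, unsigned int npages)
{
	size_t total = 0;
	unsigned int k;

	for (k = 0; k < npages; k++) {
		bvec_set_page(&bv[k], pages[k], PAGE_SIZE, 0);
		total += PAGE_SIZE;
	}
	/* ITER_DEST: data will be copied *into* these pages. */
	iov_iter_bvec(iter, ITER_DEST, bv, npages, total);
}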
/**
* iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
* @i: The iterator to initialise.
* @direction: The direction of the transfer.
* @xarray: The xarray to access.
* @start: The start file position.
* @count: The size of the I/O buffer in bytes.
*
* Set up an I/O iterator to either draw data out of the pages attached to an
* inode or to inject data into those pages. The pages *must* be prevented
* from evaporation, either by the caller taking a ref on them or by
* locking them.
*/
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
struct xarray *xarray, loff_t start, size_t count)
{
BUG_ON(direction & ~1);
*i = (struct iov_iter) {
.iter_type = ITER_XARRAY,
.copy_mc = false,
.data_source = direction,
.xarray = xarray,
.xarray_start = start,
.count = count,
.iov_offset = 0
};
}
EXPORT_SYMBOL(iov_iter_xarray);
/**
* iov_iter_discard - Initialise an I/O iterator that discards data
* @i: The iterator to initialise.
* @direction: The direction of the transfer.
* @count: The size of the I/O buffer in bytes.
*
* Set up an I/O iterator that just discards everything that's written to it.
* It's only available as a READ iterator.
*/
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
BUG_ON(direction != READ);
*i = (struct iov_iter){
.iter_type = ITER_DISCARD,
.copy_mc = false,
.data_source = false,
.count = count,
.iov_offset = 0
};
}
EXPORT_SYMBOL(iov_iter_discard);
static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
unsigned len_mask)
{
size_t size = i->count;
size_t skip = i->iov_offset;
unsigned k;
for (k = 0; k < i->nr_segs; k++, skip = 0) {
const struct iovec *iov = iter_iov(i) + k;
size_t len = iov->iov_len - skip;
if (len > size)
len = size;
if (len & len_mask)
return false;
if ((unsigned long)(iov->iov_base + skip) & addr_mask)
return false;
size -= len;
if (!size)
break;
}
return true;
}
static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
unsigned len_mask)
{
size_t size = i->count;
unsigned skip = i->iov_offset;
unsigned k;
for (k = 0; k < i->nr_segs; k++, skip = 0) {
size_t len = i->bvec[k].bv_len - skip;
if (len > size)
len = size;
if (len & len_mask)
return false;
if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
return false;
size -= len;
if (!size)
break;
}
return true;
}
/**
* iov_iter_is_aligned() - Check if the addresses and lengths of each segment
* are aligned to the parameters.
*
* @i: &struct iov_iter to check
* @addr_mask: bit mask to check against the iov element's addresses
* @len_mask: bit mask to check against the iov element's lengths
*
* Return: false if any addresses or lengths intersect with the provided masks
*/
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
unsigned len_mask)
{
if (likely(iter_is_ubuf(i))) {
if (i->count & len_mask)
return false;
if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
return false;
return true;
}
if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
return iov_iter_aligned_iovec(i, addr_mask, len_mask);
if (iov_iter_is_bvec(i))
return iov_iter_aligned_bvec(i, addr_mask, len_mask);
if (iov_iter_is_xarray(i)) {
if (i->count & len_mask)
return false;
if ((i->xarray_start + i->iov_offset) & addr_mask)
return false;
}
return true;
}
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
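/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * direct-I/O style alignment gate built on iov_iter_is_aligned(). The
 * 'blocksize' parameter stands in for whatever logical block size the
 * caller cares about; it must be a power of two for the masks to be valid.
 */
static bool example_dio_aligned(const struct iov_iter *iter,
				unsigned int blocksize)
{
	/* The masks name the low bits that must be clear: blocksize - 1. */
	return iov_iter_is_aligned(iter, blocksize - 1, blocksize - 1);
}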
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
unsigned long res = 0;
size_t size = i->count;
size_t skip = i->iov_offset;
unsigned k;
for (k = 0; k < i->nr_segs; k++, skip = 0) {
const struct iovec *iov = iter_iov(i) + k;
size_t len = iov->iov_len - skip;
if (len) {
res |= (unsigned long)iov->iov_base + skip;
if (len > size)
len = size;
res |= len;
size -= len;
if (!size)
break;
}
}
return res;
}
static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
unsigned res = 0;
size_t size = i->count;
unsigned skip = i->iov_offset;
unsigned k;
for (k = 0; k < i->nr_segs; k++, skip = 0) {
size_t len = i->bvec[k].bv_len - skip;
res |= (unsigned long)i->bvec[k].bv_offset + skip;
if (len > size)
len = size;
res |= len;
size -= len;
if (!size)
break;
}
return res;
}
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
if (likely(iter_is_ubuf(i))) {
size_t size = i->count;
if (size)
return ((unsigned long)i->ubuf + i->iov_offset) | size;
return 0;
}
/* iovec and kvec have identical layouts */
if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
return iov_iter_alignment_iovec(i);
if (iov_iter_is_bvec(i))
return iov_iter_alignment_bvec(i);
if (iov_iter_is_xarray(i))
return (i->xarray_start + i->iov_offset) | i->count;
return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
unsigned long res = 0;
unsigned long v = 0;
size_t size = i->count;
unsigned k;
if (iter_is_ubuf(i))
return 0;
if (WARN_ON(!iter_is_iovec(i)))
return ~0U;
for (k = 0; k < i->nr_segs; k++) {
const struct iovec *iov = iter_iov(i) + k;
if (iov->iov_len) {
unsigned long base = (unsigned long)iov->iov_base;
if (v) // if not the first one
res |= base | v; // this start | previous end
v = base + iov->iov_len;
if (size <= iov->iov_len)
break;
size -= iov->iov_len;
}
}
return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
static int want_pages_array(struct page ***res, size_t size,
size_t start, unsigned int maxpages)
{
unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);
if (count > maxpages)
count = maxpages;
WARN_ON(!count); // caller should've prevented that
if (!*res) {
*res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
if (!*res)
return 0;
}
return count;
}
static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
pgoff_t index, unsigned int nr_pages)
{
XA_STATE(xas, xa, index);
struct page *page;
unsigned int ret = 0;
rcu_read_lock();
for (page = xas_load(&xas); page; page = xas_next(&xas)) {
if (xas_retry(&xas, page))
continue;
/* Has the page moved or been split? */
if (unlikely(page != xas_reload(&xas))) {
xas_reset(&xas);
continue;
}
pages[ret] = find_subpage(page, xas.xa_index);
get_page(pages[ret]);
if (++ret == nr_pages)
break;
}
rcu_read_unlock();
return ret;
}
static ssize_t iter_xarray_get_pages(struct iov_iter *i,
struct page ***pages, size_t maxsize,
unsigned maxpages, size_t *_start_offset)
{
unsigned nr, offset, count;
pgoff_t index;
loff_t pos;
pos = i->xarray_start + i->iov_offset;
index = pos >> PAGE_SHIFT;
offset = pos & ~PAGE_MASK;
*_start_offset = offset;
count = want_pages_array(pages, maxsize, offset, maxpages);
if (!count)
return -ENOMEM;
nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
if (nr == 0)
return 0;
maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
i->iov_offset += maxsize;
i->count -= maxsize;
return maxsize;
}
/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
size_t skip;
long k;
if (iter_is_ubuf(i))
return (unsigned long)i->ubuf + i->iov_offset;
for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
const struct iovec *iov = iter_iov(i) + k;
size_t len = iov->iov_len - skip;
if (unlikely(!len))
continue;
if (*size > len)
*size = len;
return (unsigned long)iov->iov_base + skip;
}
BUG(); // if it had been empty, we wouldn't get called
}
/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
size_t *size, size_t *start)
{
struct page *page;
size_t skip = i->iov_offset, len;
len = i->bvec->bv_len - skip;
if (*size > len)
*size = len;
skip += i->bvec->bv_offset;
page = i->bvec->bv_page + skip / PAGE_SIZE;
*start = skip % PAGE_SIZE;
return page;
}
static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
struct page ***pages, size_t maxsize,
unsigned int maxpages, size_t *start)
{
unsigned int n, gup_flags = 0;
if (maxsize > i->count)
maxsize = i->count;
if (!maxsize)
return 0;
if (maxsize > MAX_RW_COUNT)
maxsize = MAX_RW_COUNT;
if (likely(user_backed_iter(i))) {
unsigned long addr;
int res;
if (iov_iter_rw(i) != WRITE)
gup_flags |= FOLL_WRITE;
if (i->nofault)
gup_flags |= FOLL_NOFAULT;
addr = first_iovec_segment(i, &maxsize);
*start = addr % PAGE_SIZE;
addr &= PAGE_MASK;
n = want_pages_array(pages, maxsize, *start, maxpages);
if (!n)
return -ENOMEM;
res = get_user_pages_fast(addr, n, gup_flags, *pages);
if (unlikely(res <= 0))
return res;
maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
iov_iter_advance(i, maxsize);
return maxsize;
}
if (iov_iter_is_bvec(i)) {
struct page **p;
struct page *page;
page = first_bvec_segment(i, &maxsize, start);
n = want_pages_array(pages, maxsize, *start, maxpages);
if (!n)
return -ENOMEM;
p = *pages;
for (int k = 0; k < n; k++)
get_page(p[k] = page + k);
maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
i->count -= maxsize;
i->iov_offset += maxsize;
if (i->iov_offset == i->bvec->bv_len) {
i->iov_offset = 0;
i->bvec++;
i->nr_segs--;
}
return maxsize;
}
if (iov_iter_is_xarray(i))
return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
return -EFAULT;
}
ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
size_t maxsize, unsigned maxpages, size_t *start)
{
if (!maxpages)
return 0;
BUG_ON(!pages);
return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
}
EXPORT_SYMBOL(iov_iter_get_pages2);
ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
struct page ***pages, size_t maxsize, size_t *start)
{
ssize_t len;
*pages = NULL;
len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
if (len <= 0) {
kvfree(*pages);
*pages = NULL;
}
return len;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
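/*
 * Editor's illustrative sketch (not part of the original file): grabbing
 * page references for the next chunk of an iterator and dropping them once
 * the I/O is done. The "use the pages" step is elided; names are
 * hypothetical.
 */
static ssize_t example_pin_next_chunk(struct iov_iter *iter, size_t maxsize)
{
	struct page **pages = NULL;
	size_t offset;
	ssize_t got;
	long k, npages;

	got = iov_iter_get_pages_alloc2(iter, &pages, maxsize, &offset);
	if (got <= 0)
		return got;		/* error, or nothing left to do */

	/* ... hand (pages, offset, got) to the I/O machinery here ... */

	npages = DIV_ROUND_UP(offset + got, PAGE_SIZE);
	for (k = 0; k < npages; k++)
		put_page(pages[k]);	/* the helper took a ref on each */
	kvfree(pages);
	return got;
}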
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
struct iov_iter *i)
{
__wsum sum, next;
sum = *csum;
if (WARN_ON_ONCE(!i->data_source))
return 0;
iterate_and_advance(i, bytes, base, len, off, ({
next = csum_and_copy_from_user(base, addr + off, len);
sum = csum_block_add(sum, next, off);
next ? 0 : len;
}), ({
sum = csum_and_memcpy(addr + off, base, len, sum, off);
})
)
*csum = sum;
return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
struct iov_iter *i)
{
struct csum_state *csstate = _csstate;
__wsum sum, next;
if (WARN_ON_ONCE(i->data_source))
return 0;
if (unlikely(iov_iter_is_discard(i))) {
// can't use csum_and_memcpy() for that one - data is not copied
csstate->csum = csum_block_add(csstate->csum,
csum_partial(addr, bytes, 0),
csstate->off);
csstate->off += bytes;
return bytes;
}
sum = csum_shift(csstate->csum, csstate->off);
iterate_and_advance(i, bytes, base, len, off, ({
next = csum_and_copy_to_user(addr + off, base, len);
sum = csum_block_add(sum, next, off);
next ? 0 : len;
}), ({
sum = csum_and_memcpy(base, addr + off, len, sum, off);
})
)
csstate->csum = csum_shift(sum, csstate->off);
csstate->off += bytes;
return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
struct ahash_request *hash = hashp;
struct scatterlist sg;
size_t copied;
copied = copy_to_iter(addr, bytes, i);
sg_init_one(&sg, addr, copied);
ahash_request_set_crypt(hash, &sg, NULL, copied);
crypto_ahash_update(hash);
return copied;
#else
return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);
static int iov_npages(const struct iov_iter *i, int maxpages)
{
size_t skip = i->iov_offset, size = i->count;
const struct iovec *p;
int npages = 0;
for (p = iter_iov(i); size; skip = 0, p++) {
unsigned offs = offset_in_page(p->iov_base + skip);
size_t len = min(p->iov_len - skip, size);
if (len) {
size -= len;
npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
if (unlikely(npages > maxpages))
return maxpages;
}
}
return npages;
}
static int bvec_npages(const struct iov_iter *i, int maxpages)
{
size_t skip = i->iov_offset, size = i->count;
const struct bio_vec *p;
int npages = 0;
for (p = i->bvec; size; skip = 0, p++) {
unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
size_t len = min(p->bv_len - skip, size);
size -= len;
npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
if (unlikely(npages > maxpages))
return maxpages;
}
return npages;
}
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
if (unlikely(!i->count))
return 0;
if (likely(iter_is_ubuf(i))) {
unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
return min(npages, maxpages);
}
/* iovec and kvec have identical layouts */
if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
return iov_npages(i, maxpages);
if (iov_iter_is_bvec(i))
return bvec_npages(i, maxpages);
if (iov_iter_is_xarray(i)) {
unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
return min(npages, maxpages);
}
return 0;
}
EXPORT_SYMBOL(iov_iter_npages);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
*new = *old;
if (iov_iter_is_bvec(new))
return new->bvec = kmemdup(new->bvec,
new->nr_segs * sizeof(struct bio_vec),
flags);
else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
/* iovec and kvec have identical layout */
return new->__iov = kmemdup(new->__iov,
new->nr_segs * sizeof(struct iovec),
flags);
return NULL;
}
EXPORT_SYMBOL(dup_iter);
static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
const struct iovec __user *uvec, unsigned long nr_segs)
{
const struct compat_iovec __user *uiov =
(const struct compat_iovec __user *)uvec;
int ret = -EFAULT, i;
if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
return -EFAULT;
for (i = 0; i < nr_segs; i++) {
compat_uptr_t buf;
compat_ssize_t len;
unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
/* check for compat_size_t not fitting in compat_ssize_t .. */
if (len < 0) {
ret = -EINVAL;
goto uaccess_end;
}
iov[i].iov_base = compat_ptr(buf);
iov[i].iov_len = len;
}
ret = 0;
uaccess_end:
user_access_end();
return ret;
}
static __noclone int copy_iovec_from_user(struct iovec *iov,
const struct iovec __user *uiov, unsigned long nr_segs)
{
int ret = -EFAULT;
if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
return -EFAULT;
do {
void __user *buf;
ssize_t len;
unsafe_get_user(len, &uiov->iov_len, uaccess_end);
unsafe_get_user(buf, &uiov->iov_base, uaccess_end);
/* check for size_t not fitting in ssize_t .. */
if (unlikely(len < 0)) {
ret = -EINVAL;
goto uaccess_end;
}
iov->iov_base = buf;
iov->iov_len = len;
uiov++; iov++;
} while (--nr_segs);
ret = 0;
uaccess_end:
user_access_end();
return ret;
}
struct iovec *iovec_from_user(const struct iovec __user *uvec,
unsigned long nr_segs, unsigned long fast_segs,
struct iovec *fast_iov, bool compat)
{
struct iovec *iov = fast_iov;
int ret;
/*
* SuS says "The readv() function *may* fail if the iovcnt argument was
* less than or equal to 0, or greater than {IOV_MAX}." Linux has
* traditionally returned zero for zero segments, so...
*/
if (nr_segs == 0)
return iov;
if (nr_segs > UIO_MAXIOV)
return ERR_PTR(-EINVAL);
if (nr_segs > fast_segs) {
iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
if (!iov)
return ERR_PTR(-ENOMEM);
}
if (unlikely(compat))
ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
else
ret = copy_iovec_from_user(iov, uvec, nr_segs);
if (ret) {
if (iov != fast_iov)
kfree(iov);
return ERR_PTR(ret);
}
return iov;
}
/*
* Single segment iovec supplied by the user, import it as ITER_UBUF.
*/
static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
struct iovec **iovp, struct iov_iter *i,
bool compat)
{
struct iovec *iov = *iovp;
ssize_t ret;
if (compat)
ret = copy_compat_iovec_from_user(iov, uvec, 1);
else
ret = copy_iovec_from_user(iov, uvec, 1);
if (unlikely(ret))
return ret;
ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
if (unlikely(ret))
return ret;
*iovp = NULL;
return i->count;
}
ssize_t __import_iovec(int type, const struct iovec __user *uvec,
unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
struct iov_iter *i, bool compat)
{
ssize_t total_len = 0;
unsigned long seg;
struct iovec *iov;
if (nr_segs == 1)
return __import_iovec_ubuf(type, uvec, iovp, i, compat);
iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
if (IS_ERR(iov)) {
*iovp = NULL;
return PTR_ERR(iov);
}
/*
* According to the Single Unix Specification we should return EINVAL if
* an element length is < 0 when cast to ssize_t or if the total length
* would overflow the ssize_t return value of the system call.
*
* Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
* overflow case.
*/
for (seg = 0; seg < nr_segs; seg++) {
ssize_t len = (ssize_t)iov[seg].iov_len;
if (!access_ok(iov[seg].iov_base, len)) {
if (iov != *iovp)
kfree(iov);
*iovp = NULL;
return -EFAULT;
}
if (len > MAX_RW_COUNT - total_len) {
len = MAX_RW_COUNT - total_len;
iov[seg].iov_len = len;
}
total_len += len;
}
iov_iter_init(i, type, iov, nr_segs, total_len);
if (iov == *iovp)
*iovp = NULL;
else
*iovp = iov;
return total_len;
}
/**
* import_iovec() - Copy an array of &struct iovec from userspace
* into the kernel, check that it is valid, and initialize a new
* &struct iov_iter iterator to access it.
*
* @type: One of %READ or %WRITE.
* @uvec: Pointer to the userspace array.
* @nr_segs: Number of elements in userspace array.
* @fast_segs: Number of elements in *@iovp.
* @iovp: (input and output parameter) Pointer to pointer to (usually small
* on-stack) kernel array.
* @i: Pointer to iterator that will be initialized on success.
*
* If the array pointed to by *@iovp is large enough to hold all @nr_segs,
* then this function places %NULL in *@iovp on return. Otherwise, a new
* array will be allocated and the result placed in *@iovp. This means that
* the caller may call kfree() on *@iovp regardless of whether the small
* on-stack array was used or not (and regardless of whether this function
* returns an error or not).
*
* Return: Negative error code on error, bytes imported on success
*/
ssize_t import_iovec(int type, const struct iovec __user *uvec,
unsigned nr_segs, unsigned fast_segs,
struct iovec **iovp, struct iov_iter *i)
{
return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);
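/*
 * Editor's illustrative sketch (not part of the original file): the common
 * calling pattern for import_iovec() from a readv()-style handler. The
 * handler name is hypothetical; UIO_FASTIOV and ITER_DEST are assumed to
 * come from <linux/uio.h>.
 */
static ssize_t example_readv_prep(const struct iovec __user *uvec,
				  unsigned int nr_segs, struct iov_iter *iter)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	ssize_t ret;

	ret = import_iovec(ITER_DEST, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, iter);
	if (ret < 0)
		return ret;

	/* ... perform the read through 'iter' ... */

	/* Safe whether or not the on-stack array was used (see kerneldoc). */
	kfree(iov);
	return ret;
}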
int import_single_range(int rw, void __user *buf, size_t len,
struct iovec *iov, struct iov_iter *i)
{
if (len > MAX_RW_COUNT)
len = MAX_RW_COUNT;
if (unlikely(!access_ok(buf, len)))
return -EFAULT;
iov_iter_ubuf(i, rw, buf, len);
return 0;
}
EXPORT_SYMBOL(import_single_range);
int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
{
if (len > MAX_RW_COUNT)
len = MAX_RW_COUNT;
if (unlikely(!access_ok(buf, len)))
return -EFAULT;
iov_iter_ubuf(i, rw, buf, len);
return 0;
}
EXPORT_SYMBOL_GPL(import_ubuf);
/**
* iov_iter_restore() - Restore a &struct iov_iter to the same state as when
* iov_iter_save_state() was called.
*
* @i: &struct iov_iter to restore
* @state: state to restore from
*
* Used after iov_iter_save_state() to restore @i, if operations may
* have advanced it.
*
* Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
*/
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
!iter_is_ubuf(i)) && !iov_iter_is_kvec(i))
return;
i->iov_offset = state->iov_offset;
i->count = state->count;
if (iter_is_ubuf(i))
return;
/*
* For the *vec iters, nr_segs + iov is constant - if we increment
* the vec, then we also decrement the nr_segs count. Hence we don't
* need to track both of these, just one is enough and we can deduce
* the other from that. ITER_KVEC and ITER_IOVEC are the same struct
* size, so we can just increment the iov pointer as the two are unioned.
* ITER_BVEC _may_ be the same size on some archs, but on others it is
* not. Be safe and handle it separately.
*/
BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
if (iov_iter_is_bvec(i))
i->bvec -= state->nr_segs - i->nr_segs;
else
i->__iov -= state->nr_segs - i->nr_segs;
i->nr_segs = state->nr_segs;
}
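/*
 * Editor's illustrative sketch (not part of the original file): the
 * save/restore pattern this helper exists for. iov_iter_save_state() is
 * assumed to be the companion declared in <linux/uio.h>; 'op' stands in
 * for any operation that may consume the iterator before failing.
 */
static ssize_t example_retry_with_restore(struct iov_iter *iter,
					  ssize_t (*op)(struct iov_iter *))
{
	struct iov_iter_state state;
	ssize_t ret;

	iov_iter_save_state(iter, &state);
	ret = op(iter);
	if (ret == -EAGAIN) {
		/* Put the iterator back exactly where it started. */
		iov_iter_restore(iter, &state);
		ret = op(iter);
	}
	return ret;
}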
/*
* Extract a list of contiguous pages from an ITER_XARRAY iterator. This does not
* get references on the pages, nor does it get a pin on them.
*/
static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
struct page ***pages, size_t maxsize,
unsigned int maxpages,
iov_iter_extraction_t extraction_flags,
size_t *offset0)
{
struct page *page, **p;
unsigned int nr = 0, offset;
loff_t pos = i->xarray_start + i->iov_offset;
pgoff_t index = pos >> PAGE_SHIFT;
XA_STATE(xas, i->xarray, index);
offset = pos & ~PAGE_MASK;
*offset0 = offset;
maxpages = want_pages_array(pages, maxsize, offset, maxpages);
if (!maxpages)
return -ENOMEM;
p = *pages;
rcu_read_lock();
for (page = xas_load(&xas); page; page = xas_next(&xas)) {
if (xas_retry(&xas, page))
continue;
/* Has the page moved or been split? */
if (unlikely(page != xas_reload(&xas))) {
xas_reset(&xas);
continue;
}
p[nr++] = find_subpage(page, xas.xa_index);
if (nr == maxpages)
break;
}
rcu_read_unlock();
maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
iov_iter_advance(i, maxsize);
return maxsize;
}
/*
* Extract a list of contiguous pages from an ITER_BVEC iterator. This does
* not get references on the pages, nor does it get a pin on them.
*/
static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
struct page ***pages, size_t maxsize,
unsigned int maxpages,
iov_iter_extraction_t extraction_flags,
size_t *offset0)
{
struct page **p, *page;
size_t skip = i->iov_offset, offset, size;
int k;
for (;;) {
if (i->nr_segs == 0)
return 0;
size = min(maxsize, i->bvec->bv_len - skip);
if (size)
break;
i->iov_offset = 0;
i->nr_segs--;
i->bvec++;
skip = 0;
}
skip += i->bvec->bv_offset;
page = i->bvec->bv_page + skip / PAGE_SIZE;
offset = skip % PAGE_SIZE;
*offset0 = offset;
maxpages = want_pages_array(pages, size, offset, maxpages);
if (!maxpages)
return -ENOMEM;
p = *pages;
for (k = 0; k < maxpages; k++)
p[k] = page + k;
size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
iov_iter_advance(i, size);
return size;
}
/*
* Extract a list of virtually contiguous pages from an ITER_KVEC iterator.
* This does not get references on the pages, nor does it get a pin on them.
*/
static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
struct page ***pages, size_t maxsize,
unsigned int maxpages,
iov_iter_extraction_t extraction_flags,
size_t *offset0)
{
struct page **p, *page;
const void *kaddr;
size_t skip = i->iov_offset, offset, len, size;
int k;
for (;;) {
if (i->nr_segs == 0)
return 0;
size = min(maxsize, i->kvec->iov_len - skip);
if (size)
break;
i->iov_offset = 0;
i->nr_segs--;
i->kvec++;
skip = 0;
}
kaddr = i->kvec->iov_base + skip;
offset = (unsigned long)kaddr & ~PAGE_MASK;
*offset0 = offset;
maxpages = want_pages_array(pages, size, offset, maxpages);
if (!maxpages)
return -ENOMEM;
p = *pages;
kaddr -= offset;
len = offset + size;
for (k = 0; k < maxpages; k++) {
size_t seg = min_t(size_t, len, PAGE_SIZE);
if (is_vmalloc_or_module_addr(kaddr))
page = vmalloc_to_page(kaddr);
else
page = virt_to_page(kaddr);
p[k] = page;
len -= seg;
kaddr += PAGE_SIZE;
}
size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
iov_iter_advance(i, size);
return size;
}
/*
* Extract a list of contiguous pages from a user iterator and get a pin on
* each of them. This should only be used if the iterator is user-backed
* (ITER_IOVEC/ITER_UBUF).
*
* It does not get refs on the pages, but the pages must be unpinned by the
* caller once the transfer is complete.
*
* This is safe to be used where background IO/DMA *is* going to be modifying
* the buffer; using a pin rather than a ref forces fork() to give the
* child a copy of the page.
*/
static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
struct page ***pages,
size_t maxsize,
unsigned int maxpages,
iov_iter_extraction_t extraction_flags,
size_t *offset0)
{
unsigned long addr;
unsigned int gup_flags = 0;
size_t offset;
int res;
if (i->data_source == ITER_DEST)
gup_flags |= FOLL_WRITE;
if (extraction_flags & ITER_ALLOW_P2PDMA)
gup_flags |= FOLL_PCI_P2PDMA;
if (i->nofault)
gup_flags |= FOLL_NOFAULT;
addr = first_iovec_segment(i, &maxsize);
*offset0 = offset = addr % PAGE_SIZE;
addr &= PAGE_MASK;
maxpages = want_pages_array(pages, maxsize, offset, maxpages);
if (!maxpages)
return -ENOMEM;
res = pin_user_pages_fast(addr, maxpages, gup_flags, *pages);
if (unlikely(res <= 0))
return res;
maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
iov_iter_advance(i, maxsize);
return maxsize;
}
/**
* iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
* @i: The iterator to extract from
* @pages: Where to return the list of pages
* @maxsize: The maximum amount of iterator to extract
* @maxpages: The maximum size of the list of pages
* @extraction_flags: Flags to qualify request
* @offset0: Where to return the starting offset into (*@pages)[0]
*
* Extract a list of contiguous pages from the current point of the iterator,
* advancing the iterator. The maximum number of pages and the maximum amount
* of page contents can be set.
*
* If *@pages is NULL, a page list will be allocated to the required size and
* *@pages will be set to its base. If *@pages is not NULL, it will be assumed
* that the caller allocated a page list at least @maxpages in size and this
* will be filled in.
*
* @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
* be allowed on the pages extracted.
*
* The iov_iter_extract_will_pin() function can be used to query how cleanup
* should be performed.
*
* Extra refs or pins on the pages may be obtained as follows:
*
* (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
* added to the pages, but refs will not be taken.
* iov_iter_extract_will_pin() will return true.
*
* (*) If the iterator is ITER_KVEC, ITER_BVEC or ITER_XARRAY, the pages are
* merely listed; no extra refs or pins are obtained.
* iov_iter_extract_will_pin() will return false.
*
* Note also:
*
* (*) Use with ITER_DISCARD is not supported as that has no content.
*
* On success, the function sets *@pages to the new pagelist, if allocated, and
* sets *offset0 to the offset into the first page.
*
* It may also return -ENOMEM and -EFAULT.
*/
ssize_t iov_iter_extract_pages(struct iov_iter *i,
struct page ***pages,
size_t maxsize,
unsigned int maxpages,
iov_iter_extraction_t extraction_flags,
size_t *offset0)
{
maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
if (!maxsize)
return 0;
if (likely(user_backed_iter(i)))
return iov_iter_extract_user_pages(i, pages, maxsize,
maxpages, extraction_flags,
offset0);
if (iov_iter_is_kvec(i))
return iov_iter_extract_kvec_pages(i, pages, maxsize,
maxpages, extraction_flags,
offset0);
if (iov_iter_is_bvec(i))
return iov_iter_extract_bvec_pages(i, pages, maxsize,
maxpages, extraction_flags,
offset0);
if (iov_iter_is_xarray(i))
return iov_iter_extract_xarray_pages(i, pages, maxsize,
maxpages, extraction_flags,
offset0);
return -EFAULT;
}
EXPORT_SYMBOL_GPL(iov_iter_extract_pages);
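/*
 * Editor's illustrative sketch (not part of the original file): extracting
 * a batch of pages for DMA and releasing them afterwards. The cleanup rule
 * follows the kerneldoc above: pages are unpinned only if the iterator was
 * user backed. iov_iter_extract_will_pin() is assumed to come from
 * <linux/uio.h>; all other names are hypothetical.
 */
static ssize_t example_extract_and_release(struct iov_iter *iter,
					   size_t maxsize,
					   unsigned int maxpages)
{
	struct page **pages = NULL;
	size_t offset0;
	ssize_t len;
	long k, npages;

	len = iov_iter_extract_pages(iter, &pages, maxsize, maxpages, 0,
				     &offset0);
	if (len <= 0)
		return len;

	/* ... map (pages, offset0, len) for DMA and run the transfer ... */

	npages = DIV_ROUND_UP(offset0 + len, PAGE_SIZE);
	if (iov_iter_extract_will_pin(iter))
		for (k = 0; k < npages; k++)
			unpin_user_page(pages[k]);
	kvfree(pages);
	return len;
}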
| linux-master | lib/iov_iter.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* crc7.c
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc7.h>
/*
* Table for CRC-7 (polynomial x^7 + x^3 + 1).
* This is a big-endian CRC (msbit is highest power of x),
* aligned so the msbit of the byte is the x^6 coefficient
* and the lsbit is not used.
*/
const u8 crc7_be_syndrome_table[256] = {
0x00, 0x12, 0x24, 0x36, 0x48, 0x5a, 0x6c, 0x7e,
0x90, 0x82, 0xb4, 0xa6, 0xd8, 0xca, 0xfc, 0xee,
0x32, 0x20, 0x16, 0x04, 0x7a, 0x68, 0x5e, 0x4c,
0xa2, 0xb0, 0x86, 0x94, 0xea, 0xf8, 0xce, 0xdc,
0x64, 0x76, 0x40, 0x52, 0x2c, 0x3e, 0x08, 0x1a,
0xf4, 0xe6, 0xd0, 0xc2, 0xbc, 0xae, 0x98, 0x8a,
0x56, 0x44, 0x72, 0x60, 0x1e, 0x0c, 0x3a, 0x28,
0xc6, 0xd4, 0xe2, 0xf0, 0x8e, 0x9c, 0xaa, 0xb8,
0xc8, 0xda, 0xec, 0xfe, 0x80, 0x92, 0xa4, 0xb6,
0x58, 0x4a, 0x7c, 0x6e, 0x10, 0x02, 0x34, 0x26,
0xfa, 0xe8, 0xde, 0xcc, 0xb2, 0xa0, 0x96, 0x84,
0x6a, 0x78, 0x4e, 0x5c, 0x22, 0x30, 0x06, 0x14,
0xac, 0xbe, 0x88, 0x9a, 0xe4, 0xf6, 0xc0, 0xd2,
0x3c, 0x2e, 0x18, 0x0a, 0x74, 0x66, 0x50, 0x42,
0x9e, 0x8c, 0xba, 0xa8, 0xd6, 0xc4, 0xf2, 0xe0,
0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54, 0x62, 0x70,
0x82, 0x90, 0xa6, 0xb4, 0xca, 0xd8, 0xee, 0xfc,
0x12, 0x00, 0x36, 0x24, 0x5a, 0x48, 0x7e, 0x6c,
0xb0, 0xa2, 0x94, 0x86, 0xf8, 0xea, 0xdc, 0xce,
0x20, 0x32, 0x04, 0x16, 0x68, 0x7a, 0x4c, 0x5e,
0xe6, 0xf4, 0xc2, 0xd0, 0xae, 0xbc, 0x8a, 0x98,
0x76, 0x64, 0x52, 0x40, 0x3e, 0x2c, 0x1a, 0x08,
0xd4, 0xc6, 0xf0, 0xe2, 0x9c, 0x8e, 0xb8, 0xaa,
0x44, 0x56, 0x60, 0x72, 0x0c, 0x1e, 0x28, 0x3a,
0x4a, 0x58, 0x6e, 0x7c, 0x02, 0x10, 0x26, 0x34,
0xda, 0xc8, 0xfe, 0xec, 0x92, 0x80, 0xb6, 0xa4,
0x78, 0x6a, 0x5c, 0x4e, 0x30, 0x22, 0x14, 0x06,
0xe8, 0xfa, 0xcc, 0xde, 0xa0, 0xb2, 0x84, 0x96,
0x2e, 0x3c, 0x0a, 0x18, 0x66, 0x74, 0x42, 0x50,
0xbe, 0xac, 0x9a, 0x88, 0xf6, 0xe4, 0xd2, 0xc0,
0x1c, 0x0e, 0x38, 0x2a, 0x54, 0x46, 0x70, 0x62,
0x8c, 0x9e, 0xa8, 0xba, 0xc4, 0xd6, 0xe0, 0xf2
};
EXPORT_SYMBOL(crc7_be_syndrome_table);
/**
* crc7_be - update the CRC7 for the data buffer
* @crc: previous CRC7 value
* @buffer: data pointer
* @len: number of bytes in the buffer
* Context: any
*
* Returns the updated CRC7 value.
* The CRC7 is left-aligned in the byte (the lsbit is always 0), as that
* makes the computation easier, and all callers want it in that form.
*
*/
u8 crc7_be(u8 crc, const u8 *buffer, size_t len)
{
while (len--)
crc = crc7_be_byte(crc, *buffer++);
return crc;
}
EXPORT_SYMBOL(crc7_be);
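/*
 * Editor's illustrative sketch (not part of the original file): computing
 * the CRC byte of a 5-byte SD/MMC command frame. Because crc7_be() returns
 * the CRC left-aligned (bits 7..1), the caller only needs to OR in the
 * protocol's end bit; that end bit is a bus-protocol detail assumed here,
 * not something this file defines.
 */
static u8 example_mmc_crc_byte(const u8 cmd[5])
{
	return crc7_be(0, cmd, 5) | 0x01;	/* 7-bit CRC + end bit */
}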
MODULE_DESCRIPTION("CRC7 calculations");
MODULE_LICENSE("GPL");
| linux-master | lib/crc7.c |
// SPDX-License-Identifier: GPL-2.0
/*
* A fast, small, non-recursive O(n log n) sort for the Linux kernel
*
* This performs n*log2(n) + 0.37*n + o(n) comparisons on average,
* and 1.5*n*log2(n) + O(n) in the (very contrived) worst case.
*
* Glibc qsort() manages n*log2(n) - 1.26*n for random inputs (1.63*n
* better) at the expense of stack usage and much larger code to avoid
* quicksort's O(n^2) worst case.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>
/**
* is_aligned - is this pointer & size okay for word-wide copying?
* @base: pointer to data
* @size: size of each element
* @align: required alignment (typically 4 or 8)
*
* Returns true if elements can be copied using word loads and stores.
* The size must be a multiple of the alignment, and the base address must
* be as well, unless we have CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.
*
* For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
* to "if ((a | b) & mask)", so we do that by hand.
*/
__attribute_const__ __always_inline
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
unsigned char lsbits = (unsigned char)size;
(void)base;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
lsbits |= (unsigned char)(uintptr_t)base;
#endif
return (lsbits & (align - 1)) == 0;
}
/**
* swap_words_32 - swap two elements in 32-bit chunks
* @a: pointer to the first element to swap
* @b: pointer to the second element to swap
* @n: element size (must be a multiple of 4)
*
* Exchange the two objects in memory. This exploits base+index addressing,
* which basically all CPUs have, to minimize loop overhead computations.
*
* For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
* bottom of the loop, even though the zero flag is still valid from the
* subtract (since the intervening mov instructions don't alter the flags).
* Gcc 8.1.0 doesn't have that problem.
*/
static void swap_words_32(void *a, void *b, size_t n)
{
do {
u32 t = *(u32 *)(a + (n -= 4));
*(u32 *)(a + n) = *(u32 *)(b + n);
*(u32 *)(b + n) = t;
} while (n);
}
/**
* swap_words_64 - swap two elements in 64-bit chunks
* @a: pointer to the first element to swap
* @b: pointer to the second element to swap
* @n: element size (must be a multiple of 8)
*
* Exchange the two objects in memory. This exploits base+index
* addressing, which basically all CPUs have, to minimize loop overhead
* computations.
*
* We'd like to use 64-bit loads if possible. If they're not, emulating
* one requires base+index+4 addressing which x86 has but most other
* processors do not. If CONFIG_64BIT, we definitely have 64-bit loads,
* but it's possible to have 64-bit loads without 64-bit pointers (e.g.
* x32 ABI). Are there any cases the kernel needs to worry about?
*/
static void swap_words_64(void *a, void *b, size_t n)
{
do {
#ifdef CONFIG_64BIT
u64 t = *(u64 *)(a + (n -= 8));
*(u64 *)(a + n) = *(u64 *)(b + n);
*(u64 *)(b + n) = t;
#else
/* Use two 32-bit transfers to avoid base+index+4 addressing */
u32 t = *(u32 *)(a + (n -= 4));
*(u32 *)(a + n) = *(u32 *)(b + n);
*(u32 *)(b + n) = t;
t = *(u32 *)(a + (n -= 4));
*(u32 *)(a + n) = *(u32 *)(b + n);
*(u32 *)(b + n) = t;
#endif
} while (n);
}
/**
* swap_bytes - swap two elements a byte at a time
* @a: pointer to the first element to swap
* @b: pointer to the second element to swap
* @n: element size
*
* This is the fallback if alignment doesn't allow using larger chunks.
*/
static void swap_bytes(void *a, void *b, size_t n)
{
do {
char t = ((char *)a)[--n];
((char *)a)[n] = ((char *)b)[n];
((char *)b)[n] = t;
} while (n);
}
/*
* The values are arbitrary as long as they can't be confused with
* a pointer, but small integers make for the smallest compare
* instructions.
*/
#define SWAP_WORDS_64 (swap_r_func_t)0
#define SWAP_WORDS_32 (swap_r_func_t)1
#define SWAP_BYTES (swap_r_func_t)2
#define SWAP_WRAPPER (swap_r_func_t)3
struct wrapper {
cmp_func_t cmp;
swap_func_t swap;
};
/*
* The function pointer is last to make tail calls most efficient if the
* compiler decides not to inline this function.
*/
static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func, const void *priv)
{
if (swap_func == SWAP_WRAPPER) {
((const struct wrapper *)priv)->swap(a, b, (int)size);
return;
}
if (swap_func == SWAP_WORDS_64)
swap_words_64(a, b, size);
else if (swap_func == SWAP_WORDS_32)
swap_words_32(a, b, size);
else if (swap_func == SWAP_BYTES)
swap_bytes(a, b, size);
else
swap_func(a, b, (int)size, priv);
}
#define _CMP_WRAPPER ((cmp_r_func_t)0L)
static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
{
if (cmp == _CMP_WRAPPER)
return ((const struct wrapper *)priv)->cmp(a, b);
return cmp(a, b, priv);
}
/**
* parent - given the offset of the child, find the offset of the parent.
* @i: the offset of the heap element whose parent is sought. Non-zero.
* @lsbit: a precomputed 1-bit mask, equal to "size & -size"
* @size: size of each element
*
* In terms of array indexes, the parent of element j = @i/@size is simply
* (j-1)/2. But when working in byte offsets, we can't use implicit
* truncation of integer divides.
*
* Fortunately, we only need one bit of the quotient, not the full divide.
* @size has a least significant bit. That bit will be clear if @i is
* an even multiple of @size, and set if it's an odd multiple.
*
* Logically, we're doing "if (i & lsbit) i -= size;", but since the
* branch is unpredictable, it's done with a bit of clever branch-free
* code instead.
*/
__attribute_const__ __always_inline
static size_t parent(size_t i, unsigned int lsbit, size_t size)
{
i -= size;
i -= size & -(i & lsbit);
return i / 2;
}
/**
* sort_r - sort an array of elements
* @base: pointer to data to sort
* @num: number of elements
* @size: size of each element
* @cmp_func: pointer to comparison function
* @swap_func: pointer to swap function or NULL
* @priv: third argument passed to comparison function
*
* This function does a heapsort on the given array. You may provide
* a swap_func function if you need to do something more than a memory
* copy (e.g. fix up pointers or auxiliary data), but the built-in swap
* avoids a slow retpoline and so is significantly faster.
*
* Sorting time is O(n log n) both on average and worst-case. While
* quicksort is slightly faster on average, it suffers from exploitable
* O(n*n) worst-case behavior and extra memory requirements that make
* it less suitable for kernel use.
*/
void sort_r(void *base, size_t num, size_t size,
cmp_r_func_t cmp_func,
swap_r_func_t swap_func,
const void *priv)
{
/* pre-scale counters for performance */
size_t n = num * size, a = (num/2) * size;
const unsigned int lsbit = size & -size; /* Used to find parent */
if (!a) /* num < 2 || size == 0 */
return;
/* called from 'sort' without swap function, let's pick the default */
if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap)
swap_func = NULL;
if (!swap_func) {
if (is_aligned(base, size, 8))
swap_func = SWAP_WORDS_64;
else if (is_aligned(base, size, 4))
swap_func = SWAP_WORDS_32;
else
swap_func = SWAP_BYTES;
}
/*
* Loop invariants:
* 1. elements [a,n) satisfy the heap property (compare greater than
* all of their children),
* 2. elements [n,num*size) are sorted, and
* 3. a <= b <= c <= d <= n (whenever they are valid).
*/
for (;;) {
size_t b, c, d;
if (a) /* Building heap: sift down --a */
a -= size;
else if (n -= size) /* Sorting: Extract root to --n */
do_swap(base, base + n, size, swap_func, priv);
else /* Sort complete */
break;
/*
* Sift element at "a" down into heap. This is the
* "bottom-up" variant, which significantly reduces
* calls to cmp_func(): we find the sift-down path all
* the way to the leaves (one compare per level), then
* backtrack to find where to insert the target element.
*
* Because elements tend to sift down close to the leaves,
* this uses fewer compares than doing two per level
* on the way down. (A bit more than half as many on
* average, 3/4 worst-case.)
*/
for (b = a; c = 2*b + size, (d = c + size) < n;)
b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d;
if (d == n) /* Special case last leaf with no sibling */
b = c;
/* Now backtrack from "b" to the correct location for "a" */
while (b != a && do_cmp(base + a, base + b, cmp_func, priv) >= 0)
b = parent(b, lsbit, size);
c = b; /* Where "a" belongs */
while (b != a) { /* Shift it into place */
b = parent(b, lsbit, size);
do_swap(base + b, base + c, size, swap_func, priv);
}
}
}
EXPORT_SYMBOL(sort_r);
void sort(void *base, size_t num, size_t size,
cmp_func_t cmp_func,
swap_func_t swap_func)
{
struct wrapper w = {
.cmp = cmp_func,
.swap = swap_func,
};
return sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
}
EXPORT_SYMBOL(sort);
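/*
 * Editor's illustrative sketch (not part of the original file): sorting a
 * plain int array with the built-in swap (swap_func == NULL), which is the
 * fast path described in the sort_r() kerneldoc above.
 */
static int example_cmp_int(const void *a, const void *b)
{
	int x = *(const int *)a, y = *(const int *)b;

	return (x > y) - (x < y);	/* avoids the overflow of x - y */
}

static void example_sort_ints(int *vals, size_t n)
{
	sort(vals, n, sizeof(*vals), example_cmp_int, NULL);
}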
| linux-master | lib/sort.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include "notifier-error-inject.h"
static int debugfs_errno_set(void *data, u64 val)
{
*(int *)data = clamp_t(int, val, -MAX_ERRNO, 0);
return 0;
}
static int debugfs_errno_get(void *data, u64 *val)
{
*val = *(int *)data;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE_SIGNED(fops_errno, debugfs_errno_get, debugfs_errno_set,
"%lld\n");
static struct dentry *debugfs_create_errno(const char *name, umode_t mode,
struct dentry *parent, int *value)
{
return debugfs_create_file(name, mode, parent, value, &fops_errno);
}
static int notifier_err_inject_callback(struct notifier_block *nb,
unsigned long val, void *p)
{
int err = 0;
struct notifier_err_inject *err_inject =
container_of(nb, struct notifier_err_inject, nb);
struct notifier_err_inject_action *action;
for (action = err_inject->actions; action->name; action++) {
if (action->val == val) {
err = action->error;
break;
}
}
if (err)
pr_info("Injecting error (%d) to %s\n", err, action->name);
return notifier_from_errno(err);
}
struct dentry *notifier_err_inject_dir;
EXPORT_SYMBOL_GPL(notifier_err_inject_dir);
struct dentry *notifier_err_inject_init(const char *name, struct dentry *parent,
struct notifier_err_inject *err_inject, int priority)
{
struct notifier_err_inject_action *action;
umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
struct dentry *dir;
struct dentry *actions_dir;
err_inject->nb.notifier_call = notifier_err_inject_callback;
err_inject->nb.priority = priority;
dir = debugfs_create_dir(name, parent);
actions_dir = debugfs_create_dir("actions", dir);
for (action = err_inject->actions; action->name; action++) {
struct dentry *action_dir;
action_dir = debugfs_create_dir(action->name, actions_dir);
/*
* Create debugfs r/w file containing action->error. If
* notifier call chain is called with action->val, it will
* fail with the error code
*/
debugfs_create_errno("error", mode, action_dir, &action->error);
}
return dir;
}
EXPORT_SYMBOL_GPL(notifier_err_inject_init);
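/*
 * Editor's illustrative sketch (not part of the original file): how a
 * client module might describe its injectable events. The event names and
 * values are hypothetical, and the struct layout (a sentinel-terminated
 * .actions array with .name/.val/.error fields) is assumed from the
 * callback above; real clients also register err_inject.nb on their
 * notifier chain and pick a suitable priority.
 */
static struct notifier_err_inject example_err_inject = {
	.actions = {
		{ .name = "EXAMPLE_EVENT_PREPARE", .val = 1 },
		{ .name = "EXAMPLE_EVENT_COMMIT",  .val = 2 },
		{}	/* sentinel: NULL .name stops the callback's scan */
	}
};

static struct dentry *example_dir;

static int __init example_err_inject_setup(void)
{
	example_dir = notifier_err_inject_init("example",
					       notifier_err_inject_dir,
					       &example_err_inject, 0);
	return PTR_ERR_OR_ZERO(example_dir);
}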
static int __init err_inject_init(void)
{
notifier_err_inject_dir =
debugfs_create_dir("notifier-error-inject", NULL);
return 0;
}
static void __exit err_inject_exit(void)
{
debugfs_remove_recursive(notifier_err_inject_dir);
}
module_init(err_inject_init);
module_exit(err_inject_exit);
MODULE_DESCRIPTION("Notifier error injection module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Akinobu Mita <[email protected]>");
| linux-master | lib/notifier-error-inject.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* test_maple_tree.c: Test the maple tree API
* Copyright (c) 2018-2022 Oracle Corporation
* Author: Liam R. Howlett <[email protected]>
*
* Any tests that only require the interface of the tree.
*/
#include <linux/maple_tree.h>
#include <linux/module.h>
#define MTREE_ALLOC_MAX 0x2000000000000Ul
#define CONFIG_MAPLE_SEARCH
#define MAPLE_32BIT (MAPLE_NODE_SLOTS > 31)
#ifndef CONFIG_DEBUG_MAPLE_TREE
#define mt_dump(mt, fmt) do {} while (0)
#define mt_validate(mt) do {} while (0)
#define mt_cache_shrink() do {} while (0)
#define mas_dump(mas) do {} while (0)
#define mas_wr_dump(mas) do {} while (0)
atomic_t maple_tree_tests_run;
atomic_t maple_tree_tests_passed;
#undef MT_BUG_ON
#define MT_BUG_ON(__tree, __x) do { \
atomic_inc(&maple_tree_tests_run); \
if (__x) { \
pr_info("BUG at %s:%d (%u)\n", \
__func__, __LINE__, __x); \
pr_info("Pass: %u Run:%u\n", \
atomic_read(&maple_tree_tests_passed), \
atomic_read(&maple_tree_tests_run)); \
} else { \
atomic_inc(&maple_tree_tests_passed); \
} \
} while (0)
#endif
/* #define BENCH_SLOT_STORE */
/* #define BENCH_NODE_STORE */
/* #define BENCH_AWALK */
/* #define BENCH_WALK */
/* #define BENCH_MT_FOR_EACH */
/* #define BENCH_FORK */
/* #define BENCH_MAS_FOR_EACH */
/* #define BENCH_MAS_PREV */
#ifdef __KERNEL__
#define mt_set_non_kernel(x) do {} while (0)
#define mt_zero_nr_tallocated(x) do {} while (0)
#else
#define cond_resched() do {} while (0)
#endif
static int __init mtree_insert_index(struct maple_tree *mt,
unsigned long index, gfp_t gfp)
{
return mtree_insert(mt, index, xa_mk_value(index & LONG_MAX), gfp);
}
static void __init mtree_erase_index(struct maple_tree *mt, unsigned long index)
{
MT_BUG_ON(mt, mtree_erase(mt, index) != xa_mk_value(index & LONG_MAX));
MT_BUG_ON(mt, mtree_load(mt, index) != NULL);
}
static int __init mtree_test_insert(struct maple_tree *mt, unsigned long index,
void *ptr)
{
return mtree_insert(mt, index, ptr, GFP_KERNEL);
}
static int __init mtree_test_store_range(struct maple_tree *mt,
unsigned long start, unsigned long end, void *ptr)
{
return mtree_store_range(mt, start, end, ptr, GFP_KERNEL);
}
static int __init mtree_test_store(struct maple_tree *mt, unsigned long start,
void *ptr)
{
return mtree_test_store_range(mt, start, start, ptr);
}
static int __init mtree_test_insert_range(struct maple_tree *mt,
unsigned long start, unsigned long end, void *ptr)
{
return mtree_insert_range(mt, start, end, ptr, GFP_KERNEL);
}
static void __init *mtree_test_load(struct maple_tree *mt, unsigned long index)
{
return mtree_load(mt, index);
}
static void __init *mtree_test_erase(struct maple_tree *mt, unsigned long index)
{
return mtree_erase(mt, index);
}
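/*
 * Editor's illustrative sketch (not part of the original file): the basic
 * store/load/erase round trip that the wrappers above exercise, written
 * against the public mtree_* API. DEFINE_MTREE() is assumed to be the
 * usual static initializer from <linux/maple_tree.h>.
 */
static DEFINE_MTREE(example_tree);

static int __init example_mtree_roundtrip(void)
{
	int ret;

	ret = mtree_store_range(&example_tree, 10, 19, xa_mk_value(1),
				GFP_KERNEL);
	if (ret)
		return ret;

	/* Any index inside [10, 19] resolves to the stored entry. */
	WARN_ON(mtree_load(&example_tree, 15) != xa_mk_value(1));

	/* Erasing by one index removes the whole stored range. */
	WARN_ON(mtree_erase(&example_tree, 12) != xa_mk_value(1));
	return 0;
}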
#if defined(CONFIG_64BIT)
static noinline void __init check_mtree_alloc_range(struct maple_tree *mt,
unsigned long start, unsigned long end, unsigned long size,
unsigned long expected, int eret, void *ptr)
{
unsigned long result = expected + 1;
int ret;
ret = mtree_alloc_range(mt, &result, ptr, size, start, end,
GFP_KERNEL);
MT_BUG_ON(mt, ret != eret);
if (ret)
return;
MT_BUG_ON(mt, result != expected);
}
static noinline void __init check_mtree_alloc_rrange(struct maple_tree *mt,
unsigned long start, unsigned long end, unsigned long size,
unsigned long expected, int eret, void *ptr)
{
unsigned long result = expected + 1;
int ret;
ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end,
GFP_KERNEL);
MT_BUG_ON(mt, ret != eret);
if (ret)
return;
MT_BUG_ON(mt, result != expected);
}
#endif
static noinline void __init check_load(struct maple_tree *mt,
unsigned long index, void *ptr)
{
void *ret = mtree_test_load(mt, index);
if (ret != ptr)
pr_err("Load %lu returned %p expect %p\n", index, ret, ptr);
MT_BUG_ON(mt, ret != ptr);
}
static noinline void __init check_store_range(struct maple_tree *mt,
unsigned long start, unsigned long end, void *ptr, int expected)
{
int ret = -EINVAL;
unsigned long i;
ret = mtree_test_store_range(mt, start, end, ptr);
MT_BUG_ON(mt, ret != expected);
if (ret)
return;
for (i = start; i <= end; i++)
check_load(mt, i, ptr);
}
static noinline void __init check_insert_range(struct maple_tree *mt,
unsigned long start, unsigned long end, void *ptr, int expected)
{
int ret = -EINVAL;
unsigned long i;
ret = mtree_test_insert_range(mt, start, end, ptr);
MT_BUG_ON(mt, ret != expected);
if (ret)
return;
for (i = start; i <= end; i++)
check_load(mt, i, ptr);
}
static noinline void __init check_insert(struct maple_tree *mt,
unsigned long index, void *ptr)
{
int ret = -EINVAL;
ret = mtree_test_insert(mt, index, ptr);
MT_BUG_ON(mt, ret != 0);
}
static noinline void __init check_dup_insert(struct maple_tree *mt,
unsigned long index, void *ptr)
{
int ret = -EINVAL;
ret = mtree_test_insert(mt, index, ptr);
MT_BUG_ON(mt, ret != -EEXIST);
}
static noinline void __init check_index_load(struct maple_tree *mt,
unsigned long index)
{
return check_load(mt, index, xa_mk_value(index & LONG_MAX));
}
static inline __init int not_empty(struct maple_node *node)
{
int i;
if (node->parent)
return 1;
for (i = 0; i < ARRAY_SIZE(node->slot); i++)
if (node->slot[i])
return 1;
return 0;
}
static noinline void __init check_rev_seq(struct maple_tree *mt,
unsigned long max, bool verbose)
{
unsigned long i = max, j;
MT_BUG_ON(mt, !mtree_empty(mt));
mt_zero_nr_tallocated();
while (i) {
MT_BUG_ON(mt, mtree_insert_index(mt, i, GFP_KERNEL));
for (j = i; j <= max; j++)
check_index_load(mt, j);
check_load(mt, i - 1, NULL);
mt_set_in_rcu(mt);
MT_BUG_ON(mt, !mt_height(mt));
mt_clear_in_rcu(mt);
MT_BUG_ON(mt, !mt_height(mt));
i--;
}
check_load(mt, max + 1, NULL);
#ifndef __KERNEL__
if (verbose) {
rcu_barrier();
mt_dump(mt, mt_dump_dec);
pr_info(" %s test of 0-%lu %luK in %d active (%d total)\n",
__func__, max, mt_get_alloc_size()/1024, mt_nr_allocated(),
mt_nr_tallocated());
}
#endif
}
static noinline void __init check_seq(struct maple_tree *mt, unsigned long max,
bool verbose)
{
unsigned long i, j;
MT_BUG_ON(mt, !mtree_empty(mt));
mt_zero_nr_tallocated();
for (i = 0; i <= max; i++) {
MT_BUG_ON(mt, mtree_insert_index(mt, i, GFP_KERNEL));
for (j = 0; j <= i; j++)
check_index_load(mt, j);
if (i)
MT_BUG_ON(mt, !mt_height(mt));
check_load(mt, i + 1, NULL);
}
#ifndef __KERNEL__
if (verbose) {
rcu_barrier();
mt_dump(mt, mt_dump_dec);
pr_info(" seq test of 0-%lu %luK in %d active (%d total)\n",
max, mt_get_alloc_size()/1024, mt_nr_allocated(),
mt_nr_tallocated());
}
#endif
}
static noinline void __init check_lb_not_empty(struct maple_tree *mt)
{
unsigned long i, j;
unsigned long huge = 4000UL * 1000 * 1000;
i = huge;
while (i > 4096) {
check_insert(mt, i, (void *) i);
for (j = huge; j >= i; j /= 2) {
check_load(mt, j-1, NULL);
check_load(mt, j, (void *) j);
check_load(mt, j+1, NULL);
}
i /= 2;
}
mtree_destroy(mt);
}
static noinline void __init check_lower_bound_split(struct maple_tree *mt)
{
MT_BUG_ON(mt, !mtree_empty(mt));
check_lb_not_empty(mt);
}
static noinline void __init check_upper_bound_split(struct maple_tree *mt)
{
unsigned long i, j;
unsigned long huge;
MT_BUG_ON(mt, !mtree_empty(mt));
if (MAPLE_32BIT)
huge = 2147483647UL;
else
huge = 4000UL * 1000 * 1000;
i = 4096;
while (i < huge) {
check_insert(mt, i, (void *) i);
for (j = i; j >= huge; j *= 2) {
check_load(mt, j-1, NULL);
check_load(mt, j, (void *) j);
check_load(mt, j+1, NULL);
}
i *= 2;
}
mtree_destroy(mt);
}
static noinline void __init check_mid_split(struct maple_tree *mt)
{
unsigned long huge = 8000UL * 1000 * 1000;
check_insert(mt, huge, (void *) huge);
check_insert(mt, 0, xa_mk_value(0));
check_lb_not_empty(mt);
}
static noinline void __init check_rev_find(struct maple_tree *mt)
{
int i, nr_entries = 200;
void *val;
MA_STATE(mas, mt, 0, 0);
for (i = 0; i <= nr_entries; i++)
mtree_store_range(mt, i*10, i*10 + 5,
xa_mk_value(i), GFP_KERNEL);
rcu_read_lock();
mas_set(&mas, 1000);
val = mas_find_rev(&mas, 1000);
MT_BUG_ON(mt, val != xa_mk_value(100));
val = mas_find_rev(&mas, 1000);
MT_BUG_ON(mt, val != NULL);
mas_set(&mas, 999);
val = mas_find_rev(&mas, 997);
MT_BUG_ON(mt, val != NULL);
mas_set(&mas, 1000);
val = mas_find_rev(&mas, 900);
MT_BUG_ON(mt, val != xa_mk_value(100));
val = mas_find_rev(&mas, 900);
MT_BUG_ON(mt, val != xa_mk_value(99));
mas_set(&mas, 20);
val = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, val != xa_mk_value(2));
val = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, val != xa_mk_value(1));
val = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, val != xa_mk_value(0));
val = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, val != NULL);
rcu_read_unlock();
}
static noinline void __init check_find(struct maple_tree *mt)
{
unsigned long val = 0;
unsigned long count;
unsigned long max;
unsigned long top;
unsigned long last = 0, index = 0;
void *entry, *entry2;
MA_STATE(mas, mt, 0, 0);
/* Insert 0. */
MT_BUG_ON(mt, mtree_insert_index(mt, val++, GFP_KERNEL));
#if defined(CONFIG_64BIT)
top = 4398046511104UL;
#else
top = ULONG_MAX;
#endif
if (MAPLE_32BIT) {
count = 15;
} else {
count = 20;
}
for (int i = 0; i <= count; i++) {
if (val != 64)
MT_BUG_ON(mt, mtree_insert_index(mt, val, GFP_KERNEL));
else
MT_BUG_ON(mt, mtree_insert(mt, val,
XA_ZERO_ENTRY, GFP_KERNEL));
val <<= 2;
}
val = 0;
mas_set(&mas, val);
mas_lock(&mas);
while ((entry = mas_find(&mas, 268435456)) != NULL) {
if (val != 64)
MT_BUG_ON(mt, xa_mk_value(val) != entry);
else
MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
val <<= 2;
/* For zero check. */
if (!val)
val = 1;
}
mas_unlock(&mas);
val = 0;
mas_set(&mas, val);
mas_lock(&mas);
mas_for_each(&mas, entry, ULONG_MAX) {
if (val != 64)
MT_BUG_ON(mt, xa_mk_value(val) != entry);
else
MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
val <<= 2;
/* For zero check. */
if (!val)
val = 1;
}
mas_unlock(&mas);
/* Test mas_pause */
val = 0;
mas_set(&mas, val);
mas_lock(&mas);
mas_for_each(&mas, entry, ULONG_MAX) {
if (val != 64)
MT_BUG_ON(mt, xa_mk_value(val) != entry);
else
MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
val <<= 2;
/* For zero check. */
if (!val)
val = 1;
mas_pause(&mas);
mas_unlock(&mas);
mas_lock(&mas);
}
mas_unlock(&mas);
val = 0;
max = 300; /* A value big enough to include XA_ZERO_ENTRY at 64. */
mt_for_each(mt, entry, index, max) {
MT_BUG_ON(mt, xa_mk_value(val) != entry);
val <<= 2;
if (val == 64) /* Skip zero entry. */
val <<= 2;
/* For zero check. */
if (!val)
val = 1;
}
val = 0;
max = 0;
index = 0;
MT_BUG_ON(mt, mtree_insert_index(mt, ULONG_MAX, GFP_KERNEL));
mt_for_each(mt, entry, index, ULONG_MAX) {
if (val == top)
MT_BUG_ON(mt, entry != xa_mk_value(LONG_MAX));
else
MT_BUG_ON(mt, xa_mk_value(val) != entry);
/* Workaround for 32bit */
if ((val << 2) < val)
val = ULONG_MAX;
else
val <<= 2;
if (val == 64) /* Skip zero entry. */
val <<= 2;
/* For zero check. */
if (!val)
val = 1;
max++;
MT_BUG_ON(mt, max > 25);
}
mtree_erase_index(mt, ULONG_MAX);
mas_reset(&mas);
index = 17;
entry = mt_find(mt, &index, 512);
MT_BUG_ON(mt, xa_mk_value(256) != entry);
mas_reset(&mas);
index = 17;
entry = mt_find(mt, &index, 20);
MT_BUG_ON(mt, entry != NULL);
/* Range check.. */
/* Insert ULONG_MAX */
MT_BUG_ON(mt, mtree_insert_index(mt, ULONG_MAX, GFP_KERNEL));
val = 0;
mas_set(&mas, 0);
mas_lock(&mas);
mas_for_each(&mas, entry, ULONG_MAX) {
if (val == 64)
MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
else if (val == top)
MT_BUG_ON(mt, entry != xa_mk_value(LONG_MAX));
else
MT_BUG_ON(mt, xa_mk_value(val) != entry);
/* Workaround for 32bit */
if ((val << 2) < val)
val = ULONG_MAX;
else
val <<= 2;
/* For zero check. */
if (!val)
val = 1;
mas_pause(&mas);
mas_unlock(&mas);
mas_lock(&mas);
}
mas_unlock(&mas);
mas_set(&mas, 1048576);
mas_lock(&mas);
entry = mas_find(&mas, 1048576);
mas_unlock(&mas);
MT_BUG_ON(mas.tree, entry == NULL);
/*
* Find last value.
* 1. get the expected value, leveraging the existence of an end entry
* 2. delete end entry
* 3. find the last value but searching for ULONG_MAX and then using
* prev
*/
/* First, get the expected result. */
mas_lock(&mas);
mas_reset(&mas);
mas.index = ULONG_MAX; /* start at max.. */
entry = mas_find(&mas, ULONG_MAX);
entry = mas_prev(&mas, 0);
index = mas.index;
last = mas.last;
/* Erase the last entry. */
mas_reset(&mas);
mas.index = ULONG_MAX;
mas.last = ULONG_MAX;
mas_erase(&mas);
/* Get the previous value from MAS_START */
mas_reset(&mas);
entry2 = mas_prev(&mas, 0);
/* Check results. */
MT_BUG_ON(mt, entry != entry2);
MT_BUG_ON(mt, index != mas.index);
MT_BUG_ON(mt, last != mas.last);
mas.node = MAS_NONE;
mas.index = ULONG_MAX;
mas.last = ULONG_MAX;
entry2 = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != entry2);
mas_set(&mas, 0);
MT_BUG_ON(mt, mas_prev(&mas, 0) != NULL);
mas_unlock(&mas);
mtree_destroy(mt);
}
static noinline void __init check_find_2(struct maple_tree *mt)
{
unsigned long i, j;
void *entry;
MA_STATE(mas, mt, 0, 0);
rcu_read_lock();
mas_for_each(&mas, entry, ULONG_MAX)
MT_BUG_ON(mt, true);
rcu_read_unlock();
for (i = 0; i < 256; i++) {
mtree_insert_index(mt, i, GFP_KERNEL);
j = 0;
mas_set(&mas, 0);
rcu_read_lock();
mas_for_each(&mas, entry, ULONG_MAX) {
MT_BUG_ON(mt, entry != xa_mk_value(j));
j++;
}
rcu_read_unlock();
MT_BUG_ON(mt, j != i + 1);
}
for (i = 0; i < 256; i++) {
mtree_erase_index(mt, i);
j = i + 1;
mas_set(&mas, 0);
rcu_read_lock();
mas_for_each(&mas, entry, ULONG_MAX) {
if (xa_is_zero(entry))
continue;
MT_BUG_ON(mt, entry != xa_mk_value(j));
j++;
}
rcu_read_unlock();
MT_BUG_ON(mt, j != 256);
}
/*MT_BUG_ON(mt, !mtree_empty(mt)); */
}
#if defined(CONFIG_64BIT)
static noinline void __init check_alloc_rev_range(struct maple_tree *mt)
{
/*
* Generated by:
* cat /proc/self/maps | awk '{print $1}'|
* awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
*/
static const unsigned long range[] = {
/* Inclusive , Exclusive. */
0x565234af2000, 0x565234af4000,
0x565234af4000, 0x565234af9000,
0x565234af9000, 0x565234afb000,
0x565234afc000, 0x565234afd000,
0x565234afd000, 0x565234afe000,
0x565235def000, 0x565235e10000,
0x7f36d4bfd000, 0x7f36d4ee2000,
0x7f36d4ee2000, 0x7f36d4f04000,
0x7f36d4f04000, 0x7f36d504c000,
0x7f36d504c000, 0x7f36d5098000,
0x7f36d5098000, 0x7f36d5099000,
0x7f36d5099000, 0x7f36d509d000,
0x7f36d509d000, 0x7f36d509f000,
0x7f36d509f000, 0x7f36d50a5000,
0x7f36d50b9000, 0x7f36d50db000,
0x7f36d50db000, 0x7f36d50dc000,
0x7f36d50dc000, 0x7f36d50fa000,
0x7f36d50fa000, 0x7f36d5102000,
0x7f36d5102000, 0x7f36d5103000,
0x7f36d5103000, 0x7f36d5104000,
0x7f36d5104000, 0x7f36d5105000,
0x7fff5876b000, 0x7fff5878d000,
0x7fff5878e000, 0x7fff58791000,
0x7fff58791000, 0x7fff58793000,
};
static const unsigned long holes[] = {
/*
* Note: start of hole is INCLUSIVE
* end of hole is EXCLUSIVE
* (opposite of the above table.)
* Start of hole, end of hole, size of hole (+1)
*/
0x565234afb000, 0x565234afc000, 0x1000,
0x565234afe000, 0x565235def000, 0x12F1000,
0x565235e10000, 0x7f36d4bfd000, 0x28E49EDED000,
};
/*
* req_range consists of 5 values.
* 1. min index
* 2. max index
* 3. size
* 4. number that should be returned.
* 5. return value
*/
static const unsigned long req_range[] = {
0x565234af9000, /* Min */
0x7fff58791000, /* Max */
0x1000, /* Size */
0x7fff5878d << 12, /* First rev hole of size 0x1000 */
0, /* Return value success. */
0x0, /* Min */
0x565234AF0 << 12, /* Max */
0x3000, /* Size */
0x565234AEE << 12, /* max - 3. */
0, /* Return value success. */
0x0, /* Min */
-1, /* Max */
0x1000, /* Size */
562949953421311 << 12, /* First rev hole of size 0x1000 */
0, /* Return value success. */
0x0, /* Min */
0x7F36D5109 << 12, /* Max */
0x4000, /* Size */
0x7F36D5106 << 12, /* First rev hole of size 0x4000 */
0, /* Return value success. */
/* Ascend test. */
0x0,
34148798628 << 12,
19 << 12,
34148797418 << 12,
0x0,
/* Too big test. */
0x0,
18446744073709551615UL,
562915594369134UL << 12,
0x0,
-EBUSY,
/* Single space test. */
34148798725 << 12,
34148798725 << 12,
1 << 12,
34148798725 << 12,
0,
};
int i, range_count = ARRAY_SIZE(range);
int req_range_count = ARRAY_SIZE(req_range);
unsigned long min = 0;
MA_STATE(mas, mt, 0, 0);
mtree_store_range(mt, MTREE_ALLOC_MAX, ULONG_MAX, XA_ZERO_ENTRY,
GFP_KERNEL);
#define DEBUG_REV_RANGE 0
for (i = 0; i < range_count; i += 2) {
/* Inclusive, Inclusive (with the -1) */
#if DEBUG_REV_RANGE
pr_debug("\t%s: Insert %lu-%lu\n", __func__, range[i] >> 12,
(range[i + 1] >> 12) - 1);
#endif
check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
xa_mk_value(range[i] >> 12), 0);
mt_validate(mt);
}
mas_lock(&mas);
for (i = 0; i < ARRAY_SIZE(holes); i += 3) {
#if DEBUG_REV_RANGE
pr_debug("Search from %lu-%lu for gap %lu should be at %lu\n",
min, holes[i+1]>>12, holes[i+2]>>12,
holes[i] >> 12);
#endif
MT_BUG_ON(mt, mas_empty_area_rev(&mas, min,
holes[i+1] >> 12,
holes[i+2] >> 12));
#if DEBUG_REV_RANGE
pr_debug("Found %lu %lu\n", mas.index, mas.last);
pr_debug("gap %lu %lu\n", (holes[i] >> 12),
(holes[i+1] >> 12));
#endif
MT_BUG_ON(mt, mas.last + 1 != (holes[i+1] >> 12));
MT_BUG_ON(mt, mas.index != (holes[i+1] >> 12) - (holes[i+2] >> 12));
min = holes[i+1] >> 12;
mas_reset(&mas);
}
mas_unlock(&mas);
for (i = 0; i < req_range_count; i += 5) {
#if DEBUG_REV_RANGE
pr_debug("\tReverse request %d between %lu-%lu size %lu, should get %lu\n",
i, req_range[i] >> 12,
(req_range[i + 1] >> 12),
req_range[i+2] >> 12,
req_range[i+3] >> 12);
#endif
check_mtree_alloc_rrange(mt,
req_range[i] >> 12, /* start */
req_range[i+1] >> 12, /* end */
req_range[i+2] >> 12, /* size */
req_range[i+3] >> 12, /* expected address */
req_range[i+4], /* expected return */
xa_mk_value(req_range[i] >> 12)); /* pointer */
mt_validate(mt);
}
mt_set_non_kernel(1);
mtree_erase(mt, 34148798727); /* create a deleted range. */
mtree_erase(mt, 34148798725);
check_mtree_alloc_rrange(mt, 0, 34359052173, 210253414,
34148798725, 0, mt);
mtree_destroy(mt);
}
static noinline void __init check_alloc_range(struct maple_tree *mt)
{
/*
* Generated by:
* cat /proc/self/maps|awk '{print $1}'|
* awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
*/
static const unsigned long range[] = {
/* Inclusive, Exclusive. */
0x565234af2000, 0x565234af4000,
0x565234af4000, 0x565234af9000,
0x565234af9000, 0x565234afb000,
0x565234afc000, 0x565234afd000,
0x565234afd000, 0x565234afe000,
0x565235def000, 0x565235e10000,
0x7f36d4bfd000, 0x7f36d4ee2000,
0x7f36d4ee2000, 0x7f36d4f04000,
0x7f36d4f04000, 0x7f36d504c000,
0x7f36d504c000, 0x7f36d5098000,
0x7f36d5098000, 0x7f36d5099000,
0x7f36d5099000, 0x7f36d509d000,
0x7f36d509d000, 0x7f36d509f000,
0x7f36d509f000, 0x7f36d50a5000,
0x7f36d50b9000, 0x7f36d50db000,
0x7f36d50db000, 0x7f36d50dc000,
0x7f36d50dc000, 0x7f36d50fa000,
0x7f36d50fa000, 0x7f36d5102000,
0x7f36d5102000, 0x7f36d5103000,
0x7f36d5103000, 0x7f36d5104000,
0x7f36d5104000, 0x7f36d5105000,
0x7fff5876b000, 0x7fff5878d000,
0x7fff5878e000, 0x7fff58791000,
0x7fff58791000, 0x7fff58793000,
};
static const unsigned long holes[] = {
/* Start of hole, end of hole, size of hole (+1) */
0x565234afb000, 0x565234afc000, 0x1000,
0x565234afe000, 0x565235def000, 0x12F1000,
0x565235e10000, 0x7f36d4bfd000, 0x28E49EDED000,
};
/*
* req_range consists of 5 values.
* 1. min index
* 2. max index
* 3. size
* 4. number that should be returned.
* 5. return value
*/
static const unsigned long req_range[] = {
0x565234af9000, /* Min */
0x7fff58791000, /* Max */
0x1000, /* Size */
0x565234afb000, /* First hole in our data of size 0x1000. */
0, /* Return value success. */
0x0, /* Min */
0x7fff58791000, /* Max */
0x1F00, /* Size */
0x0, /* First hole in our data of size 2000. */
0, /* Return value success. */
/* Test ascend. */
34148797436 << 12, /* Min */
0x7fff587AF000, /* Max */
0x3000, /* Size */
34148798629 << 12, /* Expected location */
0, /* Return value success. */
/* Test failing. */
34148798623 << 12, /* Min */
34148798683 << 12, /* Max */
0x15000, /* Size */
0, /* Expected location */
-EBUSY, /* Return value failed. */
/* Test filling entire gap. */
34148798623 << 12, /* Min */
0x7fff587AF000, /* Max */
0x10000, /* Size */
34148798632 << 12, /* Expected location */
0, /* Return value success. */
/* Test walking off the end of root. */
0, /* Min */
-1, /* Max */
-1, /* Size */
0, /* Expected location */
-EBUSY, /* Return value failure. */
/* Test looking for too large a hole across entire range. */
0, /* Min */
-1, /* Max */
4503599618982063UL << 12, /* Size */
34359052178 << 12, /* Expected location */
-EBUSY, /* Return failure. */
/* Test a single entry */
34148798648 << 12, /* Min */
34148798648 << 12, /* Max */
4096, /* Size of 1 */
34148798648 << 12, /* Location is the same as min/max */
0, /* Success */
};
int i, range_count = ARRAY_SIZE(range);
int req_range_count = ARRAY_SIZE(req_range);
unsigned long min = 0x565234af2000;
MA_STATE(mas, mt, 0, 0);
mtree_store_range(mt, MTREE_ALLOC_MAX, ULONG_MAX, XA_ZERO_ENTRY,
GFP_KERNEL);
for (i = 0; i < range_count; i += 2) {
#define DEBUG_ALLOC_RANGE 0
#if DEBUG_ALLOC_RANGE
pr_debug("\tInsert %lu-%lu\n", range[i] >> 12,
(range[i + 1] >> 12) - 1);
mt_dump(mt, mt_dump_hex);
#endif
check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
xa_mk_value(range[i] >> 12), 0);
mt_validate(mt);
}
mas_lock(&mas);
for (i = 0; i < ARRAY_SIZE(holes); i += 3) {
#if DEBUG_ALLOC_RANGE
pr_debug("\tGet empty %lu-%lu size %lu (%lx-%lx)\n", min >> 12,
holes[i+1] >> 12, holes[i+2] >> 12,
min, holes[i+1]);
#endif
MT_BUG_ON(mt, mas_empty_area(&mas, min >> 12,
holes[i+1] >> 12,
holes[i+2] >> 12));
MT_BUG_ON(mt, mas.index != holes[i] >> 12);
min = holes[i+1];
mas_reset(&mas);
}
mas_unlock(&mas);
for (i = 0; i < req_range_count; i += 5) {
#if DEBUG_ALLOC_RANGE
pr_debug("\tTest %d: %lu-%lu size %lu expected %lu (%lu-%lu)\n",
i/5, req_range[i] >> 12, req_range[i + 1] >> 12,
req_range[i + 2] >> 12, req_range[i + 3] >> 12,
req_range[i], req_range[i+1]);
#endif
check_mtree_alloc_range(mt,
req_range[i] >> 12, /* start */
req_range[i+1] >> 12, /* end */
req_range[i+2] >> 12, /* size */
req_range[i+3] >> 12, /* expected address */
req_range[i+4], /* expected return */
xa_mk_value(req_range[i] >> 12)); /* pointer */
mt_validate(mt);
#if DEBUG_ALLOC_RANGE
mt_dump(mt, mt_dump_hex);
#endif
}
mtree_destroy(mt);
}
#endif
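/*
 * Illustrative sketch (not part of the original tests, hence __maybe_unused):
 * the reverse gap search used by the allocation checks above, wrapped as a
 * helper.  The name find_gap_rev_sketch is hypothetical.  On success, 0 is
 * returned and mas.index..mas.last is the highest window of @size free slots
 * within [@min, @max]; *start is set to the first index of that window.
 */
static __maybe_unused int find_gap_rev_sketch(struct maple_tree *mt,
		unsigned long min, unsigned long max, unsigned long size,
		unsigned long *start)
{
	int ret;
	MA_STATE(mas, mt, 0, 0);

	mas_lock(&mas);
	ret = mas_empty_area_rev(&mas, min, max, size);
	if (!ret)
		*start = mas.index;
	mas_unlock(&mas);

	return ret;
}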
static noinline void __init check_ranges(struct maple_tree *mt)
{
int i, val, val2;
static const unsigned long r[] = {
10, 15,
20, 25,
17, 22, /* Overlaps previous range. */
9, 1000, /* Huge. */
100, 200,
45, 168,
118, 128,
};
MT_BUG_ON(mt, !mtree_empty(mt));
check_insert_range(mt, r[0], r[1], xa_mk_value(r[0]), 0);
check_insert_range(mt, r[2], r[3], xa_mk_value(r[2]), 0);
check_insert_range(mt, r[4], r[5], xa_mk_value(r[4]), -EEXIST);
MT_BUG_ON(mt, !mt_height(mt));
/* Store */
check_store_range(mt, r[4], r[5], xa_mk_value(r[4]), 0);
check_store_range(mt, r[6], r[7], xa_mk_value(r[6]), 0);
check_store_range(mt, r[8], r[9], xa_mk_value(r[8]), 0);
MT_BUG_ON(mt, !mt_height(mt));
mtree_destroy(mt);
MT_BUG_ON(mt, mt_height(mt));
check_seq(mt, 50, false);
mt_set_non_kernel(4);
check_store_range(mt, 5, 47, xa_mk_value(47), 0);
MT_BUG_ON(mt, !mt_height(mt));
mtree_destroy(mt);
/* Create tree of 1-100 */
check_seq(mt, 100, false);
/* Store 45-168 */
mt_set_non_kernel(10);
check_store_range(mt, r[10], r[11], xa_mk_value(r[10]), 0);
MT_BUG_ON(mt, !mt_height(mt));
mtree_destroy(mt);
/* Create tree of 1-200 */
check_seq(mt, 200, false);
/* Store 45-168 */
check_store_range(mt, r[10], r[11], xa_mk_value(r[10]), 0);
MT_BUG_ON(mt, !mt_height(mt));
mtree_destroy(mt);
check_seq(mt, 30, false);
check_store_range(mt, 6, 18, xa_mk_value(6), 0);
MT_BUG_ON(mt, !mt_height(mt));
mtree_destroy(mt);
/* Overwrite across multiple levels. */
/* Create tree of 1-400 */
check_seq(mt, 400, false);
mt_set_non_kernel(50);
/* Store 118-128 */
check_store_range(mt, r[12], r[13], xa_mk_value(r[12]), 0);
mt_set_non_kernel(50);
mtree_test_erase(mt, 140);
mtree_test_erase(mt, 141);
mtree_test_erase(mt, 142);
mtree_test_erase(mt, 143);
mtree_test_erase(mt, 130);
mtree_test_erase(mt, 131);
mtree_test_erase(mt, 132);
mtree_test_erase(mt, 133);
mtree_test_erase(mt, 134);
mtree_test_erase(mt, 135);
check_load(mt, r[12], xa_mk_value(r[12]));
check_load(mt, r[13], xa_mk_value(r[12]));
check_load(mt, r[13] - 1, xa_mk_value(r[12]));
check_load(mt, r[13] + 1, xa_mk_value(r[13] + 1));
check_load(mt, 135, NULL);
check_load(mt, 140, NULL);
mt_set_non_kernel(0);
MT_BUG_ON(mt, !mt_height(mt));
mtree_destroy(mt);
/* Overwrite multiple levels at the end of the tree (slot 7) */
mt_set_non_kernel(50);
check_seq(mt, 400, false);
check_store_range(mt, 353, 361, xa_mk_value(353), 0);
check_store_range(mt, 347, 352, xa_mk_value(347), 0);
check_load(mt, 346, xa_mk_value(346));
for (i = 347; i <= 352; i++)
check_load(mt, i, xa_mk_value(347));
for (i = 353; i <= 361; i++)
check_load(mt, i, xa_mk_value(353));
check_load(mt, 362, xa_mk_value(362));
mt_set_non_kernel(0);
MT_BUG_ON(mt, !mt_height(mt));
mtree_destroy(mt);
mt_set_non_kernel(50);
check_seq(mt, 400, false);
check_store_range(mt, 352, 364, NULL, 0);
check_store_range(mt, 351, 363, xa_mk_value(352), 0);
check_load(mt, 350, xa_mk_value(350));
check_load(mt, 351, xa_mk_value(352));
for (i = 352; i <= 363; i++)
check_load(mt, i, xa_mk_value(352));
check_load(mt, 364, NULL);
check_load(mt, 365, xa_mk_value(365));
mt_set_non_kernel(0);
MT_BUG_ON(mt, !mt_height(mt));
mtree_destroy(mt);
mt_set_non_kernel(5);
check_seq(mt, 400, false);
check_store_range(mt, 352, 364, NULL, 0);
check_store_range(mt, 351, 364, xa_mk_value(352), 0);
check_load(mt, 350, xa_mk_value(350));
check_load(mt, 351, xa_mk_value(352));
for (i = 352; i <= 364; i++)
check_load(mt, i, xa_mk_value(352));
check_load(mt, 365, xa_mk_value(365));
mt_set_non_kernel(0);
MT_BUG_ON(mt, !mt_height(mt));
mtree_destroy(mt);
mt_set_non_kernel(50);
check_seq(mt, 400, false);
check_store_range(mt, 362, 367, xa_mk_value(362), 0);
check_store_range(mt, 353, 361, xa_mk_value(353), 0);
mt_set_non_kernel(0);
mt_validate(mt);
MT_BUG_ON(mt, !mt_height(mt));
mtree_destroy(mt);
/*
* Interesting cases:
* 1. Overwrite the end of a node and end in the first entry of the next
* node.
* 2. Split a single range
* 3. Overwrite the start of a range
* 4. Overwrite the end of a range
* 5. Overwrite the entire range
* 6. Overwrite a range that causes multiple parent nodes to be
* combined
* 7. Overwrite a range that causes multiple parent nodes and part of
* root to be combined
* 8. Overwrite the whole tree
* 9. Try to overwrite the zero entry of an alloc tree.
* 10. Write a range larger than a node's current pivot
*/
mt_set_non_kernel(50);
for (i = 0; i <= 500; i++) {
val = i*5;
val2 = (i+1)*5;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
}
check_store_range(mt, 2400, 2400, xa_mk_value(2400), 0);
check_store_range(mt, 2411, 2411, xa_mk_value(2411), 0);
check_store_range(mt, 2412, 2412, xa_mk_value(2412), 0);
check_store_range(mt, 2396, 2400, xa_mk_value(4052020), 0);
check_store_range(mt, 2402, 2402, xa_mk_value(2402), 0);
mtree_destroy(mt);
mt_set_non_kernel(0);
mt_set_non_kernel(50);
for (i = 0; i <= 500; i++) {
val = i*5;
val2 = (i+1)*5;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
}
check_store_range(mt, 2422, 2422, xa_mk_value(2422), 0);
check_store_range(mt, 2424, 2424, xa_mk_value(2424), 0);
check_store_range(mt, 2425, 2425, xa_mk_value(2), 0);
check_store_range(mt, 2460, 2470, NULL, 0);
check_store_range(mt, 2435, 2460, xa_mk_value(2435), 0);
check_store_range(mt, 2461, 2470, xa_mk_value(2461), 0);
mt_set_non_kernel(0);
MT_BUG_ON(mt, !mt_height(mt));
mtree_destroy(mt);
/* Check in-place modifications */
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
/* Append to the start of last range */
mt_set_non_kernel(50);
for (i = 0; i <= 500; i++) {
val = i * 5 + 1;
val2 = val + 4;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
}
/* Append to the last range without touching any boundaries */
for (i = 0; i < 10; i++) {
val = val2 + 5;
val2 = val + 4;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
}
/* Append to the end of last range */
val = val2;
for (i = 0; i < 10; i++) {
val += 5;
MT_BUG_ON(mt, mtree_test_store_range(mt, val, ULONG_MAX,
xa_mk_value(val)) != 0);
}
/* Overwriting the range and over a part of the next range */
for (i = 10; i < 30; i += 2) {
val = i * 5 + 1;
val2 = val + 5;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
}
/* Overwriting a part of the range and over the next range */
for (i = 50; i < 70; i += 2) {
val2 = i * 5;
val = val2 - 5;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
}
/*
* Expand the range, only partially overwriting the previous and
* next ranges
*/
for (i = 100; i < 130; i += 3) {
val = i * 5 - 5;
val2 = i * 5 + 1;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
}
/*
* Expand the range, only partially overwriting the previous and
* next ranges, in RCU mode
*/
mt_set_in_rcu(mt);
for (i = 150; i < 180; i += 3) {
val = i * 5 - 5;
val2 = i * 5 + 1;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
}
MT_BUG_ON(mt, !mt_height(mt));
mt_validate(mt);
mt_set_non_kernel(0);
mtree_destroy(mt);
/* Test rebalance gaps */
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
mt_set_non_kernel(50);
for (i = 0; i <= 50; i++) {
val = i*10;
val2 = (i+1)*10;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
}
check_store_range(mt, 161, 161, xa_mk_value(161), 0);
check_store_range(mt, 162, 162, xa_mk_value(162), 0);
check_store_range(mt, 163, 163, xa_mk_value(163), 0);
check_store_range(mt, 240, 249, NULL, 0);
mtree_erase(mt, 200);
mtree_erase(mt, 210);
mtree_erase(mt, 220);
mtree_erase(mt, 230);
mt_set_non_kernel(0);
MT_BUG_ON(mt, !mt_height(mt));
mtree_destroy(mt);
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
for (i = 0; i <= 500; i++) {
val = i*10;
val2 = (i+1)*10;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
}
check_store_range(mt, 4600, 4959, xa_mk_value(1), 0);
mt_validate(mt);
MT_BUG_ON(mt, !mt_height(mt));
mtree_destroy(mt);
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
for (i = 0; i <= 500; i++) {
val = i*10;
val2 = (i+1)*10;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
}
check_store_range(mt, 4811, 4811, xa_mk_value(4811), 0);
check_store_range(mt, 4812, 4812, xa_mk_value(4812), 0);
check_store_range(mt, 4861, 4861, xa_mk_value(4861), 0);
check_store_range(mt, 4862, 4862, xa_mk_value(4862), 0);
check_store_range(mt, 4842, 4849, NULL, 0);
mt_validate(mt);
MT_BUG_ON(mt, !mt_height(mt));
mtree_destroy(mt);
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
for (i = 0; i <= 1300; i++) {
val = i*10;
val2 = (i+1)*10;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
MT_BUG_ON(mt, mt_height(mt) >= 4);
}
/* Cause a 3 child split all the way up the tree. */
for (i = 5; i < 215; i += 10)
check_store_range(mt, 11450 + i, 11450 + i + 1, NULL, 0);
for (i = 5; i < 65; i += 10)
check_store_range(mt, 11770 + i, 11770 + i + 1, NULL, 0);
MT_BUG_ON(mt, mt_height(mt) >= 4);
for (i = 5; i < 45; i += 10)
check_store_range(mt, 11700 + i, 11700 + i + 1, NULL, 0);
if (!MAPLE_32BIT)
MT_BUG_ON(mt, mt_height(mt) < 4);
mtree_destroy(mt);
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
for (i = 0; i <= 1200; i++) {
val = i*10;
val2 = (i+1)*10;
check_store_range(mt, val, val2, xa_mk_value(val), 0);
MT_BUG_ON(mt, mt_height(mt) >= 4);
}
/* Fill parents and leaves before split. */
for (i = 5; i < 455; i += 10)
check_store_range(mt, 7800 + i, 7800 + i + 1, NULL, 0);
for (i = 1; i < 16; i++)
check_store_range(mt, 8185 + i, 8185 + i + 1,
xa_mk_value(8185+i), 0);
MT_BUG_ON(mt, mt_height(mt) >= 4);
/* Triple split across multiple levels. */
check_store_range(mt, 8184, 8184, xa_mk_value(8184), 0);
if (!MAPLE_32BIT)
MT_BUG_ON(mt, mt_height(mt) != 4);
}
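/*
 * Illustrative sketch (not part of the original tests, hence __maybe_unused):
 * a stored range is returned for every index it covers.  The helper name
 * range_visibility_sketch is hypothetical and it assumes it is handed an
 * otherwise empty tree.
 */
static __maybe_unused void range_visibility_sketch(struct maple_tree *mt)
{
	unsigned long i;

	mtree_store_range(mt, 10, 15, xa_mk_value(10), GFP_KERNEL);
	for (i = 10; i <= 15; i++)
		MT_BUG_ON(mt, mtree_load(mt, i) != xa_mk_value(10));
	/* Indices outside the range stay empty (empty-tree assumption). */
	MT_BUG_ON(mt, mtree_load(mt, 16) != NULL);
}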
static noinline void __init check_next_entry(struct maple_tree *mt)
{
void *entry = NULL;
unsigned long limit = 30, i = 0;
MA_STATE(mas, mt, i, i);
MT_BUG_ON(mt, !mtree_empty(mt));
check_seq(mt, limit, false);
rcu_read_lock();
/* Check the first one and get ma_state in the correct state. */
MT_BUG_ON(mt, mas_walk(&mas) != xa_mk_value(i++));
for ( ; i <= limit + 1; i++) {
entry = mas_next(&mas, limit);
if (i > limit)
MT_BUG_ON(mt, entry != NULL);
else
MT_BUG_ON(mt, xa_mk_value(i) != entry);
}
rcu_read_unlock();
mtree_destroy(mt);
}
static noinline void __init check_prev_entry(struct maple_tree *mt)
{
unsigned long index = 16;
void *value;
int i;
MA_STATE(mas, mt, index, index);
MT_BUG_ON(mt, !mtree_empty(mt));
check_seq(mt, 30, false);
rcu_read_lock();
value = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, value != xa_mk_value(index));
value = mas_prev(&mas, 0);
MT_BUG_ON(mt, value != xa_mk_value(index - 1));
rcu_read_unlock();
mtree_destroy(mt);
/* Check limits on prev */
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
mas_lock(&mas);
for (i = 0; i <= index; i++) {
mas_set_range(&mas, i*10, i*10+5);
mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
}
mas_set(&mas, 20);
value = mas_walk(&mas);
MT_BUG_ON(mt, value != xa_mk_value(2));
value = mas_prev(&mas, 19);
MT_BUG_ON(mt, value != NULL);
mas_set(&mas, 80);
value = mas_walk(&mas);
MT_BUG_ON(mt, value != xa_mk_value(8));
value = mas_prev(&mas, 76);
MT_BUG_ON(mt, value != NULL);
mas_unlock(&mas);
}
static noinline void __init check_root_expand(struct maple_tree *mt)
{
MA_STATE(mas, mt, 0, 0);
void *ptr;
mas_lock(&mas);
mas_set(&mas, 3);
ptr = mas_walk(&mas);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, ptr != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
ptr = &check_prev_entry;
mas_set(&mas, 1);
mas_store_gfp(&mas, ptr, GFP_KERNEL);
mas_set(&mas, 0);
ptr = mas_walk(&mas);
MT_BUG_ON(mt, ptr != NULL);
mas_set(&mas, 1);
ptr = mas_walk(&mas);
MT_BUG_ON(mt, ptr != &check_prev_entry);
mas_set(&mas, 2);
ptr = mas_walk(&mas);
MT_BUG_ON(mt, ptr != NULL);
mas_unlock(&mas);
mtree_destroy(mt);
mt_init_flags(mt, 0);
mas_lock(&mas);
mas_set(&mas, 0);
ptr = &check_prev_entry;
mas_store_gfp(&mas, ptr, GFP_KERNEL);
mas_set(&mas, 5);
ptr = mas_walk(&mas);
MT_BUG_ON(mt, ptr != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
mas_set_range(&mas, 0, 100);
ptr = mas_walk(&mas);
MT_BUG_ON(mt, ptr != &check_prev_entry);
MT_BUG_ON(mt, mas.last != 0);
mas_unlock(&mas);
mtree_destroy(mt);
mt_init_flags(mt, 0);
mas_lock(&mas);
mas_set(&mas, 0);
ptr = (void *)((unsigned long) check_prev_entry | 1UL);
mas_store_gfp(&mas, ptr, GFP_KERNEL);
ptr = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, ptr != NULL);
MT_BUG_ON(mt, (mas.index != 1) && (mas.last != ULONG_MAX));
mas_set(&mas, 1);
ptr = mas_prev(&mas, 0);
MT_BUG_ON(mt, (mas.index != 0) && (mas.last != 0));
MT_BUG_ON(mt, ptr != (void *)((unsigned long) check_prev_entry | 1UL));
mas_unlock(&mas);
mtree_destroy(mt);
mt_init_flags(mt, 0);
mas_lock(&mas);
mas_set(&mas, 0);
ptr = (void *)((unsigned long) check_prev_entry | 2UL);
mas_store_gfp(&mas, ptr, GFP_KERNEL);
ptr = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, ptr != NULL);
MT_BUG_ON(mt, (mas.index != ULONG_MAX) && (mas.last != ULONG_MAX));
mas_set(&mas, 1);
ptr = mas_prev(&mas, 0);
MT_BUG_ON(mt, (mas.index != 0) && (mas.last != 0));
MT_BUG_ON(mt, ptr != (void *)((unsigned long) check_prev_entry | 2UL));
mas_unlock(&mas);
}
static noinline void __init check_gap_combining(struct maple_tree *mt)
{
struct maple_enode *mn1, *mn2;
void *entry;
unsigned long singletons = 100;
static const unsigned long *seq100;
static const unsigned long seq100_64[] = {
/* 0-5 */
74, 75, 76,
50, 100, 2,
/* 6-12 */
44, 45, 46, 43,
20, 50, 3,
/* 13-20 */
80, 81, 82,
76, 2, 79, 85, 4,
};
static const unsigned long seq100_32[] = {
/* 0-5 */
61, 62, 63,
50, 100, 2,
/* 6-12 */
31, 32, 33, 30,
20, 50, 3,
/* 13-20 */
80, 81, 82,
76, 2, 79, 85, 4,
};
static const unsigned long seq2000[] = {
1152, 1151,
1100, 1200, 2,
};
static const unsigned long seq400[] = {
286, 318,
256, 260, 266, 270, 275, 280, 290, 398,
286, 310,
};
unsigned long index;
MA_STATE(mas, mt, 0, 0);
if (MAPLE_32BIT)
seq100 = seq100_32;
else
seq100 = seq100_64;
index = seq100[0];
mas_set(&mas, index);
MT_BUG_ON(mt, !mtree_empty(mt));
check_seq(mt, singletons, false); /* create 100 singletons. */
mt_set_non_kernel(1);
mtree_test_erase(mt, seq100[2]);
check_load(mt, seq100[2], NULL);
mtree_test_erase(mt, seq100[1]);
check_load(mt, seq100[1], NULL);
rcu_read_lock();
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != xa_mk_value(index));
mn1 = mas.node;
mas_next(&mas, ULONG_MAX);
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != xa_mk_value(index + 4));
mn2 = mas.node;
MT_BUG_ON(mt, mn1 == mn2); /* test the test. */
/*
* At this point, there is a gap of 2 at index + 1 between seq100[3] and
* seq100[4]. Search for the gap.
*/
mt_set_non_kernel(1);
mas_reset(&mas);
MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[3], seq100[4],
seq100[5]));
MT_BUG_ON(mt, mas.index != index + 1);
rcu_read_unlock();
mtree_test_erase(mt, seq100[6]);
check_load(mt, seq100[6], NULL);
mtree_test_erase(mt, seq100[7]);
check_load(mt, seq100[7], NULL);
mtree_test_erase(mt, seq100[8]);
index = seq100[9];
rcu_read_lock();
mas.index = index;
mas.last = index;
mas_reset(&mas);
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != xa_mk_value(index));
mn1 = mas.node;
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != xa_mk_value(index + 4));
mas_next(&mas, ULONG_MAX); /* go to the next entry. */
mn2 = mas.node;
MT_BUG_ON(mt, mn1 == mn2); /* test the next entry is in the next node. */
/*
* At this point, there is a gap of 3 at seq100[6]. Find it by
* searching 20 - 50 for size 3.
*/
mas_reset(&mas);
MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[10], seq100[11],
seq100[12]));
MT_BUG_ON(mt, mas.index != seq100[6]);
rcu_read_unlock();
mt_set_non_kernel(1);
mtree_store(mt, seq100[13], NULL, GFP_KERNEL);
check_load(mt, seq100[13], NULL);
check_load(mt, seq100[14], xa_mk_value(seq100[14]));
mtree_store(mt, seq100[14], NULL, GFP_KERNEL);
check_load(mt, seq100[13], NULL);
check_load(mt, seq100[14], NULL);
mas_reset(&mas);
rcu_read_lock();
MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[16], seq100[15],
seq100[17]));
MT_BUG_ON(mt, mas.index != seq100[13]);
mt_validate(mt);
rcu_read_unlock();
/*
* *DEPRECATED: no retries anymore* Test retry entry in the start of a
* gap.
*/
mt_set_non_kernel(2);
mtree_test_store_range(mt, seq100[18], seq100[14], NULL);
mtree_test_erase(mt, seq100[15]);
mas_reset(&mas);
rcu_read_lock();
MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[16], seq100[19],
seq100[20]));
rcu_read_unlock();
MT_BUG_ON(mt, mas.index != seq100[18]);
mt_validate(mt);
mtree_destroy(mt);
/* seq 2000 tests are for multi-level tree gaps */
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
check_seq(mt, 2000, false);
mt_set_non_kernel(1);
mtree_test_erase(mt, seq2000[0]);
mtree_test_erase(mt, seq2000[1]);
mt_set_non_kernel(2);
mas_reset(&mas);
rcu_read_lock();
MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq2000[2], seq2000[3],
seq2000[4]));
MT_BUG_ON(mt, mas.index != seq2000[1]);
rcu_read_unlock();
mt_validate(mt);
mtree_destroy(mt);
/* seq 400 tests rebalancing over two levels. */
mt_set_non_kernel(99);
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
check_seq(mt, 400, false);
mtree_test_store_range(mt, seq400[0], seq400[1], NULL);
mt_set_non_kernel(0);
mtree_destroy(mt);
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
check_seq(mt, 400, false);
mt_set_non_kernel(50);
mtree_test_store_range(mt, seq400[2], seq400[9],
xa_mk_value(seq400[2]));
mtree_test_store_range(mt, seq400[3], seq400[9],
xa_mk_value(seq400[3]));
mtree_test_store_range(mt, seq400[4], seq400[9],
xa_mk_value(seq400[4]));
mtree_test_store_range(mt, seq400[5], seq400[9],
xa_mk_value(seq400[5]));
mtree_test_store_range(mt, seq400[0], seq400[9],
xa_mk_value(seq400[0]));
mtree_test_store_range(mt, seq400[6], seq400[9],
xa_mk_value(seq400[6]));
mtree_test_store_range(mt, seq400[7], seq400[9],
xa_mk_value(seq400[7]));
mtree_test_store_range(mt, seq400[8], seq400[9],
xa_mk_value(seq400[8]));
mtree_test_store_range(mt, seq400[10], seq400[11],
xa_mk_value(seq400[10]));
mt_validate(mt);
mt_set_non_kernel(0);
mtree_destroy(mt);
}
static noinline void __init check_node_overwrite(struct maple_tree *mt)
{
int i, max = 4000;
for (i = 0; i < max; i++)
mtree_test_store_range(mt, i*100, i*100 + 50, xa_mk_value(i*100));
mtree_test_store_range(mt, 319951, 367950, NULL);
/*mt_dump(mt, mt_dump_dec); */
mt_validate(mt);
}
#if defined(BENCH_SLOT_STORE)
static noinline void __init bench_slot_store(struct maple_tree *mt)
{
int i, brk = 105, max = 1040, brk_start = 100, count = 20000000;
for (i = 0; i < max; i += 10)
mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
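/* Benchmark loop: clear the single slot at brk, then re-store brk_start-brk over it. */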
for (i = 0; i < count; i++) {
mtree_store_range(mt, brk, brk, NULL, GFP_KERNEL);
mtree_store_range(mt, brk_start, brk, xa_mk_value(brk),
GFP_KERNEL);
}
}
#endif
#if defined(BENCH_NODE_STORE)
static noinline void __init bench_node_store(struct maple_tree *mt)
{
int i, overwrite = 76, max = 240, count = 20000000;
for (i = 0; i < max; i += 10)
mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
for (i = 0; i < count; i++) {
mtree_store_range(mt, overwrite, overwrite + 15,
xa_mk_value(overwrite), GFP_KERNEL);
overwrite += 5;
if (overwrite >= 135)
overwrite = 76;
}
}
#endif
#if defined(BENCH_AWALK)
static noinline void __init bench_awalk(struct maple_tree *mt)
{
int i, max = 2500, count = 50000000;
MA_STATE(mas, mt, 1470, 1470);
for (i = 0; i < max; i += 10)
mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
mtree_store_range(mt, 1470, 1475, NULL, GFP_KERNEL);
for (i = 0; i < count; i++) {
mas_empty_area_rev(&mas, 0, 2000, 10);
mas_reset(&mas);
}
}
#endif
#if defined(BENCH_WALK)
static noinline void __init bench_walk(struct maple_tree *mt)
{
int i, max = 2500, count = 550000000;
MA_STATE(mas, mt, 1470, 1470);
for (i = 0; i < max; i += 10)
mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
for (i = 0; i < count; i++) {
mas_walk(&mas);
mas_reset(&mas);
}
}
#endif
#if defined(BENCH_MT_FOR_EACH)
static noinline void __init bench_mt_for_each(struct maple_tree *mt)
{
int i, count = 1000000;
unsigned long max = 2500, index = 0;
void *entry;
for (i = 0; i < max; i += 5)
mtree_store_range(mt, i, i + 4, xa_mk_value(i), GFP_KERNEL);
for (i = 0; i < count; i++) {
unsigned long j = 0;
mt_for_each(mt, entry, index, max) {
MT_BUG_ON(mt, entry != xa_mk_value(j));
j += 5;
}
index = 0;
}
}
#endif
#if defined(BENCH_MAS_FOR_EACH)
static noinline void __init bench_mas_for_each(struct maple_tree *mt)
{
int i, count = 1000000;
unsigned long max = 2500;
void *entry;
MA_STATE(mas, mt, 0, 0);
for (i = 0; i < max; i += 5) {
int gap = 4;
if (i % 30 == 0)
gap = 3;
mtree_store_range(mt, i, i + gap, xa_mk_value(i), GFP_KERNEL);
}
rcu_read_lock();
for (i = 0; i < count; i++) {
unsigned long j = 0;
mas_for_each(&mas, entry, max) {
MT_BUG_ON(mt, entry != xa_mk_value(j));
j += 5;
}
mas_set(&mas, 0);
}
rcu_read_unlock();
}
#endif
#if defined(BENCH_MAS_PREV)
static noinline void __init bench_mas_prev(struct maple_tree *mt)
{
int i, count = 1000000;
unsigned long max = 2500;
void *entry;
MA_STATE(mas, mt, 0, 0);
for (i = 0; i < max; i += 5) {
int gap = 4;
if (i % 30 == 0)
gap = 3;
mtree_store_range(mt, i, i + gap, xa_mk_value(i), GFP_KERNEL);
}
rcu_read_lock();
for (i = 0; i < count; i++) {
unsigned long j = 2495;
mas_set(&mas, ULONG_MAX);
while ((entry = mas_prev(&mas, 0)) != NULL) {
MT_BUG_ON(mt, entry != xa_mk_value(j));
j -= 5;
}
}
rcu_read_unlock();
}
#endif
/* check_forking - simulate the kernel forking sequence with the tree. */
static noinline void __init check_forking(struct maple_tree *mt)
{
struct maple_tree newmt;
int i, nr_entries = 134;
void *val;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, mt, 0, 0);
for (i = 0; i <= nr_entries; i++)
mtree_store_range(mt, i*10, i*10 + 5,
xa_mk_value(i), GFP_KERNEL);
mt_set_non_kernel(99999);
mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
newmas.tree = &newmt;
mas_reset(&newmas);
mas_reset(&mas);
mas_lock(&newmas);
mas.index = 0;
mas.last = 0;
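/* Reserve nodes for the expected entry count so the copy below can store without allocating. */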
if (mas_expected_entries(&newmas, nr_entries)) {
pr_err("OOM!");
BUG_ON(1);
}
rcu_read_lock();
mas_for_each(&mas, val, ULONG_MAX) {
newmas.index = mas.index;
newmas.last = mas.last;
mas_store(&newmas, val);
}
rcu_read_unlock();
mas_destroy(&newmas);
mas_unlock(&newmas);
mt_validate(&newmt);
mt_set_non_kernel(0);
mtree_destroy(&newmt);
}
static noinline void __init check_iteration(struct maple_tree *mt)
{
int i, nr_entries = 125;
void *val;
MA_STATE(mas, mt, 0, 0);
for (i = 0; i <= nr_entries; i++)
mtree_store_range(mt, i * 10, i * 10 + 9,
xa_mk_value(i), GFP_KERNEL);
mt_set_non_kernel(99999);
i = 0;
mas_lock(&mas);
mas_for_each(&mas, val, 925) {
MT_BUG_ON(mt, mas.index != i * 10);
MT_BUG_ON(mt, mas.last != i * 10 + 9);
/* Overwrite end of entry 92 */
if (i == 92) {
mas.index = 925;
mas.last = 929;
mas_store(&mas, val);
}
i++;
}
/* Ensure mas_find() gets the next value */
val = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, val != xa_mk_value(i));
mas_set(&mas, 0);
i = 0;
mas_for_each(&mas, val, 785) {
MT_BUG_ON(mt, mas.index != i * 10);
MT_BUG_ON(mt, mas.last != i * 10 + 9);
/* Overwrite start of entry 78 */
if (i == 78) {
mas.index = 780;
mas.last = 785;
mas_store(&mas, val);
} else {
i++;
}
}
val = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, val != xa_mk_value(i));
mas_set(&mas, 0);
i = 0;
mas_for_each(&mas, val, 765) {
MT_BUG_ON(mt, mas.index != i * 10);
MT_BUG_ON(mt, mas.last != i * 10 + 9);
/* Overwrite end of entry 76 and advance to the end */
if (i == 76) {
mas.index = 760;
mas.last = 765;
mas_store(&mas, val);
}
i++;
}
/* Make sure the next find returns the one after 765, 766-769 */
val = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, val != xa_mk_value(76));
mas_unlock(&mas);
mas_destroy(&mas);
mt_set_non_kernel(0);
}
static noinline void __init check_mas_store_gfp(struct maple_tree *mt)
{
struct maple_tree newmt;
int i, nr_entries = 135;
void *val;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, mt, 0, 0);
for (i = 0; i <= nr_entries; i++)
mtree_store_range(mt, i*10, i*10 + 5,
xa_mk_value(i), GFP_KERNEL);
mt_set_non_kernel(99999);
mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
newmas.tree = &newmt;
rcu_read_lock();
mas_lock(&newmas);
mas_reset(&newmas);
mas_set(&mas, 0);
mas_for_each(&mas, val, ULONG_MAX) {
newmas.index = mas.index;
newmas.last = mas.last;
mas_store_gfp(&newmas, val, GFP_KERNEL);
}
mas_unlock(&newmas);
rcu_read_unlock();
mt_validate(&newmt);
mt_set_non_kernel(0);
mtree_destroy(&newmt);
}
#if defined(BENCH_FORK)
static noinline void __init bench_forking(struct maple_tree *mt)
{
struct maple_tree newmt;
int i, nr_entries = 134, nr_fork = 80000;
void *val;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, mt, 0, 0);
for (i = 0; i <= nr_entries; i++)
mtree_store_range(mt, i*10, i*10 + 5,
xa_mk_value(i), GFP_KERNEL);
for (i = 0; i < nr_fork; i++) {
mt_set_non_kernel(99999);
mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
newmas.tree = &newmt;
mas_reset(&newmas);
mas_reset(&mas);
mas.index = 0;
mas.last = 0;
rcu_read_lock();
mas_lock(&newmas);
if (mas_expected_entries(&newmas, nr_entries)) {
printk("OOM!");
BUG_ON(1);
}
mas_for_each(&mas, val, ULONG_MAX) {
newmas.index = mas.index;
newmas.last = mas.last;
mas_store(&newmas, val);
}
mas_destroy(&newmas);
mas_unlock(&newmas);
rcu_read_unlock();
mt_validate(&newmt);
mt_set_non_kernel(0);
mtree_destroy(&newmt);
}
}
#endif
static noinline void __init next_prev_test(struct maple_tree *mt)
{
int i, nr_entries;
void *val;
MA_STATE(mas, mt, 0, 0);
struct maple_enode *mn;
static const unsigned long *level2;
static const unsigned long level2_64[] = { 707, 1000, 710, 715, 720,
725};
static const unsigned long level2_32[] = { 1747, 2000, 1750, 1755,
1760, 1765};
unsigned long last_index;
if (MAPLE_32BIT) {
nr_entries = 500;
level2 = level2_32;
last_index = 0x138e;
} else {
nr_entries = 200;
level2 = level2_64;
last_index = 0x7d6;
}
for (i = 0; i <= nr_entries; i++)
mtree_store_range(mt, i*10, i*10 + 5,
xa_mk_value(i), GFP_KERNEL);
mas_lock(&mas);
for (i = 0; i <= nr_entries / 2; i++) {
mas_next(&mas, 1000);
if (mas_is_none(&mas))
break;
}
mas_reset(&mas);
mas_set(&mas, 0);
i = 0;
mas_for_each(&mas, val, 1000) {
i++;
}
mas_reset(&mas);
mas_set(&mas, 0);
i = 0;
mas_for_each(&mas, val, 1000) {
mas_pause(&mas);
i++;
}
/*
* 680 - 685 = 0x61a00001930c
* 686 - 689 = NULL;
* 690 - 695 = 0x61a00001930c
* Check simple next/prev
*/
mas_set(&mas, 686);
val = mas_walk(&mas);
MT_BUG_ON(mt, val != NULL);
val = mas_next(&mas, 1000);
MT_BUG_ON(mt, val != xa_mk_value(690 / 10));
MT_BUG_ON(mt, mas.index != 690);
MT_BUG_ON(mt, mas.last != 695);
val = mas_prev(&mas, 0);
MT_BUG_ON(mt, val != xa_mk_value(680 / 10));
MT_BUG_ON(mt, mas.index != 680);
MT_BUG_ON(mt, mas.last != 685);
val = mas_next(&mas, 1000);
MT_BUG_ON(mt, val != xa_mk_value(690 / 10));
MT_BUG_ON(mt, mas.index != 690);
MT_BUG_ON(mt, mas.last != 695);
val = mas_next(&mas, 1000);
MT_BUG_ON(mt, val != xa_mk_value(700 / 10));
MT_BUG_ON(mt, mas.index != 700);
MT_BUG_ON(mt, mas.last != 705);
/* Check across node boundaries of the tree */
mas_set(&mas, 70);
val = mas_walk(&mas);
MT_BUG_ON(mt, val != xa_mk_value(70 / 10));
MT_BUG_ON(mt, mas.index != 70);
MT_BUG_ON(mt, mas.last != 75);
val = mas_next(&mas, 1000);
MT_BUG_ON(mt, val != xa_mk_value(80 / 10));
MT_BUG_ON(mt, mas.index != 80);
MT_BUG_ON(mt, mas.last != 85);
val = mas_prev(&mas, 70);
MT_BUG_ON(mt, val != xa_mk_value(70 / 10));
MT_BUG_ON(mt, mas.index != 70);
MT_BUG_ON(mt, mas.last != 75);
/* Check across two levels of the tree */
mas_reset(&mas);
mas_set(&mas, level2[0]);
val = mas_walk(&mas);
MT_BUG_ON(mt, val != NULL);
val = mas_next(&mas, level2[1]);
MT_BUG_ON(mt, val != xa_mk_value(level2[2] / 10));
MT_BUG_ON(mt, mas.index != level2[2]);
MT_BUG_ON(mt, mas.last != level2[3]);
mn = mas.node;
val = mas_next(&mas, level2[1]);
MT_BUG_ON(mt, val != xa_mk_value(level2[4] / 10));
MT_BUG_ON(mt, mas.index != level2[4]);
MT_BUG_ON(mt, mas.last != level2[5]);
MT_BUG_ON(mt, mn == mas.node);
val = mas_prev(&mas, 0);
MT_BUG_ON(mt, val != xa_mk_value(level2[2] / 10));
MT_BUG_ON(mt, mas.index != level2[2]);
MT_BUG_ON(mt, mas.last != level2[3]);
/* Check running off the end and back on */
mas_set(&mas, nr_entries * 10);
val = mas_walk(&mas);
MT_BUG_ON(mt, val != xa_mk_value(nr_entries));
MT_BUG_ON(mt, mas.index != (nr_entries * 10));
MT_BUG_ON(mt, mas.last != (nr_entries * 10 + 5));
val = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, val != NULL);
MT_BUG_ON(mt, mas.index != last_index);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
val = mas_prev(&mas, 0);
MT_BUG_ON(mt, val != xa_mk_value(nr_entries));
MT_BUG_ON(mt, mas.index != (nr_entries * 10));
MT_BUG_ON(mt, mas.last != (nr_entries * 10 + 5));
/* Check running off the start and back on */
mas_reset(&mas);
mas_set(&mas, 10);
val = mas_walk(&mas);
MT_BUG_ON(mt, val != xa_mk_value(1));
MT_BUG_ON(mt, mas.index != 10);
MT_BUG_ON(mt, mas.last != 15);
val = mas_prev(&mas, 0);
MT_BUG_ON(mt, val != xa_mk_value(0));
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 5);
val = mas_prev(&mas, 0);
MT_BUG_ON(mt, val != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 5);
MT_BUG_ON(mt, mas.node != MAS_NONE);
mas.index = 0;
mas.last = 5;
mas_store(&mas, NULL);
mas_reset(&mas);
mas_set(&mas, 10);
mas_walk(&mas);
val = mas_prev(&mas, 0);
MT_BUG_ON(mt, val != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 9);
mas_unlock(&mas);
mtree_destroy(mt);
mt_init(mt);
mtree_store_range(mt, 0, 0, xa_mk_value(0), GFP_KERNEL);
mtree_store_range(mt, 5, 5, xa_mk_value(5), GFP_KERNEL);
rcu_read_lock();
mas_set(&mas, 5);
val = mas_prev(&mas, 4);
MT_BUG_ON(mt, val != NULL);
rcu_read_unlock();
}
/* Test spanning writes that require balancing right sibling or right cousin */
static noinline void __init check_spanning_relatives(struct maple_tree *mt)
{
unsigned long i, nr_entries = 1000;
for (i = 0; i <= nr_entries; i++)
mtree_store_range(mt, i*10, i*10 + 5,
xa_mk_value(i), GFP_KERNEL);
mtree_store_range(mt, 9365, 9955, NULL, GFP_KERNEL);
}
static noinline void __init check_fuzzer(struct maple_tree *mt)
{
/*
* 1. Causes a spanning rebalance of a single root node.
* Fixed by setting the correct limit in mast_cp_to_nodes() when the
* entire right side is consumed.
*/
mtree_test_insert(mt, 88, (void *)0xb1);
mtree_test_insert(mt, 84, (void *)0xa9);
mtree_test_insert(mt, 2, (void *)0x5);
mtree_test_insert(mt, 4, (void *)0x9);
mtree_test_insert(mt, 14, (void *)0x1d);
mtree_test_insert(mt, 7, (void *)0xf);
mtree_test_insert(mt, 12, (void *)0x19);
mtree_test_insert(mt, 18, (void *)0x25);
mtree_test_store_range(mt, 8, 18, (void *)0x11);
mtree_destroy(mt);
/*
* 2. Cause a spanning rebalance of two nodes in root.
* Fixed by setting mast->r->max correctly.
*/
mt_init_flags(mt, 0);
mtree_test_store(mt, 87, (void *)0xaf);
mtree_test_store(mt, 0, (void *)0x1);
mtree_test_load(mt, 4);
mtree_test_insert(mt, 4, (void *)0x9);
mtree_test_store(mt, 8, (void *)0x11);
mtree_test_store(mt, 44, (void *)0x59);
mtree_test_store(mt, 68, (void *)0x89);
mtree_test_store(mt, 2, (void *)0x5);
mtree_test_insert(mt, 43, (void *)0x57);
mtree_test_insert(mt, 24, (void *)0x31);
mtree_test_insert(mt, 844, (void *)0x699);
mtree_test_store(mt, 84, (void *)0xa9);
mtree_test_store(mt, 4, (void *)0x9);
mtree_test_erase(mt, 4);
mtree_test_load(mt, 5);
mtree_test_erase(mt, 0);
mtree_destroy(mt);
/*
* 3. Cause a node overflow on copy
* Fixed by using the correct check for node size in mas_wr_modify()
* Also discovered issue with metadata setting.
*/
mt_init_flags(mt, 0);
mtree_test_store_range(mt, 0, ULONG_MAX, (void *)0x1);
mtree_test_store(mt, 4, (void *)0x9);
mtree_test_erase(mt, 5);
mtree_test_erase(mt, 0);
mtree_test_erase(mt, 4);
mtree_test_store(mt, 5, (void *)0xb);
mtree_test_erase(mt, 5);
mtree_test_store(mt, 5, (void *)0xb);
mtree_test_erase(mt, 5);
mtree_test_erase(mt, 4);
mtree_test_store(mt, 4, (void *)0x9);
mtree_test_store(mt, 444, (void *)0x379);
mtree_test_store(mt, 0, (void *)0x1);
mtree_test_load(mt, 0);
mtree_test_store(mt, 5, (void *)0xb);
mtree_test_erase(mt, 0);
mtree_destroy(mt);
/*
* 4. spanning store failure due to writing incorrect pivot value at
* last slot.
* Fixed by setting mast->r->max correctly in mast_cp_to_nodes()
*
*/
mt_init_flags(mt, 0);
mtree_test_insert(mt, 261, (void *)0x20b);
mtree_test_store(mt, 516, (void *)0x409);
mtree_test_store(mt, 6, (void *)0xd);
mtree_test_insert(mt, 5, (void *)0xb);
mtree_test_insert(mt, 1256, (void *)0x9d1);
mtree_test_store(mt, 4, (void *)0x9);
mtree_test_erase(mt, 1);
mtree_test_store(mt, 56, (void *)0x71);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_store(mt, 24, (void *)0x31);
mtree_test_erase(mt, 1);
mtree_test_insert(mt, 2263, (void *)0x11af);
mtree_test_insert(mt, 446, (void *)0x37d);
mtree_test_store_range(mt, 6, 45, (void *)0xd);
mtree_test_store_range(mt, 3, 446, (void *)0x7);
mtree_destroy(mt);
/*
* 5. mas_wr_extend_null() may overflow slots.
* Fix by checking against wr_mas->node_end.
*/
mt_init_flags(mt, 0);
mtree_test_store(mt, 48, (void *)0x61);
mtree_test_store(mt, 3, (void *)0x7);
mtree_test_load(mt, 0);
mtree_test_store(mt, 88, (void *)0xb1);
mtree_test_store(mt, 81, (void *)0xa3);
mtree_test_insert(mt, 0, (void *)0x1);
mtree_test_insert(mt, 8, (void *)0x11);
mtree_test_insert(mt, 4, (void *)0x9);
mtree_test_insert(mt, 2480, (void *)0x1361);
mtree_test_insert(mt, ULONG_MAX,
(void *)0xffffffffffffffff);
mtree_test_erase(mt, ULONG_MAX);
mtree_destroy(mt);
/*
* 6. When reusing a node with an implied pivot and the node is
* shrinking, old data would be left in the implied slot
* Fixed by checking the last pivot against mas->max and clearing
* accordingly. This only affected the left-most node as that node is
* the only one allowed to end in NULL.
*/
mt_init_flags(mt, 0);
mtree_test_erase(mt, 3);
mtree_test_insert(mt, 22, (void *)0x2d);
mtree_test_insert(mt, 15, (void *)0x1f);
mtree_test_load(mt, 2);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_insert(mt, 5, (void *)0xb);
mtree_test_erase(mt, 1);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_insert(mt, 4, (void *)0x9);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_erase(mt, 1);
mtree_test_insert(mt, 2, (void *)0x5);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_erase(mt, 3);
mtree_test_insert(mt, 22, (void *)0x2d);
mtree_test_insert(mt, 15, (void *)0x1f);
mtree_test_insert(mt, 2, (void *)0x5);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_insert(mt, 8, (void *)0x11);
mtree_test_load(mt, 2);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_store(mt, 1, (void *)0x3);
mtree_test_insert(mt, 5, (void *)0xb);
mtree_test_erase(mt, 1);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_insert(mt, 4, (void *)0x9);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_erase(mt, 1);
mtree_test_insert(mt, 2, (void *)0x5);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_erase(mt, 3);
mtree_test_insert(mt, 22, (void *)0x2d);
mtree_test_insert(mt, 15, (void *)0x1f);
mtree_test_insert(mt, 2, (void *)0x5);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_insert(mt, 8, (void *)0x11);
mtree_test_insert(mt, 12, (void *)0x19);
mtree_test_erase(mt, 1);
mtree_test_store_range(mt, 4, 62, (void *)0x9);
mtree_test_erase(mt, 62);
mtree_test_store_range(mt, 1, 0, (void *)0x3);
mtree_test_insert(mt, 11, (void *)0x17);
mtree_test_insert(mt, 3, (void *)0x7);
mtree_test_insert(mt, 3, (void *)0x7);
mtree_test_store(mt, 62, (void *)0x7d);
mtree_test_erase(mt, 62);
mtree_test_store_range(mt, 1, 15, (void *)0x3);
mtree_test_erase(mt, 1);
mtree_test_insert(mt, 22, (void *)0x2d);
mtree_test_insert(mt, 12, (void *)0x19);
mtree_test_erase(mt, 1);
mtree_test_insert(mt, 3, (void *)0x7);
mtree_test_store(mt, 62, (void *)0x7d);
mtree_test_erase(mt, 62);
mtree_test_insert(mt, 122, (void *)0xf5);
mtree_test_store(mt, 3, (void *)0x7);
mtree_test_insert(mt, 0, (void *)0x1);
mtree_test_store_range(mt, 0, 1, (void *)0x1);
mtree_test_insert(mt, 85, (void *)0xab);
mtree_test_insert(mt, 72, (void *)0x91);
mtree_test_insert(mt, 81, (void *)0xa3);
mtree_test_insert(mt, 726, (void *)0x5ad);
mtree_test_insert(mt, 0, (void *)0x1);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_store(mt, 51, (void *)0x67);
mtree_test_insert(mt, 611, (void *)0x4c7);
mtree_test_insert(mt, 485, (void *)0x3cb);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_erase(mt, 1);
mtree_test_insert(mt, 0, (void *)0x1);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_insert_range(mt, 26, 1, (void *)0x35);
mtree_test_load(mt, 1);
mtree_test_store_range(mt, 1, 22, (void *)0x3);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_erase(mt, 1);
mtree_test_load(mt, 53);
mtree_test_load(mt, 1);
mtree_test_store_range(mt, 1, 1, (void *)0x3);
mtree_test_insert(mt, 222, (void *)0x1bd);
mtree_test_insert(mt, 485, (void *)0x3cb);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_erase(mt, 1);
mtree_test_load(mt, 0);
mtree_test_insert(mt, 21, (void *)0x2b);
mtree_test_insert(mt, 3, (void *)0x7);
mtree_test_store(mt, 621, (void *)0x4db);
mtree_test_insert(mt, 0, (void *)0x1);
mtree_test_erase(mt, 5);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_store(mt, 62, (void *)0x7d);
mtree_test_erase(mt, 62);
mtree_test_store_range(mt, 1, 0, (void *)0x3);
mtree_test_insert(mt, 22, (void *)0x2d);
mtree_test_insert(mt, 12, (void *)0x19);
mtree_test_erase(mt, 1);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_store_range(mt, 4, 62, (void *)0x9);
mtree_test_erase(mt, 62);
mtree_test_erase(mt, 1);
mtree_test_load(mt, 1);
mtree_test_store_range(mt, 1, 22, (void *)0x3);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_erase(mt, 1);
mtree_test_load(mt, 53);
mtree_test_load(mt, 1);
mtree_test_store_range(mt, 1, 1, (void *)0x3);
mtree_test_insert(mt, 222, (void *)0x1bd);
mtree_test_insert(mt, 485, (void *)0x3cb);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_erase(mt, 1);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_load(mt, 0);
mtree_test_load(mt, 0);
mtree_destroy(mt);
/*
* 7. Previous fix was incomplete; fix mas_reuse_node() clearing of old
* data by overwriting it first - that way metadata is of no concern.
*/
mt_init_flags(mt, 0);
mtree_test_load(mt, 1);
mtree_test_insert(mt, 102, (void *)0xcd);
mtree_test_erase(mt, 2);
mtree_test_erase(mt, 0);
mtree_test_load(mt, 0);
mtree_test_insert(mt, 4, (void *)0x9);
mtree_test_insert(mt, 2, (void *)0x5);
mtree_test_insert(mt, 110, (void *)0xdd);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_insert_range(mt, 5, 0, (void *)0xb);
mtree_test_erase(mt, 2);
mtree_test_store(mt, 0, (void *)0x1);
mtree_test_store(mt, 112, (void *)0xe1);
mtree_test_insert(mt, 21, (void *)0x2b);
mtree_test_store(mt, 1, (void *)0x3);
mtree_test_insert_range(mt, 110, 2, (void *)0xdd);
mtree_test_store(mt, 2, (void *)0x5);
mtree_test_load(mt, 22);
mtree_test_erase(mt, 2);
mtree_test_store(mt, 210, (void *)0x1a5);
mtree_test_store_range(mt, 0, 2, (void *)0x1);
mtree_test_store(mt, 2, (void *)0x5);
mtree_test_erase(mt, 2);
mtree_test_erase(mt, 22);
mtree_test_erase(mt, 1);
mtree_test_erase(mt, 2);
mtree_test_store(mt, 0, (void *)0x1);
mtree_test_load(mt, 112);
mtree_test_insert(mt, 2, (void *)0x5);
mtree_test_erase(mt, 2);
mtree_test_store(mt, 1, (void *)0x3);
mtree_test_insert_range(mt, 1, 2, (void *)0x3);
mtree_test_erase(mt, 0);
mtree_test_erase(mt, 2);
mtree_test_store(mt, 2, (void *)0x5);
mtree_test_erase(mt, 0);
mtree_test_erase(mt, 2);
mtree_test_store(mt, 0, (void *)0x1);
mtree_test_store(mt, 0, (void *)0x1);
mtree_test_erase(mt, 2);
mtree_test_store(mt, 2, (void *)0x5);
mtree_test_erase(mt, 2);
mtree_test_insert(mt, 2, (void *)0x5);
mtree_test_insert_range(mt, 1, 2, (void *)0x3);
mtree_test_erase(mt, 0);
mtree_test_erase(mt, 2);
mtree_test_store(mt, 0, (void *)0x1);
mtree_test_load(mt, 112);
mtree_test_store_range(mt, 110, 12, (void *)0xdd);
mtree_test_store(mt, 2, (void *)0x5);
mtree_test_load(mt, 110);
mtree_test_insert_range(mt, 4, 71, (void *)0x9);
mtree_test_load(mt, 2);
mtree_test_store(mt, 2, (void *)0x5);
mtree_test_insert_range(mt, 11, 22, (void *)0x17);
mtree_test_erase(mt, 12);
mtree_test_store(mt, 2, (void *)0x5);
mtree_test_load(mt, 22);
mtree_destroy(mt);
/*
* 8. When rebalancing or spanning_rebalance(), the max of the new node
* may be set incorrectly to the final pivot and not the right max.
* Fix by setting the left max to orig right max if the entire node is
* consumed.
*/
mt_init_flags(mt, 0);
mtree_test_store(mt, 6, (void *)0xd);
mtree_test_store(mt, 67, (void *)0x87);
mtree_test_insert(mt, 15, (void *)0x1f);
mtree_test_insert(mt, 6716, (void *)0x3479);
mtree_test_store(mt, 61, (void *)0x7b);
mtree_test_insert(mt, 13, (void *)0x1b);
mtree_test_store(mt, 8, (void *)0x11);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_load(mt, 0);
mtree_test_erase(mt, 67167);
mtree_test_insert_range(mt, 6, 7167, (void *)0xd);
mtree_test_insert(mt, 6, (void *)0xd);
mtree_test_erase(mt, 67);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_erase(mt, 667167);
mtree_test_insert(mt, 6, (void *)0xd);
mtree_test_store(mt, 67, (void *)0x87);
mtree_test_insert(mt, 5, (void *)0xb);
mtree_test_erase(mt, 1);
mtree_test_insert(mt, 6, (void *)0xd);
mtree_test_erase(mt, 67);
mtree_test_insert(mt, 15, (void *)0x1f);
mtree_test_insert(mt, 67167, (void *)0x20cbf);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_load(mt, 7);
mtree_test_insert(mt, 16, (void *)0x21);
mtree_test_insert(mt, 36, (void *)0x49);
mtree_test_store(mt, 67, (void *)0x87);
mtree_test_store(mt, 6, (void *)0xd);
mtree_test_insert(mt, 367, (void *)0x2df);
mtree_test_insert(mt, 115, (void *)0xe7);
mtree_test_store(mt, 0, (void *)0x1);
mtree_test_store_range(mt, 1, 3, (void *)0x3);
mtree_test_store(mt, 1, (void *)0x3);
mtree_test_erase(mt, 67167);
mtree_test_insert_range(mt, 6, 47, (void *)0xd);
mtree_test_store(mt, 1, (void *)0x3);
mtree_test_insert_range(mt, 1, 67, (void *)0x3);
mtree_test_load(mt, 67);
mtree_test_insert(mt, 1, (void *)0x3);
mtree_test_erase(mt, 67167);
mtree_destroy(mt);
/*
* 9. Spanning store to the end of data caused an invalid metadata
* length which eventually resulted in a crash.
* Fix by checking if there is a value in pivot before incrementing the
* metadata end in mab_mas_cp(). To ensure this doesn't happen again,
* abstract the two locations this happens into a function called
* mas_leaf_set_meta().
*/
mt_init_flags(mt, 0);
mtree_test_insert(mt, 21, (void *)0x2b);
mtree_test_insert(mt, 12, (void *)0x19);
mtree_test_insert(mt, 6, (void *)0xd);
mtree_test_insert(mt, 8, (void *)0x11);
mtree_test_insert(mt, 2, (void *)0x5);
mtree_test_insert(mt, 91, (void *)0xb7);
mtree_test_insert(mt, 18, (void *)0x25);
mtree_test_insert(mt, 81, (void *)0xa3);
mtree_test_store_range(mt, 0, 128, (void *)0x1);
mtree_test_store(mt, 1, (void *)0x3);
mtree_test_erase(mt, 8);
mtree_test_insert(mt, 11, (void *)0x17);
mtree_test_insert(mt, 8, (void *)0x11);
mtree_test_insert(mt, 21, (void *)0x2b);
mtree_test_insert(mt, 2, (void *)0x5);
mtree_test_insert(mt, ULONG_MAX - 10, (void *)0xffffffffffffffeb);
mtree_test_erase(mt, ULONG_MAX - 10);
mtree_test_store_range(mt, 0, 281, (void *)0x1);
mtree_test_erase(mt, 2);
mtree_test_insert(mt, 1211, (void *)0x977);
mtree_test_insert(mt, 111, (void *)0xdf);
mtree_test_insert(mt, 13, (void *)0x1b);
mtree_test_insert(mt, 211, (void *)0x1a7);
mtree_test_insert(mt, 11, (void *)0x17);
mtree_test_insert(mt, 5, (void *)0xb);
mtree_test_insert(mt, 1218, (void *)0x985);
mtree_test_insert(mt, 61, (void *)0x7b);
mtree_test_store(mt, 1, (void *)0x3);
mtree_test_insert(mt, 121, (void *)0xf3);
mtree_test_insert(mt, 8, (void *)0x11);
mtree_test_insert(mt, 21, (void *)0x2b);
mtree_test_insert(mt, 2, (void *)0x5);
mtree_test_insert(mt, ULONG_MAX - 10, (void *)0xffffffffffffffeb);
mtree_test_erase(mt, ULONG_MAX - 10);
}
/* Duplicate the tree with a specific gap. */
static noinline void __init check_dup_gaps(struct maple_tree *mt,
unsigned long nr_entries, bool zero_start,
unsigned long gap)
{
unsigned long i = 0;
struct maple_tree newmt;
int ret;
void *tmp;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, &newmt, 0, 0);
if (!zero_start)
i = 1;
mt_zero_nr_tallocated();
for (; i <= nr_entries; i++)
mtree_store_range(mt, i*10, (i+1)*10 - gap,
xa_mk_value(i), GFP_KERNEL);
mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
mt_set_non_kernel(99999);
mas_lock(&newmas);
ret = mas_expected_entries(&newmas, nr_entries);
mt_set_non_kernel(0);
MT_BUG_ON(mt, ret != 0);
rcu_read_lock();
mas_for_each(&mas, tmp, ULONG_MAX) {
newmas.index = mas.index;
newmas.last = mas.last;
mas_store(&newmas, tmp);
}
rcu_read_unlock();
mas_destroy(&newmas);
mas_unlock(&newmas);
mtree_destroy(&newmt);
}
/* Duplicate many sizes of trees. Mainly to test expected entry values */
static noinline void __init check_dup(struct maple_tree *mt)
{
int i;
int big_start = 100010;
/* Check with a value at zero */
for (i = 10; i < 1000; i++) {
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
check_dup_gaps(mt, i, true, 5);
mtree_destroy(mt);
rcu_barrier();
}
cond_resched();
mt_cache_shrink();
/* Check with a value at zero, no gap */
for (i = 1000; i < 2000; i++) {
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
check_dup_gaps(mt, i, true, 0);
mtree_destroy(mt);
rcu_barrier();
}
cond_resched();
mt_cache_shrink();
/* Check with a value at zero and unreasonably large */
for (i = big_start; i < big_start + 10; i++) {
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
check_dup_gaps(mt, i, true, 5);
mtree_destroy(mt);
rcu_barrier();
}
cond_resched();
mt_cache_shrink();
/* Small to medium size not starting at zero */
for (i = 200; i < 1000; i++) {
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
check_dup_gaps(mt, i, false, 5);
mtree_destroy(mt);
rcu_barrier();
}
cond_resched();
mt_cache_shrink();
/* Unreasonably large not starting at zero */
for (i = big_start; i < big_start + 10; i++) {
mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
check_dup_gaps(mt, i, false, 5);
mtree_destroy(mt);
rcu_barrier();
cond_resched();
mt_cache_shrink();
}
/* Check non-allocation tree not starting at zero */
for (i = 1500; i < 3000; i++) {
mt_init_flags(mt, 0);
check_dup_gaps(mt, i, false, 5);
mtree_destroy(mt);
rcu_barrier();
cond_resched();
if (i % 2 == 0)
mt_cache_shrink();
}
mt_cache_shrink();
/* Check non-allocation tree starting at zero */
for (i = 200; i < 1000; i++) {
mt_init_flags(mt, 0);
check_dup_gaps(mt, i, true, 5);
mtree_destroy(mt);
rcu_barrier();
cond_resched();
}
mt_cache_shrink();
/* Unreasonably large */
for (i = big_start + 5; i < big_start + 10; i++) {
mt_init_flags(mt, 0);
check_dup_gaps(mt, i, true, 5);
mtree_destroy(mt);
rcu_barrier();
mt_cache_shrink();
cond_resched();
}
}
static noinline void __init check_bnode_min_spanning(struct maple_tree *mt)
{
int i = 50;
MA_STATE(mas, mt, 0, 0);
mt_set_non_kernel(9999);
mas_lock(&mas);
do {
mas_set_range(&mas, i*10, i*10+9);
mas_store(&mas, check_bnode_min_spanning);
} while (i--);
mas_set_range(&mas, 240, 509);
mas_store(&mas, NULL);
mas_unlock(&mas);
mas_destroy(&mas);
mt_set_non_kernel(0);
}
static noinline void __init check_empty_area_window(struct maple_tree *mt)
{
unsigned long i, nr_entries = 20;
MA_STATE(mas, mt, 0, 0);
for (i = 1; i <= nr_entries; i++)
mtree_store_range(mt, i*10, i*10 + 9,
xa_mk_value(i), GFP_KERNEL);
/* Create another hole besides the one at 0 */
mtree_store_range(mt, 160, 169, NULL, GFP_KERNEL);
/* Check lower bounds that don't fit */
rcu_read_lock();
MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 90, 10) != -EBUSY);
mas_reset(&mas);
MT_BUG_ON(mt, mas_empty_area_rev(&mas, 6, 90, 5) != -EBUSY);
/* Check lower bound that does fit */
mas_reset(&mas);
MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 90, 5) != 0);
MT_BUG_ON(mt, mas.index != 5);
MT_BUG_ON(mt, mas.last != 9);
rcu_read_unlock();
/* Check one gap that doesn't fit and one that does */
rcu_read_lock();
mas_reset(&mas);
MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 217, 9) != 0);
MT_BUG_ON(mt, mas.index != 161);
MT_BUG_ON(mt, mas.last != 169);
/* Check one gap that does fit above the min */
mas_reset(&mas);
MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 218, 3) != 0);
MT_BUG_ON(mt, mas.index != 216);
MT_BUG_ON(mt, mas.last != 218);
/* Check size that doesn't fit any gap */
mas_reset(&mas);
MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 218, 16) != -EBUSY);
/*
* Check size that doesn't fit the lower end of the window but
* does fit the gap
*/
mas_reset(&mas);
MT_BUG_ON(mt, mas_empty_area_rev(&mas, 167, 200, 4) != -EBUSY);
/*
* Check size that doesn't fit the upper end of the window but
* does fit the gap
*/
mas_reset(&mas);
MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 162, 4) != -EBUSY);
/* Check mas_empty_area forward */
mas_reset(&mas);
MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 9) != 0);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 8);
mas_reset(&mas);
MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 4) != 0);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 3);
mas_reset(&mas);
MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 11) != -EBUSY);
mas_reset(&mas);
MT_BUG_ON(mt, mas_empty_area(&mas, 5, 100, 6) != -EBUSY);
mas_reset(&mas);
MT_BUG_ON(mt, mas_empty_area(&mas, 0, 8, 10) != -EINVAL);
mas_reset(&mas);
mas_empty_area(&mas, 100, 165, 3);
mas_reset(&mas);
MT_BUG_ON(mt, mas_empty_area(&mas, 100, 163, 6) != -EBUSY);
rcu_read_unlock();
}
static noinline void __init check_empty_area_fill(struct maple_tree *mt)
{
const unsigned long max = 0x25D78000;
unsigned long size;
int loop, shift;
MA_STATE(mas, mt, 0, 0);
mt_set_non_kernel(99999);
for (shift = 12; shift <= 16; shift++) {
loop = 5000;
size = 1 << shift;
while (loop--) {
mas_set(&mas, 0);
mas_lock(&mas);
MT_BUG_ON(mt, mas_empty_area(&mas, 0, max, size) != 0);
MT_BUG_ON(mt, mas.last != mas.index + size - 1);
mas_store_gfp(&mas, (void *)size, GFP_KERNEL);
mas_unlock(&mas);
mas_reset(&mas);
}
}
/* No space left. */
size = 0x1000;
rcu_read_lock();
MT_BUG_ON(mt, mas_empty_area(&mas, 0, max, size) != -EBUSY);
rcu_read_unlock();
/* Fill a depth 3 node to the maximum */
for (unsigned long i = 629440511; i <= 629440800; i += 6)
mtree_store_range(mt, i, i + 5, (void *)i, GFP_KERNEL);
/* Make space in the second-last depth 4 node */
mtree_erase(mt, 631668735);
/* Make space in the last depth 4 node */
mtree_erase(mt, 629506047);
mas_reset(&mas);
/* Search from just after the gap in the second-last depth 4 */
rcu_read_lock();
MT_BUG_ON(mt, mas_empty_area(&mas, 629506048, 690000000, 0x5000) != 0);
rcu_read_unlock();
mt_set_non_kernel(0);
}
/*
* Check MAS_START, MAS_PAUSE, active (implied), and MAS_NONE transitions.
*
* The table below shows the single entry tree (0-0 pointer) and normal tree
* with nodes.
*
* Function ENTRY Start Result index & last
* ┬ ┬ ┬ ┬ ┬
* │ │ │ │ └─ the final range
* │ │ │ └─ The node value after execution
* │ │ └─ The node value before execution
* │ └─ If the entry exists or does not exists (DNE)
* └─ The function name
*
* Function ENTRY Start Result index & last
* mas_next()
* - after last
* Single entry tree at 0-0
* ------------------------
* DNE MAS_START MAS_NONE 1 - oo
* DNE MAS_PAUSE MAS_NONE 1 - oo
* DNE MAS_ROOT MAS_NONE 1 - oo
* when index = 0
* DNE MAS_NONE MAS_ROOT 0
* when index > 0
* DNE MAS_NONE MAS_NONE 1 - oo
*
* Normal tree
* -----------
* exists MAS_START active range
* DNE MAS_START active set to last range
* exists MAS_PAUSE active range
* DNE MAS_PAUSE active set to last range
* exists MAS_NONE active range
* exists active active range
* DNE active active set to last range
*
* Function ENTRY Start Result index & last
* mas_prev()
* - before index
* Single entry tree at 0-0
* ------------------------
* if index > 0
* exists MAS_START MAS_ROOT 0
* exists MAS_PAUSE MAS_ROOT 0
* exists MAS_NONE MAS_ROOT 0
*
* if index == 0
* DNE MAS_START MAS_NONE 0
* DNE MAS_PAUSE MAS_NONE 0
* DNE MAS_NONE MAS_NONE 0
* DNE MAS_ROOT MAS_NONE 0
*
* Normal tree
* -----------
* exists MAS_START active range
* DNE MAS_START active set to min
* exists MAS_PAUSE active range
* DNE MAS_PAUSE active set to min
* exists MAS_NONE active range
* DNE MAS_NONE MAS_NONE set to min
* any MAS_ROOT MAS_NONE 0
* exists active active range
* DNE active active last range
*
* Function ENTRY Start Result index & last
* mas_find()
* - at index or next
* Single entry tree at 0-0
* ------------------------
* if index > 0
* DNE MAS_START MAS_NONE 0
* DNE MAS_PAUSE MAS_NONE 0
* DNE MAS_ROOT MAS_NONE 0
* DNE MAS_NONE MAS_NONE 0
* if index == 0
* exists MAS_START MAS_ROOT 0
* exists MAS_PAUSE MAS_ROOT 0
* exists MAS_NONE MAS_ROOT 0
*
* Normal tree
* -----------
* exists MAS_START active range
* DNE MAS_START active set to max
* exists MAS_PAUSE active range
* DNE MAS_PAUSE active set to max
* exists MAS_NONE active range
* exists active active range
* DNE active active last range (max < last)
*
* Function ENTRY Start Result index & last
* mas_find_rev()
* - at index or before
* Single entry tree at 0-0
* ------------------------
* if index > 0
* exists MAS_START MAS_ROOT 0
* exists MAS_PAUSE MAS_ROOT 0
* exists MAS_NONE MAS_ROOT 0
* if index == 0
* DNE MAS_START MAS_NONE 0
* DNE MAS_PAUSE MAS_NONE 0
* DNE MAS_NONE MAS_NONE 0
* DNE MAS_ROOT MAS_NONE 0
*
* Normal tree
* -----------
* exists MAS_START active range
* DNE MAS_START active set to min
* exists MAS_PAUSE active range
* DNE MAS_PAUSE active set to min
* exists MAS_NONE active range
* exists active active range
* DNE active active last range (min > index)
*
* Function ENTRY Start Result index & last
* mas_walk()
* - Look up index
* Single entry tree at 0-0
* ------------------------
* if index > 0
* DNE MAS_START MAS_ROOT 1 - oo
* DNE MAS_PAUSE MAS_ROOT 1 - oo
* DNE MAS_NONE MAS_ROOT 1 - oo
* DNE MAS_ROOT MAS_ROOT 1 - oo
* if index == 0
* exists MAS_START MAS_ROOT 0
* exists MAS_PAUSE MAS_ROOT 0
* exists MAS_NONE MAS_ROOT 0
* exists MAS_ROOT MAS_ROOT 0
*
* Normal tree
* -----------
* exists MAS_START active range
* DNE MAS_START active range of NULL
* exists MAS_PAUSE active range
* DNE MAS_PAUSE active range of NULL
* exists MAS_NONE active range
* DNE MAS_NONE active range of NULL
* exists active active range
* DNE active active range of NULL
*/
#define mas_active(x) (((x).node != MAS_ROOT) && \
((x).node != MAS_START) && \
((x).node != MAS_PAUSE) && \
((x).node != MAS_NONE))
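/*
 * Illustrative sketch, not part of the original test file: a typical
 * traversal that exercises the transitions documented in the table above.
 * mas_find() (via mas_for_each()) advances a MAS_START or active state to
 * the next entry; mas_pause() drops back to MAS_PAUSE so the walk can be
 * resumed after the RCU read lock has been released and re-taken.  The
 * function name "example_paused_walk" is made up for this sketch.
 */
static __maybe_unused void example_paused_walk(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 0, 0);
	void *entry;

	rcu_read_lock();
	mas_for_each(&mas, entry, ULONG_MAX) {
		/* ... consume entry ... */
		mas_pause(&mas);
		rcu_read_unlock();
		/* A sleeping operation could safely run here. */
		rcu_read_lock();
	}
	rcu_read_unlock();
}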
static noinline void __init check_state_handling(struct maple_tree *mt)
{
MA_STATE(mas, mt, 0, 0);
void *entry, *ptr = (void *) 0x1234500;
void *ptr2 = &ptr;
void *ptr3 = &ptr2;
/* Check MAS_ROOT First */
mtree_store_range(mt, 0, 0, ptr, GFP_KERNEL);
mas_lock(&mas);
/* prev: Start -> none */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.node != MAS_NONE);
/* prev: Start -> root */
mas_set(&mas, 10);
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
MT_BUG_ON(mt, mas.node != MAS_ROOT);
/* prev: pause -> root */
mas_set(&mas, 10);
mas_pause(&mas);
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
MT_BUG_ON(mt, mas.node != MAS_ROOT);
/* next: start -> none */
mas_set(&mas, 0);
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.node != MAS_NONE);
/* next: start -> none */
mas_set(&mas, 10);
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.node != MAS_NONE);
/* find: start -> root */
mas_set(&mas, 0);
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
MT_BUG_ON(mt, mas.node != MAS_ROOT);
/* find: root -> none */
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
MT_BUG_ON(mt, mas.node != MAS_NONE);
/* find: none -> none */
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
MT_BUG_ON(mt, mas.node != MAS_NONE);
/* find: start -> none */
mas_set(&mas, 10);
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
MT_BUG_ON(mt, mas.node != MAS_NONE);
/* find_rev: none -> root */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
MT_BUG_ON(mt, mas.node != MAS_ROOT);
/* find_rev: start -> root */
mas_set(&mas, 0);
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
MT_BUG_ON(mt, mas.node != MAS_ROOT);
/* find_rev: root -> none */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
MT_BUG_ON(mt, mas.node != MAS_NONE);
/* find_rev: none -> none */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
MT_BUG_ON(mt, mas.node != MAS_NONE);
/* find_rev: start -> root */
mas_set(&mas, 10);
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
MT_BUG_ON(mt, mas.node != MAS_ROOT);
/* walk: start -> none */
mas_set(&mas, 10);
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
MT_BUG_ON(mt, mas.node != MAS_NONE);
/* walk: pause -> none*/
mas_set(&mas, 10);
mas_pause(&mas);
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
MT_BUG_ON(mt, mas.node != MAS_NONE);
/* walk: none -> none */
mas.index = mas.last = 10;
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
MT_BUG_ON(mt, mas.node != MAS_NONE);
/* walk: none -> none */
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
MT_BUG_ON(mt, mas.node != MAS_NONE);
/* walk: start -> root */
mas_set(&mas, 0);
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
MT_BUG_ON(mt, mas.node != MAS_ROOT);
/* walk: pause -> root */
mas_set(&mas, 0);
mas_pause(&mas);
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
MT_BUG_ON(mt, mas.node != MAS_ROOT);
/* walk: none -> root */
mas.node = MAS_NONE;
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
MT_BUG_ON(mt, mas.node != MAS_ROOT);
/* walk: root -> root */
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
MT_BUG_ON(mt, mas.node != MAS_ROOT);
/* walk: root -> none */
mas_set(&mas, 10);
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
MT_BUG_ON(mt, mas.node != MAS_NONE);
/* walk: none -> root */
mas.index = mas.last = 0;
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
MT_BUG_ON(mt, mas.node != MAS_ROOT);
mas_unlock(&mas);
/* Check when there is an actual node */
mtree_store_range(mt, 0, 0, NULL, GFP_KERNEL);
mtree_store_range(mt, 0x1000, 0x1500, ptr, GFP_KERNEL);
mtree_store_range(mt, 0x2000, 0x2500, ptr2, GFP_KERNEL);
mtree_store_range(mt, 0x3000, 0x3500, ptr3, GFP_KERNEL);
mas_lock(&mas);
/* next: start ->active */
mas_set(&mas, 0);
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_active(mas));
/* next: pause ->active */
mas_set(&mas, 0);
mas_pause(&mas);
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_active(mas));
/* next: none ->active */
mas.index = mas.last = 0;
mas.offset = 0;
mas.node = MAS_NONE;
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_active(mas));
/* next:active ->active */
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
MT_BUG_ON(mt, !mas_active(mas));
/* next:active -> active out of range*/
entry = mas_next(&mas, 0x2999);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x2501);
MT_BUG_ON(mt, mas.last != 0x2fff);
MT_BUG_ON(mt, !mas_active(mas));
/* Continue after out of range*/
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
MT_BUG_ON(mt, mas.last != 0x3500);
MT_BUG_ON(mt, !mas_active(mas));
/* next:active -> active out of range*/
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x3501);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
MT_BUG_ON(mt, !mas_active(mas));
/* next: none -> active, skip value at location */
mas_set(&mas, 0);
entry = mas_next(&mas, ULONG_MAX);
mas.node = MAS_NONE;
mas.offset = 0;
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
MT_BUG_ON(mt, !mas_active(mas));
/* prev:active ->active */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_active(mas));
/* prev:active -> active out of range*/
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0x0FFF);
MT_BUG_ON(mt, !mas_active(mas));
/* prev: pause ->active */
mas_set(&mas, 0x3600);
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != ptr3);
mas_pause(&mas);
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
MT_BUG_ON(mt, !mas_active(mas));
/* prev:active -> active out of range*/
entry = mas_prev(&mas, 0x1600);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1FFF);
MT_BUG_ON(mt, !mas_active(mas));
/* prev: active ->active, continue*/
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_active(mas));
/* find: start ->active */
mas_set(&mas, 0);
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_active(mas));
/* find: pause ->active */
mas_set(&mas, 0);
mas_pause(&mas);
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_active(mas));
/* find: start -> active on value */
mas_set(&mas, 1200);
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_active(mas));
/* find:active ->active */
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
MT_BUG_ON(mt, !mas_active(mas));
/* find:active -> active (NULL)*/
entry = mas_find(&mas, 0x2700);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x2501);
MT_BUG_ON(mt, mas.last != 0x2FFF);
MT_BUG_ON(mt, !mas_active(mas));
/* find: none ->active */
entry = mas_find(&mas, 0x5000);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
MT_BUG_ON(mt, mas.last != 0x3500);
MT_BUG_ON(mt, !mas_active(mas));
/* find:active -> active (NULL) end*/
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x3501);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
MT_BUG_ON(mt, !mas_active(mas));
/* find_rev: active (END) ->active */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
MT_BUG_ON(mt, mas.last != 0x3500);
MT_BUG_ON(mt, !mas_active(mas));
/* find_rev:active ->active */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
MT_BUG_ON(mt, !mas_active(mas));
/* find_rev: pause ->active */
mas_pause(&mas);
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_active(mas));
/* find_rev:active -> active */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0x0FFF);
MT_BUG_ON(mt, !mas_active(mas));
/* find_rev: start ->active */
mas_set(&mas, 0x1200);
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_active(mas));
/* mas_walk start ->active */
mas_set(&mas, 0x1200);
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_active(mas));
/* mas_walk start ->active */
mas_set(&mas, 0x1600);
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1fff);
MT_BUG_ON(mt, !mas_active(mas));
/* mas_walk pause ->active */
mas_set(&mas, 0x1200);
mas_pause(&mas);
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_active(mas));
/* mas_walk pause -> active */
mas_set(&mas, 0x1600);
mas_pause(&mas);
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1fff);
MT_BUG_ON(mt, !mas_active(mas));
/* mas_walk none -> active */
mas_set(&mas, 0x1200);
mas.node = MAS_NONE;
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_active(mas));
/* mas_walk none -> active */
mas_set(&mas, 0x1600);
mas.node = MAS_NONE;
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1fff);
MT_BUG_ON(mt, !mas_active(mas));
/* mas_walk active -> active */
mas.index = 0x1200;
mas.last = 0x1200;
mas.offset = 0;
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_active(mas));
/* mas_walk active -> active */
mas.index = 0x1600;
mas.last = 0x1600;
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1fff);
MT_BUG_ON(mt, !mas_active(mas));
mas_unlock(&mas);
}
static DEFINE_MTREE(tree);
static int __init maple_tree_seed(void)
{
unsigned long set[] = { 5015, 5014, 5017, 25, 1000,
1001, 1002, 1003, 1005, 0,
5003, 5002};
void *ptr = &set;
pr_info("\nTEST STARTING\n\n");
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_root_expand(&tree);
mtree_destroy(&tree);
#if defined(BENCH_SLOT_STORE)
#define BENCH
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
bench_slot_store(&tree);
mtree_destroy(&tree);
goto skip;
#endif
#if defined(BENCH_NODE_STORE)
#define BENCH
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
bench_node_store(&tree);
mtree_destroy(&tree);
goto skip;
#endif
#if defined(BENCH_AWALK)
#define BENCH
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
bench_awalk(&tree);
mtree_destroy(&tree);
goto skip;
#endif
#if defined(BENCH_WALK)
#define BENCH
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
bench_walk(&tree);
mtree_destroy(&tree);
goto skip;
#endif
#if defined(BENCH_FORK)
#define BENCH
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
bench_forking(&tree);
mtree_destroy(&tree);
goto skip;
#endif
#if defined(BENCH_MT_FOR_EACH)
#define BENCH
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
bench_mt_for_each(&tree);
mtree_destroy(&tree);
goto skip;
#endif
#if defined(BENCH_MAS_FOR_EACH)
#define BENCH
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
bench_mas_for_each(&tree);
mtree_destroy(&tree);
goto skip;
#endif
#if defined(BENCH_MAS_PREV)
#define BENCH
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
bench_mas_prev(&tree);
mtree_destroy(&tree);
goto skip;
#endif
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_iteration(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_forking(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_mas_store_gfp(&tree);
mtree_destroy(&tree);
/* Test ranges (store and insert) */
mt_init_flags(&tree, 0);
check_ranges(&tree);
mtree_destroy(&tree);
#if defined(CONFIG_64BIT)
/* These tests have ranges outside of 4GB */
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_alloc_range(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_alloc_rev_range(&tree);
mtree_destroy(&tree);
#endif
mt_init_flags(&tree, 0);
check_load(&tree, set[0], NULL); /* See if 5015 -> NULL */
check_insert(&tree, set[9], &tree); /* Insert 0 */
check_load(&tree, set[9], &tree); /* See if 0 -> &tree */
check_load(&tree, set[0], NULL); /* See if 5015 -> NULL */
check_insert(&tree, set[10], ptr); /* Insert 5003 */
check_load(&tree, set[9], &tree); /* See if 0 -> &tree */
check_load(&tree, set[11], NULL); /* See if 5002 -> NULL */
check_load(&tree, set[10], ptr); /* See if 5003 -> ptr */
/* Clear out the tree */
mtree_destroy(&tree);
/* Try to insert, insert a dup, and load back what was inserted. */
mt_init_flags(&tree, 0);
check_insert(&tree, set[0], &tree); /* Insert 5015 */
check_dup_insert(&tree, set[0], &tree); /* Insert 5015 again */
check_load(&tree, set[0], &tree); /* See if 5015 -> &tree */
/*
* Second set of tests try to load a value that doesn't exist, inserts
* a second value, then loads the value again
*/
check_load(&tree, set[1], NULL); /* See if 5014 -> NULL */
check_insert(&tree, set[1], ptr); /* insert 5014 -> ptr */
check_load(&tree, set[1], ptr); /* See if 5014 -> ptr */
check_load(&tree, set[0], &tree); /* See if 5015 -> &tree */
/*
* Tree currently contains:
* 5014 -> ptr and 5015 -> &tree; all other indices load as NULL
*/
check_insert(&tree, set[6], ptr); /* insert 1002 -> ptr */
check_insert(&tree, set[7], &tree); /* insert 1003 -> &tree */
check_load(&tree, set[0], &tree); /* See if 5015 -> &tree */
check_load(&tree, set[1], ptr); /* See if 5014 -> ptr */
check_load(&tree, set[6], ptr); /* See if 1002 -> ptr */
check_load(&tree, set[7], &tree); /* 1003 = &tree ? */
/* Clear out tree */
mtree_destroy(&tree);
mt_init_flags(&tree, 0);
/* Test inserting into a NULL hole. */
check_insert(&tree, set[5], ptr); /* insert 1001 -> ptr */
check_insert(&tree, set[7], &tree); /* insert 1003 -> &tree */
check_insert(&tree, set[6], ptr); /* insert 1002 -> ptr */
check_load(&tree, set[5], ptr); /* See if 1001 -> ptr */
check_load(&tree, set[6], ptr); /* See if 1002 -> ptr */
check_load(&tree, set[7], &tree); /* See if 1003 -> &tree */
/* Clear out the tree */
mtree_destroy(&tree);
mt_init_flags(&tree, 0);
/*
* set[] = {5015, 5014, 5017, 25, 1000,
* 1001, 1002, 1003, 1005, 0,
* 5003, 5002};
*/
check_insert(&tree, set[0], ptr); /* 5015 */
check_insert(&tree, set[1], &tree); /* 5014 */
check_insert(&tree, set[2], ptr); /* 5017 */
check_insert(&tree, set[3], &tree); /* 25 */
check_load(&tree, set[0], ptr);
check_load(&tree, set[1], &tree);
check_load(&tree, set[2], ptr);
check_load(&tree, set[3], &tree);
check_insert(&tree, set[4], ptr); /* 1000 < Should split. */
check_load(&tree, set[0], ptr);
check_load(&tree, set[1], &tree);
check_load(&tree, set[2], ptr);
check_load(&tree, set[3], &tree); /*25 */
check_load(&tree, set[4], ptr);
check_insert(&tree, set[5], &tree); /* 1001 */
check_load(&tree, set[0], ptr);
check_load(&tree, set[1], &tree);
check_load(&tree, set[2], ptr);
check_load(&tree, set[3], &tree);
check_load(&tree, set[4], ptr);
check_load(&tree, set[5], &tree);
check_insert(&tree, set[6], ptr);
check_load(&tree, set[0], ptr);
check_load(&tree, set[1], &tree);
check_load(&tree, set[2], ptr);
check_load(&tree, set[3], &tree);
check_load(&tree, set[4], ptr);
check_load(&tree, set[5], &tree);
check_load(&tree, set[6], ptr);
check_insert(&tree, set[7], &tree);
check_load(&tree, set[0], ptr);
check_insert(&tree, set[8], ptr);
check_insert(&tree, set[9], &tree);
check_load(&tree, set[0], ptr);
check_load(&tree, set[1], &tree);
check_load(&tree, set[2], ptr);
check_load(&tree, set[3], &tree);
check_load(&tree, set[4], ptr);
check_load(&tree, set[5], &tree);
check_load(&tree, set[6], ptr);
check_load(&tree, set[9], &tree);
mtree_destroy(&tree);
mt_init_flags(&tree, 0);
check_seq(&tree, 16, false);
mtree_destroy(&tree);
mt_init_flags(&tree, 0);
check_seq(&tree, 1000, true);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_rev_seq(&tree, 1000, true);
mtree_destroy(&tree);
check_lower_bound_split(&tree);
check_upper_bound_split(&tree);
check_mid_split(&tree);
mt_init_flags(&tree, 0);
check_next_entry(&tree);
check_find(&tree);
check_find_2(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_prev_entry(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_gap_combining(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_node_overwrite(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
next_prev_test(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_spanning_relatives(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_rev_find(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, 0);
check_fuzzer(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_dup(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_bnode_min_spanning(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_empty_area_window(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_empty_area_fill(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_state_handling(&tree);
mtree_destroy(&tree);
#if defined(BENCH)
skip:
#endif
rcu_barrier();
pr_info("maple_tree: %u of %u tests passed\n",
atomic_read(&maple_tree_tests_passed),
atomic_read(&maple_tree_tests_run));
if (atomic_read(&maple_tree_tests_run) ==
atomic_read(&maple_tree_tests_passed))
return 0;
return -EINVAL;
}
static void __exit maple_tree_harvest(void)
{
}
module_init(maple_tree_seed);
module_exit(maple_tree_harvest);
MODULE_AUTHOR("Liam R. Howlett <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | lib/test_maple_tree.c |
#include <linux/libfdt_env.h>
#include "../scripts/dtc/libfdt/fdt.c"
| linux-master | lib/fdt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* saved per-CPU IRQ register pointer
*
* Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/export.h>
#include <linux/percpu.h>
#include <asm/irq_regs.h>
#ifndef ARCH_HAS_OWN_IRQ_REGS
DEFINE_PER_CPU(struct pt_regs *, __irq_regs);
EXPORT_PER_CPU_SYMBOL(__irq_regs);
#endif
| linux-master | lib/irq_regs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* CRC32C
*@Article{castagnoli-crc,
* author = { Guy Castagnoli and Stefan Braeuer and Martin Herrmann},
* title = {{Optimization of Cyclic Redundancy-Check Codes with 24
* and 32 Parity Bits}},
* journal = IEEE Transactions on Communications,
* year = {1993},
* volume = {41},
* number = {6},
* pages = {},
* month = {June},
*}
* Used by the iSCSI driver, possibly others, and derived from
* the iscsi-crc.c module of the linux-iscsi driver at
* http://linux-iscsi.sourceforge.net.
*
* Following the example of lib/crc32, this function is intended to be
* flexible and useful for all users. Modules that currently have their
* own crc32c, but hopefully may be able to use this one are:
* net/sctp (please add your documentation here if you change to
* use this one!)
* <endoflist>
*
* Copyright (c) 2004 Cisco Systems, Inc.
*/
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crc32c.h>
static struct crypto_shash *tfm;
u32 crc32c(u32 crc, const void *address, unsigned int length)
{
SHASH_DESC_ON_STACK(shash, tfm);
u32 ret, *ctx = (u32 *)shash_desc_ctx(shash);
int err;
shash->tfm = tfm;
*ctx = crc;
err = crypto_shash_update(shash, address, length);
BUG_ON(err);
ret = *ctx;
barrier_data(ctx);
return ret;
}
EXPORT_SYMBOL(crc32c);
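/*
 * Illustrative usage sketch, not part of the original file: callers pass a
 * seed and a buffer.  Whether the seed is 0 or ~0, and whether the result
 * is inverted, depends on the on-wire format being implemented; the ~0
 * convention below is only an example, and "crc32c_oneshot_example" is a
 * made-up name.
 */
static inline u32 crc32c_oneshot_example(const void *buf, unsigned int len)
{
	return crc32c(~0, buf, len) ^ ~0;
}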
static int __init libcrc32c_mod_init(void)
{
tfm = crypto_alloc_shash("crc32c", 0, 0);
return PTR_ERR_OR_ZERO(tfm);
}
static void __exit libcrc32c_mod_fini(void)
{
crypto_free_shash(tfm);
}
module_init(libcrc32c_mod_init);
module_exit(libcrc32c_mod_fini);
MODULE_AUTHOR("Clay Haapala <[email protected]>");
MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32c");
| linux-master | lib/libcrc32c.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* test_free_pages.c: Check that free_pages() doesn't leak memory
* Copyright (c) 2020 Oracle
* Author: Matthew Wilcox <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/module.h>
static void test_free_pages(gfp_t gfp)
{
unsigned int i;
for (i = 0; i < 1000 * 1000; i++) {
unsigned long addr = __get_free_pages(gfp, 3);
struct page *page = virt_to_page((void *)addr);
/* Simulate page cache getting a speculative reference */
get_page(page);
free_pages(addr, 3);
put_page(page);
}
}
static int m_in(void)
{
pr_info("Testing with GFP_KERNEL\n");
test_free_pages(GFP_KERNEL);
pr_info("Testing with GFP_KERNEL | __GFP_COMP\n");
test_free_pages(GFP_KERNEL | __GFP_COMP);
pr_info("Test completed\n");
return 0;
}
static void m_ex(void)
{
}
module_init(m_in);
module_exit(m_ex);
MODULE_AUTHOR("Matthew Wilcox <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | lib/test_free_pages.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* A generic version of devmem_is_allowed.
*
* Based on arch/arm64/mm/mmap.c
*
* Copyright (C) 2020 Google, Inc.
* Copyright (C) 2012 ARM Ltd.
*/
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/io.h>
/*
* devmem_is_allowed() checks to see if /dev/mem access to a certain address
* is valid. The argument is a physical page number. We mimic x86 here by
* disallowing access to system RAM as well as device-exclusive MMIO regions.
* This effectively disables read()/write() on /dev/mem.
*/
int devmem_is_allowed(unsigned long pfn)
{
if (iomem_is_exclusive(PFN_PHYS(pfn)))
return 0;
if (!page_is_ram(pfn))
return 1;
return 0;
}
| linux-master | lib/devmem_is_allowed.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Important notes about in-place decompression
*
* At least on x86, the kernel is decompressed in place: the compressed data
* is placed to the end of the output buffer, and the decompressor overwrites
* most of the compressed data. There must be enough safety margin to
* guarantee that the write position is always behind the read position.
*
* The safety margin for ZSTD with a 128 KB block size is calculated below.
* Note that the margin with ZSTD is bigger than with GZIP or XZ!
*
* The worst case for in-place decompression is that the beginning of
* the file is compressed extremely well, and the rest of the file is
* incompressible. Thus, we must look for worst-case expansion when the
* compressor is encoding incompressible data.
*
* The structure of the .zst file in case of a compressed kernel is as follows.
* Maximum sizes (as bytes) of the fields are in parenthesis.
*
* Frame Header: (18)
* Blocks: (N)
* Checksum: (4)
*
* The frame header and checksum overhead is at most 22 bytes.
*
* ZSTD stores the data in blocks. Each block has a header whose size is
* 3 bytes. After the block header, there is up to 128 KB of payload.
* The maximum uncompressed size of the payload is 128 KB. The minimum
* uncompressed size of the payload is never less than the payload size
* (excluding the block header).
*
* The assumption, that the uncompressed size of the payload is never
* smaller than the payload itself, is valid only when talking about
* the payload as a whole. It is possible that the payload has parts where
* the decompressor consumes more input than it produces output. Calculating
* the worst case for this would be tricky. Instead of trying to do that,
* let's simply make sure that the decompressor never overwrites any bytes
* of the payload which it is currently reading.
*
* Now we have enough information to calculate the safety margin. We need
* - 22 bytes for the .zst file format headers;
* - 3 bytes per every 128 KiB of uncompressed size (one block header per
* block); and
* - 128 KiB (biggest possible zstd block size) to make sure that the
* decompressor never overwrites anything from the block it is currently
* reading.
*
* We get the following formula:
*
* safety_margin = 22 + uncompressed_size * 3 / 131072 + 131072
* <= 22 + (uncompressed_size >> 15) + 131072
*/
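/*
 * Illustrative sketch, not part of the original file: the bound derived
 * above written out as a helper.  22 bytes of frame and checksum overhead,
 * one 3-byte block header per 128 KiB of output (over-approximated by the
 * >> 15, exactly as in the comment), plus one maximum-sized 128 KiB block.
 * The name "zstd_inplace_safety_margin" is made up for this sketch.
 */
static inline unsigned long zstd_inplace_safety_margin(unsigned long uncompressed_size)
{
	return 22 + (uncompressed_size >> 15) + 131072;
}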
/*
* Preboot environments #include "path/to/decompress_unzstd.c".
* All of the source files we depend on must be #included.
* zstd's only source dependency is xxhash, which has no source
* dependencies.
*
* When UNZSTD_PREBOOT is defined we declare __decompress(), which is
* used for kernel decompression, instead of unzstd().
*
* Define __DISABLE_EXPORTS in preboot environments to prevent symbols
* from xxhash and zstd from being exported by the EXPORT_SYMBOL macro.
*/
#ifdef STATIC
# define UNZSTD_PREBOOT
# include "xxhash.c"
# include "zstd/decompress_sources.h"
#else
#include <linux/decompress/unzstd.h>
#endif
#include <linux/decompress/mm.h>
#include <linux/kernel.h>
#include <linux/zstd.h>
/* 128MB is the maximum window size supported by zstd. */
#define ZSTD_WINDOWSIZE_MAX (1 << ZSTD_WINDOWLOG_MAX)
/*
* Size of the input and output buffers in multi-call mode.
* Pick a fairly large size because this path isn't used during kernel
* decompression, which is single pass and has to allocate a large buffer
* for zstd's window anyway. The larger size speeds up initramfs
* decompression.
*/
#define ZSTD_IOBUF_SIZE (1 << 17)
static int INIT handle_zstd_error(size_t ret, void (*error)(char *x))
{
const zstd_error_code err = zstd_get_error_code(ret);
if (!zstd_is_error(ret))
return 0;
/*
* zstd_get_error_name() cannot be used because error takes a char *
* not a const char *
*/
switch (err) {
case ZSTD_error_memory_allocation:
error("ZSTD decompressor ran out of memory");
break;
case ZSTD_error_prefix_unknown:
error("Input is not in the ZSTD format (wrong magic bytes)");
break;
case ZSTD_error_dstSize_tooSmall:
case ZSTD_error_corruption_detected:
case ZSTD_error_checksum_wrong:
error("ZSTD-compressed data is corrupt");
break;
default:
error("ZSTD-compressed data is probably corrupt");
break;
}
return -1;
}
/*
* Handle the case where we have the entire input and output in one segment.
* We can allocate less memory (no circular buffer for the sliding window),
* and avoid some memcpy() calls.
*/
static int INIT decompress_single(const u8 *in_buf, long in_len, u8 *out_buf,
long out_len, long *in_pos,
void (*error)(char *x))
{
const size_t wksp_size = zstd_dctx_workspace_bound();
void *wksp = large_malloc(wksp_size);
zstd_dctx *dctx = zstd_init_dctx(wksp, wksp_size);
int err;
size_t ret;
if (dctx == NULL) {
error("Out of memory while allocating zstd_dctx");
err = -1;
goto out;
}
/*
* Find out how large the frame actually is, there may be junk at
* the end of the frame that zstd_decompress_dctx() can't handle.
*/
ret = zstd_find_frame_compressed_size(in_buf, in_len);
err = handle_zstd_error(ret, error);
if (err)
goto out;
in_len = (long)ret;
ret = zstd_decompress_dctx(dctx, out_buf, out_len, in_buf, in_len);
err = handle_zstd_error(ret, error);
if (err)
goto out;
if (in_pos != NULL)
*in_pos = in_len;
err = 0;
out:
if (wksp != NULL)
large_free(wksp);
return err;
}
static int INIT __unzstd(unsigned char *in_buf, long in_len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *out_buf, long out_len,
long *in_pos,
void (*error)(char *x))
{
zstd_in_buffer in;
zstd_out_buffer out;
zstd_frame_header header;
void *in_allocated = NULL;
void *out_allocated = NULL;
void *wksp = NULL;
size_t wksp_size;
zstd_dstream *dstream;
int err;
size_t ret;
/*
* ZSTD decompression code won't be happy if the buffer size is so big
* that its end address overflows. When the size is not provided, make
* it as big as possible without having the end address overflow.
*/
if (out_len == 0)
out_len = UINTPTR_MAX - (uintptr_t)out_buf;
if (fill == NULL && flush == NULL)
/*
* We can decompress faster and with less memory when we have a
* single chunk.
*/
return decompress_single(in_buf, in_len, out_buf, out_len,
in_pos, error);
/*
* If in_buf is not provided, we must be using fill(), so allocate
* a large enough buffer. If it is provided, it must be at least
* ZSTD_IOBUF_SIZE large.
*/
if (in_buf == NULL) {
in_allocated = large_malloc(ZSTD_IOBUF_SIZE);
if (in_allocated == NULL) {
error("Out of memory while allocating input buffer");
err = -1;
goto out;
}
in_buf = in_allocated;
in_len = 0;
}
/* Read the first chunk, since we need to decode the frame header. */
if (fill != NULL)
in_len = fill(in_buf, ZSTD_IOBUF_SIZE);
if (in_len < 0) {
error("ZSTD-compressed data is truncated");
err = -1;
goto out;
}
/* Set the first non-empty input buffer. */
in.src = in_buf;
in.pos = 0;
in.size = in_len;
/* Allocate the output buffer if we are using flush(). */
if (flush != NULL) {
out_allocated = large_malloc(ZSTD_IOBUF_SIZE);
if (out_allocated == NULL) {
error("Out of memory while allocating output buffer");
err = -1;
goto out;
}
out_buf = out_allocated;
out_len = ZSTD_IOBUF_SIZE;
}
/* Set the output buffer. */
out.dst = out_buf;
out.pos = 0;
out.size = out_len;
/*
* We need to know the window size to allocate the zstd_dstream.
* Since we are streaming, we need to allocate a buffer for the sliding
* window. The window size varies from 1 KB to ZSTD_WINDOWSIZE_MAX
* (128 MB), so it is important to use the actual value so as not to
* waste memory when it is smaller.
*/
ret = zstd_get_frame_header(&header, in.src, in.size);
err = handle_zstd_error(ret, error);
if (err)
goto out;
if (ret != 0) {
error("ZSTD-compressed data has an incomplete frame header");
err = -1;
goto out;
}
if (header.windowSize > ZSTD_WINDOWSIZE_MAX) {
error("ZSTD-compressed data has too large a window size");
err = -1;
goto out;
}
/*
* Allocate the zstd_dstream now that we know how much memory is
* required.
*/
wksp_size = zstd_dstream_workspace_bound(header.windowSize);
wksp = large_malloc(wksp_size);
dstream = zstd_init_dstream(header.windowSize, wksp, wksp_size);
if (dstream == NULL) {
error("Out of memory while allocating ZSTD_DStream");
err = -1;
goto out;
}
/*
* Decompression loop:
* Read more data if necessary (error if no more data can be read).
* Call the decompression function, which returns 0 when finished.
* Flush any data produced if using flush().
*/
if (in_pos != NULL)
*in_pos = 0;
do {
/*
* If we need to reload data, either we have fill() and can
* try to get more data, or we don't and the input is truncated.
*/
if (in.pos == in.size) {
if (in_pos != NULL)
*in_pos += in.pos;
in_len = fill ? fill(in_buf, ZSTD_IOBUF_SIZE) : -1;
if (in_len < 0) {
error("ZSTD-compressed data is truncated");
err = -1;
goto out;
}
in.pos = 0;
in.size = in_len;
}
/* Returns zero when the frame is complete. */
ret = zstd_decompress_stream(dstream, &out, &in);
err = handle_zstd_error(ret, error);
if (err)
goto out;
/* Flush all of the data produced if using flush(). */
if (flush != NULL && out.pos > 0) {
if (out.pos != flush(out.dst, out.pos)) {
error("Failed to flush()");
err = -1;
goto out;
}
out.pos = 0;
}
} while (ret != 0);
if (in_pos != NULL)
*in_pos += in.pos;
err = 0;
out:
if (in_allocated != NULL)
large_free(in_allocated);
if (out_allocated != NULL)
large_free(out_allocated);
if (wksp != NULL)
large_free(wksp);
return err;
}
#ifndef UNZSTD_PREBOOT
STATIC int INIT unzstd(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *out_buf,
long *pos,
void (*error)(char *x))
{
return __unzstd(buf, len, fill, flush, out_buf, 0, pos, error);
}
#else
STATIC int INIT __decompress(unsigned char *buf, long len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *out_buf, long out_len,
long *pos,
void (*error)(char *x))
{
return __unzstd(buf, len, fill, flush, out_buf, out_len, pos, error);
}
#endif
| linux-master | lib/decompress_unzstd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Derived from arch/ppc/mm/extable.c and arch/i386/mm/extable.c.
*
* Copyright (C) 2004 Paul Mackerras, IBM Corp.
*/
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
#include <linux/extable.h>
#ifndef ARCH_HAS_RELATIVE_EXTABLE
#define ex_to_insn(x) ((x)->insn)
#else
static inline unsigned long ex_to_insn(const struct exception_table_entry *x)
{
return (unsigned long)&x->insn + x->insn;
}
#endif
#ifndef ARCH_HAS_RELATIVE_EXTABLE
#define swap_ex NULL
#else
static void swap_ex(void *a, void *b, int size)
{
struct exception_table_entry *x = a, *y = b, tmp;
int delta = b - a;
tmp = *x;
x->insn = y->insn + delta;
y->insn = tmp.insn - delta;
#ifdef swap_ex_entry_fixup
swap_ex_entry_fixup(x, y, tmp, delta);
#else
x->fixup = y->fixup + delta;
y->fixup = tmp.fixup - delta;
#endif
}
#endif /* ARCH_HAS_RELATIVE_EXTABLE */
/*
* The exception table needs to be sorted so that the binary
* search that we use to find entries in it works properly.
* This is used both for the kernel exception table and for
* the exception tables of modules that get loaded.
*/
static int cmp_ex_sort(const void *a, const void *b)
{
const struct exception_table_entry *x = a, *y = b;
/* avoid overflow */
if (ex_to_insn(x) > ex_to_insn(y))
return 1;
if (ex_to_insn(x) < ex_to_insn(y))
return -1;
return 0;
}
void sort_extable(struct exception_table_entry *start,
struct exception_table_entry *finish)
{
sort(start, finish - start, sizeof(struct exception_table_entry),
cmp_ex_sort, swap_ex);
}
#ifdef CONFIG_MODULES
/*
* If the exception table is sorted, any entries referring to the module init
* will be at the beginning or the end.
*/
void trim_init_extable(struct module *m)
{
/* Trim the beginning */
while (m->num_exentries &&
within_module_init(ex_to_insn(&m->extable[0]), m)) {
m->extable++;
m->num_exentries--;
}
/* Trim the end */
while (m->num_exentries &&
within_module_init(ex_to_insn(&m->extable[m->num_exentries - 1]),
m))
m->num_exentries--;
}
#endif /* CONFIG_MODULES */
static int cmp_ex_search(const void *key, const void *elt)
{
const struct exception_table_entry *_elt = elt;
unsigned long _key = *(unsigned long *)key;
/* avoid overflow */
if (_key > ex_to_insn(_elt))
return 1;
if (_key < ex_to_insn(_elt))
return -1;
return 0;
}
/*
* Search one exception table for an entry corresponding to the
* given instruction address, and return the address of the entry,
* or NULL if none is found.
* We use a binary search, and thus we assume that the table is
* already sorted.
*/
const struct exception_table_entry *
search_extable(const struct exception_table_entry *base,
const size_t num,
unsigned long value)
{
return bsearch(&value, base, num,
sizeof(struct exception_table_entry), cmp_ex_search);
}
| linux-master | lib/extable.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
/* validate @native and @pcp counter values match @expected */
#define CHECK(native, pcp, expected) \
do { \
WARN((native) != (expected), \
"raw %ld (0x%lx) != expected %lld (0x%llx)", \
(native), (native), \
(long long)(expected), (long long)(expected)); \
WARN(__this_cpu_read(pcp) != (expected), \
"pcp %ld (0x%lx) != expected %lld (0x%llx)", \
__this_cpu_read(pcp), __this_cpu_read(pcp), \
(long long)(expected), (long long)(expected)); \
} while (0)
static DEFINE_PER_CPU(long, long_counter);
static DEFINE_PER_CPU(unsigned long, ulong_counter);
static int __init percpu_test_init(void)
{
/*
* volatile prevents the compiler from optimizing its uses; otherwise the
* +ul_one/-ul_one operations below would be replaced with inc/dec instructions.
*/
volatile unsigned int ui_one = 1;
long l = 0;
unsigned long ul = 0;
pr_info("percpu test start\n");
preempt_disable();
l += -1;
__this_cpu_add(long_counter, -1);
CHECK(l, long_counter, -1);
l += 1;
__this_cpu_add(long_counter, 1);
CHECK(l, long_counter, 0);
ul = 0;
__this_cpu_write(ulong_counter, 0);
ul += 1UL;
__this_cpu_add(ulong_counter, 1UL);
CHECK(ul, ulong_counter, 1);
ul += -1UL;
__this_cpu_add(ulong_counter, -1UL);
CHECK(ul, ulong_counter, 0);
ul += -(unsigned long)1;
__this_cpu_add(ulong_counter, -(unsigned long)1);
CHECK(ul, ulong_counter, -1);
ul = 0;
__this_cpu_write(ulong_counter, 0);
ul -= 1;
__this_cpu_dec(ulong_counter);
CHECK(ul, ulong_counter, -1);
CHECK(ul, ulong_counter, ULONG_MAX);
l += -ui_one;
__this_cpu_add(long_counter, -ui_one);
CHECK(l, long_counter, 0xffffffff);
l += ui_one;
__this_cpu_add(long_counter, ui_one);
CHECK(l, long_counter, (long)0x100000000LL);
l = 0;
__this_cpu_write(long_counter, 0);
l -= ui_one;
__this_cpu_sub(long_counter, ui_one);
CHECK(l, long_counter, -1);
l = 0;
__this_cpu_write(long_counter, 0);
l += ui_one;
__this_cpu_add(long_counter, ui_one);
CHECK(l, long_counter, 1);
l += -ui_one;
__this_cpu_add(long_counter, -ui_one);
CHECK(l, long_counter, (long)0x100000000LL);
l = 0;
__this_cpu_write(long_counter, 0);
l -= ui_one;
this_cpu_sub(long_counter, ui_one);
CHECK(l, long_counter, -1);
CHECK(l, long_counter, ULONG_MAX);
ul = 0;
__this_cpu_write(ulong_counter, 0);
ul += ui_one;
__this_cpu_add(ulong_counter, ui_one);
CHECK(ul, ulong_counter, 1);
ul = 0;
__this_cpu_write(ulong_counter, 0);
ul -= ui_one;
__this_cpu_sub(ulong_counter, ui_one);
CHECK(ul, ulong_counter, -1);
CHECK(ul, ulong_counter, ULONG_MAX);
ul = 3;
__this_cpu_write(ulong_counter, 3);
ul = this_cpu_sub_return(ulong_counter, ui_one);
CHECK(ul, ulong_counter, 2);
ul = __this_cpu_sub_return(ulong_counter, ui_one);
CHECK(ul, ulong_counter, 1);
preempt_enable();
pr_info("percpu test done\n");
return -EAGAIN; /* Fail will directly unload the module */
}
static void __exit percpu_test_exit(void)
{
}
module_init(percpu_test_init)
module_exit(percpu_test_exit)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Greg Thelen");
MODULE_DESCRIPTION("percpu operations test");
| linux-master | lib/percpu_test.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ratelimit.c - Rate limiting helpers used to throttle noisy, repeated callbacks.
*
* Isolated from kernel/printk.c by Dave Young <[email protected]>
*
* 2008-05-01 rewrite the function and use a ratelimit_state data struct as
* parameter. Now every user can use their own standalone ratelimit_state.
*/
#include <linux/ratelimit.h>
#include <linux/jiffies.h>
#include <linux/export.h>
/*
* __ratelimit - rate limiting
* @rs: ratelimit_state data
* @func: name of calling function
*
* This enforces a rate limit: not more than @rs->burst callbacks
* in every @rs->interval
*
* RETURNS:
* 0 means callbacks will be suppressed.
* 1 means go ahead and do it.
*/
int ___ratelimit(struct ratelimit_state *rs, const char *func)
{
/* Paired with WRITE_ONCE() in .proc_handler().
* Changing the two values separately could be inconsistent
* and some messages could be lost. (See: net_ratelimit_state).
*/
int interval = READ_ONCE(rs->interval);
int burst = READ_ONCE(rs->burst);
unsigned long flags;
int ret;
if (!interval)
return 1;
/*
* If we contend on this state's lock then almost
* by definition we are too busy to print a message,
* in addition to the one that will be printed by
* the entity that is holding the lock already:
*/
if (!raw_spin_trylock_irqsave(&rs->lock, flags))
return 0;
if (!rs->begin)
rs->begin = jiffies;
if (time_is_before_jiffies(rs->begin + interval)) {
if (rs->missed) {
if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
printk_deferred(KERN_WARNING
"%s: %d callbacks suppressed\n",
func, rs->missed);
rs->missed = 0;
}
}
rs->begin = jiffies;
rs->printed = 0;
}
if (burst && burst > rs->printed) {
rs->printed++;
ret = 1;
} else {
rs->missed++;
ret = 0;
}
raw_spin_unlock_irqrestore(&rs->lock, flags);
return ret;
}
EXPORT_SYMBOL(___ratelimit);
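/*
 * Illustrative usage sketch, not part of the original file: a caller
 * declares its own ratelimit_state (here: at most 10 messages every 5
 * seconds) and gates noisy output on __ratelimit().  The identifiers
 * "example_rs" and "example_report" are made up for this sketch.
 */
static DEFINE_RATELIMIT_STATE(example_rs, 5 * HZ, 10);

static __maybe_unused void example_report(void)
{
	if (__ratelimit(&example_rs))
		printk(KERN_WARNING "example: noisy condition hit\n");
}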
| linux-master | lib/ratelimit.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Test cases for compiler-based stack variable zeroing via
* -ftrivial-auto-var-init={zero,pattern} or CONFIG_GCC_PLUGIN_STRUCTLEAK*.
* For example, see:
* "Running tests with kunit_tool" at Documentation/dev-tools/kunit/start.rst
* ./tools/testing/kunit/kunit.py run stackinit [--raw_output] \
* --make_option LLVM=1 \
* --kconfig_add CONFIG_INIT_STACK_ALL_ZERO=y
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <kunit/test.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
/* Exfiltration buffer. */
#define MAX_VAR_SIZE 128
static u8 check_buf[MAX_VAR_SIZE];
/* Character array to trigger stack protector in all functions. */
#define VAR_BUFFER 32
/* Volatile mask to convince compiler to copy memory with 0xff. */
static volatile u8 forced_mask = 0xff;
/* Location and size tracking to validate fill and test are colocated. */
static void *fill_start, *target_start;
static size_t fill_size, target_size;
static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
char *needle_start, size_t needle_size)
{
if (needle_start >= haystack_start &&
needle_start + needle_size <= haystack_start + haystack_size)
return true;
return false;
}
/* Whether the test is expected to fail. */
#define WANT_SUCCESS 0
#define XFAIL 1
#define DO_NOTHING_TYPE_SCALAR(var_type) var_type
#define DO_NOTHING_TYPE_STRING(var_type) void
#define DO_NOTHING_TYPE_STRUCT(var_type) void
#define DO_NOTHING_RETURN_SCALAR(ptr) *(ptr)
#define DO_NOTHING_RETURN_STRING(ptr) /**/
#define DO_NOTHING_RETURN_STRUCT(ptr) /**/
#define DO_NOTHING_CALL_SCALAR(var, name) \
(var) = do_nothing_ ## name(&(var))
#define DO_NOTHING_CALL_STRING(var, name) \
do_nothing_ ## name(var)
#define DO_NOTHING_CALL_STRUCT(var, name) \
do_nothing_ ## name(&(var))
#define FETCH_ARG_SCALAR(var) &var
#define FETCH_ARG_STRING(var) var
#define FETCH_ARG_STRUCT(var) &var
#define FILL_SIZE_STRING 16
#define INIT_CLONE_SCALAR /**/
#define INIT_CLONE_STRING [FILL_SIZE_STRING]
#define INIT_CLONE_STRUCT /**/
#define ZERO_CLONE_SCALAR(zero) memset(&(zero), 0x00, sizeof(zero))
#define ZERO_CLONE_STRING(zero) memset(&(zero), 0x00, sizeof(zero))
/*
* For the struct, intentionally poison padding to see if it gets
* copied out in direct assignments.
*/
#define ZERO_CLONE_STRUCT(zero) \
do { \
memset(&(zero), 0xFF, sizeof(zero)); \
zero.one = 0; \
zero.two = 0; \
zero.three = 0; \
zero.four = 0; \
} while (0)
#define INIT_SCALAR_none(var_type) /**/
#define INIT_SCALAR_zero(var_type) = 0
#define INIT_STRING_none(var_type) [FILL_SIZE_STRING] /**/
#define INIT_STRING_zero(var_type) [FILL_SIZE_STRING] = { }
#define INIT_STRUCT_none(var_type) /**/
#define INIT_STRUCT_zero(var_type) = { }
#define __static_partial { .two = 0, }
#define __static_all { .one = 0, \
.two = 0, \
.three = 0, \
.four = 0, \
}
#define __dynamic_partial { .two = arg->two, }
#define __dynamic_all { .one = arg->one, \
.two = arg->two, \
.three = arg->three, \
.four = arg->four, \
}
#define __runtime_partial var.two = 0
#define __runtime_all var.one = 0; \
var.two = 0; \
var.three = 0; \
var.four = 0
#define INIT_STRUCT_static_partial(var_type) \
= __static_partial
#define INIT_STRUCT_static_all(var_type) \
= __static_all
#define INIT_STRUCT_dynamic_partial(var_type) \
= __dynamic_partial
#define INIT_STRUCT_dynamic_all(var_type) \
= __dynamic_all
#define INIT_STRUCT_runtime_partial(var_type) \
; __runtime_partial
#define INIT_STRUCT_runtime_all(var_type) \
; __runtime_all
#define INIT_STRUCT_assigned_static_partial(var_type) \
; var = (var_type)__static_partial
#define INIT_STRUCT_assigned_static_all(var_type) \
; var = (var_type)__static_all
#define INIT_STRUCT_assigned_dynamic_partial(var_type) \
; var = (var_type)__dynamic_partial
#define INIT_STRUCT_assigned_dynamic_all(var_type) \
; var = (var_type)__dynamic_all
#define INIT_STRUCT_assigned_copy(var_type) \
; var = *(arg)
/*
* @name: unique string name for the test
* @var_type: type to be tested for zeroing initialization
* @which: is this a SCALAR, STRING, or STRUCT type?
* @init_level: what kind of initialization is performed
* @xfail: is this test expected to fail?
*/
#define DEFINE_TEST_DRIVER(name, var_type, which, xfail) \
/* Test body; failures are reported via KUnit assertions. */ \
static noinline void test_ ## name (struct kunit *test) \
{ \
var_type zero INIT_CLONE_ ## which; \
int ignored; \
u8 sum = 0, i; \
\
/* Notice when a new test is larger than expected. */ \
BUILD_BUG_ON(sizeof(zero) > MAX_VAR_SIZE); \
\
/* Fill clone type with zero for per-field init. */ \
ZERO_CLONE_ ## which(zero); \
/* Clear entire check buffer for 0xFF overlap test. */ \
memset(check_buf, 0x00, sizeof(check_buf)); \
/* Fill stack with 0xFF. */ \
ignored = leaf_ ##name((unsigned long)&ignored, 1, \
FETCH_ARG_ ## which(zero)); \
/* Verify all bytes overwritten with 0xFF. */ \
for (sum = 0, i = 0; i < target_size; i++) \
sum += (check_buf[i] != 0xFF); \
KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
"leaf fill was not 0xFF!?\n"); \
/* Clear entire check buffer for later bit tests. */ \
memset(check_buf, 0x00, sizeof(check_buf)); \
/* Extract stack-defined variable contents. */ \
ignored = leaf_ ##name((unsigned long)&ignored, 0, \
FETCH_ARG_ ## which(zero)); \
\
/* Validate that compiler lined up fill and target. */ \
KUNIT_ASSERT_TRUE_MSG(test, \
stackinit_range_contains(fill_start, fill_size, \
target_start, target_size), \
"stack fill missed target!? " \
"(fill %zu wide, target offset by %d)\n", \
fill_size, \
(int)((ssize_t)(uintptr_t)fill_start - \
(ssize_t)(uintptr_t)target_start)); \
\
/* Look for any bytes still 0xFF in check region. */ \
for (sum = 0, i = 0; i < target_size; i++) \
sum += (check_buf[i] == 0xFF); \
\
if (sum != 0 && xfail) \
kunit_skip(test, \
"XFAIL uninit bytes: %d\n", \
sum); \
KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
"uninit bytes: %d\n", sum); \
}
#define DEFINE_TEST(name, var_type, which, init_level, xfail) \
/* no-op to force compiler into ignoring "uninitialized" vars */\
static noinline DO_NOTHING_TYPE_ ## which(var_type) \
do_nothing_ ## name(var_type *ptr) \
{ \
/* Will always be true, but compiler doesn't know. */ \
if ((unsigned long)ptr > 0x2) \
return DO_NOTHING_RETURN_ ## which(ptr); \
else \
return DO_NOTHING_RETURN_ ## which(ptr + 1); \
} \
static noinline int leaf_ ## name(unsigned long sp, bool fill, \
var_type *arg) \
{ \
char buf[VAR_BUFFER]; \
var_type var \
INIT_ ## which ## _ ## init_level(var_type); \
\
target_start = &var; \
target_size = sizeof(var); \
/* \
* Keep this buffer around to make sure we've got a \
* stack frame of SOME kind... \
*/ \
memset(buf, (char)(sp & 0xff), sizeof(buf)); \
/* Fill variable with 0xFF. */ \
if (fill) { \
fill_start = &var; \
fill_size = sizeof(var); \
memset(fill_start, \
(char)((sp & 0xff) | forced_mask), \
fill_size); \
} \
\
/* Silence "never initialized" warnings. */ \
DO_NOTHING_CALL_ ## which(var, name); \
\
/* Exfiltrate "var". */ \
memcpy(check_buf, target_start, target_size); \
\
return (int)buf[0] | (int)buf[sizeof(buf) - 1]; \
} \
DEFINE_TEST_DRIVER(name, var_type, which, xfail)
/* Structure with no padding. */
struct test_packed {
unsigned long one;
unsigned long two;
unsigned long three;
unsigned long four;
};
/* Simple structure with padding likely to be covered by compiler. */
struct test_small_hole {
size_t one;
char two;
/* 3 byte padding hole here. */
int three;
unsigned long four;
};
/* Trigger unhandled padding in a structure. */
struct test_big_hole {
u8 one;
u8 two;
u8 three;
/* 61 byte padding hole here. */
u8 four __aligned(64);
} __aligned(64);
struct test_trailing_hole {
char *one;
char *two;
char *three;
char four;
/* "sizeof(unsigned long) - 1" byte padding hole here. */
};
/* Test if STRUCTLEAK is clearing structs with __user fields. */
struct test_user {
u8 one;
unsigned long two;
char __user *three;
unsigned long four;
};
#define ALWAYS_PASS WANT_SUCCESS
#define ALWAYS_FAIL XFAIL
#ifdef CONFIG_INIT_STACK_NONE
# define USER_PASS XFAIL
# define BYREF_PASS XFAIL
# define STRONG_PASS XFAIL
#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER)
# define USER_PASS WANT_SUCCESS
# define BYREF_PASS XFAIL
# define STRONG_PASS XFAIL
#elif defined(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)
# define USER_PASS WANT_SUCCESS
# define BYREF_PASS WANT_SUCCESS
# define STRONG_PASS XFAIL
#else
# define USER_PASS WANT_SUCCESS
# define BYREF_PASS WANT_SUCCESS
# define STRONG_PASS WANT_SUCCESS
#endif
#define DEFINE_SCALAR_TEST(name, init, xfail) \
DEFINE_TEST(name ## _ ## init, name, SCALAR, \
init, xfail)
#define DEFINE_SCALAR_TESTS(init, xfail) \
DEFINE_SCALAR_TEST(u8, init, xfail); \
DEFINE_SCALAR_TEST(u16, init, xfail); \
DEFINE_SCALAR_TEST(u32, init, xfail); \
DEFINE_SCALAR_TEST(u64, init, xfail); \
DEFINE_TEST(char_array_ ## init, unsigned char, \
STRING, init, xfail)
#define DEFINE_STRUCT_TEST(name, init, xfail) \
DEFINE_TEST(name ## _ ## init, \
struct test_ ## name, STRUCT, init, \
xfail)
#define DEFINE_STRUCT_TESTS(init, xfail) \
DEFINE_STRUCT_TEST(small_hole, init, xfail); \
DEFINE_STRUCT_TEST(big_hole, init, xfail); \
DEFINE_STRUCT_TEST(trailing_hole, init, xfail); \
DEFINE_STRUCT_TEST(packed, init, xfail)
#define DEFINE_STRUCT_INITIALIZER_TESTS(base, xfail) \
DEFINE_STRUCT_TESTS(base ## _ ## partial, \
xfail); \
DEFINE_STRUCT_TESTS(base ## _ ## all, xfail)
/* These should be fully initialized all the time! */
DEFINE_SCALAR_TESTS(zero, ALWAYS_PASS);
DEFINE_STRUCT_TESTS(zero, ALWAYS_PASS);
/* Struct initializers: padding may be left uninitialized. */
DEFINE_STRUCT_INITIALIZER_TESTS(static, STRONG_PASS);
DEFINE_STRUCT_INITIALIZER_TESTS(dynamic, STRONG_PASS);
DEFINE_STRUCT_INITIALIZER_TESTS(runtime, STRONG_PASS);
DEFINE_STRUCT_INITIALIZER_TESTS(assigned_static, STRONG_PASS);
DEFINE_STRUCT_INITIALIZER_TESTS(assigned_dynamic, STRONG_PASS);
DEFINE_STRUCT_TESTS(assigned_copy, ALWAYS_FAIL);
/* No initialization without compiler instrumentation. */
DEFINE_SCALAR_TESTS(none, STRONG_PASS);
DEFINE_STRUCT_TESTS(none, BYREF_PASS);
/* Initialization of members with __user attribute. */
DEFINE_TEST(user, struct test_user, STRUCT, none, USER_PASS);
/*
* Check two uses through a variable declaration outside either path,
* which was noticed as a special case in porting earlier stack init
* compiler logic.
*/
static int noinline __leaf_switch_none(int path, bool fill)
{
switch (path) {
/*
* This is intentionally unreachable. To silence the
* warning, build with -Wno-switch-unreachable
*/
uint64_t var[10];
case 1:
target_start = &var;
target_size = sizeof(var);
if (fill) {
fill_start = &var;
fill_size = sizeof(var);
memset(fill_start, forced_mask | 0x55, fill_size);
}
memcpy(check_buf, target_start, target_size);
break;
case 2:
target_start = &var;
target_size = sizeof(var);
if (fill) {
fill_start = &var;
fill_size = sizeof(var);
memset(fill_start, forced_mask | 0xaa, fill_size);
}
memcpy(check_buf, target_start, target_size);
break;
default:
var[1] = 5;
return var[1] & forced_mask;
}
return 0;
}
static noinline int leaf_switch_1_none(unsigned long sp, bool fill,
uint64_t *arg)
{
return __leaf_switch_none(1, fill);
}
static noinline int leaf_switch_2_none(unsigned long sp, bool fill,
uint64_t *arg)
{
return __leaf_switch_none(2, fill);
}
/*
* These are expected to fail for most configurations because neither
 * GCC nor Clang has a way to perform initialization of variables in
* non-code areas (i.e. in a switch statement before the first "case").
* https://bugs.llvm.org/show_bug.cgi?id=44916
*/
DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, ALWAYS_FAIL);
DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, ALWAYS_FAIL);
#define KUNIT_test_scalars(init) \
KUNIT_CASE(test_u8_ ## init), \
KUNIT_CASE(test_u16_ ## init), \
KUNIT_CASE(test_u32_ ## init), \
KUNIT_CASE(test_u64_ ## init), \
KUNIT_CASE(test_char_array_ ## init)
#define KUNIT_test_structs(init) \
KUNIT_CASE(test_small_hole_ ## init), \
KUNIT_CASE(test_big_hole_ ## init), \
KUNIT_CASE(test_trailing_hole_ ## init),\
KUNIT_CASE(test_packed_ ## init) \
static struct kunit_case stackinit_test_cases[] = {
/* These are explicitly initialized and should always pass. */
KUNIT_test_scalars(zero),
KUNIT_test_structs(zero),
/* Padding here appears to be accidentally always initialized? */
KUNIT_test_structs(dynamic_partial),
KUNIT_test_structs(assigned_dynamic_partial),
/* Padding initialization depends on compiler behaviors. */
KUNIT_test_structs(static_partial),
KUNIT_test_structs(static_all),
KUNIT_test_structs(dynamic_all),
KUNIT_test_structs(runtime_partial),
KUNIT_test_structs(runtime_all),
KUNIT_test_structs(assigned_static_partial),
KUNIT_test_structs(assigned_static_all),
KUNIT_test_structs(assigned_dynamic_all),
/* Everything fails this since it effectively performs a memcpy(). */
KUNIT_test_structs(assigned_copy),
/* STRUCTLEAK_BYREF_ALL should cover everything from here down. */
KUNIT_test_scalars(none),
KUNIT_CASE(test_switch_1_none),
KUNIT_CASE(test_switch_2_none),
/* STRUCTLEAK_BYREF should cover from here down. */
KUNIT_test_structs(none),
/* STRUCTLEAK will only cover this. */
KUNIT_CASE(test_user),
{}
};
static struct kunit_suite stackinit_test_suite = {
.name = "stackinit",
.test_cases = stackinit_test_cases,
};
kunit_test_suites(&stackinit_test_suite);
MODULE_LICENSE("GPL");
| linux-master | lib/stackinit_kunit.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Out-of-line refcount functions.
*/
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>
#define REFCOUNT_WARN(str) WARN_ONCE(1, "refcount_t: " str ".\n")
void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t)
{
refcount_set(r, REFCOUNT_SATURATED);
switch (t) {
case REFCOUNT_ADD_NOT_ZERO_OVF:
REFCOUNT_WARN("saturated; leaking memory");
break;
case REFCOUNT_ADD_OVF:
REFCOUNT_WARN("saturated; leaking memory");
break;
case REFCOUNT_ADD_UAF:
REFCOUNT_WARN("addition on 0; use-after-free");
break;
case REFCOUNT_SUB_UAF:
REFCOUNT_WARN("underflow; use-after-free");
break;
case REFCOUNT_DEC_LEAK:
REFCOUNT_WARN("decrement hit 0; leaking memory");
break;
default:
REFCOUNT_WARN("unknown saturation event!?");
}
}
EXPORT_SYMBOL(refcount_warn_saturate);
/**
* refcount_dec_if_one - decrement a refcount if it is 1
* @r: the refcount
*
* No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
* success thereof.
*
* Like all decrement operations, it provides release memory order and provides
* a control dependency.
*
 * It can be used like a try-delete operator; this explicit case is provided
 * instead of a generic cmpxchg helper because that would allow implementing
 * unsafe operations.
*
* Return: true if the resulting refcount is 0, false otherwise
*/
bool refcount_dec_if_one(refcount_t *r)
{
int val = 1;
return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);
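/*
 * A minimal usage sketch of the try-delete pattern described above
 * (illustrative only; "struct foo" with an embedded refcount_t and
 * foo_release() are hypothetical):
 *
 *	static void foo_try_delete(struct foo *f)
 *	{
 *		if (refcount_dec_if_one(&f->refs))
 *			foo_release(f);
 *	}
 *
 * If another holder still owns a reference, the 1 -> 0 transition fails
 * and the object stays alive.
 */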
/**
* refcount_dec_not_one - decrement a refcount if it is not 1
* @r: the refcount
*
* No atomic_t counterpart, it decrements unless the value is 1, in which case
* it will return false.
*
* Was often done like: atomic_add_unless(&var, -1, 1)
*
* Return: true if the decrement operation was successful, false otherwise
*/
bool refcount_dec_not_one(refcount_t *r)
{
unsigned int new, val = atomic_read(&r->refs);
do {
if (unlikely(val == REFCOUNT_SATURATED))
return true;
if (val == 1)
return false;
new = val - 1;
if (new > val) {
WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
return true;
}
} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);
/**
* refcount_dec_and_mutex_lock - return holding mutex if able to decrement
* refcount to 0
* @r: the refcount
* @lock: the mutex to be locked
*
* Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
* to decrement when saturated at REFCOUNT_SATURATED.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides a control dependency such that free() must come after.
* See the comment on top.
*
* Return: true and hold mutex if able to decrement refcount to 0, false
* otherwise
*/
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
if (refcount_dec_not_one(r))
return false;
mutex_lock(lock);
if (!refcount_dec_and_test(r)) {
mutex_unlock(lock);
return false;
}
return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
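/*
 * A minimal usage sketch (illustrative only; "struct foo", its list node
 * and the global foo_lock are hypothetical). The mutex is only taken when
 * the count might actually drop to zero, keeping the common put path cheap:
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (refcount_dec_and_mutex_lock(&f->refs, &foo_lock)) {
 *			list_del(&f->node);
 *			mutex_unlock(&foo_lock);
 *			kfree(f);
 *		}
 *	}
 */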
/**
* refcount_dec_and_lock - return holding spinlock if able to decrement
* refcount to 0
* @r: the refcount
* @lock: the spinlock to be locked
*
* Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
* decrement when saturated at REFCOUNT_SATURATED.
*
* Provides release memory ordering, such that prior loads and stores are done
* before, and provides a control dependency such that free() must come after.
* See the comment on top.
*
* Return: true and hold spinlock if able to decrement refcount to 0, false
* otherwise
*/
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
if (refcount_dec_not_one(r))
return false;
spin_lock(lock);
if (!refcount_dec_and_test(r)) {
spin_unlock(lock);
return false;
}
return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);
/**
* refcount_dec_and_lock_irqsave - return holding spinlock with disabled
* interrupts if able to decrement refcount to 0
* @r: the refcount
* @lock: the spinlock to be locked
 * @flags: saved IRQ-flags if the lock is acquired
*
* Same as refcount_dec_and_lock() above except that the spinlock is acquired
* with disabled interrupts.
*
* Return: true and hold spinlock if able to decrement refcount to 0, false
* otherwise
*/
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
unsigned long *flags)
{
if (refcount_dec_not_one(r))
return false;
spin_lock_irqsave(lock, *flags);
if (!refcount_dec_and_test(r)) {
spin_unlock_irqrestore(lock, *flags);
return false;
}
return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
| linux-master | lib/refcount.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Helper function for splitting a string into an argv-like array.
*/
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/export.h>
static int count_argc(const char *str)
{
int count = 0;
bool was_space;
for (was_space = true; *str; str++) {
if (isspace(*str)) {
was_space = true;
} else if (was_space) {
was_space = false;
count++;
}
}
return count;
}
/**
* argv_free - free an argv
* @argv: the argument vector to be freed
*
* Frees an argv and the strings it points to.
*/
void argv_free(char **argv)
{
argv--;
kfree(argv[0]);
kfree(argv);
}
EXPORT_SYMBOL(argv_free);
/**
* argv_split - split a string at whitespace, returning an argv
* @gfp: the GFP mask used to allocate memory
* @str: the string to be split
* @argcp: returned argument count
*
* Returns: an array of pointers to strings which are split out from
* @str. This is performed by strictly splitting on white-space; no
* quote processing is performed. Multiple whitespace characters are
* considered to be a single argument separator. The returned array
* is always NULL-terminated. Returns NULL on memory allocation
* failure.
*
* The source string at `str' may be undergoing concurrent alteration via
* userspace sysctl activity (at least). The argv_split() implementation
* attempts to handle this gracefully by taking a local copy to work on.
*/
char **argv_split(gfp_t gfp, const char *str, int *argcp)
{
char *argv_str;
bool was_space;
char **argv, **argv_ret;
int argc;
argv_str = kstrndup(str, KMALLOC_MAX_SIZE - 1, gfp);
if (!argv_str)
return NULL;
argc = count_argc(argv_str);
argv = kmalloc_array(argc + 2, sizeof(*argv), gfp);
if (!argv) {
kfree(argv_str);
return NULL;
}
*argv = argv_str;
argv_ret = ++argv;
for (was_space = true; *argv_str; argv_str++) {
if (isspace(*argv_str)) {
was_space = true;
*argv_str = 0;
} else if (was_space) {
was_space = false;
*argv++ = argv_str;
}
}
*argv = NULL;
if (argcp)
*argcp = argc;
return argv_ret;
}
EXPORT_SYMBOL(argv_split);
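/*
 * A minimal usage sketch (illustrative only; the input string and the
 * pr_info() reporting are just for demonstration):
 *
 *	int argc, i;
 *	char **argv = argv_split(GFP_KERNEL, "mount -o ro /dev/sda1", &argc);
 *
 *	if (!argv)
 *		return -ENOMEM;
 *	for (i = 0; i < argc; i++)
 *		pr_info("argv[%d] = %s\n", i, argv[i]);
 *	argv_free(argv);
 */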
| linux-master | lib/argv_split.c |
// SPDX-License-Identifier: GPL-2.0
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include "../mm/slab.h"
static struct kunit_resource resource;
static int slab_errors;
/*
 * Wrapper for kmem_cache_create() that drops two parameters ('align' and
 * 'ctor') and sets the SLAB_SKIP_KFENCE flag, so objects are never handed
 * out from the kfence pool, where the clobbering done by these tests could
 * be caught by both our validation and the kfence sanity check.
*/
static struct kmem_cache *test_kmem_cache_create(const char *name,
unsigned int size, slab_flags_t flags)
{
struct kmem_cache *s = kmem_cache_create(name, size, 0,
(flags | SLAB_NO_USER_FLAGS), NULL);
s->flags |= SLAB_SKIP_KFENCE;
return s;
}
static void test_clobber_zone(struct kunit *test)
{
struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_alloc", 64,
SLAB_RED_ZONE);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kasan_disable_current();
p[64] = 0x12;
validate_slab_cache(s);
KUNIT_EXPECT_EQ(test, 2, slab_errors);
kasan_enable_current();
kmem_cache_free(s, p);
kmem_cache_destroy(s);
}
#ifndef CONFIG_KASAN
static void test_next_pointer(struct kunit *test)
{
struct kmem_cache *s = test_kmem_cache_create("TestSlub_next_ptr_free",
64, SLAB_POISON);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
unsigned long tmp;
unsigned long *ptr_addr;
kmem_cache_free(s, p);
ptr_addr = (unsigned long *)(p + s->offset);
tmp = *ptr_addr;
p[s->offset] = 0x12;
/*
	 * Expecting three errors:
	 * one for the corrupted freechain, one for the wrong count of
	 * objects in use, and a third reported while the broken cache is
	 * being fixed.
*/
validate_slab_cache(s);
KUNIT_EXPECT_EQ(test, 3, slab_errors);
/*
	 * Try to repair the corrupted freepointer.
	 * Still expecting two errors: the first for the wrong count of
	 * objects in use, the second for fixing the broken cache.
*/
*ptr_addr = tmp;
slab_errors = 0;
validate_slab_cache(s);
KUNIT_EXPECT_EQ(test, 2, slab_errors);
/*
* Previous validation repaired the count of objects in use.
* Now expecting no error.
*/
slab_errors = 0;
validate_slab_cache(s);
KUNIT_EXPECT_EQ(test, 0, slab_errors);
kmem_cache_destroy(s);
}
static void test_first_word(struct kunit *test)
{
struct kmem_cache *s = test_kmem_cache_create("TestSlub_1th_word_free",
64, SLAB_POISON);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kmem_cache_free(s, p);
*p = 0x78;
validate_slab_cache(s);
KUNIT_EXPECT_EQ(test, 2, slab_errors);
kmem_cache_destroy(s);
}
static void test_clobber_50th_byte(struct kunit *test)
{
struct kmem_cache *s = test_kmem_cache_create("TestSlub_50th_word_free",
64, SLAB_POISON);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kmem_cache_free(s, p);
p[50] = 0x9a;
validate_slab_cache(s);
KUNIT_EXPECT_EQ(test, 2, slab_errors);
kmem_cache_destroy(s);
}
#endif
static void test_clobber_redzone_free(struct kunit *test)
{
struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_free", 64,
SLAB_RED_ZONE);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kasan_disable_current();
kmem_cache_free(s, p);
p[64] = 0xab;
validate_slab_cache(s);
KUNIT_EXPECT_EQ(test, 2, slab_errors);
kasan_enable_current();
kmem_cache_destroy(s);
}
static void test_kmalloc_redzone_access(struct kunit *test)
{
struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
u8 *p = kmalloc_trace(s, GFP_KERNEL, 18);
kasan_disable_current();
/* Suppress the -Warray-bounds warning */
OPTIMIZER_HIDE_VAR(p);
p[18] = 0xab;
p[19] = 0xab;
validate_slab_cache(s);
KUNIT_EXPECT_EQ(test, 2, slab_errors);
kasan_enable_current();
kmem_cache_free(s, p);
kmem_cache_destroy(s);
}
static int test_init(struct kunit *test)
{
slab_errors = 0;
kunit_add_named_resource(test, NULL, NULL, &resource,
"slab_errors", &slab_errors);
return 0;
}
static struct kunit_case test_cases[] = {
KUNIT_CASE(test_clobber_zone),
#ifndef CONFIG_KASAN
KUNIT_CASE(test_next_pointer),
KUNIT_CASE(test_first_word),
KUNIT_CASE(test_clobber_50th_byte),
#endif
KUNIT_CASE(test_clobber_redzone_free),
KUNIT_CASE(test_kmalloc_redzone_access),
{}
};
static struct kunit_suite test_suite = {
.name = "slub_test",
.init = test_init,
.test_cases = test_cases,
};
kunit_test_suite(test_suite);
MODULE_LICENSE("GPL");
| linux-master | lib/slub_kunit.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Convert integer string representation to an integer.
* If an integer doesn't fit into specified type, -E is returned.
*
* Integer starts with optional sign.
* kstrtou*() functions do not accept sign "-".
*
* Radix 0 means autodetection: leading "0x" implies radix 16,
* leading "0" implies radix 8, otherwise radix is 10.
* Autodetection hints work after optional sign, but not before.
*
* If -E is returned, result is not touched.
*/
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/kstrtox.h>
#include <linux/math64.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "kstrtox.h"
noinline
const char *_parse_integer_fixup_radix(const char *s, unsigned int *base)
{
if (*base == 0) {
if (s[0] == '0') {
if (_tolower(s[1]) == 'x' && isxdigit(s[2]))
*base = 16;
else
*base = 8;
} else
*base = 10;
}
if (*base == 16 && s[0] == '0' && _tolower(s[1]) == 'x')
s += 2;
return s;
}
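/*
 * Examples of the autodetection above when base is passed in as 0:
 *
 *	"0x1f"	-> base 16, the "0x" prefix is skipped
 *	"0755"	-> base 8
 *	"42"	-> base 10
 *
 * A lone "0x" with no hex digit after it falls back to base 8, because of
 * the isxdigit(s[2]) check.
 */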
/*
* Convert non-negative integer string representation in explicitly given radix
* to an integer. A maximum of max_chars characters will be converted.
*
 * Returns the number of characters consumed, possibly OR-ed with the
 * KSTRTOX_OVERFLOW bit. If overflow occurs, the (incorrect) result is
 * still written to *p.
*
* Don't you dare use this function.
*/
noinline
unsigned int _parse_integer_limit(const char *s, unsigned int base, unsigned long long *p,
size_t max_chars)
{
unsigned long long res;
unsigned int rv;
res = 0;
rv = 0;
while (max_chars--) {
unsigned int c = *s;
unsigned int lc = _tolower(c);
unsigned int val;
if ('0' <= c && c <= '9')
val = c - '0';
else if ('a' <= lc && lc <= 'f')
val = lc - 'a' + 10;
else
break;
if (val >= base)
break;
/*
* Check for overflow only if we are within range of
* it in the max base we support (16)
*/
if (unlikely(res & (~0ull << 60))) {
if (res > div_u64(ULLONG_MAX - val, base))
rv |= KSTRTOX_OVERFLOW;
}
res = res * base + val;
rv++;
s++;
}
*p = res;
return rv;
}
noinline
unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long *p)
{
return _parse_integer_limit(s, base, p, INT_MAX);
}
static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
{
unsigned long long _res;
unsigned int rv;
s = _parse_integer_fixup_radix(s, &base);
rv = _parse_integer(s, base, &_res);
if (rv & KSTRTOX_OVERFLOW)
return -ERANGE;
if (rv == 0)
return -EINVAL;
s += rv;
if (*s == '\n')
s++;
if (*s)
return -EINVAL;
*res = _res;
return 0;
}
/**
* kstrtoull - convert a string to an unsigned long long
* @s: The start of the string. The string must be null-terminated, and may also
* include a single newline before its terminating null. The first character
* may also be a plus sign, but not a minus sign.
* @base: The number base to use. The maximum supported base is 16. If base is
* given as 0, then the base of the string is automatically detected with the
* conventional semantics - If it begins with 0x the number will be parsed as a
* hexadecimal (case insensitive), if it otherwise begins with 0, it will be
* parsed as an octal number. Otherwise it will be parsed as a decimal.
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
* Preferred over simple_strtoull(). Return code must be checked.
*/
noinline
int kstrtoull(const char *s, unsigned int base, unsigned long long *res)
{
if (s[0] == '+')
s++;
return _kstrtoull(s, base, res);
}
EXPORT_SYMBOL(kstrtoull);
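/*
 * A minimal usage sketch (illustrative only; "buf" is assumed to hold a
 * NUL-terminated string such as "0x1000" or "4096\n"):
 *
 *	unsigned long long val;
 *	int ret = kstrtoull(buf, 0, &val);
 *
 *	if (ret)
 *		return ret;
 *
 * On failure (-EINVAL or -ERANGE) nothing is written to val, as noted at
 * the top of this file.
 */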
/**
* kstrtoll - convert a string to a long long
* @s: The start of the string. The string must be null-terminated, and may also
* include a single newline before its terminating null. The first character
* may also be a plus sign or a minus sign.
* @base: The number base to use. The maximum supported base is 16. If base is
* given as 0, then the base of the string is automatically detected with the
* conventional semantics - If it begins with 0x the number will be parsed as a
* hexadecimal (case insensitive), if it otherwise begins with 0, it will be
* parsed as an octal number. Otherwise it will be parsed as a decimal.
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
* Preferred over simple_strtoll(). Return code must be checked.
*/
noinline
int kstrtoll(const char *s, unsigned int base, long long *res)
{
unsigned long long tmp;
int rv;
if (s[0] == '-') {
rv = _kstrtoull(s + 1, base, &tmp);
if (rv < 0)
return rv;
if ((long long)-tmp > 0)
return -ERANGE;
*res = -tmp;
} else {
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
if ((long long)tmp < 0)
return -ERANGE;
*res = tmp;
}
return 0;
}
EXPORT_SYMBOL(kstrtoll);
/* Internal, do not use. */
int _kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
unsigned long long tmp;
int rv;
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
if (tmp != (unsigned long)tmp)
return -ERANGE;
*res = tmp;
return 0;
}
EXPORT_SYMBOL(_kstrtoul);
/* Internal, do not use. */
int _kstrtol(const char *s, unsigned int base, long *res)
{
long long tmp;
int rv;
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
if (tmp != (long)tmp)
return -ERANGE;
*res = tmp;
return 0;
}
EXPORT_SYMBOL(_kstrtol);
/**
* kstrtouint - convert a string to an unsigned int
* @s: The start of the string. The string must be null-terminated, and may also
* include a single newline before its terminating null. The first character
* may also be a plus sign, but not a minus sign.
* @base: The number base to use. The maximum supported base is 16. If base is
* given as 0, then the base of the string is automatically detected with the
* conventional semantics - If it begins with 0x the number will be parsed as a
* hexadecimal (case insensitive), if it otherwise begins with 0, it will be
* parsed as an octal number. Otherwise it will be parsed as a decimal.
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
* Preferred over simple_strtoul(). Return code must be checked.
*/
noinline
int kstrtouint(const char *s, unsigned int base, unsigned int *res)
{
unsigned long long tmp;
int rv;
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
if (tmp != (unsigned int)tmp)
return -ERANGE;
*res = tmp;
return 0;
}
EXPORT_SYMBOL(kstrtouint);
/**
* kstrtoint - convert a string to an int
* @s: The start of the string. The string must be null-terminated, and may also
* include a single newline before its terminating null. The first character
* may also be a plus sign or a minus sign.
* @base: The number base to use. The maximum supported base is 16. If base is
* given as 0, then the base of the string is automatically detected with the
* conventional semantics - If it begins with 0x the number will be parsed as a
* hexadecimal (case insensitive), if it otherwise begins with 0, it will be
* parsed as an octal number. Otherwise it will be parsed as a decimal.
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
* Preferred over simple_strtol(). Return code must be checked.
*/
noinline
int kstrtoint(const char *s, unsigned int base, int *res)
{
long long tmp;
int rv;
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
if (tmp != (int)tmp)
return -ERANGE;
*res = tmp;
return 0;
}
EXPORT_SYMBOL(kstrtoint);
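/*
 * A minimal usage sketch in a sysfs-style store handler (illustrative
 * only; struct my_device and its "level" field are hypothetical):
 *
 *	static ssize_t level_store(struct device *dev,
 *				   struct device_attribute *attr,
 *				   const char *buf, size_t count)
 *	{
 *		struct my_device *my_dev = dev_get_drvdata(dev);
 *		int level, ret;
 *
 *		ret = kstrtoint(buf, 10, &level);
 *		if (ret)
 *			return ret;
 *		my_dev->level = level;
 *		return count;
 *	}
 *
 * The trailing newline usually present in sysfs writes is accepted, as
 * documented above.
 */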
noinline
int kstrtou16(const char *s, unsigned int base, u16 *res)
{
unsigned long long tmp;
int rv;
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
if (tmp != (u16)tmp)
return -ERANGE;
*res = tmp;
return 0;
}
EXPORT_SYMBOL(kstrtou16);
noinline
int kstrtos16(const char *s, unsigned int base, s16 *res)
{
long long tmp;
int rv;
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
if (tmp != (s16)tmp)
return -ERANGE;
*res = tmp;
return 0;
}
EXPORT_SYMBOL(kstrtos16);
noinline
int kstrtou8(const char *s, unsigned int base, u8 *res)
{
unsigned long long tmp;
int rv;
rv = kstrtoull(s, base, &tmp);
if (rv < 0)
return rv;
if (tmp != (u8)tmp)
return -ERANGE;
*res = tmp;
return 0;
}
EXPORT_SYMBOL(kstrtou8);
noinline
int kstrtos8(const char *s, unsigned int base, s8 *res)
{
long long tmp;
int rv;
rv = kstrtoll(s, base, &tmp);
if (rv < 0)
return rv;
if (tmp != (s8)tmp)
return -ERANGE;
*res = tmp;
return 0;
}
EXPORT_SYMBOL(kstrtos8);
/**
* kstrtobool - convert common user inputs into boolean values
* @s: input string
* @res: result
*
* This routine returns 0 iff the first character is one of 'YyTt1NnFf0', or
* [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL. Value
* pointed to by res is updated upon finding a match.
*/
noinline
int kstrtobool(const char *s, bool *res)
{
if (!s)
return -EINVAL;
switch (s[0]) {
case 'y':
case 'Y':
case 't':
case 'T':
case '1':
*res = true;
return 0;
case 'n':
case 'N':
case 'f':
case 'F':
case '0':
*res = false;
return 0;
case 'o':
case 'O':
switch (s[1]) {
case 'n':
case 'N':
*res = true;
return 0;
case 'f':
case 'F':
*res = false;
return 0;
default:
break;
}
break;
default:
break;
}
return -EINVAL;
}
EXPORT_SYMBOL(kstrtobool);
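/*
 * Examples (only the leading characters are examined):
 *
 *	"y", "Y", "t", "T", "1", "on", "oN"	-> *res = true,  returns 0
 *	"n", "N", "f", "F", "0", "off", "oFf"	-> *res = false, returns 0
 *	"", "o", "maybe"			-> *res untouched, -EINVAL
 */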
/*
* Since "base" would be a nonsense argument, this open-codes the
* _from_user helper instead of using the helper macro below.
*/
int kstrtobool_from_user(const char __user *s, size_t count, bool *res)
{
/* Longest string needed to differentiate, newline, terminator */
char buf[4];
count = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, s, count))
return -EFAULT;
buf[count] = '\0';
return kstrtobool(buf, res);
}
EXPORT_SYMBOL(kstrtobool_from_user);
#define kstrto_from_user(f, g, type) \
int f(const char __user *s, size_t count, unsigned int base, type *res) \
{ \
/* sign, base 2 representation, newline, terminator */ \
char buf[1 + sizeof(type) * 8 + 1 + 1]; \
\
count = min(count, sizeof(buf) - 1); \
if (copy_from_user(buf, s, count)) \
return -EFAULT; \
buf[count] = '\0'; \
return g(buf, base, res); \
} \
EXPORT_SYMBOL(f)
kstrto_from_user(kstrtoull_from_user, kstrtoull, unsigned long long);
kstrto_from_user(kstrtoll_from_user, kstrtoll, long long);
kstrto_from_user(kstrtoul_from_user, kstrtoul, unsigned long);
kstrto_from_user(kstrtol_from_user, kstrtol, long);
kstrto_from_user(kstrtouint_from_user, kstrtouint, unsigned int);
kstrto_from_user(kstrtoint_from_user, kstrtoint, int);
kstrto_from_user(kstrtou16_from_user, kstrtou16, u16);
kstrto_from_user(kstrtos16_from_user, kstrtos16, s16);
kstrto_from_user(kstrtou8_from_user, kstrtou8, u8);
kstrto_from_user(kstrtos8_from_user, kstrtos8, s8);
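/*
 * For reference, a sketch of what one of the instantiations above expands
 * to (whitespace adjusted for readability):
 *
 *	int kstrtoint_from_user(const char __user *s, size_t count,
 *				unsigned int base, int *res)
 *	{
 *		char buf[1 + sizeof(int) * 8 + 1 + 1];
 *
 *		count = min(count, sizeof(buf) - 1);
 *		if (copy_from_user(buf, s, count))
 *			return -EFAULT;
 *		buf[count] = '\0';
 *		return kstrtoint(buf, base, res);
 *	}
 */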
| linux-master | lib/kstrtox.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Reference tracker self test.
*
* Copyright (c) 2021 Eric Dumazet <[email protected]>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/ref_tracker.h>
#include <linux/slab.h>
#include <linux/timer.h>
static struct ref_tracker_dir ref_dir;
static struct ref_tracker *tracker[20];
#define TRT_ALLOC(X) static noinline void \
alloctest_ref_tracker_alloc##X(struct ref_tracker_dir *dir, \
struct ref_tracker **trackerp) \
{ \
ref_tracker_alloc(dir, trackerp, GFP_KERNEL); \
}
TRT_ALLOC(1)
TRT_ALLOC(2)
TRT_ALLOC(3)
TRT_ALLOC(4)
TRT_ALLOC(5)
TRT_ALLOC(6)
TRT_ALLOC(7)
TRT_ALLOC(8)
TRT_ALLOC(9)
TRT_ALLOC(10)
TRT_ALLOC(11)
TRT_ALLOC(12)
TRT_ALLOC(13)
TRT_ALLOC(14)
TRT_ALLOC(15)
TRT_ALLOC(16)
TRT_ALLOC(17)
TRT_ALLOC(18)
TRT_ALLOC(19)
#undef TRT_ALLOC
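/*
 * For reference, TRT_ALLOC(1) above expands to:
 *
 *	static noinline void
 *	alloctest_ref_tracker_alloc1(struct ref_tracker_dir *dir,
 *				     struct ref_tracker **trackerp)
 *	{
 *		ref_tracker_alloc(dir, trackerp, GFP_KERNEL);
 *	}
 *
 * Using a separate noinline wrapper per allocation presumably gives each
 * tracked reference a distinct call site, so the stack traces reported at
 * ref_tracker_dir_exit() time can be told apart.
 */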
static noinline void
alloctest_ref_tracker_free(struct ref_tracker_dir *dir,
struct ref_tracker **trackerp)
{
ref_tracker_free(dir, trackerp);
}
static struct timer_list test_ref_tracker_timer;
static atomic_t test_ref_timer_done = ATOMIC_INIT(0);
static void test_ref_tracker_timer_func(struct timer_list *t)
{
ref_tracker_alloc(&ref_dir, &tracker[0], GFP_ATOMIC);
atomic_set(&test_ref_timer_done, 1);
}
static int __init test_ref_tracker_init(void)
{
int i;
ref_tracker_dir_init(&ref_dir, 100, "selftest");
timer_setup(&test_ref_tracker_timer, test_ref_tracker_timer_func, 0);
mod_timer(&test_ref_tracker_timer, jiffies + 1);
alloctest_ref_tracker_alloc1(&ref_dir, &tracker[1]);
alloctest_ref_tracker_alloc2(&ref_dir, &tracker[2]);
alloctest_ref_tracker_alloc3(&ref_dir, &tracker[3]);
alloctest_ref_tracker_alloc4(&ref_dir, &tracker[4]);
alloctest_ref_tracker_alloc5(&ref_dir, &tracker[5]);
alloctest_ref_tracker_alloc6(&ref_dir, &tracker[6]);
alloctest_ref_tracker_alloc7(&ref_dir, &tracker[7]);
alloctest_ref_tracker_alloc8(&ref_dir, &tracker[8]);
alloctest_ref_tracker_alloc9(&ref_dir, &tracker[9]);
alloctest_ref_tracker_alloc10(&ref_dir, &tracker[10]);
alloctest_ref_tracker_alloc11(&ref_dir, &tracker[11]);
alloctest_ref_tracker_alloc12(&ref_dir, &tracker[12]);
alloctest_ref_tracker_alloc13(&ref_dir, &tracker[13]);
alloctest_ref_tracker_alloc14(&ref_dir, &tracker[14]);
alloctest_ref_tracker_alloc15(&ref_dir, &tracker[15]);
alloctest_ref_tracker_alloc16(&ref_dir, &tracker[16]);
alloctest_ref_tracker_alloc17(&ref_dir, &tracker[17]);
alloctest_ref_tracker_alloc18(&ref_dir, &tracker[18]);
alloctest_ref_tracker_alloc19(&ref_dir, &tracker[19]);
/* free all trackers but first 0 and 1. */
for (i = 2; i < ARRAY_SIZE(tracker); i++)
alloctest_ref_tracker_free(&ref_dir, &tracker[i]);
/* Attempt to free an already freed tracker. */
alloctest_ref_tracker_free(&ref_dir, &tracker[2]);
while (!atomic_read(&test_ref_timer_done))
msleep(1);
/* This should warn about tracker[0] & tracker[1] being not freed. */
ref_tracker_dir_exit(&ref_dir);
return 0;
}
static void __exit test_ref_tracker_exit(void)
{
}
module_init(test_ref_tracker_init);
module_exit(test_ref_tracker_exit);
MODULE_LICENSE("GPL v2");
| linux-master | lib/test_ref_tracker.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions related to interrupt-poll handling in the block layer. This
* is similar to NAPI for network devices.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/irq_poll.h>
#include <linux/delay.h>
static unsigned int irq_poll_budget __read_mostly = 256;
static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
/**
* irq_poll_sched - Schedule a run of the iopoll handler
* @iop: The parent iopoll structure
*
* Description:
* Add this irq_poll structure to the pending poll list and trigger the
* raise of the blk iopoll softirq.
**/
void irq_poll_sched(struct irq_poll *iop)
{
unsigned long flags;
if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
return;
if (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
return;
local_irq_save(flags);
list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_restore(flags);
}
EXPORT_SYMBOL(irq_poll_sched);
/**
* __irq_poll_complete - Mark this @iop as un-polled again
* @iop: The parent iopoll structure
*
* Description:
* See irq_poll_complete(). This function must be called with interrupts
* disabled.
**/
static void __irq_poll_complete(struct irq_poll *iop)
{
list_del(&iop->list);
smp_mb__before_atomic();
clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
}
/**
* irq_poll_complete - Mark this @iop as un-polled again
* @iop: The parent iopoll structure
*
* Description:
* If a driver consumes less than the assigned budget in its run of the
* iopoll handler, it'll end the polled mode by calling this function. The
* iopoll handler will not be invoked again before irq_poll_sched()
* is called.
**/
void irq_poll_complete(struct irq_poll *iop)
{
unsigned long flags;
local_irq_save(flags);
__irq_poll_complete(iop);
local_irq_restore(flags);
}
EXPORT_SYMBOL(irq_poll_complete);
static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
{
struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
int rearm = 0, budget = irq_poll_budget;
unsigned long start_time = jiffies;
local_irq_disable();
while (!list_empty(list)) {
struct irq_poll *iop;
int work, weight;
/*
* If softirq window is exhausted then punt.
*/
if (budget <= 0 || time_after(jiffies, start_time)) {
rearm = 1;
break;
}
local_irq_enable();
/* Even though interrupts have been re-enabled, this
* access is safe because interrupts can only add new
* entries to the tail of this list, and only ->poll()
* calls can remove this head entry from the list.
*/
iop = list_entry(list->next, struct irq_poll, list);
weight = iop->weight;
work = 0;
if (test_bit(IRQ_POLL_F_SCHED, &iop->state))
work = iop->poll(iop, weight);
budget -= work;
local_irq_disable();
/*
		 * Drivers must not modify the iopoll state if they
		 * consume their assigned weight (or more; some drivers
		 * cannot easily just stop processing, they have to
		 * complete an entire mask of commands). In such cases
		 * this code still "owns" the iopoll instance and can
		 * therefore move the instance around on the list at will.
*/
if (work >= weight) {
if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
__irq_poll_complete(iop);
else
list_move_tail(&iop->list, list);
}
}
if (rearm)
__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_enable();
}
/**
* irq_poll_disable - Disable iopoll on this @iop
* @iop: The parent iopoll structure
*
* Description:
* Disable io polling and wait for any pending callbacks to have completed.
**/
void irq_poll_disable(struct irq_poll *iop)
{
set_bit(IRQ_POLL_F_DISABLE, &iop->state);
while (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
msleep(1);
clear_bit(IRQ_POLL_F_DISABLE, &iop->state);
}
EXPORT_SYMBOL(irq_poll_disable);
/**
* irq_poll_enable - Enable iopoll on this @iop
* @iop: The parent iopoll structure
*
* Description:
* Enable iopoll on this @iop. Note that the handler run will not be
* scheduled, it will only mark it as active.
**/
void irq_poll_enable(struct irq_poll *iop)
{
BUG_ON(!test_bit(IRQ_POLL_F_SCHED, &iop->state));
smp_mb__before_atomic();
clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(irq_poll_enable);
/**
* irq_poll_init - Initialize this @iop
* @iop: The parent iopoll structure
* @weight: The default weight (or command completion budget)
* @poll_fn: The handler to invoke
*
* Description:
* Initialize and enable this irq_poll structure.
**/
void irq_poll_init(struct irq_poll *iop, int weight, irq_poll_fn *poll_fn)
{
memset(iop, 0, sizeof(*iop));
INIT_LIST_HEAD(&iop->list);
iop->weight = weight;
iop->poll = poll_fn;
}
EXPORT_SYMBOL(irq_poll_init);
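/*
 * A minimal driver-side usage sketch (illustrative only; struct my_dev with
 * an embedded "struct irq_poll iop" and my_dev_process_one() are
 * hypothetical). The hard interrupt handler only schedules the poll;
 * completions are then consumed from softirq context until the budget is
 * spent or the device runs dry, at which point irq_poll_complete() re-arms
 * interrupt-driven operation:
 *
 *	static int my_poll(struct irq_poll *iop, int budget)
 *	{
 *		struct my_dev *dev = container_of(iop, struct my_dev, iop);
 *		int done = 0;
 *
 *		while (done < budget && my_dev_process_one(dev))
 *			done++;
 *		if (done < budget)
 *			irq_poll_complete(iop);
 *		return done;
 *	}
 *
 * Setup would call irq_poll_init(&dev->iop, 32, my_poll) once, and the
 * hard IRQ handler would call irq_poll_sched(&dev->iop).
 */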
static int irq_poll_cpu_dead(unsigned int cpu)
{
/*
* If a CPU goes away, splice its entries to the current CPU and
* set the POLL softirq bit. The local_bh_disable()/enable() pair
* ensures that it is handled. Otherwise the current CPU could
* reach idle with the POLL softirq pending.
*/
local_bh_disable();
local_irq_disable();
list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_enable();
local_bh_enable();
return 0;
}
static __init int irq_poll_setup(void)
{
int i;
for_each_possible_cpu(i)
INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));
open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq);
cpuhp_setup_state_nocalls(CPUHP_IRQ_POLL_DEAD, "irq_poll:dead", NULL,
irq_poll_cpu_dead);
return 0;
}
subsys_initcall(irq_poll_setup);
| linux-master | lib/irq_poll.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KUnit test for the Kernel Linked-list structures.
*
* Copyright (C) 2019, Google LLC.
* Author: David Gow <[email protected]>
*/
#include <kunit/test.h>
#include <linux/list.h>
#include <linux/klist.h>
struct list_test_struct {
int data;
struct list_head list;
};
static void list_test_list_init(struct kunit *test)
{
/* Test the different ways of initialising a list. */
struct list_head list1 = LIST_HEAD_INIT(list1);
struct list_head list2;
LIST_HEAD(list3);
struct list_head *list4;
struct list_head *list5;
INIT_LIST_HEAD(&list2);
list4 = kzalloc(sizeof(*list4), GFP_KERNEL | __GFP_NOFAIL);
INIT_LIST_HEAD(list4);
list5 = kmalloc(sizeof(*list5), GFP_KERNEL | __GFP_NOFAIL);
memset(list5, 0xFF, sizeof(*list5));
INIT_LIST_HEAD(list5);
/* list_empty_careful() checks both next and prev. */
KUNIT_EXPECT_TRUE(test, list_empty_careful(&list1));
KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
KUNIT_EXPECT_TRUE(test, list_empty_careful(&list3));
KUNIT_EXPECT_TRUE(test, list_empty_careful(list4));
KUNIT_EXPECT_TRUE(test, list_empty_careful(list5));
kfree(list4);
kfree(list5);
}
static void list_test_list_add(struct kunit *test)
{
struct list_head a, b;
LIST_HEAD(list);
list_add(&a, &list);
list_add(&b, &list);
/* should be [list] -> b -> a */
KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
KUNIT_EXPECT_PTR_EQ(test, b.next, &a);
}
static void list_test_list_add_tail(struct kunit *test)
{
struct list_head a, b;
LIST_HEAD(list);
list_add_tail(&a, &list);
list_add_tail(&b, &list);
/* should be [list] -> a -> b */
KUNIT_EXPECT_PTR_EQ(test, list.next, &a);
KUNIT_EXPECT_PTR_EQ(test, a.prev, &list);
KUNIT_EXPECT_PTR_EQ(test, a.next, &b);
}
static void list_test_list_del(struct kunit *test)
{
struct list_head a, b;
LIST_HEAD(list);
list_add_tail(&a, &list);
list_add_tail(&b, &list);
/* before: [list] -> a -> b */
list_del(&a);
/* now: [list] -> b */
KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
}
static void list_test_list_replace(struct kunit *test)
{
struct list_head a_old, a_new, b;
LIST_HEAD(list);
list_add_tail(&a_old, &list);
list_add_tail(&b, &list);
/* before: [list] -> a_old -> b */
list_replace(&a_old, &a_new);
/* now: [list] -> a_new -> b */
KUNIT_EXPECT_PTR_EQ(test, list.next, &a_new);
KUNIT_EXPECT_PTR_EQ(test, b.prev, &a_new);
}
static void list_test_list_replace_init(struct kunit *test)
{
struct list_head a_old, a_new, b;
LIST_HEAD(list);
list_add_tail(&a_old, &list);
list_add_tail(&b, &list);
/* before: [list] -> a_old -> b */
list_replace_init(&a_old, &a_new);
/* now: [list] -> a_new -> b */
KUNIT_EXPECT_PTR_EQ(test, list.next, &a_new);
KUNIT_EXPECT_PTR_EQ(test, b.prev, &a_new);
/* check a_old is empty (initialized) */
KUNIT_EXPECT_TRUE(test, list_empty_careful(&a_old));
}
static void list_test_list_swap(struct kunit *test)
{
struct list_head a, b;
LIST_HEAD(list);
list_add_tail(&a, &list);
list_add_tail(&b, &list);
/* before: [list] -> a -> b */
list_swap(&a, &b);
/* after: [list] -> b -> a */
KUNIT_EXPECT_PTR_EQ(test, &b, list.next);
KUNIT_EXPECT_PTR_EQ(test, &a, list.prev);
KUNIT_EXPECT_PTR_EQ(test, &a, b.next);
KUNIT_EXPECT_PTR_EQ(test, &list, b.prev);
KUNIT_EXPECT_PTR_EQ(test, &list, a.next);
KUNIT_EXPECT_PTR_EQ(test, &b, a.prev);
}
static void list_test_list_del_init(struct kunit *test)
{
struct list_head a, b;
LIST_HEAD(list);
list_add_tail(&a, &list);
list_add_tail(&b, &list);
/* before: [list] -> a -> b */
list_del_init(&a);
/* after: [list] -> b, a initialised */
KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
KUNIT_EXPECT_TRUE(test, list_empty_careful(&a));
}
static void list_test_list_del_init_careful(struct kunit *test)
{
/* NOTE: This test only checks the behaviour of this function in
* isolation. It does not verify memory model guarantees.
*/
struct list_head a, b;
LIST_HEAD(list);
list_add_tail(&a, &list);
list_add_tail(&b, &list);
/* before: [list] -> a -> b */
list_del_init_careful(&a);
/* after: [list] -> b, a initialised */
KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
KUNIT_EXPECT_TRUE(test, list_empty_careful(&a));
}
static void list_test_list_move(struct kunit *test)
{
struct list_head a, b;
LIST_HEAD(list1);
LIST_HEAD(list2);
list_add_tail(&a, &list1);
list_add_tail(&b, &list2);
/* before: [list1] -> a, [list2] -> b */
list_move(&a, &list2);
/* after: [list1] empty, [list2] -> a -> b */
KUNIT_EXPECT_TRUE(test, list_empty(&list1));
KUNIT_EXPECT_PTR_EQ(test, &a, list2.next);
KUNIT_EXPECT_PTR_EQ(test, &b, a.next);
}
static void list_test_list_move_tail(struct kunit *test)
{
struct list_head a, b;
LIST_HEAD(list1);
LIST_HEAD(list2);
list_add_tail(&a, &list1);
list_add_tail(&b, &list2);
/* before: [list1] -> a, [list2] -> b */
list_move_tail(&a, &list2);
/* after: [list1] empty, [list2] -> b -> a */
KUNIT_EXPECT_TRUE(test, list_empty(&list1));
KUNIT_EXPECT_PTR_EQ(test, &b, list2.next);
KUNIT_EXPECT_PTR_EQ(test, &a, b.next);
}
static void list_test_list_bulk_move_tail(struct kunit *test)
{
struct list_head a, b, c, d, x, y;
struct list_head *list1_values[] = { &x, &b, &c, &y };
struct list_head *list2_values[] = { &a, &d };
struct list_head *ptr;
LIST_HEAD(list1);
LIST_HEAD(list2);
int i = 0;
list_add_tail(&x, &list1);
list_add_tail(&y, &list1);
list_add_tail(&a, &list2);
list_add_tail(&b, &list2);
list_add_tail(&c, &list2);
list_add_tail(&d, &list2);
/* before: [list1] -> x -> y, [list2] -> a -> b -> c -> d */
list_bulk_move_tail(&y, &b, &c);
/* after: [list1] -> x -> b -> c -> y, [list2] -> a -> d */
list_for_each(ptr, &list1) {
KUNIT_EXPECT_PTR_EQ(test, ptr, list1_values[i]);
i++;
}
KUNIT_EXPECT_EQ(test, i, 4);
i = 0;
list_for_each(ptr, &list2) {
KUNIT_EXPECT_PTR_EQ(test, ptr, list2_values[i]);
i++;
}
KUNIT_EXPECT_EQ(test, i, 2);
}
static void list_test_list_is_head(struct kunit *test)
{
struct list_head a, b, c;
/* Two lists: [a] -> b, [c] */
INIT_LIST_HEAD(&a);
INIT_LIST_HEAD(&c);
list_add_tail(&b, &a);
KUNIT_EXPECT_TRUE_MSG(test, list_is_head(&a, &a),
"Head element of same list");
KUNIT_EXPECT_FALSE_MSG(test, list_is_head(&a, &b),
"Non-head element of same list");
KUNIT_EXPECT_FALSE_MSG(test, list_is_head(&a, &c),
"Head element of different list");
}
static void list_test_list_is_first(struct kunit *test)
{
struct list_head a, b;
LIST_HEAD(list);
list_add_tail(&a, &list);
list_add_tail(&b, &list);
KUNIT_EXPECT_TRUE(test, list_is_first(&a, &list));
KUNIT_EXPECT_FALSE(test, list_is_first(&b, &list));
}
static void list_test_list_is_last(struct kunit *test)
{
struct list_head a, b;
LIST_HEAD(list);
list_add_tail(&a, &list);
list_add_tail(&b, &list);
KUNIT_EXPECT_FALSE(test, list_is_last(&a, &list));
KUNIT_EXPECT_TRUE(test, list_is_last(&b, &list));
}
static void list_test_list_empty(struct kunit *test)
{
struct list_head a;
LIST_HEAD(list1);
LIST_HEAD(list2);
list_add_tail(&a, &list1);
KUNIT_EXPECT_FALSE(test, list_empty(&list1));
KUNIT_EXPECT_TRUE(test, list_empty(&list2));
}
static void list_test_list_empty_careful(struct kunit *test)
{
/* This test doesn't check correctness under concurrent access */
struct list_head a;
LIST_HEAD(list1);
LIST_HEAD(list2);
list_add_tail(&a, &list1);
KUNIT_EXPECT_FALSE(test, list_empty_careful(&list1));
KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
}
static void list_test_list_rotate_left(struct kunit *test)
{
struct list_head a, b;
LIST_HEAD(list);
list_add_tail(&a, &list);
list_add_tail(&b, &list);
/* before: [list] -> a -> b */
list_rotate_left(&list);
/* after: [list] -> b -> a */
KUNIT_EXPECT_PTR_EQ(test, list.next, &b);
KUNIT_EXPECT_PTR_EQ(test, b.prev, &list);
KUNIT_EXPECT_PTR_EQ(test, b.next, &a);
}
static void list_test_list_rotate_to_front(struct kunit *test)
{
struct list_head a, b, c, d;
struct list_head *list_values[] = { &c, &d, &a, &b };
struct list_head *ptr;
LIST_HEAD(list);
int i = 0;
list_add_tail(&a, &list);
list_add_tail(&b, &list);
list_add_tail(&c, &list);
list_add_tail(&d, &list);
/* before: [list] -> a -> b -> c -> d */
list_rotate_to_front(&c, &list);
/* after: [list] -> c -> d -> a -> b */
list_for_each(ptr, &list) {
KUNIT_EXPECT_PTR_EQ(test, ptr, list_values[i]);
i++;
}
KUNIT_EXPECT_EQ(test, i, 4);
}
static void list_test_list_is_singular(struct kunit *test)
{
struct list_head a, b;
LIST_HEAD(list);
/* [list] empty */
KUNIT_EXPECT_FALSE(test, list_is_singular(&list));
list_add_tail(&a, &list);
/* [list] -> a */
KUNIT_EXPECT_TRUE(test, list_is_singular(&list));
list_add_tail(&b, &list);
/* [list] -> a -> b */
KUNIT_EXPECT_FALSE(test, list_is_singular(&list));
}
static void list_test_list_cut_position(struct kunit *test)
{
struct list_head entries[3], *cur;
LIST_HEAD(list1);
LIST_HEAD(list2);
int i = 0;
list_add_tail(&entries[0], &list1);
list_add_tail(&entries[1], &list1);
list_add_tail(&entries[2], &list1);
/* before: [list1] -> entries[0] -> entries[1] -> entries[2] */
list_cut_position(&list2, &list1, &entries[1]);
/* after: [list2] -> entries[0] -> entries[1], [list1] -> entries[2] */
list_for_each(cur, &list2) {
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
i++;
}
KUNIT_EXPECT_EQ(test, i, 2);
list_for_each(cur, &list1) {
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
i++;
}
}
static void list_test_list_cut_before(struct kunit *test)
{
struct list_head entries[3], *cur;
LIST_HEAD(list1);
LIST_HEAD(list2);
int i = 0;
list_add_tail(&entries[0], &list1);
list_add_tail(&entries[1], &list1);
list_add_tail(&entries[2], &list1);
/* before: [list1] -> entries[0] -> entries[1] -> entries[2] */
list_cut_before(&list2, &list1, &entries[1]);
/* after: [list2] -> entries[0], [list1] -> entries[1] -> entries[2] */
list_for_each(cur, &list2) {
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
i++;
}
KUNIT_EXPECT_EQ(test, i, 1);
list_for_each(cur, &list1) {
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
i++;
}
}
static void list_test_list_splice(struct kunit *test)
{
struct list_head entries[5], *cur;
LIST_HEAD(list1);
LIST_HEAD(list2);
int i = 0;
list_add_tail(&entries[0], &list1);
list_add_tail(&entries[1], &list1);
list_add_tail(&entries[2], &list2);
list_add_tail(&entries[3], &list2);
list_add_tail(&entries[4], &list1);
/* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
list_splice(&list2, &entries[1]);
/* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] uninit */
list_for_each(cur, &list1) {
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
i++;
}
KUNIT_EXPECT_EQ(test, i, 5);
}
static void list_test_list_splice_tail(struct kunit *test)
{
struct list_head entries[5], *cur;
LIST_HEAD(list1);
LIST_HEAD(list2);
int i = 0;
list_add_tail(&entries[0], &list1);
list_add_tail(&entries[1], &list1);
list_add_tail(&entries[2], &list2);
list_add_tail(&entries[3], &list2);
list_add_tail(&entries[4], &list1);
/* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
list_splice_tail(&list2, &entries[4]);
/* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] uninit */
list_for_each(cur, &list1) {
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
i++;
}
KUNIT_EXPECT_EQ(test, i, 5);
}
static void list_test_list_splice_init(struct kunit *test)
{
struct list_head entries[5], *cur;
LIST_HEAD(list1);
LIST_HEAD(list2);
int i = 0;
list_add_tail(&entries[0], &list1);
list_add_tail(&entries[1], &list1);
list_add_tail(&entries[2], &list2);
list_add_tail(&entries[3], &list2);
list_add_tail(&entries[4], &list1);
/* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
list_splice_init(&list2, &entries[1]);
/* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] empty */
list_for_each(cur, &list1) {
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
i++;
}
KUNIT_EXPECT_EQ(test, i, 5);
KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
}
static void list_test_list_splice_tail_init(struct kunit *test)
{
struct list_head entries[5], *cur;
LIST_HEAD(list1);
LIST_HEAD(list2);
int i = 0;
list_add_tail(&entries[0], &list1);
list_add_tail(&entries[1], &list1);
list_add_tail(&entries[2], &list2);
list_add_tail(&entries[3], &list2);
list_add_tail(&entries[4], &list1);
/* before: [list1]->e[0]->e[1]->e[4], [list2]->e[2]->e[3] */
list_splice_tail_init(&list2, &entries[4]);
/* after: [list1]->e[0]->e[1]->e[2]->e[3]->e[4], [list2] empty */
list_for_each(cur, &list1) {
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
i++;
}
KUNIT_EXPECT_EQ(test, i, 5);
KUNIT_EXPECT_TRUE(test, list_empty_careful(&list2));
}
static void list_test_list_entry(struct kunit *test)
{
struct list_test_struct test_struct;
KUNIT_EXPECT_PTR_EQ(test, &test_struct, list_entry(&(test_struct.list),
struct list_test_struct, list));
}
static void list_test_list_entry_is_head(struct kunit *test)
{
struct list_test_struct test_struct1, test_struct2, test_struct3;
INIT_LIST_HEAD(&test_struct1.list);
INIT_LIST_HEAD(&test_struct3.list);
list_add_tail(&test_struct2.list, &test_struct1.list);
KUNIT_EXPECT_TRUE_MSG(test,
list_entry_is_head((&test_struct1), &test_struct1.list, list),
"Head element of same list");
KUNIT_EXPECT_FALSE_MSG(test,
list_entry_is_head((&test_struct2), &test_struct1.list, list),
"Non-head element of same list");
KUNIT_EXPECT_FALSE_MSG(test,
list_entry_is_head((&test_struct3), &test_struct1.list, list),
"Head element of different list");
}
static void list_test_list_first_entry(struct kunit *test)
{
struct list_test_struct test_struct1, test_struct2;
LIST_HEAD(list);
list_add_tail(&test_struct1.list, &list);
list_add_tail(&test_struct2.list, &list);
KUNIT_EXPECT_PTR_EQ(test, &test_struct1, list_first_entry(&list,
struct list_test_struct, list));
}
static void list_test_list_last_entry(struct kunit *test)
{
struct list_test_struct test_struct1, test_struct2;
LIST_HEAD(list);
list_add_tail(&test_struct1.list, &list);
list_add_tail(&test_struct2.list, &list);
KUNIT_EXPECT_PTR_EQ(test, &test_struct2, list_last_entry(&list,
struct list_test_struct, list));
}
static void list_test_list_first_entry_or_null(struct kunit *test)
{
struct list_test_struct test_struct1, test_struct2;
LIST_HEAD(list);
KUNIT_EXPECT_FALSE(test, list_first_entry_or_null(&list,
struct list_test_struct, list));
list_add_tail(&test_struct1.list, &list);
list_add_tail(&test_struct2.list, &list);
KUNIT_EXPECT_PTR_EQ(test, &test_struct1,
list_first_entry_or_null(&list,
struct list_test_struct, list));
}
static void list_test_list_next_entry(struct kunit *test)
{
struct list_test_struct test_struct1, test_struct2;
LIST_HEAD(list);
list_add_tail(&test_struct1.list, &list);
list_add_tail(&test_struct2.list, &list);
KUNIT_EXPECT_PTR_EQ(test, &test_struct2, list_next_entry(&test_struct1,
list));
}
static void list_test_list_prev_entry(struct kunit *test)
{
struct list_test_struct test_struct1, test_struct2;
LIST_HEAD(list);
list_add_tail(&test_struct1.list, &list);
list_add_tail(&test_struct2.list, &list);
KUNIT_EXPECT_PTR_EQ(test, &test_struct1, list_prev_entry(&test_struct2,
list));
}
static void list_test_list_for_each(struct kunit *test)
{
struct list_head entries[3], *cur;
LIST_HEAD(list);
int i = 0;
list_add_tail(&entries[0], &list);
list_add_tail(&entries[1], &list);
list_add_tail(&entries[2], &list);
list_for_each(cur, &list) {
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
i++;
}
KUNIT_EXPECT_EQ(test, i, 3);
}
static void list_test_list_for_each_prev(struct kunit *test)
{
struct list_head entries[3], *cur;
LIST_HEAD(list);
int i = 2;
list_add_tail(&entries[0], &list);
list_add_tail(&entries[1], &list);
list_add_tail(&entries[2], &list);
list_for_each_prev(cur, &list) {
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
i--;
}
KUNIT_EXPECT_EQ(test, i, -1);
}
static void list_test_list_for_each_safe(struct kunit *test)
{
struct list_head entries[3], *cur, *n;
LIST_HEAD(list);
int i = 0;
list_add_tail(&entries[0], &list);
list_add_tail(&entries[1], &list);
list_add_tail(&entries[2], &list);
list_for_each_safe(cur, n, &list) {
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
list_del(&entries[i]);
i++;
}
KUNIT_EXPECT_EQ(test, i, 3);
KUNIT_EXPECT_TRUE(test, list_empty(&list));
}
static void list_test_list_for_each_prev_safe(struct kunit *test)
{
struct list_head entries[3], *cur, *n;
LIST_HEAD(list);
int i = 2;
list_add_tail(&entries[0], &list);
list_add_tail(&entries[1], &list);
list_add_tail(&entries[2], &list);
list_for_each_prev_safe(cur, n, &list) {
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
list_del(&entries[i]);
i--;
}
KUNIT_EXPECT_EQ(test, i, -1);
KUNIT_EXPECT_TRUE(test, list_empty(&list));
}
static void list_test_list_for_each_entry(struct kunit *test)
{
struct list_test_struct entries[5], *cur;
LIST_HEAD(list);
int i = 0;
for (i = 0; i < 5; ++i) {
entries[i].data = i;
list_add_tail(&entries[i].list, &list);
}
i = 0;
list_for_each_entry(cur, &list, list) {
KUNIT_EXPECT_EQ(test, cur->data, i);
i++;
}
KUNIT_EXPECT_EQ(test, i, 5);
}
static void list_test_list_for_each_entry_reverse(struct kunit *test)
{
struct list_test_struct entries[5], *cur;
LIST_HEAD(list);
int i = 0;
for (i = 0; i < 5; ++i) {
entries[i].data = i;
list_add_tail(&entries[i].list, &list);
}
i = 4;
list_for_each_entry_reverse(cur, &list, list) {
KUNIT_EXPECT_EQ(test, cur->data, i);
i--;
}
KUNIT_EXPECT_EQ(test, i, -1);
}
static struct kunit_case list_test_cases[] = {
KUNIT_CASE(list_test_list_init),
KUNIT_CASE(list_test_list_add),
KUNIT_CASE(list_test_list_add_tail),
KUNIT_CASE(list_test_list_del),
KUNIT_CASE(list_test_list_replace),
KUNIT_CASE(list_test_list_replace_init),
KUNIT_CASE(list_test_list_swap),
KUNIT_CASE(list_test_list_del_init),
KUNIT_CASE(list_test_list_del_init_careful),
KUNIT_CASE(list_test_list_move),
KUNIT_CASE(list_test_list_move_tail),
KUNIT_CASE(list_test_list_bulk_move_tail),
KUNIT_CASE(list_test_list_is_head),
KUNIT_CASE(list_test_list_is_first),
KUNIT_CASE(list_test_list_is_last),
KUNIT_CASE(list_test_list_empty),
KUNIT_CASE(list_test_list_empty_careful),
KUNIT_CASE(list_test_list_rotate_left),
KUNIT_CASE(list_test_list_rotate_to_front),
KUNIT_CASE(list_test_list_is_singular),
KUNIT_CASE(list_test_list_cut_position),
KUNIT_CASE(list_test_list_cut_before),
KUNIT_CASE(list_test_list_splice),
KUNIT_CASE(list_test_list_splice_tail),
KUNIT_CASE(list_test_list_splice_init),
KUNIT_CASE(list_test_list_splice_tail_init),
KUNIT_CASE(list_test_list_entry),
KUNIT_CASE(list_test_list_entry_is_head),
KUNIT_CASE(list_test_list_first_entry),
KUNIT_CASE(list_test_list_last_entry),
KUNIT_CASE(list_test_list_first_entry_or_null),
KUNIT_CASE(list_test_list_next_entry),
KUNIT_CASE(list_test_list_prev_entry),
KUNIT_CASE(list_test_list_for_each),
KUNIT_CASE(list_test_list_for_each_prev),
KUNIT_CASE(list_test_list_for_each_safe),
KUNIT_CASE(list_test_list_for_each_prev_safe),
KUNIT_CASE(list_test_list_for_each_entry),
KUNIT_CASE(list_test_list_for_each_entry_reverse),
{},
};
static struct kunit_suite list_test_module = {
.name = "list-kunit-test",
.test_cases = list_test_cases,
};
struct hlist_test_struct {
int data;
struct hlist_node list;
};
static void hlist_test_init(struct kunit *test)
{
/* Test the different ways of initialising a list. */
struct hlist_head list1 = HLIST_HEAD_INIT;
struct hlist_head list2;
HLIST_HEAD(list3);
struct hlist_head *list4;
struct hlist_head *list5;
INIT_HLIST_HEAD(&list2);
list4 = kzalloc(sizeof(*list4), GFP_KERNEL | __GFP_NOFAIL);
INIT_HLIST_HEAD(list4);
list5 = kmalloc(sizeof(*list5), GFP_KERNEL | __GFP_NOFAIL);
memset(list5, 0xFF, sizeof(*list5));
INIT_HLIST_HEAD(list5);
KUNIT_EXPECT_TRUE(test, hlist_empty(&list1));
KUNIT_EXPECT_TRUE(test, hlist_empty(&list2));
KUNIT_EXPECT_TRUE(test, hlist_empty(&list3));
KUNIT_EXPECT_TRUE(test, hlist_empty(list4));
KUNIT_EXPECT_TRUE(test, hlist_empty(list5));
kfree(list4);
kfree(list5);
}
static void hlist_test_unhashed(struct kunit *test)
{
struct hlist_node a;
HLIST_HEAD(list);
INIT_HLIST_NODE(&a);
/* is unhashed by default */
KUNIT_EXPECT_TRUE(test, hlist_unhashed(&a));
hlist_add_head(&a, &list);
/* is hashed once added to list */
KUNIT_EXPECT_FALSE(test, hlist_unhashed(&a));
hlist_del_init(&a);
/* is again unhashed after del_init */
KUNIT_EXPECT_TRUE(test, hlist_unhashed(&a));
}
/* Doesn't test concurrency guarantees */
static void hlist_test_unhashed_lockless(struct kunit *test)
{
struct hlist_node a;
HLIST_HEAD(list);
INIT_HLIST_NODE(&a);
/* is unhashed by default */
KUNIT_EXPECT_TRUE(test, hlist_unhashed_lockless(&a));
hlist_add_head(&a, &list);
/* is hashed once added to list */
KUNIT_EXPECT_FALSE(test, hlist_unhashed_lockless(&a));
hlist_del_init(&a);
/* is again unhashed after del_init */
KUNIT_EXPECT_TRUE(test, hlist_unhashed_lockless(&a));
}
static void hlist_test_del(struct kunit *test)
{
struct hlist_node a, b;
HLIST_HEAD(list);
hlist_add_head(&a, &list);
hlist_add_behind(&b, &a);
/* before: [list] -> a -> b */
hlist_del(&a);
/* now: [list] -> b */
KUNIT_EXPECT_PTR_EQ(test, list.first, &b);
KUNIT_EXPECT_PTR_EQ(test, b.pprev, &list.first);
}
static void hlist_test_del_init(struct kunit *test)
{
struct hlist_node a, b;
HLIST_HEAD(list);
hlist_add_head(&a, &list);
hlist_add_behind(&b, &a);
/* before: [list] -> a -> b */
hlist_del_init(&a);
/* now: [list] -> b */
KUNIT_EXPECT_PTR_EQ(test, list.first, &b);
KUNIT_EXPECT_PTR_EQ(test, b.pprev, &list.first);
/* a is now initialised */
KUNIT_EXPECT_PTR_EQ(test, a.next, NULL);
KUNIT_EXPECT_PTR_EQ(test, a.pprev, NULL);
}
/* Tests all three hlist_add_* functions */
static void hlist_test_add(struct kunit *test)
{
struct hlist_node a, b, c, d;
HLIST_HEAD(list);
hlist_add_head(&a, &list);
hlist_add_head(&b, &list);
hlist_add_before(&c, &a);
hlist_add_behind(&d, &a);
/* should be [list] -> b -> c -> a -> d */
KUNIT_EXPECT_PTR_EQ(test, list.first, &b);
KUNIT_EXPECT_PTR_EQ(test, c.pprev, &(b.next));
KUNIT_EXPECT_PTR_EQ(test, b.next, &c);
KUNIT_EXPECT_PTR_EQ(test, a.pprev, &(c.next));
KUNIT_EXPECT_PTR_EQ(test, c.next, &a);
KUNIT_EXPECT_PTR_EQ(test, d.pprev, &(a.next));
KUNIT_EXPECT_PTR_EQ(test, a.next, &d);
}
/* Tests both hlist_fake() and hlist_add_fake() */
static void hlist_test_fake(struct kunit *test)
{
struct hlist_node a;
INIT_HLIST_NODE(&a);
/* not fake after init */
KUNIT_EXPECT_FALSE(test, hlist_fake(&a));
hlist_add_fake(&a);
/* is now fake */
KUNIT_EXPECT_TRUE(test, hlist_fake(&a));
}
static void hlist_test_is_singular_node(struct kunit *test)
{
struct hlist_node a, b;
HLIST_HEAD(list);
INIT_HLIST_NODE(&a);
KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&a, &list));
hlist_add_head(&a, &list);
KUNIT_EXPECT_TRUE(test, hlist_is_singular_node(&a, &list));
hlist_add_head(&b, &list);
KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&a, &list));
KUNIT_EXPECT_FALSE(test, hlist_is_singular_node(&b, &list));
}
static void hlist_test_empty(struct kunit *test)
{
struct hlist_node a;
HLIST_HEAD(list);
/* list starts off empty */
KUNIT_EXPECT_TRUE(test, hlist_empty(&list));
hlist_add_head(&a, &list);
/* list is no longer empty */
KUNIT_EXPECT_FALSE(test, hlist_empty(&list));
}
static void hlist_test_move_list(struct kunit *test)
{
struct hlist_node a;
HLIST_HEAD(list1);
HLIST_HEAD(list2);
hlist_add_head(&a, &list1);
KUNIT_EXPECT_FALSE(test, hlist_empty(&list1));
KUNIT_EXPECT_TRUE(test, hlist_empty(&list2));
hlist_move_list(&list1, &list2);
KUNIT_EXPECT_TRUE(test, hlist_empty(&list1));
KUNIT_EXPECT_FALSE(test, hlist_empty(&list2));
}
static void hlist_test_entry(struct kunit *test)
{
struct hlist_test_struct test_struct;
KUNIT_EXPECT_PTR_EQ(test, &test_struct,
hlist_entry(&(test_struct.list),
struct hlist_test_struct, list));
}
static void hlist_test_entry_safe(struct kunit *test)
{
struct hlist_test_struct test_struct;
KUNIT_EXPECT_PTR_EQ(test, &test_struct,
hlist_entry_safe(&(test_struct.list),
struct hlist_test_struct, list));
KUNIT_EXPECT_PTR_EQ(test, NULL,
hlist_entry_safe((struct hlist_node *)NULL,
struct hlist_test_struct, list));
}
static void hlist_test_for_each(struct kunit *test)
{
struct hlist_node entries[3], *cur;
HLIST_HEAD(list);
int i = 0;
hlist_add_head(&entries[0], &list);
hlist_add_behind(&entries[1], &entries[0]);
hlist_add_behind(&entries[2], &entries[1]);
hlist_for_each(cur, &list) {
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
i++;
}
KUNIT_EXPECT_EQ(test, i, 3);
}
static void hlist_test_for_each_safe(struct kunit *test)
{
struct hlist_node entries[3], *cur, *n;
HLIST_HEAD(list);
int i = 0;
hlist_add_head(&entries[0], &list);
hlist_add_behind(&entries[1], &entries[0]);
hlist_add_behind(&entries[2], &entries[1]);
hlist_for_each_safe(cur, n, &list) {
KUNIT_EXPECT_PTR_EQ(test, cur, &entries[i]);
hlist_del(&entries[i]);
i++;
}
KUNIT_EXPECT_EQ(test, i, 3);
KUNIT_EXPECT_TRUE(test, hlist_empty(&list));
}
static void hlist_test_for_each_entry(struct kunit *test)
{
struct hlist_test_struct entries[5], *cur;
HLIST_HEAD(list);
int i = 0;
entries[0].data = 0;
hlist_add_head(&entries[0].list, &list);
for (i = 1; i < 5; ++i) {
entries[i].data = i;
hlist_add_behind(&entries[i].list, &entries[i-1].list);
}
i = 0;
hlist_for_each_entry(cur, &list, list) {
KUNIT_EXPECT_EQ(test, cur->data, i);
i++;
}
KUNIT_EXPECT_EQ(test, i, 5);
}
static void hlist_test_for_each_entry_continue(struct kunit *test)
{
struct hlist_test_struct entries[5], *cur;
HLIST_HEAD(list);
int i = 0;
entries[0].data = 0;
hlist_add_head(&entries[0].list, &list);
for (i = 1; i < 5; ++i) {
entries[i].data = i;
hlist_add_behind(&entries[i].list, &entries[i-1].list);
}
/* We skip the first (zero-th) entry. */
i = 1;
cur = &entries[0];
hlist_for_each_entry_continue(cur, list) {
KUNIT_EXPECT_EQ(test, cur->data, i);
/* Stamp over the entry. */
cur->data = 42;
i++;
}
KUNIT_EXPECT_EQ(test, i, 5);
/* The first entry was not visited. */
KUNIT_EXPECT_EQ(test, entries[0].data, 0);
/* The second (and presumably the others) were. */
KUNIT_EXPECT_EQ(test, entries[1].data, 42);
}
static void hlist_test_for_each_entry_from(struct kunit *test)
{
struct hlist_test_struct entries[5], *cur;
HLIST_HEAD(list);
int i = 0;
entries[0].data = 0;
hlist_add_head(&entries[0].list, &list);
for (i = 1; i < 5; ++i) {
entries[i].data = i;
hlist_add_behind(&entries[i].list, &entries[i-1].list);
}
i = 0;
cur = &entries[0];
hlist_for_each_entry_from(cur, list) {
KUNIT_EXPECT_EQ(test, cur->data, i);
/* Stamp over the entry. */
cur->data = 42;
i++;
}
KUNIT_EXPECT_EQ(test, i, 5);
/* The first entry was visited. */
KUNIT_EXPECT_EQ(test, entries[0].data, 42);
}
static void hlist_test_for_each_entry_safe(struct kunit *test)
{
struct hlist_test_struct entries[5], *cur;
struct hlist_node *tmp_node;
HLIST_HEAD(list);
int i = 0;
entries[0].data = 0;
hlist_add_head(&entries[0].list, &list);
for (i = 1; i < 5; ++i) {
entries[i].data = i;
hlist_add_behind(&entries[i].list, &entries[i-1].list);
}
i = 0;
hlist_for_each_entry_safe(cur, tmp_node, &list, list) {
KUNIT_EXPECT_EQ(test, cur->data, i);
hlist_del(&cur->list);
i++;
}
KUNIT_EXPECT_EQ(test, i, 5);
KUNIT_EXPECT_TRUE(test, hlist_empty(&list));
}
static struct kunit_case hlist_test_cases[] = {
KUNIT_CASE(hlist_test_init),
KUNIT_CASE(hlist_test_unhashed),
KUNIT_CASE(hlist_test_unhashed_lockless),
KUNIT_CASE(hlist_test_del),
KUNIT_CASE(hlist_test_del_init),
KUNIT_CASE(hlist_test_add),
KUNIT_CASE(hlist_test_fake),
KUNIT_CASE(hlist_test_is_singular_node),
KUNIT_CASE(hlist_test_empty),
KUNIT_CASE(hlist_test_move_list),
KUNIT_CASE(hlist_test_entry),
KUNIT_CASE(hlist_test_entry_safe),
KUNIT_CASE(hlist_test_for_each),
KUNIT_CASE(hlist_test_for_each_safe),
KUNIT_CASE(hlist_test_for_each_entry),
KUNIT_CASE(hlist_test_for_each_entry_continue),
KUNIT_CASE(hlist_test_for_each_entry_from),
KUNIT_CASE(hlist_test_for_each_entry_safe),
{},
};
static struct kunit_suite hlist_test_module = {
.name = "hlist",
.test_cases = hlist_test_cases,
};
struct klist_test_struct {
int data;
struct klist klist;
struct klist_node klist_node;
};
static int node_count;
static struct klist_node *last_node;
static void check_node(struct klist_node *node_ptr)
{
node_count++;
last_node = node_ptr;
}
static void check_delete_node(struct klist_node *node_ptr)
{
node_count--;
last_node = node_ptr;
}
static void klist_test_add_tail(struct kunit *test)
{
struct klist_node a, b;
struct klist mylist;
struct klist_iter i;
node_count = 0;
klist_init(&mylist, &check_node, NULL);
klist_add_tail(&a, &mylist);
KUNIT_EXPECT_EQ(test, node_count, 1);
KUNIT_EXPECT_PTR_EQ(test, last_node, &a);
klist_add_tail(&b, &mylist);
KUNIT_EXPECT_EQ(test, node_count, 2);
KUNIT_EXPECT_PTR_EQ(test, last_node, &b);
/* should be [list] -> a -> b */
klist_iter_init(&mylist, &i);
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
KUNIT_EXPECT_NULL(test, klist_next(&i));
klist_iter_exit(&i);
}
static void klist_test_add_head(struct kunit *test)
{
struct klist_node a, b;
struct klist mylist;
struct klist_iter i;
node_count = 0;
klist_init(&mylist, &check_node, NULL);
klist_add_head(&a, &mylist);
KUNIT_EXPECT_EQ(test, node_count, 1);
KUNIT_EXPECT_PTR_EQ(test, last_node, &a);
klist_add_head(&b, &mylist);
KUNIT_EXPECT_EQ(test, node_count, 2);
KUNIT_EXPECT_PTR_EQ(test, last_node, &b);
/* should be [list] -> b -> a */
klist_iter_init(&mylist, &i);
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
KUNIT_EXPECT_NULL(test, klist_next(&i));
klist_iter_exit(&i);
}
static void klist_test_add_behind(struct kunit *test)
{
struct klist_node a, b, c, d;
struct klist mylist;
struct klist_iter i;
node_count = 0;
klist_init(&mylist, &check_node, NULL);
klist_add_head(&a, &mylist);
klist_add_head(&b, &mylist);
klist_add_behind(&c, &a);
KUNIT_EXPECT_EQ(test, node_count, 3);
KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
klist_add_behind(&d, &b);
KUNIT_EXPECT_EQ(test, node_count, 4);
KUNIT_EXPECT_PTR_EQ(test, last_node, &d);
klist_iter_init(&mylist, &i);
/* should be [list] -> b -> d -> a -> c*/
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &c);
KUNIT_EXPECT_NULL(test, klist_next(&i));
klist_iter_exit(&i);
}
static void klist_test_add_before(struct kunit *test)
{
struct klist_node a, b, c, d;
struct klist mylist;
struct klist_iter i;
node_count = 0;
klist_init(&mylist, &check_node, NULL);
klist_add_head(&a, &mylist);
klist_add_head(&b, &mylist);
klist_add_before(&c, &a);
KUNIT_EXPECT_EQ(test, node_count, 3);
KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
klist_add_before(&d, &b);
KUNIT_EXPECT_EQ(test, node_count, 4);
KUNIT_EXPECT_PTR_EQ(test, last_node, &d);
klist_iter_init(&mylist, &i);
/* should be [list] -> d -> b -> c -> a */
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &c);
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
KUNIT_EXPECT_NULL(test, klist_next(&i));
klist_iter_exit(&i);
}
/*
* Verify that klist_del() delays the deletion of a node until there
* are no other references to it
*/
static void klist_test_del_refcount_greater_than_zero(struct kunit *test)
{
struct klist_node a, b, c, d;
struct klist mylist;
struct klist_iter i;
node_count = 0;
klist_init(&mylist, &check_node, &check_delete_node);
/* Add nodes a,b,c,d to the list*/
klist_add_tail(&a, &mylist);
klist_add_tail(&b, &mylist);
klist_add_tail(&c, &mylist);
klist_add_tail(&d, &mylist);
klist_iter_init(&mylist, &i);
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
/* Advance the iterator to point to node c*/
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &c);
/* Try to delete node c while there is a reference to it*/
klist_del(&c);
/*
* Verify that node c is still attached to the list even after being
* deleted. Since the iterator still points to c, the reference count is not
* decreased to 0
*/
KUNIT_EXPECT_TRUE(test, klist_node_attached(&c));
/* Check that node c has not been removed yet*/
KUNIT_EXPECT_EQ(test, node_count, 4);
KUNIT_EXPECT_PTR_EQ(test, last_node, &d);
klist_iter_exit(&i);
/*
* Since the iterator is no longer pointing to node c, node c is removed
* from the list
*/
KUNIT_EXPECT_EQ(test, node_count, 3);
KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
}
/*
* Verify that klist_del() deletes a node immediately when there are no
* other references to it.
*/
static void klist_test_del_refcount_zero(struct kunit *test)
{
struct klist_node a, b, c, d;
struct klist mylist;
struct klist_iter i;
node_count = 0;
klist_init(&mylist, &check_node, &check_delete_node);
/* Add nodes a,b,c,d to the list*/
klist_add_tail(&a, &mylist);
klist_add_tail(&b, &mylist);
klist_add_tail(&c, &mylist);
klist_add_tail(&d, &mylist);
/* Delete node c*/
klist_del(&c);
/* Check that node c is deleted from the list*/
KUNIT_EXPECT_EQ(test, node_count, 3);
KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
/* Should be [list] -> a -> b -> d*/
klist_iter_init(&mylist, &i);
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
KUNIT_EXPECT_NULL(test, klist_next(&i));
klist_iter_exit(&i);
}
static void klist_test_remove(struct kunit *test)
{
/* This test doesn't check correctness under concurrent access */
struct klist_node a, b, c, d;
struct klist mylist;
struct klist_iter i;
node_count = 0;
klist_init(&mylist, &check_node, &check_delete_node);
/* Add nodes a,b,c,d to the list*/
klist_add_tail(&a, &mylist);
klist_add_tail(&b, &mylist);
klist_add_tail(&c, &mylist);
klist_add_tail(&d, &mylist);
/* Delete node c*/
klist_remove(&c);
/* Check the nodes in the list*/
KUNIT_EXPECT_EQ(test, node_count, 3);
KUNIT_EXPECT_PTR_EQ(test, last_node, &c);
/* should be [list] -> a -> b -> d*/
klist_iter_init(&mylist, &i);
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &a);
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &b);
KUNIT_EXPECT_PTR_EQ(test, klist_next(&i), &d);
KUNIT_EXPECT_NULL(test, klist_next(&i));
klist_iter_exit(&i);
}
static void klist_test_node_attached(struct kunit *test)
{
struct klist_node a = {};
struct klist mylist;
klist_init(&mylist, NULL, NULL);
KUNIT_EXPECT_FALSE(test, klist_node_attached(&a));
klist_add_head(&a, &mylist);
KUNIT_EXPECT_TRUE(test, klist_node_attached(&a));
klist_del(&a);
KUNIT_EXPECT_FALSE(test, klist_node_attached(&a));
}
static struct kunit_case klist_test_cases[] = {
KUNIT_CASE(klist_test_add_tail),
KUNIT_CASE(klist_test_add_head),
KUNIT_CASE(klist_test_add_behind),
KUNIT_CASE(klist_test_add_before),
KUNIT_CASE(klist_test_del_refcount_greater_than_zero),
KUNIT_CASE(klist_test_del_refcount_zero),
KUNIT_CASE(klist_test_remove),
KUNIT_CASE(klist_test_node_attached),
{},
};
static struct kunit_suite klist_test_module = {
.name = "klist",
.test_cases = klist_test_cases,
};
kunit_test_suites(&list_test_module, &hlist_test_module, &klist_test_module);
MODULE_LICENSE("GPL v2");
| linux-master | lib/list-test.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rbtree_augmented.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <asm/timex.h>
#define __param(type, name, init, msg) \
static type name = init; \
module_param(name, type, 0444); \
MODULE_PARM_DESC(name, msg);
__param(int, nnodes, 100, "Number of nodes in the rb-tree");
__param(int, perf_loops, 1000, "Number of iterations modifying the rb-tree");
__param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree");
struct test_node {
u32 key;
struct rb_node rb;
/* following fields used for testing augmented rbtree functionality */
u32 val;
u32 augmented;
};
static struct rb_root_cached root = RB_ROOT_CACHED;
static struct test_node *nodes = NULL;
static struct rnd_state rnd;
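/*
 * The insert helpers below follow the standard rbtree idiom: walk down
 * from the root comparing keys, remember the parent and which child link
 * was taken, then splice the new node in with rb_link_node() and let
 * rb_insert_color() rebalance. A lookup retraces the same descent; a
 * minimal sketch (illustrative only, not used by this test):
 *
 *	static struct test_node *search(struct rb_root *root, u32 key)
 *	{
 *		struct rb_node *n = root->rb_node;
 *
 *		while (n) {
 *			struct test_node *t = rb_entry(n, struct test_node, rb);
 *
 *			if (key < t->key)
 *				n = n->rb_left;
 *			else if (key > t->key)
 *				n = n->rb_right;
 *			else
 *				return t;
 *		}
 *		return NULL;
 *	}
 */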
static void insert(struct test_node *node, struct rb_root_cached *root)
{
struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;
u32 key = node->key;
while (*new) {
parent = *new;
if (key < rb_entry(parent, struct test_node, rb)->key)
new = &parent->rb_left;
else
new = &parent->rb_right;
}
rb_link_node(&node->rb, parent, new);
rb_insert_color(&node->rb, &root->rb_root);
}
static void insert_cached(struct test_node *node, struct rb_root_cached *root)
{
struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;
u32 key = node->key;
bool leftmost = true;
while (*new) {
parent = *new;
if (key < rb_entry(parent, struct test_node, rb)->key)
new = &parent->rb_left;
else {
new = &parent->rb_right;
leftmost = false;
}
}
rb_link_node(&node->rb, parent, new);
rb_insert_color_cached(&node->rb, root, leftmost);
}
static inline void erase(struct test_node *node, struct rb_root_cached *root)
{
rb_erase(&node->rb, &root->rb_root);
}
static inline void erase_cached(struct test_node *node, struct rb_root_cached *root)
{
rb_erase_cached(&node->rb, root);
}
#define NODE_VAL(node) ((node)->val)
RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
struct test_node, rb, u32, augmented, NODE_VAL)
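/*
 * RB_DECLARE_CALLBACKS_MAX generates the propagate/copy/rotate callbacks
 * (augment_callbacks) that keep ->augmented equal to the maximum ->val
 * over each node's subtree; check_augmented() below verifies exactly that
 * invariant.
 */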
static void insert_augmented(struct test_node *node,
struct rb_root_cached *root)
{
struct rb_node **new = &root->rb_root.rb_node, *rb_parent = NULL;
u32 key = node->key;
u32 val = node->val;
struct test_node *parent;
while (*new) {
rb_parent = *new;
parent = rb_entry(rb_parent, struct test_node, rb);
if (parent->augmented < val)
parent->augmented = val;
if (key < parent->key)
new = &parent->rb.rb_left;
else
new = &parent->rb.rb_right;
}
node->augmented = val;
rb_link_node(&node->rb, rb_parent, new);
rb_insert_augmented(&node->rb, &root->rb_root, &augment_callbacks);
}
static void insert_augmented_cached(struct test_node *node,
struct rb_root_cached *root)
{
struct rb_node **new = &root->rb_root.rb_node, *rb_parent = NULL;
u32 key = node->key;
u32 val = node->val;
struct test_node *parent;
bool leftmost = true;
while (*new) {
rb_parent = *new;
parent = rb_entry(rb_parent, struct test_node, rb);
if (parent->augmented < val)
parent->augmented = val;
if (key < parent->key)
new = &parent->rb.rb_left;
else {
new = &parent->rb.rb_right;
leftmost = false;
}
}
node->augmented = val;
rb_link_node(&node->rb, rb_parent, new);
rb_insert_augmented_cached(&node->rb, root,
leftmost, &augment_callbacks);
}
static void erase_augmented(struct test_node *node, struct rb_root_cached *root)
{
rb_erase_augmented(&node->rb, &root->rb_root, &augment_callbacks);
}
static void erase_augmented_cached(struct test_node *node,
struct rb_root_cached *root)
{
rb_erase_augmented_cached(&node->rb, root, &augment_callbacks);
}
static void init(void)
{
int i;
for (i = 0; i < nnodes; i++) {
nodes[i].key = prandom_u32_state(&rnd);
nodes[i].val = prandom_u32_state(&rnd);
}
}
static bool is_red(struct rb_node *rb)
{
return !(rb->__rb_parent_color & 1);
}
static int black_path_count(struct rb_node *rb)
{
int count;
for (count = 0; rb; rb = rb_parent(rb))
count += !is_red(rb);
return count;
}
static void check_postorder_foreach(int nr_nodes)
{
struct test_node *cur, *n;
int count = 0;
rbtree_postorder_for_each_entry_safe(cur, n, &root.rb_root, rb)
count++;
WARN_ON_ONCE(count != nr_nodes);
}
static void check_postorder(int nr_nodes)
{
struct rb_node *rb;
int count = 0;
for (rb = rb_first_postorder(&root.rb_root); rb; rb = rb_next_postorder(rb))
count++;
WARN_ON_ONCE(count != nr_nodes);
}
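/*
 * check() verifies the red-black invariants on an inorder walk: keys are
 * non-decreasing, no red node has a red parent, every node that is missing
 * a child sees the same number of black nodes on its path to the root, and
 * the node count is at least 2^(black height) - 1.
 */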
static void check(int nr_nodes)
{
struct rb_node *rb;
int count = 0, blacks = 0;
u32 prev_key = 0;
for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
struct test_node *node = rb_entry(rb, struct test_node, rb);
WARN_ON_ONCE(node->key < prev_key);
WARN_ON_ONCE(is_red(rb) &&
(!rb_parent(rb) || is_red(rb_parent(rb))));
if (!count)
blacks = black_path_count(rb);
else
WARN_ON_ONCE((!rb->rb_left || !rb->rb_right) &&
blacks != black_path_count(rb));
prev_key = node->key;
count++;
}
WARN_ON_ONCE(count != nr_nodes);
WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root.rb_root))) - 1);
check_postorder(nr_nodes);
check_postorder_foreach(nr_nodes);
}
static void check_augmented(int nr_nodes)
{
struct rb_node *rb;
check(nr_nodes);
for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
struct test_node *node = rb_entry(rb, struct test_node, rb);
u32 subtree, max = node->val;
if (node->rb.rb_left) {
subtree = rb_entry(node->rb.rb_left, struct test_node,
rb)->augmented;
if (max < subtree)
max = subtree;
}
if (node->rb.rb_right) {
subtree = rb_entry(node->rb.rb_right, struct test_node,
rb)->augmented;
if (max < subtree)
max = subtree;
}
WARN_ON_ONCE(node->augmented != max);
}
}
static int __init rbtree_test_init(void)
{
int i, j;
cycles_t time1, time2, time;
struct rb_node *node;
nodes = kmalloc_array(nnodes, sizeof(*nodes), GFP_KERNEL);
if (!nodes)
return -ENOMEM;
printk(KERN_ALERT "rbtree testing");
prandom_seed_state(&rnd, 3141592653589793238ULL);
init();
time1 = get_cycles();
for (i = 0; i < perf_loops; i++) {
for (j = 0; j < nnodes; j++)
insert(nodes + j, &root);
for (j = 0; j < nnodes; j++)
erase(nodes + j, &root);
}
time2 = get_cycles();
time = time2 - time1;
time = div_u64(time, perf_loops);
printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n",
(unsigned long long)time);
time1 = get_cycles();
for (i = 0; i < perf_loops; i++) {
for (j = 0; j < nnodes; j++)
insert_cached(nodes + j, &root);
for (j = 0; j < nnodes; j++)
erase_cached(nodes + j, &root);
}
time2 = get_cycles();
time = time2 - time1;
time = div_u64(time, perf_loops);
printk(" -> test 2 (latency of nnodes cached insert+delete): %llu cycles\n",
(unsigned long long)time);
for (i = 0; i < nnodes; i++)
insert(nodes + i, &root);
time1 = get_cycles();
for (i = 0; i < perf_loops; i++) {
for (node = rb_first(&root.rb_root); node; node = rb_next(node))
;
}
time2 = get_cycles();
time = time2 - time1;
time = div_u64(time, perf_loops);
printk(" -> test 3 (latency of inorder traversal): %llu cycles\n",
(unsigned long long)time);
time1 = get_cycles();
for (i = 0; i < perf_loops; i++)
node = rb_first(&root.rb_root);
time2 = get_cycles();
time = time2 - time1;
time = div_u64(time, perf_loops);
printk(" -> test 4 (latency to fetch first node)\n");
printk(" non-cached: %llu cycles\n", (unsigned long long)time);
time1 = get_cycles();
for (i = 0; i < perf_loops; i++)
node = rb_first_cached(&root);
time2 = get_cycles();
time = time2 - time1;
time = div_u64(time, perf_loops);
printk(" cached: %llu cycles\n", (unsigned long long)time);
for (i = 0; i < nnodes; i++)
erase(nodes + i, &root);
/* run checks */
for (i = 0; i < check_loops; i++) {
init();
for (j = 0; j < nnodes; j++) {
check(j);
insert(nodes + j, &root);
}
for (j = 0; j < nnodes; j++) {
check(nnodes - j);
erase(nodes + j, &root);
}
check(0);
}
printk(KERN_ALERT "augmented rbtree testing");
init();
time1 = get_cycles();
for (i = 0; i < perf_loops; i++) {
for (j = 0; j < nnodes; j++)
insert_augmented(nodes + j, &root);
for (j = 0; j < nnodes; j++)
erase_augmented(nodes + j, &root);
}
time2 = get_cycles();
time = time2 - time1;
time = div_u64(time, perf_loops);
printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n", (unsigned long long)time);
time1 = get_cycles();
for (i = 0; i < perf_loops; i++) {
for (j = 0; j < nnodes; j++)
insert_augmented_cached(nodes + j, &root);
for (j = 0; j < nnodes; j++)
erase_augmented_cached(nodes + j, &root);
}
time2 = get_cycles();
time = time2 - time1;
time = div_u64(time, perf_loops);
printk(" -> test 2 (latency of nnodes cached insert+delete): %llu cycles\n", (unsigned long long)time);
for (i = 0; i < check_loops; i++) {
init();
for (j = 0; j < nnodes; j++) {
check_augmented(j);
insert_augmented(nodes + j, &root);
}
for (j = 0; j < nnodes; j++) {
check_augmented(nnodes - j);
erase_augmented(nodes + j, &root);
}
check_augmented(0);
}
kfree(nodes);
return -EAGAIN; /* Failing the init unloads the module right away */
}
static void __exit rbtree_test_exit(void)
{
printk(KERN_ALERT "test exit\n");
}
module_init(rbtree_test_init)
module_exit(rbtree_test_exit)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michel Lespinasse");
MODULE_DESCRIPTION("Red Black Tree test");
| linux-master | lib/rbtree_test.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* test_kprobes.c - simple sanity test for *probes
*
* Copyright IBM Corp. 2008
*/
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#include <kunit/test.h>
#define div_factor 3
static u32 rand1, preh_val, posth_val;
static u32 (*target)(u32 value);
static u32 (*recursed_target)(u32 value);
static u32 (*target2)(u32 value);
static struct kunit *current_test;
static unsigned long (*internal_target)(void);
static unsigned long (*stacktrace_target)(void);
static unsigned long (*stacktrace_driver)(void);
static unsigned long target_return_address[2];
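/*
 * The probed functions below are always reached through the function
 * pointers above (wired up in kprobes_test_init()), so the compiler
 * cannot inline or otherwise optimize the calls away and the probed
 * addresses really are executed.
 */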
static noinline u32 kprobe_target(u32 value)
{
return (value / div_factor);
}
static noinline u32 kprobe_recursed_target(u32 value)
{
return (value / div_factor);
}
static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
KUNIT_EXPECT_FALSE(current_test, preemptible());
preh_val = recursed_target(rand1);
return 0;
}
static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
unsigned long flags)
{
u32 expval = recursed_target(rand1);
KUNIT_EXPECT_FALSE(current_test, preemptible());
KUNIT_EXPECT_EQ(current_test, preh_val, expval);
posth_val = preh_val + div_factor;
}
static struct kprobe kp = {
.symbol_name = "kprobe_target",
.pre_handler = kp_pre_handler,
.post_handler = kp_post_handler
};
static void test_kprobe(struct kunit *test)
{
current_test = test;
KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp));
target(rand1);
unregister_kprobe(&kp);
KUNIT_EXPECT_NE(test, 0, preh_val);
KUNIT_EXPECT_NE(test, 0, posth_val);
}
static noinline u32 kprobe_target2(u32 value)
{
return (value / div_factor) + 1;
}
static noinline unsigned long kprobe_stacktrace_internal_target(void)
{
if (!target_return_address[0])
target_return_address[0] = (unsigned long)__builtin_return_address(0);
return target_return_address[0];
}
static noinline unsigned long kprobe_stacktrace_target(void)
{
if (!target_return_address[1])
target_return_address[1] = (unsigned long)__builtin_return_address(0);
if (internal_target)
internal_target();
return target_return_address[1];
}
static noinline unsigned long kprobe_stacktrace_driver(void)
{
if (stacktrace_target)
stacktrace_target();
/* This prevents the function from being inlined */
return (unsigned long)__builtin_return_address(0);
}
static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs)
{
preh_val = (rand1 / div_factor) + 1;
return 0;
}
static void kp_post_handler2(struct kprobe *p, struct pt_regs *regs,
unsigned long flags)
{
KUNIT_EXPECT_EQ(current_test, preh_val, (rand1 / div_factor) + 1);
posth_val = preh_val + div_factor;
}
static struct kprobe kp2 = {
.symbol_name = "kprobe_target2",
.pre_handler = kp_pre_handler2,
.post_handler = kp_post_handler2
};
static void test_kprobes(struct kunit *test)
{
struct kprobe *kps[2] = {&kp, &kp2};
current_test = test;
/* addr and flags should be cleared for reusing the kprobe. */
kp.addr = NULL;
kp.flags = 0;
KUNIT_EXPECT_EQ(test, 0, register_kprobes(kps, 2));
preh_val = 0;
posth_val = 0;
target(rand1);
KUNIT_EXPECT_NE(test, 0, preh_val);
KUNIT_EXPECT_NE(test, 0, posth_val);
preh_val = 0;
posth_val = 0;
target2(rand1);
KUNIT_EXPECT_NE(test, 0, preh_val);
KUNIT_EXPECT_NE(test, 0, posth_val);
unregister_kprobes(kps, 2);
}
static struct kprobe kp_missed = {
.symbol_name = "kprobe_recursed_target",
.pre_handler = kp_pre_handler,
.post_handler = kp_post_handler,
};
static void test_kprobe_missed(struct kunit *test)
{
current_test = test;
preh_val = 0;
posth_val = 0;
KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp_missed));
recursed_target(rand1);
KUNIT_EXPECT_EQ(test, 2, kp_missed.nmissed);
KUNIT_EXPECT_NE(test, 0, preh_val);
KUNIT_EXPECT_NE(test, 0, posth_val);
unregister_kprobe(&kp_missed);
}
#ifdef CONFIG_KRETPROBES
static u32 krph_val;
static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
KUNIT_EXPECT_FALSE(current_test, preemptible());
krph_val = (rand1 / div_factor);
return 0;
}
static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
unsigned long ret = regs_return_value(regs);
KUNIT_EXPECT_FALSE(current_test, preemptible());
KUNIT_EXPECT_EQ(current_test, ret, rand1 / div_factor);
KUNIT_EXPECT_NE(current_test, krph_val, 0);
krph_val = rand1;
return 0;
}
static struct kretprobe rp = {
.handler = return_handler,
.entry_handler = entry_handler,
.kp.symbol_name = "kprobe_target"
};
static void test_kretprobe(struct kunit *test)
{
current_test = test;
KUNIT_EXPECT_EQ(test, 0, register_kretprobe(&rp));
target(rand1);
unregister_kretprobe(&rp);
KUNIT_EXPECT_EQ(test, krph_val, rand1);
}
static int return_handler2(struct kretprobe_instance *ri, struct pt_regs *regs)
{
unsigned long ret = regs_return_value(regs);
KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1);
KUNIT_EXPECT_NE(current_test, krph_val, 0);
krph_val = rand1;
return 0;
}
static struct kretprobe rp2 = {
.handler = return_handler2,
.entry_handler = entry_handler,
.kp.symbol_name = "kprobe_target2"
};
static void test_kretprobes(struct kunit *test)
{
struct kretprobe *rps[2] = {&rp, &rp2};
current_test = test;
/* addr and flags should be cleared for reusing the kprobe. */
rp.kp.addr = NULL;
rp.kp.flags = 0;
KUNIT_EXPECT_EQ(test, 0, register_kretprobes(rps, 2));
krph_val = 0;
target(rand1);
KUNIT_EXPECT_EQ(test, krph_val, rand1);
krph_val = 0;
target2(rand1);
KUNIT_EXPECT_EQ(test, krph_val, rand1);
unregister_kretprobes(rps, 2);
}
#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
#define STACK_BUF_SIZE 16
static unsigned long stack_buf[STACK_BUF_SIZE];
static int stacktrace_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
unsigned long retval = regs_return_value(regs);
int i, ret;
KUNIT_EXPECT_FALSE(current_test, preemptible());
KUNIT_EXPECT_EQ(current_test, retval, target_return_address[1]);
/*
* Test stacktrace inside the kretprobe handler. This will involve the
* kretprobe trampoline, but the trace must still include the correct
* return address of the target function.
*/
ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
KUNIT_EXPECT_NE(current_test, ret, 0);
for (i = 0; i < ret; i++) {
if (stack_buf[i] == target_return_address[1])
break;
}
KUNIT_EXPECT_NE(current_test, i, ret);
#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
/*
* Test stacktrace from pt_regs at the return address. Thus the stack
* trace must start from the target return address.
*/
ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
KUNIT_EXPECT_NE(current_test, ret, 0);
KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[1]);
#endif
return 0;
}
static struct kretprobe rp3 = {
.handler = stacktrace_return_handler,
.kp.symbol_name = "kprobe_stacktrace_target"
};
static void test_stacktrace_on_kretprobe(struct kunit *test)
{
unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
current_test = test;
rp3.kp.addr = NULL;
rp3.kp.flags = 0;
/*
* Run the stacktrace_driver() to record correct return address in
* stacktrace_target() and ensure stacktrace_driver() call is not
* inlined by checking the return address of stacktrace_driver()
* and the return address of this function is different.
*/
KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
KUNIT_ASSERT_EQ(test, 0, register_kretprobe(&rp3));
KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
unregister_kretprobe(&rp3);
}
static int stacktrace_internal_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
unsigned long retval = regs_return_value(regs);
int i, ret;
KUNIT_EXPECT_FALSE(current_test, preemptible());
KUNIT_EXPECT_EQ(current_test, retval, target_return_address[0]);
/*
* Test stacktrace inside the kretprobe handler for nested case.
* The unwinder will find the kretprobe_trampoline address on the
* return address, and kretprobe must solve that.
*/
ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
KUNIT_EXPECT_NE(current_test, ret, 0);
for (i = 0; i < ret - 1; i++) {
if (stack_buf[i] == target_return_address[0]) {
KUNIT_EXPECT_EQ(current_test, stack_buf[i + 1], target_return_address[1]);
break;
}
}
KUNIT_EXPECT_NE(current_test, i, ret);
#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
/* Ditto for the regs version. */
ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
KUNIT_EXPECT_NE(current_test, ret, 0);
KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[0]);
KUNIT_EXPECT_EQ(current_test, stack_buf[1], target_return_address[1]);
#endif
return 0;
}
static struct kretprobe rp4 = {
.handler = stacktrace_internal_return_handler,
.kp.symbol_name = "kprobe_stacktrace_internal_target"
};
static void test_stacktrace_on_nested_kretprobe(struct kunit *test)
{
unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
struct kretprobe *rps[2] = {&rp3, &rp4};
current_test = test;
rp3.kp.addr = NULL;
rp3.kp.flags = 0;
//KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
KUNIT_ASSERT_EQ(test, 0, register_kretprobes(rps, 2));
KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
unregister_kretprobes(rps, 2);
}
#endif /* CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE */
#endif /* CONFIG_KRETPROBES */
static int kprobes_test_init(struct kunit *test)
{
target = kprobe_target;
target2 = kprobe_target2;
recursed_target = kprobe_recursed_target;
stacktrace_target = kprobe_stacktrace_target;
internal_target = kprobe_stacktrace_internal_target;
stacktrace_driver = kprobe_stacktrace_driver;
rand1 = get_random_u32_above(div_factor);
return 0;
}
static struct kunit_case kprobes_testcases[] = {
KUNIT_CASE(test_kprobe),
KUNIT_CASE(test_kprobes),
KUNIT_CASE(test_kprobe_missed),
#ifdef CONFIG_KRETPROBES
KUNIT_CASE(test_kretprobe),
KUNIT_CASE(test_kretprobes),
#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
KUNIT_CASE(test_stacktrace_on_kretprobe),
KUNIT_CASE(test_stacktrace_on_nested_kretprobe),
#endif
#endif
{}
};
static struct kunit_suite kprobes_test_suite = {
.name = "kprobes_test",
.init = kprobes_test_init,
.test_cases = kprobes_testcases,
};
kunit_test_suites(&kprobes_test_suite);
MODULE_LICENSE("GPL");
| linux-master | lib/test_kprobes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Fast batching percpu counters.
*/
#include <linux/percpu_counter.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>
#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif
#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
static const struct debug_obj_descr percpu_counter_debug_descr;
static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
struct percpu_counter *fbc = addr;
switch (state) {
case ODEBUG_STATE_ACTIVE:
percpu_counter_destroy(fbc);
debug_object_free(fbc, &percpu_counter_debug_descr);
return true;
default:
return false;
}
}
static const struct debug_obj_descr percpu_counter_debug_descr = {
.name = "percpu_counter",
.fixup_free = percpu_counter_fixup_free,
};
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
debug_object_init(fbc, &percpu_counter_debug_descr);
debug_object_activate(fbc, &percpu_counter_debug_descr);
}
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
debug_object_deactivate(fbc, &percpu_counter_debug_descr);
debug_object_free(fbc, &percpu_counter_debug_descr);
}
#else /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
int cpu;
unsigned long flags;
raw_spin_lock_irqsave(&fbc->lock, flags);
for_each_possible_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
*pcount = 0;
}
fbc->count = amount;
raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);
/*
* local_irq_save() is needed to make the function irq safe:
* - The slow path would be ok as protected by an irq-safe spinlock.
* - this_cpu_add would be ok as it is irq-safe by definition.
* But:
* The decision slow path/fast path and the actual update must be atomic, too.
* Otherwise a call in process context could check the current values and
* decide that the fast path can be used. If an interrupt then occurs before
* the this_cpu_add(), and the interrupt handler updates this_cpu(*fbc->counters),
* the this_cpu_add() executed after the interrupt has completed can
* produce values larger than "batch" or even overflow.
*/
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
s64 count;
unsigned long flags;
local_irq_save(flags);
count = __this_cpu_read(*fbc->counters) + amount;
if (abs(count) >= batch) {
raw_spin_lock(&fbc->lock);
fbc->count += count;
__this_cpu_sub(*fbc->counters, count - amount);
raw_spin_unlock(&fbc->lock);
} else {
this_cpu_add(*fbc->counters, amount);
}
local_irq_restore(flags);
}
EXPORT_SYMBOL(percpu_counter_add_batch);
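/*
 * Typical usage (illustrative sketch only; error handling omitted). Most
 * callers use the percpu_counter_add() wrapper, which supplies the global
 * percpu_counter_batch:
 *
 *	struct percpu_counter nr_items;
 *
 *	percpu_counter_init(&nr_items, 0, GFP_KERNEL);
 *	percpu_counter_add(&nr_items, 1);		// per-CPU fast path
 *	approx = percpu_counter_read(&nr_items);	// may lag by O(batch * nr_cpus)
 *	exact = percpu_counter_sum(&nr_items);		// slow but precise
 *	percpu_counter_destroy(&nr_items);
 */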
/*
* For a percpu_counter with a big batch, the deviation of its count can
* be large, and there may be a requirement to reduce that deviation, e.g.
* when the counter's batch is decreased at runtime to get better accuracy.
* That can be achieved by running this sync function on each CPU.
*/
void percpu_counter_sync(struct percpu_counter *fbc)
{
unsigned long flags;
s64 count;
raw_spin_lock_irqsave(&fbc->lock, flags);
count = __this_cpu_read(*fbc->counters);
fbc->count += count;
__this_cpu_sub(*fbc->counters, count);
raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_sync);
/*
* Add up all the per-cpu counts, return the result. This is a more accurate
* but much slower version of percpu_counter_read_positive().
*
* We use the cpu mask of (cpu_online_mask | cpu_dying_mask) to capture sums
* from CPUs that are in the process of being taken offline. Dying cpus have
* been removed from the online mask, but may not have had the hotplug dead
* notifier called to fold the percpu count back into the global counter sum.
* By including dying CPUs in the iteration mask, we avoid this race condition
* so __percpu_counter_sum() just does the right thing when CPUs are being taken
* offline.
*/
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
s64 ret;
int cpu;
unsigned long flags;
raw_spin_lock_irqsave(&fbc->lock, flags);
ret = fbc->count;
for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
}
raw_spin_unlock_irqrestore(&fbc->lock, flags);
return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
gfp_t gfp, u32 nr_counters,
struct lock_class_key *key)
{
unsigned long flags __maybe_unused;
size_t counter_size;
s32 __percpu *counters;
u32 i;
counter_size = ALIGN(sizeof(*counters), __alignof__(*counters));
counters = __alloc_percpu_gfp(nr_counters * counter_size,
__alignof__(*counters), gfp);
if (!counters) {
fbc[0].counters = NULL;
return -ENOMEM;
}
for (i = 0; i < nr_counters; i++) {
raw_spin_lock_init(&fbc[i].lock);
lockdep_set_class(&fbc[i].lock, key);
#ifdef CONFIG_HOTPLUG_CPU
INIT_LIST_HEAD(&fbc[i].list);
#endif
fbc[i].count = amount;
fbc[i].counters = (void *)counters + (i * counter_size);
debug_percpu_counter_activate(&fbc[i]);
}
#ifdef CONFIG_HOTPLUG_CPU
spin_lock_irqsave(&percpu_counters_lock, flags);
for (i = 0; i < nr_counters; i++)
list_add(&fbc[i].list, &percpu_counters);
spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
return 0;
}
EXPORT_SYMBOL(__percpu_counter_init_many);
void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters)
{
unsigned long flags __maybe_unused;
u32 i;
if (WARN_ON_ONCE(!fbc))
return;
if (!fbc[0].counters)
return;
for (i = 0; i < nr_counters; i++)
debug_percpu_counter_deactivate(&fbc[i]);
#ifdef CONFIG_HOTPLUG_CPU
spin_lock_irqsave(&percpu_counters_lock, flags);
for (i = 0; i < nr_counters; i++)
list_del(&fbc[i].list);
spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
free_percpu(fbc[0].counters);
for (i = 0; i < nr_counters; i++)
fbc[i].counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy_many);
int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);
static int compute_batch_value(unsigned int cpu)
{
int nr = num_online_cpus();
percpu_counter_batch = max(32, nr*2);
return 0;
}
static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
struct percpu_counter *fbc;
compute_batch_value(cpu);
spin_lock_irq(&percpu_counters_lock);
list_for_each_entry(fbc, &percpu_counters, list) {
s32 *pcount;
raw_spin_lock(&fbc->lock);
pcount = per_cpu_ptr(fbc->counters, cpu);
fbc->count += *pcount;
*pcount = 0;
raw_spin_unlock(&fbc->lock);
}
spin_unlock_irq(&percpu_counters_lock);
#endif
return 0;
}
/*
* Compare counter against given value.
* Return 1 if greater, 0 if equal and -1 if less
*/
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
s64 count;
count = percpu_counter_read(fbc);
/* Check to see if rough count will be sufficient for comparison */
if (abs(count - rhs) > (batch * num_online_cpus())) {
if (count > rhs)
return 1;
else
return -1;
}
/* Need to use precise count */
count = percpu_counter_sum(fbc);
if (count > rhs)
return 1;
else if (count < rhs)
return -1;
else
return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);
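/*
 * Callers normally go through the percpu_counter_compare() wrapper (which
 * supplies percpu_counter_batch), e.g. to ask "are at least N items left?"
 * cheaply: the precise but slow percpu_counter_sum() is only taken when the
 * rough count is too close to the threshold to be conclusive.
 */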
static int __init percpu_counter_startup(void)
{
int ret;
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
compute_batch_value, NULL);
WARN_ON(ret < 0);
ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
"lib/percpu_cnt:dead", NULL,
percpu_counter_cpu_dead);
WARN_ON(ret < 0);
return 0;
}
module_init(percpu_counter_startup);
| linux-master | lib/percpu_counter.c |
// SPDX-License-Identifier: GPL-2.0
const unsigned char __clz_tab[] = {
0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8,
};
| linux-master | lib/clz_tab.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/list_sort.h>
#include <linux/list.h>
/*
* Returns a list organized in an intermediate format suited
* to chaining of merge() calls: null-terminated, no reserved or
* sentinel head node, "prev" links not maintained.
*/
__attribute__((nonnull(2,3,4)))
static struct list_head *merge(void *priv, list_cmp_func_t cmp,
struct list_head *a, struct list_head *b)
{
struct list_head *head, **tail = &head;
for (;;) {
/* if equal, take 'a' -- important for sort stability */
if (cmp(priv, a, b) <= 0) {
*tail = a;
tail = &a->next;
a = a->next;
if (!a) {
*tail = b;
break;
}
} else {
*tail = b;
tail = &b->next;
b = b->next;
if (!b) {
*tail = a;
break;
}
}
}
return head;
}
/*
* Combine final list merge with restoration of standard doubly-linked
* list structure. This approach duplicates code from merge(), but
* runs faster than the tidier alternatives of either a separate final
* prev-link restoration pass, or maintaining the prev links
* throughout.
*/
__attribute__((nonnull(2,3,4,5)))
static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
struct list_head *a, struct list_head *b)
{
struct list_head *tail = head;
u8 count = 0;
for (;;) {
/* if equal, take 'a' -- important for sort stability */
if (cmp(priv, a, b) <= 0) {
tail->next = a;
a->prev = tail;
tail = a;
a = a->next;
if (!a)
break;
} else {
tail->next = b;
b->prev = tail;
tail = b;
b = b->next;
if (!b) {
b = a;
break;
}
}
}
/* Finish linking remainder of list b on to tail */
tail->next = b;
do {
/*
* If the merge is highly unbalanced (e.g. the input is
* already sorted), this loop may run many iterations.
* Continue callbacks to the client even though no
* element comparison is needed, so the client's cmp()
* routine can invoke cond_resched() periodically.
*/
if (unlikely(!++count))
cmp(priv, b, b);
b->prev = tail;
tail = b;
b = b->next;
} while (b);
/* And the final links to make a circular doubly-linked list */
tail->next = head;
head->prev = tail;
}
/**
* list_sort - sort a list
* @priv: private data, opaque to list_sort(), passed to @cmp
* @head: the list to sort
* @cmp: the elements comparison function
*
* The comparison function @cmp must return > 0 if @a should sort after
* @b ("@a > @b" if you want an ascending sort), and <= 0 if @a should
* sort before @b *or* their original order should be preserved. It is
* always called with the element that came first in the input in @a,
* and list_sort is a stable sort, so it is not necessary to distinguish
* the @a < @b and @a == @b cases.
*
* This is compatible with two styles of @cmp function:
* - The traditional style which returns <0 / =0 / >0, or
* - Returning a boolean 0/1.
* The latter offers a chance to save a few cycles in the comparison
* (which is used by e.g. plug_ctx_cmp() in block/blk-mq.c).
*
* A good way to write a multi-word comparison is::
*
* if (a->high != b->high)
* return a->high > b->high;
* if (a->middle != b->middle)
* return a->middle > b->middle;
* return a->low > b->low;
*
*
* This mergesort is as eager as possible while always performing at least
* 2:1 balanced merges. Given two pending sublists of size 2^k, they are
* merged to a size-2^(k+1) list as soon as we have 2^k following elements.
*
* Thus, it will avoid cache thrashing as long as 3*2^k elements can
* fit into the cache. Not quite as good as a fully-eager bottom-up
* mergesort, but it does use 0.2*n fewer comparisons, so is faster in
* the common case that everything fits into L1.
*
*
* The merging is controlled by "count", the number of elements in the
* pending lists. This is beautifully simple code, but rather subtle.
*
* Each time we increment "count", we set one bit (bit k) and clear
* bits k-1 .. 0. Each time this happens (except the very first time
* for each bit, when count increments to 2^k), we merge two lists of
* size 2^k into one list of size 2^(k+1).
*
* This merge happens exactly when the count reaches an odd multiple of
* 2^k, which is when we have 2^k elements pending in smaller lists,
* so it's safe to merge away two lists of size 2^k.
*
* After this happens twice, we have created two lists of size 2^(k+1),
* which will be merged into a list of size 2^(k+2) before we create
* a third list of size 2^(k+1), so there are never more than two pending.
*
* The number of pending lists of size 2^k is determined by the
* state of bit k of "count" plus two extra pieces of information:
*
* - The state of bit k-1 (when k == 0, consider bit -1 always set), and
* - Whether the higher-order bits are zero or non-zero (i.e.
* is count >= 2^(k+1)).
*
* There are six states we distinguish. "x" represents some arbitrary
* bits, and "y" represents some arbitrary non-zero bits:
*  0:  00x: 0 pending of size 2^k;           x pending of sizes < 2^k
*  1:  01x: 0 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
*  2: x10x: 0 pending of size 2^k;     2^k + x pending of sizes < 2^k
*  3: x11x: 1 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
*  4: y00x: 1 pending of size 2^k;     2^k + x pending of sizes < 2^k
*  5: y01x: 2 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
*           (merge and loop back to state 2)
*
* We gain lists of size 2^k in the 2->3 and 4->5 transitions (because
* bit k-1 is set while the more significant bits are non-zero) and
* merge them away in the 5->2 transition. Note in particular that just
* before the 5->2 transition, all lower-order bits are 11 (state 3),
* so there is one list of each smaller size.
*
* When we reach the end of the input, we merge all the pending
* lists, from smallest to largest. If you work through cases 2 to
* 5 above, you can see that the number of elements we merge with a list
* of size 2^k varies from 2^(k-1) (cases 3 and 5 when x == 0) to
* 2^(k+1) - 1 (second merge of case 5 when x == 2^(k-1) - 1).
*/
__attribute__((nonnull(2,3)))
void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp)
{
struct list_head *list = head->next, *pending = NULL;
size_t count = 0; /* Count of pending */
if (list == head->prev) /* Zero or one elements */
return;
/* Convert to a null-terminated singly-linked list. */
head->prev->next = NULL;
/*
* Data structure invariants:
* - All lists are singly linked and null-terminated; prev
* pointers are not maintained.
* - pending is a prev-linked "list of lists" of sorted
* sublists awaiting further merging.
* - Each of the sorted sublists is power-of-two in size.
* - Sublists are sorted by size and age, smallest & newest at front.
* - There are zero to two sublists of each size.
* - A pair of pending sublists are merged as soon as the number
* of following pending elements equals their size (i.e.
* each time count reaches an odd multiple of that size).
* That ensures each later final merge will be at worst 2:1.
* - Each round consists of:
* - Merging the two sublists selected by the highest bit
* which flips when count is incremented, and
* - Adding an element from the input as a size-1 sublist.
*/
do {
size_t bits;
struct list_head **tail = &pending;
/* Find the least-significant clear bit in count */
for (bits = count; bits & 1; bits >>= 1)
tail = &(*tail)->prev;
/* Do the indicated merge */
if (likely(bits)) {
struct list_head *a = *tail, *b = a->prev;
a = merge(priv, cmp, b, a);
/* Install the merged result in place of the inputs */
a->prev = b->prev;
*tail = a;
}
/* Move one element from input list to pending */
list->prev = pending;
pending = list;
list = list->next;
pending->next = NULL;
count++;
} while (list);
/* End of input; merge together all the pending lists. */
list = pending;
pending = pending->prev;
for (;;) {
struct list_head *next = pending->prev;
if (!next)
break;
list = merge(priv, cmp, pending, list);
pending = next;
}
/* The final merge, rebuilding prev links */
merge_final(priv, cmp, head, pending, list);
}
EXPORT_SYMBOL(list_sort);
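/*
 * Minimal usage sketch (illustrative only, not part of this file): sorting
 * a list of a hypothetical struct item by its 'key' field.
 *
 *	struct item {
 *		int key;
 *		struct list_head node;
 *	};
 *
 *	static int item_cmp(void *priv, const struct list_head *a,
 *			    const struct list_head *b)
 *	{
 *		const struct item *ia = list_entry(a, struct item, node);
 *		const struct item *ib = list_entry(b, struct item, node);
 *
 *		return ia->key > ib->key;	// boolean style is sufficient
 *	}
 *
 *	list_sort(NULL, &my_list, item_cmp);
 */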
| linux-master | lib/list_sort.c |
/*
* Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin
* cleaned up code to current version of sparse and added the slicing-by-8
* algorithm to the closely similar existing slicing-by-4 algorithm.
*
* Oct 15, 2000 Matt Domsch <[email protected]>
* Nicer crc32 functions/docs submitted by [email protected]. Thanks!
* Code was from the public domain, copyright abandoned. Code was
* subsequently included in the kernel, thus was re-licensed under the
* GNU GPL v2.
*
* Oct 12, 2000 Matt Domsch <[email protected]>
* Same crc32 function was used in 5 other places in the kernel.
* I made one version, and deleted the others.
* There are various incantations of crc32(). Some use a seed of 0 or ~0.
* Some xor at the end with ~0. The generic crc32() function takes
* seed as an argument, and doesn't xor at the end. Then individual
* users can do whatever they need.
* drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0.
* fs/jffs2 uses seed 0, doesn't xor with ~0.
* fs/partitions/efi.c uses seed ~0, xor's with ~0.
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/sched.h>
#include "crc32defs.h"
/* 4096 random bytes */
static u8 const __aligned(8) test_buf[] __initconst =
{
0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
0xc9, 0x6e, 0x8b, 0xdb, 0x98, 0x6b, 0xaa, 0x60,
0xa8, 0xb5, 0xbc, 0x6c, 0xa9, 0xb1, 0x5b, 0x2c,
0xea, 0xb4, 0x92, 0x6a, 0x3f, 0x79, 0x91, 0xe4,
0xe9, 0x70, 0x51, 0x8c, 0x7f, 0x95, 0x6f, 0x1a,
0x56, 0xa1, 0x5c, 0x27, 0x03, 0x67, 0x9f, 0x3a,
0xe2, 0x31, 0x11, 0x29, 0x6b, 0x98, 0xfc, 0xc4,
0x53, 0x24, 0xc5, 0x8b, 0xce, 0x47, 0xb2, 0xb9,
0x32, 0xcb, 0xc1, 0xd0, 0x03, 0x57, 0x4e, 0xd4,
0xe9, 0x3c, 0xa1, 0x63, 0xcf, 0x12, 0x0e, 0xca,
0xe1, 0x13, 0xd1, 0x93, 0xa6, 0x88, 0x5c, 0x61,
0x5b, 0xbb, 0xf0, 0x19, 0x46, 0xb4, 0xcf, 0x9e,
0xb6, 0x6b, 0x4c, 0x3a, 0xcf, 0x60, 0xf9, 0x7a,
0x8d, 0x07, 0x63, 0xdb, 0x40, 0xe9, 0x0b, 0x6f,
0xad, 0x97, 0xf1, 0xed, 0xd0, 0x1e, 0x26, 0xfd,
0xbf, 0xb7, 0xc8, 0x04, 0x94, 0xf8, 0x8b, 0x8c,
0xf1, 0xab, 0x7a, 0xd4, 0xdd, 0xf3, 0xe8, 0x88,
0xc3, 0xed, 0x17, 0x8a, 0x9b, 0x40, 0x0d, 0x53,
0x62, 0x12, 0x03, 0x5f, 0x1b, 0x35, 0x32, 0x1f,
0xb4, 0x7b, 0x93, 0x78, 0x0d, 0xdb, 0xce, 0xa4,
0xc0, 0x47, 0xd5, 0xbf, 0x68, 0xe8, 0x5d, 0x74,
0x8f, 0x8e, 0x75, 0x1c, 0xb2, 0x4f, 0x9a, 0x60,
0xd1, 0xbe, 0x10, 0xf4, 0x5c, 0xa1, 0x53, 0x09,
0xa5, 0xe0, 0x09, 0x54, 0x85, 0x5c, 0xdc, 0x07,
0xe7, 0x21, 0x69, 0x7b, 0x8a, 0xfd, 0x90, 0xf1,
0x22, 0xd0, 0xb4, 0x36, 0x28, 0xe6, 0xb8, 0x0f,
0x39, 0xde, 0xc8, 0xf3, 0x86, 0x60, 0x34, 0xd2,
0x5e, 0xdf, 0xfd, 0xcf, 0x0f, 0xa9, 0x65, 0xf0,
0xd5, 0x4d, 0x96, 0x40, 0xe3, 0xdf, 0x3f, 0x95,
0x5a, 0x39, 0x19, 0x93, 0xf4, 0x75, 0xce, 0x22,
0x00, 0x1c, 0x93, 0xe2, 0x03, 0x66, 0xf4, 0x93,
0x73, 0x86, 0x81, 0x8e, 0x29, 0x44, 0x48, 0x86,
0x61, 0x7c, 0x48, 0xa3, 0x43, 0xd2, 0x9c, 0x8d,
0xd4, 0x95, 0xdd, 0xe1, 0x22, 0x89, 0x3a, 0x40,
0x4c, 0x1b, 0x8a, 0x04, 0xa8, 0x09, 0x69, 0x8b,
0xea, 0xc6, 0x55, 0x8e, 0x57, 0xe6, 0x64, 0x35,
0xf0, 0xc7, 0x16, 0x9f, 0x5d, 0x5e, 0x86, 0x40,
0x46, 0xbb, 0xe5, 0x45, 0x88, 0xfe, 0xc9, 0x63,
0x15, 0xfb, 0xf5, 0xbd, 0x71, 0x61, 0xeb, 0x7b,
0x78, 0x70, 0x07, 0x31, 0x03, 0x9f, 0xb2, 0xc8,
0xa7, 0xab, 0x47, 0xfd, 0xdf, 0xa0, 0x78, 0x72,
0xa4, 0x2a, 0xe4, 0xb6, 0xba, 0xc0, 0x1e, 0x86,
0x71, 0xe6, 0x3d, 0x18, 0x37, 0x70, 0xe6, 0xff,
0xe0, 0xbc, 0x0b, 0x22, 0xa0, 0x1f, 0xd3, 0xed,
0xa2, 0x55, 0x39, 0xab, 0xa8, 0x13, 0x73, 0x7c,
0x3f, 0xb2, 0xd6, 0x19, 0xac, 0xff, 0x99, 0xed,
0xe8, 0xe6, 0xa6, 0x22, 0xe3, 0x9c, 0xf1, 0x30,
0xdc, 0x01, 0x0a, 0x56, 0xfa, 0xe4, 0xc9, 0x99,
0xdd, 0xa8, 0xd8, 0xda, 0x35, 0x51, 0x73, 0xb4,
0x40, 0x86, 0x85, 0xdb, 0x5c, 0xd5, 0x85, 0x80,
0x14, 0x9c, 0xfd, 0x98, 0xa9, 0x82, 0xc5, 0x37,
0xff, 0x32, 0x5d, 0xd0, 0x0b, 0xfa, 0xdc, 0x04,
0x5e, 0x09, 0xd2, 0xca, 0x17, 0x4b, 0x1a, 0x8e,
0x15, 0xe1, 0xcc, 0x4e, 0x52, 0x88, 0x35, 0xbd,
0x48, 0xfe, 0x15, 0xa0, 0x91, 0xfd, 0x7e, 0x6c,
0x0e, 0x5d, 0x79, 0x1b, 0x81, 0x79, 0xd2, 0x09,
0x34, 0x70, 0x3d, 0x81, 0xec, 0xf6, 0x24, 0xbb,
0xfb, 0xf1, 0x7b, 0xdf, 0x54, 0xea, 0x80, 0x9b,
0xc7, 0x99, 0x9e, 0xbd, 0x16, 0x78, 0x12, 0x53,
0x5e, 0x01, 0xa7, 0x4e, 0xbd, 0x67, 0xe1, 0x9b,
0x4c, 0x0e, 0x61, 0x45, 0x97, 0xd2, 0xf0, 0x0f,
0xfe, 0x15, 0x08, 0xb7, 0x11, 0x4c, 0xe7, 0xff,
0x81, 0x53, 0xff, 0x91, 0x25, 0x38, 0x7e, 0x40,
0x94, 0xe5, 0xe0, 0xad, 0xe6, 0xd9, 0x79, 0xb6,
0x92, 0xc9, 0xfc, 0xde, 0xc3, 0x1a, 0x23, 0xbb,
0xdd, 0xc8, 0x51, 0x0c, 0x3a, 0x72, 0xfa, 0x73,
0x6f, 0xb7, 0xee, 0x61, 0x39, 0x03, 0x01, 0x3f,
0x7f, 0x94, 0x2e, 0x2e, 0xba, 0x3a, 0xbb, 0xb4,
0xfa, 0x6a, 0x17, 0xfe, 0xea, 0xef, 0x5e, 0x66,
0x97, 0x3f, 0x32, 0x3d, 0xd7, 0x3e, 0xb1, 0xf1,
0x6c, 0x14, 0x4c, 0xfd, 0x37, 0xd3, 0x38, 0x80,
0xfb, 0xde, 0xa6, 0x24, 0x1e, 0xc8, 0xca, 0x7f,
0x3a, 0x93, 0xd8, 0x8b, 0x18, 0x13, 0xb2, 0xe5,
0xe4, 0x93, 0x05, 0x53, 0x4f, 0x84, 0x66, 0xa7,
0x58, 0x5c, 0x7b, 0x86, 0x52, 0x6d, 0x0d, 0xce,
0xa4, 0x30, 0x7d, 0xb6, 0x18, 0x9f, 0xeb, 0xff,
0x22, 0xbb, 0x72, 0x29, 0xb9, 0x44, 0x0b, 0x48,
0x1e, 0x84, 0x71, 0x81, 0xe3, 0x6d, 0x73, 0x26,
0x92, 0xb4, 0x4d, 0x2a, 0x29, 0xb8, 0x1f, 0x72,
0xed, 0xd0, 0xe1, 0x64, 0x77, 0xea, 0x8e, 0x88,
0x0f, 0xef, 0x3f, 0xb1, 0x3b, 0xad, 0xf9, 0xc9,
0x8b, 0xd0, 0xac, 0xc6, 0xcc, 0xa9, 0x40, 0xcc,
0x76, 0xf6, 0x3b, 0x53, 0xb5, 0x88, 0xcb, 0xc8,
0x37, 0xf1, 0xa2, 0xba, 0x23, 0x15, 0x99, 0x09,
0xcc, 0xe7, 0x7a, 0x3b, 0x37, 0xf7, 0x58, 0xc8,
0x46, 0x8c, 0x2b, 0x2f, 0x4e, 0x0e, 0xa6, 0x5c,
0xea, 0x85, 0x55, 0xba, 0x02, 0x0e, 0x0e, 0x48,
0xbc, 0xe1, 0xb1, 0x01, 0x35, 0x79, 0x13, 0x3d,
0x1b, 0xc0, 0x53, 0x68, 0x11, 0xe7, 0x95, 0x0f,
0x9d, 0x3f, 0x4c, 0x47, 0x7b, 0x4d, 0x1c, 0xae,
0x50, 0x9b, 0xcb, 0xdd, 0x05, 0x8d, 0x9a, 0x97,
0xfd, 0x8c, 0xef, 0x0c, 0x1d, 0x67, 0x73, 0xa8,
0x28, 0x36, 0xd5, 0xb6, 0x92, 0x33, 0x40, 0x75,
0x0b, 0x51, 0xc3, 0x64, 0xba, 0x1d, 0xc2, 0xcc,
0xee, 0x7d, 0x54, 0x0f, 0x27, 0x69, 0xa7, 0x27,
0x63, 0x30, 0x29, 0xd9, 0xc8, 0x84, 0xd8, 0xdf,
0x9f, 0x68, 0x8d, 0x04, 0xca, 0xa6, 0xc5, 0xc7,
0x7a, 0x5c, 0xc8, 0xd1, 0xcb, 0x4a, 0xec, 0xd0,
0xd8, 0x20, 0x69, 0xc5, 0x17, 0xcd, 0x78, 0xc8,
0x75, 0x23, 0x30, 0x69, 0xc9, 0xd4, 0xea, 0x5c,
0x4f, 0x6b, 0x86, 0x3f, 0x8b, 0xfe, 0xee, 0x44,
0xc9, 0x7c, 0xb7, 0xdd, 0x3e, 0xe5, 0xec, 0x54,
0x03, 0x3e, 0xaa, 0x82, 0xc6, 0xdf, 0xb2, 0x38,
0x0e, 0x5d, 0xb3, 0x88, 0xd9, 0xd3, 0x69, 0x5f,
0x8f, 0x70, 0x8a, 0x7e, 0x11, 0xd9, 0x1e, 0x7b,
0x38, 0xf1, 0x42, 0x1a, 0xc0, 0x35, 0xf5, 0xc7,
0x36, 0x85, 0xf5, 0xf7, 0xb8, 0x7e, 0xc7, 0xef,
0x18, 0xf1, 0x63, 0xd6, 0x7a, 0xc6, 0xc9, 0x0e,
0x4d, 0x69, 0x4f, 0x84, 0xef, 0x26, 0x41, 0x0c,
0xec, 0xc7, 0xe0, 0x7e, 0x3c, 0x67, 0x01, 0x4c,
0x62, 0x1a, 0x20, 0x6f, 0xee, 0x47, 0x4d, 0xc0,
0x99, 0x13, 0x8d, 0x91, 0x4a, 0x26, 0xd4, 0x37,
0x28, 0x90, 0x58, 0x75, 0x66, 0x2b, 0x0a, 0xdf,
0xda, 0xee, 0x92, 0x25, 0x90, 0x62, 0x39, 0x9e,
0x44, 0x98, 0xad, 0xc1, 0x88, 0xed, 0xe4, 0xb4,
0xaf, 0xf5, 0x8c, 0x9b, 0x48, 0x4d, 0x56, 0x60,
0x97, 0x0f, 0x61, 0x59, 0x9e, 0xa6, 0x27, 0xfe,
0xc1, 0x91, 0x15, 0x38, 0xb8, 0x0f, 0xae, 0x61,
0x7d, 0x26, 0x13, 0x5a, 0x73, 0xff, 0x1c, 0xa3,
0x61, 0x04, 0x58, 0x48, 0x55, 0x44, 0x11, 0xfe,
0x15, 0xca, 0xc3, 0xbd, 0xca, 0xc5, 0xb4, 0x40,
0x5d, 0x1b, 0x7f, 0x39, 0xb5, 0x9c, 0x35, 0xec,
0x61, 0x15, 0x32, 0x32, 0xb8, 0x4e, 0x40, 0x9f,
0x17, 0x1f, 0x0a, 0x4d, 0xa9, 0x91, 0xef, 0xb7,
0xb0, 0xeb, 0xc2, 0x83, 0x9a, 0x6c, 0xd2, 0x79,
0x43, 0x78, 0x5e, 0x2f, 0xe5, 0xdd, 0x1a, 0x3c,
0x45, 0xab, 0x29, 0x40, 0x3a, 0x37, 0x5b, 0x6f,
0xd7, 0xfc, 0x48, 0x64, 0x3c, 0x49, 0xfb, 0x21,
0xbe, 0xc3, 0xff, 0x07, 0xfb, 0x17, 0xe9, 0xc9,
0x0c, 0x4c, 0x5c, 0x15, 0x9e, 0x8e, 0x22, 0x30,
0x0a, 0xde, 0x48, 0x7f, 0xdb, 0x0d, 0xd1, 0x2b,
0x87, 0x38, 0x9e, 0xcc, 0x5a, 0x01, 0x16, 0xee,
0x75, 0x49, 0x0d, 0x30, 0x01, 0x34, 0x6a, 0xb6,
0x9a, 0x5a, 0x2a, 0xec, 0xbb, 0x48, 0xac, 0xd3,
0x77, 0x83, 0xd8, 0x08, 0x86, 0x4f, 0x48, 0x09,
0x29, 0x41, 0x79, 0xa1, 0x03, 0x12, 0xc4, 0xcd,
0x90, 0x55, 0x47, 0x66, 0x74, 0x9a, 0xcc, 0x4f,
0x35, 0x8c, 0xd6, 0x98, 0xef, 0xeb, 0x45, 0xb9,
0x9a, 0x26, 0x2f, 0x39, 0xa5, 0x70, 0x6d, 0xfc,
0xb4, 0x51, 0xee, 0xf4, 0x9c, 0xe7, 0x38, 0x59,
0xad, 0xf4, 0xbc, 0x46, 0xff, 0x46, 0x8e, 0x60,
0x9c, 0xa3, 0x60, 0x1d, 0xf8, 0x26, 0x72, 0xf5,
0x72, 0x9d, 0x68, 0x80, 0x04, 0xf6, 0x0b, 0xa1,
0x0a, 0xd5, 0xa7, 0x82, 0x3a, 0x3e, 0x47, 0xa8,
0x5a, 0xde, 0x59, 0x4f, 0x7b, 0x07, 0xb3, 0xe9,
0x24, 0x19, 0x3d, 0x34, 0x05, 0xec, 0xf1, 0xab,
0x6e, 0x64, 0x8f, 0xd3, 0xe6, 0x41, 0x86, 0x80,
0x70, 0xe3, 0x8d, 0x60, 0x9c, 0x34, 0x25, 0x01,
0x07, 0x4d, 0x19, 0x41, 0x4e, 0x3d, 0x5c, 0x7e,
0xa8, 0xf5, 0xcc, 0xd5, 0x7b, 0xe2, 0x7d, 0x3d,
0x49, 0x86, 0x7d, 0x07, 0xb7, 0x10, 0xe3, 0x35,
0xb8, 0x84, 0x6d, 0x76, 0xab, 0x17, 0xc6, 0x38,
0xb4, 0xd3, 0x28, 0x57, 0xad, 0xd3, 0x88, 0x5a,
0xda, 0xea, 0xc8, 0x94, 0xcc, 0x37, 0x19, 0xac,
0x9c, 0x9f, 0x4b, 0x00, 0x15, 0xc0, 0xc8, 0xca,
0x1f, 0x15, 0xaa, 0xe0, 0xdb, 0xf9, 0x2f, 0x57,
0x1b, 0x24, 0xc7, 0x6f, 0x76, 0x29, 0xfb, 0xed,
0x25, 0x0d, 0xc0, 0xfe, 0xbd, 0x5a, 0xbf, 0x20,
0x08, 0x51, 0x05, 0xec, 0x71, 0xa3, 0xbf, 0xef,
0x5e, 0x99, 0x75, 0xdb, 0x3c, 0x5f, 0x9a, 0x8c,
0xbb, 0x19, 0x5c, 0x0e, 0x93, 0x19, 0xf8, 0x6a,
0xbc, 0xf2, 0x12, 0x54, 0x2f, 0xcb, 0x28, 0x64,
0x88, 0xb3, 0x92, 0x0d, 0x96, 0xd1, 0xa6, 0xe4,
0x1f, 0xf1, 0x4d, 0xa4, 0xab, 0x1c, 0xee, 0x54,
0xf2, 0xad, 0x29, 0x6d, 0x32, 0x37, 0xb2, 0x16,
0x77, 0x5c, 0xdc, 0x2e, 0x54, 0xec, 0x75, 0x26,
0xc6, 0x36, 0xd9, 0x17, 0x2c, 0xf1, 0x7a, 0xdc,
0x4b, 0xf1, 0xe2, 0xd9, 0x95, 0xba, 0xac, 0x87,
0xc1, 0xf3, 0x8e, 0x58, 0x08, 0xd8, 0x87, 0x60,
0xc9, 0xee, 0x6a, 0xde, 0xa4, 0xd2, 0xfc, 0x0d,
0xe5, 0x36, 0xc4, 0x5c, 0x52, 0xb3, 0x07, 0x54,
0x65, 0x24, 0xc1, 0xb1, 0xd1, 0xb1, 0x53, 0x13,
0x31, 0x79, 0x7f, 0x05, 0x76, 0xeb, 0x37, 0x59,
0x15, 0x2b, 0xd1, 0x3f, 0xac, 0x08, 0x97, 0xeb,
0x91, 0x98, 0xdf, 0x6c, 0x09, 0x0d, 0x04, 0x9f,
0xdc, 0x3b, 0x0e, 0x60, 0x68, 0x47, 0x23, 0x15,
0x16, 0xc6, 0x0b, 0x35, 0xf8, 0x77, 0xa2, 0x78,
0x50, 0xd4, 0x64, 0x22, 0x33, 0xff, 0xfb, 0x93,
0x71, 0x46, 0x50, 0x39, 0x1b, 0x9c, 0xea, 0x4e,
0x8d, 0x0c, 0x37, 0xe5, 0x5c, 0x51, 0x3a, 0x31,
0xb2, 0x85, 0x84, 0x3f, 0x41, 0xee, 0xa2, 0xc1,
0xc6, 0x13, 0x3b, 0x54, 0x28, 0xd2, 0x18, 0x37,
0xcc, 0x46, 0x9f, 0x6a, 0x91, 0x3d, 0x5a, 0x15,
0x3c, 0x89, 0xa3, 0x61, 0x06, 0x7d, 0x2e, 0x78,
0xbe, 0x7d, 0x40, 0xba, 0x2f, 0x95, 0xb1, 0x2f,
0x87, 0x3b, 0x8a, 0xbe, 0x6a, 0xf4, 0xc2, 0x31,
0x74, 0xee, 0x91, 0xe0, 0x23, 0xaa, 0x5d, 0x7f,
0xdd, 0xf0, 0x44, 0x8c, 0x0b, 0x59, 0x2b, 0xfc,
0x48, 0x3a, 0xdf, 0x07, 0x05, 0x38, 0x6c, 0xc9,
0xeb, 0x18, 0x24, 0x68, 0x8d, 0x58, 0x98, 0xd3,
0x31, 0xa3, 0xe4, 0x70, 0x59, 0xb1, 0x21, 0xbe,
0x7e, 0x65, 0x7d, 0xb8, 0x04, 0xab, 0xf6, 0xe4,
0xd7, 0xda, 0xec, 0x09, 0x8f, 0xda, 0x6d, 0x24,
0x07, 0xcc, 0x29, 0x17, 0x05, 0x78, 0x1a, 0xc1,
0xb1, 0xce, 0xfc, 0xaa, 0x2d, 0xe7, 0xcc, 0x85,
0x84, 0x84, 0x03, 0x2a, 0x0c, 0x3f, 0xa9, 0xf8,
0xfd, 0x84, 0x53, 0x59, 0x5c, 0xf0, 0xd4, 0x09,
0xf0, 0xd2, 0x6c, 0x32, 0x03, 0xb0, 0xa0, 0x8c,
0x52, 0xeb, 0x23, 0x91, 0x88, 0x43, 0x13, 0x46,
0xf6, 0x1e, 0xb4, 0x1b, 0xf5, 0x8e, 0x3a, 0xb5,
0x3d, 0x00, 0xf6, 0xe5, 0x08, 0x3d, 0x5f, 0x39,
0xd3, 0x21, 0x69, 0xbc, 0x03, 0x22, 0x3a, 0xd2,
0x5c, 0x84, 0xf8, 0x15, 0xc4, 0x80, 0x0b, 0xbc,
0x29, 0x3c, 0xf3, 0x95, 0x98, 0xcd, 0x8f, 0x35,
0xbc, 0xa5, 0x3e, 0xfc, 0xd4, 0x13, 0x9e, 0xde,
0x4f, 0xce, 0x71, 0x9d, 0x09, 0xad, 0xf2, 0x80,
0x6b, 0x65, 0x7f, 0x03, 0x00, 0x14, 0x7c, 0x15,
0x85, 0x40, 0x6d, 0x70, 0xea, 0xdc, 0xb3, 0x63,
0x35, 0x4f, 0x4d, 0xe0, 0xd9, 0xd5, 0x3c, 0x58,
0x56, 0x23, 0x80, 0xe2, 0x36, 0xdd, 0x75, 0x1d,
0x94, 0x11, 0x41, 0x8e, 0xe0, 0x81, 0x8e, 0xcf,
0xe0, 0xe5, 0xf6, 0xde, 0xd1, 0xe7, 0x04, 0x12,
0x79, 0x92, 0x2b, 0x71, 0x2a, 0x79, 0x8b, 0x7c,
0x44, 0x79, 0x16, 0x30, 0x4e, 0xf4, 0xf6, 0x9b,
0xb7, 0x40, 0xa3, 0x5a, 0xa7, 0x69, 0x3e, 0xc1,
0x3a, 0x04, 0xd0, 0x88, 0xa0, 0x3b, 0xdd, 0xc6,
0x9e, 0x7e, 0x1e, 0x1e, 0x8f, 0x44, 0xf7, 0x73,
0x67, 0x1e, 0x1a, 0x78, 0xfa, 0x62, 0xf4, 0xa9,
0xa8, 0xc6, 0x5b, 0xb8, 0xfa, 0x06, 0x7d, 0x5e,
0x38, 0x1c, 0x9a, 0x39, 0xe9, 0x39, 0x98, 0x22,
0x0b, 0xa7, 0xac, 0x0b, 0xf3, 0xbc, 0xf1, 0xeb,
0x8c, 0x81, 0xe3, 0x48, 0x8a, 0xed, 0x42, 0xc2,
0x38, 0xcf, 0x3e, 0xda, 0xd2, 0x89, 0x8d, 0x9c,
0x53, 0xb5, 0x2f, 0x41, 0x01, 0x26, 0x84, 0x9c,
0xa3, 0x56, 0xf6, 0x49, 0xc7, 0xd4, 0x9f, 0x93,
0x1b, 0x96, 0x49, 0x5e, 0xad, 0xb3, 0x84, 0x1f,
0x3c, 0xa4, 0xe0, 0x9b, 0xd1, 0x90, 0xbc, 0x38,
0x6c, 0xdd, 0x95, 0x4d, 0x9d, 0xb1, 0x71, 0x57,
0x2d, 0x34, 0xe8, 0xb8, 0x42, 0xc7, 0x99, 0x03,
0xc7, 0x07, 0x30, 0x65, 0x91, 0x55, 0xd5, 0x90,
0x70, 0x97, 0x37, 0x68, 0xd4, 0x11, 0xf9, 0xe8,
0xce, 0xec, 0xdc, 0x34, 0xd5, 0xd3, 0xb7, 0xc4,
0xb8, 0x97, 0x05, 0x92, 0xad, 0xf8, 0xe2, 0x36,
0x64, 0x41, 0xc9, 0xc5, 0x41, 0x77, 0x52, 0xd7,
0x2c, 0xa5, 0x24, 0x2f, 0xd9, 0x34, 0x0b, 0x47,
0x35, 0xa7, 0x28, 0x8b, 0xc5, 0xcd, 0xe9, 0x46,
0xac, 0x39, 0x94, 0x3c, 0x10, 0xc6, 0x29, 0x73,
0x0e, 0x0e, 0x5d, 0xe0, 0x71, 0x03, 0x8a, 0x72,
0x0e, 0x26, 0xb0, 0x7d, 0x84, 0xed, 0x95, 0x23,
0x49, 0x5a, 0x45, 0x83, 0x45, 0x60, 0x11, 0x4a,
0x46, 0x31, 0xd4, 0xd8, 0x16, 0x54, 0x98, 0x58,
0xed, 0x6d, 0xcc, 0x5d, 0xd6, 0x50, 0x61, 0x9f,
0x9d, 0xc5, 0x3e, 0x9d, 0x32, 0x47, 0xde, 0x96,
0xe1, 0x5d, 0xd8, 0xf8, 0xb4, 0x69, 0x6f, 0xb9,
0x15, 0x90, 0x57, 0x7a, 0xf6, 0xad, 0xb0, 0x5b,
0xf5, 0xa6, 0x36, 0x94, 0xfd, 0x84, 0xce, 0x1c,
0x0f, 0x4b, 0xd0, 0xc2, 0x5b, 0x6b, 0x56, 0xef,
0x73, 0x93, 0x0b, 0xc3, 0xee, 0xd9, 0xcf, 0xd3,
0xa4, 0x22, 0x58, 0xcd, 0x50, 0x6e, 0x65, 0xf4,
0xe9, 0xb7, 0x71, 0xaf, 0x4b, 0xb3, 0xb6, 0x2f,
0x0f, 0x0e, 0x3b, 0xc9, 0x85, 0x14, 0xf5, 0x17,
0xe8, 0x7a, 0x3a, 0xbf, 0x5f, 0x5e, 0xf8, 0x18,
0x48, 0xa6, 0x72, 0xab, 0x06, 0x95, 0xe9, 0xc8,
0xa7, 0xf4, 0x32, 0x44, 0x04, 0x0c, 0x84, 0x98,
0x73, 0xe3, 0x89, 0x8d, 0x5f, 0x7e, 0x4a, 0x42,
0x8f, 0xc5, 0x28, 0xb1, 0x82, 0xef, 0x1c, 0x97,
0x31, 0x3b, 0x4d, 0xe0, 0x0e, 0x10, 0x10, 0x97,
0x93, 0x49, 0x78, 0x2f, 0x0d, 0x86, 0x8b, 0xa1,
0x53, 0xa9, 0x81, 0x20, 0x79, 0xe7, 0x07, 0x77,
0xb6, 0xac, 0x5e, 0xd2, 0x05, 0xcd, 0xe9, 0xdb,
0x8a, 0x94, 0x82, 0x8a, 0x23, 0xb9, 0x3d, 0x1c,
0xa9, 0x7d, 0x72, 0x4a, 0xed, 0x33, 0xa3, 0xdb,
0x21, 0xa7, 0x86, 0x33, 0x45, 0xa5, 0xaa, 0x56,
0x45, 0xb5, 0x83, 0x29, 0x40, 0x47, 0x79, 0x04,
0x6e, 0xb9, 0x95, 0xd0, 0x81, 0x77, 0x2d, 0x48,
0x1e, 0xfe, 0xc3, 0xc2, 0x1e, 0xe5, 0xf2, 0xbe,
0xfd, 0x3b, 0x94, 0x9f, 0xc4, 0xc4, 0x26, 0x9d,
0xe4, 0x66, 0x1e, 0x19, 0xee, 0x6c, 0x79, 0x97,
0x11, 0x31, 0x4b, 0x0d, 0x01, 0xcb, 0xde, 0xa8,
0xf6, 0x6d, 0x7c, 0x39, 0x46, 0x4e, 0x7e, 0x3f,
0x94, 0x17, 0xdf, 0xa1, 0x7d, 0xd9, 0x1c, 0x8e,
0xbc, 0x7d, 0x33, 0x7d, 0xe3, 0x12, 0x40, 0xca,
0xab, 0x37, 0x11, 0x46, 0xd4, 0xae, 0xef, 0x44,
0xa2, 0xb3, 0x6a, 0x66, 0x0e, 0x0c, 0x90, 0x7f,
0xdf, 0x5c, 0x66, 0x5f, 0xf2, 0x94, 0x9f, 0xa6,
0x73, 0x4f, 0xeb, 0x0d, 0xad, 0xbf, 0xc0, 0x63,
0x5c, 0xdc, 0x46, 0x51, 0xe8, 0x8e, 0x90, 0x19,
0xa8, 0xa4, 0x3c, 0x91, 0x79, 0xfa, 0x7e, 0x58,
0x85, 0x13, 0x55, 0xc5, 0x19, 0x82, 0x37, 0x1b,
0x0a, 0x02, 0x1f, 0x99, 0x6b, 0x18, 0xf1, 0x28,
0x08, 0xa2, 0x73, 0xb8, 0x0f, 0x2e, 0xcd, 0xbf,
0xf3, 0x86, 0x7f, 0xea, 0xef, 0xd0, 0xbb, 0xa6,
0x21, 0xdf, 0x49, 0x73, 0x51, 0xcc, 0x36, 0xd3,
0x3e, 0xa0, 0xf8, 0x44, 0xdf, 0xd3, 0xa6, 0xbe,
0x8a, 0xd4, 0x57, 0xdd, 0x72, 0x94, 0x61, 0x0f,
0x82, 0xd1, 0x07, 0xb8, 0x7c, 0x18, 0x83, 0xdf,
0x3a, 0xe5, 0x50, 0x6a, 0x82, 0x20, 0xac, 0xa9,
0xa8, 0xff, 0xd9, 0xf3, 0x77, 0x33, 0x5a, 0x9e,
0x7f, 0x6d, 0xfe, 0x5d, 0x33, 0x41, 0x42, 0xe7,
0x6c, 0x19, 0xe0, 0x44, 0x8a, 0x15, 0xf6, 0x70,
0x98, 0xb7, 0x68, 0x4d, 0xfa, 0x97, 0x39, 0xb0,
0x8e, 0xe8, 0x84, 0x8b, 0x75, 0x30, 0xb7, 0x7d,
0x92, 0x69, 0x20, 0x9c, 0x81, 0xfb, 0x4b, 0xf4,
0x01, 0x50, 0xeb, 0xce, 0x0c, 0x1c, 0x6c, 0xb5,
0x4a, 0xd7, 0x27, 0x0c, 0xce, 0xbb, 0xe5, 0x85,
0xf0, 0xb6, 0xee, 0xd5, 0x70, 0xdd, 0x3b, 0xfc,
0xd4, 0x99, 0xf1, 0x33, 0xdd, 0x8b, 0xc4, 0x2f,
0xae, 0xab, 0x74, 0x96, 0x32, 0xc7, 0x4c, 0x56,
0x3c, 0x89, 0x0f, 0x96, 0x0b, 0x42, 0xc0, 0xcb,
0xee, 0x0f, 0x0b, 0x8c, 0xfb, 0x7e, 0x47, 0x7b,
0x64, 0x48, 0xfd, 0xb2, 0x00, 0x80, 0x89, 0xa5,
0x13, 0x55, 0x62, 0xfc, 0x8f, 0xe2, 0x42, 0x03,
0xb7, 0x4e, 0x2a, 0x79, 0xb4, 0x82, 0xea, 0x23,
0x49, 0xda, 0xaf, 0x52, 0x63, 0x1e, 0x60, 0x03,
0x89, 0x06, 0x44, 0x46, 0x08, 0xc3, 0xc4, 0x87,
0x70, 0x2e, 0xda, 0x94, 0xad, 0x6b, 0xe0, 0xe4,
0xd1, 0x8a, 0x06, 0xc2, 0xa8, 0xc0, 0xa7, 0x43,
0x3c, 0x47, 0x52, 0x0e, 0xc3, 0x77, 0x81, 0x11,
0x67, 0x0e, 0xa0, 0x70, 0x04, 0x47, 0x29, 0x40,
0x86, 0x0d, 0x34, 0x56, 0xa7, 0xc9, 0x35, 0x59,
0x68, 0xdc, 0x93, 0x81, 0x70, 0xee, 0x86, 0xd9,
0x80, 0x06, 0x40, 0x4f, 0x1a, 0x0d, 0x40, 0x30,
0x0b, 0xcb, 0x96, 0x47, 0xc1, 0xb7, 0x52, 0xfd,
0x56, 0xe0, 0x72, 0x4b, 0xfb, 0xbd, 0x92, 0x45,
0x61, 0x71, 0xc2, 0x33, 0x11, 0xbf, 0x52, 0x83,
0x79, 0x26, 0xe0, 0x49, 0x6b, 0xb7, 0x05, 0x8b,
0xe8, 0x0e, 0x87, 0x31, 0xd7, 0x9d, 0x8a, 0xf5,
0xc0, 0x5f, 0x2e, 0x58, 0x4a, 0xdb, 0x11, 0xb3,
0x6c, 0x30, 0x2a, 0x46, 0x19, 0xe3, 0x27, 0x84,
0x1f, 0x63, 0x6e, 0xf6, 0x57, 0xc7, 0xc9, 0xd8,
0x5e, 0xba, 0xb3, 0x87, 0xd5, 0x83, 0x26, 0x34,
0x21, 0x9e, 0x65, 0xde, 0x42, 0xd3, 0xbe, 0x7b,
0xbc, 0x91, 0x71, 0x44, 0x4d, 0x99, 0x3b, 0x31,
0xe5, 0x3f, 0x11, 0x4e, 0x7f, 0x13, 0x51, 0x3b,
0xae, 0x79, 0xc9, 0xd3, 0x81, 0x8e, 0x25, 0x40,
0x10, 0xfc, 0x07, 0x1e, 0xf9, 0x7b, 0x9a, 0x4b,
0x6c, 0xe3, 0xb3, 0xad, 0x1a, 0x0a, 0xdd, 0x9e,
0x59, 0x0c, 0xa2, 0xcd, 0xae, 0x48, 0x4a, 0x38,
0x5b, 0x47, 0x41, 0x94, 0x65, 0x6b, 0xbb, 0xeb,
0x5b, 0xe3, 0xaf, 0x07, 0x5b, 0xd4, 0x4a, 0xa2,
0xc9, 0x5d, 0x2f, 0x64, 0x03, 0xd7, 0x3a, 0x2c,
0x6e, 0xce, 0x76, 0x95, 0xb4, 0xb3, 0xc0, 0xf1,
0xe2, 0x45, 0x73, 0x7a, 0x5c, 0xab, 0xc1, 0xfc,
0x02, 0x8d, 0x81, 0x29, 0xb3, 0xac, 0x07, 0xec,
0x40, 0x7d, 0x45, 0xd9, 0x7a, 0x59, 0xee, 0x34,
0xf0, 0xe9, 0xd5, 0x7b, 0x96, 0xb1, 0x3d, 0x95,
0xcc, 0x86, 0xb5, 0xb6, 0x04, 0x2d, 0xb5, 0x92,
0x7e, 0x76, 0xf4, 0x06, 0xa9, 0xa3, 0x12, 0x0f,
0xb1, 0xaf, 0x26, 0xba, 0x7c, 0xfc, 0x7e, 0x1c,
0xbc, 0x2c, 0x49, 0x97, 0x53, 0x60, 0x13, 0x0b,
0xa6, 0x61, 0x83, 0x89, 0x42, 0xd4, 0x17, 0x0c,
0x6c, 0x26, 0x52, 0xc3, 0xb3, 0xd4, 0x67, 0xf5,
0xe3, 0x04, 0xb7, 0xf4, 0xcb, 0x80, 0xb8, 0xcb,
0x77, 0x56, 0x3e, 0xaa, 0x57, 0x54, 0xee, 0xb4,
0x2c, 0x67, 0xcf, 0xf2, 0xdc, 0xbe, 0x55, 0xf9,
0x43, 0x1f, 0x6e, 0x22, 0x97, 0x67, 0x7f, 0xc4,
0xef, 0xb1, 0x26, 0x31, 0x1e, 0x27, 0xdf, 0x41,
0x80, 0x47, 0x6c, 0xe2, 0xfa, 0xa9, 0x8c, 0x2a,
0xf6, 0xf2, 0xab, 0xf0, 0x15, 0xda, 0x6c, 0xc8,
0xfe, 0xb5, 0x23, 0xde, 0xa9, 0x05, 0x3f, 0x06,
0x54, 0x4c, 0xcd, 0xe1, 0xab, 0xfc, 0x0e, 0x62,
0x33, 0x31, 0x73, 0x2c, 0x76, 0xcb, 0xb4, 0x47,
0x1e, 0x20, 0xad, 0xd8, 0xf2, 0x31, 0xdd, 0xc4,
0x8b, 0x0c, 0x77, 0xbe, 0xe1, 0x8b, 0x26, 0x00,
0x02, 0x58, 0xd6, 0x8d, 0xef, 0xad, 0x74, 0x67,
0xab, 0x3f, 0xef, 0xcb, 0x6f, 0xb0, 0xcc, 0x81,
0x44, 0x4c, 0xaf, 0xe9, 0x49, 0x4f, 0xdb, 0xa0,
0x25, 0xa4, 0xf0, 0x89, 0xf1, 0xbe, 0xd8, 0x10,
0xff, 0xb1, 0x3b, 0x4b, 0xfa, 0x98, 0xf5, 0x79,
0x6d, 0x1e, 0x69, 0x4d, 0x57, 0xb1, 0xc8, 0x19,
0x1b, 0xbd, 0x1e, 0x8c, 0x84, 0xb7, 0x7b, 0xe8,
0xd2, 0x2d, 0x09, 0x41, 0x41, 0x37, 0x3d, 0xb1,
0x6f, 0x26, 0x5d, 0x71, 0x16, 0x3d, 0xb7, 0x83,
0x27, 0x2c, 0xa7, 0xb6, 0x50, 0xbd, 0x91, 0x86,
0xab, 0x24, 0xa1, 0x38, 0xfd, 0xea, 0x71, 0x55,
0x7e, 0x9a, 0x07, 0x77, 0x4b, 0xfa, 0x61, 0x66,
0x20, 0x1e, 0x28, 0x95, 0x18, 0x1b, 0xa4, 0xa0,
0xfd, 0xc0, 0x89, 0x72, 0x43, 0xd9, 0x3b, 0x49,
0x5a, 0x3f, 0x9d, 0xbf, 0xdb, 0xb4, 0x46, 0xea,
0x42, 0x01, 0x77, 0x23, 0x68, 0x95, 0xb6, 0x24,
0xb3, 0xa8, 0x6c, 0x28, 0x3b, 0x11, 0x40, 0x7e,
0x18, 0x65, 0x6d, 0xd8, 0x24, 0x42, 0x7d, 0x88,
0xc0, 0x52, 0xd9, 0x05, 0xe4, 0x95, 0x90, 0x87,
0x8c, 0xf4, 0xd0, 0x6b, 0xb9, 0x83, 0x99, 0x34,
0x6d, 0xfe, 0x54, 0x40, 0x94, 0x52, 0x21, 0x4f,
0x14, 0x25, 0xc5, 0xd6, 0x5e, 0x95, 0xdc, 0x0a,
0x2b, 0x89, 0x20, 0x11, 0x84, 0x48, 0xd6, 0x3a,
0xcd, 0x5c, 0x24, 0xad, 0x62, 0xe3, 0xb1, 0x93,
0x25, 0x8d, 0xcd, 0x7e, 0xfc, 0x27, 0xa3, 0x37,
0xfd, 0x84, 0xfc, 0x1b, 0xb2, 0xf1, 0x27, 0x38,
0x5a, 0xb7, 0xfc, 0xf2, 0xfa, 0x95, 0x66, 0xd4,
0xfb, 0xba, 0xa7, 0xd7, 0xa3, 0x72, 0x69, 0x48,
0x48, 0x8c, 0xeb, 0x28, 0x89, 0xfe, 0x33, 0x65,
0x5a, 0x36, 0x01, 0x7e, 0x06, 0x79, 0x0a, 0x09,
0x3b, 0x74, 0x11, 0x9a, 0x6e, 0xbf, 0xd4, 0x9e,
0x58, 0x90, 0x49, 0x4f, 0x4d, 0x08, 0xd4, 0xe5,
0x4a, 0x09, 0x21, 0xef, 0x8b, 0xb8, 0x74, 0x3b,
0x91, 0xdd, 0x36, 0x85, 0x60, 0x2d, 0xfa, 0xd4,
0x45, 0x7b, 0x45, 0x53, 0xf5, 0x47, 0x87, 0x7e,
0xa6, 0x37, 0xc8, 0x78, 0x7a, 0x68, 0x9d, 0x8d,
0x65, 0x2c, 0x0e, 0x91, 0x5c, 0xa2, 0x60, 0xf0,
0x8e, 0x3f, 0xe9, 0x1a, 0xcd, 0xaa, 0xe7, 0xd5,
0x77, 0x18, 0xaf, 0xc9, 0xbc, 0x18, 0xea, 0x48,
0x1b, 0xfb, 0x22, 0x48, 0x70, 0x16, 0x29, 0x9e,
0x5b, 0xc1, 0x2c, 0x66, 0x23, 0xbc, 0xf0, 0x1f,
0xef, 0xaf, 0xe4, 0xd6, 0x04, 0x19, 0x82, 0x7a,
0x0b, 0xba, 0x4b, 0x46, 0xb1, 0x6a, 0x85, 0x5d,
0xb4, 0x73, 0xd6, 0x21, 0xa1, 0x71, 0x60, 0x14,
0xee, 0x0a, 0x77, 0xc4, 0x66, 0x2e, 0xf9, 0x69,
0x30, 0xaf, 0x41, 0x0b, 0xc8, 0x83, 0x3c, 0x53,
0x99, 0x19, 0x27, 0x46, 0xf7, 0x41, 0x6e, 0x56,
0xdc, 0x94, 0x28, 0x67, 0x4e, 0xb7, 0x25, 0x48,
0x8a, 0xc2, 0xe0, 0x60, 0x96, 0xcc, 0x18, 0xf4,
0x84, 0xdd, 0xa7, 0x5e, 0x3e, 0x05, 0x0b, 0x26,
0x26, 0xb2, 0x5c, 0x1f, 0x57, 0x1a, 0x04, 0x7e,
0x6a, 0xe3, 0x2f, 0xb4, 0x35, 0xb6, 0x38, 0x40,
0x40, 0xcd, 0x6f, 0x87, 0x2e, 0xef, 0xa3, 0xd7,
0xa9, 0xc2, 0xe8, 0x0d, 0x27, 0xdf, 0x44, 0x62,
0x99, 0xa0, 0xfc, 0xcf, 0x81, 0x78, 0xcb, 0xfe,
0xe5, 0xa0, 0x03, 0x4e, 0x6c, 0xd7, 0xf4, 0xaf,
0x7a, 0xbb, 0x61, 0x82, 0xfe, 0x71, 0x89, 0xb2,
0x22, 0x7c, 0x8e, 0x83, 0x04, 0xce, 0xf6, 0x5d,
0x84, 0x8f, 0x95, 0x6a, 0x7f, 0xad, 0xfd, 0x32,
0x9c, 0x5e, 0xe4, 0x9c, 0x89, 0x60, 0x54, 0xaa,
0x96, 0x72, 0xd2, 0xd7, 0x36, 0x85, 0xa9, 0x45,
0xd2, 0x2a, 0xa1, 0x81, 0x49, 0x6f, 0x7e, 0x04,
0xfa, 0xe2, 0xfe, 0x90, 0x26, 0x77, 0x5a, 0x33,
0xb8, 0x04, 0x9a, 0x7a, 0xe6, 0x4c, 0x4f, 0xad,
0x72, 0x96, 0x08, 0x28, 0x58, 0x13, 0xf8, 0xc4,
0x1c, 0xf0, 0xc3, 0x45, 0x95, 0x49, 0x20, 0x8c,
0x9f, 0x39, 0x70, 0xe1, 0x77, 0xfe, 0xd5, 0x4b,
0xaf, 0x86, 0xda, 0xef, 0x22, 0x06, 0x83, 0x36,
0x29, 0x12, 0x11, 0x40, 0xbc, 0x3b, 0x86, 0xaa,
0xaa, 0x65, 0x60, 0xc3, 0x80, 0xca, 0xed, 0xa9,
0xf3, 0xb0, 0x79, 0x96, 0xa2, 0x55, 0x27, 0x28,
0x55, 0x73, 0x26, 0xa5, 0x50, 0xea, 0x92, 0x4b,
0x3c, 0x5c, 0x82, 0x33, 0xf0, 0x01, 0x3f, 0x03,
0xc1, 0x08, 0x05, 0xbf, 0x98, 0xf4, 0x9b, 0x6d,
0xa5, 0xa8, 0xb4, 0x82, 0x0c, 0x06, 0xfa, 0xff,
0x2d, 0x08, 0xf3, 0x05, 0x4f, 0x57, 0x2a, 0x39,
0xd4, 0x83, 0x0d, 0x75, 0x51, 0xd8, 0x5b, 0x1b,
0xd3, 0x51, 0x5a, 0x32, 0x2a, 0x9b, 0x32, 0xb2,
0xf2, 0xa4, 0x96, 0x12, 0xf2, 0xae, 0x40, 0x34,
0x67, 0xa8, 0xf5, 0x44, 0xd5, 0x35, 0x53, 0xfe,
0xa3, 0x60, 0x96, 0x63, 0x0f, 0x1f, 0x6e, 0xb0,
0x5a, 0x42, 0xa6, 0xfc, 0x51, 0x0b, 0x60, 0x27,
0xbc, 0x06, 0x71, 0xed, 0x65, 0x5b, 0x23, 0x86,
0x4a, 0x07, 0x3b, 0x22, 0x07, 0x46, 0xe6, 0x90,
0x3e, 0xf3, 0x25, 0x50, 0x1b, 0x4c, 0x7f, 0x03,
0x08, 0xa8, 0x36, 0x6b, 0x87, 0xe5, 0xe3, 0xdb,
0x9a, 0x38, 0x83, 0xff, 0x9f, 0x1a, 0x9f, 0x57,
0xa4, 0x2a, 0xf6, 0x37, 0xbc, 0x1a, 0xff, 0xc9,
0x1e, 0x35, 0x0c, 0xc3, 0x7c, 0xa3, 0xb2, 0xe5,
0xd2, 0xc6, 0xb4, 0x57, 0x47, 0xe4, 0x32, 0x16,
0x6d, 0xa9, 0xae, 0x64, 0xe6, 0x2d, 0x8d, 0xc5,
0x8d, 0x50, 0x8e, 0xe8, 0x1a, 0x22, 0x34, 0x2a,
0xd9, 0xeb, 0x51, 0x90, 0x4a, 0xb1, 0x41, 0x7d,
0x64, 0xf9, 0xb9, 0x0d, 0xf6, 0x23, 0x33, 0xb0,
0x33, 0xf4, 0xf7, 0x3f, 0x27, 0x84, 0xc6, 0x0f,
0x54, 0xa5, 0xc0, 0x2e, 0xec, 0x0b, 0x3a, 0x48,
0x6e, 0x80, 0x35, 0x81, 0x43, 0x9b, 0x90, 0xb1,
0xd0, 0x2b, 0xea, 0x21, 0xdc, 0xda, 0x5b, 0x09,
0xf4, 0xcc, 0x10, 0xb4, 0xc7, 0xfe, 0x79, 0x51,
0xc3, 0xc5, 0xac, 0x88, 0x74, 0x84, 0x0b, 0x4b,
0xca, 0x79, 0x16, 0x29, 0xfb, 0x69, 0x54, 0xdf,
0x41, 0x7e, 0xe9, 0xc7, 0x8e, 0xea, 0xa5, 0xfe,
0xfc, 0x76, 0x0e, 0x90, 0xc4, 0x92, 0x38, 0xad,
0x7b, 0x48, 0xe6, 0x6e, 0xf7, 0x21, 0xfd, 0x4e,
0x93, 0x0a, 0x7b, 0x41, 0x83, 0x68, 0xfb, 0x57,
0x51, 0x76, 0x34, 0xa9, 0x6c, 0x00, 0xaa, 0x4f,
0x66, 0x65, 0x98, 0x4a, 0x4f, 0xa3, 0xa0, 0xef,
0x69, 0x3f, 0xe3, 0x1c, 0x92, 0x8c, 0xfd, 0xd8,
0xe8, 0xde, 0x7c, 0x7f, 0x3e, 0x84, 0x8e, 0x69,
0x3c, 0xf1, 0xf2, 0x05, 0x46, 0xdc, 0x2f, 0x9d,
0x5e, 0x6e, 0x4c, 0xfb, 0xb5, 0x99, 0x2a, 0x59,
0x63, 0xc1, 0x34, 0xbc, 0x57, 0xc0, 0x0d, 0xb9,
0x61, 0x25, 0xf3, 0x33, 0x23, 0x51, 0xb6, 0x0d,
0x07, 0xa6, 0xab, 0x94, 0x4a, 0xb7, 0x2a, 0xea,
0xee, 0xac, 0xa3, 0xc3, 0x04, 0x8b, 0x0e, 0x56,
0xfe, 0x44, 0xa7, 0x39, 0xe2, 0xed, 0xed, 0xb4,
0x22, 0x2b, 0xac, 0x12, 0x32, 0x28, 0x91, 0xd8,
0xa5, 0xab, 0xff, 0x5f, 0xe0, 0x4b, 0xda, 0x78,
0x17, 0xda, 0xf1, 0x01, 0x5b, 0xcd, 0xe2, 0x5f,
0x50, 0x45, 0x73, 0x2b, 0xe4, 0x76, 0x77, 0xf4,
0x64, 0x1d, 0x43, 0xfb, 0x84, 0x7a, 0xea, 0x91,
0xae, 0xf9, 0x9e, 0xb7, 0xb4, 0xb0, 0x91, 0x5f,
0x16, 0x35, 0x9a, 0x11, 0xb8, 0xc7, 0xc1, 0x8c,
0xc6, 0x10, 0x8d, 0x2f, 0x63, 0x4a, 0xa7, 0x57,
0x3a, 0x51, 0xd6, 0x32, 0x2d, 0x64, 0x72, 0xd4,
0x66, 0xdc, 0x10, 0xa6, 0x67, 0xd6, 0x04, 0x23,
0x9d, 0x0a, 0x11, 0x77, 0xdd, 0x37, 0x94, 0x17,
0x3c, 0xbf, 0x8b, 0x65, 0xb0, 0x2e, 0x5e, 0x66,
0x47, 0x64, 0xac, 0xdd, 0xf0, 0x84, 0xfd, 0x39,
0xfa, 0x15, 0x5d, 0xef, 0xae, 0xca, 0xc1, 0x36,
0xa7, 0x5c, 0xbf, 0xc7, 0x08, 0xc2, 0x66, 0x00,
0x74, 0x74, 0x4e, 0x27, 0x3f, 0x55, 0x8a, 0xb7,
0x38, 0x66, 0x83, 0x6d, 0xcf, 0x99, 0x9e, 0x60,
0x8f, 0xdd, 0x2e, 0x62, 0x22, 0x0e, 0xef, 0x0c,
0x98, 0xa7, 0x85, 0x74, 0x3b, 0x9d, 0xec, 0x9e,
0xa9, 0x19, 0x72, 0xa5, 0x7f, 0x2c, 0x39, 0xb7,
0x7d, 0xb7, 0xf1, 0x12, 0x65, 0x27, 0x4b, 0x5a,
0xde, 0x17, 0xfe, 0xad, 0x44, 0xf3, 0x20, 0x4d,
0xfd, 0xe4, 0x1f, 0xb5, 0x81, 0xb0, 0x36, 0x37,
0x08, 0x6f, 0xc3, 0x0c, 0xe9, 0x85, 0x98, 0x82,
0xa9, 0x62, 0x0c, 0xc4, 0x97, 0xc0, 0x50, 0xc8,
0xa7, 0x3c, 0x50, 0x9f, 0x43, 0xb9, 0xcd, 0x5e,
0x4d, 0xfa, 0x1c, 0x4b, 0x0b, 0xa9, 0x98, 0x85,
0x38, 0x92, 0xac, 0x8d, 0xe4, 0xad, 0x9b, 0x98,
0xab, 0xd9, 0x38, 0xac, 0x62, 0x52, 0xa3, 0x22,
0x63, 0x0f, 0xbf, 0x95, 0x48, 0xdf, 0x69, 0xe7,
0x8b, 0x33, 0xd5, 0xb2, 0xbd, 0x05, 0x49, 0x49,
0x9d, 0x57, 0x73, 0x19, 0x33, 0xae, 0xfa, 0x33,
0xf1, 0x19, 0xa8, 0x80, 0xce, 0x04, 0x9f, 0xbc,
0x1d, 0x65, 0x82, 0x1b, 0xe5, 0x3a, 0x51, 0xc8,
0x1c, 0x21, 0xe3, 0x5d, 0xf3, 0x7d, 0x9b, 0x2f,
0x2c, 0x1d, 0x4a, 0x7f, 0x9b, 0x68, 0x35, 0xa3,
0xb2, 0x50, 0xf7, 0x62, 0x79, 0xcd, 0xf4, 0x98,
0x4f, 0xe5, 0x63, 0x7c, 0x3e, 0x45, 0x31, 0x8c,
0x16, 0xa0, 0x12, 0xc8, 0x58, 0xce, 0x39, 0xa6,
0xbc, 0x54, 0xdb, 0xc5, 0xe0, 0xd5, 0xba, 0xbc,
0xb9, 0x04, 0xf4, 0x8d, 0xe8, 0x2f, 0x15, 0x9d,
};
/* 100 test cases */
static struct crc_test {
u32 crc; /* random starting crc */
u32 start; /* random 6 bit offset in buf */
u32 length; /* random 11 bit length of test */
u32 crc_le; /* expected crc32_le result */
u32 crc_be; /* expected crc32_be result */
u32 crc32c_le; /* expected crc32c_le result */
} const test[] __initconst =
{
{0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
{0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},
{0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, 0x52e1ebb8},
{0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, 0x0798af9a},
{0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, 0x18eb3152},
{0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, 0xd00d08c7},
{0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, 0x8ba966bc},
{0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, 0x11d694a2},
{0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, 0x6ab3208d},
{0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, 0xba4603c5},
{0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, 0xe6071c6f},
{0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0, 0x179ec30a},
{0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, 0x0903beb8},
{0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, 0x6a7cb4fa},
{0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, 0xdb535801},
{0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, 0x92bed597},
{0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, 0x192a3f1b},
{0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, 0xccbaec1a},
{0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b, 0x7eabae4d},
{0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, 0x28c72982},
{0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, 0xc3cd4d18},
{0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, 0xbca8f0e7},
{0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, 0x713f60b3},
{0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, 0xebd08fd5},
{0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, 0x64406c59},
{0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, 0x7421890e},
{0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, 0xe9347603},
{0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, 0x1bef9060},
{0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, 0x34720072},
{0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, 0x48310f59},
{0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, 0x783a4213},
{0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, 0x9e8efd41},
{0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, 0xfc3d34a5},
{0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, 0x17a52ae2},
{0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, 0x886d935a},
{0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, 0xeaaeaeb2},
{0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, 0x8e900a4b},
{0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, 0xd74662b1},
{0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, 0xd26752ba},
{0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, 0x8b1fcd62},
{0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, 0xf54342fe},
{0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, 0x5b95b988},
{0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, 0x2e1176be},
{0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, 0x66120546},
{0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, 0xf256a5cc},
{0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0, 0x4af1dd69},
{0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, 0x56f0a04a},
{0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, 0x74f6b6b2},
{0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, 0x085951fd},
{0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, 0xc65387eb},
{0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, 0x1ca9257b},
{0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, 0xfd196d76},
{0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, 0x5ef88339},
{0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, 0x2c3714d9},
{0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, 0x58576548},
{0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, 0xfd7c57de},
{0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, 0xd5fedd59},
{0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, 0x1cc3b17b},
{0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, 0x270eed73},
{0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, 0x91ecbb11},
{0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, 0x05ed8d0c},
{0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, 0x0b09ad5b},
{0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, 0xf8d511fb},
{0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, 0x5ad832cc},
{0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, 0x1214d196},
{0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, 0x5747218a},
{0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, 0xde8f14de},
{0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, 0x3563b7b9},
{0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, 0x071475d0},
{0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, 0x54c79d60},
{0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, 0x4c53eee6},
{0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, 0x10137a3c},
{0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605, 0xaa9d6c73},
{0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, 0xb63d23e7},
{0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, 0x7f53e9cf},
{0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, 0x13c1cd83},
{0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, 0x49ff5867},
{0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, 0x8467f211},
{0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, 0x3f9683b2},
{0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, 0x76a3f874},
{0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, 0x863b702f},
{0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, 0xdc6c58ff},
{0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, 0x0622cc95},
{0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, 0xe85605cd},
{0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, 0x31da5f06},
{0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, 0xa1f2e784},
{0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, 0xb07cc616},
{0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, 0xbf943b6c},
{0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, 0x2c01af1c},
{0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, 0x0fe5f56d},
{0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, 0xf8943b2d},
{0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, 0xe4d89272},
{0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, 0x7c2f6bbb},
{0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, 0xabbf388b},
{0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, 0x1dca1f4e},
{0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, 0x5c170e23},
{0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, 0xc0e9d672},
{0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, 0xc18bdc86},
{0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, 0xa874fcdd},
{0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a, 0x9dc0bb48},
};
#include <linux/time.h>
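/*
* Correctness and timing check for __crc32c_le(): every vector is run once
* to warm the caches, then the 100 checks are timed with interrupts off and
* compared against the precomputed crc32c_le values in test[] above.
*/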
static int __init crc32c_test(void)
{
int i;
int errors = 0;
int bytes = 0;
u64 nsec;
unsigned long flags;
/* keep static to prevent cache warming code from
* getting eliminated by the compiler */
static u32 crc;
/* pre-warm the cache */
for (i = 0; i < 100; i++) {
bytes += test[i].length;
crc ^= __crc32c_le(test[i].crc, test_buf +
test[i].start, test[i].length);
}
/* reduce OS noise */
local_irq_save(flags);
nsec = ktime_get_ns();
for (i = 0; i < 100; i++) {
if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf +
test[i].start, test[i].length))
errors++;
}
nsec = ktime_get_ns() - nsec;
local_irq_restore(flags);
pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
if (errors)
pr_warn("crc32c: %d self tests failed\n", errors);
else {
pr_info("crc32c: self tests passed, processed %d bytes in %lld nsec\n",
bytes, nsec);
}
return 0;
}
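/*
* For the first 10 vectors, split the buffer at every possible offset, CRC
* the two halves separately and check that __crc32c_le_combine() rebuilds
* both the single-pass result and the precomputed value.
* crc32_combine_test() below does the same for crc32_le().
*/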
static int __init crc32c_combine_test(void)
{
int i, j;
int errors = 0, runs = 0;
for (i = 0; i < 10; i++) {
u32 crc_full;
crc_full = __crc32c_le(test[i].crc, test_buf + test[i].start,
test[i].length);
for (j = 0; j <= test[i].length; ++j) {
u32 crc1, crc2;
u32 len1 = j, len2 = test[i].length - j;
crc1 = __crc32c_le(test[i].crc, test_buf +
test[i].start, len1);
crc2 = __crc32c_le(0, test_buf + test[i].start +
len1, len2);
if (!(crc_full == __crc32c_le_combine(crc1, crc2, len2) &&
crc_full == test[i].crc32c_le))
errors++;
runs++;
cond_resched();
}
}
if (errors)
pr_warn("crc32c_combine: %d/%d self tests failed\n", errors, runs);
else
pr_info("crc32c_combine: %d self tests passed\n", runs);
return 0;
}
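/*
* Same structure as crc32c_test() above, but exercising both the
* little-endian (crc32_le) and big-endian (crc32_be) variants.
*/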
static int __init crc32_test(void)
{
int i;
int errors = 0;
int bytes = 0;
u64 nsec;
unsigned long flags;
/* keep static to prevent cache warming code from
* getting eliminated by the compiler */
static u32 crc;
/* pre-warm the cache */
for (i = 0; i < 100; i++) {
bytes += 2*test[i].length;
crc ^= crc32_le(test[i].crc, test_buf +
test[i].start, test[i].length);
crc ^= crc32_be(test[i].crc, test_buf +
test[i].start, test[i].length);
}
/* reduce OS noise */
local_irq_save(flags);
nsec = ktime_get_ns();
for (i = 0; i < 100; i++) {
if (test[i].crc_le != crc32_le(test[i].crc, test_buf +
test[i].start, test[i].length))
errors++;
if (test[i].crc_be != crc32_be(test[i].crc, test_buf +
test[i].start, test[i].length))
errors++;
}
nsec = ktime_get_ns() - nsec;
local_irq_restore(flags);
pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
CRC_LE_BITS, CRC_BE_BITS);
if (errors)
pr_warn("crc32: %d self tests failed\n", errors);
else {
pr_info("crc32: self tests passed, processed %d bytes in %lld nsec\n",
bytes, nsec);
}
return 0;
}
static int __init crc32_combine_test(void)
{
int i, j;
int errors = 0, runs = 0;
for (i = 0; i < 10; i++) {
u32 crc_full;
crc_full = crc32_le(test[i].crc, test_buf + test[i].start,
test[i].length);
for (j = 0; j <= test[i].length; ++j) {
u32 crc1, crc2;
u32 len1 = j, len2 = test[i].length - j;
crc1 = crc32_le(test[i].crc, test_buf +
test[i].start, len1);
crc2 = crc32_le(0, test_buf + test[i].start +
len1, len2);
if (!(crc_full == crc32_le_combine(crc1, crc2, len2) &&
crc_full == test[i].crc_le))
errors++;
runs++;
cond_resched();
}
}
if (errors)
pr_warn("crc32_combine: %d/%d self tests failed\n", errors, runs);
else
pr_info("crc32_combine: %d self tests passed\n", runs);
return 0;
}
static int __init crc32test_init(void)
{
crc32_test();
crc32c_test();
crc32_combine_test();
crc32c_combine_test();
return 0;
}
static void __exit crc32_exit(void)
{
}
module_init(crc32test_init);
module_exit(crc32_exit);
MODULE_AUTHOR("Matt Domsch <[email protected]>");
MODULE_DESCRIPTION("CRC32 selftest");
MODULE_LICENSE("GPL");
| linux-master | lib/crc32test.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include "notifier-error-inject.h"
static int priority;
module_param(priority, int, 0);
MODULE_PARM_DESC(priority, "specify netdevice notifier priority");
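/*
* Each action below gets an "error" file from the notifier-error-inject
* framework (normally under
* /sys/kernel/debug/notifier-error-inject/netdev/actions/<ACTION>/error);
* writing a negative errno there makes the corresponding netdevice
* notification fail with that error so callers' error paths can be tested.
*/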
static struct notifier_err_inject netdev_notifier_err_inject = {
.actions = {
{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_REGISTER) },
{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGEMTU) },
{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGENAME) },
{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRE_UP) },
{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRE_TYPE_CHANGE) },
{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_POST_INIT) },
{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRECHANGEMTU) },
{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_PRECHANGEUPPER) },
{ NOTIFIER_ERR_INJECT_ACTION(NETDEV_CHANGEUPPER) },
{}
}
};
static struct dentry *dir;
static int netdev_err_inject_init(void)
{
int err;
dir = notifier_err_inject_init("netdev", notifier_err_inject_dir,
&netdev_notifier_err_inject, priority);
if (IS_ERR(dir))
return PTR_ERR(dir);
err = register_netdevice_notifier(&netdev_notifier_err_inject.nb);
if (err)
debugfs_remove_recursive(dir);
return err;
}
static void netdev_err_inject_exit(void)
{
unregister_netdevice_notifier(&netdev_notifier_err_inject.nb);
debugfs_remove_recursive(dir);
}
module_init(netdev_err_inject_init);
module_exit(netdev_err_inject_exit);
MODULE_DESCRIPTION("Netdevice notifier error injection module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nikolay Aleksandrov <[email protected]>");
| linux-master | lib/netdev-notifier-error-inject.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Test for find_*_bit functions.
*
* Copyright (c) 2017 Cavium.
*/
/*
* find_bit functions are widely used in the kernel, so a successful boot
* is a good enough test of their correctness.
*
* This test is focused on performance of traversing bitmaps. Two typical
* scenarios are reproduced:
* - randomly filled bitmap with approximately equal number of set and
* cleared bits;
* - sparse bitmap with few set bits at random positions.
*/
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#define BITMAP_LEN (4096UL * 8 * 10)
#define SPARSE 500
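/*
* BITMAP_LEN is 4096 * 8 * 10 = 327680 bits (40 KiB per bitmap). The sparse
* test sets at most BITMAP_LEN / SPARSE = 655 bits per bitmap (random
* positions may collide).
*/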
static DECLARE_BITMAP(bitmap, BITMAP_LEN) __initdata;
static DECLARE_BITMAP(bitmap2, BITMAP_LEN) __initdata;
/*
* This is Schlemiel the Painter's algorithm. It should be called after
* all other tests for the same bitmap because it clears every bit, leaving
* the bitmap all zeros.
*/
static int __init test_find_first_bit(void *bitmap, unsigned long len)
{
unsigned long i, cnt;
ktime_t time;
time = ktime_get();
for (cnt = i = 0; i < len; cnt++) {
i = find_first_bit(bitmap, len);
__clear_bit(i, bitmap);
}
time = ktime_get() - time;
pr_err("find_first_bit: %18llu ns, %6ld iterations\n", time, cnt);
return 0;
}
static int __init test_find_first_and_bit(void *bitmap, const void *bitmap2, unsigned long len)
{
static DECLARE_BITMAP(cp, BITMAP_LEN) __initdata;
unsigned long i, cnt;
ktime_t time;
bitmap_copy(cp, bitmap, BITMAP_LEN);
time = ktime_get();
for (cnt = i = 0; i < len; cnt++) {
i = find_first_and_bit(cp, bitmap2, len);
__clear_bit(i, cp);
}
time = ktime_get() - time;
pr_err("find_first_and_bit: %18llu ns, %6ld iterations\n", time, cnt);
return 0;
}
static int __init test_find_next_bit(const void *bitmap, unsigned long len)
{
unsigned long i, cnt;
ktime_t time;
time = ktime_get();
for (cnt = i = 0; i < BITMAP_LEN; cnt++)
i = find_next_bit(bitmap, BITMAP_LEN, i) + 1;
time = ktime_get() - time;
pr_err("find_next_bit: %18llu ns, %6ld iterations\n", time, cnt);
return 0;
}
static int __init test_find_next_zero_bit(const void *bitmap, unsigned long len)
{
unsigned long i, cnt;
ktime_t time;
time = ktime_get();
for (cnt = i = 0; i < BITMAP_LEN; cnt++)
i = find_next_zero_bit(bitmap, len, i) + 1;
time = ktime_get() - time;
pr_err("find_next_zero_bit: %18llu ns, %6ld iterations\n", time, cnt);
return 0;
}
static int __init test_find_last_bit(const void *bitmap, unsigned long len)
{
unsigned long l, cnt = 0;
ktime_t time;
time = ktime_get();
do {
cnt++;
l = find_last_bit(bitmap, len);
if (l >= len)
break;
len = l;
} while (len);
time = ktime_get() - time;
pr_err("find_last_bit: %18llu ns, %6ld iterations\n", time, cnt);
return 0;
}
static int __init test_find_nth_bit(const unsigned long *bitmap, unsigned long len)
{
unsigned long l, n, w = bitmap_weight(bitmap, len);
ktime_t time;
time = ktime_get();
for (n = 0; n < w; n++) {
l = find_nth_bit(bitmap, len, n);
WARN_ON(l >= len);
}
time = ktime_get() - time;
pr_err("find_nth_bit: %18llu ns, %6ld iterations\n", time, w);
return 0;
}
static int __init test_find_next_and_bit(const void *bitmap,
const void *bitmap2, unsigned long len)
{
unsigned long i, cnt;
ktime_t time;
time = ktime_get();
for (cnt = i = 0; i < BITMAP_LEN; cnt++)
i = find_next_and_bit(bitmap, bitmap2, BITMAP_LEN, i + 1);
time = ktime_get() - time;
pr_err("find_next_and_bit: %18llu ns, %6ld iterations\n", time, cnt);
return 0;
}
static int __init find_bit_test(void)
{
unsigned long nbits = BITMAP_LEN / SPARSE;
pr_err("\nStart testing find_bit() with random-filled bitmap\n");
get_random_bytes(bitmap, sizeof(bitmap));
get_random_bytes(bitmap2, sizeof(bitmap2));
test_find_next_bit(bitmap, BITMAP_LEN);
test_find_next_zero_bit(bitmap, BITMAP_LEN);
test_find_last_bit(bitmap, BITMAP_LEN);
test_find_nth_bit(bitmap, BITMAP_LEN / 10);
/*
* test_find_first_bit() may take some time, so
* traverse only part of bitmap to avoid soft lockup.
*/
test_find_first_bit(bitmap, BITMAP_LEN / 10);
test_find_first_and_bit(bitmap, bitmap2, BITMAP_LEN / 2);
test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
pr_err("\nStart testing find_bit() with sparse bitmap\n");
bitmap_zero(bitmap, BITMAP_LEN);
bitmap_zero(bitmap2, BITMAP_LEN);
while (nbits--) {
__set_bit(get_random_u32_below(BITMAP_LEN), bitmap);
__set_bit(get_random_u32_below(BITMAP_LEN), bitmap2);
}
test_find_next_bit(bitmap, BITMAP_LEN);
test_find_next_zero_bit(bitmap, BITMAP_LEN);
test_find_last_bit(bitmap, BITMAP_LEN);
test_find_nth_bit(bitmap, BITMAP_LEN);
test_find_first_bit(bitmap, BITMAP_LEN);
test_find_first_and_bit(bitmap, bitmap2, BITMAP_LEN);
test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
/*
* Everything is OK. Return an error anyway, so the module is not kept
* loaded and the benchmark can be re-run without an annoying rmmod.
*/
return -EINVAL;
}
module_init(find_bit_test);
MODULE_LICENSE("GPL");
| linux-master | lib/find_bit_benchmark.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/io.h>
#include <linux/export.h>
/**
* check_signature - find BIOS signatures
* @io_addr: mmio address to check
* @signature: signature block
* @length: length of signature
*
* Perform a signature comparison with the mmio address io_addr. This
* address should have been obtained by ioremap.
* Returns 1 on a match.
*/
int check_signature(const volatile void __iomem *io_addr,
const unsigned char *signature, int length)
{
while (length--) {
if (readb(io_addr) != *signature)
return 0;
io_addr++;
signature++;
}
return 1;
}
EXPORT_SYMBOL(check_signature);
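/*
* Hypothetical usage sketch (not taken from a real driver): probe code could
* ioremap() a ROM window and bail out when the expected magic is missing:
*
*	rom = ioremap(rom_base, 4);
*	if (!rom || !check_signature(rom, "PCIR", 4))
*		return -ENODEV;
*/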
| linux-master | lib/check_signature.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/lib/vsprintf.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */
/*
* Wirzenius wrote this portably, Torvalds fucked it up :-)
*/
/*
* Fri Jul 13 2001 Crutcher Dunnavant <[email protected]>
* - changed to provide snprintf and vsnprintf functions
* So Feb 1 16:51:32 CET 2004 Juergen Quade <[email protected]>
* - scnprintf and vscnprintf
*/
#include <linux/stdarg.h>
#include <linux/build_bug.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/errname.h>
#include <linux/module.h> /* for KSYM_SYMBOL_LEN */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/ioport.h>
#include <linux/dcache.h>
#include <linux/cred.h>
#include <linux/rtc.h>
#include <linux/sprintf.h>
#include <linux/time.h>
#include <linux/uuid.h>
#include <linux/of.h>
#include <net/addrconf.h>
#include <linux/siphash.h>
#include <linux/compiler.h>
#include <linux/property.h>
#include <linux/notifier.h>
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
#endif
#include "../mm/internal.h" /* For the trace_print_flags arrays */
#include <asm/page.h> /* for PAGE_SIZE */
#include <asm/byteorder.h> /* cpu_to_le16 */
#include <asm/unaligned.h>
#include <linux/string_helpers.h>
#include "kstrtox.h"
/* Disable pointer hashing if requested */
bool no_hash_pointers __ro_after_init;
EXPORT_SYMBOL_GPL(no_hash_pointers);
static noinline unsigned long long simple_strntoull(const char *startp, size_t max_chars, char **endp, unsigned int base)
{
const char *cp;
unsigned long long result = 0ULL;
size_t prefix_chars;
unsigned int rv;
cp = _parse_integer_fixup_radix(startp, &base);
prefix_chars = cp - startp;
if (prefix_chars < max_chars) {
rv = _parse_integer_limit(cp, base, &result, max_chars - prefix_chars);
/* FIXME */
cp += (rv & ~KSTRTOX_OVERFLOW);
} else {
/* Field too short for prefix + digit, skip over without converting */
cp = startp + max_chars;
}
if (endp)
*endp = (char *)cp;
return result;
}
/**
* simple_strtoull - convert a string to an unsigned long long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
* This function has caveats. Please use kstrtoull instead.
*/
noinline
unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
return simple_strntoull(cp, INT_MAX, endp, base);
}
EXPORT_SYMBOL(simple_strtoull);
/**
* simple_strtoul - convert a string to an unsigned long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
* This function has caveats. Please use kstrtoul instead.
*/
unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
{
return simple_strtoull(cp, endp, base);
}
EXPORT_SYMBOL(simple_strtoul);
/**
* simple_strtol - convert a string to a signed long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
* This function has caveats. Please use kstrtol instead.
*/
long simple_strtol(const char *cp, char **endp, unsigned int base)
{
if (*cp == '-')
return -simple_strtoul(cp + 1, endp, base);
return simple_strtoul(cp, endp, base);
}
EXPORT_SYMBOL(simple_strtol);
static long long simple_strntoll(const char *cp, size_t max_chars, char **endp,
unsigned int base)
{
/*
* simple_strntoull() safely handles receiving max_chars == 0, which
* happens below when cp[0] == '-' && max_chars == 1.
* If max_chars == 0 on entry we simply fall through to simple_strntoull();
* the content of *cp is then irrelevant.
*/
if (*cp == '-' && max_chars > 0)
return -simple_strntoull(cp + 1, max_chars - 1, endp, base);
return simple_strntoull(cp, max_chars, endp, base);
}
/**
* simple_strtoll - convert a string to a signed long long
* @cp: The start of the string
* @endp: A pointer to the end of the parsed string will be placed here
* @base: The number base to use
*
* This function has caveats. Please use kstrtoll instead.
*/
long long simple_strtoll(const char *cp, char **endp, unsigned int base)
{
return simple_strntoll(cp, INT_MAX, endp, base);
}
EXPORT_SYMBOL(simple_strtoll);
static noinline_for_stack
int skip_atoi(const char **s)
{
int i = 0;
do {
i = i*10 + *((*s)++) - '0';
} while (isdigit(**s));
return i;
}
/*
* Decimal conversion is by far the most typical, and is used for
* /proc and /sys data. This directly impacts e.g. top performance
* with many processes running. We optimize it for speed by emitting
* two characters at a time, using a 200 byte lookup table. This
* roughly halves the number of multiplications compared to computing
* the digits one at a time. Implementation strongly inspired by the
* previous version, which in turn used ideas described at
* <http://www.cs.uiowa.edu/~jones/bcd/divide.html> (with permission
* from the author, Douglas W. Jones).
*
* It turns out there is precisely one 26 bit fixed-point
* approximation a of 64/100 for which x/100 == (x * (u64)a) >> 32
* holds for all x in [0, 10^8-1], namely a = 0x28f5c29. The actual
* range happens to be somewhat larger (x <= 1073741898), but that's
* irrelevant for our purpose.
*
* For dividing a number in the range [10^4, 10^6-1] by 100, we still
* need a 32x32->64 bit multiply, so we simply use the same constant.
*
* For dividing a number in the range [100, 10^4-1] by 100, there are
* several options. The simplest is (x * 0x147b) >> 19, which is valid
* for all x <= 43698.
*/
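/*
* Worked example (illustrative): put_dec_trunc8(buf, 123456):
*   q = (123456 * 0x28f5c29) >> 32 = 1234, decpair[56] stores '6','5'
*   r = (  1234 * 0x28f5c29) >> 32 =   12, decpair[34] stores '4','3'
*   out_r:                                 decpair[12] stores '2','1'
* i.e. "654321", least significant digit first. Callers such as number()
* and num_to_str() copy the result in reverse to obtain "123456".
*/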
static const u16 decpair[100] = {
#define _(x) (__force u16) cpu_to_le16(((x % 10) | ((x / 10) << 8)) + 0x3030)
_( 0), _( 1), _( 2), _( 3), _( 4), _( 5), _( 6), _( 7), _( 8), _( 9),
_(10), _(11), _(12), _(13), _(14), _(15), _(16), _(17), _(18), _(19),
_(20), _(21), _(22), _(23), _(24), _(25), _(26), _(27), _(28), _(29),
_(30), _(31), _(32), _(33), _(34), _(35), _(36), _(37), _(38), _(39),
_(40), _(41), _(42), _(43), _(44), _(45), _(46), _(47), _(48), _(49),
_(50), _(51), _(52), _(53), _(54), _(55), _(56), _(57), _(58), _(59),
_(60), _(61), _(62), _(63), _(64), _(65), _(66), _(67), _(68), _(69),
_(70), _(71), _(72), _(73), _(74), _(75), _(76), _(77), _(78), _(79),
_(80), _(81), _(82), _(83), _(84), _(85), _(86), _(87), _(88), _(89),
_(90), _(91), _(92), _(93), _(94), _(95), _(96), _(97), _(98), _(99),
#undef _
};
/*
* This will print a single '0' even if r == 0, since we would
* immediately jump to out_r where two 0s would be written but only
* one of them accounted for in buf. This is needed by ip4_string
* below. All other callers pass a non-zero value of r.
*/
static noinline_for_stack
char *put_dec_trunc8(char *buf, unsigned r)
{
unsigned q;
/* 1 <= r < 10^8 */
if (r < 100)
goto out_r;
/* 100 <= r < 10^8 */
q = (r * (u64)0x28f5c29) >> 32;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
/* 1 <= q < 10^6 */
if (q < 100)
goto out_q;
/* 100 <= q < 10^6 */
r = (q * (u64)0x28f5c29) >> 32;
*((u16 *)buf) = decpair[q - 100*r];
buf += 2;
/* 1 <= r < 10^4 */
if (r < 100)
goto out_r;
/* 100 <= r < 10^4 */
q = (r * 0x147b) >> 19;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
out_q:
/* 1 <= q < 100 */
r = q;
out_r:
/* 1 <= r < 100 */
*((u16 *)buf) = decpair[r];
buf += r < 10 ? 1 : 2;
return buf;
}
#if BITS_PER_LONG == 64 && BITS_PER_LONG_LONG == 64
static noinline_for_stack
char *put_dec_full8(char *buf, unsigned r)
{
unsigned q;
/* 0 <= r < 10^8 */
q = (r * (u64)0x28f5c29) >> 32;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
/* 0 <= q < 10^6 */
r = (q * (u64)0x28f5c29) >> 32;
*((u16 *)buf) = decpair[q - 100*r];
buf += 2;
/* 0 <= r < 10^4 */
q = (r * 0x147b) >> 19;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
/* 0 <= q < 100 */
*((u16 *)buf) = decpair[q];
buf += 2;
return buf;
}
static noinline_for_stack
char *put_dec(char *buf, unsigned long long n)
{
if (n >= 100*1000*1000)
buf = put_dec_full8(buf, do_div(n, 100*1000*1000));
/* 1 <= n <= 1.6e11 */
if (n >= 100*1000*1000)
buf = put_dec_full8(buf, do_div(n, 100*1000*1000));
/* 1 <= n < 1e8 */
return put_dec_trunc8(buf, n);
}
#elif BITS_PER_LONG == 32 && BITS_PER_LONG_LONG == 64
static void
put_dec_full4(char *buf, unsigned r)
{
unsigned q;
/* 0 <= r < 10^4 */
q = (r * 0x147b) >> 19;
*((u16 *)buf) = decpair[r - 100*q];
buf += 2;
/* 0 <= q < 100 */
*((u16 *)buf) = decpair[q];
}
/*
* Call put_dec_full4 on x % 10000, return x / 10000.
* The approximation x/10000 == (x * 0x346DC5D7) >> 43
* holds for all x < 1,128,869,999. The largest value this
* helper will ever be asked to convert is 1,125,520,955.
* (second call in the put_dec code, assuming n is all-ones).
*/
static noinline_for_stack
unsigned put_dec_helper4(char *buf, unsigned x)
{
uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43;
put_dec_full4(buf, x - q * 10000);
return q;
}
/* Based on code by Douglas W. Jones found at
* <http://www.cs.uiowa.edu/~jones/bcd/decimal.html#sixtyfour>
* (with permission from the author).
* Performs no 64-bit division and hence should be fast on 32-bit machines.
*/
static
char *put_dec(char *buf, unsigned long long n)
{
uint32_t d3, d2, d1, q, h;
if (n < 100*1000*1000)
return put_dec_trunc8(buf, n);
d1 = ((uint32_t)n >> 16); /* implicit "& 0xffff" */
h = (n >> 32);
d2 = (h ) & 0xffff;
d3 = (h >> 16); /* implicit "& 0xffff" */
/* n = 2^48 d3 + 2^32 d2 + 2^16 d1 + d0
= 281_4749_7671_0656 d3 + 42_9496_7296 d2 + 6_5536 d1 + d0 */
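/*
* Each put_dec_helper4() call below converts the low four decimal digits
* of the running sum and returns the carry (sum / 10000), so the constants
* above are consumed in 4-digit groups: 0656/7296/5536, then 7671/9496/6,
* then 4749/42, and finally 281.
*/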
q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
q = put_dec_helper4(buf, q);
q += 7671 * d3 + 9496 * d2 + 6 * d1;
q = put_dec_helper4(buf+4, q);
q += 4749 * d3 + 42 * d2;
q = put_dec_helper4(buf+8, q);
q += 281 * d3;
buf += 12;
if (q)
buf = put_dec_trunc8(buf, q);
else while (buf[-1] == '0')
--buf;
return buf;
}
#endif
/*
* Convert passed number to decimal string.
* Returns the length of string. On buffer overflow, returns 0.
*
* If speed is not important, use snprintf(). It's easy to read the code.
*/
int num_to_str(char *buf, int size, unsigned long long num, unsigned int width)
{
/* put_dec requires 2-byte alignment of the buffer. */
char tmp[sizeof(num) * 3] __aligned(2);
int idx, len;
/* put_dec() may work incorrectly for num = 0 (generate "", not "0") */
if (num <= 9) {
tmp[0] = '0' + num;
len = 1;
} else {
len = put_dec(tmp, num) - tmp;
}
if (len > size || width > size)
return 0;
if (width > len) {
width = width - len;
for (idx = 0; idx < width; idx++)
buf[idx] = ' ';
} else {
width = 0;
}
for (idx = 0; idx < len; ++idx)
buf[idx + width] = tmp[len - idx - 1];
return len + width;
}
#define SIGN 1 /* unsigned/signed, must be 1 */
#define LEFT 2 /* left justified */
#define PLUS 4 /* show plus */
#define SPACE 8 /* space if plus */
#define ZEROPAD 16 /* pad with zero, must be 16 == '0' - ' ' */
#define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */
#define SPECIAL 64 /* prefix hex with "0x", octal with "0" */
static_assert(SIGN == 1);
static_assert(ZEROPAD == ('0' - ' '));
static_assert(SMALL == ('a' ^ 'A'));
enum format_type {
FORMAT_TYPE_NONE, /* Just a string part */
FORMAT_TYPE_WIDTH,
FORMAT_TYPE_PRECISION,
FORMAT_TYPE_CHAR,
FORMAT_TYPE_STR,
FORMAT_TYPE_PTR,
FORMAT_TYPE_PERCENT_CHAR,
FORMAT_TYPE_INVALID,
FORMAT_TYPE_LONG_LONG,
FORMAT_TYPE_ULONG,
FORMAT_TYPE_LONG,
FORMAT_TYPE_UBYTE,
FORMAT_TYPE_BYTE,
FORMAT_TYPE_USHORT,
FORMAT_TYPE_SHORT,
FORMAT_TYPE_UINT,
FORMAT_TYPE_INT,
FORMAT_TYPE_SIZE_T,
FORMAT_TYPE_PTRDIFF
};
struct printf_spec {
unsigned int type:8; /* format_type enum */
signed int field_width:24; /* width of output field */
unsigned int flags:8; /* flags to number() */
unsigned int base:8; /* number base, 8, 10 or 16 only */
signed int precision:16; /* # of digits/chars */
} __packed;
static_assert(sizeof(struct printf_spec) == 8);
#define FIELD_WIDTH_MAX ((1 << 23) - 1)
#define PRECISION_MAX ((1 << 15) - 1)
static noinline_for_stack
char *number(char *buf, char *end, unsigned long long num,
struct printf_spec spec)
{
/* put_dec requires 2-byte alignment of the buffer. */
char tmp[3 * sizeof(num)] __aligned(2);
char sign;
char locase;
int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10);
int i;
bool is_zero = num == 0LL;
int field_width = spec.field_width;
int precision = spec.precision;
/* locase = 0 or 0x20. ORing digits or letters with 'locase'
* produces same digits or (maybe lowercased) letters */
locase = (spec.flags & SMALL);
if (spec.flags & LEFT)
spec.flags &= ~ZEROPAD;
sign = 0;
if (spec.flags & SIGN) {
if ((signed long long)num < 0) {
sign = '-';
num = -(signed long long)num;
field_width--;
} else if (spec.flags & PLUS) {
sign = '+';
field_width--;
} else if (spec.flags & SPACE) {
sign = ' ';
field_width--;
}
}
if (need_pfx) {
if (spec.base == 16)
field_width -= 2;
else if (!is_zero)
field_width--;
}
/* generate full string in tmp[], in reverse order */
i = 0;
if (num < spec.base)
tmp[i++] = hex_asc_upper[num] | locase;
else if (spec.base != 10) { /* 8 or 16 */
int mask = spec.base - 1;
int shift = 3;
if (spec.base == 16)
shift = 4;
do {
tmp[i++] = (hex_asc_upper[((unsigned char)num) & mask] | locase);
num >>= shift;
} while (num);
} else { /* base 10 */
i = put_dec(tmp, num) - tmp;
}
/* printing 100 using %2d gives "100", not "00" */
if (i > precision)
precision = i;
/* leading space padding */
field_width -= precision;
if (!(spec.flags & (ZEROPAD | LEFT))) {
while (--field_width >= 0) {
if (buf < end)
*buf = ' ';
++buf;
}
}
/* sign */
if (sign) {
if (buf < end)
*buf = sign;
++buf;
}
/* "0x" / "0" prefix */
if (need_pfx) {
if (spec.base == 16 || !is_zero) {
if (buf < end)
*buf = '0';
++buf;
}
if (spec.base == 16) {
if (buf < end)
*buf = ('X' | locase);
++buf;
}
}
/* zero or space padding */
if (!(spec.flags & LEFT)) {
char c = ' ' + (spec.flags & ZEROPAD);
while (--field_width >= 0) {
if (buf < end)
*buf = c;
++buf;
}
}
/* hmm even more zero padding? */
while (i <= --precision) {
if (buf < end)
*buf = '0';
++buf;
}
/* actual digits of result */
while (--i >= 0) {
if (buf < end)
*buf = tmp[i];
++buf;
}
/* trailing space padding */
while (--field_width >= 0) {
if (buf < end)
*buf = ' ';
++buf;
}
return buf;
}
static noinline_for_stack
char *special_hex_number(char *buf, char *end, unsigned long long num, int size)
{
struct printf_spec spec;
spec.type = FORMAT_TYPE_PTR;
spec.field_width = 2 + 2 * size; /* 0x + hex */
spec.flags = SPECIAL | SMALL | ZEROPAD;
spec.base = 16;
spec.precision = -1;
return number(buf, end, num, spec);
}
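/*
* Shift 'len' already-written bytes right by 'spaces' positions (clamped to
* the remaining buffer) and fill the gap with spaces. Used to right-justify
* a string that was emitted before its final width was known.
*/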
static void move_right(char *buf, char *end, unsigned len, unsigned spaces)
{
size_t size;
if (buf >= end) /* nowhere to put anything */
return;
size = end - buf;
if (size <= spaces) {
memset(buf, ' ', size);
return;
}
if (len) {
if (len > size - spaces)
len = size - spaces;
memmove(buf + spaces, buf, len);
}
memset(buf, ' ', spaces);
}
/*
* Handle field width padding for a string.
* @buf: current buffer position
* @n: length of string
* @end: end of output buffer
* @spec: for field width and flags
* Returns: new buffer position after padding.
*/
static noinline_for_stack
char *widen_string(char *buf, int n, char *end, struct printf_spec spec)
{
unsigned spaces;
if (likely(n >= spec.field_width))
return buf;
/* we want to pad the sucker */
spaces = spec.field_width - n;
if (!(spec.flags & LEFT)) {
move_right(buf - n, end, n, spaces);
return buf + spaces;
}
while (spaces--) {
if (buf < end)
*buf = ' ';
++buf;
}
return buf;
}
/* Handle string from a well known address. */
static char *string_nocheck(char *buf, char *end, const char *s,
struct printf_spec spec)
{
int len = 0;
int lim = spec.precision;
while (lim--) {
char c = *s++;
if (!c)
break;
if (buf < end)
*buf = c;
++buf;
++len;
}
return widen_string(buf, len, end, spec);
}
static char *err_ptr(char *buf, char *end, void *ptr,
struct printf_spec spec)
{
int err = PTR_ERR(ptr);
const char *sym = errname(err);
if (sym)
return string_nocheck(buf, end, sym, spec);
/*
* Somebody passed ERR_PTR(-1234) or some other non-existing
* Efoo - or perhaps CONFIG_SYMBOLIC_ERRNAME=n. Fall back to
* printing it as its decimal representation.
*/
spec.flags |= SIGN;
spec.base = 10;
return number(buf, end, err, spec);
}
/* Be careful: error messages must fit into the given buffer. */
static char *error_string(char *buf, char *end, const char *s,
struct printf_spec spec)
{
/*
* Hard limit to avoid completely insane messages. It works pretty well
* in practice because the error strings produced by the many pointer
* format modifiers are all short.
*/
if (spec.precision == -1)
spec.precision = 2 * sizeof(void *);
return string_nocheck(buf, end, s, spec);
}
/*
* Do not call any complex external code here. Nested printk()/vsprintf()
* might cause infinite loops. Failures might break printk() and would
* be hard to debug.
*/
static const char *check_pointer_msg(const void *ptr)
{
if (!ptr)
return "(null)";
if ((unsigned long)ptr < PAGE_SIZE || IS_ERR_VALUE(ptr))
return "(efault)";
return NULL;
}
static int check_pointer(char **buf, char *end, const void *ptr,
struct printf_spec spec)
{
const char *err_msg;
err_msg = check_pointer_msg(ptr);
if (err_msg) {
*buf = error_string(*buf, end, err_msg, spec);
return -EFAULT;
}
return 0;
}
static noinline_for_stack
char *string(char *buf, char *end, const char *s,
struct printf_spec spec)
{
if (check_pointer(&buf, end, s, spec))
return buf;
return string_nocheck(buf, end, s, spec);
}
static char *pointer_string(char *buf, char *end,
const void *ptr,
struct printf_spec spec)
{
spec.base = 16;
spec.flags |= SMALL;
if (spec.field_width == -1) {
spec.field_width = 2 * sizeof(ptr);
spec.flags |= ZEROPAD;
}
return number(buf, end, (unsigned long int)ptr, spec);
}
/* Make pointers available for printing early in the boot sequence. */
static int debug_boot_weak_hash __ro_after_init;
static int __init debug_boot_weak_hash_enable(char *str)
{
debug_boot_weak_hash = 1;
pr_info("debug_boot_weak_hash enabled\n");
return 0;
}
early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
static bool filled_random_ptr_key __read_mostly;
static siphash_key_t ptr_key __read_mostly;
static int fill_ptr_key(struct notifier_block *nb, unsigned long action, void *data)
{
get_random_bytes(&ptr_key, sizeof(ptr_key));
/* Pairs with smp_rmb() before reading ptr_key. */
smp_wmb();
WRITE_ONCE(filled_random_ptr_key, true);
return NOTIFY_DONE;
}
static int __init vsprintf_init_hashval(void)
{
static struct notifier_block fill_ptr_key_nb = { .notifier_call = fill_ptr_key };
execute_with_initialized_rng(&fill_ptr_key_nb);
return 0;
}
subsys_initcall(vsprintf_init_hashval)
/* Maps a pointer to a 32 bit unique identifier. */
static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
{
unsigned long hashval;
if (!READ_ONCE(filled_random_ptr_key))
return -EBUSY;
/* Pairs with smp_wmb() after writing ptr_key. */
smp_rmb();
#ifdef CONFIG_64BIT
hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
/*
* Keep only the low 32 bits; this makes it explicit that we have
* modified the address (and 32 bits is plenty for a unique ID).
*/
hashval = hashval & 0xffffffff;
#else
hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
#endif
*hashval_out = hashval;
return 0;
}
int ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
{
return __ptr_to_hashval(ptr, hashval_out);
}
static char *ptr_to_id(char *buf, char *end, const void *ptr,
struct printf_spec spec)
{
const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
unsigned long hashval;
int ret;
/*
* Print the real pointer value for NULL and error pointers,
* as they are not actual addresses.
*/
if (IS_ERR_OR_NULL(ptr))
return pointer_string(buf, end, ptr, spec);
/* When debugging early boot use non-cryptographically secure hash. */
if (unlikely(debug_boot_weak_hash)) {
hashval = hash_long((unsigned long)ptr, 32);
return pointer_string(buf, end, (const void *)hashval, spec);
}
ret = __ptr_to_hashval(ptr, &hashval);
if (ret) {
spec.field_width = 2 * sizeof(ptr);
/* string length must be less than default_width */
return error_string(buf, end, str, spec);
}
return pointer_string(buf, end, (const void *)hashval, spec);
}
static char *default_pointer(char *buf, char *end, const void *ptr,
struct printf_spec spec)
{
/*
* default is to _not_ leak addresses, so hash before printing,
* unless no_hash_pointers is specified on the command line.
*/
if (unlikely(no_hash_pointers))
return pointer_string(buf, end, ptr, spec);
return ptr_to_id(buf, end, ptr, spec);
}
int kptr_restrict __read_mostly;
static noinline_for_stack
char *restricted_pointer(char *buf, char *end, const void *ptr,
struct printf_spec spec)
{
switch (kptr_restrict) {
case 0:
/* Handle as %p, hash and do _not_ leak addresses. */
return default_pointer(buf, end, ptr, spec);
case 1: {
const struct cred *cred;
/*
* kptr_restrict==1 cannot be used in IRQ context
* because its test for CAP_SYSLOG would be meaningless.
*/
if (in_hardirq() || in_serving_softirq() || in_nmi()) {
if (spec.field_width == -1)
spec.field_width = 2 * sizeof(ptr);
return error_string(buf, end, "pK-error", spec);
}
/*
* Only print the real pointer value if the current
* process has CAP_SYSLOG and is running with the
* same credentials it started with. This is because
* access to files is checked at open() time, but %pK
* checks permission at read() time. We don't want to
* leak pointer values if a binary opens a file using
* %pK and then elevates privileges before reading it.
*/
cred = current_cred();
if (!has_capability_noaudit(current, CAP_SYSLOG) ||
!uid_eq(cred->euid, cred->uid) ||
!gid_eq(cred->egid, cred->gid))
ptr = NULL;
break;
}
case 2:
default:
/* Always print 0's for %pK */
ptr = NULL;
break;
}
return pointer_string(buf, end, ptr, spec);
}
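/*
* %pd prints a dentry name; %pd2, %pd3 and %pd4 print the last 2, 3 or 4
* path components. file_dentry_name() below implements %pD, doing the same
* for a struct file.
*/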
static noinline_for_stack
char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec,
const char *fmt)
{
const char *array[4], *s;
const struct dentry *p;
int depth;
int i, n;
switch (fmt[1]) {
case '2': case '3': case '4':
depth = fmt[1] - '0';
break;
default:
depth = 1;
}
rcu_read_lock();
for (i = 0; i < depth; i++, d = p) {
if (check_pointer(&buf, end, d, spec)) {
rcu_read_unlock();
return buf;
}
p = READ_ONCE(d->d_parent);
array[i] = READ_ONCE(d->d_name.name);
if (p == d) {
if (i)
array[i] = "";
i++;
break;
}
}
s = array[--i];
for (n = 0; n != spec.precision; n++, buf++) {
char c = *s++;
if (!c) {
if (!i)
break;
c = '/';
s = array[--i];
}
if (buf < end)
*buf = c;
}
rcu_read_unlock();
return widen_string(buf, n, end, spec);
}
static noinline_for_stack
char *file_dentry_name(char *buf, char *end, const struct file *f,
struct printf_spec spec, const char *fmt)
{
if (check_pointer(&buf, end, f, spec))
return buf;
return dentry_name(buf, end, f->f_path.dentry, spec, fmt);
}
#ifdef CONFIG_BLOCK
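/*
* %pg: block device name, e.g. "sda1". A 'p' is inserted before the
* partition number when the disk name already ends in a digit, giving
* e.g. "nvme0n1p1" rather than "nvme0n11".
*/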
static noinline_for_stack
char *bdev_name(char *buf, char *end, struct block_device *bdev,
struct printf_spec spec, const char *fmt)
{
struct gendisk *hd;
if (check_pointer(&buf, end, bdev, spec))
return buf;
hd = bdev->bd_disk;
buf = string(buf, end, hd->disk_name, spec);
if (bdev->bd_partno) {
if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) {
if (buf < end)
*buf = 'p';
buf++;
}
buf = number(buf, end, bdev->bd_partno, spec);
}
return buf;
}
#endif
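/*
* Symbolic pointers: %pS prints symbol+offset, %ps just the symbol name,
* %pB adjusts a return address for backtraces; an 'R' modifier runs the
* value through __builtin_extract_return_addr() and a 'b' suffix appends
* the module build ID. Without CONFIG_KALLSYMS the raw address is printed
* as a hex number instead.
*/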
static noinline_for_stack
char *symbol_string(char *buf, char *end, void *ptr,
struct printf_spec spec, const char *fmt)
{
unsigned long value;
#ifdef CONFIG_KALLSYMS
char sym[KSYM_SYMBOL_LEN];
#endif
if (fmt[1] == 'R')
ptr = __builtin_extract_return_addr(ptr);
value = (unsigned long)ptr;
#ifdef CONFIG_KALLSYMS
if (*fmt == 'B' && fmt[1] == 'b')
sprint_backtrace_build_id(sym, value);
else if (*fmt == 'B')
sprint_backtrace(sym, value);
else if (*fmt == 'S' && (fmt[1] == 'b' || (fmt[1] == 'R' && fmt[2] == 'b')))
sprint_symbol_build_id(sym, value);
else if (*fmt != 's')
sprint_symbol(sym, value);
else
sprint_symbol_no_offset(sym, value);
return string_nocheck(buf, end, sym, spec);
#else
return special_hex_number(buf, end, value, sizeof(void *));
#endif
}
static const struct printf_spec default_str_spec = {
.field_width = -1,
.precision = -1,
};
static const struct printf_spec default_flag_spec = {
.base = 16,
.precision = -1,
.flags = SPECIAL | SMALL,
};
static const struct printf_spec default_dec_spec = {
.base = 10,
.precision = -1,
};
static const struct printf_spec default_dec02_spec = {
.base = 10,
.field_width = 2,
.precision = -1,
.flags = ZEROPAD,
};
static const struct printf_spec default_dec04_spec = {
.base = 10,
.field_width = 4,
.precision = -1,
.flags = ZEROPAD,
};
static noinline_for_stack
char *resource_string(char *buf, char *end, struct resource *res,
struct printf_spec spec, const char *fmt)
{
#ifndef IO_RSRC_PRINTK_SIZE
#define IO_RSRC_PRINTK_SIZE 6
#endif
#ifndef MEM_RSRC_PRINTK_SIZE
#define MEM_RSRC_PRINTK_SIZE 10
#endif
static const struct printf_spec io_spec = {
.base = 16,
.field_width = IO_RSRC_PRINTK_SIZE,
.precision = -1,
.flags = SPECIAL | SMALL | ZEROPAD,
};
static const struct printf_spec mem_spec = {
.base = 16,
.field_width = MEM_RSRC_PRINTK_SIZE,
.precision = -1,
.flags = SPECIAL | SMALL | ZEROPAD,
};
static const struct printf_spec bus_spec = {
.base = 16,
.field_width = 2,
.precision = -1,
.flags = SMALL | ZEROPAD,
};
static const struct printf_spec str_spec = {
.field_width = -1,
.precision = 10,
.flags = LEFT,
};
/* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8)
* 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */
#define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4)
#define FLAG_BUF_SIZE (2 * sizeof(res->flags))
#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref window disabled]")
#define RAW_BUF_SIZE sizeof("[mem - flags 0x]")
char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE,
2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)];
char *p = sym, *pend = sym + sizeof(sym);
int decode = (fmt[0] == 'R') ? 1 : 0;
const struct printf_spec *specp;
if (check_pointer(&buf, end, res, spec))
return buf;
*p++ = '[';
if (res->flags & IORESOURCE_IO) {
p = string_nocheck(p, pend, "io ", str_spec);
specp = &io_spec;
} else if (res->flags & IORESOURCE_MEM) {
p = string_nocheck(p, pend, "mem ", str_spec);
specp = &mem_spec;
} else if (res->flags & IORESOURCE_IRQ) {
p = string_nocheck(p, pend, "irq ", str_spec);
specp = &default_dec_spec;
} else if (res->flags & IORESOURCE_DMA) {
p = string_nocheck(p, pend, "dma ", str_spec);
specp = &default_dec_spec;
} else if (res->flags & IORESOURCE_BUS) {
p = string_nocheck(p, pend, "bus ", str_spec);
specp = &bus_spec;
} else {
p = string_nocheck(p, pend, "??? ", str_spec);
specp = &mem_spec;
decode = 0;
}
if (decode && res->flags & IORESOURCE_UNSET) {
p = string_nocheck(p, pend, "size ", str_spec);
p = number(p, pend, resource_size(res), *specp);
} else {
p = number(p, pend, res->start, *specp);
if (res->start != res->end) {
*p++ = '-';
p = number(p, pend, res->end, *specp);
}
}
if (decode) {
if (res->flags & IORESOURCE_MEM_64)
p = string_nocheck(p, pend, " 64bit", str_spec);
if (res->flags & IORESOURCE_PREFETCH)
p = string_nocheck(p, pend, " pref", str_spec);
if (res->flags & IORESOURCE_WINDOW)
p = string_nocheck(p, pend, " window", str_spec);
if (res->flags & IORESOURCE_DISABLED)
p = string_nocheck(p, pend, " disabled", str_spec);
} else {
p = string_nocheck(p, pend, " flags ", str_spec);
p = number(p, pend, res->flags, default_flag_spec);
}
*p++ = ']';
*p = '\0';
return string_nocheck(buf, end, sym, spec);
}
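/*
 * Illustrative usage sketch (not part of the original file): decoded (%pR)
 * versus raw (%pr) output from resource_string() above. The resource values
 * below are hypothetical.
 */
static void __maybe_unused resource_string_example(void)
{
	struct resource res = {
		.start = 0xfed00000,
		.end   = 0xfed003ff,
		.flags = IORESOURCE_MEM,
	};
	char buf[64];

	/* Decoded, e.g. "[mem 0xfed00000-0xfed003ff]" */
	snprintf(buf, sizeof(buf), "%pR", &res);

	/* Raw, e.g. "[mem 0xfed00000-0xfed003ff flags 0x200]" */
	snprintf(buf, sizeof(buf), "%pr", &res);
}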
static noinline_for_stack
char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
const char *fmt)
{
	int i, len = 1;		/* a bare '%ph[CDN]' leaves the field width
				   negative, so fall back to the default length */
char separator;
if (spec.field_width == 0)
/* nothing to print */
return buf;
if (check_pointer(&buf, end, addr, spec))
return buf;
switch (fmt[1]) {
case 'C':
separator = ':';
break;
case 'D':
separator = '-';
break;
case 'N':
separator = 0;
break;
default:
separator = ' ';
break;
}
if (spec.field_width > 0)
len = min_t(int, spec.field_width, 64);
for (i = 0; i < len; ++i) {
if (buf < end)
*buf = hex_asc_hi(addr[i]);
++buf;
if (buf < end)
*buf = hex_asc_lo(addr[i]);
++buf;
if (separator && i != len - 1) {
if (buf < end)
*buf = separator;
++buf;
}
}
return buf;
}
static noinline_for_stack
char *bitmap_string(char *buf, char *end, const unsigned long *bitmap,
struct printf_spec spec, const char *fmt)
{
const int CHUNKSZ = 32;
int nr_bits = max_t(int, spec.field_width, 0);
int i, chunksz;
bool first = true;
if (check_pointer(&buf, end, bitmap, spec))
return buf;
/* reused to print numbers */
spec = (struct printf_spec){ .flags = SMALL | ZEROPAD, .base = 16 };
chunksz = nr_bits & (CHUNKSZ - 1);
if (chunksz == 0)
chunksz = CHUNKSZ;
i = ALIGN(nr_bits, CHUNKSZ) - CHUNKSZ;
for (; i >= 0; i -= CHUNKSZ) {
u32 chunkmask, val;
int word, bit;
chunkmask = ((1ULL << chunksz) - 1);
word = i / BITS_PER_LONG;
bit = i % BITS_PER_LONG;
val = (bitmap[word] >> bit) & chunkmask;
if (!first) {
if (buf < end)
*buf = ',';
buf++;
}
first = false;
spec.field_width = DIV_ROUND_UP(chunksz, 4);
buf = number(buf, end, val, spec);
chunksz = CHUNKSZ;
}
return buf;
}
static noinline_for_stack
char *bitmap_list_string(char *buf, char *end, const unsigned long *bitmap,
struct printf_spec spec, const char *fmt)
{
int nr_bits = max_t(int, spec.field_width, 0);
bool first = true;
int rbot, rtop;
if (check_pointer(&buf, end, bitmap, spec))
return buf;
for_each_set_bitrange(rbot, rtop, bitmap, nr_bits) {
if (!first) {
if (buf < end)
*buf = ',';
buf++;
}
first = false;
buf = number(buf, end, rbot, default_dec_spec);
if (rtop == rbot + 1)
continue;
if (buf < end)
*buf = '-';
buf = number(++buf, end, rtop - 1, default_dec_spec);
}
return buf;
}
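/*
 * Illustrative usage sketch (not part of the original file): hex (%*pb) and
 * range-list (%*pbl) bitmap output, with the bit count passed as the field
 * width. The bitmap contents below are hypothetical.
 */
static void __maybe_unused bitmap_string_example(void)
{
	DECLARE_BITMAP(mask, 16) = { 0x30f };
	char buf[32];

	/* "030f" - one zero-padded hex chunk covering 16 bits */
	snprintf(buf, sizeof(buf), "%*pb", 16, mask);

	/* "0-3,8-9" - the same bits as a range list */
	snprintf(buf, sizeof(buf), "%*pbl", 16, mask);
}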
static noinline_for_stack
char *mac_address_string(char *buf, char *end, u8 *addr,
struct printf_spec spec, const char *fmt)
{
char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")];
char *p = mac_addr;
int i;
char separator;
bool reversed = false;
if (check_pointer(&buf, end, addr, spec))
return buf;
switch (fmt[1]) {
case 'F':
separator = '-';
break;
case 'R':
reversed = true;
fallthrough;
default:
separator = ':';
break;
}
for (i = 0; i < 6; i++) {
if (reversed)
p = hex_byte_pack(p, addr[5 - i]);
else
p = hex_byte_pack(p, addr[i]);
if (fmt[0] == 'M' && i != 5)
*p++ = separator;
}
*p = '\0';
return string_nocheck(buf, end, mac_addr, spec);
}
static noinline_for_stack
char *ip4_string(char *p, const u8 *addr, const char *fmt)
{
int i;
bool leading_zeros = (fmt[0] == 'i');
int index;
int step;
switch (fmt[2]) {
case 'h':
#ifdef __BIG_ENDIAN
index = 0;
step = 1;
#else
index = 3;
step = -1;
#endif
break;
case 'l':
index = 3;
step = -1;
break;
case 'n':
case 'b':
default:
index = 0;
step = 1;
break;
}
for (i = 0; i < 4; i++) {
char temp[4] __aligned(2); /* hold each IP quad in reverse order */
int digits = put_dec_trunc8(temp, addr[index]) - temp;
if (leading_zeros) {
if (digits < 3)
*p++ = '0';
if (digits < 2)
*p++ = '0';
}
/* reverse the digits in the quad */
while (digits--)
*p++ = temp[digits];
if (i < 3)
*p++ = '.';
index += step;
}
*p = '\0';
return p;
}
static noinline_for_stack
char *ip6_compressed_string(char *p, const char *addr)
{
int i, j, range;
unsigned char zerolength[8];
int longest = 1;
int colonpos = -1;
u16 word;
u8 hi, lo;
bool needcolon = false;
bool useIPv4;
struct in6_addr in6;
memcpy(&in6, addr, sizeof(struct in6_addr));
useIPv4 = ipv6_addr_v4mapped(&in6) || ipv6_addr_is_isatap(&in6);
memset(zerolength, 0, sizeof(zerolength));
if (useIPv4)
range = 6;
else
range = 8;
/* find position of longest 0 run */
for (i = 0; i < range; i++) {
for (j = i; j < range; j++) {
if (in6.s6_addr16[j] != 0)
break;
zerolength[i]++;
}
}
for (i = 0; i < range; i++) {
if (zerolength[i] > longest) {
longest = zerolength[i];
colonpos = i;
}
}
if (longest == 1) /* don't compress a single 0 */
colonpos = -1;
/* emit address */
for (i = 0; i < range; i++) {
if (i == colonpos) {
if (needcolon || i == 0)
*p++ = ':';
*p++ = ':';
needcolon = false;
i += longest - 1;
continue;
}
if (needcolon) {
*p++ = ':';
needcolon = false;
}
/* hex u16 without leading 0s */
word = ntohs(in6.s6_addr16[i]);
hi = word >> 8;
lo = word & 0xff;
if (hi) {
if (hi > 0x0f)
p = hex_byte_pack(p, hi);
else
*p++ = hex_asc_lo(hi);
p = hex_byte_pack(p, lo);
}
else if (lo > 0x0f)
p = hex_byte_pack(p, lo);
else
*p++ = hex_asc_lo(lo);
needcolon = true;
}
if (useIPv4) {
if (needcolon)
*p++ = ':';
p = ip4_string(p, &in6.s6_addr[12], "I4");
}
*p = '\0';
return p;
}
static noinline_for_stack
char *ip6_string(char *p, const char *addr, const char *fmt)
{
int i;
for (i = 0; i < 8; i++) {
p = hex_byte_pack(p, *addr++);
p = hex_byte_pack(p, *addr++);
if (fmt[0] == 'I' && i != 7)
*p++ = ':';
}
*p = '\0';
return p;
}
static noinline_for_stack
char *ip6_addr_string(char *buf, char *end, const u8 *addr,
struct printf_spec spec, const char *fmt)
{
char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")];
if (fmt[0] == 'I' && fmt[2] == 'c')
ip6_compressed_string(ip6_addr, addr);
else
ip6_string(ip6_addr, addr, fmt);
return string_nocheck(buf, end, ip6_addr, spec);
}
static noinline_for_stack
char *ip4_addr_string(char *buf, char *end, const u8 *addr,
struct printf_spec spec, const char *fmt)
{
char ip4_addr[sizeof("255.255.255.255")];
ip4_string(ip4_addr, addr, fmt);
return string_nocheck(buf, end, ip4_addr, spec);
}
static noinline_for_stack
char *ip6_addr_string_sa(char *buf, char *end, const struct sockaddr_in6 *sa,
struct printf_spec spec, const char *fmt)
{
bool have_p = false, have_s = false, have_f = false, have_c = false;
char ip6_addr[sizeof("[xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255]") +
sizeof(":12345") + sizeof("/123456789") +
sizeof("%1234567890")];
char *p = ip6_addr, *pend = ip6_addr + sizeof(ip6_addr);
const u8 *addr = (const u8 *) &sa->sin6_addr;
char fmt6[2] = { fmt[0], '6' };
u8 off = 0;
fmt++;
while (isalpha(*++fmt)) {
switch (*fmt) {
case 'p':
have_p = true;
break;
case 'f':
have_f = true;
break;
case 's':
have_s = true;
break;
case 'c':
have_c = true;
break;
}
}
if (have_p || have_s || have_f) {
*p = '[';
off = 1;
}
if (fmt6[0] == 'I' && have_c)
p = ip6_compressed_string(ip6_addr + off, addr);
else
p = ip6_string(ip6_addr + off, addr, fmt6);
if (have_p || have_s || have_f)
*p++ = ']';
if (have_p) {
*p++ = ':';
p = number(p, pend, ntohs(sa->sin6_port), spec);
}
if (have_f) {
*p++ = '/';
p = number(p, pend, ntohl(sa->sin6_flowinfo &
IPV6_FLOWINFO_MASK), spec);
}
if (have_s) {
*p++ = '%';
p = number(p, pend, sa->sin6_scope_id, spec);
}
*p = '\0';
return string_nocheck(buf, end, ip6_addr, spec);
}
static noinline_for_stack
char *ip4_addr_string_sa(char *buf, char *end, const struct sockaddr_in *sa,
struct printf_spec spec, const char *fmt)
{
bool have_p = false;
char *p, ip4_addr[sizeof("255.255.255.255") + sizeof(":12345")];
char *pend = ip4_addr + sizeof(ip4_addr);
const u8 *addr = (const u8 *) &sa->sin_addr.s_addr;
char fmt4[3] = { fmt[0], '4', 0 };
fmt++;
while (isalpha(*++fmt)) {
switch (*fmt) {
case 'p':
have_p = true;
break;
case 'h':
case 'l':
case 'n':
case 'b':
fmt4[2] = *fmt;
break;
}
}
p = ip4_string(ip4_addr, addr, fmt4);
if (have_p) {
*p++ = ':';
p = number(p, pend, ntohs(sa->sin_port), spec);
}
*p = '\0';
return string_nocheck(buf, end, ip4_addr, spec);
}
static noinline_for_stack
char *ip_addr_string(char *buf, char *end, const void *ptr,
struct printf_spec spec, const char *fmt)
{
char *err_fmt_msg;
if (check_pointer(&buf, end, ptr, spec))
return buf;
switch (fmt[1]) {
case '6':
return ip6_addr_string(buf, end, ptr, spec, fmt);
case '4':
return ip4_addr_string(buf, end, ptr, spec, fmt);
case 'S': {
const union {
struct sockaddr raw;
struct sockaddr_in v4;
struct sockaddr_in6 v6;
} *sa = ptr;
switch (sa->raw.sa_family) {
case AF_INET:
return ip4_addr_string_sa(buf, end, &sa->v4, spec, fmt);
case AF_INET6:
return ip6_addr_string_sa(buf, end, &sa->v6, spec, fmt);
default:
return error_string(buf, end, "(einval)", spec);
}}
}
err_fmt_msg = fmt[0] == 'i' ? "(%pi?)" : "(%pI?)";
return error_string(buf, end, err_fmt_msg, spec);
}
static noinline_for_stack
char *escaped_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
const char *fmt)
{
bool found = true;
int count = 1;
unsigned int flags = 0;
int len;
if (spec.field_width == 0)
return buf; /* nothing to print */
if (check_pointer(&buf, end, addr, spec))
return buf;
do {
switch (fmt[count++]) {
case 'a':
flags |= ESCAPE_ANY;
break;
case 'c':
flags |= ESCAPE_SPECIAL;
break;
case 'h':
flags |= ESCAPE_HEX;
break;
case 'n':
flags |= ESCAPE_NULL;
break;
case 'o':
flags |= ESCAPE_OCTAL;
break;
case 'p':
flags |= ESCAPE_NP;
break;
case 's':
flags |= ESCAPE_SPACE;
break;
default:
found = false;
break;
}
} while (found);
if (!flags)
flags = ESCAPE_ANY_NP;
len = spec.field_width < 0 ? 1 : spec.field_width;
/*
* string_escape_mem() writes as many characters as it can to
* the given buffer, and returns the total size of the output
* had the buffer been big enough.
*/
buf += string_escape_mem(addr, len, buf, buf < end ? end - buf : 0, flags, NULL);
return buf;
}
static char *va_format(char *buf, char *end, struct va_format *va_fmt,
struct printf_spec spec, const char *fmt)
{
va_list va;
if (check_pointer(&buf, end, va_fmt, spec))
return buf;
va_copy(va, *va_fmt->va);
buf += vsnprintf(buf, end > buf ? end - buf : 0, va_fmt->fmt, va);
va_end(va);
return buf;
}
static noinline_for_stack
char *uuid_string(char *buf, char *end, const u8 *addr,
struct printf_spec spec, const char *fmt)
{
char uuid[UUID_STRING_LEN + 1];
char *p = uuid;
int i;
const u8 *index = uuid_index;
bool uc = false;
if (check_pointer(&buf, end, addr, spec))
return buf;
switch (*(++fmt)) {
case 'L':
uc = true;
fallthrough;
case 'l':
index = guid_index;
break;
case 'B':
uc = true;
break;
}
for (i = 0; i < 16; i++) {
if (uc)
p = hex_byte_pack_upper(p, addr[index[i]]);
else
p = hex_byte_pack(p, addr[index[i]]);
switch (i) {
case 3:
case 5:
case 7:
case 9:
*p++ = '-';
break;
}
}
*p = 0;
return string_nocheck(buf, end, uuid, spec);
}
static noinline_for_stack
char *netdev_bits(char *buf, char *end, const void *addr,
struct printf_spec spec, const char *fmt)
{
unsigned long long num;
int size;
if (check_pointer(&buf, end, addr, spec))
return buf;
switch (fmt[1]) {
case 'F':
num = *(const netdev_features_t *)addr;
size = sizeof(netdev_features_t);
break;
default:
return error_string(buf, end, "(%pN?)", spec);
}
return special_hex_number(buf, end, num, size);
}
static noinline_for_stack
char *fourcc_string(char *buf, char *end, const u32 *fourcc,
struct printf_spec spec, const char *fmt)
{
char output[sizeof("0123 little-endian (0x01234567)")];
char *p = output;
unsigned int i;
u32 orig, val;
if (fmt[1] != 'c' || fmt[2] != 'c')
return error_string(buf, end, "(%p4?)", spec);
if (check_pointer(&buf, end, fourcc, spec))
return buf;
orig = get_unaligned(fourcc);
val = orig & ~BIT(31);
for (i = 0; i < sizeof(u32); i++) {
unsigned char c = val >> (i * 8);
/* Print non-control ASCII characters as-is, dot otherwise */
*p++ = isascii(c) && isprint(c) ? c : '.';
}
*p++ = ' ';
strcpy(p, orig & BIT(31) ? "big-endian" : "little-endian");
p += strlen(p);
*p++ = ' ';
*p++ = '(';
p = special_hex_number(p, output + sizeof(output) - 2, orig, sizeof(u32));
*p++ = ')';
*p = '\0';
return string(buf, end, output, spec);
}
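/*
 * Illustrative usage sketch (not part of the original file): %p4cc output for
 * a V4L2/DRM fourcc code. The value below is the standard 'XR24' code; the
 * buffer name is hypothetical.
 */
static void __maybe_unused fourcc_string_example(void)
{
	u32 fourcc = 0x34325258;	/* 'XR24' */
	char buf[48];

	/* "XR24 little-endian (0x34325258)" */
	snprintf(buf, sizeof(buf), "%p4cc", &fourcc);
}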
static noinline_for_stack
char *address_val(char *buf, char *end, const void *addr,
struct printf_spec spec, const char *fmt)
{
unsigned long long num;
int size;
if (check_pointer(&buf, end, addr, spec))
return buf;
switch (fmt[1]) {
case 'd':
num = *(const dma_addr_t *)addr;
size = sizeof(dma_addr_t);
break;
case 'p':
default:
num = *(const phys_addr_t *)addr;
size = sizeof(phys_addr_t);
break;
}
return special_hex_number(buf, end, num, size);
}
static noinline_for_stack
char *date_str(char *buf, char *end, const struct rtc_time *tm, bool r)
{
int year = tm->tm_year + (r ? 0 : 1900);
int mon = tm->tm_mon + (r ? 0 : 1);
buf = number(buf, end, year, default_dec04_spec);
if (buf < end)
*buf = '-';
buf++;
buf = number(buf, end, mon, default_dec02_spec);
if (buf < end)
*buf = '-';
buf++;
return number(buf, end, tm->tm_mday, default_dec02_spec);
}
static noinline_for_stack
char *time_str(char *buf, char *end, const struct rtc_time *tm, bool r)
{
buf = number(buf, end, tm->tm_hour, default_dec02_spec);
if (buf < end)
*buf = ':';
buf++;
buf = number(buf, end, tm->tm_min, default_dec02_spec);
if (buf < end)
*buf = ':';
buf++;
return number(buf, end, tm->tm_sec, default_dec02_spec);
}
static noinline_for_stack
char *rtc_str(char *buf, char *end, const struct rtc_time *tm,
struct printf_spec spec, const char *fmt)
{
bool have_t = true, have_d = true;
bool raw = false, iso8601_separator = true;
bool found = true;
int count = 2;
if (check_pointer(&buf, end, tm, spec))
return buf;
switch (fmt[count]) {
case 'd':
have_t = false;
count++;
break;
case 't':
have_d = false;
count++;
break;
}
do {
switch (fmt[count++]) {
case 'r':
raw = true;
break;
case 's':
iso8601_separator = false;
break;
default:
found = false;
break;
}
} while (found);
if (have_d)
buf = date_str(buf, end, tm, raw);
if (have_d && have_t) {
if (buf < end)
*buf = iso8601_separator ? 'T' : ' ';
buf++;
}
if (have_t)
buf = time_str(buf, end, tm, raw);
return buf;
}
static noinline_for_stack
char *time64_str(char *buf, char *end, const time64_t time,
struct printf_spec spec, const char *fmt)
{
struct rtc_time rtc_time;
struct tm tm;
time64_to_tm(time, 0, &tm);
rtc_time.tm_sec = tm.tm_sec;
rtc_time.tm_min = tm.tm_min;
rtc_time.tm_hour = tm.tm_hour;
rtc_time.tm_mday = tm.tm_mday;
rtc_time.tm_mon = tm.tm_mon;
rtc_time.tm_year = tm.tm_year;
rtc_time.tm_wday = tm.tm_wday;
rtc_time.tm_yday = tm.tm_yday;
rtc_time.tm_isdst = 0;
return rtc_str(buf, end, &rtc_time, spec, fmt);
}
static noinline_for_stack
char *time_and_date(char *buf, char *end, void *ptr, struct printf_spec spec,
const char *fmt)
{
switch (fmt[1]) {
case 'R':
return rtc_str(buf, end, (const struct rtc_time *)ptr, spec, fmt);
case 'T':
return time64_str(buf, end, *(const time64_t *)ptr, spec, fmt);
default:
return error_string(buf, end, "(%pt?)", spec);
}
}
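/*
 * Illustrative usage sketch (not part of the original file): printing a
 * struct rtc_time with the %ptR family handled by time_and_date() above.
 * The time values below are hypothetical.
 */
static void __maybe_unused time_and_date_example(void)
{
	struct rtc_time tm = {
		.tm_year = 123, .tm_mon = 0, .tm_mday = 2,
		.tm_hour = 3, .tm_min = 4, .tm_sec = 5,
	};
	char buf[32];

	/* "2023-01-02T03:04:05" - date and time with the ISO 8601 separator */
	snprintf(buf, sizeof(buf), "%ptR", &tm);

	/* "2023-01-02" - date only */
	snprintf(buf, sizeof(buf), "%ptRd", &tm);
}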
static noinline_for_stack
char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
const char *fmt)
{
if (!IS_ENABLED(CONFIG_HAVE_CLK))
return error_string(buf, end, "(%pC?)", spec);
if (check_pointer(&buf, end, clk, spec))
return buf;
switch (fmt[1]) {
case 'n':
default:
#ifdef CONFIG_COMMON_CLK
return string(buf, end, __clk_get_name(clk), spec);
#else
return ptr_to_id(buf, end, clk, spec);
#endif
}
}
static
char *format_flags(char *buf, char *end, unsigned long flags,
const struct trace_print_flags *names)
{
unsigned long mask;
for ( ; flags && names->name; names++) {
mask = names->mask;
if ((flags & mask) != mask)
continue;
buf = string(buf, end, names->name, default_str_spec);
flags &= ~mask;
if (flags) {
if (buf < end)
*buf = '|';
buf++;
}
}
if (flags)
buf = number(buf, end, flags, default_flag_spec);
return buf;
}
struct page_flags_fields {
int width;
int shift;
int mask;
const struct printf_spec *spec;
const char *name;
};
static const struct page_flags_fields pff[] = {
{SECTIONS_WIDTH, SECTIONS_PGSHIFT, SECTIONS_MASK,
&default_dec_spec, "section"},
{NODES_WIDTH, NODES_PGSHIFT, NODES_MASK,
&default_dec_spec, "node"},
{ZONES_WIDTH, ZONES_PGSHIFT, ZONES_MASK,
&default_dec_spec, "zone"},
{LAST_CPUPID_WIDTH, LAST_CPUPID_PGSHIFT, LAST_CPUPID_MASK,
&default_flag_spec, "lastcpupid"},
{KASAN_TAG_WIDTH, KASAN_TAG_PGSHIFT, KASAN_TAG_MASK,
&default_flag_spec, "kasantag"},
};
static
char *format_page_flags(char *buf, char *end, unsigned long flags)
{
unsigned long main_flags = flags & PAGEFLAGS_MASK;
bool append = false;
int i;
buf = number(buf, end, flags, default_flag_spec);
if (buf < end)
*buf = '(';
buf++;
/* Page flags from the main area. */
if (main_flags) {
buf = format_flags(buf, end, main_flags, pageflag_names);
append = true;
}
/* Page flags from the fields area */
for (i = 0; i < ARRAY_SIZE(pff); i++) {
/* Skip undefined fields. */
if (!pff[i].width)
continue;
/* Format: Flag Name + '=' (equals sign) + Number + '|' (separator) */
if (append) {
if (buf < end)
*buf = '|';
buf++;
}
buf = string(buf, end, pff[i].name, default_str_spec);
if (buf < end)
*buf = '=';
buf++;
buf = number(buf, end, (flags >> pff[i].shift) & pff[i].mask,
*pff[i].spec);
append = true;
}
if (buf < end)
*buf = ')';
buf++;
return buf;
}
static
char *format_page_type(char *buf, char *end, unsigned int page_type)
{
buf = number(buf, end, page_type, default_flag_spec);
if (buf < end)
*buf = '(';
buf++;
if (page_type_has_type(page_type))
buf = format_flags(buf, end, ~page_type, pagetype_names);
if (buf < end)
*buf = ')';
buf++;
return buf;
}
static noinline_for_stack
char *flags_string(char *buf, char *end, void *flags_ptr,
struct printf_spec spec, const char *fmt)
{
unsigned long flags;
const struct trace_print_flags *names;
if (check_pointer(&buf, end, flags_ptr, spec))
return buf;
switch (fmt[1]) {
case 'p':
return format_page_flags(buf, end, *(unsigned long *)flags_ptr);
case 't':
return format_page_type(buf, end, *(unsigned int *)flags_ptr);
case 'v':
flags = *(unsigned long *)flags_ptr;
names = vmaflag_names;
break;
case 'g':
flags = (__force unsigned long)(*(gfp_t *)flags_ptr);
names = gfpflag_names;
break;
default:
return error_string(buf, end, "(%pG?)", spec);
}
return format_flags(buf, end, flags, names);
}
static noinline_for_stack
char *fwnode_full_name_string(struct fwnode_handle *fwnode, char *buf,
char *end)
{
int depth;
/* Loop starting from the root node to the current node. */
for (depth = fwnode_count_parents(fwnode); depth >= 0; depth--) {
struct fwnode_handle *__fwnode =
fwnode_get_nth_parent(fwnode, depth);
buf = string(buf, end, fwnode_get_name_prefix(__fwnode),
default_str_spec);
buf = string(buf, end, fwnode_get_name(__fwnode),
default_str_spec);
fwnode_handle_put(__fwnode);
}
return buf;
}
static noinline_for_stack
char *device_node_string(char *buf, char *end, struct device_node *dn,
struct printf_spec spec, const char *fmt)
{
char tbuf[sizeof("xxxx") + 1];
const char *p;
int ret;
char *buf_start = buf;
struct property *prop;
bool has_mult, pass;
struct printf_spec str_spec = spec;
str_spec.field_width = -1;
if (fmt[0] != 'F')
return error_string(buf, end, "(%pO?)", spec);
if (!IS_ENABLED(CONFIG_OF))
return error_string(buf, end, "(%pOF?)", spec);
if (check_pointer(&buf, end, dn, spec))
return buf;
	/* simple case without any more format specifiers */
fmt++;
if (fmt[0] == '\0' || strcspn(fmt,"fnpPFcC") > 0)
fmt = "f";
for (pass = false; strspn(fmt,"fnpPFcC"); fmt++, pass = true) {
int precision;
if (pass) {
if (buf < end)
*buf = ':';
buf++;
}
switch (*fmt) {
case 'f': /* full_name */
buf = fwnode_full_name_string(of_fwnode_handle(dn), buf,
end);
break;
case 'n': /* name */
p = fwnode_get_name(of_fwnode_handle(dn));
precision = str_spec.precision;
str_spec.precision = strchrnul(p, '@') - p;
buf = string(buf, end, p, str_spec);
str_spec.precision = precision;
break;
case 'p': /* phandle */
buf = number(buf, end, (unsigned int)dn->phandle, default_dec_spec);
break;
case 'P': /* path-spec */
p = fwnode_get_name(of_fwnode_handle(dn));
if (!p[1])
p = "/";
buf = string(buf, end, p, str_spec);
break;
case 'F': /* flags */
tbuf[0] = of_node_check_flag(dn, OF_DYNAMIC) ? 'D' : '-';
tbuf[1] = of_node_check_flag(dn, OF_DETACHED) ? 'd' : '-';
tbuf[2] = of_node_check_flag(dn, OF_POPULATED) ? 'P' : '-';
tbuf[3] = of_node_check_flag(dn, OF_POPULATED_BUS) ? 'B' : '-';
tbuf[4] = 0;
buf = string_nocheck(buf, end, tbuf, str_spec);
break;
case 'c': /* major compatible string */
ret = of_property_read_string(dn, "compatible", &p);
if (!ret)
buf = string(buf, end, p, str_spec);
break;
case 'C': /* full compatible string */
has_mult = false;
of_property_for_each_string(dn, "compatible", prop, p) {
if (has_mult)
buf = string_nocheck(buf, end, ",", str_spec);
buf = string_nocheck(buf, end, "\"", str_spec);
buf = string(buf, end, p, str_spec);
buf = string_nocheck(buf, end, "\"", str_spec);
has_mult = true;
}
break;
default:
break;
}
}
return widen_string(buf, buf - buf_start, end, spec);
}
static noinline_for_stack
char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
struct printf_spec spec, const char *fmt)
{
struct printf_spec str_spec = spec;
char *buf_start = buf;
str_spec.field_width = -1;
if (*fmt != 'w')
return error_string(buf, end, "(%pf?)", spec);
if (check_pointer(&buf, end, fwnode, spec))
return buf;
fmt++;
switch (*fmt) {
case 'P': /* name */
buf = string(buf, end, fwnode_get_name(fwnode), str_spec);
break;
case 'f': /* full_name */
default:
buf = fwnode_full_name_string(fwnode, buf, end);
break;
}
return widen_string(buf, buf - buf_start, end, spec);
}
int __init no_hash_pointers_enable(char *str)
{
if (no_hash_pointers)
return 0;
no_hash_pointers = true;
pr_warn("**********************************************************\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warn("** **\n");
pr_warn("** This system shows unhashed kernel memory addresses **\n");
pr_warn("** via the console, logs, and other interfaces. This **\n");
pr_warn("** might reduce the security of your system. **\n");
pr_warn("** **\n");
pr_warn("** If you see this message and you are not debugging **\n");
pr_warn("** the kernel, report this immediately to your system **\n");
pr_warn("** administrator! **\n");
pr_warn("** **\n");
pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
pr_warn("**********************************************************\n");
return 0;
}
early_param("no_hash_pointers", no_hash_pointers_enable);
/* Used for Rust formatting ('%pA'). */
char *rust_fmt_argument(char *buf, char *end, void *ptr);
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
* specifiers.
*
* Please update scripts/checkpatch.pl when adding/removing conversion
* characters. (Search for "check for vsprintf extension").
*
* Right now we handle:
*
* - 'S' For symbolic direct pointers (or function descriptors) with offset
* - 's' For symbolic direct pointers (or function descriptors) without offset
* - '[Ss]R' as above with __builtin_extract_return_addr() translation
* - 'S[R]b' as above with module build ID (for use in backtraces)
* - '[Ff]' %pf and %pF were obsoleted and later removed in favor of
* %ps and %pS. Be careful when re-using these specifiers.
* - 'B' For backtraced symbolic direct pointers with offset
* - 'Bb' as above with module build ID (for use in backtraces)
* - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
* - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
* - 'b[l]' For a bitmap, the number of bits is determined by the field
* width which must be explicitly specified either as part of the
* format string '%32b[l]' or through '%*b[l]', [l] selects
* range-list format instead of hex format
* - 'M' For a 6-byte MAC address, it prints the address in the
* usual colon-separated hex notation
* - 'm' For a 6-byte MAC address, it prints the hex address without colons
* - 'MF' For a 6-byte MAC FDDI address, it prints the address
* with a dash-separated hex notation
* - '[mM]R' For a 6-byte MAC address, Reverse order (Bluetooth)
* - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
* IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
* IPv6 uses colon separated network-order 16 bit hex with leading 0's
* [S][pfs]
* Generic IPv4/IPv6 address (struct sockaddr *) that falls back to
* [4] or [6] and is able to print port [p], flowinfo [f], scope [s]
* - 'i' [46] for 'raw' IPv4/IPv6 addresses
* IPv6 omits the colons (01020304...0f)
* IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
* [S][pfs]
* Generic IPv4/IPv6 address (struct sockaddr *) that falls back to
* [4] or [6] and is able to print port [p], flowinfo [f], scope [s]
* - '[Ii][4S][hnbl]' IPv4 addresses in host, network, big or little endian order
* - 'I[6S]c' for IPv6 addresses printed as specified by
* https://tools.ietf.org/html/rfc5952
* - 'E[achnops]' For an escaped buffer, where rules are defined by combination
* of the following flags (see string_escape_mem() for the
* details):
* a - ESCAPE_ANY
* c - ESCAPE_SPECIAL
* h - ESCAPE_HEX
* n - ESCAPE_NULL
* o - ESCAPE_OCTAL
* p - ESCAPE_NP
* s - ESCAPE_SPACE
* By default ESCAPE_ANY_NP is used.
* - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
* "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
* Options for %pU are:
* b big endian lower case hex (default)
* B big endian UPPER case hex
* l little endian lower case hex
* L little endian UPPER case hex
* big endian output byte order is:
* [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15]
* little endian output byte order is:
* [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15]
* - 'V' For a struct va_format which contains a format string * and va_list *,
* call vsnprintf(->format, *->va_list).
* Implements a "recursive vsnprintf".
* Do not use this feature without some mechanism to verify the
* correctness of the format string and va_list arguments.
* - 'K' For a kernel pointer that should be hidden from unprivileged users.
* Use only for procfs, sysfs and similar files, not printk(); please
* read the documentation (path below) first.
* - 'NF' For a netdev_features_t
* - '4cc' V4L2 or DRM FourCC code, with endianness and raw numerical value.
* - 'h[CDN]' For a variable-length buffer, it prints it as a hex string with
* a certain separator (' ' by default):
* C colon
* D dash
* N no separator
 * The maximum supported length is 64 bytes of the input. Consider
 * using print_hex_dump() for larger input.
* - 'a[pd]' For address types [p] phys_addr_t, [d] dma_addr_t and derivatives
* (default assumed to be phys_addr_t, passed by reference)
* - 'd[234]' For a dentry name (optionally 2-4 last components)
* - 'D[234]' Same as 'd' but for a struct file
* - 'g' For block_device name (gendisk + partition number)
* - 't[RT][dt][r][s]' For time and date as represented by:
* R struct rtc_time
* T time64_t
* - 'C' For a clock, it prints the name (Common Clock Framework) or address
* (legacy clock framework) of the clock
* - 'Cn' For a clock, it prints the name (Common Clock Framework) or address
* (legacy clock framework) of the clock
* - 'G' For flags to be printed as a collection of symbolic strings that would
* construct the specific value. Supported flags given by option:
* p page flags (see struct page) given as pointer to unsigned long
* g gfp flags (GFP_* and __GFP_*) given as pointer to gfp_t
* v vma flags (VM_*) given as pointer to unsigned long
* - 'OF[fnpPcCF]' For a device tree object
* Without any optional arguments prints the full_name
* f device node full_name
* n device node name
* p device node phandle
* P device node path spec (name + @unit)
* F device node flags
* c major compatible string
* C full compatible string
* - 'fw[fP]' For a firmware node (struct fwnode_handle) pointer
* Without an option prints the full name of the node
* f full name
* P node name, including a possible unit address
* - 'x' For printing the address unmodified. Equivalent to "%lx".
* Please read the documentation (path below) before using!
* - '[ku]s' For a BPF/tracing related format specifier, e.g. used out of
* bpf_trace_printk() where [ku] prefix specifies either kernel (k)
* or user (u) memory to probe, and:
* s a string, equivalent to "%s" on direct vsnprintf() use
*
* ** When making changes please also update:
* Documentation/core-api/printk-formats.rst
*
* Note: The default behaviour (unadorned %p) is to hash the address,
* rendering it useful as a unique identifier.
*
* There is also a '%pA' format specifier, but it is only intended to be used
* from Rust code to format core::fmt::Arguments. Do *not* use it from C.
* See rust/kernel/print.rs for details.
*/
static noinline_for_stack
char *pointer(const char *fmt, char *buf, char *end, void *ptr,
struct printf_spec spec)
{
switch (*fmt) {
case 'S':
case 's':
ptr = dereference_symbol_descriptor(ptr);
fallthrough;
case 'B':
return symbol_string(buf, end, ptr, spec, fmt);
case 'R':
case 'r':
return resource_string(buf, end, ptr, spec, fmt);
case 'h':
return hex_string(buf, end, ptr, spec, fmt);
case 'b':
switch (fmt[1]) {
case 'l':
return bitmap_list_string(buf, end, ptr, spec, fmt);
default:
return bitmap_string(buf, end, ptr, spec, fmt);
}
case 'M': /* Colon separated: 00:01:02:03:04:05 */
case 'm': /* Contiguous: 000102030405 */
/* [mM]F (FDDI) */
/* [mM]R (Reverse order; Bluetooth) */
return mac_address_string(buf, end, ptr, spec, fmt);
case 'I': /* Formatted IP supported
* 4: 1.2.3.4
* 6: 0001:0203:...:0708
* 6c: 1::708 or 1::1.2.3.4
*/
case 'i': /* Contiguous:
* 4: 001.002.003.004
* 6: 000102...0f
*/
return ip_addr_string(buf, end, ptr, spec, fmt);
case 'E':
return escaped_string(buf, end, ptr, spec, fmt);
case 'U':
return uuid_string(buf, end, ptr, spec, fmt);
case 'V':
return va_format(buf, end, ptr, spec, fmt);
case 'K':
return restricted_pointer(buf, end, ptr, spec);
case 'N':
return netdev_bits(buf, end, ptr, spec, fmt);
case '4':
return fourcc_string(buf, end, ptr, spec, fmt);
case 'a':
return address_val(buf, end, ptr, spec, fmt);
case 'd':
return dentry_name(buf, end, ptr, spec, fmt);
case 't':
return time_and_date(buf, end, ptr, spec, fmt);
case 'C':
return clock(buf, end, ptr, spec, fmt);
case 'D':
return file_dentry_name(buf, end, ptr, spec, fmt);
#ifdef CONFIG_BLOCK
case 'g':
return bdev_name(buf, end, ptr, spec, fmt);
#endif
case 'G':
return flags_string(buf, end, ptr, spec, fmt);
case 'O':
return device_node_string(buf, end, ptr, spec, fmt + 1);
case 'f':
return fwnode_string(buf, end, ptr, spec, fmt + 1);
case 'A':
if (!IS_ENABLED(CONFIG_RUST)) {
WARN_ONCE(1, "Please remove %%pA from non-Rust code\n");
return error_string(buf, end, "(%pA?)", spec);
}
return rust_fmt_argument(buf, end, ptr);
case 'x':
return pointer_string(buf, end, ptr, spec);
case 'e':
/* %pe with a non-ERR_PTR gets treated as plain %p */
if (!IS_ERR(ptr))
return default_pointer(buf, end, ptr, spec);
return err_ptr(buf, end, ptr, spec);
case 'u':
case 'k':
switch (fmt[1]) {
case 's':
return string(buf, end, ptr, spec);
default:
return error_string(buf, end, "(einval)", spec);
}
default:
return default_pointer(buf, end, ptr, spec);
}
}
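/*
 * Illustrative usage sketch (not part of the original file): a few of the %p
 * extensions dispatched by pointer() above, exercised through snprintf().
 * The values below are hypothetical.
 */
static void __maybe_unused pointer_extension_examples(void)
{
	u8 mac[6] = { 0x00, 0x0a, 0x95, 0x9d, 0x68, 0x16 };
	u8 ip[4] = { 192, 168, 0, 1 };
	char buf[64];

	/* "00:0a:95:9d:68:16" - colon-separated MAC address */
	snprintf(buf, sizeof(buf), "%pM", mac);

	/* "192.168.0.1" - dotted-quad IPv4 address */
	snprintf(buf, sizeof(buf), "%pI4", ip);

	/* Plain %p hashes the value; %px prints it unmodified */
	snprintf(buf, sizeof(buf), "%p %px", buf, buf);
}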
/*
* Helper function to decode printf style format.
 * Each call decodes a token from the format and returns the
 * number of characters read (i.e. how far to advance the format
 * string before the next call).
* The decoded token is returned through the parameters
*
* 'h', 'l', or 'L' for integer fields
* 'z' support added 23/7/1999 S.H.
* 'z' changed to 'Z' --davidm 1/25/99
* 'Z' changed to 'z' --adobriyan 2017-01-25
* 't' added for ptrdiff_t
*
* @fmt: the format string
 * @type: type of the token returned
* @flags: various flags such as +, -, # tokens..
* @field_width: overwritten width
* @base: base of the number (octal, hex, ...)
* @precision: precision of a number
* @qualifier: qualifier of a number (long, size_t, ...)
*/
static noinline_for_stack
int format_decode(const char *fmt, struct printf_spec *spec)
{
const char *start = fmt;
char qualifier;
/* we finished early by reading the field width */
if (spec->type == FORMAT_TYPE_WIDTH) {
if (spec->field_width < 0) {
spec->field_width = -spec->field_width;
spec->flags |= LEFT;
}
spec->type = FORMAT_TYPE_NONE;
goto precision;
}
/* we finished early by reading the precision */
if (spec->type == FORMAT_TYPE_PRECISION) {
if (spec->precision < 0)
spec->precision = 0;
spec->type = FORMAT_TYPE_NONE;
goto qualifier;
}
/* By default */
spec->type = FORMAT_TYPE_NONE;
for (; *fmt ; ++fmt) {
if (*fmt == '%')
break;
}
/* Return the current non-format string */
if (fmt != start || !*fmt)
return fmt - start;
/* Process flags */
spec->flags = 0;
while (1) { /* this also skips first '%' */
bool found = true;
++fmt;
switch (*fmt) {
case '-': spec->flags |= LEFT; break;
case '+': spec->flags |= PLUS; break;
case ' ': spec->flags |= SPACE; break;
case '#': spec->flags |= SPECIAL; break;
case '0': spec->flags |= ZEROPAD; break;
default: found = false;
}
if (!found)
break;
}
/* get field width */
spec->field_width = -1;
if (isdigit(*fmt))
spec->field_width = skip_atoi(&fmt);
else if (*fmt == '*') {
/* it's the next argument */
spec->type = FORMAT_TYPE_WIDTH;
return ++fmt - start;
}
precision:
/* get the precision */
spec->precision = -1;
if (*fmt == '.') {
++fmt;
if (isdigit(*fmt)) {
spec->precision = skip_atoi(&fmt);
if (spec->precision < 0)
spec->precision = 0;
} else if (*fmt == '*') {
/* it's the next argument */
spec->type = FORMAT_TYPE_PRECISION;
return ++fmt - start;
}
}
qualifier:
/* get the conversion qualifier */
qualifier = 0;
if (*fmt == 'h' || _tolower(*fmt) == 'l' ||
*fmt == 'z' || *fmt == 't') {
qualifier = *fmt++;
if (unlikely(qualifier == *fmt)) {
if (qualifier == 'l') {
qualifier = 'L';
++fmt;
} else if (qualifier == 'h') {
qualifier = 'H';
++fmt;
}
}
}
/* default base */
spec->base = 10;
switch (*fmt) {
case 'c':
spec->type = FORMAT_TYPE_CHAR;
return ++fmt - start;
case 's':
spec->type = FORMAT_TYPE_STR;
return ++fmt - start;
case 'p':
spec->type = FORMAT_TYPE_PTR;
return ++fmt - start;
case '%':
spec->type = FORMAT_TYPE_PERCENT_CHAR;
return ++fmt - start;
/* integer number formats - set up the flags and "break" */
case 'o':
spec->base = 8;
break;
case 'x':
spec->flags |= SMALL;
fallthrough;
case 'X':
spec->base = 16;
break;
case 'd':
case 'i':
spec->flags |= SIGN;
break;
case 'u':
break;
case 'n':
/*
* Since %n poses a greater security risk than
* utility, treat it as any other invalid or
* unsupported format specifier.
*/
fallthrough;
default:
WARN_ONCE(1, "Please remove unsupported %%%c in format string\n", *fmt);
spec->type = FORMAT_TYPE_INVALID;
return fmt - start;
}
if (qualifier == 'L')
spec->type = FORMAT_TYPE_LONG_LONG;
else if (qualifier == 'l') {
BUILD_BUG_ON(FORMAT_TYPE_ULONG + SIGN != FORMAT_TYPE_LONG);
spec->type = FORMAT_TYPE_ULONG + (spec->flags & SIGN);
} else if (qualifier == 'z') {
spec->type = FORMAT_TYPE_SIZE_T;
} else if (qualifier == 't') {
spec->type = FORMAT_TYPE_PTRDIFF;
} else if (qualifier == 'H') {
BUILD_BUG_ON(FORMAT_TYPE_UBYTE + SIGN != FORMAT_TYPE_BYTE);
spec->type = FORMAT_TYPE_UBYTE + (spec->flags & SIGN);
} else if (qualifier == 'h') {
BUILD_BUG_ON(FORMAT_TYPE_USHORT + SIGN != FORMAT_TYPE_SHORT);
spec->type = FORMAT_TYPE_USHORT + (spec->flags & SIGN);
} else {
BUILD_BUG_ON(FORMAT_TYPE_UINT + SIGN != FORMAT_TYPE_INT);
spec->type = FORMAT_TYPE_UINT + (spec->flags & SIGN);
}
return ++fmt - start;
}
static void
set_field_width(struct printf_spec *spec, int width)
{
spec->field_width = width;
if (WARN_ONCE(spec->field_width != width, "field width %d too large", width)) {
spec->field_width = clamp(width, -FIELD_WIDTH_MAX, FIELD_WIDTH_MAX);
}
}
static void
set_precision(struct printf_spec *spec, int prec)
{
spec->precision = prec;
if (WARN_ONCE(spec->precision != prec, "precision %d too large", prec)) {
spec->precision = clamp(prec, 0, PRECISION_MAX);
}
}
/**
* vsnprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt: The format string to use
* @args: Arguments for the format string
*
* This function generally follows C99 vsnprintf, but has some
* extensions and a few limitations:
*
* - ``%n`` is unsupported
* - ``%p*`` is handled by pointer()
*
* See pointer() or Documentation/core-api/printk-formats.rst for more
* extensive description.
*
* **Please update the documentation in both places when making changes**
*
* The return value is the number of characters which would
* be generated for the given input, excluding the trailing
* '\0', as per ISO C99. If you want to have the exact
* number of characters written into @buf as return value
* (not including the trailing '\0'), use vscnprintf(). If the
* return is greater than or equal to @size, the resulting
* string is truncated.
*
* If you're not already dealing with a va_list consider using snprintf().
*/
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
unsigned long long num;
char *str, *end;
struct printf_spec spec = {0};
/* Reject out-of-range values early. Large positive sizes are
used for unknown buffer sizes. */
if (WARN_ON_ONCE(size > INT_MAX))
return 0;
str = buf;
end = buf + size;
/* Make sure end is always >= buf */
if (end < buf) {
end = ((void *)-1);
size = end - buf;
}
while (*fmt) {
const char *old_fmt = fmt;
int read = format_decode(fmt, &spec);
fmt += read;
switch (spec.type) {
case FORMAT_TYPE_NONE: {
int copy = read;
if (str < end) {
if (copy > end - str)
copy = end - str;
memcpy(str, old_fmt, copy);
}
str += read;
break;
}
case FORMAT_TYPE_WIDTH:
set_field_width(&spec, va_arg(args, int));
break;
case FORMAT_TYPE_PRECISION:
set_precision(&spec, va_arg(args, int));
break;
case FORMAT_TYPE_CHAR: {
char c;
if (!(spec.flags & LEFT)) {
while (--spec.field_width > 0) {
if (str < end)
*str = ' ';
++str;
}
}
c = (unsigned char) va_arg(args, int);
if (str < end)
*str = c;
++str;
while (--spec.field_width > 0) {
if (str < end)
*str = ' ';
++str;
}
break;
}
case FORMAT_TYPE_STR:
str = string(str, end, va_arg(args, char *), spec);
break;
case FORMAT_TYPE_PTR:
str = pointer(fmt, str, end, va_arg(args, void *),
spec);
while (isalnum(*fmt))
fmt++;
break;
case FORMAT_TYPE_PERCENT_CHAR:
if (str < end)
*str = '%';
++str;
break;
case FORMAT_TYPE_INVALID:
/*
* Presumably the arguments passed gcc's type
* checking, but there is no safe or sane way
* for us to continue parsing the format and
* fetching from the va_list; the remaining
* specifiers and arguments would be out of
* sync.
*/
goto out;
default:
switch (spec.type) {
case FORMAT_TYPE_LONG_LONG:
num = va_arg(args, long long);
break;
case FORMAT_TYPE_ULONG:
num = va_arg(args, unsigned long);
break;
case FORMAT_TYPE_LONG:
num = va_arg(args, long);
break;
case FORMAT_TYPE_SIZE_T:
if (spec.flags & SIGN)
num = va_arg(args, ssize_t);
else
num = va_arg(args, size_t);
break;
case FORMAT_TYPE_PTRDIFF:
num = va_arg(args, ptrdiff_t);
break;
case FORMAT_TYPE_UBYTE:
num = (unsigned char) va_arg(args, int);
break;
case FORMAT_TYPE_BYTE:
num = (signed char) va_arg(args, int);
break;
case FORMAT_TYPE_USHORT:
num = (unsigned short) va_arg(args, int);
break;
case FORMAT_TYPE_SHORT:
num = (short) va_arg(args, int);
break;
case FORMAT_TYPE_INT:
num = (int) va_arg(args, int);
break;
default:
num = va_arg(args, unsigned int);
}
str = number(str, end, num, spec);
}
}
out:
if (size > 0) {
if (str < end)
*str = '\0';
else
end[-1] = '\0';
}
/* the trailing null byte doesn't count towards the total */
return str-buf;
}
EXPORT_SYMBOL(vsnprintf);
/**
* vscnprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt: The format string to use
* @args: Arguments for the format string
*
* The return value is the number of characters which have been written into
* the @buf not including the trailing '\0'. If @size is == 0 the function
* returns 0.
*
* If you're not already dealing with a va_list consider using scnprintf().
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
int i;
if (unlikely(!size))
return 0;
i = vsnprintf(buf, size, fmt, args);
if (likely(i < size))
return i;
return size - 1;
}
EXPORT_SYMBOL(vscnprintf);
/**
* snprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt: The format string to use
* @...: Arguments for the format string
*
* The return value is the number of characters which would be
* generated for the given input, excluding the trailing null,
* as per ISO C99. If the return is greater than or equal to
* @size, the resulting string is truncated.
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
int snprintf(char *buf, size_t size, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
i = vsnprintf(buf, size, fmt, args);
va_end(args);
return i;
}
EXPORT_SYMBOL(snprintf);
/**
* scnprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt: The format string to use
* @...: Arguments for the format string
*
* The return value is the number of characters written into @buf not including
* the trailing '\0'. If @size is == 0 the function returns 0.
*/
int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
i = vscnprintf(buf, size, fmt, args);
va_end(args);
return i;
}
EXPORT_SYMBOL(scnprintf);
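/*
 * Illustrative sketch (not part of the original file): the return-value
 * difference between snprintf() and scnprintf() on truncation, assuming the
 * hypothetical 8-byte buffer below.
 */
static void __maybe_unused snprintf_vs_scnprintf_example(void)
{
	char small[8];
	int n;

	/* Returns 13, the would-be length of "hello, world!"; output truncated */
	n = snprintf(small, sizeof(small), "hello, world!");

	/* Returns 7, the number of characters actually stored (no trailing NUL) */
	n = scnprintf(small, sizeof(small), "hello, world!");
	(void)n;
}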
/**
* vsprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @fmt: The format string to use
* @args: Arguments for the format string
*
* The function returns the number of characters written
* into @buf. Use vsnprintf() or vscnprintf() in order to avoid
* buffer overflows.
*
* If you're not already dealing with a va_list consider using sprintf().
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
int vsprintf(char *buf, const char *fmt, va_list args)
{
return vsnprintf(buf, INT_MAX, fmt, args);
}
EXPORT_SYMBOL(vsprintf);
/**
* sprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
* @fmt: The format string to use
* @...: Arguments for the format string
*
* The function returns the number of characters written
* into @buf. Use snprintf() or scnprintf() in order to avoid
* buffer overflows.
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
int sprintf(char *buf, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
i = vsnprintf(buf, INT_MAX, fmt, args);
va_end(args);
return i;
}
EXPORT_SYMBOL(sprintf);
#ifdef CONFIG_BINARY_PRINTF
/*
* bprintf service:
* vbin_printf() - VA arguments to binary data
* bstr_printf() - Binary data to text string
*/
/**
* vbin_printf - Parse a format string and place args' binary value in a buffer
* @bin_buf: The buffer to place args' binary value
 * @size: The size of the buffer (in 32-bit words, not characters)
* @fmt: The format string to use
* @args: Arguments for the format string
*
* The format follows C99 vsnprintf, except %n is ignored, and its argument
* is skipped.
*
 * The return value is the number of 32-bit words which would be generated for
* the given input.
*
* NOTE:
* If the return value is greater than @size, the resulting bin_buf is NOT
* valid for bstr_printf().
*/
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
{
struct printf_spec spec = {0};
char *str, *end;
int width;
str = (char *)bin_buf;
end = (char *)(bin_buf + size);
#define save_arg(type) \
({ \
unsigned long long value; \
if (sizeof(type) == 8) { \
unsigned long long val8; \
str = PTR_ALIGN(str, sizeof(u32)); \
val8 = va_arg(args, unsigned long long); \
if (str + sizeof(type) <= end) { \
*(u32 *)str = *(u32 *)&val8; \
*(u32 *)(str + 4) = *((u32 *)&val8 + 1); \
} \
value = val8; \
} else { \
unsigned int val4; \
str = PTR_ALIGN(str, sizeof(type)); \
val4 = va_arg(args, int); \
if (str + sizeof(type) <= end) \
*(typeof(type) *)str = (type)(long)val4; \
value = (unsigned long long)val4; \
} \
str += sizeof(type); \
value; \
})
while (*fmt) {
int read = format_decode(fmt, &spec);
fmt += read;
switch (spec.type) {
case FORMAT_TYPE_NONE:
case FORMAT_TYPE_PERCENT_CHAR:
break;
case FORMAT_TYPE_INVALID:
goto out;
case FORMAT_TYPE_WIDTH:
case FORMAT_TYPE_PRECISION:
width = (int)save_arg(int);
/* Pointers may require the width */
if (*fmt == 'p')
set_field_width(&spec, width);
break;
case FORMAT_TYPE_CHAR:
save_arg(char);
break;
case FORMAT_TYPE_STR: {
const char *save_str = va_arg(args, char *);
const char *err_msg;
size_t len;
err_msg = check_pointer_msg(save_str);
if (err_msg)
save_str = err_msg;
len = strlen(save_str) + 1;
if (str + len < end)
memcpy(str, save_str, len);
str += len;
break;
}
case FORMAT_TYPE_PTR:
/* Dereferenced pointers must be done now */
switch (*fmt) {
/* Dereference of functions is still OK */
case 'S':
case 's':
case 'x':
case 'K':
case 'e':
save_arg(void *);
break;
default:
if (!isalnum(*fmt)) {
save_arg(void *);
break;
}
str = pointer(fmt, str, end, va_arg(args, void *),
spec);
if (str + 1 < end)
*str++ = '\0';
else
end[-1] = '\0'; /* Must be nul terminated */
}
/* skip all alphanumeric pointer suffixes */
while (isalnum(*fmt))
fmt++;
break;
default:
switch (spec.type) {
case FORMAT_TYPE_LONG_LONG:
save_arg(long long);
break;
case FORMAT_TYPE_ULONG:
case FORMAT_TYPE_LONG:
save_arg(unsigned long);
break;
case FORMAT_TYPE_SIZE_T:
save_arg(size_t);
break;
case FORMAT_TYPE_PTRDIFF:
save_arg(ptrdiff_t);
break;
case FORMAT_TYPE_UBYTE:
case FORMAT_TYPE_BYTE:
save_arg(char);
break;
case FORMAT_TYPE_USHORT:
case FORMAT_TYPE_SHORT:
save_arg(short);
break;
default:
save_arg(int);
}
}
}
out:
return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
#undef save_arg
}
EXPORT_SYMBOL_GPL(vbin_printf);
/**
* bstr_printf - Format a string from binary arguments and place it in a buffer
* @buf: The buffer to place the result into
* @size: The size of the buffer, including the trailing null space
* @fmt: The format string to use
* @bin_buf: Binary arguments for the format string
*
 * This function is like C99 vsnprintf, but the difference is that vsnprintf
 * gets its arguments from the stack, while bstr_printf gets them from @bin_buf,
 * a binary buffer generated by vbin_printf.
*
* The format follows C99 vsnprintf, but has some extensions:
* see vsnprintf comment for details.
*
* The return value is the number of characters which would
* be generated for the given input, excluding the trailing
* '\0', as per ISO C99. If you want to have the exact
* number of characters written into @buf as return value
* (not including the trailing '\0'), use vscnprintf(). If the
* return is greater than or equal to @size, the resulting
* string is truncated.
*/
int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
{
struct printf_spec spec = {0};
char *str, *end;
const char *args = (const char *)bin_buf;
if (WARN_ON_ONCE(size > INT_MAX))
return 0;
str = buf;
end = buf + size;
#define get_arg(type) \
({ \
typeof(type) value; \
if (sizeof(type) == 8) { \
args = PTR_ALIGN(args, sizeof(u32)); \
*(u32 *)&value = *(u32 *)args; \
*((u32 *)&value + 1) = *(u32 *)(args + 4); \
} else { \
args = PTR_ALIGN(args, sizeof(type)); \
value = *(typeof(type) *)args; \
} \
args += sizeof(type); \
value; \
})
/* Make sure end is always >= buf */
if (end < buf) {
end = ((void *)-1);
size = end - buf;
}
while (*fmt) {
const char *old_fmt = fmt;
int read = format_decode(fmt, &spec);
fmt += read;
switch (spec.type) {
case FORMAT_TYPE_NONE: {
int copy = read;
if (str < end) {
if (copy > end - str)
copy = end - str;
memcpy(str, old_fmt, copy);
}
str += read;
break;
}
case FORMAT_TYPE_WIDTH:
set_field_width(&spec, get_arg(int));
break;
case FORMAT_TYPE_PRECISION:
set_precision(&spec, get_arg(int));
break;
case FORMAT_TYPE_CHAR: {
char c;
if (!(spec.flags & LEFT)) {
while (--spec.field_width > 0) {
if (str < end)
*str = ' ';
++str;
}
}
c = (unsigned char) get_arg(char);
if (str < end)
*str = c;
++str;
while (--spec.field_width > 0) {
if (str < end)
*str = ' ';
++str;
}
break;
}
case FORMAT_TYPE_STR: {
const char *str_arg = args;
args += strlen(str_arg) + 1;
str = string(str, end, (char *)str_arg, spec);
break;
}
case FORMAT_TYPE_PTR: {
bool process = false;
int copy, len;
/* Non function dereferences were already done */
switch (*fmt) {
case 'S':
case 's':
case 'x':
case 'K':
case 'e':
process = true;
break;
default:
if (!isalnum(*fmt)) {
process = true;
break;
}
/* Pointer dereference was already processed */
if (str < end) {
len = copy = strlen(args);
if (copy > end - str)
copy = end - str;
memcpy(str, args, copy);
str += len;
args += len + 1;
}
}
if (process)
str = pointer(fmt, str, end, get_arg(void *), spec);
while (isalnum(*fmt))
fmt++;
break;
}
case FORMAT_TYPE_PERCENT_CHAR:
if (str < end)
*str = '%';
++str;
break;
case FORMAT_TYPE_INVALID:
goto out;
default: {
unsigned long long num;
switch (spec.type) {
case FORMAT_TYPE_LONG_LONG:
num = get_arg(long long);
break;
case FORMAT_TYPE_ULONG:
case FORMAT_TYPE_LONG:
num = get_arg(unsigned long);
break;
case FORMAT_TYPE_SIZE_T:
num = get_arg(size_t);
break;
case FORMAT_TYPE_PTRDIFF:
num = get_arg(ptrdiff_t);
break;
case FORMAT_TYPE_UBYTE:
num = get_arg(unsigned char);
break;
case FORMAT_TYPE_BYTE:
num = get_arg(signed char);
break;
case FORMAT_TYPE_USHORT:
num = get_arg(unsigned short);
break;
case FORMAT_TYPE_SHORT:
num = get_arg(short);
break;
case FORMAT_TYPE_UINT:
num = get_arg(unsigned int);
break;
default:
num = get_arg(int);
}
str = number(str, end, num, spec);
} /* default: */
} /* switch(spec.type) */
} /* while(*fmt) */
out:
if (size > 0) {
if (str < end)
*str = '\0';
else
end[-1] = '\0';
}
#undef get_arg
/* the trailing null byte doesn't count towards the total */
return str - buf;
}
EXPORT_SYMBOL_GPL(bstr_printf);
/**
* bprintf - Parse a format string and place args' binary value in a buffer
* @bin_buf: The buffer to place args' binary value
 * @size: The size of the buffer (in 32-bit words, not characters)
* @fmt: The format string to use
* @...: Arguments for the format string
*
 * The function returns the number of 32-bit words (u32) written
* into @bin_buf.
*/
int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...)
{
va_list args;
int ret;
va_start(args, fmt);
ret = vbin_printf(bin_buf, size, fmt, args);
va_end(args);
return ret;
}
EXPORT_SYMBOL_GPL(bprintf);
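/*
 * Illustrative sketch (not part of the original file): the usual
 * bprintf()/bstr_printf() round trip. Buffer sizes and strings are
 * hypothetical; note that @size for bprintf() is in 32-bit words, not bytes.
 */
static void __maybe_unused bprintf_roundtrip_example(void)
{
	u32 bin[32];
	char out[64];

	/* Pack the arguments (not the rendered text) into bin[] */
	bprintf(bin, ARRAY_SIZE(bin), "%s has %d items", "list", 3);

	/* Later, expand the same format against the packed arguments */
	bstr_printf(out, sizeof(out), "%s has %d items", bin);
}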
#endif /* CONFIG_BINARY_PRINTF */
/**
* vsscanf - Unformat a buffer into a list of arguments
* @buf: input buffer
* @fmt: format of buffer
* @args: arguments
*/
int vsscanf(const char *buf, const char *fmt, va_list args)
{
const char *str = buf;
char *next;
char digit;
int num = 0;
u8 qualifier;
unsigned int base;
union {
long long s;
unsigned long long u;
} val;
s16 field_width;
bool is_sign;
while (*fmt) {
/* skip any white space in format */
/* white space in format matches any amount of
* white space, including none, in the input.
*/
if (isspace(*fmt)) {
fmt = skip_spaces(++fmt);
str = skip_spaces(str);
}
/* anything that is not a conversion must match exactly */
if (*fmt != '%' && *fmt) {
if (*fmt++ != *str++)
break;
continue;
}
if (!*fmt)
break;
++fmt;
/* skip this conversion.
* advance both strings to next white space
*/
if (*fmt == '*') {
if (!*str)
break;
while (!isspace(*fmt) && *fmt != '%' && *fmt) {
/* '%*[' not yet supported, invalid format */
if (*fmt == '[')
return num;
fmt++;
}
while (!isspace(*str) && *str)
str++;
continue;
}
/* get field width */
field_width = -1;
if (isdigit(*fmt)) {
field_width = skip_atoi(&fmt);
if (field_width <= 0)
break;
}
/* get conversion qualifier */
qualifier = -1;
if (*fmt == 'h' || _tolower(*fmt) == 'l' ||
*fmt == 'z') {
qualifier = *fmt++;
if (unlikely(qualifier == *fmt)) {
if (qualifier == 'h') {
qualifier = 'H';
fmt++;
} else if (qualifier == 'l') {
qualifier = 'L';
fmt++;
}
}
}
if (!*fmt)
break;
if (*fmt == 'n') {
/* return number of characters read so far */
*va_arg(args, int *) = str - buf;
++fmt;
continue;
}
if (!*str)
break;
base = 10;
is_sign = false;
switch (*fmt++) {
case 'c':
{
char *s = (char *)va_arg(args, char*);
if (field_width == -1)
field_width = 1;
do {
*s++ = *str++;
} while (--field_width > 0 && *str);
num++;
}
continue;
case 's':
{
char *s = (char *)va_arg(args, char *);
if (field_width == -1)
field_width = SHRT_MAX;
/* first, skip leading white space in buffer */
str = skip_spaces(str);
/* now copy until next white space */
while (*str && !isspace(*str) && field_width--)
*s++ = *str++;
*s = '\0';
num++;
}
continue;
/*
* Warning: This implementation of the '[' conversion specifier
* deviates from its glibc counterpart in the following ways:
* (1) It does NOT support ranges i.e. '-' is NOT a special
* character
* (2) It cannot match the closing bracket ']' itself
* (3) A field width is required
* (4) '%*[' (discard matching input) is currently not supported
*
* Example usage:
* ret = sscanf("00:0a:95","%2[^:]:%2[^:]:%2[^:]",
* buf1, buf2, buf3);
* if (ret < 3)
* // etc..
*/
case '[':
{
char *s = (char *)va_arg(args, char *);
DECLARE_BITMAP(set, 256) = {0};
unsigned int len = 0;
bool negate = (*fmt == '^');
/* field width is required */
if (field_width == -1)
return num;
if (negate)
++fmt;
for ( ; *fmt && *fmt != ']'; ++fmt, ++len)
__set_bit((u8)*fmt, set);
/* no ']' or no character set found */
if (!*fmt || !len)
return num;
++fmt;
if (negate) {
bitmap_complement(set, set, 256);
/* exclude null '\0' byte */
__clear_bit(0, set);
}
/* match must be non-empty */
if (!test_bit((u8)*str, set))
return num;
while (test_bit((u8)*str, set) && field_width--)
*s++ = *str++;
*s = '\0';
++num;
}
continue;
case 'o':
base = 8;
break;
case 'x':
case 'X':
base = 16;
break;
case 'i':
base = 0;
fallthrough;
case 'd':
is_sign = true;
fallthrough;
case 'u':
break;
case '%':
/* looking for '%' in str */
if (*str++ != '%')
return num;
continue;
default:
/* invalid format; stop here */
return num;
}
/* have some sort of integer conversion.
* first, skip white space in buffer.
*/
str = skip_spaces(str);
digit = *str;
if (is_sign && digit == '-') {
if (field_width == 1)
break;
digit = *(str + 1);
}
if (!digit
|| (base == 16 && !isxdigit(digit))
|| (base == 10 && !isdigit(digit))
|| (base == 8 && !isodigit(digit))
|| (base == 0 && !isdigit(digit)))
break;
if (is_sign)
val.s = simple_strntoll(str,
field_width >= 0 ? field_width : INT_MAX,
&next, base);
else
val.u = simple_strntoull(str,
field_width >= 0 ? field_width : INT_MAX,
&next, base);
switch (qualifier) {
case 'H': /* that's 'hh' in format */
if (is_sign)
*va_arg(args, signed char *) = val.s;
else
*va_arg(args, unsigned char *) = val.u;
break;
case 'h':
if (is_sign)
*va_arg(args, short *) = val.s;
else
*va_arg(args, unsigned short *) = val.u;
break;
case 'l':
if (is_sign)
*va_arg(args, long *) = val.s;
else
*va_arg(args, unsigned long *) = val.u;
break;
case 'L':
if (is_sign)
*va_arg(args, long long *) = val.s;
else
*va_arg(args, unsigned long long *) = val.u;
break;
case 'z':
*va_arg(args, size_t *) = val.u;
break;
default:
if (is_sign)
*va_arg(args, int *) = val.s;
else
*va_arg(args, unsigned int *) = val.u;
break;
}
num++;
if (!next)
break;
str = next;
}
return num;
}
EXPORT_SYMBOL(vsscanf);
/**
* sscanf - Unformat a buffer into a list of arguments
* @buf: input buffer
* @fmt: formatting of buffer
* @...: resulting arguments
*/
int sscanf(const char *buf, const char *fmt, ...)
{
va_list args;
int i;
va_start(args, fmt);
i = vsscanf(buf, fmt, args);
va_end(args);
return i;
}
EXPORT_SYMBOL(sscanf);
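/*
 * Illustrative sketch, not part of the original file: using the '%[' set
 * conversion documented above to split a colon-separated string.  The input
 * and buffer sizes are arbitrary; note that a field width is mandatory for
 * '%[' in this implementation.
 */
static int __maybe_unused sscanf_set_example(void)
{
	char b1[3], b2[3], b3[3];

	/* Returns the number of conversions that matched (3 on success). */
	return sscanf("00:0a:95", "%2[^:]:%2[^:]:%2[^:]", b1, b2, b3);
}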
| linux-master | lib/vsprintf.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* IP/TCP/UDP checksumming routines
*
* Authors: Jorge Cwik, <[email protected]>
* Arnt Gulbrandsen, <[email protected]>
* Tom May, <[email protected]>
* Andreas Schwab, <[email protected]>
* Lots of code moved from tcp.c and ip.c; see those files
* for more names.
*
* 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
* Fixed some nasty bugs, causing some horrible crashes.
* A: At some points, the sum (%0) was used as
* length-counter instead of the length counter
* (%1). Thanks to Roman Hodek for pointing this out.
* B: GCC seems to mess up if one uses too many
* data-registers to hold input values and one tries to
* specify d0 and d1 as scratch registers. Letting gcc
* choose these registers itself solves the problem.
*/
/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
kills, so most of the assembly has to go. */
#include <linux/export.h>
#include <net/checksum.h>
#include <asm/byteorder.h>
#ifndef do_csum
static inline unsigned short from32to16(unsigned int x)
{
/* add up 16-bit and 16-bit for 16+c bit */
x = (x & 0xffff) + (x >> 16);
/* add up carry.. */
x = (x & 0xffff) + (x >> 16);
return x;
}
static unsigned int do_csum(const unsigned char *buff, int len)
{
int odd;
unsigned int result = 0;
if (len <= 0)
goto out;
odd = 1 & (unsigned long) buff;
if (odd) {
#ifdef __LITTLE_ENDIAN
result += (*buff << 8);
#else
result = *buff;
#endif
len--;
buff++;
}
if (len >= 2) {
if (2 & (unsigned long) buff) {
result += *(unsigned short *) buff;
len -= 2;
buff += 2;
}
if (len >= 4) {
const unsigned char *end = buff + ((unsigned)len & ~3);
unsigned int carry = 0;
do {
unsigned int w = *(unsigned int *) buff;
buff += 4;
result += carry;
result += w;
carry = (w > result);
} while (buff < end);
result += carry;
result = (result & 0xffff) + (result >> 16);
}
if (len & 2) {
result += *(unsigned short *) buff;
buff += 2;
}
}
if (len & 1)
#ifdef __LITTLE_ENDIAN
result += *buff;
#else
result += (*buff << 8);
#endif
result = from32to16(result);
if (odd)
result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
return result;
}
#endif
#ifndef ip_fast_csum
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which are always a whole number of 4-octet words (ihl * 4 octets).
*/
__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
return (__force __sum16)~do_csum(iph, ihl*4);
}
EXPORT_SYMBOL(ip_fast_csum);
#endif
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
__wsum csum_partial(const void *buff, int len, __wsum wsum)
{
unsigned int sum = (__force unsigned int)wsum;
unsigned int result = do_csum(buff, len);
/* add in old sum, and carry.. */
result += sum;
if (sum > result)
result += 1;
return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);
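/*
 * Illustrative sketch, not part of the original file: the usual pattern for
 * turning a 32-bit partial sum into a final 16-bit Internet checksum.  The
 * data pointer and length are placeholders supplied by the caller.
 */
static __sum16 __maybe_unused example_final_csum(const void *data, int len)
{
	/* Accumulate the partial sum, then fold to 16 bits and invert. */
	return csum_fold(csum_partial(data, len, 0));
}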
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
__sum16 ip_compute_csum(const void *buff, int len)
{
return (__force __sum16)~do_csum(buff, len);
}
EXPORT_SYMBOL(ip_compute_csum);
#ifndef csum_tcpudp_nofold
static inline u32 from64to32(u64 x)
{
/* add up 32-bit and 32-bit for 32+c bit */
x = (x & 0xffffffff) + (x >> 32);
/* add up carry.. */
x = (x & 0xffffffff) + (x >> 32);
return (u32)x;
}
__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
__u32 len, __u8 proto, __wsum sum)
{
unsigned long long s = (__force u32)sum;
s += (__force u32)saddr;
s += (__force u32)daddr;
#ifdef __BIG_ENDIAN
s += proto + len;
#else
s += (proto + len) << 8;
#endif
return (__force __wsum)from64to32(s);
}
EXPORT_SYMBOL(csum_tcpudp_nofold);
#endif
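/*
 * Illustrative sketch, not part of the original file: how the nofold helper
 * above is normally consumed.  csum_tcpudp_magic() (from <net/checksum.h>)
 * folds the pseudo-header sum with the payload checksum; every parameter
 * here is a placeholder from the caller.
 */
static __sum16 __maybe_unused example_l4_checksum(__be32 saddr, __be32 daddr,
						  const void *l4, __u32 len,
						  __u8 proto)
{
	__wsum payload = csum_partial(l4, len, 0);

	return csum_tcpudp_magic(saddr, daddr, len, proto, payload);
}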
| linux-master | lib/checksum.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Resizable, Scalable, Concurrent Hash Table
*
* Copyright (c) 2014-2015 Thomas Graf <[email protected]>
* Copyright (c) 2008-2014 Patrick McHardy <[email protected]>
*/
/**************************************************************************
* Self Test
**************************************************************************/
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#define MAX_ENTRIES 1000000
#define TEST_INSERT_FAIL INT_MAX
static int parm_entries = 50000;
module_param(parm_entries, int, 0);
MODULE_PARM_DESC(parm_entries, "Number of entries to add (default: 50000)");
static int runs = 4;
module_param(runs, int, 0);
MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)");
static int max_size = 0;
module_param(max_size, int, 0);
MODULE_PARM_DESC(max_size, "Maximum table size (default: calculated)");
static bool shrinking = false;
module_param(shrinking, bool, 0);
MODULE_PARM_DESC(shrinking, "Enable automatic shrinking (default: off)");
static int size = 8;
module_param(size, int, 0);
MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)");
static int tcount = 10;
module_param(tcount, int, 0);
MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)");
static bool enomem_retry = false;
module_param(enomem_retry, bool, 0);
MODULE_PARM_DESC(enomem_retry, "Retry insert even if -ENOMEM was returned (default: off)");
struct test_obj_val {
int id;
int tid;
};
struct test_obj {
struct test_obj_val value;
struct rhash_head node;
};
struct test_obj_rhl {
struct test_obj_val value;
struct rhlist_head list_node;
};
struct thread_data {
unsigned int entries;
int id;
struct task_struct *task;
struct test_obj *objs;
};
static u32 my_hashfn(const void *data, u32 len, u32 seed)
{
const struct test_obj_rhl *obj = data;
return (obj->value.id % 10);
}
static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
{
const struct test_obj_rhl *test_obj = obj;
const struct test_obj_val *val = arg->key;
return test_obj->value.id - val->id;
}
static struct rhashtable_params test_rht_params = {
.head_offset = offsetof(struct test_obj, node),
.key_offset = offsetof(struct test_obj, value),
.key_len = sizeof(struct test_obj_val),
.hashfn = jhash,
};
static struct rhashtable_params test_rht_params_dup = {
.head_offset = offsetof(struct test_obj_rhl, list_node),
.key_offset = offsetof(struct test_obj_rhl, value),
.key_len = sizeof(struct test_obj_val),
.hashfn = jhash,
.obj_hashfn = my_hashfn,
.obj_cmpfn = my_cmpfn,
.nelem_hint = 128,
.automatic_shrinking = false,
};
static atomic_t startup_count;
static DECLARE_WAIT_QUEUE_HEAD(startup_wait);
static int insert_retry(struct rhashtable *ht, struct test_obj *obj,
const struct rhashtable_params params)
{
int err, retries = -1, enomem_retries = 0;
do {
retries++;
cond_resched();
err = rhashtable_insert_fast(ht, &obj->node, params);
if (err == -ENOMEM && enomem_retry) {
enomem_retries++;
err = -EBUSY;
}
} while (err == -EBUSY);
if (enomem_retries)
pr_info(" %u insertions retried after -ENOMEM\n",
enomem_retries);
return err ? : retries;
}
static int __init test_rht_lookup(struct rhashtable *ht, struct test_obj *array,
unsigned int entries)
{
unsigned int i;
for (i = 0; i < entries; i++) {
struct test_obj *obj;
bool expected = !(i % 2);
struct test_obj_val key = {
.id = i,
};
if (array[i / 2].value.id == TEST_INSERT_FAIL)
expected = false;
obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
if (expected && !obj) {
pr_warn("Test failed: Could not find key %u\n", key.id);
return -ENOENT;
} else if (!expected && obj) {
pr_warn("Test failed: Unexpected entry found for key %u\n",
key.id);
return -EEXIST;
} else if (expected && obj) {
if (obj->value.id != i) {
pr_warn("Test failed: Lookup value mismatch %u!=%u\n",
obj->value.id, i);
return -EINVAL;
}
}
cond_resched_rcu();
}
return 0;
}
static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)
{
unsigned int total = 0, chain_len = 0;
struct rhashtable_iter hti;
struct rhash_head *pos;
rhashtable_walk_enter(ht, &hti);
rhashtable_walk_start(&hti);
while ((pos = rhashtable_walk_next(&hti))) {
if (PTR_ERR(pos) == -EAGAIN) {
pr_info("Info: encountered resize\n");
chain_len++;
continue;
} else if (IS_ERR(pos)) {
pr_warn("Test failed: rhashtable_walk_next() error: %ld\n",
PTR_ERR(pos));
break;
}
total++;
}
rhashtable_walk_stop(&hti);
rhashtable_walk_exit(&hti);
pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d, table-jumps=%u\n",
total, atomic_read(&ht->nelems), entries, chain_len);
if (total != atomic_read(&ht->nelems) || total != entries)
pr_warn("Test failed: Total count mismatch ^^^");
}
static s64 __init test_rhashtable(struct rhashtable *ht, struct test_obj *array,
unsigned int entries)
{
struct test_obj *obj;
int err;
unsigned int i, insert_retries = 0;
s64 start, end;
/*
* Insertion Test:
* Insert entries into table with all keys even numbers
*/
pr_info(" Adding %d keys\n", entries);
start = ktime_get_ns();
for (i = 0; i < entries; i++) {
struct test_obj *obj = &array[i];
obj->value.id = i * 2;
err = insert_retry(ht, obj, test_rht_params);
if (err > 0)
insert_retries += err;
else if (err)
return err;
}
if (insert_retries)
pr_info(" %u insertions retried due to memory pressure\n",
insert_retries);
test_bucket_stats(ht, entries);
rcu_read_lock();
test_rht_lookup(ht, array, entries);
rcu_read_unlock();
test_bucket_stats(ht, entries);
pr_info(" Deleting %d keys\n", entries);
for (i = 0; i < entries; i++) {
struct test_obj_val key = {
.id = i * 2,
};
if (array[i].value.id != TEST_INSERT_FAIL) {
obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
BUG_ON(!obj);
rhashtable_remove_fast(ht, &obj->node, test_rht_params);
}
cond_resched();
}
end = ktime_get_ns();
pr_info(" Duration of test: %lld ns\n", end - start);
return end - start;
}
static struct rhashtable ht;
static struct rhltable rhlt;
static int __init test_rhltable(unsigned int entries)
{
struct test_obj_rhl *rhl_test_objects;
unsigned long *obj_in_table;
unsigned int i, j, k;
int ret, err;
if (entries == 0)
entries = 1;
rhl_test_objects = vzalloc(array_size(entries,
sizeof(*rhl_test_objects)));
if (!rhl_test_objects)
return -ENOMEM;
ret = -ENOMEM;
obj_in_table = vzalloc(array_size(sizeof(unsigned long),
BITS_TO_LONGS(entries)));
if (!obj_in_table)
goto out_free;
err = rhltable_init(&rhlt, &test_rht_params);
if (WARN_ON(err))
goto out_free;
k = get_random_u32();
ret = 0;
for (i = 0; i < entries; i++) {
rhl_test_objects[i].value.id = k;
err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node,
test_rht_params);
if (WARN(err, "error %d on element %d\n", err, i))
break;
if (err == 0)
set_bit(i, obj_in_table);
}
if (err)
ret = err;
pr_info("test %d add/delete pairs into rhlist\n", entries);
for (i = 0; i < entries; i++) {
struct rhlist_head *h, *pos;
struct test_obj_rhl *obj;
struct test_obj_val key = {
.id = k,
};
bool found;
rcu_read_lock();
h = rhltable_lookup(&rhlt, &key, test_rht_params);
if (WARN(!h, "key not found during iteration %d of %d", i, entries)) {
rcu_read_unlock();
break;
}
if (i) {
j = i - 1;
rhl_for_each_entry_rcu(obj, pos, h, list_node) {
if (WARN(pos == &rhl_test_objects[j].list_node, "old element found, should be gone"))
break;
}
}
cond_resched_rcu();
found = false;
rhl_for_each_entry_rcu(obj, pos, h, list_node) {
if (pos == &rhl_test_objects[i].list_node) {
found = true;
break;
}
}
rcu_read_unlock();
if (WARN(!found, "element %d not found", i))
break;
err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
WARN(err, "rhltable_remove: err %d for iteration %d\n", err, i);
if (err == 0)
clear_bit(i, obj_in_table);
}
if (ret == 0 && err)
ret = err;
for (i = 0; i < entries; i++) {
WARN(test_bit(i, obj_in_table), "elem %d allegedly still present", i);
err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node,
test_rht_params);
if (WARN(err, "error %d on element %d\n", err, i))
break;
if (err == 0)
set_bit(i, obj_in_table);
}
pr_info("test %d random rhlist add/delete operations\n", entries);
for (j = 0; j < entries; j++) {
u32 i = get_random_u32_below(entries);
u32 prand = get_random_u32_below(4);
cond_resched();
err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
if (test_bit(i, obj_in_table)) {
clear_bit(i, obj_in_table);
if (WARN(err, "cannot remove element at slot %d", i))
continue;
} else {
if (WARN(err != -ENOENT, "removed non-existent element %d, error %d not %d",
i, err, -ENOENT))
continue;
}
if (prand & 1) {
err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
if (err == 0) {
if (WARN(test_and_set_bit(i, obj_in_table), "succeeded to insert same object %d", i))
continue;
} else {
if (WARN(!test_bit(i, obj_in_table), "failed to insert object %d", i))
continue;
}
}
if (prand & 2) {
i = get_random_u32_below(entries);
if (test_bit(i, obj_in_table)) {
err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
WARN(err, "cannot remove element at slot %d", i);
if (err == 0)
clear_bit(i, obj_in_table);
} else {
err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
WARN(err, "failed to insert object %d", i);
if (err == 0)
set_bit(i, obj_in_table);
}
}
}
for (i = 0; i < entries; i++) {
cond_resched();
err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
if (test_bit(i, obj_in_table)) {
if (WARN(err, "cannot remove element at slot %d", i))
continue;
} else {
if (WARN(err != -ENOENT, "removed non-existent element, error %d not %d",
err, -ENOENT))
continue;
}
}
rhltable_destroy(&rhlt);
out_free:
vfree(rhl_test_objects);
vfree(obj_in_table);
return ret;
}
static int __init test_rhashtable_max(struct test_obj *array,
unsigned int entries)
{
unsigned int i;
int err;
test_rht_params.max_size = roundup_pow_of_two(entries / 8);
err = rhashtable_init(&ht, &test_rht_params);
if (err)
return err;
for (i = 0; i < ht.max_elems; i++) {
struct test_obj *obj = &array[i];
obj->value.id = i * 2;
err = insert_retry(&ht, obj, test_rht_params);
if (err < 0)
return err;
}
err = insert_retry(&ht, &array[ht.max_elems], test_rht_params);
if (err == -E2BIG) {
err = 0;
} else {
pr_info("insert element %u should have failed with %d, got %d\n",
ht.max_elems, -E2BIG, err);
if (err == 0)
err = -1;
}
rhashtable_destroy(&ht);
return err;
}
static unsigned int __init print_ht(struct rhltable *rhlt)
{
struct rhashtable *ht;
const struct bucket_table *tbl;
char buff[512] = "";
int offset = 0;
unsigned int i, cnt = 0;
ht = &rhlt->ht;
/* Take the mutex to avoid RCU warning */
mutex_lock(&ht->mutex);
tbl = rht_dereference(ht->tbl, ht);
for (i = 0; i < tbl->size; i++) {
struct rhash_head *pos, *next;
struct test_obj_rhl *p;
pos = rht_ptr_exclusive(tbl->buckets + i);
next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;
if (!rht_is_a_nulls(pos)) {
offset += sprintf(buff + offset, "\nbucket[%d] -> ", i);
}
while (!rht_is_a_nulls(pos)) {
struct rhlist_head *list = container_of(pos, struct rhlist_head, rhead);
offset += sprintf(buff + offset, "[[");
do {
pos = &list->rhead;
list = rht_dereference(list->next, ht);
p = rht_obj(ht, pos);
offset += sprintf(buff + offset, " val %d (tid=%d)%s", p->value.id, p->value.tid,
list? ", " : " ");
cnt++;
} while (list);
pos = next,
next = !rht_is_a_nulls(pos) ?
rht_dereference(pos->next, ht) : NULL;
offset += sprintf(buff + offset, "]]%s", !rht_is_a_nulls(pos) ? " -> " : "");
}
}
printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
mutex_unlock(&ht->mutex);
return cnt;
}
static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
int cnt, bool slow)
{
struct rhltable *rhlt;
unsigned int i, ret;
const char *key;
int err = 0;
rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL);
if (WARN_ON(!rhlt))
return -EINVAL;
err = rhltable_init(rhlt, &test_rht_params_dup);
if (WARN_ON(err)) {
kfree(rhlt);
return err;
}
for (i = 0; i < cnt; i++) {
rhl_test_objects[i].value.tid = i;
key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead);
key += test_rht_params_dup.key_offset;
if (slow) {
err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key,
&rhl_test_objects[i].list_node.rhead));
if (err == -EAGAIN)
err = 0;
} else
err = rhltable_insert(rhlt,
&rhl_test_objects[i].list_node,
test_rht_params_dup);
if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
goto skip_print;
}
ret = print_ht(rhlt);
WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
skip_print:
rhltable_destroy(rhlt);
kfree(rhlt);
return 0;
}
static int __init test_insert_duplicates_run(void)
{
struct test_obj_rhl rhl_test_objects[3] = {};
pr_info("test inserting duplicates\n");
/* two different values that map to same bucket */
rhl_test_objects[0].value.id = 1;
rhl_test_objects[1].value.id = 21;
/* and another duplicate with the same value as [0],
 * which will be second on the bucket list */
rhl_test_objects[2].value.id = rhl_test_objects[0].value.id;
test_insert_dup(rhl_test_objects, 2, false);
test_insert_dup(rhl_test_objects, 3, false);
test_insert_dup(rhl_test_objects, 2, true);
test_insert_dup(rhl_test_objects, 3, true);
return 0;
}
static int thread_lookup_test(struct thread_data *tdata)
{
unsigned int entries = tdata->entries;
int i, err = 0;
for (i = 0; i < entries; i++) {
struct test_obj *obj;
struct test_obj_val key = {
.id = i,
.tid = tdata->id,
};
obj = rhashtable_lookup_fast(&ht, &key, test_rht_params);
if (obj && (tdata->objs[i].value.id == TEST_INSERT_FAIL)) {
pr_err(" found unexpected object %d-%d\n", key.tid, key.id);
err++;
} else if (!obj && (tdata->objs[i].value.id != TEST_INSERT_FAIL)) {
pr_err(" object %d-%d not found!\n", key.tid, key.id);
err++;
} else if (obj && memcmp(&obj->value, &key, sizeof(key))) {
pr_err(" wrong object returned (got %d-%d, expected %d-%d)\n",
obj->value.tid, obj->value.id, key.tid, key.id);
err++;
}
cond_resched();
}
return err;
}
static int threadfunc(void *data)
{
int i, step, err = 0, insert_retries = 0;
struct thread_data *tdata = data;
if (atomic_dec_and_test(&startup_count))
wake_up(&startup_wait);
if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == -1)) {
pr_err(" thread[%d]: interrupted\n", tdata->id);
goto out;
}
for (i = 0; i < tdata->entries; i++) {
tdata->objs[i].value.id = i;
tdata->objs[i].value.tid = tdata->id;
err = insert_retry(&ht, &tdata->objs[i], test_rht_params);
if (err > 0) {
insert_retries += err;
} else if (err) {
pr_err(" thread[%d]: rhashtable_insert_fast failed\n",
tdata->id);
goto out;
}
}
if (insert_retries)
pr_info(" thread[%d]: %u insertions retried due to memory pressure\n",
tdata->id, insert_retries);
err = thread_lookup_test(tdata);
if (err) {
pr_err(" thread[%d]: rhashtable_lookup_test failed\n",
tdata->id);
goto out;
}
for (step = 10; step > 0; step--) {
for (i = 0; i < tdata->entries; i += step) {
if (tdata->objs[i].value.id == TEST_INSERT_FAIL)
continue;
err = rhashtable_remove_fast(&ht, &tdata->objs[i].node,
test_rht_params);
if (err) {
pr_err(" thread[%d]: rhashtable_remove_fast failed\n",
tdata->id);
goto out;
}
tdata->objs[i].value.id = TEST_INSERT_FAIL;
cond_resched();
}
err = thread_lookup_test(tdata);
if (err) {
pr_err(" thread[%d]: rhashtable_lookup_test (2) failed\n",
tdata->id);
goto out;
}
}
out:
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
}
return err;
}
static int __init test_rht_init(void)
{
unsigned int entries;
int i, err, started_threads = 0, failed_threads = 0;
u64 total_time = 0;
struct thread_data *tdata;
struct test_obj *objs;
if (parm_entries < 0)
parm_entries = 1;
entries = min(parm_entries, MAX_ENTRIES);
test_rht_params.automatic_shrinking = shrinking;
test_rht_params.max_size = max_size ? : roundup_pow_of_two(entries);
test_rht_params.nelem_hint = size;
objs = vzalloc(array_size(sizeof(struct test_obj),
test_rht_params.max_size + 1));
if (!objs)
return -ENOMEM;
pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n",
size, max_size, shrinking);
for (i = 0; i < runs; i++) {
s64 time;
pr_info("Test %02d:\n", i);
memset(objs, 0, test_rht_params.max_size * sizeof(struct test_obj));
err = rhashtable_init(&ht, &test_rht_params);
if (err < 0) {
pr_warn("Test failed: Unable to initialize hashtable: %d\n",
err);
continue;
}
time = test_rhashtable(&ht, objs, entries);
rhashtable_destroy(&ht);
if (time < 0) {
vfree(objs);
pr_warn("Test failed: return code %lld\n", time);
return -EINVAL;
}
total_time += time;
}
pr_info("test if its possible to exceed max_size %d: %s\n",
test_rht_params.max_size, test_rhashtable_max(objs, entries) == 0 ?
"no, ok" : "YES, failed");
vfree(objs);
do_div(total_time, runs);
pr_info("Average test time: %llu\n", total_time);
test_insert_duplicates_run();
if (!tcount)
return 0;
pr_info("Testing concurrent rhashtable access from %d threads\n",
tcount);
atomic_set(&startup_count, tcount);
tdata = vzalloc(array_size(tcount, sizeof(struct thread_data)));
if (!tdata)
return -ENOMEM;
objs = vzalloc(array3_size(sizeof(struct test_obj), tcount, entries));
if (!objs) {
vfree(tdata);
return -ENOMEM;
}
test_rht_params.max_size = max_size ? :
roundup_pow_of_two(tcount * entries);
err = rhashtable_init(&ht, &test_rht_params);
if (err < 0) {
pr_warn("Test failed: Unable to initialize hashtable: %d\n",
err);
vfree(tdata);
vfree(objs);
return -EINVAL;
}
for (i = 0; i < tcount; i++) {
tdata[i].id = i;
tdata[i].entries = entries;
tdata[i].objs = objs + i * entries;
tdata[i].task = kthread_run(threadfunc, &tdata[i],
"rhashtable_thrad[%d]", i);
if (IS_ERR(tdata[i].task)) {
pr_err(" kthread_run failed for thread %d\n", i);
atomic_dec(&startup_count);
} else {
started_threads++;
}
}
if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == 0))
pr_err(" wait_event interruptible failed\n");
/* count is 0 now, set it to -1 and wake up all threads together */
atomic_dec(&startup_count);
wake_up_all(&startup_wait);
for (i = 0; i < tcount; i++) {
if (IS_ERR(tdata[i].task))
continue;
if ((err = kthread_stop(tdata[i].task))) {
pr_warn("Test failed: thread %d returned: %d\n",
i, err);
failed_threads++;
}
}
rhashtable_destroy(&ht);
vfree(tdata);
vfree(objs);
/*
* rhltable_remove() is very expensive; the default entry count can make
* this test run for two minutes or more, so use a smaller number instead.
*/
err = test_rhltable(entries / 16);
pr_info("Started %d threads, %d failed, rhltable test returns %d\n",
started_threads, failed_threads, err);
return 0;
}
static void __exit test_rht_exit(void)
{
}
module_init(test_rht_init);
module_exit(test_rht_exit);
MODULE_LICENSE("GPL v2");
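/*
 * Illustrative sketch, not part of the original test module: the minimal
 * insert/lookup/remove cycle the tests above exercise, reusing struct
 * test_obj and test_rht_params.  Error handling is deliberately reduced.
 */
static int __maybe_unused rht_basic_example(void)
{
	struct rhashtable table;
	struct test_obj obj = { .value = { .id = 42 } };
	struct test_obj_val key = { .id = 42 };
	int err;

	err = rhashtable_init(&table, &test_rht_params);
	if (err)
		return err;

	err = rhashtable_insert_fast(&table, &obj.node, test_rht_params);
	if (!err) {
		struct test_obj *found;

		rcu_read_lock();
		found = rhashtable_lookup_fast(&table, &key, test_rht_params);
		rcu_read_unlock();
		if (!found)
			err = -ENOENT;
		rhashtable_remove_fast(&table, &obj.node, test_rht_params);
	}
	rhashtable_destroy(&table);
	return err;
}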
| linux-master | lib/test_rhashtable.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/lib/string.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* This file should be used only for "library" routines that may have
* alternative implementations on specific architectures (generally
* found in <asm-xx/string.h>), or get overloaded by FORTIFY_SOURCE.
* (Specifically, this file is built with __NO_FORTIFY.)
*
* Other helper functions should live in string_helpers.c.
*/
#define __NO_FORTIFY
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
#include <asm/word-at-a-time.h>
#include <asm/page.h>
#ifndef __HAVE_ARCH_STRNCASECMP
/**
* strncasecmp - Case insensitive, length-limited string comparison
* @s1: One string
* @s2: The other string
* @len: the maximum number of characters to compare
*/
int strncasecmp(const char *s1, const char *s2, size_t len)
{
/* Yes, Virginia, it had better be unsigned */
unsigned char c1, c2;
if (!len)
return 0;
do {
c1 = *s1++;
c2 = *s2++;
if (!c1 || !c2)
break;
if (c1 == c2)
continue;
c1 = tolower(c1);
c2 = tolower(c2);
if (c1 != c2)
break;
} while (--len);
return (int)c1 - (int)c2;
}
EXPORT_SYMBOL(strncasecmp);
#endif
#ifndef __HAVE_ARCH_STRCASECMP
int strcasecmp(const char *s1, const char *s2)
{
int c1, c2;
do {
c1 = tolower(*s1++);
c2 = tolower(*s2++);
} while (c1 == c2 && c1 != 0);
return c1 - c2;
}
EXPORT_SYMBOL(strcasecmp);
#endif
#ifndef __HAVE_ARCH_STRCPY
char *strcpy(char *dest, const char *src)
{
char *tmp = dest;
while ((*dest++ = *src++) != '\0')
/* nothing */;
return tmp;
}
EXPORT_SYMBOL(strcpy);
#endif
#ifndef __HAVE_ARCH_STRNCPY
char *strncpy(char *dest, const char *src, size_t count)
{
char *tmp = dest;
while (count) {
if ((*tmp = *src) != 0)
src++;
tmp++;
count--;
}
return dest;
}
EXPORT_SYMBOL(strncpy);
#endif
#ifndef __HAVE_ARCH_STRLCPY
size_t strlcpy(char *dest, const char *src, size_t size)
{
size_t ret = strlen(src);
if (size) {
size_t len = (ret >= size) ? size - 1 : ret;
__builtin_memcpy(dest, src, len);
dest[len] = '\0';
}
return ret;
}
EXPORT_SYMBOL(strlcpy);
#endif
#ifndef __HAVE_ARCH_STRSCPY
ssize_t strscpy(char *dest, const char *src, size_t count)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
size_t max = count;
long res = 0;
if (count == 0 || WARN_ON_ONCE(count > INT_MAX))
return -E2BIG;
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/*
* If src is unaligned, don't cross a page boundary,
* since we don't know if the next page is mapped.
*/
if ((long)src & (sizeof(long) - 1)) {
size_t limit = PAGE_SIZE - ((long)src & (PAGE_SIZE - 1));
if (limit < max)
max = limit;
}
#else
/* If src or dest is unaligned, don't do word-at-a-time. */
if (((long) dest | (long) src) & (sizeof(long) - 1))
max = 0;
#endif
/*
* read_word_at_a_time() below may read uninitialized bytes after the
* trailing zero and use them in comparisons. Disable this optimization
* under KMSAN to prevent false positive reports.
*/
if (IS_ENABLED(CONFIG_KMSAN))
max = 0;
while (max >= sizeof(unsigned long)) {
unsigned long c, data;
c = read_word_at_a_time(src+res);
if (has_zero(c, &data, &constants)) {
data = prep_zero_mask(c, data, &constants);
data = create_zero_mask(data);
*(unsigned long *)(dest+res) = c & zero_bytemask(data);
return res + find_zero(data);
}
*(unsigned long *)(dest+res) = c;
res += sizeof(unsigned long);
count -= sizeof(unsigned long);
max -= sizeof(unsigned long);
}
while (count) {
char c;
c = src[res];
dest[res] = c;
if (!c)
return res;
res++;
count--;
}
/* Hit buffer length without finding a NUL; force NUL-termination. */
if (res)
dest[res-1] = '\0';
return -E2BIG;
}
EXPORT_SYMBOL(strscpy);
#endif
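/*
 * Illustrative sketch, not part of the original file: strscpy() reports
 * truncation with -E2BIG instead of the untruncated source length that
 * strlcpy() returns, which makes the overflow check explicit.  The dstsize
 * parameter is assumed to be the real size of @dst.
 */
static int __maybe_unused copy_name_example(char *dst, size_t dstsize,
					    const char *src)
{
	ssize_t len = strscpy(dst, src, dstsize);

	if (len < 0)
		return -E2BIG;	/* truncated, but dst is still NUL-terminated */
	return 0;		/* copied len characters plus the NUL */
}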
/**
* stpcpy - copy a string from src to dest returning a pointer to the new end
* of dest, including src's %NUL-terminator. May overrun dest.
* @dest: pointer to end of string being copied into. Must be large enough
* to receive copy.
* @src: pointer to the beginning of string being copied from. Must not overlap
* dest.
*
* stpcpy differs from strcpy in a key way: the return value is a pointer
* to the new %NUL-terminating character in @dest. (For strcpy, the return
* value is a pointer to the start of @dest). This interface is considered
* unsafe as it doesn't perform bounds checking of the inputs. As such it's
* not recommended for usage. Instead, its definition is provided in case
* the compiler lowers other libcalls to stpcpy.
*/
char *stpcpy(char *__restrict__ dest, const char *__restrict__ src);
char *stpcpy(char *__restrict__ dest, const char *__restrict__ src)
{
while ((*dest++ = *src++) != '\0')
/* nothing */;
return --dest;
}
EXPORT_SYMBOL(stpcpy);
#ifndef __HAVE_ARCH_STRCAT
char *strcat(char *dest, const char *src)
{
char *tmp = dest;
while (*dest)
dest++;
while ((*dest++ = *src++) != '\0')
;
return tmp;
}
EXPORT_SYMBOL(strcat);
#endif
#ifndef __HAVE_ARCH_STRNCAT
char *strncat(char *dest, const char *src, size_t count)
{
char *tmp = dest;
if (count) {
while (*dest)
dest++;
while ((*dest++ = *src++) != 0) {
if (--count == 0) {
*dest = '\0';
break;
}
}
}
return tmp;
}
EXPORT_SYMBOL(strncat);
#endif
#ifndef __HAVE_ARCH_STRLCAT
size_t strlcat(char *dest, const char *src, size_t count)
{
size_t dsize = strlen(dest);
size_t len = strlen(src);
size_t res = dsize + len;
/* This would be a bug */
BUG_ON(dsize >= count);
dest += dsize;
count -= dsize;
if (len >= count)
len = count-1;
__builtin_memcpy(dest, src, len);
dest[len] = 0;
return res;
}
EXPORT_SYMBOL(strlcat);
#endif
#ifndef __HAVE_ARCH_STRCMP
/**
* strcmp - Compare two strings
* @cs: One string
* @ct: Another string
*/
int strcmp(const char *cs, const char *ct)
{
unsigned char c1, c2;
while (1) {
c1 = *cs++;
c2 = *ct++;
if (c1 != c2)
return c1 < c2 ? -1 : 1;
if (!c1)
break;
}
return 0;
}
EXPORT_SYMBOL(strcmp);
#endif
#ifndef __HAVE_ARCH_STRNCMP
/**
* strncmp - Compare two length-limited strings
* @cs: One string
* @ct: Another string
* @count: The maximum number of bytes to compare
*/
int strncmp(const char *cs, const char *ct, size_t count)
{
unsigned char c1, c2;
while (count) {
c1 = *cs++;
c2 = *ct++;
if (c1 != c2)
return c1 < c2 ? -1 : 1;
if (!c1)
break;
count--;
}
return 0;
}
EXPORT_SYMBOL(strncmp);
#endif
#ifndef __HAVE_ARCH_STRCHR
/**
* strchr - Find the first occurrence of a character in a string
* @s: The string to be searched
* @c: The character to search for
*
* Note that the %NUL-terminator is considered part of the string, and can
* be searched for.
*/
char *strchr(const char *s, int c)
{
for (; *s != (char)c; ++s)
if (*s == '\0')
return NULL;
return (char *)s;
}
EXPORT_SYMBOL(strchr);
#endif
#ifndef __HAVE_ARCH_STRCHRNUL
/**
* strchrnul - Find and return a character in a string, or end of string
* @s: The string to be searched
* @c: The character to search for
*
* Returns pointer to first occurrence of 'c' in s. If c is not found, then
* return a pointer to the null byte at the end of s.
*/
char *strchrnul(const char *s, int c)
{
while (*s && *s != (char)c)
s++;
return (char *)s;
}
EXPORT_SYMBOL(strchrnul);
#endif
/**
* strnchrnul - Find and return a character in a length limited string,
* or end of string
* @s: The string to be searched
* @count: The number of characters to be searched
* @c: The character to search for
*
* Returns pointer to the first occurrence of 'c' in s. If c is not found,
* then return a pointer to the last character of the string.
*/
char *strnchrnul(const char *s, size_t count, int c)
{
while (count-- && *s && *s != (char)c)
s++;
return (char *)s;
}
#ifndef __HAVE_ARCH_STRRCHR
/**
* strrchr - Find the last occurrence of a character in a string
* @s: The string to be searched
* @c: The character to search for
*/
char *strrchr(const char *s, int c)
{
const char *last = NULL;
do {
if (*s == (char)c)
last = s;
} while (*s++);
return (char *)last;
}
EXPORT_SYMBOL(strrchr);
#endif
#ifndef __HAVE_ARCH_STRNCHR
/**
* strnchr - Find a character in a length limited string
* @s: The string to be searched
* @count: The number of characters to be searched
* @c: The character to search for
*
* Note that the %NUL-terminator is considered part of the string, and can
* be searched for.
*/
char *strnchr(const char *s, size_t count, int c)
{
while (count--) {
if (*s == (char)c)
return (char *)s;
if (*s++ == '\0')
break;
}
return NULL;
}
EXPORT_SYMBOL(strnchr);
#endif
#ifndef __HAVE_ARCH_STRLEN
size_t strlen(const char *s)
{
const char *sc;
for (sc = s; *sc != '\0'; ++sc)
/* nothing */;
return sc - s;
}
EXPORT_SYMBOL(strlen);
#endif
#ifndef __HAVE_ARCH_STRNLEN
size_t strnlen(const char *s, size_t count)
{
const char *sc;
for (sc = s; count-- && *sc != '\0'; ++sc)
/* nothing */;
return sc - s;
}
EXPORT_SYMBOL(strnlen);
#endif
#ifndef __HAVE_ARCH_STRSPN
/**
* strspn - Calculate the length of the initial substring of @s which only contains characters in @accept
* @s: The string to be searched
* @accept: The string to search for
*/
size_t strspn(const char *s, const char *accept)
{
const char *p;
for (p = s; *p != '\0'; ++p) {
if (!strchr(accept, *p))
break;
}
return p - s;
}
EXPORT_SYMBOL(strspn);
#endif
#ifndef __HAVE_ARCH_STRCSPN
/**
* strcspn - Calculate the length of the initial substring of @s which does not contain any characters in @reject
* @s: The string to be searched
* @reject: The string to avoid
*/
size_t strcspn(const char *s, const char *reject)
{
const char *p;
for (p = s; *p != '\0'; ++p) {
if (strchr(reject, *p))
break;
}
return p - s;
}
EXPORT_SYMBOL(strcspn);
#endif
#ifndef __HAVE_ARCH_STRPBRK
/**
* strpbrk - Find the first occurrence of a set of characters
* @cs: The string to be searched
* @ct: The characters to search for
*/
char *strpbrk(const char *cs, const char *ct)
{
const char *sc;
for (sc = cs; *sc != '\0'; ++sc) {
if (strchr(ct, *sc))
return (char *)sc;
}
return NULL;
}
EXPORT_SYMBOL(strpbrk);
#endif
#ifndef __HAVE_ARCH_STRSEP
/**
* strsep - Split a string into tokens
* @s: The string to be searched
* @ct: The characters to search for
*
* strsep() updates @s to point after the token, ready for the next call.
*
* It returns empty tokens, too, behaving exactly like the libc function
* of that name. In fact, it was stolen from glibc2 and de-fancy-fied.
* Same semantics, slimmer shape. ;)
*/
char *strsep(char **s, const char *ct)
{
char *sbegin = *s;
char *end;
if (sbegin == NULL)
return NULL;
end = strpbrk(sbegin, ct);
if (end)
*end++ = '\0';
*s = end;
return sbegin;
}
EXPORT_SYMBOL(strsep);
#endif
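/*
 * Illustrative sketch, not part of the original file: tokenising a mutable,
 * comma-separated option string with strsep().  The option string and its
 * handling are made-up placeholders.
 */
static void __maybe_unused parse_opts_example(char *opts)
{
	char *tok;

	while ((tok = strsep(&opts, ",")) != NULL) {
		if (!*tok)
			continue;	/* strsep() also returns empty tokens */
		/* handle tok here, e.g. strcmp(tok, "debug") == 0 */
	}
}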
#ifndef __HAVE_ARCH_MEMSET
/**
* memset - Fill a region of memory with the given value
* @s: Pointer to the start of the area.
* @c: The byte to fill the area with
* @count: The size of the area.
*
* Do not use memset() to access IO space, use memset_io() instead.
*/
void *memset(void *s, int c, size_t count)
{
char *xs = s;
while (count--)
*xs++ = c;
return s;
}
EXPORT_SYMBOL(memset);
#endif
#ifndef __HAVE_ARCH_MEMSET16
/**
* memset16() - Fill a memory area with a uint16_t
* @s: Pointer to the start of the area.
* @v: The value to fill the area with
* @count: The number of values to store
*
* Differs from memset() in that it fills with a uint16_t instead
* of a byte. Remember that @count is the number of uint16_ts to
* store, not the number of bytes.
*/
void *memset16(uint16_t *s, uint16_t v, size_t count)
{
uint16_t *xs = s;
while (count--)
*xs++ = v;
return s;
}
EXPORT_SYMBOL(memset16);
#endif
#ifndef __HAVE_ARCH_MEMSET32
/**
* memset32() - Fill a memory area with a uint32_t
* @s: Pointer to the start of the area.
* @v: The value to fill the area with
* @count: The number of values to store
*
* Differs from memset() in that it fills with a uint32_t instead
* of a byte. Remember that @count is the number of uint32_ts to
* store, not the number of bytes.
*/
void *memset32(uint32_t *s, uint32_t v, size_t count)
{
uint32_t *xs = s;
while (count--)
*xs++ = v;
return s;
}
EXPORT_SYMBOL(memset32);
#endif
#ifndef __HAVE_ARCH_MEMSET64
/**
* memset64() - Fill a memory area with a uint64_t
* @s: Pointer to the start of the area.
* @v: The value to fill the area with
* @count: The number of values to store
*
* Differs from memset() in that it fills with a uint64_t instead
* of a byte. Remember that @count is the number of uint64_ts to
* store, not the number of bytes.
*/
void *memset64(uint64_t *s, uint64_t v, size_t count)
{
uint64_t *xs = s;
while (count--)
*xs++ = v;
return s;
}
EXPORT_SYMBOL(memset64);
#endif
#ifndef __HAVE_ARCH_MEMCPY
/**
* memcpy - Copy one area of memory to another
* @dest: Where to copy to
* @src: Where to copy from
* @count: The size of the area.
*
* You should not use this function to access IO space, use memcpy_toio()
* or memcpy_fromio() instead.
*/
void *memcpy(void *dest, const void *src, size_t count)
{
char *tmp = dest;
const char *s = src;
while (count--)
*tmp++ = *s++;
return dest;
}
EXPORT_SYMBOL(memcpy);
#endif
#ifndef __HAVE_ARCH_MEMMOVE
/**
* memmove - Copy one area of memory to another
* @dest: Where to copy to
* @src: Where to copy from
* @count: The size of the area.
*
* Unlike memcpy(), memmove() copes with overlapping areas.
*/
void *memmove(void *dest, const void *src, size_t count)
{
char *tmp;
const char *s;
if (dest <= src) {
tmp = dest;
s = src;
while (count--)
*tmp++ = *s++;
} else {
tmp = dest;
tmp += count;
s = src;
s += count;
while (count--)
*--tmp = *--s;
}
return dest;
}
EXPORT_SYMBOL(memmove);
#endif
#ifndef __HAVE_ARCH_MEMCMP
/**
* memcmp - Compare two areas of memory
* @cs: One area of memory
* @ct: Another area of memory
* @count: The size of the area.
*/
#undef memcmp
__visible int memcmp(const void *cs, const void *ct, size_t count)
{
const unsigned char *su1, *su2;
int res = 0;
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (count >= sizeof(unsigned long)) {
const unsigned long *u1 = cs;
const unsigned long *u2 = ct;
do {
if (get_unaligned(u1) != get_unaligned(u2))
break;
u1++;
u2++;
count -= sizeof(unsigned long);
} while (count >= sizeof(unsigned long));
cs = u1;
ct = u2;
}
#endif
for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
if ((res = *su1 - *su2) != 0)
break;
return res;
}
EXPORT_SYMBOL(memcmp);
#endif
#ifndef __HAVE_ARCH_BCMP
/**
* bcmp - returns 0 if and only if the buffers have identical contents.
* @a: pointer to first buffer.
* @b: pointer to second buffer.
* @len: size of buffers.
*
* The sign or magnitude of a non-zero return value has no particular
* meaning, and architectures may implement their own more efficient bcmp(). So
* while this particular implementation is a simple (tail) call to memcmp, do
* not rely on anything but whether the return value is zero or non-zero.
*/
int bcmp(const void *a, const void *b, size_t len)
{
return memcmp(a, b, len);
}
EXPORT_SYMBOL(bcmp);
#endif
#ifndef __HAVE_ARCH_MEMSCAN
/**
* memscan - Find a character in an area of memory.
* @addr: The memory area
* @c: The byte to search for
* @size: The size of the area.
*
* returns the address of the first occurrence of @c, or 1 byte past
* the area if @c is not found
*/
void *memscan(void *addr, int c, size_t size)
{
unsigned char *p = addr;
while (size) {
if (*p == (unsigned char)c)
return (void *)p;
p++;
size--;
}
return (void *)p;
}
EXPORT_SYMBOL(memscan);
#endif
#ifndef __HAVE_ARCH_STRSTR
/**
* strstr - Find the first substring in a %NUL terminated string
* @s1: The string to be searched
* @s2: The string to search for
*/
char *strstr(const char *s1, const char *s2)
{
size_t l1, l2;
l2 = strlen(s2);
if (!l2)
return (char *)s1;
l1 = strlen(s1);
while (l1 >= l2) {
l1--;
if (!memcmp(s1, s2, l2))
return (char *)s1;
s1++;
}
return NULL;
}
EXPORT_SYMBOL(strstr);
#endif
#ifndef __HAVE_ARCH_STRNSTR
/**
* strnstr - Find the first substring in a length-limited string
* @s1: The string to be searched
* @s2: The string to search for
* @len: the maximum number of characters to search
*/
char *strnstr(const char *s1, const char *s2, size_t len)
{
size_t l2;
l2 = strlen(s2);
if (!l2)
return (char *)s1;
while (len >= l2) {
len--;
if (!memcmp(s1, s2, l2))
return (char *)s1;
s1++;
}
return NULL;
}
EXPORT_SYMBOL(strnstr);
#endif
#ifndef __HAVE_ARCH_MEMCHR
/**
* memchr - Find a character in an area of memory.
* @s: The memory area
* @c: The byte to search for
* @n: The size of the area.
*
* returns the address of the first occurrence of @c, or %NULL
* if @c is not found
*/
void *memchr(const void *s, int c, size_t n)
{
const unsigned char *p = s;
while (n-- != 0) {
if ((unsigned char)c == *p++) {
return (void *)(p - 1);
}
}
return NULL;
}
EXPORT_SYMBOL(memchr);
#endif
static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
{
while (bytes) {
if (*start != value)
return (void *)start;
start++;
bytes--;
}
return NULL;
}
/**
* memchr_inv - Find an unmatching character in an area of memory.
* @start: The memory area
* @c: Find a character other than c
* @bytes: The size of the area.
*
* returns the address of the first character other than @c, or %NULL
* if the whole buffer contains just @c.
*/
void *memchr_inv(const void *start, int c, size_t bytes)
{
u8 value = c;
u64 value64;
unsigned int words, prefix;
if (bytes <= 16)
return check_bytes8(start, value, bytes);
value64 = value;
#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
value64 *= 0x0101010101010101ULL;
#elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER)
value64 *= 0x01010101;
value64 |= value64 << 32;
#else
value64 |= value64 << 8;
value64 |= value64 << 16;
value64 |= value64 << 32;
#endif
prefix = (unsigned long)start % 8;
if (prefix) {
u8 *r;
prefix = 8 - prefix;
r = check_bytes8(start, value, prefix);
if (r)
return r;
start += prefix;
bytes -= prefix;
}
words = bytes / 8;
while (words) {
if (*(u64 *)start != value64)
return check_bytes8(start, value, 8);
start += 8;
words--;
}
return check_bytes8(start, value, bytes % 8);
}
EXPORT_SYMBOL(memchr_inv);
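/*
 * Illustrative sketch, not part of the original file: the common use of
 * memchr_inv() -- verifying that a reserved region is entirely zero.  The
 * region pointer and length are placeholders from the caller.
 */
static bool __maybe_unused region_is_zeroed(const void *resv, size_t len)
{
	/* NULL means every byte equalled the reference value (here 0). */
	return memchr_inv(resv, 0, len) == NULL;
}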
| linux-master | lib/string.c |
// SPDX-License-Identifier: GPL-2.0
/*
* IOMMU helper functions for the free area management
*/
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr,
unsigned long shift, unsigned long boundary_size,
unsigned long align_mask)
{
unsigned long index;
/* We don't want the last of the limit */
size -= 1;
again:
index = bitmap_find_next_zero_area(map, size, start, nr, align_mask);
if (index < size) {
if (iommu_is_span_boundary(index, nr, shift, boundary_size)) {
start = ALIGN(shift + index, boundary_size) - shift;
goto again;
}
bitmap_set(map, index, nr);
return index;
}
return -1;
}
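/*
 * Illustrative sketch, not part of the original file: a caller-side view of
 * iommu_area_alloc().  The shift of 0 and the 64-entry boundary are arbitrary
 * assumptions; real IOMMU drivers derive these from hardware parameters.
 */
static unsigned long __maybe_unused example_alloc_entries(unsigned long *map,
							  unsigned long map_size,
							  unsigned int nr)
{
	/*
	 * Allocate nr contiguous bits that do not cross a 64-entry boundary
	 * and need no extra alignment; (unsigned long)-1 signals failure.
	 */
	return iommu_area_alloc(map, map_size, 0, nr, 0, 64, 0);
}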
| linux-master | lib/iommu-helper.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
/* out-of-line parts */
#ifndef INLINE_COPY_FROM_USER
unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
might_fault();
if (!should_fail_usercopy() && likely(access_ok(from, n))) {
/*
* Ensure that bad access_ok() speculation will not
* lead to nasty side effects *after* the copy is
* finished:
*/
barrier_nospec();
instrument_copy_from_user_before(to, from, n);
res = raw_copy_from_user(to, from, n);
instrument_copy_from_user_after(to, from, n, res);
}
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
}
EXPORT_SYMBOL(_copy_from_user);
#endif
#ifndef INLINE_COPY_TO_USER
unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
if (should_fail_usercopy())
return n;
if (likely(access_ok(to, n))) {
instrument_copy_to_user(to, from, n);
n = raw_copy_to_user(to, from, n);
}
return n;
}
EXPORT_SYMBOL(_copy_to_user);
#endif
/**
* check_zeroed_user: check if a userspace buffer only contains zero bytes
* @from: Source address, in userspace.
* @size: Size of buffer.
*
* This is effectively shorthand for "memchr_inv(from, 0, size) == NULL" for
* userspace addresses (and is more efficient because we don't care where the
* first non-zero byte is).
*
* Returns:
* * 0: There were non-zero bytes present in the buffer.
* * 1: The buffer was full of zero bytes.
* * -EFAULT: access to userspace failed.
*/
int check_zeroed_user(const void __user *from, size_t size)
{
unsigned long val;
uintptr_t align = (uintptr_t) from % sizeof(unsigned long);
if (unlikely(size == 0))
return 1;
from -= align;
size += align;
if (!user_read_access_begin(from, size))
return -EFAULT;
unsafe_get_user(val, (unsigned long __user *) from, err_fault);
if (align)
val &= ~aligned_byte_mask(align);
while (size > sizeof(unsigned long)) {
if (unlikely(val))
goto done;
from += sizeof(unsigned long);
size -= sizeof(unsigned long);
unsafe_get_user(val, (unsigned long __user *) from, err_fault);
}
if (size < sizeof(unsigned long))
val &= aligned_byte_mask(size);
done:
user_read_access_end();
return (val == 0);
err_fault:
user_read_access_end();
return -EFAULT;
}
EXPORT_SYMBOL(check_zeroed_user);
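/*
 * Illustrative sketch, not part of the original file: the extensible-struct
 * pattern this helper exists for -- when userspace passes a struct larger
 * than the kernel knows about, the unknown tail must be all zeroes.  ksize
 * and usize are placeholders for the kernel's and userspace's struct sizes.
 */
static int __maybe_unused check_struct_tail(const void __user *uarg,
					    size_t usize, size_t ksize)
{
	int ret;

	if (usize <= ksize)
		return 0;		/* nothing beyond what the kernel knows */

	ret = check_zeroed_user(uarg + ksize, usize - ksize);
	if (ret < 0)
		return ret;		/* -EFAULT */
	return ret ? 0 : -E2BIG;	/* reject non-zero tail bytes */
}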
| linux-master | lib/usercopy.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* lib/hexdump.c
*/
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/export.h>
#include <asm/unaligned.h>
const char hex_asc[] = "0123456789abcdef";
EXPORT_SYMBOL(hex_asc);
const char hex_asc_upper[] = "0123456789ABCDEF";
EXPORT_SYMBOL(hex_asc_upper);
/**
* hex_to_bin - convert a hex digit to its real value
* @ch: ascii character represents hex digit
*
* hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
* input.
*
* This function is used to load cryptographic keys, so it is coded in such a
* way that there are no conditions or memory accesses that depend on data.
*
* Explanation of the logic:
* (ch - '9' - 1) is negative if ch <= '9'
* ('0' - 1 - ch) is negative if ch >= '0'
* we "and" these two values, so the result is negative if ch is in the range
* '0' ... '9'
* we are only interested in the sign, so we do a shift ">> 8"; note that right
* shift of a negative value is implementation-defined, so we cast the
* value to (unsigned) before the shift --- we have 0xffffff if ch is in
* the range '0' ... '9', 0 otherwise
* we "and" this value with (ch - '0' + 1) --- we have a value 1 ... 10 if ch is
* in the range '0' ... '9', 0 otherwise
* we add this value to -1 --- we have a value 0 ... 9 if ch is in the range '0'
* ... '9', -1 otherwise
* the next line is similar to the previous one, but we need to decode both
* uppercase and lowercase letters, so we use (ch & 0xdf), which converts
* lowercase to uppercase
*/
int hex_to_bin(unsigned char ch)
{
unsigned char cu = ch & 0xdf;
return -1 +
((ch - '0' + 1) & (unsigned)((ch - '9' - 1) & ('0' - 1 - ch)) >> 8) +
((cu - 'A' + 11) & (unsigned)((cu - 'F' - 1) & ('A' - 1 - cu)) >> 8);
}
EXPORT_SYMBOL(hex_to_bin);
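/*
 * Illustrative sketch, not part of the original file: a few spot checks that
 * trace the branchless arithmetic above.  For ch = 'b' (cu = 'B') the digit
 * term contributes 0, the letter term contributes ('B' - 'A' + 11) = 12,
 * so the result is -1 + 0 + 12 = 11, as expected.
 */
static int __maybe_unused hex_to_bin_spot_check(void)
{
	if (hex_to_bin('b') != 11 || hex_to_bin('7') != 7 ||
	    hex_to_bin('g') != -1)
		return -EINVAL;
	return 0;
}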
/**
* hex2bin - convert an ascii hexadecimal string to its binary representation
* @dst: binary result
* @src: ascii hexadecimal string
* @count: result length
*
* Return 0 on success, -EINVAL in case of bad input.
*/
int hex2bin(u8 *dst, const char *src, size_t count)
{
while (count--) {
int hi, lo;
hi = hex_to_bin(*src++);
if (unlikely(hi < 0))
return -EINVAL;
lo = hex_to_bin(*src++);
if (unlikely(lo < 0))
return -EINVAL;
*dst++ = (hi << 4) | lo;
}
return 0;
}
EXPORT_SYMBOL(hex2bin);
/**
* bin2hex - convert binary data to an ascii hexadecimal string
* @dst: ascii hexadecimal result
* @src: binary data
* @count: binary data length
*/
char *bin2hex(char *dst, const void *src, size_t count)
{
const unsigned char *_src = src;
while (count--)
dst = hex_byte_pack(dst, *_src++);
return dst;
}
EXPORT_SYMBOL(bin2hex);
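/*
 * Illustrative sketch, not part of the original file: a hex2bin()/bin2hex()
 * round trip.  The 6-byte, MAC-address-sized buffers are an arbitrary choice;
 * note that bin2hex() does not NUL-terminate its output.
 */
static int __maybe_unused hex_roundtrip_example(void)
{
	const char *in = "001122334455";
	u8 raw[6];
	char out[12];
	int i;

	if (hex2bin(raw, in, sizeof(raw)))
		return -EINVAL;		/* bad hex digit in the input */

	bin2hex(out, raw, sizeof(raw));	/* writes 2 * sizeof(raw) chars */
	for (i = 0; i < (int)sizeof(out); i++)
		if (out[i] != in[i])
			return -EIO;
	return 0;
}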
/**
* hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
* @buf: data blob to dump
* @len: number of bytes in the @buf
* @rowsize: number of bytes to print per line; must be 16 or 32
* @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
* @linebuf: where to put the converted data
* @linebuflen: total size of @linebuf, including space for terminating NUL
* @ascii: include ASCII after the hex output
*
* hex_dump_to_buffer() works on one "line" of output at a time, i.e.,
* 16 or 32 bytes of input data converted to hex + ASCII output.
*
* Given a buffer of u8 data, hex_dump_to_buffer() converts the input data
* to a hex + ASCII dump at the supplied memory location.
* The converted output is always NUL-terminated.
*
* E.g.:
* hex_dump_to_buffer(frame->data, frame->len, 16, 1,
* linebuf, sizeof(linebuf), true);
*
* example output buffer:
* 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
*
* Return:
* The amount of bytes placed in the buffer without terminating NUL. If the
* output was truncated, then the return value is the number of bytes
* (excluding the terminating NUL) which would have been written to the final
* string if enough space had been available.
*/
int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
char *linebuf, size_t linebuflen, bool ascii)
{
const u8 *ptr = buf;
int ngroups;
u8 ch;
int j, lx = 0;
int ascii_column;
int ret;
if (rowsize != 16 && rowsize != 32)
rowsize = 16;
if (len > rowsize) /* limit to one line at a time */
len = rowsize;
if (!is_power_of_2(groupsize) || groupsize > 8)
groupsize = 1;
if ((len % groupsize) != 0) /* no mixed size output */
groupsize = 1;
ngroups = len / groupsize;
ascii_column = rowsize * 2 + rowsize / groupsize + 1;
if (!linebuflen)
goto overflow1;
if (!len)
goto nil;
if (groupsize == 8) {
const u64 *ptr8 = buf;
for (j = 0; j < ngroups; j++) {
ret = snprintf(linebuf + lx, linebuflen - lx,
"%s%16.16llx", j ? " " : "",
get_unaligned(ptr8 + j));
if (ret >= linebuflen - lx)
goto overflow1;
lx += ret;
}
} else if (groupsize == 4) {
const u32 *ptr4 = buf;
for (j = 0; j < ngroups; j++) {
ret = snprintf(linebuf + lx, linebuflen - lx,
"%s%8.8x", j ? " " : "",
get_unaligned(ptr4 + j));
if (ret >= linebuflen - lx)
goto overflow1;
lx += ret;
}
} else if (groupsize == 2) {
const u16 *ptr2 = buf;
for (j = 0; j < ngroups; j++) {
ret = snprintf(linebuf + lx, linebuflen - lx,
"%s%4.4x", j ? " " : "",
get_unaligned(ptr2 + j));
if (ret >= linebuflen - lx)
goto overflow1;
lx += ret;
}
} else {
for (j = 0; j < len; j++) {
if (linebuflen < lx + 2)
goto overflow2;
ch = ptr[j];
linebuf[lx++] = hex_asc_hi(ch);
if (linebuflen < lx + 2)
goto overflow2;
linebuf[lx++] = hex_asc_lo(ch);
if (linebuflen < lx + 2)
goto overflow2;
linebuf[lx++] = ' ';
}
if (j)
lx--;
}
if (!ascii)
goto nil;
while (lx < ascii_column) {
if (linebuflen < lx + 2)
goto overflow2;
linebuf[lx++] = ' ';
}
for (j = 0; j < len; j++) {
if (linebuflen < lx + 2)
goto overflow2;
ch = ptr[j];
linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
}
nil:
linebuf[lx] = '\0';
return lx;
overflow2:
linebuf[lx++] = '\0';
overflow1:
return ascii ? ascii_column + len : (groupsize * 2 + 1) * ngroups - 1;
}
EXPORT_SYMBOL(hex_dump_to_buffer);
#ifdef CONFIG_PRINTK
/**
* print_hex_dump - print a text hex dump to syslog for a binary blob of data
* @level: kernel log level (e.g. KERN_DEBUG)
* @prefix_str: string to prefix each line with;
* caller supplies trailing spaces for alignment if desired
* @prefix_type: controls whether prefix of an offset, address, or none
* is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
* @rowsize: number of bytes to print per line; must be 16 or 32
* @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
* @buf: data blob to dump
* @len: number of bytes in the @buf
* @ascii: include ASCII after the hex output
*
* Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump
* to the kernel log at the specified kernel log level, with an optional
* leading prefix.
*
* print_hex_dump() works on one "line" of output at a time, i.e.,
* 16 or 32 bytes of input data converted to hex + ASCII output.
* print_hex_dump() iterates over the entire input @buf, breaking it into
* "line size" chunks to format and print.
*
* E.g.:
* print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
* 16, 1, frame->data, frame->len, true);
*
* Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
* 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
* Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode:
* ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~.
*/
void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
int rowsize, int groupsize,
const void *buf, size_t len, bool ascii)
{
const u8 *ptr = buf;
int i, linelen, remaining = len;
unsigned char linebuf[32 * 3 + 2 + 32 + 1];
if (rowsize != 16 && rowsize != 32)
rowsize = 16;
for (i = 0; i < len; i += rowsize) {
linelen = min(remaining, rowsize);
remaining -= rowsize;
hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
linebuf, sizeof(linebuf), ascii);
switch (prefix_type) {
case DUMP_PREFIX_ADDRESS:
printk("%s%s%p: %s\n",
level, prefix_str, ptr + i, linebuf);
break;
case DUMP_PREFIX_OFFSET:
printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
break;
default:
printk("%s%s%s\n", level, prefix_str, linebuf);
break;
}
}
}
EXPORT_SYMBOL(print_hex_dump);
#endif /* defined(CONFIG_PRINTK) */
| linux-master | lib/hexdump.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Dhrystone benchmark test module
*
* Copyright (C) 2022 Glider bv
*/
#include "dhry.h"
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/smp.h>
#define DHRY_VAX 1757
static int dhry_run_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops run_ops = {
.flags = KERNEL_PARAM_OPS_FL_NOARG,
.set = dhry_run_set,
};
static bool dhry_run;
module_param_cb(run, &run_ops, &dhry_run, 0200);
MODULE_PARM_DESC(run, "Run the test (default: false)");
static int iterations = -1;
module_param(iterations, int, 0644);
MODULE_PARM_DESC(iterations,
"Number of iterations through the benchmark (default: auto)");
static void dhry_benchmark(void)
{
unsigned int cpu = get_cpu();
int i, n;
if (iterations > 0) {
n = dhry(iterations);
goto report;
}
for (i = DHRY_VAX; i > 0; i <<= 1) {
n = dhry(i);
if (n != -EAGAIN)
break;
}
report:
put_cpu();
if (n >= 0)
pr_info("CPU%u: Dhrystones per Second: %d (%d DMIPS)\n", cpu,
n, n / DHRY_VAX);
else if (n == -EAGAIN)
pr_err("Please increase the number of iterations\n");
else
pr_err("Dhrystone benchmark failed error %pe\n", ERR_PTR(n));
}
static int dhry_run_set(const char *val, const struct kernel_param *kp)
{
int ret;
if (val) {
ret = param_set_bool(val, kp);
if (ret)
return ret;
} else {
dhry_run = true;
}
if (dhry_run && system_state == SYSTEM_RUNNING)
dhry_benchmark();
return 0;
}
static int __init dhry_init(void)
{
if (dhry_run)
dhry_benchmark();
return 0;
}
module_init(dhry_init);
MODULE_AUTHOR("Geert Uytterhoeven <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | lib/dhry_run.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* XArray implementation
* Copyright (c) 2017-2018 Microsoft Corporation
* Copyright (c) 2018-2020 Oracle
* Author: Matthew Wilcox <[email protected]>
*/
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include "radix-tree.h"
/*
* Coding conventions in this file:
*
* @xa is used to refer to the entire xarray.
* @xas is the 'xarray operation state'. It may be either a pointer to
* an xa_state, or an xa_state stored on the stack. This is an unfortunate
* ambiguity.
* @index is the index of the entry being operated on
* @mark is an xa_mark_t; a small number indicating one of the mark bits.
* @node refers to an xa_node; usually the primary one being operated on by
* this function.
* @offset is the index into the slots array inside an xa_node.
* @parent refers to the @xa_node closer to the head than @node.
* @entry refers to something stored in a slot in the xarray
*/
static inline unsigned int xa_lock_type(const struct xarray *xa)
{
return (__force unsigned int)xa->xa_flags & 3;
}
static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
if (lock_type == XA_LOCK_IRQ)
xas_lock_irq(xas);
else if (lock_type == XA_LOCK_BH)
xas_lock_bh(xas);
else
xas_lock(xas);
}
static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
{
if (lock_type == XA_LOCK_IRQ)
xas_unlock_irq(xas);
else if (lock_type == XA_LOCK_BH)
xas_unlock_bh(xas);
else
xas_unlock(xas);
}
static inline bool xa_track_free(const struct xarray *xa)
{
return xa->xa_flags & XA_FLAGS_TRACK_FREE;
}
static inline bool xa_zero_busy(const struct xarray *xa)
{
return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
}
static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
xa->xa_flags |= XA_FLAGS_MARK(mark);
}
static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
if (xa->xa_flags & XA_FLAGS_MARK(mark))
xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}
static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
return node->marks[(__force unsigned)mark];
}
static inline bool node_get_mark(struct xa_node *node,
unsigned int offset, xa_mark_t mark)
{
return test_bit(offset, node_marks(node, mark));
}
/* returns true if the bit was set */
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
xa_mark_t mark)
{
return __test_and_set_bit(offset, node_marks(node, mark));
}
/* returns true if the bit was set */
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
xa_mark_t mark)
{
return __test_and_clear_bit(offset, node_marks(node, mark));
}
static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
{
return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
}
static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
{
bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
}
#define mark_inc(mark) do { \
mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)
/*
* xas_squash_marks() - Merge all marks to the first entry
* @xas: Array operation state.
*
* Set a mark on the first entry if any entry has it set. Clear marks on
* all sibling entries.
*/
static void xas_squash_marks(const struct xa_state *xas)
{
unsigned int mark = 0;
unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;
if (!xas->xa_sibs)
return;
do {
unsigned long *marks = xas->xa_node->marks[mark];
if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
continue;
__set_bit(xas->xa_offset, marks);
bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
} while (mark++ != (__force unsigned)XA_MARK_MAX);
}
/* extracts the offset within this node from the index */
static unsigned int get_offset(unsigned long index, struct xa_node *node)
{
return (index >> node->shift) & XA_CHUNK_MASK;
}
static void xas_set_offset(struct xa_state *xas)
{
xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}
/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
unsigned int shift = xas->xa_node->shift;
xas->xa_index &= ~XA_CHUNK_MASK << shift;
xas->xa_index += offset << shift;
}
static void xas_next_offset(struct xa_state *xas)
{
xas->xa_offset++;
xas_move_index(xas, xas->xa_offset);
}
static void *set_bounds(struct xa_state *xas)
{
xas->xa_node = XAS_BOUNDS;
return NULL;
}
/*
* Starts a walk. If the @xas is already valid, we assume that it's on
* the right path and just return where we've got to. If we're in an
* error state, return NULL. If the index is outside the current scope
* of the xarray, return NULL without changing @xas->xa_node. Otherwise
* set @xas->xa_node to NULL and return the current head of the array.
*/
static void *xas_start(struct xa_state *xas)
{
void *entry;
if (xas_valid(xas))
return xas_reload(xas);
if (xas_error(xas))
return NULL;
entry = xa_head(xas->xa);
if (!xa_is_node(entry)) {
if (xas->xa_index)
return set_bounds(xas);
} else {
if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
return set_bounds(xas);
}
xas->xa_node = NULL;
return entry;
}
static void *xas_descend(struct xa_state *xas, struct xa_node *node)
{
unsigned int offset = get_offset(xas->xa_index, node);
void *entry = xa_entry(xas->xa, node, offset);
xas->xa_node = node;
while (xa_is_sibling(entry)) {
offset = xa_to_sibling(entry);
entry = xa_entry(xas->xa, node, offset);
if (node->shift && xa_is_node(entry))
entry = XA_RETRY_ENTRY;
}
xas->xa_offset = offset;
return entry;
}
/**
* xas_load() - Load an entry from the XArray (advanced).
* @xas: XArray operation state.
*
* Usually walks the @xas to the appropriate state to load the entry
* stored at xa_index. However, it will do nothing and return %NULL if
* @xas is in an error state. xas_load() will never expand the tree.
*
* If the xa_state is set up to operate on a multi-index entry, xas_load()
* may return %NULL or an internal entry, even if there are entries
* present within the range specified by @xas.
*
* Context: Any context. The caller should hold the xa_lock or the RCU lock.
* Return: Usually an entry in the XArray, but see description for exceptions.
*/
void *xas_load(struct xa_state *xas)
{
void *entry = xas_start(xas);
while (xa_is_node(entry)) {
struct xa_node *node = xa_to_node(entry);
if (xas->xa_shift > node->shift)
break;
entry = xas_descend(xas, node);
if (node->shift == 0)
break;
}
return entry;
}
EXPORT_SYMBOL_GPL(xas_load);
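/*
 * Illustrative sketch (not part of the original file): a typical advanced-API
 * reader, essentially what xa_load() later in this file does.  It walks to
 * @index under the RCU read lock and retries if it sees a retry entry.  The
 * function name is hypothetical.
 */
static __maybe_unused void *example_xas_load(struct xarray *xa,
					     unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}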
#define XA_RCU_FREE ((struct xarray *)1)
static void xa_node_free(struct xa_node *node)
{
XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
node->array = XA_RCU_FREE;
call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
/*
* xas_destroy() - Free any resources allocated during the XArray operation.
* @xas: XArray operation state.
*
* Most users will not need to call this function; it is called for you
* by xas_nomem().
*/
void xas_destroy(struct xa_state *xas)
{
struct xa_node *next, *node = xas->xa_alloc;
while (node) {
XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
next = rcu_dereference_raw(node->parent);
radix_tree_node_rcu_free(&node->rcu_head);
xas->xa_alloc = node = next;
}
}
/**
* xas_nomem() - Allocate memory if needed.
* @xas: XArray operation state.
* @gfp: Memory allocation flags.
*
* If we need to add new nodes to the XArray, we try to allocate memory
* with GFP_NOWAIT while holding the lock, which will usually succeed.
* If it fails, @xas is flagged as needing memory to continue. The caller
* should drop the lock and call xas_nomem(). If xas_nomem() succeeds,
* the caller should retry the operation.
*
* Forward progress is guaranteed as one node is allocated here and
* stored in the xa_state where it will be found by xas_alloc(). More
* nodes will likely be found in the slab allocator, but we do not tie
* them up here.
*
* Return: true if memory was needed, and was successfully allocated.
*/
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
if (xas->xa_node != XA_ERROR(-ENOMEM)) {
xas_destroy(xas);
return false;
}
if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
gfp |= __GFP_ACCOUNT;
xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
if (!xas->xa_alloc)
return false;
xas->xa_alloc->parent = NULL;
XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
xas->xa_node = XAS_RESTART;
return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);
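/*
 * Illustrative sketch (not part of the original file): the usual calling
 * pattern for xas_nomem().  The store is attempted with the lock held; if
 * the store could not allocate, the lock is dropped, memory is allocated
 * with @gfp semantics and the store is retried.  Names are hypothetical.
 */
static __maybe_unused int example_store(struct xarray *xa, unsigned long index,
					void *item)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, item);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}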
/*
* __xas_nomem() - Drop locks and allocate memory if needed.
* @xas: XArray operation state.
* @gfp: Memory allocation flags.
*
* Internal variant of xas_nomem().
*
* Return: true if memory was needed, and was successfully allocated.
*/
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
__must_hold(xas->xa->xa_lock)
{
unsigned int lock_type = xa_lock_type(xas->xa);
if (xas->xa_node != XA_ERROR(-ENOMEM)) {
xas_destroy(xas);
return false;
}
if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
gfp |= __GFP_ACCOUNT;
if (gfpflags_allow_blocking(gfp)) {
xas_unlock_type(xas, lock_type);
xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
xas_lock_type(xas, lock_type);
} else {
xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
}
if (!xas->xa_alloc)
return false;
xas->xa_alloc->parent = NULL;
XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
xas->xa_node = XAS_RESTART;
return true;
}
static void xas_update(struct xa_state *xas, struct xa_node *node)
{
if (xas->xa_update)
xas->xa_update(node);
else
XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
}
static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
struct xa_node *parent = xas->xa_node;
struct xa_node *node = xas->xa_alloc;
if (xas_invalid(xas))
return NULL;
if (node) {
xas->xa_alloc = NULL;
} else {
gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;
if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
gfp |= __GFP_ACCOUNT;
node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
if (!node) {
xas_set_err(xas, -ENOMEM);
return NULL;
}
}
if (parent) {
node->offset = xas->xa_offset;
parent->count++;
XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
xas_update(xas, parent);
}
XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
node->shift = shift;
node->count = 0;
node->nr_values = 0;
RCU_INIT_POINTER(node->parent, xas->xa_node);
node->array = xas->xa;
return node;
}
#ifdef CONFIG_XARRAY_MULTI
/* Returns the number of indices covered by a given xa_state */
static unsigned long xas_size(const struct xa_state *xas)
{
return (xas->xa_sibs + 1UL) << xas->xa_shift;
}
#endif
/*
* Use this to calculate the maximum index that will need to be created
* in order to add the entry described by @xas. Because we cannot store a
* multi-index entry at index 0, the calculation is a little more complex
* than you might expect.
*/
static unsigned long xas_max(struct xa_state *xas)
{
unsigned long max = xas->xa_index;
#ifdef CONFIG_XARRAY_MULTI
if (xas->xa_shift || xas->xa_sibs) {
unsigned long mask = xas_size(xas) - 1;
max |= mask;
if (mask == max)
max++;
}
#endif
return max;
}
/* The maximum index that can be contained in the array without expanding it */
static unsigned long max_index(void *entry)
{
if (!xa_is_node(entry))
return 0;
return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}
static void xas_shrink(struct xa_state *xas)
{
struct xarray *xa = xas->xa;
struct xa_node *node = xas->xa_node;
for (;;) {
void *entry;
XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
if (node->count != 1)
break;
entry = xa_entry_locked(xa, node, 0);
if (!entry)
break;
if (!xa_is_node(entry) && node->shift)
break;
if (xa_is_zero(entry) && xa_zero_busy(xa))
entry = NULL;
xas->xa_node = XAS_BOUNDS;
RCU_INIT_POINTER(xa->xa_head, entry);
if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
xa_mark_clear(xa, XA_FREE_MARK);
node->count = 0;
node->nr_values = 0;
if (!xa_is_node(entry))
RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
xas_update(xas, node);
xa_node_free(node);
if (!xa_is_node(entry))
break;
node = xa_to_node(entry);
node->parent = NULL;
}
}
/*
* xas_delete_node() - Attempt to delete an xa_node
* @xas: Array operation state.
*
 * Attempts to delete the @xas->xa_node.  This will fail if the node has
 * a non-zero reference count.
*/
static void xas_delete_node(struct xa_state *xas)
{
struct xa_node *node = xas->xa_node;
for (;;) {
struct xa_node *parent;
XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
if (node->count)
break;
parent = xa_parent_locked(xas->xa, node);
xas->xa_node = parent;
xas->xa_offset = node->offset;
xa_node_free(node);
if (!parent) {
xas->xa->xa_head = NULL;
xas->xa_node = XAS_BOUNDS;
return;
}
parent->slots[xas->xa_offset] = NULL;
parent->count--;
XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
node = parent;
xas_update(xas, node);
}
if (!node->parent)
xas_shrink(xas);
}
/**
* xas_free_nodes() - Free this node and all nodes that it references
* @xas: Array operation state.
* @top: Node to free
*
* This node has been removed from the tree. We must now free it and all
* of its subnodes. There may be RCU walkers with references into the tree,
* so we must replace all entries with retry markers.
*/
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
unsigned int offset = 0;
struct xa_node *node = top;
for (;;) {
void *entry = xa_entry_locked(xas->xa, node, offset);
if (node->shift && xa_is_node(entry)) {
node = xa_to_node(entry);
offset = 0;
continue;
}
if (entry)
RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
offset++;
while (offset == XA_CHUNK_SIZE) {
struct xa_node *parent;
parent = xa_parent_locked(xas->xa, node);
offset = node->offset + 1;
node->count = 0;
node->nr_values = 0;
xas_update(xas, node);
xa_node_free(node);
if (node == top)
return;
node = parent;
}
}
}
/*
* xas_expand adds nodes to the head of the tree until it has reached
* sufficient height to be able to contain @xas->xa_index
*/
static int xas_expand(struct xa_state *xas, void *head)
{
struct xarray *xa = xas->xa;
struct xa_node *node = NULL;
unsigned int shift = 0;
unsigned long max = xas_max(xas);
if (!head) {
if (max == 0)
return 0;
while ((max >> shift) >= XA_CHUNK_SIZE)
shift += XA_CHUNK_SHIFT;
return shift + XA_CHUNK_SHIFT;
} else if (xa_is_node(head)) {
node = xa_to_node(head);
shift = node->shift + XA_CHUNK_SHIFT;
}
xas->xa_node = NULL;
while (max > max_index(head)) {
xa_mark_t mark = 0;
XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
node = xas_alloc(xas, shift);
if (!node)
return -ENOMEM;
node->count = 1;
if (xa_is_value(head))
node->nr_values = 1;
RCU_INIT_POINTER(node->slots[0], head);
/* Propagate the aggregated mark info to the new child */
for (;;) {
if (xa_track_free(xa) && mark == XA_FREE_MARK) {
node_mark_all(node, XA_FREE_MARK);
if (!xa_marked(xa, XA_FREE_MARK)) {
node_clear_mark(node, 0, XA_FREE_MARK);
xa_mark_set(xa, XA_FREE_MARK);
}
} else if (xa_marked(xa, mark)) {
node_set_mark(node, 0, mark);
}
if (mark == XA_MARK_MAX)
break;
mark_inc(mark);
}
/*
* Now that the new node is fully initialised, we can add
* it to the tree
*/
if (xa_is_node(head)) {
xa_to_node(head)->offset = 0;
rcu_assign_pointer(xa_to_node(head)->parent, node);
}
head = xa_mk_node(node);
rcu_assign_pointer(xa->xa_head, head);
xas_update(xas, node);
shift += XA_CHUNK_SHIFT;
}
xas->xa_node = node;
return shift;
}
/*
* xas_create() - Create a slot to store an entry in.
* @xas: XArray operation state.
* @allow_root: %true if we can store the entry in the root directly
*
* Most users will not need to call this function directly, as it is called
* by xas_store(). It is useful for doing conditional store operations
* (see the xa_cmpxchg() implementation for an example).
*
* Return: If the slot already existed, returns the contents of this slot.
* If the slot was newly created, returns %NULL. If it failed to create the
* slot, returns %NULL and indicates the error in @xas.
*/
static void *xas_create(struct xa_state *xas, bool allow_root)
{
struct xarray *xa = xas->xa;
void *entry;
void __rcu **slot;
struct xa_node *node = xas->xa_node;
int shift;
unsigned int order = xas->xa_shift;
if (xas_top(node)) {
entry = xa_head_locked(xa);
xas->xa_node = NULL;
if (!entry && xa_zero_busy(xa))
entry = XA_ZERO_ENTRY;
shift = xas_expand(xas, entry);
if (shift < 0)
return NULL;
if (!shift && !allow_root)
shift = XA_CHUNK_SHIFT;
entry = xa_head_locked(xa);
slot = &xa->xa_head;
} else if (xas_error(xas)) {
return NULL;
} else if (node) {
unsigned int offset = xas->xa_offset;
shift = node->shift;
entry = xa_entry_locked(xa, node, offset);
slot = &node->slots[offset];
} else {
shift = 0;
entry = xa_head_locked(xa);
slot = &xa->xa_head;
}
while (shift > order) {
shift -= XA_CHUNK_SHIFT;
if (!entry) {
node = xas_alloc(xas, shift);
if (!node)
break;
if (xa_track_free(xa))
node_mark_all(node, XA_FREE_MARK);
rcu_assign_pointer(*slot, xa_mk_node(node));
} else if (xa_is_node(entry)) {
node = xa_to_node(entry);
} else {
break;
}
entry = xas_descend(xas, node);
slot = &node->slots[xas->xa_offset];
}
return entry;
}
/**
* xas_create_range() - Ensure that stores to this range will succeed
* @xas: XArray operation state.
*
* Creates all of the slots in the range covered by @xas. Sets @xas to
* create single-index entries and positions it at the beginning of the
* range. This is for the benefit of users which have not yet been
* converted to use multi-index entries.
*/
void xas_create_range(struct xa_state *xas)
{
unsigned long index = xas->xa_index;
unsigned char shift = xas->xa_shift;
unsigned char sibs = xas->xa_sibs;
xas->xa_index |= ((sibs + 1UL) << shift) - 1;
if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
xas->xa_offset |= sibs;
xas->xa_shift = 0;
xas->xa_sibs = 0;
for (;;) {
xas_create(xas, true);
if (xas_error(xas))
goto restore;
if (xas->xa_index <= (index | XA_CHUNK_MASK))
goto success;
xas->xa_index -= XA_CHUNK_SIZE;
for (;;) {
struct xa_node *node = xas->xa_node;
if (node->shift >= shift)
break;
xas->xa_node = xa_parent_locked(xas->xa, node);
xas->xa_offset = node->offset - 1;
if (node->offset != 0)
break;
}
}
restore:
xas->xa_shift = shift;
xas->xa_sibs = sibs;
xas->xa_index = index;
return;
success:
xas->xa_index = index;
if (xas->xa_node)
xas_set_offset(xas);
}
EXPORT_SYMBOL_GPL(xas_create_range);
static void update_node(struct xa_state *xas, struct xa_node *node,
int count, int values)
{
if (!node || (!count && !values))
return;
node->count += count;
node->nr_values += values;
XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
xas_update(xas, node);
if (count < 0)
xas_delete_node(xas);
}
/**
* xas_store() - Store this entry in the XArray.
* @xas: XArray operation state.
* @entry: New entry.
*
* If @xas is operating on a multi-index entry, the entry returned by this
* function is essentially meaningless (it may be an internal entry or it
* may be %NULL, even if there are non-NULL entries at some of the indices
* covered by the range). This is not a problem for any current users,
* and can be changed if needed.
*
* Return: The old entry at this index.
*/
void *xas_store(struct xa_state *xas, void *entry)
{
struct xa_node *node;
void __rcu **slot = &xas->xa->xa_head;
unsigned int offset, max;
int count = 0;
int values = 0;
void *first, *next;
bool value = xa_is_value(entry);
if (entry) {
bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);
first = xas_create(xas, allow_root);
} else {
first = xas_load(xas);
}
if (xas_invalid(xas))
return first;
node = xas->xa_node;
if (node && (xas->xa_shift < node->shift))
xas->xa_sibs = 0;
if ((first == entry) && !xas->xa_sibs)
return first;
next = first;
offset = xas->xa_offset;
max = xas->xa_offset + xas->xa_sibs;
if (node) {
slot = &node->slots[offset];
if (xas->xa_sibs)
xas_squash_marks(xas);
}
if (!entry)
xas_init_marks(xas);
for (;;) {
/*
* Must clear the marks before setting the entry to NULL,
* otherwise xas_for_each_marked may find a NULL entry and
* stop early. rcu_assign_pointer contains a release barrier
* so the mark clearing will appear to happen before the
* entry is set to NULL.
*/
rcu_assign_pointer(*slot, entry);
if (xa_is_node(next) && (!node || node->shift))
xas_free_nodes(xas, xa_to_node(next));
if (!node)
break;
count += !next - !entry;
values += !xa_is_value(first) - !value;
if (entry) {
if (offset == max)
break;
if (!xa_is_sibling(entry))
entry = xa_mk_sibling(xas->xa_offset);
} else {
if (offset == XA_CHUNK_MASK)
break;
}
next = xa_entry_locked(xas->xa, node, ++offset);
if (!xa_is_sibling(next)) {
if (!entry && (offset > max))
break;
first = next;
}
slot++;
}
update_node(xas, node, count, values);
return first;
}
EXPORT_SYMBOL_GPL(xas_store);
/**
* xas_get_mark() - Returns the state of this mark.
* @xas: XArray operation state.
* @mark: Mark number.
*
* Return: true if the mark is set, false if the mark is clear or @xas
* is in an error state.
*/
bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
{
if (xas_invalid(xas))
return false;
if (!xas->xa_node)
return xa_marked(xas->xa, mark);
return node_get_mark(xas->xa_node, xas->xa_offset, mark);
}
EXPORT_SYMBOL_GPL(xas_get_mark);
/**
* xas_set_mark() - Sets the mark on this entry and its parents.
* @xas: XArray operation state.
* @mark: Mark number.
*
* Sets the specified mark on this entry, and walks up the tree setting it
* on all the ancestor entries. Does nothing if @xas has not been walked to
* an entry, or is in an error state.
*/
void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
{
struct xa_node *node = xas->xa_node;
unsigned int offset = xas->xa_offset;
if (xas_invalid(xas))
return;
while (node) {
if (node_set_mark(node, offset, mark))
return;
offset = node->offset;
node = xa_parent_locked(xas->xa, node);
}
if (!xa_marked(xas->xa, mark))
xa_mark_set(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_set_mark);
/**
* xas_clear_mark() - Clears the mark on this entry and its parents.
* @xas: XArray operation state.
* @mark: Mark number.
*
* Clears the specified mark on this entry, and walks back to the head
* attempting to clear it on all the ancestor entries. Does nothing if
* @xas has not been walked to an entry, or is in an error state.
*/
void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
{
struct xa_node *node = xas->xa_node;
unsigned int offset = xas->xa_offset;
if (xas_invalid(xas))
return;
while (node) {
if (!node_clear_mark(node, offset, mark))
return;
if (node_any_mark(node, mark))
return;
offset = node->offset;
node = xa_parent_locked(xas->xa, node);
}
if (xa_marked(xas->xa, mark))
xa_mark_clear(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_clear_mark);
/**
* xas_init_marks() - Initialise all marks for the entry
* @xas: Array operations state.
*
* Initialise all marks for the entry specified by @xas. If we're tracking
* free entries with a mark, we need to set it on all entries. All other
* marks are cleared.
*
* This implementation is not as efficient as it could be; we may walk
* up the tree multiple times.
*/
void xas_init_marks(const struct xa_state *xas)
{
xa_mark_t mark = 0;
for (;;) {
if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
xas_set_mark(xas, mark);
else
xas_clear_mark(xas, mark);
if (mark == XA_MARK_MAX)
break;
mark_inc(mark);
}
}
EXPORT_SYMBOL_GPL(xas_init_marks);
#ifdef CONFIG_XARRAY_MULTI
static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
{
unsigned int marks = 0;
xa_mark_t mark = XA_MARK_0;
for (;;) {
if (node_get_mark(node, offset, mark))
marks |= 1 << (__force unsigned int)mark;
if (mark == XA_MARK_MAX)
break;
mark_inc(mark);
}
return marks;
}
static void node_set_marks(struct xa_node *node, unsigned int offset,
struct xa_node *child, unsigned int marks)
{
xa_mark_t mark = XA_MARK_0;
for (;;) {
if (marks & (1 << (__force unsigned int)mark)) {
node_set_mark(node, offset, mark);
if (child)
node_mark_all(child, mark);
}
if (mark == XA_MARK_MAX)
break;
mark_inc(mark);
}
}
/**
* xas_split_alloc() - Allocate memory for splitting an entry.
* @xas: XArray operation state.
* @entry: New entry which will be stored in the array.
* @order: Current entry order.
* @gfp: Memory allocation flags.
*
* This function should be called before calling xas_split().
* If necessary, it will allocate new nodes (and fill them with @entry)
* to prepare for the upcoming split of an entry of @order size into
* entries of the order stored in the @xas.
*
* Context: May sleep if @gfp flags permit.
*/
void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
gfp_t gfp)
{
unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
unsigned int mask = xas->xa_sibs;
/* XXX: no support for splitting really large entries yet */
if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
goto nomem;
if (xas->xa_shift + XA_CHUNK_SHIFT > order)
return;
do {
unsigned int i;
void *sibling = NULL;
struct xa_node *node;
node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
if (!node)
goto nomem;
node->array = xas->xa;
for (i = 0; i < XA_CHUNK_SIZE; i++) {
if ((i & mask) == 0) {
RCU_INIT_POINTER(node->slots[i], entry);
sibling = xa_mk_sibling(i);
} else {
RCU_INIT_POINTER(node->slots[i], sibling);
}
}
RCU_INIT_POINTER(node->parent, xas->xa_alloc);
xas->xa_alloc = node;
} while (sibs-- > 0);
return;
nomem:
xas_destroy(xas);
xas_set_err(xas, -ENOMEM);
}
EXPORT_SYMBOL_GPL(xas_split_alloc);
/**
* xas_split() - Split a multi-index entry into smaller entries.
* @xas: XArray operation state.
* @entry: New entry to store in the array.
* @order: Current entry order.
*
* The size of the new entries is set in @xas. The value in @entry is
* copied to all the replacement entries.
*
* Context: Any context. The caller should hold the xa_lock.
*/
void xas_split(struct xa_state *xas, void *entry, unsigned int order)
{
unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
unsigned int offset, marks;
struct xa_node *node;
void *curr = xas_load(xas);
int values = 0;
node = xas->xa_node;
if (xas_top(node))
return;
marks = node_get_marks(node, xas->xa_offset);
offset = xas->xa_offset + sibs;
do {
if (xas->xa_shift < node->shift) {
struct xa_node *child = xas->xa_alloc;
xas->xa_alloc = rcu_dereference_raw(child->parent);
child->shift = node->shift - XA_CHUNK_SHIFT;
child->offset = offset;
child->count = XA_CHUNK_SIZE;
child->nr_values = xa_is_value(entry) ?
XA_CHUNK_SIZE : 0;
RCU_INIT_POINTER(child->parent, node);
node_set_marks(node, offset, child, marks);
rcu_assign_pointer(node->slots[offset],
xa_mk_node(child));
if (xa_is_value(curr))
values--;
xas_update(xas, child);
} else {
unsigned int canon = offset - xas->xa_sibs;
node_set_marks(node, canon, NULL, marks);
rcu_assign_pointer(node->slots[canon], entry);
while (offset > canon)
rcu_assign_pointer(node->slots[offset--],
xa_mk_sibling(canon));
values += (xa_is_value(entry) - xa_is_value(curr)) *
(xas->xa_sibs + 1);
}
} while (offset-- > xas->xa_offset);
node->nr_values += values;
xas_update(xas, node);
}
EXPORT_SYMBOL_GPL(xas_split);
#endif
/**
* xas_pause() - Pause a walk to drop a lock.
* @xas: XArray operation state.
*
* Some users need to pause a walk and drop the lock they're holding in
* order to yield to a higher priority thread or carry out an operation
* on an entry. Those users should call this function before they drop
* the lock. It resets the @xas to be suitable for the next iteration
* of the loop after the user has reacquired the lock. If most entries
* found during a walk require you to call xas_pause(), the xa_for_each()
* iterator may be more appropriate.
*
* Note that xas_pause() only works for forward iteration. If a user needs
* to pause a reverse iteration, we will need a xas_pause_rev().
*/
void xas_pause(struct xa_state *xas)
{
struct xa_node *node = xas->xa_node;
if (xas_invalid(xas))
return;
xas->xa_node = XAS_RESTART;
if (node) {
unsigned long offset = xas->xa_offset;
while (++offset < XA_CHUNK_SIZE) {
if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
break;
}
xas->xa_index += (offset - xas->xa_offset) << node->shift;
if (xas->xa_index == 0)
xas->xa_node = XAS_BOUNDS;
} else {
xas->xa_index++;
}
}
EXPORT_SYMBOL_GPL(xas_pause);
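/*
 * Illustrative sketch (not part of the original file): releasing the lock in
 * the middle of an iteration.  Here xas_pause() is called after every entry
 * purely for illustration; real users pause only occasionally.  Names are
 * hypothetical.
 */
static __maybe_unused void example_paused_walk(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		/* ... process entry ... */
		xas_pause(&xas);
		xas_unlock(&xas);
		/* sleep, allocate memory, take other locks, etc. */
		xas_lock(&xas);
	}
	xas_unlock(&xas);
}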
/*
* __xas_prev() - Find the previous entry in the XArray.
* @xas: XArray operation state.
*
* Helper function for xas_prev() which handles all the complex cases
* out of line.
*/
void *__xas_prev(struct xa_state *xas)
{
void *entry;
if (!xas_frozen(xas->xa_node))
xas->xa_index--;
if (!xas->xa_node)
return set_bounds(xas);
if (xas_not_node(xas->xa_node))
return xas_load(xas);
if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
xas->xa_offset--;
while (xas->xa_offset == 255) {
xas->xa_offset = xas->xa_node->offset - 1;
xas->xa_node = xa_parent(xas->xa, xas->xa_node);
if (!xas->xa_node)
return set_bounds(xas);
}
for (;;) {
entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
if (!xa_is_node(entry))
return entry;
xas->xa_node = xa_to_node(entry);
xas_set_offset(xas);
}
}
EXPORT_SYMBOL_GPL(__xas_prev);
/*
* __xas_next() - Find the next entry in the XArray.
* @xas: XArray operation state.
*
* Helper function for xas_next() which handles all the complex cases
* out of line.
*/
void *__xas_next(struct xa_state *xas)
{
void *entry;
if (!xas_frozen(xas->xa_node))
xas->xa_index++;
if (!xas->xa_node)
return set_bounds(xas);
if (xas_not_node(xas->xa_node))
return xas_load(xas);
if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
xas->xa_offset++;
while (xas->xa_offset == XA_CHUNK_SIZE) {
xas->xa_offset = xas->xa_node->offset + 1;
xas->xa_node = xa_parent(xas->xa, xas->xa_node);
if (!xas->xa_node)
return set_bounds(xas);
}
for (;;) {
entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
if (!xa_is_node(entry))
return entry;
xas->xa_node = xa_to_node(entry);
xas_set_offset(xas);
}
}
EXPORT_SYMBOL_GPL(__xas_next);
/**
* xas_find() - Find the next present entry in the XArray.
* @xas: XArray operation state.
* @max: Highest index to return.
*
* If the @xas has not yet been walked to an entry, return the entry
* which has an index >= xas.xa_index. If it has been walked, the entry
* currently being pointed at has been processed, and so we move to the
* next entry.
*
* If no entry is found and the array is smaller than @max, the iterator
* is set to the smallest index not yet in the array. This allows @xas
* to be immediately passed to xas_store().
*
* Return: The entry, if found, otherwise %NULL.
*/
void *xas_find(struct xa_state *xas, unsigned long max)
{
void *entry;
if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
return NULL;
if (xas->xa_index > max)
return set_bounds(xas);
if (!xas->xa_node) {
xas->xa_index = 1;
return set_bounds(xas);
} else if (xas->xa_node == XAS_RESTART) {
entry = xas_load(xas);
if (entry || xas_not_node(xas->xa_node))
return entry;
} else if (!xas->xa_node->shift &&
xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
}
xas_next_offset(xas);
while (xas->xa_node && (xas->xa_index <= max)) {
if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
xas->xa_offset = xas->xa_node->offset + 1;
xas->xa_node = xa_parent(xas->xa, xas->xa_node);
continue;
}
entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
if (xa_is_node(entry)) {
xas->xa_node = xa_to_node(entry);
xas->xa_offset = 0;
continue;
}
if (entry && !xa_is_sibling(entry))
return entry;
xas_next_offset(xas);
}
if (!xas->xa_node)
xas->xa_node = XAS_BOUNDS;
return NULL;
}
EXPORT_SYMBOL_GPL(xas_find);
/**
* xas_find_marked() - Find the next marked entry in the XArray.
* @xas: XArray operation state.
* @max: Highest index to return.
* @mark: Mark number to search for.
*
* If the @xas has not yet been walked to an entry, return the marked entry
* which has an index >= xas.xa_index. If it has been walked, the entry
* currently being pointed at has been processed, and so we return the
* first marked entry with an index > xas.xa_index.
*
* If no marked entry is found and the array is smaller than @max, @xas is
* set to the bounds state and xas->xa_index is set to the smallest index
* not yet in the array. This allows @xas to be immediately passed to
* xas_store().
*
* If no entry is found before @max is reached, @xas is set to the restart
* state.
*
* Return: The entry, if found, otherwise %NULL.
*/
void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
{
bool advance = true;
unsigned int offset;
void *entry;
if (xas_error(xas))
return NULL;
if (xas->xa_index > max)
goto max;
if (!xas->xa_node) {
xas->xa_index = 1;
goto out;
} else if (xas_top(xas->xa_node)) {
advance = false;
entry = xa_head(xas->xa);
xas->xa_node = NULL;
if (xas->xa_index > max_index(entry))
goto out;
if (!xa_is_node(entry)) {
if (xa_marked(xas->xa, mark))
return entry;
xas->xa_index = 1;
goto out;
}
xas->xa_node = xa_to_node(entry);
xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
}
while (xas->xa_index <= max) {
if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
xas->xa_offset = xas->xa_node->offset + 1;
xas->xa_node = xa_parent(xas->xa, xas->xa_node);
if (!xas->xa_node)
break;
advance = false;
continue;
}
if (!advance) {
entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
if (xa_is_sibling(entry)) {
xas->xa_offset = xa_to_sibling(entry);
xas_move_index(xas, xas->xa_offset);
}
}
offset = xas_find_chunk(xas, advance, mark);
if (offset > xas->xa_offset) {
advance = false;
xas_move_index(xas, offset);
/* Mind the wrap */
if ((xas->xa_index - 1) >= max)
goto max;
xas->xa_offset = offset;
if (offset == XA_CHUNK_SIZE)
continue;
}
entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
continue;
if (!xa_is_node(entry))
return entry;
xas->xa_node = xa_to_node(entry);
xas_set_offset(xas);
}
out:
if (xas->xa_index > max)
goto max;
return set_bounds(xas);
max:
xas->xa_node = XAS_RESTART;
return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_marked);
/**
* xas_find_conflict() - Find the next present entry in a range.
* @xas: XArray operation state.
*
* The @xas describes both a range and a position within that range.
*
* Context: Any context. Expects xa_lock to be held.
* Return: The next entry in the range covered by @xas or %NULL.
*/
void *xas_find_conflict(struct xa_state *xas)
{
void *curr;
if (xas_error(xas))
return NULL;
if (!xas->xa_node)
return NULL;
if (xas_top(xas->xa_node)) {
curr = xas_start(xas);
if (!curr)
return NULL;
while (xa_is_node(curr)) {
struct xa_node *node = xa_to_node(curr);
curr = xas_descend(xas, node);
}
if (curr)
return curr;
}
if (xas->xa_node->shift > xas->xa_shift)
return NULL;
for (;;) {
if (xas->xa_node->shift == xas->xa_shift) {
if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
break;
} else if (xas->xa_offset == XA_CHUNK_MASK) {
xas->xa_offset = xas->xa_node->offset;
xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
if (!xas->xa_node)
break;
continue;
}
curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
if (xa_is_sibling(curr))
continue;
while (xa_is_node(curr)) {
xas->xa_node = xa_to_node(curr);
xas->xa_offset = 0;
curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
}
if (curr)
return curr;
}
xas->xa_offset -= xas->xa_sibs;
return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_conflict);
/**
* xa_load() - Load an entry from an XArray.
* @xa: XArray.
* @index: index into array.
*
* Context: Any context. Takes and releases the RCU lock.
* Return: The entry at @index in @xa.
*/
void *xa_load(struct xarray *xa, unsigned long index)
{
XA_STATE(xas, xa, index);
void *entry;
rcu_read_lock();
do {
entry = xas_load(&xas);
if (xa_is_zero(entry))
entry = NULL;
} while (xas_retry(&xas, entry));
rcu_read_unlock();
return entry;
}
EXPORT_SYMBOL(xa_load);
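/* Map a zero entry to NULL and a recorded error to an encoded error pointer. */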
static void *xas_result(struct xa_state *xas, void *curr)
{
if (xa_is_zero(curr))
return NULL;
if (xas_error(xas))
curr = xas->xa_node;
return curr;
}
/**
* __xa_erase() - Erase this entry from the XArray while locked.
* @xa: XArray.
* @index: Index into array.
*
* After this function returns, loading from @index will return %NULL.
* If the index is part of a multi-index entry, all indices will be erased
* and none of the entries will be part of a multi-index entry.
*
* Context: Any context. Expects xa_lock to be held on entry.
* Return: The entry which used to be at this index.
*/
void *__xa_erase(struct xarray *xa, unsigned long index)
{
XA_STATE(xas, xa, index);
return xas_result(&xas, xas_store(&xas, NULL));
}
EXPORT_SYMBOL(__xa_erase);
/**
* xa_erase() - Erase this entry from the XArray.
* @xa: XArray.
* @index: Index of entry.
*
* After this function returns, loading from @index will return %NULL.
* If the index is part of a multi-index entry, all indices will be erased
* and none of the entries will be part of a multi-index entry.
*
* Context: Any context. Takes and releases the xa_lock.
* Return: The entry which used to be at this index.
*/
void *xa_erase(struct xarray *xa, unsigned long index)
{
void *entry;
xa_lock(xa);
entry = __xa_erase(xa, index);
xa_unlock(xa);
return entry;
}
EXPORT_SYMBOL(xa_erase);
/**
* __xa_store() - Store this entry in the XArray.
* @xa: XArray.
* @index: Index into array.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
* You must already be holding the xa_lock when calling this function.
* It will drop the lock if needed to allocate memory, and then reacquire
* it afterwards.
*
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
* Return: The old entry at this index or xa_err() if an error happened.
*/
void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
XA_STATE(xas, xa, index);
void *curr;
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return XA_ERROR(-EINVAL);
if (xa_track_free(xa) && !entry)
entry = XA_ZERO_ENTRY;
do {
curr = xas_store(&xas, entry);
if (xa_track_free(xa))
xas_clear_mark(&xas, XA_FREE_MARK);
} while (__xas_nomem(&xas, gfp));
return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_store);
/**
* xa_store() - Store this entry in the XArray.
* @xa: XArray.
* @index: Index into array.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
* After this function returns, loads from this index will return @entry.
* Storing into an existing multi-index entry updates the entry of every index.
* The marks associated with @index are unaffected unless @entry is %NULL.
*
* Context: Any context. Takes and releases the xa_lock.
* May sleep if the @gfp flags permit.
* Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
* cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
* failed.
*/
void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
void *curr;
xa_lock(xa);
curr = __xa_store(xa, index, entry, gfp);
xa_unlock(xa);
return curr;
}
EXPORT_SYMBOL(xa_store);
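/*
 * Illustrative sketch (not part of the original file): basic use of the
 * normal API.  Errors from xa_store() come back as encoded pointers and are
 * unpacked with xa_err().  The index and names are hypothetical.
 */
static __maybe_unused int example_normal_api(struct xarray *xa, void *item)
{
	void *old = xa_store(xa, 42, item, GFP_KERNEL);

	if (xa_is_err(old))
		return xa_err(old);
	if (xa_load(xa, 42) != item)
		return -EINVAL;
	xa_erase(xa, 42);
	return 0;
}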
/**
 * __xa_cmpxchg() - Conditionally store this entry in the XArray.
* @xa: XArray.
* @index: Index into array.
* @old: Old value to test against.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.  @entry is only stored if the entry currently at @index
 * is @old (compared by pointer equality); otherwise nothing is changed.
*
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
* Return: The old entry at this index or xa_err() if an error happened.
*/
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp)
{
XA_STATE(xas, xa, index);
void *curr;
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return XA_ERROR(-EINVAL);
do {
curr = xas_load(&xas);
if (curr == old) {
xas_store(&xas, entry);
if (xa_track_free(xa) && entry && !curr)
xas_clear_mark(&xas, XA_FREE_MARK);
}
} while (__xas_nomem(&xas, gfp));
return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_cmpxchg);
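/*
 * Illustrative sketch (not part of the original file): the unlocked
 * xa_cmpxchg() wrapper compares by pointer equality, so a common pattern is
 * to install an object only if the slot is still empty.  Names are
 * hypothetical.
 */
static __maybe_unused int example_install(struct xarray *xa,
					  unsigned long index, void *new)
{
	void *curr = xa_cmpxchg(xa, index, NULL, new, GFP_KERNEL);

	if (xa_is_err(curr))
		return xa_err(curr);
	return curr ? -EBUSY : 0;
}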
/**
* __xa_insert() - Store this entry in the XArray if no entry is present.
* @xa: XArray.
* @index: Index into array.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
* Inserting a NULL entry will store a reserved entry (like xa_reserve())
* if no entry is present. Inserting will fail if a reserved entry is
* present, even though loading from this index will return NULL.
*
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
* Return: 0 if the store succeeded. -EBUSY if another entry was present.
* -ENOMEM if memory could not be allocated.
*/
int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
XA_STATE(xas, xa, index);
void *curr;
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
if (!entry)
entry = XA_ZERO_ENTRY;
do {
curr = xas_load(&xas);
if (!curr) {
xas_store(&xas, entry);
if (xa_track_free(xa))
xas_clear_mark(&xas, XA_FREE_MARK);
} else {
xas_set_err(&xas, -EBUSY);
}
} while (__xas_nomem(&xas, gfp));
return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_insert);
#ifdef CONFIG_XARRAY_MULTI
static void xas_set_range(struct xa_state *xas, unsigned long first,
unsigned long last)
{
unsigned int shift = 0;
unsigned long sibs = last - first;
unsigned int offset = XA_CHUNK_MASK;
xas_set(xas, first);
while ((first & XA_CHUNK_MASK) == 0) {
if (sibs < XA_CHUNK_MASK)
break;
if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
break;
shift += XA_CHUNK_SHIFT;
if (offset == XA_CHUNK_MASK)
offset = sibs & XA_CHUNK_MASK;
sibs >>= XA_CHUNK_SHIFT;
first >>= XA_CHUNK_SHIFT;
}
offset = first & XA_CHUNK_MASK;
if (offset + sibs > XA_CHUNK_MASK)
sibs = XA_CHUNK_MASK - offset;
if ((((first + sibs + 1) << shift) - 1) > last)
sibs -= 1;
xas->xa_shift = shift;
xas->xa_sibs = sibs;
}
/**
* xa_store_range() - Store this entry at a range of indices in the XArray.
* @xa: XArray.
* @first: First index to affect.
* @last: Last index to affect.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
 * After this function returns, loads from any index between @first and @last,
 * inclusive, will return @entry.
 * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with the affected indices are unaffected unless
 * @entry is %NULL.
*
* Context: Process context. Takes and releases the xa_lock. May sleep
* if the @gfp flags permit.
* Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
* an XArray, or xa_err(-ENOMEM) if memory allocation failed.
*/
void *xa_store_range(struct xarray *xa, unsigned long first,
unsigned long last, void *entry, gfp_t gfp)
{
XA_STATE(xas, xa, 0);
if (WARN_ON_ONCE(xa_is_internal(entry)))
return XA_ERROR(-EINVAL);
if (last < first)
return XA_ERROR(-EINVAL);
do {
xas_lock(&xas);
if (entry) {
unsigned int order = BITS_PER_LONG;
if (last + 1)
order = __ffs(last + 1);
xas_set_order(&xas, last, order);
xas_create(&xas, true);
if (xas_error(&xas))
goto unlock;
}
do {
xas_set_range(&xas, first, last);
xas_store(&xas, entry);
if (xas_error(&xas))
goto unlock;
first += xas_size(&xas);
} while (first <= last);
unlock:
xas_unlock(&xas);
} while (xas_nomem(&xas, gfp));
return xas_result(&xas, NULL);
}
EXPORT_SYMBOL(xa_store_range);
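/*
 * Illustrative sketch (not part of the original file, CONFIG_XARRAY_MULTI
 * only): store one entry over a whole range, then look it up through any
 * index inside that range.  The range and names are hypothetical.
 */
static __maybe_unused int example_store_range(struct xarray *xa, void *item)
{
	void *ret = xa_store_range(xa, 16, 31, item, GFP_KERNEL);

	if (xa_is_err(ret))
		return xa_err(ret);
	/* Every index from 16 to 31 now returns @item. */
	return xa_load(xa, 20) == item ? 0 : -EINVAL;
}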
/**
* xa_get_order() - Get the order of an entry.
* @xa: XArray.
* @index: Index of the entry.
*
* Return: A number between 0 and 63 indicating the order of the entry.
*/
int xa_get_order(struct xarray *xa, unsigned long index)
{
XA_STATE(xas, xa, index);
void *entry;
int order = 0;
rcu_read_lock();
entry = xas_load(&xas);
if (!entry)
goto unlock;
if (!xas.xa_node)
goto unlock;
for (;;) {
unsigned int slot = xas.xa_offset + (1 << order);
if (slot >= XA_CHUNK_SIZE)
break;
if (!xa_is_sibling(xas.xa_node->slots[slot]))
break;
order++;
}
order += xas.xa_node->shift;
unlock:
rcu_read_unlock();
return order;
}
EXPORT_SYMBOL(xa_get_order);
#endif /* CONFIG_XARRAY_MULTI */
/**
* __xa_alloc() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
* @id: Pointer to ID.
* @limit: Range for allocated ID.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
* Finds an empty entry in @xa between @limit.min and @limit.max,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
* Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
* in xa_init_flags().
*
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
* -EBUSY if there are no free entries in @limit.
*/
int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
struct xa_limit limit, gfp_t gfp)
{
XA_STATE(xas, xa, 0);
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
if (WARN_ON_ONCE(!xa_track_free(xa)))
return -EINVAL;
if (!entry)
entry = XA_ZERO_ENTRY;
do {
xas.xa_index = limit.min;
xas_find_marked(&xas, limit.max, XA_FREE_MARK);
if (xas.xa_node == XAS_RESTART)
xas_set_err(&xas, -EBUSY);
else
*id = xas.xa_index;
xas_store(&xas, entry);
xas_clear_mark(&xas, XA_FREE_MARK);
} while (__xas_nomem(&xas, gfp));
return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_alloc);
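/*
 * Illustrative sketch (not part of the original file): allocating an ID with
 * the unlocked xa_alloc() wrapper.  The xarray must have been initialised
 * with XA_FLAGS_ALLOC (e.g. DEFINE_XARRAY_ALLOC()).  Names are hypothetical.
 */
static __maybe_unused int example_alloc_id(struct xarray *xa, void *item,
					   u32 *id)
{
	/* On success, *id is an index in [0, UINT_MAX] now storing @item. */
	return xa_alloc(xa, id, item, xa_limit_32b, GFP_KERNEL);
}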
/**
* __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
* @id: Pointer to ID.
* @entry: New entry.
* @limit: Range of allocated ID.
* @next: Pointer to next ID to allocate.
* @gfp: Memory allocation flags.
*
* Finds an empty entry in @xa between @limit.min and @limit.max,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
* Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
* in xa_init_flags().
*
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
* allocation succeeded after wrapping, -ENOMEM if memory could not be
* allocated or -EBUSY if there are no free entries in @limit.
*/
int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
struct xa_limit limit, u32 *next, gfp_t gfp)
{
u32 min = limit.min;
int ret;
limit.min = max(min, *next);
ret = __xa_alloc(xa, id, entry, limit, gfp);
if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
ret = 1;
}
if (ret < 0 && limit.min > min) {
limit.min = min;
ret = __xa_alloc(xa, id, entry, limit, gfp);
if (ret == 0)
ret = 1;
}
if (ret >= 0) {
*next = *id + 1;
if (*next == 0)
xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
}
return ret;
}
EXPORT_SYMBOL(__xa_alloc_cyclic);
/**
* __xa_set_mark() - Set this mark on this entry while locked.
* @xa: XArray.
* @index: Index of entry.
* @mark: Mark number.
*
* Attempting to set a mark on a %NULL entry does not succeed.
*
* Context: Any context. Expects xa_lock to be held on entry.
*/
void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
XA_STATE(xas, xa, index);
void *entry = xas_load(&xas);
if (entry)
xas_set_mark(&xas, mark);
}
EXPORT_SYMBOL(__xa_set_mark);
/**
* __xa_clear_mark() - Clear this mark on this entry while locked.
* @xa: XArray.
* @index: Index of entry.
* @mark: Mark number.
*
* Context: Any context. Expects xa_lock to be held on entry.
*/
void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
XA_STATE(xas, xa, index);
void *entry = xas_load(&xas);
if (entry)
xas_clear_mark(&xas, mark);
}
EXPORT_SYMBOL(__xa_clear_mark);
/**
* xa_get_mark() - Inquire whether this mark is set on this entry.
* @xa: XArray.
* @index: Index of entry.
* @mark: Mark number.
*
* This function uses the RCU read lock, so the result may be out of date
* by the time it returns. If you need the result to be stable, use a lock.
*
* Context: Any context. Takes and releases the RCU lock.
* Return: True if the entry at @index has this mark set, false if it doesn't.
*/
bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
XA_STATE(xas, xa, index);
void *entry;
rcu_read_lock();
entry = xas_start(&xas);
while (xas_get_mark(&xas, mark)) {
if (!xa_is_node(entry))
goto found;
entry = xas_descend(&xas, xa_to_node(entry));
}
rcu_read_unlock();
return false;
found:
rcu_read_unlock();
return true;
}
EXPORT_SYMBOL(xa_get_mark);
/**
* xa_set_mark() - Set this mark on this entry.
* @xa: XArray.
* @index: Index of entry.
* @mark: Mark number.
*
* Attempting to set a mark on a %NULL entry does not succeed.
*
* Context: Process context. Takes and releases the xa_lock.
*/
void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
xa_lock(xa);
__xa_set_mark(xa, index, mark);
xa_unlock(xa);
}
EXPORT_SYMBOL(xa_set_mark);
/**
* xa_clear_mark() - Clear this mark on this entry.
* @xa: XArray.
* @index: Index of entry.
* @mark: Mark number.
*
* Clearing a mark always succeeds.
*
* Context: Process context. Takes and releases the xa_lock.
*/
void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
xa_lock(xa);
__xa_clear_mark(xa, index, mark);
xa_unlock(xa);
}
EXPORT_SYMBOL(xa_clear_mark);
/**
* xa_find() - Search the XArray for an entry.
* @xa: XArray.
* @indexp: Pointer to an index.
* @max: Maximum index to search to.
* @filter: Selection criterion.
*
* Finds the entry in @xa which matches the @filter, and has the lowest
* index that is at least @indexp and no more than @max.
* If an entry is found, @indexp is updated to be the index of the entry.
* This function is protected by the RCU read lock, so it may not find
* entries which are being simultaneously added. It will not return an
* %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
*
* Context: Any context. Takes and releases the RCU lock.
* Return: The entry, if found, otherwise %NULL.
*/
void *xa_find(struct xarray *xa, unsigned long *indexp,
unsigned long max, xa_mark_t filter)
{
XA_STATE(xas, xa, *indexp);
void *entry;
rcu_read_lock();
do {
if ((__force unsigned int)filter < XA_MAX_MARKS)
entry = xas_find_marked(&xas, max, filter);
else
entry = xas_find(&xas, max);
} while (xas_retry(&xas, entry));
rcu_read_unlock();
if (entry)
*indexp = xas.xa_index;
return entry;
}
EXPORT_SYMBOL(xa_find);
static bool xas_sibling(struct xa_state *xas)
{
struct xa_node *node = xas->xa_node;
unsigned long mask;
if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
return false;
mask = (XA_CHUNK_SIZE << node->shift) - 1;
return (xas->xa_index & mask) >
((unsigned long)xas->xa_offset << node->shift);
}
/**
* xa_find_after() - Search the XArray for a present entry.
* @xa: XArray.
* @indexp: Pointer to an index.
* @max: Maximum index to search to.
* @filter: Selection criterion.
*
* Finds the entry in @xa which matches the @filter and has the lowest
* index that is above @indexp and no more than @max.
* If an entry is found, @indexp is updated to be the index of the entry.
* This function is protected by the RCU read lock, so it may miss entries
* which are being simultaneously added. It will not return an
* %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
*
* Context: Any context. Takes and releases the RCU lock.
* Return: The pointer, if found, otherwise %NULL.
*/
void *xa_find_after(struct xarray *xa, unsigned long *indexp,
unsigned long max, xa_mark_t filter)
{
XA_STATE(xas, xa, *indexp + 1);
void *entry;
if (xas.xa_index == 0)
return NULL;
rcu_read_lock();
for (;;) {
if ((__force unsigned int)filter < XA_MAX_MARKS)
entry = xas_find_marked(&xas, max, filter);
else
entry = xas_find(&xas, max);
if (xas_invalid(&xas))
break;
if (xas_sibling(&xas))
continue;
if (!xas_retry(&xas, entry))
break;
}
rcu_read_unlock();
if (entry)
*indexp = xas.xa_index;
return entry;
}
EXPORT_SYMBOL(xa_find_after);
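/*
 * Illustrative sketch (not part of the original file): xa_for_each() is the
 * usual way to combine xa_find() and xa_find_after() into a full iteration.
 * The function name is hypothetical.
 */
static __maybe_unused unsigned long example_count_present(struct xarray *xa)
{
	unsigned long index, count = 0;
	void *entry;

	xa_for_each(xa, index, entry)
		count++;

	return count;
}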
static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
unsigned long max, unsigned int n)
{
void *entry;
unsigned int i = 0;
rcu_read_lock();
xas_for_each(xas, entry, max) {
if (xas_retry(xas, entry))
continue;
dst[i++] = entry;
if (i == n)
break;
}
rcu_read_unlock();
return i;
}
static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
unsigned long max, unsigned int n, xa_mark_t mark)
{
void *entry;
unsigned int i = 0;
rcu_read_lock();
xas_for_each_marked(xas, entry, max, mark) {
if (xas_retry(xas, entry))
continue;
dst[i++] = entry;
if (i == n)
break;
}
rcu_read_unlock();
return i;
}
/**
* xa_extract() - Copy selected entries from the XArray into a normal array.
* @xa: The source XArray to copy from.
* @dst: The buffer to copy entries into.
* @start: The first index in the XArray eligible to be selected.
* @max: The last index in the XArray eligible to be selected.
* @n: The maximum number of entries to copy.
* @filter: Selection criterion.
*
* Copies up to @n entries that match @filter from the XArray. The
* copied entries will have indices between @start and @max, inclusive.
*
* The @filter may be an XArray mark value, in which case entries which are
* marked with that mark will be copied. It may also be %XA_PRESENT, in
* which case all entries which are not %NULL will be copied.
*
* The entries returned may not represent a snapshot of the XArray at a
* moment in time. For example, if another thread stores to index 5, then
* index 10, calling xa_extract() may return the old contents of index 5
* and the new contents of index 10. Indices not modified while this
* function is running will not be skipped.
*
* If you need stronger guarantees, holding the xa_lock across calls to this
* function will prevent concurrent modification.
*
* Context: Any context. Takes and releases the RCU lock.
* Return: The number of entries copied.
*/
unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
unsigned long max, unsigned int n, xa_mark_t filter)
{
XA_STATE(xas, xa, start);
if (!n)
return 0;
if ((__force unsigned int)filter < XA_MAX_MARKS)
return xas_extract_marked(&xas, dst, max, n, filter);
return xas_extract_present(&xas, dst, max, n);
}
EXPORT_SYMBOL(xa_extract);
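/*
 * Illustrative sketch (not part of the original file): copy up to 16 present
 * entries from the first 100 indices into a plain array supplied by the
 * caller.  Names and bounds are hypothetical.
 */
static __maybe_unused unsigned int example_extract(struct xarray *xa, void **buf)
{
	return xa_extract(xa, buf, 0, 99, 16, XA_PRESENT);
}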
/**
* xa_delete_node() - Private interface for workingset code.
* @node: Node to be removed from the tree.
* @update: Function to call to update ancestor nodes.
*
* Context: xa_lock must be held on entry and will not be released.
*/
void xa_delete_node(struct xa_node *node, xa_update_node_t update)
{
struct xa_state xas = {
.xa = node->array,
.xa_index = (unsigned long)node->offset <<
(node->shift + XA_CHUNK_SHIFT),
.xa_shift = node->shift + XA_CHUNK_SHIFT,
.xa_offset = node->offset,
.xa_node = xa_parent_locked(node->array, node),
.xa_update = update,
};
xas_store(&xas, NULL);
}
EXPORT_SYMBOL_GPL(xa_delete_node); /* For the benefit of the test suite */
/**
* xa_destroy() - Free all internal data structures.
* @xa: XArray.
*
* After calling this function, the XArray is empty and has freed all memory
* allocated for its internal data structures. You are responsible for
* freeing the objects referenced by the XArray.
*
* Context: Any context. Takes and releases the xa_lock, interrupt-safe.
*/
void xa_destroy(struct xarray *xa)
{
XA_STATE(xas, xa, 0);
unsigned long flags;
void *entry;
xas.xa_node = NULL;
xas_lock_irqsave(&xas, flags);
entry = xa_head_locked(xa);
RCU_INIT_POINTER(xa->xa_head, NULL);
xas_init_marks(&xas);
if (xa_zero_busy(xa))
xa_mark_clear(xa, XA_FREE_MARK);
/* lockdep checks we're still holding the lock in xas_free_nodes() */
if (xa_is_node(entry))
xas_free_nodes(&xas, xa_to_node(entry));
xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(xa_destroy);
#ifdef XA_DEBUG
void xa_dump_node(const struct xa_node *node)
{
unsigned i, j;
if (!node)
return;
if ((unsigned long)node & 3) {
pr_cont("node %px\n", node);
return;
}
pr_cont("node %px %s %d parent %px shift %d count %d values %d "
"array %px list %px %px marks",
node, node->parent ? "offset" : "max", node->offset,
node->parent, node->shift, node->count, node->nr_values,
node->array, node->private_list.prev, node->private_list.next);
for (i = 0; i < XA_MAX_MARKS; i++)
for (j = 0; j < XA_MARK_LONGS; j++)
pr_cont(" %lx", node->marks[i][j]);
pr_cont("\n");
}
void xa_dump_index(unsigned long index, unsigned int shift)
{
if (!shift)
pr_info("%lu: ", index);
else if (shift >= BITS_PER_LONG)
pr_info("0-%lu: ", ~0UL);
else
pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
}
void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
{
if (!entry)
return;
xa_dump_index(index, shift);
if (xa_is_node(entry)) {
if (shift == 0) {
pr_cont("%px\n", entry);
} else {
unsigned long i;
struct xa_node *node = xa_to_node(entry);
xa_dump_node(node);
for (i = 0; i < XA_CHUNK_SIZE; i++)
xa_dump_entry(node->slots[i],
index + (i << node->shift), node->shift);
}
} else if (xa_is_value(entry))
pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
xa_to_value(entry), entry);
else if (!xa_is_internal(entry))
pr_cont("%px\n", entry);
else if (xa_is_retry(entry))
pr_cont("retry (%ld)\n", xa_to_internal(entry));
else if (xa_is_sibling(entry))
pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
else if (xa_is_zero(entry))
pr_cont("zero (%ld)\n", xa_to_internal(entry));
else
pr_cont("UNKNOWN ENTRY (%px)\n", entry);
}
void xa_dump(const struct xarray *xa)
{
void *entry = xa->xa_head;
unsigned int shift = 0;
pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
xa->xa_flags, xa_marked(xa, XA_MARK_0),
xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
if (xa_is_node(entry))
shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
xa_dump_entry(entry, 0, shift);
}
#endif
| linux-master | lib/xarray.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Generic implementation of 64-bit atomics using spinlocks,
* useful on processors that don't have 64-bit atomic instructions.
*
* Copyright © 2009 Paul Mackerras, IBM Corp. <[email protected]>
*/
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>
/*
* We use a hashed array of spinlocks to provide exclusive access
* to each atomic64_t variable. Since this is expected to be used on
* systems with small numbers of CPUs (<= 4 or so), we use a
* relatively small array of 16 spinlocks to avoid wasting too much
* memory on the spinlock array.
*/
#define NR_LOCKS 16
/*
* Ensure each lock is in a separate cacheline.
*/
static union {
raw_spinlock_t lock;
char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
[0 ... (NR_LOCKS - 1)] = {
.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
},
};
static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
unsigned long addr = (unsigned long) v;
addr >>= L1_CACHE_SHIFT;
addr ^= (addr >> 8) ^ (addr >> 16);
return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
s64 generic_atomic64_read(const atomic64_t *v)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
s64 val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter;
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_read);
void generic_atomic64_set(atomic64_t *v, s64 i)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
raw_spin_lock_irqsave(lock, flags);
v->counter = i;
raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(generic_atomic64_set);
#define ATOMIC64_OP(op, c_op) \
void generic_atomic64_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
\
raw_spin_lock_irqsave(lock, flags); \
v->counter c_op a; \
raw_spin_unlock_irqrestore(lock, flags); \
} \
EXPORT_SYMBOL(generic_atomic64_##op);
#define ATOMIC64_OP_RETURN(op, c_op) \
s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
s64 val; \
\
raw_spin_lock_irqsave(lock, flags); \
val = (v->counter c_op a); \
raw_spin_unlock_irqrestore(lock, flags); \
return val; \
} \
EXPORT_SYMBOL(generic_atomic64_##op##_return);
#define ATOMIC64_FETCH_OP(op, c_op) \
s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \
unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
s64 val; \
\
raw_spin_lock_irqsave(lock, flags); \
val = v->counter; \
v->counter c_op a; \
raw_spin_unlock_irqrestore(lock, flags); \
return val; \
} \
EXPORT_SYMBOL(generic_atomic64_fetch_##op);
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \
ATOMIC64_OP_RETURN(op, c_op) \
ATOMIC64_FETCH_OP(op, c_op)
ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op) \
ATOMIC64_OP(op, c_op) \
ATOMIC64_FETCH_OP(op, c_op)
ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP
s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
s64 val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter - 1;
if (val >= 0)
v->counter = val;
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
s64 val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter;
if (val == o)
v->counter = n;
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_cmpxchg);
s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
s64 val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter;
v->counter = new;
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_xchg);
s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
s64 val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter;
if (val != u)
v->counter += a;
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);
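/*
 * Editor's illustrative sketch, not part of the original file: because
 * every access to a given atomic64_t hashes to the same spinlock via
 * lock_addr(), compound operations can be layered on the primitives
 * above with an ordinary cmpxchg loop.  The helper name is hypothetical.
 */
static inline s64 example_atomic64_add_return_cmpxchg(s64 a, atomic64_t *v)
{
	s64 old, new;

	do {
		old = generic_atomic64_read(v);
		new = old + a;
	} while (generic_atomic64_cmpxchg(v, old, new) != old);

	return new;
}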
| linux-master | lib/atomic64.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* test_fprobe.c - simple sanity test for fprobe
*/
#include <linux/kernel.h>
#include <linux/fprobe.h>
#include <linux/random.h>
#include <kunit/test.h>
#define div_factor 3
static struct kunit *current_test;
static u32 rand1, entry_val, exit_val;
/* Use indirect calls to avoid inlining the target functions */
static u32 (*target)(u32 value);
static u32 (*target2)(u32 value);
static u32 (*target_nest)(u32 value, u32 (*nest)(u32));
static unsigned long target_ip;
static unsigned long target2_ip;
static unsigned long target_nest_ip;
static int entry_return_value;
static noinline u32 fprobe_selftest_target(u32 value)
{
return (value / div_factor);
}
static noinline u32 fprobe_selftest_target2(u32 value)
{
return (value / div_factor) + 1;
}
static noinline u32 fprobe_selftest_nest_target(u32 value, u32 (*nest)(u32))
{
return nest(value + 2);
}
static notrace int fp_entry_handler(struct fprobe *fp, unsigned long ip,
unsigned long ret_ip,
struct pt_regs *regs, void *data)
{
KUNIT_EXPECT_FALSE(current_test, preemptible());
/* This handler can be called for both fprobe_selftest_target and fprobe_selftest_target2 */
if (ip != target_ip)
KUNIT_EXPECT_EQ(current_test, ip, target2_ip);
entry_val = (rand1 / div_factor);
if (fp->entry_data_size) {
KUNIT_EXPECT_NOT_NULL(current_test, data);
if (data)
*(u32 *)data = entry_val;
} else
KUNIT_EXPECT_NULL(current_test, data);
return entry_return_value;
}
static notrace void fp_exit_handler(struct fprobe *fp, unsigned long ip,
unsigned long ret_ip,
struct pt_regs *regs, void *data)
{
unsigned long ret = regs_return_value(regs);
KUNIT_EXPECT_FALSE(current_test, preemptible());
if (ip != target_ip) {
KUNIT_EXPECT_EQ(current_test, ip, target2_ip);
KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1);
} else
KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor));
KUNIT_EXPECT_EQ(current_test, entry_val, (rand1 / div_factor));
exit_val = entry_val + div_factor;
if (fp->entry_data_size) {
KUNIT_EXPECT_NOT_NULL(current_test, data);
if (data)
KUNIT_EXPECT_EQ(current_test, *(u32 *)data, entry_val);
} else
KUNIT_EXPECT_NULL(current_test, data);
}
static notrace int nest_entry_handler(struct fprobe *fp, unsigned long ip,
unsigned long ret_ip,
struct pt_regs *regs, void *data)
{
KUNIT_EXPECT_FALSE(current_test, preemptible());
return 0;
}
static notrace void nest_exit_handler(struct fprobe *fp, unsigned long ip,
unsigned long ret_ip,
struct pt_regs *regs, void *data)
{
KUNIT_EXPECT_FALSE(current_test, preemptible());
KUNIT_EXPECT_EQ(current_test, ip, target_nest_ip);
}
/* Test entry only (no rethook) */
static void test_fprobe_entry(struct kunit *test)
{
struct fprobe fp_entry = {
.entry_handler = fp_entry_handler,
};
current_test = test;
/* Before registering, unregister should fail. */
KUNIT_EXPECT_NE(test, 0, unregister_fprobe(&fp_entry));
KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp_entry, "fprobe_selftest_target*", NULL));
entry_val = 0;
exit_val = 0;
target(rand1);
KUNIT_EXPECT_NE(test, 0, entry_val);
KUNIT_EXPECT_EQ(test, 0, exit_val);
entry_val = 0;
exit_val = 0;
target2(rand1);
KUNIT_EXPECT_NE(test, 0, entry_val);
KUNIT_EXPECT_EQ(test, 0, exit_val);
KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp_entry));
}
static void test_fprobe(struct kunit *test)
{
struct fprobe fp = {
.entry_handler = fp_entry_handler,
.exit_handler = fp_exit_handler,
};
current_test = test;
KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp, "fprobe_selftest_target*", NULL));
entry_val = 0;
exit_val = 0;
target(rand1);
KUNIT_EXPECT_NE(test, 0, entry_val);
KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
entry_val = 0;
exit_val = 0;
target2(rand1);
KUNIT_EXPECT_NE(test, 0, entry_val);
KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
}
static void test_fprobe_syms(struct kunit *test)
{
static const char *syms[] = {"fprobe_selftest_target", "fprobe_selftest_target2"};
struct fprobe fp = {
.entry_handler = fp_entry_handler,
.exit_handler = fp_exit_handler,
};
current_test = test;
KUNIT_EXPECT_EQ(test, 0, register_fprobe_syms(&fp, syms, 2));
entry_val = 0;
exit_val = 0;
target(rand1);
KUNIT_EXPECT_NE(test, 0, entry_val);
KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
entry_val = 0;
exit_val = 0;
target2(rand1);
KUNIT_EXPECT_NE(test, 0, entry_val);
KUNIT_EXPECT_EQ(test, entry_val + div_factor, exit_val);
KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
}
/* Test private entry_data */
static void test_fprobe_data(struct kunit *test)
{
struct fprobe fp = {
.entry_handler = fp_entry_handler,
.exit_handler = fp_exit_handler,
.entry_data_size = sizeof(u32),
};
current_test = test;
KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp, "fprobe_selftest_target", NULL));
target(rand1);
KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
}
/* Test nr_maxactive */
static void test_fprobe_nest(struct kunit *test)
{
static const char *syms[] = {"fprobe_selftest_target", "fprobe_selftest_nest_target"};
struct fprobe fp = {
.entry_handler = nest_entry_handler,
.exit_handler = nest_exit_handler,
.nr_maxactive = 1,
};
current_test = test;
KUNIT_EXPECT_EQ(test, 0, register_fprobe_syms(&fp, syms, 2));
target_nest(rand1, target);
KUNIT_EXPECT_EQ(test, 1, fp.nmissed);
KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
}
static void test_fprobe_skip(struct kunit *test)
{
struct fprobe fp = {
.entry_handler = fp_entry_handler,
.exit_handler = fp_exit_handler,
};
current_test = test;
KUNIT_EXPECT_EQ(test, 0, register_fprobe(&fp, "fprobe_selftest_target", NULL));
entry_return_value = 1;
entry_val = 0;
exit_val = 0;
target(rand1);
KUNIT_EXPECT_NE(test, 0, entry_val);
KUNIT_EXPECT_EQ(test, 0, exit_val);
KUNIT_EXPECT_EQ(test, 0, fp.nmissed);
entry_return_value = 0;
KUNIT_EXPECT_EQ(test, 0, unregister_fprobe(&fp));
}
static unsigned long get_ftrace_location(void *func)
{
unsigned long size, addr = (unsigned long)func;
if (!kallsyms_lookup_size_offset(addr, &size, NULL) || !size)
return 0;
return ftrace_location_range(addr, addr + size - 1);
}
static int fprobe_test_init(struct kunit *test)
{
rand1 = get_random_u32_above(div_factor);
target = fprobe_selftest_target;
target2 = fprobe_selftest_target2;
target_nest = fprobe_selftest_nest_target;
target_ip = get_ftrace_location(target);
target2_ip = get_ftrace_location(target2);
target_nest_ip = get_ftrace_location(target_nest);
return 0;
}
static struct kunit_case fprobe_testcases[] = {
KUNIT_CASE(test_fprobe_entry),
KUNIT_CASE(test_fprobe),
KUNIT_CASE(test_fprobe_syms),
KUNIT_CASE(test_fprobe_data),
KUNIT_CASE(test_fprobe_nest),
KUNIT_CASE(test_fprobe_skip),
{}
};
static struct kunit_suite fprobe_test_suite = {
.name = "fprobe_test",
.init = fprobe_test_init,
.test_cases = fprobe_testcases,
};
kunit_test_suites(&fprobe_test_suite);
| linux-master | lib/test_fprobe.c |
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/objagg.h>
struct tokey {
unsigned int id;
};
#define NUM_KEYS 32
static int key_id_index(unsigned int key_id)
{
if (key_id >= NUM_KEYS) {
WARN_ON(1);
return 0;
}
return key_id;
}
#define BUF_LEN 128
struct world {
unsigned int root_count;
unsigned int delta_count;
char next_root_buf[BUF_LEN];
struct objagg_obj *objagg_objs[NUM_KEYS];
unsigned int key_refs[NUM_KEYS];
};
struct root {
struct tokey key;
char buf[BUF_LEN];
};
struct delta {
unsigned int key_id_diff;
};
static struct objagg_obj *world_obj_get(struct world *world,
struct objagg *objagg,
unsigned int key_id)
{
struct objagg_obj *objagg_obj;
struct tokey key;
int err;
key.id = key_id;
objagg_obj = objagg_obj_get(objagg, &key);
if (IS_ERR(objagg_obj)) {
pr_err("Key %u: Failed to get object.\n", key_id);
return objagg_obj;
}
if (!world->key_refs[key_id_index(key_id)]) {
world->objagg_objs[key_id_index(key_id)] = objagg_obj;
} else if (world->objagg_objs[key_id_index(key_id)] != objagg_obj) {
pr_err("Key %u: God another object for the same key.\n",
key_id);
err = -EINVAL;
goto err_key_id_check;
}
world->key_refs[key_id_index(key_id)]++;
return objagg_obj;
err_key_id_check:
objagg_obj_put(objagg, objagg_obj);
return ERR_PTR(err);
}
static void world_obj_put(struct world *world, struct objagg *objagg,
unsigned int key_id)
{
struct objagg_obj *objagg_obj;
if (!world->key_refs[key_id_index(key_id)])
return;
objagg_obj = world->objagg_objs[key_id_index(key_id)];
objagg_obj_put(objagg, objagg_obj);
world->key_refs[key_id_index(key_id)]--;
}
#define MAX_KEY_ID_DIFF 5
static bool delta_check(void *priv, const void *parent_obj, const void *obj)
{
const struct tokey *parent_key = parent_obj;
const struct tokey *key = obj;
int diff = key->id - parent_key->id;
return diff >= 0 && diff <= MAX_KEY_ID_DIFF;
}
static void *delta_create(void *priv, void *parent_obj, void *obj)
{
struct tokey *parent_key = parent_obj;
struct world *world = priv;
struct tokey *key = obj;
int diff = key->id - parent_key->id;
struct delta *delta;
if (!delta_check(priv, parent_obj, obj))
return ERR_PTR(-EINVAL);
delta = kzalloc(sizeof(*delta), GFP_KERNEL);
if (!delta)
return ERR_PTR(-ENOMEM);
delta->key_id_diff = diff;
world->delta_count++;
return delta;
}
static void delta_destroy(void *priv, void *delta_priv)
{
struct delta *delta = delta_priv;
struct world *world = priv;
world->delta_count--;
kfree(delta);
}
static void *root_create(void *priv, void *obj, unsigned int id)
{
struct world *world = priv;
struct tokey *key = obj;
struct root *root;
root = kzalloc(sizeof(*root), GFP_KERNEL);
if (!root)
return ERR_PTR(-ENOMEM);
memcpy(&root->key, key, sizeof(root->key));
memcpy(root->buf, world->next_root_buf, sizeof(root->buf));
world->root_count++;
return root;
}
static void root_destroy(void *priv, void *root_priv)
{
struct root *root = root_priv;
struct world *world = priv;
world->root_count--;
kfree(root);
}
static int test_nodelta_obj_get(struct world *world, struct objagg *objagg,
unsigned int key_id, bool should_create_root)
{
unsigned int orig_root_count = world->root_count;
struct objagg_obj *objagg_obj;
const struct root *root;
int err;
if (should_create_root)
get_random_bytes(world->next_root_buf,
sizeof(world->next_root_buf));
objagg_obj = world_obj_get(world, objagg, key_id);
if (IS_ERR(objagg_obj)) {
pr_err("Key %u: Failed to get object.\n", key_id);
return PTR_ERR(objagg_obj);
}
if (should_create_root) {
if (world->root_count != orig_root_count + 1) {
pr_err("Key %u: Root was not created\n", key_id);
err = -EINVAL;
goto err_check_root_count;
}
} else {
if (world->root_count != orig_root_count) {
pr_err("Key %u: Root was incorrectly created\n",
key_id);
err = -EINVAL;
goto err_check_root_count;
}
}
root = objagg_obj_root_priv(objagg_obj);
if (root->key.id != key_id) {
pr_err("Key %u: Root has unexpected key id\n", key_id);
err = -EINVAL;
goto err_check_key_id;
}
if (should_create_root &&
memcmp(world->next_root_buf, root->buf, sizeof(root->buf))) {
pr_err("Key %u: Buffer does not match the expected content\n",
key_id);
err = -EINVAL;
goto err_check_buf;
}
return 0;
err_check_buf:
err_check_key_id:
err_check_root_count:
objagg_obj_put(objagg, objagg_obj);
return err;
}
static int test_nodelta_obj_put(struct world *world, struct objagg *objagg,
unsigned int key_id, bool should_destroy_root)
{
unsigned int orig_root_count = world->root_count;
world_obj_put(world, objagg, key_id);
if (should_destroy_root) {
if (world->root_count != orig_root_count - 1) {
pr_err("Key %u: Root was not destroyed\n", key_id);
return -EINVAL;
}
} else {
if (world->root_count != orig_root_count) {
pr_err("Key %u: Root was incorrectly destroyed\n",
key_id);
return -EINVAL;
}
}
return 0;
}
static int check_stats_zero(struct objagg *objagg)
{
const struct objagg_stats *stats;
int err = 0;
stats = objagg_stats_get(objagg);
if (IS_ERR(stats))
return PTR_ERR(stats);
if (stats->stats_info_count != 0) {
pr_err("Stats: Object count is not zero while it should be\n");
err = -EINVAL;
}
objagg_stats_put(stats);
return err;
}
static int check_stats_nodelta(struct objagg *objagg)
{
const struct objagg_stats *stats;
int i;
int err;
stats = objagg_stats_get(objagg);
if (IS_ERR(stats))
return PTR_ERR(stats);
if (stats->stats_info_count != NUM_KEYS) {
pr_err("Stats: Unexpected object count (%u expected, %u returned)\n",
NUM_KEYS, stats->stats_info_count);
err = -EINVAL;
goto stats_put;
}
for (i = 0; i < stats->stats_info_count; i++) {
if (stats->stats_info[i].stats.user_count != 2) {
pr_err("Stats: incorrect user count\n");
err = -EINVAL;
goto stats_put;
}
if (stats->stats_info[i].stats.delta_user_count != 2) {
pr_err("Stats: incorrect delta user count\n");
err = -EINVAL;
goto stats_put;
}
}
err = 0;
stats_put:
objagg_stats_put(stats);
return err;
}
static bool delta_check_dummy(void *priv, const void *parent_obj,
const void *obj)
{
return false;
}
static void *delta_create_dummy(void *priv, void *parent_obj, void *obj)
{
return ERR_PTR(-EOPNOTSUPP);
}
static void delta_destroy_dummy(void *priv, void *delta_priv)
{
}
static const struct objagg_ops nodelta_ops = {
.obj_size = sizeof(struct tokey),
.delta_check = delta_check_dummy,
.delta_create = delta_create_dummy,
.delta_destroy = delta_destroy_dummy,
.root_create = root_create,
.root_destroy = root_destroy,
};
static int test_nodelta(void)
{
struct world world = {};
struct objagg *objagg;
int i;
int err;
objagg = objagg_create(&nodelta_ops, NULL, &world);
if (IS_ERR(objagg))
return PTR_ERR(objagg);
err = check_stats_zero(objagg);
if (err)
goto err_stats_first_zero;
/* First round of gets, the root objects should be created */
for (i = 0; i < NUM_KEYS; i++) {
err = test_nodelta_obj_get(&world, objagg, i, true);
if (err)
goto err_obj_first_get;
}
/* Do the second round of gets, all roots are already created,
* make sure that no new root is created
*/
for (i = 0; i < NUM_KEYS; i++) {
err = test_nodelta_obj_get(&world, objagg, i, false);
if (err)
goto err_obj_second_get;
}
err = check_stats_nodelta(objagg);
if (err)
goto err_stats_nodelta;
for (i = NUM_KEYS - 1; i >= 0; i--) {
err = test_nodelta_obj_put(&world, objagg, i, false);
if (err)
goto err_obj_first_put;
}
for (i = NUM_KEYS - 1; i >= 0; i--) {
err = test_nodelta_obj_put(&world, objagg, i, true);
if (err)
goto err_obj_second_put;
}
err = check_stats_zero(objagg);
if (err)
goto err_stats_second_zero;
objagg_destroy(objagg);
return 0;
err_stats_nodelta:
err_obj_first_put:
err_obj_second_get:
for (i--; i >= 0; i--)
world_obj_put(&world, objagg, i);
i = NUM_KEYS;
err_obj_first_get:
err_obj_second_put:
for (i--; i >= 0; i--)
world_obj_put(&world, objagg, i);
err_stats_first_zero:
err_stats_second_zero:
objagg_destroy(objagg);
return err;
}
static const struct objagg_ops delta_ops = {
.obj_size = sizeof(struct tokey),
.delta_check = delta_check,
.delta_create = delta_create,
.delta_destroy = delta_destroy,
.root_create = root_create,
.root_destroy = root_destroy,
};
enum action {
ACTION_GET,
ACTION_PUT,
};
enum expect_delta {
EXPECT_DELTA_SAME,
EXPECT_DELTA_INC,
EXPECT_DELTA_DEC,
};
enum expect_root {
EXPECT_ROOT_SAME,
EXPECT_ROOT_INC,
EXPECT_ROOT_DEC,
};
struct expect_stats_info {
struct objagg_obj_stats stats;
bool is_root;
unsigned int key_id;
};
struct expect_stats {
unsigned int info_count;
struct expect_stats_info info[NUM_KEYS];
};
struct action_item {
unsigned int key_id;
enum action action;
enum expect_delta expect_delta;
enum expect_root expect_root;
struct expect_stats expect_stats;
};
#define EXPECT_STATS(count, ...) \
{ \
.info_count = count, \
.info = { __VA_ARGS__ } \
}
#define ROOT(key_id, user_count, delta_user_count) \
{{user_count, delta_user_count}, true, key_id}
#define DELTA(key_id, user_count) \
{{user_count, user_count}, false, key_id}
static const struct action_item action_items[] = {
{
1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
EXPECT_STATS(1, ROOT(1, 1, 1)),
}, /* r: 1 d: */
{
7, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
EXPECT_STATS(2, ROOT(1, 1, 1), ROOT(7, 1, 1)),
}, /* r: 1, 7 d: */
{
3, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
EXPECT_STATS(3, ROOT(1, 1, 2), ROOT(7, 1, 1),
DELTA(3, 1)),
}, /* r: 1, 7 d: 3^1 */
{
5, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
EXPECT_STATS(4, ROOT(1, 1, 3), ROOT(7, 1, 1),
DELTA(3, 1), DELTA(5, 1)),
}, /* r: 1, 7 d: 3^1, 5^1 */
{
3, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(4, ROOT(1, 1, 4), ROOT(7, 1, 1),
DELTA(3, 2), DELTA(5, 1)),
}, /* r: 1, 7 d: 3^1, 3^1, 5^1 */
{
1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(4, ROOT(1, 2, 5), ROOT(7, 1, 1),
DELTA(3, 2), DELTA(5, 1)),
}, /* r: 1, 1, 7 d: 3^1, 3^1, 5^1 */
{
30, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
EXPECT_STATS(5, ROOT(1, 2, 5), ROOT(7, 1, 1), ROOT(30, 1, 1),
DELTA(3, 2), DELTA(5, 1)),
}, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1 */
{
8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 2), ROOT(30, 1, 1),
DELTA(3, 2), DELTA(5, 1), DELTA(8, 1)),
}, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7 */
{
8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 3), ROOT(30, 1, 1),
DELTA(3, 2), DELTA(8, 2), DELTA(5, 1)),
}, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7, 8^7 */
{
3, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(6, ROOT(1, 2, 4), ROOT(7, 1, 3), ROOT(30, 1, 1),
DELTA(8, 2), DELTA(3, 1), DELTA(5, 1)),
}, /* r: 1, 1, 7, 30 d: 3^1, 5^1, 8^7, 8^7 */
{
3, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(1, 2, 3), ROOT(7, 1, 3), ROOT(30, 1, 1),
DELTA(8, 2), DELTA(5, 1)),
}, /* r: 1, 1, 7, 30 d: 5^1, 8^7, 8^7 */
{
1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(1, 1, 2), ROOT(30, 1, 1),
DELTA(8, 2), DELTA(5, 1)),
}, /* r: 1, 7, 30 d: 5^1, 8^7, 8^7 */
{
1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(1, 0, 1),
DELTA(8, 2), DELTA(5, 1)),
}, /* r: 7, 30 d: 5^1, 8^7, 8^7 */
{
5, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC,
EXPECT_STATS(3, ROOT(7, 1, 3), ROOT(30, 1, 1),
DELTA(8, 2)),
}, /* r: 7, 30 d: 8^7, 8^7 */
{
5, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
EXPECT_STATS(4, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(5, 1, 1),
DELTA(8, 2)),
}, /* r: 7, 30, 5 d: 8^7, 8^7 */
{
6, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1),
DELTA(8, 2), DELTA(6, 1)),
}, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */
{
8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(7, 1, 4), ROOT(5, 1, 2), ROOT(30, 1, 1),
DELTA(8, 3), DELTA(6, 1)),
}, /* r: 7, 30, 5 d: 8^7, 8^7, 8^7, 6^5 */
{
8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1),
DELTA(8, 2), DELTA(6, 1)),
}, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */
{
8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(7, 1, 2), ROOT(5, 1, 2), ROOT(30, 1, 1),
DELTA(8, 1), DELTA(6, 1)),
}, /* r: 7, 30, 5 d: 8^7, 6^5 */
{
8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
EXPECT_STATS(4, ROOT(5, 1, 2), ROOT(7, 1, 1), ROOT(30, 1, 1),
DELTA(6, 1)),
}, /* r: 7, 30, 5 d: 6^5 */
{
8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
EXPECT_STATS(5, ROOT(5, 1, 3), ROOT(7, 1, 1), ROOT(30, 1, 1),
DELTA(6, 1), DELTA(8, 1)),
}, /* r: 7, 30, 5 d: 6^5, 8^5 */
{
7, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC,
EXPECT_STATS(4, ROOT(5, 1, 3), ROOT(30, 1, 1),
DELTA(6, 1), DELTA(8, 1)),
}, /* r: 30, 5 d: 6^5, 8^5 */
{
30, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC,
EXPECT_STATS(3, ROOT(5, 1, 3),
DELTA(6, 1), DELTA(8, 1)),
}, /* r: 5 d: 6^5, 8^5 */
{
5, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
EXPECT_STATS(3, ROOT(5, 0, 2),
DELTA(6, 1), DELTA(8, 1)),
}, /* r: d: 6^5, 8^5 */
{
6, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
EXPECT_STATS(2, ROOT(5, 0, 1),
DELTA(8, 1)),
}, /* r: d: 6^5 */
{
8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC,
EXPECT_STATS(0, ),
}, /* r: d: */
};
static int check_expect(struct world *world,
const struct action_item *action_item,
unsigned int orig_delta_count,
unsigned int orig_root_count)
{
unsigned int key_id = action_item->key_id;
switch (action_item->expect_delta) {
case EXPECT_DELTA_SAME:
if (orig_delta_count != world->delta_count) {
pr_err("Key %u: Delta count changed while expected to remain the same.\n",
key_id);
return -EINVAL;
}
break;
case EXPECT_DELTA_INC:
if (WARN_ON(action_item->action == ACTION_PUT))
return -EINVAL;
if (orig_delta_count + 1 != world->delta_count) {
pr_err("Key %u: Delta count was not incremented.\n",
key_id);
return -EINVAL;
}
break;
case EXPECT_DELTA_DEC:
if (WARN_ON(action_item->action == ACTION_GET))
return -EINVAL;
if (orig_delta_count - 1 != world->delta_count) {
pr_err("Key %u: Delta count was not decremented.\n",
key_id);
return -EINVAL;
}
break;
}
switch (action_item->expect_root) {
case EXPECT_ROOT_SAME:
if (orig_root_count != world->root_count) {
pr_err("Key %u: Root count changed while expected to remain the same.\n",
key_id);
return -EINVAL;
}
break;
case EXPECT_ROOT_INC:
if (WARN_ON(action_item->action == ACTION_PUT))
return -EINVAL;
if (orig_root_count + 1 != world->root_count) {
pr_err("Key %u: Root count was not incremented.\n",
key_id);
return -EINVAL;
}
break;
case EXPECT_ROOT_DEC:
if (WARN_ON(action_item->action == ACTION_GET))
return -EINVAL;
if (orig_root_count - 1 != world->root_count) {
pr_err("Key %u: Root count was not decremented.\n",
key_id);
return -EINVAL;
}
}
return 0;
}
static unsigned int obj_to_key_id(struct objagg_obj *objagg_obj)
{
const struct tokey *root_key;
const struct delta *delta;
unsigned int key_id;
root_key = objagg_obj_root_priv(objagg_obj);
key_id = root_key->id;
delta = objagg_obj_delta_priv(objagg_obj);
if (delta)
key_id += delta->key_id_diff;
return key_id;
}
static int
check_expect_stats_nums(const struct objagg_obj_stats_info *stats_info,
const struct expect_stats_info *expect_stats_info,
const char **errmsg)
{
if (stats_info->is_root != expect_stats_info->is_root) {
if (errmsg)
*errmsg = "Incorrect root/delta indication";
return -EINVAL;
}
if (stats_info->stats.user_count !=
expect_stats_info->stats.user_count) {
if (errmsg)
*errmsg = "Incorrect user count";
return -EINVAL;
}
if (stats_info->stats.delta_user_count !=
expect_stats_info->stats.delta_user_count) {
if (errmsg)
*errmsg = "Incorrect delta user count";
return -EINVAL;
}
return 0;
}
static int
check_expect_stats_key_id(const struct objagg_obj_stats_info *stats_info,
const struct expect_stats_info *expect_stats_info,
const char **errmsg)
{
if (obj_to_key_id(stats_info->objagg_obj) !=
expect_stats_info->key_id) {
if (errmsg)
*errmsg = "incorrect key id";
return -EINVAL;
}
return 0;
}
static int check_expect_stats_neigh(const struct objagg_stats *stats,
const struct expect_stats *expect_stats,
int pos)
{
int i;
int err;
for (i = pos - 1; i >= 0; i--) {
err = check_expect_stats_nums(&stats->stats_info[i],
&expect_stats->info[pos], NULL);
if (err)
break;
err = check_expect_stats_key_id(&stats->stats_info[i],
&expect_stats->info[pos], NULL);
if (!err)
return 0;
}
for (i = pos + 1; i < stats->stats_info_count; i++) {
err = check_expect_stats_nums(&stats->stats_info[i],
&expect_stats->info[pos], NULL);
if (err)
break;
err = check_expect_stats_key_id(&stats->stats_info[i],
&expect_stats->info[pos], NULL);
if (!err)
return 0;
}
return -EINVAL;
}
static int __check_expect_stats(const struct objagg_stats *stats,
const struct expect_stats *expect_stats,
const char **errmsg)
{
int i;
int err;
if (stats->stats_info_count != expect_stats->info_count) {
*errmsg = "Unexpected object count";
return -EINVAL;
}
for (i = 0; i < stats->stats_info_count; i++) {
err = check_expect_stats_nums(&stats->stats_info[i],
&expect_stats->info[i], errmsg);
if (err)
return err;
err = check_expect_stats_key_id(&stats->stats_info[i],
&expect_stats->info[i], errmsg);
if (err) {
/* It is possible that one of the neighbor stats with
* the same numbers has the correct key id, so check it
*/
err = check_expect_stats_neigh(stats, expect_stats, i);
if (err)
return err;
}
}
return 0;
}
static int check_expect_stats(struct objagg *objagg,
const struct expect_stats *expect_stats,
const char **errmsg)
{
const struct objagg_stats *stats;
int err;
stats = objagg_stats_get(objagg);
if (IS_ERR(stats)) {
*errmsg = "objagg_stats_get() failed.";
return PTR_ERR(stats);
}
err = __check_expect_stats(stats, expect_stats, errmsg);
objagg_stats_put(stats);
return err;
}
static int test_delta_action_item(struct world *world,
struct objagg *objagg,
const struct action_item *action_item,
bool inverse)
{
unsigned int orig_delta_count = world->delta_count;
unsigned int orig_root_count = world->root_count;
unsigned int key_id = action_item->key_id;
enum action action = action_item->action;
struct objagg_obj *objagg_obj;
const char *errmsg;
int err;
if (inverse)
action = action == ACTION_GET ? ACTION_PUT : ACTION_GET;
switch (action) {
case ACTION_GET:
objagg_obj = world_obj_get(world, objagg, key_id);
if (IS_ERR(objagg_obj))
return PTR_ERR(objagg_obj);
break;
case ACTION_PUT:
world_obj_put(world, objagg, key_id);
break;
}
if (inverse)
return 0;
err = check_expect(world, action_item,
orig_delta_count, orig_root_count);
if (err)
goto errout;
err = check_expect_stats(objagg, &action_item->expect_stats, &errmsg);
if (err) {
pr_err("Key %u: Stats: %s\n", action_item->key_id, errmsg);
goto errout;
}
return 0;
errout:
/* This can only happen when the action is not inverted.
* So in case of an error, clean up by doing the inverse action.
*/
test_delta_action_item(world, objagg, action_item, true);
return err;
}
static int test_delta(void)
{
struct world world = {};
struct objagg *objagg;
int i;
int err;
objagg = objagg_create(&delta_ops, NULL, &world);
if (IS_ERR(objagg))
return PTR_ERR(objagg);
for (i = 0; i < ARRAY_SIZE(action_items); i++) {
err = test_delta_action_item(&world, objagg,
&action_items[i], false);
if (err)
goto err_do_action_item;
}
objagg_destroy(objagg);
return 0;
err_do_action_item:
for (i--; i >= 0; i--)
test_delta_action_item(&world, objagg, &action_items[i], true);
objagg_destroy(objagg);
return err;
}
struct hints_case {
const unsigned int *key_ids;
size_t key_ids_count;
struct expect_stats expect_stats;
struct expect_stats expect_stats_hints;
};
static const unsigned int hints_case_key_ids[] = {
1, 7, 3, 5, 3, 1, 30, 8, 8, 5, 6, 8,
};
static const struct hints_case hints_case = {
.key_ids = hints_case_key_ids,
.key_ids_count = ARRAY_SIZE(hints_case_key_ids),
.expect_stats =
EXPECT_STATS(7, ROOT(1, 2, 7), ROOT(7, 1, 4), ROOT(30, 1, 1),
DELTA(8, 3), DELTA(3, 2),
DELTA(5, 2), DELTA(6, 1)),
.expect_stats_hints =
EXPECT_STATS(7, ROOT(3, 2, 9), ROOT(1, 2, 2), ROOT(30, 1, 1),
DELTA(8, 3), DELTA(5, 2),
DELTA(6, 1), DELTA(7, 1)),
};
static void __pr_debug_stats(const struct objagg_stats *stats)
{
int i;
for (i = 0; i < stats->stats_info_count; i++)
pr_debug("Stat index %d key %u: u %d, d %d, %s\n", i,
obj_to_key_id(stats->stats_info[i].objagg_obj),
stats->stats_info[i].stats.user_count,
stats->stats_info[i].stats.delta_user_count,
stats->stats_info[i].is_root ? "root" : "noroot");
}
static void pr_debug_stats(struct objagg *objagg)
{
const struct objagg_stats *stats;
stats = objagg_stats_get(objagg);
if (IS_ERR(stats))
return;
__pr_debug_stats(stats);
objagg_stats_put(stats);
}
static void pr_debug_hints_stats(struct objagg_hints *objagg_hints)
{
const struct objagg_stats *stats;
stats = objagg_hints_stats_get(objagg_hints);
if (IS_ERR(stats))
return;
__pr_debug_stats(stats);
objagg_stats_put(stats);
}
static int check_expect_hints_stats(struct objagg_hints *objagg_hints,
const struct expect_stats *expect_stats,
const char **errmsg)
{
const struct objagg_stats *stats;
int err;
stats = objagg_hints_stats_get(objagg_hints);
if (IS_ERR(stats))
return PTR_ERR(stats);
err = __check_expect_stats(stats, expect_stats, errmsg);
objagg_stats_put(stats);
return err;
}
static int test_hints_case(const struct hints_case *hints_case)
{
struct objagg_obj *objagg_obj;
struct objagg_hints *hints;
struct world world2 = {};
struct world world = {};
struct objagg *objagg2;
struct objagg *objagg;
const char *errmsg;
int i;
int err;
objagg = objagg_create(&delta_ops, NULL, &world);
if (IS_ERR(objagg))
return PTR_ERR(objagg);
for (i = 0; i < hints_case->key_ids_count; i++) {
objagg_obj = world_obj_get(&world, objagg,
hints_case->key_ids[i]);
if (IS_ERR(objagg_obj)) {
err = PTR_ERR(objagg_obj);
goto err_world_obj_get;
}
}
pr_debug_stats(objagg);
err = check_expect_stats(objagg, &hints_case->expect_stats, &errmsg);
if (err) {
pr_err("Stats: %s\n", errmsg);
goto err_check_expect_stats;
}
hints = objagg_hints_get(objagg, OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
if (IS_ERR(hints)) {
err = PTR_ERR(hints);
goto err_hints_get;
}
pr_debug_hints_stats(hints);
err = check_expect_hints_stats(hints, &hints_case->expect_stats_hints,
&errmsg);
if (err) {
pr_err("Hints stats: %s\n", errmsg);
goto err_check_expect_hints_stats;
}
objagg2 = objagg_create(&delta_ops, hints, &world2);
if (IS_ERR(objagg2))
return PTR_ERR(objagg2);
for (i = 0; i < hints_case->key_ids_count; i++) {
objagg_obj = world_obj_get(&world2, objagg2,
hints_case->key_ids[i]);
if (IS_ERR(objagg_obj)) {
err = PTR_ERR(objagg_obj);
goto err_world2_obj_get;
}
}
pr_debug_stats(objagg2);
err = check_expect_stats(objagg2, &hints_case->expect_stats_hints,
&errmsg);
if (err) {
pr_err("Stats2: %s\n", errmsg);
goto err_check_expect_stats2;
}
err = 0;
err_check_expect_stats2:
err_world2_obj_get:
for (i--; i >= 0; i--)
world_obj_put(&world2, objagg, hints_case->key_ids[i]);
i = hints_case->key_ids_count;
objagg_destroy(objagg2);
err_check_expect_hints_stats:
objagg_hints_put(hints);
err_hints_get:
err_check_expect_stats:
err_world_obj_get:
for (i--; i >= 0; i--)
world_obj_put(&world, objagg, hints_case->key_ids[i]);
objagg_destroy(objagg);
return err;
}
static int test_hints(void)
{
return test_hints_case(&hints_case);
}
static int __init test_objagg_init(void)
{
int err;
err = test_nodelta();
if (err)
return err;
err = test_delta();
if (err)
return err;
return test_hints();
}
static void __exit test_objagg_exit(void)
{
}
module_init(test_objagg_init);
module_exit(test_objagg_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <[email protected]>");
MODULE_DESCRIPTION("Test module for objagg");
| linux-master | lib/test_objagg.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* lib/plist.c
*
* Descending-priority-sorted double-linked list
*
* (C) 2002-2003 Intel Corp
* Inaky Perez-Gonzalez <[email protected]>.
*
* 2001-2005 (c) MontaVista Software, Inc.
* Daniel Walker <[email protected]>
*
* (C) 2005 Thomas Gleixner <[email protected]>
*
* Simplifications of the original code by
* Oleg Nesterov <[email protected]>
*
* Based on simple lists (include/linux/list.h).
*
* This file contains the add / del functions which are considered to
* be too large to inline. See include/linux/plist.h for further
* information.
*/
#include <linux/bug.h>
#include <linux/plist.h>
#ifdef CONFIG_DEBUG_PLIST
static struct plist_head test_head;
static void plist_check_prev_next(struct list_head *t, struct list_head *p,
struct list_head *n)
{
WARN(n->prev != p || p->next != n,
"top: %p, n: %p, p: %p\n"
"prev: %p, n: %p, p: %p\n"
"next: %p, n: %p, p: %p\n",
t, t->next, t->prev,
p, p->next, p->prev,
n, n->next, n->prev);
}
static void plist_check_list(struct list_head *top)
{
struct list_head *prev = top, *next = top->next;
plist_check_prev_next(top, prev, next);
while (next != top) {
prev = next;
next = prev->next;
plist_check_prev_next(top, prev, next);
}
}
static void plist_check_head(struct plist_head *head)
{
if (!plist_head_empty(head))
plist_check_list(&plist_first(head)->prio_list);
plist_check_list(&head->node_list);
}
#else
# define plist_check_head(h) do { } while (0)
#endif
/**
* plist_add - add @node to @head
*
* @node: &struct plist_node pointer
* @head: &struct plist_head pointer
*/
void plist_add(struct plist_node *node, struct plist_head *head)
{
struct plist_node *first, *iter, *prev = NULL;
struct list_head *node_next = &head->node_list;
plist_check_head(head);
WARN_ON(!plist_node_empty(node));
WARN_ON(!list_empty(&node->prio_list));
if (plist_head_empty(head))
goto ins_node;
first = iter = plist_first(head);
do {
if (node->prio < iter->prio) {
node_next = &iter->node_list;
break;
}
prev = iter;
iter = list_entry(iter->prio_list.next,
struct plist_node, prio_list);
} while (iter != first);
if (!prev || prev->prio != node->prio)
list_add_tail(&node->prio_list, &iter->prio_list);
ins_node:
list_add_tail(&node->node_list, node_next);
plist_check_head(head);
}
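/*
 * Editor's illustrative sketch, not part of the original file: nodes are
 * kept sorted by ascending ->prio (a lower value means a higher
 * priority), so plist_first() always returns the most important entry.
 * The function name is hypothetical.
 */
static inline void example_plist_insert(struct plist_head *head,
					struct plist_node *node, int prio)
{
	plist_node_init(node, prio);
	plist_add(node, head);
	/* The head of the list never has a larger prio than any member. */
	WARN_ON(plist_first(head)->prio > node->prio);
}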
/**
* plist_del - Remove a @node from plist.
*
* @node: &struct plist_node pointer - entry to be removed
* @head: &struct plist_head pointer - list head
*/
void plist_del(struct plist_node *node, struct plist_head *head)
{
plist_check_head(head);
if (!list_empty(&node->prio_list)) {
if (node->node_list.next != &head->node_list) {
struct plist_node *next;
next = list_entry(node->node_list.next,
struct plist_node, node_list);
/* add the next plist_node into prio_list */
if (list_empty(&next->prio_list))
list_add(&next->prio_list, &node->prio_list);
}
list_del_init(&node->prio_list);
}
list_del_init(&node->node_list);
plist_check_head(head);
}
/**
* plist_requeue - Requeue @node at end of same-prio entries.
*
* This is essentially an optimized plist_del() followed by
* plist_add(). It moves an entry already in the plist to
* after any other same-priority entries.
*
* @node: &struct plist_node pointer - entry to be moved
* @head: &struct plist_head pointer - list head
*/
void plist_requeue(struct plist_node *node, struct plist_head *head)
{
struct plist_node *iter;
struct list_head *node_next = &head->node_list;
plist_check_head(head);
BUG_ON(plist_head_empty(head));
BUG_ON(plist_node_empty(node));
if (node == plist_last(head))
return;
iter = plist_next(node);
if (node->prio != iter->prio)
return;
plist_del(node, head);
plist_for_each_continue(iter, head) {
if (node->prio != iter->prio) {
node_next = &iter->node_list;
break;
}
}
list_add_tail(&node->node_list, node_next);
plist_check_head(head);
}
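/*
 * Editor's illustrative sketch, not part of the original file:
 * plist_requeue() suits round-robin selection among entries sharing the
 * highest priority - take the first node, then rotate it behind its
 * equal-priority peers.  The function name is hypothetical.
 */
static inline struct plist_node *example_plist_pick_rr(struct plist_head *head)
{
	struct plist_node *node;

	if (plist_head_empty(head))
		return NULL;
	node = plist_first(head);
	plist_requeue(node, head);
	return node;
}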
#ifdef CONFIG_DEBUG_PLIST
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/module.h>
#include <linux/init.h>
static struct plist_node __initdata test_node[241];
static void __init plist_test_check(int nr_expect)
{
struct plist_node *first, *prio_pos, *node_pos;
if (plist_head_empty(&test_head)) {
BUG_ON(nr_expect != 0);
return;
}
prio_pos = first = plist_first(&test_head);
plist_for_each(node_pos, &test_head) {
if (nr_expect-- < 0)
break;
if (node_pos == first)
continue;
if (node_pos->prio == prio_pos->prio) {
BUG_ON(!list_empty(&node_pos->prio_list));
continue;
}
BUG_ON(prio_pos->prio > node_pos->prio);
BUG_ON(prio_pos->prio_list.next != &node_pos->prio_list);
prio_pos = node_pos;
}
BUG_ON(nr_expect != 0);
BUG_ON(prio_pos->prio_list.next != &first->prio_list);
}
static void __init plist_test_requeue(struct plist_node *node)
{
plist_requeue(node, &test_head);
if (node != plist_last(&test_head))
BUG_ON(node->prio == plist_next(node)->prio);
}
static int __init plist_test(void)
{
int nr_expect = 0, i, loop;
unsigned int r = local_clock();
printk(KERN_DEBUG "start plist test\n");
plist_head_init(&test_head);
for (i = 0; i < ARRAY_SIZE(test_node); i++)
plist_node_init(test_node + i, 0);
for (loop = 0; loop < 1000; loop++) {
r = r * 193939 % 47629;
i = r % ARRAY_SIZE(test_node);
if (plist_node_empty(test_node + i)) {
r = r * 193939 % 47629;
test_node[i].prio = r % 99;
plist_add(test_node + i, &test_head);
nr_expect++;
} else {
plist_del(test_node + i, &test_head);
nr_expect--;
}
plist_test_check(nr_expect);
if (!plist_node_empty(test_node + i)) {
plist_test_requeue(test_node + i);
plist_test_check(nr_expect);
}
}
for (i = 0; i < ARRAY_SIZE(test_node); i++) {
if (plist_node_empty(test_node + i))
continue;
plist_del(test_node + i, &test_head);
nr_expect--;
plist_test_check(nr_expect);
}
printk(KERN_DEBUG "end plist test\n");
return 0;
}
module_init(plist_test);
#endif
| linux-master | lib/plist.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* klist.c - Routines for manipulating klists.
*
* Copyright (C) 2005 Patrick Mochel
*
* This klist interface provides a couple of structures that wrap around
* struct list_head to provide explicit list "head" (struct klist) and list
* "node" (struct klist_node) objects. For struct klist, a spinlock is
* included that protects access to the actual list itself. struct
* klist_node provides a pointer to the klist that owns it and a kref
* reference count that indicates the number of current users of that node
* in the list.
*
* The entire point is to provide an interface for iterating over a list
* that is safe and allows for modification of the list during the
* iteration (e.g. insertion and removal), including modification of the
* current node on the list.
*
* It works using a 3rd object type - struct klist_iter - that is declared
* and initialized before an iteration. klist_next() is used to acquire the
* next element in the list. It returns NULL if there are no more items.
* Internally, that routine takes the klist's lock, decrements the
* reference count of the previous klist_node and increments the count of
* the next klist_node. It then drops the lock and returns.
*
* There are primitives for adding and removing nodes to/from a klist.
* When deleting, klist_del() will simply decrement the reference count.
* Only when the count goes to 0 is the node removed from the list.
* klist_remove() will try to delete the node from the list and block until
* it is actually removed. This is useful for objects (like devices) that
* have been removed from the system and must be freed (but must wait until
* all accessors have finished).
*/
#include <linux/klist.h>
#include <linux/export.h>
#include <linux/sched.h>
/*
* Use the lowest bit of n_klist to mark deleted nodes and exclude
* dead ones from iteration.
*/
#define KNODE_DEAD 1LU
#define KNODE_KLIST_MASK ~KNODE_DEAD
static struct klist *knode_klist(struct klist_node *knode)
{
return (struct klist *)
((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
}
static bool knode_dead(struct klist_node *knode)
{
return (unsigned long)knode->n_klist & KNODE_DEAD;
}
static void knode_set_klist(struct klist_node *knode, struct klist *klist)
{
knode->n_klist = klist;
/* no knode deserves to start its life dead */
WARN_ON(knode_dead(knode));
}
static void knode_kill(struct klist_node *knode)
{
/* and no knode should die twice ever either, see we're very humane */
WARN_ON(knode_dead(knode));
*(unsigned long *)&knode->n_klist |= KNODE_DEAD;
}
/**
* klist_init - Initialize a klist structure.
* @k: The klist we're initializing.
* @get: The get function for the embedding object (NULL if none)
* @put: The put function for the embedding object (NULL if none)
*
* Initialises the klist structure. If the klist_node structures are
* going to be embedded in refcounted objects (necessary for safe
* deletion) then the get/put arguments are used to initialise
* functions that take and release references on the embedding
* objects.
*/
void klist_init(struct klist *k, void (*get)(struct klist_node *),
void (*put)(struct klist_node *))
{
INIT_LIST_HEAD(&k->k_list);
spin_lock_init(&k->k_lock);
k->get = get;
k->put = put;
}
EXPORT_SYMBOL_GPL(klist_init);
static void add_head(struct klist *k, struct klist_node *n)
{
spin_lock(&k->k_lock);
list_add(&n->n_node, &k->k_list);
spin_unlock(&k->k_lock);
}
static void add_tail(struct klist *k, struct klist_node *n)
{
spin_lock(&k->k_lock);
list_add_tail(&n->n_node, &k->k_list);
spin_unlock(&k->k_lock);
}
static void klist_node_init(struct klist *k, struct klist_node *n)
{
INIT_LIST_HEAD(&n->n_node);
kref_init(&n->n_ref);
knode_set_klist(n, k);
if (k->get)
k->get(n);
}
/**
* klist_add_head - Initialize a klist_node and add it to front.
* @n: node we're adding.
* @k: klist it's going on.
*/
void klist_add_head(struct klist_node *n, struct klist *k)
{
klist_node_init(k, n);
add_head(k, n);
}
EXPORT_SYMBOL_GPL(klist_add_head);
/**
* klist_add_tail - Initialize a klist_node and add it to back.
* @n: node we're adding.
* @k: klist it's going on.
*/
void klist_add_tail(struct klist_node *n, struct klist *k)
{
klist_node_init(k, n);
add_tail(k, n);
}
EXPORT_SYMBOL_GPL(klist_add_tail);
/**
* klist_add_behind - Init a klist_node and add it after an existing node
* @n: node we're adding.
* @pos: node to put @n after
*/
void klist_add_behind(struct klist_node *n, struct klist_node *pos)
{
struct klist *k = knode_klist(pos);
klist_node_init(k, n);
spin_lock(&k->k_lock);
list_add(&n->n_node, &pos->n_node);
spin_unlock(&k->k_lock);
}
EXPORT_SYMBOL_GPL(klist_add_behind);
/**
* klist_add_before - Init a klist_node and add it before an existing node
* @n: node we're adding.
* @pos: node to put @n before
*/
void klist_add_before(struct klist_node *n, struct klist_node *pos)
{
struct klist *k = knode_klist(pos);
klist_node_init(k, n);
spin_lock(&k->k_lock);
list_add_tail(&n->n_node, &pos->n_node);
spin_unlock(&k->k_lock);
}
EXPORT_SYMBOL_GPL(klist_add_before);
struct klist_waiter {
struct list_head list;
struct klist_node *node;
struct task_struct *process;
int woken;
};
static DEFINE_SPINLOCK(klist_remove_lock);
static LIST_HEAD(klist_remove_waiters);
static void klist_release(struct kref *kref)
{
struct klist_waiter *waiter, *tmp;
struct klist_node *n = container_of(kref, struct klist_node, n_ref);
WARN_ON(!knode_dead(n));
list_del(&n->n_node);
spin_lock(&klist_remove_lock);
list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) {
if (waiter->node != n)
continue;
list_del(&waiter->list);
waiter->woken = 1;
mb();
wake_up_process(waiter->process);
}
spin_unlock(&klist_remove_lock);
knode_set_klist(n, NULL);
}
static int klist_dec_and_del(struct klist_node *n)
{
return kref_put(&n->n_ref, klist_release);
}
static void klist_put(struct klist_node *n, bool kill)
{
struct klist *k = knode_klist(n);
void (*put)(struct klist_node *) = k->put;
spin_lock(&k->k_lock);
if (kill)
knode_kill(n);
if (!klist_dec_and_del(n))
put = NULL;
spin_unlock(&k->k_lock);
if (put)
put(n);
}
/**
* klist_del - Decrement the reference count of node and try to remove.
* @n: node we're deleting.
*/
void klist_del(struct klist_node *n)
{
klist_put(n, true);
}
EXPORT_SYMBOL_GPL(klist_del);
/**
* klist_remove - Decrement the refcount of node and wait for it to go away.
* @n: node we're removing.
*/
void klist_remove(struct klist_node *n)
{
struct klist_waiter waiter;
waiter.node = n;
waiter.process = current;
waiter.woken = 0;
spin_lock(&klist_remove_lock);
list_add(&waiter.list, &klist_remove_waiters);
spin_unlock(&klist_remove_lock);
klist_del(n);
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (waiter.woken)
break;
schedule();
}
__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL_GPL(klist_remove);
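/*
 * Editor's illustrative sketch, not part of the original file: an object
 * being torn down uses klist_remove() so it only proceeds once every
 * iterator has dropped its reference to the node; klist_del() alone can
 * return while a walker still holds the node.  The structure and
 * function names are hypothetical.
 */
struct example_item {
	struct klist_node knode;
};

static inline void example_item_unregister(struct example_item *item)
{
	/* Blocks until all klist_iter users have put their references. */
	klist_remove(&item->knode);
	/* Only now is it safe to free the embedding object. */
}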
/**
* klist_node_attached - Say whether a node is bound to a list or not.
* @n: Node that we're testing.
*/
int klist_node_attached(struct klist_node *n)
{
return (n->n_klist != NULL);
}
EXPORT_SYMBOL_GPL(klist_node_attached);
/**
* klist_iter_init_node - Initialize a klist_iter structure.
* @k: klist we're iterating.
* @i: klist_iter we're filling.
* @n: node to start with.
*
* Similar to klist_iter_init(), but starts the action off with @n,
* instead of with the list head.
*/
void klist_iter_init_node(struct klist *k, struct klist_iter *i,
struct klist_node *n)
{
i->i_klist = k;
i->i_cur = NULL;
if (n && kref_get_unless_zero(&n->n_ref))
i->i_cur = n;
}
EXPORT_SYMBOL_GPL(klist_iter_init_node);
/**
* klist_iter_init - Initialize a klist_iter structure.
* @k: klist we're iterating.
* @i: klist_iter structure we're filling.
*
* Similar to klist_iter_init_node(), but starts with the list head.
*/
void klist_iter_init(struct klist *k, struct klist_iter *i)
{
klist_iter_init_node(k, i, NULL);
}
EXPORT_SYMBOL_GPL(klist_iter_init);
/**
* klist_iter_exit - Finish a list iteration.
* @i: Iterator structure.
*
* Must be called when done iterating over list, as it decrements the
* refcount of the current node. Necessary in case iteration exited before
* the end of the list was reached, and always good form.
*/
void klist_iter_exit(struct klist_iter *i)
{
if (i->i_cur) {
klist_put(i->i_cur, false);
i->i_cur = NULL;
}
}
EXPORT_SYMBOL_GPL(klist_iter_exit);
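/*
 * Editor's illustrative sketch, not part of the original file: the
 * iteration pattern described in the header comment normally looks like
 * this.  klist_iter_exit() must run even when the walk stops early so
 * the last node's reference is dropped.  The function name is
 * hypothetical.
 */
static inline void example_klist_walk(struct klist *k)
{
	struct klist_iter iter;
	struct klist_node *n;

	klist_iter_init(k, &iter);
	while ((n = klist_next(&iter)) != NULL) {
		/* container_of(n, ...) reaches the embedding object. */
	}
	klist_iter_exit(&iter);
}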
static struct klist_node *to_klist_node(struct list_head *n)
{
return container_of(n, struct klist_node, n_node);
}
/**
* klist_prev - Ante up prev node in list.
* @i: Iterator structure.
*
* First grab list lock. Decrement the reference count of the previous
* node, if there was one. Grab the prev node, increment its reference
* count, drop the lock, and return that prev node.
*/
struct klist_node *klist_prev(struct klist_iter *i)
{
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *prev;
unsigned long flags;
spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
prev = to_klist_node(last->n_node.prev);
if (!klist_dec_and_del(last))
put = NULL;
} else
prev = to_klist_node(i->i_klist->k_list.prev);
i->i_cur = NULL;
while (prev != to_klist_node(&i->i_klist->k_list)) {
if (likely(!knode_dead(prev))) {
kref_get(&prev->n_ref);
i->i_cur = prev;
break;
}
prev = to_klist_node(prev->n_node.prev);
}
spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
return i->i_cur;
}
EXPORT_SYMBOL_GPL(klist_prev);
/**
* klist_next - Ante up next node in list.
* @i: Iterator structure.
*
* First grab list lock. Decrement the reference count of the previous
* node, if there was one. Grab the next node, increment its reference
* count, drop the lock, and return that next node.
*/
struct klist_node *klist_next(struct klist_iter *i)
{
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *next;
unsigned long flags;
spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
next = to_klist_node(last->n_node.next);
if (!klist_dec_and_del(last))
put = NULL;
} else
next = to_klist_node(i->i_klist->k_list.next);
i->i_cur = NULL;
while (next != to_klist_node(&i->i_klist->k_list)) {
if (likely(!knode_dead(next))) {
kref_get(&next->n_ref);
i->i_cur = next;
break;
}
next = to_klist_node(next->n_node.next);
}
spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
return i->i_cur;
}
EXPORT_SYMBOL_GPL(klist_next);
| linux-master | lib/klist.c |
#include <linux/module.h>
#include <linux/glob.h>
/*
* The only reason this code can be compiled as a module is because the
* ATA code that depends on it can be as well. In practice, they're
* both usually compiled in and the module overhead goes away.
*/
MODULE_DESCRIPTION("glob(7) matching");
MODULE_LICENSE("Dual MIT/GPL");
/**
* glob_match - Shell-style pattern matching, like !fnmatch(pat, str, 0)
* @pat: Shell-style pattern to match, e.g. "*.[ch]".
* @str: String to match. The pattern must match the entire string.
*
* Perform shell-style glob matching, returning true (1) if the match
* succeeds, or false (0) if it fails. Equivalent to !fnmatch(@pat, @str, 0).
*
* Pattern metacharacters are ?, *, [ and \.
* (And, inside character classes, !, - and ].)
*
* This is a small and simple implementation intended for device blacklists
* where a string is matched against a number of patterns. Thus, it
* does not preprocess the patterns. It is non-recursive, and run-time
* is at most quadratic: strlen(@str)*strlen(@pat).
*
* An example of the worst case is glob_match("*aaaaa", "aaaaaaaaaa");
* it takes 6 passes over the pattern before matching the string.
*
* Like !fnmatch(@pat, @str, 0) and unlike the shell, this does NOT
* treat / or leading . specially; it isn't actually used for pathnames.
*
* Note that according to glob(7) (and unlike bash), character classes
* are complemented by a leading !; this does not support the regex-style
* [^a-z] syntax.
*
* An opening bracket without a matching close is matched literally.
*/
bool __pure glob_match(char const *pat, char const *str)
{
/*
* Backtrack to previous * on mismatch and retry starting one
* character later in the string. Because * matches all characters
* (no exception for /), it can be easily proved that there's
* never a need to backtrack multiple levels.
*/
char const *back_pat = NULL, *back_str;
/*
* Loop over each token (character or class) in pat, matching
* it against the remaining unmatched tail of str. Return false
* on mismatch, or true after matching the trailing nul bytes.
*/
for (;;) {
unsigned char c = *str++;
unsigned char d = *pat++;
switch (d) {
case '?': /* Wildcard: anything but nul */
if (c == '\0')
return false;
break;
case '*': /* Any-length wildcard */
if (*pat == '\0') /* Optimize trailing * case */
return true;
back_pat = pat;
back_str = --str; /* Allow zero-length match */
break;
case '[': { /* Character class */
bool match = false, inverted = (*pat == '!');
char const *class = pat + inverted;
unsigned char a = *class++;
/*
* Iterate over each span in the character class.
* A span is either a single character a, or a
* range a-b. The first span may begin with ']'.
*/
do {
unsigned char b = a;
if (a == '\0') /* Malformed */
goto literal;
if (class[0] == '-' && class[1] != ']') {
b = class[1];
if (b == '\0')
goto literal;
class += 2;
/* Any special action if a > b? */
}
match |= (a <= c && c <= b);
} while ((a = *class++) != ']');
if (match == inverted)
goto backtrack;
pat = class;
}
break;
case '\\':
d = *pat++;
fallthrough;
default: /* Literal character */
literal:
if (c == d) {
if (d == '\0')
return true;
break;
}
backtrack:
if (c == '\0' || !back_pat)
return false; /* No point continuing */
/* Try again from last *, one character later in str. */
pat = back_pat;
str = ++back_str;
break;
}
}
}
EXPORT_SYMBOL(glob_match);
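/*
 * Editor's illustrative sketch, not part of the original file: typical
 * callers match a name against a small table of patterns.  The function
 * name and pattern strings below are hypothetical.
 */
static inline bool example_glob_listed(const char *name)
{
	static const char * const patterns[] = { "WDC*", "*-blk[0-9]", NULL };
	const char * const *p;

	for (p = patterns; *p; p++)
		if (glob_match(*p, name))
			return true;
	return false;
}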
| linux-master | lib/glob.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Runtime test cases for CONFIG_FORTIFY_SOURCE that aren't expected to
* Oops the kernel on success. (For those, see drivers/misc/lkdtm/fortify.c)
*
* For corner cases with UBSAN, try testing with:
*
* ./tools/testing/kunit/kunit.py run --arch=x86_64 \
* --kconfig_add CONFIG_FORTIFY_SOURCE=y \
* --kconfig_add CONFIG_UBSAN=y \
* --kconfig_add CONFIG_UBSAN_TRAP=y \
* --kconfig_add CONFIG_UBSAN_BOUNDS=y \
* --kconfig_add CONFIG_UBSAN_LOCAL_BOUNDS=y \
* --make_options LLVM=1 fortify
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <kunit/test.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
static const char array_of_10[] = "this is 10";
static const char *ptr_of_11 = "this is 11!";
static char array_unknown[] = "compiler thinks I might change";
static void known_sizes_test(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
KUNIT_EXPECT_EQ(test, __compiletime_strlen(ptr_of_11), 11);
KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX);
/* Externally defined and dynamically sized string pointer: */
KUNIT_EXPECT_EQ(test, __compiletime_strlen(test->name), SIZE_MAX);
}
/* This is volatile so the optimizer can't perform DCE below. */
static volatile int pick;
/* Not inline to keep optimizer from figuring out which string we want. */
static noinline size_t want_minus_one(int pick)
{
const char *str;
switch (pick) {
case 1:
str = "4444";
break;
case 2:
str = "333";
break;
default:
str = "1";
break;
}
return __compiletime_strlen(str);
}
static void control_flow_split_test(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
}
#define KUNIT_EXPECT_BOS(test, p, expected, name) \
KUNIT_EXPECT_EQ_MSG(test, __builtin_object_size(p, 1), \
expected, \
"__alloc_size() not working with __bos on " name "\n")
#if !__has_builtin(__builtin_dynamic_object_size)
#define KUNIT_EXPECT_BDOS(test, p, expected, name) \
/* Silence "unused variable 'expected'" warning. */ \
KUNIT_EXPECT_EQ(test, expected, expected)
#else
#define KUNIT_EXPECT_BDOS(test, p, expected, name) \
KUNIT_EXPECT_EQ_MSG(test, __builtin_dynamic_object_size(p, 1), \
expected, \
"__alloc_size() not working with __bdos on " name "\n")
#endif
/* If the expected size is a constant value, __bos can see it. */
#define check_const(_expected, alloc, free) do { \
size_t expected = (_expected); \
void *p = alloc; \
KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n"); \
KUNIT_EXPECT_BOS(test, p, expected, #alloc); \
KUNIT_EXPECT_BDOS(test, p, expected, #alloc); \
free; \
} while (0)
/* If the expected size is NOT a constant value, __bos CANNOT see it. */
#define check_dynamic(_expected, alloc, free) do { \
size_t expected = (_expected); \
void *p = alloc; \
KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n"); \
KUNIT_EXPECT_BOS(test, p, SIZE_MAX, #alloc); \
KUNIT_EXPECT_BDOS(test, p, expected, #alloc); \
free; \
} while (0)
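/*
 * Rough illustration (an editorial sketch, not from the original source) of
 * how the two checkers above differ for a kmalloc() call, using the macro's
 * own 'gfp' and 'p' names:
 *
 *	check_const(128, kmalloc(128, gfp), kfree(p));
 *		expects both __builtin_object_size(p, 1) and
 *		__builtin_dynamic_object_size(p, 1) to report 128.
 *
 *	check_dynamic(unknown_size, kmalloc(unknown_size, gfp), kfree(p));
 *		expects __builtin_object_size(p, 1) to give up (SIZE_MAX)
 *		while __builtin_dynamic_object_size(p, 1) still reports the
 *		runtime size thanks to the __alloc_size() annotations.
 */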
/* Assortment of constant-value kinda-edge cases. */
#define CONST_TEST_BODY(TEST_alloc) do { \
/* Special-case vmalloc()-family to skip 0-sized allocs. */ \
if (strcmp(#TEST_alloc, "TEST_vmalloc") != 0) \
TEST_alloc(check_const, 0, 0); \
TEST_alloc(check_const, 1, 1); \
TEST_alloc(check_const, 128, 128); \
TEST_alloc(check_const, 1023, 1023); \
TEST_alloc(check_const, 1025, 1025); \
TEST_alloc(check_const, 4096, 4096); \
TEST_alloc(check_const, 4097, 4097); \
} while (0)
static volatile size_t zero_size;
static volatile size_t unknown_size = 50;
#if !__has_builtin(__builtin_dynamic_object_size)
#define DYNAMIC_TEST_BODY(TEST_alloc) \
kunit_skip(test, "Compiler is missing __builtin_dynamic_object_size() support\n")
#else
#define DYNAMIC_TEST_BODY(TEST_alloc) do { \
size_t size = unknown_size; \
\
/* \
* Expected size is "size" in each test, before it is then \
* internally incremented in each test. Requires we disable \
* -Wunsequenced. \
*/ \
TEST_alloc(check_dynamic, size, size++); \
/* Make sure incrementing actually happened. */ \
KUNIT_EXPECT_NE(test, size, unknown_size); \
} while (0)
#endif
#define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator) \
static void alloc_size_##allocator##_const_test(struct kunit *test) \
{ \
CONST_TEST_BODY(TEST_##allocator); \
} \
static void alloc_size_##allocator##_dynamic_test(struct kunit *test) \
{ \
DYNAMIC_TEST_BODY(TEST_##allocator); \
}
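/*
 * For reference (editorial note, not in the original file):
 * DEFINE_ALLOC_SIZE_TEST_PAIR(kmalloc) below expands to roughly:
 *
 *	static void alloc_size_kmalloc_const_test(struct kunit *test)
 *	{
 *		CONST_TEST_BODY(TEST_kmalloc);
 *	}
 *	static void alloc_size_kmalloc_dynamic_test(struct kunit *test)
 *	{
 *		DYNAMIC_TEST_BODY(TEST_kmalloc);
 *	}
 *
 * i.e. one test walking the constant-size edge cases and one exercising a
 * runtime-only size, both reusing the allocator-specific TEST_kmalloc body.
 */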
#define TEST_kmalloc(checker, expected_size, alloc_size) do { \
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
void *orig; \
size_t len; \
\
checker(expected_size, kmalloc(alloc_size, gfp), \
kfree(p)); \
checker(expected_size, \
kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
kfree(p)); \
checker(expected_size, kzalloc(alloc_size, gfp), \
kfree(p)); \
checker(expected_size, \
kzalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
kfree(p)); \
checker(expected_size, kcalloc(1, alloc_size, gfp), \
kfree(p)); \
checker(expected_size, kcalloc(alloc_size, 1, gfp), \
kfree(p)); \
checker(expected_size, \
kcalloc_node(1, alloc_size, gfp, NUMA_NO_NODE), \
kfree(p)); \
checker(expected_size, \
kcalloc_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
kfree(p)); \
checker(expected_size, kmalloc_array(1, alloc_size, gfp), \
kfree(p)); \
checker(expected_size, kmalloc_array(alloc_size, 1, gfp), \
kfree(p)); \
checker(expected_size, \
kmalloc_array_node(1, alloc_size, gfp, NUMA_NO_NODE), \
kfree(p)); \
checker(expected_size, \
kmalloc_array_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
kfree(p)); \
checker(expected_size, __kmalloc(alloc_size, gfp), \
kfree(p)); \
checker(expected_size, \
__kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
kfree(p)); \
\
orig = kmalloc(alloc_size, gfp); \
KUNIT_EXPECT_TRUE(test, orig != NULL); \
checker((expected_size) * 2, \
krealloc(orig, (alloc_size) * 2, gfp), \
kfree(p)); \
orig = kmalloc(alloc_size, gfp); \
KUNIT_EXPECT_TRUE(test, orig != NULL); \
checker((expected_size) * 2, \
krealloc_array(orig, 1, (alloc_size) * 2, gfp), \
kfree(p)); \
orig = kmalloc(alloc_size, gfp); \
KUNIT_EXPECT_TRUE(test, orig != NULL); \
checker((expected_size) * 2, \
krealloc_array(orig, (alloc_size) * 2, 1, gfp), \
kfree(p)); \
\
len = 11; \
/* Using memdup() with fixed size, so force unknown length. */ \
if (!__builtin_constant_p(expected_size)) \
len += zero_size; \
checker(len, kmemdup("hello there", len, gfp), kfree(p)); \
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(kmalloc)
/* Sizes are in pages, not bytes. */
#define TEST_vmalloc(checker, expected_pages, alloc_pages) do { \
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
checker((expected_pages) * PAGE_SIZE, \
vmalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
vzalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
__vmalloc((alloc_pages) * PAGE_SIZE, gfp), vfree(p)); \
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
/* Sizes are in pages (and open-coded for side-effects), not bytes. */
#define TEST_kvmalloc(checker, expected_pages, alloc_pages) do { \
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
size_t prev_size; \
void *orig; \
\
checker((expected_pages) * PAGE_SIZE, \
kvmalloc((alloc_pages) * PAGE_SIZE, gfp), \
vfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
vfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvzalloc((alloc_pages) * PAGE_SIZE, gfp), \
vfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
vfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp), \
vfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp), \
vfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp), \
vfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp), \
vfree(p)); \
\
prev_size = (expected_pages) * PAGE_SIZE; \
orig = kvmalloc(prev_size, gfp); \
KUNIT_EXPECT_TRUE(test, orig != NULL); \
checker(((expected_pages) * PAGE_SIZE) * 2, \
kvrealloc(orig, prev_size, \
((alloc_pages) * PAGE_SIZE) * 2, gfp), \
kvfree(p)); \
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
#define TEST_devm_kmalloc(checker, expected_size, alloc_size) do { \
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
const char dev_name[] = "fortify-test"; \
struct device *dev; \
void *orig; \
size_t len; \
\
/* Create dummy device for devm_kmalloc()-family tests. */ \
dev = root_device_register(dev_name); \
KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), \
"Cannot register test device\n"); \
\
checker(expected_size, devm_kmalloc(dev, alloc_size, gfp), \
devm_kfree(dev, p)); \
checker(expected_size, devm_kzalloc(dev, alloc_size, gfp), \
devm_kfree(dev, p)); \
checker(expected_size, \
devm_kmalloc_array(dev, 1, alloc_size, gfp), \
devm_kfree(dev, p)); \
checker(expected_size, \
devm_kmalloc_array(dev, alloc_size, 1, gfp), \
devm_kfree(dev, p)); \
checker(expected_size, \
devm_kcalloc(dev, 1, alloc_size, gfp), \
devm_kfree(dev, p)); \
checker(expected_size, \
devm_kcalloc(dev, alloc_size, 1, gfp), \
devm_kfree(dev, p)); \
\
orig = devm_kmalloc(dev, alloc_size, gfp); \
KUNIT_EXPECT_TRUE(test, orig != NULL); \
checker((expected_size) * 2, \
devm_krealloc(dev, orig, (alloc_size) * 2, gfp), \
devm_kfree(dev, p)); \
\
len = 4; \
/* Using memdup() with fixed size, so force unknown length. */ \
if (!__builtin_constant_p(expected_size)) \
len += zero_size; \
checker(len, devm_kmemdup(dev, "Ohai", len, gfp), \
devm_kfree(dev, p)); \
\
device_unregister(dev); \
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)
static struct kunit_case fortify_test_cases[] = {
KUNIT_CASE(known_sizes_test),
KUNIT_CASE(control_flow_split_test),
KUNIT_CASE(alloc_size_kmalloc_const_test),
KUNIT_CASE(alloc_size_kmalloc_dynamic_test),
KUNIT_CASE(alloc_size_vmalloc_const_test),
KUNIT_CASE(alloc_size_vmalloc_dynamic_test),
KUNIT_CASE(alloc_size_kvmalloc_const_test),
KUNIT_CASE(alloc_size_kvmalloc_dynamic_test),
KUNIT_CASE(alloc_size_devm_kmalloc_const_test),
KUNIT_CASE(alloc_size_devm_kmalloc_dynamic_test),
{}
};
static struct kunit_suite fortify_test_suite = {
.name = "fortify",
.test_cases = fortify_test_cases,
};
kunit_test_suite(fortify_test_suite);
MODULE_LICENSE("GPL");
| linux-master | lib/fortify_kunit.c |
// SPDX-License-Identifier: GPL-2.0
/*
* helpers to map values in a linear range to range index
*
* Original idea borrowed from regulator framework
*
 * It might be useful if we could also support inversely proportional ranges.
* Copyright 2020 ROHM Semiconductors
*/
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/linear_range.h>
#include <linux/module.h>
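/*
 * Editorial example (not part of the original file), assuming the
 * struct linear_range layout from <linux/linear_range.h> with the
 * fields min, min_sel, max_sel and step: a range such as
 *
 *	static const struct linear_range volts = {
 *		.min = 1000, .min_sel = 0, .max_sel = 10, .step = 50,
 *	};
 *
 * maps selectors 0..10 to the values 1000, 1050, ..., 1500 in even
 * steps of 50. The hypothetical "volts" range is reused in the worked
 * examples further down.
 */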
/**
 * linear_range_values_in_range - return the number of values in a range
 * @r: pointer to the linear range whose values are counted
 *
 * Compute the number of values in the range pointed to by @r. Note that the
 * values can all be equal - a range with selectors 0,...,2 and step 0 still
 * contains 3 values even though they are all equal.
 *
 * Return: the number of values in the range pointed to by @r
*/
unsigned int linear_range_values_in_range(const struct linear_range *r)
{
if (!r)
return 0;
return r->max_sel - r->min_sel + 1;
}
EXPORT_SYMBOL_GPL(linear_range_values_in_range);
/**
 * linear_range_values_in_range_array - return the number of values in ranges
 * @r: pointer to the array of linear ranges whose values are counted
 * @ranges: number of ranges included in the computation
 *
 * Compute the number of values in the ranges pointed to by @r. Note that the
 * values can all be equal - a range with selectors 0,...,2 and step 0 still
 * contains 3 values even though they are all equal.
 *
 * Return: the number of values in the first @ranges ranges pointed to by @r
*/
unsigned int linear_range_values_in_range_array(const struct linear_range *r,
int ranges)
{
int i, values_in_range = 0;
for (i = 0; i < ranges; i++) {
int values;
values = linear_range_values_in_range(&r[i]);
if (!values)
return values;
values_in_range += values;
}
return values_in_range;
}
EXPORT_SYMBOL_GPL(linear_range_values_in_range_array);
/**
* linear_range_get_max_value - return the largest value in a range
 * @r: pointer to the linear range whose largest value is looked up
*
* Return: the largest value in the given range
*/
unsigned int linear_range_get_max_value(const struct linear_range *r)
{
return r->min + (r->max_sel - r->min_sel) * r->step;
}
EXPORT_SYMBOL_GPL(linear_range_get_max_value);
/**
 * linear_range_get_value - fetch a value from a given range
 * @r: pointer to the linear range the value is looked up from
 * @selector: selector for which the value is searched
 * @val: address where the found value is stored
 *
 * Search the given range for the value which matches the given selector.
 *
 * Return: 0 on success, -EINVAL if the given selector is not found in the
 * range.
*/
int linear_range_get_value(const struct linear_range *r, unsigned int selector,
unsigned int *val)
{
if (r->min_sel > selector || r->max_sel < selector)
return -EINVAL;
*val = r->min + (selector - r->min_sel) * r->step;
return 0;
}
EXPORT_SYMBOL_GPL(linear_range_get_value);
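/*
 * Worked example (editorial sketch): with the hypothetical "volts" range
 * above (min = 1000, min_sel = 0, max_sel = 10, step = 50),
 * linear_range_get_value(&volts, 3, &val) stores 1000 + 3 * 50 = 1150 in
 * val and returns 0, while selector 11 is out of range and yields -EINVAL.
 */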
/**
 * linear_range_get_value_array - fetch a value from an array of ranges
 * @r: pointer to the array of linear ranges the value is looked up from
 * @ranges: number of ranges in the array
 * @selector: selector for which the value is searched
 * @val: address where the found value is stored
 *
 * Search through an array of ranges for the value which matches the given
 * selector.
 *
 * Return: 0 on success, -EINVAL if the given selector is not found in any of
 * the ranges.
*/
int linear_range_get_value_array(const struct linear_range *r, int ranges,
unsigned int selector, unsigned int *val)
{
int i;
for (i = 0; i < ranges; i++)
if (r[i].min_sel <= selector && r[i].max_sel >= selector)
return linear_range_get_value(&r[i], selector, val);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(linear_range_get_value_array);
/**
 * linear_range_get_selector_low - return the linear range selector for a value
 * @r: pointer to the linear range the selector is looked up from
 * @val: value for which the selector is searched
 * @selector: address where the found selector is stored
 * @found: flag indicating whether the given value was inside the range
 *
 * Return the selector whose range value is the closest match for the given
 * input value. A value matches if it is equal to or smaller than the given
 * value. If the given value is inside the range, @found is set to true.
 *
 * Return: 0 on success, -EINVAL if the range is invalid or does not contain
 * a value smaller than or equal to the given value
*/
int linear_range_get_selector_low(const struct linear_range *r,
unsigned int val, unsigned int *selector,
bool *found)
{
*found = false;
if (r->min > val)
return -EINVAL;
if (linear_range_get_max_value(r) < val) {
*selector = r->max_sel;
return 0;
}
*found = true;
if (r->step == 0)
*selector = r->min_sel;
else
*selector = (val - r->min) / r->step + r->min_sel;
return 0;
}
EXPORT_SYMBOL_GPL(linear_range_get_selector_low);
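/*
 * Worked example (editorial sketch, same hypothetical "volts" range):
 * for val = 1120, linear_range_get_selector_low() picks selector
 * (1120 - 1000) / 50 + 0 = 2 (value 1100, the closest match from below)
 * and sets *found = true; for val = 2000, above the range maximum of
 * 1500, it returns selector 10 with *found = false; and for val = 900,
 * below the range minimum, it returns -EINVAL.
 */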
/**
 * linear_range_get_selector_low_array - return the linear range selector for a value
 * @r: pointer to the array of linear ranges the selector is looked up from
 * @ranges: number of ranges to scan from the array
 * @val: value for which the selector is searched
 * @selector: address where the found selector is stored
 * @found: flag indicating whether the given value was inside a range
 *
 * Scan the array of ranges for a selector whose range value matches the given
 * input value. A value matches if it is equal to or smaller than the given
 * value. If the given value falls inside a range, scanning stops and @found
 * is set to true. If a range is found whose maximum value is still smaller
 * than the given value, that range's biggest selector is written to
 * @selector, scanning continues, and @found remains false.
 *
 * Return: 0 on success, -EINVAL if the range array is invalid or does not
 * contain a range with a value smaller than or equal to the given value
*/
int linear_range_get_selector_low_array(const struct linear_range *r,
int ranges, unsigned int val,
unsigned int *selector, bool *found)
{
int i;
int ret = -EINVAL;
for (i = 0; i < ranges; i++) {
int tmpret;
tmpret = linear_range_get_selector_low(&r[i], val, selector,
found);
if (!tmpret)
ret = 0;
if (*found)
break;
}
return ret;
}
EXPORT_SYMBOL_GPL(linear_range_get_selector_low_array);
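/*
 * Editorial sketch: with two hypothetical ranges { .min = 1000, .min_sel = 0,
 * .max_sel = 10, .step = 50 } and { .min = 2000, .min_sel = 11, .max_sel = 15,
 * .step = 100 }, a lookup for val = 1700 walks past the first range (keeping
 * its highest selector, 10) and, since the second range starts above 1700,
 * leaves *found false while still returning 0 from the first range's hit.
 */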
/**
 * linear_range_get_selector_high - return the linear range selector for a value
 * @r: pointer to the linear range the selector is looked up from
 * @val: value for which the selector is searched
 * @selector: address where the found selector is stored
 * @found: flag indicating whether the given value was inside the range
 *
 * Return the selector whose range value is the closest match for the given
 * input value. A value matches if it is equal to or greater than the given
 * value. If the given value is inside the range, @found is set to true.
 *
 * Return: 0 on success, -EINVAL if the range is invalid or does not contain
 * a value greater than or equal to the given value
*/
int linear_range_get_selector_high(const struct linear_range *r,
unsigned int val, unsigned int *selector,
bool *found)
{
*found = false;
if (linear_range_get_max_value(r) < val)
return -EINVAL;
if (r->min > val) {
*selector = r->min_sel;
return 0;
}
*found = true;
if (r->step == 0)
*selector = r->max_sel;
else
*selector = DIV_ROUND_UP(val - r->min, r->step) + r->min_sel;
return 0;
}
EXPORT_SYMBOL_GPL(linear_range_get_selector_high);
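/*
 * Worked example (editorial sketch, same hypothetical "volts" range):
 * for val = 1120, linear_range_get_selector_high() rounds up, picking
 * selector DIV_ROUND_UP(1120 - 1000, 50) + 0 = 3 (value 1150, the
 * closest match from above) with *found = true; for val = 900 it clamps
 * to selector 0 with *found = false; and for val = 2000, above the
 * range maximum, it returns -EINVAL.
 */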
/**
 * linear_range_get_selector_within - return the linear range selector for a value
 * @r: pointer to the linear range the selector is looked up from
 * @val: value for which the selector is searched
 * @selector: address where the found selector is stored
 *
 * Return the selector whose range value is the closest match for the given
 * input value. A value matches if it is equal to or lower than the given
 * value. If the given value is higher than the range maximum, the maximum
 * selector is returned instead.
*/
void linear_range_get_selector_within(const struct linear_range *r,
unsigned int val, unsigned int *selector)
{
if (r->min > val) {
*selector = r->min_sel;
return;
}
if (linear_range_get_max_value(r) < val) {
*selector = r->max_sel;
return;
}
if (r->step == 0)
*selector = r->min_sel;
else
*selector = (val - r->min) / r->step + r->min_sel;
}
EXPORT_SYMBOL_GPL(linear_range_get_selector_within);
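/*
 * Editorial sketch (same hypothetical "volts" range): unlike the _low/_high
 * helpers, linear_range_get_selector_within() never fails - val = 900 clamps
 * to selector 0, val = 2000 clamps to selector 10, and val = 1120 rounds
 * down to selector 2, just as the _low variant would.
 */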
MODULE_DESCRIPTION("linear-ranges helper");
MODULE_LICENSE("GPL");
| linux-master | lib/linear_ranges.c |