python_code | repo_name | file_path
---|---|---|
// SPDX-License-Identifier: GPL-2.0
#include <linux/build_bug.h>
#include <linux/errno.h>
#include <linux/errname.h>
#include <linux/kernel.h>
#include <linux/math.h>
/*
* Ensure these tables do not accidentally become gigantic if some
* huge errno makes it in. On most architectures, the first table will
* only have about 140 entries, but mips and parisc have more sparsely
* allocated errnos (with EHWPOISON = 257 on parisc, and EDQUOT = 1133
* on mips), so this wastes a bit of space on those - though we
* special case the EDQUOT case.
*/
#define E(err) [err + BUILD_BUG_ON_ZERO(err <= 0 || err > 300)] = "-" #err
static const char *names_0[] = {
E(E2BIG),
E(EACCES),
E(EADDRINUSE),
E(EADDRNOTAVAIL),
E(EADV),
E(EAFNOSUPPORT),
E(EAGAIN), /* EWOULDBLOCK */
E(EALREADY),
E(EBADE),
E(EBADF),
E(EBADFD),
E(EBADMSG),
E(EBADR),
E(EBADRQC),
E(EBADSLT),
E(EBFONT),
E(EBUSY),
E(ECANCELED), /* ECANCELLED */
E(ECHILD),
E(ECHRNG),
E(ECOMM),
E(ECONNABORTED),
E(ECONNREFUSED), /* EREFUSED */
E(ECONNRESET),
E(EDEADLK), /* EDEADLOCK */
#if EDEADLK != EDEADLOCK /* mips, sparc, powerpc */
E(EDEADLOCK),
#endif
E(EDESTADDRREQ),
E(EDOM),
E(EDOTDOT),
#ifndef CONFIG_MIPS
E(EDQUOT),
#endif
E(EEXIST),
E(EFAULT),
E(EFBIG),
E(EHOSTDOWN),
E(EHOSTUNREACH),
E(EHWPOISON),
E(EIDRM),
E(EILSEQ),
#ifdef EINIT
E(EINIT),
#endif
E(EINPROGRESS),
E(EINTR),
E(EINVAL),
E(EIO),
E(EISCONN),
E(EISDIR),
E(EISNAM),
E(EKEYEXPIRED),
E(EKEYREJECTED),
E(EKEYREVOKED),
E(EL2HLT),
E(EL2NSYNC),
E(EL3HLT),
E(EL3RST),
E(ELIBACC),
E(ELIBBAD),
E(ELIBEXEC),
E(ELIBMAX),
E(ELIBSCN),
E(ELNRNG),
E(ELOOP),
E(EMEDIUMTYPE),
E(EMFILE),
E(EMLINK),
E(EMSGSIZE),
E(EMULTIHOP),
E(ENAMETOOLONG),
E(ENAVAIL),
E(ENETDOWN),
E(ENETRESET),
E(ENETUNREACH),
E(ENFILE),
E(ENOANO),
E(ENOBUFS),
E(ENOCSI),
E(ENODATA),
E(ENODEV),
E(ENOENT),
E(ENOEXEC),
E(ENOKEY),
E(ENOLCK),
E(ENOLINK),
E(ENOMEDIUM),
E(ENOMEM),
E(ENOMSG),
E(ENONET),
E(ENOPKG),
E(ENOPROTOOPT),
E(ENOSPC),
E(ENOSR),
E(ENOSTR),
#ifdef ENOSYM
E(ENOSYM),
#endif
E(ENOSYS),
E(ENOTBLK),
E(ENOTCONN),
E(ENOTDIR),
E(ENOTEMPTY),
E(ENOTNAM),
E(ENOTRECOVERABLE),
E(ENOTSOCK),
E(ENOTTY),
E(ENOTUNIQ),
E(ENXIO),
E(EOPNOTSUPP),
E(EOVERFLOW),
E(EOWNERDEAD),
E(EPERM),
E(EPFNOSUPPORT),
E(EPIPE),
#ifdef EPROCLIM
E(EPROCLIM),
#endif
E(EPROTO),
E(EPROTONOSUPPORT),
E(EPROTOTYPE),
E(ERANGE),
E(EREMCHG),
#ifdef EREMDEV
E(EREMDEV),
#endif
E(EREMOTE),
E(EREMOTEIO),
#ifdef EREMOTERELEASE
E(EREMOTERELEASE),
#endif
E(ERESTART),
E(ERFKILL),
E(EROFS),
#ifdef ERREMOTE
E(ERREMOTE),
#endif
E(ESHUTDOWN),
E(ESOCKTNOSUPPORT),
E(ESPIPE),
E(ESRCH),
E(ESRMNT),
E(ESTALE),
E(ESTRPIPE),
E(ETIME),
E(ETIMEDOUT),
E(ETOOMANYREFS),
E(ETXTBSY),
E(EUCLEAN),
E(EUNATCH),
E(EUSERS),
E(EXDEV),
E(EXFULL),
};
#undef E
#ifdef EREFUSED /* parisc */
static_assert(EREFUSED == ECONNREFUSED);
#endif
#ifdef ECANCELLED /* parisc */
static_assert(ECANCELLED == ECANCELED);
#endif
static_assert(EAGAIN == EWOULDBLOCK); /* everywhere */
#define E(err) [err - 512 + BUILD_BUG_ON_ZERO(err < 512 || err > 550)] = "-" #err
static const char *names_512[] = {
E(ERESTARTSYS),
E(ERESTARTNOINTR),
E(ERESTARTNOHAND),
E(ENOIOCTLCMD),
E(ERESTART_RESTARTBLOCK),
E(EPROBE_DEFER),
E(EOPENSTALE),
E(ENOPARAM),
E(EBADHANDLE),
E(ENOTSYNC),
E(EBADCOOKIE),
E(ENOTSUPP),
E(ETOOSMALL),
E(ESERVERFAULT),
E(EBADTYPE),
E(EJUKEBOX),
E(EIOCBQUEUED),
E(ERECALLCONFLICT),
};
#undef E
static const char *__errname(unsigned err)
{
if (err < ARRAY_SIZE(names_0))
return names_0[err];
if (err >= 512 && err - 512 < ARRAY_SIZE(names_512))
return names_512[err - 512];
/* But why? */
if (IS_ENABLED(CONFIG_MIPS) && err == EDQUOT) /* 1133 */
return "-EDQUOT";
return NULL;
}
/*
* errname(EIO) -> "EIO"
* errname(-EIO) -> "-EIO"
*/
const char *errname(int err)
{
const char *name = __errname(abs(err));
if (!name)
return NULL;
return err > 0 ? name + 1 : name;
}
| linux-master | lib/errname.c |
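A minimal usage sketch for errname() above (a hypothetical caller, not part of lib/errname.c): the table lookup returns NULL for unknown values, so callers keep a numeric fallback.

static void report_failure(int err)
{
	const char *name = errname(err);	/* "-EIO" for -EIO, "EIO" for EIO */

	if (name)
		pr_err("request failed: %s\n", name);
	else
		pr_err("request failed: error %d\n", err);
}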
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2006 PathScale, Inc. All Rights Reserved.
*/
#include <linux/export.h>
#include <linux/io.h>
/**
* __iowrite32_copy - copy data to MMIO space, in 32-bit units
* @to: destination, in MMIO space (must be 32-bit aligned)
* @from: source (must be 32-bit aligned)
* @count: number of 32-bit quantities to copy
*
* Copy data from kernel space to MMIO space, in units of 32 bits at a
* time. Order of access is not guaranteed, nor is a memory barrier
* performed afterwards.
*/
void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
const void *from,
size_t count)
{
u32 __iomem *dst = to;
const u32 *src = from;
const u32 *end = src + count;
while (src < end)
__raw_writel(*src++, dst++);
}
EXPORT_SYMBOL_GPL(__iowrite32_copy);
/**
* __ioread32_copy - copy data from MMIO space, in 32-bit units
* @to: destination (must be 32-bit aligned)
* @from: source, in MMIO space (must be 32-bit aligned)
* @count: number of 32-bit quantities to copy
*
* Copy data from MMIO space to kernel space, in units of 32 bits at a
* time. Order of access is not guaranteed, nor is a memory barrier
* performed afterwards.
*/
void __ioread32_copy(void *to, const void __iomem *from, size_t count)
{
u32 *dst = to;
const u32 __iomem *src = from;
const u32 __iomem *end = src + count;
while (src < end)
*dst++ = __raw_readl(src++);
}
EXPORT_SYMBOL_GPL(__ioread32_copy);
/**
* __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units
* @to: destination, in MMIO space (must be 64-bit aligned)
* @from: source (must be 64-bit aligned)
* @count: number of 64-bit quantities to copy
*
* Copy data from kernel space to MMIO space, in units of 32 or 64 bits at a
* time. Order of access is not guaranteed, nor is a memory barrier
* performed afterwards.
*/
void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
const void *from,
size_t count)
{
#ifdef CONFIG_64BIT
u64 __iomem *dst = to;
const u64 *src = from;
const u64 *end = src + count;
while (src < end)
__raw_writeq(*src++, dst++);
#else
__iowrite32_copy(to, from, count * 2);
#endif
}
EXPORT_SYMBOL_GPL(__iowrite64_copy);
| linux-master | lib/iomap_copy.c |
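A minimal sketch (hypothetical device ring; the slot/doorbell names are assumptions) of posting a descriptor with __iowrite32_copy(). As the kernel-doc above notes, the helper neither orders the accesses nor adds a barrier, so the caller supplies one before kicking the device.

static void post_desc(void __iomem *slot, void __iomem *doorbell,
		      const u32 *desc, size_t dwords)
{
	__iowrite32_copy(slot, desc, dwords);	/* unordered 32-bit MMIO stores */
	wmb();					/* descriptor must land before the kick */
	writel(1, doorbell);
}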
// SPDX-License-Identifier: GPL-2.0
/*
* Provide a default dump_stack() function for architectures
* which don't implement their own.
*/
#include <linux/kernel.h>
#include <linux/buildid.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/atomic.h>
#include <linux/kexec.h>
#include <linux/utsname.h>
#include <linux/stop_machine.h>
static char dump_stack_arch_desc_str[128];
/**
* dump_stack_set_arch_desc - set arch-specific str to show with task dumps
* @fmt: printf-style format string
* @...: arguments for the format string
*
* The configured string will be printed right after utsname during task
* dumps. Usually used to add arch-specific system identifiers. If an
* arch wants to make use of such an ID string, it should initialize this
* as soon as possible during boot.
*/
void __init dump_stack_set_arch_desc(const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
vsnprintf(dump_stack_arch_desc_str, sizeof(dump_stack_arch_desc_str),
fmt, args);
va_end(args);
}
#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID)
#define BUILD_ID_FMT " %20phN"
#define BUILD_ID_VAL vmlinux_build_id
#else
#define BUILD_ID_FMT "%s"
#define BUILD_ID_VAL ""
#endif
/**
* dump_stack_print_info - print generic debug info for dump_stack()
* @log_lvl: log level
*
* Arch-specific dump_stack() implementations can use this function to
* print out the same debug information as the generic dump_stack().
*/
void dump_stack_print_info(const char *log_lvl)
{
printk("%sCPU: %d PID: %d Comm: %.20s %s%s %s %.*s" BUILD_ID_FMT "\n",
log_lvl, raw_smp_processor_id(), current->pid, current->comm,
kexec_crash_loaded() ? "Kdump: loaded " : "",
print_tainted(),
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version, BUILD_ID_VAL);
if (dump_stack_arch_desc_str[0] != '\0')
printk("%sHardware name: %s\n",
log_lvl, dump_stack_arch_desc_str);
print_worker_info(log_lvl, current);
print_stop_info(log_lvl, current);
}
/**
* show_regs_print_info - print generic debug info for show_regs()
* @log_lvl: log level
*
* show_regs() implementations can use this function to print out generic
* debug information.
*/
void show_regs_print_info(const char *log_lvl)
{
dump_stack_print_info(log_lvl);
}
static void __dump_stack(const char *log_lvl)
{
dump_stack_print_info(log_lvl);
show_stack(NULL, NULL, log_lvl);
}
/**
* dump_stack_lvl - dump the current task information and its stack trace
* @log_lvl: log level
*
* Architectures can override this implementation by implementing their own.
*/
asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
{
unsigned long flags;
/*
* Permit this cpu to perform nested stack dumps while serialising
* against other CPUs
*/
printk_cpu_sync_get_irqsave(flags);
__dump_stack(log_lvl);
printk_cpu_sync_put_irqrestore(flags);
}
EXPORT_SYMBOL(dump_stack_lvl);
asmlinkage __visible void dump_stack(void)
{
dump_stack_lvl(KERN_DEFAULT);
}
EXPORT_SYMBOL(dump_stack);
| linux-master | lib/dump_stack.c |
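A minimal sketch (hypothetical helper) of the common dump_stack() pattern: print a warning, then let the function above emit the generic task info and backtrace.

static void note_unexpected_state(void)
{
	pr_warn("unexpected state, dumping stack\n");
	dump_stack();	/* task info via dump_stack_print_info(), then the trace */
}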
/*
* Test cases for lib/uuid.c module.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/uuid.h>
struct test_uuid_data {
const char *uuid;
guid_t le;
uuid_t be;
};
static const struct test_uuid_data test_uuid_test_data[] = {
{
.uuid = "c33f4995-3701-450e-9fbf-206a2e98e576",
.le = GUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
.be = UUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
},
{
.uuid = "64b4371c-77c1-48f9-8221-29f054fc023b",
.le = GUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
.be = UUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
},
{
.uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84",
.le = GUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
.be = UUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
},
};
static const char * const test_uuid_wrong_data[] = {
"c33f4995-3701-450e-9fbf206a2e98e576 ", /* no hyphen(s) */
"64b4371c-77c1-48f9-8221-29f054XX023b", /* invalid character(s) */
"0cb4ddff-a545-4401-9d06-688af53e", /* not enough data */
};
static unsigned total_tests __initdata;
static unsigned failed_tests __initdata;
static void __init test_uuid_failed(const char *prefix, bool wrong, bool be,
const char *data, const char *actual)
{
pr_err("%s test #%u %s %s data: '%s'\n",
prefix,
total_tests,
wrong ? "passed on wrong" : "failed on",
be ? "BE" : "LE",
data);
if (actual && *actual)
pr_err("%s test #%u actual data: '%s'\n",
prefix,
total_tests,
actual);
failed_tests++;
}
static void __init test_uuid_test(const struct test_uuid_data *data)
{
guid_t le;
uuid_t be;
char buf[48];
/* LE */
total_tests++;
if (guid_parse(data->uuid, &le))
test_uuid_failed("conversion", false, false, data->uuid, NULL);
total_tests++;
if (!guid_equal(&data->le, &le)) {
sprintf(buf, "%pUl", &le);
test_uuid_failed("cmp", false, false, data->uuid, buf);
}
/* BE */
total_tests++;
if (uuid_parse(data->uuid, &be))
test_uuid_failed("conversion", false, true, data->uuid, NULL);
total_tests++;
if (!uuid_equal(&data->be, &be)) {
sprintf(buf, "%pUb", &be);
test_uuid_failed("cmp", false, true, data->uuid, buf);
}
}
static void __init test_uuid_wrong(const char *data)
{
guid_t le;
uuid_t be;
/* LE */
total_tests++;
if (!guid_parse(data, &le))
test_uuid_failed("negative", true, false, data, NULL);
/* BE */
total_tests++;
if (!uuid_parse(data, &be))
test_uuid_failed("negative", true, true, data, NULL);
}
static int __init test_uuid_init(void)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(test_uuid_test_data); i++)
test_uuid_test(&test_uuid_test_data[i]);
for (i = 0; i < ARRAY_SIZE(test_uuid_wrong_data); i++)
test_uuid_wrong(test_uuid_wrong_data[i]);
if (failed_tests == 0)
pr_info("all %u tests passed\n", total_tests);
else
pr_err("failed %u out of %u tests\n", failed_tests, total_tests);
return failed_tests ? -EINVAL : 0;
}
module_init(test_uuid_init);
static void __exit test_uuid_exit(void)
{
/* do nothing */
}
module_exit(test_uuid_exit);
MODULE_AUTHOR("Andy Shevchenko <[email protected]>");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | lib/test_uuid.c |
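A minimal sketch (hypothetical caller) of the two parse flavors this test exercises; uuid_t is the big-endian RFC 4122 layout, guid_t the little-endian one.

static int parse_ids(void)
{
	uuid_t be;
	guid_t le;

	if (uuid_parse("c33f4995-3701-450e-9fbf-206a2e98e576", &be))
		return -EINVAL;	/* malformed string */
	if (guid_parse("c33f4995-3701-450e-9fbf-206a2e98e576", &le))
		return -EINVAL;
	return 0;
}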
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* A generic kernel FIFO implementation
*
* Copyright (C) 2009/2010 Stefani Seibold <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/kfifo.h>
/*
* internal helper to calculate the unused elements in a fifo
*/
static inline unsigned int kfifo_unused(struct __kfifo *fifo)
{
return (fifo->mask + 1) - (fifo->in - fifo->out);
}
int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
size_t esize, gfp_t gfp_mask)
{
/*
* round up to the next power of 2, since our 'let the indices
* wrap' technique works only in this case.
*/
size = roundup_pow_of_two(size);
fifo->in = 0;
fifo->out = 0;
fifo->esize = esize;
if (size < 2) {
fifo->data = NULL;
fifo->mask = 0;
return -EINVAL;
}
fifo->data = kmalloc_array(esize, size, gfp_mask);
if (!fifo->data) {
fifo->mask = 0;
return -ENOMEM;
}
fifo->mask = size - 1;
return 0;
}
EXPORT_SYMBOL(__kfifo_alloc);
void __kfifo_free(struct __kfifo *fifo)
{
kfree(fifo->data);
fifo->in = 0;
fifo->out = 0;
fifo->esize = 0;
fifo->data = NULL;
fifo->mask = 0;
}
EXPORT_SYMBOL(__kfifo_free);
int __kfifo_init(struct __kfifo *fifo, void *buffer,
unsigned int size, size_t esize)
{
size /= esize;
if (!is_power_of_2(size))
size = rounddown_pow_of_two(size);
fifo->in = 0;
fifo->out = 0;
fifo->esize = esize;
fifo->data = buffer;
if (size < 2) {
fifo->mask = 0;
return -EINVAL;
}
fifo->mask = size - 1;
return 0;
}
EXPORT_SYMBOL(__kfifo_init);
static void kfifo_copy_in(struct __kfifo *fifo, const void *src,
unsigned int len, unsigned int off)
{
unsigned int size = fifo->mask + 1;
unsigned int esize = fifo->esize;
unsigned int l;
off &= fifo->mask;
if (esize != 1) {
off *= esize;
size *= esize;
len *= esize;
}
l = min(len, size - off);
memcpy(fifo->data + off, src, l);
memcpy(fifo->data, src + l, len - l);
/*
* make sure that the data in the fifo is up to date before
* incrementing the fifo->in index counter
*/
smp_wmb();
}
unsigned int __kfifo_in(struct __kfifo *fifo,
const void *buf, unsigned int len)
{
unsigned int l;
l = kfifo_unused(fifo);
if (len > l)
len = l;
kfifo_copy_in(fifo, buf, len, fifo->in);
fifo->in += len;
return len;
}
EXPORT_SYMBOL(__kfifo_in);
static void kfifo_copy_out(struct __kfifo *fifo, void *dst,
unsigned int len, unsigned int off)
{
unsigned int size = fifo->mask + 1;
unsigned int esize = fifo->esize;
unsigned int l;
off &= fifo->mask;
if (esize != 1) {
off *= esize;
size *= esize;
len *= esize;
}
l = min(len, size - off);
memcpy(dst, fifo->data + off, l);
memcpy(dst + l, fifo->data, len - l);
/*
* make sure that the data is copied before
* incrementing the fifo->out index counter
*/
smp_wmb();
}
unsigned int __kfifo_out_peek(struct __kfifo *fifo,
void *buf, unsigned int len)
{
unsigned int l;
l = fifo->in - fifo->out;
if (len > l)
len = l;
kfifo_copy_out(fifo, buf, len, fifo->out);
return len;
}
EXPORT_SYMBOL(__kfifo_out_peek);
unsigned int __kfifo_out(struct __kfifo *fifo,
void *buf, unsigned int len)
{
len = __kfifo_out_peek(fifo, buf, len);
fifo->out += len;
return len;
}
EXPORT_SYMBOL(__kfifo_out);
static unsigned long kfifo_copy_from_user(struct __kfifo *fifo,
const void __user *from, unsigned int len, unsigned int off,
unsigned int *copied)
{
unsigned int size = fifo->mask + 1;
unsigned int esize = fifo->esize;
unsigned int l;
unsigned long ret;
off &= fifo->mask;
if (esize != 1) {
off *= esize;
size *= esize;
len *= esize;
}
l = min(len, size - off);
ret = copy_from_user(fifo->data + off, from, l);
if (unlikely(ret))
ret = DIV_ROUND_UP(ret + len - l, esize);
else {
ret = copy_from_user(fifo->data, from + l, len - l);
if (unlikely(ret))
ret = DIV_ROUND_UP(ret, esize);
}
/*
* make sure that the data in the fifo is up to date before
* incrementing the fifo->in index counter
*/
smp_wmb();
*copied = len - ret * esize;
/* return the number of elements which are not copied */
return ret;
}
int __kfifo_from_user(struct __kfifo *fifo, const void __user *from,
unsigned long len, unsigned int *copied)
{
unsigned int l;
unsigned long ret;
unsigned int esize = fifo->esize;
int err;
if (esize != 1)
len /= esize;
l = kfifo_unused(fifo);
if (len > l)
len = l;
ret = kfifo_copy_from_user(fifo, from, len, fifo->in, copied);
if (unlikely(ret)) {
len -= ret;
err = -EFAULT;
} else
err = 0;
fifo->in += len;
return err;
}
EXPORT_SYMBOL(__kfifo_from_user);
static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to,
unsigned int len, unsigned int off, unsigned int *copied)
{
unsigned int l;
unsigned long ret;
unsigned int size = fifo->mask + 1;
unsigned int esize = fifo->esize;
off &= fifo->mask;
if (esize != 1) {
off *= esize;
size *= esize;
len *= esize;
}
l = min(len, size - off);
ret = copy_to_user(to, fifo->data + off, l);
if (unlikely(ret))
ret = DIV_ROUND_UP(ret + len - l, esize);
else {
ret = copy_to_user(to + l, fifo->data, len - l);
if (unlikely(ret))
ret = DIV_ROUND_UP(ret, esize);
}
/*
* make sure that the data is copied before
* incrementing the fifo->out index counter
*/
smp_wmb();
*copied = len - ret * esize;
/* return the number of elements which are not copied */
return ret;
}
int __kfifo_to_user(struct __kfifo *fifo, void __user *to,
unsigned long len, unsigned int *copied)
{
unsigned int l;
unsigned long ret;
unsigned int esize = fifo->esize;
int err;
if (esize != 1)
len /= esize;
l = fifo->in - fifo->out;
if (len > l)
len = l;
ret = kfifo_copy_to_user(fifo, to, len, fifo->out, copied);
if (unlikely(ret)) {
len -= ret;
err = -EFAULT;
} else
err = 0;
fifo->out += len;
return err;
}
EXPORT_SYMBOL(__kfifo_to_user);
static int setup_sgl_buf(struct scatterlist *sgl, void *buf,
int nents, unsigned int len)
{
int n;
unsigned int l;
unsigned int off;
struct page *page;
if (!nents)
return 0;
if (!len)
return 0;
n = 0;
page = virt_to_page(buf);
off = offset_in_page(buf);
l = 0;
while (len >= l + PAGE_SIZE - off) {
struct page *npage;
l += PAGE_SIZE;
buf += PAGE_SIZE;
npage = virt_to_page(buf);
if (page_to_phys(page) != page_to_phys(npage) - l) {
sg_set_page(sgl, page, l - off, off);
sgl = sg_next(sgl);
if (++n == nents || sgl == NULL)
return n;
page = npage;
len -= l - off;
l = off = 0;
}
}
sg_set_page(sgl, page, len, off);
return n + 1;
}
static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
int nents, unsigned int len, unsigned int off)
{
unsigned int size = fifo->mask + 1;
unsigned int esize = fifo->esize;
unsigned int l;
unsigned int n;
off &= fifo->mask;
if (esize != 1) {
off *= esize;
size *= esize;
len *= esize;
}
l = min(len, size - off);
n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
return n;
}
unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
struct scatterlist *sgl, int nents, unsigned int len)
{
unsigned int l;
l = kfifo_unused(fifo);
if (len > l)
len = l;
return setup_sgl(fifo, sgl, nents, len, fifo->in);
}
EXPORT_SYMBOL(__kfifo_dma_in_prepare);
unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
struct scatterlist *sgl, int nents, unsigned int len)
{
unsigned int l;
l = fifo->in - fifo->out;
if (len > l)
len = l;
return setup_sgl(fifo, sgl, nents, len, fifo->out);
}
EXPORT_SYMBOL(__kfifo_dma_out_prepare);
unsigned int __kfifo_max_r(unsigned int len, size_t recsize)
{
unsigned int max = (1 << (recsize << 3)) - 1;
if (len > max)
return max;
return len;
}
EXPORT_SYMBOL(__kfifo_max_r);
#define __KFIFO_PEEK(data, out, mask) \
((data)[(out) & (mask)])
/*
* __kfifo_peek_n internal helper function for determining the length of
* the next record in the fifo
*/
static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize)
{
unsigned int l;
unsigned int mask = fifo->mask;
unsigned char *data = fifo->data;
l = __KFIFO_PEEK(data, fifo->out, mask);
if (--recsize)
l |= __KFIFO_PEEK(data, fifo->out + 1, mask) << 8;
return l;
}
#define __KFIFO_POKE(data, in, mask, val) \
( \
(data)[(in) & (mask)] = (unsigned char)(val) \
)
/*
* __kfifo_poke_n internal helper function for storing the length of
* the record into the fifo
*/
static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize)
{
unsigned int mask = fifo->mask;
unsigned char *data = fifo->data;
__KFIFO_POKE(data, fifo->in, mask, n);
if (recsize > 1)
__KFIFO_POKE(data, fifo->in + 1, mask, n >> 8);
}
unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize)
{
return __kfifo_peek_n(fifo, recsize);
}
EXPORT_SYMBOL(__kfifo_len_r);
unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf,
unsigned int len, size_t recsize)
{
if (len + recsize > kfifo_unused(fifo))
return 0;
__kfifo_poke_n(fifo, len, recsize);
kfifo_copy_in(fifo, buf, len, fifo->in + recsize);
fifo->in += len + recsize;
return len;
}
EXPORT_SYMBOL(__kfifo_in_r);
static unsigned int kfifo_out_copy_r(struct __kfifo *fifo,
void *buf, unsigned int len, size_t recsize, unsigned int *n)
{
*n = __kfifo_peek_n(fifo, recsize);
if (len > *n)
len = *n;
kfifo_copy_out(fifo, buf, len, fifo->out + recsize);
return len;
}
unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf,
unsigned int len, size_t recsize)
{
unsigned int n;
if (fifo->in == fifo->out)
return 0;
return kfifo_out_copy_r(fifo, buf, len, recsize, &n);
}
EXPORT_SYMBOL(__kfifo_out_peek_r);
unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
unsigned int len, size_t recsize)
{
unsigned int n;
if (fifo->in == fifo->out)
return 0;
len = kfifo_out_copy_r(fifo, buf, len, recsize, &n);
fifo->out += n + recsize;
return len;
}
EXPORT_SYMBOL(__kfifo_out_r);
void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize)
{
unsigned int n;
n = __kfifo_peek_n(fifo, recsize);
fifo->out += n + recsize;
}
EXPORT_SYMBOL(__kfifo_skip_r);
int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
unsigned long len, unsigned int *copied, size_t recsize)
{
unsigned long ret;
len = __kfifo_max_r(len, recsize);
if (len + recsize > kfifo_unused(fifo)) {
*copied = 0;
return 0;
}
__kfifo_poke_n(fifo, len, recsize);
ret = kfifo_copy_from_user(fifo, from, len, fifo->in + recsize, copied);
if (unlikely(ret)) {
*copied = 0;
return -EFAULT;
}
fifo->in += len + recsize;
return 0;
}
EXPORT_SYMBOL(__kfifo_from_user_r);
int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
unsigned long len, unsigned int *copied, size_t recsize)
{
unsigned long ret;
unsigned int n;
if (fifo->in == fifo->out) {
*copied = 0;
return 0;
}
n = __kfifo_peek_n(fifo, recsize);
if (len > n)
len = n;
ret = kfifo_copy_to_user(fifo, to, len, fifo->out + recsize, copied);
if (unlikely(ret)) {
*copied = 0;
return -EFAULT;
}
fifo->out += n + recsize;
return 0;
}
EXPORT_SYMBOL(__kfifo_to_user_r);
unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
{
BUG_ON(!nents);
len = __kfifo_max_r(len, recsize);
if (len + recsize > kfifo_unused(fifo))
return 0;
return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize);
}
EXPORT_SYMBOL(__kfifo_dma_in_prepare_r);
void __kfifo_dma_in_finish_r(struct __kfifo *fifo,
unsigned int len, size_t recsize)
{
len = __kfifo_max_r(len, recsize);
__kfifo_poke_n(fifo, len, recsize);
fifo->in += len + recsize;
}
EXPORT_SYMBOL(__kfifo_dma_in_finish_r);
unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
{
BUG_ON(!nents);
len = __kfifo_max_r(len, recsize);
if (len + recsize > fifo->in - fifo->out)
return 0;
return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize);
}
EXPORT_SYMBOL(__kfifo_dma_out_prepare_r);
void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize)
{
unsigned int len;
len = __kfifo_peek_n(fifo, recsize);
fifo->out += len + recsize;
}
EXPORT_SYMBOL(__kfifo_dma_out_finish_r);
| linux-master | lib/kfifo.c |
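A minimal sketch (hypothetical driver; the names are assumptions) of the macro API in <linux/kfifo.h> that wraps the __kfifo_* helpers above. The element count must be a power of two, matching what __kfifo_alloc()/__kfifo_init() enforce.

static DEFINE_KFIFO(rx_fifo, unsigned char, 128);

static void rx_push(const unsigned char *buf, unsigned int len)
{
	unsigned int copied = kfifo_in(&rx_fifo, buf, len);

	if (copied != len)	/* kfifo_in() returns how much actually fit */
		pr_warn("rx_fifo full, dropped %u bytes\n", len - copied);
}

static unsigned int rx_pop(unsigned char *buf, unsigned int len)
{
	return kfifo_out(&rx_fifo, buf, len);
}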
#include <linux/libfdt_env.h>
#include "../scripts/dtc/libfdt/fdt_ro.c"
| linux-master | lib/fdt_ro.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Wrapper for decompressing LZ4-compressed kernel, initramfs, and initrd
*
* Copyright (C) 2013, LG Electronics, Kyungsik Lee <[email protected]>
*/
#ifdef STATIC
#define PREBOOT
#include "lz4/lz4_decompress.c"
#else
#include <linux/decompress/unlz4.h>
#endif
#include <linux/types.h>
#include <linux/lz4.h>
#include <linux/decompress/mm.h>
#include <linux/compiler.h>
#include <asm/unaligned.h>
/*
* Note: Uncompressed chunk size is used in the compressor side
* (userspace side for compression).
* It is hardcoded because there is no proper way to extract it
* from the binary stream generated by the preliminary
* version of the LZ4 tool so far.
*/
#define LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE (8 << 20)
#define ARCHIVE_MAGICNUMBER 0x184C2102
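/*
 * Stream layout handled below: a le32 magic word (ARCHIVE_MAGICNUMBER)
 * followed by a sequence of [le32 compressed chunk size][chunk data]
 * records, each chunk decompressing to at most
 * LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE bytes.
 */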
STATIC inline int INIT unlz4(u8 *input, long in_len,
long (*fill)(void *, unsigned long),
long (*flush)(void *, unsigned long),
u8 *output, long *posp,
void (*error) (char *x))
{
int ret = -1;
size_t chunksize = 0;
size_t uncomp_chunksize = LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE;
u8 *inp;
u8 *inp_start;
u8 *outp;
long size = in_len;
#ifdef PREBOOT
size_t out_len = get_unaligned_le32(input + in_len);
#endif
size_t dest_len;
if (output) {
outp = output;
} else if (!flush) {
error("NULL output pointer and no flush function provided");
goto exit_0;
} else {
outp = large_malloc(uncomp_chunksize);
if (!outp) {
error("Could not allocate output buffer");
goto exit_0;
}
}
if (input && fill) {
error("Both input pointer and fill function provided,");
goto exit_1;
} else if (input) {
inp = input;
} else if (!fill) {
error("NULL input pointer and missing fill function");
goto exit_1;
} else {
inp = large_malloc(LZ4_compressBound(uncomp_chunksize));
if (!inp) {
error("Could not allocate input buffer");
goto exit_1;
}
}
inp_start = inp;
if (posp)
*posp = 0;
if (fill) {
size = fill(inp, 4);
if (size < 4) {
error("data corrupted");
goto exit_2;
}
}
chunksize = get_unaligned_le32(inp);
if (chunksize == ARCHIVE_MAGICNUMBER) {
if (!fill) {
inp += 4;
size -= 4;
}
} else {
error("invalid header");
goto exit_2;
}
if (posp)
*posp += 4;
for (;;) {
if (fill) {
size = fill(inp, 4);
if (size == 0)
break;
if (size < 4) {
error("data corrupted");
goto exit_2;
}
} else if (size < 4) {
/* empty or end-of-file */
goto exit_3;
}
chunksize = get_unaligned_le32(inp);
if (chunksize == ARCHIVE_MAGICNUMBER) {
if (!fill) {
inp += 4;
size -= 4;
}
if (posp)
*posp += 4;
continue;
}
if (!fill && chunksize == 0) {
/* empty or end-of-file */
goto exit_3;
}
if (posp)
*posp += 4;
if (!fill) {
inp += 4;
size -= 4;
} else {
if (chunksize > LZ4_compressBound(uncomp_chunksize)) {
error("chunk length is longer than allocated");
goto exit_2;
}
size = fill(inp, chunksize);
if (size < chunksize) {
error("data corrupted");
goto exit_2;
}
}
#ifdef PREBOOT
if (out_len >= uncomp_chunksize) {
dest_len = uncomp_chunksize;
out_len -= dest_len;
} else
dest_len = out_len;
ret = LZ4_decompress_fast(inp, outp, dest_len);
chunksize = ret;
#else
dest_len = uncomp_chunksize;
ret = LZ4_decompress_safe(inp, outp, chunksize, dest_len);
dest_len = ret;
#endif
if (ret < 0) {
error("Decoding failed");
goto exit_2;
}
ret = -1;
if (flush && flush(outp, dest_len) != dest_len)
goto exit_2;
if (output)
outp += dest_len;
if (posp)
*posp += chunksize;
if (!fill) {
size -= chunksize;
if (size == 0)
break;
else if (size < 0) {
error("data corrupted");
goto exit_2;
}
inp += chunksize;
}
}
exit_3:
ret = 0;
exit_2:
if (!input)
large_free(inp_start);
exit_1:
if (!output)
large_free(outp);
exit_0:
return ret;
}
#ifdef PREBOOT
STATIC int INIT __decompress(unsigned char *buf, long in_len,
long (*fill)(void*, unsigned long),
long (*flush)(void*, unsigned long),
unsigned char *output, long out_len,
long *posp,
void (*error)(char *x)
)
{
return unlz4(buf, in_len - 4, fill, flush, output, posp, error);
}
#endif
| linux-master | lib/decompress_unlz4.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
static __init int memset16_selftest(void)
{
unsigned i, j, k;
u16 v, *p;
p = kmalloc(256 * 2 * 2, GFP_KERNEL);
if (!p)
return -1;
for (i = 0; i < 256; i++) {
for (j = 0; j < 256; j++) {
memset(p, 0xa1, 256 * 2 * sizeof(v));
memset16(p + i, 0xb1b2, j);
for (k = 0; k < 512; k++) {
v = p[k];
if (k < i) {
if (v != 0xa1a1)
goto fail;
} else if (k < i + j) {
if (v != 0xb1b2)
goto fail;
} else {
if (v != 0xa1a1)
goto fail;
}
}
}
}
fail:
kfree(p);
if (i < 256)
return (i << 24) | (j << 16) | k | 0x8000;
return 0;
}
static __init int memset32_selftest(void)
{
unsigned i, j, k;
u32 v, *p;
p = kmalloc(256 * 2 * 4, GFP_KERNEL);
if (!p)
return -1;
for (i = 0; i < 256; i++) {
for (j = 0; j < 256; j++) {
memset(p, 0xa1, 256 * 2 * sizeof(v));
memset32(p + i, 0xb1b2b3b4, j);
for (k = 0; k < 512; k++) {
v = p[k];
if (k < i) {
if (v != 0xa1a1a1a1)
goto fail;
} else if (k < i + j) {
if (v != 0xb1b2b3b4)
goto fail;
} else {
if (v != 0xa1a1a1a1)
goto fail;
}
}
}
}
fail:
kfree(p);
if (i < 256)
return (i << 24) | (j << 16) | k | 0x8000;
return 0;
}
static __init int memset64_selftest(void)
{
unsigned i, j, k;
u64 v, *p;
p = kmalloc(256 * 2 * 8, GFP_KERNEL);
if (!p)
return -1;
for (i = 0; i < 256; i++) {
for (j = 0; j < 256; j++) {
memset(p, 0xa1, 256 * 2 * sizeof(v));
memset64(p + i, 0xb1b2b3b4b5b6b7b8ULL, j);
for (k = 0; k < 512; k++) {
v = p[k];
if (k < i) {
if (v != 0xa1a1a1a1a1a1a1a1ULL)
goto fail;
} else if (k < i + j) {
if (v != 0xb1b2b3b4b5b6b7b8ULL)
goto fail;
} else {
if (v != 0xa1a1a1a1a1a1a1a1ULL)
goto fail;
}
}
}
}
fail:
kfree(p);
if (i < 256)
return (i << 24) | (j << 16) | k | 0x8000;
return 0;
}
static __init int strchr_selftest(void)
{
const char *test_string = "abcdefghijkl";
const char *empty_string = "";
char *result;
int i;
for (i = 0; i < strlen(test_string) + 1; i++) {
result = strchr(test_string, test_string[i]);
if (result - test_string != i)
return i + 'a';
}
result = strchr(empty_string, '\0');
if (result != empty_string)
return 0x101;
result = strchr(empty_string, 'a');
if (result)
return 0x102;
result = strchr(test_string, 'z');
if (result)
return 0x103;
return 0;
}
static __init int strnchr_selftest(void)
{
const char *test_string = "abcdefghijkl";
const char *empty_string = "";
char *result;
int i, j;
for (i = 0; i < strlen(test_string) + 1; i++) {
for (j = 0; j < strlen(test_string) + 2; j++) {
result = strnchr(test_string, j, test_string[i]);
if (j <= i) {
if (!result)
continue;
return ((i + 'a') << 8) | j;
}
if (result - test_string != i)
return ((i + 'a') << 8) | j;
}
}
result = strnchr(empty_string, 0, '\0');
if (result)
return 0x10001;
result = strnchr(empty_string, 1, '\0');
if (result != empty_string)
return 0x10002;
result = strnchr(empty_string, 1, 'a');
if (result)
return 0x10003;
result = strnchr(NULL, 0, '\0');
if (result)
return 0x10004;
return 0;
}
static __init int strspn_selftest(void)
{
static const struct strspn_test {
const char str[16];
const char accept[16];
const char reject[16];
unsigned a;
unsigned r;
} tests[] __initconst = {
{ "foobar", "", "", 0, 6 },
{ "abba", "abc", "ABBA", 4, 4 },
{ "abba", "a", "b", 1, 1 },
{ "", "abc", "abc", 0, 0},
};
const struct strspn_test *s = tests;
size_t i, res;
for (i = 0; i < ARRAY_SIZE(tests); ++i, ++s) {
res = strspn(s->str, s->accept);
if (res != s->a)
return 0x100 + 2*i;
res = strcspn(s->str, s->reject);
if (res != s->r)
return 0x100 + 2*i + 1;
}
return 0;
}
static __exit void string_selftest_remove(void)
{
}
static __init int string_selftest_init(void)
{
int test, subtest;
test = 1;
subtest = memset16_selftest();
if (subtest)
goto fail;
test = 2;
subtest = memset32_selftest();
if (subtest)
goto fail;
test = 3;
subtest = memset64_selftest();
if (subtest)
goto fail;
test = 4;
subtest = strchr_selftest();
if (subtest)
goto fail;
test = 5;
subtest = strnchr_selftest();
if (subtest)
goto fail;
test = 6;
subtest = strspn_selftest();
if (subtest)
goto fail;
pr_info("String selftests succeeded\n");
return 0;
fail:
pr_crit("String selftest failure %d.%08x\n", test, subtest);
return 0;
}
module_init(string_selftest_init);
module_exit(string_selftest_remove);
MODULE_LICENSE("GPL v2");
| linux-master | lib/test_string.c |
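The selftests above cover memset16/32/64, which store a repeating 2-, 4-, or 8-byte pattern instead of a single byte. A minimal sketch (hypothetical framebuffer row fill):

static void fill_row_rgb565(u16 *row, unsigned int pixels, u16 color)
{
	memset16(row, color, pixels);	/* one 16-bit pattern per pixel */
}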
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/module.h>
#include <linux/libgcc.h>
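/*
 * libgcc comparison helper for 32-bit targets: treat a and b as
 * unsigned 64-bit values and return 0 (a < b), 1 (a == b) or 2 (a > b).
 */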
word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b)
{
const DWunion au = {.ll = a};
const DWunion bu = {.ll = b};
if ((unsigned int) au.s.high < (unsigned int) bu.s.high)
return 0;
else if ((unsigned int) au.s.high > (unsigned int) bu.s.high)
return 2;
if ((unsigned int) au.s.low < (unsigned int) bu.s.low)
return 0;
else if ((unsigned int) au.s.low > (unsigned int) bu.s.low)
return 2;
return 1;
}
EXPORT_SYMBOL(__ucmpdi2);
| linux-master | lib/ucmpdi2.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This module tests the blackhole_dev that is created during the
* net subsystem initialization. The test this module performs is
* injecting an skb into the stack with skb->dev set to the
* blackhole_dev, expecting the kernel to behave in a sane manner
* (in other words, *not crash*)!
*
* Copyright (c) 2018, Mahesh Bandewar <[email protected]>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/udp.h>
#include <linux/ipv6.h>
#include <net/dst.h>
#define SKB_SIZE 256
#define HEAD_SIZE (14+40+8) /* Ether + IPv6 + UDP */
#define TAIL_SIZE 32 /* random tail-room */
#define UDP_PORT 1234
static int __init test_blackholedev_init(void)
{
struct ipv6hdr *ip6h;
struct sk_buff *skb;
struct ethhdr *ethh;
struct udphdr *uh;
int data_len;
int ret;
skb = alloc_skb(SKB_SIZE, GFP_KERNEL);
if (!skb)
return -ENOMEM;
/* Reserve head-room for the headers */
skb_reserve(skb, HEAD_SIZE);
/* Add data to the skb */
data_len = SKB_SIZE - (HEAD_SIZE + TAIL_SIZE);
memset(__skb_put(skb, data_len), 0xf, data_len);
/* Add protocol data */
/* (Transport) UDP */
uh = (struct udphdr *)skb_push(skb, sizeof(struct udphdr));
skb_set_transport_header(skb, 0);
uh->source = uh->dest = htons(UDP_PORT);
uh->len = htons(data_len);
uh->check = 0;
/* (Network) IPv6 */
ip6h = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));
skb_set_network_header(skb, 0);
ip6h->hop_limit = 32;
ip6h->payload_len = htons(data_len + sizeof(struct udphdr));
ip6h->nexthdr = IPPROTO_UDP;
ip6h->saddr = in6addr_loopback;
ip6h->daddr = in6addr_loopback;
/* Ether */
ethh = (struct ethhdr *)skb_push(skb, sizeof(struct ethhdr));
skb_set_mac_header(skb, 0);
skb->protocol = htons(ETH_P_IPV6);
skb->pkt_type = PACKET_HOST;
skb->dev = blackhole_netdev;
/* Now attempt to send the packet */
ret = dev_queue_xmit(skb);
switch (ret) {
case NET_XMIT_SUCCESS:
pr_warn("dev_queue_xmit() returned NET_XMIT_SUCCESS\n");
break;
case NET_XMIT_DROP:
pr_warn("dev_queue_xmit() returned NET_XMIT_DROP\n");
break;
case NET_XMIT_CN:
pr_warn("dev_queue_xmit() returned NET_XMIT_CN\n");
break;
default:
pr_err("dev_queue_xmit() returned UNKNOWN(%d)\n", ret);
}
return 0;
}
static void __exit test_blackholedev_exit(void)
{
pr_warn("test_blackholedev module terminating.\n");
}
module_init(test_blackholedev_init);
module_exit(test_blackholedev_exit);
MODULE_AUTHOR("Mahesh Bandewar <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | lib/test_blackhole_dev.c |
#include <linux/libfdt_env.h>
#include "../scripts/dtc/libfdt/fdt_sw.c"
| linux-master | lib/fdt_sw.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This file exists solely to ensure debug information for some core
* data structures is included in the final image even for
* CONFIG_DEBUG_INFO_REDUCED. Please do not add actual code. However,
* adding appropriate #includes is fine.
*/
#include <linux/cred.h>
#include <linux/crypto.h>
#include <linux/dcache.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/fscache-cache.h>
#include <linux/io.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stdarg.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <net/sock.h>
#include <net/tcp.h>
| linux-master | lib/debug_info.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Implement the default iomap interfaces
*
* (C) Copyright 2004 Linus Torvalds
*/
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/kmsan-checks.h>
#include <linux/export.h>
/*
* Read/write from/to an (offsettable) iomem cookie. It might be a PIO
* access or a MMIO access, these functions don't care. The info is
* encoded in the hardware mapping set up by the mapping functions
* (or the cookie itself, depending on implementation and hw).
*
* The generic routines don't assume any hardware mappings, and just
* encode the PIO/MMIO as part of the cookie. They coldly assume that
* the MMIO IO mappings are not in the low address range.
*
* Architectures for which this is not true can't use this generic
* implementation and should do their own copy.
*/
#ifndef HAVE_ARCH_PIO_SIZE
/*
* We encode the physical PIO addresses (0-0xffff) into the
* pointer by offsetting them with a constant (0x10000) and
* assuming that all the low addresses are always PIO. That means
* we can do some sanity checks on the low bits, and don't
* need to just take things for granted.
*/
#define PIO_OFFSET 0x10000UL
#define PIO_MASK 0x0ffffUL
#define PIO_RESERVED 0x40000UL
#endif
static void bad_io_access(unsigned long port, const char *access)
{
static int count = 10;
if (count) {
count--;
WARN(1, KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access);
}
}
/*
* Ugly macros are a way of life.
*/
#define IO_COND(addr, is_pio, is_mmio) do { \
unsigned long port = (unsigned long __force)addr; \
if (port >= PIO_RESERVED) { \
is_mmio; \
} else if (port > PIO_OFFSET) { \
port &= PIO_MASK; \
is_pio; \
} else \
bad_io_access(port, #is_pio ); \
} while (0)
#ifndef pio_read16be
#define pio_read16be(port) swab16(inw(port))
#define pio_read32be(port) swab32(inl(port))
#endif
#ifndef mmio_read16be
#define mmio_read16be(addr) swab16(readw(addr))
#define mmio_read32be(addr) swab32(readl(addr))
#define mmio_read64be(addr) swab64(readq(addr))
#endif
/*
* Here and below, we apply __no_kmsan_checks to functions reading data from
* hardware, to ensure that KMSAN marks their return values as initialized.
*/
__no_kmsan_checks
unsigned int ioread8(const void __iomem *addr)
{
IO_COND(addr, return inb(port), return readb(addr));
return 0xff;
}
__no_kmsan_checks
unsigned int ioread16(const void __iomem *addr)
{
IO_COND(addr, return inw(port), return readw(addr));
return 0xffff;
}
__no_kmsan_checks
unsigned int ioread16be(const void __iomem *addr)
{
IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr));
return 0xffff;
}
__no_kmsan_checks
unsigned int ioread32(const void __iomem *addr)
{
IO_COND(addr, return inl(port), return readl(addr));
return 0xffffffff;
}
__no_kmsan_checks
unsigned int ioread32be(const void __iomem *addr)
{
IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr));
return 0xffffffff;
}
EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
#ifdef readq
static u64 pio_read64_lo_hi(unsigned long port)
{
u64 lo, hi;
lo = inl(port);
hi = inl(port + sizeof(u32));
return lo | (hi << 32);
}
static u64 pio_read64_hi_lo(unsigned long port)
{
u64 lo, hi;
hi = inl(port + sizeof(u32));
lo = inl(port);
return lo | (hi << 32);
}
static u64 pio_read64be_lo_hi(unsigned long port)
{
u64 lo, hi;
lo = pio_read32be(port + sizeof(u32));
hi = pio_read32be(port);
return lo | (hi << 32);
}
static u64 pio_read64be_hi_lo(unsigned long port)
{
u64 lo, hi;
hi = pio_read32be(port);
lo = pio_read32be(port + sizeof(u32));
return lo | (hi << 32);
}
__no_kmsan_checks
u64 ioread64_lo_hi(const void __iomem *addr)
{
IO_COND(addr, return pio_read64_lo_hi(port), return readq(addr));
return 0xffffffffffffffffULL;
}
__no_kmsan_checks
u64 ioread64_hi_lo(const void __iomem *addr)
{
IO_COND(addr, return pio_read64_hi_lo(port), return readq(addr));
return 0xffffffffffffffffULL;
}
__no_kmsan_checks
u64 ioread64be_lo_hi(const void __iomem *addr)
{
IO_COND(addr, return pio_read64be_lo_hi(port),
return mmio_read64be(addr));
return 0xffffffffffffffffULL;
}
__no_kmsan_checks
u64 ioread64be_hi_lo(const void __iomem *addr)
{
IO_COND(addr, return pio_read64be_hi_lo(port),
return mmio_read64be(addr));
return 0xffffffffffffffffULL;
}
EXPORT_SYMBOL(ioread64_lo_hi);
EXPORT_SYMBOL(ioread64_hi_lo);
EXPORT_SYMBOL(ioread64be_lo_hi);
EXPORT_SYMBOL(ioread64be_hi_lo);
#endif /* readq */
#ifndef pio_write16be
#define pio_write16be(val,port) outw(swab16(val),port)
#define pio_write32be(val,port) outl(swab32(val),port)
#endif
#ifndef mmio_write16be
#define mmio_write16be(val,port) writew(swab16(val),port)
#define mmio_write32be(val,port) writel(swab32(val),port)
#define mmio_write64be(val,port) writeq(swab64(val),port)
#endif
void iowrite8(u8 val, void __iomem *addr)
{
/* Make sure uninitialized memory isn't copied to devices. */
kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, outb(val,port), writeb(val, addr));
}
void iowrite16(u16 val, void __iomem *addr)
{
/* Make sure uninitialized memory isn't copied to devices. */
kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, outw(val,port), writew(val, addr));
}
void iowrite16be(u16 val, void __iomem *addr)
{
/* Make sure uninitialized memory isn't copied to devices. */
kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, pio_write16be(val,port), mmio_write16be(val, addr));
}
void iowrite32(u32 val, void __iomem *addr)
{
/* Make sure uninitialized memory isn't copied to devices. */
kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, outl(val,port), writel(val, addr));
}
void iowrite32be(u32 val, void __iomem *addr)
{
/* Make sure uninitialized memory isn't copied to devices. */
kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, pio_write32be(val,port), mmio_write32be(val, addr));
}
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
#ifdef writeq
static void pio_write64_lo_hi(u64 val, unsigned long port)
{
outl(val, port);
outl(val >> 32, port + sizeof(u32));
}
static void pio_write64_hi_lo(u64 val, unsigned long port)
{
outl(val >> 32, port + sizeof(u32));
outl(val, port);
}
static void pio_write64be_lo_hi(u64 val, unsigned long port)
{
pio_write32be(val, port + sizeof(u32));
pio_write32be(val >> 32, port);
}
static void pio_write64be_hi_lo(u64 val, unsigned long port)
{
pio_write32be(val >> 32, port);
pio_write32be(val, port + sizeof(u32));
}
void iowrite64_lo_hi(u64 val, void __iomem *addr)
{
/* Make sure uninitialized memory isn't copied to devices. */
kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, pio_write64_lo_hi(val, port),
writeq(val, addr));
}
void iowrite64_hi_lo(u64 val, void __iomem *addr)
{
/* Make sure uninitialized memory isn't copied to devices. */
kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, pio_write64_hi_lo(val, port),
writeq(val, addr));
}
void iowrite64be_lo_hi(u64 val, void __iomem *addr)
{
/* Make sure uninitialized memory isn't copied to devices. */
kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, pio_write64be_lo_hi(val, port),
mmio_write64be(val, addr));
}
void iowrite64be_hi_lo(u64 val, void __iomem *addr)
{
/* Make sure uninitialized memory isn't copied to devices. */
kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, pio_write64be_hi_lo(val, port),
mmio_write64be(val, addr));
}
EXPORT_SYMBOL(iowrite64_lo_hi);
EXPORT_SYMBOL(iowrite64_hi_lo);
EXPORT_SYMBOL(iowrite64be_lo_hi);
EXPORT_SYMBOL(iowrite64be_hi_lo);
#endif /* readq */
/*
* These are the "repeat MMIO read/write" functions.
* Note the "__raw" accesses, since we don't want to
* convert to CPU byte order. We write in "IO byte
* order" (we also don't have IO barriers).
*/
#ifndef mmio_insb
static inline void mmio_insb(const void __iomem *addr, u8 *dst, int count)
{
while (--count >= 0) {
u8 data = __raw_readb(addr);
*dst = data;
dst++;
}
}
static inline void mmio_insw(const void __iomem *addr, u16 *dst, int count)
{
while (--count >= 0) {
u16 data = __raw_readw(addr);
*dst = data;
dst++;
}
}
static inline void mmio_insl(const void __iomem *addr, u32 *dst, int count)
{
while (--count >= 0) {
u32 data = __raw_readl(addr);
*dst = data;
dst++;
}
}
#endif
#ifndef mmio_outsb
static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
{
while (--count >= 0) {
__raw_writeb(*src, addr);
src++;
}
}
static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
{
while (--count >= 0) {
__raw_writew(*src, addr);
src++;
}
}
static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
{
while (--count >= 0) {
__raw_writel(*src, addr);
src++;
}
}
#endif
void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count)
{
IO_COND(addr, insb(port,dst,count), mmio_insb(addr, dst, count));
/* KMSAN must treat values read from devices as initialized. */
kmsan_unpoison_memory(dst, count);
}
void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count)
{
IO_COND(addr, insw(port,dst,count), mmio_insw(addr, dst, count));
/* KMSAN must treat values read from devices as initialized. */
kmsan_unpoison_memory(dst, count * 2);
}
void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count)
{
IO_COND(addr, insl(port,dst,count), mmio_insl(addr, dst, count));
/* KMSAN must treat values read from devices as initialized. */
kmsan_unpoison_memory(dst, count * 4);
}
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(ioread32_rep);
void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
/* Make sure uninitialized memory isn't copied to devices. */
kmsan_check_memory(src, count);
IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count));
}
void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
/* Make sure uninitialized memory isn't copied to devices. */
kmsan_check_memory(src, count * 2);
IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count));
}
void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
/* Make sure uninitialized memory isn't copied to devices. */
kmsan_check_memory(src, count * 4);
IO_COND(addr, outsl(port, src,count), mmio_outsl(addr, src, count));
}
EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
#ifdef CONFIG_HAS_IOPORT_MAP
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
if (port > PIO_MASK)
return NULL;
return (void __iomem *) (unsigned long) (port + PIO_OFFSET);
}
void ioport_unmap(void __iomem *addr)
{
/* Nothing to do */
}
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */
#ifdef CONFIG_PCI
/* Hide the details if this is a MMIO or PIO address space and just do what
* you expect in the correct way. */
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
IO_COND(addr, /* nothing */, iounmap(addr));
}
EXPORT_SYMBOL(pci_iounmap);
#endif /* CONFIG_PCI */
| linux-master | lib/iomap.c |
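A minimal sketch (hypothetical PCI driver; the register offsets are assumptions) of the cookie-based API implemented above: the same ioread32()/iowrite32() calls work whether BAR 0 decodes as MMIO or PIO.

static int hw_reset(struct pci_dev *pdev)
{
	void __iomem *regs = pci_iomap(pdev, 0, 0);

	if (!regs)
		return -ENOMEM;
	iowrite32(1, regs + 0x00);		/* assumed reset register */
	while (ioread32(regs + 0x04) & 1)	/* assumed busy bit */
		cpu_relax();
	pci_iounmap(pdev, regs);
	return 0;
}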
// SPDX-License-Identifier: GPL-2.0-only
/*
* Helpers for formatting and printing strings
*
* Copyright 31 August 2008 James Bottomley
* Copyright (C) 2013, Intel Corporation
*/
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/limits.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_helpers.h>
/**
* string_get_size - get the size in the specified units
* @size: The size to be converted in blocks
* @blk_size: Size of the block (use 1 for size in bytes)
* @units: units to use (powers of 1000 or 1024)
* @buf: buffer to format to
* @len: length of buffer
*
* This function returns a string formatted to 3 significant figures
* giving the size in the required units. @buf should have room for
* at least 9 bytes and will always be zero terminated.
*
*/
void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
char *buf, int len)
{
static const char *const units_10[] = {
"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"
};
static const char *const units_2[] = {
"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"
};
static const char *const *const units_str[] = {
[STRING_UNITS_10] = units_10,
[STRING_UNITS_2] = units_2,
};
static const unsigned int divisor[] = {
[STRING_UNITS_10] = 1000,
[STRING_UNITS_2] = 1024,
};
static const unsigned int rounding[] = { 500, 50, 5 };
int i = 0, j;
u32 remainder = 0, sf_cap;
char tmp[8];
const char *unit;
tmp[0] = '\0';
if (blk_size == 0)
size = 0;
if (size == 0)
goto out;
/* This is Napier's algorithm. Reduce the original block size to
*
* coefficient * divisor[units]^i
*
* we do the reduction so both coefficients are just under 32 bits so
* that multiplying them together won't overflow 64 bits and we keep
* as much precision as possible in the numbers.
*
* Note: it's safe to throw away the remainders here because all the
* precision is in the coefficients.
*/
while (blk_size >> 32) {
do_div(blk_size, divisor[units]);
i++;
}
while (size >> 32) {
do_div(size, divisor[units]);
i++;
}
/* now perform the actual multiplication keeping i as the sum of the
* two logarithms */
size *= blk_size;
/* and logarithmically reduce it until it's just under the divisor */
while (size >= divisor[units]) {
remainder = do_div(size, divisor[units]);
i++;
}
/* work out in j how many digits of precision we need from the
* remainder */
sf_cap = size;
for (j = 0; sf_cap*10 < 1000; j++)
sf_cap *= 10;
if (units == STRING_UNITS_2) {
/* express the remainder as a decimal. It's currently the
* numerator of a fraction whose denominator is
* divisor[units], which is 1 << 10 for STRING_UNITS_2 */
remainder *= 1000;
remainder >>= 10;
}
/* add a 5 to the digit below what will be printed to ensure
* an arithmetical round up and carry it through to size */
remainder += rounding[j];
if (remainder >= 1000) {
remainder -= 1000;
size += 1;
}
if (j) {
snprintf(tmp, sizeof(tmp), ".%03u", remainder);
tmp[j+1] = '\0';
}
out:
if (i >= ARRAY_SIZE(units_2))
unit = "UNK";
else
unit = units_str[units][i];
snprintf(buf, len, "%u%s %s", (u32)size,
tmp, unit);
}
EXPORT_SYMBOL(string_get_size);
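A minimal worked example (values chosen for illustration): 8192 blocks of 512 bytes is 4194304 bytes, reported to three significant figures in either unit system.

static void demo_sizes(void)
{
	char buf[16];

	string_get_size(8192, 512, STRING_UNITS_2, buf, sizeof(buf));
	/* buf == "4.00 MiB" */
	string_get_size(8192, 512, STRING_UNITS_10, buf, sizeof(buf));
	/* buf == "4.19 MB" */
}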
/**
* parse_int_array_user - Split string into a sequence of integers
* @from: The user space buffer to read from
* @count: The maximum number of bytes to read
* @array: Returned pointer to sequence of integers
*
* On success @array is allocated and initialized with a sequence of
* integers extracted from the @from plus an additional element that
* begins the sequence and specifies the integers count.
*
* Caller takes responsibility for freeing @array when it is no longer
* needed.
*/
int parse_int_array_user(const char __user *from, size_t count, int **array)
{
int *ints, nints;
char *buf;
int ret = 0;
buf = memdup_user_nul(from, count);
if (IS_ERR(buf))
return PTR_ERR(buf);
get_options(buf, 0, &nints);
if (!nints) {
ret = -ENOENT;
goto free_buf;
}
ints = kcalloc(nints + 1, sizeof(*ints), GFP_KERNEL);
if (!ints) {
ret = -ENOMEM;
goto free_buf;
}
get_options(buf, nints + 1, ints);
*array = ints;
free_buf:
kfree(buf);
return ret;
}
EXPORT_SYMBOL(parse_int_array_user);
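A minimal sketch (hypothetical debugfs write handler) consuming the result; per the kernel-doc above, element [0] holds the count and the integers follow.

static ssize_t thresholds_write(struct file *f, const char __user *ubuf,
				size_t count, loff_t *ppos)
{
	int *vals, i, err;

	err = parse_int_array_user(ubuf, count, &vals);
	if (err)
		return err;
	for (i = 1; i <= vals[0]; i++)
		pr_info("threshold[%d] = %d\n", i - 1, vals[i]);
	kfree(vals);
	return count;
}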
static bool unescape_space(char **src, char **dst)
{
char *p = *dst, *q = *src;
switch (*q) {
case 'n':
*p = '\n';
break;
case 'r':
*p = '\r';
break;
case 't':
*p = '\t';
break;
case 'v':
*p = '\v';
break;
case 'f':
*p = '\f';
break;
default:
return false;
}
*dst += 1;
*src += 1;
return true;
}
static bool unescape_octal(char **src, char **dst)
{
char *p = *dst, *q = *src;
u8 num;
if (isodigit(*q) == 0)
return false;
num = (*q++) & 7;
while (num < 32 && isodigit(*q) && (q - *src < 3)) {
num <<= 3;
num += (*q++) & 7;
}
*p = num;
*dst += 1;
*src = q;
return true;
}
static bool unescape_hex(char **src, char **dst)
{
char *p = *dst, *q = *src;
int digit;
u8 num;
if (*q++ != 'x')
return false;
num = digit = hex_to_bin(*q++);
if (digit < 0)
return false;
digit = hex_to_bin(*q);
if (digit >= 0) {
q++;
num = (num << 4) | digit;
}
*p = num;
*dst += 1;
*src = q;
return true;
}
static bool unescape_special(char **src, char **dst)
{
char *p = *dst, *q = *src;
switch (*q) {
case '\"':
*p = '\"';
break;
case '\\':
*p = '\\';
break;
case 'a':
*p = '\a';
break;
case 'e':
*p = '\e';
break;
default:
return false;
}
*dst += 1;
*src += 1;
return true;
}
/**
* string_unescape - unquote characters in the given string
* @src: source buffer (escaped)
* @dst: destination buffer (unescaped)
* @size: size of the destination buffer (0 for unlimited)
* @flags: combination of the flags.
*
* Description:
* The function unquotes characters in the given string.
*
* Because the size of the output will be the same as or less than the size of
* the input, the transformation may be performed in place.
*
* Caller must provide valid source and destination pointers. Be aware that
* destination buffer will always be NULL-terminated. Source string must be
* NULL-terminated as well. The supported flags are::
*
* UNESCAPE_SPACE:
* '\f' - form feed
* '\n' - new line
* '\r' - carriage return
* '\t' - horizontal tab
* '\v' - vertical tab
* UNESCAPE_OCTAL:
* '\NNN' - byte with octal value NNN (1 to 3 digits)
* UNESCAPE_HEX:
* '\xHH' - byte with hexadecimal value HH (1 to 2 digits)
* UNESCAPE_SPECIAL:
* '\"' - double quote
* '\\' - backslash
* '\a' - alert (BEL)
* '\e' - escape
* UNESCAPE_ANY:
* all previous together
*
* Return:
* The amount of the characters processed to the destination buffer excluding
* trailing '\0' is returned.
*/
int string_unescape(char *src, char *dst, size_t size, unsigned int flags)
{
char *out = dst;
while (*src && --size) {
if (src[0] == '\\' && src[1] != '\0' && size > 1) {
src++;
size--;
if (flags & UNESCAPE_SPACE &&
unescape_space(&src, &out))
continue;
if (flags & UNESCAPE_OCTAL &&
unescape_octal(&src, &out))
continue;
if (flags & UNESCAPE_HEX &&
unescape_hex(&src, &out))
continue;
if (flags & UNESCAPE_SPECIAL &&
unescape_special(&src, &out))
continue;
*out++ = '\\';
}
*out++ = *src++;
}
*out = '\0';
return out - dst;
}
EXPORT_SYMBOL(string_unescape);
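A minimal sketch; since the output never grows, the kernel-doc above permits the in-place form used here.

static void demo_unescape(void)
{
	char cmd[] = "line1\\nline2";	/* a literal backslash then 'n' */

	string_unescape(cmd, cmd, sizeof(cmd), UNESCAPE_SPACE);
	/* cmd now holds a real newline between "line1" and "line2" */
}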
static bool escape_passthrough(unsigned char c, char **dst, char *end)
{
char *out = *dst;
if (out < end)
*out = c;
*dst = out + 1;
return true;
}
static bool escape_space(unsigned char c, char **dst, char *end)
{
char *out = *dst;
unsigned char to;
switch (c) {
case '\n':
to = 'n';
break;
case '\r':
to = 'r';
break;
case '\t':
to = 't';
break;
case '\v':
to = 'v';
break;
case '\f':
to = 'f';
break;
default:
return false;
}
if (out < end)
*out = '\\';
++out;
if (out < end)
*out = to;
++out;
*dst = out;
return true;
}
static bool escape_special(unsigned char c, char **dst, char *end)
{
char *out = *dst;
unsigned char to;
switch (c) {
case '\\':
to = '\\';
break;
case '\a':
to = 'a';
break;
case '\e':
to = 'e';
break;
case '"':
to = '"';
break;
default:
return false;
}
if (out < end)
*out = '\\';
++out;
if (out < end)
*out = to;
++out;
*dst = out;
return true;
}
static bool escape_null(unsigned char c, char **dst, char *end)
{
char *out = *dst;
if (c)
return false;
if (out < end)
*out = '\\';
++out;
if (out < end)
*out = '0';
++out;
*dst = out;
return true;
}
static bool escape_octal(unsigned char c, char **dst, char *end)
{
char *out = *dst;
if (out < end)
*out = '\\';
++out;
if (out < end)
*out = ((c >> 6) & 0x07) + '0';
++out;
if (out < end)
*out = ((c >> 3) & 0x07) + '0';
++out;
if (out < end)
*out = ((c >> 0) & 0x07) + '0';
++out;
*dst = out;
return true;
}
static bool escape_hex(unsigned char c, char **dst, char *end)
{
char *out = *dst;
if (out < end)
*out = '\\';
++out;
if (out < end)
*out = 'x';
++out;
if (out < end)
*out = hex_asc_hi(c);
++out;
if (out < end)
*out = hex_asc_lo(c);
++out;
*dst = out;
return true;
}
/**
* string_escape_mem - quote characters in the given memory buffer
* @src: source buffer (unescaped)
* @isz: source buffer size
* @dst: destination buffer (escaped)
* @osz: destination buffer size
* @flags: combination of the flags
* @only: NULL-terminated string containing characters used to limit
* the selected escape class. If characters are included in @only
* that would not normally be escaped by the classes selected
* in @flags, they will be copied to @dst unescaped.
*
* Description:
 * Escaping a byte buffer consists of several checks, applied in the
 * following sequence.
 *
 * 1. If an @only string is given and the character is not in it, the
 *    character must go as-is to the output.
 * 2. If the character is both printable and ASCII and %ESCAPE_NAP was
 *    asked for, it passes through to the output.
 * 3. If the character is printable (%ESCAPE_NP) or ASCII (%ESCAPE_NA),
 *    when the respective flag was asked for, it passes through to the
 *    output.
 * 4. The character is checked against the classes given by @flags.
 *    %ESCAPE_OCTAL and %ESCAPE_HEX go last since they cover any
 *    character. Note that they can't actually go together, otherwise
 *    %ESCAPE_HEX would be ignored.
*
 * Caller must provide valid source and destination pointers. Be aware that
 * the destination buffer will not be NULL-terminated, thus the caller has
 * to append a terminator if one is needed. The supported flags are::
*
* %ESCAPE_SPACE: (special white space, not space itself)
* '\f' - form feed
* '\n' - new line
* '\r' - carriage return
* '\t' - horizontal tab
* '\v' - vertical tab
* %ESCAPE_SPECIAL:
* '\"' - double quote
* '\\' - backslash
* '\a' - alert (BEL)
* '\e' - escape
* %ESCAPE_NULL:
* '\0' - null
* %ESCAPE_OCTAL:
* '\NNN' - byte with octal value NNN (3 digits)
* %ESCAPE_ANY:
* all previous together
* %ESCAPE_NP:
* escape only non-printable characters, checked by isprint()
* %ESCAPE_ANY_NP:
* all previous together
* %ESCAPE_HEX:
* '\xHH' - byte with hexadecimal value HH (2 digits)
* %ESCAPE_NA:
* escape only non-ascii characters, checked by isascii()
* %ESCAPE_NAP:
* escape only non-printable or non-ascii characters
* %ESCAPE_APPEND:
* append characters from @only to be escaped by the given classes
*
 * %ESCAPE_APPEND helps to pass additional characters to be escaped when
 * one of %ESCAPE_NP, %ESCAPE_NA, or %ESCAPE_NAP is provided.
 *
 * One notable caveat: %ESCAPE_NAP, %ESCAPE_NP and %ESCAPE_NA have higher
 * priority than the rest of the flags (%ESCAPE_NAP is the highest).
* It doesn't make much sense to use either of them without %ESCAPE_OCTAL
* or %ESCAPE_HEX, because they cover most of the other character classes.
* %ESCAPE_NAP can utilize %ESCAPE_SPACE or %ESCAPE_SPECIAL in addition to
* the above.
*
* Return:
* The total size of the escaped output that would be generated for
* the given input and flags. To check whether the output was
* truncated, compare the return value to osz. There is room left in
* dst for a '\0' terminator if and only if ret < osz.
*/
int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
unsigned int flags, const char *only)
{
char *p = dst;
char *end = p + osz;
bool is_dict = only && *only;
bool is_append = flags & ESCAPE_APPEND;
while (isz--) {
unsigned char c = *src++;
bool in_dict = is_dict && strchr(only, c);
/*
* Apply rules in the following sequence:
* - the @only string is supplied and does not contain a
* character under question
* - the character is printable and ASCII, when @flags has
* %ESCAPE_NAP bit set
* - the character is printable, when @flags has
* %ESCAPE_NP bit set
* - the character is ASCII, when @flags has
* %ESCAPE_NA bit set
* - the character doesn't fall into a class of symbols
* defined by given @flags
* In these cases we just pass through a character to the
* output buffer.
*
* When %ESCAPE_APPEND is passed, the characters from @only
* have been excluded from the %ESCAPE_NAP, %ESCAPE_NP, and
* %ESCAPE_NA cases.
*/
if (!(is_append || in_dict) && is_dict &&
escape_passthrough(c, &p, end))
continue;
if (!(is_append && in_dict) && isascii(c) && isprint(c) &&
flags & ESCAPE_NAP && escape_passthrough(c, &p, end))
continue;
if (!(is_append && in_dict) && isprint(c) &&
flags & ESCAPE_NP && escape_passthrough(c, &p, end))
continue;
if (!(is_append && in_dict) && isascii(c) &&
flags & ESCAPE_NA && escape_passthrough(c, &p, end))
continue;
if (flags & ESCAPE_SPACE && escape_space(c, &p, end))
continue;
if (flags & ESCAPE_SPECIAL && escape_special(c, &p, end))
continue;
if (flags & ESCAPE_NULL && escape_null(c, &p, end))
continue;
/* ESCAPE_OCTAL and ESCAPE_HEX always go last */
if (flags & ESCAPE_OCTAL && escape_octal(c, &p, end))
continue;
if (flags & ESCAPE_HEX && escape_hex(c, &p, end))
continue;
escape_passthrough(c, &p, end);
}
return p - dst;
}
EXPORT_SYMBOL(string_escape_mem);
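/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * because the return value is the full size the escaped output would
 * need, a common idiom - used by kstrdup_quotable() below - is a sizing
 * pass with a zero-sized destination followed by the real pass. The
 * demo_escape() helper is an assumption for demonstration only.
 */
#if 0
static char *demo_escape(const char *src, size_t len, gfp_t gfp)
{
	size_t dlen = string_escape_mem(src, len, NULL, 0, ESCAPE_ANY_NP, NULL);
	char *dst = kmalloc(dlen + 1, gfp);

	if (!dst)
		return NULL;
	string_escape_mem(src, len, dst, dlen, ESCAPE_ANY_NP, NULL);
	dst[dlen] = '\0';	/* the output itself is not NUL-terminated */
	return dst;
}
#endif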
/*
* Return an allocated string that has been escaped of special characters
* and double quotes, making it safe to log in quotes.
*/
char *kstrdup_quotable(const char *src, gfp_t gfp)
{
size_t slen, dlen;
char *dst;
const int flags = ESCAPE_HEX;
const char esc[] = "\f\n\r\t\v\a\e\\\"";
if (!src)
return NULL;
slen = strlen(src);
dlen = string_escape_mem(src, slen, NULL, 0, flags, esc);
dst = kmalloc(dlen + 1, gfp);
if (!dst)
return NULL;
WARN_ON(string_escape_mem(src, slen, dst, dlen, flags, esc) != dlen);
dst[dlen] = '\0';
return dst;
}
EXPORT_SYMBOL_GPL(kstrdup_quotable);
/*
* Returns allocated NULL-terminated string containing process
* command line, with inter-argument NULLs replaced with spaces,
* and other special characters escaped.
*/
char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp)
{
char *buffer, *quoted;
int i, res;
buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buffer)
return NULL;
res = get_cmdline(task, buffer, PAGE_SIZE - 1);
buffer[res] = '\0';
/* Collapse trailing NULLs, leave res pointing to last non-NULL. */
while (--res >= 0 && buffer[res] == '\0')
;
/* Replace inter-argument NULLs. */
for (i = 0; i <= res; i++)
if (buffer[i] == '\0')
buffer[i] = ' ';
/* Make sure result is printable. */
quoted = kstrdup_quotable(buffer, gfp);
kfree(buffer);
return quoted;
}
EXPORT_SYMBOL_GPL(kstrdup_quotable_cmdline);
/*
* Returns allocated NULL-terminated string containing pathname,
* with special characters escaped, able to be safely logged. If
* there is an error, the leading character will be "<".
*/
char *kstrdup_quotable_file(struct file *file, gfp_t gfp)
{
char *temp, *pathname;
if (!file)
return kstrdup("<unknown>", gfp);
/* We add 11 bytes of room for ' (deleted)' to be appended */
temp = kmalloc(PATH_MAX + 11, GFP_KERNEL);
if (!temp)
return kstrdup("<no_memory>", gfp);
pathname = file_path(file, temp, PATH_MAX + 11);
if (IS_ERR(pathname))
pathname = kstrdup("<too_long>", gfp);
else
pathname = kstrdup_quotable(pathname, gfp);
kfree(temp);
return pathname;
}
EXPORT_SYMBOL_GPL(kstrdup_quotable_file);
/*
* Returns duplicate string in which the @old characters are replaced by @new.
*/
char *kstrdup_and_replace(const char *src, char old, char new, gfp_t gfp)
{
char *dst;
dst = kstrdup(src, gfp);
if (!dst)
return NULL;
return strreplace(dst, old, new);
}
EXPORT_SYMBOL_GPL(kstrdup_and_replace);
/**
* kasprintf_strarray - allocate and fill array of sequential strings
* @gfp: flags for the slab allocator
* @prefix: prefix to be used
* @n: amount of lines to be allocated and filled
*
 * Allocates and fills @n strings using the pattern "%s-%zu", where the
 * prefix is provided by the caller. The caller is responsible for freeing
 * them with kfree_strarray() after use.
*
* Returns array of strings or NULL when memory can't be allocated.
*/
char **kasprintf_strarray(gfp_t gfp, const char *prefix, size_t n)
{
char **names;
size_t i;
names = kcalloc(n + 1, sizeof(char *), gfp);
if (!names)
return NULL;
for (i = 0; i < n; i++) {
names[i] = kasprintf(gfp, "%s-%zu", prefix, i);
if (!names[i]) {
kfree_strarray(names, i);
return NULL;
}
}
return names;
}
EXPORT_SYMBOL_GPL(kasprintf_strarray);
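/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * generating and releasing the names "tx-0" .. "tx-3". The "tx" prefix
 * and the count are arbitrary example values.
 */
#if 0
static void demo_strarray(void)
{
	char **names = kasprintf_strarray(GFP_KERNEL, "tx", 4);

	if (!names)
		return;
	/* names[0] is "tx-0", ..., names[3] is "tx-3", names[4] is NULL. */
	kfree_strarray(names, 4);
}
#endif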
/**
* kfree_strarray - free a number of dynamically allocated strings contained
* in an array and the array itself
*
* @array: Dynamically allocated array of strings to free.
* @n: Number of strings (starting from the beginning of the array) to free.
*
* Passing a non-NULL @array and @n == 0 as well as NULL @array are valid
* use-cases. If @array is NULL, the function does nothing.
*/
void kfree_strarray(char **array, size_t n)
{
unsigned int i;
if (!array)
return;
for (i = 0; i < n; i++)
kfree(array[i]);
kfree(array);
}
EXPORT_SYMBOL_GPL(kfree_strarray);
struct strarray {
char **array;
size_t n;
};
static void devm_kfree_strarray(struct device *dev, void *res)
{
struct strarray *array = res;
kfree_strarray(array->array, array->n);
}
char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n)
{
struct strarray *ptr;
ptr = devres_alloc(devm_kfree_strarray, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
ptr->array = kasprintf_strarray(GFP_KERNEL, prefix, n);
if (!ptr->array) {
devres_free(ptr);
return ERR_PTR(-ENOMEM);
}
ptr->n = n;
devres_add(dev, ptr);
return ptr->array;
}
EXPORT_SYMBOL_GPL(devm_kasprintf_strarray);
/**
* strscpy_pad() - Copy a C-string into a sized buffer
* @dest: Where to copy the string to
* @src: Where to copy the string from
* @count: Size of destination buffer
*
* Copy the string, or as much of it as fits, into the dest buffer. The
* behavior is undefined if the string buffers overlap. The destination
* buffer is always %NUL terminated, unless it's zero-sized.
*
* If the source string is shorter than the destination buffer, zeros
* the tail of the destination buffer.
*
 * For a full explanation of why you may want to consider using the
 * 'strscpy' functions, please see the function docstring for strscpy().
*
* Returns:
* * The number of characters copied (not including the trailing %NUL)
* * -E2BIG if count is 0 or @src was truncated.
*/
ssize_t strscpy_pad(char *dest, const char *src, size_t count)
{
ssize_t written;
written = strscpy(dest, src, count);
if (written < 0 || written == count - 1)
return written;
memset(dest + written + 1, 0, count - written - 1);
return written;
}
EXPORT_SYMBOL(strscpy_pad);
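/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * unlike plain strscpy(), the _pad variant zeroes the tail of the buffer,
 * which matters when the whole buffer is later copied to user space or
 * compared bytewise. Names below are assumptions.
 */
#if 0
static void demo_pad(void)
{
	char name[16];
	ssize_t n = strscpy_pad(name, "eth0", sizeof(name));

	/* n == 4; name[4..15] are all zero rather than stale stack bytes. */
}
#endif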
/**
* skip_spaces - Removes leading whitespace from @str.
* @str: The string to be stripped.
*
* Returns a pointer to the first non-whitespace character in @str.
*/
char *skip_spaces(const char *str)
{
while (isspace(*str))
++str;
return (char *)str;
}
EXPORT_SYMBOL(skip_spaces);
/**
* strim - Removes leading and trailing whitespace from @s.
* @s: The string to be stripped.
*
* Note that the first trailing whitespace is replaced with a %NUL-terminator
* in the given string @s. Returns a pointer to the first non-whitespace
* character in @s.
*/
char *strim(char *s)
{
size_t size;
char *end;
size = strlen(s);
if (!size)
return s;
end = s + size - 1;
while (end >= s && isspace(*end))
end--;
*(end + 1) = '\0';
return skip_spaces(s);
}
EXPORT_SYMBOL(strim);
/**
* sysfs_streq - return true if strings are equal, modulo trailing newline
* @s1: one string
* @s2: another string
*
* This routine returns true iff two strings are equal, treating both
* NUL and newline-then-NUL as equivalent string terminations. It's
* geared for use with sysfs input strings, which generally terminate
* with newlines but are compared against values without newlines.
*/
bool sysfs_streq(const char *s1, const char *s2)
{
while (*s1 && *s1 == *s2) {
s1++;
s2++;
}
if (*s1 == *s2)
return true;
if (!*s1 && *s2 == '\n' && !s2[1])
return true;
if (*s1 == '\n' && !s1[1] && !*s2)
return true;
return false;
}
EXPORT_SYMBOL(sysfs_streq);
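/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a hypothetical sysfs store handler matching user input, which usually
 * arrives with a trailing newline from "echo". The handler and the
 * "enable" keyword are assumptions.
 */
#if 0
static ssize_t demo_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	/* Matches both "enable" and "enable\n". */
	if (sysfs_streq(buf, "enable"))
		return count;
	return -EINVAL;
}
#endif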
/**
* match_string - matches given string in an array
* @array: array of strings
* @n: number of strings in the array or -1 for NULL terminated arrays
* @string: string to match with
*
* This routine will look for a string in an array of strings up to the
* n-th element in the array or until the first NULL element.
*
 * Historically, the value of -1 for @n was used to search in arrays that
* are NULL terminated. However, the function does not make a distinction
* when finishing the search: either @n elements have been compared OR
* the first NULL element was found.
*
* Return:
* index of a @string in the @array if matches, or %-EINVAL otherwise.
*/
int match_string(const char * const *array, size_t n, const char *string)
{
int index;
const char *item;
for (index = 0; index < n; index++) {
item = array[index];
if (!item)
break;
if (!strcmp(item, string))
return index;
}
return -EINVAL;
}
EXPORT_SYMBOL(match_string);
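/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * looking up a mode name in a NULL-terminated table, relying on the
 * @n == -1 convention documented above. The table is an assumption.
 */
#if 0
static const char * const demo_modes[] = { "off", "on", "auto", NULL };

static int demo_mode_lookup(const char *s)
{
	return match_string(demo_modes, -1, s);	/* index, or -EINVAL */
}
#endif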
/**
* __sysfs_match_string - matches given string in an array
* @array: array of strings
* @n: number of strings in the array or -1 for NULL terminated arrays
* @str: string to match with
*
* Returns index of @str in the @array or -EINVAL, just like match_string().
* Uses sysfs_streq instead of strcmp for matching.
*
* This routine will look for a string in an array of strings up to the
* n-th element in the array or until the first NULL element.
*
 * Historically, the value of -1 for @n was used to search in arrays that
* are NULL terminated. However, the function does not make a distinction
* when finishing the search: either @n elements have been compared OR
* the first NULL element was found.
*/
int __sysfs_match_string(const char * const *array, size_t n, const char *str)
{
const char *item;
int index;
for (index = 0; index < n; index++) {
item = array[index];
if (!item)
break;
if (sysfs_streq(item, str))
return index;
}
return -EINVAL;
}
EXPORT_SYMBOL(__sysfs_match_string);
/**
* strreplace - Replace all occurrences of character in string.
* @str: The string to operate on.
* @old: The character being replaced.
* @new: The character @old is replaced with.
*
 * Replaces each @old character with a @new one in the given string @str.
*
* Return: pointer to the string @str itself.
*/
char *strreplace(char *str, char old, char new)
{
char *s = str;
for (; *s; ++s)
if (*s == old)
*s = new;
return str;
}
EXPORT_SYMBOL(strreplace);
/**
* memcpy_and_pad - Copy one buffer to another with padding
* @dest: Where to copy to
* @dest_len: The destination buffer size
* @src: Where to copy from
* @count: The number of bytes to copy
* @pad: Character to use for padding if space is left in destination.
*/
void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
int pad)
{
if (dest_len > count) {
memcpy(dest, src, count);
memset(dest + count, pad, dest_len - count);
} else {
memcpy(dest, src, dest_len);
}
}
EXPORT_SYMBOL(memcpy_and_pad);
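/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * filling a fixed-width identification field, as wire formats often
 * require. The field width and space padding are example choices.
 */
#if 0
static void demo_fill_id(char id[8], const char *model)
{
	/* Copies at most 8 bytes of @model and space-pads the remainder. */
	memcpy_and_pad(id, 8, model, strlen(model), ' ');
}
#endif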
#ifdef CONFIG_FORTIFY_SOURCE
/* These are placeholders for fortify compile-time warnings. */
void __read_overflow2_field(size_t avail, size_t wanted) { }
EXPORT_SYMBOL(__read_overflow2_field);
void __write_overflow_field(size_t avail, size_t wanted) { }
EXPORT_SYMBOL(__write_overflow_field);
void fortify_panic(const char *name)
{
pr_emerg("detected buffer overflow in %s\n", name);
BUG();
}
EXPORT_SYMBOL(fortify_panic);
#endif /* CONFIG_FORTIFY_SOURCE */
| linux-master | lib/string_helpers.c |
// SPDX-License-Identifier: GPL-2.0-only
/* I/O iterator tests. This can only test kernel-backed iterator types.
*
* Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/bvec.h>
#include <kunit/test.h>
MODULE_DESCRIPTION("iov_iter testing");
MODULE_AUTHOR("David Howells <[email protected]>");
MODULE_LICENSE("GPL");
struct kvec_test_range {
int from, to;
};
static const struct kvec_test_range kvec_test_ranges[] = {
{ 0x00002, 0x00002 },
{ 0x00027, 0x03000 },
{ 0x05193, 0x18794 },
{ 0x20000, 0x20000 },
{ 0x20000, 0x24000 },
{ 0x24000, 0x27001 },
{ 0x29000, 0xffffb },
{ 0xffffd, 0xffffe },
{ -1 }
};
static inline u8 pattern(unsigned long x)
{
return x & 0xff;
}
static void iov_kunit_unmap(void *data)
{
vunmap(data);
}
static void *__init iov_kunit_create_buffer(struct kunit *test,
struct page ***ppages,
size_t npages)
{
struct page **pages;
unsigned long got;
void *buffer;
pages = kunit_kcalloc(test, npages, sizeof(struct page *), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
*ppages = pages;
got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
if (got != npages) {
release_pages(pages, got);
KUNIT_ASSERT_EQ(test, got, npages);
}
buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);
kunit_add_action_or_reset(test, iov_kunit_unmap, buffer);
return buffer;
}
static void __init iov_kunit_load_kvec(struct kunit *test,
struct iov_iter *iter, int dir,
struct kvec *kvec, unsigned int kvmax,
void *buffer, size_t bufsize,
const struct kvec_test_range *pr)
{
size_t size = 0;
int i;
for (i = 0; i < kvmax; i++, pr++) {
if (pr->from < 0)
break;
KUNIT_ASSERT_GE(test, pr->to, pr->from);
KUNIT_ASSERT_LE(test, pr->to, bufsize);
kvec[i].iov_base = buffer + pr->from;
kvec[i].iov_len = pr->to - pr->from;
size += pr->to - pr->from;
}
KUNIT_ASSERT_LE(test, size, bufsize);
iov_iter_kvec(iter, dir, kvec, i, size);
}
/*
 * Test copying to an ITER_KVEC-type iterator.
*/
static void __init iov_kunit_copy_to_kvec(struct kunit *test)
{
const struct kvec_test_range *pr;
struct iov_iter iter;
struct page **spages, **bpages;
struct kvec kvec[8];
u8 *scratch, *buffer;
size_t bufsize, npages, size, copied;
int i, patt;
bufsize = 0x100000;
npages = bufsize / PAGE_SIZE;
scratch = iov_kunit_create_buffer(test, &spages, npages);
for (i = 0; i < bufsize; i++)
scratch[i] = pattern(i);
buffer = iov_kunit_create_buffer(test, &bpages, npages);
memset(buffer, 0, bufsize);
iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
buffer, bufsize, kvec_test_ranges);
size = iter.count;
copied = copy_to_iter(scratch, size, &iter);
KUNIT_EXPECT_EQ(test, copied, size);
KUNIT_EXPECT_EQ(test, iter.count, 0);
KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
/* Build the expected image in the scratch buffer. */
patt = 0;
memset(scratch, 0, bufsize);
for (pr = kvec_test_ranges; pr->from >= 0; pr++)
for (i = pr->from; i < pr->to; i++)
scratch[i] = pattern(patt++);
/* Compare the images */
for (i = 0; i < bufsize; i++) {
KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
if (buffer[i] != scratch[i])
return;
}
KUNIT_SUCCEED();
}
/*
 * Test copying from an ITER_KVEC-type iterator.
*/
static void __init iov_kunit_copy_from_kvec(struct kunit *test)
{
const struct kvec_test_range *pr;
struct iov_iter iter;
struct page **spages, **bpages;
struct kvec kvec[8];
u8 *scratch, *buffer;
size_t bufsize, npages, size, copied;
int i, j;
bufsize = 0x100000;
npages = bufsize / PAGE_SIZE;
buffer = iov_kunit_create_buffer(test, &bpages, npages);
for (i = 0; i < bufsize; i++)
buffer[i] = pattern(i);
scratch = iov_kunit_create_buffer(test, &spages, npages);
memset(scratch, 0, bufsize);
iov_kunit_load_kvec(test, &iter, WRITE, kvec, ARRAY_SIZE(kvec),
buffer, bufsize, kvec_test_ranges);
size = min(iter.count, bufsize);
copied = copy_from_iter(scratch, size, &iter);
KUNIT_EXPECT_EQ(test, copied, size);
KUNIT_EXPECT_EQ(test, iter.count, 0);
KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
/* Build the expected image in the main buffer. */
i = 0;
memset(buffer, 0, bufsize);
for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
for (j = pr->from; j < pr->to; j++) {
buffer[i++] = pattern(j);
if (i >= bufsize)
goto stop;
}
}
stop:
/* Compare the images */
for (i = 0; i < bufsize; i++) {
KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
if (scratch[i] != buffer[i])
return;
}
KUNIT_SUCCEED();
}
struct bvec_test_range {
int page, from, to;
};
static const struct bvec_test_range bvec_test_ranges[] = {
{ 0, 0x0002, 0x0002 },
{ 1, 0x0027, 0x0893 },
{ 2, 0x0193, 0x0794 },
{ 3, 0x0000, 0x1000 },
{ 4, 0x0000, 0x1000 },
{ 5, 0x0000, 0x1000 },
{ 6, 0x0000, 0x0ffb },
{ 6, 0x0ffd, 0x0ffe },
{ -1, -1, -1 }
};
static void __init iov_kunit_load_bvec(struct kunit *test,
struct iov_iter *iter, int dir,
struct bio_vec *bvec, unsigned int bvmax,
struct page **pages, size_t npages,
size_t bufsize,
const struct bvec_test_range *pr)
{
struct page *can_merge = NULL, *page;
size_t size = 0;
int i;
for (i = 0; i < bvmax; i++, pr++) {
if (pr->from < 0)
break;
KUNIT_ASSERT_LT(test, pr->page, npages);
KUNIT_ASSERT_LT(test, pr->page * PAGE_SIZE, bufsize);
KUNIT_ASSERT_GE(test, pr->from, 0);
KUNIT_ASSERT_GE(test, pr->to, pr->from);
KUNIT_ASSERT_LE(test, pr->to, PAGE_SIZE);
page = pages[pr->page];
if (pr->from == 0 && pr->from != pr->to && page == can_merge) {
i--;
bvec[i].bv_len += pr->to;
} else {
bvec_set_page(&bvec[i], page, pr->to - pr->from, pr->from);
}
size += pr->to - pr->from;
if ((pr->to & ~PAGE_MASK) == 0)
can_merge = page + pr->to / PAGE_SIZE;
else
can_merge = NULL;
}
iov_iter_bvec(iter, dir, bvec, i, size);
}
/*
 * Test copying to an ITER_BVEC-type iterator.
*/
static void __init iov_kunit_copy_to_bvec(struct kunit *test)
{
const struct bvec_test_range *pr;
struct iov_iter iter;
struct bio_vec bvec[8];
struct page **spages, **bpages;
u8 *scratch, *buffer;
size_t bufsize, npages, size, copied;
int i, b, patt;
bufsize = 0x100000;
npages = bufsize / PAGE_SIZE;
scratch = iov_kunit_create_buffer(test, &spages, npages);
for (i = 0; i < bufsize; i++)
scratch[i] = pattern(i);
buffer = iov_kunit_create_buffer(test, &bpages, npages);
memset(buffer, 0, bufsize);
iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
bpages, npages, bufsize, bvec_test_ranges);
size = iter.count;
copied = copy_to_iter(scratch, size, &iter);
KUNIT_EXPECT_EQ(test, copied, size);
KUNIT_EXPECT_EQ(test, iter.count, 0);
KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
/* Build the expected image in the scratch buffer. */
b = 0;
patt = 0;
memset(scratch, 0, bufsize);
for (pr = bvec_test_ranges; pr->from >= 0; pr++, b++) {
u8 *p = scratch + pr->page * PAGE_SIZE;
for (i = pr->from; i < pr->to; i++)
p[i] = pattern(patt++);
}
/* Compare the images */
for (i = 0; i < bufsize; i++) {
KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
if (buffer[i] != scratch[i])
return;
}
KUNIT_SUCCEED();
}
/*
 * Test copying from an ITER_BVEC-type iterator.
*/
static void __init iov_kunit_copy_from_bvec(struct kunit *test)
{
const struct bvec_test_range *pr;
struct iov_iter iter;
struct bio_vec bvec[8];
struct page **spages, **bpages;
u8 *scratch, *buffer;
size_t bufsize, npages, size, copied;
int i, j;
bufsize = 0x100000;
npages = bufsize / PAGE_SIZE;
buffer = iov_kunit_create_buffer(test, &bpages, npages);
for (i = 0; i < bufsize; i++)
buffer[i] = pattern(i);
scratch = iov_kunit_create_buffer(test, &spages, npages);
memset(scratch, 0, bufsize);
iov_kunit_load_bvec(test, &iter, WRITE, bvec, ARRAY_SIZE(bvec),
bpages, npages, bufsize, bvec_test_ranges);
size = iter.count;
copied = copy_from_iter(scratch, size, &iter);
KUNIT_EXPECT_EQ(test, copied, size);
KUNIT_EXPECT_EQ(test, iter.count, 0);
KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);
/* Build the expected image in the main buffer. */
i = 0;
memset(buffer, 0, bufsize);
for (pr = bvec_test_ranges; pr->from >= 0; pr++) {
size_t patt = pr->page * PAGE_SIZE;
for (j = pr->from; j < pr->to; j++) {
buffer[i++] = pattern(patt + j);
if (i >= bufsize)
goto stop;
}
}
stop:
/* Compare the images */
for (i = 0; i < bufsize; i++) {
KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
if (scratch[i] != buffer[i])
return;
}
KUNIT_SUCCEED();
}
static void iov_kunit_destroy_xarray(void *data)
{
struct xarray *xarray = data;
xa_destroy(xarray);
kfree(xarray);
}
static void __init iov_kunit_load_xarray(struct kunit *test,
struct iov_iter *iter, int dir,
struct xarray *xarray,
struct page **pages, size_t npages)
{
size_t size = 0;
int i;
for (i = 0; i < npages; i++) {
void *x = xa_store(xarray, i, pages[i], GFP_KERNEL);
KUNIT_ASSERT_FALSE(test, xa_is_err(x));
size += PAGE_SIZE;
}
iov_iter_xarray(iter, dir, xarray, 0, size);
}
static struct xarray *iov_kunit_create_xarray(struct kunit *test)
{
struct xarray *xarray;
xarray = kzalloc(sizeof(struct xarray), GFP_KERNEL);
xa_init(xarray);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xarray);
kunit_add_action_or_reset(test, iov_kunit_destroy_xarray, xarray);
return xarray;
}
/*
 * Test copying to an ITER_XARRAY-type iterator.
*/
static void __init iov_kunit_copy_to_xarray(struct kunit *test)
{
const struct kvec_test_range *pr;
struct iov_iter iter;
struct xarray *xarray;
struct page **spages, **bpages;
u8 *scratch, *buffer;
size_t bufsize, npages, size, copied;
int i, patt;
bufsize = 0x100000;
npages = bufsize / PAGE_SIZE;
xarray = iov_kunit_create_xarray(test);
scratch = iov_kunit_create_buffer(test, &spages, npages);
for (i = 0; i < bufsize; i++)
scratch[i] = pattern(i);
buffer = iov_kunit_create_buffer(test, &bpages, npages);
memset(buffer, 0, bufsize);
iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
i = 0;
for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
size = pr->to - pr->from;
KUNIT_ASSERT_LE(test, pr->to, bufsize);
iov_iter_xarray(&iter, READ, xarray, pr->from, size);
copied = copy_to_iter(scratch + i, size, &iter);
KUNIT_EXPECT_EQ(test, copied, size);
KUNIT_EXPECT_EQ(test, iter.count, 0);
KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
i += size;
}
/* Build the expected image in the scratch buffer. */
patt = 0;
memset(scratch, 0, bufsize);
for (pr = kvec_test_ranges; pr->from >= 0; pr++)
for (i = pr->from; i < pr->to; i++)
scratch[i] = pattern(patt++);
/* Compare the images */
for (i = 0; i < bufsize; i++) {
KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
if (buffer[i] != scratch[i])
return;
}
KUNIT_SUCCEED();
}
/*
 * Test copying from an ITER_XARRAY-type iterator.
*/
static void __init iov_kunit_copy_from_xarray(struct kunit *test)
{
const struct kvec_test_range *pr;
struct iov_iter iter;
struct xarray *xarray;
struct page **spages, **bpages;
u8 *scratch, *buffer;
size_t bufsize, npages, size, copied;
int i, j;
bufsize = 0x100000;
npages = bufsize / PAGE_SIZE;
xarray = iov_kunit_create_xarray(test);
buffer = iov_kunit_create_buffer(test, &bpages, npages);
for (i = 0; i < bufsize; i++)
buffer[i] = pattern(i);
scratch = iov_kunit_create_buffer(test, &spages, npages);
memset(scratch, 0, bufsize);
iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
i = 0;
for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
size = pr->to - pr->from;
KUNIT_ASSERT_LE(test, pr->to, bufsize);
iov_iter_xarray(&iter, WRITE, xarray, pr->from, size);
copied = copy_from_iter(scratch + i, size, &iter);
KUNIT_EXPECT_EQ(test, copied, size);
KUNIT_EXPECT_EQ(test, iter.count, 0);
KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
i += size;
}
/* Build the expected image in the main buffer. */
i = 0;
memset(buffer, 0, bufsize);
for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
for (j = pr->from; j < pr->to; j++) {
buffer[i++] = pattern(j);
if (i >= bufsize)
goto stop;
}
}
stop:
/* Compare the images */
for (i = 0; i < bufsize; i++) {
KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
if (scratch[i] != buffer[i])
return;
}
KUNIT_SUCCEED();
}
/*
* Test the extraction of ITER_KVEC-type iterators.
*/
static void __init iov_kunit_extract_pages_kvec(struct kunit *test)
{
const struct kvec_test_range *pr;
struct iov_iter iter;
struct page **bpages, *pagelist[8], **pages = pagelist;
struct kvec kvec[8];
u8 *buffer;
ssize_t len;
size_t bufsize, size = 0, npages;
int i, from;
bufsize = 0x100000;
npages = bufsize / PAGE_SIZE;
buffer = iov_kunit_create_buffer(test, &bpages, npages);
iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
buffer, bufsize, kvec_test_ranges);
size = iter.count;
pr = kvec_test_ranges;
from = pr->from;
do {
size_t offset0 = LONG_MAX;
for (i = 0; i < ARRAY_SIZE(pagelist); i++)
pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
ARRAY_SIZE(pagelist), 0, &offset0);
KUNIT_EXPECT_GE(test, len, 0);
if (len < 0)
break;
KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
KUNIT_EXPECT_LE(test, len, size);
KUNIT_EXPECT_EQ(test, iter.count, size - len);
size -= len;
if (len == 0)
break;
for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
struct page *p;
ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
int ix;
KUNIT_ASSERT_GE(test, part, 0);
while (from == pr->to) {
pr++;
from = pr->from;
if (from < 0)
goto stop;
}
ix = from / PAGE_SIZE;
KUNIT_ASSERT_LT(test, ix, npages);
p = bpages[ix];
KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
from += part;
len -= part;
KUNIT_ASSERT_GE(test, len, 0);
if (len == 0)
break;
offset0 = 0;
}
if (test->status == KUNIT_FAILURE)
break;
} while (iov_iter_count(&iter) > 0);
stop:
KUNIT_EXPECT_EQ(test, size, 0);
KUNIT_EXPECT_EQ(test, iter.count, 0);
KUNIT_SUCCEED();
}
/*
* Test the extraction of ITER_BVEC-type iterators.
*/
static void __init iov_kunit_extract_pages_bvec(struct kunit *test)
{
const struct bvec_test_range *pr;
struct iov_iter iter;
struct page **bpages, *pagelist[8], **pages = pagelist;
struct bio_vec bvec[8];
ssize_t len;
size_t bufsize, size = 0, npages;
int i, from;
bufsize = 0x100000;
npages = bufsize / PAGE_SIZE;
iov_kunit_create_buffer(test, &bpages, npages);
iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
bpages, npages, bufsize, bvec_test_ranges);
size = iter.count;
pr = bvec_test_ranges;
from = pr->from;
do {
size_t offset0 = LONG_MAX;
for (i = 0; i < ARRAY_SIZE(pagelist); i++)
pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
ARRAY_SIZE(pagelist), 0, &offset0);
KUNIT_EXPECT_GE(test, len, 0);
if (len < 0)
break;
KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
KUNIT_EXPECT_LE(test, len, size);
KUNIT_EXPECT_EQ(test, iter.count, size - len);
size -= len;
if (len == 0)
break;
for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
struct page *p;
ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
int ix;
KUNIT_ASSERT_GE(test, part, 0);
while (from == pr->to) {
pr++;
from = pr->from;
if (from < 0)
goto stop;
}
ix = pr->page + from / PAGE_SIZE;
KUNIT_ASSERT_LT(test, ix, npages);
p = bpages[ix];
KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
from += part;
len -= part;
KUNIT_ASSERT_GE(test, len, 0);
if (len == 0)
break;
offset0 = 0;
}
if (test->status == KUNIT_FAILURE)
break;
} while (iov_iter_count(&iter) > 0);
stop:
KUNIT_EXPECT_EQ(test, size, 0);
KUNIT_EXPECT_EQ(test, iter.count, 0);
KUNIT_SUCCEED();
}
/*
* Test the extraction of ITER_XARRAY-type iterators.
*/
static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
{
const struct kvec_test_range *pr;
struct iov_iter iter;
struct xarray *xarray;
struct page **bpages, *pagelist[8], **pages = pagelist;
ssize_t len;
size_t bufsize, size = 0, npages;
int i, from;
bufsize = 0x100000;
npages = bufsize / PAGE_SIZE;
xarray = iov_kunit_create_xarray(test);
iov_kunit_create_buffer(test, &bpages, npages);
iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);
for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
from = pr->from;
size = pr->to - from;
KUNIT_ASSERT_LE(test, pr->to, bufsize);
iov_iter_xarray(&iter, WRITE, xarray, from, size);
do {
size_t offset0 = LONG_MAX;
for (i = 0; i < ARRAY_SIZE(pagelist); i++)
pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;
len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
ARRAY_SIZE(pagelist), 0, &offset0);
KUNIT_EXPECT_GE(test, len, 0);
if (len < 0)
break;
KUNIT_EXPECT_LE(test, len, size);
KUNIT_EXPECT_EQ(test, iter.count, size - len);
if (len == 0)
break;
size -= len;
KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
struct page *p;
ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
int ix;
KUNIT_ASSERT_GE(test, part, 0);
ix = from / PAGE_SIZE;
KUNIT_ASSERT_LT(test, ix, npages);
p = bpages[ix];
KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
from += part;
len -= part;
KUNIT_ASSERT_GE(test, len, 0);
if (len == 0)
break;
offset0 = 0;
}
if (test->status == KUNIT_FAILURE)
goto stop;
} while (iov_iter_count(&iter) > 0);
KUNIT_EXPECT_EQ(test, size, 0);
KUNIT_EXPECT_EQ(test, iter.count, 0);
KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to - pr->from);
}
stop:
KUNIT_SUCCEED();
}
static struct kunit_case __refdata iov_kunit_cases[] = {
KUNIT_CASE(iov_kunit_copy_to_kvec),
KUNIT_CASE(iov_kunit_copy_from_kvec),
KUNIT_CASE(iov_kunit_copy_to_bvec),
KUNIT_CASE(iov_kunit_copy_from_bvec),
KUNIT_CASE(iov_kunit_copy_to_xarray),
KUNIT_CASE(iov_kunit_copy_from_xarray),
KUNIT_CASE(iov_kunit_extract_pages_kvec),
KUNIT_CASE(iov_kunit_extract_pages_bvec),
KUNIT_CASE(iov_kunit_extract_pages_xarray),
{}
};
static struct kunit_suite iov_kunit_suite = {
.name = "iov_iter",
.test_cases = iov_kunit_cases,
};
kunit_test_suites(&iov_kunit_suite);
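/*
 * Editor's note (not part of the original file): as stated at the top,
 * this suite only covers kernel-backed iterator types; ITER_UBUF and
 * ITER_IOVEC need a user address space. Assuming the suite is enabled in
 * the KUnit config in use, one way to run it is via the wrapper script:
 *
 *	./tools/testing/kunit/kunit.py run 'iov_iter'
 */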
| linux-master | lib/kunit_iov_iter.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* lib/clz_ctz.c
*
* Copyright (C) 2013 Chanho Min <[email protected]>
*
* The functions in this file aren't called directly, but are required by
* GCC builtins such as __builtin_ctz, and therefore they can't be removed
* despite appearing unreferenced in kernel source.
*
* __c[lt]z[sd]i2 can be overridden by linking arch-specific versions.
*/
#include <linux/export.h>
#include <linux/kernel.h>
int __weak __ctzsi2(int val);
int __weak __ctzsi2(int val)
{
return __ffs(val);
}
EXPORT_SYMBOL(__ctzsi2);
int __weak __clzsi2(int val);
int __weak __clzsi2(int val)
{
return 32 - fls(val);
}
EXPORT_SYMBOL(__clzsi2);
int __weak __clzdi2(u64 val);
int __weak __clzdi2(u64 val)
{
return 64 - fls64(val);
}
EXPORT_SYMBOL(__clzdi2);
int __weak __ctzdi2(u64 val);
int __weak __ctzdi2(u64 val)
{
return __ffs64(val);
}
EXPORT_SYMBOL(__ctzdi2);
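/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a compiler may lower the builtin below to a call to __ctzsi2() on
 * targets without a native count-trailing-zeros instruction.
 */
#if 0
static int demo_trailing_zeros(unsigned int v)
{
	return __builtin_ctz(v);	/* e.g. __builtin_ctz(0x8) == 3 */
}
#endif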
| linux-master | lib/clz_ctz.c |
// SPDX-License-Identifier: GPL-2.0
/*
* NETLINK Netlink attributes
*
* Authors: Thomas Graf <[email protected]>
* Alexey Kuznetsov <[email protected]>
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/nospec.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/netlink.h>
/* For these data types, attribute length should be exactly the given
* size. However, to maintain compatibility with broken commands, if the
 * attribute length does not match the expected size, a warning is emitted
 * to the user that the command is sending invalid data and needs to be fixed.
*/
static const u8 nla_attr_len[NLA_TYPE_MAX+1] = {
[NLA_U8] = sizeof(u8),
[NLA_U16] = sizeof(u16),
[NLA_U32] = sizeof(u32),
[NLA_U64] = sizeof(u64),
[NLA_S8] = sizeof(s8),
[NLA_S16] = sizeof(s16),
[NLA_S32] = sizeof(s32),
[NLA_S64] = sizeof(s64),
};
static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
[NLA_U8] = sizeof(u8),
[NLA_U16] = sizeof(u16),
[NLA_U32] = sizeof(u32),
[NLA_U64] = sizeof(u64),
[NLA_MSECS] = sizeof(u64),
[NLA_NESTED] = NLA_HDRLEN,
[NLA_S8] = sizeof(s8),
[NLA_S16] = sizeof(s16),
[NLA_S32] = sizeof(s32),
[NLA_S64] = sizeof(s64),
};
/*
* Nested policies might refer back to the original
* policy in some cases, and userspace could try to
* abuse that and recurse by nesting in the right
* ways. Limit recursion to avoid this problem.
*/
#define MAX_POLICY_RECURSION_DEPTH 10
static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy,
unsigned int validate,
struct netlink_ext_ack *extack,
struct nlattr **tb, unsigned int depth);
static int validate_nla_bitfield32(const struct nlattr *nla,
const u32 valid_flags_mask)
{
const struct nla_bitfield32 *bf = nla_data(nla);
if (!valid_flags_mask)
return -EINVAL;
	/* disallow invalid bit selector */
if (bf->selector & ~valid_flags_mask)
return -EINVAL;
	/* disallow invalid bit values */
if (bf->value & ~valid_flags_mask)
return -EINVAL;
	/* disallow valid bit values that are not selected */
if (bf->value & ~bf->selector)
return -EINVAL;
return 0;
}
static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack,
unsigned int validate, unsigned int depth)
{
const struct nlattr *entry;
int rem;
nla_for_each_attr(entry, head, len, rem) {
int ret;
if (nla_len(entry) == 0)
continue;
if (nla_len(entry) < NLA_HDRLEN) {
NL_SET_ERR_MSG_ATTR_POL(extack, entry, policy,
"Array element too short");
return -ERANGE;
}
ret = __nla_validate_parse(nla_data(entry), nla_len(entry),
maxtype, policy, validate, extack,
NULL, depth + 1);
if (ret < 0)
return ret;
}
return 0;
}
void nla_get_range_unsigned(const struct nla_policy *pt,
struct netlink_range_validation *range)
{
WARN_ON_ONCE(pt->validation_type != NLA_VALIDATE_RANGE_PTR &&
(pt->min < 0 || pt->max < 0));
range->min = 0;
switch (pt->type) {
case NLA_U8:
range->max = U8_MAX;
break;
case NLA_U16:
case NLA_BE16:
case NLA_BINARY:
range->max = U16_MAX;
break;
case NLA_U32:
case NLA_BE32:
range->max = U32_MAX;
break;
case NLA_U64:
case NLA_MSECS:
range->max = U64_MAX;
break;
default:
WARN_ON_ONCE(1);
return;
}
switch (pt->validation_type) {
case NLA_VALIDATE_RANGE:
case NLA_VALIDATE_RANGE_WARN_TOO_LONG:
range->min = pt->min;
range->max = pt->max;
break;
case NLA_VALIDATE_RANGE_PTR:
*range = *pt->range;
break;
case NLA_VALIDATE_MIN:
range->min = pt->min;
break;
case NLA_VALIDATE_MAX:
range->max = pt->max;
break;
default:
break;
}
}
static int nla_validate_range_unsigned(const struct nla_policy *pt,
const struct nlattr *nla,
struct netlink_ext_ack *extack,
unsigned int validate)
{
struct netlink_range_validation range;
u64 value;
switch (pt->type) {
case NLA_U8:
value = nla_get_u8(nla);
break;
case NLA_U16:
value = nla_get_u16(nla);
break;
case NLA_U32:
value = nla_get_u32(nla);
break;
case NLA_U64:
value = nla_get_u64(nla);
break;
case NLA_MSECS:
value = nla_get_u64(nla);
break;
case NLA_BINARY:
value = nla_len(nla);
break;
case NLA_BE16:
value = ntohs(nla_get_be16(nla));
break;
case NLA_BE32:
value = ntohl(nla_get_be32(nla));
break;
default:
return -EINVAL;
}
nla_get_range_unsigned(pt, &range);
if (pt->validation_type == NLA_VALIDATE_RANGE_WARN_TOO_LONG &&
pt->type == NLA_BINARY && value > range.max) {
pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
current->comm, pt->type);
if (validate & NL_VALIDATE_STRICT_ATTRS) {
NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
"invalid attribute length");
return -EINVAL;
}
/* this assumes min <= max (don't validate against min) */
return 0;
}
if (value < range.min || value > range.max) {
bool binary = pt->type == NLA_BINARY;
if (binary)
NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
"binary attribute size out of range");
else
NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
"integer out of range");
return -ERANGE;
}
return 0;
}
void nla_get_range_signed(const struct nla_policy *pt,
struct netlink_range_validation_signed *range)
{
switch (pt->type) {
case NLA_S8:
range->min = S8_MIN;
range->max = S8_MAX;
break;
case NLA_S16:
range->min = S16_MIN;
range->max = S16_MAX;
break;
case NLA_S32:
range->min = S32_MIN;
range->max = S32_MAX;
break;
case NLA_S64:
range->min = S64_MIN;
range->max = S64_MAX;
break;
default:
WARN_ON_ONCE(1);
return;
}
switch (pt->validation_type) {
case NLA_VALIDATE_RANGE:
range->min = pt->min;
range->max = pt->max;
break;
case NLA_VALIDATE_RANGE_PTR:
*range = *pt->range_signed;
break;
case NLA_VALIDATE_MIN:
range->min = pt->min;
break;
case NLA_VALIDATE_MAX:
range->max = pt->max;
break;
default:
break;
}
}
static int nla_validate_int_range_signed(const struct nla_policy *pt,
const struct nlattr *nla,
struct netlink_ext_ack *extack)
{
struct netlink_range_validation_signed range;
s64 value;
switch (pt->type) {
case NLA_S8:
value = nla_get_s8(nla);
break;
case NLA_S16:
value = nla_get_s16(nla);
break;
case NLA_S32:
value = nla_get_s32(nla);
break;
case NLA_S64:
value = nla_get_s64(nla);
break;
default:
return -EINVAL;
}
nla_get_range_signed(pt, &range);
if (value < range.min || value > range.max) {
NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
"integer out of range");
return -ERANGE;
}
return 0;
}
static int nla_validate_int_range(const struct nla_policy *pt,
const struct nlattr *nla,
struct netlink_ext_ack *extack,
unsigned int validate)
{
switch (pt->type) {
case NLA_U8:
case NLA_U16:
case NLA_U32:
case NLA_U64:
case NLA_MSECS:
case NLA_BINARY:
case NLA_BE16:
case NLA_BE32:
return nla_validate_range_unsigned(pt, nla, extack, validate);
case NLA_S8:
case NLA_S16:
case NLA_S32:
case NLA_S64:
return nla_validate_int_range_signed(pt, nla, extack);
default:
WARN_ON(1);
return -EINVAL;
}
}
static int nla_validate_mask(const struct nla_policy *pt,
const struct nlattr *nla,
struct netlink_ext_ack *extack)
{
u64 value;
switch (pt->type) {
case NLA_U8:
value = nla_get_u8(nla);
break;
case NLA_U16:
value = nla_get_u16(nla);
break;
case NLA_U32:
value = nla_get_u32(nla);
break;
case NLA_U64:
value = nla_get_u64(nla);
break;
case NLA_BE16:
value = ntohs(nla_get_be16(nla));
break;
case NLA_BE32:
value = ntohl(nla_get_be32(nla));
break;
default:
return -EINVAL;
}
if (value & ~(u64)pt->mask) {
NL_SET_ERR_MSG_ATTR(extack, nla, "reserved bit set");
return -EINVAL;
}
return 0;
}
static int validate_nla(const struct nlattr *nla, int maxtype,
const struct nla_policy *policy, unsigned int validate,
struct netlink_ext_ack *extack, unsigned int depth)
{
u16 strict_start_type = policy[0].strict_start_type;
const struct nla_policy *pt;
int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);
int err = -ERANGE;
if (strict_start_type && type >= strict_start_type)
validate |= NL_VALIDATE_STRICT;
if (type <= 0 || type > maxtype)
return 0;
type = array_index_nospec(type, maxtype + 1);
pt = &policy[type];
BUG_ON(pt->type > NLA_TYPE_MAX);
if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) {
pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
current->comm, type);
if (validate & NL_VALIDATE_STRICT_ATTRS) {
NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
"invalid attribute length");
return -EINVAL;
}
}
if (validate & NL_VALIDATE_NESTED) {
if ((pt->type == NLA_NESTED || pt->type == NLA_NESTED_ARRAY) &&
!(nla->nla_type & NLA_F_NESTED)) {
NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
"NLA_F_NESTED is missing");
return -EINVAL;
}
if (pt->type != NLA_NESTED && pt->type != NLA_NESTED_ARRAY &&
pt->type != NLA_UNSPEC && (nla->nla_type & NLA_F_NESTED)) {
NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
"NLA_F_NESTED not expected");
return -EINVAL;
}
}
switch (pt->type) {
case NLA_REJECT:
if (extack && pt->reject_message) {
NL_SET_BAD_ATTR(extack, nla);
extack->_msg = pt->reject_message;
return -EINVAL;
}
err = -EINVAL;
goto out_err;
case NLA_FLAG:
if (attrlen > 0)
goto out_err;
break;
case NLA_BITFIELD32:
if (attrlen != sizeof(struct nla_bitfield32))
goto out_err;
err = validate_nla_bitfield32(nla, pt->bitfield32_valid);
if (err)
goto out_err;
break;
case NLA_NUL_STRING:
if (pt->len)
minlen = min_t(int, attrlen, pt->len + 1);
else
minlen = attrlen;
if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL) {
err = -EINVAL;
goto out_err;
}
fallthrough;
case NLA_STRING:
if (attrlen < 1)
goto out_err;
if (pt->len) {
char *buf = nla_data(nla);
if (buf[attrlen - 1] == '\0')
attrlen--;
if (attrlen > pt->len)
goto out_err;
}
break;
case NLA_BINARY:
if (pt->len && attrlen > pt->len)
goto out_err;
break;
case NLA_NESTED:
	/* a nested attribute is allowed to be empty; if it's not,
* it must have a size of at least NLA_HDRLEN.
*/
if (attrlen == 0)
break;
if (attrlen < NLA_HDRLEN)
goto out_err;
if (pt->nested_policy) {
err = __nla_validate_parse(nla_data(nla), nla_len(nla),
pt->len, pt->nested_policy,
validate, extack, NULL,
depth + 1);
if (err < 0) {
/*
* return directly to preserve the inner
* error message/attribute pointer
*/
return err;
}
}
break;
case NLA_NESTED_ARRAY:
	/* a nested array attribute is allowed to be empty; if it's not,
* it must have a size of at least NLA_HDRLEN.
*/
if (attrlen == 0)
break;
if (attrlen < NLA_HDRLEN)
goto out_err;
if (pt->nested_policy) {
int err;
err = nla_validate_array(nla_data(nla), nla_len(nla),
pt->len, pt->nested_policy,
extack, validate, depth);
if (err < 0) {
/*
* return directly to preserve the inner
* error message/attribute pointer
*/
return err;
}
}
break;
case NLA_UNSPEC:
if (validate & NL_VALIDATE_UNSPEC) {
NL_SET_ERR_MSG_ATTR(extack, nla,
"Unsupported attribute");
return -EINVAL;
}
if (attrlen < pt->len)
goto out_err;
break;
default:
if (pt->len)
minlen = pt->len;
else
minlen = nla_attr_minlen[pt->type];
if (attrlen < minlen)
goto out_err;
}
/* further validation */
switch (pt->validation_type) {
case NLA_VALIDATE_NONE:
/* nothing to do */
break;
case NLA_VALIDATE_RANGE_PTR:
case NLA_VALIDATE_RANGE:
case NLA_VALIDATE_RANGE_WARN_TOO_LONG:
case NLA_VALIDATE_MIN:
case NLA_VALIDATE_MAX:
err = nla_validate_int_range(pt, nla, extack, validate);
if (err)
return err;
break;
case NLA_VALIDATE_MASK:
err = nla_validate_mask(pt, nla, extack);
if (err)
return err;
break;
case NLA_VALIDATE_FUNCTION:
if (pt->validate) {
err = pt->validate(nla, extack);
if (err)
return err;
}
break;
}
return 0;
out_err:
NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
"Attribute failed policy validation");
return err;
}
static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy,
unsigned int validate,
struct netlink_ext_ack *extack,
struct nlattr **tb, unsigned int depth)
{
const struct nlattr *nla;
int rem;
if (depth >= MAX_POLICY_RECURSION_DEPTH) {
NL_SET_ERR_MSG(extack,
"allowed policy recursion depth exceeded");
return -EINVAL;
}
if (tb)
memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
nla_for_each_attr(nla, head, len, rem) {
u16 type = nla_type(nla);
if (type == 0 || type > maxtype) {
if (validate & NL_VALIDATE_MAXTYPE) {
NL_SET_ERR_MSG_ATTR(extack, nla,
"Unknown attribute type");
return -EINVAL;
}
continue;
}
type = array_index_nospec(type, maxtype + 1);
if (policy) {
int err = validate_nla(nla, maxtype, policy,
validate, extack, depth);
if (err < 0)
return err;
}
if (tb)
tb[type] = (struct nlattr *)nla;
}
if (unlikely(rem > 0)) {
pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
rem, current->comm);
NL_SET_ERR_MSG(extack, "bytes leftover after parsing attributes");
if (validate & NL_VALIDATE_TRAILING)
return -EINVAL;
}
return 0;
}
/**
* __nla_validate - Validate a stream of attributes
* @head: head of attribute stream
* @len: length of attribute stream
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
* @validate: validation strictness
* @extack: extended ACK report struct
*
* Validates all attributes in the specified attribute stream against the
* specified policy. Validation depends on the validate flags passed, see
* &enum netlink_validation for more details on that.
* See documentation of struct nla_policy for more details.
*
* Returns 0 on success or a negative error code.
*/
int __nla_validate(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy, unsigned int validate,
struct netlink_ext_ack *extack)
{
return __nla_validate_parse(head, len, maxtype, policy, validate,
extack, NULL, 0);
}
EXPORT_SYMBOL(__nla_validate);
/**
* nla_policy_len - Determine the max. length of a policy
* @p: policy to use
* @n: number of policies
*
 * Determines the max. length of the policy. It is currently used
 * to allocate Netlink buffers roughly the size of the actual
 * message.
 *
 * Returns the maximum total length of the attributes, in bytes.
*/
int
nla_policy_len(const struct nla_policy *p, int n)
{
int i, len = 0;
for (i = 0; i < n; i++, p++) {
if (p->len)
len += nla_total_size(p->len);
else if (nla_attr_len[p->type])
len += nla_total_size(nla_attr_len[p->type]);
else if (nla_attr_minlen[p->type])
len += nla_total_size(nla_attr_minlen[p->type]);
}
return len;
}
EXPORT_SYMBOL(nla_policy_len);
/**
* __nla_parse - Parse a stream of attributes into a tb buffer
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
* @head: head of attribute stream
* @len: length of attribute stream
* @policy: validation policy
* @validate: validation strictness
* @extack: extended ACK pointer
*
* Parses a stream of attributes and stores a pointer to each attribute in
* the tb array accessible via the attribute type.
* Validation is controlled by the @validate parameter.
*
* Returns 0 on success or a negative error code.
*/
int __nla_parse(struct nlattr **tb, int maxtype,
const struct nlattr *head, int len,
const struct nla_policy *policy, unsigned int validate,
struct netlink_ext_ack *extack)
{
return __nla_validate_parse(head, len, maxtype, policy, validate,
extack, tb, 0);
}
EXPORT_SYMBOL(__nla_parse);
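/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a hypothetical policy table parsed through the nla_parse() wrapper from
 * net/netlink.h. The DEMO_A_* attributes and bounds are assumptions.
 */
#if 0
enum { DEMO_A_UNSPEC, DEMO_A_PORT, DEMO_A_NAME, __DEMO_A_MAX };
#define DEMO_A_MAX (__DEMO_A_MAX - 1)

static const struct nla_policy demo_policy[DEMO_A_MAX + 1] = {
	[DEMO_A_PORT] = NLA_POLICY_RANGE(NLA_U16, 1, 1024),
	[DEMO_A_NAME] = { .type = NLA_NUL_STRING, .len = 15 },
};

static int demo_parse(const struct nlattr *head, int len,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[DEMO_A_MAX + 1];
	int err;

	err = nla_parse(tb, DEMO_A_MAX, head, len, demo_policy, extack);
	if (err)
		return err;
	/* tb[DEMO_A_PORT], if present, is already length- and range-checked. */
	return 0;
}
#endif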
/**
* nla_find - Find a specific attribute in a stream of attributes
* @head: head of attribute stream
* @len: length of attribute stream
* @attrtype: type of attribute to look for
*
* Returns the first attribute in the stream matching the specified type.
*/
struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype)
{
const struct nlattr *nla;
int rem;
nla_for_each_attr(nla, head, len, rem)
if (nla_type(nla) == attrtype)
return (struct nlattr *)nla;
return NULL;
}
EXPORT_SYMBOL(nla_find);
/**
* nla_strscpy - Copy string attribute payload into a sized buffer
* @dst: Where to copy the string to.
* @nla: Attribute to copy the string from.
* @dstsize: Size of destination buffer.
*
 * Copies at most dstsize - 1 bytes into the destination buffer.
 * Unlike strlcpy(), the destination buffer is always padded out.
*
* Return:
* * srclen - Returns @nla length (not including the trailing %NUL).
* * -E2BIG - If @dstsize is 0 or greater than U16_MAX or @nla length greater
* than @dstsize.
*/
ssize_t nla_strscpy(char *dst, const struct nlattr *nla, size_t dstsize)
{
size_t srclen = nla_len(nla);
char *src = nla_data(nla);
ssize_t ret;
size_t len;
if (dstsize == 0 || WARN_ON_ONCE(dstsize > U16_MAX))
return -E2BIG;
if (srclen > 0 && src[srclen - 1] == '\0')
srclen--;
if (srclen >= dstsize) {
len = dstsize - 1;
ret = -E2BIG;
} else {
len = srclen;
ret = len;
}
memcpy(dst, src, len);
/* Zero pad end of dst. */
memset(dst + len, 0, dstsize - len);
return ret;
}
EXPORT_SYMBOL(nla_strscpy);
/**
* nla_strdup - Copy string attribute payload into a newly allocated buffer
* @nla: attribute to copy the string from
* @flags: the type of memory to allocate (see kmalloc).
*
* Returns a pointer to the allocated buffer or NULL on error.
*/
char *nla_strdup(const struct nlattr *nla, gfp_t flags)
{
size_t srclen = nla_len(nla);
char *src = nla_data(nla), *dst;
if (srclen > 0 && src[srclen - 1] == '\0')
srclen--;
dst = kmalloc(srclen + 1, flags);
if (dst != NULL) {
memcpy(dst, src, srclen);
dst[srclen] = '\0';
}
return dst;
}
EXPORT_SYMBOL(nla_strdup);
/**
* nla_memcpy - Copy a netlink attribute into another memory area
 * @dest: where to copy to
* @src: netlink attribute to copy from
* @count: size of the destination area
*
 * Note: The number of bytes copied is limited by the length of the
 * attribute's payload.
*
* Returns the number of bytes copied.
*/
int nla_memcpy(void *dest, const struct nlattr *src, int count)
{
int minlen = min_t(int, count, nla_len(src));
memcpy(dest, nla_data(src), minlen);
if (count > minlen)
memset(dest + minlen, 0, count - minlen);
return minlen;
}
EXPORT_SYMBOL(nla_memcpy);
/**
* nla_memcmp - Compare an attribute with sized memory area
* @nla: netlink attribute
* @data: memory area
* @size: size of memory area
*/
int nla_memcmp(const struct nlattr *nla, const void *data,
size_t size)
{
int d = nla_len(nla) - size;
if (d == 0)
d = memcmp(nla_data(nla), data, size);
return d;
}
EXPORT_SYMBOL(nla_memcmp);
/**
* nla_strcmp - Compare a string attribute against a string
* @nla: netlink string attribute
* @str: another string
*/
int nla_strcmp(const struct nlattr *nla, const char *str)
{
int len = strlen(str);
char *buf = nla_data(nla);
int attrlen = nla_len(nla);
int d;
while (attrlen > 0 && buf[attrlen - 1] == '\0')
attrlen--;
d = attrlen - len;
if (d == 0)
d = memcmp(nla_data(nla), str, len);
return d;
}
EXPORT_SYMBOL(nla_strcmp);
#ifdef CONFIG_NET
/**
* __nla_reserve - reserve room for attribute on the skb
* @skb: socket buffer to reserve room on
* @attrtype: attribute type
* @attrlen: length of attribute payload
*
* Adds a netlink attribute header to a socket buffer and reserves
* room for the payload but does not copy it.
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the attribute header and payload.
*/
struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
{
struct nlattr *nla;
nla = skb_put(skb, nla_total_size(attrlen));
nla->nla_type = attrtype;
nla->nla_len = nla_attr_size(attrlen);
memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen));
return nla;
}
EXPORT_SYMBOL(__nla_reserve);
/**
* __nla_reserve_64bit - reserve room for attribute on the skb and align it
* @skb: socket buffer to reserve room on
* @attrtype: attribute type
* @attrlen: length of attribute payload
* @padattr: attribute type for the padding
*
* Adds a netlink attribute header to a socket buffer and reserves
 * room for the payload but does not copy it. It also ensures that this
* attribute will have a 64-bit aligned nla_data() area.
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the attribute header and payload.
*/
struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
int attrlen, int padattr)
{
nla_align_64bit(skb, padattr);
return __nla_reserve(skb, attrtype, attrlen);
}
EXPORT_SYMBOL(__nla_reserve_64bit);
/**
* __nla_reserve_nohdr - reserve room for attribute without header
* @skb: socket buffer to reserve room on
* @attrlen: length of attribute payload
*
* Reserves room for attribute payload without a header.
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the payload.
*/
void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
{
return skb_put_zero(skb, NLA_ALIGN(attrlen));
}
EXPORT_SYMBOL(__nla_reserve_nohdr);
/**
* nla_reserve - reserve room for attribute on the skb
* @skb: socket buffer to reserve room on
* @attrtype: attribute type
* @attrlen: length of attribute payload
*
* Adds a netlink attribute header to a socket buffer and reserves
* room for the payload but does not copy it.
*
* Returns NULL if the tailroom of the skb is insufficient to store
* the attribute header and payload.
*/
struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
{
if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
return NULL;
return __nla_reserve(skb, attrtype, attrlen);
}
EXPORT_SYMBOL(nla_reserve);
/**
* nla_reserve_64bit - reserve room for attribute on the skb and align it
* @skb: socket buffer to reserve room on
* @attrtype: attribute type
* @attrlen: length of attribute payload
* @padattr: attribute type for the padding
*
* Adds a netlink attribute header to a socket buffer and reserves
 * room for the payload but does not copy it. It also ensures that this
* attribute will have a 64-bit aligned nla_data() area.
*
* Returns NULL if the tailroom of the skb is insufficient to store
* the attribute header and payload.
*/
struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype, int attrlen,
int padattr)
{
size_t len;
if (nla_need_padding_for_64bit(skb))
len = nla_total_size_64bit(attrlen);
else
len = nla_total_size(attrlen);
if (unlikely(skb_tailroom(skb) < len))
return NULL;
return __nla_reserve_64bit(skb, attrtype, attrlen, padattr);
}
EXPORT_SYMBOL(nla_reserve_64bit);
/**
* nla_reserve_nohdr - reserve room for attribute without header
* @skb: socket buffer to reserve room on
* @attrlen: length of attribute payload
*
* Reserves room for attribute payload without a header.
*
* Returns NULL if the tailroom of the skb is insufficient to store
* the attribute payload.
*/
void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
{
if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
return NULL;
return __nla_reserve_nohdr(skb, attrlen);
}
EXPORT_SYMBOL(nla_reserve_nohdr);
/**
* __nla_put - Add a netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the attribute header and payload.
*/
void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
const void *data)
{
struct nlattr *nla;
nla = __nla_reserve(skb, attrtype, attrlen);
memcpy(nla_data(nla), data, attrlen);
}
EXPORT_SYMBOL(__nla_put);
/**
* __nla_put_64bit - Add a netlink attribute to a socket buffer and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @attrlen: length of attribute payload
* @data: head of attribute payload
* @padattr: attribute type for the padding
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the attribute header and payload.
*/
void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
const void *data, int padattr)
{
struct nlattr *nla;
nla = __nla_reserve_64bit(skb, attrtype, attrlen, padattr);
memcpy(nla_data(nla), data, attrlen);
}
EXPORT_SYMBOL(__nla_put_64bit);
/**
* __nla_put_nohdr - Add a netlink attribute without header
* @skb: socket buffer to add attribute to
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
 * The caller is responsible for ensuring that the skb provides enough
* tailroom for the attribute payload.
*/
void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
{
void *start;
start = __nla_reserve_nohdr(skb, attrlen);
memcpy(start, data, attrlen);
}
EXPORT_SYMBOL(__nla_put_nohdr);
/**
* nla_put - Add a netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
* the attribute header and payload.
*/
int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
{
if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
return -EMSGSIZE;
__nla_put(skb, attrtype, attrlen, data);
return 0;
}
EXPORT_SYMBOL(nla_put);
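/*
 * Illustrative sketch (not part of the original source): a typical dump
 * callback checks nla_put() and propagates -EMSGSIZE so the caller can
 * retry with a larger buffer. MY_ATTR_FOO is a hypothetical attribute
 * type.
 *
 *	u32 val = 42;
 *
 *	if (nla_put(skb, MY_ATTR_FOO, sizeof(val), &val))
 *		goto nla_put_failure;
 */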
/**
* nla_put_64bit - Add a netlink attribute to a socket buffer and align it
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @attrlen: length of attribute payload
* @data: head of attribute payload
* @padattr: attribute type for the padding
*
* Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
* the attribute header and payload.
*/
int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
const void *data, int padattr)
{
size_t len;
if (nla_need_padding_for_64bit(skb))
len = nla_total_size_64bit(attrlen);
else
len = nla_total_size(attrlen);
if (unlikely(skb_tailroom(skb) < len))
return -EMSGSIZE;
__nla_put_64bit(skb, attrtype, attrlen, data, padattr);
return 0;
}
EXPORT_SYMBOL(nla_put_64bit);
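/*
 * Illustrative note (not part of the original source): when the skb tail
 * is not 8-byte aligned, __nla_reserve_64bit() first emits a pad
 * attribute of type @padattr so that nla_data() of the real attribute
 * lands on a 64-bit boundary, e.g.:
 *
 *	u64 bytes = 42;
 *
 *	if (nla_put_64bit(skb, MY_ATTR_BYTES, sizeof(bytes), &bytes,
 *			  MY_ATTR_PAD))
 *		goto nla_put_failure;
 *
 * MY_ATTR_BYTES and MY_ATTR_PAD are hypothetical attribute types; real
 * users typically define a dedicated *_PAD attribute in their uAPI.
 */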
/**
* nla_put_nohdr - Add a netlink attribute without header
* @skb: socket buffer to add attribute to
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
* the attribute payload.
*/
int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
{
if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
return -EMSGSIZE;
__nla_put_nohdr(skb, attrlen, data);
return 0;
}
EXPORT_SYMBOL(nla_put_nohdr);
/**
* nla_append - Add a netlink attribute without header or padding
* @skb: socket buffer to add attribute to
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
* the attribute payload.
*/
int nla_append(struct sk_buff *skb, int attrlen, const void *data)
{
if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
return -EMSGSIZE;
skb_put_data(skb, data, attrlen);
return 0;
}
EXPORT_SYMBOL(nla_append);
#endif
| linux-master | lib/nlattr.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
/*
* Merge two NULL-terminated pointer arrays into a newly allocated
* array, which is also NULL-terminated. Nomenclature is inspired by
* memset_p() and memcat() found elsewhere in the kernel source tree.
*/
void **__memcat_p(void **a, void **b)
{
void **p = a, **new;
int nr;
/* count the elements in both arrays */
for (nr = 0, p = a; *p; nr++, p++)
;
for (p = b; *p; nr++, p++)
;
/* one for the NULL-terminator */
nr++;
new = kmalloc_array(nr, sizeof(void *), GFP_KERNEL);
if (!new)
return NULL;
	/*
	 * Copy backwards: nr is now the last index and p points at the NULL
	 * terminator of b[]. Once p reaches the start of b[], jump to the
	 * corresponding tail element of a[].
	 */
for (nr--; nr >= 0; nr--, p = p == b ? &a[nr] : p - 1)
new[nr] = *p;
return new;
}
EXPORT_SYMBOL_GPL(__memcat_p);
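/*
 * Worked example (illustrative, not part of the original source): with
 * a = { &x, &y, NULL } and b = { &z, NULL }, nr counts 2 + 1 elements
 * plus one terminator slot. The backwards copy then fills:
 *
 *	new[3] = NULL	(p at b's terminator)
 *	new[2] = &z	(p reaches b, so jump to &a[1])
 *	new[1] = &y
 *	new[0] = &x
 *
 * yielding { &x, &y, &z, NULL }.
 */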
| linux-master | lib/memcat_p.c |
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/percpu-refcount.h>
/*
 * Initially, a percpu refcount is just a set of percpu counters. In this mode
 * we don't try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine; when we go to shutdown,
 * the percpu counters will all sum to the correct value
*
* (More precisely: because modular arithmetic is commutative the sum of all the
* percpu_count vars will be equal to what it would have been if all the gets
* and puts were done to a single integer, even if some of the percpu integers
* overflow or underflow).
*
* The real trick to implementing percpu refcounts is shutdown. We can't detect
* the ref hitting 0 on every put - this would require global synchronization
* and defeat the whole purpose of using percpu refs.
*
* What we do is require the user to keep track of the initial refcount; we know
* the ref can't hit 0 before the user drops the initial ref, so as long as we
* convert to non percpu mode before the initial ref is dropped everything
* works.
*
* Converting to non percpu mode is done with some RCUish stuff in
* percpu_ref_kill. Additionally, we need a bias value so that the
* atomic_long_t can't hit 0 before we've added up all the percpu refs.
*/
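/*
 * Illustrative lifecycle sketch (not part of the original source);
 * my_release is a hypothetical release callback:
 *
 *	percpu_ref_init(&ref, my_release, 0, GFP_KERNEL);
 *	percpu_ref_get(&ref);	percpu increment, no atomics
 *	percpu_ref_put(&ref);	percpu decrement, no atomics
 *	percpu_ref_kill(&ref);	drop the initial ref, switch to atomic
 *
 * Once every remaining put has happened, my_release(&ref) is invoked.
 */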
#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
return (unsigned long __percpu *)
(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}
/**
* percpu_ref_init - initialize a percpu refcount
* @ref: percpu_ref to initialize
* @release: function which will be called when refcount hits 0
* @flags: PERCPU_REF_INIT_* flags
* @gfp: allocation mask to use
*
* Initializes @ref. @ref starts out in percpu mode with a refcount of 1 unless
* @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD. These flags
 * change the start state to atomic mode, with the latter also setting the initial refcount
* to 0. See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
*
* Note that @release must not sleep - it may potentially be called from RCU
* callback context by percpu_ref_kill().
*/
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
unsigned int flags, gfp_t gfp)
{
size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
__alignof__(unsigned long));
unsigned long start_count = 0;
struct percpu_ref_data *data;
ref->percpu_count_ptr = (unsigned long)
__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
if (!ref->percpu_count_ptr)
return -ENOMEM;
data = kzalloc(sizeof(*ref->data), gfp);
if (!data) {
free_percpu((void __percpu *)ref->percpu_count_ptr);
ref->percpu_count_ptr = 0;
return -ENOMEM;
}
data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;
if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
data->allow_reinit = true;
} else {
start_count += PERCPU_COUNT_BIAS;
}
if (flags & PERCPU_REF_INIT_DEAD)
ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
else
start_count++;
atomic_long_set(&data->count, start_count);
data->release = release;
data->confirm_switch = NULL;
data->ref = ref;
ref->data = data;
return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
static void __percpu_ref_exit(struct percpu_ref *ref)
{
unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
if (percpu_count) {
/* non-NULL confirm_switch indicates switching in progress */
WARN_ON_ONCE(ref->data && ref->data->confirm_switch);
free_percpu(percpu_count);
ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
}
}
/**
* percpu_ref_exit - undo percpu_ref_init()
* @ref: percpu_ref to exit
*
* This function exits @ref. The caller is responsible for ensuring that
* @ref is no longer in active use. The usual places to invoke this
 * function from are the @ref->release() callback or the init failure path
* where percpu_ref_init() succeeded but other parts of the initialization
* of the embedding object failed.
*/
void percpu_ref_exit(struct percpu_ref *ref)
{
struct percpu_ref_data *data = ref->data;
unsigned long flags;
__percpu_ref_exit(ref);
if (!data)
return;
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
ref->percpu_count_ptr |= atomic_long_read(&ref->data->count) <<
__PERCPU_REF_FLAG_BITS;
ref->data = NULL;
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
kfree(data);
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
struct percpu_ref_data *data = container_of(rcu,
struct percpu_ref_data, rcu);
struct percpu_ref *ref = data->ref;
data->confirm_switch(ref);
data->confirm_switch = NULL;
wake_up_all(&percpu_ref_switch_waitq);
if (!data->allow_reinit)
__percpu_ref_exit(ref);
/* drop ref from percpu_ref_switch_to_atomic() */
percpu_ref_put(ref);
}
static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
struct percpu_ref_data *data = container_of(rcu,
struct percpu_ref_data, rcu);
struct percpu_ref *ref = data->ref;
unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
static atomic_t underflows;
unsigned long count = 0;
int cpu;
for_each_possible_cpu(cpu)
count += *per_cpu_ptr(percpu_count, cpu);
pr_debug("global %lu percpu %lu\n",
atomic_long_read(&data->count), count);
/*
* It's crucial that we sum the percpu counters _before_ adding the sum
* to &ref->count; since gets could be happening on one cpu while puts
* happen on another, adding a single cpu's count could cause
* @ref->count to hit 0 before we've got a consistent value - but the
* sum of all the counts will be consistent and correct.
*
* Subtracting the bias value then has to happen _after_ adding count to
* &ref->count; we need the bias value to prevent &ref->count from
* reaching 0 before we add the percpu counts. But doing it at the same
* time is equivalent and saves us atomic operations:
*/
atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);
if (WARN_ONCE(atomic_long_read(&data->count) <= 0,
"percpu ref (%ps) <= 0 (%ld) after switching to atomic",
data->release, atomic_long_read(&data->count)) &&
atomic_inc_return(&underflows) < 4) {
pr_err("%s(): percpu_ref underflow", __func__);
mem_dump_obj(data);
}
/* @ref is viewed as dead on all CPUs, send out switch confirmation */
percpu_ref_call_confirm_rcu(rcu);
}
static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}
static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
percpu_ref_func_t *confirm_switch)
{
if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
if (confirm_switch)
confirm_switch(ref);
return;
}
/* switching from percpu to atomic */
ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
/*
* Non-NULL ->confirm_switch is used to indicate that switching is
* in progress. Use noop one if unspecified.
*/
ref->data->confirm_switch = confirm_switch ?:
percpu_ref_noop_confirm_switch;
percpu_ref_get(ref); /* put after confirmation */
call_rcu_hurry(&ref->data->rcu,
percpu_ref_switch_to_atomic_rcu);
}
static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
int cpu;
BUG_ON(!percpu_count);
if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
return;
if (WARN_ON_ONCE(!ref->data->allow_reinit))
return;
atomic_long_add(PERCPU_COUNT_BIAS, &ref->data->count);
/*
* Restore per-cpu operation. smp_store_release() is paired
* with READ_ONCE() in __ref_is_percpu() and guarantees that the
* zeroing is visible to all percpu accesses which can see the
* following __PERCPU_REF_ATOMIC clearing.
*/
for_each_possible_cpu(cpu)
*per_cpu_ptr(percpu_count, cpu) = 0;
smp_store_release(&ref->percpu_count_ptr,
ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}
static void __percpu_ref_switch_mode(struct percpu_ref *ref,
percpu_ref_func_t *confirm_switch)
{
struct percpu_ref_data *data = ref->data;
lockdep_assert_held(&percpu_ref_switch_lock);
/*
* If the previous ATOMIC switching hasn't finished yet, wait for
* its completion. If the caller ensures that ATOMIC switching
* isn't in progress, this function can be called from any context.
*/
wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
percpu_ref_switch_lock);
if (data->force_atomic || percpu_ref_is_dying(ref))
__percpu_ref_switch_to_atomic(ref, confirm_switch);
else
__percpu_ref_switch_to_percpu(ref);
}
/**
* percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
* @ref: percpu_ref to switch to atomic mode
* @confirm_switch: optional confirmation callback
*
* There's no reason to use this function for the usual reference counting.
* Use percpu_ref_kill[_and_confirm]().
*
* Schedule switching of @ref to atomic mode. All its percpu counts will
* be collected to the main atomic counter. On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
* block, is invoked. This function may be invoked concurrently with all
* the get/put operations and can safely be mixed with kill and reinit
* operations. Note that @ref will stay in atomic mode across kill/reinit
* cycles until percpu_ref_switch_to_percpu() is called.
*
* This function may block if @ref is in the process of switching to atomic
* mode. If the caller ensures that @ref is not in the process of
* switching to atomic mode, this function can be called from any context.
*/
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
percpu_ref_func_t *confirm_switch)
{
unsigned long flags;
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
ref->data->force_atomic = true;
__percpu_ref_switch_mode(ref, confirm_switch);
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);
/**
* percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
* @ref: percpu_ref to switch to atomic mode
*
* Schedule switching the ref to atomic mode, and wait for the
* switch to complete. Caller must ensure that no other thread
* will switch back to percpu mode.
*/
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
{
percpu_ref_switch_to_atomic(ref, NULL);
wait_event(percpu_ref_switch_waitq, !ref->data->confirm_switch);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
/**
* percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
* @ref: percpu_ref to switch to percpu mode
*
* There's no reason to use this function for the usual reference counting.
* To re-use an expired ref, use percpu_ref_reinit().
*
* Switch @ref to percpu mode. This function may be invoked concurrently
* with all the get/put operations and can safely be mixed with kill and
* reinit operations. This function reverses the sticky atomic state set
* by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
* dying or dead, the actual switching takes place on the following
* percpu_ref_reinit().
*
* This function may block if @ref is in the process of switching to atomic
* mode. If the caller ensures that @ref is not in the process of
* switching to atomic mode, this function can be called from any context.
*/
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
unsigned long flags;
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
ref->data->force_atomic = false;
__percpu_ref_switch_mode(ref, NULL);
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
/**
* percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
* @ref: percpu_ref to kill
* @confirm_kill: optional confirmation callback
*
* Equivalent to percpu_ref_kill() but also schedules kill confirmation if
* @confirm_kill is not NULL. @confirm_kill, which may not block, will be
* called after @ref is seen as dead from all CPUs at which point all
* further invocations of percpu_ref_tryget_live() will fail. See
* percpu_ref_tryget_live() for details.
*
* This function normally doesn't block and can be called from any context
* but it may block if @confirm_kill is specified and @ref is in the
* process of switching to atomic mode by percpu_ref_switch_to_atomic().
*
* There are no implied RCU grace periods between kill and release.
*/
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill)
{
unsigned long flags;
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
WARN_ONCE(percpu_ref_is_dying(ref),
"%s called more than once on %ps!", __func__,
ref->data->release);
ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
__percpu_ref_switch_mode(ref, confirm_kill);
percpu_ref_put(ref);
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
/**
* percpu_ref_is_zero - test whether a percpu refcount reached zero
* @ref: percpu_ref to test
*
* Returns %true if @ref reached zero.
*
* This function is safe to call as long as @ref is between init and exit.
*/
bool percpu_ref_is_zero(struct percpu_ref *ref)
{
unsigned long __percpu *percpu_count;
unsigned long count, flags;
if (__ref_is_percpu(ref, &percpu_count))
return false;
/* protect us from being destroyed */
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
if (ref->data)
count = atomic_long_read(&ref->data->count);
else
count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS;
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
return count == 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_is_zero);
/**
* percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
*
* Re-initialize @ref so that it's in the same state as when it finished
* percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
* initialized successfully and reached 0 but not exited.
*
* Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
* this function is in progress.
*/
void percpu_ref_reinit(struct percpu_ref *ref)
{
WARN_ON_ONCE(!percpu_ref_is_zero(ref));
percpu_ref_resurrect(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
/**
* percpu_ref_resurrect - modify a percpu refcount from dead to live
 * @ref: percpu_ref to resurrect
*
* Modify @ref so that it's in the same state as before percpu_ref_kill() was
* called. @ref must be dead but must not yet have exited.
*
* If @ref->release() frees @ref then the caller is responsible for
* guaranteeing that @ref->release() does not get called while this
* function is in progress.
*
* Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
* this function is in progress.
*/
void percpu_ref_resurrect(struct percpu_ref *ref)
{
unsigned long __percpu *percpu_count;
unsigned long flags;
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
WARN_ON_ONCE(!percpu_ref_is_dying(ref));
WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
percpu_ref_get(ref);
__percpu_ref_switch_mode(ref, NULL);
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
| linux-master | lib/percpu-refcount.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Maple Tree implementation
* Copyright (c) 2018-2022 Oracle Corporation
* Authors: Liam R. Howlett <[email protected]>
* Matthew Wilcox <[email protected]>
*/
/*
* DOC: Interesting implementation details of the Maple Tree
*
* Each node type has a number of slots for entries and a number of slots for
* pivots. In the case of dense nodes, the pivots are implied by the position
* and are simply the slot index + the minimum of the node.
*
* In regular B-Tree terms, pivots are called keys. The term pivot is used to
 * indicate that the tree is specifying ranges. Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
* specific position of a B-tree. Pivot values are inclusive of the slot with
* the same index.
*
*
 * The following illustrates the layout of a range64 node's slots and pivots.
*
*
* Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
* ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬
* │ │ │ │ │ │ │ │ └─ Implied maximum
* │ │ │ │ │ │ │ └─ Pivot 14
* │ │ │ │ │ │ └─ Pivot 13
* │ │ │ │ │ └─ Pivot 12
* │ │ │ │ └─ Pivot 11
* │ │ │ └─ Pivot 2
* │ │ └─ Pivot 1
* │ └─ Pivot 0
* └─ Implied minimum
*
* Slot contents:
* Internal (non-leaf) nodes contain pointers to other nodes.
* Leaf nodes contain entries.
*
* The location of interest is often referred to as an offset. All offsets have
* a slot, but the last offset has an implied pivot from the node above (or
 * ULONG_MAX for the root node).
*
* Ranges complicate certain write activities. When modifying any of
* the B-tree variants, it is known that one entry will either be added or
* deleted. When modifying the Maple Tree, one store operation may overwrite
* the entire data set, or one half of the tree, or the middle half of the tree.
*
*/
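/*
 * Worked example (illustrative, not part of the original source): a leaf
 * with an implied minimum of 0, pivots {10, 20, ...} and slots
 * {A, NULL, B, ...} stores A for the inclusive range 0-10, nothing for
 * 11-20, and B from 21 up to pivot 2 (or the implied maximum if B sits in
 * the last slot).
 */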
#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>
#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>
#define MA_ROOT_PARENT 1
/*
* Maple state flags
* * MA_STATE_BULK - Bulk insert mode
* * MA_STATE_REBALANCE - Indicate a rebalance during bulk insert
* * MA_STATE_PREALLOC - Preallocated nodes, WARN_ON allocation
*/
#define MA_STATE_BULK 1
#define MA_STATE_REBALANCE 2
#define MA_STATE_PREALLOC 4
#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT)
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;
#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
[maple_dense] = MAPLE_NODE_SLOTS,
[maple_leaf_64] = ULONG_MAX,
[maple_range_64] = ULONG_MAX,
[maple_arange_64] = ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif
static const unsigned char mt_slots[] = {
[maple_dense] = MAPLE_NODE_SLOTS,
[maple_leaf_64] = MAPLE_RANGE64_SLOTS,
[maple_range_64] = MAPLE_RANGE64_SLOTS,
[maple_arange_64] = MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]
static const unsigned char mt_pivots[] = {
[maple_dense] = 0,
[maple_leaf_64] = MAPLE_RANGE64_SLOTS - 1,
[maple_range_64] = MAPLE_RANGE64_SLOTS - 1,
[maple_arange_64] = MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
static const unsigned char mt_min_slots[] = {
[maple_dense] = MAPLE_NODE_SLOTS / 2,
[maple_leaf_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
[maple_range_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
[maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
#define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS (MAPLE_ARANGE64_SLOTS * 2 + 1)
struct maple_big_node {
struct maple_pnode *parent;
unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
union {
struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
struct {
unsigned long padding[MAPLE_BIG_NODE_GAPS];
unsigned long gap[MAPLE_BIG_NODE_GAPS];
};
};
unsigned char b_end;
enum maple_type type;
};
/*
* The maple_subtree_state is used to build a tree to replace a segment of an
* existing tree in a more atomic way. Any walkers of the older tree will hit a
* dead node and restart on updates.
*/
struct maple_subtree_state {
struct ma_state *orig_l; /* Original left side of subtree */
struct ma_state *orig_r; /* Original right side of subtree */
struct ma_state *l; /* New left side of subtree */
struct ma_state *m; /* New middle of subtree (rare) */
struct ma_state *r; /* New right side of subtree */
struct ma_topiary *free; /* nodes to be freed */
struct ma_topiary *destroy; /* Nodes to be destroyed (walked and freed) */
struct maple_big_node *bn;
};
#ifdef CONFIG_KASAN_STACK
/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
#define noinline_for_kasan noinline_for_stack
#else
#define noinline_for_kasan inline
#endif
/* Functions */
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
return kmem_cache_alloc(maple_node_cache, gfp);
}
static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}
static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}
static void mt_free_rcu(struct rcu_head *head)
{
struct maple_node *node = container_of(head, struct maple_node, rcu);
kmem_cache_free(maple_node_cache, node);
}
/*
* ma_free_rcu() - Use rcu callback to free a maple node
* @node: The node to free
*
* The maple tree uses the parent pointer to indicate this node is no longer in
* use and will be freed.
*/
static void ma_free_rcu(struct maple_node *node)
{
WARN_ON(node->parent != ma_parent_ptr(node));
call_rcu(&node->rcu, mt_free_rcu);
}
static void mas_set_height(struct ma_state *mas)
{
unsigned int new_flags = mas->tree->ma_flags;
new_flags &= ~MT_FLAGS_HEIGHT_MASK;
MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX);
new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
mas->tree->ma_flags = new_flags;
}
static unsigned int mas_mt_height(struct ma_state *mas)
{
return mt_height(mas->tree);
}
static inline enum maple_type mte_node_type(const struct maple_enode *entry)
{
return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
MAPLE_NODE_TYPE_MASK;
}
static inline bool ma_is_dense(const enum maple_type type)
{
return type < maple_leaf_64;
}
static inline bool ma_is_leaf(const enum maple_type type)
{
return type < maple_range_64;
}
static inline bool mte_is_leaf(const struct maple_enode *entry)
{
return ma_is_leaf(mte_node_type(entry));
}
/*
* We also reserve values with the bottom two bits set to '10' which are
* below 4096
*/
static inline bool mt_is_reserved(const void *entry)
{
return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
xa_is_internal(entry);
}
static inline void mas_set_err(struct ma_state *mas, long err)
{
mas->node = MA_ERROR(err);
}
static inline bool mas_is_ptr(const struct ma_state *mas)
{
return mas->node == MAS_ROOT;
}
static inline bool mas_is_start(const struct ma_state *mas)
{
return mas->node == MAS_START;
}
bool mas_is_err(struct ma_state *mas)
{
return xa_is_err(mas->node);
}
static inline bool mas_searchable(struct ma_state *mas)
{
if (mas_is_none(mas))
return false;
if (mas_is_ptr(mas))
return false;
return true;
}
static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
{
return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}
/*
* mte_to_mat() - Convert a maple encoded node to a maple topiary node.
* @entry: The maple encoded node
*
* Return: a maple topiary pointer
*/
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
return (struct maple_topiary *)
((unsigned long)entry & ~MAPLE_NODE_MASK);
}
/*
* mas_mn() - Get the maple state node.
* @mas: The maple state
*
* Return: the maple node (not encoded - bare pointer).
*/
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
return mte_to_node(mas->node);
}
/*
* mte_set_node_dead() - Set a maple encoded node as dead.
* @mn: The maple encoded node.
*/
static inline void mte_set_node_dead(struct maple_enode *mn)
{
mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
smp_wmb(); /* Needed for RCU */
}
/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE 0x02
/* maple_type stored bit 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT 0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL 0x04
static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
enum maple_type type)
{
return (void *)((unsigned long)node |
(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}
static inline void *mte_mk_root(const struct maple_enode *node)
{
return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}
static inline void *mte_safe_root(const struct maple_enode *node)
{
return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}
static inline void *mte_set_full(const struct maple_enode *node)
{
return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}
static inline void *mte_clear_full(const struct maple_enode *node)
{
return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}
static inline bool mte_has_null(const struct maple_enode *node)
{
return (unsigned long)node & MAPLE_ENODE_NULL;
}
static inline bool ma_is_root(struct maple_node *node)
{
return ((unsigned long)node->parent & MA_ROOT_PARENT);
}
static inline bool mte_is_root(const struct maple_enode *node)
{
return ma_is_root(mte_to_node(node));
}
static inline bool mas_is_root_limits(const struct ma_state *mas)
{
return !mas->min && mas->max == ULONG_MAX;
}
static inline bool mt_is_alloc(struct maple_tree *mt)
{
return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}
/*
* The Parent Pointer
* Excluding root, the parent pointer is 256B aligned like all other tree nodes.
 * When storing 32 or 64 bit values, the offset can fit into 5 bits. The 16
* bit values need an extra bit to store the offset. This extra bit comes from
* a reuse of the last bit in the node type. This is possible by using bit 1 to
* indicate if bit 2 is part of the type or the slot.
*
 * Node types:
* 0x??1 = Root
* 0x?00 = 16 bit nodes
* 0x010 = 32 bit nodes
* 0x110 = 64 bit nodes
*
* Slot size and alignment
* 0b??1 : Root
* 0b?00 : 16 bit values, type in 0-1, slot in 2-7
* 0b010 : 32 bit values, type in 0-2, slot in 3-7
* 0b110 : 64 bit values, type in 0-2, slot in 3-7
*/
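/*
 * Worked example (illustrative, not part of the original source): for a
 * child in slot 5 of a range64 parent, mas_set_parent() stores
 *
 *	(parent & ~MAPLE_NODE_MASK) | (5 << MAPLE_PARENT_SLOT_SHIFT)
 *				    | MAPLE_PARENT_RANGE64
 *
 * so the low byte is 0x2e: slot 5 in bits 3-7, the range64 type in bits
 * 1-2, and bit 0 clear (not root).
 */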
#define MAPLE_PARENT_ROOT 0x01
#define MAPLE_PARENT_SLOT_SHIFT 0x03
#define MAPLE_PARENT_SLOT_MASK 0xF8
#define MAPLE_PARENT_16B_SLOT_SHIFT 0x02
#define MAPLE_PARENT_16B_SLOT_MASK 0xFC
#define MAPLE_PARENT_RANGE64 0x06
#define MAPLE_PARENT_RANGE32 0x04
#define MAPLE_PARENT_NOT_RANGE16 0x02
/*
* mte_parent_shift() - Get the parent shift for the slot storage.
* @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
*/
static inline unsigned long mte_parent_shift(unsigned long parent)
{
/* Note bit 1 == 0 means 16B */
if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
return MAPLE_PARENT_SLOT_SHIFT;
return MAPLE_PARENT_16B_SLOT_SHIFT;
}
/*
* mte_parent_slot_mask() - Get the slot mask for the parent.
* @parent: The parent pointer cast as an unsigned long.
* Return: The slot mask for that parent.
*/
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
/* Note bit 1 == 0 means 16B */
if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
return MAPLE_PARENT_SLOT_MASK;
return MAPLE_PARENT_16B_SLOT_MASK;
}
/*
* mas_parent_type() - Return the maple_type of the parent from the stored
* parent type.
* @mas: The maple state
* @enode: The maple_enode to extract the parent's enum
* Return: The node->parent maple_type
*/
static inline
enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
{
unsigned long p_type;
p_type = (unsigned long)mte_to_node(enode)->parent;
if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
return 0;
p_type &= MAPLE_NODE_MASK;
p_type &= ~mte_parent_slot_mask(p_type);
switch (p_type) {
case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
if (mt_is_alloc(mas->tree))
return maple_arange_64;
return maple_range_64;
}
return 0;
}
/*
* mas_set_parent() - Set the parent node and encode the slot
 * @mas: The maple state
 * @enode: The encoded maple node.
* @parent: The encoded maple node that is the parent of @enode.
* @slot: The slot that @enode resides in @parent.
*
* Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the
* parent type.
*/
static inline
void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
const struct maple_enode *parent, unsigned char slot)
{
unsigned long val = (unsigned long)parent;
unsigned long shift;
unsigned long type;
enum maple_type p_type = mte_node_type(parent);
MAS_BUG_ON(mas, p_type == maple_dense);
MAS_BUG_ON(mas, p_type == maple_leaf_64);
switch (p_type) {
case maple_range_64:
case maple_arange_64:
shift = MAPLE_PARENT_SLOT_SHIFT;
type = MAPLE_PARENT_RANGE64;
break;
default:
case maple_dense:
case maple_leaf_64:
shift = type = 0;
break;
}
val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
val |= (slot << shift) | type;
mte_to_node(enode)->parent = ma_parent_ptr(val);
}
/*
* mte_parent_slot() - get the parent slot of @enode.
* @enode: The encoded maple node.
*
* Return: The slot in the parent node where @enode resides.
*/
static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
{
unsigned long val = (unsigned long)mte_to_node(enode)->parent;
if (val & MA_ROOT_PARENT)
return 0;
/*
* Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
* by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
*/
return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}
/*
 * mte_parent() - Get the parent of @enode.
 * @enode: The encoded maple node.
*
* Return: The parent maple node.
*/
static inline struct maple_node *mte_parent(const struct maple_enode *enode)
{
return (void *)((unsigned long)
(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}
/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
*
* Return: true if dead, false otherwise.
*/
static inline bool ma_dead_node(const struct maple_node *node)
{
struct maple_node *parent;
/* Do not reorder reads from the node prior to the parent check */
smp_rmb();
parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
return (parent == node);
}
/*
* mte_dead_node() - check if the @enode is dead.
* @enode: The encoded maple node
*
* Return: true if dead, false otherwise.
*/
static inline bool mte_dead_node(const struct maple_enode *enode)
{
struct maple_node *parent, *node;
node = mte_to_node(enode);
/* Do not reorder reads from the node prior to the parent check */
smp_rmb();
parent = mte_parent(enode);
return (parent == node);
}
/*
* mas_allocated() - Get the number of nodes allocated in a maple state.
* @mas: The maple state
*
* The ma_state alloc member is overloaded to hold a pointer to the first
* allocated node or to the number of requested nodes to allocate. If bit 0 is
* set, then the alloc contains the number of requested nodes. If there is an
* allocated node, then the total allocated nodes is in that node.
*
* Return: The total number of nodes allocated
*/
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
return 0;
return mas->alloc->total;
}
/*
* mas_set_alloc_req() - Set the requested number of allocations.
* @mas: the maple state
* @count: the number of allocations.
*
* The requested number of allocations is either in the first allocated node,
* located in @mas->alloc->request_count, or directly in @mas->alloc if there is
* no allocated node. Set the request either in the node or do the necessary
* encoding to store in @mas->alloc directly.
*/
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
if (!count)
mas->alloc = NULL;
else
mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
return;
}
mas->alloc->request_count = count;
}
/*
* mas_alloc_req() - get the requested number of allocations.
* @mas: The maple state
*
* The alloc count is either stored directly in @mas, or in
* @mas->alloc->request_count if there is at least one node allocated. Decode
* the request count if it's stored directly in @mas->alloc.
*
* Return: The allocation request count.
*/
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
if ((unsigned long)mas->alloc & 0x1)
return (unsigned long)(mas->alloc) >> 1;
else if (mas->alloc)
return mas->alloc->request_count;
return 0;
}
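/*
 * Illustrative encoding example (not part of the original source):
 * requesting three nodes on an empty state stores the tagged value
 * (3 << 1) | 1 == 0x7 directly in mas->alloc, so mas_allocated() reports
 * 0 while mas_alloc_req() reports 3. Once a node is allocated, mas->alloc
 * points at it and the counts live in ->total and ->request_count.
 */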
/*
* ma_pivots() - Get a pointer to the maple node pivots.
* @node - the maple node
* @type - the node type
*
* In the event of a dead node, this array may be %NULL
*
* Return: A pointer to the maple node pivots
*/
static inline unsigned long *ma_pivots(struct maple_node *node,
enum maple_type type)
{
switch (type) {
case maple_arange_64:
return node->ma64.pivot;
case maple_range_64:
case maple_leaf_64:
return node->mr64.pivot;
case maple_dense:
return NULL;
}
return NULL;
}
/*
* ma_gaps() - Get a pointer to the maple node gaps.
* @node - the maple node
* @type - the node type
*
* Return: A pointer to the maple node gaps
*/
static inline unsigned long *ma_gaps(struct maple_node *node,
enum maple_type type)
{
switch (type) {
case maple_arange_64:
return node->ma64.gap;
case maple_range_64:
case maple_leaf_64:
case maple_dense:
return NULL;
}
return NULL;
}
/*
* mas_pivot() - Get the pivot at @piv of the maple encoded node.
* @mas: The maple state.
* @piv: The pivot.
*
 * Return: the pivot at @piv of @mas->node.
*/
static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv)
{
struct maple_node *node = mas_mn(mas);
enum maple_type type = mte_node_type(mas->node);
if (MAS_WARN_ON(mas, piv >= mt_pivots[type])) {
mas_set_err(mas, -EIO);
return 0;
}
switch (type) {
case maple_arange_64:
return node->ma64.pivot[piv];
case maple_range_64:
case maple_leaf_64:
return node->mr64.pivot[piv];
case maple_dense:
return 0;
}
return 0;
}
/*
* mas_safe_pivot() - get the pivot at @piv or mas->max.
* @mas: The maple state
* @pivots: The pointer to the maple node pivots
* @piv: The pivot to fetch
* @type: The maple node type
*
* Return: The pivot at @piv within the limit of the @pivots array, @mas->max
* otherwise.
*/
static inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
unsigned char piv, enum maple_type type)
{
if (piv >= mt_pivots[type])
return mas->max;
return pivots[piv];
}
/*
* mas_safe_min() - Return the minimum for a given offset.
* @mas: The maple state
* @pivots: The pointer to the maple node pivots
* @offset: The offset into the pivot array
*
* Return: The minimum range value that is contained in @offset.
*/
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
if (likely(offset))
return pivots[offset - 1] + 1;
return mas->min;
}
/*
* mte_set_pivot() - Set a pivot to a value in an encoded maple node.
* @mn: The encoded maple node
* @piv: The pivot offset
* @val: The value of the pivot
*/
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
unsigned long val)
{
struct maple_node *node = mte_to_node(mn);
enum maple_type type = mte_node_type(mn);
BUG_ON(piv >= mt_pivots[type]);
switch (type) {
default:
case maple_range_64:
case maple_leaf_64:
node->mr64.pivot[piv] = val;
break;
case maple_arange_64:
node->ma64.pivot[piv] = val;
break;
case maple_dense:
break;
}
}
/*
* ma_slots() - Get a pointer to the maple node slots.
* @mn: The maple node
* @mt: The maple node type
*
* Return: A pointer to the maple node slots
*/
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
switch (mt) {
default:
case maple_arange_64:
return mn->ma64.slot;
case maple_range_64:
case maple_leaf_64:
return mn->mr64.slot;
case maple_dense:
return mn->slot;
}
}
static inline bool mt_write_locked(const struct maple_tree *mt)
{
return mt_external_lock(mt) ? mt_write_lock_is_held(mt) :
lockdep_is_held(&mt->ma_lock);
}
static inline bool mt_locked(const struct maple_tree *mt)
{
return mt_external_lock(mt) ? mt_lock_is_held(mt) :
lockdep_is_held(&mt->ma_lock);
}
static inline void *mt_slot(const struct maple_tree *mt,
void __rcu **slots, unsigned char offset)
{
return rcu_dereference_check(slots[offset], mt_locked(mt));
}
static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
unsigned char offset)
{
return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
}
/*
* mas_slot_locked() - Get the slot value when holding the maple tree lock.
* @mas: The maple state
* @slots: The pointer to the slots
* @offset: The offset into the slots array to fetch
*
* Return: The entry stored in @slots at the @offset.
*/
static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
unsigned char offset)
{
return mt_slot_locked(mas->tree, slots, offset);
}
/*
* mas_slot() - Get the slot value when not holding the maple tree lock.
* @mas: The maple state
* @slots: The pointer to the slots
* @offset: The offset into the slots array to fetch
*
* Return: The entry stored in @slots at the @offset
*/
static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
unsigned char offset)
{
return mt_slot(mas->tree, slots, offset);
}
/*
* mas_root() - Get the maple tree root.
* @mas: The maple state.
*
* Return: The pointer to the root of the tree
*/
static inline void *mas_root(struct ma_state *mas)
{
return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}
static inline void *mt_root_locked(struct maple_tree *mt)
{
return rcu_dereference_protected(mt->ma_root, mt_write_locked(mt));
}
/*
* mas_root_locked() - Get the maple tree root when holding the maple tree lock.
* @mas: The maple state.
*
* Return: The pointer to the root of the tree
*/
static inline void *mas_root_locked(struct ma_state *mas)
{
return mt_root_locked(mas->tree);
}
static inline struct maple_metadata *ma_meta(struct maple_node *mn,
enum maple_type mt)
{
switch (mt) {
case maple_arange_64:
return &mn->ma64.meta;
default:
return &mn->mr64.meta;
}
}
/*
* ma_set_meta() - Set the metadata information of a node.
* @mn: The maple node
* @mt: The maple node type
* @offset: The offset of the highest sub-gap in this node.
* @end: The end of the data in this node.
*/
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
unsigned char offset, unsigned char end)
{
struct maple_metadata *meta = ma_meta(mn, mt);
meta->gap = offset;
meta->end = end;
}
/*
* mt_clear_meta() - clear the metadata information of a node, if it exists
* @mt: The maple tree
* @mn: The maple node
* @type: The maple node type
*/
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
enum maple_type type)
{
struct maple_metadata *meta;
unsigned long *pivots;
void __rcu **slots;
void *next;
switch (type) {
case maple_range_64:
pivots = mn->mr64.pivot;
if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
slots = mn->mr64.slot;
next = mt_slot_locked(mt, slots,
MAPLE_RANGE64_SLOTS - 1);
if (unlikely((mte_to_node(next) &&
mte_node_type(next))))
return; /* no metadata, could be node */
}
fallthrough;
case maple_arange_64:
meta = ma_meta(mn, type);
break;
default:
return;
}
meta->gap = 0;
meta->end = 0;
}
/*
* ma_meta_end() - Get the data end of a node from the metadata
* @mn: The maple node
* @mt: The maple node type
*/
static inline unsigned char ma_meta_end(struct maple_node *mn,
enum maple_type mt)
{
struct maple_metadata *meta = ma_meta(mn, mt);
return meta->end;
}
/*
* ma_meta_gap() - Get the largest gap location of a node from the metadata
* @mn: The maple node
* @mt: The maple node type
*/
static inline unsigned char ma_meta_gap(struct maple_node *mn,
enum maple_type mt)
{
return mn->ma64.meta.gap;
}
/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
* @offset: The location of the largest gap.
*/
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
unsigned char offset)
{
struct maple_metadata *meta = ma_meta(mn, mt);
meta->gap = offset;
}
/*
 * mat_add() - Add @dead_enode to the ma_topiary linked list of dead nodes.
* @mat - the ma_topiary, a linked list of dead nodes.
* @dead_enode - the node to be marked as dead and added to the tail of the list
*
* Add the @dead_enode to the linked list in @mat.
*/
static inline void mat_add(struct ma_topiary *mat,
struct maple_enode *dead_enode)
{
mte_set_node_dead(dead_enode);
mte_to_mat(dead_enode)->next = NULL;
if (!mat->tail) {
mat->tail = mat->head = dead_enode;
return;
}
mte_to_mat(mat->tail)->next = dead_enode;
mat->tail = dead_enode;
}
static void mt_free_walk(struct rcu_head *head);
static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
bool free);
/*
* mas_mat_destroy() - Free all nodes and subtrees in a dead list.
* @mas - the maple state
* @mat - the ma_topiary linked list of dead nodes to free.
*
 * Walk and destroy each subtree on the dead list.
*/
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
struct maple_enode *next;
struct maple_node *node;
bool in_rcu = mt_in_rcu(mas->tree);
while (mat->head) {
next = mte_to_mat(mat->head)->next;
node = mte_to_node(mat->head);
mt_destroy_walk(mat->head, mas->tree, !in_rcu);
if (in_rcu)
call_rcu(&node->rcu, mt_free_walk);
mat->head = next;
}
}
/*
* mas_descend() - Descend into the slot stored in the ma_state.
* @mas - the maple state.
*
* Note: Not RCU safe, only use in write side or debug code.
*/
static inline void mas_descend(struct ma_state *mas)
{
enum maple_type type;
unsigned long *pivots;
struct maple_node *node;
void __rcu **slots;
node = mas_mn(mas);
type = mte_node_type(mas->node);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
if (mas->offset)
mas->min = pivots[mas->offset - 1] + 1;
mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
mas->node = mas_slot(mas, slots, mas->offset);
}
/*
* mte_set_gap() - Set a maple node gap.
* @mn: The encoded maple node
* @gap: The offset of the gap to set
* @val: The gap value
*/
static inline void mte_set_gap(const struct maple_enode *mn,
unsigned char gap, unsigned long val)
{
switch (mte_node_type(mn)) {
default:
break;
case maple_arange_64:
mte_to_node(mn)->ma64.gap[gap] = val;
break;
}
}
/*
* mas_ascend() - Walk up a level of the tree.
* @mas: The maple state
*
* Sets the @mas->max and @mas->min to the correct values when walking up. This
* may cause several levels of walking up to find the correct min and max.
* May find a dead node which will cause a premature return.
* Return: 1 on dead node, 0 otherwise
*/
static int mas_ascend(struct ma_state *mas)
{
struct maple_enode *p_enode; /* parent enode. */
struct maple_enode *a_enode; /* ancestor enode. */
struct maple_node *a_node; /* ancestor node. */
struct maple_node *p_node; /* parent node. */
unsigned char a_slot;
enum maple_type a_type;
unsigned long min, max;
unsigned long *pivots;
bool set_max = false, set_min = false;
a_node = mas_mn(mas);
if (ma_is_root(a_node)) {
mas->offset = 0;
return 0;
}
p_node = mte_parent(mas->node);
if (unlikely(a_node == p_node))
return 1;
a_type = mas_parent_type(mas, mas->node);
mas->offset = mte_parent_slot(mas->node);
a_enode = mt_mk_node(p_node, a_type);
/* Check to make sure all parent information is still accurate */
if (p_node != mte_parent(mas->node))
return 1;
mas->node = a_enode;
if (mte_is_root(a_enode)) {
mas->max = ULONG_MAX;
mas->min = 0;
return 0;
}
if (!mas->min)
set_min = true;
if (mas->max == ULONG_MAX)
set_max = true;
min = 0;
max = ULONG_MAX;
do {
p_enode = a_enode;
a_type = mas_parent_type(mas, p_enode);
a_node = mte_parent(p_enode);
a_slot = mte_parent_slot(p_enode);
a_enode = mt_mk_node(a_node, a_type);
pivots = ma_pivots(a_node, a_type);
if (unlikely(ma_dead_node(a_node)))
return 1;
if (!set_min && a_slot) {
set_min = true;
min = pivots[a_slot - 1] + 1;
}
if (!set_max && a_slot < mt_pivots[a_type]) {
set_max = true;
max = pivots[a_slot];
}
if (unlikely(ma_dead_node(a_node)))
return 1;
if (unlikely(ma_is_root(a_node)))
break;
} while (!set_min || !set_max);
mas->max = max;
mas->min = min;
return 0;
}
/*
* mas_pop_node() - Get a previously allocated maple node from the maple state.
* @mas: The maple state
*
* Return: A pointer to a maple node.
*/
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
struct maple_alloc *ret, *node = mas->alloc;
unsigned long total = mas_allocated(mas);
unsigned int req = mas_alloc_req(mas);
/* nothing or a request pending. */
if (WARN_ON(!total))
return NULL;
if (total == 1) {
/* single allocation in this ma_state */
mas->alloc = NULL;
ret = node;
goto single_node;
}
if (node->node_count == 1) {
/* Single allocation in this node. */
mas->alloc = node->slot[0];
mas->alloc->total = node->total - 1;
ret = node;
goto new_head;
}
node->total--;
ret = node->slot[--node->node_count];
node->slot[node->node_count] = NULL;
single_node:
new_head:
if (req) {
req++;
mas_set_alloc_req(mas, req);
}
memset(ret, 0, sizeof(*ret));
return (struct maple_node *)ret;
}
/*
* mas_push_node() - Push a node back on the maple state allocation.
* @mas: The maple state
* @used: The used maple node
*
* Stores the maple node back into @mas->alloc for reuse. Updates allocated and
* requested node count as necessary.
*/
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
struct maple_alloc *reuse = (struct maple_alloc *)used;
struct maple_alloc *head = mas->alloc;
unsigned long count;
unsigned int requested = mas_alloc_req(mas);
count = mas_allocated(mas);
reuse->request_count = 0;
reuse->node_count = 0;
if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
head->slot[head->node_count++] = reuse;
head->total++;
goto done;
}
reuse->total = 1;
if ((head) && !((unsigned long)head & 0x1)) {
reuse->slot[0] = head;
reuse->node_count = 1;
reuse->total += head->total;
}
mas->alloc = reuse;
done:
if (requested > 1)
mas_set_alloc_req(mas, requested - 1);
}
/*
* mas_alloc_nodes() - Allocate nodes into a maple state
* @mas: The maple state
* @gfp: The GFP Flags
*/
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
struct maple_alloc *node;
unsigned long allocated = mas_allocated(mas);
unsigned int requested = mas_alloc_req(mas);
unsigned int count;
void **slots = NULL;
unsigned int max_req = 0;
if (!requested)
return;
mas_set_alloc_req(mas, 0);
if (mas->mas_flags & MA_STATE_PREALLOC) {
if (allocated)
return;
WARN_ON(!allocated);
}
if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
node = (struct maple_alloc *)mt_alloc_one(gfp);
if (!node)
goto nomem_one;
if (allocated) {
node->slot[0] = mas->alloc;
node->node_count = 1;
} else {
node->node_count = 0;
}
mas->alloc = node;
node->total = ++allocated;
requested--;
}
node = mas->alloc;
node->request_count = 0;
while (requested) {
max_req = MAPLE_ALLOC_SLOTS - node->node_count;
slots = (void **)&node->slot[node->node_count];
max_req = min(requested, max_req);
count = mt_alloc_bulk(gfp, max_req, slots);
if (!count)
goto nomem_bulk;
if (node->node_count == 0) {
node->slot[0]->node_count = 0;
node->slot[0]->request_count = 0;
}
node->node_count += count;
allocated += count;
node = node->slot[0];
requested -= count;
}
mas->alloc->total = allocated;
return;
nomem_bulk:
/* Clean up potential freed allocations on bulk failure */
memset(slots, 0, max_req * sizeof(unsigned long));
nomem_one:
mas_set_alloc_req(mas, requested);
if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
mas->alloc->total = allocated;
mas_set_err(mas, -ENOMEM);
}
/*
* mas_free() - Free an encoded maple node
* @mas: The maple state
* @used: The encoded maple node to free.
*
* Uses rcu free if necessary, pushes @used back on the maple state allocations
* otherwise.
*/
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
struct maple_node *tmp = mte_to_node(used);
if (mt_in_rcu(mas->tree))
ma_free_rcu(tmp);
else
mas_push_node(mas, tmp);
}
/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
* @mas: The maple state
* @count: The number of nodes needed
* @gfp: the gfp flags
*/
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
unsigned long allocated = mas_allocated(mas);
if (allocated < count) {
mas_set_alloc_req(mas, count - allocated);
mas_alloc_nodes(mas, gfp);
}
}
/*
* mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
* @mas: The maple state
* @count: The number of nodes needed
*
* Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
*/
static void mas_node_count(struct ma_state *mas, int count)
{
return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}
/*
* mas_start() - Sets up maple state for operations.
* @mas: The maple state.
*
* If mas->node == MAS_START, then set the min, max and depth to
* defaults.
*
* Return:
* - If mas->node is an error or not MAS_START, return NULL.
* - If it's an empty tree: NULL & mas->node == MAS_NONE
* - If it's a single entry: The entry & mas->node == MAS_ROOT
* - If it's a tree: NULL & mas->node == safe root node.
*/
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
if (likely(mas_is_start(mas))) {
struct maple_enode *root;
mas->min = 0;
mas->max = ULONG_MAX;
retry:
mas->depth = 0;
root = mas_root(mas);
/* Tree with nodes */
if (likely(xa_is_node(root))) {
mas->depth = 1;
mas->node = mte_safe_root(root);
mas->offset = 0;
if (mte_dead_node(mas->node))
goto retry;
return NULL;
}
/* empty tree */
if (unlikely(!root)) {
mas->node = MAS_NONE;
mas->offset = MAPLE_NODE_SLOTS;
return NULL;
}
/* Single entry tree */
mas->node = MAS_ROOT;
mas->offset = MAPLE_NODE_SLOTS;
		/* The single entry covers only index 0 */
if (mas->index > 0)
return NULL;
return root;
}
return NULL;
}
/*
* ma_data_end() - Find the end of the data in a node.
* @node: The maple node
* @type: The maple node type
* @pivots: The array of pivots in the node
* @max: The maximum value in the node
*
* Uses metadata to find the end of the data when possible.
* Return: The zero indexed last slot with data (may be null).
*/
static inline unsigned char ma_data_end(struct maple_node *node,
enum maple_type type,
unsigned long *pivots,
unsigned long max)
{
unsigned char offset;
if (!pivots)
return 0;
if (type == maple_arange_64)
return ma_meta_end(node, type);
offset = mt_pivots[type] - 1;
if (likely(!pivots[offset]))
return ma_meta_end(node, type);
if (likely(pivots[offset] == max))
return offset;
return mt_pivots[type];
}
/*
* mas_data_end() - Find the end of the data (slot).
* @mas: the maple state
*
* This method is optimized to check the metadata of a node if the node type
* supports data end metadata.
*
* Return: The zero indexed last slot with data (may be null).
*/
static inline unsigned char mas_data_end(struct ma_state *mas)
{
enum maple_type type;
struct maple_node *node;
unsigned char offset;
unsigned long *pivots;
type = mte_node_type(mas->node);
node = mas_mn(mas);
if (type == maple_arange_64)
return ma_meta_end(node, type);
pivots = ma_pivots(node, type);
if (unlikely(ma_dead_node(node)))
return 0;
offset = mt_pivots[type] - 1;
if (likely(!pivots[offset]))
return ma_meta_end(node, type);
if (likely(pivots[offset] == mas->max))
return offset;
return mt_pivots[type];
}
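/*
 * Illustrative note (not part of the original source): for range64 nodes
 * the last pivot doubles as an occupancy hint. A zero last pivot means
 * the node is not full and the true end is read from the metadata; a last
 * pivot equal to mas->max means the data ends at that offset; any other
 * value means the data extends into the final, implied-pivot slot.
 */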
/*
* mas_leaf_max_gap() - Returns the largest gap in a leaf node
* @mas - the maple state
*
* Return: The maximum gap in the leaf.
*/
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
enum maple_type mt;
unsigned long pstart, gap, max_gap;
struct maple_node *mn;
unsigned long *pivots;
void __rcu **slots;
unsigned char i;
unsigned char max_piv;
mt = mte_node_type(mas->node);
mn = mas_mn(mas);
slots = ma_slots(mn, mt);
max_gap = 0;
if (unlikely(ma_is_dense(mt))) {
gap = 0;
for (i = 0; i < mt_slots[mt]; i++) {
if (slots[i]) {
if (gap > max_gap)
max_gap = gap;
gap = 0;
} else {
gap++;
}
}
if (gap > max_gap)
max_gap = gap;
return max_gap;
}
/*
	 * Checking the first implied pivot optimizes the loop below, and slot 1
	 * may be skipped if there is a gap in slot 0.
*/
pivots = ma_pivots(mn, mt);
if (likely(!slots[0])) {
max_gap = pivots[0] - mas->min + 1;
i = 2;
} else {
i = 1;
}
/* reduce max_piv as the special case is checked before the loop */
max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
/*
	 * Check the end implied pivot, which can only be a gap on the
	 * right-most node.
*/
if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
gap = ULONG_MAX - pivots[max_piv];
if (gap > max_gap)
max_gap = gap;
}
for (; i <= max_piv; i++) {
/* data == no gap. */
if (likely(slots[i]))
continue;
pstart = pivots[i - 1];
gap = pivots[i] - pstart;
if (gap > max_gap)
max_gap = gap;
/* There cannot be two gaps in a row. */
i++;
}
return max_gap;
}
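/*
 * Worked example (illustrative, not part of the original source): a leaf
 * with min = 0, pivots {10, 20, 50} and slots {A, NULL, B} has a single
 * unset range, 11-20, so mas_leaf_max_gap() returns 20 - 10 = 10.
 */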
/*
* ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
* @node: The maple node
* @gaps: The pointer to the gaps
* @mt: The maple node type
 * @off: Pointer to store the offset location of the gap.
*
* Uses the metadata data end to scan backwards across set gaps.
*
* Return: The maximum gap value
*/
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
unsigned char *off)
{
unsigned char offset, i;
unsigned long max_gap = 0;
i = offset = ma_meta_end(node, mt);
do {
if (gaps[i] > max_gap) {
max_gap = gaps[i];
offset = i;
}
} while (i--);
*off = offset;
return max_gap;
}
/*
* mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
* @mas: The maple state.
*
* Return: The gap value.
*/
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
unsigned long *gaps;
unsigned char offset;
enum maple_type mt;
struct maple_node *node;
mt = mte_node_type(mas->node);
if (ma_is_leaf(mt))
return mas_leaf_max_gap(mas);
node = mas_mn(mas);
MAS_BUG_ON(mas, mt != maple_arange_64);
offset = ma_meta_gap(node, mt);
gaps = ma_gaps(node, mt);
return gaps[offset];
}
/*
* mas_parent_gap() - Set the parent gap and any gaps above, as needed
* @mas: The maple state
* @offset: The gap offset in the parent to set
* @new: The new gap value.
*
* Set the parent gap then continue to set the gap upwards, using the metadata
* of the parent to see if it is necessary to check the node above.
*/
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
unsigned long new)
{
unsigned long meta_gap = 0;
struct maple_node *pnode;
struct maple_enode *penode;
unsigned long *pgaps;
unsigned char meta_offset;
enum maple_type pmt;
pnode = mte_parent(mas->node);
pmt = mas_parent_type(mas, mas->node);
penode = mt_mk_node(pnode, pmt);
pgaps = ma_gaps(pnode, pmt);
ascend:
MAS_BUG_ON(mas, pmt != maple_arange_64);
meta_offset = ma_meta_gap(pnode, pmt);
meta_gap = pgaps[meta_offset];
pgaps[offset] = new;
if (meta_gap == new)
return;
if (offset != meta_offset) {
if (meta_gap > new)
return;
ma_set_meta_gap(pnode, pmt, offset);
} else if (new < meta_gap) {
new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
ma_set_meta_gap(pnode, pmt, meta_offset);
}
if (ma_is_root(pnode))
return;
/* Go to the parent node. */
pnode = mte_parent(penode);
pmt = mas_parent_type(mas, penode);
pgaps = ma_gaps(pnode, pmt);
offset = mte_parent_slot(penode);
penode = mt_mk_node(pnode, pmt);
goto ascend;
}
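/*
 * A minimal sketch (hypothetical helper, not kernel API) of the rule the
 * ascent above relies on: a parent only needs updating when the node's
 * recorded maximum gap actually changes.
 */
static __maybe_unused bool example_max_gap_changed(unsigned long *gaps,
		unsigned char count, unsigned char offset, unsigned long new)
{
	unsigned long old_max = 0, new_max = 0;
	unsigned char i;

	for (i = 0; i < count; i++)
		if (gaps[i] > old_max)
			old_max = gaps[i];

	gaps[offset] = new;
	for (i = 0; i < count; i++)
		if (gaps[i] > new_max)
			new_max = gaps[i];

	/* Ascend only when the maximum moved; otherwise stop early. */
	return new_max != old_max;
}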
/*
 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 * @mas: the maple state.
*/
static inline void mas_update_gap(struct ma_state *mas)
{
unsigned char pslot;
unsigned long p_gap;
unsigned long max_gap;
if (!mt_is_alloc(mas->tree))
return;
if (mte_is_root(mas->node))
return;
max_gap = mas_max_gap(mas);
pslot = mte_parent_slot(mas->node);
p_gap = ma_gaps(mte_parent(mas->node),
mas_parent_type(mas, mas->node))[pslot];
if (p_gap != max_gap)
mas_parent_gap(mas, pslot, max_gap);
}
/*
* mas_adopt_children() - Set the parent pointer of all nodes in @parent to
* @parent with the slot encoded.
 * @mas: the maple state (for the tree)
 * @parent: the maple encoded node containing the children.
*/
static inline void mas_adopt_children(struct ma_state *mas,
struct maple_enode *parent)
{
enum maple_type type = mte_node_type(parent);
struct maple_node *node = mte_to_node(parent);
void __rcu **slots = ma_slots(node, type);
unsigned long *pivots = ma_pivots(node, type);
struct maple_enode *child;
unsigned char offset;
offset = ma_data_end(node, type, pivots, mas->max);
do {
child = mas_slot_locked(mas, slots, offset);
mas_set_parent(mas, child, parent, offset);
} while (offset--);
}
/*
* mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old
* node as dead.
 * @mas: the maple state with the new node
 * @old_enode: The old maple encoded node to replace.
*/
static inline void mas_put_in_tree(struct ma_state *mas,
struct maple_enode *old_enode)
__must_hold(mas->tree->ma_lock)
{
unsigned char offset;
void __rcu **slots;
if (mte_is_root(mas->node)) {
mas_mn(mas)->parent = ma_parent_ptr(mas_tree_parent(mas));
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
mas_set_height(mas);
} else {
offset = mte_parent_slot(mas->node);
slots = ma_slots(mte_parent(mas->node),
mas_parent_type(mas, mas->node));
rcu_assign_pointer(slots[offset], mas->node);
}
mte_set_node_dead(old_enode);
}
/*
 * mas_replace_node() - Replace a node by putting it in the tree, marking it
 * dead, and freeing it.  Uses the parent encoding to locate the old maple
 * node in the tree.
 * @mas: the ma_state with @mas->node pointing to the new node.
 * @old_enode: The old maple encoded node.
*/
static inline void mas_replace_node(struct ma_state *mas,
struct maple_enode *old_enode)
__must_hold(mas->tree->ma_lock)
{
mas_put_in_tree(mas, old_enode);
mas_free(mas, old_enode);
}
/*
 * mas_find_child() - Find a child whose parent is @mas->node.
 * @mas: the maple state with the parent.
 * @child: the maple state to store the child.
 *
 * Return: true if a child was found and @child was set up, false otherwise.
*/
static inline bool mas_find_child(struct ma_state *mas, struct ma_state *child)
__must_hold(mas->tree->ma_lock)
{
enum maple_type mt;
unsigned char offset;
unsigned char end;
unsigned long *pivots;
struct maple_enode *entry;
struct maple_node *node;
void __rcu **slots;
mt = mte_node_type(mas->node);
node = mas_mn(mas);
slots = ma_slots(node, mt);
pivots = ma_pivots(node, mt);
end = ma_data_end(node, mt, pivots, mas->max);
for (offset = mas->offset; offset <= end; offset++) {
entry = mas_slot_locked(mas, slots, offset);
if (mte_parent(entry) == node) {
*child = *mas;
mas->offset = offset + 1;
child->offset = offset;
mas_descend(child);
child->offset = 0;
return true;
}
}
return false;
}
/*
* mab_shift_right() - Shift the data in mab right. Note, does not clean out the
* old data or set b_node->b_end.
* @b_node: the maple_big_node
* @shift: the shift count
*/
static inline void mab_shift_right(struct maple_big_node *b_node,
unsigned char shift)
{
unsigned long size = b_node->b_end * sizeof(unsigned long);
memmove(b_node->pivot + shift, b_node->pivot, size);
memmove(b_node->slot + shift, b_node->slot, size);
if (b_node->type == maple_arange_64)
memmove(b_node->gap + shift, b_node->gap, size);
}
/*
* mab_middle_node() - Check if a middle node is needed (unlikely)
 * @b_node: the maple_big_node that contains the data; the amount of data is
 * taken from @b_node->b_end.
 * @split: the potential split location
 * @slot_count: the number of slots in a single node of the type being
 * considered.
*
* Return: true if a middle node is required.
*/
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
unsigned char slot_count)
{
unsigned char size = b_node->b_end;
if (size >= 2 * slot_count)
return true;
if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
return true;
return false;
}
/*
* mab_no_null_split() - ensure the split doesn't fall on a NULL
* @b_node: the maple_big_node with the data
* @split: the suggested split location
* @slot_count: the number of slots in the node being considered.
*
* Return: the split location.
*/
static inline int mab_no_null_split(struct maple_big_node *b_node,
unsigned char split, unsigned char slot_count)
{
if (!b_node->slot[split]) {
/*
		 * If the split is below the last slot and the right side will
		 * still be sufficient, then increment the split on NULL.
*/
if ((split < slot_count - 1) &&
(b_node->b_end - split) > (mt_min_slots[b_node->type]))
split++;
else
split--;
}
return split;
}
/*
* mab_calc_split() - Calculate the split location and if there needs to be two
* splits.
 * @mas: The maple state
 * @bn: The maple_big_node with the data
 * @mid_split: The second split, if required. 0 otherwise.
 * @min: The minimum index of the data in @bn
*
* Return: The first split location. The middle split is set in @mid_split.
*/
static inline int mab_calc_split(struct ma_state *mas,
struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
{
unsigned char b_end = bn->b_end;
int split = b_end / 2; /* Assume equal split. */
unsigned char slot_min, slot_count = mt_slots[bn->type];
/*
* To support gap tracking, all NULL entries are kept together and a node cannot
* end on a NULL entry, with the exception of the left-most leaf. The
* limitation means that the split of a node must be checked for this condition
* and be able to put more data in one direction or the other.
*/
if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
*mid_split = 0;
split = b_end - mt_min_slots[bn->type];
if (!ma_is_leaf(bn->type))
return split;
mas->mas_flags |= MA_STATE_REBALANCE;
if (!bn->slot[split])
split--;
return split;
}
/*
* Although extremely rare, it is possible to enter what is known as the 3-way
* split scenario. The 3-way split comes about by means of a store of a range
* that overwrites the end and beginning of two full nodes. The result is a set
* of entries that cannot be stored in 2 nodes. Sometimes, these two nodes can
* also be located in different parent nodes which are also full. This can
* carry upwards all the way to the root in the worst case.
*/
if (unlikely(mab_middle_node(bn, split, slot_count))) {
split = b_end / 3;
*mid_split = split * 2;
} else {
slot_min = mt_min_slots[bn->type];
*mid_split = 0;
/*
* Avoid having a range less than the slot count unless it
* causes one node to be deficient.
		 * NOTE: mt_min_slots is 1-based; b_end and split are zero-based.
*/
while ((split < slot_count - 1) &&
((bn->pivot[split] - min) < slot_count - 1) &&
(b_end - split > slot_min))
split++;
}
/* Avoid ending a node on a NULL entry */
split = mab_no_null_split(bn, split, slot_count);
if (unlikely(*mid_split))
*mid_split = mab_no_null_split(bn, *mid_split, slot_count);
return split;
}
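/*
 * Worked example for the 3-way split above, assuming 64-bit nodes with 16
 * slots (the numbers are illustrative): with b_end = 32, mab_middle_node()
 * is true, so split = 32 / 3 = 10 and *mid_split = 20.  The data is then
 * carved into offsets 0..10, 11..20, and 21 onwards, with either cut point
 * nudged off a NULL by mab_no_null_split().
 */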
/*
* mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
* and set @b_node->b_end to the next free slot.
* @mas: The maple state
* @mas_start: The starting slot to copy
* @mas_end: The end slot to copy (inclusively)
* @b_node: The maple_big_node to place the data
* @mab_start: The starting location in maple_big_node to store the data.
*/
static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
unsigned char mas_end, struct maple_big_node *b_node,
unsigned char mab_start)
{
enum maple_type mt;
struct maple_node *node;
void __rcu **slots;
unsigned long *pivots, *gaps;
int i = mas_start, j = mab_start;
unsigned char piv_end;
node = mas_mn(mas);
mt = mte_node_type(mas->node);
pivots = ma_pivots(node, mt);
if (!i) {
b_node->pivot[j] = pivots[i++];
if (unlikely(i > mas_end))
goto complete;
j++;
}
piv_end = min(mas_end, mt_pivots[mt]);
for (; i < piv_end; i++, j++) {
b_node->pivot[j] = pivots[i];
if (unlikely(!b_node->pivot[j]))
break;
if (unlikely(mas->max == b_node->pivot[j]))
goto complete;
}
if (likely(i <= mas_end))
b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
complete:
b_node->b_end = ++j;
j -= mab_start;
slots = ma_slots(node, mt);
memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
gaps = ma_gaps(node, mt);
memcpy(b_node->gap + mab_start, gaps + mas_start,
sizeof(unsigned long) * j);
}
}
/*
* mas_leaf_set_meta() - Set the metadata of a leaf if possible.
* @mas: The maple state
* @node: The maple node
* @pivots: pointer to the maple node pivots
* @mt: The maple type
* @end: The assumed end
*
 * Note, @end may be incremented within this function but the caller's copy is
 * not modified.  This is fine since the metadata is the last thing stored in a
* node during a write.
*/
static inline void mas_leaf_set_meta(struct ma_state *mas,
struct maple_node *node, unsigned long *pivots,
enum maple_type mt, unsigned char end)
{
/* There is no room for metadata already */
if (mt_pivots[mt] <= end)
return;
if (pivots[end] && pivots[end] < mas->max)
end++;
if (end < mt_slots[mt] - 1)
ma_set_meta(node, mt, 0, end);
}
/*
* mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
* @b_node: the maple_big_node that has the data
* @mab_start: the start location in @b_node.
* @mab_end: The end location in @b_node (inclusively)
 * @mas: The maple state with the maple encoded node.
 * @new_max: Update the maple state maximum from the copied data if true.
*/
static inline void mab_mas_cp(struct maple_big_node *b_node,
unsigned char mab_start, unsigned char mab_end,
struct ma_state *mas, bool new_max)
{
int i, j = 0;
enum maple_type mt = mte_node_type(mas->node);
struct maple_node *node = mte_to_node(mas->node);
void __rcu **slots = ma_slots(node, mt);
unsigned long *pivots = ma_pivots(node, mt);
unsigned long *gaps = NULL;
unsigned char end;
if (mab_end - mab_start > mt_pivots[mt])
mab_end--;
if (!pivots[mt_pivots[mt] - 1])
slots[mt_pivots[mt]] = NULL;
i = mab_start;
do {
pivots[j++] = b_node->pivot[i++];
} while (i <= mab_end && likely(b_node->pivot[i]));
memcpy(slots, b_node->slot + mab_start,
sizeof(void *) * (i - mab_start));
if (new_max)
mas->max = b_node->pivot[i - 1];
end = j - 1;
if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
unsigned long max_gap = 0;
unsigned char offset = 0;
gaps = ma_gaps(node, mt);
do {
gaps[--j] = b_node->gap[--i];
if (gaps[j] > max_gap) {
offset = j;
max_gap = gaps[j];
}
} while (j);
ma_set_meta(node, mt, offset, end);
} else {
mas_leaf_set_meta(mas, node, pivots, mt, end);
}
}
/*
* mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
* @mas: The maple state
* @end: The maple node end
* @mt: The maple node type
*/
static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
enum maple_type mt)
{
if (!(mas->mas_flags & MA_STATE_BULK))
return;
if (mte_is_root(mas->node))
return;
if (end > mt_min_slots[mt]) {
mas->mas_flags &= ~MA_STATE_REBALANCE;
return;
}
}
/*
* mas_store_b_node() - Store an @entry into the b_node while also copying the
* data from a maple encoded node.
* @wr_mas: the maple write state
* @b_node: the maple_big_node to fill with data
* @offset_end: the offset to end copying
*
* Return: The actual end of the data stored in @b_node
*/
static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
struct maple_big_node *b_node, unsigned char offset_end)
{
unsigned char slot;
unsigned char b_end;
/* Possible underflow of piv will wrap back to 0 before use. */
unsigned long piv;
struct ma_state *mas = wr_mas->mas;
b_node->type = wr_mas->type;
b_end = 0;
slot = mas->offset;
if (slot) {
/* Copy start data up to insert. */
mas_mab_cp(mas, 0, slot - 1, b_node, 0);
b_end = b_node->b_end;
piv = b_node->pivot[b_end - 1];
} else
piv = mas->min - 1;
if (piv + 1 < mas->index) {
/* Handle range starting after old range */
b_node->slot[b_end] = wr_mas->content;
if (!wr_mas->content)
b_node->gap[b_end] = mas->index - 1 - piv;
b_node->pivot[b_end++] = mas->index - 1;
}
/* Store the new entry. */
mas->offset = b_end;
b_node->slot[b_end] = wr_mas->entry;
b_node->pivot[b_end] = mas->last;
/* Appended. */
if (mas->last >= mas->max)
goto b_end;
/* Handle new range ending before old range ends */
piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
if (piv > mas->last) {
if (piv == ULONG_MAX)
mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
if (offset_end != slot)
wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
offset_end);
b_node->slot[++b_end] = wr_mas->content;
if (!wr_mas->content)
b_node->gap[b_end] = piv - mas->last + 1;
b_node->pivot[b_end] = piv;
}
slot = offset_end + 1;
if (slot > wr_mas->node_end)
goto b_end;
/* Copy end data to the end of the node. */
mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
b_node->b_end--;
return;
b_end:
b_node->b_end = b_end;
}
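/*
 * Sketch of the user-visible result of the composition above, using only
 * the public API (the helper name and values are hypothetical):
 */
static __maybe_unused void example_overwrite_middle(void)
{
	struct maple_tree tree;

	mt_init(&tree);

	/* Two neighbouring ranges. */
	mtree_store_range(&tree, 0, 49, xa_mk_value(1), GFP_KERNEL);
	mtree_store_range(&tree, 50, 99, xa_mk_value(2), GFP_KERNEL);

	/*
	 * Storing [40, 59] trims both neighbours, leaving [0, 39],
	 * [40, 59] and [60, 99]; mas_store_b_node() builds exactly this
	 * layout in the big node before it is committed.
	 */
	mtree_store_range(&tree, 40, 59, xa_mk_value(3), GFP_KERNEL);

	mtree_destroy(&tree);
}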
/*
* mas_prev_sibling() - Find the previous node with the same parent.
* @mas: the maple state
*
* Return: True if there is a previous sibling, false otherwise.
*/
static inline bool mas_prev_sibling(struct ma_state *mas)
{
unsigned int p_slot = mte_parent_slot(mas->node);
if (mte_is_root(mas->node))
return false;
if (!p_slot)
return false;
mas_ascend(mas);
mas->offset = p_slot - 1;
mas_descend(mas);
return true;
}
/*
* mas_next_sibling() - Find the next node with the same parent.
* @mas: the maple state
*
* Return: true if there is a next sibling, false otherwise.
*/
static inline bool mas_next_sibling(struct ma_state *mas)
{
MA_STATE(parent, mas->tree, mas->index, mas->last);
if (mte_is_root(mas->node))
return false;
parent = *mas;
mas_ascend(&parent);
parent.offset = mte_parent_slot(mas->node) + 1;
if (parent.offset > mas_data_end(&parent))
return false;
*mas = parent;
mas_descend(mas);
return true;
}
/*
 * mte_node_or_none() - Return the encoded node or MAS_NONE.
* @enode: The encoded maple node.
*
* Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
*
* Return: @enode or MAS_NONE
*/
static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
{
if (enode)
return enode;
return ma_enode_ptr(MAS_NONE);
}
/*
* mas_wr_node_walk() - Find the correct offset for the index in the @mas.
* @wr_mas: The maple write state
*
* Uses mas_slot_locked() and does not need to worry about dead nodes.
*/
static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
unsigned char count, offset;
if (unlikely(ma_is_dense(wr_mas->type))) {
wr_mas->r_max = wr_mas->r_min = mas->index;
mas->offset = mas->index = mas->min;
return;
}
wr_mas->node = mas_mn(wr_mas->mas);
wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
wr_mas->pivots, mas->max);
offset = mas->offset;
while (offset < count && mas->index > wr_mas->pivots[offset])
offset++;
wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
wr_mas->offset_end = mas->offset = offset;
}
/*
 * mast_rebalance_next() - Rebalance against the next node, @mast->orig_r.
 * @mast: The maple subtree state
*/
static inline void mast_rebalance_next(struct maple_subtree_state *mast)
{
unsigned char b_end = mast->bn->b_end;
mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
mast->bn, b_end);
mast->orig_r->last = mast->orig_r->max;
}
/*
 * mast_rebalance_prev() - Rebalance against the previous node, @mast->orig_l.
 * @mast: The maple subtree state
*/
static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
{
unsigned char end = mas_data_end(mast->orig_l) + 1;
unsigned char b_end = mast->bn->b_end;
mab_shift_right(mast->bn, end);
mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
mast->l->min = mast->orig_l->min;
mast->orig_l->index = mast->orig_l->min;
mast->bn->b_end = end + b_end;
mast->l->offset += end;
}
/*
 * mast_spanning_rebalance() - Rebalance nodes with the nearest neighbour,
 * favouring the node to the right.  Check the nodes to the right, then to the
 * left, at each level upwards until the root is reached.  Data is copied into
 * @mast->bn.
 * @mast: The maple_subtree_state.
 *
 * Return: true if a neighbour was found and its data copied in, false
 * otherwise.
*/
static inline
bool mast_spanning_rebalance(struct maple_subtree_state *mast)
{
	struct ma_state r_tmp = *mast->orig_r;
	struct ma_state l_tmp = *mast->orig_l;
	unsigned char depth = 0;
do {
mas_ascend(mast->orig_r);
mas_ascend(mast->orig_l);
depth++;
if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
mast->orig_r->offset++;
do {
mas_descend(mast->orig_r);
mast->orig_r->offset = 0;
} while (--depth);
mast_rebalance_next(mast);
*mast->orig_l = l_tmp;
return true;
} else if (mast->orig_l->offset != 0) {
mast->orig_l->offset--;
do {
mas_descend(mast->orig_l);
mast->orig_l->offset =
mas_data_end(mast->orig_l);
} while (--depth);
mast_rebalance_prev(mast);
*mast->orig_r = r_tmp;
return true;
}
} while (!mte_is_root(mast->orig_r->node));
*mast->orig_r = r_tmp;
*mast->orig_l = l_tmp;
return false;
}
/*
* mast_ascend() - Ascend the original left and right maple states.
* @mast: the maple subtree state.
*
* Ascend the original left and right sides. Set the offsets to point to the
* data already in the new tree (@mast->l and @mast->r).
*/
static inline void mast_ascend(struct maple_subtree_state *mast)
{
MA_WR_STATE(wr_mas, mast->orig_r, NULL);
mas_ascend(mast->orig_l);
mas_ascend(mast->orig_r);
mast->orig_r->offset = 0;
mast->orig_r->index = mast->r->max;
/* last should be larger than or equal to index */
if (mast->orig_r->last < mast->orig_r->index)
mast->orig_r->last = mast->orig_r->index;
wr_mas.type = mte_node_type(mast->orig_r->node);
mas_wr_node_walk(&wr_mas);
/* Set up the left side of things */
mast->orig_l->offset = 0;
mast->orig_l->index = mast->l->min;
wr_mas.mas = mast->orig_l;
wr_mas.type = mte_node_type(mast->orig_l->node);
mas_wr_node_walk(&wr_mas);
mast->bn->type = wr_mas.type;
}
/*
* mas_new_ma_node() - Create and return a new maple node. Helper function.
* @mas: the maple state with the allocations.
* @b_node: the maple_big_node with the type encoding.
*
* Use the node type from the maple_big_node to allocate a new node from the
* ma_state. This function exists mainly for code readability.
*
* Return: A new maple encoded node
*/
static inline struct maple_enode
*mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
{
return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
}
/*
* mas_mab_to_node() - Set up right and middle nodes
*
* @mas: the maple state that contains the allocations.
* @b_node: the node which contains the data.
* @left: The pointer which will have the left node
* @right: The pointer which may have the right node
* @middle: the pointer which may have the middle node (rare)
 * @mid_split: the split location for the middle node
 * @min: the minimum index of the data in @b_node
*
* Return: the split of left.
*/
static inline unsigned char mas_mab_to_node(struct ma_state *mas,
struct maple_big_node *b_node, struct maple_enode **left,
struct maple_enode **right, struct maple_enode **middle,
unsigned char *mid_split, unsigned long min)
{
unsigned char split = 0;
unsigned char slot_count = mt_slots[b_node->type];
*left = mas_new_ma_node(mas, b_node);
*right = NULL;
*middle = NULL;
*mid_split = 0;
if (b_node->b_end < slot_count) {
split = b_node->b_end;
} else {
split = mab_calc_split(mas, b_node, mid_split, min);
*right = mas_new_ma_node(mas, b_node);
}
if (*mid_split)
*middle = mas_new_ma_node(mas, b_node);
return split;
}
/*
* mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
* pointer.
 * @b_node: the big node to add the entry
 * @mas: the maple state to get the pivot (mas->max)
 * @entry: the entry to add; if NULL, nothing happens.
*/
static inline void mab_set_b_end(struct maple_big_node *b_node,
struct ma_state *mas,
void *entry)
{
if (!entry)
return;
b_node->slot[b_node->b_end] = entry;
if (mt_is_alloc(mas->tree))
b_node->gap[b_node->b_end] = mas_max_gap(mas);
b_node->pivot[b_node->b_end++] = mas->max;
}
/*
* mas_set_split_parent() - combine_then_separate helper function. Sets the parent
* of @mas->node to either @left or @right, depending on @slot and @split
*
 * @mas: the maple state with the node that needs a parent
 * @left: possible parent 1
 * @right: possible parent 2
 * @slot: the slot where mas->node was placed
 * @split: the split location between @left and @right
*/
static inline void mas_set_split_parent(struct ma_state *mas,
struct maple_enode *left,
struct maple_enode *right,
unsigned char *slot, unsigned char split)
{
if (mas_is_none(mas))
return;
if ((*slot) <= split)
mas_set_parent(mas, mas->node, left, *slot);
else if (right)
mas_set_parent(mas, mas->node, right, (*slot) - split - 1);
(*slot)++;
}
/*
* mte_mid_split_check() - Check if the next node passes the mid-split
 * @l: Pointer to the left encoded maple node.
 * @r: Pointer to the right encoded maple node.
 * @right: The encoded maple node to the right of the mid-split.
 * @slot: The offset
 * @split: Pointer to the split location.
 * @mid_split: The middle split.
*/
static inline void mte_mid_split_check(struct maple_enode **l,
struct maple_enode **r,
struct maple_enode *right,
unsigned char slot,
unsigned char *split,
unsigned char mid_split)
{
if (*r == right)
return;
if (slot < mid_split)
return;
*l = *r;
*r = right;
*split = mid_split;
}
/*
 * mast_set_split_parents() - Helper function to set three nodes' parents.  The
 * slot is taken from @mast->l.
 * @mast: the maple subtree state
 * @left: the left node
 * @middle: the middle node, if one was allocated
 * @right: the right node
 * @split: the split location between @left and the next node
 * @mid_split: the split location between the middle and right nodes
*/
static inline void mast_set_split_parents(struct maple_subtree_state *mast,
struct maple_enode *left,
struct maple_enode *middle,
struct maple_enode *right,
unsigned char split,
unsigned char mid_split)
{
unsigned char slot;
struct maple_enode *l = left;
struct maple_enode *r = right;
if (mas_is_none(mast->l))
return;
if (middle)
r = middle;
slot = mast->l->offset;
mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
mas_set_split_parent(mast->l, l, r, &slot, split);
mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
mas_set_split_parent(mast->m, l, r, &slot, split);
mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
mas_set_split_parent(mast->r, l, r, &slot, split);
}
/*
 * mas_topiary_node() - Dispose of a single node
* @mas: The maple state for pushing nodes
* @enode: The encoded maple node
* @in_rcu: If the tree is in rcu mode
*
* The node will either be RCU freed or pushed back on the maple state.
*/
static inline void mas_topiary_node(struct ma_state *mas,
struct maple_enode *enode, bool in_rcu)
{
struct maple_node *tmp;
if (enode == MAS_NONE)
return;
tmp = mte_to_node(enode);
mte_set_node_dead(enode);
if (in_rcu)
ma_free_rcu(tmp);
else
mas_push_node(mas, tmp);
}
/*
* mas_topiary_replace() - Replace the data with new data, then repair the
 * parent links within the new tree.  Iterate over the dead sub-tree, collect
 * the dead subtrees, and topiary the nodes that are no longer of use.
*
* The new tree will have up to three children with the correct parent. Keep
* track of the new entries as they need to be followed to find the next level
* of new entries.
*
* The old tree will have up to three children with the old parent. Keep track
* of the old entries as they may have more nodes below replaced. Nodes within
* [index, last] are dead subtrees, others need to be freed and followed.
*
* @mas: The maple state pointing at the new data
* @old_enode: The maple encoded node being replaced
*
*/
static inline void mas_topiary_replace(struct ma_state *mas,
struct maple_enode *old_enode)
{
struct ma_state tmp[3], tmp_next[3];
MA_TOPIARY(subtrees, mas->tree);
bool in_rcu;
int i, n;
/* Place data in tree & then mark node as old */
mas_put_in_tree(mas, old_enode);
/* Update the parent pointers in the tree */
tmp[0] = *mas;
tmp[0].offset = 0;
tmp[1].node = MAS_NONE;
tmp[2].node = MAS_NONE;
while (!mte_is_leaf(tmp[0].node)) {
n = 0;
for (i = 0; i < 3; i++) {
if (mas_is_none(&tmp[i]))
continue;
while (n < 3) {
if (!mas_find_child(&tmp[i], &tmp_next[n]))
break;
n++;
}
mas_adopt_children(&tmp[i], tmp[i].node);
}
if (MAS_WARN_ON(mas, n == 0))
break;
while (n < 3)
tmp_next[n++].node = MAS_NONE;
for (i = 0; i < 3; i++)
tmp[i] = tmp_next[i];
}
/* Collect the old nodes that need to be discarded */
if (mte_is_leaf(old_enode))
return mas_free(mas, old_enode);
tmp[0] = *mas;
tmp[0].offset = 0;
tmp[0].node = old_enode;
tmp[1].node = MAS_NONE;
tmp[2].node = MAS_NONE;
in_rcu = mt_in_rcu(mas->tree);
do {
n = 0;
for (i = 0; i < 3; i++) {
if (mas_is_none(&tmp[i]))
continue;
while (n < 3) {
if (!mas_find_child(&tmp[i], &tmp_next[n]))
break;
if ((tmp_next[n].min >= tmp_next->index) &&
(tmp_next[n].max <= tmp_next->last)) {
mat_add(&subtrees, tmp_next[n].node);
tmp_next[n].node = MAS_NONE;
} else {
n++;
}
}
}
if (MAS_WARN_ON(mas, n == 0))
break;
while (n < 3)
tmp_next[n++].node = MAS_NONE;
for (i = 0; i < 3; i++) {
mas_topiary_node(mas, tmp[i].node, in_rcu);
tmp[i] = tmp_next[i];
}
} while (!mte_is_leaf(tmp[0].node));
for (i = 0; i < 3; i++)
mas_topiary_node(mas, tmp[i].node, in_rcu);
mas_mat_destroy(mas, &subtrees);
}
/*
* mas_wmb_replace() - Write memory barrier and replace
* @mas: The maple state
 * @old_enode: The old maple encoded node that is being replaced.
*
* Updates gap as necessary.
*/
static inline void mas_wmb_replace(struct ma_state *mas,
struct maple_enode *old_enode)
{
/* Insert the new data in the tree */
mas_topiary_replace(mas, old_enode);
if (mte_is_leaf(mas->node))
return;
mas_update_gap(mas);
}
/*
* mast_cp_to_nodes() - Copy data out to nodes.
* @mast: The maple subtree state
* @left: The left encoded maple node
* @middle: The middle encoded maple node
* @right: The right encoded maple node
* @split: The location to split between left and (middle ? middle : right)
* @mid_split: The location to split between middle and right.
*/
static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
struct maple_enode *left, struct maple_enode *middle,
struct maple_enode *right, unsigned char split, unsigned char mid_split)
{
bool new_lmax = true;
mast->l->node = mte_node_or_none(left);
mast->m->node = mte_node_or_none(middle);
mast->r->node = mte_node_or_none(right);
mast->l->min = mast->orig_l->min;
if (split == mast->bn->b_end) {
mast->l->max = mast->orig_r->max;
new_lmax = false;
}
mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
if (middle) {
mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
mast->m->min = mast->bn->pivot[split] + 1;
split = mid_split;
}
mast->r->max = mast->orig_r->max;
if (right) {
mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
mast->r->min = mast->bn->pivot[split] + 1;
}
}
/*
 * mast_combine_cp_left() - Copy in the original left side of the tree into the
* combined data set in the maple subtree state big node.
* @mast: The maple subtree state
*/
static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
{
unsigned char l_slot = mast->orig_l->offset;
if (!l_slot)
return;
mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
}
/*
 * mast_combine_cp_right() - Copy in the original right side of the tree into
 * the combined data set in the maple subtree state big node.
* @mast: The maple subtree state
*/
static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
{
if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
return;
mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
mt_slot_count(mast->orig_r->node), mast->bn,
mast->bn->b_end);
mast->orig_r->last = mast->orig_r->max;
}
/*
 * mast_sufficient() - Check if the maple subtree state has enough data in the
 * big node to create at least one sufficient node.
 * @mast: the maple subtree state
 *
 * Return: true if sufficient, false otherwise.
*/
static inline bool mast_sufficient(struct maple_subtree_state *mast)
{
if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
return true;
return false;
}
/*
 * mast_overflow() - Check if there is too much data in the subtree state for a
 * single node.
 * @mast: The maple subtree state
 *
 * Return: true if the data overflows a single node, false otherwise.
*/
static inline bool mast_overflow(struct maple_subtree_state *mast)
{
if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
return true;
return false;
}
/*
 * mtree_range_walk() - Walk from the root to a leaf, tracking the range
 * limits on the way down.
 * @mas: The maple state
 *
 * Return: The entry containing mas->index.  On return, mas->index and
 * mas->last are set to the range of the entry, mas->min and mas->max to the
 * limits of the leaf, and mas->node to the leaf itself.  Returns %NULL and
 * resets @mas if a dead node is encountered.
 */
static inline void *mtree_range_walk(struct ma_state *mas)
{
unsigned long *pivots;
unsigned char offset;
struct maple_node *node;
struct maple_enode *next, *last;
enum maple_type type;
void __rcu **slots;
unsigned char end;
unsigned long max, min;
unsigned long prev_max, prev_min;
next = mas->node;
min = mas->min;
max = mas->max;
do {
offset = 0;
last = next;
node = mte_to_node(next);
type = mte_node_type(next);
pivots = ma_pivots(node, type);
end = ma_data_end(node, type, pivots, max);
if (unlikely(ma_dead_node(node)))
goto dead_node;
if (pivots[offset] >= mas->index) {
prev_max = max;
prev_min = min;
max = pivots[offset];
goto next;
}
do {
offset++;
} while ((offset < end) && (pivots[offset] < mas->index));
prev_min = min;
min = pivots[offset - 1] + 1;
prev_max = max;
if (likely(offset < end && pivots[offset]))
max = pivots[offset];
next:
slots = ma_slots(node, type);
next = mt_slot(mas->tree, slots, offset);
if (unlikely(ma_dead_node(node)))
goto dead_node;
} while (!ma_is_leaf(type));
mas->offset = offset;
mas->index = min;
mas->last = max;
mas->min = prev_min;
mas->max = prev_max;
mas->node = last;
return (void *)next;
dead_node:
mas_reset(mas);
return NULL;
}
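/*
 * Sketch of how the range walk surfaces through the advanced API
 * (hypothetical usage; mas_walk() reaches this function through
 * mas_state_walk() on a populated tree):
 */
static __maybe_unused void *example_rcu_range_lookup(struct maple_tree *mt,
		unsigned long index)
{
	MA_STATE(mas, mt, index, index);
	void *entry;

	rcu_read_lock();
	entry = mas_walk(&mas);
	/* mas.index and mas.last now bound the range holding the entry. */
	rcu_read_unlock();

	return entry;
}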
/*
* mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
* @mas: The starting maple state
* @mast: The maple_subtree_state, keeps track of 4 maple states.
* @count: The estimated count of iterations needed.
*
 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
 * is hit.  First @b_node is split into two entries which are inserted into the
 * next iteration of the loop.  @b_node is returned populated with the final
 * iteration.  @mas is used to obtain allocations.  orig_l_mas keeps track of
 * the nodes that will remain active by using orig_l_mas->index and
 * orig_l_mas->last to account for what has been copied into the new sub-tree.
 * The update of orig_l_mas->last is used when collecting the old nodes to find
 * the slots that need to be either freed or destroyed.  orig_l_mas->depth
 * keeps track of the height of the new sub-tree in case the sub-tree becomes
 * the full tree.
*
* Return: the number of elements in b_node during the last loop.
*/
static int mas_spanning_rebalance(struct ma_state *mas,
struct maple_subtree_state *mast, unsigned char count)
{
unsigned char split, mid_split;
unsigned char slot = 0;
struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
struct maple_enode *old_enode;
MA_STATE(l_mas, mas->tree, mas->index, mas->index);
MA_STATE(r_mas, mas->tree, mas->index, mas->last);
MA_STATE(m_mas, mas->tree, mas->index, mas->index);
/*
* The tree needs to be rebalanced and leaves need to be kept at the same level.
* Rebalancing is done by use of the ``struct maple_topiary``.
*/
mast->l = &l_mas;
mast->m = &m_mas;
mast->r = &r_mas;
l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
/* Check if this is not root and has sufficient data. */
if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
mast_spanning_rebalance(mast);
l_mas.depth = 0;
/*
* Each level of the tree is examined and balanced, pushing data to the left or
* right, or rebalancing against left or right nodes is employed to avoid
* rippling up the tree to limit the amount of churn. Once a new sub-section of
* the tree is created, there may be a mix of new and old nodes. The old nodes
* will have the incorrect parent pointers and currently be in two trees: the
* original tree and the partially new tree. To remedy the parent pointers in
* the old tree, the new data is swapped into the active tree and a walk down
* the tree is performed and the parent pointers are updated.
* See mas_topiary_replace() for more information.
*/
while (count--) {
mast->bn->b_end--;
mast->bn->type = mte_node_type(mast->orig_l->node);
split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
&mid_split, mast->orig_l->min);
mast_set_split_parents(mast, left, middle, right, split,
mid_split);
mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
/*
* Copy data from next level in the tree to mast->bn from next
* iteration
*/
memset(mast->bn, 0, sizeof(struct maple_big_node));
mast->bn->type = mte_node_type(left);
l_mas.depth++;
/* Root already stored in l->node. */
if (mas_is_root_limits(mast->l))
goto new_root;
mast_ascend(mast);
mast_combine_cp_left(mast);
l_mas.offset = mast->bn->b_end;
mab_set_b_end(mast->bn, &l_mas, left);
mab_set_b_end(mast->bn, &m_mas, middle);
mab_set_b_end(mast->bn, &r_mas, right);
/* Copy anything necessary out of the right node. */
mast_combine_cp_right(mast);
mast->orig_l->last = mast->orig_l->max;
if (mast_sufficient(mast))
continue;
if (mast_overflow(mast))
continue;
/* May be a new root stored in mast->bn */
if (mas_is_root_limits(mast->orig_l))
break;
mast_spanning_rebalance(mast);
/* rebalancing from other nodes may require another loop. */
if (!count)
count++;
}
l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
mte_node_type(mast->orig_l->node));
l_mas.depth++;
mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
mas_set_parent(mas, left, l_mas.node, slot);
if (middle)
mas_set_parent(mas, middle, l_mas.node, ++slot);
if (right)
mas_set_parent(mas, right, l_mas.node, ++slot);
if (mas_is_root_limits(mast->l)) {
new_root:
mas_mn(mast->l)->parent = ma_parent_ptr(mas_tree_parent(mas));
while (!mte_is_root(mast->orig_l->node))
mast_ascend(mast);
} else {
mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
}
old_enode = mast->orig_l->node;
mas->depth = l_mas.depth;
mas->node = l_mas.node;
mas->min = l_mas.min;
mas->max = l_mas.max;
mas->offset = l_mas.offset;
mas_wmb_replace(mas, old_enode);
mtree_range_walk(mas);
return mast->bn->b_end;
}
/*
* mas_rebalance() - Rebalance a given node.
* @mas: The maple state
* @b_node: The big maple node.
*
* Rebalance two nodes into a single node or two new nodes that are sufficient.
* Continue upwards until tree is sufficient.
*
* Return: the number of elements in b_node during the last loop.
*/
static inline int mas_rebalance(struct ma_state *mas,
struct maple_big_node *b_node)
{
char empty_count = mas_mt_height(mas);
struct maple_subtree_state mast;
unsigned char shift, b_end = ++b_node->b_end;
MA_STATE(l_mas, mas->tree, mas->index, mas->last);
MA_STATE(r_mas, mas->tree, mas->index, mas->last);
trace_ma_op(__func__, mas);
/*
* Rebalancing occurs if a node is insufficient. Data is rebalanced
* against the node to the right if it exists, otherwise the node to the
* left of this node is rebalanced against this node. If rebalancing
* causes just one node to be produced instead of two, then the parent
* is also examined and rebalanced if it is insufficient. Every level
* tries to combine the data in the same way. If one node contains the
* entire range of the tree, then that node is used as a new root node.
*/
mas_node_count(mas, empty_count * 2 - 1);
if (mas_is_err(mas))
return 0;
mast.orig_l = &l_mas;
mast.orig_r = &r_mas;
mast.bn = b_node;
mast.bn->type = mte_node_type(mas->node);
l_mas = r_mas = *mas;
if (mas_next_sibling(&r_mas)) {
mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
r_mas.last = r_mas.index = r_mas.max;
} else {
mas_prev_sibling(&l_mas);
shift = mas_data_end(&l_mas) + 1;
mab_shift_right(b_node, shift);
mas->offset += shift;
mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
b_node->b_end = shift + b_end;
l_mas.index = l_mas.last = l_mas.min;
}
return mas_spanning_rebalance(mas, &mast, empty_count);
}
/*
* mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
* state.
* @mas: The maple state
* @end: The end of the left-most node.
*
* During a mass-insert event (such as forking), it may be necessary to
* rebalance the left-most node when it is not sufficient.
*/
static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
{
enum maple_type mt = mte_node_type(mas->node);
struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
struct maple_enode *eparent, *old_eparent;
unsigned char offset, tmp, split = mt_slots[mt] / 2;
void __rcu **l_slots, **slots;
unsigned long *l_pivs, *pivs, gap;
bool in_rcu = mt_in_rcu(mas->tree);
MA_STATE(l_mas, mas->tree, mas->index, mas->last);
l_mas = *mas;
mas_prev_sibling(&l_mas);
/* set up node. */
if (in_rcu) {
/* Allocate for both left and right as well as parent. */
mas_node_count(mas, 3);
if (mas_is_err(mas))
return;
newnode = mas_pop_node(mas);
} else {
newnode = &reuse;
}
node = mas_mn(mas);
newnode->parent = node->parent;
slots = ma_slots(newnode, mt);
pivs = ma_pivots(newnode, mt);
left = mas_mn(&l_mas);
l_slots = ma_slots(left, mt);
l_pivs = ma_pivots(left, mt);
if (!l_slots[split])
split++;
tmp = mas_data_end(&l_mas) - split;
memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
pivs[tmp] = l_mas.max;
memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
l_mas.max = l_pivs[split];
mas->min = l_mas.max + 1;
old_eparent = mt_mk_node(mte_parent(l_mas.node),
mas_parent_type(&l_mas, l_mas.node));
tmp += end;
if (!in_rcu) {
unsigned char max_p = mt_pivots[mt];
unsigned char max_s = mt_slots[mt];
if (tmp < max_p)
memset(pivs + tmp, 0,
sizeof(unsigned long) * (max_p - tmp));
if (tmp < mt_slots[mt])
memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
memcpy(node, newnode, sizeof(struct maple_node));
ma_set_meta(node, mt, 0, tmp - 1);
mte_set_pivot(old_eparent, mte_parent_slot(l_mas.node),
l_pivs[split]);
/* Remove data from l_pivs. */
tmp = split + 1;
memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
ma_set_meta(left, mt, 0, split);
eparent = old_eparent;
goto done;
}
/* RCU requires replacing both l_mas, mas, and parent. */
mas->node = mt_mk_node(newnode, mt);
ma_set_meta(newnode, mt, 0, tmp);
new_left = mas_pop_node(mas);
new_left->parent = left->parent;
mt = mte_node_type(l_mas.node);
slots = ma_slots(new_left, mt);
pivs = ma_pivots(new_left, mt);
memcpy(slots, l_slots, sizeof(void *) * split);
memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
ma_set_meta(new_left, mt, 0, split);
l_mas.node = mt_mk_node(new_left, mt);
/* replace parent. */
offset = mte_parent_slot(mas->node);
mt = mas_parent_type(&l_mas, l_mas.node);
parent = mas_pop_node(mas);
slots = ma_slots(parent, mt);
pivs = ma_pivots(parent, mt);
memcpy(parent, mte_to_node(old_eparent), sizeof(struct maple_node));
rcu_assign_pointer(slots[offset], mas->node);
rcu_assign_pointer(slots[offset - 1], l_mas.node);
pivs[offset - 1] = l_mas.max;
eparent = mt_mk_node(parent, mt);
done:
gap = mas_leaf_max_gap(mas);
mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
gap = mas_leaf_max_gap(&l_mas);
mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
mas_ascend(mas);
if (in_rcu) {
mas_replace_node(mas, old_eparent);
mas_adopt_children(mas, mas->node);
}
mas_update_gap(mas);
}
/*
* mas_split_final_node() - Split the final node in a subtree operation.
* @mast: the maple subtree state
* @mas: The maple state
 * @height: The height of the tree in case it's a new root.
 *
 * Return: always true.
*/
static inline bool mas_split_final_node(struct maple_subtree_state *mast,
struct ma_state *mas, int height)
{
struct maple_enode *ancestor;
if (mte_is_root(mas->node)) {
if (mt_is_alloc(mas->tree))
mast->bn->type = maple_arange_64;
else
mast->bn->type = maple_range_64;
mas->depth = height;
}
/*
* Only a single node is used here, could be root.
	 * The big node data should just fit in a single node.
*/
ancestor = mas_new_ma_node(mas, mast->bn);
mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset);
mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset);
mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
mast->l->node = ancestor;
mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
mas->offset = mast->bn->b_end - 1;
return true;
}
/*
* mast_fill_bnode() - Copy data into the big node in the subtree state
* @mast: The maple subtree state
* @mas: the maple state
 * @skip: The number of entries to skip, leaving room to insert the new nodes.
*/
static inline void mast_fill_bnode(struct maple_subtree_state *mast,
struct ma_state *mas,
unsigned char skip)
{
bool cp = true;
unsigned char split;
memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
	memset(mast->bn->slot, 0, sizeof(void *) * ARRAY_SIZE(mast->bn->slot));
memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
mast->bn->b_end = 0;
if (mte_is_root(mas->node)) {
cp = false;
} else {
mas_ascend(mas);
mas->offset = mte_parent_slot(mas->node);
}
if (cp && mast->l->offset)
mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
split = mast->bn->b_end;
mab_set_b_end(mast->bn, mast->l, mast->l->node);
mast->r->offset = mast->bn->b_end;
mab_set_b_end(mast->bn, mast->r, mast->r->node);
if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
cp = false;
if (cp)
mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
mast->bn, mast->bn->b_end);
mast->bn->b_end--;
mast->bn->type = mte_node_type(mas->node);
}
/*
* mast_split_data() - Split the data in the subtree state big node into regular
* nodes.
* @mast: The maple subtree state
* @mas: The maple state
* @split: The location to split the big node
*/
static inline void mast_split_data(struct maple_subtree_state *mast,
struct ma_state *mas, unsigned char split)
{
unsigned char p_slot;
mab_mas_cp(mast->bn, 0, split, mast->l, true);
mte_set_pivot(mast->r->node, 0, mast->r->max);
mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
mast->l->offset = mte_parent_slot(mas->node);
mast->l->max = mast->bn->pivot[split];
mast->r->min = mast->l->max + 1;
if (mte_is_leaf(mas->node))
return;
p_slot = mast->orig_l->offset;
mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
&p_slot, split);
mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
&p_slot, split);
}
/*
 * mas_push_data() - Instead of splitting a node, push the data to the left or
 * right node if there is room.
* @mas: The maple state
* @height: The current height of the maple state
* @mast: The maple subtree state
* @left: Push left or not.
*
* Keeping the height of the tree low means faster lookups.
*
* Return: True if pushed, false otherwise.
*/
static inline bool mas_push_data(struct ma_state *mas, int height,
struct maple_subtree_state *mast, bool left)
{
unsigned char slot_total = mast->bn->b_end;
unsigned char end, space, split;
MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
tmp_mas = *mas;
tmp_mas.depth = mast->l->depth;
if (left && !mas_prev_sibling(&tmp_mas))
return false;
else if (!left && !mas_next_sibling(&tmp_mas))
return false;
end = mas_data_end(&tmp_mas);
slot_total += end;
space = 2 * mt_slot_count(mas->node) - 2;
/* -2 instead of -1 to ensure there isn't a triple split */
if (ma_is_leaf(mast->bn->type))
space--;
if (mas->max == ULONG_MAX)
space--;
if (slot_total >= space)
return false;
/* Get the data; Fill mast->bn */
mast->bn->b_end++;
if (left) {
mab_shift_right(mast->bn, end + 1);
mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
mast->bn->b_end = slot_total + 1;
} else {
mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
}
/* Configure mast for splitting of mast->bn */
split = mt_slots[mast->bn->type] - 2;
if (left) {
/* Switch mas to prev node */
*mas = tmp_mas;
/* Start using mast->l for the left side. */
tmp_mas.node = mast->l->node;
*mast->l = tmp_mas;
} else {
tmp_mas.node = mast->r->node;
*mast->r = tmp_mas;
split = slot_total - split;
}
split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
/* Update parent slot for split calculation. */
if (left)
mast->orig_l->offset += end + 1;
mast_split_data(mast, mas, split);
mast_fill_bnode(mast, mas, 2);
mas_split_final_node(mast, mas, height + 1);
return true;
}
/*
* mas_split() - Split data that is too big for one node into two.
* @mas: The maple state
* @b_node: The maple big node
* Return: 1 on success, 0 on failure.
*/
static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
{
struct maple_subtree_state mast;
int height = 0;
unsigned char mid_split, split = 0;
struct maple_enode *old;
/*
* Splitting is handled differently from any other B-tree; the Maple
* Tree splits upwards. Splitting up means that the split operation
* occurs when the walk of the tree hits the leaves and not on the way
* down. The reason for splitting up is that it is impossible to know
* how much space will be needed until the leaf is (or leaves are)
* reached. Since overwriting data is allowed and a range could
* overwrite more than one range or result in changing one entry into 3
* entries, it is impossible to know if a split is required until the
* data is examined.
*
* Splitting is a balancing act between keeping allocations to a minimum
* and avoiding a 'jitter' event where a tree is expanded to make room
* for an entry followed by a contraction when the entry is removed. To
* accomplish the balance, there are empty slots remaining in both left
* and right nodes after a split.
*/
MA_STATE(l_mas, mas->tree, mas->index, mas->last);
MA_STATE(r_mas, mas->tree, mas->index, mas->last);
MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
trace_ma_op(__func__, mas);
mas->depth = mas_mt_height(mas);
/* Allocation failures will happen early. */
mas_node_count(mas, 1 + mas->depth * 2);
if (mas_is_err(mas))
return 0;
mast.l = &l_mas;
mast.r = &r_mas;
mast.orig_l = &prev_l_mas;
mast.orig_r = &prev_r_mas;
mast.bn = b_node;
while (height++ <= mas->depth) {
if (mt_slots[b_node->type] > b_node->b_end) {
mas_split_final_node(&mast, mas, height);
break;
}
l_mas = r_mas = *mas;
l_mas.node = mas_new_ma_node(mas, b_node);
r_mas.node = mas_new_ma_node(mas, b_node);
/*
* Another way that 'jitter' is avoided is to terminate a split up early if the
* left or right node has space to spare. This is referred to as "pushing left"
* or "pushing right" and is similar to the B* tree, except the nodes left or
		 * right can rarely be reused due to RCU, but the ripple upwards is halted,
		 * which is a significant saving.
*/
/* Try to push left. */
if (mas_push_data(mas, height, &mast, true))
break;
/* Try to push right. */
if (mas_push_data(mas, height, &mast, false))
break;
split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
mast_split_data(&mast, mas, split);
/*
* Usually correct, mab_mas_cp in the above call overwrites
* r->max.
*/
mast.r->max = mas->max;
mast_fill_bnode(&mast, mas, 1);
prev_l_mas = *mast.l;
prev_r_mas = *mast.r;
}
/* Set the original node as dead */
old = mas->node;
mas->node = l_mas.node;
mas_wmb_replace(mas, old);
mtree_range_walk(mas);
return 1;
}
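/*
 * Sketch (public API, hypothetical helper): inserting more ranges than one
 * node can hold exercises the split path above; the height only grows when
 * pushing left or right cannot absorb the overflow.
 */
static __maybe_unused int example_force_splits(struct maple_tree *mt)
{
	unsigned long i;
	int ret;

	/* Assuming 16-slot 64-bit leaves, 100 disjoint ranges must split. */
	for (i = 0; i < 100; i++) {
		ret = mtree_store_range(mt, i * 10, i * 10 + 9,
					xa_mk_value(i), GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}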
/*
* mas_reuse_node() - Reuse the node to store the data.
* @wr_mas: The maple write state
* @bn: The maple big node
* @end: The end of the data.
*
* Will always return false in RCU mode.
*
* Return: True if node was reused, false otherwise.
*/
static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
struct maple_big_node *bn, unsigned char end)
{
/* Need to be rcu safe. */
if (mt_in_rcu(wr_mas->mas->tree))
return false;
if (end > bn->b_end) {
int clear = mt_slots[wr_mas->type] - bn->b_end;
memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
		memset(wr_mas->pivots + bn->b_end, 0, sizeof(unsigned long) * clear);
}
mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
return true;
}
/*
* mas_commit_b_node() - Commit the big node into the tree.
* @wr_mas: The maple write state
* @b_node: The maple big node
* @end: The end of the data.
*/
static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
struct maple_big_node *b_node, unsigned char end)
{
struct maple_node *node;
struct maple_enode *old_enode;
unsigned char b_end = b_node->b_end;
enum maple_type b_type = b_node->type;
old_enode = wr_mas->mas->node;
if ((b_end < mt_min_slots[b_type]) &&
(!mte_is_root(old_enode)) &&
(mas_mt_height(wr_mas->mas) > 1))
return mas_rebalance(wr_mas->mas, b_node);
if (b_end >= mt_slots[b_type])
return mas_split(wr_mas->mas, b_node);
if (mas_reuse_node(wr_mas, b_node, end))
goto reuse_node;
mas_node_count(wr_mas->mas, 1);
if (mas_is_err(wr_mas->mas))
return 0;
node = mas_pop_node(wr_mas->mas);
node->parent = mas_mn(wr_mas->mas)->parent;
wr_mas->mas->node = mt_mk_node(node, b_type);
mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
mas_replace_node(wr_mas->mas, old_enode);
reuse_node:
mas_update_gap(wr_mas->mas);
return 1;
}
/*
* mas_root_expand() - Expand a root to a node
* @mas: The maple state
* @entry: The entry to store into the tree
*/
static inline int mas_root_expand(struct ma_state *mas, void *entry)
{
void *contents = mas_root_locked(mas);
enum maple_type type = maple_leaf_64;
struct maple_node *node;
void __rcu **slots;
unsigned long *pivots;
int slot = 0;
mas_node_count(mas, 1);
if (unlikely(mas_is_err(mas)))
return 0;
node = mas_pop_node(mas);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
node->parent = ma_parent_ptr(mas_tree_parent(mas));
mas->node = mt_mk_node(node, type);
if (mas->index) {
if (contents) {
rcu_assign_pointer(slots[slot], contents);
if (likely(mas->index > 1))
slot++;
}
pivots[slot++] = mas->index - 1;
}
rcu_assign_pointer(slots[slot], entry);
mas->offset = slot;
pivots[slot] = mas->last;
if (mas->last != ULONG_MAX)
pivots[++slot] = ULONG_MAX;
mas->depth = 1;
mas_set_height(mas);
ma_set_meta(node, maple_leaf_64, 0, slot);
/* swap the new root into the tree */
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
return slot;
}
static inline void mas_store_root(struct ma_state *mas, void *entry)
{
if (likely((mas->last != 0) || (mas->index != 0)))
mas_root_expand(mas, entry);
else if (((unsigned long) (entry) & 3) == 2)
mas_root_expand(mas, entry);
else {
rcu_assign_pointer(mas->tree->ma_root, entry);
mas->node = MAS_START;
}
}
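/*
 * Sketch of the two root behaviours above (public API, hypothetical helper
 * and values):
 */
static __maybe_unused void example_root_forms(struct maple_tree *mt)
{
	/* A single, plain entry at index 0 lives directly in ma_root. */
	mtree_store(mt, 0, xa_mk_value(1), GFP_KERNEL);

	/* A range ending past 0 forces mas_root_expand() to allocate. */
	mtree_store_range(mt, 0, 5, xa_mk_value(2), GFP_KERNEL);
}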
/*
* mas_is_span_wr() - Check if the write needs to be treated as a write that
* spans the node.
 * @wr_mas: The maple write state
 *
 * Spanning writes are writes that start in one node and end in another, or
 * writes of a %NULL that will cause a node to end with a %NULL.
*
* Return: True if this is a spanning write, false otherwise.
*/
static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
{
unsigned long max = wr_mas->r_max;
unsigned long last = wr_mas->mas->last;
enum maple_type type = wr_mas->type;
void *entry = wr_mas->entry;
/* Contained in this pivot, fast path */
if (last < max)
return false;
if (ma_is_leaf(type)) {
max = wr_mas->mas->max;
if (last < max)
return false;
}
if (last == max) {
/*
		 * The last entry of a leaf node cannot be NULL unless it is the
* rightmost node (writing ULONG_MAX), otherwise it spans slots.
*/
if (entry || last == ULONG_MAX)
return false;
}
trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
return true;
}
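/*
 * Sketch (public API, hypothetical indices): assuming indices 0..199 are
 * already spread across several leaves, a single store overlapping more
 * than one of them is detected by mas_is_span_wr() and routed to the
 * spanning store path below.
 */
static __maybe_unused int example_spanning_store(struct maple_tree *mt)
{
	return mtree_store_range(mt, 20, 180, xa_mk_value(3), GFP_KERNEL);
}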
static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
{
wr_mas->type = mte_node_type(wr_mas->mas->node);
mas_wr_node_walk(wr_mas);
wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
}
static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
{
wr_mas->mas->max = wr_mas->r_max;
wr_mas->mas->min = wr_mas->r_min;
wr_mas->mas->node = wr_mas->content;
wr_mas->mas->offset = 0;
wr_mas->mas->depth++;
}
/*
* mas_wr_walk() - Walk the tree for a write.
* @wr_mas: The maple write state
*
* Uses mas_slot_locked() and does not need to worry about dead nodes.
*
* Return: True if it's contained in a node, false on spanning write.
*/
static bool mas_wr_walk(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
while (true) {
mas_wr_walk_descend(wr_mas);
if (unlikely(mas_is_span_wr(wr_mas)))
return false;
wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
mas->offset);
if (ma_is_leaf(wr_mas->type))
return true;
mas_wr_walk_traverse(wr_mas);
}
return true;
}
static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
while (true) {
mas_wr_walk_descend(wr_mas);
wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
mas->offset);
if (ma_is_leaf(wr_mas->type))
return true;
mas_wr_walk_traverse(wr_mas);
}
return true;
}
/*
* mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
* @l_wr_mas: The left maple write state
* @r_wr_mas: The right maple write state
*/
static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
struct ma_wr_state *r_wr_mas)
{
struct ma_state *r_mas = r_wr_mas->mas;
struct ma_state *l_mas = l_wr_mas->mas;
unsigned char l_slot;
l_slot = l_mas->offset;
if (!l_wr_mas->content)
l_mas->index = l_wr_mas->r_min;
if ((l_mas->index == l_wr_mas->r_min) &&
(l_slot &&
!mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
if (l_slot > 1)
l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
else
l_mas->index = l_mas->min;
l_mas->offset = l_slot - 1;
}
if (!r_wr_mas->content) {
if (r_mas->last < r_wr_mas->r_max)
r_mas->last = r_wr_mas->r_max;
r_mas->offset++;
} else if ((r_mas->last == r_wr_mas->r_max) &&
(r_mas->last < r_mas->max) &&
!mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
r_wr_mas->type, r_mas->offset + 1);
r_mas->offset++;
}
}
static inline void *mas_state_walk(struct ma_state *mas)
{
void *entry;
entry = mas_start(mas);
if (mas_is_none(mas))
return NULL;
if (mas_is_ptr(mas))
return entry;
return mtree_range_walk(mas);
}
/*
* mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
* to date.
*
* @mas: The maple state.
*
 * Note: Leaves @mas in an undesirable state.
* Return: The entry for @mas->index or %NULL on dead node.
*/
static inline void *mtree_lookup_walk(struct ma_state *mas)
{
unsigned long *pivots;
unsigned char offset;
struct maple_node *node;
struct maple_enode *next;
enum maple_type type;
void __rcu **slots;
unsigned char end;
unsigned long max;
next = mas->node;
max = ULONG_MAX;
do {
offset = 0;
node = mte_to_node(next);
type = mte_node_type(next);
pivots = ma_pivots(node, type);
end = ma_data_end(node, type, pivots, max);
if (unlikely(ma_dead_node(node)))
goto dead_node;
do {
if (pivots[offset] >= mas->index) {
max = pivots[offset];
break;
}
} while (++offset < end);
slots = ma_slots(node, type);
next = mt_slot(mas->tree, slots, offset);
if (unlikely(ma_dead_node(node)))
goto dead_node;
} while (!ma_is_leaf(type));
return (void *)next;
dead_node:
mas_reset(mas);
return NULL;
}
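/*
 * This fast path backs the public mtree_load(); a sketch of typical use
 * (the helper name is hypothetical):
 */
static __maybe_unused void *example_plain_lookup(struct maple_tree *mt,
		unsigned long index)
{
	/* RCU protection is taken by mtree_load() itself. */
	return mtree_load(mt, index);
}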
static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
/*
* mas_new_root() - Create a new root node that only contains the entry passed
* in.
* @mas: The maple state
* @entry: The entry to store.
*
 * Only valid when mas->index == 0 and mas->last == ULONG_MAX.
 *
 * Return: 0 on error, 1 on success.
*/
static inline int mas_new_root(struct ma_state *mas, void *entry)
{
struct maple_enode *root = mas_root_locked(mas);
enum maple_type type = maple_leaf_64;
struct maple_node *node;
void __rcu **slots;
unsigned long *pivots;
if (!entry && !mas->index && mas->last == ULONG_MAX) {
mas->depth = 0;
mas_set_height(mas);
rcu_assign_pointer(mas->tree->ma_root, entry);
mas->node = MAS_START;
goto done;
}
mas_node_count(mas, 1);
if (mas_is_err(mas))
return 0;
node = mas_pop_node(mas);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
node->parent = ma_parent_ptr(mas_tree_parent(mas));
mas->node = mt_mk_node(node, type);
rcu_assign_pointer(slots[0], entry);
pivots[0] = mas->last;
mas->depth = 1;
mas_set_height(mas);
rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
done:
if (xa_is_node(root))
mte_destroy_walk(root, mas->tree);
return 1;
}
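/*
 * Illustrative sketch (not part of the kernel sources): a store that covers
 * the whole range [0, ULONG_MAX] takes the mas_new_root() path above.  With
 * a NULL entry, the first branch simply empties the tree:
 *
 *	DEFINE_MTREE(mt);
 *
 *	mtree_store(&mt, 5, xa_mk_value(5), GFP_KERNEL);
 *	mtree_store_range(&mt, 0, ULONG_MAX, NULL, GFP_KERNEL);
 *
 * Afterwards the tree is empty and the old root, if it was a node, has been
 * freed via mte_destroy_walk().
 */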
/*
* mas_wr_spanning_store() - Create a subtree with the store operation completed
* and new nodes where necessary, then place the sub-tree in the actual tree.
* Note that mas is expected to point to the node which caused the store to
* span.
* @wr_mas: The maple write state
*
* Return: 0 on error, positive on success.
*/
static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
{
struct maple_subtree_state mast;
struct maple_big_node b_node;
struct ma_state *mas;
unsigned char height;
/* Left and Right side of spanning store */
MA_STATE(l_mas, NULL, 0, 0);
MA_STATE(r_mas, NULL, 0, 0);
MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
/*
* A store operation that spans multiple nodes is called a spanning
* store and is handled early in the store call stack by the function
* mas_is_span_wr(). When a spanning store is identified, the maple
* state is duplicated. The first maple state walks the left tree path
* to ``index``, the duplicate walks the right tree path to ``last``.
* The data in the two nodes are combined into a single node, two nodes,
* or possibly three nodes (see the 3-way split above). A ``NULL``
* written to the last entry of a node is considered a spanning store as
* a rebalance is required for the operation to complete and an overflow
* of data may happen.
*/
mas = wr_mas->mas;
trace_ma_op(__func__, mas);
if (unlikely(!mas->index && mas->last == ULONG_MAX))
return mas_new_root(mas, wr_mas->entry);
/*
* Node rebalancing may occur due to this store, so there may be three new
* entries per level plus a new root.
*/
height = mas_mt_height(mas);
mas_node_count(mas, 1 + height * 3);
if (mas_is_err(mas))
return 0;
/*
* Set up right side. Need to get to the next offset after the spanning
* store to ensure it's not NULL and to combine both the next node and
* the node with the start together.
*/
r_mas = *mas;
/* Avoid overflow, walk to next slot in the tree. */
if (r_mas.last + 1)
r_mas.last++;
r_mas.index = r_mas.last;
mas_wr_walk_index(&r_wr_mas);
r_mas.last = r_mas.index = mas->last;
/* Set up left side. */
l_mas = *mas;
mas_wr_walk_index(&l_wr_mas);
if (!wr_mas->entry) {
mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
mas->offset = l_mas.offset;
mas->index = l_mas.index;
mas->last = l_mas.last = r_mas.last;
}
/* expanding NULLs may make this cover the entire range */
if (!l_mas.index && r_mas.last == ULONG_MAX) {
mas_set_range(mas, 0, ULONG_MAX);
return mas_new_root(mas, wr_mas->entry);
}
memset(&b_node, 0, sizeof(struct maple_big_node));
/* Copy l_mas and store the value in b_node. */
mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
/* Copy r_mas into b_node. */
if (r_mas.offset <= r_wr_mas.node_end)
mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
&b_node, b_node.b_end + 1);
else
b_node.b_end++;
/* Stop spanning searches by searching for just index. */
l_mas.index = l_mas.last = mas->index;
mast.bn = &b_node;
mast.orig_l = &l_mas;
mast.orig_r = &r_mas;
/* Combine l_mas and r_mas and split them up evenly again. */
return mas_spanning_rebalance(mas, &mast, height + 1);
}
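/*
 * Illustrative sketch (not part of the kernel sources): a store whose range
 * overlaps entries in more than one leaf node is classified as a spanning
 * store and resolved by mas_wr_spanning_store().  The values below are
 * arbitrary:
 *
 *	DEFINE_MTREE(mt);
 *	unsigned long i;
 *
 *	for (i = 0; i < 1000; i++)
 *		mtree_store(&mt, i * 10, xa_mk_value(i), GFP_KERNEL);
 *
 *	mtree_store_range(&mt, 100, 5000, xa_mk_value(0), GFP_KERNEL);
 *
 * The last call overwrites hundreds of existing ranges that live in many
 * different leaf nodes, so the write is completed with a spanning rebalance.
 */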
/*
* mas_wr_node_store() - Attempt to store the value in a node
* @wr_mas: The maple write state
*
* Attempts to reuse the node, but may allocate.
*
* Return: True if stored, false otherwise
*/
static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
unsigned char new_end)
{
struct ma_state *mas = wr_mas->mas;
void __rcu **dst_slots;
unsigned long *dst_pivots;
unsigned char dst_offset, offset_end = wr_mas->offset_end;
struct maple_node reuse, *newnode;
unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
bool in_rcu = mt_in_rcu(mas->tree);
	/*
	 * Bail out if this store would leave the node at or below the
	 * minimum occupancy; the slow path must rebalance instead.
	 * Root nodes and bulk mode are exempt.
	 */
if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
!(mas->mas_flags & MA_STATE_BULK))
return false;
if (mas->last == wr_mas->end_piv)
offset_end++; /* don't copy this offset */
else if (unlikely(wr_mas->r_max == ULONG_MAX))
mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
/* set up node. */
if (in_rcu) {
mas_node_count(mas, 1);
if (mas_is_err(mas))
return false;
newnode = mas_pop_node(mas);
} else {
memset(&reuse, 0, sizeof(struct maple_node));
newnode = &reuse;
}
newnode->parent = mas_mn(mas)->parent;
dst_pivots = ma_pivots(newnode, wr_mas->type);
dst_slots = ma_slots(newnode, wr_mas->type);
/* Copy from start to insert point */
memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);
/* Handle insert of new range starting after old range */
if (wr_mas->r_min < mas->index) {
rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
dst_pivots[mas->offset++] = mas->index - 1;
}
/* Store the new entry and range end. */
if (mas->offset < node_pivots)
dst_pivots[mas->offset] = mas->last;
rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);
	/*
	 * This range wrote to the end of the node, or it overwrote the
	 * rest of the data.
	 */
if (offset_end > wr_mas->node_end)
goto done;
dst_offset = mas->offset + 1;
/* Copy to the end of node if necessary. */
copy_size = wr_mas->node_end - offset_end + 1;
memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
sizeof(void *) * copy_size);
memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
sizeof(unsigned long) * (copy_size - 1));
if (new_end < node_pivots)
dst_pivots[new_end] = mas->max;
done:
mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
if (in_rcu) {
struct maple_enode *old_enode = mas->node;
mas->node = mt_mk_node(newnode, wr_mas->type);
mas_replace_node(mas, old_enode);
} else {
memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
}
trace_ma_write(__func__, mas, 0, wr_mas->entry);
mas_update_gap(mas);
return true;
}
/*
 * mas_wr_slot_store() - Attempt to store a value in a slot.
 * @wr_mas: The maple write state
 *
 * Return: True if stored, false otherwise
*/
static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
unsigned char offset = mas->offset;
void __rcu **slots = wr_mas->slots;
bool gap = false;
gap |= !mt_slot_locked(mas->tree, slots, offset);
gap |= !mt_slot_locked(mas->tree, slots, offset + 1);
if (wr_mas->offset_end - offset == 1) {
if (mas->index == wr_mas->r_min) {
/* Overwriting the range and a part of the next one */
rcu_assign_pointer(slots[offset], wr_mas->entry);
wr_mas->pivots[offset] = mas->last;
} else {
/* Overwriting a part of the range and the next one */
rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
wr_mas->pivots[offset] = mas->index - 1;
mas->offset++; /* Keep mas accurate. */
}
} else if (!mt_in_rcu(mas->tree)) {
/*
* Expand the range, only partially overwriting the previous and
* next ranges
*/
gap |= !mt_slot_locked(mas->tree, slots, offset + 2);
rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
wr_mas->pivots[offset] = mas->index - 1;
wr_mas->pivots[offset + 1] = mas->last;
mas->offset++; /* Keep mas accurate. */
} else {
return false;
}
trace_ma_write(__func__, mas, 0, wr_mas->entry);
/*
* Only update gap when the new entry is empty or there is an empty
* entry in the original two ranges.
*/
if (!wr_mas->entry || gap)
mas_update_gap(mas);
return true;
}
static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
if (!wr_mas->slots[wr_mas->offset_end]) {
/* If this one is null, the next and prev are not */
mas->last = wr_mas->end_piv;
} else {
/* Check next slot(s) if we are overwriting the end */
if ((mas->last == wr_mas->end_piv) &&
(wr_mas->node_end != wr_mas->offset_end) &&
!wr_mas->slots[wr_mas->offset_end + 1]) {
wr_mas->offset_end++;
if (wr_mas->offset_end == wr_mas->node_end)
mas->last = mas->max;
else
mas->last = wr_mas->pivots[wr_mas->offset_end];
wr_mas->end_piv = mas->last;
}
}
if (!wr_mas->content) {
/* If this one is null, the next and prev are not */
mas->index = wr_mas->r_min;
} else {
/* Check prev slot if we are overwriting the start */
if (mas->index == wr_mas->r_min && mas->offset &&
!wr_mas->slots[mas->offset - 1]) {
mas->offset--;
wr_mas->r_min = mas->index =
mas_safe_min(mas, wr_mas->pivots, mas->offset);
wr_mas->r_max = wr_mas->pivots[mas->offset];
}
}
}
static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
{
while ((wr_mas->offset_end < wr_mas->node_end) &&
(wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
wr_mas->offset_end++;
if (wr_mas->offset_end < wr_mas->node_end)
wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
else
wr_mas->end_piv = wr_mas->mas->max;
if (!wr_mas->entry)
mas_wr_extend_null(wr_mas);
}
static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
unsigned char new_end = wr_mas->node_end + 2;
new_end -= wr_mas->offset_end - mas->offset;
if (wr_mas->r_min == mas->index)
new_end--;
if (wr_mas->end_piv == mas->last)
new_end--;
return new_end;
}
/*
 * mas_wr_append() - Attempt to append the entry to the node
* @wr_mas: the maple write state
* @new_end: The end of the node after the modification
*
* This is currently unsafe in rcu mode since the end of the node may be cached
* by readers while the node contents may be updated which could result in
* inaccurate information.
*
* Return: True if appended, false otherwise
*/
static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
unsigned char new_end)
{
struct ma_state *mas;
void __rcu **slots;
unsigned char end;
mas = wr_mas->mas;
if (mt_in_rcu(mas->tree))
return false;
	/* Appending is only possible at the last slot of the node. */
	if (mas->offset != wr_mas->node_end)
		return false;

	end = wr_mas->node_end;
if (new_end < mt_pivots[wr_mas->type]) {
wr_mas->pivots[new_end] = wr_mas->pivots[end];
ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end);
}
slots = wr_mas->slots;
if (new_end == end + 1) {
if (mas->last == wr_mas->r_max) {
/* Append to end of range */
rcu_assign_pointer(slots[new_end], wr_mas->entry);
wr_mas->pivots[end] = mas->index - 1;
mas->offset = new_end;
} else {
/* Append to start of range */
rcu_assign_pointer(slots[new_end], wr_mas->content);
wr_mas->pivots[end] = mas->last;
rcu_assign_pointer(slots[end], wr_mas->entry);
}
} else {
/* Append to the range without touching any boundaries. */
rcu_assign_pointer(slots[new_end], wr_mas->content);
wr_mas->pivots[end + 1] = mas->last;
rcu_assign_pointer(slots[end + 1], wr_mas->entry);
wr_mas->pivots[end] = mas->index - 1;
mas->offset = end + 1;
}
if (!wr_mas->content || !wr_mas->entry)
mas_update_gap(mas);
trace_ma_write(__func__, mas, new_end, wr_mas->entry);
return true;
}
/*
* mas_wr_bnode() - Slow path for a modification.
* @wr_mas: The write maple state
*
* This is where split, rebalance end up.
*/
static void mas_wr_bnode(struct ma_wr_state *wr_mas)
{
struct maple_big_node b_node;
trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
memset(&b_node, 0, sizeof(struct maple_big_node));
mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
}
static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
unsigned char new_end;
/* Direct replacement */
if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
if (!!wr_mas->entry ^ !!wr_mas->content)
mas_update_gap(mas);
return;
}
/*
* new_end exceeds the size of the maple node and cannot enter the fast
* path.
*/
new_end = mas_wr_new_end(wr_mas);
if (new_end >= mt_slots[wr_mas->type])
goto slow_path;
/* Attempt to append */
if (mas_wr_append(wr_mas, new_end))
return;
if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
return;
if (mas_wr_node_store(wr_mas, new_end))
return;
if (mas_is_err(mas))
return;
slow_path:
mas_wr_bnode(wr_mas);
}
/*
* mas_wr_store_entry() - Internal call to store a value
 * @wr_mas: The maple write state
 *
 * Return: The contents that were previously stored at the index.
*/
static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
wr_mas->content = mas_start(mas);
if (mas_is_none(mas) || mas_is_ptr(mas)) {
mas_store_root(mas, wr_mas->entry);
return wr_mas->content;
}
if (unlikely(!mas_wr_walk(wr_mas))) {
mas_wr_spanning_store(wr_mas);
return wr_mas->content;
}
/* At this point, we are at the leaf node that needs to be altered. */
mas_wr_end_piv(wr_mas);
/* New root for a single pointer */
if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
mas_new_root(mas, wr_mas->entry);
return wr_mas->content;
}
mas_wr_modify(wr_mas);
return wr_mas->content;
}
/**
* mas_insert() - Internal call to insert a value
* @mas: The maple state
* @entry: The entry to store
*
 * Return: %NULL on success, or the contents that already exist at the
 * requested index otherwise. The maple state needs to be checked for error
 * conditions.
*/
static inline void *mas_insert(struct ma_state *mas, void *entry)
{
MA_WR_STATE(wr_mas, mas, entry);
/*
* Inserting a new range inserts either 0, 1, or 2 pivots within the
* tree. If the insert fits exactly into an existing gap with a value
* of NULL, then the slot only needs to be written with the new value.
* If the range being inserted is adjacent to another range, then only a
* single pivot needs to be inserted (as well as writing the entry). If
* the new range is within a gap but does not touch any other ranges,
* then two pivots need to be inserted: the start - 1, and the end. As
* usual, the entry must be written. Most operations require a new node
* to be allocated and replace an existing node to ensure RCU safety,
* when in RCU mode. The exception to requiring a newly allocated node
* is when inserting at the end of a node (appending). When done
* carefully, appending can reuse the node in place.
*/
wr_mas.content = mas_start(mas);
if (wr_mas.content)
goto exists;
if (mas_is_none(mas) || mas_is_ptr(mas)) {
mas_store_root(mas, entry);
return NULL;
}
/* spanning writes always overwrite something */
if (!mas_wr_walk(&wr_mas))
goto exists;
/* At this point, we are at the leaf node that needs to be altered. */
wr_mas.offset_end = mas->offset;
wr_mas.end_piv = wr_mas.r_max;
if (wr_mas.content || (mas->last > wr_mas.r_max))
goto exists;
if (!entry)
return NULL;
mas_wr_modify(&wr_mas);
return wr_mas.content;
exists:
mas_set_err(mas, -EEXIST);
return wr_mas.content;
}
static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
{
retry:
mas_set(mas, index);
mas_state_walk(mas);
if (mas_is_start(mas))
goto retry;
}
static inline bool mas_rewalk_if_dead(struct ma_state *mas,
struct maple_node *node, const unsigned long index)
{
if (unlikely(ma_dead_node(node))) {
mas_rewalk(mas, index);
return true;
}
return false;
}
/*
 * mas_prev_node() - Find the previous non-null entry at the same level in
 * the tree.
* @mas: The maple state
* @min: The lower limit to search
*
* The prev node value will be mas->node[mas->offset] or MAS_NONE.
* Return: 1 if the node is dead, 0 otherwise.
*/
static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
{
enum maple_type mt;
int offset, level;
void __rcu **slots;
struct maple_node *node;
unsigned long *pivots;
unsigned long max;
node = mas_mn(mas);
if (!mas->min)
goto no_entry;
max = mas->min - 1;
if (max < min)
goto no_entry;
level = 0;
do {
if (ma_is_root(node))
goto no_entry;
/* Walk up. */
if (unlikely(mas_ascend(mas)))
return 1;
offset = mas->offset;
level++;
node = mas_mn(mas);
} while (!offset);
offset--;
mt = mte_node_type(mas->node);
while (level > 1) {
level--;
slots = ma_slots(node, mt);
mas->node = mas_slot(mas, slots, offset);
if (unlikely(ma_dead_node(node)))
return 1;
mt = mte_node_type(mas->node);
node = mas_mn(mas);
pivots = ma_pivots(node, mt);
offset = ma_data_end(node, mt, pivots, max);
if (unlikely(ma_dead_node(node)))
return 1;
}
slots = ma_slots(node, mt);
mas->node = mas_slot(mas, slots, offset);
pivots = ma_pivots(node, mt);
if (unlikely(ma_dead_node(node)))
return 1;
if (likely(offset))
mas->min = pivots[offset - 1] + 1;
mas->max = max;
mas->offset = mas_data_end(mas);
if (unlikely(mte_dead_node(mas->node)))
return 1;
return 0;
no_entry:
if (unlikely(ma_dead_node(node)))
return 1;
mas->node = MAS_NONE;
return 0;
}
/*
 * mas_prev_slot() - Get the entry in the previous slot
 * @mas: The maple state
 * @min: The lower limit to check
 * @empty: Can be empty
 *
 * Return: The entry in the previous slot, which is possibly NULL
*/
static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
{
void *entry;
void __rcu **slots;
unsigned long pivot;
enum maple_type type;
unsigned long *pivots;
struct maple_node *node;
unsigned long save_point = mas->index;
retry:
node = mas_mn(mas);
type = mte_node_type(mas->node);
pivots = ma_pivots(node, type);
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
again:
if (mas->min <= min) {
pivot = mas_safe_min(mas, pivots, mas->offset);
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
if (pivot <= min)
return NULL;
}
if (likely(mas->offset)) {
mas->offset--;
mas->last = mas->index - 1;
mas->index = mas_safe_min(mas, pivots, mas->offset);
} else {
if (mas_prev_node(mas, min)) {
mas_rewalk(mas, save_point);
goto retry;
}
if (mas_is_none(mas))
return NULL;
mas->last = mas->max;
node = mas_mn(mas);
type = mte_node_type(mas->node);
pivots = ma_pivots(node, type);
mas->index = pivots[mas->offset - 1] + 1;
}
slots = ma_slots(node, type);
entry = mas_slot(mas, slots, mas->offset);
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
if (likely(entry))
return entry;
if (!empty)
goto again;
return entry;
}
/*
* mas_next_node() - Get the next node at the same level in the tree.
 * @mas: The maple state
 * @node: The maple node
 * @max: The maximum pivot value to check.
*
* The next value will be mas->node[mas->offset] or MAS_NONE.
* Return: 1 on dead node, 0 otherwise.
*/
static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
unsigned long max)
{
unsigned long min;
unsigned long *pivots;
struct maple_enode *enode;
int level = 0;
unsigned char node_end;
enum maple_type mt;
void __rcu **slots;
if (mas->max >= max)
goto no_entry;
min = mas->max + 1;
level = 0;
do {
if (ma_is_root(node))
goto no_entry;
/* Walk up. */
if (unlikely(mas_ascend(mas)))
return 1;
level++;
node = mas_mn(mas);
mt = mte_node_type(mas->node);
pivots = ma_pivots(node, mt);
node_end = ma_data_end(node, mt, pivots, mas->max);
if (unlikely(ma_dead_node(node)))
return 1;
} while (unlikely(mas->offset == node_end));
slots = ma_slots(node, mt);
mas->offset++;
enode = mas_slot(mas, slots, mas->offset);
if (unlikely(ma_dead_node(node)))
return 1;
if (level > 1)
mas->offset = 0;
while (unlikely(level > 1)) {
level--;
mas->node = enode;
node = mas_mn(mas);
mt = mte_node_type(mas->node);
slots = ma_slots(node, mt);
enode = mas_slot(mas, slots, 0);
if (unlikely(ma_dead_node(node)))
return 1;
}
if (!mas->offset)
pivots = ma_pivots(node, mt);
mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt);
if (unlikely(ma_dead_node(node)))
return 1;
mas->node = enode;
mas->min = min;
return 0;
no_entry:
if (unlikely(ma_dead_node(node)))
return 1;
mas->node = MAS_NONE;
return 0;
}
/*
 * mas_next_slot() - Get the entry in the next slot
 * @mas: The maple state
 * @max: The maximum starting range
 * @empty: Can be empty
*
* Return: The entry in the next slot which is possibly NULL
*/
static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
{
void __rcu **slots;
unsigned long *pivots;
unsigned long pivot;
enum maple_type type;
struct maple_node *node;
unsigned char data_end;
unsigned long save_point = mas->last;
void *entry;
retry:
node = mas_mn(mas);
type = mte_node_type(mas->node);
pivots = ma_pivots(node, type);
data_end = ma_data_end(node, type, pivots, mas->max);
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
again:
if (mas->max >= max) {
if (likely(mas->offset < data_end))
pivot = pivots[mas->offset];
else
return NULL; /* must be mas->max */
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
if (pivot >= max)
return NULL;
}
if (likely(mas->offset < data_end)) {
mas->index = pivots[mas->offset] + 1;
mas->offset++;
if (likely(mas->offset < data_end))
mas->last = pivots[mas->offset];
else
mas->last = mas->max;
} else {
if (mas_next_node(mas, node, max)) {
mas_rewalk(mas, save_point);
goto retry;
}
if (mas_is_none(mas))
return NULL;
mas->offset = 0;
mas->index = mas->min;
node = mas_mn(mas);
type = mte_node_type(mas->node);
pivots = ma_pivots(node, type);
mas->last = pivots[0];
}
slots = ma_slots(node, type);
entry = mt_slot(mas->tree, slots, mas->offset);
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
if (entry)
return entry;
if (!empty) {
if (!mas->offset)
data_end = 2;
goto again;
}
return entry;
}
/*
* mas_next_entry() - Internal function to get the next entry.
* @mas: The maple state
* @limit: The maximum range start.
*
* Set the @mas->node to the next entry and the range_start to
* the beginning value for the entry. Does not check beyond @limit.
* Sets @mas->index and @mas->last to the limit if it is hit.
* Restarts on dead nodes.
*
* Return: the next entry or %NULL.
*/
static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
{
if (mas->last >= limit)
return NULL;
return mas_next_slot(mas, limit, false);
}
/*
 * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
 * highest gap address of a given size in a given node and descend.
 * @mas: The maple state
 * @size: The needed size.
 * @gap_min: Set to the lowest index of the gap when one is found in a leaf
 * @gap_max: Set to the highest index of the gap when one is found in a leaf
 *
 * Return: True if found in a leaf, false otherwise.
*/
static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
unsigned long *gap_min, unsigned long *gap_max)
{
enum maple_type type = mte_node_type(mas->node);
struct maple_node *node = mas_mn(mas);
unsigned long *pivots, *gaps;
void __rcu **slots;
unsigned long gap = 0;
unsigned long max, min;
unsigned char offset;
if (unlikely(mas_is_err(mas)))
return true;
if (ma_is_dense(type)) {
/* dense nodes. */
mas->offset = (unsigned char)(mas->index - mas->min);
return true;
}
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
gaps = ma_gaps(node, type);
offset = mas->offset;
min = mas_safe_min(mas, pivots, offset);
/* Skip out of bounds. */
while (mas->last < min)
min = mas_safe_min(mas, pivots, --offset);
max = mas_safe_pivot(mas, pivots, offset, type);
while (mas->index <= max) {
gap = 0;
if (gaps)
gap = gaps[offset];
else if (!mas_slot(mas, slots, offset))
gap = max - min + 1;
if (gap) {
if ((size <= gap) && (size <= mas->last - min + 1))
break;
if (!gaps) {
/* Skip the next slot, it cannot be a gap. */
if (offset < 2)
goto ascend;
offset -= 2;
max = pivots[offset];
min = mas_safe_min(mas, pivots, offset);
continue;
}
}
if (!offset)
goto ascend;
offset--;
max = min - 1;
min = mas_safe_min(mas, pivots, offset);
}
if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
goto no_space;
if (unlikely(ma_is_leaf(type))) {
mas->offset = offset;
*gap_min = min;
*gap_max = min + gap - 1;
return true;
}
/* descend, only happens under lock. */
mas->node = mas_slot(mas, slots, offset);
mas->min = min;
mas->max = max;
mas->offset = mas_data_end(mas);
return false;
ascend:
if (!mte_is_root(mas->node))
return false;
no_space:
mas_set_err(mas, -EBUSY);
return false;
}
static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
{
enum maple_type type = mte_node_type(mas->node);
unsigned long pivot, min, gap = 0;
unsigned char offset, data_end;
unsigned long *gaps, *pivots;
void __rcu **slots;
struct maple_node *node;
bool found = false;
if (ma_is_dense(type)) {
mas->offset = (unsigned char)(mas->index - mas->min);
return true;
}
node = mas_mn(mas);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
gaps = ma_gaps(node, type);
offset = mas->offset;
min = mas_safe_min(mas, pivots, offset);
data_end = ma_data_end(node, type, pivots, mas->max);
for (; offset <= data_end; offset++) {
pivot = mas_safe_pivot(mas, pivots, offset, type);
/* Not within lower bounds */
if (mas->index > pivot)
goto next_slot;
if (gaps)
gap = gaps[offset];
else if (!mas_slot(mas, slots, offset))
gap = min(pivot, mas->last) - max(mas->index, min) + 1;
else
goto next_slot;
if (gap >= size) {
if (ma_is_leaf(type)) {
found = true;
goto done;
}
if (mas->index <= pivot) {
mas->node = mas_slot(mas, slots, offset);
mas->min = min;
mas->max = pivot;
offset = 0;
break;
}
}
next_slot:
min = pivot + 1;
if (mas->last <= pivot) {
mas_set_err(mas, -EBUSY);
return true;
}
}
if (mte_is_root(mas->node))
found = true;
done:
mas->offset = offset;
return found;
}
/**
* mas_walk() - Search for @mas->index in the tree.
* @mas: The maple state.
*
* mas->index and mas->last will be set to the range if there is a value. If
* mas->node is MAS_NONE, reset to MAS_START.
*
* Return: the entry at the location or %NULL.
*/
void *mas_walk(struct ma_state *mas)
{
void *entry;
if (mas_is_none(mas) || mas_is_paused(mas) || mas_is_ptr(mas))
mas->node = MAS_START;
retry:
entry = mas_state_walk(mas);
if (mas_is_start(mas)) {
goto retry;
} else if (mas_is_none(mas)) {
mas->index = 0;
mas->last = ULONG_MAX;
} else if (mas_is_ptr(mas)) {
if (!mas->index) {
mas->last = 0;
return entry;
}
mas->index = 1;
mas->last = ULONG_MAX;
mas->node = MAS_NONE;
return NULL;
}
return entry;
}
EXPORT_SYMBOL_GPL(mas_walk);
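/*
 * Illustrative sketch (not part of the kernel sources): a read-side walk
 * under RCU.  mt is assumed to be a populated tree:
 *
 *	MA_STATE(mas, &mt, 42, 42);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	entry = mas_walk(&mas);
 *	rcu_read_unlock();
 *
 * On return, mas.index and mas.last describe the range containing index 42,
 * and entry is the value stored there (possibly NULL).
 */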
static inline bool mas_rewind_node(struct ma_state *mas)
{
unsigned char slot;
do {
if (mte_is_root(mas->node)) {
slot = mas->offset;
if (!slot)
return false;
} else {
mas_ascend(mas);
slot = mas->offset;
}
} while (!slot);
mas->offset = --slot;
return true;
}
/*
* mas_skip_node() - Internal function. Skip over a node.
* @mas: The maple state.
*
* Return: true if there is another node, false otherwise.
*/
static inline bool mas_skip_node(struct ma_state *mas)
{
if (mas_is_err(mas))
return false;
do {
if (mte_is_root(mas->node)) {
if (mas->offset >= mas_data_end(mas)) {
mas_set_err(mas, -EBUSY);
return false;
}
} else {
mas_ascend(mas);
}
} while (mas->offset >= mas_data_end(mas));
mas->offset++;
return true;
}
/*
 * mas_awalk() - Allocation walk. Search from low address to high for a gap
 * of @size.
* @mas: The maple state
* @size: The size of the gap required
*
* Search between @mas->index and @mas->last for a gap of @size.
*/
static inline void mas_awalk(struct ma_state *mas, unsigned long size)
{
struct maple_enode *last = NULL;
/*
* There are 4 options:
* go to child (descend)
* go back to parent (ascend)
* no gap found. (return, slot == MAPLE_NODE_SLOTS)
* found the gap. (return, slot != MAPLE_NODE_SLOTS)
*/
while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
if (last == mas->node)
mas_skip_node(mas);
else
last = mas->node;
}
}
/*
* mas_sparse_area() - Internal function. Return upper or lower limit when
* searching for a gap in an empty tree.
* @mas: The maple state
* @min: the minimum range
* @max: The maximum range
* @size: The size of the gap
 * @fwd: Searching forward or back
 *
 * Return: 0 on success, -EBUSY if the requested size no longer fits.
*/
static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
unsigned long max, unsigned long size, bool fwd)
{
	if (!unlikely(mas_is_none(mas)) && min == 0) {
		/* mas_is_ptr: index 0 is taken by the root pointer. */
		min++;
		/*
		 * min was just increased, so recheck that the requested
		 * size still fits between min and max.
		 */
		if (min > max || max - min + 1 < size)
			return -EBUSY;
	}
if (fwd) {
mas->index = min;
mas->last = min + size - 1;
} else {
mas->last = max;
mas->index = max - size + 1;
}
return 0;
}
/*
* mas_empty_area() - Get the lowest address within the range that is
* sufficient for the size requested.
* @mas: The maple state
* @min: The lowest value of the range
* @max: The highest value of the range
 * @size: The size needed
 *
 * Return: 0 on success, -EINVAL on invalid request, -EBUSY if no gap of the
 * requested size is found.
*/
int mas_empty_area(struct ma_state *mas, unsigned long min,
unsigned long max, unsigned long size)
{
unsigned char offset;
unsigned long *pivots;
enum maple_type mt;
if (min > max)
return -EINVAL;
if (size == 0 || max - min < size - 1)
return -EINVAL;
if (mas_is_start(mas))
mas_start(mas);
else if (mas->offset >= 2)
mas->offset -= 2;
else if (!mas_skip_node(mas))
return -EBUSY;
/* Empty set */
if (mas_is_none(mas) || mas_is_ptr(mas))
return mas_sparse_area(mas, min, max, size, true);
/* The start of the window can only be within these values */
mas->index = min;
mas->last = max;
mas_awalk(mas, size);
if (unlikely(mas_is_err(mas)))
return xa_err(mas->node);
offset = mas->offset;
if (unlikely(offset == MAPLE_NODE_SLOTS))
return -EBUSY;
mt = mte_node_type(mas->node);
pivots = ma_pivots(mas_mn(mas), mt);
min = mas_safe_min(mas, pivots, offset);
if (mas->index < min)
mas->index = min;
mas->last = mas->index + size - 1;
return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area);
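/*
 * Illustrative sketch (not part of the kernel sources): finding the lowest
 * gap of 16 free indices.  mt is assumed to be an allocation tree
 * (MT_FLAGS_ALLOC_RANGE), which tracks gaps in its nodes:
 *
 *	MA_STATE(mas, &mt, 0, 0);
 *	unsigned long start;
 *
 *	mtree_lock(&mt);
 *	if (!mas_empty_area(&mas, 0, ULONG_MAX, 16))
 *		start = mas.index;
 *	mtree_unlock(&mt);
 *
 * On success, [mas.index, mas.last] spans the requested size.
 */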
/*
* mas_empty_area_rev() - Get the highest address within the range that is
* sufficient for the size requested.
* @mas: The maple state
* @min: The lowest value of the range
* @max: The highest value of the range
 * @size: The size needed
 *
 * Return: 0 on success, -EINVAL on invalid request, -EBUSY if no gap of the
 * requested size is found.
*/
int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
unsigned long max, unsigned long size)
{
struct maple_enode *last = mas->node;
if (min > max)
return -EINVAL;
if (size == 0 || max - min < size - 1)
return -EINVAL;
if (mas_is_start(mas)) {
mas_start(mas);
mas->offset = mas_data_end(mas);
} else if (mas->offset >= 2) {
mas->offset -= 2;
} else if (!mas_rewind_node(mas)) {
return -EBUSY;
}
/* Empty set. */
if (mas_is_none(mas) || mas_is_ptr(mas))
return mas_sparse_area(mas, min, max, size, false);
/* The start of the window can only be within these values. */
mas->index = min;
mas->last = max;
while (!mas_rev_awalk(mas, size, &min, &max)) {
if (last == mas->node) {
if (!mas_rewind_node(mas))
return -EBUSY;
} else {
last = mas->node;
}
}
if (mas_is_err(mas))
return xa_err(mas->node);
if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
return -EBUSY;
/* Trim the upper limit to the max. */
if (max < mas->last)
mas->last = max;
mas->index = mas->last - size + 1;
return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area_rev);
/*
* mte_dead_leaves() - Mark all leaves of a node as dead.
 * @enode: The maple encoded node
 * @mt: The maple tree
 * @slots: Pointer to the slot array
*
* Must hold the write lock.
*
* Return: The number of leaves marked as dead.
*/
static inline
unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
void __rcu **slots)
{
struct maple_node *node;
enum maple_type type;
void *entry;
int offset;
for (offset = 0; offset < mt_slot_count(enode); offset++) {
entry = mt_slot(mt, slots, offset);
type = mte_node_type(entry);
node = mte_to_node(entry);
/* Use both node and type to catch LE & BE metadata */
if (!node || !type)
break;
mte_set_node_dead(entry);
node->type = type;
rcu_assign_pointer(slots[offset], node);
}
return offset;
}
/**
* mte_dead_walk() - Walk down a dead tree to just before the leaves
* @enode: The maple encoded node
* @offset: The starting offset
*
* Note: This can only be used from the RCU callback context.
*/
static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
{
struct maple_node *node, *next;
void __rcu **slots = NULL;
next = mte_to_node(*enode);
do {
*enode = ma_enode_ptr(next);
node = mte_to_node(*enode);
slots = ma_slots(node, node->type);
next = rcu_dereference_protected(slots[offset],
lock_is_held(&rcu_callback_map));
offset = 0;
} while (!ma_is_leaf(next->type));
return slots;
}
/**
* mt_free_walk() - Walk & free a tree in the RCU callback context
* @head: The RCU head that's within the node.
*
* Note: This can only be used from the RCU callback context.
*/
static void mt_free_walk(struct rcu_head *head)
{
void __rcu **slots;
struct maple_node *node, *start;
struct maple_enode *enode;
unsigned char offset;
enum maple_type type;
node = container_of(head, struct maple_node, rcu);
if (ma_is_leaf(node->type))
goto free_leaf;
start = node;
enode = mt_mk_node(node, node->type);
slots = mte_dead_walk(&enode, 0);
node = mte_to_node(enode);
do {
mt_free_bulk(node->slot_len, slots);
offset = node->parent_slot + 1;
enode = node->piv_parent;
if (mte_to_node(enode) == node)
goto free_leaf;
type = mte_node_type(enode);
slots = ma_slots(mte_to_node(enode), type);
if ((offset < mt_slots[type]) &&
rcu_dereference_protected(slots[offset],
lock_is_held(&rcu_callback_map)))
slots = mte_dead_walk(&enode, offset);
node = mte_to_node(enode);
} while ((node != start) || (node->slot_len < offset));
slots = ma_slots(node, node->type);
mt_free_bulk(node->slot_len, slots);
free_leaf:
mt_free_rcu(&node->rcu);
}
static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
{
struct maple_node *node;
struct maple_enode *next = *enode;
void __rcu **slots = NULL;
enum maple_type type;
unsigned char next_offset = 0;
do {
*enode = next;
node = mte_to_node(*enode);
type = mte_node_type(*enode);
slots = ma_slots(node, type);
next = mt_slot_locked(mt, slots, next_offset);
if ((mte_dead_node(next)))
next = mt_slot_locked(mt, slots, ++next_offset);
mte_set_node_dead(*enode);
node->type = type;
node->piv_parent = prev;
node->parent_slot = offset;
offset = next_offset;
next_offset = 0;
prev = *enode;
} while (!mte_is_leaf(next));
return slots;
}
static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
bool free)
{
void __rcu **slots;
struct maple_node *node = mte_to_node(enode);
struct maple_enode *start;
if (mte_is_leaf(enode)) {
node->type = mte_node_type(enode);
goto free_leaf;
}
start = enode;
slots = mte_destroy_descend(&enode, mt, start, 0);
	node = mte_to_node(enode); /* Updated in the above call. */
do {
enum maple_type type;
unsigned char offset;
struct maple_enode *parent, *tmp;
node->slot_len = mte_dead_leaves(enode, mt, slots);
if (free)
mt_free_bulk(node->slot_len, slots);
offset = node->parent_slot + 1;
enode = node->piv_parent;
if (mte_to_node(enode) == node)
goto free_leaf;
type = mte_node_type(enode);
slots = ma_slots(mte_to_node(enode), type);
if (offset >= mt_slots[type])
goto next;
tmp = mt_slot_locked(mt, slots, offset);
if (mte_node_type(tmp) && mte_to_node(tmp)) {
parent = enode;
enode = tmp;
slots = mte_destroy_descend(&enode, mt, parent, offset);
}
next:
node = mte_to_node(enode);
} while (start != enode);
node = mte_to_node(enode);
node->slot_len = mte_dead_leaves(enode, mt, slots);
if (free)
mt_free_bulk(node->slot_len, slots);
free_leaf:
if (free)
mt_free_rcu(&node->rcu);
else
mt_clear_meta(mt, node, node->type);
}
/*
* mte_destroy_walk() - Free a tree or sub-tree.
* @enode: the encoded maple node (maple_enode) to start
* @mt: the tree to free - needed for node types.
*
* Must hold the write lock.
*/
static inline void mte_destroy_walk(struct maple_enode *enode,
struct maple_tree *mt)
{
struct maple_node *node = mte_to_node(enode);
if (mt_in_rcu(mt)) {
mt_destroy_walk(enode, mt, false);
call_rcu(&node->rcu, mt_free_walk);
} else {
mt_destroy_walk(enode, mt, true);
}
}
static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
{
if (mas_is_start(wr_mas->mas))
return;
if (unlikely(mas_is_paused(wr_mas->mas)))
goto reset;
if (unlikely(mas_is_none(wr_mas->mas)))
goto reset;
/*
* A less strict version of mas_is_span_wr() where we allow spanning
* writes within this node. This is to stop partial walks in
* mas_prealloc() from being reset.
*/
if (wr_mas->mas->last > wr_mas->mas->max)
goto reset;
if (wr_mas->entry)
return;
if (mte_is_leaf(wr_mas->mas->node) &&
wr_mas->mas->last == wr_mas->mas->max)
goto reset;
return;
reset:
mas_reset(wr_mas->mas);
}
/* Interface */
/**
* mas_store() - Store an @entry.
* @mas: The maple state.
* @entry: The entry to store.
*
 * The @mas->index and @mas->last are used to set the range for the @entry.
* Note: The @mas should have pre-allocated entries to ensure there is memory to
* store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
*
* Return: the first entry between mas->index and mas->last or %NULL.
*/
void *mas_store(struct ma_state *mas, void *entry)
{
MA_WR_STATE(wr_mas, mas, entry);
trace_ma_write(__func__, mas, 0, entry);
#ifdef CONFIG_DEBUG_MAPLE_TREE
if (MAS_WARN_ON(mas, mas->index > mas->last))
pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
if (mas->index > mas->last) {
mas_set_err(mas, -EINVAL);
return NULL;
}
#endif
/*
* Storing is the same operation as insert with the added caveat that it
* can overwrite entries. Although this seems simple enough, one may
* want to examine what happens if a single store operation was to
* overwrite multiple entries within a self-balancing B-Tree.
*/
mas_wr_store_setup(&wr_mas);
mas_wr_store_entry(&wr_mas);
return wr_mas.content;
}
EXPORT_SYMBOL_GPL(mas_store);
/**
* mas_store_gfp() - Store a value into the tree.
* @mas: The maple state
* @entry: The entry to store
* @gfp: The GFP_FLAGS to use for allocations if necessary.
*
* Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
* be allocated.
*/
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
{
MA_WR_STATE(wr_mas, mas, entry);
mas_wr_store_setup(&wr_mas);
trace_ma_write(__func__, mas, 0, entry);
retry:
mas_wr_store_entry(&wr_mas);
if (unlikely(mas_nomem(mas, gfp)))
goto retry;
if (unlikely(mas_is_err(mas)))
return xa_err(mas->node);
return 0;
}
EXPORT_SYMBOL_GPL(mas_store_gfp);
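/*
 * Illustrative sketch (not part of the kernel sources): a locked store of a
 * range using the advanced API.  The value is arbitrary:
 *
 *	DEFINE_MTREE(mt);
 *	MA_STATE(mas, &mt, 10, 19);
 *	int ret;
 *
 *	mtree_lock(&mt);
 *	ret = mas_store_gfp(&mas, xa_mk_value(1), GFP_KERNEL);
 *	mtree_unlock(&mt);
 *
 * Note that mas_nomem() may temporarily drop the tree lock to allocate when
 * the gfp flags allow blocking.
 */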
/**
* mas_store_prealloc() - Store a value into the tree using memory
* preallocated in the maple state.
* @mas: The maple state
* @entry: The entry to store.
*/
void mas_store_prealloc(struct ma_state *mas, void *entry)
{
MA_WR_STATE(wr_mas, mas, entry);
mas_wr_store_setup(&wr_mas);
trace_ma_write(__func__, mas, 0, entry);
mas_wr_store_entry(&wr_mas);
MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
mas_destroy(mas);
}
EXPORT_SYMBOL_GPL(mas_store_prealloc);
/**
* mas_preallocate() - Preallocate enough nodes for a store operation
* @mas: The maple state
* @entry: The entry that will be stored
* @gfp: The GFP_FLAGS to use for allocations.
*
* Return: 0 on success, -ENOMEM if memory could not be allocated.
*/
int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
{
MA_WR_STATE(wr_mas, mas, entry);
unsigned char node_size;
int request = 1;
int ret;
if (unlikely(!mas->index && mas->last == ULONG_MAX))
goto ask_now;
mas_wr_store_setup(&wr_mas);
wr_mas.content = mas_start(mas);
/* Root expand */
if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
goto ask_now;
if (unlikely(!mas_wr_walk(&wr_mas))) {
/* Spanning store, use worst case for now */
request = 1 + mas_mt_height(mas) * 3;
goto ask_now;
}
/* At this point, we are at the leaf node that needs to be altered. */
/* Exact fit, no nodes needed. */
if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last)
return 0;
mas_wr_end_piv(&wr_mas);
node_size = mas_wr_new_end(&wr_mas);
if (node_size >= mt_slots[wr_mas.type]) {
/* Split, worst case for now. */
request = 1 + mas_mt_height(mas) * 2;
goto ask_now;
}
	/* New root needs a single node */
if (unlikely(mte_is_root(mas->node)))
goto ask_now;
/* Potential spanning rebalance collapsing a node, use worst-case */
if (node_size - 1 <= mt_min_slots[wr_mas.type])
request = mas_mt_height(mas) * 2 - 1;
/* node store, slot store needs one node */
ask_now:
mas_node_count_gfp(mas, request, gfp);
mas->mas_flags |= MA_STATE_PREALLOC;
if (likely(!mas_is_err(mas)))
return 0;
mas_set_alloc_req(mas, 0);
ret = xa_err(mas->node);
mas_reset(mas);
mas_destroy(mas);
mas_reset(mas);
return ret;
}
EXPORT_SYMBOL_GPL(mas_preallocate);
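/*
 * Illustrative sketch (not part of the kernel sources): preallocating
 * outside the lock so the store itself cannot fail for lack of memory.
 * mt, index, last and ptr are hypothetical:
 *
 *	MA_STATE(mas, &mt, index, last);
 *
 *	if (mas_preallocate(&mas, ptr, GFP_KERNEL))
 *		return -ENOMEM;
 *	mtree_lock(&mt);
 *	mas_store_prealloc(&mas, ptr);
 *	mtree_unlock(&mt);
 *
 * mas_store_prealloc() calls mas_destroy() itself, releasing any
 * preallocated nodes that were not consumed.
 */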
/*
 * mas_destroy() - Destroy a maple state.
* @mas: The maple state
*
* Upon completion, check the left-most node and rebalance against the node to
* the right if necessary. Frees any allocated nodes associated with this maple
* state.
*/
void mas_destroy(struct ma_state *mas)
{
struct maple_alloc *node;
unsigned long total;
/*
* When using mas_for_each() to insert an expected number of elements,
* it is possible that the number inserted is less than the expected
* number. To fix an invalid final node, a check is performed here to
* rebalance the previous node with the final node.
*/
if (mas->mas_flags & MA_STATE_REBALANCE) {
unsigned char end;
mas_start(mas);
mtree_range_walk(mas);
end = mas_data_end(mas) + 1;
if (end < mt_min_slot_count(mas->node) - 1)
mas_destroy_rebalance(mas, end);
mas->mas_flags &= ~MA_STATE_REBALANCE;
}
mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
total = mas_allocated(mas);
while (total) {
node = mas->alloc;
mas->alloc = node->slot[0];
if (node->node_count > 1) {
size_t count = node->node_count - 1;
mt_free_bulk(count, (void __rcu **)&node->slot[1]);
total -= count;
}
kmem_cache_free(maple_node_cache, node);
total--;
}
mas->alloc = NULL;
}
EXPORT_SYMBOL_GPL(mas_destroy);
/*
* mas_expected_entries() - Set the expected number of entries that will be inserted.
* @mas: The maple state
* @nr_entries: The number of expected entries.
*
* This will attempt to pre-allocate enough nodes to store the expected number
* of entries. The allocations will occur using the bulk allocator interface
* for speed. Please call mas_destroy() on the @mas after inserting the entries
* to ensure any unused nodes are freed.
*
* Return: 0 on success, -ENOMEM if memory could not be allocated.
*/
int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
{
int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
struct maple_enode *enode = mas->node;
int nr_nodes;
int ret;
/*
* Sometimes it is necessary to duplicate a tree to a new tree, such as
* forking a process and duplicating the VMAs from one tree to a new
* tree. When such a situation arises, it is known that the new tree is
* not going to be used until the entire tree is populated. For
* performance reasons, it is best to use a bulk load with RCU disabled.
* This allows for optimistic splitting that favours the left and reuse
* of nodes during the operation.
*/
/* Optimize splitting for bulk insert in-order */
mas->mas_flags |= MA_STATE_BULK;
/*
* Avoid overflow, assume a gap between each entry and a trailing null.
* If this is wrong, it just means allocation can happen during
* insertion of entries.
*/
nr_nodes = max(nr_entries, nr_entries * 2 + 1);
if (!mt_is_alloc(mas->tree))
nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
/* Leaves; reduce slots to keep space for expansion */
nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
/* Internal nodes */
nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
/* Add working room for split (2 nodes) + new parents */
mas_node_count(mas, nr_nodes + 3);
/* Detect if allocations run out */
mas->mas_flags |= MA_STATE_PREALLOC;
if (!mas_is_err(mas))
return 0;
ret = xa_err(mas->node);
mas->node = enode;
mas_destroy(mas);
return ret;
}
EXPORT_SYMBOL_GPL(mas_expected_entries);
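/*
 * Illustrative sketch (not part of the kernel sources): a bulk load of 100
 * in-order ranges.  The indices are arbitrary:
 *
 *	DEFINE_MTREE(mt);
 *	MA_STATE(mas, &mt, 0, 0);
 *	unsigned long i;
 *
 *	mtree_lock(&mt);
 *	if (!mas_expected_entries(&mas, 100)) {
 *		for (i = 0; i < 100; i++) {
 *			mas_set_range(&mas, i * 10, i * 10 + 5);
 *			mas_store(&mas, xa_mk_value(i));
 *		}
 *	}
 *	mas_destroy(&mas);
 *	mtree_unlock(&mt);
 *
 * mas_destroy() frees unused preallocations and rebalances the final node
 * if fewer entries than expected were inserted.
 */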
static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
void **entry)
{
bool was_none = mas_is_none(mas);
if (mas_is_none(mas) || mas_is_paused(mas))
mas->node = MAS_START;
if (mas_is_start(mas))
*entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
if (mas_is_ptr(mas)) {
*entry = NULL;
if (was_none && mas->index == 0) {
mas->index = mas->last = 0;
return true;
}
mas->index = 1;
mas->last = ULONG_MAX;
mas->node = MAS_NONE;
return true;
}
if (mas_is_none(mas))
return true;
return false;
}
/**
* mas_next() - Get the next entry.
* @mas: The maple state
* @max: The maximum index to check.
*
* Returns the next entry after @mas->index.
* Must hold rcu_read_lock or the write lock.
* Can return the zero entry.
*
* Return: The next entry or %NULL
*/
void *mas_next(struct ma_state *mas, unsigned long max)
{
void *entry = NULL;
if (mas_next_setup(mas, max, &entry))
return entry;
/* Retries on dead nodes handled by mas_next_slot */
return mas_next_slot(mas, max, false);
}
EXPORT_SYMBOL_GPL(mas_next);
/**
* mas_next_range() - Advance the maple state to the next range
* @mas: The maple state
* @max: The maximum index to check.
*
* Sets @mas->index and @mas->last to the range.
* Must hold rcu_read_lock or the write lock.
* Can return the zero entry.
*
* Return: The next entry or %NULL
*/
void *mas_next_range(struct ma_state *mas, unsigned long max)
{
void *entry = NULL;
if (mas_next_setup(mas, max, &entry))
return entry;
/* Retries on dead nodes handled by mas_next_slot */
return mas_next_slot(mas, max, true);
}
EXPORT_SYMBOL_GPL(mas_next_range);
/**
* mt_next() - get the next value in the maple tree
* @mt: The maple tree
* @index: The start index
* @max: The maximum index to check
*
* Takes RCU read lock internally to protect the search, which does not
* protect the returned pointer after dropping RCU read lock.
* See also: Documentation/core-api/maple_tree.rst
*
* Return: The entry higher than @index or %NULL if nothing is found.
*/
void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
{
void *entry = NULL;
MA_STATE(mas, mt, index, index);
rcu_read_lock();
entry = mas_next(&mas, max);
rcu_read_unlock();
return entry;
}
EXPORT_SYMBOL_GPL(mt_next);
static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
void **entry)
{
if (mas->index <= min)
goto none;
if (mas_is_none(mas) || mas_is_paused(mas))
mas->node = MAS_START;
if (mas_is_start(mas)) {
mas_walk(mas);
if (!mas->index)
goto none;
}
if (unlikely(mas_is_ptr(mas))) {
if (!mas->index)
goto none;
mas->index = mas->last = 0;
*entry = mas_root(mas);
return true;
}
if (mas_is_none(mas)) {
if (mas->index) {
/* Walked to out-of-range pointer? */
mas->index = mas->last = 0;
mas->node = MAS_ROOT;
*entry = mas_root(mas);
return true;
}
return true;
}
return false;
none:
mas->node = MAS_NONE;
return true;
}
/**
* mas_prev() - Get the previous entry
* @mas: The maple state
* @min: The minimum value to check.
*
* Must hold rcu_read_lock or the write lock.
* Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not
* searchable nodes.
*
* Return: the previous value or %NULL.
*/
void *mas_prev(struct ma_state *mas, unsigned long min)
{
void *entry = NULL;
if (mas_prev_setup(mas, min, &entry))
return entry;
return mas_prev_slot(mas, min, false);
}
EXPORT_SYMBOL_GPL(mas_prev);
/**
* mas_prev_range() - Advance to the previous range
* @mas: The maple state
* @min: The minimum value to check.
*
* Sets @mas->index and @mas->last to the range.
* Must hold rcu_read_lock or the write lock.
* Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not
* searchable nodes.
*
* Return: the previous value or %NULL.
*/
void *mas_prev_range(struct ma_state *mas, unsigned long min)
{
void *entry = NULL;
if (mas_prev_setup(mas, min, &entry))
return entry;
return mas_prev_slot(mas, min, true);
}
EXPORT_SYMBOL_GPL(mas_prev_range);
/**
* mt_prev() - get the previous value in the maple tree
* @mt: The maple tree
* @index: The start index
* @min: The minimum index to check
*
* Takes RCU read lock internally to protect the search, which does not
* protect the returned pointer after dropping RCU read lock.
* See also: Documentation/core-api/maple_tree.rst
*
* Return: The entry before @index or %NULL if nothing is found.
*/
void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
{
void *entry = NULL;
MA_STATE(mas, mt, index, index);
rcu_read_lock();
entry = mas_prev(&mas, min);
rcu_read_unlock();
return entry;
}
EXPORT_SYMBOL_GPL(mt_prev);
/**
* mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
* @mas: The maple state to pause
*
* Some users need to pause a walk and drop the lock they're holding in
* order to yield to a higher priority thread or carry out an operation
* on an entry. Those users should call this function before they drop
* the lock. It resets the @mas to be suitable for the next iteration
* of the loop after the user has reacquired the lock. If most entries
* found during a walk require you to call mas_pause(), the mt_for_each()
* iterator may be more appropriate.
*
*/
void mas_pause(struct ma_state *mas)
{
mas->node = MAS_PAUSE;
}
EXPORT_SYMBOL_GPL(mas_pause);
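/*
 * Illustrative sketch (not part of the kernel sources): yielding the CPU in
 * the middle of a long RCU iteration over a hypothetical tree mt:
 *
 *	MA_STATE(mas, &mt, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		if (need_resched()) {
 *			mas_pause(&mas);
 *			rcu_read_unlock();
 *			cond_resched();
 *			rcu_read_lock();
 *		}
 *	}
 *	rcu_read_unlock();
 */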
/**
* mas_find_setup() - Internal function to set up mas_find*().
* @mas: The maple state
* @max: The maximum index
* @entry: Pointer to the entry
*
 * Return: True if @entry is the answer, false otherwise.
*/
static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
void **entry)
{
*entry = NULL;
if (unlikely(mas_is_none(mas))) {
if (unlikely(mas->last >= max))
return true;
mas->index = mas->last;
mas->node = MAS_START;
} else if (unlikely(mas_is_paused(mas))) {
if (unlikely(mas->last >= max))
return true;
mas->node = MAS_START;
mas->index = ++mas->last;
} else if (unlikely(mas_is_ptr(mas)))
goto ptr_out_of_range;
if (unlikely(mas_is_start(mas))) {
/* First run or continue */
if (mas->index > max)
return true;
*entry = mas_walk(mas);
if (*entry)
return true;
}
if (unlikely(!mas_searchable(mas))) {
if (unlikely(mas_is_ptr(mas)))
goto ptr_out_of_range;
return true;
}
if (mas->index == max)
return true;
return false;
ptr_out_of_range:
mas->node = MAS_NONE;
mas->index = 1;
mas->last = ULONG_MAX;
return true;
}
/**
* mas_find() - On the first call, find the entry at or after mas->index up to
* %max. Otherwise, find the entry after mas->index.
* @mas: The maple state
* @max: The maximum value to check.
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
* May set @mas->node to MAS_NONE.
*
* Return: The entry or %NULL.
*/
void *mas_find(struct ma_state *mas, unsigned long max)
{
void *entry = NULL;
if (mas_find_setup(mas, max, &entry))
return entry;
/* Retries on dead nodes handled by mas_next_slot */
return mas_next_slot(mas, max, false);
}
EXPORT_SYMBOL_GPL(mas_find);
/**
* mas_find_range() - On the first call, find the entry at or after
* mas->index up to %max. Otherwise, advance to the next slot mas->index.
* @mas: The maple state
* @max: The maximum value to check.
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
* May set @mas->node to MAS_NONE.
*
* Return: The entry or %NULL.
*/
void *mas_find_range(struct ma_state *mas, unsigned long max)
{
void *entry;
if (mas_find_setup(mas, max, &entry))
return entry;
/* Retries on dead nodes handled by mas_next_slot */
return mas_next_slot(mas, max, true);
}
EXPORT_SYMBOL_GPL(mas_find_range);
/**
* mas_find_rev_setup() - Internal function to set up mas_find_*_rev()
* @mas: The maple state
* @min: The minimum index
* @entry: Pointer to the entry
*
 * Return: True if @entry is the answer, false otherwise.
*/
static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
void **entry)
{
*entry = NULL;
if (unlikely(mas_is_none(mas))) {
if (mas->index <= min)
goto none;
mas->last = mas->index;
mas->node = MAS_START;
}
if (unlikely(mas_is_paused(mas))) {
if (unlikely(mas->index <= min)) {
mas->node = MAS_NONE;
return true;
}
mas->node = MAS_START;
mas->last = --mas->index;
}
if (unlikely(mas_is_start(mas))) {
/* First run or continue */
if (mas->index < min)
return true;
*entry = mas_walk(mas);
if (*entry)
return true;
}
if (unlikely(!mas_searchable(mas))) {
if (mas_is_ptr(mas))
goto none;
if (mas_is_none(mas)) {
/*
* Walked to the location, and there was nothing so the
* previous location is 0.
*/
mas->last = mas->index = 0;
mas->node = MAS_ROOT;
*entry = mas_root(mas);
return true;
}
}
if (mas->index < min)
return true;
return false;
none:
mas->node = MAS_NONE;
return true;
}
/**
 * mas_find_rev() - On the first call, find the first non-null entry at or
 * below mas->index down to %min. Otherwise, find the first non-null entry
 * below mas->index down to %min.
* @mas: The maple state
* @min: The minimum value to check.
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
* May set @mas->node to MAS_NONE.
*
* Return: The entry or %NULL.
*/
void *mas_find_rev(struct ma_state *mas, unsigned long min)
{
void *entry;
if (mas_find_rev_setup(mas, min, &entry))
return entry;
/* Retries on dead nodes handled by mas_prev_slot */
return mas_prev_slot(mas, min, false);
}
EXPORT_SYMBOL_GPL(mas_find_rev);
/**
 * mas_find_range_rev() - On the first call, find the first non-null entry at
 * or below mas->index down to %min. Otherwise, advance to the previous slot
 * before mas->index down to %min.
* @mas: The maple state
* @min: The minimum value to check.
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
* May set @mas->node to MAS_NONE.
*
* Return: The entry or %NULL.
*/
void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
{
void *entry;
if (mas_find_rev_setup(mas, min, &entry))
return entry;
/* Retries on dead nodes handled by mas_prev_slot */
return mas_prev_slot(mas, min, true);
}
EXPORT_SYMBOL_GPL(mas_find_range_rev);
/**
* mas_erase() - Find the range in which index resides and erase the entire
* range.
* @mas: The maple state
*
* Must hold the write lock.
* Searches for @mas->index, sets @mas->index and @mas->last to the range and
* erases that range.
*
* Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
*/
void *mas_erase(struct ma_state *mas)
{
void *entry;
MA_WR_STATE(wr_mas, mas, NULL);
if (mas_is_none(mas) || mas_is_paused(mas))
mas->node = MAS_START;
/* Retry unnecessary when holding the write lock. */
entry = mas_state_walk(mas);
if (!entry)
return NULL;
write_retry:
/* Must reset to ensure spanning writes of last slot are detected */
mas_reset(mas);
mas_wr_store_setup(&wr_mas);
mas_wr_store_entry(&wr_mas);
if (mas_nomem(mas, GFP_KERNEL))
goto write_retry;
return entry;
}
EXPORT_SYMBOL_GPL(mas_erase);
/**
 * mas_nomem() - Check if there was an error allocating and do the allocation
 * if necessary. If no allocation error is pending, free any unused
 * allocations instead.
* @mas: The maple state
* @gfp: The GFP_FLAGS to use for allocations
* Return: true on allocation, false otherwise.
*/
bool mas_nomem(struct ma_state *mas, gfp_t gfp)
__must_hold(mas->tree->ma_lock)
{
if (likely(mas->node != MA_ERROR(-ENOMEM))) {
mas_destroy(mas);
return false;
}
if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
mtree_unlock(mas->tree);
mas_alloc_nodes(mas, gfp);
mtree_lock(mas->tree);
} else {
mas_alloc_nodes(mas, gfp);
}
if (!mas_allocated(mas))
return false;
mas->node = MAS_START;
return true;
}
void __init maple_tree_init(void)
{
maple_node_cache = kmem_cache_create("maple_node",
sizeof(struct maple_node), sizeof(struct maple_node),
SLAB_PANIC, NULL);
}
/**
* mtree_load() - Load a value stored in a maple tree
* @mt: The maple tree
* @index: The index to load
*
* Return: the entry or %NULL
*/
void *mtree_load(struct maple_tree *mt, unsigned long index)
{
MA_STATE(mas, mt, index, index);
void *entry;
trace_ma_read(__func__, &mas);
rcu_read_lock();
retry:
entry = mas_start(&mas);
if (unlikely(mas_is_none(&mas)))
goto unlock;
if (unlikely(mas_is_ptr(&mas))) {
if (index)
entry = NULL;
goto unlock;
}
entry = mtree_lookup_walk(&mas);
if (!entry && unlikely(mas_is_start(&mas)))
goto retry;
unlock:
rcu_read_unlock();
if (xa_is_zero(entry))
return NULL;
return entry;
}
EXPORT_SYMBOL(mtree_load);
/**
* mtree_store_range() - Store an entry at a given range.
* @mt: The maple tree
* @index: The start of the range
* @last: The end of the range
* @entry: The entry to store
* @gfp: The GFP_FLAGS to use for allocations
*
* Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
* be allocated.
*/
int mtree_store_range(struct maple_tree *mt, unsigned long index,
unsigned long last, void *entry, gfp_t gfp)
{
MA_STATE(mas, mt, index, last);
MA_WR_STATE(wr_mas, &mas, entry);
trace_ma_write(__func__, &mas, 0, entry);
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
if (index > last)
return -EINVAL;
mtree_lock(mt);
retry:
mas_wr_store_entry(&wr_mas);
if (mas_nomem(&mas, gfp))
goto retry;
mtree_unlock(mt);
if (mas_is_err(&mas))
return xa_err(mas.node);
return 0;
}
EXPORT_SYMBOL(mtree_store_range);
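/*
 * Illustrative sketch (not part of the kernel sources): one entry can cover
 * a whole range of indices, and a load anywhere inside the range returns it:
 *
 *	DEFINE_MTREE(mt);
 *
 *	mtree_store_range(&mt, 16, 31, xa_mk_value(7), GFP_KERNEL);
 *	WARN_ON(mtree_load(&mt, 20) != xa_mk_value(7));
 */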
/**
* mtree_store() - Store an entry at a given index.
* @mt: The maple tree
* @index: The index to store the value
* @entry: The entry to store
* @gfp: The GFP_FLAGS to use for allocations
*
* Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
* be allocated.
*/
int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
gfp_t gfp)
{
return mtree_store_range(mt, index, index, entry, gfp);
}
EXPORT_SYMBOL(mtree_store);
/**
* mtree_insert_range() - Insert an entry at a given range if there is no value.
* @mt: The maple tree
* @first: The start of the range
* @last: The end of the range
* @entry: The entry to store
* @gfp: The GFP_FLAGS to use for allocations.
*
 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
* request, -ENOMEM if memory could not be allocated.
*/
int mtree_insert_range(struct maple_tree *mt, unsigned long first,
unsigned long last, void *entry, gfp_t gfp)
{
MA_STATE(ms, mt, first, last);
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
if (first > last)
return -EINVAL;
mtree_lock(mt);
retry:
mas_insert(&ms, entry);
if (mas_nomem(&ms, gfp))
goto retry;
mtree_unlock(mt);
if (mas_is_err(&ms))
return xa_err(ms.node);
return 0;
}
EXPORT_SYMBOL(mtree_insert_range);
/**
* mtree_insert() - Insert an entry at a given index if there is no value.
* @mt: The maple tree
* @index: The index to store the value
* @entry: The entry to store
* @gfp: The GFP_FLAGS to use for allocations.
*
* Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
* request, -ENOMEM if memory could not be allocated.
*/
int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
gfp_t gfp)
{
return mtree_insert_range(mt, index, index, entry, gfp);
}
EXPORT_SYMBOL(mtree_insert);
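/*
 * Illustrative sketch (assumed names): unlike mtree_store(), an insert
 * refuses to overwrite an occupied range.
 *
 *	DEFINE_MTREE(tree);
 *
 *	mtree_insert(&tree, 7, ptr_a, GFP_KERNEL);	returns 0
 *	mtree_insert(&tree, 7, ptr_b, GFP_KERNEL);	returns -EEXIST
 *	mtree_store(&tree, 7, ptr_b, GFP_KERNEL);	returns 0, overwrites
 */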
int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
void *entry, unsigned long size, unsigned long min,
unsigned long max, gfp_t gfp)
{
int ret = 0;
MA_STATE(mas, mt, 0, 0);
if (!mt_is_alloc(mt))
return -EINVAL;
if (WARN_ON_ONCE(mt_is_reserved(entry)))
return -EINVAL;
mtree_lock(mt);
retry:
ret = mas_empty_area(&mas, min, max, size);
if (ret)
goto unlock;
mas_insert(&mas, entry);
/*
* mas_nomem() may release the lock, causing the allocated area
* to be unavailable, so try to allocate a free area again.
*/
if (mas_nomem(&mas, gfp))
goto retry;
if (mas_is_err(&mas))
ret = xa_err(mas.node);
else
*startp = mas.index;
unlock:
mtree_unlock(mt);
return ret;
}
EXPORT_SYMBOL(mtree_alloc_range);
int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
void *entry, unsigned long size, unsigned long min,
unsigned long max, gfp_t gfp)
{
int ret = 0;
MA_STATE(mas, mt, 0, 0);
if (!mt_is_alloc(mt))
return -EINVAL;
if (WARN_ON_ONCE(mt_is_reserved(entry)))
return -EINVAL;
mtree_lock(mt);
retry:
ret = mas_empty_area_rev(&mas, min, max, size);
if (ret)
goto unlock;
mas_insert(&mas, entry);
/*
* mas_nomem() may release the lock, causing the allocated area
* to be unavailable, so try to allocate a free area again.
*/
if (mas_nomem(&mas, gfp))
goto retry;
if (mas_is_err(&mas))
ret = xa_err(mas.node);
else
*startp = mas.index;
unlock:
mtree_unlock(mt);
return ret;
}
EXPORT_SYMBOL(mtree_alloc_rrange);
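/*
 * Illustrative sketch (assumed names): gap allocation requires an
 * allocation tree (MT_FLAGS_ALLOC_RANGE). mtree_alloc_range() takes the
 * lowest fitting gap, mtree_alloc_rrange() the highest.
 *
 *	struct maple_tree tree = MTREE_INIT(tree, MT_FLAGS_ALLOC_RANGE);
 *	unsigned long start;
 *	int err;
 *
 *	err = mtree_alloc_range(&tree, &start, ptr, 16, 0, 1000, GFP_KERNEL);
 *	On success, ptr occupies [start, start + 15], the lowest fitting gap.
 */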
/**
* mtree_erase() - Find an index and erase the entire range.
* @mt: The maple tree
* @index: The index to erase
*
* Erasing is the same as a walk to an entry then a store of a NULL to that
* ENTIRE range. In fact, it is implemented as such using the advanced API.
*
* Return: The entry stored at the @index or %NULL
*/
void *mtree_erase(struct maple_tree *mt, unsigned long index)
{
void *entry = NULL;
MA_STATE(mas, mt, index, index);
trace_ma_op(__func__, &mas);
mtree_lock(mt);
entry = mas_erase(&mas);
mtree_unlock(mt);
return entry;
}
EXPORT_SYMBOL(mtree_erase);
/**
* __mt_destroy() - Walk and free all nodes of a locked maple tree.
* @mt: The maple tree
*
* Note: Does not handle locking.
*/
void __mt_destroy(struct maple_tree *mt)
{
void *root = mt_root_locked(mt);
rcu_assign_pointer(mt->ma_root, NULL);
if (xa_is_node(root))
mte_destroy_walk(root, mt);
mt->ma_flags = 0;
}
EXPORT_SYMBOL_GPL(__mt_destroy);
/**
* mtree_destroy() - Destroy a maple tree
* @mt: The maple tree
*
* Frees all resources used by the tree. Handles locking.
*/
void mtree_destroy(struct maple_tree *mt)
{
mtree_lock(mt);
__mt_destroy(mt);
mtree_unlock(mt);
}
EXPORT_SYMBOL(mtree_destroy);
/**
* mt_find() - Search from the start up until an entry is found.
* @mt: The maple tree
* @index: Pointer which contains the start location of the search
* @max: The maximum value of the search range
*
* Takes the RCU read lock internally to protect the search; the returned
* pointer is not protected once the RCU read lock has been dropped.
* See also: Documentation/core-api/maple_tree.rst
*
* If an entry is found, @index is updated to point to the next possible
* entry, independent of whether the found entry occupies a single index
* or a range of indices.
*
* Return: The entry at or after the @index or %NULL
*/
void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
{
MA_STATE(mas, mt, *index, *index);
void *entry;
#ifdef CONFIG_DEBUG_MAPLE_TREE
unsigned long copy = *index;
#endif
trace_ma_read(__func__, &mas);
if ((*index) > max)
return NULL;
rcu_read_lock();
retry:
entry = mas_state_walk(&mas);
if (mas_is_start(&mas))
goto retry;
if (unlikely(xa_is_zero(entry)))
entry = NULL;
if (entry)
goto unlock;
while (mas_searchable(&mas) && (mas.last < max)) {
entry = mas_next_entry(&mas, max);
if (likely(entry && !xa_is_zero(entry)))
break;
}
if (unlikely(xa_is_zero(entry)))
entry = NULL;
unlock:
rcu_read_unlock();
if (likely(entry)) {
*index = mas.last + 1;
#ifdef CONFIG_DEBUG_MAPLE_TREE
if (MT_WARN_ON(mt, (*index) && ((*index) <= copy)))
pr_err("index not increased! %lx <= %lx\n",
*index, copy);
#endif
}
return entry;
}
EXPORT_SYMBOL(mt_find);
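/*
 * Illustrative sketch: mt_find() and mt_find_after() are the building
 * blocks of the mt_for_each() iterator in <linux/maple_tree.h>. Because
 * @index is advanced past each returned range, the loop visits every
 * entry exactly once.
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	mt_for_each(&tree, entry, index, ULONG_MAX) {
 *		entry is non-NULL here; index already points past its range
 *	}
 */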
/**
* mt_find_after() - Search from the start up until an entry is found.
* @mt: The maple tree
* @index: Pointer which contains the start location of the search
* @max: The maximum value to check
*
* Same as mt_find() except that it checks @index for 0 before
* searching. If @index == 0, the search is aborted. This covers a wrap
* around of @index to 0 in an iterator loop.
*
* Return: The entry at or after the @index or %NULL
*/
void *mt_find_after(struct maple_tree *mt, unsigned long *index,
unsigned long max)
{
if (!(*index))
return NULL;
return mt_find(mt, index, max);
}
EXPORT_SYMBOL(mt_find_after);
#ifdef CONFIG_DEBUG_MAPLE_TREE
atomic_t maple_tree_tests_run;
EXPORT_SYMBOL_GPL(maple_tree_tests_run);
atomic_t maple_tree_tests_passed;
EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
#ifndef __KERNEL__
extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
void mt_set_non_kernel(unsigned int val)
{
kmem_cache_set_non_kernel(maple_node_cache, val);
}
extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
unsigned long mt_get_alloc_size(void)
{
return kmem_cache_get_alloc(maple_node_cache);
}
extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
void mt_zero_nr_tallocated(void)
{
kmem_cache_zero_nr_tallocated(maple_node_cache);
}
extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
unsigned int mt_nr_tallocated(void)
{
return kmem_cache_nr_tallocated(maple_node_cache);
}
extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
unsigned int mt_nr_allocated(void)
{
return kmem_cache_nr_allocated(maple_node_cache);
}
/*
* mas_dead_node() - Check if the maple state is pointing to a dead node.
* @mas: The maple state
* @index: The index to restore in @mas.
*
* Used in test code.
* Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
*/
static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
{
if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
return 0;
if (likely(!mte_dead_node(mas->node)))
return 0;
mas_rewalk(mas, index);
return 1;
}
void mt_cache_shrink(void)
{
}
#else
/*
* mt_cache_shrink() - For testing, don't use this.
*
* Certain testcases can trigger an OOM when combined with other memory
* debugging configuration options. This function is used to reduce the
* possibility of an out of memory event due to kmem_cache objects remaining
* around for longer than usual.
*/
void mt_cache_shrink(void)
{
kmem_cache_shrink(maple_node_cache);
}
EXPORT_SYMBOL_GPL(mt_cache_shrink);
#endif /* not defined __KERNEL__ */
/*
* mas_get_slot() - Get the entry in the maple state node stored at @offset.
* @mas: The maple state
* @offset: The offset into the slot array to fetch.
*
* Return: The entry stored at @offset.
*/
static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
unsigned char offset)
{
return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
offset);
}
/* Depth first search, post-order */
static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
{
struct maple_enode *p = MAS_NONE, *mn = mas->node;
unsigned long p_min, p_max;
mas_next_node(mas, mas_mn(mas), max);
if (!mas_is_none(mas))
return;
if (mte_is_root(mn))
return;
mas->node = mn;
mas_ascend(mas);
do {
p = mas->node;
p_min = mas->min;
p_max = mas->max;
mas_prev_node(mas, 0);
} while (!mas_is_none(mas));
mas->node = p;
mas->max = p_max;
mas->min = p_min;
}
/* Tree validations */
static void mt_dump_node(const struct maple_tree *mt, void *entry,
unsigned long min, unsigned long max, unsigned int depth,
enum mt_dump_format format);
static void mt_dump_range(unsigned long min, unsigned long max,
unsigned int depth, enum mt_dump_format format)
{
static const char spaces[] = " ";
switch(format) {
case mt_dump_hex:
if (min == max)
pr_info("%.*s%lx: ", depth * 2, spaces, min);
else
pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
break;
default:
case mt_dump_dec:
if (min == max)
pr_info("%.*s%lu: ", depth * 2, spaces, min);
else
pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
}
}
static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
unsigned int depth, enum mt_dump_format format)
{
mt_dump_range(min, max, depth, format);
if (xa_is_value(entry))
pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
xa_to_value(entry), entry);
else if (xa_is_zero(entry))
pr_cont("zero (%ld)\n", xa_to_internal(entry));
else if (mt_is_reserved(entry))
pr_cont("UNKNOWN ENTRY (%p)\n", entry);
else
pr_cont("%p\n", entry);
}
static void mt_dump_range64(const struct maple_tree *mt, void *entry,
unsigned long min, unsigned long max, unsigned int depth,
enum mt_dump_format format)
{
struct maple_range_64 *node = &mte_to_node(entry)->mr64;
bool leaf = mte_is_leaf(entry);
unsigned long first = min;
int i;
pr_cont(" contents: ");
for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
switch(format) {
case mt_dump_hex:
pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
break;
default:
case mt_dump_dec:
pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
}
}
pr_cont("%p\n", node->slot[i]);
for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
unsigned long last = max;
if (i < (MAPLE_RANGE64_SLOTS - 1))
last = node->pivot[i];
else if (!node->slot[i] && max != mt_node_max(entry))
break;
if (last == 0 && i > 0)
break;
if (leaf)
mt_dump_entry(mt_slot(mt, node->slot, i),
first, last, depth + 1, format);
else if (node->slot[i])
mt_dump_node(mt, mt_slot(mt, node->slot, i),
first, last, depth + 1, format);
if (last == max)
break;
if (last > max) {
switch(format) {
case mt_dump_hex:
pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
node, last, max, i);
break;
default:
case mt_dump_dec:
pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
node, last, max, i);
}
}
first = last + 1;
}
}
static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
unsigned long min, unsigned long max, unsigned int depth,
enum mt_dump_format format)
{
struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
bool leaf = mte_is_leaf(entry);
unsigned long first = min;
int i;
pr_cont(" contents: ");
for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
switch (format) {
case mt_dump_hex:
pr_cont("%lx ", node->gap[i]);
break;
default:
case mt_dump_dec:
pr_cont("%lu ", node->gap[i]);
}
}
pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) {
switch (format) {
case mt_dump_hex:
pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
break;
default:
case mt_dump_dec:
pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
}
}
pr_cont("%p\n", node->slot[i]);
for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
unsigned long last = max;
if (i < (MAPLE_ARANGE64_SLOTS - 1))
last = node->pivot[i];
else if (!node->slot[i])
break;
if (last == 0 && i > 0)
break;
if (leaf)
mt_dump_entry(mt_slot(mt, node->slot, i),
first, last, depth + 1, format);
else if (node->slot[i])
mt_dump_node(mt, mt_slot(mt, node->slot, i),
first, last, depth + 1, format);
if (last == max)
break;
if (last > max) {
pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
node, last, max, i);
break;
}
first = last + 1;
}
}
static void mt_dump_node(const struct maple_tree *mt, void *entry,
unsigned long min, unsigned long max, unsigned int depth,
enum mt_dump_format format)
{
struct maple_node *node = mte_to_node(entry);
unsigned int type = mte_node_type(entry);
unsigned int i;
mt_dump_range(min, max, depth, format);
pr_cont("node %p depth %d type %d parent %p", node, depth, type,
node ? node->parent : NULL);
switch (type) {
case maple_dense:
pr_cont("\n");
for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
if (min + i > max)
pr_cont("OUT OF RANGE: ");
mt_dump_entry(mt_slot(mt, node->slot, i),
min + i, min + i, depth, format);
}
break;
case maple_leaf_64:
case maple_range_64:
mt_dump_range64(mt, entry, min, max, depth, format);
break;
case maple_arange_64:
mt_dump_arange64(mt, entry, min, max, depth, format);
break;
default:
pr_cont(" UNKNOWN TYPE\n");
}
}
void mt_dump(const struct maple_tree *mt, enum mt_dump_format format)
{
void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
pr_info("maple_tree(%p) flags %X, height %u root %p\n",
mt, mt->ma_flags, mt_height(mt), entry);
if (!xa_is_node(entry))
mt_dump_entry(entry, 0, 0, 0, format);
else if (entry)
mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format);
}
EXPORT_SYMBOL_GPL(mt_dump);
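/*
 * Illustrative sketch: dumping a tree while debugging; the caller should
 * hold the tree lock or the RCU read lock.
 *
 *	mt_dump(&tree, mt_dump_hex);	ranges printed in hexadecimal
 *	mt_dump(&tree, mt_dump_dec);	ranges printed in decimal
 */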
/*
* Calculate the maximum gap in a node and check if that's what is reported in
* the parent (unless root).
*/
static void mas_validate_gaps(struct ma_state *mas)
{
struct maple_enode *mte = mas->node;
struct maple_node *p_mn, *node = mte_to_node(mte);
enum maple_type mt = mte_node_type(mas->node);
unsigned long gap = 0, max_gap = 0;
unsigned long p_end, p_start = mas->min;
unsigned char p_slot, offset;
unsigned long *gaps = NULL;
unsigned long *pivots = ma_pivots(node, mt);
unsigned int i;
if (ma_is_dense(mt)) {
for (i = 0; i < mt_slot_count(mte); i++) {
if (mas_get_slot(mas, i)) {
if (gap > max_gap)
max_gap = gap;
gap = 0;
continue;
}
gap++;
}
goto counted;
}
gaps = ma_gaps(node, mt);
for (i = 0; i < mt_slot_count(mte); i++) {
p_end = mas_safe_pivot(mas, pivots, i, mt);
if (!gaps) {
if (!mas_get_slot(mas, i))
gap = p_end - p_start + 1;
} else {
void *entry = mas_get_slot(mas, i);
gap = gaps[i];
MT_BUG_ON(mas->tree, !entry);
if (gap > p_end - p_start + 1) {
pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
mas_mn(mas), i, gap, p_end, p_start,
p_end - p_start + 1);
MT_BUG_ON(mas->tree, gap > p_end - p_start + 1);
}
}
if (gap > max_gap)
max_gap = gap;
p_start = p_end + 1;
if (p_end >= mas->max)
break;
}
counted:
if (mt == maple_arange_64) {
offset = ma_meta_gap(node, mt);
if (offset > i) {
pr_err("gap offset %p[%u] is invalid\n", node, offset);
MT_BUG_ON(mas->tree, 1);
}
if (gaps[offset] != max_gap) {
pr_err("gap %p[%u] is not the largest gap %lu\n",
node, offset, max_gap);
MT_BUG_ON(mas->tree, 1);
}
MT_BUG_ON(mas->tree, !gaps);
for (i++ ; i < mt_slot_count(mte); i++) {
if (gaps[i] != 0) {
pr_err("gap %p[%u] beyond node limit != 0\n",
node, i);
MT_BUG_ON(mas->tree, 1);
}
}
}
if (mte_is_root(mte))
return;
p_slot = mte_parent_slot(mas->node);
p_mn = mte_parent(mte);
MT_BUG_ON(mas->tree, max_gap > mas->max);
if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
mt_dump(mas->tree, mt_dump_hex);
MT_BUG_ON(mas->tree, 1);
}
}
static void mas_validate_parent_slot(struct ma_state *mas)
{
struct maple_node *parent;
struct maple_enode *node;
enum maple_type p_type;
unsigned char p_slot;
void __rcu **slots;
int i;
if (mte_is_root(mas->node))
return;
p_slot = mte_parent_slot(mas->node);
p_type = mas_parent_type(mas, mas->node);
parent = mte_parent(mas->node);
slots = ma_slots(parent, p_type);
MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
/* Check prev/next parent slot for duplicate node entry */
for (i = 0; i < mt_slots[p_type]; i++) {
node = mas_slot(mas, slots, i);
if (i == p_slot) {
if (node != mas->node)
pr_err("parent %p[%u] does not have %p\n",
parent, i, mas_mn(mas));
MT_BUG_ON(mas->tree, node != mas->node);
} else if (node == mas->node) {
pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
mas_mn(mas), parent, i, p_slot);
MT_BUG_ON(mas->tree, node == mas->node);
}
}
}
static void mas_validate_child_slot(struct ma_state *mas)
{
enum maple_type type = mte_node_type(mas->node);
void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
struct maple_enode *child;
unsigned char i;
if (mte_is_leaf(mas->node))
return;
for (i = 0; i < mt_slots[type]; i++) {
child = mas_slot(mas, slots, i);
if (!child) {
pr_err("Non-leaf node lacks child at %p[%u]\n",
mas_mn(mas), i);
MT_BUG_ON(mas->tree, 1);
}
if (mte_parent_slot(child) != i) {
pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
mas_mn(mas), i, mte_to_node(child),
mte_parent_slot(child));
MT_BUG_ON(mas->tree, 1);
}
if (mte_parent(child) != mte_to_node(mas->node)) {
pr_err("child %p has parent %p not %p\n",
mte_to_node(child), mte_parent(child),
mte_to_node(mas->node));
MT_BUG_ON(mas->tree, 1);
}
if (i < mt_pivots[type] && pivots[i] == mas->max)
break;
}
}
/*
* Validate that all pivots are within mas->min and mas->max, that the
* metadata ends where the maximum ends, and that no slots or pivots are
* set beyond the end of the data.
*/
static void mas_validate_limits(struct ma_state *mas)
{
int i;
unsigned long prev_piv = 0;
enum maple_type type = mte_node_type(mas->node);
void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
unsigned long *pivots = ma_pivots(mas_mn(mas), type);
for (i = 0; i < mt_slots[type]; i++) {
unsigned long piv;
piv = mas_safe_pivot(mas, pivots, i, type);
if (!piv && (i != 0)) {
pr_err("Missing node limit pivot at %p[%u]",
mas_mn(mas), i);
MAS_WARN_ON(mas, 1);
}
if (prev_piv > piv) {
pr_err("%p[%u] piv %lu < prev_piv %lu\n",
mas_mn(mas), i, piv, prev_piv);
MAS_WARN_ON(mas, piv < prev_piv);
}
if (piv < mas->min) {
pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
piv, mas->min);
MAS_WARN_ON(mas, piv < mas->min);
}
if (piv > mas->max) {
pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
piv, mas->max);
MAS_WARN_ON(mas, piv > mas->max);
}
prev_piv = piv;
if (piv == mas->max)
break;
}
if (mas_data_end(mas) != i) {
pr_err("node%p: data_end %u != the last slot offset %u\n",
mas_mn(mas), mas_data_end(mas), i);
MT_BUG_ON(mas->tree, 1);
}
for (i += 1; i < mt_slots[type]; i++) {
void *entry = mas_slot(mas, slots, i);
if (entry && (i != mt_slots[type] - 1)) {
pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
i, entry);
MT_BUG_ON(mas->tree, entry != NULL);
}
if (i < mt_pivots[type]) {
unsigned long piv = pivots[i];
if (!piv)
continue;
pr_err("%p[%u] should not have piv %lu\n",
mas_mn(mas), i, piv);
MAS_WARN_ON(mas, i < mt_pivots[type] - 1);
}
}
}
static void mt_validate_nulls(struct maple_tree *mt)
{
void *entry, *last = (void *)1;
unsigned char offset = 0;
void __rcu **slots;
MA_STATE(mas, mt, 0, 0);
mas_start(&mas);
if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
return;
while (!mte_is_leaf(mas.node))
mas_descend(&mas);
slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
do {
entry = mas_slot(&mas, slots, offset);
if (!last && !entry) {
pr_err("Sequential nulls end at %p[%u]\n",
mas_mn(&mas), offset);
}
MT_BUG_ON(mt, !last && !entry);
last = entry;
if (offset == mas_data_end(&mas)) {
mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
if (mas_is_none(&mas))
return;
offset = 0;
slots = ma_slots(mte_to_node(mas.node),
mte_node_type(mas.node));
} else {
offset++;
}
} while (!mas_is_none(&mas));
}
/*
* Validate a maple tree by checking:
* 1. The limits (pivots are within mas->min to mas->max)
* 2. The gap is correctly set in the parents
* 3. The parent and child slots are consistent and NULL entries are coalesced
*/
void mt_validate(struct maple_tree *mt)
{
unsigned char end;
MA_STATE(mas, mt, 0, 0);
rcu_read_lock();
mas_start(&mas);
if (!mas_searchable(&mas))
goto done;
while (!mte_is_leaf(mas.node))
mas_descend(&mas);
while (!mas_is_none(&mas)) {
MAS_WARN_ON(&mas, mte_dead_node(mas.node));
end = mas_data_end(&mas);
if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
(mas.max != ULONG_MAX))) {
pr_err("Invalid size %u of %p\n", end, mas_mn(&mas));
}
mas_validate_parent_slot(&mas);
mas_validate_limits(&mas);
mas_validate_child_slot(&mas);
if (mt_is_alloc(mt))
mas_validate_gaps(&mas);
mas_dfs_postorder(&mas, ULONG_MAX);
}
mt_validate_nulls(mt);
done:
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt_validate);
void mas_dump(const struct ma_state *mas)
{
pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
if (mas_is_none(mas))
pr_err("(MAS_NONE) ");
else if (mas_is_ptr(mas))
pr_err("(MAS_ROOT) ");
else if (mas_is_start(mas))
pr_err("(MAS_START) ");
else if (mas_is_paused(mas))
pr_err("(MAS_PAUSED) ");
pr_err("[%u] index=%lx last=%lx\n", mas->offset, mas->index, mas->last);
pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
if (mas->index > mas->last)
pr_err("Check index & last\n");
}
EXPORT_SYMBOL_GPL(mas_dump);
void mas_wr_dump(const struct ma_wr_state *wr_mas)
{
pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
wr_mas->node, wr_mas->r_min, wr_mas->r_max);
pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
wr_mas->type, wr_mas->offset_end, wr_mas->node_end,
wr_mas->end_piv);
}
EXPORT_SYMBOL_GPL(mas_wr_dump);
#endif /* CONFIG_DEBUG_MAPLE_TREE */
| linux-master | lib/maple_tree.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/module.h>
#include <linux/crc64.h>
#include <linux/err.h>
#include <linux/init.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <linux/static_key.h>
#include <linux/notifier.h>
static struct crypto_shash __rcu *crc64_rocksoft_tfm;
static DEFINE_STATIC_KEY_TRUE(crc64_rocksoft_fallback);
static DEFINE_MUTEX(crc64_rocksoft_mutex);
static struct work_struct crc64_rocksoft_rehash_work;
static int crc64_rocksoft_notify(struct notifier_block *self, unsigned long val, void *data)
{
struct crypto_alg *alg = data;
if (val != CRYPTO_MSG_ALG_LOADED ||
strcmp(alg->cra_name, CRC64_ROCKSOFT_STRING))
return NOTIFY_DONE;
schedule_work(&crc64_rocksoft_rehash_work);
return NOTIFY_OK;
}
static void crc64_rocksoft_rehash(struct work_struct *work)
{
struct crypto_shash *new, *old;
mutex_lock(&crc64_rocksoft_mutex);
old = rcu_dereference_protected(crc64_rocksoft_tfm,
lockdep_is_held(&crc64_rocksoft_mutex));
new = crypto_alloc_shash(CRC64_ROCKSOFT_STRING, 0, 0);
if (IS_ERR(new)) {
mutex_unlock(&crc64_rocksoft_mutex);
return;
}
rcu_assign_pointer(crc64_rocksoft_tfm, new);
mutex_unlock(&crc64_rocksoft_mutex);
if (old) {
synchronize_rcu();
crypto_free_shash(old);
} else {
static_branch_disable(&crc64_rocksoft_fallback);
}
}
static struct notifier_block crc64_rocksoft_nb = {
.notifier_call = crc64_rocksoft_notify,
};
u64 crc64_rocksoft_update(u64 crc, const unsigned char *buffer, size_t len)
{
struct {
struct shash_desc shash;
u64 crc;
} desc;
int err;
if (static_branch_unlikely(&crc64_rocksoft_fallback))
return crc64_rocksoft_generic(crc, buffer, len);
rcu_read_lock();
desc.shash.tfm = rcu_dereference(crc64_rocksoft_tfm);
desc.crc = crc;
err = crypto_shash_update(&desc.shash, buffer, len);
rcu_read_unlock();
BUG_ON(err);
return desc.crc;
}
EXPORT_SYMBOL_GPL(crc64_rocksoft_update);
u64 crc64_rocksoft(const unsigned char *buffer, size_t len)
{
return crc64_rocksoft_update(0, buffer, len);
}
EXPORT_SYMBOL_GPL(crc64_rocksoft);
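/*
 * Illustrative sketch (assumed buffer names): the one-shot helper equals
 * an incremental computation seeded with 0, so a buffer may be hashed in
 * fragments.
 *
 *	u64 crc = crc64_rocksoft(buf, len);
 *	crc = crc64_rocksoft_update(0, buf, len1);
 *	crc = crc64_rocksoft_update(crc, buf + len1, len - len1);
 */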
static int __init crc64_rocksoft_mod_init(void)
{
INIT_WORK(&crc64_rocksoft_rehash_work, crc64_rocksoft_rehash);
crypto_register_notifier(&crc64_rocksoft_nb);
crc64_rocksoft_rehash(&crc64_rocksoft_rehash_work);
return 0;
}
static void __exit crc64_rocksoft_mod_fini(void)
{
crypto_unregister_notifier(&crc64_rocksoft_nb);
cancel_work_sync(&crc64_rocksoft_rehash_work);
crypto_free_shash(rcu_dereference_protected(crc64_rocksoft_tfm, 1));
}
module_init(crc64_rocksoft_mod_init);
module_exit(crc64_rocksoft_mod_fini);
static int crc64_rocksoft_transform_show(char *buffer, const struct kernel_param *kp)
{
struct crypto_shash *tfm;
int len;
if (static_branch_unlikely(&crc64_rocksoft_fallback))
return sprintf(buffer, "fallback\n");
rcu_read_lock();
tfm = rcu_dereference(crc64_rocksoft_tfm);
len = snprintf(buffer, PAGE_SIZE, "%s\n",
crypto_shash_driver_name(tfm));
rcu_read_unlock();
return len;
}
module_param_call(transform, NULL, crc64_rocksoft_transform_show, NULL, 0444);
MODULE_AUTHOR("Keith Busch <[email protected]>");
MODULE_DESCRIPTION("Rocksoft model CRC64 calculation (library API)");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc64");
| linux-master | lib/crc64-rocksoft.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* test_ida.c: Test the IDA API
* Copyright (c) 2016-2018 Microsoft Corporation
* Copyright (c) 2018 Oracle Corporation
* Author: Matthew Wilcox <[email protected]>
*/
#include <linux/idr.h>
#include <linux/module.h>
static unsigned int tests_run;
static unsigned int tests_passed;
#ifdef __KERNEL__
void ida_dump(struct ida *ida) { }
#endif
#define IDA_BUG_ON(ida, x) do { \
tests_run++; \
if (x) { \
ida_dump(ida); \
dump_stack(); \
} else { \
tests_passed++; \
} \
} while (0)
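/*
 * Illustrative sketch of the API under test (assumed names): IDs are
 * handed out smallest-first and must be freed individually or in bulk
 * via ida_destroy().
 *
 *	static DEFINE_IDA(my_ida);
 *	int id = ida_alloc(&my_ida, GFP_KERNEL);	0 on the first call
 *
 *	if (id < 0)
 *		return id;	-ENOMEM or -ENOSPC
 *	ida_free(&my_ida, id);
 */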
/*
* Straightforward checks that allocating and freeing IDs work.
*/
static void ida_check_alloc(struct ida *ida)
{
int i, id;
for (i = 0; i < 10000; i++)
IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
ida_free(ida, 20);
ida_free(ida, 21);
for (i = 0; i < 3; i++) {
id = ida_alloc(ida, GFP_KERNEL);
IDA_BUG_ON(ida, id < 0);
if (i == 2)
IDA_BUG_ON(ida, id != 10000);
}
for (i = 0; i < 5000; i++)
ida_free(ida, i);
IDA_BUG_ON(ida, ida_alloc_min(ida, 5000, GFP_KERNEL) != 10001);
ida_destroy(ida);
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
/* Destroy an IDA with a single entry at @base */
static void ida_check_destroy_1(struct ida *ida, unsigned int base)
{
IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) != base);
IDA_BUG_ON(ida, ida_is_empty(ida));
ida_destroy(ida);
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
/* Check that ida_destroy and ida_is_empty work */
static void ida_check_destroy(struct ida *ida)
{
/* Destroy an already-empty IDA */
IDA_BUG_ON(ida, !ida_is_empty(ida));
ida_destroy(ida);
IDA_BUG_ON(ida, !ida_is_empty(ida));
ida_check_destroy_1(ida, 0);
ida_check_destroy_1(ida, 1);
ida_check_destroy_1(ida, 1023);
ida_check_destroy_1(ida, 1024);
ida_check_destroy_1(ida, 12345678);
}
/*
* Check what happens when we fill a leaf and then delete it. This may
* discover mishandling of IDR_FREE.
*/
static void ida_check_leaf(struct ida *ida, unsigned int base)
{
unsigned long i;
for (i = 0; i < IDA_BITMAP_BITS; i++) {
IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
base + i);
}
ida_destroy(ida);
IDA_BUG_ON(ida, !ida_is_empty(ida));
IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != 0);
IDA_BUG_ON(ida, ida_is_empty(ida));
ida_free(ida, 0);
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
/*
* Check allocations up to and slightly above the maximum allowed (2^31-1) ID.
* Allocating up to 2^31-1 should succeed, and then allocating the next one
* should fail.
*/
static void ida_check_max(struct ida *ida)
{
unsigned long i, j;
for (j = 1; j < 65537; j *= 2) {
unsigned long base = (1UL << 31) - j;
for (i = 0; i < j; i++) {
IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
base + i);
}
IDA_BUG_ON(ida, ida_alloc_min(ida, base, GFP_KERNEL) !=
-ENOSPC);
ida_destroy(ida);
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
}
/*
* Check handling of conversions between exceptional entries and full bitmaps.
*/
static void ida_check_conv(struct ida *ida)
{
unsigned long i;
for (i = 0; i < IDA_BITMAP_BITS * 2; i += IDA_BITMAP_BITS) {
IDA_BUG_ON(ida, ida_alloc_min(ida, i + 1, GFP_KERNEL) != i + 1);
IDA_BUG_ON(ida, ida_alloc_min(ida, i + BITS_PER_LONG,
GFP_KERNEL) != i + BITS_PER_LONG);
ida_free(ida, i + 1);
ida_free(ida, i + BITS_PER_LONG);
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
for (i = 0; i < IDA_BITMAP_BITS * 2; i++)
IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
for (i = IDA_BITMAP_BITS * 2; i > 0; i--)
ida_free(ida, i - 1);
IDA_BUG_ON(ida, !ida_is_empty(ida));
for (i = 0; i < IDA_BITMAP_BITS + BITS_PER_LONG - 4; i++)
IDA_BUG_ON(ida, ida_alloc(ida, GFP_KERNEL) != i);
for (i = IDA_BITMAP_BITS + BITS_PER_LONG - 4; i > 0; i--)
ida_free(ida, i - 1);
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
static DEFINE_IDA(ida);
static int ida_checks(void)
{
IDA_BUG_ON(&ida, !ida_is_empty(&ida));
ida_check_alloc(&ida);
ida_check_destroy(&ida);
ida_check_leaf(&ida, 0);
ida_check_leaf(&ida, 1024);
ida_check_leaf(&ida, 1024 * 64);
ida_check_max(&ida);
ida_check_conv(&ida);
printk("IDA: %u of %u tests passed\n", tests_passed, tests_run);
return (tests_run != tests_passed) ? 0 : -EINVAL;
}
static void ida_exit(void)
{
}
module_init(ida_checks);
module_exit(ida_exit);
MODULE_AUTHOR("Matthew Wilcox <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | lib/test_ida.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Testsuite for BPF interpreter and BPF JIT compiler
*
* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/highmem.h>
#include <linux/sched.h>
/* General test specific settings */
#define MAX_SUBTESTS 3
#define MAX_TESTRUNS 1000
#define MAX_DATA 128
#define MAX_INSNS 512
#define MAX_K 0xffffFFFF
/* Few constants used to init test 'skb' */
#define SKB_TYPE 3
#define SKB_MARK 0x1234aaaa
#define SKB_HASH 0x1234aaab
#define SKB_QUEUE_MAP 123
#define SKB_VLAN_TCI 0xffff
#define SKB_VLAN_PRESENT 1
#define SKB_DEV_IFINDEX 577
#define SKB_DEV_TYPE 588
/* Redefine REGs to make tests less verbose */
#define R0 BPF_REG_0
#define R1 BPF_REG_1
#define R2 BPF_REG_2
#define R3 BPF_REG_3
#define R4 BPF_REG_4
#define R5 BPF_REG_5
#define R6 BPF_REG_6
#define R7 BPF_REG_7
#define R8 BPF_REG_8
#define R9 BPF_REG_9
#define R10 BPF_REG_10
/* Flags that can be passed to test cases */
#define FLAG_NO_DATA BIT(0)
#define FLAG_EXPECTED_FAIL BIT(1)
#define FLAG_SKB_FRAG BIT(2)
#define FLAG_VERIFIER_ZEXT BIT(3)
#define FLAG_LARGE_MEM BIT(4)
enum {
CLASSIC = BIT(6), /* Old BPF instructions only. */
INTERNAL = BIT(7), /* Extended instruction set. */
};
#define TEST_TYPE_MASK (CLASSIC | INTERNAL)
struct bpf_test {
const char *descr;
union {
struct sock_filter insns[MAX_INSNS];
struct bpf_insn insns_int[MAX_INSNS];
struct {
void *insns;
unsigned int len;
} ptr;
} u;
__u8 aux;
__u8 data[MAX_DATA];
struct {
int data_size;
__u32 result;
} test[MAX_SUBTESTS];
int (*fill_helper)(struct bpf_test *self);
int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */
__u8 frag_data[MAX_DATA];
int stack_depth; /* for eBPF only, since tests don't call verifier */
int nr_testruns; /* Custom run count, defaults to MAX_TESTRUNS if 0 */
};
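/*
 * Illustrative (hypothetical) table entry, following the conventions of
 * the test array later in this file: a classic filter that returns the
 * constant 3, run without packet data and expected to yield 3.
 *
 *	{
 *		"RET_K",
 *		.u.insns = {
 *			BPF_STMT(BPF_RET | BPF_K, 3),
 *		},
 *		CLASSIC | FLAG_NO_DATA,
 *		{ },
 *		{ { 0, 3 } },
 *	},
 */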
/* Large test cases need separate allocation and fill handler. */
static int bpf_fill_maxinsns1(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
struct sock_filter *insn;
__u32 k = ~0;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
for (i = 0; i < len; i++, k--)
insn[i] = __BPF_STMT(BPF_RET | BPF_K, k);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
return 0;
}
static int bpf_fill_maxinsns2(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
struct sock_filter *insn;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
for (i = 0; i < len; i++)
insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
return 0;
}
static int bpf_fill_maxinsns3(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
struct sock_filter *insn;
struct rnd_state rnd;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
prandom_seed_state(&rnd, 3141592653589793238ULL);
for (i = 0; i < len - 1; i++) {
__u32 k = prandom_u32_state(&rnd);
insn[i] = __BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, k);
}
insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
return 0;
}
static int bpf_fill_maxinsns4(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS + 1;
struct sock_filter *insn;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
for (i = 0; i < len; i++)
insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
return 0;
}
static int bpf_fill_maxinsns5(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
struct sock_filter *insn;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0);
for (i = 1; i < len - 1; i++)
insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);
insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
return 0;
}
static int bpf_fill_maxinsns6(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
struct sock_filter *insn;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
for (i = 0; i < len - 1; i++)
insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
SKF_AD_VLAN_TAG_PRESENT);
insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
return 0;
}
static int bpf_fill_maxinsns7(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
struct sock_filter *insn;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
for (i = 0; i < len - 4; i++)
insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
SKF_AD_CPU);
insn[len - 4] = __BPF_STMT(BPF_MISC | BPF_TAX, 0);
insn[len - 3] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
SKF_AD_CPU);
insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0);
insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
return 0;
}
static int bpf_fill_maxinsns8(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
struct sock_filter *insn;
int i, jmp_off = len - 3;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
insn[0] = __BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff);
for (i = 1; i < len - 1; i++)
insn[i] = __BPF_JUMP(BPF_JMP | BPF_JGT, 0xffffffff, jmp_off--, 0);
insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
return 0;
}
static int bpf_fill_maxinsns9(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
struct bpf_insn *insn;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
insn[0] = BPF_JMP_IMM(BPF_JA, 0, 0, len - 2);
insn[1] = BPF_ALU32_IMM(BPF_MOV, R0, 0xcbababab);
insn[2] = BPF_EXIT_INSN();
for (i = 3; i < len - 2; i++)
insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xfefefefe);
insn[len - 2] = BPF_EXIT_INSN();
insn[len - 1] = BPF_JMP_IMM(BPF_JA, 0, 0, -(len - 1));
self->u.ptr.insns = insn;
self->u.ptr.len = len;
return 0;
}
static int bpf_fill_maxinsns10(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS, hlen = len - 2;
struct bpf_insn *insn;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
for (i = 0; i < hlen / 2; i++)
insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 2 - 2 * i);
for (i = hlen - 1; i > hlen / 2; i--)
insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 1 - 2 * i);
insn[hlen / 2] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen / 2 - 1);
insn[hlen] = BPF_ALU32_IMM(BPF_MOV, R0, 0xabababac);
insn[hlen + 1] = BPF_EXIT_INSN();
self->u.ptr.insns = insn;
self->u.ptr.len = len;
return 0;
}
static int __bpf_fill_ja(struct bpf_test *self, unsigned int len,
unsigned int plen)
{
struct sock_filter *insn;
unsigned int rlen;
int i, j;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
rlen = (len % plen) - 1;
for (i = 0; i + plen < len; i += plen)
for (j = 0; j < plen; j++)
insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA,
plen - 1 - j, 0, 0);
for (j = 0; j < rlen; j++)
insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA, rlen - 1 - j,
0, 0);
insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xababcbac);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
return 0;
}
static int bpf_fill_maxinsns11(struct bpf_test *self)
{
/* Hits 70 passes on x86_64 and triggers NOP padding. */
return __bpf_fill_ja(self, BPF_MAXINSNS, 68);
}
static int bpf_fill_maxinsns12(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
struct sock_filter *insn;
int i = 0;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0);
for (i = 1; i < len - 1; i++)
insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0);
insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
return 0;
}
static int bpf_fill_maxinsns13(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
struct sock_filter *insn;
int i = 0;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
for (i = 0; i < len - 3; i++)
insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0);
insn[len - 3] = __BPF_STMT(BPF_LD | BPF_IMM, 0xabababab);
insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0);
insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
return 0;
}
static int bpf_fill_ja(struct bpf_test *self)
{
/* Hits exactly 11 passes on x86_64 JIT. */
return __bpf_fill_ja(self, 12, 9);
}
static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
{
unsigned int len = BPF_MAXINSNS;
struct sock_filter *insn;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
for (i = 0; i < len - 1; i += 2) {
insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0);
insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_CPU);
}
insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee);
self->u.ptr.insns = insn;
self->u.ptr.len = len;
return 0;
}
static int __bpf_fill_stxdw(struct bpf_test *self, int size)
{
unsigned int len = BPF_MAXINSNS;
struct bpf_insn *insn;
int i;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
insn[0] = BPF_ALU32_IMM(BPF_MOV, R0, 1);
insn[1] = BPF_ST_MEM(size, R10, -40, 42);
for (i = 2; i < len - 2; i++)
insn[i] = BPF_STX_XADD(size, R10, R0, -40);
insn[len - 2] = BPF_LDX_MEM(size, R0, R10, -40);
insn[len - 1] = BPF_EXIT_INSN();
self->u.ptr.insns = insn;
self->u.ptr.len = len;
self->stack_depth = 40;
return 0;
}
static int bpf_fill_stxw(struct bpf_test *self)
{
return __bpf_fill_stxdw(self, BPF_W);
}
static int bpf_fill_stxdw(struct bpf_test *self)
{
return __bpf_fill_stxdw(self, BPF_DW);
}
static int __bpf_ld_imm64(struct bpf_insn insns[2], u8 reg, s64 imm64)
{
struct bpf_insn tmp[] = {BPF_LD_IMM64(reg, imm64)};
memcpy(insns, tmp, sizeof(tmp));
return 2;
}
/*
* Branch conversion tests. Complex operations can expand to a lot
* of instructions when JITed. This in turn may cause jump offsets
* to overflow the field size of the native instruction, triggering
* a branch conversion mechanism in some JITs.
*/
static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm)
{
struct bpf_insn *insns;
int len = S16_MAX + 5;
int i;
insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
if (!insns)
return -ENOMEM;
i = __bpf_ld_imm64(insns, R1, 0x0123456789abcdefULL);
insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
insns[i++] = BPF_JMP_IMM(jmp, R0, imm, S16_MAX);
insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 2);
insns[i++] = BPF_EXIT_INSN();
while (i < len - 1) {
static const int ops[] = {
BPF_LSH, BPF_RSH, BPF_ARSH, BPF_ADD,
BPF_SUB, BPF_MUL, BPF_DIV, BPF_MOD,
};
int op = ops[(i >> 1) % ARRAY_SIZE(ops)];
if (i & 1)
insns[i++] = BPF_ALU32_REG(op, R0, R1);
else
insns[i++] = BPF_ALU64_REG(op, R0, R1);
}
insns[i++] = BPF_EXIT_INSN();
self->u.ptr.insns = insns;
self->u.ptr.len = len;
BUG_ON(i != len);
return 0;
}
/* Branch taken by runtime decision */
static int bpf_fill_max_jmp_taken(struct bpf_test *self)
{
return __bpf_fill_max_jmp(self, BPF_JEQ, 1);
}
/* Branch not taken by runtime decision */
static int bpf_fill_max_jmp_not_taken(struct bpf_test *self)
{
return __bpf_fill_max_jmp(self, BPF_JEQ, 0);
}
/* Branch always taken, known at JIT time */
static int bpf_fill_max_jmp_always_taken(struct bpf_test *self)
{
return __bpf_fill_max_jmp(self, BPF_JGE, 0);
}
/* Branch never taken, known at JIT time */
static int bpf_fill_max_jmp_never_taken(struct bpf_test *self)
{
return __bpf_fill_max_jmp(self, BPF_JLT, 0);
}
/* ALU result computation used in tests */
static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
{
*res = 0;
switch (op) {
case BPF_MOV:
*res = v2;
break;
case BPF_AND:
*res = v1 & v2;
break;
case BPF_OR:
*res = v1 | v2;
break;
case BPF_XOR:
*res = v1 ^ v2;
break;
case BPF_LSH:
*res = v1 << v2;
break;
case BPF_RSH:
*res = v1 >> v2;
break;
case BPF_ARSH:
*res = v1 >> v2;
if (v2 > 0 && v1 > S64_MAX)
*res |= ~0ULL << (64 - v2);
break;
case BPF_ADD:
*res = v1 + v2;
break;
case BPF_SUB:
*res = v1 - v2;
break;
case BPF_MUL:
*res = v1 * v2;
break;
case BPF_DIV:
if (v2 == 0)
return false;
*res = div64_u64(v1, v2);
break;
case BPF_MOD:
if (v2 == 0)
return false;
div64_u64_rem(v1, v2, res);
break;
}
return true;
}
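/*
 * Worked example for the BPF_ARSH case above (operands are assumed):
 * with v1 = 0xffffffff00000000 (negative as s64) and v2 = 4, the logical
 * shift yields 0x0ffffffff0000000; since v1 > S64_MAX, the top 4 bits
 * are then filled with ones, giving 0xfffffffff0000000, the same result
 * as an arithmetic shift right by 4.
 */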
/* Test an ALU shift operation for all valid shift values */
static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
u8 mode, bool alu32)
{
static const s64 regs[] = {
0x0123456789abcdefLL, /* dword > 0, word < 0 */
0xfedcba9876543210LL, /* dword < 0, word > 0 */
0xfedcba0198765432LL, /* dword < 0, word < 0 */
0x0123458967abcdefLL, /* dword > 0, word > 0 */
};
int bits = alu32 ? 32 : 64;
int len = (2 + 7 * bits) * ARRAY_SIZE(regs) + 3;
struct bpf_insn *insn;
int imm, k;
int i = 0;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
for (k = 0; k < ARRAY_SIZE(regs); k++) {
s64 reg = regs[k];
i += __bpf_ld_imm64(&insn[i], R3, reg);
for (imm = 0; imm < bits; imm++) {
u64 val;
/* Perform operation */
insn[i++] = BPF_ALU64_REG(BPF_MOV, R1, R3);
insn[i++] = BPF_ALU64_IMM(BPF_MOV, R2, imm);
if (alu32) {
if (mode == BPF_K)
insn[i++] = BPF_ALU32_IMM(op, R1, imm);
else
insn[i++] = BPF_ALU32_REG(op, R1, R2);
if (op == BPF_ARSH)
reg = (s32)reg;
else
reg = (u32)reg;
__bpf_alu_result(&val, reg, imm, op);
val = (u32)val;
} else {
if (mode == BPF_K)
insn[i++] = BPF_ALU64_IMM(op, R1, imm);
else
insn[i++] = BPF_ALU64_REG(op, R1, R2);
__bpf_alu_result(&val, reg, imm, op);
}
/*
* When debugging a JIT that fails this test, one
* can write the immediate value to R0 here to find
* out which operand values fail.
*/
/* Load reference and check the result */
i += __bpf_ld_imm64(&insn[i], R4, val);
insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R4, 1);
insn[i++] = BPF_EXIT_INSN();
}
}
insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
insn[i++] = BPF_EXIT_INSN();
self->u.ptr.insns = insn;
self->u.ptr.len = len;
BUG_ON(i != len);
return 0;
}
static int bpf_fill_alu64_lsh_imm(struct bpf_test *self)
{
return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, false);
}
static int bpf_fill_alu64_rsh_imm(struct bpf_test *self)
{
return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, false);
}
static int bpf_fill_alu64_arsh_imm(struct bpf_test *self)
{
return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, false);
}
static int bpf_fill_alu64_lsh_reg(struct bpf_test *self)
{
return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, false);
}
static int bpf_fill_alu64_rsh_reg(struct bpf_test *self)
{
return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, false);
}
static int bpf_fill_alu64_arsh_reg(struct bpf_test *self)
{
return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, false);
}
static int bpf_fill_alu32_lsh_imm(struct bpf_test *self)
{
return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, true);
}
static int bpf_fill_alu32_rsh_imm(struct bpf_test *self)
{
return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, true);
}
static int bpf_fill_alu32_arsh_imm(struct bpf_test *self)
{
return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, true);
}
static int bpf_fill_alu32_lsh_reg(struct bpf_test *self)
{
return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, true);
}
static int bpf_fill_alu32_rsh_reg(struct bpf_test *self)
{
return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, true);
}
static int bpf_fill_alu32_arsh_reg(struct bpf_test *self)
{
return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, true);
}
/*
* Test an ALU register shift operation for all valid shift values
* for the case when the source and destination are the same.
*/
static int __bpf_fill_alu_shift_same_reg(struct bpf_test *self, u8 op,
bool alu32)
{
int bits = alu32 ? 32 : 64;
int len = 3 + 6 * bits;
struct bpf_insn *insn;
int i = 0;
u64 val;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
for (val = 0; val < bits; val++) {
u64 res;
/* Perform operation */
insn[i++] = BPF_ALU64_IMM(BPF_MOV, R1, val);
if (alu32)
insn[i++] = BPF_ALU32_REG(op, R1, R1);
else
insn[i++] = BPF_ALU64_REG(op, R1, R1);
/* Compute the reference result */
__bpf_alu_result(&res, val, val, op);
if (alu32)
res = (u32)res;
i += __bpf_ld_imm64(&insn[i], R2, res);
/* Check the actual result */
insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
insn[i++] = BPF_EXIT_INSN();
}
insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
insn[i++] = BPF_EXIT_INSN();
self->u.ptr.insns = insn;
self->u.ptr.len = len;
BUG_ON(i != len);
return 0;
}
static int bpf_fill_alu64_lsh_same_reg(struct bpf_test *self)
{
return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, false);
}
static int bpf_fill_alu64_rsh_same_reg(struct bpf_test *self)
{
return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, false);
}
static int bpf_fill_alu64_arsh_same_reg(struct bpf_test *self)
{
return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, false);
}
static int bpf_fill_alu32_lsh_same_reg(struct bpf_test *self)
{
return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, true);
}
static int bpf_fill_alu32_rsh_same_reg(struct bpf_test *self)
{
return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, true);
}
static int bpf_fill_alu32_arsh_same_reg(struct bpf_test *self)
{
return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, true);
}
/*
* Common operand pattern generator for exhaustive power-of-two magnitudes
* tests. The block size parameters can be adjusted to increase/reduce the
* number of combinatons tested and thereby execution speed and memory
* footprint.
*/
static inline s64 value(int msb, int delta, int sign)
{
return sign * (1LL << msb) + delta;
}
static int __bpf_fill_pattern(struct bpf_test *self, void *arg,
int dbits, int sbits, int block1, int block2,
int (*emit)(struct bpf_test*, void*,
struct bpf_insn*, s64, s64))
{
static const int sgn[][2] = {{1, 1}, {1, -1}, {-1, 1}, {-1, -1}};
struct bpf_insn *insns;
int di, si, bt, db, sb;
int count, len, k;
int extra = 1 + 2;
int i = 0;
/* Total number of iterations for the two patterns */
count = (dbits - 1) * (sbits - 1) * block1 * block1 * ARRAY_SIZE(sgn);
count += (max(dbits, sbits) - 1) * block2 * block2 * ARRAY_SIZE(sgn);
/* Compute the maximum number of insns and allocate the buffer */
len = extra + count * (*emit)(self, arg, NULL, 0, 0);
insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
if (!insns)
return -ENOMEM;
/* Add head instruction(s) */
insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
/*
* Pattern 1: all combinations of power-of-two magnitudes and sign,
* and with a block of contiguous values around each magnitude.
*/
for (di = 0; di < dbits - 1; di++) /* Dst magnitudes */
for (si = 0; si < sbits - 1; si++) /* Src magnitudes */
for (k = 0; k < ARRAY_SIZE(sgn); k++) /* Sign combos */
for (db = -(block1 / 2);
db < (block1 + 1) / 2; db++)
for (sb = -(block1 / 2);
sb < (block1 + 1) / 2; sb++) {
s64 dst, src;
dst = value(di, db, sgn[k][0]);
src = value(si, sb, sgn[k][1]);
i += (*emit)(self, arg,
&insns[i],
dst, src);
}
/*
* Pattern 2: all combinations for a larger block of values
* for each power-of-two magnitude and sign, where the magnitude is
* the same for both operands.
*/
for (bt = 0; bt < max(dbits, sbits) - 1; bt++) /* Magnitude */
for (k = 0; k < ARRAY_SIZE(sgn); k++) /* Sign combos */
for (db = -(block2 / 2); db < (block2 + 1) / 2; db++)
for (sb = -(block2 / 2);
sb < (block2 + 1) / 2; sb++) {
s64 dst, src;
dst = value(bt % dbits, db, sgn[k][0]);
src = value(bt % sbits, sb, sgn[k][1]);
i += (*emit)(self, arg, &insns[i],
dst, src);
}
/* Append tail instructions */
insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
insns[i++] = BPF_EXIT_INSN();
BUG_ON(i > len);
self->u.ptr.insns = insns;
self->u.ptr.len = i;
return 0;
}
/*
* Block size parameters used in the pattern tests below. Tune as needed to
* increase/reduce the number of combinations tested; see the following examples.
* block values per operand MSB
* ----------------------------------------
* 0 none
* 1 (1 << MSB)
* 2 (1 << MSB) + [-1, 0]
* 3 (1 << MSB) + [-1, 0, 1]
*/
#define PATTERN_BLOCK1 1
#define PATTERN_BLOCK2 5
/* Number of test runs for a pattern test */
#define NR_PATTERN_RUNS 1
/*
* Exhaustive tests of ALU operations for all combinations of power-of-two
* magnitudes of the operands, both for positive and negative values. The
* test is designed to verify e.g. the ALU and ALU64 operations for JITs that
* emit different code depending on the magnitude of the immediate value.
*/
static int __bpf_emit_alu64_imm(struct bpf_test *self, void *arg,
struct bpf_insn *insns, s64 dst, s64 imm)
{
int op = *(int *)arg;
int i = 0;
u64 res;
if (!insns)
return 7;
if (__bpf_alu_result(&res, dst, (s32)imm, op)) {
i += __bpf_ld_imm64(&insns[i], R1, dst);
i += __bpf_ld_imm64(&insns[i], R3, res);
insns[i++] = BPF_ALU64_IMM(op, R1, imm);
insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
insns[i++] = BPF_EXIT_INSN();
}
return i;
}
static int __bpf_emit_alu32_imm(struct bpf_test *self, void *arg,
struct bpf_insn *insns, s64 dst, s64 imm)
{
int op = *(int *)arg;
int i = 0;
u64 res;
if (!insns)
return 7;
if (__bpf_alu_result(&res, (u32)dst, (u32)imm, op)) {
i += __bpf_ld_imm64(&insns[i], R1, dst);
i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
insns[i++] = BPF_ALU32_IMM(op, R1, imm);
insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
insns[i++] = BPF_EXIT_INSN();
}
return i;
}
static int __bpf_emit_alu64_reg(struct bpf_test *self, void *arg,
struct bpf_insn *insns, s64 dst, s64 src)
{
int op = *(int *)arg;
int i = 0;
u64 res;
if (!insns)
return 9;
if (__bpf_alu_result(&res, dst, src, op)) {
i += __bpf_ld_imm64(&insns[i], R1, dst);
i += __bpf_ld_imm64(&insns[i], R2, src);
i += __bpf_ld_imm64(&insns[i], R3, res);
insns[i++] = BPF_ALU64_REG(op, R1, R2);
insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
insns[i++] = BPF_EXIT_INSN();
}
return i;
}
static int __bpf_emit_alu32_reg(struct bpf_test *self, void *arg,
struct bpf_insn *insns, s64 dst, s64 src)
{
int op = *(int *)arg;
int i = 0;
u64 res;
if (!insns)
return 9;
if (__bpf_alu_result(&res, (u32)dst, (u32)src, op)) {
i += __bpf_ld_imm64(&insns[i], R1, dst);
i += __bpf_ld_imm64(&insns[i], R2, src);
i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
insns[i++] = BPF_ALU32_REG(op, R1, R2);
insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
insns[i++] = BPF_EXIT_INSN();
}
return i;
}
static int __bpf_fill_alu64_imm(struct bpf_test *self, int op)
{
return __bpf_fill_pattern(self, &op, 64, 32,
PATTERN_BLOCK1, PATTERN_BLOCK2,
&__bpf_emit_alu64_imm);
}
static int __bpf_fill_alu32_imm(struct bpf_test *self, int op)
{
return __bpf_fill_pattern(self, &op, 64, 32,
PATTERN_BLOCK1, PATTERN_BLOCK2,
&__bpf_emit_alu32_imm);
}
static int __bpf_fill_alu64_reg(struct bpf_test *self, int op)
{
return __bpf_fill_pattern(self, &op, 64, 64,
PATTERN_BLOCK1, PATTERN_BLOCK2,
&__bpf_emit_alu64_reg);
}
static int __bpf_fill_alu32_reg(struct bpf_test *self, int op)
{
return __bpf_fill_pattern(self, &op, 64, 64,
PATTERN_BLOCK1, PATTERN_BLOCK2,
&__bpf_emit_alu32_reg);
}
/* ALU64 immediate operations */
static int bpf_fill_alu64_mov_imm(struct bpf_test *self)
{
return __bpf_fill_alu64_imm(self, BPF_MOV);
}
static int bpf_fill_alu64_and_imm(struct bpf_test *self)
{
return __bpf_fill_alu64_imm(self, BPF_AND);
}
static int bpf_fill_alu64_or_imm(struct bpf_test *self)
{
return __bpf_fill_alu64_imm(self, BPF_OR);
}
static int bpf_fill_alu64_xor_imm(struct bpf_test *self)
{
return __bpf_fill_alu64_imm(self, BPF_XOR);
}
static int bpf_fill_alu64_add_imm(struct bpf_test *self)
{
return __bpf_fill_alu64_imm(self, BPF_ADD);
}
static int bpf_fill_alu64_sub_imm(struct bpf_test *self)
{
return __bpf_fill_alu64_imm(self, BPF_SUB);
}
static int bpf_fill_alu64_mul_imm(struct bpf_test *self)
{
return __bpf_fill_alu64_imm(self, BPF_MUL);
}
static int bpf_fill_alu64_div_imm(struct bpf_test *self)
{
return __bpf_fill_alu64_imm(self, BPF_DIV);
}
static int bpf_fill_alu64_mod_imm(struct bpf_test *self)
{
return __bpf_fill_alu64_imm(self, BPF_MOD);
}
/* ALU32 immediate operations */
static int bpf_fill_alu32_mov_imm(struct bpf_test *self)
{
return __bpf_fill_alu32_imm(self, BPF_MOV);
}
static int bpf_fill_alu32_and_imm(struct bpf_test *self)
{
return __bpf_fill_alu32_imm(self, BPF_AND);
}
static int bpf_fill_alu32_or_imm(struct bpf_test *self)
{
return __bpf_fill_alu32_imm(self, BPF_OR);
}
static int bpf_fill_alu32_xor_imm(struct bpf_test *self)
{
return __bpf_fill_alu32_imm(self, BPF_XOR);
}
static int bpf_fill_alu32_add_imm(struct bpf_test *self)
{
return __bpf_fill_alu32_imm(self, BPF_ADD);
}
static int bpf_fill_alu32_sub_imm(struct bpf_test *self)
{
return __bpf_fill_alu32_imm(self, BPF_SUB);
}
static int bpf_fill_alu32_mul_imm(struct bpf_test *self)
{
return __bpf_fill_alu32_imm(self, BPF_MUL);
}
static int bpf_fill_alu32_div_imm(struct bpf_test *self)
{
return __bpf_fill_alu32_imm(self, BPF_DIV);
}
static int bpf_fill_alu32_mod_imm(struct bpf_test *self)
{
return __bpf_fill_alu32_imm(self, BPF_MOD);
}
/* ALU64 register operations */
static int bpf_fill_alu64_mov_reg(struct bpf_test *self)
{
return __bpf_fill_alu64_reg(self, BPF_MOV);
}
static int bpf_fill_alu64_and_reg(struct bpf_test *self)
{
return __bpf_fill_alu64_reg(self, BPF_AND);
}
static int bpf_fill_alu64_or_reg(struct bpf_test *self)
{
return __bpf_fill_alu64_reg(self, BPF_OR);
}
static int bpf_fill_alu64_xor_reg(struct bpf_test *self)
{
return __bpf_fill_alu64_reg(self, BPF_XOR);
}
static int bpf_fill_alu64_add_reg(struct bpf_test *self)
{
return __bpf_fill_alu64_reg(self, BPF_ADD);
}
static int bpf_fill_alu64_sub_reg(struct bpf_test *self)
{
return __bpf_fill_alu64_reg(self, BPF_SUB);
}
static int bpf_fill_alu64_mul_reg(struct bpf_test *self)
{
return __bpf_fill_alu64_reg(self, BPF_MUL);
}
static int bpf_fill_alu64_div_reg(struct bpf_test *self)
{
return __bpf_fill_alu64_reg(self, BPF_DIV);
}
static int bpf_fill_alu64_mod_reg(struct bpf_test *self)
{
return __bpf_fill_alu64_reg(self, BPF_MOD);
}
/* ALU32 register operations */
static int bpf_fill_alu32_mov_reg(struct bpf_test *self)
{
return __bpf_fill_alu32_reg(self, BPF_MOV);
}
static int bpf_fill_alu32_and_reg(struct bpf_test *self)
{
return __bpf_fill_alu32_reg(self, BPF_AND);
}
static int bpf_fill_alu32_or_reg(struct bpf_test *self)
{
return __bpf_fill_alu32_reg(self, BPF_OR);
}
static int bpf_fill_alu32_xor_reg(struct bpf_test *self)
{
return __bpf_fill_alu32_reg(self, BPF_XOR);
}
static int bpf_fill_alu32_add_reg(struct bpf_test *self)
{
return __bpf_fill_alu32_reg(self, BPF_ADD);
}
static int bpf_fill_alu32_sub_reg(struct bpf_test *self)
{
return __bpf_fill_alu32_reg(self, BPF_SUB);
}
static int bpf_fill_alu32_mul_reg(struct bpf_test *self)
{
return __bpf_fill_alu32_reg(self, BPF_MUL);
}
static int bpf_fill_alu32_div_reg(struct bpf_test *self)
{
return __bpf_fill_alu32_reg(self, BPF_DIV);
}
static int bpf_fill_alu32_mod_reg(struct bpf_test *self)
{
return __bpf_fill_alu32_reg(self, BPF_MOD);
}
/*
* Test JITs that implement complex ALU operations as function
* calls, and must re-arrange operands for argument passing.
*/
static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
{
int len = 2 + 10 * 10;
struct bpf_insn *insns;
u64 dst, res;
int i = 0;
u32 imm;
int rd;
insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
if (!insns)
return -ENOMEM;
/* Operand and result values according to operation */
if (alu32)
dst = 0x76543210U;
else
dst = 0x7edcba9876543210ULL;
imm = 0x01234567U;
if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
imm &= 31;
__bpf_alu_result(&res, dst, imm, op);
if (alu32)
res = (u32)res;
/* Check all operand registers */
for (rd = R0; rd <= R9; rd++) {
i += __bpf_ld_imm64(&insns[i], rd, dst);
if (alu32)
insns[i++] = BPF_ALU32_IMM(op, rd, imm);
else
insns[i++] = BPF_ALU64_IMM(op, rd, imm);
insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res, 2);
insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
insns[i++] = BPF_EXIT_INSN();
insns[i++] = BPF_ALU64_IMM(BPF_RSH, rd, 32);
insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res >> 32, 2);
insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
insns[i++] = BPF_EXIT_INSN();
}
insns[i++] = BPF_MOV64_IMM(R0, 1);
insns[i++] = BPF_EXIT_INSN();
self->u.ptr.insns = insns;
self->u.ptr.len = len;
BUG_ON(i != len);
return 0;
}
/* ALU64 K registers */
static int bpf_fill_alu64_mov_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_MOV, false);
}
static int bpf_fill_alu64_and_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_AND, false);
}
static int bpf_fill_alu64_or_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_OR, false);
}
static int bpf_fill_alu64_xor_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_XOR, false);
}
static int bpf_fill_alu64_lsh_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_LSH, false);
}
static int bpf_fill_alu64_rsh_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_RSH, false);
}
static int bpf_fill_alu64_arsh_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_ARSH, false);
}
static int bpf_fill_alu64_add_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_ADD, false);
}
static int bpf_fill_alu64_sub_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_SUB, false);
}
static int bpf_fill_alu64_mul_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_MUL, false);
}
static int bpf_fill_alu64_div_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_DIV, false);
}
static int bpf_fill_alu64_mod_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_MOD, false);
}
/* ALU32 K registers */
static int bpf_fill_alu32_mov_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_MOV, true);
}
static int bpf_fill_alu32_and_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_AND, true);
}
static int bpf_fill_alu32_or_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_OR, true);
}
static int bpf_fill_alu32_xor_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_XOR, true);
}
static int bpf_fill_alu32_lsh_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_LSH, true);
}
static int bpf_fill_alu32_rsh_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_RSH, true);
}
static int bpf_fill_alu32_arsh_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_ARSH, true);
}
static int bpf_fill_alu32_add_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_ADD, true);
}
static int bpf_fill_alu32_sub_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_SUB, true);
}
static int bpf_fill_alu32_mul_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_MUL, true);
}
static int bpf_fill_alu32_div_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_DIV, true);
}
static int bpf_fill_alu32_mod_imm_regs(struct bpf_test *self)
{
return __bpf_fill_alu_imm_regs(self, BPF_MOD, true);
}
/*
* Test JITs that implement complex ALU operations as function
* calls, and must re-arrange operands for argument passing.
*/
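/*
 * When rd == rs, both operands alias the src value, so a separate
 * expected result ("same") is computed for the diagonal cases below.
 */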
static int __bpf_fill_alu_reg_pairs(struct bpf_test *self, u8 op, bool alu32)
{
int len = 2 + 10 * 10 * 12; /* 12 insns per pair, 10 x 10 pairs, plus a 2-insn epilogue */
u64 dst, src, res, same;
struct bpf_insn *insns;
int rd, rs;
int i = 0;
insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
if (!insns)
return -ENOMEM;
/* Operand and result values according to operation */
if (alu32) {
dst = 0x76543210U;
src = 0x01234567U;
} else {
dst = 0x7edcba9876543210ULL;
src = 0x0123456789abcdefULL;
}
if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
src &= 31;
__bpf_alu_result(&res, dst, src, op);
__bpf_alu_result(&same, src, src, op);
if (alu32) {
res = (u32)res;
same = (u32)same;
}
/* Check all combinations of operand registers */
for (rd = R0; rd <= R9; rd++) {
for (rs = R0; rs <= R9; rs++) {
u64 val = rd == rs ? same : res;
i += __bpf_ld_imm64(&insns[i], rd, dst);
i += __bpf_ld_imm64(&insns[i], rs, src);
if (alu32)
insns[i++] = BPF_ALU32_REG(op, rd, rs);
else
insns[i++] = BPF_ALU64_REG(op, rd, rs);
insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, val, 2);
insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
insns[i++] = BPF_EXIT_INSN();
insns[i++] = BPF_ALU64_IMM(BPF_RSH, rd, 32);
insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, val >> 32, 2);
insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
insns[i++] = BPF_EXIT_INSN();
}
}
insns[i++] = BPF_MOV64_IMM(R0, 1);
insns[i++] = BPF_EXIT_INSN();
self->u.ptr.insns = insns;
self->u.ptr.len = len;
BUG_ON(i != len);
return 0;
}
/* ALU64 X register combinations */
static int bpf_fill_alu64_mov_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_MOV, false);
}
static int bpf_fill_alu64_and_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_AND, false);
}
static int bpf_fill_alu64_or_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_OR, false);
}
static int bpf_fill_alu64_xor_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_XOR, false);
}
static int bpf_fill_alu64_lsh_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_LSH, false);
}
static int bpf_fill_alu64_rsh_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_RSH, false);
}
static int bpf_fill_alu64_arsh_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_ARSH, false);
}
static int bpf_fill_alu64_add_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_ADD, false);
}
static int bpf_fill_alu64_sub_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_SUB, false);
}
static int bpf_fill_alu64_mul_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_MUL, false);
}
static int bpf_fill_alu64_div_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_DIV, false);
}
static int bpf_fill_alu64_mod_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_MOD, false);
}
/* ALU32 X register combinations */
static int bpf_fill_alu32_mov_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_MOV, true);
}
static int bpf_fill_alu32_and_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_AND, true);
}
static int bpf_fill_alu32_or_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_OR, true);
}
static int bpf_fill_alu32_xor_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_XOR, true);
}
static int bpf_fill_alu32_lsh_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_LSH, true);
}
static int bpf_fill_alu32_rsh_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_RSH, true);
}
static int bpf_fill_alu32_arsh_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_ARSH, true);
}
static int bpf_fill_alu32_add_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_ADD, true);
}
static int bpf_fill_alu32_sub_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_SUB, true);
}
static int bpf_fill_alu32_mul_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_MUL, true);
}
static int bpf_fill_alu32_div_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_DIV, true);
}
static int bpf_fill_alu32_mod_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_alu_reg_pairs(self, BPF_MOD, true);
}
/*
* Exhaustive tests of atomic operations for all power-of-two operand
* magnitudes, both for positive and negative values.
*/
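/*
 * Each emitted block verifies three things after the atomic insn:
 * the resulting value in memory, the source register (the fetched
 * old value for fetching ops, otherwise unchanged), and that an
 * unrelated register holding a canary value is preserved.
 */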
static int __bpf_emit_atomic64(struct bpf_test *self, void *arg,
struct bpf_insn *insns, s64 dst, s64 src)
{
int op = *(int *)arg;
u64 keep, fetch, res;
int i = 0;
if (!insns)
return 21;
switch (op) {
case BPF_XCHG:
res = src;
break;
default:
__bpf_alu_result(&res, dst, src, BPF_OP(op));
}
keep = 0x0123456789abcdefULL;
if (op & BPF_FETCH)
fetch = dst;
else
fetch = src;
i += __bpf_ld_imm64(&insns[i], R0, keep);
i += __bpf_ld_imm64(&insns[i], R1, dst);
i += __bpf_ld_imm64(&insns[i], R2, src);
i += __bpf_ld_imm64(&insns[i], R3, res);
i += __bpf_ld_imm64(&insns[i], R4, fetch);
i += __bpf_ld_imm64(&insns[i], R5, keep);
insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8);
insns[i++] = BPF_ATOMIC_OP(BPF_DW, op, R10, R2, -8);
insns[i++] = BPF_LDX_MEM(BPF_DW, R1, R10, -8);
insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
insns[i++] = BPF_EXIT_INSN();
insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1);
insns[i++] = BPF_EXIT_INSN();
insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R5, 1);
insns[i++] = BPF_EXIT_INSN();
return i;
}
static int __bpf_emit_atomic32(struct bpf_test *self, void *arg,
struct bpf_insn *insns, s64 dst, s64 src)
{
int op = *(int *)arg;
u64 keep, fetch, res;
int i = 0;
if (!insns)
return 21;
switch (op) {
case BPF_XCHG:
res = src;
break;
default:
__bpf_alu_result(&res, (u32)dst, (u32)src, BPF_OP(op));
}
keep = 0x0123456789abcdefULL;
if (op & BPF_FETCH)
fetch = (u32)dst;
else
fetch = src;
i += __bpf_ld_imm64(&insns[i], R0, keep);
i += __bpf_ld_imm64(&insns[i], R1, (u32)dst);
i += __bpf_ld_imm64(&insns[i], R2, src);
i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
i += __bpf_ld_imm64(&insns[i], R4, fetch);
i += __bpf_ld_imm64(&insns[i], R5, keep);
insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4);
insns[i++] = BPF_ATOMIC_OP(BPF_W, op, R10, R2, -4);
insns[i++] = BPF_LDX_MEM(BPF_W, R1, R10, -4);
insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
insns[i++] = BPF_EXIT_INSN();
insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1);
insns[i++] = BPF_EXIT_INSN();
insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R5, 1);
insns[i++] = BPF_EXIT_INSN();
return i;
}
static int __bpf_emit_cmpxchg64(struct bpf_test *self, void *arg,
struct bpf_insn *insns, s64 dst, s64 src)
{
int i = 0;
if (!insns)
return 23;
i += __bpf_ld_imm64(&insns[i], R0, ~dst);
i += __bpf_ld_imm64(&insns[i], R1, dst);
i += __bpf_ld_imm64(&insns[i], R2, src);
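/*
 * R0 starts out as ~dst, which can never match the memory word, so
 * the first exchange must fail and leave the old value in R0. R0
 * then equals the memory word, so the second exchange must succeed.
 */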
/* Result unsuccessful */
insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8);
insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -8);
insns[i++] = BPF_LDX_MEM(BPF_DW, R3, R10, -8);
insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 2);
insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
insns[i++] = BPF_EXIT_INSN();
insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R3, 2);
insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
insns[i++] = BPF_EXIT_INSN();
/* Result successful */
insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -8);
insns[i++] = BPF_LDX_MEM(BPF_DW, R3, R10, -8);
insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R3, 2);
insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
insns[i++] = BPF_EXIT_INSN();
insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
insns[i++] = BPF_EXIT_INSN();
return i;
}
static int __bpf_emit_cmpxchg32(struct bpf_test *self, void *arg,
struct bpf_insn *insns, s64 dst, s64 src)
{
int i = 0;
if (!insns)
return 27;
i += __bpf_ld_imm64(&insns[i], R0, ~dst);
i += __bpf_ld_imm64(&insns[i], R1, (u32)dst);
i += __bpf_ld_imm64(&insns[i], R2, src);
/* Result unsuccessful */
insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4);
insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4);
insns[i++] = BPF_ZEXT_REG(R0); /* Zext always inserted by verifier */
insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4);
insns[i++] = BPF_JMP32_REG(BPF_JEQ, R1, R3, 2);
insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
insns[i++] = BPF_EXIT_INSN();
insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R3, 2);
insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
insns[i++] = BPF_EXIT_INSN();
/* Result successful */
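/*
 * Reload R0 with the full 64-bit dst value: only its low 32 bits
 * take part in the comparison, and a successful exchange must leave
 * the old memory word zero-extended in R0.
 */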
i += __bpf_ld_imm64(&insns[i], R0, dst);
insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4);
insns[i++] = BPF_ZEXT_REG(R0); /* Zext always inserted by verifier */
insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4);
insns[i++] = BPF_JMP32_REG(BPF_JEQ, R2, R3, 2);
insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
insns[i++] = BPF_EXIT_INSN();
insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
insns[i++] = BPF_EXIT_INSN();
return i;
}
static int __bpf_fill_atomic64(struct bpf_test *self, int op)
{
return __bpf_fill_pattern(self, &op, 64, 64,
0, PATTERN_BLOCK2,
&__bpf_emit_atomic64);
}
static int __bpf_fill_atomic32(struct bpf_test *self, int op)
{
return __bpf_fill_pattern(self, &op, 64, 64,
0, PATTERN_BLOCK2,
&__bpf_emit_atomic32);
}
/* 64-bit atomic operations */
static int bpf_fill_atomic64_add(struct bpf_test *self)
{
return __bpf_fill_atomic64(self, BPF_ADD);
}
static int bpf_fill_atomic64_and(struct bpf_test *self)
{
return __bpf_fill_atomic64(self, BPF_AND);
}
static int bpf_fill_atomic64_or(struct bpf_test *self)
{
return __bpf_fill_atomic64(self, BPF_OR);
}
static int bpf_fill_atomic64_xor(struct bpf_test *self)
{
return __bpf_fill_atomic64(self, BPF_XOR);
}
static int bpf_fill_atomic64_add_fetch(struct bpf_test *self)
{
return __bpf_fill_atomic64(self, BPF_ADD | BPF_FETCH);
}
static int bpf_fill_atomic64_and_fetch(struct bpf_test *self)
{
return __bpf_fill_atomic64(self, BPF_AND | BPF_FETCH);
}
static int bpf_fill_atomic64_or_fetch(struct bpf_test *self)
{
return __bpf_fill_atomic64(self, BPF_OR | BPF_FETCH);
}
static int bpf_fill_atomic64_xor_fetch(struct bpf_test *self)
{
return __bpf_fill_atomic64(self, BPF_XOR | BPF_FETCH);
}
static int bpf_fill_atomic64_xchg(struct bpf_test *self)
{
return __bpf_fill_atomic64(self, BPF_XCHG);
}
static int bpf_fill_cmpxchg64(struct bpf_test *self)
{
return __bpf_fill_pattern(self, NULL, 64, 64, 0, PATTERN_BLOCK2,
&__bpf_emit_cmpxchg64);
}
/* 32-bit atomic operations */
static int bpf_fill_atomic32_add(struct bpf_test *self)
{
return __bpf_fill_atomic32(self, BPF_ADD);
}
static int bpf_fill_atomic32_and(struct bpf_test *self)
{
return __bpf_fill_atomic32(self, BPF_AND);
}
static int bpf_fill_atomic32_or(struct bpf_test *self)
{
return __bpf_fill_atomic32(self, BPF_OR);
}
static int bpf_fill_atomic32_xor(struct bpf_test *self)
{
return __bpf_fill_atomic32(self, BPF_XOR);
}
static int bpf_fill_atomic32_add_fetch(struct bpf_test *self)
{
return __bpf_fill_atomic32(self, BPF_ADD | BPF_FETCH);
}
static int bpf_fill_atomic32_and_fetch(struct bpf_test *self)
{
return __bpf_fill_atomic32(self, BPF_AND | BPF_FETCH);
}
static int bpf_fill_atomic32_or_fetch(struct bpf_test *self)
{
return __bpf_fill_atomic32(self, BPF_OR | BPF_FETCH);
}
static int bpf_fill_atomic32_xor_fetch(struct bpf_test *self)
{
return __bpf_fill_atomic32(self, BPF_XOR | BPF_FETCH);
}
static int bpf_fill_atomic32_xchg(struct bpf_test *self)
{
return __bpf_fill_atomic32(self, BPF_XCHG);
}
static int bpf_fill_cmpxchg32(struct bpf_test *self)
{
return __bpf_fill_pattern(self, NULL, 64, 64, 0, PATTERN_BLOCK2,
&__bpf_emit_cmpxchg32);
}
/*
* Test JITs that implement ATOMIC operations as function calls or
* other primitives, and must re-arrange operands for argument passing.
*/
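/*
 * For each (rd, rs) pair the filler checks, in order: the R0 value
 * (canary or CMPXCHG result), the source register, the destination
 * register (which must still hold the copied R10 pointer), and
 * finally the value left in memory, with the aliased register cases
 * special-cased as noted inline.
 */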
static int __bpf_fill_atomic_reg_pairs(struct bpf_test *self, u8 width, u8 op)
{
struct bpf_insn *insn;
int len = 2 + 34 * 10 * 10; /* at most 34 insns per pair, 10 x 10 pairs, plus epilogue */
u64 mem, upd, res;
int rd, rs, i = 0;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
/* Operand and memory values */
if (width == BPF_DW) {
mem = 0x0123456789abcdefULL;
upd = 0xfedcba9876543210ULL;
} else { /* BPF_W */
mem = 0x01234567U;
upd = 0x76543210U;
}
/* Memory updated according to operation */
switch (op) {
case BPF_XCHG:
res = upd;
break;
case BPF_CMPXCHG:
res = mem;
break;
default:
__bpf_alu_result(&res, mem, upd, BPF_OP(op));
}
/* Test all operand registers */
for (rd = R0; rd <= R9; rd++) {
for (rs = R0; rs <= R9; rs++) {
u64 cmp, src;
/* Initialize value in memory */
i += __bpf_ld_imm64(&insn[i], R0, mem);
insn[i++] = BPF_STX_MEM(width, R10, R0, -8);
/* Initialize registers in order */
i += __bpf_ld_imm64(&insn[i], R0, ~mem);
i += __bpf_ld_imm64(&insn[i], rs, upd);
insn[i++] = BPF_MOV64_REG(rd, R10);
/* Perform atomic operation */
insn[i++] = BPF_ATOMIC_OP(width, op, rd, rs, -8);
if (op == BPF_CMPXCHG && width == BPF_W)
insn[i++] = BPF_ZEXT_REG(R0);
/* Check R0 register value */
if (op == BPF_CMPXCHG)
cmp = mem; /* Expect value from memory */
else if (R0 == rd || R0 == rs)
cmp = 0; /* Aliased, checked below */
else
cmp = ~mem; /* Expect value to be preserved */
if (cmp) {
insn[i++] = BPF_JMP32_IMM(BPF_JEQ, R0,
(u32)cmp, 2);
insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
insn[i++] = BPF_EXIT_INSN();
insn[i++] = BPF_ALU64_IMM(BPF_RSH, R0, 32);
insn[i++] = BPF_JMP32_IMM(BPF_JEQ, R0,
cmp >> 32, 2);
insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
insn[i++] = BPF_EXIT_INSN();
}
/* Check source register value */
if (rs == R0 && op == BPF_CMPXCHG)
src = 0; /* Aliased with R0, checked above */
else if (rs == rd && (op == BPF_CMPXCHG ||
!(op & BPF_FETCH)))
src = 0; /* Aliased with rd, checked below */
else if (op == BPF_CMPXCHG)
src = upd; /* Expect value to be preserved */
else if (op & BPF_FETCH)
src = mem; /* Expect fetched value from mem */
else /* no fetch */
src = upd; /* Expect value to be preserved */
if (src) {
insn[i++] = BPF_JMP32_IMM(BPF_JEQ, rs,
(u32)src, 2);
insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
insn[i++] = BPF_EXIT_INSN();
insn[i++] = BPF_ALU64_IMM(BPF_RSH, rs, 32);
insn[i++] = BPF_JMP32_IMM(BPF_JEQ, rs,
src >> 32, 2);
insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
insn[i++] = BPF_EXIT_INSN();
}
/* Check destination register value */
if (!(rd == R0 && op == BPF_CMPXCHG) &&
!(rd == rs && (op & BPF_FETCH))) {
insn[i++] = BPF_JMP_REG(BPF_JEQ, rd, R10, 2);
insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
insn[i++] = BPF_EXIT_INSN();
}
/* Check value in memory */
if (rs != rd) { /* No aliasing */
i += __bpf_ld_imm64(&insn[i], R1, res);
} else if (op == BPF_XCHG) { /* Aliased, XCHG */
insn[i++] = BPF_MOV64_REG(R1, R10);
} else if (op == BPF_CMPXCHG) { /* Aliased, CMPXCHG */
i += __bpf_ld_imm64(&insn[i], R1, mem);
} else { /* Aliased, ALU oper */
i += __bpf_ld_imm64(&insn[i], R1, mem);
insn[i++] = BPF_ALU64_REG(BPF_OP(op), R1, R10);
}
insn[i++] = BPF_LDX_MEM(width, R0, R10, -8);
if (width == BPF_DW)
insn[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
else /* width == BPF_W */
insn[i++] = BPF_JMP32_REG(BPF_JEQ, R0, R1, 2);
insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
insn[i++] = BPF_EXIT_INSN();
}
}
insn[i++] = BPF_MOV64_IMM(R0, 1);
insn[i++] = BPF_EXIT_INSN();
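/*
 * The number of emitted insns varies with register aliasing, so
 * store the actual count and only bound-check the estimate.
 */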
self->u.ptr.insns = insn;
self->u.ptr.len = i;
BUG_ON(i > len);
return 0;
}
/* 64-bit atomic register tests */
static int bpf_fill_atomic64_add_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_ADD);
}
static int bpf_fill_atomic64_and_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_AND);
}
static int bpf_fill_atomic64_or_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_OR);
}
static int bpf_fill_atomic64_xor_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XOR);
}
static int bpf_fill_atomic64_add_fetch_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_ADD | BPF_FETCH);
}
static int bpf_fill_atomic64_and_fetch_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_AND | BPF_FETCH);
}
static int bpf_fill_atomic64_or_fetch_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_OR | BPF_FETCH);
}
static int bpf_fill_atomic64_xor_fetch_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XOR | BPF_FETCH);
}
static int bpf_fill_atomic64_xchg_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XCHG);
}
static int bpf_fill_atomic64_cmpxchg_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_CMPXCHG);
}
/* 32-bit atomic register tests */
static int bpf_fill_atomic32_add_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_ADD);
}
static int bpf_fill_atomic32_and_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_AND);
}
static int bpf_fill_atomic32_or_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_OR);
}
static int bpf_fill_atomic32_xor_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XOR);
}
static int bpf_fill_atomic32_add_fetch_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_ADD | BPF_FETCH);
}
static int bpf_fill_atomic32_and_fetch_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_AND | BPF_FETCH);
}
static int bpf_fill_atomic32_or_fetch_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_OR | BPF_FETCH);
}
static int bpf_fill_atomic32_xor_fetch_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XOR | BPF_FETCH);
}
static int bpf_fill_atomic32_xchg_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XCHG);
}
static int bpf_fill_atomic32_cmpxchg_reg_pairs(struct bpf_test *self)
{
return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_CMPXCHG);
}
/*
* Test the two-instruction 64-bit immediate load operation for all
* power-of-two magnitudes of the immediate operand. For each MSB, a block
* of immediate values centered around the power-of-two MSB are tested,
* both for positive and negative values. The test is designed to verify
* the operation for JITs that emit different code depending on the magnitude
* of the immediate value. This is often the case if the native instruction
* immediate field width is narrower than 32 bits.
*/
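/*
 * For example, with bit = 16 and block = 64, the immediate values
 * +/-(0x10000 - 32) through +/-(0x10000 + 31) are all covered,
 * straddling the 16-bit magnitude boundary from both sides.
 */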
static int bpf_fill_ld_imm64_magn(struct bpf_test *self)
{
int block = 64; /* Increase for more tests per MSB position */
int len = 3 + 8 * 63 * block * 2; /* 63 MSBs x 2 signs x block imms, 8 insns each, plus 3 */
struct bpf_insn *insn;
int bit, adj, sign;
int i = 0;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
for (bit = 0; bit <= 62; bit++) {
for (adj = -block / 2; adj < block / 2; adj++) {
for (sign = -1; sign <= 1; sign += 2) {
s64 imm = sign * ((1LL << bit) + adj);
/* Perform operation */
i += __bpf_ld_imm64(&insn[i], R1, imm);
/* Load reference */
insn[i++] = BPF_ALU32_IMM(BPF_MOV, R2, imm);
insn[i++] = BPF_ALU32_IMM(BPF_MOV, R3,
(u32)(imm >> 32));
insn[i++] = BPF_ALU64_IMM(BPF_LSH, R3, 32);
insn[i++] = BPF_ALU64_REG(BPF_OR, R2, R3);
/* Check result */
insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
insn[i++] = BPF_EXIT_INSN();
}
}
}
insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
insn[i++] = BPF_EXIT_INSN();
self->u.ptr.insns = insn;
self->u.ptr.len = len;
BUG_ON(i != len);
return 0;
}
/*
* Test the two-instruction 64-bit immediate load operation for different
* combinations of bytes. Each byte in the 64-bit word is constructed as
* (base & mask) | (rand() & ~mask), where rand() is a deterministic LCG.
 * All patterns of (base1, mask1) and (base2, mask2) bytes are tested.
*/
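/*
 * For example, bpf_fill_ld_imm64_checker() below uses (0, 0xff) and
 * (0xff, 0xff), forcing every byte to 0x00 or 0xff; the 256 patterns
 * then enumerate all such checkerboard words, e.g. pattern 0x55
 * yields 0x00ff00ff00ff00ff.
 */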
static int __bpf_fill_ld_imm64_bytes(struct bpf_test *self,
u8 base1, u8 mask1,
u8 base2, u8 mask2)
{
struct bpf_insn *insn;
int len = 3 + 8 * BIT(8); /* 256 byte patterns, 8 insns each, plus 3 */
int pattern, index;
u32 rand = 1;
int i = 0;
insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
if (!insn)
return -ENOMEM;
insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
for (pattern = 0; pattern < BIT(8); pattern++) {
u64 imm = 0;
for (index = 0; index < 8; index++) {
u8 byte; /* truncated to a single byte, as described above */
if (pattern & BIT(index))
byte = (base1 & mask1) | (rand & ~mask1);
else
byte = (base2 & mask2) | (rand & ~mask2);
imm = (imm << 8) | byte;
}
/* Update our LCG */
rand = rand * 1664525 + 1013904223;
/* Perform operation */
i += __bpf_ld_imm64(&insn[i], R1, imm);
/* Load reference */
insn[i++] = BPF_ALU32_IMM(BPF_MOV, R2, imm);
insn[i++] = BPF_ALU32_IMM(BPF_MOV, R3, (u32)(imm >> 32));
insn[i++] = BPF_ALU64_IMM(BPF_LSH, R3, 32);
insn[i++] = BPF_ALU64_REG(BPF_OR, R2, R3);
/* Check result */
insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
insn[i++] = BPF_EXIT_INSN();
}
insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
insn[i++] = BPF_EXIT_INSN();
self->u.ptr.insns = insn;
self->u.ptr.len = len;
BUG_ON(i != len);
return 0;
}
static int bpf_fill_ld_imm64_checker(struct bpf_test *self)
{
return __bpf_fill_ld_imm64_bytes(self, 0, 0xff, 0xff, 0xff);
}
static int bpf_fill_ld_imm64_pos_neg(struct bpf_test *self)
{
return __bpf_fill_ld_imm64_bytes(self, 1, 0x81, 0x80, 0x80);
}
static int bpf_fill_ld_imm64_pos_zero(struct bpf_test *self)
{
return __bpf_fill_ld_imm64_bytes(self, 1, 0x81, 0, 0xff);
}
static int bpf_fill_ld_imm64_neg_zero(struct bpf_test *self)
{
return __bpf_fill_ld_imm64_bytes(self, 0x80, 0x80, 0, 0xff);
}
/*
* Exhaustive tests of JMP operations for all combinations of power-of-two
* magnitudes of the operands, both for positive and negative values. The
* test is designed to verify e.g. the JMP and JMP32 operations for JITs that
* emit different code depending on the magnitude of the immediate value.
*/
static bool __bpf_match_jmp_cond(s64 v1, s64 v2, u8 op)
{
switch (op) {
case BPF_JSET:
return !!(v1 & v2);
case BPF_JEQ:
return v1 == v2;
case BPF_JNE:
return v1 != v2;
case BPF_JGT:
return (u64)v1 > (u64)v2;
case BPF_JGE:
return (u64)v1 >= (u64)v2;
case BPF_JLT:
return (u64)v1 < (u64)v2;
case BPF_JLE:
return (u64)v1 <= (u64)v2;
case BPF_JSGT:
return v1 > v2;
case BPF_JSGE:
return v1 >= v2;
case BPF_JSLT:
return v1 < v2;
case BPF_JSLE:
return v1 <= v2;
}
return false;
}
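/*
 * The emitters below follow the __bpf_fill_pattern() probe protocol:
 * when called with insns == NULL they return the worst-case number
 * of instructions, otherwise the number actually emitted.
 */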
static int __bpf_emit_jmp_imm(struct bpf_test *self, void *arg,
struct bpf_insn *insns, s64 dst, s64 imm)
{
int op = *(int *)arg;
if (insns) {
bool match = __bpf_match_jmp_cond(dst, (s32)imm, op);
int i = 0;
insns[i++] = BPF_ALU32_IMM(BPF_MOV, R0, match);
i += __bpf_ld_imm64(&insns[i], R1, dst);
insns[i++] = BPF_JMP_IMM(op, R1, imm, 1);
if (!match)
insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
insns[i++] = BPF_EXIT_INSN();
return i;
}
return 5 + 1;
}
static int __bpf_emit_jmp32_imm(struct bpf_test *self, void *arg,
struct bpf_insn *insns, s64 dst, s64 imm)
{
int op = *(int *)arg;
if (insns) {
bool match = __bpf_match_jmp_cond((s32)dst, (s32)imm, op);
int i = 0;
i += __bpf_ld_imm64(&insns[i], R1, dst);
insns[i++] = BPF_JMP32_IMM(op, R1, imm, 1);
if (!match)
insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
insns[i++] = BPF_EXIT_INSN();
return i;
}
return 5;
}
static int __bpf_emit_jmp_reg(struct bpf_test *self, void *arg,
struct bpf_insn *insns, s64 dst, s64 src)
{
int op = *(int *)arg;
if (insns) {
bool match = __bpf_match_jmp_cond(dst, src, op);
int i = 0;
i += __bpf_ld_imm64(&insns[i], R1, dst);
i += __bpf_ld_imm64(&insns[i], R2, src);
insns[i++] = BPF_JMP_REG(op, R1, R2, 1);
if (!match)
insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
insns[i++] = BPF_EXIT_INSN();
return i;
}
return 7;
}
static int __bpf_emit_jmp32_reg(struct bpf_test *self, void *arg,
struct bpf_insn *insns, s64 dst, s64 src)
{
int op = *(int *)arg;
if (insns) {
bool match = __bpf_match_jmp_cond((s32)dst, (s32)src, op);
int i = 0;
i += __bpf_ld_imm64(&insns[i], R1, dst);
i += __bpf_ld_imm64(&insns[i], R2, src);
insns[i++] = BPF_JMP32_REG(op, R1, R2, 1);
if (!match)
insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
insns[i++] = BPF_EXIT_INSN();
return i;
}
return 7;
}
static int __bpf_fill_jmp_imm(struct bpf_test *self, int op)
{
return __bpf_fill_pattern(self, &op, 64, 32,
PATTERN_BLOCK1, PATTERN_BLOCK2,
&__bpf_emit_jmp_imm);
}
static int __bpf_fill_jmp32_imm(struct bpf_test *self, int op)
{
return __bpf_fill_pattern(self, &op, 64, 32,
PATTERN_BLOCK1, PATTERN_BLOCK2,
&__bpf_emit_jmp32_imm);
}
static int __bpf_fill_jmp_reg(struct bpf_test *self, int op)
{
return __bpf_fill_pattern(self, &op, 64, 64,
PATTERN_BLOCK1, PATTERN_BLOCK2,
&__bpf_emit_jmp_reg);
}
static int __bpf_fill_jmp32_reg(struct bpf_test *self, int op)
{
return __bpf_fill_pattern(self, &op, 64, 64,
PATTERN_BLOCK1, PATTERN_BLOCK2,
&__bpf_emit_jmp32_reg);
}
/* JMP immediate tests */
static int bpf_fill_jmp_jset_imm(struct bpf_test *self)
{
return __bpf_fill_jmp_imm(self, BPF_JSET);
}
static int bpf_fill_jmp_jeq_imm(struct bpf_test *self)
{
return __bpf_fill_jmp_imm(self, BPF_JEQ);
}
static int bpf_fill_jmp_jne_imm(struct bpf_test *self)
{
return __bpf_fill_jmp_imm(self, BPF_JNE);
}
static int bpf_fill_jmp_jgt_imm(struct bpf_test *self)
{
return __bpf_fill_jmp_imm(self, BPF_JGT);
}
static int bpf_fill_jmp_jge_imm(struct bpf_test *self)
{
return __bpf_fill_jmp_imm(self, BPF_JGE);
}
static int bpf_fill_jmp_jlt_imm(struct bpf_test *self)
{
return __bpf_fill_jmp_imm(self, BPF_JLT);
}
static int bpf_fill_jmp_jle_imm(struct bpf_test *self)
{
return __bpf_fill_jmp_imm(self, BPF_JLE);
}
static int bpf_fill_jmp_jsgt_imm(struct bpf_test *self)
{
return __bpf_fill_jmp_imm(self, BPF_JSGT);
}
static int bpf_fill_jmp_jsge_imm(struct bpf_test *self)
{
return __bpf_fill_jmp_imm(self, BPF_JSGE);
}
static int bpf_fill_jmp_jslt_imm(struct bpf_test *self)
{
return __bpf_fill_jmp_imm(self, BPF_JSLT);
}
static int bpf_fill_jmp_jsle_imm(struct bpf_test *self)
{
return __bpf_fill_jmp_imm(self, BPF_JSLE);
}
/* JMP32 immediate tests */
static int bpf_fill_jmp32_jset_imm(struct bpf_test *self)
{
return __bpf_fill_jmp32_imm(self, BPF_JSET);
}
static int bpf_fill_jmp32_jeq_imm(struct bpf_test *self)
{
return __bpf_fill_jmp32_imm(self, BPF_JEQ);
}
static int bpf_fill_jmp32_jne_imm(struct bpf_test *self)
{
return __bpf_fill_jmp32_imm(self, BPF_JNE);
}
static int bpf_fill_jmp32_jgt_imm(struct bpf_test *self)
{
return __bpf_fill_jmp32_imm(self, BPF_JGT);
}
static int bpf_fill_jmp32_jge_imm(struct bpf_test *self)
{
return __bpf_fill_jmp32_imm(self, BPF_JGE);
}
static int bpf_fill_jmp32_jlt_imm(struct bpf_test *self)
{
return __bpf_fill_jmp32_imm(self, BPF_JLT);
}
static int bpf_fill_jmp32_jle_imm(struct bpf_test *self)
{
return __bpf_fill_jmp32_imm(self, BPF_JLE);
}
static int bpf_fill_jmp32_jsgt_imm(struct bpf_test *self)
{
return __bpf_fill_jmp32_imm(self, BPF_JSGT);
}
static int bpf_fill_jmp32_jsge_imm(struct bpf_test *self)
{
return __bpf_fill_jmp32_imm(self, BPF_JSGE);
}
static int bpf_fill_jmp32_jslt_imm(struct bpf_test *self)
{
return __bpf_fill_jmp32_imm(self, BPF_JSLT);
}
static int bpf_fill_jmp32_jsle_imm(struct bpf_test *self)
{
return __bpf_fill_jmp32_imm(self, BPF_JSLE);
}
/* JMP register tests */
static int bpf_fill_jmp_jset_reg(struct bpf_test *self)
{
return __bpf_fill_jmp_reg(self, BPF_JSET);
}
static int bpf_fill_jmp_jeq_reg(struct bpf_test *self)
{
return __bpf_fill_jmp_reg(self, BPF_JEQ);
}
static int bpf_fill_jmp_jne_reg(struct bpf_test *self)
{
return __bpf_fill_jmp_reg(self, BPF_JNE);
}
static int bpf_fill_jmp_jgt_reg(struct bpf_test *self)
{
return __bpf_fill_jmp_reg(self, BPF_JGT);
}
static int bpf_fill_jmp_jge_reg(struct bpf_test *self)
{
return __bpf_fill_jmp_reg(self, BPF_JGE);
}
static int bpf_fill_jmp_jlt_reg(struct bpf_test *self)
{
return __bpf_fill_jmp_reg(self, BPF_JLT);
}
static int bpf_fill_jmp_jle_reg(struct bpf_test *self)
{
return __bpf_fill_jmp_reg(self, BPF_JLE);
}
static int bpf_fill_jmp_jsgt_reg(struct bpf_test *self)
{
return __bpf_fill_jmp_reg(self, BPF_JSGT);
}
static int bpf_fill_jmp_jsge_reg(struct bpf_test *self)
{
return __bpf_fill_jmp_reg(self, BPF_JSGE);
}
static int bpf_fill_jmp_jslt_reg(struct bpf_test *self)
{
return __bpf_fill_jmp_reg(self, BPF_JSLT);
}
static int bpf_fill_jmp_jsle_reg(struct bpf_test *self)
{
return __bpf_fill_jmp_reg(self, BPF_JSLE);
}
/* JMP32 register tests */
static int bpf_fill_jmp32_jset_reg(struct bpf_test *self)
{
return __bpf_fill_jmp32_reg(self, BPF_JSET);
}
static int bpf_fill_jmp32_jeq_reg(struct bpf_test *self)
{
return __bpf_fill_jmp32_reg(self, BPF_JEQ);
}
static int bpf_fill_jmp32_jne_reg(struct bpf_test *self)
{
return __bpf_fill_jmp32_reg(self, BPF_JNE);
}
static int bpf_fill_jmp32_jgt_reg(struct bpf_test *self)
{
return __bpf_fill_jmp32_reg(self, BPF_JGT);
}
static int bpf_fill_jmp32_jge_reg(struct bpf_test *self)
{
return __bpf_fill_jmp32_reg(self, BPF_JGE);
}
static int bpf_fill_jmp32_jlt_reg(struct bpf_test *self)
{
return __bpf_fill_jmp32_reg(self, BPF_JLT);
}
static int bpf_fill_jmp32_jle_reg(struct bpf_test *self)
{
return __bpf_fill_jmp32_reg(self, BPF_JLE);
}
static int bpf_fill_jmp32_jsgt_reg(struct bpf_test *self)
{
return __bpf_fill_jmp32_reg(self, BPF_JSGT);
}
static int bpf_fill_jmp32_jsge_reg(struct bpf_test *self)
{
return __bpf_fill_jmp32_reg(self, BPF_JSGE);
}
static int bpf_fill_jmp32_jslt_reg(struct bpf_test *self)
{
return __bpf_fill_jmp32_reg(self, BPF_JSLT);
}
static int bpf_fill_jmp32_jsle_reg(struct bpf_test *self)
{
return __bpf_fill_jmp32_reg(self, BPF_JSLE);
}
/*
* Set up a sequence of staggered jumps, forwards and backwards with
* increasing offset. This tests the conversion of relative jumps to
* JITed native jumps. On some architectures, for example MIPS, a large
* PC-relative jump offset may overflow the immediate field of the native
* conditional branch instruction, triggering a conversion to use an
* absolute jump instead. Since this changes the jump offsets, another
* offset computation pass is necessary, and that may in turn trigger
* another branch conversion. This jump sequence is particularly nasty
* in that regard.
*
* The sequence generation is parameterized by size and jump type.
* The size must be even, and the expected result is always size + 1.
* Below is an example with size=8 and result=9.
*
* ________________________Start
* R0 = 0
* R1 = r1
* R2 = r2
* ,------- JMP +4 * 3______________Preamble: 4 insns
* ,----------|-ind 0- if R0 != 7 JMP 8 * 3 + 1 <--------------------.
* | | R0 = 8 |
* | | JMP +7 * 3 ------------------------.
* | ,--------|-----1- if R0 != 5 JMP 7 * 3 + 1 <--------------. | |
* | | | R0 = 6 | | |
* | | | JMP +5 * 3 ------------------. | |
* | | ,------|-----2- if R0 != 3 JMP 6 * 3 + 1 <--------. | | | |
* | | | | R0 = 4 | | | | |
* | | | | JMP +3 * 3 ------------. | | | |
* | | | ,----|-----3- if R0 != 1 JMP 5 * 3 + 1 <--. | | | | | |
* | | | | | R0 = 2 | | | | | | |
* | | | | | JMP +1 * 3 ------. | | | | | |
* | | | | ,--t=====4> if R0 != 0 JMP 4 * 3 + 1 1 2 3 4 5 6 7 8 loc
* | | | | | R0 = 1 -1 +2 -3 +4 -5 +6 -7 +8 off
* | | | | | JMP -2 * 3 ---' | | | | | | |
* | | | | | ,------5- if R0 != 2 JMP 3 * 3 + 1 <-----' | | | | | |
* | | | | | | R0 = 3 | | | | | |
* | | | | | | JMP -4 * 3 ---------' | | | | |
* | | | | | | ,----6- if R0 != 4 JMP 2 * 3 + 1 <-----------' | | | |
* | | | | | | | R0 = 5 | | | |
* | | | | | | | JMP -6 * 3 ---------------' | | |
* | | | | | | | ,--7- if R0 != 6 JMP 1 * 3 + 1 <-----------------' | |
* | | | | | | | | R0 = 7 | |
* | | Error | | | JMP -8 * 3 ---------------------' |
* | | paths | | | ,8- if R0 != 8 JMP 0 * 3 + 1 <-----------------------'
 * | | | | | | | | | R0 = 9__________________Sequence: 3 * size + 2 insns
* `-+-+-+-+-+-+-+-+-> EXIT____________________Return: 1 insn
*
*/
/*
 * The maximum size parameter: the jump offset field of a BPF insn is
 * a signed 16-bit value, so the farthest target is 0x7fff insns away.
 * Each sequence step emits 3 insns, and the size must be even.
 */
#define MAX_STAGGERED_JMP_SIZE ((0x7fff / 3) & ~1)
/* We use a reduced number of iterations to get a reasonable execution time */
#define NR_STAGGERED_JMP_RUNS 10
static int __bpf_fill_staggered_jumps(struct bpf_test *self,
const struct bpf_insn *jmp,
u64 r1, u64 r2)
{
int size = self->test[0].result - 1;
int len = 4 + 3 * (size + 1);
struct bpf_insn *insns;
int off, ind;
insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
if (!insns)
return -ENOMEM;
/* Preamble */
insns[0] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
insns[1] = BPF_ALU64_IMM(BPF_MOV, R1, r1);
insns[2] = BPF_ALU64_IMM(BPF_MOV, R2, r2);
insns[3] = BPF_JMP_IMM(BPF_JA, 0, 0, 3 * size / 2); /* Enter mid-sequence */
/* Sequence */
for (ind = 0, off = size; ind <= size; ind++, off -= 2) {
struct bpf_insn *ins = &insns[4 + 3 * ind];
int loc;
if (off == 0)
off--;
loc = abs(off);
ins[0] = BPF_JMP_IMM(BPF_JNE, R0, loc - 1,
3 * (size - ind) + 1);
ins[1] = BPF_ALU64_IMM(BPF_MOV, R0, loc);
ins[2] = *jmp;
ins[2].off = 3 * (off - 1);
}
/* Return */
insns[len - 1] = BPF_EXIT_INSN();
self->u.ptr.insns = insns;
self->u.ptr.len = len;
return 0;
}
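/*
 * In the fillers below, r1 and r2 are chosen so that the templated
 * conditional jump is always taken, making every variant traverse
 * the same staggered sequence as the unconditional BPF_JA case.
 */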
/* 64-bit unconditional jump */
static int bpf_fill_staggered_ja(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 0, 0);
}
/* 64-bit immediate jumps */
static int bpf_fill_staggered_jeq_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JEQ, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}
static int bpf_fill_staggered_jne_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JNE, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0);
}
static int bpf_fill_staggered_jset_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSET, R1, 0x82, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0);
}
static int bpf_fill_staggered_jgt_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGT, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0);
}
static int bpf_fill_staggered_jge_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGE, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}
static int bpf_fill_staggered_jlt_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLT, R1, 0x80000000, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}
static int bpf_fill_staggered_jle_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLE, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}
static int bpf_fill_staggered_jsgt_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGT, R1, -2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
}
static int bpf_fill_staggered_jsge_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGE, R1, -2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
}
static int bpf_fill_staggered_jslt_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLT, R1, -1, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
}
static int bpf_fill_staggered_jsle_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLE, R1, -1, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
}
/* 64-bit register jumps */
static int bpf_fill_staggered_jeq_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_REG(BPF_JEQ, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
}
static int bpf_fill_staggered_jne_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_REG(BPF_JNE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234);
}
static int bpf_fill_staggered_jset_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_REG(BPF_JSET, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82);
}
static int bpf_fill_staggered_jgt_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_REG(BPF_JGT, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234);
}
static int bpf_fill_staggered_jge_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_REG(BPF_JGE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
}
static int bpf_fill_staggered_jlt_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_REG(BPF_JLT, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000);
}
static int bpf_fill_staggered_jle_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_REG(BPF_JLE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
}
static int bpf_fill_staggered_jsgt_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGT, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -1, -2);
}
static int bpf_fill_staggered_jsge_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -2, -2);
}
static int bpf_fill_staggered_jslt_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLT, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -2, -1);
}
static int bpf_fill_staggered_jsle_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -1, -1);
}
/* 32-bit immediate jumps */
static int bpf_fill_staggered_jeq32_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JEQ, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}
static int bpf_fill_staggered_jne32_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JNE, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0);
}
static int bpf_fill_staggered_jset32_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSET, R1, 0x82, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0);
}
static int bpf_fill_staggered_jgt32_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGT, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0);
}
static int bpf_fill_staggered_jge32_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGE, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}
static int bpf_fill_staggered_jlt32_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLT, R1, 0x80000000, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}
static int bpf_fill_staggered_jle32_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLE, R1, 1234, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}
static int bpf_fill_staggered_jsgt32_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSGT, R1, -2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
}
static int bpf_fill_staggered_jsge32_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSGE, R1, -2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
}
static int bpf_fill_staggered_jslt32_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSLT, R1, -1, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
}
static int bpf_fill_staggered_jsle32_imm(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSLE, R1, -1, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
}
/* 32-bit register jumps */
static int bpf_fill_staggered_jeq32_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JEQ, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
}
static int bpf_fill_staggered_jne32_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JNE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234);
}
static int bpf_fill_staggered_jset32_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSET, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82);
}
static int bpf_fill_staggered_jgt32_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JGT, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234);
}
static int bpf_fill_staggered_jge32_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JGE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
}
static int bpf_fill_staggered_jlt32_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JLT, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000);
}
static int bpf_fill_staggered_jle32_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JLE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
}
static int bpf_fill_staggered_jsgt32_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSGT, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -1, -2);
}
static int bpf_fill_staggered_jsge32_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSGE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -2, -2);
}
static int bpf_fill_staggered_jslt32_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSLT, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -2, -1);
}
static int bpf_fill_staggered_jsle32_reg(struct bpf_test *self)
{
struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSLE, R1, R2, 0);
return __bpf_fill_staggered_jumps(self, &jmp, -1, -1);
}
static struct bpf_test tests[] = {
{
"TAX",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 1),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_LD | BPF_IMM, 2),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_ALU | BPF_NEG, 0), /* A == -3 */
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_LD | BPF_LEN, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X == len - 3 */
BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
{ 10, 20, 30, 40, 50 },
{ { 2, 10 }, { 3, 20 }, { 4, 30 } },
},
{
"TXA",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_LEN, 0),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */
},
CLASSIC,
{ 10, 20, 30, 40, 50 },
{ { 1, 2 }, { 3, 6 }, { 4, 8 } },
},
{
"ADD_SUB_MUL_K",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 1),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 2),
BPF_STMT(BPF_LDX | BPF_IMM, 3),
BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0xffffffff),
BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC | FLAG_NO_DATA,
{ },
{ { 0, 0xfffffffd } }
},
{
"DIV_MOD_KX",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 8),
BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x70000000),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC | FLAG_NO_DATA,
{ },
{ { 0, 0x20000000 } }
},
{
"AND_OR_LSH_K",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 0xff),
BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 27),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_LD | BPF_IMM, 0xf),
BPF_STMT(BPF_ALU | BPF_OR | BPF_K, 0xf0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC | FLAG_NO_DATA,
{ },
{ { 0, 0x800000ff }, { 1, 0x800000ff } },
},
{
"LD_IMM_0",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 0), /* ld #0 */
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 1, 0),
BPF_STMT(BPF_RET | BPF_K, 0),
BPF_STMT(BPF_RET | BPF_K, 1),
},
CLASSIC,
{ },
{ { 1, 1 } },
},
{
"LD_IND",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_LEN, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K),
BPF_STMT(BPF_RET | BPF_K, 1)
},
CLASSIC,
{ },
{ { 1, 0 }, { 10, 0 }, { 60, 0 } },
},
{
"LD_ABS",
.u.insns = {
BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000),
BPF_STMT(BPF_RET | BPF_K, 1)
},
CLASSIC,
{ },
{ { 1, 0 }, { 10, 0 }, { 60, 0 } },
},
{
"LD_ABS_LL",
.u.insns = {
BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 1),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
{ 1, 2, 3 },
{ { 1, 0 }, { 2, 3 } },
},
{
"LD_IND_LL",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, SKF_LL_OFF - 1),
BPF_STMT(BPF_LDX | BPF_LEN, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
{ 1, 2, 3, 0xff },
{ { 1, 1 }, { 3, 3 }, { 4, 0xff } },
},
{
"LD_ABS_NET",
.u.insns = {
BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 1),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
{ { 15, 0 }, { 16, 3 } },
},
{
"LD_IND_NET",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, SKF_NET_OFF - 15),
BPF_STMT(BPF_LDX | BPF_LEN, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
{ { 14, 0 }, { 15, 1 }, { 17, 3 } },
},
{
"LD_PKTTYPE",
.u.insns = {
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_PKTTYPE),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
BPF_STMT(BPF_RET | BPF_K, 1),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_PKTTYPE),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
BPF_STMT(BPF_RET | BPF_K, 1),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_PKTTYPE),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
BPF_STMT(BPF_RET | BPF_K, 1),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
{ },
{ { 1, 3 }, { 10, 3 } },
},
{
"LD_MARK",
.u.insns = {
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_MARK),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
{ },
{ { 1, SKB_MARK}, { 10, SKB_MARK} },
},
{
"LD_RXHASH",
.u.insns = {
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_RXHASH),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
{ },
{ { 1, SKB_HASH}, { 10, SKB_HASH} },
},
{
"LD_QUEUE",
.u.insns = {
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_QUEUE),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
{ },
{ { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } },
},
{
"LD_PROTOCOL",
.u.insns = {
BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 1),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 20, 1, 0),
BPF_STMT(BPF_RET | BPF_K, 0),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_PROTOCOL),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 30, 1, 0),
BPF_STMT(BPF_RET | BPF_K, 0),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
{ 10, 20, 30 },
{ { 10, ETH_P_IP }, { 100, ETH_P_IP } },
},
{
"LD_VLAN_TAG",
.u.insns = {
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_VLAN_TAG),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
{ },
{
{ 1, SKB_VLAN_TCI },
{ 10, SKB_VLAN_TCI }
},
},
{
"LD_VLAN_TAG_PRESENT",
.u.insns = {
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
{ },
{
{ 1, SKB_VLAN_PRESENT },
{ 10, SKB_VLAN_PRESENT }
},
},
{
"LD_IFINDEX",
.u.insns = {
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_IFINDEX),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
{ },
{ { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } },
},
{
"LD_HATYPE",
.u.insns = {
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_HATYPE),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
{ },
{ { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } },
},
{
"LD_CPU",
.u.insns = {
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_CPU),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_CPU),
BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
{ },
{ { 1, 0 }, { 10, 0 } },
},
{
"LD_NLATTR",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 2),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_LDX | BPF_IMM, 3),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_NLATTR),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
#ifdef __BIG_ENDIAN
{ 0xff, 0xff, 0, 4, 0, 2, 0, 4, 0, 3 },
#else
{ 0xff, 0xff, 4, 0, 2, 0, 4, 0, 3, 0 },
#endif
{ { 4, 0 }, { 20, 6 } },
},
{
"LD_NLATTR_NEST",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 2),
BPF_STMT(BPF_LDX | BPF_IMM, 3),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_NLATTR_NEST),
BPF_STMT(BPF_LD | BPF_IMM, 2),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_NLATTR_NEST),
BPF_STMT(BPF_LD | BPF_IMM, 2),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_NLATTR_NEST),
BPF_STMT(BPF_LD | BPF_IMM, 2),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_NLATTR_NEST),
BPF_STMT(BPF_LD | BPF_IMM, 2),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_NLATTR_NEST),
BPF_STMT(BPF_LD | BPF_IMM, 2),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_NLATTR_NEST),
BPF_STMT(BPF_LD | BPF_IMM, 2),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_NLATTR_NEST),
BPF_STMT(BPF_LD | BPF_IMM, 2),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_NLATTR_NEST),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
#ifdef __BIG_ENDIAN
{ 0xff, 0xff, 0, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3 },
#else
{ 0xff, 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 },
#endif
{ { 4, 0 }, { 20, 10 } },
},
{
"LD_PAYLOAD_OFF",
.u.insns = {
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_PAY_OFFSET),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_PAY_OFFSET),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_PAY_OFFSET),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_PAY_OFFSET),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_PAY_OFFSET),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
/* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethertype IPv4 (0x0800),
* length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request,
* id 9737, seq 1, length 64
*/
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x08, 0x00,
0x45, 0x00, 0x00, 0x54, 0xac, 0x8b, 0x40, 0x00, 0x40,
0x01, 0x90, 0x1b, 0x7f, 0x00, 0x00, 0x01 },
{ { 30, 0 }, { 100, 42 } },
},
{
"LD_ANC_XOR",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 10),
BPF_STMT(BPF_LDX | BPF_IMM, 300),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_ALU_XOR_X),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
{ },
{ { 4, 0xA ^ 300 }, { 20, 0xA ^ 300 } },
},
{
"SPILL_FILL",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_LEN, 0),
BPF_STMT(BPF_LD | BPF_IMM, 2),
BPF_STMT(BPF_ALU | BPF_RSH, 1),
BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
BPF_STMT(BPF_ST, 1), /* M1 = 1 ^ len */
BPF_STMT(BPF_ALU | BPF_XOR | BPF_K, 0x80000000),
BPF_STMT(BPF_ST, 2), /* M2 = 1 ^ len ^ 0x80000000 */
BPF_STMT(BPF_STX, 15), /* M3 = len */
BPF_STMT(BPF_LDX | BPF_MEM, 1),
BPF_STMT(BPF_LD | BPF_MEM, 2),
BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 15),
BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
{ },
{ { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } }
},
{
"JEQ",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_LEN, 0),
BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 1),
BPF_STMT(BPF_RET | BPF_K, 1),
BPF_STMT(BPF_RET | BPF_K, MAX_K)
},
CLASSIC,
{ 3, 3, 3, 3, 3 },
{ { 1, 0 }, { 3, 1 }, { 4, MAX_K } },
},
{
"JGT",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_LEN, 0),
BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
BPF_JUMP(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 1),
BPF_STMT(BPF_RET | BPF_K, 1),
BPF_STMT(BPF_RET | BPF_K, MAX_K)
},
CLASSIC,
{ 4, 4, 4, 3, 3 },
{ { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
},
{
"JGE (jt 0), test 1",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_LEN, 0),
BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
BPF_STMT(BPF_RET | BPF_K, 1),
BPF_STMT(BPF_RET | BPF_K, MAX_K)
},
CLASSIC,
{ 4, 4, 4, 3, 3 },
{ { 2, 0 }, { 3, 1 }, { 4, 1 } },
},
{
"JGE (jt 0), test 2",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_LEN, 0),
BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
BPF_STMT(BPF_RET | BPF_K, 1),
BPF_STMT(BPF_RET | BPF_K, MAX_K)
},
CLASSIC,
{ 4, 4, 5, 3, 3 },
{ { 4, 1 }, { 5, 1 }, { 6, MAX_K } },
},
{
"JGE",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_LEN, 0),
BPF_STMT(BPF_LD | BPF_B | BPF_IND, MAX_K),
BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 1, 1, 0),
BPF_STMT(BPF_RET | BPF_K, 10),
BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 2, 1, 0),
BPF_STMT(BPF_RET | BPF_K, 20),
BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 3, 1, 0),
BPF_STMT(BPF_RET | BPF_K, 30),
BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 4, 1, 0),
BPF_STMT(BPF_RET | BPF_K, 40),
BPF_STMT(BPF_RET | BPF_K, MAX_K)
},
CLASSIC,
{ 1, 2, 3, 4, 5 },
{ { 1, 20 }, { 3, 40 }, { 5, MAX_K } },
},
{
"JSET",
.u.insns = {
BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
BPF_JUMP(BPF_JMP | BPF_JA, 1, 1, 1),
BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
BPF_STMT(BPF_LDX | BPF_LEN, 0),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, 4),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_LD | BPF_W | BPF_IND, 0),
BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 1, 0, 1),
BPF_STMT(BPF_RET | BPF_K, 10),
BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x80000000, 0, 1),
BPF_STMT(BPF_RET | BPF_K, 20),
BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
BPF_STMT(BPF_RET | BPF_K, 30),
BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
BPF_STMT(BPF_RET | BPF_K, 30),
BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
BPF_STMT(BPF_RET | BPF_K, 30),
BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
BPF_STMT(BPF_RET | BPF_K, 30),
BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
BPF_STMT(BPF_RET | BPF_K, 30),
BPF_STMT(BPF_RET | BPF_K, MAX_K)
},
CLASSIC,
{ 0, 0xAA, 0x55, 1 },
{ { 4, 10 }, { 5, 20 }, { 6, MAX_K } },
},
{
"tcpdump port 22",
.u.insns = {
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 0, 8), /* IPv6 */
BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 20),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 17),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 54),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 14, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 56),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 12, 13),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 12), /* IPv4 */
BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 8),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 6, 0),
BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 1),
BPF_STMT(BPF_RET | BPF_K, 0xffff),
BPF_STMT(BPF_RET | BPF_K, 0),
},
CLASSIC,
	/* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4 (0x0800)
* length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.],
* seq 1305692979:1305693027, ack 3650467037, win 65535,
* options [nop,nop,TS val 2502645400 ecr 3971138], length 48
*/
{ 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
0x08, 0x00,
0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
0x0a, 0x01, 0x01, 0x95, /* ip src */
0x0a, 0x01, 0x02, 0x0a, /* ip dst */
0xc2, 0x24,
0x00, 0x16 /* dst port */ },
{ { 10, 0 }, { 30, 0 }, { 100, 65535 } },
},
{
"tcpdump complex",
.u.insns = {
/* tcpdump -nei eth0 'tcp port 22 and (((ip[2:2] -
* ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0) and
* (len > 115 or len < 30000000000)' -d
*/
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 30, 0),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x800, 0, 29),
BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 0, 27),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 25, 0),
BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 20),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 16),
BPF_STMT(BPF_ST, 1),
BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 14),
BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf),
BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 2),
BPF_STMT(BPF_MISC | BPF_TAX, 0x5), /* libpcap emits K on TAX */
BPF_STMT(BPF_LD | BPF_MEM, 1),
BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
BPF_STMT(BPF_ST, 5),
BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
BPF_STMT(BPF_LD | BPF_B | BPF_IND, 26),
BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
BPF_STMT(BPF_ALU | BPF_RSH | BPF_K, 2),
BPF_STMT(BPF_MISC | BPF_TAX, 0x9), /* libpcap emits K on TAX */
BPF_STMT(BPF_LD | BPF_MEM, 5),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 4, 0),
BPF_STMT(BPF_LD | BPF_LEN, 0),
BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 0x73, 1, 0),
BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 0xfc23ac00, 1, 0),
BPF_STMT(BPF_RET | BPF_K, 0xffff),
BPF_STMT(BPF_RET | BPF_K, 0),
},
CLASSIC,
{ 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
0x08, 0x00,
0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
0x0a, 0x01, 0x01, 0x95, /* ip src */
0x0a, 0x01, 0x02, 0x0a, /* ip dst */
0xc2, 0x24,
0x00, 0x16 /* dst port */ },
{ { 10, 0 }, { 30, 0 }, { 100, 65535 } },
},
{
"RET_A",
.u.insns = {
/* check that uninitialized X and A contain zeros */
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_RET | BPF_A, 0)
},
CLASSIC,
{ },
{ {1, 0}, {2, 0} },
},
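	/* (1 + 2 - 3 - 1) * 3 = -3, i.e. 0xfffffffd in the low 32 bits. */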
{
"INT: ADD trivial",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R1, 1),
BPF_ALU64_IMM(BPF_ADD, R1, 2),
BPF_ALU64_IMM(BPF_MOV, R2, 3),
BPF_ALU64_REG(BPF_SUB, R1, R2),
BPF_ALU64_IMM(BPF_ADD, R1, -1),
BPF_ALU64_IMM(BPF_MUL, R1, 3),
BPF_ALU64_REG(BPF_MOV, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xfffffffd } }
},
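	/*
	 * -1 * 3 = -3; the JEQ immediate 0xfffffffd is sign-extended to
	 * 64 bits, so the compare against the full-width -3 succeeds.
	 */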
{
"INT: MUL_X",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, -1),
BPF_ALU64_IMM(BPF_MOV, R1, -1),
BPF_ALU64_IMM(BPF_MOV, R2, 3),
BPF_ALU64_REG(BPF_MUL, R1, R2),
BPF_JMP_IMM(BPF_JEQ, R1, 0xfffffffd, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } }
},
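	/*
	 * ALU32 MOV zero-extends, so R1 = 0xffffffff; the 64-bit product
	 * 0xffffffff * 3 = 0x2fffffffd, and shifting right by 8 leaves
	 * 0x2ffffff.
	 */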
{
"INT: MUL_X2",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -1),
BPF_ALU32_IMM(BPF_MOV, R1, -1),
BPF_ALU32_IMM(BPF_MOV, R2, 3),
BPF_ALU64_REG(BPF_MUL, R1, R2),
BPF_ALU64_IMM(BPF_RSH, R1, 8),
BPF_JMP_IMM(BPF_JEQ, R1, 0x2ffffff, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } }
},
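	/*
	 * A 32-bit multiply truncates: (0xffffffff * 3) & 0xffffffff =
	 * 0xfffffffd, which shifted right by 8 is 0xffffff.
	 */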
{
"INT: MUL32_X",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -1),
BPF_ALU64_IMM(BPF_MOV, R1, -1),
BPF_ALU32_IMM(BPF_MOV, R2, 3),
BPF_ALU32_REG(BPF_MUL, R1, R2),
BPF_ALU64_IMM(BPF_RSH, R1, 8),
BPF_JMP_IMM(BPF_JEQ, R1, 0xffffff, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } }
},
{
/* Have to test all register combinations, since
* JITing of different registers will produce
* different asm code.
*/
"INT: ADD 64-bit",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_ALU64_IMM(BPF_MOV, R1, 1),
BPF_ALU64_IMM(BPF_MOV, R2, 2),
BPF_ALU64_IMM(BPF_MOV, R3, 3),
BPF_ALU64_IMM(BPF_MOV, R4, 4),
BPF_ALU64_IMM(BPF_MOV, R5, 5),
BPF_ALU64_IMM(BPF_MOV, R6, 6),
BPF_ALU64_IMM(BPF_MOV, R7, 7),
BPF_ALU64_IMM(BPF_MOV, R8, 8),
BPF_ALU64_IMM(BPF_MOV, R9, 9),
BPF_ALU64_IMM(BPF_ADD, R0, 20),
BPF_ALU64_IMM(BPF_ADD, R1, 20),
BPF_ALU64_IMM(BPF_ADD, R2, 20),
BPF_ALU64_IMM(BPF_ADD, R3, 20),
BPF_ALU64_IMM(BPF_ADD, R4, 20),
BPF_ALU64_IMM(BPF_ADD, R5, 20),
BPF_ALU64_IMM(BPF_ADD, R6, 20),
BPF_ALU64_IMM(BPF_ADD, R7, 20),
BPF_ALU64_IMM(BPF_ADD, R8, 20),
BPF_ALU64_IMM(BPF_ADD, R9, 20),
BPF_ALU64_IMM(BPF_SUB, R0, 10),
BPF_ALU64_IMM(BPF_SUB, R1, 10),
BPF_ALU64_IMM(BPF_SUB, R2, 10),
BPF_ALU64_IMM(BPF_SUB, R3, 10),
BPF_ALU64_IMM(BPF_SUB, R4, 10),
BPF_ALU64_IMM(BPF_SUB, R5, 10),
BPF_ALU64_IMM(BPF_SUB, R6, 10),
BPF_ALU64_IMM(BPF_SUB, R7, 10),
BPF_ALU64_IMM(BPF_SUB, R8, 10),
BPF_ALU64_IMM(BPF_SUB, R9, 10),
BPF_ALU64_REG(BPF_ADD, R0, R0),
BPF_ALU64_REG(BPF_ADD, R0, R1),
BPF_ALU64_REG(BPF_ADD, R0, R2),
BPF_ALU64_REG(BPF_ADD, R0, R3),
BPF_ALU64_REG(BPF_ADD, R0, R4),
BPF_ALU64_REG(BPF_ADD, R0, R5),
BPF_ALU64_REG(BPF_ADD, R0, R6),
BPF_ALU64_REG(BPF_ADD, R0, R7),
BPF_ALU64_REG(BPF_ADD, R0, R8),
BPF_ALU64_REG(BPF_ADD, R0, R9), /* R0 == 155 */
BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, R1, R0),
BPF_ALU64_REG(BPF_ADD, R1, R1),
BPF_ALU64_REG(BPF_ADD, R1, R2),
BPF_ALU64_REG(BPF_ADD, R1, R3),
BPF_ALU64_REG(BPF_ADD, R1, R4),
BPF_ALU64_REG(BPF_ADD, R1, R5),
BPF_ALU64_REG(BPF_ADD, R1, R6),
BPF_ALU64_REG(BPF_ADD, R1, R7),
BPF_ALU64_REG(BPF_ADD, R1, R8),
BPF_ALU64_REG(BPF_ADD, R1, R9), /* R1 == 456 */
BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, R2, R0),
BPF_ALU64_REG(BPF_ADD, R2, R1),
BPF_ALU64_REG(BPF_ADD, R2, R2),
BPF_ALU64_REG(BPF_ADD, R2, R3),
BPF_ALU64_REG(BPF_ADD, R2, R4),
BPF_ALU64_REG(BPF_ADD, R2, R5),
BPF_ALU64_REG(BPF_ADD, R2, R6),
BPF_ALU64_REG(BPF_ADD, R2, R7),
BPF_ALU64_REG(BPF_ADD, R2, R8),
BPF_ALU64_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, R3, R0),
BPF_ALU64_REG(BPF_ADD, R3, R1),
BPF_ALU64_REG(BPF_ADD, R3, R2),
BPF_ALU64_REG(BPF_ADD, R3, R3),
BPF_ALU64_REG(BPF_ADD, R3, R4),
BPF_ALU64_REG(BPF_ADD, R3, R5),
BPF_ALU64_REG(BPF_ADD, R3, R6),
BPF_ALU64_REG(BPF_ADD, R3, R7),
BPF_ALU64_REG(BPF_ADD, R3, R8),
BPF_ALU64_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, R4, R0),
BPF_ALU64_REG(BPF_ADD, R4, R1),
BPF_ALU64_REG(BPF_ADD, R4, R2),
BPF_ALU64_REG(BPF_ADD, R4, R3),
BPF_ALU64_REG(BPF_ADD, R4, R4),
BPF_ALU64_REG(BPF_ADD, R4, R5),
BPF_ALU64_REG(BPF_ADD, R4, R6),
BPF_ALU64_REG(BPF_ADD, R4, R7),
BPF_ALU64_REG(BPF_ADD, R4, R8),
BPF_ALU64_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, R5, R0),
BPF_ALU64_REG(BPF_ADD, R5, R1),
BPF_ALU64_REG(BPF_ADD, R5, R2),
BPF_ALU64_REG(BPF_ADD, R5, R3),
BPF_ALU64_REG(BPF_ADD, R5, R4),
BPF_ALU64_REG(BPF_ADD, R5, R5),
BPF_ALU64_REG(BPF_ADD, R5, R6),
BPF_ALU64_REG(BPF_ADD, R5, R7),
BPF_ALU64_REG(BPF_ADD, R5, R8),
BPF_ALU64_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, R6, R0),
BPF_ALU64_REG(BPF_ADD, R6, R1),
BPF_ALU64_REG(BPF_ADD, R6, R2),
BPF_ALU64_REG(BPF_ADD, R6, R3),
BPF_ALU64_REG(BPF_ADD, R6, R4),
BPF_ALU64_REG(BPF_ADD, R6, R5),
BPF_ALU64_REG(BPF_ADD, R6, R6),
BPF_ALU64_REG(BPF_ADD, R6, R7),
BPF_ALU64_REG(BPF_ADD, R6, R8),
BPF_ALU64_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, R7, R0),
BPF_ALU64_REG(BPF_ADD, R7, R1),
BPF_ALU64_REG(BPF_ADD, R7, R2),
BPF_ALU64_REG(BPF_ADD, R7, R3),
BPF_ALU64_REG(BPF_ADD, R7, R4),
BPF_ALU64_REG(BPF_ADD, R7, R5),
BPF_ALU64_REG(BPF_ADD, R7, R6),
BPF_ALU64_REG(BPF_ADD, R7, R7),
BPF_ALU64_REG(BPF_ADD, R7, R8),
BPF_ALU64_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, R8, R0),
BPF_ALU64_REG(BPF_ADD, R8, R1),
BPF_ALU64_REG(BPF_ADD, R8, R2),
BPF_ALU64_REG(BPF_ADD, R8, R3),
BPF_ALU64_REG(BPF_ADD, R8, R4),
BPF_ALU64_REG(BPF_ADD, R8, R5),
BPF_ALU64_REG(BPF_ADD, R8, R6),
BPF_ALU64_REG(BPF_ADD, R8, R7),
BPF_ALU64_REG(BPF_ADD, R8, R8),
BPF_ALU64_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_ADD, R9, R0),
BPF_ALU64_REG(BPF_ADD, R9, R1),
BPF_ALU64_REG(BPF_ADD, R9, R2),
BPF_ALU64_REG(BPF_ADD, R9, R3),
BPF_ALU64_REG(BPF_ADD, R9, R4),
BPF_ALU64_REG(BPF_ADD, R9, R5),
BPF_ALU64_REG(BPF_ADD, R9, R6),
BPF_ALU64_REG(BPF_ADD, R9, R7),
BPF_ALU64_REG(BPF_ADD, R9, R8),
BPF_ALU64_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
BPF_ALU64_REG(BPF_MOV, R0, R9),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2957380 } }
},
{
"INT: ADD 32-bit",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 20),
BPF_ALU32_IMM(BPF_MOV, R1, 1),
BPF_ALU32_IMM(BPF_MOV, R2, 2),
BPF_ALU32_IMM(BPF_MOV, R3, 3),
BPF_ALU32_IMM(BPF_MOV, R4, 4),
BPF_ALU32_IMM(BPF_MOV, R5, 5),
BPF_ALU32_IMM(BPF_MOV, R6, 6),
BPF_ALU32_IMM(BPF_MOV, R7, 7),
BPF_ALU32_IMM(BPF_MOV, R8, 8),
BPF_ALU32_IMM(BPF_MOV, R9, 9),
BPF_ALU64_IMM(BPF_ADD, R1, 10),
BPF_ALU64_IMM(BPF_ADD, R2, 10),
BPF_ALU64_IMM(BPF_ADD, R3, 10),
BPF_ALU64_IMM(BPF_ADD, R4, 10),
BPF_ALU64_IMM(BPF_ADD, R5, 10),
BPF_ALU64_IMM(BPF_ADD, R6, 10),
BPF_ALU64_IMM(BPF_ADD, R7, 10),
BPF_ALU64_IMM(BPF_ADD, R8, 10),
BPF_ALU64_IMM(BPF_ADD, R9, 10),
BPF_ALU32_REG(BPF_ADD, R0, R1),
BPF_ALU32_REG(BPF_ADD, R0, R2),
BPF_ALU32_REG(BPF_ADD, R0, R3),
BPF_ALU32_REG(BPF_ADD, R0, R4),
BPF_ALU32_REG(BPF_ADD, R0, R5),
BPF_ALU32_REG(BPF_ADD, R0, R6),
BPF_ALU32_REG(BPF_ADD, R0, R7),
BPF_ALU32_REG(BPF_ADD, R0, R8),
BPF_ALU32_REG(BPF_ADD, R0, R9), /* R0 == 155 */
BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
BPF_EXIT_INSN(),
BPF_ALU32_REG(BPF_ADD, R1, R0),
BPF_ALU32_REG(BPF_ADD, R1, R1),
BPF_ALU32_REG(BPF_ADD, R1, R2),
BPF_ALU32_REG(BPF_ADD, R1, R3),
BPF_ALU32_REG(BPF_ADD, R1, R4),
BPF_ALU32_REG(BPF_ADD, R1, R5),
BPF_ALU32_REG(BPF_ADD, R1, R6),
BPF_ALU32_REG(BPF_ADD, R1, R7),
BPF_ALU32_REG(BPF_ADD, R1, R8),
BPF_ALU32_REG(BPF_ADD, R1, R9), /* R1 == 456 */
BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
BPF_EXIT_INSN(),
BPF_ALU32_REG(BPF_ADD, R2, R0),
BPF_ALU32_REG(BPF_ADD, R2, R1),
BPF_ALU32_REG(BPF_ADD, R2, R2),
BPF_ALU32_REG(BPF_ADD, R2, R3),
BPF_ALU32_REG(BPF_ADD, R2, R4),
BPF_ALU32_REG(BPF_ADD, R2, R5),
BPF_ALU32_REG(BPF_ADD, R2, R6),
BPF_ALU32_REG(BPF_ADD, R2, R7),
BPF_ALU32_REG(BPF_ADD, R2, R8),
BPF_ALU32_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
BPF_EXIT_INSN(),
BPF_ALU32_REG(BPF_ADD, R3, R0),
BPF_ALU32_REG(BPF_ADD, R3, R1),
BPF_ALU32_REG(BPF_ADD, R3, R2),
BPF_ALU32_REG(BPF_ADD, R3, R3),
BPF_ALU32_REG(BPF_ADD, R3, R4),
BPF_ALU32_REG(BPF_ADD, R3, R5),
BPF_ALU32_REG(BPF_ADD, R3, R6),
BPF_ALU32_REG(BPF_ADD, R3, R7),
BPF_ALU32_REG(BPF_ADD, R3, R8),
BPF_ALU32_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
BPF_EXIT_INSN(),
BPF_ALU32_REG(BPF_ADD, R4, R0),
BPF_ALU32_REG(BPF_ADD, R4, R1),
BPF_ALU32_REG(BPF_ADD, R4, R2),
BPF_ALU32_REG(BPF_ADD, R4, R3),
BPF_ALU32_REG(BPF_ADD, R4, R4),
BPF_ALU32_REG(BPF_ADD, R4, R5),
BPF_ALU32_REG(BPF_ADD, R4, R6),
BPF_ALU32_REG(BPF_ADD, R4, R7),
BPF_ALU32_REG(BPF_ADD, R4, R8),
BPF_ALU32_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
BPF_EXIT_INSN(),
BPF_ALU32_REG(BPF_ADD, R5, R0),
BPF_ALU32_REG(BPF_ADD, R5, R1),
BPF_ALU32_REG(BPF_ADD, R5, R2),
BPF_ALU32_REG(BPF_ADD, R5, R3),
BPF_ALU32_REG(BPF_ADD, R5, R4),
BPF_ALU32_REG(BPF_ADD, R5, R5),
BPF_ALU32_REG(BPF_ADD, R5, R6),
BPF_ALU32_REG(BPF_ADD, R5, R7),
BPF_ALU32_REG(BPF_ADD, R5, R8),
BPF_ALU32_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
BPF_EXIT_INSN(),
BPF_ALU32_REG(BPF_ADD, R6, R0),
BPF_ALU32_REG(BPF_ADD, R6, R1),
BPF_ALU32_REG(BPF_ADD, R6, R2),
BPF_ALU32_REG(BPF_ADD, R6, R3),
BPF_ALU32_REG(BPF_ADD, R6, R4),
BPF_ALU32_REG(BPF_ADD, R6, R5),
BPF_ALU32_REG(BPF_ADD, R6, R6),
BPF_ALU32_REG(BPF_ADD, R6, R7),
BPF_ALU32_REG(BPF_ADD, R6, R8),
BPF_ALU32_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
BPF_EXIT_INSN(),
BPF_ALU32_REG(BPF_ADD, R7, R0),
BPF_ALU32_REG(BPF_ADD, R7, R1),
BPF_ALU32_REG(BPF_ADD, R7, R2),
BPF_ALU32_REG(BPF_ADD, R7, R3),
BPF_ALU32_REG(BPF_ADD, R7, R4),
BPF_ALU32_REG(BPF_ADD, R7, R5),
BPF_ALU32_REG(BPF_ADD, R7, R6),
BPF_ALU32_REG(BPF_ADD, R7, R7),
BPF_ALU32_REG(BPF_ADD, R7, R8),
BPF_ALU32_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
BPF_EXIT_INSN(),
BPF_ALU32_REG(BPF_ADD, R8, R0),
BPF_ALU32_REG(BPF_ADD, R8, R1),
BPF_ALU32_REG(BPF_ADD, R8, R2),
BPF_ALU32_REG(BPF_ADD, R8, R3),
BPF_ALU32_REG(BPF_ADD, R8, R4),
BPF_ALU32_REG(BPF_ADD, R8, R5),
BPF_ALU32_REG(BPF_ADD, R8, R6),
BPF_ALU32_REG(BPF_ADD, R8, R7),
BPF_ALU32_REG(BPF_ADD, R8, R8),
BPF_ALU32_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
BPF_EXIT_INSN(),
BPF_ALU32_REG(BPF_ADD, R9, R0),
BPF_ALU32_REG(BPF_ADD, R9, R1),
BPF_ALU32_REG(BPF_ADD, R9, R2),
BPF_ALU32_REG(BPF_ADD, R9, R3),
BPF_ALU32_REG(BPF_ADD, R9, R4),
BPF_ALU32_REG(BPF_ADD, R9, R5),
BPF_ALU32_REG(BPF_ADD, R9, R6),
BPF_ALU32_REG(BPF_ADD, R9, R7),
BPF_ALU32_REG(BPF_ADD, R9, R8),
BPF_ALU32_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
BPF_ALU32_REG(BPF_MOV, R0, R9),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2957380 } }
},
{ /* Mainly checking JIT here. */
"INT: SUB",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_ALU64_IMM(BPF_MOV, R1, 1),
BPF_ALU64_IMM(BPF_MOV, R2, 2),
BPF_ALU64_IMM(BPF_MOV, R3, 3),
BPF_ALU64_IMM(BPF_MOV, R4, 4),
BPF_ALU64_IMM(BPF_MOV, R5, 5),
BPF_ALU64_IMM(BPF_MOV, R6, 6),
BPF_ALU64_IMM(BPF_MOV, R7, 7),
BPF_ALU64_IMM(BPF_MOV, R8, 8),
BPF_ALU64_IMM(BPF_MOV, R9, 9),
BPF_ALU64_REG(BPF_SUB, R0, R0),
BPF_ALU64_REG(BPF_SUB, R0, R1),
BPF_ALU64_REG(BPF_SUB, R0, R2),
BPF_ALU64_REG(BPF_SUB, R0, R3),
BPF_ALU64_REG(BPF_SUB, R0, R4),
BPF_ALU64_REG(BPF_SUB, R0, R5),
BPF_ALU64_REG(BPF_SUB, R0, R6),
BPF_ALU64_REG(BPF_SUB, R0, R7),
BPF_ALU64_REG(BPF_SUB, R0, R8),
BPF_ALU64_REG(BPF_SUB, R0, R9),
BPF_ALU64_IMM(BPF_SUB, R0, 10),
BPF_JMP_IMM(BPF_JEQ, R0, -55, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_SUB, R1, R0),
BPF_ALU64_REG(BPF_SUB, R1, R2),
BPF_ALU64_REG(BPF_SUB, R1, R3),
BPF_ALU64_REG(BPF_SUB, R1, R4),
BPF_ALU64_REG(BPF_SUB, R1, R5),
BPF_ALU64_REG(BPF_SUB, R1, R6),
BPF_ALU64_REG(BPF_SUB, R1, R7),
BPF_ALU64_REG(BPF_SUB, R1, R8),
BPF_ALU64_REG(BPF_SUB, R1, R9),
BPF_ALU64_IMM(BPF_SUB, R1, 10),
BPF_ALU64_REG(BPF_SUB, R2, R0),
BPF_ALU64_REG(BPF_SUB, R2, R1),
BPF_ALU64_REG(BPF_SUB, R2, R3),
BPF_ALU64_REG(BPF_SUB, R2, R4),
BPF_ALU64_REG(BPF_SUB, R2, R5),
BPF_ALU64_REG(BPF_SUB, R2, R6),
BPF_ALU64_REG(BPF_SUB, R2, R7),
BPF_ALU64_REG(BPF_SUB, R2, R8),
BPF_ALU64_REG(BPF_SUB, R2, R9),
BPF_ALU64_IMM(BPF_SUB, R2, 10),
BPF_ALU64_REG(BPF_SUB, R3, R0),
BPF_ALU64_REG(BPF_SUB, R3, R1),
BPF_ALU64_REG(BPF_SUB, R3, R2),
BPF_ALU64_REG(BPF_SUB, R3, R4),
BPF_ALU64_REG(BPF_SUB, R3, R5),
BPF_ALU64_REG(BPF_SUB, R3, R6),
BPF_ALU64_REG(BPF_SUB, R3, R7),
BPF_ALU64_REG(BPF_SUB, R3, R8),
BPF_ALU64_REG(BPF_SUB, R3, R9),
BPF_ALU64_IMM(BPF_SUB, R3, 10),
BPF_ALU64_REG(BPF_SUB, R4, R0),
BPF_ALU64_REG(BPF_SUB, R4, R1),
BPF_ALU64_REG(BPF_SUB, R4, R2),
BPF_ALU64_REG(BPF_SUB, R4, R3),
BPF_ALU64_REG(BPF_SUB, R4, R5),
BPF_ALU64_REG(BPF_SUB, R4, R6),
BPF_ALU64_REG(BPF_SUB, R4, R7),
BPF_ALU64_REG(BPF_SUB, R4, R8),
BPF_ALU64_REG(BPF_SUB, R4, R9),
BPF_ALU64_IMM(BPF_SUB, R4, 10),
BPF_ALU64_REG(BPF_SUB, R5, R0),
BPF_ALU64_REG(BPF_SUB, R5, R1),
BPF_ALU64_REG(BPF_SUB, R5, R2),
BPF_ALU64_REG(BPF_SUB, R5, R3),
BPF_ALU64_REG(BPF_SUB, R5, R4),
BPF_ALU64_REG(BPF_SUB, R5, R6),
BPF_ALU64_REG(BPF_SUB, R5, R7),
BPF_ALU64_REG(BPF_SUB, R5, R8),
BPF_ALU64_REG(BPF_SUB, R5, R9),
BPF_ALU64_IMM(BPF_SUB, R5, 10),
BPF_ALU64_REG(BPF_SUB, R6, R0),
BPF_ALU64_REG(BPF_SUB, R6, R1),
BPF_ALU64_REG(BPF_SUB, R6, R2),
BPF_ALU64_REG(BPF_SUB, R6, R3),
BPF_ALU64_REG(BPF_SUB, R6, R4),
BPF_ALU64_REG(BPF_SUB, R6, R5),
BPF_ALU64_REG(BPF_SUB, R6, R7),
BPF_ALU64_REG(BPF_SUB, R6, R8),
BPF_ALU64_REG(BPF_SUB, R6, R9),
BPF_ALU64_IMM(BPF_SUB, R6, 10),
BPF_ALU64_REG(BPF_SUB, R7, R0),
BPF_ALU64_REG(BPF_SUB, R7, R1),
BPF_ALU64_REG(BPF_SUB, R7, R2),
BPF_ALU64_REG(BPF_SUB, R7, R3),
BPF_ALU64_REG(BPF_SUB, R7, R4),
BPF_ALU64_REG(BPF_SUB, R7, R5),
BPF_ALU64_REG(BPF_SUB, R7, R6),
BPF_ALU64_REG(BPF_SUB, R7, R8),
BPF_ALU64_REG(BPF_SUB, R7, R9),
BPF_ALU64_IMM(BPF_SUB, R7, 10),
BPF_ALU64_REG(BPF_SUB, R8, R0),
BPF_ALU64_REG(BPF_SUB, R8, R1),
BPF_ALU64_REG(BPF_SUB, R8, R2),
BPF_ALU64_REG(BPF_SUB, R8, R3),
BPF_ALU64_REG(BPF_SUB, R8, R4),
BPF_ALU64_REG(BPF_SUB, R8, R5),
BPF_ALU64_REG(BPF_SUB, R8, R6),
BPF_ALU64_REG(BPF_SUB, R8, R7),
BPF_ALU64_REG(BPF_SUB, R8, R9),
BPF_ALU64_IMM(BPF_SUB, R8, 10),
BPF_ALU64_REG(BPF_SUB, R9, R0),
BPF_ALU64_REG(BPF_SUB, R9, R1),
BPF_ALU64_REG(BPF_SUB, R9, R2),
BPF_ALU64_REG(BPF_SUB, R9, R3),
BPF_ALU64_REG(BPF_SUB, R9, R4),
BPF_ALU64_REG(BPF_SUB, R9, R5),
BPF_ALU64_REG(BPF_SUB, R9, R6),
BPF_ALU64_REG(BPF_SUB, R9, R7),
BPF_ALU64_REG(BPF_SUB, R9, R8),
BPF_ALU64_IMM(BPF_SUB, R9, 10),
BPF_ALU64_IMM(BPF_SUB, R0, 10),
BPF_ALU64_IMM(BPF_NEG, R0, 0),
BPF_ALU64_REG(BPF_SUB, R0, R1),
BPF_ALU64_REG(BPF_SUB, R0, R2),
BPF_ALU64_REG(BPF_SUB, R0, R3),
BPF_ALU64_REG(BPF_SUB, R0, R4),
BPF_ALU64_REG(BPF_SUB, R0, R5),
BPF_ALU64_REG(BPF_SUB, R0, R6),
BPF_ALU64_REG(BPF_SUB, R0, R7),
BPF_ALU64_REG(BPF_SUB, R0, R8),
BPF_ALU64_REG(BPF_SUB, R0, R9),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 11 } }
},
{ /* Mainly checking JIT here. */
"INT: XOR",
.u.insns_int = {
BPF_ALU64_REG(BPF_SUB, R0, R0),
BPF_ALU64_REG(BPF_XOR, R1, R1),
BPF_JMP_REG(BPF_JEQ, R0, R1, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_MOV, R0, 10),
BPF_ALU64_IMM(BPF_MOV, R1, -1),
BPF_ALU64_REG(BPF_SUB, R1, R1),
BPF_ALU64_REG(BPF_XOR, R2, R2),
BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_SUB, R2, R2),
BPF_ALU64_REG(BPF_XOR, R3, R3),
BPF_ALU64_IMM(BPF_MOV, R0, 10),
BPF_ALU64_IMM(BPF_MOV, R1, -1),
BPF_JMP_REG(BPF_JEQ, R2, R3, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_SUB, R3, R3),
BPF_ALU64_REG(BPF_XOR, R4, R4),
BPF_ALU64_IMM(BPF_MOV, R2, 1),
BPF_ALU64_IMM(BPF_MOV, R5, -1),
BPF_JMP_REG(BPF_JEQ, R3, R4, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_SUB, R4, R4),
BPF_ALU64_REG(BPF_XOR, R5, R5),
BPF_ALU64_IMM(BPF_MOV, R3, 1),
BPF_ALU64_IMM(BPF_MOV, R7, -1),
BPF_JMP_REG(BPF_JEQ, R5, R4, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_MOV, R5, 1),
BPF_ALU64_REG(BPF_SUB, R5, R5),
BPF_ALU64_REG(BPF_XOR, R6, R6),
BPF_ALU64_IMM(BPF_MOV, R1, 1),
BPF_ALU64_IMM(BPF_MOV, R8, -1),
BPF_JMP_REG(BPF_JEQ, R5, R6, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_SUB, R6, R6),
BPF_ALU64_REG(BPF_XOR, R7, R7),
BPF_JMP_REG(BPF_JEQ, R7, R6, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_SUB, R7, R7),
BPF_ALU64_REG(BPF_XOR, R8, R8),
BPF_JMP_REG(BPF_JEQ, R7, R8, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_SUB, R8, R8),
BPF_ALU64_REG(BPF_XOR, R9, R9),
BPF_JMP_REG(BPF_JEQ, R9, R8, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_SUB, R9, R9),
BPF_ALU64_REG(BPF_XOR, R0, R0),
BPF_JMP_REG(BPF_JEQ, R9, R0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_SUB, R1, R1),
BPF_ALU64_REG(BPF_XOR, R0, R0),
BPF_JMP_REG(BPF_JEQ, R9, R0, 2),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } }
},
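	/* First product below: 11 * 11 * 1 * 2 * ... * 10 = 11^2 * 10! = 439084800. */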
{ /* Mainly checking JIT here. */
"INT: MUL",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 11),
BPF_ALU64_IMM(BPF_MOV, R1, 1),
BPF_ALU64_IMM(BPF_MOV, R2, 2),
BPF_ALU64_IMM(BPF_MOV, R3, 3),
BPF_ALU64_IMM(BPF_MOV, R4, 4),
BPF_ALU64_IMM(BPF_MOV, R5, 5),
BPF_ALU64_IMM(BPF_MOV, R6, 6),
BPF_ALU64_IMM(BPF_MOV, R7, 7),
BPF_ALU64_IMM(BPF_MOV, R8, 8),
BPF_ALU64_IMM(BPF_MOV, R9, 9),
BPF_ALU64_REG(BPF_MUL, R0, R0),
BPF_ALU64_REG(BPF_MUL, R0, R1),
BPF_ALU64_REG(BPF_MUL, R0, R2),
BPF_ALU64_REG(BPF_MUL, R0, R3),
BPF_ALU64_REG(BPF_MUL, R0, R4),
BPF_ALU64_REG(BPF_MUL, R0, R5),
BPF_ALU64_REG(BPF_MUL, R0, R6),
BPF_ALU64_REG(BPF_MUL, R0, R7),
BPF_ALU64_REG(BPF_MUL, R0, R8),
BPF_ALU64_REG(BPF_MUL, R0, R9),
BPF_ALU64_IMM(BPF_MUL, R0, 10),
BPF_JMP_IMM(BPF_JEQ, R0, 439084800, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_MUL, R1, R0),
BPF_ALU64_REG(BPF_MUL, R1, R2),
BPF_ALU64_REG(BPF_MUL, R1, R3),
BPF_ALU64_REG(BPF_MUL, R1, R4),
BPF_ALU64_REG(BPF_MUL, R1, R5),
BPF_ALU64_REG(BPF_MUL, R1, R6),
BPF_ALU64_REG(BPF_MUL, R1, R7),
BPF_ALU64_REG(BPF_MUL, R1, R8),
BPF_ALU64_REG(BPF_MUL, R1, R9),
BPF_ALU64_IMM(BPF_MUL, R1, 10),
BPF_ALU64_REG(BPF_MOV, R2, R1),
BPF_ALU64_IMM(BPF_RSH, R2, 32),
BPF_JMP_IMM(BPF_JEQ, R2, 0x5a924, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_LSH, R1, 32),
BPF_ALU64_IMM(BPF_ARSH, R1, 32),
BPF_JMP_IMM(BPF_JEQ, R1, 0xebb90000, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_MUL, R2, R0),
BPF_ALU64_REG(BPF_MUL, R2, R1),
BPF_ALU64_REG(BPF_MUL, R2, R3),
BPF_ALU64_REG(BPF_MUL, R2, R4),
BPF_ALU64_REG(BPF_MUL, R2, R5),
BPF_ALU64_REG(BPF_MUL, R2, R6),
BPF_ALU64_REG(BPF_MUL, R2, R7),
BPF_ALU64_REG(BPF_MUL, R2, R8),
BPF_ALU64_REG(BPF_MUL, R2, R9),
BPF_ALU64_IMM(BPF_MUL, R2, 10),
BPF_ALU64_IMM(BPF_RSH, R2, 32),
BPF_ALU64_REG(BPF_MOV, R0, R2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x35d97ef2 } }
},
{ /* Mainly checking JIT here. */
"MOV REG64",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
BPF_MOV64_REG(R1, R0),
BPF_MOV64_REG(R2, R1),
BPF_MOV64_REG(R3, R2),
BPF_MOV64_REG(R4, R3),
BPF_MOV64_REG(R5, R4),
BPF_MOV64_REG(R6, R5),
BPF_MOV64_REG(R7, R6),
BPF_MOV64_REG(R8, R7),
BPF_MOV64_REG(R9, R8),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_ALU64_IMM(BPF_MOV, R1, 0),
BPF_ALU64_IMM(BPF_MOV, R2, 0),
BPF_ALU64_IMM(BPF_MOV, R3, 0),
BPF_ALU64_IMM(BPF_MOV, R4, 0),
BPF_ALU64_IMM(BPF_MOV, R5, 0),
BPF_ALU64_IMM(BPF_MOV, R6, 0),
BPF_ALU64_IMM(BPF_MOV, R7, 0),
BPF_ALU64_IMM(BPF_MOV, R8, 0),
BPF_ALU64_IMM(BPF_MOV, R9, 0),
BPF_ALU64_REG(BPF_ADD, R0, R0),
BPF_ALU64_REG(BPF_ADD, R0, R1),
BPF_ALU64_REG(BPF_ADD, R0, R2),
BPF_ALU64_REG(BPF_ADD, R0, R3),
BPF_ALU64_REG(BPF_ADD, R0, R4),
BPF_ALU64_REG(BPF_ADD, R0, R5),
BPF_ALU64_REG(BPF_ADD, R0, R6),
BPF_ALU64_REG(BPF_ADD, R0, R7),
BPF_ALU64_REG(BPF_ADD, R0, R8),
BPF_ALU64_REG(BPF_ADD, R0, R9),
BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xfefe } }
},
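	/*
	 * ALU32 MOV writes a zero-extended 32-bit value, so storing 0
	 * clears the whole 64-bit register; the sum is 0 and only the
	 * final 0xfefe remains.
	 */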
{ /* Mainly checking JIT here. */
"MOV REG32",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
BPF_MOV64_REG(R1, R0),
BPF_MOV64_REG(R2, R1),
BPF_MOV64_REG(R3, R2),
BPF_MOV64_REG(R4, R3),
BPF_MOV64_REG(R5, R4),
BPF_MOV64_REG(R6, R5),
BPF_MOV64_REG(R7, R6),
BPF_MOV64_REG(R8, R7),
BPF_MOV64_REG(R9, R8),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_ALU32_IMM(BPF_MOV, R1, 0),
BPF_ALU32_IMM(BPF_MOV, R2, 0),
BPF_ALU32_IMM(BPF_MOV, R3, 0),
BPF_ALU32_IMM(BPF_MOV, R4, 0),
BPF_ALU32_IMM(BPF_MOV, R5, 0),
BPF_ALU32_IMM(BPF_MOV, R6, 0),
BPF_ALU32_IMM(BPF_MOV, R7, 0),
BPF_ALU32_IMM(BPF_MOV, R8, 0),
BPF_ALU32_IMM(BPF_MOV, R9, 0),
BPF_ALU64_REG(BPF_ADD, R0, R0),
BPF_ALU64_REG(BPF_ADD, R0, R1),
BPF_ALU64_REG(BPF_ADD, R0, R2),
BPF_ALU64_REG(BPF_ADD, R0, R3),
BPF_ALU64_REG(BPF_ADD, R0, R4),
BPF_ALU64_REG(BPF_ADD, R0, R5),
BPF_ALU64_REG(BPF_ADD, R0, R6),
BPF_ALU64_REG(BPF_ADD, R0, R7),
BPF_ALU64_REG(BPF_ADD, R0, R8),
BPF_ALU64_REG(BPF_ADD, R0, R9),
BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xfefe } }
},
{ /* Mainly checking JIT here. */
"LD IMM64",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
BPF_MOV64_REG(R1, R0),
BPF_MOV64_REG(R2, R1),
BPF_MOV64_REG(R3, R2),
BPF_MOV64_REG(R4, R3),
BPF_MOV64_REG(R5, R4),
BPF_MOV64_REG(R6, R5),
BPF_MOV64_REG(R7, R6),
BPF_MOV64_REG(R8, R7),
BPF_MOV64_REG(R9, R8),
BPF_LD_IMM64(R0, 0x0LL),
BPF_LD_IMM64(R1, 0x0LL),
BPF_LD_IMM64(R2, 0x0LL),
BPF_LD_IMM64(R3, 0x0LL),
BPF_LD_IMM64(R4, 0x0LL),
BPF_LD_IMM64(R5, 0x0LL),
BPF_LD_IMM64(R6, 0x0LL),
BPF_LD_IMM64(R7, 0x0LL),
BPF_LD_IMM64(R8, 0x0LL),
BPF_LD_IMM64(R9, 0x0LL),
BPF_ALU64_REG(BPF_ADD, R0, R0),
BPF_ALU64_REG(BPF_ADD, R0, R1),
BPF_ALU64_REG(BPF_ADD, R0, R2),
BPF_ALU64_REG(BPF_ADD, R0, R3),
BPF_ALU64_REG(BPF_ADD, R0, R4),
BPF_ALU64_REG(BPF_ADD, R0, R5),
BPF_ALU64_REG(BPF_ADD, R0, R6),
BPF_ALU64_REG(BPF_ADD, R0, R7),
BPF_ALU64_REG(BPF_ADD, R0, R8),
BPF_ALU64_REG(BPF_ADD, R0, R9),
BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xfefe } }
},
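	/* R2 = 2 ^ 3 = 1, R0 = 10 / 1 = 10, 10 % 3 = 1, then return -1. */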
{
"INT: ALU MIX",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 11),
BPF_ALU64_IMM(BPF_ADD, R0, -1),
BPF_ALU64_IMM(BPF_MOV, R2, 2),
BPF_ALU64_IMM(BPF_XOR, R2, 3),
BPF_ALU64_REG(BPF_DIV, R0, R2),
BPF_JMP_IMM(BPF_JEQ, R0, 10, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_MOD, R0, 3),
BPF_JMP_IMM(BPF_JEQ, R0, 1, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_MOV, R0, -1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -1 } }
},
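	/*
	 * First check: -1234 as u32 is 0xfffffb2e, and a 32-bit logical
	 * shift right by 1 gives 0x7ffffd97; later, -1234 << 46 followed
	 * by an arithmetic >> 47 halves it to -617.
	 */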
{
"INT: shifts by register",
.u.insns_int = {
BPF_MOV64_IMM(R0, -1234),
BPF_MOV64_IMM(R1, 1),
BPF_ALU32_REG(BPF_RSH, R0, R1),
BPF_JMP_IMM(BPF_JEQ, R0, 0x7ffffd97, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(R2, 1),
BPF_ALU64_REG(BPF_LSH, R0, R2),
BPF_MOV32_IMM(R4, -1234),
BPF_JMP_REG(BPF_JEQ, R0, R4, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_AND, R4, 63),
BPF_ALU64_REG(BPF_LSH, R0, R4), /* R0 <= 46 */
BPF_MOV64_IMM(R3, 47),
BPF_ALU64_REG(BPF_ARSH, R0, R3),
BPF_JMP_IMM(BPF_JEQ, R0, -617, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(R2, 1),
BPF_ALU64_REG(BPF_LSH, R4, R2), /* R4 = 46 << 1 */
BPF_JMP_IMM(BPF_JEQ, R4, 92, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(R4, 4),
BPF_ALU64_REG(BPF_LSH, R4, R4), /* R4 = 4 << 4 */
BPF_JMP_IMM(BPF_JEQ, R4, 64, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(R4, 5),
BPF_ALU32_REG(BPF_LSH, R4, R4), /* R4 = 5 << 5 */
BPF_JMP_IMM(BPF_JEQ, R4, 160, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(R0, -1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -1 } }
},
#ifdef CONFIG_32BIT
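	/*
	 * On 32-bit hosts the context pointer is passed in the low word
	 * of R1; the program returns 1 only if that word is nonzero and
	 * the high word reads back as zero.
	 */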
{
"INT: 32-bit context pointer word order and zero-extension",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_JMP32_IMM(BPF_JEQ, R1, 0, 3),
BPF_ALU64_IMM(BPF_RSH, R1, 32),
BPF_JMP32_IMM(BPF_JNE, R1, 0, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } }
},
#endif
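	/*
	 * The "check:" entries are malformed programs that the classic
	 * BPF checker must reject with -EINVAL (FLAG_EXPECTED_FAIL).
	 */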
{
"check: missing ret",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 1),
},
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
{ },
.fill_helper = NULL,
.expected_errcode = -EINVAL,
},
{
"check: div_k_0",
.u.insns = {
BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),
BPF_STMT(BPF_RET | BPF_K, 0)
},
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
{ },
.fill_helper = NULL,
.expected_errcode = -EINVAL,
},
{
"check: unknown insn",
.u.insns = {
/* seccomp insn, rejected in socket filter */
BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0),
BPF_STMT(BPF_RET | BPF_K, 0)
},
CLASSIC | FLAG_EXPECTED_FAIL,
{ },
{ },
.fill_helper = NULL,
.expected_errcode = -EINVAL,
},
{
"check: out of range spill/fill",
.u.insns = {
BPF_STMT(BPF_STX, 16),
BPF_STMT(BPF_RET | BPF_K, 0)
},
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
{ },
.fill_helper = NULL,
.expected_errcode = -EINVAL,
},
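	/*
	 * Stresses jump-offset handling across long runs of dead loads;
	 * the matching packet falls through to RET A, returning the last
	 * half-word loaded from offset 0 (0x001b).
	 */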
{
"JUMPS + HOLES",
.u.insns = {
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 15),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 3, 4),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 1, 2),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 2, 3),
BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 1, 2),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 2, 3),
BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 1, 2),
BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
BPF_STMT(BPF_RET | BPF_A, 0),
BPF_STMT(BPF_RET | BPF_A, 0),
},
CLASSIC,
{ 0x00, 0x1b, 0x21, 0x3c, 0x9d, 0xf8,
0x90, 0xe2, 0xba, 0x0a, 0x56, 0xb4,
0x08, 0x00,
0x45, 0x00, 0x00, 0x28, 0x00, 0x00,
0x20, 0x00, 0x40, 0x11, 0x00, 0x00, /* IP header */
0xc0, 0xa8, 0x33, 0x01,
0xc0, 0xa8, 0x33, 0x02,
0xbb, 0xb6,
0xa9, 0xfa,
0x00, 0x14, 0x00, 0x00,
0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0xcc },
{ { 88, 0x001b } }
},
{
"check: RET X",
.u.insns = {
BPF_STMT(BPF_RET | BPF_X, 0),
},
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
{ },
.fill_helper = NULL,
.expected_errcode = -EINVAL,
},
{
"check: LDX + RET X",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 42),
BPF_STMT(BPF_RET | BPF_X, 0),
},
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
{ },
.fill_helper = NULL,
.expected_errcode = -EINVAL,
},
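	/*
	 * Round-trips the value through all 16 scratch slots M[0..15],
	 * adding 1 per slot: 100 + 16 = 116.
	 */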
{ /* Mainly checking JIT here. */
"M[]: alt STX + LDX",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 100),
BPF_STMT(BPF_STX, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 0),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_STX, 1),
BPF_STMT(BPF_LDX | BPF_MEM, 1),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_STX, 2),
BPF_STMT(BPF_LDX | BPF_MEM, 2),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_STX, 3),
BPF_STMT(BPF_LDX | BPF_MEM, 3),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_STX, 4),
BPF_STMT(BPF_LDX | BPF_MEM, 4),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_STX, 5),
BPF_STMT(BPF_LDX | BPF_MEM, 5),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_STX, 6),
BPF_STMT(BPF_LDX | BPF_MEM, 6),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_STX, 7),
BPF_STMT(BPF_LDX | BPF_MEM, 7),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_STX, 8),
BPF_STMT(BPF_LDX | BPF_MEM, 8),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_STX, 9),
BPF_STMT(BPF_LDX | BPF_MEM, 9),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_STX, 10),
BPF_STMT(BPF_LDX | BPF_MEM, 10),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_STX, 11),
BPF_STMT(BPF_LDX | BPF_MEM, 11),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_STX, 12),
BPF_STMT(BPF_LDX | BPF_MEM, 12),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_STX, 13),
BPF_STMT(BPF_LDX | BPF_MEM, 13),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_STX, 14),
BPF_STMT(BPF_LDX | BPF_MEM, 14),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_STX, 15),
BPF_STMT(BPF_LDX | BPF_MEM, 15),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_RET | BPF_A, 0),
},
CLASSIC | FLAG_NO_DATA,
{ },
{ { 0, 116 } },
},
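	/*
	 * Fills all 16 scratch slots with distinct constants, then
	 * returns their 32-bit sum, 0x2a5a5e5.
	 */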
{ /* Mainly checking JIT here. */
"M[]: full STX + full LDX",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0xbadfeedb),
BPF_STMT(BPF_STX, 0),
BPF_STMT(BPF_LDX | BPF_IMM, 0xecabedae),
BPF_STMT(BPF_STX, 1),
BPF_STMT(BPF_LDX | BPF_IMM, 0xafccfeaf),
BPF_STMT(BPF_STX, 2),
BPF_STMT(BPF_LDX | BPF_IMM, 0xbffdcedc),
BPF_STMT(BPF_STX, 3),
BPF_STMT(BPF_LDX | BPF_IMM, 0xfbbbdccb),
BPF_STMT(BPF_STX, 4),
BPF_STMT(BPF_LDX | BPF_IMM, 0xfbabcbda),
BPF_STMT(BPF_STX, 5),
BPF_STMT(BPF_LDX | BPF_IMM, 0xaedecbdb),
BPF_STMT(BPF_STX, 6),
BPF_STMT(BPF_LDX | BPF_IMM, 0xadebbade),
BPF_STMT(BPF_STX, 7),
BPF_STMT(BPF_LDX | BPF_IMM, 0xfcfcfaec),
BPF_STMT(BPF_STX, 8),
BPF_STMT(BPF_LDX | BPF_IMM, 0xbcdddbdc),
BPF_STMT(BPF_STX, 9),
BPF_STMT(BPF_LDX | BPF_IMM, 0xfeefdfac),
BPF_STMT(BPF_STX, 10),
BPF_STMT(BPF_LDX | BPF_IMM, 0xcddcdeea),
BPF_STMT(BPF_STX, 11),
BPF_STMT(BPF_LDX | BPF_IMM, 0xaccfaebb),
BPF_STMT(BPF_STX, 12),
BPF_STMT(BPF_LDX | BPF_IMM, 0xbdcccdcf),
BPF_STMT(BPF_STX, 13),
BPF_STMT(BPF_LDX | BPF_IMM, 0xaaedecde),
BPF_STMT(BPF_STX, 14),
BPF_STMT(BPF_LDX | BPF_IMM, 0xfaeacdad),
BPF_STMT(BPF_STX, 15),
BPF_STMT(BPF_LDX | BPF_MEM, 0),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 1),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 2),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 3),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 4),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 5),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 6),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 7),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 8),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 9),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 10),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 11),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 12),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 13),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 14),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_LDX | BPF_MEM, 15),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_RET | BPF_A, 0),
},
CLASSIC | FLAG_NO_DATA,
{ },
{ { 0, 0x2a5a5e5 } },
},
{
"check: SKF_AD_MAX",
.u.insns = {
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF + SKF_AD_MAX),
BPF_STMT(BPF_RET | BPF_A, 0),
},
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
{ },
.fill_helper = NULL,
.expected_errcode = -EINVAL,
},
{ /* Passes checker but fails during runtime. */
"LD [SKF_AD_OFF-1]",
.u.insns = {
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
SKF_AD_OFF - 1),
BPF_STMT(BPF_RET | BPF_K, 1),
},
CLASSIC,
{ },
{ { 1, 0 } },
},
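	/*
	 * Splits the 64-bit immediate: high word 0x5678, low word
	 * 0x1234; the final 0x1ffffffff >> 32 leaves R0 = 1.
	 */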
{
"load 64-bit immediate",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x567800001234LL),
BPF_MOV64_REG(R2, R1),
BPF_MOV64_REG(R3, R2),
BPF_ALU64_IMM(BPF_RSH, R2, 32),
BPF_ALU64_IMM(BPF_LSH, R3, 32),
BPF_ALU64_IMM(BPF_RSH, R3, 32),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_JMP_IMM(BPF_JEQ, R2, 0x5678, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1),
BPF_EXIT_INSN(),
BPF_LD_IMM64(R0, 0x1ffffffffLL),
BPF_ALU64_IMM(BPF_RSH, R0, 32), /* R0 = 1 */
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } }
},
/* BPF_ALU | BPF_MOV | BPF_X */
{
"ALU_MOV_X: dst = 2",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU32_REG(BPF_MOV, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"ALU_MOV_X: dst = 4294967295",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
BPF_ALU32_REG(BPF_MOV, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 4294967295U } },
},
{
"ALU64_MOV_X: dst = 2",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU64_REG(BPF_MOV, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"ALU64_MOV_X: dst = 4294967295",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
BPF_ALU64_REG(BPF_MOV, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 4294967295U } },
},
/* BPF_ALU | BPF_MOV | BPF_K */
{
"ALU_MOV_K: dst = 2",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"ALU_MOV_K: dst = 4294967295",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 4294967295U),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 4294967295U } },
},
{
"ALU_MOV_K: 0x0000ffffffff0000 = 0x00000000ffffffff",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
BPF_LD_IMM64(R3, 0x00000000ffffffffLL),
BPF_ALU32_IMM(BPF_MOV, R2, 0xffffffff),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU_MOV_K: small negative",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -123),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -123 } }
},
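	/*
	 * The extension tests pin down immediate handling: ALU32 MOV
	 * zero-extends its result (the upper word reads back 0), while
	 * ALU64 MOV sign-extends the 32-bit immediate (upper word
	 * 0xffffffff for negative values).
	 */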
{
"ALU_MOV_K: small negative zero extension",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -123),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } }
},
{
"ALU_MOV_K: large negative",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -123456789),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -123456789 } }
},
{
"ALU_MOV_K: large negative zero extension",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -123456789),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } }
},
{
"ALU64_MOV_K: dst = 2",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"ALU64_MOV_K: dst = 2147483647",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 2147483647),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2147483647 } },
},
{
"ALU64_OR_K: dst = 0x0",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
BPF_LD_IMM64(R3, 0x0),
BPF_ALU64_IMM(BPF_MOV, R2, 0x0),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_MOV_K: dst = -1",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
BPF_ALU64_IMM(BPF_MOV, R2, 0xffffffff),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_MOV_K: small negative",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, -123),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -123 } }
},
{
"ALU64_MOV_K: small negative sign extension",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, -123),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffffffff } }
},
{
"ALU64_MOV_K: large negative",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, -123456789),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -123456789 } }
},
{
"ALU64_MOV_K: large negative sign extension",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, -123456789),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffffffff } }
},
/* BPF_ALU | BPF_ADD | BPF_X */
{
"ALU_ADD_X: 1 + 2 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU32_REG(BPF_ADD, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU_ADD_X: 1 + 4294967294 = 4294967295",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
BPF_ALU32_REG(BPF_ADD, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 4294967295U } },
},
{
"ALU_ADD_X: 2 + 4294967294 = 0",
.u.insns_int = {
BPF_LD_IMM64(R0, 2),
BPF_LD_IMM64(R1, 4294967294U),
BPF_ALU32_REG(BPF_ADD, R0, R1),
BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU64_ADD_X: 1 + 2 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU64_REG(BPF_ADD, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU64_ADD_X: 1 + 4294967294 = 4294967295",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
BPF_ALU64_REG(BPF_ADD, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 4294967295U } },
},
{
"ALU64_ADD_X: 2 + 4294967294 = 4294967296",
.u.insns_int = {
BPF_LD_IMM64(R0, 2),
BPF_LD_IMM64(R1, 4294967294U),
BPF_LD_IMM64(R2, 4294967296ULL),
BPF_ALU64_REG(BPF_ADD, R0, R1),
BPF_JMP_REG(BPF_JEQ, R0, R2, 2),
BPF_MOV32_IMM(R0, 0),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_ALU | BPF_ADD | BPF_K */
{
"ALU_ADD_K: 1 + 2 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_ADD, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU_ADD_K: 3 + 0 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU32_IMM(BPF_ADD, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU_ADD_K: 1 + 4294967294 = 4294967295",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_ADD, R0, 4294967294U),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 4294967295U } },
},
{
"ALU_ADD_K: 4294967294 + 2 = 0",
.u.insns_int = {
BPF_LD_IMM64(R0, 4294967294U),
BPF_ALU32_IMM(BPF_ADD, R0, 2),
BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
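	/*
	 * In ALU32, the immediate 0xffffffff is added in 32 bits and the
	 * result zero-extended, giving 0x00000000ffffffff; the ALU64
	 * variant further below sign-extends the immediate to -1 instead.
	 */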
{
"ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0),
BPF_LD_IMM64(R3, 0x00000000ffffffff),
BPF_ALU32_IMM(BPF_ADD, R2, 0xffffffff),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU_ADD_K: 0 + 0xffff = 0xffff",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0),
BPF_LD_IMM64(R3, 0xffff),
BPF_ALU32_IMM(BPF_ADD, R2, 0xffff),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0),
BPF_LD_IMM64(R3, 0x7fffffff),
BPF_ALU32_IMM(BPF_ADD, R2, 0x7fffffff),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU_ADD_K: 0 + 0x80000000 = 0x80000000",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0),
BPF_LD_IMM64(R3, 0x80000000),
BPF_ALU32_IMM(BPF_ADD, R2, 0x80000000),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU_ADD_K: 0 + 0x80008000 = 0x80008000",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0),
BPF_LD_IMM64(R3, 0x80008000),
BPF_ALU32_IMM(BPF_ADD, R2, 0x80008000),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_ADD_K: 1 + 2 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU64_IMM(BPF_ADD, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU64_ADD_K: 3 + 0 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU64_IMM(BPF_ADD, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU64_ADD_K: 1 + 2147483646 = 2147483647",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU64_IMM(BPF_ADD, R0, 2147483646),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2147483647 } },
},
{
"ALU64_ADD_K: 4294967294 + 2 = 4294967296",
.u.insns_int = {
BPF_LD_IMM64(R0, 4294967294U),
BPF_LD_IMM64(R1, 4294967296ULL),
BPF_ALU64_IMM(BPF_ADD, R0, 2),
BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU64_ADD_K: 2147483646 + -2147483647 = -1",
.u.insns_int = {
BPF_LD_IMM64(R0, 2147483646),
BPF_ALU64_IMM(BPF_ADD, R0, -2147483647),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -1 } },
},
{
"ALU64_ADD_K: 1 + 0 = 1",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x1),
BPF_LD_IMM64(R3, 0x1),
BPF_ALU64_IMM(BPF_ADD, R2, 0x0),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_ADD_K: 0 + (-1) = 0xffffffffffffffff",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0),
BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
BPF_ALU64_IMM(BPF_ADD, R2, 0xffffffff),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_ADD_K: 0 + 0xffff = 0xffff",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0),
BPF_LD_IMM64(R3, 0xffff),
BPF_ALU64_IMM(BPF_ADD, R2, 0xffff),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0),
BPF_LD_IMM64(R3, 0x7fffffff),
BPF_ALU64_IMM(BPF_ADD, R2, 0x7fffffff),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_ADD_K: 0 + 0x80000000 = 0xffffffff80000000",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0),
BPF_LD_IMM64(R3, 0xffffffff80000000LL),
BPF_ALU64_IMM(BPF_ADD, R2, 0x80000000),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU_ADD_K: 0 + 0x80008000 = 0xffffffff80008000",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0),
BPF_LD_IMM64(R3, 0xffffffff80008000LL),
BPF_ALU64_IMM(BPF_ADD, R2, 0x80008000),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
/* BPF_ALU | BPF_SUB | BPF_X */
{
"ALU_SUB_X: 3 - 1 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU32_IMM(BPF_MOV, R1, 1),
BPF_ALU32_REG(BPF_SUB, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"ALU_SUB_X: 4294967295 - 4294967294 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 4294967295U),
BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
BPF_ALU32_REG(BPF_SUB, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU64_SUB_X: 3 - 1 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU32_IMM(BPF_MOV, R1, 1),
BPF_ALU64_REG(BPF_SUB, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"ALU64_SUB_X: 4294967295 - 4294967294 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 4294967295U),
BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
BPF_ALU64_REG(BPF_SUB, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_ALU | BPF_SUB | BPF_K */
{
"ALU_SUB_K: 3 - 1 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU32_IMM(BPF_SUB, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"ALU_SUB_K: 3 - 0 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU32_IMM(BPF_SUB, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU_SUB_K: 4294967295 - 4294967294 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 4294967295U),
BPF_ALU32_IMM(BPF_SUB, R0, 4294967294U),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU64_SUB_K: 3 - 1 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU64_IMM(BPF_SUB, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"ALU64_SUB_K: 3 - 0 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU64_IMM(BPF_SUB, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU64_SUB_K: 4294967294 - 4294967295 = -1",
.u.insns_int = {
BPF_LD_IMM64(R0, 4294967294U),
BPF_ALU64_IMM(BPF_SUB, R0, 4294967295U),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -1 } },
},
{
"ALU64_ADD_K: 2147483646 - 2147483647 = -1",
.u.insns_int = {
BPF_LD_IMM64(R0, 2147483646),
BPF_ALU64_IMM(BPF_SUB, R0, 2147483647),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -1 } },
},
/* BPF_ALU | BPF_MUL | BPF_X */
{
"ALU_MUL_X: 2 * 3 = 6",
.u.insns_int = {
BPF_LD_IMM64(R0, 2),
BPF_ALU32_IMM(BPF_MOV, R1, 3),
BPF_ALU32_REG(BPF_MUL, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 6 } },
},
{
"ALU_MUL_X: 2 * 0x7FFFFFF8 = 0xFFFFFFF0",
.u.insns_int = {
BPF_LD_IMM64(R0, 2),
BPF_ALU32_IMM(BPF_MOV, R1, 0x7FFFFFF8),
BPF_ALU32_REG(BPF_MUL, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xFFFFFFF0 } },
},
{
"ALU_MUL_X: -1 * -1 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, -1),
BPF_ALU32_IMM(BPF_MOV, R1, -1),
BPF_ALU32_REG(BPF_MUL, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU64_MUL_X: 2 * 3 = 6",
.u.insns_int = {
BPF_LD_IMM64(R0, 2),
BPF_ALU32_IMM(BPF_MOV, R1, 3),
BPF_ALU64_REG(BPF_MUL, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 6 } },
},
{
"ALU64_MUL_X: 1 * 2147483647 = 2147483647",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_MOV, R1, 2147483647),
BPF_ALU64_REG(BPF_MUL, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2147483647 } },
},
{
"ALU64_MUL_X: 64x64 multiply, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0fedcba987654321LL),
BPF_LD_IMM64(R1, 0x123456789abcdef0LL),
BPF_ALU64_REG(BPF_MUL, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xe5618cf0 } }
},
{
"ALU64_MUL_X: 64x64 multiply, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0fedcba987654321LL),
BPF_LD_IMM64(R1, 0x123456789abcdef0LL),
BPF_ALU64_REG(BPF_MUL, R0, R1),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x2236d88f } }
},
/* BPF_ALU | BPF_MUL | BPF_K */
{
"ALU_MUL_K: 2 * 3 = 6",
.u.insns_int = {
BPF_LD_IMM64(R0, 2),
BPF_ALU32_IMM(BPF_MUL, R0, 3),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 6 } },
},
{
"ALU_MUL_K: 3 * 1 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU32_IMM(BPF_MUL, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU_MUL_K: 2 * 0x7FFFFFF8 = 0xFFFFFFF0",
.u.insns_int = {
BPF_LD_IMM64(R0, 2),
BPF_ALU32_IMM(BPF_MUL, R0, 0x7FFFFFF8),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xFFFFFFF0 } },
},
{
"ALU_MUL_K: 1 * (-1) = 0x00000000ffffffff",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x1),
BPF_LD_IMM64(R3, 0x00000000ffffffff),
BPF_ALU32_IMM(BPF_MUL, R2, 0xffffffff),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_MUL_K: 2 * 3 = 6",
.u.insns_int = {
BPF_LD_IMM64(R0, 2),
BPF_ALU64_IMM(BPF_MUL, R0, 3),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 6 } },
},
{
"ALU64_MUL_K: 3 * 1 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU64_IMM(BPF_MUL, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU64_MUL_K: 1 * 2147483647 = 2147483647",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU64_IMM(BPF_MUL, R0, 2147483647),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2147483647 } },
},
{
"ALU64_MUL_K: 1 * -2147483647 = -2147483647",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU64_IMM(BPF_MUL, R0, -2147483647),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -2147483647 } },
},
{
"ALU64_MUL_K: 1 * (-1) = 0xffffffffffffffff",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x1),
BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
BPF_ALU64_IMM(BPF_MUL, R2, 0xffffffff),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_MUL_K: 64x32 multiply, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ALU64_IMM(BPF_MUL, R0, 0x12345678),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xe242d208 } }
},
{
"ALU64_MUL_K: 64x32 multiply, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ALU64_IMM(BPF_MUL, R0, 0x12345678),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xc28f5c28 } }
},
/* BPF_ALU | BPF_DIV | BPF_X */
{
"ALU_DIV_X: 6 / 2 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 6),
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU32_REG(BPF_DIV, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU_DIV_X: 4294967295 / 4294967295 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 4294967295U),
BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
BPF_ALU32_REG(BPF_DIV, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU64_DIV_X: 6 / 2 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 6),
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU64_REG(BPF_DIV, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU64_DIV_X: 2147483647 / 2147483647 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 2147483647),
BPF_ALU32_IMM(BPF_MOV, R1, 2147483647),
BPF_ALU64_REG(BPF_DIV, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
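	/* eBPF division is unsigned: 0xffffffffffffffff / 0xffffffffffffffff = 1. */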
{
"ALU64_DIV_X: 0xffffffffffffffff / (-1) = 0x0000000000000001",
.u.insns_int = {
BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
BPF_LD_IMM64(R4, 0xffffffffffffffffLL),
BPF_LD_IMM64(R3, 0x0000000000000001LL),
BPF_ALU64_REG(BPF_DIV, R2, R4),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
/* BPF_ALU | BPF_DIV | BPF_K */
{
"ALU_DIV_K: 6 / 2 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 6),
BPF_ALU32_IMM(BPF_DIV, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU_DIV_K: 3 / 1 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU32_IMM(BPF_DIV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU_DIV_K: 4294967295 / 4294967295 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 4294967295U),
BPF_ALU32_IMM(BPF_DIV, R0, 4294967295U),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU_DIV_K: 0xffffffffffffffff / (-1) = 0x1",
.u.insns_int = {
BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
BPF_LD_IMM64(R3, 0x1UL),
BPF_ALU32_IMM(BPF_DIV, R2, 0xffffffff),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_DIV_K: 6 / 2 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 6),
BPF_ALU64_IMM(BPF_DIV, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU64_DIV_K: 3 / 1 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU64_IMM(BPF_DIV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU64_DIV_K: 2147483647 / 2147483647 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 2147483647),
BPF_ALU64_IMM(BPF_DIV, R0, 2147483647),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU64_DIV_K: 0xffffffffffffffff / (-1) = 0x0000000000000001",
.u.insns_int = {
BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
BPF_LD_IMM64(R3, 0x0000000000000001LL),
BPF_ALU64_IMM(BPF_DIV, R2, 0xffffffff),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
/* BPF_ALU | BPF_MOD | BPF_X */
{
"ALU_MOD_X: 3 % 2 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU32_REG(BPF_MOD, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU_MOD_X: 4294967295 % 4294967293 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 4294967295U),
BPF_ALU32_IMM(BPF_MOV, R1, 4294967293U),
BPF_ALU32_REG(BPF_MOD, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"ALU64_MOD_X: 3 % 2 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU64_REG(BPF_MOD, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU64_MOD_X: 2147483647 % 2147483645 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 2147483647),
BPF_ALU32_IMM(BPF_MOV, R1, 2147483645),
BPF_ALU64_REG(BPF_MOD, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
/* BPF_ALU | BPF_MOD | BPF_K */
{
"ALU_MOD_K: 3 % 2 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU32_IMM(BPF_MOD, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU_MOD_K: 3 % 1 = 0",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU32_IMM(BPF_MOD, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
},
{
"ALU_MOD_K: 4294967295 % 4294967293 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 4294967295U),
BPF_ALU32_IMM(BPF_MOD, R0, 4294967293U),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"ALU64_MOD_K: 3 % 2 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU64_IMM(BPF_MOD, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU64_MOD_K: 3 % 1 = 0",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU64_IMM(BPF_MOD, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
},
{
"ALU64_MOD_K: 2147483647 % 2147483645 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 2147483647),
BPF_ALU64_IMM(BPF_MOD, R0, 2147483645),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
/* BPF_ALU | BPF_AND | BPF_X */
{
"ALU_AND_X: 3 & 2 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU32_REG(BPF_AND, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"ALU_AND_X: 0xffffffff & 0xffffffff = 0xffffffff",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xffffffff),
BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
BPF_ALU32_REG(BPF_AND, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffffffff } },
},
{
"ALU64_AND_X: 3 & 2 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU64_REG(BPF_AND, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"ALU64_AND_X: 0xffffffff & 0xffffffff = 0xffffffff",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xffffffff),
BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
BPF_ALU64_REG(BPF_AND, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffffffff } },
},
/* BPF_ALU | BPF_AND | BPF_K */
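	/*
	 * The "Zero extension" tests check that a 32-bit ALU op clears
	 * the upper half of the 64-bit destination register. The "Sign
	 * extension" tests check that a 64-bit ALU op sign-extends its
	 * 32-bit immediate first: 0x0f0f0f0f keeps a zero upper half,
	 * while 0xf0f0f0f0 behaves as 0xfffffffff0f0f0f0.
	 */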
{
"ALU_AND_K: 3 & 2 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU32_IMM(BPF_AND, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"ALU_AND_K: 0xffffffff & 0xffffffff = 0xffffffff",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xffffffff),
BPF_ALU32_IMM(BPF_AND, R0, 0xffffffff),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffffffff } },
},
{
"ALU_AND_K: Small immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
BPF_ALU32_IMM(BPF_AND, R0, 15),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 4 } }
},
{
"ALU_AND_K: Large immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0xf1f2f3f4),
BPF_ALU32_IMM(BPF_AND, R0, 0xafbfcfdf),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xa1b2c3d4 } }
},
{
"ALU_AND_K: Zero extension",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_LD_IMM64(R1, 0x0000000080a0c0e0LL),
BPF_ALU32_IMM(BPF_AND, R0, 0xf0f0f0f0),
BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } }
},
{
"ALU64_AND_K: 3 & 2 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU64_IMM(BPF_AND, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"ALU64_AND_K: 0xffffffff & 0xffffffff = 0xffffffff",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xffffffff),
BPF_ALU64_IMM(BPF_AND, R0, 0xffffffff),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffffffff } },
},
{
"ALU64_AND_K: 0x0000ffffffff0000 & 0x0 = 0x0000000000000000",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
BPF_LD_IMM64(R3, 0x0000000000000000LL),
BPF_ALU64_IMM(BPF_AND, R2, 0x0),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_AND_K: 0x0000ffffffff0000 & -1 = 0x0000ffffffff0000",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_AND_K: 0xffffffffffffffff & -1 = 0xffffffffffffffff",
.u.insns_int = {
BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_AND_K: Sign extension 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_LD_IMM64(R1, 0x00000000090b0d0fLL),
BPF_ALU64_IMM(BPF_AND, R0, 0x0f0f0f0f),
BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } }
},
{
"ALU64_AND_K: Sign extension 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_LD_IMM64(R1, 0x0123456780a0c0e0LL),
BPF_ALU64_IMM(BPF_AND, R0, 0xf0f0f0f0),
BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } }
},
/* BPF_ALU | BPF_OR | BPF_X */
{
"ALU_OR_X: 1 | 2 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU32_REG(BPF_OR, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU_OR_X: 0x0 | 0xffffffff = 0xffffffff",
.u.insns_int = {
BPF_LD_IMM64(R0, 0),
BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
BPF_ALU32_REG(BPF_OR, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffffffff } },
},
{
"ALU64_OR_X: 1 | 2 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU64_REG(BPF_OR, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU64_OR_X: 0 | 0xffffffff = 0xffffffff",
.u.insns_int = {
BPF_LD_IMM64(R0, 0),
BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
BPF_ALU64_REG(BPF_OR, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffffffff } },
},
/* BPF_ALU | BPF_OR | BPF_K */
{
"ALU_OR_K: 1 | 2 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_OR, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU_OR_K: 0 & 0xffffffff = 0xffffffff",
.u.insns_int = {
BPF_LD_IMM64(R0, 0),
BPF_ALU32_IMM(BPF_OR, R0, 0xffffffff),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffffffff } },
},
{
"ALU_OR_K: Small immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
BPF_ALU32_IMM(BPF_OR, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x01020305 } }
},
{
"ALU_OR_K: Large immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
BPF_ALU32_IMM(BPF_OR, R0, 0xa0b0c0d0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xa1b2c3d4 } }
},
{
"ALU_OR_K: Zero extension",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_LD_IMM64(R1, 0x00000000f9fbfdffLL),
BPF_ALU32_IMM(BPF_OR, R0, 0xf0f0f0f0),
BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } }
},
{
"ALU64_OR_K: 1 | 2 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU64_IMM(BPF_OR, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU64_OR_K: 0 & 0xffffffff = 0xffffffff",
.u.insns_int = {
BPF_LD_IMM64(R0, 0),
BPF_ALU64_IMM(BPF_OR, R0, 0xffffffff),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffffffff } },
},
{
"ALU64_OR_K: 0x0000ffffffff0000 | 0x0 = 0x0000ffffffff0000",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
BPF_ALU64_IMM(BPF_OR, R2, 0x0),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_OR_K: 0x0000ffffffff0000 | -1 = 0xffffffffffffffff",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_OR_K: 0x000000000000000 | -1 = 0xffffffffffffffff",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0000000000000000LL),
BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_OR_K: Sign extension 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_LD_IMM64(R1, 0x012345678fafcfefLL),
BPF_ALU64_IMM(BPF_OR, R0, 0x0f0f0f0f),
BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } }
},
{
"ALU64_OR_K: Sign extension 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_LD_IMM64(R1, 0xfffffffff9fbfdffLL),
BPF_ALU64_IMM(BPF_OR, R0, 0xf0f0f0f0),
BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } }
},
/* BPF_ALU | BPF_XOR | BPF_X */
{
"ALU_XOR_X: 5 ^ 6 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 5),
BPF_ALU32_IMM(BPF_MOV, R1, 6),
BPF_ALU32_REG(BPF_XOR, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU_XOR_X: 0x1 ^ 0xffffffff = 0xfffffffe",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
BPF_ALU32_REG(BPF_XOR, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xfffffffe } },
},
{
"ALU64_XOR_X: 5 ^ 6 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 5),
BPF_ALU32_IMM(BPF_MOV, R1, 6),
BPF_ALU64_REG(BPF_XOR, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU64_XOR_X: 1 ^ 0xffffffff = 0xfffffffe",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
BPF_ALU64_REG(BPF_XOR, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xfffffffe } },
},
/* BPF_ALU | BPF_XOR | BPF_K */
{
"ALU_XOR_K: 5 ^ 6 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 5),
BPF_ALU32_IMM(BPF_XOR, R0, 6),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU_XOR_K: 1 ^ 0xffffffff = 0xfffffffe",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_XOR, R0, 0xffffffff),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xfffffffe } },
},
{
"ALU_XOR_K: Small immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
BPF_ALU32_IMM(BPF_XOR, R0, 15),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x0102030b } }
},
{
"ALU_XOR_K: Large immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0xf1f2f3f4),
BPF_ALU32_IMM(BPF_XOR, R0, 0xafbfcfdf),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x5e4d3c2b } }
},
{
"ALU_XOR_K: Zero extension",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_LD_IMM64(R1, 0x00000000795b3d1fLL),
BPF_ALU32_IMM(BPF_XOR, R0, 0xf0f0f0f0),
BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } }
},
{
"ALU64_XOR_K: 5 ^ 6 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 5),
BPF_ALU64_IMM(BPF_XOR, R0, 6),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU64_XOR_K: 1 ^ 0xffffffff = 0xfffffffe",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU64_IMM(BPF_XOR, R0, 0xffffffff),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xfffffffe } },
},
{
"ALU64_XOR_K: 0x0000ffffffff0000 ^ 0x0 = 0x0000ffffffff0000",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
BPF_ALU64_IMM(BPF_XOR, R2, 0x0),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_XOR_K: 0x0000ffffffff0000 ^ -1 = 0xffff00000000ffff",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
BPF_LD_IMM64(R3, 0xffff00000000ffffLL),
BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_XOR_K: 0x000000000000000 ^ -1 = 0xffffffffffffffff",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0000000000000000LL),
BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_XOR_K: Sign extension 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_LD_IMM64(R1, 0x0123456786a4c2e0LL),
BPF_ALU64_IMM(BPF_XOR, R0, 0x0f0f0f0f),
BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } }
},
{
"ALU64_XOR_K: Sign extension 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_LD_IMM64(R1, 0xfedcba98795b3d1fLL),
BPF_ALU64_IMM(BPF_XOR, R0, 0xf0f0f0f0),
BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } }
},
/* BPF_ALU | BPF_LSH | BPF_X */
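	/*
	 * The runner compares only the low 32 bits of R0 with the
	 * expected value, so 64-bit shift results are checked one word
	 * at a time: "low word" variants return the result directly,
	 * "high word" variants shift it right by 32 first.
	 */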
{
"ALU_LSH_X: 1 << 1 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_MOV, R1, 1),
BPF_ALU32_REG(BPF_LSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"ALU_LSH_X: 1 << 31 = 0x80000000",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_MOV, R1, 31),
BPF_ALU32_REG(BPF_LSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x80000000 } },
},
{
"ALU_LSH_X: 0x12345678 << 12 = 0x45678000",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
BPF_ALU32_IMM(BPF_MOV, R1, 12),
BPF_ALU32_REG(BPF_LSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x45678000 } }
},
{
"ALU64_LSH_X: 1 << 1 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_MOV, R1, 1),
BPF_ALU64_REG(BPF_LSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"ALU64_LSH_X: 1 << 31 = 0x80000000",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_MOV, R1, 31),
BPF_ALU64_REG(BPF_LSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x80000000 } },
},
{
"ALU64_LSH_X: Shift < 32, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 12),
BPF_ALU64_REG(BPF_LSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xbcdef000 } }
},
{
"ALU64_LSH_X: Shift < 32, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 12),
BPF_ALU64_REG(BPF_LSH, R0, R1),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x3456789a } }
},
{
"ALU64_LSH_X: Shift > 32, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 36),
BPF_ALU64_REG(BPF_LSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } }
},
{
"ALU64_LSH_X: Shift > 32, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 36),
BPF_ALU64_REG(BPF_LSH, R0, R1),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x9abcdef0 } }
},
{
"ALU64_LSH_X: Shift == 32, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 32),
BPF_ALU64_REG(BPF_LSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } }
},
{
"ALU64_LSH_X: Shift == 32, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 32),
BPF_ALU64_REG(BPF_LSH, R0, R1),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x89abcdef } }
},
{
"ALU64_LSH_X: Zero shift, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 0),
BPF_ALU64_REG(BPF_LSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x89abcdef } }
},
{
"ALU64_LSH_X: Zero shift, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 0),
BPF_ALU64_REG(BPF_LSH, R0, R1),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x01234567 } }
},
/* BPF_ALU | BPF_LSH | BPF_K */
{
"ALU_LSH_K: 1 << 1 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_LSH, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"ALU_LSH_K: 1 << 31 = 0x80000000",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU32_IMM(BPF_LSH, R0, 31),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x80000000 } },
},
{
"ALU_LSH_K: 0x12345678 << 12 = 0x45678000",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
BPF_ALU32_IMM(BPF_LSH, R0, 12),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x45678000 } }
},
{
"ALU_LSH_K: 0x12345678 << 0 = 0x12345678",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
BPF_ALU32_IMM(BPF_LSH, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x12345678 } }
},
{
"ALU64_LSH_K: 1 << 1 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU64_IMM(BPF_LSH, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"ALU64_LSH_K: 1 << 31 = 0x80000000",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU64_IMM(BPF_LSH, R0, 31),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x80000000 } },
},
{
"ALU64_LSH_K: Shift < 32, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ALU64_IMM(BPF_LSH, R0, 12),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xbcdef000 } }
},
{
"ALU64_LSH_K: Shift < 32, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ALU64_IMM(BPF_LSH, R0, 12),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x3456789a } }
},
{
"ALU64_LSH_K: Shift > 32, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ALU64_IMM(BPF_LSH, R0, 36),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } }
},
{
"ALU64_LSH_K: Shift > 32, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ALU64_IMM(BPF_LSH, R0, 36),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x9abcdef0 } }
},
{
"ALU64_LSH_K: Shift == 32, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ALU64_IMM(BPF_LSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } }
},
{
"ALU64_LSH_K: Shift == 32, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ALU64_IMM(BPF_LSH, R0, 32),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x89abcdef } }
},
{
"ALU64_LSH_K: Zero shift",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ALU64_IMM(BPF_LSH, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x89abcdef } }
},
/* BPF_ALU | BPF_RSH | BPF_X */
{
"ALU_RSH_X: 2 >> 1 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 2),
BPF_ALU32_IMM(BPF_MOV, R1, 1),
BPF_ALU32_REG(BPF_RSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU_RSH_X: 0x80000000 >> 31 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x80000000),
BPF_ALU32_IMM(BPF_MOV, R1, 31),
BPF_ALU32_REG(BPF_RSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU_RSH_X: 0x12345678 >> 20 = 0x123",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
BPF_ALU32_IMM(BPF_MOV, R1, 20),
BPF_ALU32_REG(BPF_RSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x123 } }
},
{
"ALU64_RSH_X: 2 >> 1 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 2),
BPF_ALU32_IMM(BPF_MOV, R1, 1),
BPF_ALU64_REG(BPF_RSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU64_RSH_X: 0x80000000 >> 31 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x80000000),
BPF_ALU32_IMM(BPF_MOV, R1, 31),
BPF_ALU64_REG(BPF_RSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU64_RSH_X: Shift < 32, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 12),
BPF_ALU64_REG(BPF_RSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x56789abc } }
},
{
"ALU64_RSH_X: Shift < 32, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 12),
BPF_ALU64_REG(BPF_RSH, R0, R1),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x00081234 } }
},
{
"ALU64_RSH_X: Shift > 32, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 36),
BPF_ALU64_REG(BPF_RSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x08123456 } }
},
{
"ALU64_RSH_X: Shift > 32, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 36),
BPF_ALU64_REG(BPF_RSH, R0, R1),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } }
},
{
"ALU64_RSH_X: Shift == 32, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 32),
BPF_ALU64_REG(BPF_RSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x81234567 } }
},
{
"ALU64_RSH_X: Shift == 32, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 32),
BPF_ALU64_REG(BPF_RSH, R0, R1),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } }
},
{
"ALU64_RSH_X: Zero shift, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 0),
BPF_ALU64_REG(BPF_RSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x89abcdef } }
},
{
"ALU64_RSH_X: Zero shift, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 0),
BPF_ALU64_REG(BPF_RSH, R0, R1),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x81234567 } }
},
/* BPF_ALU | BPF_RSH | BPF_K */
{
"ALU_RSH_K: 2 >> 1 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 2),
BPF_ALU32_IMM(BPF_RSH, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU_RSH_K: 0x80000000 >> 31 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x80000000),
BPF_ALU32_IMM(BPF_RSH, R0, 31),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU_RSH_K: 0x12345678 >> 20 = 0x123",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
BPF_ALU32_IMM(BPF_RSH, R0, 20),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x123 } }
},
{
"ALU_RSH_K: 0x12345678 >> 0 = 0x12345678",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
BPF_ALU32_IMM(BPF_RSH, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x12345678 } }
},
{
"ALU64_RSH_K: 2 >> 1 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 2),
BPF_ALU64_IMM(BPF_RSH, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU64_RSH_K: 0x80000000 >> 31 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x80000000),
BPF_ALU64_IMM(BPF_RSH, R0, 31),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"ALU64_RSH_K: Shift < 32, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU64_IMM(BPF_RSH, R0, 12),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x56789abc } }
},
{
"ALU64_RSH_K: Shift < 32, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU64_IMM(BPF_RSH, R0, 12),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x00081234 } }
},
{
"ALU64_RSH_K: Shift > 32, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU64_IMM(BPF_RSH, R0, 36),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x08123456 } }
},
{
"ALU64_RSH_K: Shift > 32, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU64_IMM(BPF_RSH, R0, 36),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } }
},
{
"ALU64_RSH_K: Shift == 32, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x81234567 } }
},
{
"ALU64_RSH_K: Shift == 32, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } }
},
{
"ALU64_RSH_K: Zero shift",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ALU64_IMM(BPF_RSH, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x89abcdef } }
},
/* BPF_ALU | BPF_ARSH | BPF_X */
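	/*
	 * BPF_ARSH replicates the sign bit into the vacated positions.
	 * The tests shift the negative value 0x8123456789abcdef, so the
	 * high bits must fill with ones, unlike BPF_RSH above.
	 */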
{
"ALU32_ARSH_X: -1234 >> 7 = -10",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -1234),
BPF_ALU32_IMM(BPF_MOV, R1, 7),
BPF_ALU32_REG(BPF_ARSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -10 } }
},
{
"ALU64_ARSH_X: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
BPF_ALU32_IMM(BPF_MOV, R1, 40),
BPF_ALU64_REG(BPF_ARSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffff00ff } },
},
{
"ALU64_ARSH_X: Shift < 32, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 12),
BPF_ALU64_REG(BPF_ARSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x56789abc } }
},
{
"ALU64_ARSH_X: Shift < 32, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 12),
BPF_ALU64_REG(BPF_ARSH, R0, R1),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xfff81234 } }
},
{
"ALU64_ARSH_X: Shift > 32, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 36),
BPF_ALU64_REG(BPF_ARSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xf8123456 } }
},
{
"ALU64_ARSH_X: Shift > 32, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 36),
BPF_ALU64_REG(BPF_ARSH, R0, R1),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -1 } }
},
{
"ALU64_ARSH_X: Shift == 32, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 32),
BPF_ALU64_REG(BPF_ARSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x81234567 } }
},
{
"ALU64_ARSH_X: Shift == 32, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 32),
BPF_ALU64_REG(BPF_ARSH, R0, R1),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -1 } }
},
{
"ALU64_ARSH_X: Zero shift, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 0),
BPF_ALU64_REG(BPF_ARSH, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x89abcdef } }
},
{
"ALU64_ARSH_X: Zero shift, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU32_IMM(BPF_MOV, R1, 0),
BPF_ALU64_REG(BPF_ARSH, R0, R1),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x81234567 } }
},
/* BPF_ALU | BPF_ARSH | BPF_K */
{
"ALU32_ARSH_K: -1234 >> 7 = -10",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -1234),
BPF_ALU32_IMM(BPF_ARSH, R0, 7),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -10 } }
},
{
"ALU32_ARSH_K: -1234 >> 0 = -1234",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -1234),
BPF_ALU32_IMM(BPF_ARSH, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -1234 } }
},
{
"ALU64_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
BPF_ALU64_IMM(BPF_ARSH, R0, 40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffff00ff } },
},
{
"ALU64_ARSH_K: Shift < 32, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
			BPF_ALU64_IMM(BPF_ARSH, R0, 12),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x56789abc } }
},
{
"ALU64_ARSH_K: Shift < 32, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU64_IMM(BPF_ARSH, R0, 12),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xfff81234 } }
},
{
"ALU64_ARSH_K: Shift > 32, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU64_IMM(BPF_ARSH, R0, 36),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xf8123456 } }
},
{
"ALU64_ARSH_K: Shift > 32, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xf123456789abcdefLL),
BPF_ALU64_IMM(BPF_ARSH, R0, 36),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -1 } }
},
{
"ALU64_ARSH_K: Shift == 32, low word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU64_IMM(BPF_ARSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x81234567 } }
},
{
"ALU64_ARSH_K: Shift == 32, high word",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU64_IMM(BPF_ARSH, R0, 32),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -1 } }
},
{
"ALU64_ARSH_K: Zero shift",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
BPF_ALU64_IMM(BPF_ARSH, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x89abcdef } }
},
/* BPF_ALU | BPF_NEG */
{
"ALU_NEG: -(3) = -3",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 3),
BPF_ALU32_IMM(BPF_NEG, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -3 } },
},
{
"ALU_NEG: -(-3) = 3",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -3),
BPF_ALU32_IMM(BPF_NEG, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
{
"ALU64_NEG: -(3) = -3",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
BPF_ALU64_IMM(BPF_NEG, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -3 } },
},
{
"ALU64_NEG: -(-3) = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, -3),
BPF_ALU64_IMM(BPF_NEG, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 3 } },
},
/* BPF_ALU | BPF_END | BPF_FROM_BE */
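	/*
	 * Expected values are written as cpu_to_be*()/cpu_to_le*() so
	 * the same tests pass on either byte order: the conversion is a
	 * byte swap on hosts of the opposite endianness, and the 16-
	 * and 32-bit widths must also zero the upper register bits.
	 */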
{
"ALU_END_FROM_BE 16: 0x0123456789abcdef -> 0xcdef",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ENDIAN(BPF_FROM_BE, R0, 16),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, cpu_to_be16(0xcdef) } },
},
{
"ALU_END_FROM_BE 32: 0x0123456789abcdef -> 0x89abcdef",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ENDIAN(BPF_FROM_BE, R0, 32),
BPF_ALU64_REG(BPF_MOV, R1, R0),
BPF_ALU64_IMM(BPF_RSH, R1, 32),
BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, cpu_to_be32(0x89abcdef) } },
},
{
"ALU_END_FROM_BE 64: 0x0123456789abcdef -> 0x89abcdef",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ENDIAN(BPF_FROM_BE, R0, 64),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, (u32) cpu_to_be64(0x0123456789abcdefLL) } },
},
{
"ALU_END_FROM_BE 64: 0x0123456789abcdef >> 32 -> 0x01234567",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ENDIAN(BPF_FROM_BE, R0, 64),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, (u32) (cpu_to_be64(0x0123456789abcdefLL) >> 32) } },
},
/* BPF_ALU | BPF_END | BPF_FROM_BE, reversed */
{
"ALU_END_FROM_BE 16: 0xfedcba9876543210 -> 0x3210",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
BPF_ENDIAN(BPF_FROM_BE, R0, 16),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, cpu_to_be16(0x3210) } },
},
{
"ALU_END_FROM_BE 32: 0xfedcba9876543210 -> 0x76543210",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
BPF_ENDIAN(BPF_FROM_BE, R0, 32),
BPF_ALU64_REG(BPF_MOV, R1, R0),
BPF_ALU64_IMM(BPF_RSH, R1, 32),
BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, cpu_to_be32(0x76543210) } },
},
{
"ALU_END_FROM_BE 64: 0xfedcba9876543210 -> 0x76543210",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
BPF_ENDIAN(BPF_FROM_BE, R0, 64),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, (u32) cpu_to_be64(0xfedcba9876543210ULL) } },
},
{
"ALU_END_FROM_BE 64: 0xfedcba9876543210 >> 32 -> 0xfedcba98",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
BPF_ENDIAN(BPF_FROM_BE, R0, 64),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, (u32) (cpu_to_be64(0xfedcba9876543210ULL) >> 32) } },
},
/* BPF_ALU | BPF_END | BPF_FROM_LE */
{
"ALU_END_FROM_LE 16: 0x0123456789abcdef -> 0xefcd",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ENDIAN(BPF_FROM_LE, R0, 16),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, cpu_to_le16(0xcdef) } },
},
{
"ALU_END_FROM_LE 32: 0x0123456789abcdef -> 0xefcdab89",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ENDIAN(BPF_FROM_LE, R0, 32),
BPF_ALU64_REG(BPF_MOV, R1, R0),
BPF_ALU64_IMM(BPF_RSH, R1, 32),
BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, cpu_to_le32(0x89abcdef) } },
},
{
"ALU_END_FROM_LE 64: 0x0123456789abcdef -> 0x67452301",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ENDIAN(BPF_FROM_LE, R0, 64),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, (u32) cpu_to_le64(0x0123456789abcdefLL) } },
},
{
"ALU_END_FROM_LE 64: 0x0123456789abcdef >> 32 -> 0xefcdab89",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_ENDIAN(BPF_FROM_LE, R0, 64),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, (u32) (cpu_to_le64(0x0123456789abcdefLL) >> 32) } },
},
/* BPF_ALU | BPF_END | BPF_FROM_LE, reversed */
{
"ALU_END_FROM_LE 16: 0xfedcba9876543210 -> 0x1032",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
BPF_ENDIAN(BPF_FROM_LE, R0, 16),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, cpu_to_le16(0x3210) } },
},
{
"ALU_END_FROM_LE 32: 0xfedcba9876543210 -> 0x10325476",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
BPF_ENDIAN(BPF_FROM_LE, R0, 32),
BPF_ALU64_REG(BPF_MOV, R1, R0),
BPF_ALU64_IMM(BPF_RSH, R1, 32),
BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, cpu_to_le32(0x76543210) } },
},
{
"ALU_END_FROM_LE 64: 0xfedcba9876543210 -> 0x10325476",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
BPF_ENDIAN(BPF_FROM_LE, R0, 64),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, (u32) cpu_to_le64(0xfedcba9876543210ULL) } },
},
{
"ALU_END_FROM_LE 64: 0xfedcba9876543210 >> 32 -> 0x98badcfe",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
BPF_ENDIAN(BPF_FROM_LE, R0, 64),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, (u32) (cpu_to_le64(0xfedcba9876543210ULL) >> 32) } },
},
/* BPF_LDX_MEM B/H/W/DW */
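	/*
	 * A doubleword is stored on the stack and a narrower load must
	 * read back its least significant bytes. Where those bytes sit
	 * depends on host byte order, hence the #ifdef __BIG_ENDIAN
	 * offsets: the low byte of a doubleword at -8 is at offset -1
	 * on big-endian and at -8 on little-endian.
	 */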
{
"BPF_LDX_MEM | BPF_B, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_LD_IMM64(R2, 0x0000000000000008ULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
#ifdef __BIG_ENDIAN
BPF_LDX_MEM(BPF_B, R0, R10, -1),
#else
BPF_LDX_MEM(BPF_B, R0, R10, -8),
#endif
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
{
"BPF_LDX_MEM | BPF_B, MSB set",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x8182838485868788ULL),
BPF_LD_IMM64(R2, 0x0000000000000088ULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
#ifdef __BIG_ENDIAN
BPF_LDX_MEM(BPF_B, R0, R10, -1),
#else
BPF_LDX_MEM(BPF_B, R0, R10, -8),
#endif
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
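	/*
	 * The FLAG_LARGE_MEM tests use the test data buffer passed to
	 * the program in R1 instead of the stack; the first field of
	 * the result tuple, e.g. { 512, 0 }, sets the buffer size so
	 * that it covers the offsets being exercised.
	 */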
{
"BPF_LDX_MEM | BPF_B, negative offset",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x8182838485868788ULL),
BPF_LD_IMM64(R3, 0x0000000000000088ULL),
BPF_ALU64_IMM(BPF_ADD, R1, 512),
BPF_STX_MEM(BPF_B, R1, R2, -256),
BPF_LDX_MEM(BPF_B, R0, R1, -256),
BPF_JMP_REG(BPF_JNE, R0, R3, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_LARGE_MEM,
{ },
{ { 512, 0 } },
.stack_depth = 0,
},
{
"BPF_LDX_MEM | BPF_B, small positive offset",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x8182838485868788ULL),
BPF_LD_IMM64(R3, 0x0000000000000088ULL),
BPF_STX_MEM(BPF_B, R1, R2, 256),
BPF_LDX_MEM(BPF_B, R0, R1, 256),
BPF_JMP_REG(BPF_JNE, R0, R3, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_LARGE_MEM,
{ },
{ { 512, 0 } },
.stack_depth = 0,
},
{
"BPF_LDX_MEM | BPF_B, large positive offset",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x8182838485868788ULL),
BPF_LD_IMM64(R3, 0x0000000000000088ULL),
BPF_STX_MEM(BPF_B, R1, R2, 4096),
BPF_LDX_MEM(BPF_B, R0, R1, 4096),
BPF_JMP_REG(BPF_JNE, R0, R3, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_LARGE_MEM,
{ },
{ { 4096 + 16, 0 } },
.stack_depth = 0,
},
{
"BPF_LDX_MEM | BPF_H, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_LD_IMM64(R2, 0x0000000000000708ULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
#ifdef __BIG_ENDIAN
BPF_LDX_MEM(BPF_H, R0, R10, -2),
#else
BPF_LDX_MEM(BPF_H, R0, R10, -8),
#endif
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
{
"BPF_LDX_MEM | BPF_H, MSB set",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x8182838485868788ULL),
BPF_LD_IMM64(R2, 0x0000000000008788ULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
#ifdef __BIG_ENDIAN
BPF_LDX_MEM(BPF_H, R0, R10, -2),
#else
BPF_LDX_MEM(BPF_H, R0, R10, -8),
#endif
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
{
"BPF_LDX_MEM | BPF_H, negative offset",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x8182838485868788ULL),
BPF_LD_IMM64(R3, 0x0000000000008788ULL),
BPF_ALU64_IMM(BPF_ADD, R1, 512),
BPF_STX_MEM(BPF_H, R1, R2, -256),
BPF_LDX_MEM(BPF_H, R0, R1, -256),
BPF_JMP_REG(BPF_JNE, R0, R3, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_LARGE_MEM,
{ },
{ { 512, 0 } },
.stack_depth = 0,
},
{
"BPF_LDX_MEM | BPF_H, small positive offset",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x8182838485868788ULL),
BPF_LD_IMM64(R3, 0x0000000000008788ULL),
BPF_STX_MEM(BPF_H, R1, R2, 256),
BPF_LDX_MEM(BPF_H, R0, R1, 256),
BPF_JMP_REG(BPF_JNE, R0, R3, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_LARGE_MEM,
{ },
{ { 512, 0 } },
.stack_depth = 0,
},
{
"BPF_LDX_MEM | BPF_H, large positive offset",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x8182838485868788ULL),
BPF_LD_IMM64(R3, 0x0000000000008788ULL),
BPF_STX_MEM(BPF_H, R1, R2, 8192),
BPF_LDX_MEM(BPF_H, R0, R1, 8192),
BPF_JMP_REG(BPF_JNE, R0, R3, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_LARGE_MEM,
{ },
{ { 8192 + 16, 0 } },
.stack_depth = 0,
},
{
"BPF_LDX_MEM | BPF_H, unaligned positive offset",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x8182838485868788ULL),
BPF_LD_IMM64(R3, 0x0000000000008788ULL),
BPF_STX_MEM(BPF_H, R1, R2, 13),
BPF_LDX_MEM(BPF_H, R0, R1, 13),
BPF_JMP_REG(BPF_JNE, R0, R3, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_LARGE_MEM,
{ },
{ { 32, 0 } },
.stack_depth = 0,
},
{
"BPF_LDX_MEM | BPF_W, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_LD_IMM64(R2, 0x0000000005060708ULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
#ifdef __BIG_ENDIAN
BPF_LDX_MEM(BPF_W, R0, R10, -4),
#else
BPF_LDX_MEM(BPF_W, R0, R10, -8),
#endif
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
{
"BPF_LDX_MEM | BPF_W, MSB set",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x8182838485868788ULL),
BPF_LD_IMM64(R2, 0x0000000085868788ULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
#ifdef __BIG_ENDIAN
BPF_LDX_MEM(BPF_W, R0, R10, -4),
#else
BPF_LDX_MEM(BPF_W, R0, R10, -8),
#endif
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
{
"BPF_LDX_MEM | BPF_W, negative offset",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x8182838485868788ULL),
BPF_LD_IMM64(R3, 0x0000000085868788ULL),
BPF_ALU64_IMM(BPF_ADD, R1, 512),
BPF_STX_MEM(BPF_W, R1, R2, -256),
BPF_LDX_MEM(BPF_W, R0, R1, -256),
BPF_JMP_REG(BPF_JNE, R0, R3, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_LARGE_MEM,
{ },
{ { 512, 0 } },
.stack_depth = 0,
},
{
"BPF_LDX_MEM | BPF_W, small positive offset",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x8182838485868788ULL),
BPF_LD_IMM64(R3, 0x0000000085868788ULL),
BPF_STX_MEM(BPF_W, R1, R2, 256),
BPF_LDX_MEM(BPF_W, R0, R1, 256),
BPF_JMP_REG(BPF_JNE, R0, R3, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_LARGE_MEM,
{ },
{ { 512, 0 } },
.stack_depth = 0,
},
{
"BPF_LDX_MEM | BPF_W, large positive offset",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x8182838485868788ULL),
BPF_LD_IMM64(R3, 0x0000000085868788ULL),
BPF_STX_MEM(BPF_W, R1, R2, 16384),
BPF_LDX_MEM(BPF_W, R0, R1, 16384),
BPF_JMP_REG(BPF_JNE, R0, R3, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_LARGE_MEM,
{ },
{ { 16384 + 16, 0 } },
.stack_depth = 0,
},
{
"BPF_LDX_MEM | BPF_W, unaligned positive offset",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x8182838485868788ULL),
BPF_LD_IMM64(R3, 0x0000000085868788ULL),
BPF_STX_MEM(BPF_W, R1, R2, 13),
BPF_LDX_MEM(BPF_W, R0, R1, 13),
BPF_JMP_REG(BPF_JNE, R0, R3, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_LARGE_MEM,
{ },
{ { 32, 0 } },
.stack_depth = 0,
},
{
"BPF_LDX_MEM | BPF_DW, base",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0102030405060708ULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
BPF_LDX_MEM(BPF_DW, R0, R10, -8),
BPF_JMP_REG(BPF_JNE, R0, R1, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
{
"BPF_LDX_MEM | BPF_DW, MSB set",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x8182838485868788ULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
BPF_LDX_MEM(BPF_DW, R0, R10, -8),
BPF_JMP_REG(BPF_JNE, R0, R1, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
{
"BPF_LDX_MEM | BPF_DW, negative offset",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x8182838485868788ULL),
BPF_ALU64_IMM(BPF_ADD, R1, 512),
BPF_STX_MEM(BPF_DW, R1, R2, -256),
BPF_LDX_MEM(BPF_DW, R0, R1, -256),
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_LARGE_MEM,
{ },
{ { 512, 0 } },
.stack_depth = 0,
},
{
"BPF_LDX_MEM | BPF_DW, small positive offset",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x8182838485868788ULL),
BPF_STX_MEM(BPF_DW, R1, R2, 256),
BPF_LDX_MEM(BPF_DW, R0, R1, 256),
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_LARGE_MEM,
{ },
{ { 512, 0 } },
.stack_depth = 8,
},
{
"BPF_LDX_MEM | BPF_DW, large positive offset",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x8182838485868788ULL),
BPF_STX_MEM(BPF_DW, R1, R2, 32760),
BPF_LDX_MEM(BPF_DW, R0, R1, 32760),
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_LARGE_MEM,
{ },
{ { 32768, 0 } },
.stack_depth = 0,
},
{
"BPF_LDX_MEM | BPF_DW, unaligned positive offset",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x8182838485868788ULL),
BPF_STX_MEM(BPF_DW, R1, R2, 13),
BPF_LDX_MEM(BPF_DW, R0, R1, 13),
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_LARGE_MEM,
{ },
{ { 32, 0 } },
.stack_depth = 0,
},
/* BPF_STX_MEM B/H/W/DW */
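	/*
	 * These check that a sub-word store modifies only the addressed
	 * bytes of a previously stored doubleword, using the same
	 * endian-dependent offsets as the BPF_LDX_MEM tests above.
	 */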
{
"BPF_STX_MEM | BPF_B",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
BPF_LD_IMM64(R2, 0x0102030405060708ULL),
BPF_LD_IMM64(R3, 0x8090a0b0c0d0e008ULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
#ifdef __BIG_ENDIAN
BPF_STX_MEM(BPF_B, R10, R2, -1),
#else
BPF_STX_MEM(BPF_B, R10, R2, -8),
#endif
BPF_LDX_MEM(BPF_DW, R0, R10, -8),
BPF_JMP_REG(BPF_JNE, R0, R3, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
{
"BPF_STX_MEM | BPF_B, MSB set",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
BPF_LD_IMM64(R2, 0x8182838485868788ULL),
BPF_LD_IMM64(R3, 0x8090a0b0c0d0e088ULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
#ifdef __BIG_ENDIAN
BPF_STX_MEM(BPF_B, R10, R2, -1),
#else
BPF_STX_MEM(BPF_B, R10, R2, -8),
#endif
BPF_LDX_MEM(BPF_DW, R0, R10, -8),
BPF_JMP_REG(BPF_JNE, R0, R3, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
{
"BPF_STX_MEM | BPF_H",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
BPF_LD_IMM64(R2, 0x0102030405060708ULL),
BPF_LD_IMM64(R3, 0x8090a0b0c0d00708ULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
#ifdef __BIG_ENDIAN
BPF_STX_MEM(BPF_H, R10, R2, -2),
#else
BPF_STX_MEM(BPF_H, R10, R2, -8),
#endif
BPF_LDX_MEM(BPF_DW, R0, R10, -8),
BPF_JMP_REG(BPF_JNE, R0, R3, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
{
"BPF_STX_MEM | BPF_H, MSB set",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
BPF_LD_IMM64(R2, 0x8182838485868788ULL),
BPF_LD_IMM64(R3, 0x8090a0b0c0d08788ULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
#ifdef __BIG_ENDIAN
BPF_STX_MEM(BPF_H, R10, R2, -2),
#else
BPF_STX_MEM(BPF_H, R10, R2, -8),
#endif
BPF_LDX_MEM(BPF_DW, R0, R10, -8),
BPF_JMP_REG(BPF_JNE, R0, R3, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
{
"BPF_STX_MEM | BPF_W",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
BPF_LD_IMM64(R2, 0x0102030405060708ULL),
BPF_LD_IMM64(R3, 0x8090a0b005060708ULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
#ifdef __BIG_ENDIAN
BPF_STX_MEM(BPF_W, R10, R2, -4),
#else
BPF_STX_MEM(BPF_W, R10, R2, -8),
#endif
BPF_LDX_MEM(BPF_DW, R0, R10, -8),
BPF_JMP_REG(BPF_JNE, R0, R3, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
{
"BPF_STX_MEM | BPF_W, MSB set",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x8090a0b0c0d0e0f0ULL),
BPF_LD_IMM64(R2, 0x8182838485868788ULL),
BPF_LD_IMM64(R3, 0x8090a0b085868788ULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
#ifdef __BIG_ENDIAN
BPF_STX_MEM(BPF_W, R10, R2, -4),
#else
BPF_STX_MEM(BPF_W, R10, R2, -8),
#endif
BPF_LDX_MEM(BPF_DW, R0, R10, -8),
BPF_JMP_REG(BPF_JNE, R0, R3, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
/* BPF_ST(X) | BPF_MEM | BPF_B/H/W/DW */
{
"ST_MEM_B: Store/Load byte: max negative",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_ST_MEM(BPF_B, R10, -40, 0xff),
BPF_LDX_MEM(BPF_B, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xff } },
.stack_depth = 40,
},
{
"ST_MEM_B: Store/Load byte: max positive",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1),
			BPF_ST_MEM(BPF_B, R10, -40, 0x7f),
			BPF_LDX_MEM(BPF_B, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x7f } },
.stack_depth = 40,
},
{
"STX_MEM_B: Store/Load byte: max negative",
.u.insns_int = {
BPF_LD_IMM64(R0, 0),
BPF_LD_IMM64(R1, 0xffLL),
BPF_STX_MEM(BPF_B, R10, R1, -40),
BPF_LDX_MEM(BPF_B, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xff } },
.stack_depth = 40,
},
{
"ST_MEM_H: Store/Load half word: max negative",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_ST_MEM(BPF_H, R10, -40, 0xffff),
BPF_LDX_MEM(BPF_H, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffff } },
.stack_depth = 40,
},
{
"ST_MEM_H: Store/Load half word: max positive",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_ST_MEM(BPF_H, R10, -40, 0x7fff),
BPF_LDX_MEM(BPF_H, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x7fff } },
.stack_depth = 40,
},
{
"STX_MEM_H: Store/Load half word: max negative",
.u.insns_int = {
BPF_LD_IMM64(R0, 0),
BPF_LD_IMM64(R1, 0xffffLL),
BPF_STX_MEM(BPF_H, R10, R1, -40),
BPF_LDX_MEM(BPF_H, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffff } },
.stack_depth = 40,
},
{
"ST_MEM_W: Store/Load word: max negative",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_ST_MEM(BPF_W, R10, -40, 0xffffffff),
BPF_LDX_MEM(BPF_W, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffffffff } },
.stack_depth = 40,
},
{
"ST_MEM_W: Store/Load word: max positive",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_ST_MEM(BPF_W, R10, -40, 0x7fffffff),
BPF_LDX_MEM(BPF_W, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x7fffffff } },
.stack_depth = 40,
},
{
"STX_MEM_W: Store/Load word: max negative",
.u.insns_int = {
BPF_LD_IMM64(R0, 0),
BPF_LD_IMM64(R1, 0xffffffffLL),
BPF_STX_MEM(BPF_W, R10, R1, -40),
BPF_LDX_MEM(BPF_W, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffffffff } },
.stack_depth = 40,
},
{
"ST_MEM_DW: Store/Load double word: max negative",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff),
BPF_LDX_MEM(BPF_DW, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffffffff } },
.stack_depth = 40,
},
{
"ST_MEM_DW: Store/Load double word: max negative 2",
.u.insns_int = {
BPF_LD_IMM64(R2, 0xffff00000000ffffLL),
BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff),
BPF_LDX_MEM(BPF_DW, R2, R10, -40),
BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
.stack_depth = 40,
},
{
"ST_MEM_DW: Store/Load double word: max positive",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_ST_MEM(BPF_DW, R10, -40, 0x7fffffff),
BPF_LDX_MEM(BPF_DW, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x7fffffff } },
.stack_depth = 40,
},
{
"STX_MEM_DW: Store/Load double word: max negative",
.u.insns_int = {
BPF_LD_IMM64(R0, 0),
BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
BPF_STX_MEM(BPF_DW, R10, R1, -40),
BPF_LDX_MEM(BPF_DW, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffffffff } },
.stack_depth = 40,
},
{
"STX_MEM_DW: Store double word: first word in memory",
.u.insns_int = {
BPF_LD_IMM64(R0, 0),
BPF_LD_IMM64(R1, 0x0123456789abcdefLL),
BPF_STX_MEM(BPF_DW, R10, R1, -40),
BPF_LDX_MEM(BPF_W, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
#ifdef __BIG_ENDIAN
{ { 0, 0x01234567 } },
#else
{ { 0, 0x89abcdef } },
#endif
.stack_depth = 40,
},
{
"STX_MEM_DW: Store double word: second word in memory",
.u.insns_int = {
BPF_LD_IMM64(R0, 0),
BPF_LD_IMM64(R1, 0x0123456789abcdefLL),
BPF_STX_MEM(BPF_DW, R10, R1, -40),
BPF_LDX_MEM(BPF_W, R0, R10, -36),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
#ifdef __BIG_ENDIAN
{ { 0, 0x89abcdef } },
#else
{ { 0, 0x01234567 } },
#endif
.stack_depth = 40,
},
/* BPF_STX | BPF_ATOMIC | BPF_W/DW */
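	/*
	 * These programs are too long to list inline; the .fill_helper
	 * callback generates the instruction sequence at test time, a
	 * long chain of atomic adds whose final value must be 4134.
	 */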
{
"STX_XADD_W: X + 1 + 1 + 1 + ...",
{ },
INTERNAL,
{ },
{ { 0, 4134 } },
.fill_helper = bpf_fill_stxw,
},
{
"STX_XADD_DW: X + 1 + 1 + 1 + ...",
{ },
INTERNAL,
{ },
{ { 0, 4134 } },
.fill_helper = bpf_fill_stxdw,
},
/*
* Exhaustive tests of atomic operation variants.
* Individual tests are expanded from template macros for all
* combinations of ALU operation, word size and fetching.
*/
#define BPF_ATOMIC_POISON(width) ((width) == BPF_W ? (0xbaadf00dULL << 32) : 0)
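/*
 * For BPF_W operations, poison the upper half of the source register
 * to check that the JIT uses only the low 32 bits. TEST1-TEST3 also
 * fold the upper half of their 64-bit check value into R0 (via
 * BPF_RSH 32 and BPF_OR), so any stray high bits show up in the
 * 32-bit return value.
 */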
#define BPF_ATOMIC_OP_TEST1(width, op, logic, old, update, result) \
{ \
"BPF_ATOMIC | " #width ", " #op ": Test: " \
#old " " #logic " " #update " = " #result, \
.u.insns_int = { \
BPF_LD_IMM64(R5, (update) | BPF_ATOMIC_POISON(width)), \
BPF_ST_MEM(width, R10, -40, old), \
BPF_ATOMIC_OP(width, op, R10, R5, -40), \
BPF_LDX_MEM(width, R0, R10, -40), \
BPF_ALU64_REG(BPF_MOV, R1, R0), \
BPF_ALU64_IMM(BPF_RSH, R1, 32), \
BPF_ALU64_REG(BPF_OR, R0, R1), \
BPF_EXIT_INSN(), \
}, \
INTERNAL, \
{ }, \
{ { 0, result } }, \
.stack_depth = 40, \
}
#define BPF_ATOMIC_OP_TEST2(width, op, logic, old, update, result) \
{ \
"BPF_ATOMIC | " #width ", " #op ": Test side effects, r10: " \
#old " " #logic " " #update " = " #result, \
.u.insns_int = { \
BPF_ALU64_REG(BPF_MOV, R1, R10), \
BPF_LD_IMM64(R0, (update) | BPF_ATOMIC_POISON(width)), \
		BPF_ST_MEM(width, R10, -40, old),			\
BPF_ATOMIC_OP(width, op, R10, R0, -40), \
BPF_ALU64_REG(BPF_MOV, R0, R10), \
BPF_ALU64_REG(BPF_SUB, R0, R1), \
BPF_ALU64_REG(BPF_MOV, R1, R0), \
BPF_ALU64_IMM(BPF_RSH, R1, 32), \
BPF_ALU64_REG(BPF_OR, R0, R1), \
BPF_EXIT_INSN(), \
}, \
INTERNAL, \
{ }, \
{ { 0, 0 } }, \
.stack_depth = 40, \
}
#define BPF_ATOMIC_OP_TEST3(width, op, logic, old, update, result) \
{ \
"BPF_ATOMIC | " #width ", " #op ": Test side effects, r0: " \
#old " " #logic " " #update " = " #result, \
.u.insns_int = { \
BPF_ALU64_REG(BPF_MOV, R0, R10), \
BPF_LD_IMM64(R1, (update) | BPF_ATOMIC_POISON(width)), \
BPF_ST_MEM(width, R10, -40, old), \
BPF_ATOMIC_OP(width, op, R10, R1, -40), \
BPF_ALU64_REG(BPF_SUB, R0, R10), \
BPF_ALU64_REG(BPF_MOV, R1, R0), \
BPF_ALU64_IMM(BPF_RSH, R1, 32), \
BPF_ALU64_REG(BPF_OR, R0, R1), \
BPF_EXIT_INSN(), \
}, \
INTERNAL, \
{ }, \
{ { 0, 0 } }, \
.stack_depth = 40, \
}
#define BPF_ATOMIC_OP_TEST4(width, op, logic, old, update, result) \
{ \
"BPF_ATOMIC | " #width ", " #op ": Test fetch: " \
#old " " #logic " " #update " = " #result, \
.u.insns_int = { \
BPF_LD_IMM64(R3, (update) | BPF_ATOMIC_POISON(width)), \
BPF_ST_MEM(width, R10, -40, old), \
BPF_ATOMIC_OP(width, op, R10, R3, -40), \
BPF_ALU32_REG(BPF_MOV, R0, R3), \
BPF_EXIT_INSN(), \
}, \
INTERNAL, \
{ }, \
{ { 0, (op) & BPF_FETCH ? old : update } }, \
.stack_depth = 40, \
}
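	/*
	 * TEST4 checks the fetch semantics: with BPF_FETCH the source
	 * register receives the old memory value, without it the
	 * register is left holding the update operand.
	 */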
/* BPF_ATOMIC | BPF_W: BPF_ADD */
BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
BPF_ATOMIC_OP_TEST2(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
BPF_ATOMIC_OP_TEST3(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
BPF_ATOMIC_OP_TEST4(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
/* BPF_ATOMIC | BPF_W: BPF_ADD | BPF_FETCH */
BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
BPF_ATOMIC_OP_TEST2(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
BPF_ATOMIC_OP_TEST3(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
BPF_ATOMIC_OP_TEST4(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
/* BPF_ATOMIC | BPF_DW: BPF_ADD */
BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
/* BPF_ATOMIC | BPF_DW: BPF_ADD | BPF_FETCH */
BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
/* BPF_ATOMIC | BPF_W: BPF_AND */
BPF_ATOMIC_OP_TEST1(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
BPF_ATOMIC_OP_TEST2(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
BPF_ATOMIC_OP_TEST3(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
BPF_ATOMIC_OP_TEST4(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
/* BPF_ATOMIC | BPF_W: BPF_AND | BPF_FETCH */
BPF_ATOMIC_OP_TEST1(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
BPF_ATOMIC_OP_TEST2(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
BPF_ATOMIC_OP_TEST3(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
BPF_ATOMIC_OP_TEST4(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
/* BPF_ATOMIC | BPF_DW: BPF_AND */
BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
/* BPF_ATOMIC | BPF_DW: BPF_AND | BPF_FETCH */
BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
/* BPF_ATOMIC | BPF_W: BPF_OR */
BPF_ATOMIC_OP_TEST1(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
BPF_ATOMIC_OP_TEST2(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
BPF_ATOMIC_OP_TEST3(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
BPF_ATOMIC_OP_TEST4(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
/* BPF_ATOMIC | BPF_W: BPF_OR | BPF_FETCH */
BPF_ATOMIC_OP_TEST1(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
BPF_ATOMIC_OP_TEST2(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
BPF_ATOMIC_OP_TEST3(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
BPF_ATOMIC_OP_TEST4(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
/* BPF_ATOMIC | BPF_DW: BPF_OR */
BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
/* BPF_ATOMIC | BPF_DW: BPF_OR | BPF_FETCH */
BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
/* BPF_ATOMIC | BPF_W: BPF_XOR */
BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
/* BPF_ATOMIC | BPF_W: BPF_XOR | BPF_FETCH */
BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
/* BPF_ATOMIC | BPF_DW: BPF_XOR */
BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
/* BPF_ATOMIC | BPF_DW: BPF_XOR | BPF_FETCH */
BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
/* BPF_ATOMIC | BPF_W: BPF_XCHG */
BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
/* BPF_ATOMIC | BPF_DW: BPF_XCHG */
BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
#undef BPF_ATOMIC_POISON
#undef BPF_ATOMIC_OP_TEST1
#undef BPF_ATOMIC_OP_TEST2
#undef BPF_ATOMIC_OP_TEST3
#undef BPF_ATOMIC_OP_TEST4
/* BPF_ATOMIC | BPF_W, BPF_CMPXCHG */
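	/*
	 * BPF_CMPXCHG uses R0 implicitly: memory is compared against R0
	 * and replaced by the source register only on a match, and R0
	 * always receives the value that was in memory.
	 */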
{
"BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test successful return",
.u.insns_int = {
BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567),
BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x01234567 } },
.stack_depth = 40,
},
{
"BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test successful store",
.u.insns_int = {
BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567),
BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
BPF_LDX_MEM(BPF_W, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x89abcdef } },
.stack_depth = 40,
},
{
"BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test failure return",
.u.insns_int = {
BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
BPF_ALU32_IMM(BPF_MOV, R0, 0x76543210),
BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x01234567 } },
.stack_depth = 40,
},
{
"BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test failure store",
.u.insns_int = {
BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
BPF_ALU32_IMM(BPF_MOV, R0, 0x76543210),
BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
BPF_LDX_MEM(BPF_W, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x01234567 } },
.stack_depth = 40,
},
{
"BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test side effects",
.u.insns_int = {
BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567),
BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
BPF_ALU32_REG(BPF_MOV, R0, R3),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x89abcdef } },
.stack_depth = 40,
},
/* BPF_ATOMIC | BPF_DW, BPF_CMPXCHG */
{
"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful return",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
BPF_ALU64_REG(BPF_MOV, R0, R1),
BPF_STX_MEM(BPF_DW, R10, R1, -40),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
BPF_JMP_REG(BPF_JNE, R0, R1, 1),
BPF_ALU64_REG(BPF_SUB, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 40,
},
{
"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful store",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
BPF_ALU64_REG(BPF_MOV, R0, R1),
BPF_STX_MEM(BPF_DW, R10, R0, -40),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
BPF_LDX_MEM(BPF_DW, R0, R10, -40),
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_REG(BPF_SUB, R0, R2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 40,
},
{
"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure return",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
BPF_ALU64_REG(BPF_MOV, R0, R1),
BPF_ALU64_IMM(BPF_ADD, R0, 1),
BPF_STX_MEM(BPF_DW, R10, R1, -40),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
BPF_JMP_REG(BPF_JNE, R0, R1, 1),
BPF_ALU64_REG(BPF_SUB, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 40,
},
{
"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure store",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
BPF_ALU64_REG(BPF_MOV, R0, R1),
BPF_ALU64_IMM(BPF_ADD, R0, 1),
BPF_STX_MEM(BPF_DW, R10, R1, -40),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
BPF_LDX_MEM(BPF_DW, R0, R10, -40),
BPF_JMP_REG(BPF_JNE, R0, R1, 1),
BPF_ALU64_REG(BPF_SUB, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 40,
},
{
"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test side effects",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
BPF_ALU64_REG(BPF_MOV, R0, R1),
BPF_STX_MEM(BPF_DW, R10, R1, -40),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_REG(BPF_SUB, R0, R2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 40,
},
/* BPF_JMP32 | BPF_JEQ | BPF_K */
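/*
 * The BPF_JMP32 variants compare only the low 32 bits of their
 * operands. The "Large immediate" tests use constants that do not fit
 * in 16 bits, which would catch a JIT that truncates the 32-bit
 * immediate field when encoding the comparison.
 */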
{
"JMP32_JEQ_K: Small immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 123),
BPF_JMP32_IMM(BPF_JEQ, R0, 321, 1),
BPF_JMP32_IMM(BPF_JEQ, R0, 123, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 123 } }
},
{
"JMP32_JEQ_K: Large immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 12345678),
BPF_JMP32_IMM(BPF_JEQ, R0, 12345678 & 0xffff, 1),
BPF_JMP32_IMM(BPF_JEQ, R0, 12345678, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 12345678 } }
},
{
"JMP32_JEQ_K: negative immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -123),
BPF_JMP32_IMM(BPF_JEQ, R0, 123, 1),
BPF_JMP32_IMM(BPF_JEQ, R0, -123, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -123 } }
},
/* BPF_JMP32 | BPF_JEQ | BPF_X */
{
"JMP32_JEQ_X",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1234),
BPF_ALU32_IMM(BPF_MOV, R1, 4321),
BPF_JMP32_REG(BPF_JEQ, R0, R1, 2),
BPF_ALU32_IMM(BPF_MOV, R1, 1234),
BPF_JMP32_REG(BPF_JEQ, R0, R1, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1234 } }
},
/* BPF_JMP32 | BPF_JNE | BPF_K */
{
"JMP32_JNE_K: Small immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 123),
BPF_JMP32_IMM(BPF_JNE, R0, 123, 1),
BPF_JMP32_IMM(BPF_JNE, R0, 321, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 123 } }
},
{
"JMP32_JNE_K: Large immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 12345678),
BPF_JMP32_IMM(BPF_JNE, R0, 12345678, 1),
BPF_JMP32_IMM(BPF_JNE, R0, 12345678 & 0xffff, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 12345678 } }
},
{
"JMP32_JNE_K: negative immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -123),
BPF_JMP32_IMM(BPF_JNE, R0, -123, 1),
BPF_JMP32_IMM(BPF_JNE, R0, 123, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -123 } }
},
/* BPF_JMP32 | BPF_JNE | BPF_X */
{
"JMP32_JNE_X",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1234),
BPF_ALU32_IMM(BPF_MOV, R1, 1234),
BPF_JMP32_REG(BPF_JNE, R0, R1, 2),
BPF_ALU32_IMM(BPF_MOV, R1, 4321),
BPF_JMP32_REG(BPF_JNE, R0, R1, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1234 } }
},
/* BPF_JMP32 | BPF_JSET | BPF_K */
{
"JMP32_JSET_K: Small immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_JMP32_IMM(BPF_JSET, R0, 2, 1),
BPF_JMP32_IMM(BPF_JSET, R0, 3, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } }
},
{
"JMP32_JSET_K: Large immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x40000000),
BPF_JMP32_IMM(BPF_JSET, R0, 0x3fffffff, 1),
BPF_JMP32_IMM(BPF_JSET, R0, 0x60000000, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x40000000 } }
},
{
"JMP32_JSET_K: negative immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -123),
BPF_JMP32_IMM(BPF_JSET, R0, -1, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -123 } }
},
/* BPF_JMP32 | BPF_JSET | BPF_X */
{
"JMP32_JSET_X",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 8),
BPF_ALU32_IMM(BPF_MOV, R1, 7),
BPF_JMP32_REG(BPF_JSET, R0, R1, 2),
BPF_ALU32_IMM(BPF_MOV, R1, 8 | 2),
BPF_JMP32_REG(BPF_JNE, R0, R1, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 8 } }
},
/* BPF_JMP32 | BPF_JGT | BPF_K */
{
"JMP32_JGT_K: Small immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 123),
BPF_JMP32_IMM(BPF_JGT, R0, 123, 1),
BPF_JMP32_IMM(BPF_JGT, R0, 122, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 123 } }
},
{
"JMP32_JGT_K: Large immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
BPF_JMP32_IMM(BPF_JGT, R0, 0xffffffff, 1),
BPF_JMP32_IMM(BPF_JGT, R0, 0xfffffffd, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xfffffffe } }
},
/* BPF_JMP32 | BPF_JGT | BPF_X */
{
"JMP32_JGT_X",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
BPF_JMP32_REG(BPF_JGT, R0, R1, 2),
BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd),
BPF_JMP32_REG(BPF_JGT, R0, R1, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xfffffffe } }
},
/* BPF_JMP32 | BPF_JGE | BPF_K */
{
"JMP32_JGE_K: Small immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 123),
BPF_JMP32_IMM(BPF_JGE, R0, 124, 1),
BPF_JMP32_IMM(BPF_JGE, R0, 123, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 123 } }
},
{
"JMP32_JGE_K: Large immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
BPF_JMP32_IMM(BPF_JGE, R0, 0xffffffff, 1),
BPF_JMP32_IMM(BPF_JGE, R0, 0xfffffffe, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xfffffffe } }
},
/* BPF_JMP32 | BPF_JGE | BPF_X */
{
"JMP32_JGE_X",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
BPF_JMP32_REG(BPF_JGE, R0, R1, 2),
BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffe),
BPF_JMP32_REG(BPF_JGE, R0, R1, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xfffffffe } }
},
/* BPF_JMP32 | BPF_JLT | BPF_K */
{
"JMP32_JLT_K: Small immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 123),
BPF_JMP32_IMM(BPF_JLT, R0, 123, 1),
BPF_JMP32_IMM(BPF_JLT, R0, 124, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 123 } }
},
{
"JMP32_JLT_K: Large immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
BPF_JMP32_IMM(BPF_JLT, R0, 0xfffffffd, 1),
BPF_JMP32_IMM(BPF_JLT, R0, 0xffffffff, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xfffffffe } }
},
/* BPF_JMP32 | BPF_JLT | BPF_X */
{
"JMP32_JLT_X",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd),
BPF_JMP32_REG(BPF_JLT, R0, R1, 2),
BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
BPF_JMP32_REG(BPF_JLT, R0, R1, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xfffffffe } }
},
/* BPF_JMP32 | BPF_JLE | BPF_K */
{
"JMP32_JLE_K: Small immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 123),
BPF_JMP32_IMM(BPF_JLE, R0, 122, 1),
BPF_JMP32_IMM(BPF_JLE, R0, 123, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 123 } }
},
{
"JMP32_JLE_K: Large immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
BPF_JMP32_IMM(BPF_JLE, R0, 0xfffffffd, 1),
BPF_JMP32_IMM(BPF_JLE, R0, 0xfffffffe, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xfffffffe } }
},
/* BPF_JMP32 | BPF_JLE | BPF_X */
{
"JMP32_JLE_X",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd),
BPF_JMP32_REG(BPF_JLE, R0, R1, 2),
BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffe),
BPF_JMP32_REG(BPF_JLE, R0, R1, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xfffffffe } }
},
/* BPF_JMP32 | BPF_JSGT | BPF_K */
{
"JMP32_JSGT_K: Small immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -123),
BPF_JMP32_IMM(BPF_JSGT, R0, -123, 1),
BPF_JMP32_IMM(BPF_JSGT, R0, -124, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -123 } }
},
{
"JMP32_JSGT_K: Large immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
BPF_JMP32_IMM(BPF_JSGT, R0, -12345678, 1),
BPF_JMP32_IMM(BPF_JSGT, R0, -12345679, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -12345678 } }
},
/* BPF_JMP32 | BPF_JSGT | BPF_X */
{
"JMP32_JSGT_X",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
BPF_JMP32_REG(BPF_JSGT, R0, R1, 2),
BPF_ALU32_IMM(BPF_MOV, R1, -12345679),
BPF_JMP32_REG(BPF_JSGT, R0, R1, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -12345678 } }
},
/* BPF_JMP32 | BPF_JSGE | BPF_K */
{
"JMP32_JSGE_K: Small immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -123),
BPF_JMP32_IMM(BPF_JSGE, R0, -122, 1),
BPF_JMP32_IMM(BPF_JSGE, R0, -123, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -123 } }
},
{
"JMP32_JSGE_K: Large immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
BPF_JMP32_IMM(BPF_JSGE, R0, -12345677, 1),
BPF_JMP32_IMM(BPF_JSGE, R0, -12345678, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -12345678 } }
},
/* BPF_JMP32 | BPF_JSGE | BPF_X */
{
"JMP32_JSGE_X",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
BPF_ALU32_IMM(BPF_MOV, R1, -12345677),
BPF_JMP32_REG(BPF_JSGE, R0, R1, 2),
BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
BPF_JMP32_REG(BPF_JSGE, R0, R1, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -12345678 } }
},
/* BPF_JMP32 | BPF_JSLT | BPF_K */
{
"JMP32_JSLT_K: Small immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -123),
BPF_JMP32_IMM(BPF_JSLT, R0, -123, 1),
BPF_JMP32_IMM(BPF_JSLT, R0, -122, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -123 } }
},
{
"JMP32_JSLT_K: Large immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
BPF_JMP32_IMM(BPF_JSLT, R0, -12345678, 1),
BPF_JMP32_IMM(BPF_JSLT, R0, -12345677, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -12345678 } }
},
/* BPF_JMP32 | BPF_JSLT | BPF_X */
{
"JMP32_JSLT_X",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
BPF_JMP32_REG(BPF_JSLT, R0, R1, 2),
BPF_ALU32_IMM(BPF_MOV, R1, -12345677),
BPF_JMP32_REG(BPF_JSLT, R0, R1, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -12345678 } }
},
/* BPF_JMP32 | BPF_JSLE | BPF_K */
{
"JMP32_JSLE_K: Small immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -123),
BPF_JMP32_IMM(BPF_JSLE, R0, -124, 1),
BPF_JMP32_IMM(BPF_JSLE, R0, -123, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -123 } }
},
{
"JMP32_JSLE_K: Large immediate",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
BPF_JMP32_IMM(BPF_JSLE, R0, -12345679, 1),
BPF_JMP32_IMM(BPF_JSLE, R0, -12345678, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -12345678 } }
},
/* BPF_JMP32 | BPF_JSLE | BPF_X */
{
"JMP32_JSLE_X",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
BPF_ALU32_IMM(BPF_MOV, R1, -12345679),
BPF_JMP32_REG(BPF_JSLE, R0, R1, 2),
BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
BPF_JMP32_REG(BPF_JSLE, R0, R1, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -12345678 } }
},
/* BPF_JMP | BPF_EXIT */
{
"JMP_EXIT",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0x4711),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 0x4712),
},
INTERNAL,
{ },
{ { 0, 0x4711 } },
},
/* BPF_JMP | BPF_JA */
{
"JMP_JA: Unconditional jump: if (true) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JSLT | BPF_K */
{
"JMP_JSLT_K: Signed jump: if (-2 < -1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 0xfffffffffffffffeLL),
BPF_JMP_IMM(BPF_JSLT, R1, -1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSLT_K: Signed jump: if (-1 < -1) return 0",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
BPF_JMP_IMM(BPF_JSLT, R1, -1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JSGT | BPF_K */
{
"JMP_JSGT_K: Signed jump: if (-1 > -2) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
BPF_JMP_IMM(BPF_JSGT, R1, -2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSGT_K: Signed jump: if (-1 > -1) return 0",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
BPF_JMP_IMM(BPF_JSGT, R1, -1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JSLE | BPF_K */
{
"JMP_JSLE_K: Signed jump: if (-2 <= -1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 0xfffffffffffffffeLL),
BPF_JMP_IMM(BPF_JSLE, R1, -1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSLE_K: Signed jump: if (-1 <= -1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
BPF_JMP_IMM(BPF_JSLE, R1, -1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSLE_K: Signed jump: value walk 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_JMP_IMM(BPF_JSLE, R1, 0, 6),
BPF_ALU64_IMM(BPF_SUB, R1, 1),
BPF_JMP_IMM(BPF_JSLE, R1, 0, 4),
BPF_ALU64_IMM(BPF_SUB, R1, 1),
BPF_JMP_IMM(BPF_JSLE, R1, 0, 2),
BPF_ALU64_IMM(BPF_SUB, R1, 1),
BPF_JMP_IMM(BPF_JSLE, R1, 0, 1),
BPF_EXIT_INSN(), /* bad exit */
BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSLE_K: Signed jump: value walk 2",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_JMP_IMM(BPF_JSLE, R1, 0, 4),
BPF_ALU64_IMM(BPF_SUB, R1, 2),
BPF_JMP_IMM(BPF_JSLE, R1, 0, 2),
BPF_ALU64_IMM(BPF_SUB, R1, 2),
BPF_JMP_IMM(BPF_JSLE, R1, 0, 1),
BPF_EXIT_INSN(), /* bad exit */
BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JSGE | BPF_K */
{
"JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
BPF_JMP_IMM(BPF_JSGE, R1, -2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSGE_K: Signed jump: if (-1 >= -1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
BPF_JMP_IMM(BPF_JSGE, R1, -1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSGE_K: Signed jump: value walk 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, -3),
BPF_JMP_IMM(BPF_JSGE, R1, 0, 6),
BPF_ALU64_IMM(BPF_ADD, R1, 1),
BPF_JMP_IMM(BPF_JSGE, R1, 0, 4),
BPF_ALU64_IMM(BPF_ADD, R1, 1),
BPF_JMP_IMM(BPF_JSGE, R1, 0, 2),
BPF_ALU64_IMM(BPF_ADD, R1, 1),
BPF_JMP_IMM(BPF_JSGE, R1, 0, 1),
BPF_EXIT_INSN(), /* bad exit */
BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSGE_K: Signed jump: value walk 2",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, -3),
BPF_JMP_IMM(BPF_JSGE, R1, 0, 4),
BPF_ALU64_IMM(BPF_ADD, R1, 2),
BPF_JMP_IMM(BPF_JSGE, R1, 0, 2),
BPF_ALU64_IMM(BPF_ADD, R1, 2),
BPF_JMP_IMM(BPF_JSGE, R1, 0, 1),
BPF_EXIT_INSN(), /* bad exit */
BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JGT | BPF_K */
{
"JMP_JGT_K: if (3 > 2) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_JMP_IMM(BPF_JGT, R1, 2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JGT_K: Unsigned jump: if (-1 > 1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, -1),
BPF_JMP_IMM(BPF_JGT, R1, 1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JLT | BPF_K */
{
"JMP_JLT_K: if (2 < 3) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 2),
BPF_JMP_IMM(BPF_JLT, R1, 3, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JGT_K: Unsigned jump: if (1 < -1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 1),
BPF_JMP_IMM(BPF_JLT, R1, -1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JGE | BPF_K */
{
"JMP_JGE_K: if (3 >= 2) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_JMP_IMM(BPF_JGE, R1, 2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JLE | BPF_K */
{
"JMP_JLE_K: if (2 <= 3) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 2),
BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JGT | BPF_K jump backwards */
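/*
 * Branch offsets are counted in instructions, relative to the
 * instruction following the branch. BPF_LD_IMM64 takes two
 * instruction slots (as the inline note says), which is why the
 * backward jumps below need an offset of -6 to reach the "out:"
 * label.
 */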
{
"JMP_JGT_K: if (3 > 2) return 1 (jump backwards)",
.u.insns_int = {
BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */
BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */
BPF_LD_IMM64(R1, 3), /* note: this takes 2 insns */
BPF_JMP_IMM(BPF_JGT, R1, 2, -6), /* goto out */
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JGE_K: if (3 >= 3) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_JMP_IMM(BPF_JGE, R1, 3, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JLT | BPF_K jump backwards */
{
"JMP_JGT_K: if (2 < 3) return 1 (jump backwards)",
.u.insns_int = {
BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */
BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */
BPF_LD_IMM64(R1, 2), /* note: this takes 2 insns */
BPF_JMP_IMM(BPF_JLT, R1, 3, -6), /* goto out */
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JLE_K: if (3 <= 3) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_JMP_IMM(BPF_JLE, R1, 3, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JNE | BPF_K */
{
"JMP_JNE_K: if (3 != 2) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_JMP_IMM(BPF_JNE, R1, 2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JEQ | BPF_K */
{
"JMP_JEQ_K: if (3 == 3) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_JMP_IMM(BPF_JEQ, R1, 3, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JSET | BPF_K */
{
"JMP_JSET_K: if (0x3 & 0x2) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_JMP_IMM(BPF_JSET, R1, 2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSET_K: if (0x3 & 0xffffffff) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_JMP_IMM(BPF_JSET, R1, 0xffffffff, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JSGT | BPF_X */
{
"JMP_JSGT_X: Signed jump: if (-1 > -2) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, -1),
BPF_LD_IMM64(R2, -2),
BPF_JMP_REG(BPF_JSGT, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSGT_X: Signed jump: if (-1 > -1) return 0",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_LD_IMM64(R1, -1),
BPF_LD_IMM64(R2, -1),
BPF_JMP_REG(BPF_JSGT, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JSLT | BPF_X */
{
"JMP_JSLT_X: Signed jump: if (-2 < -1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, -1),
BPF_LD_IMM64(R2, -2),
BPF_JMP_REG(BPF_JSLT, R2, R1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSLT_X: Signed jump: if (-1 < -1) return 0",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_LD_IMM64(R1, -1),
BPF_LD_IMM64(R2, -1),
BPF_JMP_REG(BPF_JSLT, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JSGE | BPF_X */
{
"JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, -1),
BPF_LD_IMM64(R2, -2),
BPF_JMP_REG(BPF_JSGE, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSGE_X: Signed jump: if (-1 >= -1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, -1),
BPF_LD_IMM64(R2, -1),
BPF_JMP_REG(BPF_JSGE, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JSLE | BPF_X */
{
"JMP_JSLE_X: Signed jump: if (-2 <= -1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, -1),
BPF_LD_IMM64(R2, -2),
BPF_JMP_REG(BPF_JSLE, R2, R1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSLE_X: Signed jump: if (-1 <= -1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, -1),
BPF_LD_IMM64(R2, -1),
BPF_JMP_REG(BPF_JSLE, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JGT | BPF_X */
{
"JMP_JGT_X: if (3 > 2) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JGT, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JGT_X: Unsigned jump: if (-1 > 1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, -1),
BPF_LD_IMM64(R2, 1),
BPF_JMP_REG(BPF_JGT, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JLT | BPF_X */
{
"JMP_JLT_X: if (2 < 3) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JLT, R2, R1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JLT_X: Unsigned jump: if (1 < -1) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, -1),
BPF_LD_IMM64(R2, 1),
BPF_JMP_REG(BPF_JLT, R2, R1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JGE | BPF_X */
{
"JMP_JGE_X: if (3 >= 2) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JGE, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JGE_X: if (3 >= 3) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 3),
BPF_JMP_REG(BPF_JGE, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JLE | BPF_X */
{
"JMP_JLE_X: if (2 <= 3) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JLE, R2, R1, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JLE_X: if (3 <= 3) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 3),
BPF_JMP_REG(BPF_JLE, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
/* Mainly testing JIT + imm64 here. */
"JMP_JGE_X: ldimm64 test 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JGE, R1, R2, 2),
BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xeeeeeeeeU } },
},
{
"JMP_JGE_X: ldimm64 test 2",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JGE, R1, R2, 0),
BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffffffffU } },
},
{
"JMP_JGE_X: ldimm64 test 3",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JGE, R1, R2, 4),
BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JLE_X: ldimm64 test 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JLE, R2, R1, 2),
BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xeeeeeeeeU } },
},
{
"JMP_JLE_X: ldimm64 test 2",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JLE, R2, R1, 0),
BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xffffffffU } },
},
{
"JMP_JLE_X: ldimm64 test 3",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JLE, R2, R1, 4),
BPF_LD_IMM64(R0, 0xffffffffffffffffULL),
BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JNE | BPF_X */
{
"JMP_JNE_X: if (3 != 2) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JNE, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JEQ | BPF_X */
{
"JMP_JEQ_X: if (3 == 3) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 3),
BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
/* BPF_JMP | BPF_JSET | BPF_X */
{
"JMP_JSET_X: if (0x3 & 0x2) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
BPF_JMP_REG(BPF_JSET, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JSET_X: if (0x3 & 0xffffffff) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 0xffffffff),
BPF_JMP_REG(BPF_JSET, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JMP_JA: Jump, gap, jump, ...",
{ },
CLASSIC | FLAG_NO_DATA,
{ },
{ { 0, 0xababcbac } },
.fill_helper = bpf_fill_ja,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Maximum possible literals",
{ },
CLASSIC | FLAG_NO_DATA,
{ },
{ { 0, 0xffffffff } },
.fill_helper = bpf_fill_maxinsns1,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Single literal",
{ },
CLASSIC | FLAG_NO_DATA,
{ },
{ { 0, 0xfefefefe } },
.fill_helper = bpf_fill_maxinsns2,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Run/add until end",
{ },
CLASSIC | FLAG_NO_DATA,
{ },
{ { 0, 0x947bf368 } },
.fill_helper = bpf_fill_maxinsns3,
},
{
"BPF_MAXINSNS: Too many instructions",
{ },
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
{ },
.fill_helper = bpf_fill_maxinsns4,
.expected_errcode = -EINVAL,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Very long jump",
{ },
CLASSIC | FLAG_NO_DATA,
{ },
{ { 0, 0xabababab } },
.fill_helper = bpf_fill_maxinsns5,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Ctx heavy transformations",
{ },
CLASSIC,
{ },
{
{ 1, SKB_VLAN_PRESENT },
{ 10, SKB_VLAN_PRESENT }
},
.fill_helper = bpf_fill_maxinsns6,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Call heavy transformations",
{ },
CLASSIC | FLAG_NO_DATA,
{ },
{ { 1, 0 }, { 10, 0 } },
.fill_helper = bpf_fill_maxinsns7,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Jump heavy test",
{ },
CLASSIC | FLAG_NO_DATA,
{ },
{ { 0, 0xffffffff } },
.fill_helper = bpf_fill_maxinsns8,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Very long jump backwards",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 0xcbababab } },
.fill_helper = bpf_fill_maxinsns9,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Edge hopping nuthouse",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 0xabababac } },
.fill_helper = bpf_fill_maxinsns10,
},
{
"BPF_MAXINSNS: Jump, gap, jump, ...",
{ },
CLASSIC | FLAG_NO_DATA,
{ },
{ { 0, 0xababcbac } },
.fill_helper = bpf_fill_maxinsns11,
},
{
"BPF_MAXINSNS: jump over MSH",
{ },
CLASSIC | FLAG_EXPECTED_FAIL,
{ 0xfa, 0xfb, 0xfc, 0xfd, },
{ { 4, 0xabababab } },
.fill_helper = bpf_fill_maxinsns12,
.expected_errcode = -EINVAL,
},
{
"BPF_MAXINSNS: exec all MSH",
{ },
CLASSIC,
{ 0xfa, 0xfb, 0xfc, 0xfd, },
{ { 4, 0xababab83 } },
.fill_helper = bpf_fill_maxinsns13,
},
{
"BPF_MAXINSNS: ld_abs+get_processor_id",
{ },
CLASSIC,
{ },
{ { 1, 0xbee } },
.fill_helper = bpf_fill_ld_abs_get_processor_id,
},
/*
* LD_IND / LD_ABS on fragmented SKBs
*/
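/*
 * The test runner builds these skbs with .frag_data in a paged
 * fragment beyond the linear head (0x40 bytes here, judging by the
 * offsets used). Loads at offsets >= 0x40 cannot be served from the
 * head and must take the slow path that copies from the fragments;
 * the "mixed head/frag" cases straddle the boundary.
 */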
{
"LD_IND byte frag",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x0),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_SKB_FRAG,
{ },
{ {0x40, 0x42} },
.frag_data = {
0x42, 0x00, 0x00, 0x00,
0x43, 0x44, 0x00, 0x00,
0x21, 0x07, 0x19, 0x83,
},
},
{
"LD_IND halfword frag",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x4),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_SKB_FRAG,
{ },
{ {0x40, 0x4344} },
.frag_data = {
0x42, 0x00, 0x00, 0x00,
0x43, 0x44, 0x00, 0x00,
0x21, 0x07, 0x19, 0x83,
},
},
{
"LD_IND word frag",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x8),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_SKB_FRAG,
{ },
{ {0x40, 0x21071983} },
.frag_data = {
0x42, 0x00, 0x00, 0x00,
0x43, 0x44, 0x00, 0x00,
0x21, 0x07, 0x19, 0x83,
},
},
{
"LD_IND halfword mixed head/frag",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_SKB_FRAG,
{ [0x3e] = 0x25, [0x3f] = 0x05, },
{ {0x40, 0x0519} },
.frag_data = { 0x19, 0x82 },
},
{
"LD_IND word mixed head/frag",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_SKB_FRAG,
{ [0x3e] = 0x25, [0x3f] = 0x05, },
{ {0x40, 0x25051982} },
.frag_data = { 0x19, 0x82 },
},
{
"LD_ABS byte frag",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x40),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_SKB_FRAG,
{ },
{ {0x40, 0x42} },
.frag_data = {
0x42, 0x00, 0x00, 0x00,
0x43, 0x44, 0x00, 0x00,
0x21, 0x07, 0x19, 0x83,
},
},
{
"LD_ABS halfword frag",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x44),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_SKB_FRAG,
{ },
{ {0x40, 0x4344} },
.frag_data = {
0x42, 0x00, 0x00, 0x00,
0x43, 0x44, 0x00, 0x00,
0x21, 0x07, 0x19, 0x83,
},
},
{
"LD_ABS word frag",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x48),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_SKB_FRAG,
{ },
{ {0x40, 0x21071983} },
.frag_data = {
0x42, 0x00, 0x00, 0x00,
0x43, 0x44, 0x00, 0x00,
0x21, 0x07, 0x19, 0x83,
},
},
{
"LD_ABS halfword mixed head/frag",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_SKB_FRAG,
{ [0x3e] = 0x25, [0x3f] = 0x05, },
{ {0x40, 0x0519} },
.frag_data = { 0x19, 0x82 },
},
{
"LD_ABS word mixed head/frag",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3e),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_SKB_FRAG,
{ [0x3e] = 0x25, [0x3f] = 0x05, },
{ {0x40, 0x25051982} },
.frag_data = { 0x19, 0x82 },
},
/*
* LD_IND / LD_ABS on non fragmented SKBs
*/
{
/*
* this tests that the JIT/interpreter correctly resets X
* before using it in an LD_IND instruction.
*/
"LD_IND byte default X",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x1] = 0x42 },
{ {0x40, 0x42 } },
},
{
"LD_IND byte positive offset",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x40, 0x82 } },
},
{
"LD_IND byte negative offset",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x1),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x40, 0x05 } },
},
{
"LD_IND byte positive offset, all ff",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
{ {0x40, 0xff } },
},
{
"LD_IND byte positive offset, out of bounds",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x3f, 0 }, },
},
{
"LD_IND byte negative offset, out of bounds",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x3f),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x3f, 0 } },
},
{
"LD_IND byte negative offset, multiple calls",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x3b),
BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 1),
BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 2),
BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 3),
BPF_STMT(BPF_LD | BPF_IND | BPF_B, SKF_LL_OFF + 4),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x40, 0x82 }, },
},
{
"LD_IND halfword positive offset",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x2),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{
[0x1c] = 0xaa, [0x1d] = 0x55,
[0x1e] = 0xbb, [0x1f] = 0x66,
[0x20] = 0xcc, [0x21] = 0x77,
[0x22] = 0xdd, [0x23] = 0x88,
},
{ {0x40, 0xdd88 } },
},
{
"LD_IND halfword negative offset",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x2),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{
[0x1c] = 0xaa, [0x1d] = 0x55,
[0x1e] = 0xbb, [0x1f] = 0x66,
[0x20] = 0xcc, [0x21] = 0x77,
[0x22] = 0xdd, [0x23] = 0x88,
},
{ {0x40, 0xbb66 } },
},
{
"LD_IND halfword unaligned",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{
[0x1c] = 0xaa, [0x1d] = 0x55,
[0x1e] = 0xbb, [0x1f] = 0x66,
[0x20] = 0xcc, [0x21] = 0x77,
[0x22] = 0xdd, [0x23] = 0x88,
},
{ {0x40, 0x66cc } },
},
{
"LD_IND halfword positive offset, all ff",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x3d),
BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
{ {0x40, 0xffff } },
},
{
"LD_IND halfword positive offset, out of bounds",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x1),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x3f, 0 }, },
},
{
"LD_IND halfword negative offset, out of bounds",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x3f),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x3f, 0 } },
},
{
"LD_IND word positive offset",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x4),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{
[0x1c] = 0xaa, [0x1d] = 0x55,
[0x1e] = 0xbb, [0x1f] = 0x66,
[0x20] = 0xcc, [0x21] = 0x77,
[0x22] = 0xdd, [0x23] = 0x88,
[0x24] = 0xee, [0x25] = 0x99,
[0x26] = 0xff, [0x27] = 0xaa,
},
{ {0x40, 0xee99ffaa } },
},
{
"LD_IND word negative offset",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x4),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{
[0x1c] = 0xaa, [0x1d] = 0x55,
[0x1e] = 0xbb, [0x1f] = 0x66,
[0x20] = 0xcc, [0x21] = 0x77,
[0x22] = 0xdd, [0x23] = 0x88,
[0x24] = 0xee, [0x25] = 0x99,
[0x26] = 0xff, [0x27] = 0xaa,
},
{ {0x40, 0xaa55bb66 } },
},
{
"LD_IND word unaligned (addr & 3 == 2)",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{
[0x1c] = 0xaa, [0x1d] = 0x55,
[0x1e] = 0xbb, [0x1f] = 0x66,
[0x20] = 0xcc, [0x21] = 0x77,
[0x22] = 0xdd, [0x23] = 0x88,
[0x24] = 0xee, [0x25] = 0x99,
[0x26] = 0xff, [0x27] = 0xaa,
},
{ {0x40, 0xbb66cc77 } },
},
{
"LD_IND word unaligned (addr & 3 == 1)",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{
[0x1c] = 0xaa, [0x1d] = 0x55,
[0x1e] = 0xbb, [0x1f] = 0x66,
[0x20] = 0xcc, [0x21] = 0x77,
[0x22] = 0xdd, [0x23] = 0x88,
[0x24] = 0xee, [0x25] = 0x99,
[0x26] = 0xff, [0x27] = 0xaa,
},
{ {0x40, 0x55bb66cc } },
},
{
"LD_IND word unaligned (addr & 3 == 3)",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x1),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{
[0x1c] = 0xaa, [0x1d] = 0x55,
[0x1e] = 0xbb, [0x1f] = 0x66,
[0x20] = 0xcc, [0x21] = 0x77,
[0x22] = 0xdd, [0x23] = 0x88,
[0x24] = 0xee, [0x25] = 0x99,
[0x26] = 0xff, [0x27] = 0xaa,
},
{ {0x40, 0x66cc77dd } },
},
{
"LD_IND word positive offset, all ff",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x3b),
BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
{ {0x40, 0xffffffff } },
},
{
"LD_IND word positive offset, out of bounds",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x1),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x3f, 0 }, },
},
{
"LD_IND word negative offset, out of bounds",
.u.insns = {
BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3f),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x3f, 0 } },
},
{
"LD_ABS byte",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x20),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{
[0x1c] = 0xaa, [0x1d] = 0x55,
[0x1e] = 0xbb, [0x1f] = 0x66,
[0x20] = 0xcc, [0x21] = 0x77,
[0x22] = 0xdd, [0x23] = 0x88,
[0x24] = 0xee, [0x25] = 0x99,
[0x26] = 0xff, [0x27] = 0xaa,
},
{ {0x40, 0xcc } },
},
{
"LD_ABS byte positive offset, all ff",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
{ {0x40, 0xff } },
},
{
"LD_ABS byte positive offset, out of bounds",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x3f),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x3f, 0 }, },
},
{
"LD_ABS byte negative offset, out of bounds load",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_B, -1),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_EXPECTED_FAIL,
.expected_errcode = -EINVAL,
},
{
"LD_ABS byte negative offset, in bounds",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x40, 0x82 }, },
},
{
"LD_ABS byte negative offset, out of bounds",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x3f, 0 }, },
},
{
"LD_ABS byte negative offset, multiple calls",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3c),
BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3d),
BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3e),
BPF_STMT(BPF_LD | BPF_ABS | BPF_B, SKF_LL_OFF + 0x3f),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x40, 0x82 }, },
},
{
"LD_ABS halfword",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x22),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{
[0x1c] = 0xaa, [0x1d] = 0x55,
[0x1e] = 0xbb, [0x1f] = 0x66,
[0x20] = 0xcc, [0x21] = 0x77,
[0x22] = 0xdd, [0x23] = 0x88,
[0x24] = 0xee, [0x25] = 0x99,
[0x26] = 0xff, [0x27] = 0xaa,
},
{ {0x40, 0xdd88 } },
},
{
"LD_ABS halfword unaligned",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x25),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{
[0x1c] = 0xaa, [0x1d] = 0x55,
[0x1e] = 0xbb, [0x1f] = 0x66,
[0x20] = 0xcc, [0x21] = 0x77,
[0x22] = 0xdd, [0x23] = 0x88,
[0x24] = 0xee, [0x25] = 0x99,
[0x26] = 0xff, [0x27] = 0xaa,
},
{ {0x40, 0x99ff } },
},
{
"LD_ABS halfword positive offset, all ff",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3e),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
{ {0x40, 0xffff } },
},
{
"LD_ABS halfword positive offset, out of bounds",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x3f, 0 }, },
},
{
"LD_ABS halfword negative offset, out of bounds load",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_H, -1),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_EXPECTED_FAIL,
.expected_errcode = -EINVAL,
},
{
"LD_ABS halfword negative offset, in bounds",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x40, 0x1982 }, },
},
{
"LD_ABS halfword negative offset, out of bounds",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_H, SKF_LL_OFF + 0x3e),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x3f, 0 }, },
},
{
"LD_ABS word",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x1c),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{
[0x1c] = 0xaa, [0x1d] = 0x55,
[0x1e] = 0xbb, [0x1f] = 0x66,
[0x20] = 0xcc, [0x21] = 0x77,
[0x22] = 0xdd, [0x23] = 0x88,
[0x24] = 0xee, [0x25] = 0x99,
[0x26] = 0xff, [0x27] = 0xaa,
},
{ {0x40, 0xaa55bb66 } },
},
{
"LD_ABS word unaligned (addr & 3 == 2)",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x22),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{
[0x1c] = 0xaa, [0x1d] = 0x55,
[0x1e] = 0xbb, [0x1f] = 0x66,
[0x20] = 0xcc, [0x21] = 0x77,
[0x22] = 0xdd, [0x23] = 0x88,
[0x24] = 0xee, [0x25] = 0x99,
[0x26] = 0xff, [0x27] = 0xaa,
},
{ {0x40, 0xdd88ee99 } },
},
{
"LD_ABS word unaligned (addr & 3 == 1)",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x21),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{
[0x1c] = 0xaa, [0x1d] = 0x55,
[0x1e] = 0xbb, [0x1f] = 0x66,
[0x20] = 0xcc, [0x21] = 0x77,
[0x22] = 0xdd, [0x23] = 0x88,
[0x24] = 0xee, [0x25] = 0x99,
[0x26] = 0xff, [0x27] = 0xaa,
},
{ {0x40, 0x77dd88ee } },
},
{
"LD_ABS word unaligned (addr & 3 == 3)",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x23),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{
[0x1c] = 0xaa, [0x1d] = 0x55,
[0x1e] = 0xbb, [0x1f] = 0x66,
[0x20] = 0xcc, [0x21] = 0x77,
[0x22] = 0xdd, [0x23] = 0x88,
[0x24] = 0xee, [0x25] = 0x99,
[0x26] = 0xff, [0x27] = 0xaa,
},
{ {0x40, 0x88ee99ff } },
},
{
"LD_ABS word positive offset, all ff",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3c),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0xff, [0x3d] = 0xff, [0x3e] = 0xff, [0x3f] = 0xff },
{ {0x40, 0xffffffff } },
},
{
"LD_ABS word positive offset, out of bounds",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3f),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x3f, 0 }, },
},
{
"LD_ABS word negative offset, out of bounds load",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_W, -1),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_EXPECTED_FAIL,
.expected_errcode = -EINVAL,
},
{
"LD_ABS word negative offset, in bounds",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x40, 0x25051982 }, },
},
{
"LD_ABS word negative offset, out of bounds",
.u.insns = {
BPF_STMT(BPF_LD | BPF_ABS | BPF_W, SKF_LL_OFF + 0x3c),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x3f, 0 }, },
},
{
"LDX_MSH standalone, preserved A",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x40, 0xffeebbaa }, },
},
{
"LDX_MSH standalone, preserved A 2",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 0x175e9d63),
BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3d),
BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e),
BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3f),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x40, 0x175e9d63 }, },
},
{
"LDX_MSH standalone, test result 1",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3c),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x40, 0x14 }, },
},
{
"LDX_MSH standalone, test result 2",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x3e),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x40, 0x24 }, },
},
{
"LDX_MSH standalone, negative offset",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, -1),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x40, 0 }, },
},
{
"LDX_MSH standalone, negative offset 2",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, SKF_LL_OFF + 0x3e),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x40, 0x24 }, },
},
{
"LDX_MSH standalone, out of bounds",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 0xffeebbaa),
BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0x40),
BPF_STMT(BPF_MISC | BPF_TXA, 0),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC,
{ [0x3c] = 0x25, [0x3d] = 0x05, [0x3e] = 0x19, [0x3f] = 0x82 },
{ {0x40, 0 }, },
},
/*
* verify that the interpreter or JIT correctly sets A and X
* to 0.
*/
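/*
 * Both registers are guaranteed to be 0 at program start in classic
 * BPF, so e.g. "ADD default X" computes 0x42 + 0 and must return
 * 0x42, while "MUL default A" computes 0 * 0x66 and must return 0.
 */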
{
"ADD default X",
.u.insns = {
/*
* A = 0x42
* A = A + X
* ret A
*/
BPF_STMT(BPF_LD | BPF_IMM, 0x42),
BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_NO_DATA,
{},
{ {0x1, 0x42 } },
},
{
"ADD default A",
.u.insns = {
/*
* A = A + 0x42
* ret A
*/
BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0x42),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_NO_DATA,
{},
{ {0x1, 0x42 } },
},
{
"SUB default X",
.u.insns = {
/*
* A = 0x66
* A = A - X
* ret A
*/
BPF_STMT(BPF_LD | BPF_IMM, 0x66),
BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_NO_DATA,
{},
{ {0x1, 0x66 } },
},
{
"SUB default A",
.u.insns = {
/*
* A = A - -0x66
* ret A
*/
BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, -0x66),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_NO_DATA,
{},
{ {0x1, 0x66 } },
},
{
"MUL default X",
.u.insns = {
/*
* A = 0x42
* A = A * X
* ret A
*/
BPF_STMT(BPF_LD | BPF_IMM, 0x42),
BPF_STMT(BPF_ALU | BPF_MUL | BPF_X, 0),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_NO_DATA,
{},
{ {0x1, 0x0 } },
},
{
"MUL default A",
.u.insns = {
/*
* A = A * 0x66
* ret A
*/
BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 0x66),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_NO_DATA,
{},
{ {0x1, 0x0 } },
},
{
"DIV default X",
.u.insns = {
/*
* A = 0x42
* A = A / X ; this halts the filter execution if X is 0
* ret 0x42
*/
BPF_STMT(BPF_LD | BPF_IMM, 0x42),
BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
BPF_STMT(BPF_RET | BPF_K, 0x42),
},
CLASSIC | FLAG_NO_DATA,
{},
{ {0x1, 0x0 } },
},
{
"DIV default A",
.u.insns = {
/*
* A = A / 1
* ret A
*/
BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x1),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_NO_DATA,
{},
{ {0x1, 0x0 } },
},
{
"MOD default X",
.u.insns = {
/*
* A = 0x42
* A = A mod X ; this halts the filter execution if X is 0
* ret 0x42
*/
BPF_STMT(BPF_LD | BPF_IMM, 0x42),
BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0),
BPF_STMT(BPF_RET | BPF_K, 0x42),
},
CLASSIC | FLAG_NO_DATA,
{},
{ {0x1, 0x0 } },
},
{
"MOD default A",
.u.insns = {
/*
* A = A mod 1
* ret A
*/
BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x1),
BPF_STMT(BPF_RET | BPF_A, 0x0),
},
CLASSIC | FLAG_NO_DATA,
{},
{ {0x1, 0x0 } },
},
{
"JMP EQ default A",
.u.insns = {
/*
* cmp A, 0x0, 0, 1
* ret 0x42
* ret 0x66
*/
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0, 0, 1),
BPF_STMT(BPF_RET | BPF_K, 0x42),
BPF_STMT(BPF_RET | BPF_K, 0x66),
},
CLASSIC | FLAG_NO_DATA,
{},
{ {0x1, 0x42 } },
},
{
"JMP EQ default X",
.u.insns = {
/*
* A = 0x0
* cmp A, X, 0, 1
* ret 0x42
* ret 0x66
*/
BPF_STMT(BPF_LD | BPF_IMM, 0x0),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0x0, 0, 1),
BPF_STMT(BPF_RET | BPF_K, 0x42),
BPF_STMT(BPF_RET | BPF_K, 0x66),
},
CLASSIC | FLAG_NO_DATA,
{},
{ {0x1, 0x42 } },
},
/* Checking interpreter vs JIT wrt signed extended imms. */
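/*
 * The 32-bit immediate of a 64-bit BPF_JMP compare is sign-extended
 * to 64 bits, while ALU32 moves zero-extend their destination. In
 * "test 1" below, R2 ends up as 0x00000000fefb0000 but the immediate
 * -17104896 becomes 0xfffffffffefb0000, so the JNE is taken and the
 * test returns 1. "test 3" compares against a zero-extended register
 * instead, so the values match and it returns 2.
 */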
{
"JNE signed compare, test 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
BPF_MOV64_REG(R2, R1),
BPF_ALU64_REG(BPF_AND, R2, R3),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_JMP_IMM(BPF_JNE, R2, -17104896, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JNE signed compare, test 2",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
BPF_MOV64_REG(R2, R1),
BPF_ALU64_REG(BPF_AND, R2, R3),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_JMP_IMM(BPF_JNE, R2, 0xfefb0000, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JNE signed compare, test 3",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
BPF_ALU32_IMM(BPF_MOV, R4, 0xfefb0000),
BPF_MOV64_REG(R2, R1),
BPF_ALU64_REG(BPF_AND, R2, R3),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_JMP_REG(BPF_JNE, R2, R4, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"JNE signed compare, test 4",
.u.insns_int = {
BPF_LD_IMM64(R1, -17104896),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_JMP_IMM(BPF_JNE, R1, -17104896, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"JNE signed compare, test 5",
.u.insns_int = {
BPF_LD_IMM64(R1, 0xfefb0000),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_JMP_IMM(BPF_JNE, R1, 0xfefb0000, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
{
"JNE signed compare, test 6",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x7efb0000),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_JMP_IMM(BPF_JNE, R1, 0x7efb0000, 1),
BPF_ALU32_IMM(BPF_MOV, R0, 2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 2 } },
},
{
"JNE signed compare, test 7",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 0xffff0000),
BPF_STMT(BPF_MISC | BPF_TAX, 0),
BPF_STMT(BPF_LD | BPF_IMM, 0xfefbbc12),
BPF_STMT(BPF_ALU | BPF_AND | BPF_X, 0),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0xfefb0000, 1, 0),
BPF_STMT(BPF_RET | BPF_K, 1),
BPF_STMT(BPF_RET | BPF_K, 2),
},
CLASSIC | FLAG_NO_DATA,
{},
{ { 0, 2 } },
},
/* BPF_LDX_MEM with operand aliasing */
{
"LDX_MEM_B: operand register aliasing",
.u.insns_int = {
BPF_ST_MEM(BPF_B, R10, -8, 123),
BPF_MOV64_REG(R0, R10),
BPF_LDX_MEM(BPF_B, R0, R0, -8),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 123 } },
.stack_depth = 8,
},
{
"LDX_MEM_H: operand register aliasing",
.u.insns_int = {
BPF_ST_MEM(BPF_H, R10, -8, 12345),
BPF_MOV64_REG(R0, R10),
BPF_LDX_MEM(BPF_H, R0, R0, -8),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 12345 } },
.stack_depth = 8,
},
{
"LDX_MEM_W: operand register aliasing",
.u.insns_int = {
BPF_ST_MEM(BPF_W, R10, -8, 123456789),
BPF_MOV64_REG(R0, R10),
BPF_LDX_MEM(BPF_W, R0, R0, -8),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 123456789 } },
.stack_depth = 8,
},
{
"LDX_MEM_DW: operand register aliasing",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x123456789abcdefULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
BPF_MOV64_REG(R0, R10),
BPF_LDX_MEM(BPF_DW, R0, R0, -8),
BPF_ALU64_REG(BPF_SUB, R0, R1),
BPF_MOV64_REG(R1, R0),
BPF_ALU64_IMM(BPF_RSH, R1, 32),
BPF_ALU64_REG(BPF_OR, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
/*
* Register (non-)clobbering tests for the case where a JIT implements
* complex ALU or ATOMIC operations via function calls. If so, the
* function call must be transparent to the eBPF registers. The JIT
* must therefore save and restore relevant registers across the call.
* The following tests check that the eBPF registers retain their
* values after such an operation. Mainly intended for complex ALU
* and atomic operations, but we run it for all. You never know...
*
* Note that each operation should be tested twice with different
* destinations, to check preservation for all registers.
*/
#define BPF_TEST_CLOBBER_ALU(alu, op, dst, src) \
{ \
#alu "_" #op " to " #dst ": no clobbering", \
.u.insns_int = { \
BPF_ALU64_IMM(BPF_MOV, R0, R0), \
BPF_ALU64_IMM(BPF_MOV, R1, R1), \
BPF_ALU64_IMM(BPF_MOV, R2, R2), \
BPF_ALU64_IMM(BPF_MOV, R3, R3), \
BPF_ALU64_IMM(BPF_MOV, R4, R4), \
BPF_ALU64_IMM(BPF_MOV, R5, R5), \
BPF_ALU64_IMM(BPF_MOV, R6, R6), \
BPF_ALU64_IMM(BPF_MOV, R7, R7), \
BPF_ALU64_IMM(BPF_MOV, R8, R8), \
BPF_ALU64_IMM(BPF_MOV, R9, R9), \
BPF_##alu(BPF_ ##op, dst, src), \
BPF_ALU32_IMM(BPF_MOV, dst, dst), \
BPF_JMP_IMM(BPF_JNE, R0, R0, 10), \
BPF_JMP_IMM(BPF_JNE, R1, R1, 9), \
BPF_JMP_IMM(BPF_JNE, R2, R2, 8), \
BPF_JMP_IMM(BPF_JNE, R3, R3, 7), \
BPF_JMP_IMM(BPF_JNE, R4, R4, 6), \
BPF_JMP_IMM(BPF_JNE, R5, R5, 5), \
BPF_JMP_IMM(BPF_JNE, R6, R6, 4), \
BPF_JMP_IMM(BPF_JNE, R7, R7, 3), \
BPF_JMP_IMM(BPF_JNE, R8, R8, 2), \
BPF_JMP_IMM(BPF_JNE, R9, R9, 1), \
BPF_ALU64_IMM(BPF_MOV, R0, 1), \
BPF_EXIT_INSN(), \
}, \
INTERNAL, \
{ }, \
{ { 0, 1 } } \
}
/* ALU64 operations, register clobbering */
BPF_TEST_CLOBBER_ALU(ALU64_IMM, AND, R8, 123456789),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, AND, R9, 123456789),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, OR, R8, 123456789),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, OR, R9, 123456789),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, XOR, R8, 123456789),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, XOR, R9, 123456789),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, LSH, R8, 12),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, LSH, R9, 12),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, RSH, R8, 12),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, RSH, R9, 12),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, ARSH, R8, 12),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, ARSH, R9, 12),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, ADD, R8, 123456789),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, ADD, R9, 123456789),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, SUB, R8, 123456789),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, SUB, R9, 123456789),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, MUL, R8, 123456789),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, MUL, R9, 123456789),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, DIV, R8, 123456789),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, DIV, R9, 123456789),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, MOD, R8, 123456789),
BPF_TEST_CLOBBER_ALU(ALU64_IMM, MOD, R9, 123456789),
/* ALU32 immediate operations, register clobbering */
BPF_TEST_CLOBBER_ALU(ALU32_IMM, AND, R8, 123456789),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, AND, R9, 123456789),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, OR, R8, 123456789),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, OR, R9, 123456789),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, XOR, R8, 123456789),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, XOR, R9, 123456789),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, LSH, R8, 12),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, LSH, R9, 12),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, RSH, R8, 12),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, RSH, R9, 12),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, ARSH, R8, 12),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, ARSH, R9, 12),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, ADD, R8, 123456789),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, ADD, R9, 123456789),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, SUB, R8, 123456789),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, SUB, R9, 123456789),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, MUL, R8, 123456789),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, MUL, R9, 123456789),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, DIV, R8, 123456789),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, DIV, R9, 123456789),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, MOD, R8, 123456789),
BPF_TEST_CLOBBER_ALU(ALU32_IMM, MOD, R9, 123456789),
/* ALU64 register operations, register clobbering */
BPF_TEST_CLOBBER_ALU(ALU64_REG, AND, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, AND, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, OR, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, OR, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, XOR, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, XOR, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, LSH, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, LSH, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, RSH, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, RSH, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, ARSH, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, ARSH, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, ADD, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, ADD, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, SUB, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, SUB, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, MUL, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, MUL, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, DIV, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, DIV, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, MOD, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU64_REG, MOD, R9, R1),
/* ALU32 register operations, register clobbering */
BPF_TEST_CLOBBER_ALU(ALU32_REG, AND, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, AND, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, OR, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, OR, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, XOR, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, XOR, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, LSH, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, LSH, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, RSH, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, RSH, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, ARSH, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, ARSH, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, ADD, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, ADD, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, SUB, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, SUB, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, MUL, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, MUL, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, DIV, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, DIV, R9, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, MOD, R8, R1),
BPF_TEST_CLOBBER_ALU(ALU32_REG, MOD, R9, R1),
#undef BPF_TEST_CLOBBER_ALU
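/*
 * The atomic variant below presets the memory operand so that the
 * register state is predictable afterwards: fetch variants return the
 * old memory value in R1, which holds 1, so the location is preset
 * to 1; CMPXCHG returns the old value in R0, which holds 0, so the
 * location is preset to 0 in that case.
 */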
#define BPF_TEST_CLOBBER_ATOMIC(width, op) \
{ \
"Atomic_" #width " " #op ": no clobbering", \
.u.insns_int = { \
BPF_ALU64_IMM(BPF_MOV, R0, 0), \
BPF_ALU64_IMM(BPF_MOV, R1, 1), \
BPF_ALU64_IMM(BPF_MOV, R2, 2), \
BPF_ALU64_IMM(BPF_MOV, R3, 3), \
BPF_ALU64_IMM(BPF_MOV, R4, 4), \
BPF_ALU64_IMM(BPF_MOV, R5, 5), \
BPF_ALU64_IMM(BPF_MOV, R6, 6), \
BPF_ALU64_IMM(BPF_MOV, R7, 7), \
BPF_ALU64_IMM(BPF_MOV, R8, 8), \
BPF_ALU64_IMM(BPF_MOV, R9, 9), \
BPF_ST_MEM(width, R10, -8, \
(op) == BPF_CMPXCHG ? 0 : \
(op) & BPF_FETCH ? 1 : 0), \
BPF_ATOMIC_OP(width, op, R10, R1, -8), \
BPF_JMP_IMM(BPF_JNE, R0, 0, 10), \
BPF_JMP_IMM(BPF_JNE, R1, 1, 9), \
BPF_JMP_IMM(BPF_JNE, R2, 2, 8), \
BPF_JMP_IMM(BPF_JNE, R3, 3, 7), \
BPF_JMP_IMM(BPF_JNE, R4, 4, 6), \
BPF_JMP_IMM(BPF_JNE, R5, 5, 5), \
BPF_JMP_IMM(BPF_JNE, R6, 6, 4), \
BPF_JMP_IMM(BPF_JNE, R7, 7, 3), \
BPF_JMP_IMM(BPF_JNE, R8, 8, 2), \
BPF_JMP_IMM(BPF_JNE, R9, 9, 1), \
BPF_ALU64_IMM(BPF_MOV, R0, 1), \
BPF_EXIT_INSN(), \
}, \
INTERNAL, \
{ }, \
{ { 0, 1 } }, \
.stack_depth = 8, \
}
/* 64-bit atomic operations, register clobbering */
BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_ADD),
BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_AND),
BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_OR),
BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_XOR),
BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_ADD | BPF_FETCH),
BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_AND | BPF_FETCH),
BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_OR | BPF_FETCH),
BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_XOR | BPF_FETCH),
BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_XCHG),
BPF_TEST_CLOBBER_ATOMIC(BPF_DW, BPF_CMPXCHG),
/* 32-bit atomic operations, register clobbering */
BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_ADD),
BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_AND),
BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_OR),
BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_XOR),
BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_ADD | BPF_FETCH),
BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_AND | BPF_FETCH),
BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_OR | BPF_FETCH),
BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_XOR | BPF_FETCH),
BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_XCHG),
BPF_TEST_CLOBBER_ATOMIC(BPF_W, BPF_CMPXCHG),
#undef BPF_TEST_CLOBBER_ATOMIC
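/*
 * A JIT for a 64-bit ISA may implement a 32-bit operation by first
 * zero-extending its operands. The tests below verify that any such
 * zero extension is done in a scratch location and does not clobber
 * the upper half of the source register itself.
 */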
/* Checking that ALU32 src is not zero extended in place */
#define BPF_ALU32_SRC_ZEXT(op) \
{ \
"ALU32_" #op "_X: src preserved in zext", \
.u.insns_int = { \
BPF_LD_IMM64(R1, 0x0123456789acbdefULL),\
BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),\
BPF_ALU64_REG(BPF_MOV, R0, R1), \
BPF_ALU32_REG(BPF_##op, R2, R1), \
BPF_ALU64_REG(BPF_SUB, R0, R1), \
BPF_ALU64_REG(BPF_MOV, R1, R0), \
BPF_ALU64_IMM(BPF_RSH, R1, 32), \
BPF_ALU64_REG(BPF_OR, R0, R1), \
BPF_EXIT_INSN(), \
}, \
INTERNAL, \
{ }, \
{ { 0, 0 } }, \
}
BPF_ALU32_SRC_ZEXT(MOV),
BPF_ALU32_SRC_ZEXT(AND),
BPF_ALU32_SRC_ZEXT(OR),
BPF_ALU32_SRC_ZEXT(XOR),
BPF_ALU32_SRC_ZEXT(ADD),
BPF_ALU32_SRC_ZEXT(SUB),
BPF_ALU32_SRC_ZEXT(MUL),
BPF_ALU32_SRC_ZEXT(DIV),
BPF_ALU32_SRC_ZEXT(MOD),
#undef BPF_ALU32_SRC_ZEXT
/* Checking that ATOMIC32 src is not zero extended in place */
#define BPF_ATOMIC32_SRC_ZEXT(op) \
{ \
"ATOMIC_W_" #op ": src preserved in zext", \
.u.insns_int = { \
BPF_LD_IMM64(R0, 0x0123456789acbdefULL), \
BPF_ALU64_REG(BPF_MOV, R1, R0), \
BPF_ST_MEM(BPF_W, R10, -4, 0), \
BPF_ATOMIC_OP(BPF_W, BPF_##op, R10, R1, -4), \
BPF_ALU64_REG(BPF_SUB, R0, R1), \
BPF_ALU64_REG(BPF_MOV, R1, R0), \
BPF_ALU64_IMM(BPF_RSH, R1, 32), \
BPF_ALU64_REG(BPF_OR, R0, R1), \
BPF_EXIT_INSN(), \
}, \
INTERNAL, \
{ }, \
{ { 0, 0 } }, \
.stack_depth = 8, \
}
BPF_ATOMIC32_SRC_ZEXT(ADD),
BPF_ATOMIC32_SRC_ZEXT(AND),
BPF_ATOMIC32_SRC_ZEXT(OR),
BPF_ATOMIC32_SRC_ZEXT(XOR),
#undef BPF_ATOMIC32_SRC_ZEXT
/* Checking that CMPXCHG32 src is not zero extended in place */
{
"ATOMIC_W_CMPXCHG: src preserved in zext",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x0123456789acbdefULL),
BPF_ALU64_REG(BPF_MOV, R2, R1),
BPF_ALU64_REG(BPF_MOV, R0, 0),
BPF_ST_MEM(BPF_W, R10, -4, 0),
BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R1, -4),
BPF_ALU64_REG(BPF_SUB, R1, R2),
BPF_ALU64_REG(BPF_MOV, R2, R1),
BPF_ALU64_IMM(BPF_RSH, R2, 32),
BPF_ALU64_REG(BPF_OR, R1, R2),
BPF_ALU64_REG(BPF_MOV, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
/* Checking that JMP32 immediate src is not zero extended in place */
#define BPF_JMP32_IMM_ZEXT(op) \
{ \
"JMP32_" #op "_K: operand preserved in zext", \
.u.insns_int = { \
BPF_LD_IMM64(R0, 0x0123456789acbdefULL),\
BPF_ALU64_REG(BPF_MOV, R1, R0), \
BPF_JMP32_IMM(BPF_##op, R0, 1234, 1), \
BPF_JMP_A(0), /* Nop */ \
BPF_ALU64_REG(BPF_SUB, R0, R1), \
BPF_ALU64_REG(BPF_MOV, R1, R0), \
BPF_ALU64_IMM(BPF_RSH, R1, 32), \
BPF_ALU64_REG(BPF_OR, R0, R1), \
BPF_EXIT_INSN(), \
}, \
INTERNAL, \
{ }, \
{ { 0, 0 } }, \
}
BPF_JMP32_IMM_ZEXT(JEQ),
BPF_JMP32_IMM_ZEXT(JNE),
BPF_JMP32_IMM_ZEXT(JSET),
BPF_JMP32_IMM_ZEXT(JGT),
BPF_JMP32_IMM_ZEXT(JGE),
BPF_JMP32_IMM_ZEXT(JLT),
BPF_JMP32_IMM_ZEXT(JLE),
BPF_JMP32_IMM_ZEXT(JSGT),
BPF_JMP32_IMM_ZEXT(JSGE),
BPF_JMP32_IMM_ZEXT(JSLT),
BPF_JMP32_IMM_ZEXT(JSLE),
#undef BPF_JMP32_IMM_ZEXT
/* Checking that JMP32 dst & src are not zero extended in place */
#define BPF_JMP32_REG_ZEXT(op) \
{ \
"JMP32_" #op "_X: operands preserved in zext", \
.u.insns_int = { \
BPF_LD_IMM64(R0, 0x0123456789acbdefULL),\
BPF_LD_IMM64(R1, 0xfedcba9876543210ULL),\
BPF_ALU64_REG(BPF_MOV, R2, R0), \
BPF_ALU64_REG(BPF_MOV, R3, R1), \
BPF_JMP32_REG(BPF_##op, R0, R1, 1),	\
BPF_JMP_A(0), /* Nop */ \
BPF_ALU64_REG(BPF_SUB, R0, R2), \
BPF_ALU64_REG(BPF_SUB, R1, R3), \
BPF_ALU64_REG(BPF_OR, R0, R1), \
BPF_ALU64_REG(BPF_MOV, R1, R0), \
BPF_ALU64_IMM(BPF_RSH, R1, 32), \
BPF_ALU64_REG(BPF_OR, R0, R1), \
BPF_EXIT_INSN(), \
}, \
INTERNAL, \
{ }, \
{ { 0, 0 } }, \
}
BPF_JMP32_REG_ZEXT(JEQ),
BPF_JMP32_REG_ZEXT(JNE),
BPF_JMP32_REG_ZEXT(JSET),
BPF_JMP32_REG_ZEXT(JGT),
BPF_JMP32_REG_ZEXT(JGE),
BPF_JMP32_REG_ZEXT(JLT),
BPF_JMP32_REG_ZEXT(JLE),
BPF_JMP32_REG_ZEXT(JSGT),
BPF_JMP32_REG_ZEXT(JSGE),
BPF_JMP32_REG_ZEXT(JSLT),
BPF_JMP32_REG_ZEXT(JSLE),
#undef BPF_JMP32_REG_ZEXT
/* ALU64 K register combinations */
{
"ALU64_MOV_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_mov_imm_regs,
},
{
"ALU64_AND_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_and_imm_regs,
},
{
"ALU64_OR_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_or_imm_regs,
},
{
"ALU64_XOR_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_xor_imm_regs,
},
{
"ALU64_LSH_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_lsh_imm_regs,
},
{
"ALU64_RSH_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_rsh_imm_regs,
},
{
"ALU64_ARSH_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_arsh_imm_regs,
},
{
"ALU64_ADD_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_add_imm_regs,
},
{
"ALU64_SUB_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_sub_imm_regs,
},
{
"ALU64_MUL_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_mul_imm_regs,
},
{
"ALU64_DIV_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_div_imm_regs,
},
{
"ALU64_MOD_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_mod_imm_regs,
},
/* ALU32 K registers */
{
"ALU32_MOV_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_mov_imm_regs,
},
{
"ALU32_AND_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_and_imm_regs,
},
{
"ALU32_OR_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_or_imm_regs,
},
{
"ALU32_XOR_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_xor_imm_regs,
},
{
"ALU32_LSH_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_lsh_imm_regs,
},
{
"ALU32_RSH_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_rsh_imm_regs,
},
{
"ALU32_ARSH_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_arsh_imm_regs,
},
{
"ALU32_ADD_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_add_imm_regs,
},
{
"ALU32_SUB_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_sub_imm_regs,
},
{
"ALU32_MUL_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_mul_imm_regs,
},
{
"ALU32_DIV_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_div_imm_regs,
},
{
"ALU32_MOD_K: registers",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_mod_imm_regs,
},
/* ALU64 X register combinations */
{
"ALU64_MOV_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_mov_reg_pairs,
},
{
"ALU64_AND_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_and_reg_pairs,
},
{
"ALU64_OR_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_or_reg_pairs,
},
{
"ALU64_XOR_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_xor_reg_pairs,
},
{
"ALU64_LSH_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_lsh_reg_pairs,
},
{
"ALU64_RSH_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_rsh_reg_pairs,
},
{
"ALU64_ARSH_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_arsh_reg_pairs,
},
{
"ALU64_ADD_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_add_reg_pairs,
},
{
"ALU64_SUB_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_sub_reg_pairs,
},
{
"ALU64_MUL_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_mul_reg_pairs,
},
{
"ALU64_DIV_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_div_reg_pairs,
},
{
"ALU64_MOD_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_mod_reg_pairs,
},
/* ALU32 X register combinations */
{
"ALU32_MOV_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_mov_reg_pairs,
},
{
"ALU32_AND_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_and_reg_pairs,
},
{
"ALU32_OR_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_or_reg_pairs,
},
{
"ALU32_XOR_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_xor_reg_pairs,
},
{
"ALU32_LSH_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_lsh_reg_pairs,
},
{
"ALU32_RSH_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_rsh_reg_pairs,
},
{
"ALU32_ARSH_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_arsh_reg_pairs,
},
{
"ALU32_ADD_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_add_reg_pairs,
},
{
"ALU32_SUB_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_sub_reg_pairs,
},
{
"ALU32_MUL_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_mul_reg_pairs,
},
{
"ALU32_DIV_X: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_div_reg_pairs,
},
{
"ALU32_MOD_X register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_mod_reg_pairs,
},
/* Exhaustive test of ALU64 shift operations */
{
"ALU64_LSH_K: all shift values",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_lsh_imm,
},
{
"ALU64_RSH_K: all shift values",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_rsh_imm,
},
{
"ALU64_ARSH_K: all shift values",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_arsh_imm,
},
{
"ALU64_LSH_X: all shift values",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_lsh_reg,
},
{
"ALU64_RSH_X: all shift values",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_rsh_reg,
},
{
"ALU64_ARSH_X: all shift values",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_arsh_reg,
},
/* Exhaustive test of ALU32 shift operations */
{
"ALU32_LSH_K: all shift values",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_lsh_imm,
},
{
"ALU32_RSH_K: all shift values",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_rsh_imm,
},
{
"ALU32_ARSH_K: all shift values",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_arsh_imm,
},
{
"ALU32_LSH_X: all shift values",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_lsh_reg,
},
{
"ALU32_RSH_X: all shift values",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_rsh_reg,
},
{
"ALU32_ARSH_X: all shift values",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_arsh_reg,
},
/*
* Exhaustive test of ALU64 shift operations when
* source and destination register are the same.
*/
{
"ALU64_LSH_X: all shift values with the same register",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_lsh_same_reg,
},
{
"ALU64_RSH_X: all shift values with the same register",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_rsh_same_reg,
},
{
"ALU64_ARSH_X: all shift values with the same register",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_arsh_same_reg,
},
/*
* Exhaustive test of ALU32 shift operations when
* source and destination register are the same.
*/
{
"ALU32_LSH_X: all shift values with the same register",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_lsh_same_reg,
},
{
"ALU32_RSH_X: all shift values with the same register",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_rsh_same_reg,
},
{
"ALU32_ARSH_X: all shift values with the same register",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_arsh_same_reg,
},
/* ALU64 immediate magnitudes */
{
"ALU64_MOV_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_mov_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU64_AND_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_and_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU64_OR_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_or_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU64_XOR_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_xor_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU64_ADD_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_add_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU64_SUB_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_sub_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU64_MUL_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_mul_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU64_DIV_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_div_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU64_MOD_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_mod_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
/* ALU32 immediate magnitudes */
{
"ALU32_MOV_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_mov_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU32_AND_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_and_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU32_OR_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_or_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU32_XOR_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_xor_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU32_ADD_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_add_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU32_SUB_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_sub_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU32_MUL_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_mul_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU32_DIV_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_div_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU32_MOD_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_mod_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
/* ALU64 register magnitudes */
{
"ALU64_MOV_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_mov_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU64_AND_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_and_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU64_OR_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_or_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU64_XOR_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_xor_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU64_ADD_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_add_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU64_SUB_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_sub_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU64_MUL_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_mul_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU64_DIV_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_div_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU64_MOD_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu64_mod_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
/* ALU32 register magnitudes */
{
"ALU32_MOV_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_mov_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU32_AND_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_and_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU32_OR_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_or_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU32_XOR_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_xor_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU32_ADD_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_add_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU32_SUB_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_sub_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU32_MUL_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_mul_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU32_DIV_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_div_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ALU32_MOD_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_alu32_mod_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
/* LD_IMM64 immediate magnitudes and byte patterns */
{
"LD_IMM64: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_ld_imm64_magn,
},
{
"LD_IMM64: checker byte patterns",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_ld_imm64_checker,
},
{
"LD_IMM64: random positive and zero byte patterns",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_ld_imm64_pos_zero,
},
{
"LD_IMM64: random negative and zero byte patterns",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_ld_imm64_neg_zero,
},
{
"LD_IMM64: random positive and negative byte patterns",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_ld_imm64_pos_neg,
},
/* 64-bit ATOMIC register combinations */
{
"ATOMIC_DW_ADD: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_add_reg_pairs,
.stack_depth = 8,
},
{
"ATOMIC_DW_AND: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_and_reg_pairs,
.stack_depth = 8,
},
{
"ATOMIC_DW_OR: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_or_reg_pairs,
.stack_depth = 8,
},
{
"ATOMIC_DW_XOR: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_xor_reg_pairs,
.stack_depth = 8,
},
{
"ATOMIC_DW_ADD_FETCH: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_add_fetch_reg_pairs,
.stack_depth = 8,
},
{
"ATOMIC_DW_AND_FETCH: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_and_fetch_reg_pairs,
.stack_depth = 8,
},
{
"ATOMIC_DW_OR_FETCH: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_or_fetch_reg_pairs,
.stack_depth = 8,
},
{
"ATOMIC_DW_XOR_FETCH: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_xor_fetch_reg_pairs,
.stack_depth = 8,
},
{
"ATOMIC_DW_XCHG: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_xchg_reg_pairs,
.stack_depth = 8,
},
{
"ATOMIC_DW_CMPXCHG: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_cmpxchg_reg_pairs,
.stack_depth = 8,
},
/* 32-bit ATOMIC register combinations */
{
"ATOMIC_W_ADD: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_add_reg_pairs,
.stack_depth = 8,
},
{
"ATOMIC_W_AND: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_and_reg_pairs,
.stack_depth = 8,
},
{
"ATOMIC_W_OR: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_or_reg_pairs,
.stack_depth = 8,
},
{
"ATOMIC_W_XOR: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_xor_reg_pairs,
.stack_depth = 8,
},
{
"ATOMIC_W_ADD_FETCH: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_add_fetch_reg_pairs,
.stack_depth = 8,
},
{
"ATOMIC_W_AND_FETCH: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_and_fetch_reg_pairs,
.stack_depth = 8,
},
{
"ATOMIC_W_OR_FETCH: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_or_fetch_reg_pairs,
.stack_depth = 8,
},
{
"ATOMIC_W_XOR_FETCH: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_xor_fetch_reg_pairs,
.stack_depth = 8,
},
{
"ATOMIC_W_XCHG: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_xchg_reg_pairs,
.stack_depth = 8,
},
{
"ATOMIC_W_CMPXCHG: register combinations",
{ },
INTERNAL,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_cmpxchg_reg_pairs,
.stack_depth = 8,
},
/* 64-bit ATOMIC magnitudes */
{
"ATOMIC_DW_ADD: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_add,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ATOMIC_DW_AND: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_and,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ATOMIC_DW_OR: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_or,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ATOMIC_DW_XOR: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_xor,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ATOMIC_DW_ADD_FETCH: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_add_fetch,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ATOMIC_DW_AND_FETCH: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_and_fetch,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ATOMIC_DW_OR_FETCH: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_or_fetch,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ATOMIC_DW_XOR_FETCH: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_xor_fetch,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ATOMIC_DW_XCHG: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic64_xchg,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ATOMIC_DW_CMPXCHG: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_cmpxchg64,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
/* 32-bit ATOMIC magnitudes */
{
"ATOMIC_W_ADD: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_add,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ATOMIC_W_AND: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_and,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ATOMIC_W_OR: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_or,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ATOMIC_W_XOR: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_xor,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ATOMIC_W_ADD_FETCH: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_add_fetch,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ATOMIC_W_AND_FETCH: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_and_fetch,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ATOMIC_W_OR_FETCH: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_or_fetch,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ATOMIC_W_XOR_FETCH: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_xor_fetch,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ATOMIC_W_XCHG: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_atomic32_xchg,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"ATOMIC_W_CMPXCHG: all operand magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_cmpxchg32,
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
/* JMP immediate magnitudes */
{
"JMP_JSET_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jset_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JEQ_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jeq_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JNE_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jne_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JGT_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jgt_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JGE_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jge_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JLT_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jlt_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JLE_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jle_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JSGT_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jsgt_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JSGE_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jsge_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JSLT_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jslt_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JSLE_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jsle_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
/* JMP register magnitudes */
{
"JMP_JSET_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jset_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JEQ_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jeq_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JNE_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jne_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JGT_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jgt_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JGE_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jge_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JLT_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jlt_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JLE_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jle_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JSGT_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jsgt_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JSGE_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jsge_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JSLT_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jslt_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP_JSLE_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp_jsle_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
/* JMP32 immediate magnitudes */
{
"JMP32_JSET_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jset_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JEQ_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jeq_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JNE_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jne_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JGT_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jgt_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JGE_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jge_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JLT_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jlt_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JLE_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jle_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JSGT_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jsgt_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JSGE_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jsge_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JSLT_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jslt_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JSLE_K: all immediate value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jsle_imm,
.nr_testruns = NR_PATTERN_RUNS,
},
/* JMP32 register magnitudes */
{
"JMP32_JSET_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jset_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JEQ_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jeq_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JNE_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jne_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JGT_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jgt_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JGE_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jge_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JLT_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jlt_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JLE_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jle_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JSGT_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jsgt_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JSGE_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jsge_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JSLT_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jslt_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
{
"JMP32_JSLE_X: all register value magnitudes",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_jmp32_jsle_reg,
.nr_testruns = NR_PATTERN_RUNS,
},
/* Conditional jumps with constant decision */
{
"JMP_JSET_K: imm = 0 -> never taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP_IMM(BPF_JSET, R1, 0, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 0 } },
},
{
"JMP_JLT_K: imm = 0 -> never taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP_IMM(BPF_JLT, R1, 0, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 0 } },
},
{
"JMP_JGE_K: imm = 0 -> always taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP_IMM(BPF_JGE, R1, 0, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
},
{
"JMP_JGT_K: imm = 0xffffffff -> never taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP_IMM(BPF_JGT, R1, U32_MAX, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 0 } },
},
{
"JMP_JLE_K: imm = 0xffffffff -> always taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP_IMM(BPF_JLE, R1, U32_MAX, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
},
{
"JMP32_JSGT_K: imm = 0x7fffffff -> never taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP32_IMM(BPF_JSGT, R1, S32_MAX, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 0 } },
},
{
"JMP32_JSGE_K: imm = -0x80000000 -> always taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP32_IMM(BPF_JSGE, R1, S32_MIN, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
},
{
"JMP32_JSLT_K: imm = -0x80000000 -> never taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP32_IMM(BPF_JSLT, R1, S32_MIN, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 0 } },
},
{
"JMP32_JSLE_K: imm = 0x7fffffff -> always taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP32_IMM(BPF_JSLE, R1, S32_MAX, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
},
{
"JMP_JEQ_X: dst = src -> always taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP_REG(BPF_JEQ, R1, R1, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
},
{
"JMP_JGE_X: dst = src -> always taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP_REG(BPF_JGE, R1, R1, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
},
{
"JMP_JLE_X: dst = src -> always taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP_REG(BPF_JLE, R1, R1, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
},
{
"JMP_JSGE_X: dst = src -> always taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP_REG(BPF_JSGE, R1, R1, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
},
{
"JMP_JSLE_X: dst = src -> always taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP_REG(BPF_JSLE, R1, R1, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
},
{
"JMP_JNE_X: dst = src -> never taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP_REG(BPF_JNE, R1, R1, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 0 } },
},
{
"JMP_JGT_X: dst = src -> never taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP_REG(BPF_JGT, R1, R1, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 0 } },
},
{
"JMP_JLT_X: dst = src -> never taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP_REG(BPF_JLT, R1, R1, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 0 } },
},
{
"JMP_JSGT_X: dst = src -> never taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP_REG(BPF_JSGT, R1, R1, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 0 } },
},
{
"JMP_JSLT_X: dst = src -> never taken",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 1),
BPF_JMP_REG(BPF_JSLT, R1, R1, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 0 } },
},
/* Short relative jumps */
{
"Short relative jump: offset=0",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_JMP_IMM(BPF_JEQ, R0, 0, 0),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, -1),
},
INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
{ },
{ { 0, 0 } },
},
{
"Short relative jump: offset=1",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_JMP_IMM(BPF_JEQ, R0, 0, 1),
BPF_ALU32_IMM(BPF_ADD, R0, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, -1),
},
INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
{ },
{ { 0, 0 } },
},
{
"Short relative jump: offset=2",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
BPF_ALU32_IMM(BPF_ADD, R0, 1),
BPF_ALU32_IMM(BPF_ADD, R0, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, -1),
},
INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
{ },
{ { 0, 0 } },
},
{
"Short relative jump: offset=3",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_JMP_IMM(BPF_JEQ, R0, 0, 3),
BPF_ALU32_IMM(BPF_ADD, R0, 1),
BPF_ALU32_IMM(BPF_ADD, R0, 1),
BPF_ALU32_IMM(BPF_ADD, R0, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, -1),
},
INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
{ },
{ { 0, 0 } },
},
{
"Short relative jump: offset=4",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_JMP_IMM(BPF_JEQ, R0, 0, 4),
BPF_ALU32_IMM(BPF_ADD, R0, 1),
BPF_ALU32_IMM(BPF_ADD, R0, 1),
BPF_ALU32_IMM(BPF_ADD, R0, 1),
BPF_ALU32_IMM(BPF_ADD, R0, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, -1),
},
INTERNAL | FLAG_NO_DATA | FLAG_VERIFIER_ZEXT,
{ },
{ { 0, 0 } },
},
/* Conditional branch conversions */
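/*
 * These tests use fill helpers to emit conditional branches whose
 * targets lie at the maximum jump offset, so a JIT that prefers short
 * branch encodings must convert them to long ones. In the variants
 * where the outcome is known at JIT time, the branch condition is a
 * constant and may be resolved during code generation.
 */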
{
"Long conditional jump: taken at runtime",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_max_jmp_taken,
},
{
"Long conditional jump: not taken at runtime",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 2 } },
.fill_helper = bpf_fill_max_jmp_not_taken,
},
{
"Long conditional jump: always taken, known at JIT time",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 1 } },
.fill_helper = bpf_fill_max_jmp_always_taken,
},
{
"Long conditional jump: never taken, known at JIT time",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, 2 } },
.fill_helper = bpf_fill_max_jmp_never_taken,
},
/* Staggered jump sequences, immediate */
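/*
 * The staggered jump fillers build a long chain of conditional jumps
 * whose targets alternate between forward and backward directions
 * across the program, exercising branch offset computation both ways.
 * The expected result reflects the number of jumps in the chain.
 */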
{
"Staggered jumps: JMP_JA",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_ja,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JEQ_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jeq_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JNE_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jne_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JSET_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jset_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JGT_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jgt_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JGE_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jge_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JLT_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jlt_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JLE_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jle_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JSGT_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jsgt_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JSGE_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jsge_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JSLT_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jslt_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JSLE_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jsle_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
/* Staggered jump sequences, register */
{
"Staggered jumps: JMP_JEQ_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jeq_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JNE_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jne_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JSET_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jset_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JGT_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jgt_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JGE_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jge_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JLT_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jlt_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JLE_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jle_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JSGT_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jsgt_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JSGE_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jsge_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JSLT_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jslt_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP_JSLE_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jsle_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
/* Staggered jump sequences, JMP32 immediate */
{
"Staggered jumps: JMP32_JEQ_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jeq32_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JNE_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jne32_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JSET_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jset32_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JGT_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jgt32_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JGE_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jge32_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JLT_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jlt32_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JLE_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jle32_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JSGT_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jsgt32_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JSGE_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jsge32_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JSLT_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jslt32_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JSLE_K",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jsle32_imm,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
/* Staggered jump sequences, JMP32 register */
{
"Staggered jumps: JMP32_JEQ_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jeq32_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JNE_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jne32_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JSET_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jset32_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JGT_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jgt32_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JGE_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jge32_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JLT_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jlt32_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JLE_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jle32_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JSGT_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jsgt32_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JSGE_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jsge32_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JSLT_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jslt32_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
{
"Staggered jumps: JMP32_JSLE_X",
{ },
INTERNAL | FLAG_NO_DATA,
{ },
{ { 0, MAX_STAGGERED_JMP_SIZE + 1 } },
.fill_helper = bpf_fill_staggered_jsle32_reg,
.nr_testruns = NR_STAGGERED_JMP_RUNS,
},
};
static struct net_device dev;
static struct sk_buff *populate_skb(char *buf, int size)
{
struct sk_buff *skb;
if (size >= MAX_DATA)
return NULL;
skb = alloc_skb(MAX_DATA, GFP_KERNEL);
if (!skb)
return NULL;
__skb_put_data(skb, buf, size);
/* Initialize a fake skb with test pattern. */
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IP);
skb->pkt_type = SKB_TYPE;
skb->mark = SKB_MARK;
skb->hash = SKB_HASH;
skb->queue_mapping = SKB_QUEUE_MAP;
skb->vlan_tci = SKB_VLAN_TCI;
skb->vlan_proto = htons(ETH_P_IP);
dev_net_set(&dev, &init_net);
skb->dev = &dev;
skb->dev->ifindex = SKB_DEV_IFINDEX;
skb->dev->type = SKB_DEV_TYPE;
skb_set_network_header(skb, min(size, ETH_HLEN));
return skb;
}
static void *generate_test_data(struct bpf_test *test, int sub)
{
struct sk_buff *skb;
struct page *page;
if (test->aux & FLAG_NO_DATA)
return NULL;
if (test->aux & FLAG_LARGE_MEM)
return kmalloc(test->test[sub].data_size, GFP_KERNEL);
/* Test case expects an skb, so populate one. Various
* subtests generate skbs of different sizes based on
* the same data.
*/
skb = populate_skb(test->data, test->test[sub].data_size);
if (!skb)
return NULL;
if (test->aux & FLAG_SKB_FRAG) {
/*
		 * When the test requires a fragmented skb, add a
* single fragment to the skb, filled with
* test->frag_data.
*/
page = alloc_page(GFP_KERNEL);
if (!page)
goto err_kfree_skb;
memcpy(page_address(page), test->frag_data, MAX_DATA);
skb_add_rx_frag(skb, 0, page, 0, MAX_DATA, MAX_DATA);
}
return skb;
err_kfree_skb:
kfree_skb(skb);
return NULL;
}
static void release_test_data(const struct bpf_test *test, void *data)
{
if (test->aux & FLAG_NO_DATA)
return;
if (test->aux & FLAG_LARGE_MEM)
kfree(data);
else
kfree_skb(data);
}
static int filter_length(int which)
{
struct sock_filter *fp;
int len;
if (tests[which].fill_helper)
return tests[which].u.ptr.len;
fp = tests[which].u.insns;
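	/*
	 * Added note: statically declared filters are padded with all-zero
	 * instructions, so scan backwards for the last instruction with a
	 * non-zero opcode or constant to find the real program length.
	 */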
for (len = MAX_INSNS - 1; len > 0; --len)
if (fp[len].code != 0 || fp[len].k != 0)
break;
return len + 1;
}
static void *filter_pointer(int which)
{
if (tests[which].fill_helper)
return tests[which].u.ptr.insns;
else
return tests[which].u.insns;
}
static struct bpf_prog *generate_filter(int which, int *err)
{
__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
unsigned int flen = filter_length(which);
void *fptr = filter_pointer(which);
struct sock_fprog_kern fprog;
struct bpf_prog *fp;
switch (test_type) {
case CLASSIC:
fprog.filter = fptr;
fprog.len = flen;
*err = bpf_prog_create(&fp, &fprog);
if (tests[which].aux & FLAG_EXPECTED_FAIL) {
if (*err == tests[which].expected_errcode) {
pr_cont("PASS\n");
/* Verifier rejected filter as expected. */
*err = 0;
return NULL;
} else {
pr_cont("UNEXPECTED_PASS\n");
			/* The verifier didn't reject the test; that's
			 * bad enough, just return!
			 */
*err = -EINVAL;
return NULL;
}
}
if (*err) {
pr_cont("FAIL to prog_create err=%d len=%d\n",
*err, fprog.len);
return NULL;
}
break;
case INTERNAL:
fp = bpf_prog_alloc(bpf_prog_size(flen), 0);
if (fp == NULL) {
pr_cont("UNEXPECTED_FAIL no memory left\n");
*err = -ENOMEM;
return NULL;
}
fp->len = flen;
/* Type doesn't really matter here as long as it's not unspec. */
fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
fp->aux->stack_depth = tests[which].stack_depth;
fp->aux->verifier_zext = !!(tests[which].aux &
FLAG_VERIFIER_ZEXT);
/* We cannot error here as we don't need type compatibility
* checks.
*/
fp = bpf_prog_select_runtime(fp, err);
if (*err) {
pr_cont("FAIL to select_runtime err=%d\n", *err);
return NULL;
}
break;
}
*err = 0;
return fp;
}
static void release_filter(struct bpf_prog *fp, int which)
{
__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
switch (test_type) {
case CLASSIC:
bpf_prog_destroy(fp);
break;
case INTERNAL:
bpf_prog_free(fp);
break;
}
}
static int __run_one(const struct bpf_prog *fp, const void *data,
int runs, u64 *duration)
{
u64 start, finish;
int ret = 0, i;
migrate_disable();
start = ktime_get_ns();
for (i = 0; i < runs; i++)
ret = bpf_prog_run(fp, data);
finish = ktime_get_ns();
migrate_enable();
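	/* Added note: report the mean ns per run; do_div() divides the
	 * u64 in place, which keeps this correct on 32-bit too.
	 */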
*duration = finish - start;
do_div(*duration, runs);
return ret;
}
static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
{
int err_cnt = 0, i, runs = MAX_TESTRUNS;
if (test->nr_testruns)
runs = min(test->nr_testruns, MAX_TESTRUNS);
for (i = 0; i < MAX_SUBTESTS; i++) {
void *data;
u64 duration;
u32 ret;
/*
* NOTE: Several sub-tests may be present, in which case
* a zero {data_size, result} tuple indicates the end of
* the sub-test array. The first test is always run,
* even if both data_size and result happen to be zero.
*/
if (i > 0 &&
test->test[i].data_size == 0 &&
test->test[i].result == 0)
break;
data = generate_test_data(test, i);
if (!data && !(test->aux & FLAG_NO_DATA)) {
pr_cont("data generation failed ");
err_cnt++;
break;
}
ret = __run_one(fp, data, runs, &duration);
release_test_data(test, data);
if (ret == test->test[i].result) {
pr_cont("%lld ", duration);
} else {
s32 res = test->test[i].result;
pr_cont("ret %d != %d (%#x != %#x)",
ret, res, ret, res);
err_cnt++;
}
}
return err_cnt;
}
static char test_name[64];
module_param_string(test_name, test_name, sizeof(test_name), 0);
static int test_id = -1;
module_param(test_id, int, 0);
static int test_range[2] = { 0, INT_MAX };
module_param_array(test_range, int, NULL, 0);
static bool exclude_test(int test_id)
{
return test_id < test_range[0] || test_id > test_range[1];
}
static __init struct sk_buff *build_test_skb(void)
{
u32 headroom = NET_SKB_PAD + NET_IP_ALIGN + ETH_HLEN;
struct sk_buff *skb[2];
struct page *page[2];
int i, data_size = 8;
for (i = 0; i < 2; i++) {
page[i] = alloc_page(GFP_KERNEL);
if (!page[i]) {
if (i == 0)
goto err_page0;
else
goto err_page1;
}
/* this will set skb[i]->head_frag */
skb[i] = dev_alloc_skb(headroom + data_size);
if (!skb[i]) {
if (i == 0)
goto err_skb0;
else
goto err_skb1;
}
skb_reserve(skb[i], headroom);
skb_put(skb[i], data_size);
skb[i]->protocol = htons(ETH_P_IP);
skb_reset_network_header(skb[i]);
skb_set_mac_header(skb[i], -ETH_HLEN);
skb_add_rx_frag(skb[i], 0, page[i], 0, 64, 64);
		/* skb_headlen(skb[i]): 8, skb[i]->head_frag = 1 */
}
/* setup shinfo */
skb_shinfo(skb[0])->gso_size = 1448;
skb_shinfo(skb[0])->gso_type = SKB_GSO_TCPV4;
skb_shinfo(skb[0])->gso_type |= SKB_GSO_DODGY;
skb_shinfo(skb[0])->gso_segs = 0;
skb_shinfo(skb[0])->frag_list = skb[1];
skb_shinfo(skb[0])->hwtstamps.hwtstamp = 1000;
/* adjust skb[0]'s len */
skb[0]->len += skb[1]->len;
skb[0]->data_len += skb[1]->data_len;
skb[0]->truesize += skb[1]->truesize;
return skb[0];
err_skb1:
__free_page(page[1]);
err_page1:
kfree_skb(skb[0]);
err_skb0:
__free_page(page[0]);
err_page0:
return NULL;
}
static __init struct sk_buff *build_test_skb_linear_no_head_frag(void)
{
unsigned int alloc_size = 2000;
unsigned int headroom = 102, doffset = 72, data_size = 1308;
struct sk_buff *skb[2];
int i;
/* skbs linked in a frag_list, both with linear data, with head_frag=0
* (data allocated by kmalloc), both have tcp data of 1308 bytes
* (total payload is 2616 bytes).
* Data offset is 72 bytes (40 ipv6 hdr, 32 tcp hdr). Some headroom.
*/
for (i = 0; i < 2; i++) {
skb[i] = alloc_skb(alloc_size, GFP_KERNEL);
if (!skb[i]) {
if (i == 0)
goto err_skb0;
else
goto err_skb1;
}
skb[i]->protocol = htons(ETH_P_IPV6);
skb_reserve(skb[i], headroom);
skb_put(skb[i], doffset + data_size);
skb_reset_network_header(skb[i]);
if (i == 0)
skb_reset_mac_header(skb[i]);
else
skb_set_mac_header(skb[i], -ETH_HLEN);
__skb_pull(skb[i], doffset);
}
/* setup shinfo.
* mimic bpf_skb_proto_4_to_6, which resets gso_segs and assigns a
* reduced gso_size.
*/
skb_shinfo(skb[0])->gso_size = 1288;
skb_shinfo(skb[0])->gso_type = SKB_GSO_TCPV6 | SKB_GSO_DODGY;
skb_shinfo(skb[0])->gso_segs = 0;
skb_shinfo(skb[0])->frag_list = skb[1];
/* adjust skb[0]'s len */
skb[0]->len += skb[1]->len;
skb[0]->data_len += skb[1]->len;
skb[0]->truesize += skb[1]->truesize;
return skb[0];
err_skb1:
kfree_skb(skb[0]);
err_skb0:
return NULL;
}
struct skb_segment_test {
const char *descr;
struct sk_buff *(*build_skb)(void);
netdev_features_t features;
};
static struct skb_segment_test skb_segment_tests[] __initconst = {
{
.descr = "gso_with_rx_frags",
.build_skb = build_test_skb,
.features = NETIF_F_SG | NETIF_F_GSO_PARTIAL | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM
},
{
.descr = "gso_linear_no_head_frag",
.build_skb = build_test_skb_linear_no_head_frag,
.features = NETIF_F_SG | NETIF_F_FRAGLIST |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_GSO |
NETIF_F_LLTX | NETIF_F_GRO |
NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_STAG_TX
}
};
static __init int test_skb_segment_single(const struct skb_segment_test *test)
{
struct sk_buff *skb, *segs;
int ret = -1;
skb = test->build_skb();
if (!skb) {
		pr_info("%s: failed to build the test skb", __func__);
goto done;
}
segs = skb_segment(skb, test->features);
if (!IS_ERR(segs)) {
kfree_skb_list(segs);
ret = 0;
}
kfree_skb(skb);
done:
return ret;
}
static __init int test_skb_segment(void)
{
int i, err_cnt = 0, pass_cnt = 0;
for (i = 0; i < ARRAY_SIZE(skb_segment_tests); i++) {
const struct skb_segment_test *test = &skb_segment_tests[i];
cond_resched();
if (exclude_test(i))
continue;
pr_info("#%d %s ", i, test->descr);
if (test_skb_segment_single(test)) {
pr_cont("FAIL\n");
err_cnt++;
} else {
pr_cont("PASS\n");
pass_cnt++;
}
}
pr_info("%s: Summary: %d PASSED, %d FAILED\n", __func__,
pass_cnt, err_cnt);
return err_cnt ? -EINVAL : 0;
}
static __init int test_bpf(void)
{
int i, err_cnt = 0, pass_cnt = 0;
int jit_cnt = 0, run_cnt = 0;
for (i = 0; i < ARRAY_SIZE(tests); i++) {
struct bpf_prog *fp;
int err;
cond_resched();
if (exclude_test(i))
continue;
pr_info("#%d %s ", i, tests[i].descr);
if (tests[i].fill_helper &&
tests[i].fill_helper(&tests[i]) < 0) {
pr_cont("FAIL to prog_fill\n");
continue;
}
fp = generate_filter(i, &err);
if (tests[i].fill_helper) {
kfree(tests[i].u.ptr.insns);
tests[i].u.ptr.insns = NULL;
}
if (fp == NULL) {
if (err == 0) {
pass_cnt++;
continue;
}
err_cnt++;
continue;
}
pr_cont("jited:%u ", fp->jited);
run_cnt++;
if (fp->jited)
jit_cnt++;
err = run_one(fp, &tests[i]);
release_filter(fp, i);
if (err) {
pr_cont("FAIL (%d times)\n", err);
err_cnt++;
} else {
pr_cont("PASS\n");
pass_cnt++;
}
}
pr_info("Summary: %d PASSED, %d FAILED, [%d/%d JIT'ed]\n",
pass_cnt, err_cnt, jit_cnt, run_cnt);
return err_cnt ? -EINVAL : 0;
}
struct tail_call_test {
const char *descr;
struct bpf_insn insns[MAX_INSNS];
int flags;
int result;
int stack_depth;
};
/* Flags that can be passed to tail call test cases */
#define FLAG_NEED_STATE BIT(0)
#define FLAG_RESULT_IN_STATE BIT(1)
/*
* Magic marker used in test snippets for tail calls below.
 * BPF_LD/MOV to R2 and R3 with this immediate value is replaced
* with the proper values by the test runner.
*/
#define TAIL_CALL_MARKER 0x7a11ca11
/* Special offset to indicate a NULL call target */
#define TAIL_CALL_NULL 0x7fff
/* Special offset to indicate an out-of-range index */
#define TAIL_CALL_INVALID 0x7ffe
#define TAIL_CALL(offset) \
BPF_LD_IMM64(R2, TAIL_CALL_MARKER), \
BPF_RAW_INSN(BPF_ALU | BPF_MOV | BPF_K, R3, 0, \
offset, TAIL_CALL_MARKER), \
BPF_JMP_IMM(BPF_TAIL_CALL, 0, 0, 0)
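/*
 * Illustrative note (added): TAIL_CALL(-1) in test number N expands to a
 * 64-bit load of TAIL_CALL_MARKER into R2, a 32-bit move of the marker
 * into R3 with the relative offset -1 stashed in the insn off field, and
 * a tail call. prepare_tail_call_tests() later rewrites R2 to point at
 * the program array and R3 to the absolute index N - 1, so the snippet
 * ends up tail-calling the previous test in the table.
 */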
/*
* A test function to be called from a BPF program, clobbering a lot of
* CPU registers in the process. A JITed BPF program calling this function
* must save and restore any caller-saved registers it uses for internal
* state, for example the current tail call count.
*/
BPF_CALL_1(bpf_test_func, u64, arg)
{
char buf[64];
long a = 0;
long b = 1;
long c = 2;
long d = 3;
long e = 4;
long f = 5;
long g = 6;
long h = 7;
return snprintf(buf, sizeof(buf),
"%ld %lu %lx %ld %lu %lx %ld %lu %x",
a, b, c, d, e, f, g, h, (int)arg);
}
#define BPF_FUNC_test_func __BPF_FUNC_MAX_ID
/*
* Tail call tests. Each test case may call any other test in the table,
* including itself, specified as a relative index offset from the calling
* test. The index TAIL_CALL_NULL can be used to specify a NULL target
* function to test the JIT error path. Similarly, the index TAIL_CALL_INVALID
* results in a target index that is out of range.
*/
static struct tail_call_test tail_call_tests[] = {
{
"Tail call leaf",
.insns = {
BPF_ALU64_REG(BPF_MOV, R0, R1),
BPF_ALU64_IMM(BPF_ADD, R0, 1),
BPF_EXIT_INSN(),
},
.result = 1,
},
{
"Tail call 2",
.insns = {
BPF_ALU64_IMM(BPF_ADD, R1, 2),
TAIL_CALL(-1),
BPF_ALU64_IMM(BPF_MOV, R0, -1),
BPF_EXIT_INSN(),
},
.result = 3,
},
{
"Tail call 3",
.insns = {
BPF_ALU64_IMM(BPF_ADD, R1, 3),
TAIL_CALL(-1),
BPF_ALU64_IMM(BPF_MOV, R0, -1),
BPF_EXIT_INSN(),
},
.result = 6,
},
{
"Tail call 4",
.insns = {
BPF_ALU64_IMM(BPF_ADD, R1, 4),
TAIL_CALL(-1),
BPF_ALU64_IMM(BPF_MOV, R0, -1),
BPF_EXIT_INSN(),
},
.result = 10,
},
{
"Tail call load/store leaf",
.insns = {
BPF_ALU64_IMM(BPF_MOV, R1, 1),
BPF_ALU64_IMM(BPF_MOV, R2, 2),
BPF_ALU64_REG(BPF_MOV, R3, BPF_REG_FP),
BPF_STX_MEM(BPF_DW, R3, R1, -8),
BPF_STX_MEM(BPF_DW, R3, R2, -16),
BPF_LDX_MEM(BPF_DW, R0, BPF_REG_FP, -8),
BPF_JMP_REG(BPF_JNE, R0, R1, 3),
BPF_LDX_MEM(BPF_DW, R0, BPF_REG_FP, -16),
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
.result = 0,
.stack_depth = 32,
},
{
"Tail call load/store",
.insns = {
BPF_ALU64_IMM(BPF_MOV, R0, 3),
BPF_STX_MEM(BPF_DW, BPF_REG_FP, R0, -8),
TAIL_CALL(-1),
BPF_ALU64_IMM(BPF_MOV, R0, -1),
BPF_EXIT_INSN(),
},
.result = 0,
.stack_depth = 16,
},
{
"Tail call error path, max count reached",
.insns = {
BPF_LDX_MEM(BPF_W, R2, R1, 0),
BPF_ALU64_IMM(BPF_ADD, R2, 1),
BPF_STX_MEM(BPF_W, R1, R2, 0),
TAIL_CALL(0),
BPF_EXIT_INSN(),
},
.flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
.result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS,
},
{
"Tail call count preserved across function calls",
.insns = {
BPF_LDX_MEM(BPF_W, R2, R1, 0),
BPF_ALU64_IMM(BPF_ADD, R2, 1),
BPF_STX_MEM(BPF_W, R1, R2, 0),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
BPF_CALL_REL(BPF_FUNC_get_numa_node_id),
BPF_CALL_REL(BPF_FUNC_ktime_get_ns),
BPF_CALL_REL(BPF_FUNC_ktime_get_boot_ns),
BPF_CALL_REL(BPF_FUNC_ktime_get_coarse_ns),
BPF_CALL_REL(BPF_FUNC_jiffies64),
BPF_CALL_REL(BPF_FUNC_test_func),
BPF_LDX_MEM(BPF_DW, R1, R10, -8),
BPF_ALU32_REG(BPF_MOV, R0, R1),
TAIL_CALL(0),
BPF_EXIT_INSN(),
},
.stack_depth = 8,
.flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
.result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS,
},
{
"Tail call error path, NULL target",
.insns = {
BPF_LDX_MEM(BPF_W, R2, R1, 0),
BPF_ALU64_IMM(BPF_ADD, R2, 1),
BPF_STX_MEM(BPF_W, R1, R2, 0),
TAIL_CALL(TAIL_CALL_NULL),
BPF_EXIT_INSN(),
},
.flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
.result = MAX_TESTRUNS,
},
{
"Tail call error path, index out of range",
.insns = {
BPF_LDX_MEM(BPF_W, R2, R1, 0),
BPF_ALU64_IMM(BPF_ADD, R2, 1),
BPF_STX_MEM(BPF_W, R1, R2, 0),
TAIL_CALL(TAIL_CALL_INVALID),
BPF_EXIT_INSN(),
},
.flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE,
.result = MAX_TESTRUNS,
},
};
static void __init destroy_tail_call_tests(struct bpf_array *progs)
{
int i;
for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++)
if (progs->ptrs[i])
bpf_prog_free(progs->ptrs[i]);
kfree(progs);
}
static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
{
int ntests = ARRAY_SIZE(tail_call_tests);
struct bpf_array *progs;
int which, err;
/* Allocate the table of programs to be used for tail calls */
progs = kzalloc(struct_size(progs, ptrs, ntests + 1), GFP_KERNEL);
if (!progs)
goto out_nomem;
/* Create all eBPF programs and populate the table */
for (which = 0; which < ntests; which++) {
struct tail_call_test *test = &tail_call_tests[which];
struct bpf_prog *fp;
int len, i;
/* Compute the number of program instructions */
for (len = 0; len < MAX_INSNS; len++) {
struct bpf_insn *insn = &test->insns[len];
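			/* Added note: BPF_LD_IMM64 is a double-length
			 * instruction occupying two struct bpf_insn slots,
			 * so count both of them.
			 */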
if (len < MAX_INSNS - 1 &&
insn->code == (BPF_LD | BPF_DW | BPF_IMM))
len++;
if (insn->code == 0)
break;
}
/* Allocate and initialize the program */
fp = bpf_prog_alloc(bpf_prog_size(len), 0);
if (!fp)
goto out_nomem;
fp->len = len;
fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
fp->aux->stack_depth = test->stack_depth;
memcpy(fp->insnsi, test->insns, len * sizeof(struct bpf_insn));
/* Relocate runtime tail call offsets and addresses */
for (i = 0; i < len; i++) {
struct bpf_insn *insn = &fp->insnsi[i];
long addr = 0;
switch (insn->code) {
case BPF_LD | BPF_DW | BPF_IMM:
if (insn->imm != TAIL_CALL_MARKER)
break;
insn[0].imm = (u32)(long)progs;
insn[1].imm = ((u64)(long)progs) >> 32;
break;
case BPF_ALU | BPF_MOV | BPF_K:
if (insn->imm != TAIL_CALL_MARKER)
break;
if (insn->off == TAIL_CALL_NULL)
insn->imm = ntests;
else if (insn->off == TAIL_CALL_INVALID)
insn->imm = ntests + 1;
else
insn->imm = which + insn->off;
insn->off = 0;
break;
case BPF_JMP | BPF_CALL:
if (insn->src_reg != BPF_PSEUDO_CALL)
break;
switch (insn->imm) {
case BPF_FUNC_get_numa_node_id:
addr = (long)&numa_node_id;
break;
case BPF_FUNC_ktime_get_ns:
addr = (long)&ktime_get_ns;
break;
case BPF_FUNC_ktime_get_boot_ns:
addr = (long)&ktime_get_boot_fast_ns;
break;
case BPF_FUNC_ktime_get_coarse_ns:
addr = (long)&ktime_get_coarse_ns;
break;
case BPF_FUNC_jiffies64:
addr = (long)&get_jiffies_64;
break;
case BPF_FUNC_test_func:
addr = (long)&bpf_test_func;
break;
default:
err = -EFAULT;
goto out_err;
}
*insn = BPF_EMIT_CALL(addr);
if ((long)__bpf_call_base + insn->imm != addr)
*insn = BPF_JMP_A(0); /* Skip: NOP */
break;
}
}
fp = bpf_prog_select_runtime(fp, &err);
if (err)
goto out_err;
progs->ptrs[which] = fp;
}
/* The last entry contains a NULL program pointer */
progs->map.max_entries = ntests + 1;
*pprogs = progs;
return 0;
out_nomem:
err = -ENOMEM;
out_err:
if (progs)
destroy_tail_call_tests(progs);
return err;
}
static __init int test_tail_calls(struct bpf_array *progs)
{
int i, err_cnt = 0, pass_cnt = 0;
int jit_cnt = 0, run_cnt = 0;
for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++) {
struct tail_call_test *test = &tail_call_tests[i];
struct bpf_prog *fp = progs->ptrs[i];
int *data = NULL;
int state = 0;
u64 duration;
int ret;
cond_resched();
if (exclude_test(i))
continue;
pr_info("#%d %s ", i, test->descr);
if (!fp) {
err_cnt++;
continue;
}
pr_cont("jited:%u ", fp->jited);
run_cnt++;
if (fp->jited)
jit_cnt++;
if (test->flags & FLAG_NEED_STATE)
data = &state;
ret = __run_one(fp, data, MAX_TESTRUNS, &duration);
if (test->flags & FLAG_RESULT_IN_STATE)
ret = state;
if (ret == test->result) {
pr_cont("%lld PASS", duration);
pass_cnt++;
} else {
pr_cont("ret %d != %d FAIL", ret, test->result);
err_cnt++;
}
}
pr_info("%s: Summary: %d PASSED, %d FAILED, [%d/%d JIT'ed]\n",
__func__, pass_cnt, err_cnt, jit_cnt, run_cnt);
return err_cnt ? -EINVAL : 0;
}
static char test_suite[32];
module_param_string(test_suite, test_suite, sizeof(test_suite), 0);
static __init int find_test_index(const char *test_name)
{
int i;
if (!strcmp(test_suite, "test_bpf")) {
for (i = 0; i < ARRAY_SIZE(tests); i++) {
if (!strcmp(tests[i].descr, test_name))
return i;
}
}
if (!strcmp(test_suite, "test_tail_calls")) {
for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++) {
if (!strcmp(tail_call_tests[i].descr, test_name))
return i;
}
}
if (!strcmp(test_suite, "test_skb_segment")) {
for (i = 0; i < ARRAY_SIZE(skb_segment_tests); i++) {
if (!strcmp(skb_segment_tests[i].descr, test_name))
return i;
}
}
return -1;
}
static __init int prepare_test_range(void)
{
int valid_range;
if (!strcmp(test_suite, "test_bpf"))
valid_range = ARRAY_SIZE(tests);
else if (!strcmp(test_suite, "test_tail_calls"))
valid_range = ARRAY_SIZE(tail_call_tests);
else if (!strcmp(test_suite, "test_skb_segment"))
valid_range = ARRAY_SIZE(skb_segment_tests);
else
return 0;
if (test_id >= 0) {
/*
* if a test_id was specified, use test_range to
* cover only that test.
*/
if (test_id >= valid_range) {
pr_err("test_bpf: invalid test_id specified for '%s' suite.\n",
test_suite);
return -EINVAL;
}
test_range[0] = test_id;
test_range[1] = test_id;
} else if (*test_name) {
/*
* if a test_name was specified, find it and setup
* test_range to cover only that test.
*/
int idx = find_test_index(test_name);
if (idx < 0) {
pr_err("test_bpf: no test named '%s' found for '%s' suite.\n",
test_name, test_suite);
return -EINVAL;
}
test_range[0] = idx;
test_range[1] = idx;
} else if (test_range[0] != 0 || test_range[1] != INT_MAX) {
/*
* check that the supplied test_range is valid.
*/
if (test_range[0] < 0 || test_range[1] >= valid_range) {
			pr_err("test_bpf: test_range is out of bounds for '%s' suite.\n",
test_suite);
return -EINVAL;
}
if (test_range[1] < test_range[0]) {
pr_err("test_bpf: test_range is ending before it starts.\n");
return -EINVAL;
}
}
return 0;
}
static int __init test_bpf_init(void)
{
struct bpf_array *progs = NULL;
int ret;
if (strlen(test_suite) &&
strcmp(test_suite, "test_bpf") &&
strcmp(test_suite, "test_tail_calls") &&
strcmp(test_suite, "test_skb_segment")) {
pr_err("test_bpf: invalid test_suite '%s' specified.\n", test_suite);
return -EINVAL;
}
/*
* if test_suite is not specified, but test_id, test_name or test_range
* is specified, set 'test_bpf' as the default test suite.
*/
if (!strlen(test_suite) &&
(test_id != -1 || strlen(test_name) ||
(test_range[0] != 0 || test_range[1] != INT_MAX))) {
pr_info("test_bpf: set 'test_bpf' as the default test_suite.\n");
strscpy(test_suite, "test_bpf", sizeof(test_suite));
}
ret = prepare_test_range();
if (ret < 0)
return ret;
if (!strlen(test_suite) || !strcmp(test_suite, "test_bpf")) {
ret = test_bpf();
if (ret)
return ret;
}
if (!strlen(test_suite) || !strcmp(test_suite, "test_tail_calls")) {
ret = prepare_tail_call_tests(&progs);
if (ret)
return ret;
ret = test_tail_calls(progs);
destroy_tail_call_tests(progs);
if (ret)
return ret;
}
if (!strlen(test_suite) || !strcmp(test_suite, "test_skb_segment"))
return test_skb_segment();
return 0;
}
static void __exit test_bpf_exit(void)
{
}
module_init(test_bpf_init);
module_exit(test_bpf_exit);
MODULE_LICENSE("GPL");
| linux-master | lib/test_bpf.c |
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#define for_each_test(i, test) \
for (i = 0; i < ARRAY_SIZE(test); i++)
struct test_fail {
const char *str;
unsigned int base;
};
#define DEFINE_TEST_FAIL(test) \
const struct test_fail test[] __initconst
#define DECLARE_TEST_OK(type, test_type) \
test_type { \
const char *str; \
unsigned int base; \
type expected_res; \
}
#define DEFINE_TEST_OK(type, test) \
const type test[] __initconst
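/*
 * Added note, for illustration: DECLARE_TEST_OK(u8, struct test_u8)
 * expands to
 *	struct test_u8 { const char *str; unsigned int base; u8 expected_res; };
 * and DEFINE_TEST_OK(struct test_u8, test_u8_ok) then defines the
 * __initconst array of such entries that TEST_OK() iterates over.
 */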
#define TEST_FAIL(fn, type, fmt, test) \
{ \
unsigned int i; \
\
for_each_test(i, test) { \
const struct test_fail *t = &test[i]; \
type tmp; \
int rv; \
\
tmp = 0; \
rv = fn(t->str, t->base, &tmp); \
if (rv >= 0) { \
WARN(1, "str '%s', base %u, expected -E, got %d/" fmt "\n", \
t->str, t->base, rv, tmp); \
continue; \
} \
} \
}
#define TEST_OK(fn, type, fmt, test) \
{ \
unsigned int i; \
\
for_each_test(i, test) { \
const typeof(test[0]) *t = &test[i]; \
type res; \
int rv; \
\
rv = fn(t->str, t->base, &res); \
if (rv != 0) { \
WARN(1, "str '%s', base %u, expected 0/" fmt ", got %d\n", \
t->str, t->base, t->expected_res, rv); \
continue; \
} \
if (res != t->expected_res) { \
WARN(1, "str '%s', base %u, expected " fmt ", got " fmt "\n", \
t->str, t->base, t->expected_res, res); \
continue; \
} \
} \
}
static void __init test_kstrtoull_ok(void)
{
DECLARE_TEST_OK(unsigned long long, struct test_ull);
static DEFINE_TEST_OK(struct test_ull, test_ull_ok) = {
{"0", 10, 0ULL},
{"1", 10, 1ULL},
{"127", 10, 127ULL},
{"128", 10, 128ULL},
{"129", 10, 129ULL},
{"255", 10, 255ULL},
{"256", 10, 256ULL},
{"257", 10, 257ULL},
{"32767", 10, 32767ULL},
{"32768", 10, 32768ULL},
{"32769", 10, 32769ULL},
{"65535", 10, 65535ULL},
{"65536", 10, 65536ULL},
{"65537", 10, 65537ULL},
{"2147483647", 10, 2147483647ULL},
{"2147483648", 10, 2147483648ULL},
{"2147483649", 10, 2147483649ULL},
{"4294967295", 10, 4294967295ULL},
{"4294967296", 10, 4294967296ULL},
{"4294967297", 10, 4294967297ULL},
{"9223372036854775807", 10, 9223372036854775807ULL},
{"9223372036854775808", 10, 9223372036854775808ULL},
{"9223372036854775809", 10, 9223372036854775809ULL},
{"18446744073709551614", 10, 18446744073709551614ULL},
{"18446744073709551615", 10, 18446744073709551615ULL},
{"00", 8, 00ULL},
{"01", 8, 01ULL},
{"0177", 8, 0177ULL},
{"0200", 8, 0200ULL},
{"0201", 8, 0201ULL},
{"0377", 8, 0377ULL},
{"0400", 8, 0400ULL},
{"0401", 8, 0401ULL},
{"077777", 8, 077777ULL},
{"0100000", 8, 0100000ULL},
{"0100001", 8, 0100001ULL},
{"0177777", 8, 0177777ULL},
{"0200000", 8, 0200000ULL},
{"0200001", 8, 0200001ULL},
{"017777777777", 8, 017777777777ULL},
{"020000000000", 8, 020000000000ULL},
{"020000000001", 8, 020000000001ULL},
{"037777777777", 8, 037777777777ULL},
{"040000000000", 8, 040000000000ULL},
{"040000000001", 8, 040000000001ULL},
{"0777777777777777777777", 8, 0777777777777777777777ULL},
{"01000000000000000000000", 8, 01000000000000000000000ULL},
{"01000000000000000000001", 8, 01000000000000000000001ULL},
{"01777777777777777777776", 8, 01777777777777777777776ULL},
{"01777777777777777777777", 8, 01777777777777777777777ULL},
{"0x0", 16, 0x0ULL},
{"0x1", 16, 0x1ULL},
{"0x7f", 16, 0x7fULL},
{"0x80", 16, 0x80ULL},
{"0x81", 16, 0x81ULL},
{"0xff", 16, 0xffULL},
{"0x100", 16, 0x100ULL},
{"0x101", 16, 0x101ULL},
{"0x7fff", 16, 0x7fffULL},
{"0x8000", 16, 0x8000ULL},
{"0x8001", 16, 0x8001ULL},
{"0xffff", 16, 0xffffULL},
{"0x10000", 16, 0x10000ULL},
{"0x10001", 16, 0x10001ULL},
{"0x7fffffff", 16, 0x7fffffffULL},
{"0x80000000", 16, 0x80000000ULL},
{"0x80000001", 16, 0x80000001ULL},
{"0xffffffff", 16, 0xffffffffULL},
{"0x100000000", 16, 0x100000000ULL},
{"0x100000001", 16, 0x100000001ULL},
{"0x7fffffffffffffff", 16, 0x7fffffffffffffffULL},
{"0x8000000000000000", 16, 0x8000000000000000ULL},
{"0x8000000000000001", 16, 0x8000000000000001ULL},
{"0xfffffffffffffffe", 16, 0xfffffffffffffffeULL},
{"0xffffffffffffffff", 16, 0xffffffffffffffffULL},
{"0\n", 0, 0ULL},
};
TEST_OK(kstrtoull, unsigned long long, "%llu", test_ull_ok);
}
static void __init test_kstrtoull_fail(void)
{
static DEFINE_TEST_FAIL(test_ull_fail) = {
{"", 0},
{"", 8},
{"", 10},
{"", 16},
{"\n", 0},
{"\n", 8},
{"\n", 10},
{"\n", 16},
{"\n0", 0},
{"\n0", 8},
{"\n0", 10},
{"\n0", 16},
{"+", 0},
{"+", 8},
{"+", 10},
{"+", 16},
{"-", 0},
{"-", 8},
{"-", 10},
{"-", 16},
{"0x", 0},
{"0x", 16},
{"0X", 0},
{"0X", 16},
{"0 ", 0},
{"1+", 0},
{"1-", 0},
{" 2", 0},
/* base autodetection */
{"0x0z", 0},
{"0z", 0},
{"a", 0},
/* digit >= base */
{"2", 2},
{"8", 8},
{"a", 10},
{"A", 10},
{"g", 16},
{"G", 16},
/* overflow */
{"10000000000000000000000000000000000000000000000000000000000000000", 2},
{"2000000000000000000000", 8},
{"18446744073709551616", 10},
{"10000000000000000", 16},
/* negative */
{"-0", 0},
{"-0", 8},
{"-0", 10},
{"-0", 16},
{"-1", 0},
{"-1", 8},
{"-1", 10},
{"-1", 16},
/* sign is first character if any */
{"-+1", 0},
{"-+1", 8},
{"-+1", 10},
{"-+1", 16},
/* nothing after \n */
{"0\n0", 0},
{"0\n0", 8},
{"0\n0", 10},
{"0\n0", 16},
{"0\n+", 0},
{"0\n+", 8},
{"0\n+", 10},
{"0\n+", 16},
{"0\n-", 0},
{"0\n-", 8},
{"0\n-", 10},
{"0\n-", 16},
{"0\n ", 0},
{"0\n ", 8},
{"0\n ", 10},
{"0\n ", 16},
};
TEST_FAIL(kstrtoull, unsigned long long, "%llu", test_ull_fail);
}
static void __init test_kstrtoll_ok(void)
{
DECLARE_TEST_OK(long long, struct test_ll);
static DEFINE_TEST_OK(struct test_ll, test_ll_ok) = {
{"0", 10, 0LL},
{"1", 10, 1LL},
{"127", 10, 127LL},
{"128", 10, 128LL},
{"129", 10, 129LL},
{"255", 10, 255LL},
{"256", 10, 256LL},
{"257", 10, 257LL},
{"32767", 10, 32767LL},
{"32768", 10, 32768LL},
{"32769", 10, 32769LL},
{"65535", 10, 65535LL},
{"65536", 10, 65536LL},
{"65537", 10, 65537LL},
{"2147483647", 10, 2147483647LL},
{"2147483648", 10, 2147483648LL},
{"2147483649", 10, 2147483649LL},
{"4294967295", 10, 4294967295LL},
{"4294967296", 10, 4294967296LL},
{"4294967297", 10, 4294967297LL},
{"9223372036854775807", 10, 9223372036854775807LL},
{"-0", 10, 0LL},
{"-1", 10, -1LL},
{"-2", 10, -2LL},
{"-9223372036854775808", 10, LLONG_MIN},
};
TEST_OK(kstrtoll, long long, "%lld", test_ll_ok);
}
static void __init test_kstrtoll_fail(void)
{
static DEFINE_TEST_FAIL(test_ll_fail) = {
{"9223372036854775808", 10},
{"9223372036854775809", 10},
{"18446744073709551614", 10},
{"18446744073709551615", 10},
{"-9223372036854775809", 10},
{"-18446744073709551614", 10},
{"-18446744073709551615", 10},
/* sign is first character if any */
{"-+1", 0},
{"-+1", 8},
{"-+1", 10},
{"-+1", 16},
};
TEST_FAIL(kstrtoll, long long, "%lld", test_ll_fail);
}
static void __init test_kstrtou64_ok(void)
{
DECLARE_TEST_OK(u64, struct test_u64);
static DEFINE_TEST_OK(struct test_u64, test_u64_ok) = {
{"0", 10, 0},
{"1", 10, 1},
{"126", 10, 126},
{"127", 10, 127},
{"128", 10, 128},
{"129", 10, 129},
{"254", 10, 254},
{"255", 10, 255},
{"256", 10, 256},
{"257", 10, 257},
{"32766", 10, 32766},
{"32767", 10, 32767},
{"32768", 10, 32768},
{"32769", 10, 32769},
{"65534", 10, 65534},
{"65535", 10, 65535},
{"65536", 10, 65536},
{"65537", 10, 65537},
{"2147483646", 10, 2147483646},
{"2147483647", 10, 2147483647},
{"2147483648", 10, 2147483648ULL},
{"2147483649", 10, 2147483649ULL},
{"4294967294", 10, 4294967294ULL},
{"4294967295", 10, 4294967295ULL},
{"4294967296", 10, 4294967296ULL},
{"4294967297", 10, 4294967297ULL},
{"9223372036854775806", 10, 9223372036854775806ULL},
{"9223372036854775807", 10, 9223372036854775807ULL},
{"9223372036854775808", 10, 9223372036854775808ULL},
{"9223372036854775809", 10, 9223372036854775809ULL},
{"18446744073709551614", 10, 18446744073709551614ULL},
{"18446744073709551615", 10, 18446744073709551615ULL},
};
TEST_OK(kstrtou64, u64, "%llu", test_u64_ok);
}
static void __init test_kstrtou64_fail(void)
{
static DEFINE_TEST_FAIL(test_u64_fail) = {
{"-2", 10},
{"-1", 10},
{"18446744073709551616", 10},
{"18446744073709551617", 10},
};
TEST_FAIL(kstrtou64, u64, "%llu", test_u64_fail);
}
static void __init test_kstrtos64_ok(void)
{
DECLARE_TEST_OK(s64, struct test_s64);
static DEFINE_TEST_OK(struct test_s64, test_s64_ok) = {
{"-128", 10, -128},
{"-127", 10, -127},
{"-1", 10, -1},
{"0", 10, 0},
{"1", 10, 1},
{"126", 10, 126},
{"127", 10, 127},
{"128", 10, 128},
{"129", 10, 129},
{"254", 10, 254},
{"255", 10, 255},
{"256", 10, 256},
{"257", 10, 257},
{"32766", 10, 32766},
{"32767", 10, 32767},
{"32768", 10, 32768},
{"32769", 10, 32769},
{"65534", 10, 65534},
{"65535", 10, 65535},
{"65536", 10, 65536},
{"65537", 10, 65537},
{"2147483646", 10, 2147483646},
{"2147483647", 10, 2147483647},
{"2147483648", 10, 2147483648LL},
{"2147483649", 10, 2147483649LL},
{"4294967294", 10, 4294967294LL},
{"4294967295", 10, 4294967295LL},
{"4294967296", 10, 4294967296LL},
{"4294967297", 10, 4294967297LL},
{"9223372036854775806", 10, 9223372036854775806LL},
{"9223372036854775807", 10, 9223372036854775807LL},
};
TEST_OK(kstrtos64, s64, "%lld", test_s64_ok);
}
static void __init test_kstrtos64_fail(void)
{
static DEFINE_TEST_FAIL(test_s64_fail) = {
{"9223372036854775808", 10},
{"9223372036854775809", 10},
{"18446744073709551614", 10},
{"18446744073709551615", 10},
{"18446744073709551616", 10},
{"18446744073709551617", 10},
};
TEST_FAIL(kstrtos64, s64, "%lld", test_s64_fail);
}
static void __init test_kstrtou32_ok(void)
{
DECLARE_TEST_OK(u32, struct test_u32);
static DEFINE_TEST_OK(struct test_u32, test_u32_ok) = {
{"0", 10, 0},
{"1", 10, 1},
{"126", 10, 126},
{"127", 10, 127},
{"128", 10, 128},
{"129", 10, 129},
{"254", 10, 254},
{"255", 10, 255},
{"256", 10, 256},
{"257", 10, 257},
{"32766", 10, 32766},
{"32767", 10, 32767},
{"32768", 10, 32768},
{"32769", 10, 32769},
{"65534", 10, 65534},
{"65535", 10, 65535},
{"65536", 10, 65536},
{"65537", 10, 65537},
{"2147483646", 10, 2147483646},
{"2147483647", 10, 2147483647},
{"2147483648", 10, 2147483648U},
{"2147483649", 10, 2147483649U},
{"4294967294", 10, 4294967294U},
{"4294967295", 10, 4294967295U},
};
TEST_OK(kstrtou32, u32, "%u", test_u32_ok);
}
static void __init test_kstrtou32_fail(void)
{
static DEFINE_TEST_FAIL(test_u32_fail) = {
{"-2", 10},
{"-1", 10},
{"4294967296", 10},
{"4294967297", 10},
{"9223372036854775806", 10},
{"9223372036854775807", 10},
{"9223372036854775808", 10},
{"9223372036854775809", 10},
{"18446744073709551614", 10},
{"18446744073709551615", 10},
{"18446744073709551616", 10},
{"18446744073709551617", 10},
};
TEST_FAIL(kstrtou32, u32, "%u", test_u32_fail);
}
static void __init test_kstrtos32_ok(void)
{
DECLARE_TEST_OK(s32, struct test_s32);
static DEFINE_TEST_OK(struct test_s32, test_s32_ok) = {
{"-128", 10, -128},
{"-127", 10, -127},
{"-1", 10, -1},
{"0", 10, 0},
{"1", 10, 1},
{"126", 10, 126},
{"127", 10, 127},
{"128", 10, 128},
{"129", 10, 129},
{"254", 10, 254},
{"255", 10, 255},
{"256", 10, 256},
{"257", 10, 257},
{"32766", 10, 32766},
{"32767", 10, 32767},
{"32768", 10, 32768},
{"32769", 10, 32769},
{"65534", 10, 65534},
{"65535", 10, 65535},
{"65536", 10, 65536},
{"65537", 10, 65537},
{"2147483646", 10, 2147483646},
{"2147483647", 10, 2147483647},
};
TEST_OK(kstrtos32, s32, "%d", test_s32_ok);
}
static void __init test_kstrtos32_fail(void)
{
static DEFINE_TEST_FAIL(test_s32_fail) = {
{"2147483648", 10},
{"2147483649", 10},
{"4294967294", 10},
{"4294967295", 10},
{"4294967296", 10},
{"4294967297", 10},
{"9223372036854775806", 10},
{"9223372036854775807", 10},
{"9223372036854775808", 10},
{"9223372036854775809", 10},
{"18446744073709551614", 10},
{"18446744073709551615", 10},
{"18446744073709551616", 10},
{"18446744073709551617", 10},
};
TEST_FAIL(kstrtos32, s32, "%d", test_s32_fail);
}
static void __init test_kstrtou16_ok(void)
{
DECLARE_TEST_OK(u16, struct test_u16);
static DEFINE_TEST_OK(struct test_u16, test_u16_ok) = {
{"0", 10, 0},
{"1", 10, 1},
{"126", 10, 126},
{"127", 10, 127},
{"128", 10, 128},
{"129", 10, 129},
{"254", 10, 254},
{"255", 10, 255},
{"256", 10, 256},
{"257", 10, 257},
{"32766", 10, 32766},
{"32767", 10, 32767},
{"32768", 10, 32768},
{"32769", 10, 32769},
{"65534", 10, 65534},
{"65535", 10, 65535},
};
TEST_OK(kstrtou16, u16, "%hu", test_u16_ok);
}
static void __init test_kstrtou16_fail(void)
{
static DEFINE_TEST_FAIL(test_u16_fail) = {
{"-2", 10},
{"-1", 10},
{"65536", 10},
{"65537", 10},
{"2147483646", 10},
{"2147483647", 10},
{"2147483648", 10},
{"2147483649", 10},
{"4294967294", 10},
{"4294967295", 10},
{"4294967296", 10},
{"4294967297", 10},
{"9223372036854775806", 10},
{"9223372036854775807", 10},
{"9223372036854775808", 10},
{"9223372036854775809", 10},
{"18446744073709551614", 10},
{"18446744073709551615", 10},
{"18446744073709551616", 10},
{"18446744073709551617", 10},
};
TEST_FAIL(kstrtou16, u16, "%hu", test_u16_fail);
}
static void __init test_kstrtos16_ok(void)
{
DECLARE_TEST_OK(s16, struct test_s16);
static DEFINE_TEST_OK(struct test_s16, test_s16_ok) = {
{"-130", 10, -130},
{"-129", 10, -129},
{"-128", 10, -128},
{"-127", 10, -127},
{"-1", 10, -1},
{"0", 10, 0},
{"1", 10, 1},
{"126", 10, 126},
{"127", 10, 127},
{"128", 10, 128},
{"129", 10, 129},
{"254", 10, 254},
{"255", 10, 255},
{"256", 10, 256},
{"257", 10, 257},
{"32766", 10, 32766},
{"32767", 10, 32767},
};
TEST_OK(kstrtos16, s16, "%hd", test_s16_ok);
}
static void __init test_kstrtos16_fail(void)
{
static DEFINE_TEST_FAIL(test_s16_fail) = {
{"32768", 10},
{"32769", 10},
{"65534", 10},
{"65535", 10},
{"65536", 10},
{"65537", 10},
{"2147483646", 10},
{"2147483647", 10},
{"2147483648", 10},
{"2147483649", 10},
{"4294967294", 10},
{"4294967295", 10},
{"4294967296", 10},
{"4294967297", 10},
{"9223372036854775806", 10},
{"9223372036854775807", 10},
{"9223372036854775808", 10},
{"9223372036854775809", 10},
{"18446744073709551614", 10},
{"18446744073709551615", 10},
{"18446744073709551616", 10},
{"18446744073709551617", 10},
};
TEST_FAIL(kstrtos16, s16, "%hd", test_s16_fail);
}
static void __init test_kstrtou8_ok(void)
{
DECLARE_TEST_OK(u8, struct test_u8);
static DEFINE_TEST_OK(struct test_u8, test_u8_ok) = {
{"0", 10, 0},
{"1", 10, 1},
{"126", 10, 126},
{"127", 10, 127},
{"128", 10, 128},
{"129", 10, 129},
{"254", 10, 254},
{"255", 10, 255},
};
TEST_OK(kstrtou8, u8, "%hhu", test_u8_ok);
}
static void __init test_kstrtou8_fail(void)
{
static DEFINE_TEST_FAIL(test_u8_fail) = {
{"-2", 10},
{"-1", 10},
{"256", 10},
{"257", 10},
{"32766", 10},
{"32767", 10},
{"32768", 10},
{"32769", 10},
{"65534", 10},
{"65535", 10},
{"65536", 10},
{"65537", 10},
{"2147483646", 10},
{"2147483647", 10},
{"2147483648", 10},
{"2147483649", 10},
{"4294967294", 10},
{"4294967295", 10},
{"4294967296", 10},
{"4294967297", 10},
{"9223372036854775806", 10},
{"9223372036854775807", 10},
{"9223372036854775808", 10},
{"9223372036854775809", 10},
{"18446744073709551614", 10},
{"18446744073709551615", 10},
{"18446744073709551616", 10},
{"18446744073709551617", 10},
};
TEST_FAIL(kstrtou8, u8, "%hhu", test_u8_fail);
}
static void __init test_kstrtos8_ok(void)
{
DECLARE_TEST_OK(s8, struct test_s8);
static DEFINE_TEST_OK(struct test_s8, test_s8_ok) = {
{"-128", 10, -128},
{"-127", 10, -127},
{"-1", 10, -1},
{"0", 10, 0},
{"1", 10, 1},
{"126", 10, 126},
{"127", 10, 127},
};
TEST_OK(kstrtos8, s8, "%hhd", test_s8_ok);
}
static void __init test_kstrtos8_fail(void)
{
static DEFINE_TEST_FAIL(test_s8_fail) = {
{"-130", 10},
{"-129", 10},
{"128", 10},
{"129", 10},
{"254", 10},
{"255", 10},
{"256", 10},
{"257", 10},
{"32766", 10},
{"32767", 10},
{"32768", 10},
{"32769", 10},
{"65534", 10},
{"65535", 10},
{"65536", 10},
{"65537", 10},
{"2147483646", 10},
{"2147483647", 10},
{"2147483648", 10},
{"2147483649", 10},
{"4294967294", 10},
{"4294967295", 10},
{"4294967296", 10},
{"4294967297", 10},
{"9223372036854775806", 10},
{"9223372036854775807", 10},
{"9223372036854775808", 10},
{"9223372036854775809", 10},
{"18446744073709551614", 10},
{"18446744073709551615", 10},
{"18446744073709551616", 10},
{"18446744073709551617", 10},
};
TEST_FAIL(kstrtos8, s8, "%hhd", test_s8_fail);
}
static int __init test_kstrtox_init(void)
{
test_kstrtoull_ok();
test_kstrtoull_fail();
test_kstrtoll_ok();
test_kstrtoll_fail();
test_kstrtou64_ok();
test_kstrtou64_fail();
test_kstrtos64_ok();
test_kstrtos64_fail();
test_kstrtou32_ok();
test_kstrtou32_fail();
test_kstrtos32_ok();
test_kstrtos32_fail();
test_kstrtou16_ok();
test_kstrtou16_fail();
test_kstrtos16_ok();
test_kstrtos16_fail();
test_kstrtou8_ok();
test_kstrtou8_fail();
test_kstrtos8_ok();
test_kstrtos8_fail();
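	/* Added note: returning an error on purpose keeps the module from
	 * staying loaded; the tests above have already run and WARNed on
	 * any failure by this point.
	 */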
return -EINVAL;
}
module_init(test_kstrtox_init);
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | lib/test-kstrtox.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/fault-inject.h>
#include <linux/fault-inject-usercopy.h>
static struct {
struct fault_attr attr;
} fail_usercopy = {
.attr = FAULT_ATTR_INITIALIZER,
};
static int __init setup_fail_usercopy(char *str)
{
return setup_fault_attr(&fail_usercopy.attr, str);
}
__setup("fail_usercopy=", setup_fail_usercopy);
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
static int __init fail_usercopy_debugfs(void)
{
struct dentry *dir;
dir = fault_create_debugfs_attr("fail_usercopy", NULL,
&fail_usercopy.attr);
if (IS_ERR(dir))
return PTR_ERR(dir);
return 0;
}
late_initcall(fail_usercopy_debugfs);
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
bool should_fail_usercopy(void)
{
return should_fail(&fail_usercopy.attr, 1);
}
EXPORT_SYMBOL_GPL(should_fail_usercopy);
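/*
 * Usage sketch (added for illustration, not part of this file): the
 * usercopy helpers consult this hook before touching userspace, roughly
 *
 *	if (should_fail_usercopy())
 *		return n;	// report that nothing was copied
 *
 * which is how copy_from_user() and friends simulate faulting copies.
 */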
| linux-master | lib/fault-inject-usercopy.c |
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <[email protected]>
*/
#include <linux/packing.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/bitrev.h>
static int get_le_offset(int offset)
{
int closest_multiple_of_4;
closest_multiple_of_4 = (offset / 4) * 4;
offset -= closest_multiple_of_4;
return closest_multiple_of_4 + (3 - offset);
}
static int get_reverse_lsw32_offset(int offset, size_t len)
{
int closest_multiple_of_4;
int word_index;
word_index = offset / 4;
closest_multiple_of_4 = word_index * 4;
offset -= closest_multiple_of_4;
word_index = (len / 4) - word_index - 1;
return word_index * 4 + offset;
}
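/*
 * Worked examples (added): for a pbuf of 8 bytes,
 *	get_le_offset(0) == 3 and get_le_offset(4) == 7,
 * i.e. bytes swap places within each 32-bit word, while
 *	get_reverse_lsw32_offset(0, 8) == 4 and
 *	get_reverse_lsw32_offset(5, 8) == 1,
 * i.e. the 32-bit words trade places but bytes keep their position
 * inside each word.
 */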
static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
int *box_end_bit, u8 *box_mask)
{
int box_bit_width = *box_start_bit - *box_end_bit + 1;
int new_box_start_bit, new_box_end_bit;
*to_write >>= *box_end_bit;
*to_write = bitrev8(*to_write) >> (8 - box_bit_width);
*to_write <<= *box_end_bit;
new_box_end_bit = box_bit_width - *box_start_bit - 1;
new_box_start_bit = box_bit_width - *box_end_bit - 1;
*box_mask = GENMASK_ULL(new_box_start_bit, new_box_end_bit);
*box_start_bit = new_box_start_bit;
*box_end_bit = new_box_end_bit;
}
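/*
 * Worked example (added): for a whole box (box_start_bit = 7,
 * box_end_bit = 0) and *to_write = 0x01, the byte is bit-reversed to
 * 0x80 while the mask stays 0xff; bit 0 of the value lands in the
 * leftmost position, which is what QUIRK_MSB_ON_THE_RIGHT means.
 */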
/**
* packing - Convert numbers (currently u64) between a packed and an unpacked
* format. Unpacked means laid out in memory in the CPU's native
* understanding of integers, while packed means anything else that
* requires translation.
*
* @pbuf: Pointer to a buffer holding the packed value.
* @uval: Pointer to an u64 holding the unpacked value.
* @startbit: The index (in logical notation, compensated for quirks) where
* the packed value starts within pbuf. Must be larger than, or
* equal to, endbit.
* @endbit: The index (in logical notation, compensated for quirks) where
* the packed value ends within pbuf. Must be smaller than, or equal
* to, startbit.
* @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf.
* @op: If PACK, then uval will be treated as const pointer and copied (packed)
* into pbuf, between startbit and endbit.
* If UNPACK, then pbuf will be treated as const pointer and the logical
* value between startbit and endbit will be copied (unpacked) to uval.
* @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and
* QUIRK_MSB_ON_THE_RIGHT.
*
 * Return: 0 on success, -EINVAL or -ERANGE if called incorrectly. Assuming
 * correct usage, the return code may be discarded.
* If op is PACK, pbuf is modified.
* If op is UNPACK, uval is modified.
*/
int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen,
enum packing_op op, u8 quirks)
{
	/* Number of bits for storing "uval";
	 * also the width of the field to access in the pbuf.
	 */
u64 value_width;
/* Logical byte indices corresponding to the
* start and end of the field.
*/
int plogical_first_u8, plogical_last_u8, box;
/* startbit is expected to be larger than endbit */
if (startbit < endbit)
/* Invalid function call */
return -EINVAL;
value_width = startbit - endbit + 1;
if (value_width > 64)
return -ERANGE;
/* Check if "uval" fits in "value_width" bits.
* If value_width is 64, the check will fail, but any
* 64-bit uval will surely fit.
*/
if (op == PACK && value_width < 64 && (*uval >= (1ull << value_width)))
/* Cannot store "uval" inside "value_width" bits.
* Truncating "uval" is most certainly not desirable,
* so simply erroring out is appropriate.
*/
return -ERANGE;
/* Initialize parameter */
if (op == UNPACK)
*uval = 0;
	/* Iterate through an idealistic view of the pbuf as a u64 with
* no quirks, u8 by u8 (aligned at u8 boundaries), from high to low
* logical bit significance. "box" denotes the current logical u8.
*/
plogical_first_u8 = startbit / 8;
plogical_last_u8 = endbit / 8;
for (box = plogical_first_u8; box >= plogical_last_u8; box--) {
/* Bit indices into the currently accessed 8-bit box */
int box_start_bit, box_end_bit, box_addr;
u8 box_mask;
/* Corresponding bits from the unpacked u64 parameter */
int proj_start_bit, proj_end_bit;
u64 proj_mask;
/* This u8 may need to be accessed in its entirety
* (from bit 7 to bit 0), or not, depending on the
* input arguments startbit and endbit.
*/
if (box == plogical_first_u8)
box_start_bit = startbit % 8;
else
box_start_bit = 7;
if (box == plogical_last_u8)
box_end_bit = endbit % 8;
else
box_end_bit = 0;
/* We have determined the box bit start and end.
* Now we calculate where this (masked) u8 box would fit
* in the unpacked (CPU-readable) u64 - the u8 box's
* projection onto the unpacked u64. Though the
* box is u8, the projection is u64 because it may fall
* anywhere within the unpacked u64.
*/
proj_start_bit = ((box * 8) + box_start_bit) - endbit;
proj_end_bit = ((box * 8) + box_end_bit) - endbit;
proj_mask = GENMASK_ULL(proj_start_bit, proj_end_bit);
box_mask = GENMASK_ULL(box_start_bit, box_end_bit);
/* Determine the offset of the u8 box inside the pbuf,
* adjusted for quirks. The adjusted box_addr will be used for
* effective addressing inside the pbuf (so it's not
* logical any longer).
*/
box_addr = pbuflen - box - 1;
if (quirks & QUIRK_LITTLE_ENDIAN)
box_addr = get_le_offset(box_addr);
if (quirks & QUIRK_LSW32_IS_FIRST)
box_addr = get_reverse_lsw32_offset(box_addr,
pbuflen);
if (op == UNPACK) {
u64 pval;
/* Read from pbuf, write to uval */
pval = ((u8 *)pbuf)[box_addr] & box_mask;
if (quirks & QUIRK_MSB_ON_THE_RIGHT)
adjust_for_msb_right_quirk(&pval,
&box_start_bit,
&box_end_bit,
&box_mask);
pval >>= box_end_bit;
pval <<= proj_end_bit;
*uval &= ~proj_mask;
*uval |= pval;
} else {
u64 pval;
/* Write to pbuf, read from uval */
pval = (*uval) & proj_mask;
pval >>= proj_end_bit;
if (quirks & QUIRK_MSB_ON_THE_RIGHT)
adjust_for_msb_right_quirk(&pval,
&box_start_bit,
&box_end_bit,
&box_mask);
pval <<= box_end_bit;
((u8 *)pbuf)[box_addr] &= ~box_mask;
((u8 *)pbuf)[box_addr] |= pval;
}
}
return 0;
}
EXPORT_SYMBOL(packing);
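/*
 * Usage sketch (added for illustration, not part of the original file):
 * pack the value 0x1234 into bits 47..32 of a big-endian 8-byte buffer,
 * then unpack it again. Error checking is omitted for brevity.
 */
static void __maybe_unused packing_usage_example(void)
{
	u8 buf[8] = {};
	u64 val = 0x1234;
	/* PACK: buf[2] becomes 0x12 and buf[3] becomes 0x34 */
	packing(buf, &val, 47, 32, sizeof(buf), PACK, 0);
	/* UNPACK: val is rebuilt from the buffer contents */
	val = 0;
	packing(buf, &val, 47, 32, sizeof(buf), UNPACK, 0);
}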
MODULE_DESCRIPTION("Generic bitfield packing and unpacking");
| linux-master | lib/packing.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Test cases for memcpy(), memmove(), and memset().
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <kunit/test.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
struct some_bytes {
union {
u8 data[32];
struct {
u32 one;
u16 two;
u8 three;
/* 1 byte hole */
u32 four[4];
};
};
};
#define check(instance, v) do { \
BUILD_BUG_ON(sizeof(instance.data) != 32); \
for (size_t i = 0; i < sizeof(instance.data); i++) { \
		KUNIT_ASSERT_EQ_MSG(test, instance.data[i], v, \
			"line %d: '%s' not initialized to 0x%02x @ %zu (saw 0x%02x)\n", \
__LINE__, #instance, v, i, instance.data[i]); \
} \
} while (0)
#define compare(name, one, two) do { \
BUILD_BUG_ON(sizeof(one) != sizeof(two)); \
for (size_t i = 0; i < sizeof(one); i++) { \
		KUNIT_EXPECT_EQ_MSG(test, one.data[i], two.data[i], \
			"line %d: %s.data[%zu] (0x%02x) != %s.data[%zu] (0x%02x)\n", \
__LINE__, #one, i, one.data[i], #two, i, two.data[i]); \
} \
kunit_info(test, "ok: " TEST_OP "() " name "\n"); \
} while (0)
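/*
 * Added note: check() asserts that every byte of a struct some_bytes
 * equals a single fill value, and compare() asserts two instances match
 * byte for byte, logging the sub-test name on success.
 */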
static void memcpy_test(struct kunit *test)
{
#define TEST_OP "memcpy"
struct some_bytes control = {
.data = { 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
},
};
struct some_bytes zero = { };
struct some_bytes middle = {
.data = { 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
},
};
struct some_bytes three = {
.data = { 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x00, 0x00, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
},
};
struct some_bytes dest = { };
int count;
u8 *ptr;
/* Verify static initializers. */
check(control, 0x20);
check(zero, 0);
compare("static initializers", dest, zero);
/* Verify assignment. */
dest = control;
compare("direct assignment", dest, control);
/* Verify complete overwrite. */
memcpy(dest.data, zero.data, sizeof(dest.data));
compare("complete overwrite", dest, zero);
/* Verify middle overwrite. */
dest = control;
memcpy(dest.data + 12, zero.data, 7);
compare("middle overwrite", dest, middle);
/* Verify argument side-effects aren't repeated. */
dest = control;
ptr = dest.data;
count = 1;
memcpy(ptr++, zero.data, count++);
ptr += 8;
memcpy(ptr++, zero.data, count++);
compare("argument side-effects", dest, three);
#undef TEST_OP
}
static unsigned char larger_array[2048];
static void memmove_test(struct kunit *test)
{
#define TEST_OP "memmove"
struct some_bytes control = {
.data = { 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
},
};
struct some_bytes zero = { };
struct some_bytes middle = {
.data = { 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
0x99, 0x99, 0x99, 0x99, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x99, 0x99, 0x99, 0x99, 0x99,
0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
},
};
struct some_bytes five = {
.data = { 0x00, 0x00, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
0x99, 0x99, 0x00, 0x00, 0x00, 0x99, 0x99, 0x99,
0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
},
};
struct some_bytes overlap = {
.data = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
},
};
struct some_bytes overlap_expected = {
.data = { 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x04, 0x07,
0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99,
},
};
struct some_bytes dest = { };
int count;
u8 *ptr;
/* Verify static initializers. */
check(control, 0x99);
check(zero, 0);
compare("static initializers", zero, dest);
/* Verify assignment. */
dest = control;
compare("direct assignment", dest, control);
/* Verify complete overwrite. */
memmove(dest.data, zero.data, sizeof(dest.data));
compare("complete overwrite", dest, zero);
/* Verify middle overwrite. */
dest = control;
memmove(dest.data + 12, zero.data, 7);
compare("middle overwrite", dest, middle);
/* Verify argument side-effects aren't repeated. */
dest = control;
ptr = dest.data;
count = 2;
memmove(ptr++, zero.data, count++);
ptr += 9;
memmove(ptr++, zero.data, count++);
compare("argument side-effects", dest, five);
/* Verify overlapping overwrite is correct. */
ptr = &overlap.data[2];
memmove(ptr, overlap.data, 5);
compare("overlapping write", overlap, overlap_expected);
/* Verify larger overlapping moves. */
larger_array[256] = 0xAAu;
/*
* Test a backwards overlapping memmove first. 256 and 1024 are
* important for i386 to use rep movsl.
*/
memmove(larger_array, larger_array + 256, 1024);
KUNIT_ASSERT_EQ(test, larger_array[0], 0xAAu);
KUNIT_ASSERT_EQ(test, larger_array[256], 0x00);
KUNIT_ASSERT_NULL(test,
memchr(larger_array + 1, 0xaa, ARRAY_SIZE(larger_array) - 1));
/* Test a forwards overlapping memmove. */
larger_array[0] = 0xBBu;
memmove(larger_array + 256, larger_array, 1024);
KUNIT_ASSERT_EQ(test, larger_array[0], 0xBBu);
KUNIT_ASSERT_EQ(test, larger_array[256], 0xBBu);
KUNIT_ASSERT_NULL(test, memchr(larger_array + 1, 0xBBu, 256 - 1));
KUNIT_ASSERT_NULL(test,
memchr(larger_array + 257, 0xBBu, ARRAY_SIZE(larger_array) - 257));
#undef TEST_OP
}
static void memset_test(struct kunit *test)
{
#define TEST_OP "memset"
struct some_bytes control = {
.data = { 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
},
};
struct some_bytes complete = {
.data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
},
};
struct some_bytes middle = {
.data = { 0x30, 0x30, 0x30, 0x30, 0x31, 0x31, 0x31, 0x31,
0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
0x31, 0x31, 0x31, 0x31, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
},
};
struct some_bytes three = {
.data = { 0x60, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x61, 0x61, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
},
};
struct some_bytes after = {
.data = { 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x72,
0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72,
0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72,
0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72,
},
};
struct some_bytes startat = {
.data = { 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79,
0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79,
0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79,
},
};
struct some_bytes dest = { };
int count, value;
u8 *ptr;
/* Verify static initializers. */
check(control, 0x30);
check(dest, 0);
/* Verify assignment. */
dest = control;
compare("direct assignment", dest, control);
/* Verify complete overwrite. */
memset(dest.data, 0xff, sizeof(dest.data));
compare("complete overwrite", dest, complete);
/* Verify middle overwrite. */
dest = control;
memset(dest.data + 4, 0x31, 16);
compare("middle overwrite", dest, middle);
/* Verify argument side-effects aren't repeated. */
dest = control;
ptr = dest.data;
value = 0x60;
count = 1;
memset(ptr++, value++, count++);
ptr += 8;
memset(ptr++, value++, count++);
compare("argument side-effects", dest, three);
/* Verify memset_after() */
dest = control;
memset_after(&dest, 0x72, three);
compare("memset_after()", dest, after);
/* Verify memset_startat() */
dest = control;
memset_startat(&dest, 0x79, four);
compare("memset_startat()", dest, startat);
#undef TEST_OP
}
static u8 large_src[1024];
static u8 large_dst[2048];
static const u8 large_zero[2048];
static void set_random_nonzero(struct kunit *test, u8 *byte)
{
int failed_rng = 0;
while (*byte == 0) {
get_random_bytes(byte, 1);
KUNIT_ASSERT_LT_MSG(test, failed_rng++, 100,
"Is the RNG broken?");
}
}
static void init_large(struct kunit *test)
{
if (!IS_ENABLED(CONFIG_MEMCPY_SLOW_KUNIT_TEST))
kunit_skip(test, "Slow test skipped. Enable with CONFIG_MEMCPY_SLOW_KUNIT_TEST=y");
/* Get many bit patterns. */
get_random_bytes(large_src, ARRAY_SIZE(large_src));
/* Make sure we have non-zero edges. */
set_random_nonzero(test, &large_src[0]);
set_random_nonzero(test, &large_src[ARRAY_SIZE(large_src) - 1]);
/* Explicitly zero the entire destination. */
memset(large_dst, 0, ARRAY_SIZE(large_dst));
}
/*
* Instead of an indirect function call for "copy" or a giant macro,
* use a bool to pick memcpy or memmove.
*/
static void copy_large_test(struct kunit *test, bool use_memmove)
{
init_large(test);
/* Copy a growing number of non-overlapping bytes ... */
for (int bytes = 1; bytes <= ARRAY_SIZE(large_src); bytes++) {
/* Over a shifting destination window ... */
for (int offset = 0; offset < ARRAY_SIZE(large_src); offset++) {
int right_zero_pos = offset + bytes;
int right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos;
/* Copy! */
if (use_memmove)
memmove(large_dst + offset, large_src, bytes);
else
memcpy(large_dst + offset, large_src, bytes);
/* Did we touch anything before the copy area? */
KUNIT_ASSERT_EQ_MSG(test,
memcmp(large_dst, large_zero, offset), 0,
"with size %d at offset %d", bytes, offset);
/* Did we touch anything after the copy area? */
KUNIT_ASSERT_EQ_MSG(test,
memcmp(&large_dst[right_zero_pos], large_zero, right_zero_size), 0,
"with size %d at offset %d", bytes, offset);
/* Are we byte-for-byte exact across the copy? */
KUNIT_ASSERT_EQ_MSG(test,
memcmp(large_dst + offset, large_src, bytes), 0,
"with size %d at offset %d", bytes, offset);
/* Zero out what we copied for the next cycle. */
memset(large_dst + offset, 0, bytes);
}
/* Avoid stall warnings if this loop gets slow. */
cond_resched();
}
}
static void memcpy_large_test(struct kunit *test)
{
copy_large_test(test, false);
}
static void memmove_large_test(struct kunit *test)
{
copy_large_test(test, true);
}
/*
* On the assumption that boundary conditions are going to be the most
* sensitive, instead of taking a full step (inc) each iteration,
* take single index steps for at least the first "inc"-many indexes
* from the "start" and at least the last "inc"-many indexes before
* the "end". When in the middle, take full "inc"-wide steps. For
* example, calling next_step(idx, 1, 15, 3) with idx starting at 0
* would see the following pattern: 1 2 3 4 7 10 11 12 13 14 15.
*/
static int next_step(int idx, int start, int end, int inc)
{
start += inc;
end -= inc;
if (idx < start || idx + inc > end)
inc = 1;
return idx + inc;
}
static void inner_loop(struct kunit *test, int bytes, int d_off, int s_off)
{
int left_zero_pos, left_zero_size;
int right_zero_pos, right_zero_size;
int src_pos, src_orig_pos, src_size;
int pos;
/* Place the source in the destination buffer. */
memcpy(&large_dst[s_off], large_src, bytes);
/* Copy to destination offset. */
memmove(&large_dst[d_off], &large_dst[s_off], bytes);
/* Make sure destination entirely matches. */
KUNIT_ASSERT_EQ_MSG(test, memcmp(&large_dst[d_off], large_src, bytes), 0,
"with size %d at src offset %d and dest offset %d",
bytes, s_off, d_off);
/* Calculate the expected zero spans. */
if (s_off < d_off) {
left_zero_pos = 0;
left_zero_size = s_off;
right_zero_pos = d_off + bytes;
right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos;
src_pos = s_off;
src_orig_pos = 0;
src_size = d_off - s_off;
} else {
left_zero_pos = 0;
left_zero_size = d_off;
right_zero_pos = s_off + bytes;
right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos;
src_pos = d_off + bytes;
src_orig_pos = src_pos - s_off;
src_size = right_zero_pos - src_pos;
}
/* Check non-overlapping source is unchanged.*/
KUNIT_ASSERT_EQ_MSG(test,
memcmp(&large_dst[src_pos], &large_src[src_orig_pos], src_size), 0,
"with size %d at src offset %d and dest offset %d",
bytes, s_off, d_off);
/* Check leading buffer contents are zero. */
KUNIT_ASSERT_EQ_MSG(test,
memcmp(&large_dst[left_zero_pos], large_zero, left_zero_size), 0,
"with size %d at src offset %d and dest offset %d",
bytes, s_off, d_off);
/* Check trailing buffer contents are zero. */
KUNIT_ASSERT_EQ_MSG(test,
memcmp(&large_dst[right_zero_pos], large_zero, right_zero_size), 0,
"with size %d at src offset %d and dest offset %d",
bytes, s_off, d_off);
	/* Zero out everything not already zeroed. */
pos = left_zero_pos + left_zero_size;
memset(&large_dst[pos], 0, right_zero_pos - pos);
}
static void memmove_overlap_test(struct kunit *test)
{
/*
* Running all possible offset and overlap combinations takes a
* very long time. Instead, only check up to 128 bytes offset
* into the destination buffer (which should result in crossing
* cachelines), with a step size of 1 through 7 to try to skip some
* redundancy.
*/
	static const int offset_max = 128;	/* less than ARRAY_SIZE(large_src) */
static const int bytes_step = 7;
static const int window_step = 7;
static const int bytes_start = 1;
static const int bytes_end = ARRAY_SIZE(large_src) + 1;
init_large(test);
/* Copy a growing number of overlapping bytes ... */
for (int bytes = bytes_start; bytes < bytes_end;
bytes = next_step(bytes, bytes_start, bytes_end, bytes_step)) {
/* Over a shifting destination window ... */
for (int d_off = 0; d_off < offset_max; d_off++) {
int s_start = max(d_off - bytes, 0);
int s_end = min_t(int, d_off + bytes, ARRAY_SIZE(large_src));
/* Over a shifting source window ... */
for (int s_off = s_start; s_off < s_end;
s_off = next_step(s_off, s_start, s_end, window_step))
inner_loop(test, bytes, d_off, s_off);
/* Avoid stall warnings. */
cond_resched();
}
}
}
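/*
 * A minimal usage sketch for the API exercised below (illustrative only;
 * "struct record" and fill_tag() are made-up names, not part of this
 * test): strtomem() copies a NUL-terminated string into a fixed-size
 * byte field without NUL termination or padding, while strtomem_pad()
 * additionally fills the remaining bytes with a pad value.
 *
 *	struct record {
 *		u8 tag[8] __nonstring;
 *	};
 *
 *	static void fill_tag(struct record *r, const char *name)
 *	{
 *		strtomem_pad(r->tag, name, 0);	// zero-pad the tail
 *	}
 */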
static void strtomem_test(struct kunit *test)
{
static const char input[sizeof(unsigned long)] = "hi";
static const char truncate[] = "this is too long";
struct {
unsigned long canary1;
unsigned char output[sizeof(unsigned long)] __nonstring;
unsigned long canary2;
} wrap;
memset(&wrap, 0xFF, sizeof(wrap));
KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX,
"bad initial canary value");
KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX,
"bad initial canary value");
/* Check unpadded copy leaves surroundings untouched. */
strtomem(wrap.output, input);
KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
for (size_t i = 2; i < sizeof(wrap.output); i++)
KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF);
KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
/* Check truncated copy leaves surroundings untouched. */
memset(&wrap, 0xFF, sizeof(wrap));
strtomem(wrap.output, truncate);
KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
for (size_t i = 0; i < sizeof(wrap.output); i++)
KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
	/* Check padded copy pads only the output buffer, not its surroundings. */
memset(&wrap, 0xFF, sizeof(wrap));
strtomem_pad(wrap.output, input, 0xAA);
KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
for (size_t i = 2; i < sizeof(wrap.output); i++)
KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA);
KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
	/* Check truncated padded copy has no padding. */
	memset(&wrap, 0xFF, sizeof(wrap));
	strtomem_pad(wrap.output, truncate, 0xAA);
KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
for (size_t i = 0; i < sizeof(wrap.output); i++)
KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
}
static struct kunit_case memcpy_test_cases[] = {
KUNIT_CASE(memset_test),
KUNIT_CASE(memcpy_test),
KUNIT_CASE_SLOW(memcpy_large_test),
KUNIT_CASE_SLOW(memmove_test),
KUNIT_CASE_SLOW(memmove_large_test),
KUNIT_CASE_SLOW(memmove_overlap_test),
KUNIT_CASE(strtomem_test),
{}
};
static struct kunit_suite memcpy_test_suite = {
.name = "memcpy",
.test_cases = memcpy_test_cases,
};
kunit_test_suite(memcpy_test_suite);
MODULE_LICENSE("GPL");
| linux-master | lib/memcpy_kunit.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright 2002 H. Peter Anvin - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* raid6/algos.c
*
* Algorithm list and algorithm selection for RAID-6
*/
#include <linux/raid/pq.h>
#ifndef __KERNEL__
#include <sys/mman.h>
#include <stdio.h>
#else
#include <linux/module.h>
#include <linux/gfp.h>
/* In .bss so it's zeroed */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
EXPORT_SYMBOL(raid6_empty_zero_page);
#endif
struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);
const struct raid6_calls * const raid6_algos[] = {
#if defined(__i386__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
&raid6_avx512x2,
&raid6_avx512x1,
#endif
&raid6_avx2x2,
&raid6_avx2x1,
&raid6_sse2x2,
&raid6_sse2x1,
&raid6_sse1x2,
&raid6_sse1x1,
&raid6_mmxx2,
&raid6_mmxx1,
#endif
#if defined(__x86_64__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
&raid6_avx512x4,
&raid6_avx512x2,
&raid6_avx512x1,
#endif
&raid6_avx2x4,
&raid6_avx2x2,
&raid6_avx2x1,
&raid6_sse2x4,
&raid6_sse2x2,
&raid6_sse2x1,
#endif
#ifdef CONFIG_ALTIVEC
&raid6_vpermxor8,
&raid6_vpermxor4,
&raid6_vpermxor2,
&raid6_vpermxor1,
&raid6_altivec8,
&raid6_altivec4,
&raid6_altivec2,
&raid6_altivec1,
#endif
#if defined(CONFIG_S390)
&raid6_s390vx8,
#endif
#ifdef CONFIG_KERNEL_MODE_NEON
&raid6_neonx8,
&raid6_neonx4,
&raid6_neonx2,
&raid6_neonx1,
#endif
#ifdef CONFIG_LOONGARCH
#ifdef CONFIG_CPU_HAS_LASX
&raid6_lasx,
#endif
#ifdef CONFIG_CPU_HAS_LSX
&raid6_lsx,
#endif
#endif
#if defined(__ia64__)
&raid6_intx32,
&raid6_intx16,
#endif
&raid6_intx8,
&raid6_intx4,
&raid6_intx2,
&raid6_intx1,
NULL
};
void (*raid6_2data_recov)(int, size_t, int, int, void **);
EXPORT_SYMBOL_GPL(raid6_2data_recov);
void (*raid6_datap_recov)(int, size_t, int, void **);
EXPORT_SYMBOL_GPL(raid6_datap_recov);
const struct raid6_recov_calls *const raid6_recov_algos[] = {
#ifdef CONFIG_X86
#ifdef CONFIG_AS_AVX512
&raid6_recov_avx512,
#endif
&raid6_recov_avx2,
&raid6_recov_ssse3,
#endif
#ifdef CONFIG_S390
&raid6_recov_s390xc,
#endif
#if defined(CONFIG_KERNEL_MODE_NEON)
&raid6_recov_neon,
#endif
#ifdef CONFIG_LOONGARCH
#ifdef CONFIG_CPU_HAS_LASX
&raid6_recov_lasx,
#endif
#ifdef CONFIG_CPU_HAS_LSX
&raid6_recov_lsx,
#endif
#endif
&raid6_recov_intx1,
NULL
};
#ifdef __KERNEL__
#define RAID6_TIME_JIFFIES_LG2 4
#else
/* Need more time to be stable in userspace */
#define RAID6_TIME_JIFFIES_LG2 9
#define time_before(x, y) ((x) < (y))
#endif
#define RAID6_TEST_DISKS 8
#define RAID6_TEST_DISKS_ORDER 3
static inline const struct raid6_recov_calls *raid6_choose_recov(void)
{
const struct raid6_recov_calls *const *algo;
const struct raid6_recov_calls *best;
for (best = NULL, algo = raid6_recov_algos; *algo; algo++)
if (!best || (*algo)->priority > best->priority)
if (!(*algo)->valid || (*algo)->valid())
best = *algo;
if (best) {
raid6_2data_recov = best->data2;
raid6_datap_recov = best->datap;
pr_info("raid6: using %s recovery algorithm\n", best->name);
} else
pr_err("raid6: Yikes! No recovery algorithm found!\n");
return best;
}
static inline const struct raid6_calls *raid6_choose_gen(
void *(*const dptrs)[RAID6_TEST_DISKS], const int disks)
{
unsigned long perf, bestgenperf, j0, j1;
int start = (disks>>1)-1, stop = disks-3; /* work on the second half of the disks */
const struct raid6_calls *const *algo;
const struct raid6_calls *best;
for (bestgenperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
if (!best || (*algo)->priority >= best->priority) {
if ((*algo)->valid && !(*algo)->valid())
continue;
if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
best = *algo;
break;
}
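			/*
			 * The benchmark below runs gen_syndrome() for a
			 * window of (1 << RAID6_TIME_JIFFIES_LG2) jiffies;
			 * each call touches (disks - 2) data pages. The MB/s
			 * value printed afterwards therefore works out to:
			 *
			 *	perf * (disks - 2) * PAGE_SIZE * HZ
			 *		/ (1 << RAID6_TIME_JIFFIES_LG2) / 2^20
			 *	= (perf * HZ * (disks - 2))
			 *		>> (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2)
			 */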
perf = 0;
preempt_disable();
j0 = jiffies;
while ((j1 = jiffies) == j0)
cpu_relax();
while (time_before(jiffies,
j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
(*algo)->gen_syndrome(disks, PAGE_SIZE, *dptrs);
perf++;
}
preempt_enable();
if (perf > bestgenperf) {
bestgenperf = perf;
best = *algo;
}
pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name,
(perf * HZ * (disks-2)) >>
(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
}
}
if (!best) {
pr_err("raid6: Yikes! No algorithm found!\n");
goto out;
}
raid6_call = *best;
if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
pr_info("raid6: skipped pq benchmark and selected %s\n",
best->name);
goto out;
}
pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
best->name,
(bestgenperf * HZ * (disks - 2)) >>
(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
if (best->xor_syndrome) {
perf = 0;
preempt_disable();
j0 = jiffies;
while ((j1 = jiffies) == j0)
cpu_relax();
while (time_before(jiffies,
j1 + (1 << RAID6_TIME_JIFFIES_LG2))) {
best->xor_syndrome(disks, start, stop,
PAGE_SIZE, *dptrs);
perf++;
}
preempt_enable();
pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
(perf * HZ * (disks - 2)) >>
(20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
}
out:
return best;
}
/*
 * Try to pick the best algorithm. This code uses the gfmul table as a
 * convenient data set to abuse.
 */
int __init raid6_select_algo(void)
{
const int disks = RAID6_TEST_DISKS;
const struct raid6_calls *gen_best;
const struct raid6_recov_calls *rec_best;
char *disk_ptr, *p;
void *dptrs[RAID6_TEST_DISKS];
int i, cycle;
/* prepare the buffer and fill it circularly with gfmul table */
disk_ptr = (char *)__get_free_pages(GFP_KERNEL, RAID6_TEST_DISKS_ORDER);
if (!disk_ptr) {
pr_err("raid6: Yikes! No memory available.\n");
return -ENOMEM;
}
p = disk_ptr;
for (i = 0; i < disks; i++)
dptrs[i] = p + PAGE_SIZE * i;
cycle = ((disks - 2) * PAGE_SIZE) / 65536;
for (i = 0; i < cycle; i++) {
memcpy(p, raid6_gfmul, 65536);
p += 65536;
}
if ((disks - 2) * PAGE_SIZE % 65536)
memcpy(p, raid6_gfmul, (disks - 2) * PAGE_SIZE % 65536);
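	/*
	 * Worked example, assuming 4 KiB pages: the six data pages span
	 * 6 * 4096 = 24576 bytes, less than the 65536-byte gfmul table,
	 * so cycle is 0 and the tail memcpy above fills the whole test
	 * buffer from the start of raid6_gfmul.
	 */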
/* select raid gen_syndrome function */
gen_best = raid6_choose_gen(&dptrs, disks);
/* select raid recover functions */
rec_best = raid6_choose_recov();
free_pages((unsigned long)disk_ptr, RAID6_TEST_DISKS_ORDER);
return gen_best && rec_best ? 0 : -EINVAL;
}
static void raid6_exit(void)
{
do { } while (0);
}
subsys_initcall(raid6_select_algo);
module_exit(raid6_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");
| linux-master | lib/raid6/algos.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Intel Corporation
* Copyright (C) 2017 Linaro Ltd. <[email protected]>
*/
#include <arm_neon.h>
#include "neon.h"
#ifdef CONFIG_ARM
/*
* AArch32 does not provide this intrinsic natively because it does not
* implement the underlying instruction. AArch32 only provides a 64-bit
* wide vtbl.8 instruction, so use that instead.
*/
static uint8x16_t vqtbl1q_u8(uint8x16_t a, uint8x16_t b)
{
union {
uint8x16_t val;
uint8x8x2_t pair;
} __a = { a };
return vcombine_u8(vtbl2_u8(__a.pair, vget_low_u8(b)),
vtbl2_u8(__a.pair, vget_high_u8(b)));
}
#endif
void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
uint8_t *dq, const uint8_t *pbmul,
const uint8_t *qmul)
{
uint8x16_t pm0 = vld1q_u8(pbmul);
uint8x16_t pm1 = vld1q_u8(pbmul + 16);
uint8x16_t qm0 = vld1q_u8(qmul);
uint8x16_t qm1 = vld1q_u8(qmul + 16);
uint8x16_t x0f = vdupq_n_u8(0x0f);
/*
* while ( bytes-- ) {
* uint8_t px, qx, db;
*
* px = *p ^ *dp;
* qx = qmul[*q ^ *dq];
* *dq++ = db = pbmul[px] ^ qx;
* *dp++ = db ^ px;
* p++; q++;
* }
*/
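	/*
	 * The table lookups exploit linearity of GF(2^8) multiplication
	 * over XOR: for a fixed multiplier c,
	 *
	 *	c * x = c * (x & 0x0f) ^ c * (x & 0xf0)
	 *
	 * so each 256-entry qmul/pbmul table is replaced by two 16-entry
	 * tables (qm0/qm1, pm0/pm1) indexed by the low and high nibble
	 * respectively, which fit the vector table-lookup instructions
	 * used below.
	 */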
while (bytes) {
uint8x16_t vx, vy, px, qx, db;
px = veorq_u8(vld1q_u8(p), vld1q_u8(dp));
vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));
vy = vshrq_n_u8(vx, 4);
vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f));
vy = vqtbl1q_u8(qm1, vy);
qx = veorq_u8(vx, vy);
vy = vshrq_n_u8(px, 4);
vx = vqtbl1q_u8(pm0, vandq_u8(px, x0f));
vy = vqtbl1q_u8(pm1, vy);
vx = veorq_u8(vx, vy);
db = veorq_u8(vx, qx);
vst1q_u8(dq, db);
vst1q_u8(dp, veorq_u8(db, px));
bytes -= 16;
p += 16;
q += 16;
dp += 16;
dq += 16;
}
}
void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
const uint8_t *qmul)
{
uint8x16_t qm0 = vld1q_u8(qmul);
uint8x16_t qm1 = vld1q_u8(qmul + 16);
uint8x16_t x0f = vdupq_n_u8(0x0f);
/*
* while (bytes--) {
* *p++ ^= *dq = qmul[*q ^ *dq];
* q++; dq++;
* }
*/
while (bytes) {
uint8x16_t vx, vy;
vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));
vy = vshrq_n_u8(vx, 4);
vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f));
vy = vqtbl1q_u8(qm1, vy);
vx = veorq_u8(vx, vy);
vy = veorq_u8(vx, vld1q_u8(p));
vst1q_u8(dq, vx);
vst1q_u8(p, vy);
bytes -= 16;
p += 16;
q += 16;
dq += 16;
}
}
| linux-master | lib/raid6/recov_neon_inner.c |
// SPDX-License-Identifier: GPL-2.0
/*
* RAID-6 data recovery in dual failure mode based on the XC instruction.
*
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <[email protected]>
*/
#include <linux/export.h>
#include <linux/raid/pq.h>
static inline void xor_block(u8 *p1, u8 *p2)
{
typedef struct { u8 _[256]; } addrtype;
asm volatile(
" xc 0(256,%[p1]),0(%[p2])\n"
: "+m" (*(addrtype *) p1) : "m" (*(addrtype *) p2),
[p1] "a" (p1), [p2] "a" (p2) : "cc");
}
/* Recover two failed data blocks. */
static void raid6_2data_recov_s390xc(int disks, size_t bytes, int faila,
int failb, void **ptrs)
{
u8 *p, *q, *dp, *dq;
const u8 *pbmul; /* P multiplier table for B data */
const u8 *qmul; /* Q multiplier table (for both) */
int i;
p = (u8 *)ptrs[disks-2];
q = (u8 *)ptrs[disks-1];
	/*
	 * Compute syndrome with zero for the missing data pages.
	 * Use the dead data pages as temporary storage for
	 * delta p and delta q.
	 */
dp = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks-2] = dp;
dq = (u8 *)ptrs[failb];
ptrs[failb] = (void *)raid6_empty_zero_page;
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
ptrs[failb] = dq;
ptrs[disks-2] = p;
ptrs[disks-1] = q;
/* Now, pick the proper data tables */
pbmul = raid6_gfmul[raid6_gfexi[failb-faila]];
qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]];
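	/*
	 * A sketch of the underlying algebra: the gen_syndrome() call
	 * above computed P' and Q' with the failed blocks zeroed, so
	 * once the loop below XORs in P and Q we have the deltas
	 *
	 *	dp = Da ^ Db
	 *	dq = g^a * Da ^ g^b * Db	(g = 2, the generator)
	 *
	 * Solving for Db:
	 *
	 *	Db = dq * inv(g^a ^ g^b) ^ dp * inv(g^(b-a) ^ 1)
	 *	   = qmul[dq] ^ pbmul[dp]
	 *
	 * and then Da = Db ^ dp, which is the final xor_block().
	 */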
/* Now do it... */
while (bytes) {
xor_block(dp, p);
xor_block(dq, q);
for (i = 0; i < 256; i++)
dq[i] = pbmul[dp[i]] ^ qmul[dq[i]];
xor_block(dp, dq);
p += 256;
q += 256;
dp += 256;
dq += 256;
bytes -= 256;
}
}
/* Recover failure of one data block plus the P block */
static void raid6_datap_recov_s390xc(int disks, size_t bytes, int faila,
void **ptrs)
{
u8 *p, *q, *dq;
const u8 *qmul; /* Q multiplier table */
int i;
p = (u8 *)ptrs[disks-2];
q = (u8 *)ptrs[disks-1];
	/*
	 * Compute syndrome with zero for the missing data page.
	 * Use the dead data page as temporary storage for delta q.
	 */
dq = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
ptrs[disks-1] = q;
/* Now, pick the proper data tables */
qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]];
/* Now do it... */
while (bytes) {
xor_block(dq, q);
for (i = 0; i < 256; i++)
dq[i] = qmul[dq[i]];
xor_block(p, dq);
p += 256;
q += 256;
dq += 256;
bytes -= 256;
}
}
const struct raid6_recov_calls raid6_recov_s390xc = {
.data2 = raid6_2data_recov_s390xc,
.datap = raid6_datap_recov_s390xc,
.valid = NULL,
.name = "s390xc",
.priority = 1,
};
| linux-master | lib/raid6/recov_s390xc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Intel Corporation
* Copyright (C) 2017 Linaro Ltd. <[email protected]>
*/
#include <linux/raid/pq.h>
#ifdef __KERNEL__
#include <asm/neon.h>
#include "neon.h"
#else
#define kernel_neon_begin()
#define kernel_neon_end()
#define cpu_has_neon() (1)
#endif
static int raid6_has_neon(void)
{
return cpu_has_neon();
}
static void raid6_2data_recov_neon(int disks, size_t bytes, int faila,
int failb, void **ptrs)
{
u8 *p, *q, *dp, *dq;
const u8 *pbmul; /* P multiplier table for B data */
const u8 *qmul; /* Q multiplier table (for both) */
p = (u8 *)ptrs[disks - 2];
q = (u8 *)ptrs[disks - 1];
/*
* Compute syndrome with zero for the missing data pages
* Use the dead data pages as temporary storage for
* delta p and delta q
*/
dp = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks - 2] = dp;
dq = (u8 *)ptrs[failb];
ptrs[failb] = (void *)raid6_empty_zero_page;
ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
ptrs[failb] = dq;
ptrs[disks - 2] = p;
ptrs[disks - 1] = q;
/* Now, pick the proper data tables */
pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
raid6_gfexp[failb]]];
kernel_neon_begin();
__raid6_2data_recov_neon(bytes, p, q, dp, dq, pbmul, qmul);
kernel_neon_end();
}
static void raid6_datap_recov_neon(int disks, size_t bytes, int faila,
void **ptrs)
{
u8 *p, *q, *dq;
const u8 *qmul; /* Q multiplier table */
p = (u8 *)ptrs[disks - 2];
q = (u8 *)ptrs[disks - 1];
/*
* Compute syndrome with zero for the missing data page
* Use the dead data page as temporary storage for delta q
*/
dq = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
ptrs[disks - 1] = q;
/* Now, pick the proper data tables */
qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
kernel_neon_begin();
__raid6_datap_recov_neon(bytes, p, q, dq, qmul);
kernel_neon_end();
}
const struct raid6_recov_calls raid6_recov_neon = {
.data2 = raid6_2data_recov_neon,
.datap = raid6_datap_recov_neon,
.valid = raid6_has_neon,
.name = "neon",
.priority = 10,
};
| linux-master | lib/raid6/recov_neon.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright 2002-2007 H. Peter Anvin - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* mktables.c
*
* Make RAID-6 tables. This is a host user space program to be run at
* compile time.
*/
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <stdlib.h>
#include <time.h>
static uint8_t gfmul(uint8_t a, uint8_t b)
{
uint8_t v = 0;
while (b) {
if (b & 1)
v ^= a;
a = (a << 1) ^ (a & 0x80 ? 0x1d : 0);
b >>= 1;
}
return v;
}
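/*
 * Worked example of the arithmetic above, in GF(2^8) with the 0x11d
 * reduction polynomial: multiplying by 2 is a left shift reduced by the
 * polynomial, so gfmul(0x80, 0x02) shifts x^7 into x^8, which reduces to
 * x^4 + x^3 + x^2 + 1 = 0x1d; likewise gfpow(2, 8) below yields 0x1d.
 */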
static uint8_t gfpow(uint8_t a, int b)
{
uint8_t v = 1;
b %= 255;
if (b < 0)
b += 255;
while (b) {
if (b & 1)
v = gfmul(v, a);
a = gfmul(a, a);
b >>= 1;
}
return v;
}
int main(int argc, char *argv[])
{
int i, j, k;
uint8_t v;
uint8_t exptbl[256], invtbl[256];
printf("#ifdef __KERNEL__\n");
printf("#include <linux/export.h>\n");
printf("#endif\n");
printf("#include <linux/raid/pq.h>\n");
/* Compute multiplication table */
printf("\nconst u8 __attribute__((aligned(256)))\n"
"raid6_gfmul[256][256] =\n"
"{\n");
for (i = 0; i < 256; i++) {
printf("\t{\n");
for (j = 0; j < 256; j += 8) {
printf("\t\t");
for (k = 0; k < 8; k++)
printf("0x%02x,%c", gfmul(i, j + k),
(k == 7) ? '\n' : ' ');
}
printf("\t},\n");
}
printf("};\n");
printf("#ifdef __KERNEL__\n");
printf("EXPORT_SYMBOL(raid6_gfmul);\n");
printf("#endif\n");
/* Compute vector multiplication table */
printf("\nconst u8 __attribute__((aligned(256)))\n"
"raid6_vgfmul[256][32] =\n"
"{\n");
for (i = 0; i < 256; i++) {
printf("\t{\n");
for (j = 0; j < 16; j += 8) {
printf("\t\t");
for (k = 0; k < 8; k++)
printf("0x%02x,%c", gfmul(i, j + k),
(k == 7) ? '\n' : ' ');
}
for (j = 0; j < 16; j += 8) {
printf("\t\t");
for (k = 0; k < 8; k++)
printf("0x%02x,%c", gfmul(i, (j + k) << 4),
(k == 7) ? '\n' : ' ');
}
printf("\t},\n");
}
printf("};\n");
printf("#ifdef __KERNEL__\n");
printf("EXPORT_SYMBOL(raid6_vgfmul);\n");
printf("#endif\n");
/* Compute power-of-2 table (exponent) */
v = 1;
printf("\nconst u8 __attribute__((aligned(256)))\n"
"raid6_gfexp[256] =\n" "{\n");
for (i = 0; i < 256; i += 8) {
printf("\t");
for (j = 0; j < 8; j++) {
exptbl[i + j] = v;
printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
v = gfmul(v, 2);
if (v == 1)
v = 0; /* For entry 255, not a real entry */
}
}
printf("};\n");
printf("#ifdef __KERNEL__\n");
printf("EXPORT_SYMBOL(raid6_gfexp);\n");
printf("#endif\n");
/* Compute log-of-2 table */
printf("\nconst u8 __attribute__((aligned(256)))\n"
"raid6_gflog[256] =\n" "{\n");
for (i = 0; i < 256; i += 8) {
printf("\t");
for (j = 0; j < 8; j++) {
v = 255;
for (k = 0; k < 256; k++)
if (exptbl[k] == (i + j)) {
v = k;
break;
}
printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
}
}
printf("};\n");
printf("#ifdef __KERNEL__\n");
printf("EXPORT_SYMBOL(raid6_gflog);\n");
printf("#endif\n");
/* Compute inverse table x^-1 == x^254 */
printf("\nconst u8 __attribute__((aligned(256)))\n"
"raid6_gfinv[256] =\n" "{\n");
for (i = 0; i < 256; i += 8) {
printf("\t");
for (j = 0; j < 8; j++) {
invtbl[i + j] = v = gfpow(i + j, 254);
printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
}
}
printf("};\n");
printf("#ifdef __KERNEL__\n");
printf("EXPORT_SYMBOL(raid6_gfinv);\n");
printf("#endif\n");
/* Compute inv(2^x + 1) (exponent-xor-inverse) table */
printf("\nconst u8 __attribute__((aligned(256)))\n"
"raid6_gfexi[256] =\n" "{\n");
for (i = 0; i < 256; i += 8) {
printf("\t");
for (j = 0; j < 8; j++)
printf("0x%02x,%c", invtbl[exptbl[i + j] ^ 1],
(j == 7) ? '\n' : ' ');
}
printf("};\n");
printf("#ifdef __KERNEL__\n");
printf("EXPORT_SYMBOL(raid6_gfexi);\n");
printf("#endif\n");
return 0;
}
| linux-master | lib/raid6/mktables.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Intel Corporation
* Author: Jim Kukunas <[email protected]>
*/
#include <linux/raid/pq.h>
#include "x86.h"
static int raid6_has_avx2(void)
{
return boot_cpu_has(X86_FEATURE_AVX2) &&
boot_cpu_has(X86_FEATURE_AVX);
}
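/*
 * As in the NEON recovery code, the 32-byte raid6_vgfmul rows used for
 * qmul/pbmul are a pair of 16-entry nibble lookup tables: since GF(2^8)
 * multiplication distributes over XOR, c * x = c * (x & 0x0f) ^
 * c * (x & 0xf0), so each half is resolved with vpshufb and the results
 * are combined with vpxor.
 */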
static void raid6_2data_recov_avx2(int disks, size_t bytes, int faila,
int failb, void **ptrs)
{
u8 *p, *q, *dp, *dq;
const u8 *pbmul; /* P multiplier table for B data */
const u8 *qmul; /* Q multiplier table (for both) */
const u8 x0f = 0x0f;
p = (u8 *)ptrs[disks-2];
q = (u8 *)ptrs[disks-1];
	/*
	 * Compute syndrome with zero for the missing data pages.
	 * Use the dead data pages as temporary storage for
	 * delta p and delta q.
	 */
dp = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks-2] = dp;
dq = (u8 *)ptrs[failb];
ptrs[failb] = (void *)raid6_empty_zero_page;
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
ptrs[failb] = dq;
ptrs[disks-2] = p;
ptrs[disks-1] = q;
/* Now, pick the proper data tables */
pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
raid6_gfexp[failb]]];
kernel_fpu_begin();
	/* ymm7 = x0f[32] */
asm volatile("vpbroadcastb %0, %%ymm7" : : "m" (x0f));
while (bytes) {
#ifdef CONFIG_X86_64
asm volatile("vmovdqa %0, %%ymm1" : : "m" (q[0]));
asm volatile("vmovdqa %0, %%ymm9" : : "m" (q[32]));
asm volatile("vmovdqa %0, %%ymm0" : : "m" (p[0]));
asm volatile("vmovdqa %0, %%ymm8" : : "m" (p[32]));
asm volatile("vpxor %0, %%ymm1, %%ymm1" : : "m" (dq[0]));
asm volatile("vpxor %0, %%ymm9, %%ymm9" : : "m" (dq[32]));
asm volatile("vpxor %0, %%ymm0, %%ymm0" : : "m" (dp[0]));
asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (dp[32]));
/*
* 1 = dq[0] ^ q[0]
* 9 = dq[32] ^ q[32]
* 0 = dp[0] ^ p[0]
* 8 = dp[32] ^ p[32]
*/
asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (qmul[0]));
asm volatile("vbroadcasti128 %0, %%ymm5" : : "m" (qmul[16]));
asm volatile("vpsraw $4, %ymm1, %ymm3");
asm volatile("vpsraw $4, %ymm9, %ymm12");
asm volatile("vpand %ymm7, %ymm1, %ymm1");
asm volatile("vpand %ymm7, %ymm9, %ymm9");
asm volatile("vpand %ymm7, %ymm3, %ymm3");
asm volatile("vpand %ymm7, %ymm12, %ymm12");
asm volatile("vpshufb %ymm9, %ymm4, %ymm14");
asm volatile("vpshufb %ymm1, %ymm4, %ymm4");
asm volatile("vpshufb %ymm12, %ymm5, %ymm15");
asm volatile("vpshufb %ymm3, %ymm5, %ymm5");
asm volatile("vpxor %ymm14, %ymm15, %ymm15");
asm volatile("vpxor %ymm4, %ymm5, %ymm5");
/*
* 5 = qx[0]
* 15 = qx[32]
*/
asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (pbmul[0]));
asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (pbmul[16]));
asm volatile("vpsraw $4, %ymm0, %ymm2");
asm volatile("vpsraw $4, %ymm8, %ymm6");
asm volatile("vpand %ymm7, %ymm0, %ymm3");
asm volatile("vpand %ymm7, %ymm8, %ymm14");
asm volatile("vpand %ymm7, %ymm2, %ymm2");
asm volatile("vpand %ymm7, %ymm6, %ymm6");
asm volatile("vpshufb %ymm14, %ymm4, %ymm12");
asm volatile("vpshufb %ymm3, %ymm4, %ymm4");
asm volatile("vpshufb %ymm6, %ymm1, %ymm13");
asm volatile("vpshufb %ymm2, %ymm1, %ymm1");
asm volatile("vpxor %ymm4, %ymm1, %ymm1");
asm volatile("vpxor %ymm12, %ymm13, %ymm13");
/*
* 1 = pbmul[px[0]]
* 13 = pbmul[px[32]]
*/
asm volatile("vpxor %ymm5, %ymm1, %ymm1");
asm volatile("vpxor %ymm15, %ymm13, %ymm13");
/*
* 1 = db = DQ
* 13 = db[32] = DQ[32]
*/
asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
asm volatile("vmovdqa %%ymm13,%0" : "=m" (dq[32]));
asm volatile("vpxor %ymm1, %ymm0, %ymm0");
asm volatile("vpxor %ymm13, %ymm8, %ymm8");
asm volatile("vmovdqa %%ymm0, %0" : "=m" (dp[0]));
asm volatile("vmovdqa %%ymm8, %0" : "=m" (dp[32]));
bytes -= 64;
p += 64;
q += 64;
dp += 64;
dq += 64;
#else
asm volatile("vmovdqa %0, %%ymm1" : : "m" (*q));
asm volatile("vmovdqa %0, %%ymm0" : : "m" (*p));
asm volatile("vpxor %0, %%ymm1, %%ymm1" : : "m" (*dq));
asm volatile("vpxor %0, %%ymm0, %%ymm0" : : "m" (*dp));
/* 1 = dq ^ q; 0 = dp ^ p */
asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (qmul[0]));
asm volatile("vbroadcasti128 %0, %%ymm5" : : "m" (qmul[16]));
		/*
		 * 1 = dq ^ q
		 * 3 = (dq ^ q) >> 4
		 */
asm volatile("vpsraw $4, %ymm1, %ymm3");
asm volatile("vpand %ymm7, %ymm1, %ymm1");
asm volatile("vpand %ymm7, %ymm3, %ymm3");
asm volatile("vpshufb %ymm1, %ymm4, %ymm4");
asm volatile("vpshufb %ymm3, %ymm5, %ymm5");
asm volatile("vpxor %ymm4, %ymm5, %ymm5");
/* 5 = qx */
asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (pbmul[0]));
asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (pbmul[16]));
asm volatile("vpsraw $4, %ymm0, %ymm2");
asm volatile("vpand %ymm7, %ymm0, %ymm3");
asm volatile("vpand %ymm7, %ymm2, %ymm2");
asm volatile("vpshufb %ymm3, %ymm4, %ymm4");
asm volatile("vpshufb %ymm2, %ymm1, %ymm1");
asm volatile("vpxor %ymm4, %ymm1, %ymm1");
/* 1 = pbmul[px] */
asm volatile("vpxor %ymm5, %ymm1, %ymm1");
/* 1 = db = DQ */
asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
asm volatile("vpxor %ymm1, %ymm0, %ymm0");
asm volatile("vmovdqa %%ymm0, %0" : "=m" (dp[0]));
bytes -= 32;
p += 32;
q += 32;
dp += 32;
dq += 32;
#endif
}
kernel_fpu_end();
}
static void raid6_datap_recov_avx2(int disks, size_t bytes, int faila,
void **ptrs)
{
u8 *p, *q, *dq;
const u8 *qmul; /* Q multiplier table */
const u8 x0f = 0x0f;
p = (u8 *)ptrs[disks-2];
q = (u8 *)ptrs[disks-1];
	/*
	 * Compute syndrome with zero for the missing data page.
	 * Use the dead data page as temporary storage for delta q.
	 */
dq = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
ptrs[disks-1] = q;
/* Now, pick the proper data tables */
qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
kernel_fpu_begin();
asm volatile("vpbroadcastb %0, %%ymm7" : : "m" (x0f));
while (bytes) {
#ifdef CONFIG_X86_64
asm volatile("vmovdqa %0, %%ymm3" : : "m" (dq[0]));
asm volatile("vmovdqa %0, %%ymm8" : : "m" (dq[32]));
asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0]));
asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (q[32]));
/*
* 3 = q[0] ^ dq[0]
* 8 = q[32] ^ dq[32]
*/
asm volatile("vbroadcasti128 %0, %%ymm0" : : "m" (qmul[0]));
asm volatile("vmovapd %ymm0, %ymm13");
asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (qmul[16]));
asm volatile("vmovapd %ymm1, %ymm14");
asm volatile("vpsraw $4, %ymm3, %ymm6");
asm volatile("vpsraw $4, %ymm8, %ymm12");
asm volatile("vpand %ymm7, %ymm3, %ymm3");
asm volatile("vpand %ymm7, %ymm8, %ymm8");
asm volatile("vpand %ymm7, %ymm6, %ymm6");
asm volatile("vpand %ymm7, %ymm12, %ymm12");
asm volatile("vpshufb %ymm3, %ymm0, %ymm0");
asm volatile("vpshufb %ymm8, %ymm13, %ymm13");
asm volatile("vpshufb %ymm6, %ymm1, %ymm1");
asm volatile("vpshufb %ymm12, %ymm14, %ymm14");
asm volatile("vpxor %ymm0, %ymm1, %ymm1");
asm volatile("vpxor %ymm13, %ymm14, %ymm14");
/*
* 1 = qmul[q[0] ^ dq[0]]
* 14 = qmul[q[32] ^ dq[32]]
*/
asm volatile("vmovdqa %0, %%ymm2" : : "m" (p[0]));
asm volatile("vmovdqa %0, %%ymm12" : : "m" (p[32]));
asm volatile("vpxor %ymm1, %ymm2, %ymm2");
asm volatile("vpxor %ymm14, %ymm12, %ymm12");
/*
* 2 = p[0] ^ qmul[q[0] ^ dq[0]]
* 12 = p[32] ^ qmul[q[32] ^ dq[32]]
*/
asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
asm volatile("vmovdqa %%ymm14, %0" : "=m" (dq[32]));
asm volatile("vmovdqa %%ymm2, %0" : "=m" (p[0]));
asm volatile("vmovdqa %%ymm12,%0" : "=m" (p[32]));
bytes -= 64;
p += 64;
q += 64;
dq += 64;
#else
asm volatile("vmovdqa %0, %%ymm3" : : "m" (dq[0]));
asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0]));
/* 3 = q ^ dq */
asm volatile("vbroadcasti128 %0, %%ymm0" : : "m" (qmul[0]));
asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (qmul[16]));
asm volatile("vpsraw $4, %ymm3, %ymm6");
asm volatile("vpand %ymm7, %ymm3, %ymm3");
asm volatile("vpand %ymm7, %ymm6, %ymm6");
asm volatile("vpshufb %ymm3, %ymm0, %ymm0");
asm volatile("vpshufb %ymm6, %ymm1, %ymm1");
asm volatile("vpxor %ymm0, %ymm1, %ymm1");
/* 1 = qmul[q ^ dq] */
asm volatile("vmovdqa %0, %%ymm2" : : "m" (p[0]));
asm volatile("vpxor %ymm1, %ymm2, %ymm2");
/* 2 = p ^ qmul[q ^ dq] */
asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
asm volatile("vmovdqa %%ymm2, %0" : "=m" (p[0]));
bytes -= 32;
p += 32;
q += 32;
dq += 32;
#endif
}
kernel_fpu_end();
}
const struct raid6_recov_calls raid6_recov_avx2 = {
.data2 = raid6_2data_recov_avx2,
.datap = raid6_datap_recov_avx2,
.valid = raid6_has_avx2,
#ifdef CONFIG_X86_64
.name = "avx2x2",
#else
.name = "avx2x1",
#endif
.priority = 2,
};
| linux-master | lib/raid6/recov_avx2.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RAID6 syndrome calculations in LoongArch SIMD (LSX & LASX)
*
* Copyright 2023 WANG Xuerui <[email protected]>
*
* Based on the generic RAID-6 code (int.uc):
*
* Copyright 2002-2004 H. Peter Anvin
*/
#include <linux/raid/pq.h>
#include "loongarch.h"
/*
 * The vector algorithms are currently priority 0, which means the generic
 * scalar algorithms are not disabled when vector support is present.
 * This matches the similar LoongArch RAID5 XOR code, and the main reason
 * is repeated here: it cannot be ruled out at this point in time that some
 * future (maybe reduced) models could run the vector algorithms slower
 * than the scalar ones, perhaps for errata or micro-op reasons. It may be
 * appropriate to revisit this after one or two more uarch generations.
 */
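/*
 * Both implementations below compute the per-byte GF(2^8) multiply-by-2
 * without table lookups: vslti.b/xvslti.b writes 0xff into every byte
 * whose top bit is set (w2 = MASK(wq)), vslli.b/xvslli.b shifts each
 * byte left by one (w1 = SHLBYTE(wq)), and ANDing the mask with 0x1d
 * before the XOR folds the 0x11d reduction polynomial back in exactly
 * where the shift overflowed.
 */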
#ifdef CONFIG_CPU_HAS_LSX
#define NSIZE 16
static int raid6_has_lsx(void)
{
return cpu_has_lsx;
}
static void raid6_lsx_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
kernel_fpu_begin();
/*
* $vr0, $vr1, $vr2, $vr3: wp
* $vr4, $vr5, $vr6, $vr7: wq
* $vr8, $vr9, $vr10, $vr11: wd
* $vr12, $vr13, $vr14, $vr15: w2
* $vr16, $vr17, $vr18, $vr19: w1
*/
for (d = 0; d < bytes; d += NSIZE*4) {
/* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
asm volatile("vld $vr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
asm volatile("vld $vr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
asm volatile("vld $vr2, %0" : : "m"(dptr[z0][d+2*NSIZE]));
asm volatile("vld $vr3, %0" : : "m"(dptr[z0][d+3*NSIZE]));
asm volatile("vori.b $vr4, $vr0, 0");
asm volatile("vori.b $vr5, $vr1, 0");
asm volatile("vori.b $vr6, $vr2, 0");
asm volatile("vori.b $vr7, $vr3, 0");
for (z = z0-1; z >= 0; z--) {
/* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
asm volatile("vld $vr8, %0" : : "m"(dptr[z][d+0*NSIZE]));
asm volatile("vld $vr9, %0" : : "m"(dptr[z][d+1*NSIZE]));
asm volatile("vld $vr10, %0" : : "m"(dptr[z][d+2*NSIZE]));
asm volatile("vld $vr11, %0" : : "m"(dptr[z][d+3*NSIZE]));
/* wp$$ ^= wd$$; */
asm volatile("vxor.v $vr0, $vr0, $vr8");
asm volatile("vxor.v $vr1, $vr1, $vr9");
asm volatile("vxor.v $vr2, $vr2, $vr10");
asm volatile("vxor.v $vr3, $vr3, $vr11");
/* w2$$ = MASK(wq$$); */
asm volatile("vslti.b $vr12, $vr4, 0");
asm volatile("vslti.b $vr13, $vr5, 0");
asm volatile("vslti.b $vr14, $vr6, 0");
asm volatile("vslti.b $vr15, $vr7, 0");
/* w1$$ = SHLBYTE(wq$$); */
asm volatile("vslli.b $vr16, $vr4, 1");
asm volatile("vslli.b $vr17, $vr5, 1");
asm volatile("vslli.b $vr18, $vr6, 1");
asm volatile("vslli.b $vr19, $vr7, 1");
/* w2$$ &= NBYTES(0x1d); */
asm volatile("vandi.b $vr12, $vr12, 0x1d");
asm volatile("vandi.b $vr13, $vr13, 0x1d");
asm volatile("vandi.b $vr14, $vr14, 0x1d");
asm volatile("vandi.b $vr15, $vr15, 0x1d");
/* w1$$ ^= w2$$; */
asm volatile("vxor.v $vr16, $vr16, $vr12");
asm volatile("vxor.v $vr17, $vr17, $vr13");
asm volatile("vxor.v $vr18, $vr18, $vr14");
asm volatile("vxor.v $vr19, $vr19, $vr15");
/* wq$$ = w1$$ ^ wd$$; */
asm volatile("vxor.v $vr4, $vr16, $vr8");
asm volatile("vxor.v $vr5, $vr17, $vr9");
asm volatile("vxor.v $vr6, $vr18, $vr10");
asm volatile("vxor.v $vr7, $vr19, $vr11");
}
/* *(unative_t *)&p[d+NSIZE*$$] = wp$$; */
asm volatile("vst $vr0, %0" : "=m"(p[d+NSIZE*0]));
asm volatile("vst $vr1, %0" : "=m"(p[d+NSIZE*1]));
asm volatile("vst $vr2, %0" : "=m"(p[d+NSIZE*2]));
asm volatile("vst $vr3, %0" : "=m"(p[d+NSIZE*3]));
/* *(unative_t *)&q[d+NSIZE*$$] = wq$$; */
asm volatile("vst $vr4, %0" : "=m"(q[d+NSIZE*0]));
asm volatile("vst $vr5, %0" : "=m"(q[d+NSIZE*1]));
asm volatile("vst $vr6, %0" : "=m"(q[d+NSIZE*2]));
asm volatile("vst $vr7, %0" : "=m"(q[d+NSIZE*3]));
}
kernel_fpu_end();
}
static void raid6_lsx_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
p = dptr[disks-2]; /* XOR parity */
q = dptr[disks-1]; /* RS syndrome */
kernel_fpu_begin();
/*
* $vr0, $vr1, $vr2, $vr3: wp
* $vr4, $vr5, $vr6, $vr7: wq
* $vr8, $vr9, $vr10, $vr11: wd
* $vr12, $vr13, $vr14, $vr15: w2
* $vr16, $vr17, $vr18, $vr19: w1
*/
for (d = 0; d < bytes; d += NSIZE*4) {
/* P/Q data pages */
/* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
asm volatile("vld $vr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
asm volatile("vld $vr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
asm volatile("vld $vr2, %0" : : "m"(dptr[z0][d+2*NSIZE]));
asm volatile("vld $vr3, %0" : : "m"(dptr[z0][d+3*NSIZE]));
asm volatile("vori.b $vr4, $vr0, 0");
asm volatile("vori.b $vr5, $vr1, 0");
asm volatile("vori.b $vr6, $vr2, 0");
asm volatile("vori.b $vr7, $vr3, 0");
for (z = z0-1; z >= start; z--) {
/* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
asm volatile("vld $vr8, %0" : : "m"(dptr[z][d+0*NSIZE]));
asm volatile("vld $vr9, %0" : : "m"(dptr[z][d+1*NSIZE]));
asm volatile("vld $vr10, %0" : : "m"(dptr[z][d+2*NSIZE]));
asm volatile("vld $vr11, %0" : : "m"(dptr[z][d+3*NSIZE]));
/* wp$$ ^= wd$$; */
asm volatile("vxor.v $vr0, $vr0, $vr8");
asm volatile("vxor.v $vr1, $vr1, $vr9");
asm volatile("vxor.v $vr2, $vr2, $vr10");
asm volatile("vxor.v $vr3, $vr3, $vr11");
/* w2$$ = MASK(wq$$); */
asm volatile("vslti.b $vr12, $vr4, 0");
asm volatile("vslti.b $vr13, $vr5, 0");
asm volatile("vslti.b $vr14, $vr6, 0");
asm volatile("vslti.b $vr15, $vr7, 0");
/* w1$$ = SHLBYTE(wq$$); */
asm volatile("vslli.b $vr16, $vr4, 1");
asm volatile("vslli.b $vr17, $vr5, 1");
asm volatile("vslli.b $vr18, $vr6, 1");
asm volatile("vslli.b $vr19, $vr7, 1");
/* w2$$ &= NBYTES(0x1d); */
asm volatile("vandi.b $vr12, $vr12, 0x1d");
asm volatile("vandi.b $vr13, $vr13, 0x1d");
asm volatile("vandi.b $vr14, $vr14, 0x1d");
asm volatile("vandi.b $vr15, $vr15, 0x1d");
/* w1$$ ^= w2$$; */
asm volatile("vxor.v $vr16, $vr16, $vr12");
asm volatile("vxor.v $vr17, $vr17, $vr13");
asm volatile("vxor.v $vr18, $vr18, $vr14");
asm volatile("vxor.v $vr19, $vr19, $vr15");
/* wq$$ = w1$$ ^ wd$$; */
asm volatile("vxor.v $vr4, $vr16, $vr8");
asm volatile("vxor.v $vr5, $vr17, $vr9");
asm volatile("vxor.v $vr6, $vr18, $vr10");
asm volatile("vxor.v $vr7, $vr19, $vr11");
}
/* P/Q left side optimization */
for (z = start-1; z >= 0; z--) {
/* w2$$ = MASK(wq$$); */
asm volatile("vslti.b $vr12, $vr4, 0");
asm volatile("vslti.b $vr13, $vr5, 0");
asm volatile("vslti.b $vr14, $vr6, 0");
asm volatile("vslti.b $vr15, $vr7, 0");
/* w1$$ = SHLBYTE(wq$$); */
asm volatile("vslli.b $vr16, $vr4, 1");
asm volatile("vslli.b $vr17, $vr5, 1");
asm volatile("vslli.b $vr18, $vr6, 1");
asm volatile("vslli.b $vr19, $vr7, 1");
/* w2$$ &= NBYTES(0x1d); */
asm volatile("vandi.b $vr12, $vr12, 0x1d");
asm volatile("vandi.b $vr13, $vr13, 0x1d");
asm volatile("vandi.b $vr14, $vr14, 0x1d");
asm volatile("vandi.b $vr15, $vr15, 0x1d");
/* wq$$ = w1$$ ^ w2$$; */
asm volatile("vxor.v $vr4, $vr16, $vr12");
asm volatile("vxor.v $vr5, $vr17, $vr13");
asm volatile("vxor.v $vr6, $vr18, $vr14");
asm volatile("vxor.v $vr7, $vr19, $vr15");
}
/*
* *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
* *(unative_t *)&q[d+NSIZE*$$] ^= wq$$;
*/
asm volatile(
"vld $vr20, %0\n\t"
"vld $vr21, %1\n\t"
"vld $vr22, %2\n\t"
"vld $vr23, %3\n\t"
"vld $vr24, %4\n\t"
"vld $vr25, %5\n\t"
"vld $vr26, %6\n\t"
"vld $vr27, %7\n\t"
"vxor.v $vr20, $vr20, $vr0\n\t"
"vxor.v $vr21, $vr21, $vr1\n\t"
"vxor.v $vr22, $vr22, $vr2\n\t"
"vxor.v $vr23, $vr23, $vr3\n\t"
"vxor.v $vr24, $vr24, $vr4\n\t"
"vxor.v $vr25, $vr25, $vr5\n\t"
"vxor.v $vr26, $vr26, $vr6\n\t"
"vxor.v $vr27, $vr27, $vr7\n\t"
"vst $vr20, %0\n\t"
"vst $vr21, %1\n\t"
"vst $vr22, %2\n\t"
"vst $vr23, %3\n\t"
"vst $vr24, %4\n\t"
"vst $vr25, %5\n\t"
"vst $vr26, %6\n\t"
"vst $vr27, %7\n\t"
: "+m"(p[d+NSIZE*0]), "+m"(p[d+NSIZE*1]),
"+m"(p[d+NSIZE*2]), "+m"(p[d+NSIZE*3]),
"+m"(q[d+NSIZE*0]), "+m"(q[d+NSIZE*1]),
"+m"(q[d+NSIZE*2]), "+m"(q[d+NSIZE*3])
);
}
kernel_fpu_end();
}
const struct raid6_calls raid6_lsx = {
raid6_lsx_gen_syndrome,
raid6_lsx_xor_syndrome,
raid6_has_lsx,
"lsx",
.priority = 0 /* see the comment near the top of the file for reason */
};
#undef NSIZE
#endif /* CONFIG_CPU_HAS_LSX */
#ifdef CONFIG_CPU_HAS_LASX
#define NSIZE 32
static int raid6_has_lasx(void)
{
return cpu_has_lasx;
}
static void raid6_lasx_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
kernel_fpu_begin();
/*
* $xr0, $xr1: wp
* $xr2, $xr3: wq
* $xr4, $xr5: wd
* $xr6, $xr7: w2
* $xr8, $xr9: w1
*/
for (d = 0; d < bytes; d += NSIZE*2) {
/* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
asm volatile("xvld $xr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
asm volatile("xvld $xr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
asm volatile("xvori.b $xr2, $xr0, 0");
asm volatile("xvori.b $xr3, $xr1, 0");
for (z = z0-1; z >= 0; z--) {
/* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
asm volatile("xvld $xr4, %0" : : "m"(dptr[z][d+0*NSIZE]));
asm volatile("xvld $xr5, %0" : : "m"(dptr[z][d+1*NSIZE]));
/* wp$$ ^= wd$$; */
asm volatile("xvxor.v $xr0, $xr0, $xr4");
asm volatile("xvxor.v $xr1, $xr1, $xr5");
/* w2$$ = MASK(wq$$); */
asm volatile("xvslti.b $xr6, $xr2, 0");
asm volatile("xvslti.b $xr7, $xr3, 0");
/* w1$$ = SHLBYTE(wq$$); */
asm volatile("xvslli.b $xr8, $xr2, 1");
asm volatile("xvslli.b $xr9, $xr3, 1");
/* w2$$ &= NBYTES(0x1d); */
asm volatile("xvandi.b $xr6, $xr6, 0x1d");
asm volatile("xvandi.b $xr7, $xr7, 0x1d");
/* w1$$ ^= w2$$; */
asm volatile("xvxor.v $xr8, $xr8, $xr6");
asm volatile("xvxor.v $xr9, $xr9, $xr7");
/* wq$$ = w1$$ ^ wd$$; */
asm volatile("xvxor.v $xr2, $xr8, $xr4");
asm volatile("xvxor.v $xr3, $xr9, $xr5");
}
/* *(unative_t *)&p[d+NSIZE*$$] = wp$$; */
asm volatile("xvst $xr0, %0" : "=m"(p[d+NSIZE*0]));
asm volatile("xvst $xr1, %0" : "=m"(p[d+NSIZE*1]));
/* *(unative_t *)&q[d+NSIZE*$$] = wq$$; */
asm volatile("xvst $xr2, %0" : "=m"(q[d+NSIZE*0]));
asm volatile("xvst $xr3, %0" : "=m"(q[d+NSIZE*1]));
}
kernel_fpu_end();
}
static void raid6_lasx_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
p = dptr[disks-2]; /* XOR parity */
q = dptr[disks-1]; /* RS syndrome */
kernel_fpu_begin();
/*
* $xr0, $xr1: wp
* $xr2, $xr3: wq
* $xr4, $xr5: wd
* $xr6, $xr7: w2
* $xr8, $xr9: w1
*/
for (d = 0; d < bytes; d += NSIZE*2) {
/* P/Q data pages */
/* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
asm volatile("xvld $xr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
asm volatile("xvld $xr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
asm volatile("xvori.b $xr2, $xr0, 0");
asm volatile("xvori.b $xr3, $xr1, 0");
for (z = z0-1; z >= start; z--) {
/* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
asm volatile("xvld $xr4, %0" : : "m"(dptr[z][d+0*NSIZE]));
asm volatile("xvld $xr5, %0" : : "m"(dptr[z][d+1*NSIZE]));
/* wp$$ ^= wd$$; */
asm volatile("xvxor.v $xr0, $xr0, $xr4");
asm volatile("xvxor.v $xr1, $xr1, $xr5");
/* w2$$ = MASK(wq$$); */
asm volatile("xvslti.b $xr6, $xr2, 0");
asm volatile("xvslti.b $xr7, $xr3, 0");
/* w1$$ = SHLBYTE(wq$$); */
asm volatile("xvslli.b $xr8, $xr2, 1");
asm volatile("xvslli.b $xr9, $xr3, 1");
/* w2$$ &= NBYTES(0x1d); */
asm volatile("xvandi.b $xr6, $xr6, 0x1d");
asm volatile("xvandi.b $xr7, $xr7, 0x1d");
/* w1$$ ^= w2$$; */
asm volatile("xvxor.v $xr8, $xr8, $xr6");
asm volatile("xvxor.v $xr9, $xr9, $xr7");
/* wq$$ = w1$$ ^ wd$$; */
asm volatile("xvxor.v $xr2, $xr8, $xr4");
asm volatile("xvxor.v $xr3, $xr9, $xr5");
}
/* P/Q left side optimization */
for (z = start-1; z >= 0; z--) {
/* w2$$ = MASK(wq$$); */
asm volatile("xvslti.b $xr6, $xr2, 0");
asm volatile("xvslti.b $xr7, $xr3, 0");
/* w1$$ = SHLBYTE(wq$$); */
asm volatile("xvslli.b $xr8, $xr2, 1");
asm volatile("xvslli.b $xr9, $xr3, 1");
/* w2$$ &= NBYTES(0x1d); */
asm volatile("xvandi.b $xr6, $xr6, 0x1d");
asm volatile("xvandi.b $xr7, $xr7, 0x1d");
/* wq$$ = w1$$ ^ w2$$; */
asm volatile("xvxor.v $xr2, $xr8, $xr6");
asm volatile("xvxor.v $xr3, $xr9, $xr7");
}
/*
* *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
* *(unative_t *)&q[d+NSIZE*$$] ^= wq$$;
*/
asm volatile(
"xvld $xr10, %0\n\t"
"xvld $xr11, %1\n\t"
"xvld $xr12, %2\n\t"
"xvld $xr13, %3\n\t"
"xvxor.v $xr10, $xr10, $xr0\n\t"
"xvxor.v $xr11, $xr11, $xr1\n\t"
"xvxor.v $xr12, $xr12, $xr2\n\t"
"xvxor.v $xr13, $xr13, $xr3\n\t"
"xvst $xr10, %0\n\t"
"xvst $xr11, %1\n\t"
"xvst $xr12, %2\n\t"
"xvst $xr13, %3\n\t"
: "+m"(p[d+NSIZE*0]), "+m"(p[d+NSIZE*1]),
"+m"(q[d+NSIZE*0]), "+m"(q[d+NSIZE*1])
);
}
kernel_fpu_end();
}
const struct raid6_calls raid6_lasx = {
raid6_lasx_gen_syndrome,
raid6_lasx_xor_syndrome,
raid6_has_lasx,
"lasx",
.priority = 0 /* see the comment near the top of the file for reason */
};
#undef NSIZE
#endif /* CONFIG_CPU_HAS_LASX */
| linux-master | lib/raid6/loongarch_simd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 2012 Intel Corporation
* Author: Yuanhan Liu <[email protected]>
*
* Based on sse2.c: Copyright 2002 H. Peter Anvin - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* AVX2 implementation of RAID-6 syndrome functions
*
*/
#include <linux/raid/pq.h>
#include "x86.h"
static const struct raid6_avx2_constants {
u64 x1d[4];
} raid6_avx2_constants __aligned(32) = {
{ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,
0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,},
};
static int raid6_have_avx2(void)
{
return boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX);
}
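/*
 * The syndrome loops below compute the per-byte GF(2^8) multiply-by-2
 * without table lookups: vpcmpgtb against an all-zero register yields
 * 0xff for every byte whose top bit is set, vpaddb v,v doubles each
 * byte (a left shift by one), and ANDing the mask with 0x1d XORs the
 * reduction polynomial back in exactly where the shift overflowed.
 */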
/*
* Plain AVX2 implementation
*/
static void raid6_avx21_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
asm volatile("vpxor %ymm3,%ymm3,%ymm3"); /* Zero temp */
for (d = 0; d < bytes; d += 32) {
asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */
asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
asm volatile("vmovdqa %ymm2,%ymm4");/* Q[0] */
asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z0-1][d]));
for (z = z0-2; z >= 0; z--) {
asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
asm volatile("vpcmpgtb %ymm4,%ymm3,%ymm5");
asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
asm volatile("vpand %ymm0,%ymm5,%ymm5");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm6,%ymm2,%ymm2");
asm volatile("vpxor %ymm6,%ymm4,%ymm4");
asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z][d]));
}
asm volatile("vpcmpgtb %ymm4,%ymm3,%ymm5");
asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
asm volatile("vpand %ymm0,%ymm5,%ymm5");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm6,%ymm2,%ymm2");
asm volatile("vpxor %ymm6,%ymm4,%ymm4");
asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
asm volatile("vpxor %ymm2,%ymm2,%ymm2");
asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
asm volatile("vpxor %ymm4,%ymm4,%ymm4");
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
static void raid6_avx21_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
p = dptr[disks-2]; /* XOR parity */
q = dptr[disks-1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
for (d = 0 ; d < bytes ; d += 32) {
asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
asm volatile("vpxor %ymm4,%ymm2,%ymm2");
/* P/Q data pages */
for (z = z0-1 ; z >= start ; z--) {
asm volatile("vpxor %ymm5,%ymm5,%ymm5");
asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
asm volatile("vpand %ymm0,%ymm5,%ymm5");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
asm volatile("vpxor %ymm5,%ymm2,%ymm2");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
}
/* P/Q left side optimization */
for (z = start-1 ; z >= 0 ; z--) {
asm volatile("vpxor %ymm5,%ymm5,%ymm5");
asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
asm volatile("vpand %ymm0,%ymm5,%ymm5");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
}
asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
/* Don't use movntdq for r/w memory area < cache line */
asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
const struct raid6_calls raid6_avx2x1 = {
raid6_avx21_gen_syndrome,
raid6_avx21_xor_syndrome,
raid6_have_avx2,
"avx2x1",
.priority = 2 /* Prefer AVX2 over priority 1 (SSE2 and others) */
};
/*
* Unrolled-by-2 AVX2 implementation
*/
static void raid6_avx22_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
asm volatile("vpxor %ymm1,%ymm1,%ymm1"); /* Zero temp */
/* We uniformly assume a single prefetch covers at least 32 bytes */
for (d = 0; d < bytes; d += 64) {
asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
asm volatile("prefetchnta %0" : : "m" (dptr[z0][d+32]));
asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */
asm volatile("vmovdqa %0,%%ymm3" : : "m" (dptr[z0][d+32]));/* P[1] */
asm volatile("vmovdqa %ymm2,%ymm4"); /* Q[0] */
asm volatile("vmovdqa %ymm3,%ymm6"); /* Q[1] */
for (z = z0-1; z >= 0; z--) {
asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
asm volatile("prefetchnta %0" : : "m" (dptr[z][d+32]));
asm volatile("vpcmpgtb %ymm4,%ymm1,%ymm5");
asm volatile("vpcmpgtb %ymm6,%ymm1,%ymm7");
asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
asm volatile("vpand %ymm0,%ymm5,%ymm5");
asm volatile("vpand %ymm0,%ymm7,%ymm7");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
asm volatile("vmovdqa %0,%%ymm5" : : "m" (dptr[z][d]));
asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d+32]));
asm volatile("vpxor %ymm5,%ymm2,%ymm2");
asm volatile("vpxor %ymm7,%ymm3,%ymm3");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
}
asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
static void raid6_avx22_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
p = dptr[disks-2]; /* XOR parity */
q = dptr[disks-1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
for (d = 0 ; d < bytes ; d += 64) {
asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32]));
asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
asm volatile("vpxor %ymm4,%ymm2,%ymm2");
asm volatile("vpxor %ymm6,%ymm3,%ymm3");
/* P/Q data pages */
for (z = z0-1 ; z >= start ; z--) {
asm volatile("vpxor %ymm5,%ymm5,%ymm5");
asm volatile("vpxor %ymm7,%ymm7,%ymm7");
asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
asm volatile("vpand %ymm0,%ymm5,%ymm5");
asm volatile("vpand %ymm0,%ymm7,%ymm7");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
asm volatile("vmovdqa %0,%%ymm7"
:: "m" (dptr[z][d+32]));
asm volatile("vpxor %ymm5,%ymm2,%ymm2");
asm volatile("vpxor %ymm7,%ymm3,%ymm3");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
}
/* P/Q left side optimization */
for (z = start-1 ; z >= 0 ; z--) {
asm volatile("vpxor %ymm5,%ymm5,%ymm5");
asm volatile("vpxor %ymm7,%ymm7,%ymm7");
asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
asm volatile("vpand %ymm0,%ymm5,%ymm5");
asm volatile("vpand %ymm0,%ymm7,%ymm7");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
}
asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
/* Don't use movntdq for r/w memory area < cache line */
asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
asm volatile("vmovdqa %%ymm6,%0" : "=m" (q[d+32]));
asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
asm volatile("vmovdqa %%ymm3,%0" : "=m" (p[d+32]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
const struct raid6_calls raid6_avx2x2 = {
raid6_avx22_gen_syndrome,
raid6_avx22_xor_syndrome,
raid6_have_avx2,
"avx2x2",
.priority = 2 /* Prefer AVX2 over priority 1 (SSE2 and others) */
};
#ifdef CONFIG_X86_64
/*
* Unrolled-by-4 AVX2 implementation
*/
static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
asm volatile("vpxor %ymm1,%ymm1,%ymm1"); /* Zero temp */
asm volatile("vpxor %ymm2,%ymm2,%ymm2"); /* P[0] */
asm volatile("vpxor %ymm3,%ymm3,%ymm3"); /* P[1] */
asm volatile("vpxor %ymm4,%ymm4,%ymm4"); /* Q[0] */
asm volatile("vpxor %ymm6,%ymm6,%ymm6"); /* Q[1] */
asm volatile("vpxor %ymm10,%ymm10,%ymm10"); /* P[2] */
asm volatile("vpxor %ymm11,%ymm11,%ymm11"); /* P[3] */
asm volatile("vpxor %ymm12,%ymm12,%ymm12"); /* Q[2] */
asm volatile("vpxor %ymm14,%ymm14,%ymm14"); /* Q[3] */
for (d = 0; d < bytes; d += 128) {
for (z = z0; z >= 0; z--) {
asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
asm volatile("prefetchnta %0" : : "m" (dptr[z][d+32]));
asm volatile("prefetchnta %0" : : "m" (dptr[z][d+64]));
asm volatile("prefetchnta %0" : : "m" (dptr[z][d+96]));
asm volatile("vpcmpgtb %ymm4,%ymm1,%ymm5");
asm volatile("vpcmpgtb %ymm6,%ymm1,%ymm7");
asm volatile("vpcmpgtb %ymm12,%ymm1,%ymm13");
asm volatile("vpcmpgtb %ymm14,%ymm1,%ymm15");
asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
asm volatile("vpand %ymm0,%ymm5,%ymm5");
asm volatile("vpand %ymm0,%ymm7,%ymm7");
asm volatile("vpand %ymm0,%ymm13,%ymm13");
asm volatile("vpand %ymm0,%ymm15,%ymm15");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
asm volatile("vpxor %ymm13,%ymm12,%ymm12");
asm volatile("vpxor %ymm15,%ymm14,%ymm14");
asm volatile("vmovdqa %0,%%ymm5" : : "m" (dptr[z][d]));
asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d+32]));
asm volatile("vmovdqa %0,%%ymm13" : : "m" (dptr[z][d+64]));
asm volatile("vmovdqa %0,%%ymm15" : : "m" (dptr[z][d+96]));
asm volatile("vpxor %ymm5,%ymm2,%ymm2");
asm volatile("vpxor %ymm7,%ymm3,%ymm3");
asm volatile("vpxor %ymm13,%ymm10,%ymm10");
asm volatile("vpxor %ymm15,%ymm11,%ymm11");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
asm volatile("vpxor %ymm13,%ymm12,%ymm12");
asm volatile("vpxor %ymm15,%ymm14,%ymm14");
}
asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
asm volatile("vpxor %ymm2,%ymm2,%ymm2");
asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
asm volatile("vpxor %ymm3,%ymm3,%ymm3");
asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64]));
asm volatile("vpxor %ymm10,%ymm10,%ymm10");
asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96]));
asm volatile("vpxor %ymm11,%ymm11,%ymm11");
asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
asm volatile("vpxor %ymm4,%ymm4,%ymm4");
asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
asm volatile("vpxor %ymm6,%ymm6,%ymm6");
asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64]));
asm volatile("vpxor %ymm12,%ymm12,%ymm12");
asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96]));
asm volatile("vpxor %ymm14,%ymm14,%ymm14");
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
static void raid6_avx24_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
p = dptr[disks-2]; /* XOR parity */
q = dptr[disks-1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa %0,%%ymm0" :: "m" (raid6_avx2_constants.x1d[0]));
for (d = 0 ; d < bytes ; d += 128) {
asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32]));
asm volatile("vmovdqa %0,%%ymm12" :: "m" (dptr[z0][d+64]));
asm volatile("vmovdqa %0,%%ymm14" :: "m" (dptr[z0][d+96]));
asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
asm volatile("vmovdqa %0,%%ymm10" : : "m" (p[d+64]));
asm volatile("vmovdqa %0,%%ymm11" : : "m" (p[d+96]));
asm volatile("vpxor %ymm4,%ymm2,%ymm2");
asm volatile("vpxor %ymm6,%ymm3,%ymm3");
asm volatile("vpxor %ymm12,%ymm10,%ymm10");
asm volatile("vpxor %ymm14,%ymm11,%ymm11");
/* P/Q data pages */
for (z = z0-1 ; z >= start ; z--) {
asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
asm volatile("prefetchnta %0" :: "m" (dptr[z][d+64]));
asm volatile("vpxor %ymm5,%ymm5,%ymm5");
asm volatile("vpxor %ymm7,%ymm7,%ymm7");
asm volatile("vpxor %ymm13,%ymm13,%ymm13");
asm volatile("vpxor %ymm15,%ymm15,%ymm15");
asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
asm volatile("vpcmpgtb %ymm12,%ymm13,%ymm13");
asm volatile("vpcmpgtb %ymm14,%ymm15,%ymm15");
asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
asm volatile("vpand %ymm0,%ymm5,%ymm5");
asm volatile("vpand %ymm0,%ymm7,%ymm7");
asm volatile("vpand %ymm0,%ymm13,%ymm13");
asm volatile("vpand %ymm0,%ymm15,%ymm15");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
asm volatile("vpxor %ymm13,%ymm12,%ymm12");
asm volatile("vpxor %ymm15,%ymm14,%ymm14");
asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
asm volatile("vmovdqa %0,%%ymm7"
:: "m" (dptr[z][d+32]));
asm volatile("vmovdqa %0,%%ymm13"
:: "m" (dptr[z][d+64]));
asm volatile("vmovdqa %0,%%ymm15"
:: "m" (dptr[z][d+96]));
asm volatile("vpxor %ymm5,%ymm2,%ymm2");
asm volatile("vpxor %ymm7,%ymm3,%ymm3");
asm volatile("vpxor %ymm13,%ymm10,%ymm10");
asm volatile("vpxor %ymm15,%ymm11,%ymm11");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
asm volatile("vpxor %ymm13,%ymm12,%ymm12");
asm volatile("vpxor %ymm15,%ymm14,%ymm14");
}
asm volatile("prefetchnta %0" :: "m" (q[d]));
asm volatile("prefetchnta %0" :: "m" (q[d+64]));
/* P/Q left side optimization */
for (z = start-1 ; z >= 0 ; z--) {
asm volatile("vpxor %ymm5,%ymm5,%ymm5");
asm volatile("vpxor %ymm7,%ymm7,%ymm7");
asm volatile("vpxor %ymm13,%ymm13,%ymm13");
asm volatile("vpxor %ymm15,%ymm15,%ymm15");
asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
asm volatile("vpcmpgtb %ymm12,%ymm13,%ymm13");
asm volatile("vpcmpgtb %ymm14,%ymm15,%ymm15");
asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
asm volatile("vpand %ymm0,%ymm5,%ymm5");
asm volatile("vpand %ymm0,%ymm7,%ymm7");
asm volatile("vpand %ymm0,%ymm13,%ymm13");
asm volatile("vpand %ymm0,%ymm15,%ymm15");
asm volatile("vpxor %ymm5,%ymm4,%ymm4");
asm volatile("vpxor %ymm7,%ymm6,%ymm6");
asm volatile("vpxor %ymm13,%ymm12,%ymm12");
asm volatile("vpxor %ymm15,%ymm14,%ymm14");
}
asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64]));
asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96]));
asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
asm volatile("vpxor %0,%%ymm12,%%ymm12" : : "m" (q[d+64]));
asm volatile("vpxor %0,%%ymm14,%%ymm14" : : "m" (q[d+96]));
asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64]));
asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
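/*
 * Editorial note (sketch, an addition to the source): the "left side"
 * loop above needs no data loads because disks z < start are unchanged.
 * The delta syndrome accumulated over disks start..stop only has to be
 * aligned with the stored Q, which is done by multiplying it by g = 2
 * in GF(2^8) once per skipped disk - "start" times in all - before the
 * final XOR with the q[] page.
 */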
const struct raid6_calls raid6_avx2x4 = {
raid6_avx24_gen_syndrome,
raid6_avx24_xor_syndrome,
raid6_have_avx2,
"avx2x4",
.priority = 2 /* Prefer AVX2 over priority 1 (SSE2 and others) */
};
#endif /* CONFIG_X86_64 */
| linux-master | lib/raid6/avx2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/lib/raid6/neon.c - RAID6 syndrome calculation using ARM NEON intrinsics
*
* Copyright (C) 2013 Linaro Ltd <[email protected]>
*/
#include <linux/raid/pq.h>
#ifdef __KERNEL__
#include <asm/neon.h>
#else
#define kernel_neon_begin()
#define kernel_neon_end()
#define cpu_has_neon() (1)
#endif
/*
* There are 2 reasons these wrappers are kept in a separate compilation unit
* from the actual implementations in neonN.c (generated from neon.uc by
* unroll.awk):
* - the actual implementations use NEON intrinsics, and the GCC support header
* (arm_neon.h) is not fully compatible (type wise) with the kernel;
* - the neonN.c files are compiled with -mfpu=neon and optimization enabled,
* and we have to make sure that we never use *any* NEON/VFP instructions
* outside a kernel_neon_begin()/kernel_neon_end() pair.
*/
#define RAID6_NEON_WRAPPER(_n) \
static void raid6_neon ## _n ## _gen_syndrome(int disks, \
size_t bytes, void **ptrs) \
{ \
void raid6_neon ## _n ## _gen_syndrome_real(int, \
unsigned long, void**); \
kernel_neon_begin(); \
raid6_neon ## _n ## _gen_syndrome_real(disks, \
(unsigned long)bytes, ptrs); \
kernel_neon_end(); \
} \
static void raid6_neon ## _n ## _xor_syndrome(int disks, \
int start, int stop, \
size_t bytes, void **ptrs) \
{ \
void raid6_neon ## _n ## _xor_syndrome_real(int, \
int, int, unsigned long, void**); \
kernel_neon_begin(); \
raid6_neon ## _n ## _xor_syndrome_real(disks, \
start, stop, (unsigned long)bytes, ptrs); \
kernel_neon_end(); \
} \
struct raid6_calls const raid6_neonx ## _n = { \
raid6_neon ## _n ## _gen_syndrome, \
raid6_neon ## _n ## _xor_syndrome, \
raid6_have_neon, \
"neonx" #_n, \
0 \
}
static int raid6_have_neon(void)
{
return cpu_has_neon();
}
RAID6_NEON_WRAPPER(1);
RAID6_NEON_WRAPPER(2);
RAID6_NEON_WRAPPER(4);
RAID6_NEON_WRAPPER(8);
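/*
 * Editorial note (sketch of the expansion, not in the upstream file):
 * RAID6_NEON_WRAPPER(2) above produces roughly the following,
 * bracketing the out-of-line NEON code from neon2.c with
 * kernel_neon_begin()/kernel_neon_end():
 *
 * static void raid6_neon2_gen_syndrome(int disks, size_t bytes, void **ptrs)
 * {
 * void raid6_neon2_gen_syndrome_real(int, unsigned long, void **);
 * kernel_neon_begin();
 * raid6_neon2_gen_syndrome_real(disks, (unsigned long)bytes, ptrs);
 * kernel_neon_end();
 * }
 *
 * plus the matching xor_syndrome wrapper and a raid6_neonx2 calls table
 * named "neonx2".
 */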
| linux-master | lib/raid6/neon.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright 2002 H. Peter Anvin - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* raid6/recov.c
*
* RAID-6 data recovery in dual failure mode. In single failure mode,
* use the RAID-5 algorithm (or, in the case of Q failure, just reconstruct
* the syndrome.)
*/
#include <linux/raid/pq.h>
/* Recover two failed data blocks. */
static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila,
int failb, void **ptrs)
{
u8 *p, *q, *dp, *dq;
u8 px, qx, db;
const u8 *pbmul; /* P multiplier table for B data */
const u8 *qmul; /* Q multiplier table (for both) */
p = (u8 *)ptrs[disks-2];
q = (u8 *)ptrs[disks-1];
/* Compute syndrome with zero for the missing data pages
Use the dead data pages as temporary storage for
delta p and delta q */
dp = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks-2] = dp;
dq = (u8 *)ptrs[failb];
ptrs[failb] = (void *)raid6_empty_zero_page;
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
ptrs[failb] = dq;
ptrs[disks-2] = p;
ptrs[disks-1] = q;
/* Now, pick the proper data tables */
pbmul = raid6_gfmul[raid6_gfexi[failb-faila]];
qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]];
/* Now do it... */
while ( bytes-- ) {
px = *p ^ *dp;
qx = qmul[*q ^ *dq];
*dq++ = db = pbmul[px] ^ qx; /* Reconstructed B */
*dp++ = db ^ px; /* Reconstructed A */
p++; q++;
}
}
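/*
 * Editorial note (worked equations, an addition to the source): writing
 * the lost blocks as Da (disk faila) and Db (disk failb), the
 * zero-filled syndrome pass above leaves, in GF(2^8) with generator
 * g = 2,
 * px = P ^ Pxy = Da + Db
 * qx = (Q ^ Qxy) / (g^faila + g^failb) (the qmul[] lookup)
 * Eliminating Da from Da + Db = px and g^faila*Da + g^failb*Db = Q ^ Qxy
 * gives Db = px / (g^(failb-faila) + 1) + qx, which is exactly
 * db = pbmul[px] ^ qx in the loop, and then Da = db ^ px.
 */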
/* Recover failure of one data block plus the P block */
static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila,
void **ptrs)
{
u8 *p, *q, *dq;
const u8 *qmul; /* Q multiplier table */
p = (u8 *)ptrs[disks-2];
q = (u8 *)ptrs[disks-1];
/* Compute syndrome with zero for the missing data page
Use the dead data page as temporary storage for delta q */
dq = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
ptrs[disks-1] = q;
/* Now, pick the proper data tables */
qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]];
/* Now do it... */
while ( bytes-- ) {
*p++ ^= *dq = qmul[*q ^ *dq];
q++; dq++;
}
}
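/*
 * Editorial note (worked equation, an addition to the source): the pass
 * above rebuilds P with Da treated as zero directly into p[], and
 * leaves Qx (Q computed with Da zeroed) in dq[]. Since
 * Q ^ Qx = g^faila * Da, the qmul[] lookup computes
 * Da = g^(-faila) * (Q ^ Qx), and XOR-ing Da into the partial P
 * completes both blocks.
 */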
const struct raid6_recov_calls raid6_recov_intx1 = {
.data2 = raid6_2data_recov_intx1,
.datap = raid6_datap_recov_intx1,
.valid = NULL,
.name = "intx1",
.priority = 0,
};
#ifndef __KERNEL__
/* Testing only */
/* Recover two failed blocks. */
void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, void **ptrs)
{
if ( faila > failb ) {
int tmp = faila;
faila = failb;
failb = tmp;
}
if ( failb == disks-1 ) {
if ( faila == disks-2 ) {
/* P+Q failure. Just rebuild the syndrome. */
raid6_call.gen_syndrome(disks, bytes, ptrs);
} else {
/* data+Q failure. Reconstruct data from P,
then rebuild syndrome. */
/* NOT IMPLEMENTED - equivalent to RAID-5 */
}
} else {
if ( failb == disks-2 ) {
/* data+P failure. */
raid6_datap_recov(disks, bytes, faila, ptrs);
} else {
/* data+data failure. */
raid6_2data_recov(disks, bytes, faila, failb, ptrs);
}
}
}
#endif
| linux-master | lib/raid6/recov.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright 2002 H. Peter Anvin - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* raid6/mmx.c
*
* MMX implementation of RAID-6 syndrome functions
*/
#ifdef CONFIG_X86_32
#include <linux/raid/pq.h>
#include "x86.h"
/* Shared with raid6/sse1.c */
const struct raid6_mmx_constants {
u64 x1d;
} raid6_mmx_constants = {
0x1d1d1d1d1d1d1d1dULL,
};
static int raid6_have_mmx(void)
{
/* Not really "boot_cpu" but "all_cpus" */
return boot_cpu_has(X86_FEATURE_MMX);
}
/*
* Plain MMX implementation
*/
static void raid6_mmx1_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
asm volatile("pxor %mm5,%mm5"); /* Zero temp */
for ( d = 0 ; d < bytes ; d += 8 ) {
asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
asm volatile("movq %mm2,%mm4"); /* Q[0] */
for ( z = z0-1 ; z >= 0 ; z-- ) {
asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d]));
asm volatile("pcmpgtb %mm4,%mm5");
asm volatile("paddb %mm4,%mm4");
asm volatile("pand %mm0,%mm5");
asm volatile("pxor %mm5,%mm4");
asm volatile("pxor %mm5,%mm5");
asm volatile("pxor %mm6,%mm2");
asm volatile("pxor %mm6,%mm4");
}
asm volatile("movq %%mm2,%0" : "=m" (p[d]));
asm volatile("pxor %mm2,%mm2");
asm volatile("movq %%mm4,%0" : "=m" (q[d]));
asm volatile("pxor %mm4,%mm4");
}
kernel_fpu_end();
}
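/*
 * Illustrative sketch (an editorial addition, not in the upstream file;
 * the helper name is made up): the pcmpgtb/paddb/pand/pxor sequence in
 * the inner loop multiplies every byte of the running Q by 2 in GF(2^8)
 * with reduction polynomial 0x11d, whose low byte is the 0x1d constant
 * loaded into %mm0. One byte lane, in scalar form:
 */
static inline u8 raid6_sketch_gf_mul2(u8 v)
{
u8 mask = (v & 0x80) ? 0xff : 0x00; /* pcmpgtb: (s8)0 > (s8)v */
return (u8)(v << 1) ^ (mask & 0x1d); /* paddb; pand; pxor */
}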
const struct raid6_calls raid6_mmxx1 = {
raid6_mmx1_gen_syndrome,
NULL, /* XOR not yet implemented */
raid6_have_mmx,
"mmxx1",
0
};
/*
* Unrolled-by-2 MMX implementation
*/
static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
asm volatile("pxor %mm5,%mm5"); /* Zero temp */
asm volatile("pxor %mm7,%mm7"); /* Zero temp */
for ( d = 0 ; d < bytes ; d += 16 ) {
asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8]));
asm volatile("movq %mm2,%mm4"); /* Q[0] */
asm volatile("movq %mm3,%mm6"); /* Q[1] */
for ( z = z0-1 ; z >= 0 ; z-- ) {
asm volatile("pcmpgtb %mm4,%mm5");
asm volatile("pcmpgtb %mm6,%mm7");
asm volatile("paddb %mm4,%mm4");
asm volatile("paddb %mm6,%mm6");
asm volatile("pand %mm0,%mm5");
asm volatile("pand %mm0,%mm7");
asm volatile("pxor %mm5,%mm4");
asm volatile("pxor %mm7,%mm6");
asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d]));
asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8]));
asm volatile("pxor %mm5,%mm2");
asm volatile("pxor %mm7,%mm3");
asm volatile("pxor %mm5,%mm4");
asm volatile("pxor %mm7,%mm6");
asm volatile("pxor %mm5,%mm5");
asm volatile("pxor %mm7,%mm7");
}
asm volatile("movq %%mm2,%0" : "=m" (p[d]));
asm volatile("movq %%mm3,%0" : "=m" (p[d+8]));
asm volatile("movq %%mm4,%0" : "=m" (q[d]));
asm volatile("movq %%mm6,%0" : "=m" (q[d+8]));
}
kernel_fpu_end();
}
const struct raid6_calls raid6_mmxx2 = {
raid6_mmx2_gen_syndrome,
NULL, /* XOR not yet implemented */
raid6_have_mmx,
"mmxx2",
0
};
#endif
| linux-master | lib/raid6/mmx.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* RAID6 recovery algorithms in LoongArch SIMD (LSX & LASX)
*
* Copyright (C) 2023 WANG Xuerui <[email protected]>
*
* Originally based on recov_avx2.c and recov_ssse3.c:
*
* Copyright (C) 2012 Intel Corporation
* Author: Jim Kukunas <[email protected]>
*/
#include <linux/raid/pq.h>
#include "loongarch.h"
/*
* Unlike with the syndrome calculation algorithms, there's no boot-time
* selection of recovery algorithms by benchmarking, so we have to specify
 * the priorities and hope that future cores will all have decent vector
* support (i.e. no LASX slower than LSX, or even scalar code).
*/
#ifdef CONFIG_CPU_HAS_LSX
static int raid6_has_lsx(void)
{
return cpu_has_lsx;
}
static void raid6_2data_recov_lsx(int disks, size_t bytes, int faila,
int failb, void **ptrs)
{
u8 *p, *q, *dp, *dq;
const u8 *pbmul; /* P multiplier table for B data */
const u8 *qmul; /* Q multiplier table (for both) */
p = (u8 *)ptrs[disks - 2];
q = (u8 *)ptrs[disks - 1];
/*
* Compute syndrome with zero for the missing data pages
* Use the dead data pages as temporary storage for
* delta p and delta q
*/
dp = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks - 2] = dp;
dq = (u8 *)ptrs[failb];
ptrs[failb] = (void *)raid6_empty_zero_page;
ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
ptrs[failb] = dq;
ptrs[disks - 2] = p;
ptrs[disks - 1] = q;
/* Now, pick the proper data tables */
pbmul = raid6_vgfmul[raid6_gfexi[failb - faila]];
qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^ raid6_gfexp[failb]]];
kernel_fpu_begin();
/*
* vr20, vr21: qmul
* vr22, vr23: pbmul
*/
asm volatile("vld $vr20, %0" : : "m" (qmul[0]));
asm volatile("vld $vr21, %0" : : "m" (qmul[16]));
asm volatile("vld $vr22, %0" : : "m" (pbmul[0]));
asm volatile("vld $vr23, %0" : : "m" (pbmul[16]));
while (bytes) {
/* vr4 - vr7: Q */
asm volatile("vld $vr4, %0" : : "m" (q[0]));
asm volatile("vld $vr5, %0" : : "m" (q[16]));
asm volatile("vld $vr6, %0" : : "m" (q[32]));
asm volatile("vld $vr7, %0" : : "m" (q[48]));
/* vr4 - vr7: Q + Qxy */
asm volatile("vld $vr8, %0" : : "m" (dq[0]));
asm volatile("vld $vr9, %0" : : "m" (dq[16]));
asm volatile("vld $vr10, %0" : : "m" (dq[32]));
asm volatile("vld $vr11, %0" : : "m" (dq[48]));
asm volatile("vxor.v $vr4, $vr4, $vr8");
asm volatile("vxor.v $vr5, $vr5, $vr9");
asm volatile("vxor.v $vr6, $vr6, $vr10");
asm volatile("vxor.v $vr7, $vr7, $vr11");
/* vr0 - vr3: P */
asm volatile("vld $vr0, %0" : : "m" (p[0]));
asm volatile("vld $vr1, %0" : : "m" (p[16]));
asm volatile("vld $vr2, %0" : : "m" (p[32]));
asm volatile("vld $vr3, %0" : : "m" (p[48]));
/* vr0 - vr3: P + Pxy */
asm volatile("vld $vr8, %0" : : "m" (dp[0]));
asm volatile("vld $vr9, %0" : : "m" (dp[16]));
asm volatile("vld $vr10, %0" : : "m" (dp[32]));
asm volatile("vld $vr11, %0" : : "m" (dp[48]));
asm volatile("vxor.v $vr0, $vr0, $vr8");
asm volatile("vxor.v $vr1, $vr1, $vr9");
asm volatile("vxor.v $vr2, $vr2, $vr10");
asm volatile("vxor.v $vr3, $vr3, $vr11");
/* vr8 - vr11: higher 4 bits of each byte of (Q + Qxy) */
asm volatile("vsrli.b $vr8, $vr4, 4");
asm volatile("vsrli.b $vr9, $vr5, 4");
asm volatile("vsrli.b $vr10, $vr6, 4");
asm volatile("vsrli.b $vr11, $vr7, 4");
/* vr4 - vr7: lower 4 bits of each byte of (Q + Qxy) */
asm volatile("vandi.b $vr4, $vr4, 0x0f");
asm volatile("vandi.b $vr5, $vr5, 0x0f");
asm volatile("vandi.b $vr6, $vr6, 0x0f");
asm volatile("vandi.b $vr7, $vr7, 0x0f");
/* lookup from qmul[0] */
asm volatile("vshuf.b $vr4, $vr20, $vr20, $vr4");
asm volatile("vshuf.b $vr5, $vr20, $vr20, $vr5");
asm volatile("vshuf.b $vr6, $vr20, $vr20, $vr6");
asm volatile("vshuf.b $vr7, $vr20, $vr20, $vr7");
/* lookup from qmul[16] */
asm volatile("vshuf.b $vr8, $vr21, $vr21, $vr8");
asm volatile("vshuf.b $vr9, $vr21, $vr21, $vr9");
asm volatile("vshuf.b $vr10, $vr21, $vr21, $vr10");
asm volatile("vshuf.b $vr11, $vr21, $vr21, $vr11");
/* vr16 - vr19: B(Q + Qxy) */
asm volatile("vxor.v $vr16, $vr8, $vr4");
asm volatile("vxor.v $vr17, $vr9, $vr5");
asm volatile("vxor.v $vr18, $vr10, $vr6");
asm volatile("vxor.v $vr19, $vr11, $vr7");
/* vr4 - vr7: higher 4 bits of each byte of (P + Pxy) */
asm volatile("vsrli.b $vr4, $vr0, 4");
asm volatile("vsrli.b $vr5, $vr1, 4");
asm volatile("vsrli.b $vr6, $vr2, 4");
asm volatile("vsrli.b $vr7, $vr3, 4");
/* vr12 - vr15: lower 4 bits of each byte of (P + Pxy) */
asm volatile("vandi.b $vr12, $vr0, 0x0f");
asm volatile("vandi.b $vr13, $vr1, 0x0f");
asm volatile("vandi.b $vr14, $vr2, 0x0f");
asm volatile("vandi.b $vr15, $vr3, 0x0f");
/* lookup from pbmul[0] */
asm volatile("vshuf.b $vr12, $vr22, $vr22, $vr12");
asm volatile("vshuf.b $vr13, $vr22, $vr22, $vr13");
asm volatile("vshuf.b $vr14, $vr22, $vr22, $vr14");
asm volatile("vshuf.b $vr15, $vr22, $vr22, $vr15");
/* lookup from pbmul[16] */
asm volatile("vshuf.b $vr4, $vr23, $vr23, $vr4");
asm volatile("vshuf.b $vr5, $vr23, $vr23, $vr5");
asm volatile("vshuf.b $vr6, $vr23, $vr23, $vr6");
asm volatile("vshuf.b $vr7, $vr23, $vr23, $vr7");
/* vr4 - vr7: A(P + Pxy) */
asm volatile("vxor.v $vr4, $vr4, $vr12");
asm volatile("vxor.v $vr5, $vr5, $vr13");
asm volatile("vxor.v $vr6, $vr6, $vr14");
asm volatile("vxor.v $vr7, $vr7, $vr15");
/* vr4 - vr7: A(P + Pxy) + B(Q + Qxy) = Dx */
asm volatile("vxor.v $vr4, $vr4, $vr16");
asm volatile("vxor.v $vr5, $vr5, $vr17");
asm volatile("vxor.v $vr6, $vr6, $vr18");
asm volatile("vxor.v $vr7, $vr7, $vr19");
asm volatile("vst $vr4, %0" : "=m" (dq[0]));
asm volatile("vst $vr5, %0" : "=m" (dq[16]));
asm volatile("vst $vr6, %0" : "=m" (dq[32]));
asm volatile("vst $vr7, %0" : "=m" (dq[48]));
/* vr0 - vr3: P + Pxy + Dx = Dy */
asm volatile("vxor.v $vr0, $vr0, $vr4");
asm volatile("vxor.v $vr1, $vr1, $vr5");
asm volatile("vxor.v $vr2, $vr2, $vr6");
asm volatile("vxor.v $vr3, $vr3, $vr7");
asm volatile("vst $vr0, %0" : "=m" (dp[0]));
asm volatile("vst $vr1, %0" : "=m" (dp[16]));
asm volatile("vst $vr2, %0" : "=m" (dp[32]));
asm volatile("vst $vr3, %0" : "=m" (dp[48]));
bytes -= 64;
p += 64;
q += 64;
dp += 64;
dq += 64;
}
kernel_fpu_end();
}
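/*
 * Illustrative sketch (an editorial addition; the helper name is made
 * up): the vsrli.b/vandi.b/vshuf.b sequences above do a byte-wise
 * GF(2^8) multiply by table lookup. Each 32-byte raid6_vgfmul[] entry
 * stores the products for the 16 low-nibble values followed by those
 * for the 16 high-nibble values, so per byte the vector code computes:
 */
static inline u8 raid6_sketch_vgfmul(const u8 *tbl, u8 x)
{
return tbl[x & 0x0f] ^ tbl[16 + (x >> 4)];
}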
static void raid6_datap_recov_lsx(int disks, size_t bytes, int faila,
void **ptrs)
{
u8 *p, *q, *dq;
const u8 *qmul; /* Q multiplier table */
p = (u8 *)ptrs[disks - 2];
q = (u8 *)ptrs[disks - 1];
/*
* Compute syndrome with zero for the missing data page
* Use the dead data page as temporary storage for delta q
*/
dq = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
ptrs[disks - 1] = q;
/* Now, pick the proper data tables */
qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
kernel_fpu_begin();
/* vr22, vr23: qmul */
asm volatile("vld $vr22, %0" : : "m" (qmul[0]));
asm volatile("vld $vr23, %0" : : "m" (qmul[16]));
while (bytes) {
/* vr0 - vr3: P + Dx */
asm volatile("vld $vr0, %0" : : "m" (p[0]));
asm volatile("vld $vr1, %0" : : "m" (p[16]));
asm volatile("vld $vr2, %0" : : "m" (p[32]));
asm volatile("vld $vr3, %0" : : "m" (p[48]));
/* vr4 - vr7: Qx */
asm volatile("vld $vr4, %0" : : "m" (dq[0]));
asm volatile("vld $vr5, %0" : : "m" (dq[16]));
asm volatile("vld $vr6, %0" : : "m" (dq[32]));
asm volatile("vld $vr7, %0" : : "m" (dq[48]));
/* vr4 - vr7: Q + Qx */
asm volatile("vld $vr8, %0" : : "m" (q[0]));
asm volatile("vld $vr9, %0" : : "m" (q[16]));
asm volatile("vld $vr10, %0" : : "m" (q[32]));
asm volatile("vld $vr11, %0" : : "m" (q[48]));
asm volatile("vxor.v $vr4, $vr4, $vr8");
asm volatile("vxor.v $vr5, $vr5, $vr9");
asm volatile("vxor.v $vr6, $vr6, $vr10");
asm volatile("vxor.v $vr7, $vr7, $vr11");
/* vr8 - vr11: higher 4 bits of each byte of (Q + Qx) */
asm volatile("vsrli.b $vr8, $vr4, 4");
asm volatile("vsrli.b $vr9, $vr5, 4");
asm volatile("vsrli.b $vr10, $vr6, 4");
asm volatile("vsrli.b $vr11, $vr7, 4");
/* vr4 - vr7: lower 4 bits of each byte of (Q + Qx) */
asm volatile("vandi.b $vr4, $vr4, 0x0f");
asm volatile("vandi.b $vr5, $vr5, 0x0f");
asm volatile("vandi.b $vr6, $vr6, 0x0f");
asm volatile("vandi.b $vr7, $vr7, 0x0f");
/* lookup from qmul[0] */
asm volatile("vshuf.b $vr4, $vr22, $vr22, $vr4");
asm volatile("vshuf.b $vr5, $vr22, $vr22, $vr5");
asm volatile("vshuf.b $vr6, $vr22, $vr22, $vr6");
asm volatile("vshuf.b $vr7, $vr22, $vr22, $vr7");
/* lookup from qmul[16] */
asm volatile("vshuf.b $vr8, $vr23, $vr23, $vr8");
asm volatile("vshuf.b $vr9, $vr23, $vr23, $vr9");
asm volatile("vshuf.b $vr10, $vr23, $vr23, $vr10");
asm volatile("vshuf.b $vr11, $vr23, $vr23, $vr11");
/* vr4 - vr7: qmul(Q + Qx) = Dx */
asm volatile("vxor.v $vr4, $vr4, $vr8");
asm volatile("vxor.v $vr5, $vr5, $vr9");
asm volatile("vxor.v $vr6, $vr6, $vr10");
asm volatile("vxor.v $vr7, $vr7, $vr11");
asm volatile("vst $vr4, %0" : "=m" (dq[0]));
asm volatile("vst $vr5, %0" : "=m" (dq[16]));
asm volatile("vst $vr6, %0" : "=m" (dq[32]));
asm volatile("vst $vr7, %0" : "=m" (dq[48]));
/* vr0 - vr3: P + Dx + Dx = P */
asm volatile("vxor.v $vr0, $vr0, $vr4");
asm volatile("vxor.v $vr1, $vr1, $vr5");
asm volatile("vxor.v $vr2, $vr2, $vr6");
asm volatile("vxor.v $vr3, $vr3, $vr7");
asm volatile("vst $vr0, %0" : "=m" (p[0]));
asm volatile("vst $vr1, %0" : "=m" (p[16]));
asm volatile("vst $vr2, %0" : "=m" (p[32]));
asm volatile("vst $vr3, %0" : "=m" (p[48]));
bytes -= 64;
p += 64;
q += 64;
dq += 64;
}
kernel_fpu_end();
}
const struct raid6_recov_calls raid6_recov_lsx = {
.data2 = raid6_2data_recov_lsx,
.datap = raid6_datap_recov_lsx,
.valid = raid6_has_lsx,
.name = "lsx",
.priority = 1,
};
#endif /* CONFIG_CPU_HAS_LSX */
#ifdef CONFIG_CPU_HAS_LASX
static int raid6_has_lasx(void)
{
return cpu_has_lasx;
}
static void raid6_2data_recov_lasx(int disks, size_t bytes, int faila,
int failb, void **ptrs)
{
u8 *p, *q, *dp, *dq;
const u8 *pbmul; /* P multiplier table for B data */
const u8 *qmul; /* Q multiplier table (for both) */
p = (u8 *)ptrs[disks - 2];
q = (u8 *)ptrs[disks - 1];
/*
* Compute syndrome with zero for the missing data pages
* Use the dead data pages as temporary storage for
* delta p and delta q
*/
dp = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks - 2] = dp;
dq = (u8 *)ptrs[failb];
ptrs[failb] = (void *)raid6_empty_zero_page;
ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
ptrs[failb] = dq;
ptrs[disks - 2] = p;
ptrs[disks - 1] = q;
/* Now, pick the proper data tables */
pbmul = raid6_vgfmul[raid6_gfexi[failb - faila]];
qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^ raid6_gfexp[failb]]];
kernel_fpu_begin();
/*
* xr20, xr21: qmul
* xr22, xr23: pbmul
*/
asm volatile("vld $vr20, %0" : : "m" (qmul[0]));
asm volatile("vld $vr21, %0" : : "m" (qmul[16]));
asm volatile("vld $vr22, %0" : : "m" (pbmul[0]));
asm volatile("vld $vr23, %0" : : "m" (pbmul[16]));
asm volatile("xvreplve0.q $xr20, $xr20");
asm volatile("xvreplve0.q $xr21, $xr21");
asm volatile("xvreplve0.q $xr22, $xr22");
asm volatile("xvreplve0.q $xr23, $xr23");
while (bytes) {
/* xr0, xr1: Q */
asm volatile("xvld $xr0, %0" : : "m" (q[0]));
asm volatile("xvld $xr1, %0" : : "m" (q[32]));
/* xr0, xr1: Q + Qxy */
asm volatile("xvld $xr4, %0" : : "m" (dq[0]));
asm volatile("xvld $xr5, %0" : : "m" (dq[32]));
asm volatile("xvxor.v $xr0, $xr0, $xr4");
asm volatile("xvxor.v $xr1, $xr1, $xr5");
/* xr2, xr3: P */
asm volatile("xvld $xr2, %0" : : "m" (p[0]));
asm volatile("xvld $xr3, %0" : : "m" (p[32]));
/* xr2, xr3: P + Pxy */
asm volatile("xvld $xr4, %0" : : "m" (dp[0]));
asm volatile("xvld $xr5, %0" : : "m" (dp[32]));
asm volatile("xvxor.v $xr2, $xr2, $xr4");
asm volatile("xvxor.v $xr3, $xr3, $xr5");
/* xr4, xr5: higher 4 bits of each byte of (Q + Qxy) */
asm volatile("xvsrli.b $xr4, $xr0, 4");
asm volatile("xvsrli.b $xr5, $xr1, 4");
/* xr0, xr1: lower 4 bits of each byte of (Q + Qxy) */
asm volatile("xvandi.b $xr0, $xr0, 0x0f");
asm volatile("xvandi.b $xr1, $xr1, 0x0f");
/* lookup from qmul[0] */
asm volatile("xvshuf.b $xr0, $xr20, $xr20, $xr0");
asm volatile("xvshuf.b $xr1, $xr20, $xr20, $xr1");
/* lookup from qmul[16] */
asm volatile("xvshuf.b $xr4, $xr21, $xr21, $xr4");
asm volatile("xvshuf.b $xr5, $xr21, $xr21, $xr5");
/* xr6, xr7: B(Q + Qxy) */
asm volatile("xvxor.v $xr6, $xr4, $xr0");
asm volatile("xvxor.v $xr7, $xr5, $xr1");
/* xr4, xr5: higher 4 bits of each byte of (P + Pxy) */
asm volatile("xvsrli.b $xr4, $xr2, 4");
asm volatile("xvsrli.b $xr5, $xr3, 4");
/* xr0, xr1: lower 4 bits of each byte of (P + Pxy) */
asm volatile("xvandi.b $xr0, $xr2, 0x0f");
asm volatile("xvandi.b $xr1, $xr3, 0x0f");
/* lookup from pbmul[0] */
asm volatile("xvshuf.b $xr0, $xr22, $xr22, $xr0");
asm volatile("xvshuf.b $xr1, $xr22, $xr22, $xr1");
/* lookup from pbmul[16] */
asm volatile("xvshuf.b $xr4, $xr23, $xr23, $xr4");
asm volatile("xvshuf.b $xr5, $xr23, $xr23, $xr5");
/* xr0, xr1: A(P + Pxy) */
asm volatile("xvxor.v $xr0, $xr0, $xr4");
asm volatile("xvxor.v $xr1, $xr1, $xr5");
/* xr0, xr1: A(P + Pxy) + B(Q + Qxy) = Dx */
asm volatile("xvxor.v $xr0, $xr0, $xr6");
asm volatile("xvxor.v $xr1, $xr1, $xr7");
/* xr2, xr3: P + Pxy + Dx = Dy */
asm volatile("xvxor.v $xr2, $xr2, $xr0");
asm volatile("xvxor.v $xr3, $xr3, $xr1");
asm volatile("xvst $xr0, %0" : "=m" (dq[0]));
asm volatile("xvst $xr1, %0" : "=m" (dq[32]));
asm volatile("xvst $xr2, %0" : "=m" (dp[0]));
asm volatile("xvst $xr3, %0" : "=m" (dp[32]));
bytes -= 64;
p += 64;
q += 64;
dp += 64;
dq += 64;
}
kernel_fpu_end();
}
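/*
 * Editorial note (sketch, assuming LASX per-lane shuffle semantics):
 * xvshuf.b indexes within each 128-bit lane independently (much like
 * AVX2 vpshufb), which is why the 16-byte lookup tables are loaded with
 * vld and then replicated into both lanes with xvreplve0.q before use.
 */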
static void raid6_datap_recov_lasx(int disks, size_t bytes, int faila,
void **ptrs)
{
u8 *p, *q, *dq;
const u8 *qmul; /* Q multiplier table */
p = (u8 *)ptrs[disks - 2];
q = (u8 *)ptrs[disks - 1];
/*
* Compute syndrome with zero for the missing data page
* Use the dead data page as temporary storage for delta q
*/
dq = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks - 1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
ptrs[disks - 1] = q;
/* Now, pick the proper data tables */
qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
kernel_fpu_begin();
/* xr22, xr23: qmul */
asm volatile("vld $vr22, %0" : : "m" (qmul[0]));
asm volatile("xvreplve0.q $xr22, $xr22");
asm volatile("vld $vr23, %0" : : "m" (qmul[16]));
asm volatile("xvreplve0.q $xr23, $xr23");
while (bytes) {
/* xr0, xr1: P + Dx */
asm volatile("xvld $xr0, %0" : : "m" (p[0]));
asm volatile("xvld $xr1, %0" : : "m" (p[32]));
/* xr2, xr3: Qx */
asm volatile("xvld $xr2, %0" : : "m" (dq[0]));
asm volatile("xvld $xr3, %0" : : "m" (dq[32]));
/* xr2, xr3: Q + Qx */
asm volatile("xvld $xr4, %0" : : "m" (q[0]));
asm volatile("xvld $xr5, %0" : : "m" (q[32]));
asm volatile("xvxor.v $xr2, $xr2, $xr4");
asm volatile("xvxor.v $xr3, $xr3, $xr5");
/* xr4, xr5: higher 4 bits of each byte of (Q + Qx) */
asm volatile("xvsrli.b $xr4, $xr2, 4");
asm volatile("xvsrli.b $xr5, $xr3, 4");
/* xr2, xr3: lower 4 bits of each byte of (Q + Qx) */
asm volatile("xvandi.b $xr2, $xr2, 0x0f");
asm volatile("xvandi.b $xr3, $xr3, 0x0f");
/* lookup from qmul[0] */
asm volatile("xvshuf.b $xr2, $xr22, $xr22, $xr2");
asm volatile("xvshuf.b $xr3, $xr22, $xr22, $xr3");
/* lookup from qmul[16] */
asm volatile("xvshuf.b $xr4, $xr23, $xr23, $xr4");
asm volatile("xvshuf.b $xr5, $xr23, $xr23, $xr5");
/* xr2, xr3: qmul(Q + Qx) = Dx */
asm volatile("xvxor.v $xr2, $xr2, $xr4");
asm volatile("xvxor.v $xr3, $xr3, $xr5");
/* xr0, xr1: P + Dx + Dx = P */
asm volatile("xvxor.v $xr0, $xr0, $xr2");
asm volatile("xvxor.v $xr1, $xr1, $xr3");
asm volatile("xvst $xr2, %0" : "=m" (dq[0]));
asm volatile("xvst $xr3, %0" : "=m" (dq[32]));
asm volatile("xvst $xr0, %0" : "=m" (p[0]));
asm volatile("xvst $xr1, %0" : "=m" (p[32]));
bytes -= 64;
p += 64;
q += 64;
dq += 64;
}
kernel_fpu_end();
}
const struct raid6_recov_calls raid6_recov_lasx = {
.data2 = raid6_2data_recov_lasx,
.datap = raid6_datap_recov_lasx,
.valid = raid6_has_lasx,
.name = "lasx",
.priority = 2,
};
#endif /* CONFIG_CPU_HAS_LASX */
| linux-master | lib/raid6/recov_loongarch_simd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright 2002 H. Peter Anvin - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* raid6/sse1.c
*
* SSE-1/MMXEXT implementation of RAID-6 syndrome functions
*
* This is really an MMX implementation, but it requires SSE-1 or
* AMD MMXEXT for prefetch support and a few other features. The
* support for nontemporal memory accesses is enough to make this
* worthwhile as a separate implementation.
*/
#ifdef CONFIG_X86_32
#include <linux/raid/pq.h>
#include "x86.h"
/* Defined in raid6/mmx.c */
extern const struct raid6_mmx_constants {
u64 x1d;
} raid6_mmx_constants;
static int raid6_have_sse1_or_mmxext(void)
{
/* Not really boot_cpu but "all_cpus" */
return boot_cpu_has(X86_FEATURE_MMX) &&
(boot_cpu_has(X86_FEATURE_XMM) ||
boot_cpu_has(X86_FEATURE_MMXEXT));
}
/*
* Plain SSE1 implementation
*/
static void raid6_sse11_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
asm volatile("pxor %mm5,%mm5"); /* Zero temp */
for ( d = 0 ; d < bytes ; d += 8 ) {
asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
asm volatile("movq %mm2,%mm4"); /* Q[0] */
asm volatile("movq %0,%%mm6" : : "m" (dptr[z0-1][d]));
for ( z = z0-2 ; z >= 0 ; z-- ) {
asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
asm volatile("pcmpgtb %mm4,%mm5");
asm volatile("paddb %mm4,%mm4");
asm volatile("pand %mm0,%mm5");
asm volatile("pxor %mm5,%mm4");
asm volatile("pxor %mm5,%mm5");
asm volatile("pxor %mm6,%mm2");
asm volatile("pxor %mm6,%mm4");
asm volatile("movq %0,%%mm6" : : "m" (dptr[z][d]));
}
asm volatile("pcmpgtb %mm4,%mm5");
asm volatile("paddb %mm4,%mm4");
asm volatile("pand %mm0,%mm5");
asm volatile("pxor %mm5,%mm4");
asm volatile("pxor %mm5,%mm5");
asm volatile("pxor %mm6,%mm2");
asm volatile("pxor %mm6,%mm4");
asm volatile("movntq %%mm2,%0" : "=m" (p[d]));
asm volatile("movntq %%mm4,%0" : "=m" (q[d]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
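/*
 * Editorial note (sketch): relative to the plain MMX version, this loop
 * is software-pipelined - the load of the next disk's data into %mm6 is
 * issued before the loop and again at the bottom of each iteration, so
 * the loop body starts at z0-2 and the last iteration's arithmetic is
 * peeled off after the loop.
 */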
const struct raid6_calls raid6_sse1x1 = {
raid6_sse11_gen_syndrome,
NULL, /* XOR not yet implemented */
raid6_have_sse1_or_mmxext,
"sse1x1",
1 /* Has cache hints */
};
/*
* Unrolled-by-2 SSE1 implementation
*/
static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("movq %0,%%mm0" : : "m" (raid6_mmx_constants.x1d));
asm volatile("pxor %mm5,%mm5"); /* Zero temp */
asm volatile("pxor %mm7,%mm7"); /* Zero temp */
/* We uniformly assume a single prefetch covers at least 16 bytes */
for ( d = 0 ; d < bytes ; d += 16 ) {
asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
asm volatile("movq %0,%%mm2" : : "m" (dptr[z0][d])); /* P[0] */
asm volatile("movq %0,%%mm3" : : "m" (dptr[z0][d+8])); /* P[1] */
asm volatile("movq %mm2,%mm4"); /* Q[0] */
asm volatile("movq %mm3,%mm6"); /* Q[1] */
for ( z = z0-1 ; z >= 0 ; z-- ) {
asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
asm volatile("pcmpgtb %mm4,%mm5");
asm volatile("pcmpgtb %mm6,%mm7");
asm volatile("paddb %mm4,%mm4");
asm volatile("paddb %mm6,%mm6");
asm volatile("pand %mm0,%mm5");
asm volatile("pand %mm0,%mm7");
asm volatile("pxor %mm5,%mm4");
asm volatile("pxor %mm7,%mm6");
asm volatile("movq %0,%%mm5" : : "m" (dptr[z][d]));
asm volatile("movq %0,%%mm7" : : "m" (dptr[z][d+8]));
asm volatile("pxor %mm5,%mm2");
asm volatile("pxor %mm7,%mm3");
asm volatile("pxor %mm5,%mm4");
asm volatile("pxor %mm7,%mm6");
asm volatile("pxor %mm5,%mm5");
asm volatile("pxor %mm7,%mm7");
}
asm volatile("movntq %%mm2,%0" : "=m" (p[d]));
asm volatile("movntq %%mm3,%0" : "=m" (p[d+8]));
asm volatile("movntq %%mm4,%0" : "=m" (q[d]));
asm volatile("movntq %%mm6,%0" : "=m" (q[d+8]));
}
asm volatile("sfence" : :: "memory");
kernel_fpu_end();
}
const struct raid6_calls raid6_sse1x2 = {
raid6_sse12_gen_syndrome,
NULL, /* XOR not yet implemented */
raid6_have_sse1_or_mmxext,
"sse1x2",
1 /* Has cache hints */
};
#endif
| linux-master | lib/raid6/sse1.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- --------------------------------------------------------
*
* Copyright (C) 2016 Intel Corporation
*
* Author: Gayatri Kammela <[email protected]>
* Author: Megha Dey <[email protected]>
*
* Based on avx2.c: Copyright 2012 Yuanhan Liu All Rights Reserved
* Based on sse2.c: Copyright 2002 H. Peter Anvin - All Rights Reserved
*
* -----------------------------------------------------------------------
*/
/*
* AVX512 implementation of RAID-6 syndrome functions
*
*/
#ifdef CONFIG_AS_AVX512
#include <linux/raid/pq.h>
#include "x86.h"
static const struct raid6_avx512_constants {
u64 x1d[8];
} raid6_avx512_constants __aligned(512/8) = {
{ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,
0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,
0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,
0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,},
};
static int raid6_have_avx512(void)
{
return boot_cpu_has(X86_FEATURE_AVX2) &&
boot_cpu_has(X86_FEATURE_AVX) &&
boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512BW) &&
boot_cpu_has(X86_FEATURE_AVX512VL) &&
boot_cpu_has(X86_FEATURE_AVX512DQ);
}
static void raid6_avx5121_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa64 %0,%%zmm0\n\t"
"vpxorq %%zmm1,%%zmm1,%%zmm1" /* Zero temp */
:
: "m" (raid6_avx512_constants.x1d[0]));
for (d = 0; d < bytes; d += 64) {
asm volatile("prefetchnta %0\n\t"
"vmovdqa64 %0,%%zmm2\n\t" /* P[0] */
"prefetchnta %1\n\t"
"vmovdqa64 %%zmm2,%%zmm4\n\t" /* Q[0] */
"vmovdqa64 %1,%%zmm6"
:
: "m" (dptr[z0][d]), "m" (dptr[z0-1][d]));
for (z = z0-2; z >= 0; z--) {
asm volatile("prefetchnta %0\n\t"
"vpcmpgtb %%zmm4,%%zmm1,%%k1\n\t"
"vpmovm2b %%k1,%%zmm5\n\t"
"vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
"vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
"vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
"vpxorq %%zmm6,%%zmm2,%%zmm2\n\t"
"vpxorq %%zmm6,%%zmm4,%%zmm4\n\t"
"vmovdqa64 %0,%%zmm6"
:
: "m" (dptr[z][d]));
}
asm volatile("vpcmpgtb %%zmm4,%%zmm1,%%k1\n\t"
"vpmovm2b %%k1,%%zmm5\n\t"
"vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
"vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
"vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
"vpxorq %%zmm6,%%zmm2,%%zmm2\n\t"
"vpxorq %%zmm6,%%zmm4,%%zmm4\n\t"
"vmovntdq %%zmm2,%0\n\t"
"vpxorq %%zmm2,%%zmm2,%%zmm2\n\t"
"vmovntdq %%zmm4,%1\n\t"
"vpxorq %%zmm4,%%zmm4,%%zmm4"
:
: "m" (p[d]), "m" (q[d]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
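/*
 * Editorial note (sketch): unlike SSE2/AVX2, vpcmpgtb in AVX512 writes
 * a k mask register rather than a 0x00/0xff byte vector, so each
 * compare above is followed by vpmovm2b to materialize the byte mask
 * that the subsequent vpandq/vpxorq steps expect.
 */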
static void raid6_avx5121_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
p = dptr[disks-2]; /* XOR parity */
q = dptr[disks-1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa64 %0,%%zmm0"
: : "m" (raid6_avx512_constants.x1d[0]));
for (d = 0 ; d < bytes ; d += 64) {
asm volatile("vmovdqa64 %0,%%zmm4\n\t"
"vmovdqa64 %1,%%zmm2\n\t"
"vpxorq %%zmm4,%%zmm2,%%zmm2"
:
: "m" (dptr[z0][d]), "m" (p[d]));
/* P/Q data pages */
for (z = z0-1 ; z >= start ; z--) {
asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
"vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
"vpmovm2b %%k1,%%zmm5\n\t"
"vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
"vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
"vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
"vmovdqa64 %0,%%zmm5\n\t"
"vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
"vpxorq %%zmm5,%%zmm4,%%zmm4"
:
: "m" (dptr[z][d]));
}
/* P/Q left side optimization */
for (z = start-1 ; z >= 0 ; z--) {
asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
"vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
"vpmovm2b %%k1,%%zmm5\n\t"
"vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
"vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
"vpxorq %%zmm5,%%zmm4,%%zmm4"
:
: );
}
asm volatile("vpxorq %0,%%zmm4,%%zmm4\n\t"
/* Don't use movntdq for r/w memory area < cache line */
"vmovdqa64 %%zmm4,%0\n\t"
"vmovdqa64 %%zmm2,%1"
:
: "m" (q[d]), "m" (p[d]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
const struct raid6_calls raid6_avx512x1 = {
raid6_avx5121_gen_syndrome,
raid6_avx5121_xor_syndrome,
raid6_have_avx512,
"avx512x1",
.priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */
};
/*
* Unrolled-by-2 AVX512 implementation
*/
static void raid6_avx5122_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa64 %0,%%zmm0\n\t"
"vpxorq %%zmm1,%%zmm1,%%zmm1" /* Zero temp */
:
: "m" (raid6_avx512_constants.x1d[0]));
/* We uniformly assume a single prefetch covers at least 64 bytes */
for (d = 0; d < bytes; d += 128) {
asm volatile("prefetchnta %0\n\t"
"prefetchnta %1\n\t"
"vmovdqa64 %0,%%zmm2\n\t" /* P[0] */
"vmovdqa64 %1,%%zmm3\n\t" /* P[1] */
"vmovdqa64 %%zmm2,%%zmm4\n\t" /* Q[0] */
"vmovdqa64 %%zmm3,%%zmm6" /* Q[1] */
:
: "m" (dptr[z0][d]), "m" (dptr[z0][d+64]));
for (z = z0-1; z >= 0; z--) {
asm volatile("prefetchnta %0\n\t"
"prefetchnta %1\n\t"
"vpcmpgtb %%zmm4,%%zmm1,%%k1\n\t"
"vpcmpgtb %%zmm6,%%zmm1,%%k2\n\t"
"vpmovm2b %%k1,%%zmm5\n\t"
"vpmovm2b %%k2,%%zmm7\n\t"
"vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
"vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
"vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
"vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
"vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
"vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
"vmovdqa64 %0,%%zmm5\n\t"
"vmovdqa64 %1,%%zmm7\n\t"
"vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
"vpxorq %%zmm7,%%zmm3,%%zmm3\n\t"
"vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
"vpxorq %%zmm7,%%zmm6,%%zmm6"
:
: "m" (dptr[z][d]), "m" (dptr[z][d+64]));
}
asm volatile("vmovntdq %%zmm2,%0\n\t"
"vmovntdq %%zmm3,%1\n\t"
"vmovntdq %%zmm4,%2\n\t"
"vmovntdq %%zmm6,%3"
:
: "m" (p[d]), "m" (p[d+64]), "m" (q[d]),
"m" (q[d+64]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
static void raid6_avx5122_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
p = dptr[disks-2]; /* XOR parity */
q = dptr[disks-1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa64 %0,%%zmm0"
: : "m" (raid6_avx512_constants.x1d[0]));
for (d = 0 ; d < bytes ; d += 128) {
asm volatile("vmovdqa64 %0,%%zmm4\n\t"
"vmovdqa64 %1,%%zmm6\n\t"
"vmovdqa64 %2,%%zmm2\n\t"
"vmovdqa64 %3,%%zmm3\n\t"
"vpxorq %%zmm4,%%zmm2,%%zmm2\n\t"
"vpxorq %%zmm6,%%zmm3,%%zmm3"
:
: "m" (dptr[z0][d]), "m" (dptr[z0][d+64]),
"m" (p[d]), "m" (p[d+64]));
/* P/Q data pages */
for (z = z0-1 ; z >= start ; z--) {
asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
"vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
"vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
"vpcmpgtb %%zmm6,%%zmm7,%%k2\n\t"
"vpmovm2b %%k1,%%zmm5\n\t"
"vpmovm2b %%k2,%%zmm7\n\t"
"vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
"vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
"vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
"vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
"vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
"vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
"vmovdqa64 %0,%%zmm5\n\t"
"vmovdqa64 %1,%%zmm7\n\t"
"vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
"vpxorq %%zmm7,%%zmm3,%%zmm3\n\t"
"vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
"vpxorq %%zmm7,%%zmm6,%%zmm6"
:
: "m" (dptr[z][d]), "m" (dptr[z][d+64]));
}
/* P/Q left side optimization */
for (z = start-1 ; z >= 0 ; z--) {
asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
"vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
"vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
"vpcmpgtb %%zmm6,%%zmm7,%%k2\n\t"
"vpmovm2b %%k1,%%zmm5\n\t"
"vpmovm2b %%k2,%%zmm7\n\t"
"vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
"vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
"vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
"vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
"vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
"vpxorq %%zmm7,%%zmm6,%%zmm6"
:
: );
}
asm volatile("vpxorq %0,%%zmm4,%%zmm4\n\t"
"vpxorq %1,%%zmm6,%%zmm6\n\t"
/* Don't use movntdq for r/w
* memory area < cache line
*/
"vmovdqa64 %%zmm4,%0\n\t"
"vmovdqa64 %%zmm6,%1\n\t"
"vmovdqa64 %%zmm2,%2\n\t"
"vmovdqa64 %%zmm3,%3"
:
: "m" (q[d]), "m" (q[d+64]), "m" (p[d]),
"m" (p[d+64]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
const struct raid6_calls raid6_avx512x2 = {
raid6_avx5122_gen_syndrome,
raid6_avx5122_xor_syndrome,
raid6_have_avx512,
"avx512x2",
.priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */
};
#ifdef CONFIG_X86_64
/*
 * Unrolled-by-4 AVX512 implementation
*/
static void raid6_avx5124_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa64 %0,%%zmm0\n\t"
"vpxorq %%zmm1,%%zmm1,%%zmm1\n\t" /* Zero temp */
"vpxorq %%zmm2,%%zmm2,%%zmm2\n\t" /* P[0] */
"vpxorq %%zmm3,%%zmm3,%%zmm3\n\t" /* P[1] */
"vpxorq %%zmm4,%%zmm4,%%zmm4\n\t" /* Q[0] */
"vpxorq %%zmm6,%%zmm6,%%zmm6\n\t" /* Q[1] */
"vpxorq %%zmm10,%%zmm10,%%zmm10\n\t" /* P[2] */
"vpxorq %%zmm11,%%zmm11,%%zmm11\n\t" /* P[3] */
"vpxorq %%zmm12,%%zmm12,%%zmm12\n\t" /* Q[2] */
"vpxorq %%zmm14,%%zmm14,%%zmm14" /* Q[3] */
:
: "m" (raid6_avx512_constants.x1d[0]));
for (d = 0; d < bytes; d += 256) {
for (z = z0; z >= 0; z--) {
asm volatile("prefetchnta %0\n\t"
"prefetchnta %1\n\t"
"prefetchnta %2\n\t"
"prefetchnta %3\n\t"
"vpcmpgtb %%zmm4,%%zmm1,%%k1\n\t"
"vpcmpgtb %%zmm6,%%zmm1,%%k2\n\t"
"vpcmpgtb %%zmm12,%%zmm1,%%k3\n\t"
"vpcmpgtb %%zmm14,%%zmm1,%%k4\n\t"
"vpmovm2b %%k1,%%zmm5\n\t"
"vpmovm2b %%k2,%%zmm7\n\t"
"vpmovm2b %%k3,%%zmm13\n\t"
"vpmovm2b %%k4,%%zmm15\n\t"
"vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
"vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
"vpaddb %%zmm12,%%zmm12,%%zmm12\n\t"
"vpaddb %%zmm14,%%zmm14,%%zmm14\n\t"
"vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
"vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
"vpandq %%zmm0,%%zmm13,%%zmm13\n\t"
"vpandq %%zmm0,%%zmm15,%%zmm15\n\t"
"vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
"vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
"vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
"vpxorq %%zmm15,%%zmm14,%%zmm14\n\t"
"vmovdqa64 %0,%%zmm5\n\t"
"vmovdqa64 %1,%%zmm7\n\t"
"vmovdqa64 %2,%%zmm13\n\t"
"vmovdqa64 %3,%%zmm15\n\t"
"vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
"vpxorq %%zmm7,%%zmm3,%%zmm3\n\t"
"vpxorq %%zmm13,%%zmm10,%%zmm10\n\t"
"vpxorq %%zmm15,%%zmm11,%%zmm11\n"
"vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
"vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
"vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
"vpxorq %%zmm15,%%zmm14,%%zmm14"
:
: "m" (dptr[z][d]), "m" (dptr[z][d+64]),
"m" (dptr[z][d+128]), "m" (dptr[z][d+192]));
}
asm volatile("vmovntdq %%zmm2,%0\n\t"
"vpxorq %%zmm2,%%zmm2,%%zmm2\n\t"
"vmovntdq %%zmm3,%1\n\t"
"vpxorq %%zmm3,%%zmm3,%%zmm3\n\t"
"vmovntdq %%zmm10,%2\n\t"
"vpxorq %%zmm10,%%zmm10,%%zmm10\n\t"
"vmovntdq %%zmm11,%3\n\t"
"vpxorq %%zmm11,%%zmm11,%%zmm11\n\t"
"vmovntdq %%zmm4,%4\n\t"
"vpxorq %%zmm4,%%zmm4,%%zmm4\n\t"
"vmovntdq %%zmm6,%5\n\t"
"vpxorq %%zmm6,%%zmm6,%%zmm6\n\t"
"vmovntdq %%zmm12,%6\n\t"
"vpxorq %%zmm12,%%zmm12,%%zmm12\n\t"
"vmovntdq %%zmm14,%7\n\t"
"vpxorq %%zmm14,%%zmm14,%%zmm14"
:
: "m" (p[d]), "m" (p[d+64]), "m" (p[d+128]),
"m" (p[d+192]), "m" (q[d]), "m" (q[d+64]),
"m" (q[d+128]), "m" (q[d+192]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
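/*
 * Editorial note (sketch): this unrolled-by-4 variant keeps all eight
 * P/Q accumulators in registers, zeroed up front and re-cleared by the
 * vpxorq after each vmovntdq store, so the z loop runs over every data
 * disk from z0 down to 0 instead of seeding the accumulators from
 * dptr[z0] first.
 */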
static void raid6_avx5124_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
p = dptr[disks-2]; /* XOR parity */
q = dptr[disks-1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("vmovdqa64 %0,%%zmm0"
:: "m" (raid6_avx512_constants.x1d[0]));
for (d = 0 ; d < bytes ; d += 256) {
asm volatile("vmovdqa64 %0,%%zmm4\n\t"
"vmovdqa64 %1,%%zmm6\n\t"
"vmovdqa64 %2,%%zmm12\n\t"
"vmovdqa64 %3,%%zmm14\n\t"
"vmovdqa64 %4,%%zmm2\n\t"
"vmovdqa64 %5,%%zmm3\n\t"
"vmovdqa64 %6,%%zmm10\n\t"
"vmovdqa64 %7,%%zmm11\n\t"
"vpxorq %%zmm4,%%zmm2,%%zmm2\n\t"
"vpxorq %%zmm6,%%zmm3,%%zmm3\n\t"
"vpxorq %%zmm12,%%zmm10,%%zmm10\n\t"
"vpxorq %%zmm14,%%zmm11,%%zmm11"
:
: "m" (dptr[z0][d]), "m" (dptr[z0][d+64]),
"m" (dptr[z0][d+128]), "m" (dptr[z0][d+192]),
"m" (p[d]), "m" (p[d+64]), "m" (p[d+128]),
"m" (p[d+192]));
/* P/Q data pages */
for (z = z0-1 ; z >= start ; z--) {
asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
"vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
"vpxorq %%zmm13,%%zmm13,%%zmm13\n\t"
"vpxorq %%zmm15,%%zmm15,%%zmm15\n\t"
"prefetchnta %0\n\t"
"prefetchnta %2\n\t"
"vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
"vpcmpgtb %%zmm6,%%zmm7,%%k2\n\t"
"vpcmpgtb %%zmm12,%%zmm13,%%k3\n\t"
"vpcmpgtb %%zmm14,%%zmm15,%%k4\n\t"
"vpmovm2b %%k1,%%zmm5\n\t"
"vpmovm2b %%k2,%%zmm7\n\t"
"vpmovm2b %%k3,%%zmm13\n\t"
"vpmovm2b %%k4,%%zmm15\n\t"
"vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
"vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
"vpaddb %%zmm12,%%zmm12,%%zmm12\n\t"
"vpaddb %%Zmm14,%%zmm14,%%zmm14\n\t"
"vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
"vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
"vpandq %%zmm0,%%zmm13,%%zmm13\n\t"
"vpandq %%zmm0,%%zmm15,%%zmm15\n\t"
"vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
"vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
"vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
"vpxorq %%zmm15,%%zmm14,%%zmm14\n\t"
"vmovdqa64 %0,%%zmm5\n\t"
"vmovdqa64 %1,%%zmm7\n\t"
"vmovdqa64 %2,%%zmm13\n\t"
"vmovdqa64 %3,%%zmm15\n\t"
"vpxorq %%zmm5,%%zmm2,%%zmm2\n\t"
"vpxorq %%zmm7,%%zmm3,%%zmm3\n\t"
"vpxorq %%zmm13,%%zmm10,%%zmm10\n\t"
"vpxorq %%zmm15,%%zmm11,%%zmm11\n\t"
"vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
"vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
"vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
"vpxorq %%zmm15,%%zmm14,%%zmm14"
:
: "m" (dptr[z][d]), "m" (dptr[z][d+64]),
"m" (dptr[z][d+128]),
"m" (dptr[z][d+192]));
}
asm volatile("prefetchnta %0\n\t"
"prefetchnta %1\n\t"
:
: "m" (q[d]), "m" (q[d+128]));
/* P/Q left side optimization */
for (z = start-1 ; z >= 0 ; z--) {
asm volatile("vpxorq %%zmm5,%%zmm5,%%zmm5\n\t"
"vpxorq %%zmm7,%%zmm7,%%zmm7\n\t"
"vpxorq %%zmm13,%%zmm13,%%zmm13\n\t"
"vpxorq %%zmm15,%%zmm15,%%zmm15\n\t"
"vpcmpgtb %%zmm4,%%zmm5,%%k1\n\t"
"vpcmpgtb %%zmm6,%%zmm7,%%k2\n\t"
"vpcmpgtb %%zmm12,%%zmm13,%%k3\n\t"
"vpcmpgtb %%zmm14,%%zmm15,%%k4\n\t"
"vpmovm2b %%k1,%%zmm5\n\t"
"vpmovm2b %%k2,%%zmm7\n\t"
"vpmovm2b %%k3,%%zmm13\n\t"
"vpmovm2b %%k4,%%zmm15\n\t"
"vpaddb %%zmm4,%%zmm4,%%zmm4\n\t"
"vpaddb %%zmm6,%%zmm6,%%zmm6\n\t"
"vpaddb %%zmm12,%%zmm12,%%zmm12\n\t"
"vpaddb %%zmm14,%%zmm14,%%zmm14\n\t"
"vpandq %%zmm0,%%zmm5,%%zmm5\n\t"
"vpandq %%zmm0,%%zmm7,%%zmm7\n\t"
"vpandq %%zmm0,%%zmm13,%%zmm13\n\t"
"vpandq %%zmm0,%%zmm15,%%zmm15\n\t"
"vpxorq %%zmm5,%%zmm4,%%zmm4\n\t"
"vpxorq %%zmm7,%%zmm6,%%zmm6\n\t"
"vpxorq %%zmm13,%%zmm12,%%zmm12\n\t"
"vpxorq %%zmm15,%%zmm14,%%zmm14"
:
: );
}
asm volatile("vmovntdq %%zmm2,%0\n\t"
"vmovntdq %%zmm3,%1\n\t"
"vmovntdq %%zmm10,%2\n\t"
"vmovntdq %%zmm11,%3\n\t"
"vpxorq %4,%%zmm4,%%zmm4\n\t"
"vpxorq %5,%%zmm6,%%zmm6\n\t"
"vpxorq %6,%%zmm12,%%zmm12\n\t"
"vpxorq %7,%%zmm14,%%zmm14\n\t"
"vmovntdq %%zmm4,%4\n\t"
"vmovntdq %%zmm6,%5\n\t"
"vmovntdq %%zmm12,%6\n\t"
"vmovntdq %%zmm14,%7"
:
: "m" (p[d]), "m" (p[d+64]), "m" (p[d+128]),
"m" (p[d+192]), "m" (q[d]), "m" (q[d+64]),
"m" (q[d+128]), "m" (q[d+192]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
const struct raid6_calls raid6_avx512x4 = {
raid6_avx5124_gen_syndrome,
raid6_avx5124_xor_syndrome,
raid6_have_avx512,
"avx512x4",
.priority = 2 /* Prefer AVX512 over priority 1 (SSE2 and others) */
};
#endif
#endif /* CONFIG_AS_AVX512 */
| linux-master | lib/raid6/avx512.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Intel Corporation
*
* Author: Gayatri Kammela <[email protected]>
* Author: Megha Dey <[email protected]>
*/
#ifdef CONFIG_AS_AVX512
#include <linux/raid/pq.h>
#include "x86.h"
static int raid6_has_avx512(void)
{
return boot_cpu_has(X86_FEATURE_AVX2) &&
boot_cpu_has(X86_FEATURE_AVX) &&
boot_cpu_has(X86_FEATURE_AVX512F) &&
boot_cpu_has(X86_FEATURE_AVX512BW) &&
boot_cpu_has(X86_FEATURE_AVX512VL) &&
boot_cpu_has(X86_FEATURE_AVX512DQ);
}
static void raid6_2data_recov_avx512(int disks, size_t bytes, int faila,
int failb, void **ptrs)
{
u8 *p, *q, *dp, *dq;
const u8 *pbmul; /* P multiplier table for B data */
const u8 *qmul; /* Q multiplier table (for both) */
const u8 x0f = 0x0f;
p = (u8 *)ptrs[disks-2];
q = (u8 *)ptrs[disks-1];
/*
* Compute syndrome with zero for the missing data pages
* Use the dead data pages as temporary storage for
* delta p and delta q
*/
dp = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks-2] = dp;
dq = (u8 *)ptrs[failb];
ptrs[failb] = (void *)raid6_empty_zero_page;
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
ptrs[failb] = dq;
ptrs[disks-2] = p;
ptrs[disks-1] = q;
/* Now, pick the proper data tables */
pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
raid6_gfexp[failb]]];
kernel_fpu_begin();
/* zmm7 = x0f[16] */
asm volatile("vpbroadcastb %0, %%zmm7" : : "m" (x0f));
while (bytes) {
#ifdef CONFIG_X86_64
asm volatile("vmovdqa64 %0, %%zmm1\n\t"
"vmovdqa64 %1, %%zmm9\n\t"
"vmovdqa64 %2, %%zmm0\n\t"
"vmovdqa64 %3, %%zmm8\n\t"
"vpxorq %4, %%zmm1, %%zmm1\n\t"
"vpxorq %5, %%zmm9, %%zmm9\n\t"
"vpxorq %6, %%zmm0, %%zmm0\n\t"
"vpxorq %7, %%zmm8, %%zmm8"
:
: "m" (q[0]), "m" (q[64]), "m" (p[0]),
"m" (p[64]), "m" (dq[0]), "m" (dq[64]),
"m" (dp[0]), "m" (dp[64]));
/*
* 1 = dq[0] ^ q[0]
* 9 = dq[64] ^ q[64]
* 0 = dp[0] ^ p[0]
* 8 = dp[64] ^ p[64]
*/
asm volatile("vbroadcasti64x2 %0, %%zmm4\n\t"
"vbroadcasti64x2 %1, %%zmm5"
:
: "m" (qmul[0]), "m" (qmul[16]));
asm volatile("vpsraw $4, %%zmm1, %%zmm3\n\t"
"vpsraw $4, %%zmm9, %%zmm12\n\t"
"vpandq %%zmm7, %%zmm1, %%zmm1\n\t"
"vpandq %%zmm7, %%zmm9, %%zmm9\n\t"
"vpandq %%zmm7, %%zmm3, %%zmm3\n\t"
"vpandq %%zmm7, %%zmm12, %%zmm12\n\t"
"vpshufb %%zmm9, %%zmm4, %%zmm14\n\t"
"vpshufb %%zmm1, %%zmm4, %%zmm4\n\t"
"vpshufb %%zmm12, %%zmm5, %%zmm15\n\t"
"vpshufb %%zmm3, %%zmm5, %%zmm5\n\t"
"vpxorq %%zmm14, %%zmm15, %%zmm15\n\t"
"vpxorq %%zmm4, %%zmm5, %%zmm5"
:
: );
/*
* 5 = qx[0]
* 15 = qx[64]
*/
asm volatile("vbroadcasti64x2 %0, %%zmm4\n\t"
"vbroadcasti64x2 %1, %%zmm1\n\t"
"vpsraw $4, %%zmm0, %%zmm2\n\t"
"vpsraw $4, %%zmm8, %%zmm6\n\t"
"vpandq %%zmm7, %%zmm0, %%zmm3\n\t"
"vpandq %%zmm7, %%zmm8, %%zmm14\n\t"
"vpandq %%zmm7, %%zmm2, %%zmm2\n\t"
"vpandq %%zmm7, %%zmm6, %%zmm6\n\t"
"vpshufb %%zmm14, %%zmm4, %%zmm12\n\t"
"vpshufb %%zmm3, %%zmm4, %%zmm4\n\t"
"vpshufb %%zmm6, %%zmm1, %%zmm13\n\t"
"vpshufb %%zmm2, %%zmm1, %%zmm1\n\t"
"vpxorq %%zmm4, %%zmm1, %%zmm1\n\t"
"vpxorq %%zmm12, %%zmm13, %%zmm13"
:
: "m" (pbmul[0]), "m" (pbmul[16]));
/*
* 1 = pbmul[px[0]]
* 13 = pbmul[px[64]]
*/
asm volatile("vpxorq %%zmm5, %%zmm1, %%zmm1\n\t"
"vpxorq %%zmm15, %%zmm13, %%zmm13"
:
: );
/*
* 1 = db = DQ
* 13 = db[64] = DQ[64]
*/
asm volatile("vmovdqa64 %%zmm1, %0\n\t"
"vmovdqa64 %%zmm13,%1\n\t"
"vpxorq %%zmm1, %%zmm0, %%zmm0\n\t"
"vpxorq %%zmm13, %%zmm8, %%zmm8"
:
: "m" (dq[0]), "m" (dq[64]));
asm volatile("vmovdqa64 %%zmm0, %0\n\t"
"vmovdqa64 %%zmm8, %1"
:
: "m" (dp[0]), "m" (dp[64]));
bytes -= 128;
p += 128;
q += 128;
dp += 128;
dq += 128;
#else
asm volatile("vmovdqa64 %0, %%zmm1\n\t"
"vmovdqa64 %1, %%zmm0\n\t"
"vpxorq %2, %%zmm1, %%zmm1\n\t"
"vpxorq %3, %%zmm0, %%zmm0"
:
: "m" (*q), "m" (*p), "m"(*dq), "m" (*dp));
/* 1 = dq ^ q; 0 = dp ^ p */
asm volatile("vbroadcasti64x2 %0, %%zmm4\n\t"
"vbroadcasti64x2 %1, %%zmm5"
:
: "m" (qmul[0]), "m" (qmul[16]));
/*
* 1 = dq ^ q
* 3 = (dq ^ q) >> 4
*/
asm volatile("vpsraw $4, %%zmm1, %%zmm3\n\t"
"vpandq %%zmm7, %%zmm1, %%zmm1\n\t"
"vpandq %%zmm7, %%zmm3, %%zmm3\n\t"
"vpshufb %%zmm1, %%zmm4, %%zmm4\n\t"
"vpshufb %%zmm3, %%zmm5, %%zmm5\n\t"
"vpxorq %%zmm4, %%zmm5, %%zmm5"
:
: );
/* 5 = qx */
asm volatile("vbroadcasti64x2 %0, %%zmm4\n\t"
"vbroadcasti64x2 %1, %%zmm1"
:
: "m" (pbmul[0]), "m" (pbmul[16]));
asm volatile("vpsraw $4, %%zmm0, %%zmm2\n\t"
"vpandq %%zmm7, %%zmm0, %%zmm3\n\t"
"vpandq %%zmm7, %%zmm2, %%zmm2\n\t"
"vpshufb %%zmm3, %%zmm4, %%zmm4\n\t"
"vpshufb %%zmm2, %%zmm1, %%zmm1\n\t"
"vpxorq %%zmm4, %%zmm1, %%zmm1"
:
: );
/* 1 = pbmul[px] */
asm volatile("vpxorq %%zmm5, %%zmm1, %%zmm1\n\t"
/* 1 = db = DQ */
"vmovdqa64 %%zmm1, %0\n\t"
:
: "m" (dq[0]));
asm volatile("vpxorq %%zmm1, %%zmm0, %%zmm0\n\t"
"vmovdqa64 %%zmm0, %0"
:
: "m" (dp[0]));
bytes -= 64;
p += 64;
q += 64;
dp += 64;
dq += 64;
#endif
}
kernel_fpu_end();
}
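/*
 * Editorial note (sketch): AVX512BW has no per-byte shift, so the
 * vpsraw $4 / vpandq pairs above extract the high nibble of each byte
 * with a 16-bit arithmetic shift; the 0x0f mask in %zmm7 then discards
 * the bits dragged in from the neighbouring byte, leaving the per-byte
 * (x >> 4) & 0x0f values for the vpshufb lookups.
 */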
static void raid6_datap_recov_avx512(int disks, size_t bytes, int faila,
void **ptrs)
{
u8 *p, *q, *dq;
const u8 *qmul; /* Q multiplier table */
const u8 x0f = 0x0f;
p = (u8 *)ptrs[disks-2];
q = (u8 *)ptrs[disks-1];
/*
* Compute syndrome with zero for the missing data page
* Use the dead data page as temporary storage for delta q
*/
dq = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
ptrs[disks-1] = q;
/* Now, pick the proper data tables */
qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
kernel_fpu_begin();
asm volatile("vpbroadcastb %0, %%zmm7" : : "m" (x0f));
while (bytes) {
#ifdef CONFIG_X86_64
asm volatile("vmovdqa64 %0, %%zmm3\n\t"
"vmovdqa64 %1, %%zmm8\n\t"
"vpxorq %2, %%zmm3, %%zmm3\n\t"
"vpxorq %3, %%zmm8, %%zmm8"
:
: "m" (dq[0]), "m" (dq[64]), "m" (q[0]),
"m" (q[64]));
/*
* 3 = q[0] ^ dq[0]
* 8 = q[64] ^ dq[64]
*/
asm volatile("vbroadcasti64x2 %0, %%zmm0\n\t"
"vmovapd %%zmm0, %%zmm13\n\t"
"vbroadcasti64x2 %1, %%zmm1\n\t"
"vmovapd %%zmm1, %%zmm14"
:
: "m" (qmul[0]), "m" (qmul[16]));
asm volatile("vpsraw $4, %%zmm3, %%zmm6\n\t"
"vpsraw $4, %%zmm8, %%zmm12\n\t"
"vpandq %%zmm7, %%zmm3, %%zmm3\n\t"
"vpandq %%zmm7, %%zmm8, %%zmm8\n\t"
"vpandq %%zmm7, %%zmm6, %%zmm6\n\t"
"vpandq %%zmm7, %%zmm12, %%zmm12\n\t"
"vpshufb %%zmm3, %%zmm0, %%zmm0\n\t"
"vpshufb %%zmm8, %%zmm13, %%zmm13\n\t"
"vpshufb %%zmm6, %%zmm1, %%zmm1\n\t"
"vpshufb %%zmm12, %%zmm14, %%zmm14\n\t"
"vpxorq %%zmm0, %%zmm1, %%zmm1\n\t"
"vpxorq %%zmm13, %%zmm14, %%zmm14"
:
: );
/*
* 1 = qmul[q[0] ^ dq[0]]
* 14 = qmul[q[64] ^ dq[64]]
*/
asm volatile("vmovdqa64 %0, %%zmm2\n\t"
"vmovdqa64 %1, %%zmm12\n\t"
"vpxorq %%zmm1, %%zmm2, %%zmm2\n\t"
"vpxorq %%zmm14, %%zmm12, %%zmm12"
:
: "m" (p[0]), "m" (p[64]));
/*
* 2 = p[0] ^ qmul[q[0] ^ dq[0]]
* 12 = p[64] ^ qmul[q[64] ^ dq[64]]
*/
asm volatile("vmovdqa64 %%zmm1, %0\n\t"
"vmovdqa64 %%zmm14, %1\n\t"
"vmovdqa64 %%zmm2, %2\n\t"
"vmovdqa64 %%zmm12,%3"
:
: "m" (dq[0]), "m" (dq[64]), "m" (p[0]),
"m" (p[64]));
bytes -= 128;
p += 128;
q += 128;
dq += 128;
#else
asm volatile("vmovdqa64 %0, %%zmm3\n\t"
"vpxorq %1, %%zmm3, %%zmm3"
:
: "m" (dq[0]), "m" (q[0]));
/* 3 = q ^ dq */
asm volatile("vbroadcasti64x2 %0, %%zmm0\n\t"
"vbroadcasti64x2 %1, %%zmm1"
:
: "m" (qmul[0]), "m" (qmul[16]));
asm volatile("vpsraw $4, %%zmm3, %%zmm6\n\t"
"vpandq %%zmm7, %%zmm3, %%zmm3\n\t"
"vpandq %%zmm7, %%zmm6, %%zmm6\n\t"
"vpshufb %%zmm3, %%zmm0, %%zmm0\n\t"
"vpshufb %%zmm6, %%zmm1, %%zmm1\n\t"
"vpxorq %%zmm0, %%zmm1, %%zmm1"
:
: );
/* 1 = qmul[q ^ dq] */
asm volatile("vmovdqa64 %0, %%zmm2\n\t"
"vpxorq %%zmm1, %%zmm2, %%zmm2"
:
: "m" (p[0]));
/* 2 = p ^ qmul[q ^ dq] */
asm volatile("vmovdqa64 %%zmm1, %0\n\t"
"vmovdqa64 %%zmm2, %1"
:
: "m" (dq[0]), "m" (p[0]));
bytes -= 64;
p += 64;
q += 64;
dq += 64;
#endif
}
kernel_fpu_end();
}
const struct raid6_recov_calls raid6_recov_avx512 = {
.data2 = raid6_2data_recov_avx512,
.datap = raid6_datap_recov_avx512,
.valid = raid6_has_avx512,
#ifdef CONFIG_X86_64
.name = "avx512x2",
#else
.name = "avx512x1",
#endif
.priority = 3,
};
#else
#warning "your version of binutils lacks AVX512 support"
#endif
| linux-master | lib/raid6/recov_avx512.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Intel Corporation
*/
#include <linux/raid/pq.h>
#include "x86.h"
static int raid6_has_ssse3(void)
{
return boot_cpu_has(X86_FEATURE_XMM) &&
boot_cpu_has(X86_FEATURE_XMM2) &&
boot_cpu_has(X86_FEATURE_SSSE3);
}
static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
int failb, void **ptrs)
{
u8 *p, *q, *dp, *dq;
const u8 *pbmul; /* P multiplier table for B data */
const u8 *qmul; /* Q multiplier table (for both) */
static const u8 __aligned(16) x0f[16] = {
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};
p = (u8 *)ptrs[disks-2];
q = (u8 *)ptrs[disks-1];
/* Compute syndrome with zero for the missing data pages
Use the dead data pages as temporary storage for
delta p and delta q */
dp = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks-2] = dp;
dq = (u8 *)ptrs[failb];
ptrs[failb] = (void *)raid6_empty_zero_page;
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
ptrs[failb] = dq;
ptrs[disks-2] = p;
ptrs[disks-1] = q;
/* Now, pick the proper data tables */
pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
raid6_gfexp[failb]]];
kernel_fpu_begin();
asm volatile("movdqa %0,%%xmm7" : : "m" (x0f[0]));
#ifdef CONFIG_X86_64
asm volatile("movdqa %0,%%xmm6" : : "m" (qmul[0]));
asm volatile("movdqa %0,%%xmm14" : : "m" (pbmul[0]));
asm volatile("movdqa %0,%%xmm15" : : "m" (pbmul[16]));
#endif
/* Now do it... */
while (bytes) {
#ifdef CONFIG_X86_64
/* xmm6, xmm14, xmm15 */
asm volatile("movdqa %0,%%xmm1" : : "m" (q[0]));
asm volatile("movdqa %0,%%xmm9" : : "m" (q[16]));
asm volatile("movdqa %0,%%xmm0" : : "m" (p[0]));
asm volatile("movdqa %0,%%xmm8" : : "m" (p[16]));
asm volatile("pxor %0,%%xmm1" : : "m" (dq[0]));
asm volatile("pxor %0,%%xmm9" : : "m" (dq[16]));
asm volatile("pxor %0,%%xmm0" : : "m" (dp[0]));
asm volatile("pxor %0,%%xmm8" : : "m" (dp[16]));
/* xmm0/8 = px */
asm volatile("movdqa %xmm6,%xmm4");
asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));
asm volatile("movdqa %xmm6,%xmm12");
asm volatile("movdqa %xmm5,%xmm13");
asm volatile("movdqa %xmm1,%xmm3");
asm volatile("movdqa %xmm9,%xmm11");
asm volatile("movdqa %xmm0,%xmm2"); /* xmm2/10 = px */
asm volatile("movdqa %xmm8,%xmm10");
asm volatile("psraw $4,%xmm1");
asm volatile("psraw $4,%xmm9");
asm volatile("pand %xmm7,%xmm3");
asm volatile("pand %xmm7,%xmm11");
asm volatile("pand %xmm7,%xmm1");
asm volatile("pand %xmm7,%xmm9");
asm volatile("pshufb %xmm3,%xmm4");
asm volatile("pshufb %xmm11,%xmm12");
asm volatile("pshufb %xmm1,%xmm5");
asm volatile("pshufb %xmm9,%xmm13");
asm volatile("pxor %xmm4,%xmm5");
asm volatile("pxor %xmm12,%xmm13");
/* xmm5/13 = qx */
asm volatile("movdqa %xmm14,%xmm4");
asm volatile("movdqa %xmm15,%xmm1");
asm volatile("movdqa %xmm14,%xmm12");
asm volatile("movdqa %xmm15,%xmm9");
asm volatile("movdqa %xmm2,%xmm3");
asm volatile("movdqa %xmm10,%xmm11");
asm volatile("psraw $4,%xmm2");
asm volatile("psraw $4,%xmm10");
asm volatile("pand %xmm7,%xmm3");
asm volatile("pand %xmm7,%xmm11");
asm volatile("pand %xmm7,%xmm2");
asm volatile("pand %xmm7,%xmm10");
asm volatile("pshufb %xmm3,%xmm4");
asm volatile("pshufb %xmm11,%xmm12");
asm volatile("pshufb %xmm2,%xmm1");
asm volatile("pshufb %xmm10,%xmm9");
asm volatile("pxor %xmm4,%xmm1");
asm volatile("pxor %xmm12,%xmm9");
/* xmm1/9 = pbmul[px] */
asm volatile("pxor %xmm5,%xmm1");
asm volatile("pxor %xmm13,%xmm9");
/* xmm1/9 = db = DQ */
asm volatile("movdqa %%xmm1,%0" : "=m" (dq[0]));
asm volatile("movdqa %%xmm9,%0" : "=m" (dq[16]));
asm volatile("pxor %xmm1,%xmm0");
asm volatile("pxor %xmm9,%xmm8");
asm volatile("movdqa %%xmm0,%0" : "=m" (dp[0]));
asm volatile("movdqa %%xmm8,%0" : "=m" (dp[16]));
bytes -= 32;
p += 32;
q += 32;
dp += 32;
dq += 32;
#else
asm volatile("movdqa %0,%%xmm1" : : "m" (*q));
asm volatile("movdqa %0,%%xmm0" : : "m" (*p));
asm volatile("pxor %0,%%xmm1" : : "m" (*dq));
asm volatile("pxor %0,%%xmm0" : : "m" (*dp));
/* 1 = dq ^ q
* 0 = dp ^ p
*/
asm volatile("movdqa %0,%%xmm4" : : "m" (qmul[0]));
asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));
asm volatile("movdqa %xmm1,%xmm3");
asm volatile("psraw $4,%xmm1");
asm volatile("pand %xmm7,%xmm3");
asm volatile("pand %xmm7,%xmm1");
asm volatile("pshufb %xmm3,%xmm4");
asm volatile("pshufb %xmm1,%xmm5");
asm volatile("pxor %xmm4,%xmm5");
asm volatile("movdqa %xmm0,%xmm2"); /* xmm2 = px */
/* xmm5 = qx */
asm volatile("movdqa %0,%%xmm4" : : "m" (pbmul[0]));
asm volatile("movdqa %0,%%xmm1" : : "m" (pbmul[16]));
asm volatile("movdqa %xmm2,%xmm3");
asm volatile("psraw $4,%xmm2");
asm volatile("pand %xmm7,%xmm3");
asm volatile("pand %xmm7,%xmm2");
asm volatile("pshufb %xmm3,%xmm4");
asm volatile("pshufb %xmm2,%xmm1");
asm volatile("pxor %xmm4,%xmm1");
/* xmm1 = pbmul[px] */
asm volatile("pxor %xmm5,%xmm1");
/* xmm1 = db = DQ */
asm volatile("movdqa %%xmm1,%0" : "=m" (*dq));
asm volatile("pxor %xmm1,%xmm0");
asm volatile("movdqa %%xmm0,%0" : "=m" (*dp));
bytes -= 16;
p += 16;
q += 16;
dp += 16;
dq += 16;
#endif
}
kernel_fpu_end();
}
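/*
 * Scalar reference for raid6_2data_recov_ssse3() above -- an added sketch
 * in the spirit of the generic C recovery code.  px and qx are the
 * differences between the stored P/Q and the partial syndromes computed
 * with the two failed disks zeroed; pbmul and qmul stand for full
 * 256-entry tables for the constants selected by the table lookups above.
 */
static inline void raid6_2data_recov_scalar(size_t bytes, u8 *p, u8 *q,
					    u8 *dp, u8 *dq,
					    const u8 *pbmul, const u8 *qmul)
{
	while (bytes--) {
		u8 px = *p++ ^ *dp;		/* P ^ Pxy = Dx ^ Dy	*/
		u8 qx = qmul[*q++ ^ *dq];	/* (Q ^ Qxy) rescaled	*/
		u8 db = pbmul[px] ^ qx;		/* reconstructed Dy	*/

		*dq++ = db;
		*dp++ = db ^ px;		/* reconstructed Dx	*/
	}
}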
static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
void **ptrs)
{
u8 *p, *q, *dq;
const u8 *qmul; /* Q multiplier table */
static const u8 __aligned(16) x0f[16] = {
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};
p = (u8 *)ptrs[disks-2];
q = (u8 *)ptrs[disks-1];
/* Compute syndrome with zero for the missing data page
Use the dead data page as temporary storage for delta q */
dq = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
ptrs[disks-1] = q;
/* Now, pick the proper data tables */
qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
kernel_fpu_begin();
asm volatile("movdqa %0, %%xmm7" : : "m" (x0f[0]));
while (bytes) {
#ifdef CONFIG_X86_64
asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
asm volatile("movdqa %0, %%xmm4" : : "m" (dq[16]));
asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));
/* xmm3 = q[0] ^ dq[0] */
asm volatile("pxor %0, %%xmm4" : : "m" (q[16]));
asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));
/* xmm4 = q[16] ^ dq[16] */
asm volatile("movdqa %xmm3, %xmm6");
asm volatile("movdqa %xmm4, %xmm8");
/* xmm4 = xmm8 = q[16] ^ dq[16] */
asm volatile("psraw $4, %xmm3");
asm volatile("pand %xmm7, %xmm6");
asm volatile("pand %xmm7, %xmm3");
asm volatile("pshufb %xmm6, %xmm0");
asm volatile("pshufb %xmm3, %xmm1");
asm volatile("movdqa %0, %%xmm10" : : "m" (qmul[0]));
asm volatile("pxor %xmm0, %xmm1");
asm volatile("movdqa %0, %%xmm11" : : "m" (qmul[16]));
/* xmm1 = qmul[q[0] ^ dq[0]] */
asm volatile("psraw $4, %xmm4");
asm volatile("pand %xmm7, %xmm8");
asm volatile("pand %xmm7, %xmm4");
asm volatile("pshufb %xmm8, %xmm10");
asm volatile("pshufb %xmm4, %xmm11");
asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
asm volatile("pxor %xmm10, %xmm11");
asm volatile("movdqa %0, %%xmm12" : : "m" (p[16]));
/* xmm11 = qmul[q[16] ^ dq[16]] */
asm volatile("pxor %xmm1, %xmm2");
/* xmm2 = p[0] ^ qmul[q[0] ^ dq[0]] */
asm volatile("pxor %xmm11, %xmm12");
/* xmm12 = p[16] ^ qmul[q[16] ^ dq[16]] */
asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
asm volatile("movdqa %%xmm11, %0" : "=m" (dq[16]));
asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));
asm volatile("movdqa %%xmm12, %0" : "=m" (p[16]));
bytes -= 32;
p += 32;
q += 32;
dq += 32;
#else
asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));
asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));
/* xmm3 = *q ^ *dq */
asm volatile("movdqa %xmm3, %xmm6");
asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
asm volatile("psraw $4, %xmm3");
asm volatile("pand %xmm7, %xmm6");
asm volatile("pand %xmm7, %xmm3");
asm volatile("pshufb %xmm6, %xmm0");
asm volatile("pshufb %xmm3, %xmm1");
asm volatile("pxor %xmm0, %xmm1");
/* xmm1 = qmul[*q ^ *dq] */
asm volatile("pxor %xmm1, %xmm2");
/* xmm2 = *p ^ qmul[*q ^ *dq] */
asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));
bytes -= 16;
p += 16;
q += 16;
dq += 16;
#endif
}
kernel_fpu_end();
}
const struct raid6_recov_calls raid6_recov_ssse3 = {
.data2 = raid6_2data_recov_ssse3,
.datap = raid6_datap_recov_ssse3,
.valid = raid6_has_ssse3,
#ifdef CONFIG_X86_64
.name = "ssse3x2",
#else
.name = "ssse3x1",
#endif
.priority = 1,
};
| linux-master | lib/raid6/recov_ssse3.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright 2002 H. Peter Anvin - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* raid6/sse2.c
*
* SSE-2 implementation of RAID-6 syndrome functions
*
*/
#include <linux/raid/pq.h>
#include "x86.h"
static const struct raid6_sse_constants {
u64 x1d[2];
} raid6_sse_constants __attribute__((aligned(16))) = {
{ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL },
};
static int raid6_have_sse2(void)
{
/* Not really boot_cpu but "all_cpus" */
return boot_cpu_has(X86_FEATURE_MMX) &&
boot_cpu_has(X86_FEATURE_FXSR) &&
boot_cpu_has(X86_FEATURE_XMM) &&
boot_cpu_has(X86_FEATURE_XMM2);
}
/*
* Plain SSE2 implementation
*/
static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
for ( d = 0 ; d < bytes ; d += 16 ) {
asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */
asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0-1][d]));
for ( z = z0-2 ; z >= 0 ; z-- ) {
asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
asm volatile("pcmpgtb %xmm4,%xmm5");
asm volatile("paddb %xmm4,%xmm4");
asm volatile("pand %xmm0,%xmm5");
asm volatile("pxor %xmm5,%xmm4");
asm volatile("pxor %xmm5,%xmm5");
asm volatile("pxor %xmm6,%xmm2");
asm volatile("pxor %xmm6,%xmm4");
asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z][d]));
}
asm volatile("pcmpgtb %xmm4,%xmm5");
asm volatile("paddb %xmm4,%xmm4");
asm volatile("pand %xmm0,%xmm5");
asm volatile("pxor %xmm5,%xmm4");
asm volatile("pxor %xmm5,%xmm5");
asm volatile("pxor %xmm6,%xmm2");
asm volatile("pxor %xmm6,%xmm4");
asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
asm volatile("pxor %xmm2,%xmm2");
asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
asm volatile("pxor %xmm4,%xmm4");
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
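/*
 * Added note on the four-instruction sequence in the loop above: with
 * xmm0 = 0x1d repeated and xmm5 zeroed, the group
 *
 *	pcmpgtb %xmm4,%xmm5	xmm5 = 0xff where a byte of xmm4 has its
 *				top (sign) bit set, else 0x00
 *	paddb   %xmm4,%xmm4	byte-wise shift left by one
 *	pand    %xmm0,%xmm5	keep 0x1d only for bytes that overflowed
 *	pxor    %xmm5,%xmm4	polynomial reduction
 *
 * is a byte-wise GF(2^8) multiply by g = 2, i.e. the scalar
 * (v << 1) ^ ((v & 0x80) ? 0x1d : 0).  The loop therefore computes P as
 * a plain XOR of all data disks and Q as the Horner evaluation
 * D_z0*g^z0 ^ ... ^ D_1*g ^ D_0.
 */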
static void raid6_sse21_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
p = dptr[disks-2]; /* XOR parity */
q = dptr[disks-1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
for ( d = 0 ; d < bytes ; d += 16 ) {
asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
asm volatile("pxor %xmm4,%xmm2");
/* P/Q data pages */
for ( z = z0-1 ; z >= start ; z-- ) {
asm volatile("pxor %xmm5,%xmm5");
asm volatile("pcmpgtb %xmm4,%xmm5");
asm volatile("paddb %xmm4,%xmm4");
asm volatile("pand %xmm0,%xmm5");
asm volatile("pxor %xmm5,%xmm4");
asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
asm volatile("pxor %xmm5,%xmm2");
asm volatile("pxor %xmm5,%xmm4");
}
/* P/Q left side optimization */
for ( z = start-1 ; z >= 0 ; z-- ) {
asm volatile("pxor %xmm5,%xmm5");
asm volatile("pcmpgtb %xmm4,%xmm5");
asm volatile("paddb %xmm4,%xmm4");
asm volatile("pand %xmm0,%xmm5");
asm volatile("pxor %xmm5,%xmm4");
}
asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
/* Don't use movntdq for r/w memory area < cache line */
asm volatile("movdqa %%xmm4,%0" : "=m" (q[d]));
asm volatile("movdqa %%xmm2,%0" : "=m" (p[d]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
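/*
 * Added note on the loop structure above: only disks in [start, stop]
 * are being rewritten, so the first inner loop folds their contents into
 * both P and Q, while the "left side" loop keeps multiplying the
 * accumulated Q term by g = 2 without touching data -- the partial
 * syndrome still has to be scaled by g^start to land at the right power
 * of g.  P needs no such pass because every disk contributes to it with
 * coefficient 1.  Callers run this once over the old contents and once
 * over the new so that the unchanged P/Q contributions cancel.
 */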
const struct raid6_calls raid6_sse2x1 = {
raid6_sse21_gen_syndrome,
raid6_sse21_xor_syndrome,
raid6_have_sse2,
"sse2x1",
1 /* Has cache hints */
};
/*
* Unrolled-by-2 SSE2 implementation
*/
static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */
/* We uniformly assume a single prefetch covers at least 32 bytes */
for ( d = 0 ; d < bytes ; d += 32 ) {
asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
asm volatile("movdqa %0,%%xmm3" : : "m" (dptr[z0][d+16])); /* P[1] */
asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */
asm volatile("movdqa %xmm3,%xmm6"); /* Q[1] */
for ( z = z0-1 ; z >= 0 ; z-- ) {
asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
asm volatile("pcmpgtb %xmm4,%xmm5");
asm volatile("pcmpgtb %xmm6,%xmm7");
asm volatile("paddb %xmm4,%xmm4");
asm volatile("paddb %xmm6,%xmm6");
asm volatile("pand %xmm0,%xmm5");
asm volatile("pand %xmm0,%xmm7");
asm volatile("pxor %xmm5,%xmm4");
asm volatile("pxor %xmm7,%xmm6");
asm volatile("movdqa %0,%%xmm5" : : "m" (dptr[z][d]));
asm volatile("movdqa %0,%%xmm7" : : "m" (dptr[z][d+16]));
asm volatile("pxor %xmm5,%xmm2");
asm volatile("pxor %xmm7,%xmm3");
asm volatile("pxor %xmm5,%xmm4");
asm volatile("pxor %xmm7,%xmm6");
asm volatile("pxor %xmm5,%xmm5");
asm volatile("pxor %xmm7,%xmm7");
}
asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
p = dptr[disks-2]; /* XOR parity */
q = dptr[disks-1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
for ( d = 0 ; d < bytes ; d += 32 ) {
asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d+16]));
asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
asm volatile("pxor %xmm4,%xmm2");
asm volatile("pxor %xmm6,%xmm3");
/* P/Q data pages */
for ( z = z0-1 ; z >= start ; z-- ) {
asm volatile("pxor %xmm5,%xmm5");
asm volatile("pxor %xmm7,%xmm7");
asm volatile("pcmpgtb %xmm4,%xmm5");
asm volatile("pcmpgtb %xmm6,%xmm7");
asm volatile("paddb %xmm4,%xmm4");
asm volatile("paddb %xmm6,%xmm6");
asm volatile("pand %xmm0,%xmm5");
asm volatile("pand %xmm0,%xmm7");
asm volatile("pxor %xmm5,%xmm4");
asm volatile("pxor %xmm7,%xmm6");
asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
asm volatile("pxor %xmm5,%xmm2");
asm volatile("pxor %xmm7,%xmm3");
asm volatile("pxor %xmm5,%xmm4");
asm volatile("pxor %xmm7,%xmm6");
}
/* P/Q left side optimization */
for ( z = start-1 ; z >= 0 ; z-- ) {
asm volatile("pxor %xmm5,%xmm5");
asm volatile("pxor %xmm7,%xmm7");
asm volatile("pcmpgtb %xmm4,%xmm5");
asm volatile("pcmpgtb %xmm6,%xmm7");
asm volatile("paddb %xmm4,%xmm4");
asm volatile("paddb %xmm6,%xmm6");
asm volatile("pand %xmm0,%xmm5");
asm volatile("pand %xmm0,%xmm7");
asm volatile("pxor %xmm5,%xmm4");
asm volatile("pxor %xmm7,%xmm6");
}
asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
/* Don't use movntdq for r/w memory area < cache line */
asm volatile("movdqa %%xmm4,%0" : "=m" (q[d]));
asm volatile("movdqa %%xmm6,%0" : "=m" (q[d+16]));
asm volatile("movdqa %%xmm2,%0" : "=m" (p[d]));
asm volatile("movdqa %%xmm3,%0" : "=m" (p[d+16]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
const struct raid6_calls raid6_sse2x2 = {
raid6_sse22_gen_syndrome,
raid6_sse22_xor_syndrome,
raid6_have_sse2,
"sse2x2",
1 /* Has cache hints */
};
#ifdef CONFIG_X86_64
/*
* Unrolled-by-4 SSE2 implementation
*/
static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
q = dptr[z0+2]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));
asm volatile("pxor %xmm2,%xmm2"); /* P[0] */
asm volatile("pxor %xmm3,%xmm3"); /* P[1] */
asm volatile("pxor %xmm4,%xmm4"); /* Q[0] */
asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
asm volatile("pxor %xmm6,%xmm6"); /* Q[1] */
asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */
asm volatile("pxor %xmm10,%xmm10"); /* P[2] */
asm volatile("pxor %xmm11,%xmm11"); /* P[3] */
asm volatile("pxor %xmm12,%xmm12"); /* Q[2] */
asm volatile("pxor %xmm13,%xmm13"); /* Zero temp */
asm volatile("pxor %xmm14,%xmm14"); /* Q[3] */
asm volatile("pxor %xmm15,%xmm15"); /* Zero temp */
for ( d = 0 ; d < bytes ; d += 64 ) {
for ( z = z0 ; z >= 0 ; z-- ) {
/* The second prefetch seems to improve performance... */
asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
asm volatile("pcmpgtb %xmm4,%xmm5");
asm volatile("pcmpgtb %xmm6,%xmm7");
asm volatile("pcmpgtb %xmm12,%xmm13");
asm volatile("pcmpgtb %xmm14,%xmm15");
asm volatile("paddb %xmm4,%xmm4");
asm volatile("paddb %xmm6,%xmm6");
asm volatile("paddb %xmm12,%xmm12");
asm volatile("paddb %xmm14,%xmm14");
asm volatile("pand %xmm0,%xmm5");
asm volatile("pand %xmm0,%xmm7");
asm volatile("pand %xmm0,%xmm13");
asm volatile("pand %xmm0,%xmm15");
asm volatile("pxor %xmm5,%xmm4");
asm volatile("pxor %xmm7,%xmm6");
asm volatile("pxor %xmm13,%xmm12");
asm volatile("pxor %xmm15,%xmm14");
asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
asm volatile("pxor %xmm5,%xmm2");
asm volatile("pxor %xmm7,%xmm3");
asm volatile("pxor %xmm13,%xmm10");
asm volatile("pxor %xmm15,%xmm11");
asm volatile("pxor %xmm5,%xmm4");
asm volatile("pxor %xmm7,%xmm6");
asm volatile("pxor %xmm13,%xmm12");
asm volatile("pxor %xmm15,%xmm14");
asm volatile("pxor %xmm5,%xmm5");
asm volatile("pxor %xmm7,%xmm7");
asm volatile("pxor %xmm13,%xmm13");
asm volatile("pxor %xmm15,%xmm15");
}
asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
asm volatile("pxor %xmm2,%xmm2");
asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
asm volatile("pxor %xmm3,%xmm3");
asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
asm volatile("pxor %xmm10,%xmm10");
asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
asm volatile("pxor %xmm11,%xmm11");
asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
asm volatile("pxor %xmm4,%xmm4");
asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
asm volatile("pxor %xmm6,%xmm6");
asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
asm volatile("pxor %xmm12,%xmm12");
asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
asm volatile("pxor %xmm14,%xmm14");
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
{
u8 **dptr = (u8 **)ptrs;
u8 *p, *q;
int d, z, z0;
z0 = stop; /* P/Q right side optimization */
p = dptr[disks-2]; /* XOR parity */
q = dptr[disks-1]; /* RS syndrome */
kernel_fpu_begin();
asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));
for ( d = 0 ; d < bytes ; d += 64 ) {
asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d+16]));
asm volatile("movdqa %0,%%xmm12" :: "m" (dptr[z0][d+32]));
asm volatile("movdqa %0,%%xmm14" :: "m" (dptr[z0][d+48]));
asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
asm volatile("movdqa %0,%%xmm10" : : "m" (p[d+32]));
asm volatile("movdqa %0,%%xmm11" : : "m" (p[d+48]));
asm volatile("pxor %xmm4,%xmm2");
asm volatile("pxor %xmm6,%xmm3");
asm volatile("pxor %xmm12,%xmm10");
asm volatile("pxor %xmm14,%xmm11");
/* P/Q data pages */
for ( z = z0-1 ; z >= start ; z-- ) {
asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
asm volatile("pxor %xmm5,%xmm5");
asm volatile("pxor %xmm7,%xmm7");
asm volatile("pxor %xmm13,%xmm13");
asm volatile("pxor %xmm15,%xmm15");
asm volatile("pcmpgtb %xmm4,%xmm5");
asm volatile("pcmpgtb %xmm6,%xmm7");
asm volatile("pcmpgtb %xmm12,%xmm13");
asm volatile("pcmpgtb %xmm14,%xmm15");
asm volatile("paddb %xmm4,%xmm4");
asm volatile("paddb %xmm6,%xmm6");
asm volatile("paddb %xmm12,%xmm12");
asm volatile("paddb %xmm14,%xmm14");
asm volatile("pand %xmm0,%xmm5");
asm volatile("pand %xmm0,%xmm7");
asm volatile("pand %xmm0,%xmm13");
asm volatile("pand %xmm0,%xmm15");
asm volatile("pxor %xmm5,%xmm4");
asm volatile("pxor %xmm7,%xmm6");
asm volatile("pxor %xmm13,%xmm12");
asm volatile("pxor %xmm15,%xmm14");
asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
asm volatile("pxor %xmm5,%xmm2");
asm volatile("pxor %xmm7,%xmm3");
asm volatile("pxor %xmm13,%xmm10");
asm volatile("pxor %xmm15,%xmm11");
asm volatile("pxor %xmm5,%xmm4");
asm volatile("pxor %xmm7,%xmm6");
asm volatile("pxor %xmm13,%xmm12");
asm volatile("pxor %xmm15,%xmm14");
}
asm volatile("prefetchnta %0" :: "m" (q[d]));
asm volatile("prefetchnta %0" :: "m" (q[d+32]));
/* P/Q left side optimization */
for ( z = start-1 ; z >= 0 ; z-- ) {
asm volatile("pxor %xmm5,%xmm5");
asm volatile("pxor %xmm7,%xmm7");
asm volatile("pxor %xmm13,%xmm13");
asm volatile("pxor %xmm15,%xmm15");
asm volatile("pcmpgtb %xmm4,%xmm5");
asm volatile("pcmpgtb %xmm6,%xmm7");
asm volatile("pcmpgtb %xmm12,%xmm13");
asm volatile("pcmpgtb %xmm14,%xmm15");
asm volatile("paddb %xmm4,%xmm4");
asm volatile("paddb %xmm6,%xmm6");
asm volatile("paddb %xmm12,%xmm12");
asm volatile("paddb %xmm14,%xmm14");
asm volatile("pand %xmm0,%xmm5");
asm volatile("pand %xmm0,%xmm7");
asm volatile("pand %xmm0,%xmm13");
asm volatile("pand %xmm0,%xmm15");
asm volatile("pxor %xmm5,%xmm4");
asm volatile("pxor %xmm7,%xmm6");
asm volatile("pxor %xmm13,%xmm12");
asm volatile("pxor %xmm15,%xmm14");
}
asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
asm volatile("pxor %0,%%xmm12" : : "m" (q[d+32]));
asm volatile("pxor %0,%%xmm14" : : "m" (q[d+48]));
asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
}
asm volatile("sfence" : : : "memory");
kernel_fpu_end();
}
const struct raid6_calls raid6_sse2x4 = {
raid6_sse24_gen_syndrome,
raid6_sse24_xor_syndrome,
raid6_have_sse2,
"sse2x4",
1 /* Has cache hints */
};
#endif /* CONFIG_X86_64 */
| linux-master | lib/raid6/sse2.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright 2002-2007 H. Peter Anvin - All Rights Reserved
*
* ----------------------------------------------------------------------- */
/*
* raid6test.c
*
* Test RAID-6 recovery with various algorithms
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/raid/pq.h>
#define NDISKS 16 /* Including P and Q */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
char *dataptrs[NDISKS];
char data[NDISKS][PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
char recovi[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
char recovj[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
static void makedata(int start, int stop)
{
int i, j;
for (i = start; i <= stop; i++) {
for (j = 0; j < PAGE_SIZE; j++)
data[i][j] = rand();
dataptrs[i] = data[i];
}
}
static char disk_type(int d)
{
switch (d) {
case NDISKS-2:
return 'P';
case NDISKS-1:
return 'Q';
default:
return 'D';
}
}
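/*
 * Dispatch helper used by test_disks() below.  Its definition is missing
 * from this copy of the file; the body here is reconstructed from the
 * call site and the upstream lib/raid6/test/test.c, and should be read
 * as a sketch of that helper rather than a verbatim copy.
 */
static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
			     void **ptrs)
{
	if (faila > failb) {
		/* faila must be the first failed index */
		int tmp = faila;
		faila = failb;
		failb = tmp;
	}
	if (failb == disks-1) {
		if (faila == disks-2) {
			/* P+Q failure: just rebuild the syndrome */
			raid6_call.gen_syndrome(disks, bytes, ptrs);
		} else {
			/* data+Q failure: reconstruct the data disk from P
			   (RAID-5 style), then rebuild Q -- not implemented,
			   and test_disks() skips checking this case */
		}
	} else {
		if (failb == disks-2) {
			/* data+P failure */
			raid6_datap_recov(disks, bytes, faila, ptrs);
		} else {
			/* data+data failure */
			raid6_2data_recov(disks, bytes, faila, failb, ptrs);
		}
	}
}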
static int test_disks(int i, int j)
{
int erra, errb;
memset(recovi, 0xf0, PAGE_SIZE);
memset(recovj, 0xba, PAGE_SIZE);
dataptrs[i] = recovi;
dataptrs[j] = recovj;
raid6_dual_recov(NDISKS, PAGE_SIZE, i, j, (void **)&dataptrs);
erra = memcmp(data[i], recovi, PAGE_SIZE);
errb = memcmp(data[j], recovj, PAGE_SIZE);
if (i < NDISKS-2 && j == NDISKS-1) {
/* We don't implement the DQ failure scenario, since it's
equivalent to a RAID-5 failure (XOR, then recompute Q) */
erra = errb = 0;
} else {
printf("algo=%-8s faila=%3d(%c) failb=%3d(%c) %s\n",
raid6_call.name,
i, disk_type(i),
j, disk_type(j),
(!erra && !errb) ? "OK" :
!erra ? "ERRB" :
!errb ? "ERRA" : "ERRAB");
}
dataptrs[i] = data[i];
dataptrs[j] = data[j];
return erra || errb;
}
int main(int argc, char *argv[])
{
const struct raid6_calls *const *algo;
const struct raid6_recov_calls *const *ra;
int i, j, p1, p2;
int err = 0;
makedata(0, NDISKS-1);
for (ra = raid6_recov_algos; *ra; ra++) {
if ((*ra)->valid && !(*ra)->valid())
continue;
raid6_2data_recov = (*ra)->data2;
raid6_datap_recov = (*ra)->datap;
printf("using recovery %s\n", (*ra)->name);
for (algo = raid6_algos; *algo; algo++) {
if ((*algo)->valid && !(*algo)->valid())
continue;
raid6_call = **algo;
/* Nuke syndromes */
memset(data[NDISKS-2], 0xee, 2*PAGE_SIZE);
/* Generate assumed good syndrome */
raid6_call.gen_syndrome(NDISKS, PAGE_SIZE,
(void **)&dataptrs);
for (i = 0; i < NDISKS-1; i++)
for (j = i+1; j < NDISKS; j++)
err += test_disks(i, j);
if (!raid6_call.xor_syndrome)
continue;
for (p1 = 0; p1 < NDISKS-2; p1++)
for (p2 = p1; p2 < NDISKS-2; p2++) {
/* Simulate rmw run */
raid6_call.xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
(void **)&dataptrs);
makedata(p1, p2);
raid6_call.xor_syndrome(NDISKS, p1, p2, PAGE_SIZE,
(void **)&dataptrs);
for (i = 0; i < NDISKS-1; i++)
for (j = i+1; j < NDISKS; j++)
err += test_disks(i, j);
}
}
printf("\n");
}
printf("\n");
/* Pick the best algorithm test */
raid6_select_algo();
if (err)
printf("\n*** ERRORS FOUND ***\n");
return err;
}
| linux-master | lib/raid6/test/test.c |
/* +++ deflate.c */
/* deflate.c -- compress data using the deflation algorithm
* Copyright (C) 1995-1996 Jean-loup Gailly.
* For conditions of distribution and use, see copyright notice in zlib.h
*/
/*
* ALGORITHM
*
* The "deflation" process depends on being able to identify portions
* of the input text which are identical to earlier input (within a
* sliding window trailing behind the input currently being processed).
*
* The most straightforward technique turns out to be the fastest for
* most input files: try all possible matches and select the longest.
* The key feature of this algorithm is that insertions into the string
* dictionary are very simple and thus fast, and deletions are avoided
* completely. Insertions are performed at each input character, whereas
* string matches are performed only when the previous match ends. So it
* is preferable to spend more time in matches to allow very fast string
* insertions and avoid deletions. The matching algorithm for small
* strings is inspired from that of Rabin & Karp. A brute force approach
* is used to find longer strings when a small match has been found.
* A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
* (by Leonid Broukhis).
* A previous version of this file used a more sophisticated algorithm
* (by Fiala and Greene) which is guaranteed to run in linear amortized
* time, but has a larger average cost, uses more memory and is patented.
* However the F&G algorithm may be faster for some highly redundant
* files if the parameter max_chain_length (described below) is too large.
*
* ACKNOWLEDGEMENTS
*
* The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
* I found it in 'freeze' written by Leonid Broukhis.
* Thanks to many people for bug reports and testing.
*
* REFERENCES
*
* Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
* Available in ftp://ds.internic.net/rfc/rfc1951.txt
*
* A description of the Rabin and Karp algorithm is given in the book
* "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
*
* Fiala,E.R., and Greene,D.H.
* Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-505
*
*/
#include <linux/module.h>
#include <linux/zutil.h>
#include "defutil.h"
/* architecture-specific bits */
#ifdef CONFIG_ZLIB_DFLTCC
# include "../zlib_dfltcc/dfltcc_deflate.h"
#else
#define DEFLATE_RESET_HOOK(strm) do {} while (0)
#define DEFLATE_HOOK(strm, flush, bstate) 0
#define DEFLATE_NEED_CHECKSUM(strm) 1
#define DEFLATE_DFLTCC_ENABLED() 0
#endif
/* ===========================================================================
* Function prototypes.
*/
typedef block_state (*compress_func) (deflate_state *s, int flush);
/* Compression function. Returns the block state after the call. */
static void fill_window (deflate_state *s);
static block_state deflate_stored (deflate_state *s, int flush);
static block_state deflate_fast (deflate_state *s, int flush);
static block_state deflate_slow (deflate_state *s, int flush);
static void lm_init (deflate_state *s);
static void putShortMSB (deflate_state *s, uInt b);
static int read_buf (z_streamp strm, Byte *buf, unsigned size);
static uInt longest_match (deflate_state *s, IPos cur_match);
#ifdef DEBUG_ZLIB
static void check_match (deflate_state *s, IPos start, IPos match,
int length);
#endif
/* ===========================================================================
* Local data
*/
#define NIL 0
/* Tail of hash chains */
#ifndef TOO_FAR
# define TOO_FAR 4096
#endif
/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
/* Minimum amount of lookahead, except at the end of the input file.
* See deflate.c for comments about the MIN_MATCH+1.
*/
/* Workspace to be allocated for deflate processing */
typedef struct deflate_workspace {
/* State memory for the deflator */
deflate_state deflate_memory;
#ifdef CONFIG_ZLIB_DFLTCC
/* State memory for s390 hardware deflate */
struct dfltcc_deflate_state dfltcc_memory;
#endif
Byte *window_memory;
Pos *prev_memory;
Pos *head_memory;
char *overlay_memory;
} deflate_workspace;
#ifdef CONFIG_ZLIB_DFLTCC
/* dfltcc_state must be doubleword aligned for DFLTCC call */
static_assert(offsetof(struct deflate_workspace, dfltcc_memory) % 8 == 0);
#endif
/* Values for max_lazy_match, good_match and max_chain_length, depending on
* the desired pack level (0..9). The values given below have been tuned to
* exclude worst case performance for pathological files. Better values may be
* found for specific files.
*/
typedef struct config_s {
ush good_length; /* reduce lazy search above this match length */
ush max_lazy; /* do not perform lazy search above this match length */
ush nice_length; /* quit search above this match length */
ush max_chain;
compress_func func;
} config;
static const config configuration_table[10] = {
/* good lazy nice chain */
/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
/* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */
/* 2 */ {4, 5, 16, 8, deflate_fast},
/* 3 */ {4, 6, 32, 32, deflate_fast},
/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
/* 5 */ {8, 16, 32, 32, deflate_slow},
/* 6 */ {8, 16, 128, 128, deflate_slow},
/* 7 */ {8, 32, 128, 256, deflate_slow},
/* 8 */ {32, 128, 258, 1024, deflate_slow},
/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */
/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
* For deflate_fast() (levels <= 3) good is ignored and lazy has a different
* meaning.
*/
#define EQUAL 0
/* result of memcmp for equal strings */
/* ===========================================================================
* Update a hash value with the given input byte
* IN assertion: all calls to UPDATE_HASH are made with consecutive
* input characters, so that a running hash key can be computed from the
* previous key instead of complete recalculation each time.
*/
#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
/* ===========================================================================
* Insert string str in the dictionary and set match_head to the previous head
* of the hash chain (the most recent string with same hash key). Return
* the previous length of the hash chain.
* IN assertion: all calls to INSERT_STRING are made with consecutive
* input characters and the first MIN_MATCH bytes of str are valid
* (except for the last MIN_MATCH-1 bytes of the input file).
*/
#define INSERT_STRING(s, str, match_head) \
(UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \
s->head[s->ins_h] = (Pos)(str))
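/*
 * Added illustration: after inserting positions p1 < p2 < p3 that share
 * a hash key h, head[h] == p3, prev[p3 & w_mask] == p2 and
 * prev[p2 & w_mask] == p1, so longest_match() walks candidates
 * newest-first via cur_match = prev[cur_match & wmask] until it drops
 * below its distance limit or the chain budget runs out.
 */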
/* ===========================================================================
* Initialize the hash table (avoiding 64K overflow for 16 bit systems).
* prev[] will be initialized on the fly.
*/
#define CLEAR_HASH(s) \
s->head[s->hash_size-1] = NIL; \
memset((char *)s->head, 0, (unsigned)(s->hash_size-1)*sizeof(*s->head));
/* ========================================================================= */
int zlib_deflateInit2(
z_streamp strm,
int level,
int method,
int windowBits,
int memLevel,
int strategy
)
{
deflate_state *s;
int noheader = 0;
deflate_workspace *mem;
char *next;
ush *overlay;
/* We overlay pending_buf and d_buf+l_buf. This works since the average
* output size for (length,distance) codes is <= 24 bits.
*/
if (strm == NULL) return Z_STREAM_ERROR;
strm->msg = NULL;
if (level == Z_DEFAULT_COMPRESSION) level = 6;
mem = (deflate_workspace *) strm->workspace;
if (windowBits < 0) { /* undocumented feature: suppress zlib header */
noheader = 1;
windowBits = -windowBits;
}
if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
windowBits < 9 || windowBits > 15 || level < 0 || level > 9 ||
strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
return Z_STREAM_ERROR;
}
/*
* Direct the workspace's pointers to the chunks that were allocated
* along with the deflate_workspace struct.
*/
next = (char *) mem;
next += sizeof(*mem);
#ifdef CONFIG_ZLIB_DFLTCC
/*
* DFLTCC requires the window to be page aligned.
* Thus, we overallocate and take the aligned portion of the buffer.
*/
mem->window_memory = (Byte *) PTR_ALIGN(next, PAGE_SIZE);
#else
mem->window_memory = (Byte *) next;
#endif
next += zlib_deflate_window_memsize(windowBits);
mem->prev_memory = (Pos *) next;
next += zlib_deflate_prev_memsize(windowBits);
mem->head_memory = (Pos *) next;
next += zlib_deflate_head_memsize(memLevel);
mem->overlay_memory = next;
s = (deflate_state *) &(mem->deflate_memory);
strm->state = (struct internal_state *)s;
s->strm = strm;
s->noheader = noheader;
s->w_bits = windowBits;
s->w_size = 1 << s->w_bits;
s->w_mask = s->w_size - 1;
s->hash_bits = memLevel + 7;
s->hash_size = 1 << s->hash_bits;
s->hash_mask = s->hash_size - 1;
s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
s->window = (Byte *) mem->window_memory;
s->prev = (Pos *) mem->prev_memory;
s->head = (Pos *) mem->head_memory;
s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
overlay = (ush *) mem->overlay_memory;
s->pending_buf = (uch *) overlay;
s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
s->level = level;
s->strategy = strategy;
s->method = (Byte)method;
return zlib_deflateReset(strm);
}
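/*
 * Typical call sequence for this workspace-based API -- an added sketch,
 * not part of the upstream file.  The caller owns the workspace and must
 * size it with zlib_deflate_workspacesize() before zlib_deflateInit2();
 * the helper name and the use of vmalloc()/vfree() (<linux/vmalloc.h>)
 * are this sketch's assumptions, not something defined here.
 */
static inline int deflate_buffer_sketch(const void *src, unsigned int slen,
					void *dst, unsigned int *dlen)
{
	z_stream strm;
	int ret;

	strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
							    MAX_MEM_LEVEL));
	if (!strm.workspace)
		return -ENOMEM;
	ret = zlib_deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
				MAX_WBITS, MAX_MEM_LEVEL,
				Z_DEFAULT_STRATEGY);
	if (ret != Z_OK)
		goto out;
	strm.next_in = src;
	strm.avail_in = slen;
	strm.next_out = dst;
	strm.avail_out = *dlen;
	ret = zlib_deflate(&strm, Z_FINISH);	/* one-shot compress */
	if (ret == Z_STREAM_END) {
		*dlen = strm.total_out;
		ret = zlib_deflateEnd(&strm);
	}
out:
	vfree(strm.workspace);
	return ret == Z_OK || ret == Z_STREAM_END ? 0 : -EINVAL;
}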
/* ========================================================================= */
int zlib_deflateReset(
z_streamp strm
)
{
deflate_state *s;
if (strm == NULL || strm->state == NULL)
return Z_STREAM_ERROR;
strm->total_in = strm->total_out = 0;
strm->msg = NULL;
strm->data_type = Z_UNKNOWN;
s = (deflate_state *)strm->state;
s->pending = 0;
s->pending_out = s->pending_buf;
if (s->noheader < 0) {
s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */
}
s->status = s->noheader ? BUSY_STATE : INIT_STATE;
strm->adler = 1;
s->last_flush = Z_NO_FLUSH;
zlib_tr_init(s);
lm_init(s);
DEFLATE_RESET_HOOK(strm);
return Z_OK;
}
/* =========================================================================
* Put a short in the pending buffer. The 16-bit value is put in MSB order.
* IN assertion: the stream state is correct and there is enough room in
* pending_buf.
*/
static void putShortMSB(
deflate_state *s,
uInt b
)
{
put_byte(s, (Byte)(b >> 8));
put_byte(s, (Byte)(b & 0xff));
}
/* ========================================================================= */
int zlib_deflate(
z_streamp strm,
int flush
)
{
int old_flush; /* value of flush param for previous deflate call */
deflate_state *s;
if (strm == NULL || strm->state == NULL ||
flush > Z_FINISH || flush < 0) {
return Z_STREAM_ERROR;
}
s = (deflate_state *) strm->state;
if ((strm->next_in == NULL && strm->avail_in != 0) ||
(s->status == FINISH_STATE && flush != Z_FINISH)) {
return Z_STREAM_ERROR;
}
if (strm->avail_out == 0) return Z_BUF_ERROR;
s->strm = strm; /* just in case */
old_flush = s->last_flush;
s->last_flush = flush;
/* Write the zlib header */
if (s->status == INIT_STATE) {
uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
uInt level_flags = (s->level-1) >> 1;
if (level_flags > 3) level_flags = 3;
header |= (level_flags << 6);
if (s->strstart != 0) header |= PRESET_DICT;
header += 31 - (header % 31);
s->status = BUSY_STATE;
putShortMSB(s, header);
/* Save the adler32 of the preset dictionary: */
if (s->strstart != 0) {
putShortMSB(s, (uInt)(strm->adler >> 16));
putShortMSB(s, (uInt)(strm->adler & 0xffff));
}
strm->adler = 1L;
}
/* Flush as much pending output as possible */
if (s->pending != 0) {
flush_pending(strm);
if (strm->avail_out == 0) {
/* Since avail_out is 0, deflate will be called again with
* more output space, but possibly with both pending and
* avail_in equal to zero. There won't be anything to do,
* but this is not an error situation so make sure we
* return OK instead of BUF_ERROR at next call of deflate:
*/
s->last_flush = -1;
return Z_OK;
}
/* Make sure there is something to do and avoid duplicate consecutive
* flushes. For repeated and useless calls with Z_FINISH, we keep
* returning Z_STREAM_END instead of Z_BUF_ERROR.
*/
} else if (strm->avail_in == 0 && flush <= old_flush &&
flush != Z_FINISH) {
return Z_BUF_ERROR;
}
/* User must not provide more input after the first FINISH: */
if (s->status == FINISH_STATE && strm->avail_in != 0) {
return Z_BUF_ERROR;
}
/* Start a new block or continue the current one.
*/
if (strm->avail_in != 0 || s->lookahead != 0 ||
(flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
block_state bstate;
bstate = DEFLATE_HOOK(strm, flush, &bstate) ? bstate :
(*(configuration_table[s->level].func))(s, flush);
if (bstate == finish_started || bstate == finish_done) {
s->status = FINISH_STATE;
}
if (bstate == need_more || bstate == finish_started) {
if (strm->avail_out == 0) {
s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
}
return Z_OK;
/* If flush != Z_NO_FLUSH && avail_out == 0, the next call
* of deflate should use the same flush parameter to make sure
* that the flush is complete. So we don't have to output an
* empty block here, this will be done at next call. This also
* ensures that for a very small output buffer, we emit at most
* one empty block.
*/
}
if (bstate == block_done) {
if (flush == Z_PARTIAL_FLUSH) {
zlib_tr_align(s);
} else if (flush == Z_PACKET_FLUSH) {
/* Output just the 3-bit `stored' block type value,
but not a zero length. */
zlib_tr_stored_type_only(s);
} else { /* FULL_FLUSH or SYNC_FLUSH */
zlib_tr_stored_block(s, (char*)0, 0L, 0);
/* For a full flush, this empty block will be recognized
* as a special marker by inflate_sync().
*/
if (flush == Z_FULL_FLUSH) {
CLEAR_HASH(s); /* forget history */
}
}
flush_pending(strm);
if (strm->avail_out == 0) {
s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
return Z_OK;
}
}
}
Assert(strm->avail_out > 0, "bug2");
if (flush != Z_FINISH) return Z_OK;
if (!s->noheader) {
/* Write zlib trailer (adler32) */
putShortMSB(s, (uInt)(strm->adler >> 16));
putShortMSB(s, (uInt)(strm->adler & 0xffff));
}
flush_pending(strm);
/* If avail_out is zero, the application will call deflate again
* to flush the rest.
*/
if (!s->noheader) {
s->noheader = -1; /* write the trailer only once! */
}
if (s->pending == 0) {
Assert(s->bi_valid == 0, "bi_buf not flushed");
return Z_STREAM_END;
}
return Z_OK;
}
/* ========================================================================= */
int zlib_deflateEnd(
z_streamp strm
)
{
int status;
deflate_state *s;
if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR;
s = (deflate_state *) strm->state;
status = s->status;
if (status != INIT_STATE && status != BUSY_STATE &&
status != FINISH_STATE) {
return Z_STREAM_ERROR;
}
strm->state = NULL;
return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
}
/* ===========================================================================
* Read a new buffer from the current input stream, update the adler32
* and total number of bytes read. All deflate() input goes through
* this function so some applications may wish to modify it to avoid
* allocating a large strm->next_in buffer and copying from it.
* (See also flush_pending()).
*/
static int read_buf(
z_streamp strm,
Byte *buf,
unsigned size
)
{
unsigned len = strm->avail_in;
if (len > size) len = size;
if (len == 0) return 0;
strm->avail_in -= len;
if (!DEFLATE_NEED_CHECKSUM(strm)) {}
else if (!((deflate_state *)(strm->state))->noheader) {
strm->adler = zlib_adler32(strm->adler, strm->next_in, len);
}
memcpy(buf, strm->next_in, len);
strm->next_in += len;
strm->total_in += len;
return (int)len;
}
/* ===========================================================================
* Initialize the "longest match" routines for a new zlib stream
*/
static void lm_init(
deflate_state *s
)
{
s->window_size = (ulg)2L*s->w_size;
CLEAR_HASH(s);
/* Set the default configuration parameters:
*/
s->max_lazy_match = configuration_table[s->level].max_lazy;
s->good_match = configuration_table[s->level].good_length;
s->nice_match = configuration_table[s->level].nice_length;
s->max_chain_length = configuration_table[s->level].max_chain;
s->strstart = 0;
s->block_start = 0L;
s->lookahead = 0;
s->match_length = s->prev_length = MIN_MATCH-1;
s->match_available = 0;
s->ins_h = 0;
}
/* ===========================================================================
* Set match_start to the longest match starting at the given string and
* return its length. Matches shorter or equal to prev_length are discarded,
* in which case the result is equal to prev_length and match_start is
* garbage.
* IN assertions: cur_match is the head of the hash chain for the current
* string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
* OUT assertion: the match length is not greater than s->lookahead.
*/
/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
* match.S. The code will be functionally equivalent.
*/
static uInt longest_match(
deflate_state *s,
IPos cur_match /* current match */
)
{
unsigned chain_length = s->max_chain_length;/* max hash chain length */
register Byte *scan = s->window + s->strstart; /* current string */
register Byte *match; /* matched string */
register int len; /* length of current match */
int best_len = s->prev_length; /* best match length so far */
int nice_match = s->nice_match; /* stop if match long enough */
IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
s->strstart - (IPos)MAX_DIST(s) : NIL;
/* Stop when cur_match becomes <= limit. To simplify the code,
* we prevent matches with the string of window index 0.
*/
Pos *prev = s->prev;
uInt wmask = s->w_mask;
#ifdef UNALIGNED_OK
/* Compare two bytes at a time. Note: this is not always beneficial.
* Try with and without -DUNALIGNED_OK to check.
*/
register Byte *strend = s->window + s->strstart + MAX_MATCH - 1;
register ush scan_start = *(ush*)scan;
register ush scan_end = *(ush*)(scan+best_len-1);
#else
register Byte *strend = s->window + s->strstart + MAX_MATCH;
register Byte scan_end1 = scan[best_len-1];
register Byte scan_end = scan[best_len];
#endif
/* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
* It is easy to get rid of this optimization if necessary.
*/
Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
/* Do not waste too much time if we already have a good match: */
if (s->prev_length >= s->good_match) {
chain_length >>= 2;
}
/* Do not look for matches beyond the end of the input. This is necessary
* to make deflate deterministic.
*/
if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
do {
Assert(cur_match < s->strstart, "no future");
match = s->window + cur_match;
/* Skip to next match if the match length cannot increase
* or if the match length is less than 2:
*/
#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
/* This code assumes sizeof(unsigned short) == 2. Do not use
* UNALIGNED_OK if your compiler uses a different size.
*/
if (*(ush*)(match+best_len-1) != scan_end ||
*(ush*)match != scan_start) continue;
/* It is not necessary to compare scan[2] and match[2] since they are
* always equal when the other bytes match, given that the hash keys
* are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
* strstart+3, +5, ... up to strstart+257. We check for insufficient
* lookahead only every 4th comparison; the 128th check will be made
* at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
* necessary to put more guard bytes at the end of the window, or
* to check more often for insufficient lookahead.
*/
Assert(scan[2] == match[2], "scan[2]?");
scan++, match++;
do {
} while (*(ush*)(scan+=2) == *(ush*)(match+=2) &&
*(ush*)(scan+=2) == *(ush*)(match+=2) &&
*(ush*)(scan+=2) == *(ush*)(match+=2) &&
*(ush*)(scan+=2) == *(ush*)(match+=2) &&
scan < strend);
/* The funny "do {}" generates better code on most compilers */
/* Here, scan <= window+strstart+257 */
Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
if (*scan == *match) scan++;
len = (MAX_MATCH - 1) - (int)(strend-scan);
scan = strend - (MAX_MATCH-1);
#else /* UNALIGNED_OK */
if (match[best_len] != scan_end ||
match[best_len-1] != scan_end1 ||
*match != *scan ||
*++match != scan[1]) continue;
/* The check at best_len-1 can be removed because it will be made
* again later. (This heuristic is not always a win.)
* It is not necessary to compare scan[2] and match[2] since they
* are always equal when the other bytes match, given that
* the hash keys are equal and that HASH_BITS >= 8.
*/
scan += 2, match++;
Assert(*scan == *match, "match[2]?");
/* We check for insufficient lookahead only every 8th comparison;
* the 256th check will be made at strstart+258.
*/
do {
} while (*++scan == *++match && *++scan == *++match &&
*++scan == *++match && *++scan == *++match &&
*++scan == *++match && *++scan == *++match &&
*++scan == *++match && *++scan == *++match &&
scan < strend);
Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
len = MAX_MATCH - (int)(strend - scan);
scan = strend - MAX_MATCH;
#endif /* UNALIGNED_OK */
if (len > best_len) {
s->match_start = cur_match;
best_len = len;
if (len >= nice_match) break;
#ifdef UNALIGNED_OK
scan_end = *(ush*)(scan+best_len-1);
#else
scan_end1 = scan[best_len-1];
scan_end = scan[best_len];
#endif
}
} while ((cur_match = prev[cur_match & wmask]) > limit
&& --chain_length != 0);
if ((uInt)best_len <= s->lookahead) return best_len;
return s->lookahead;
}
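/*
 * Naive reference version of longest_match() -- an added sketch, not
 * part of the upstream file.  Same chain walk and distance limit, but a
 * plain byte-at-a-time compare instead of the scan_end prefilter and the
 * unrolled loops; the good_match chain shortening is omitted, and modulo
 * that shortening the optimized code above returns the same match.
 */
static inline uInt longest_match_naive(deflate_state *s, IPos cur_match)
{
	Byte *scan = s->window + s->strstart;	/* current string */
	uInt best_len = s->prev_length;
	uInt nice = (uInt)s->nice_match > s->lookahead ?
			s->lookahead : (uInt)s->nice_match;
	IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
			s->strstart - (IPos)MAX_DIST(s) : NIL;
	unsigned chain_length = s->max_chain_length;

	do {
		Byte *match = s->window + cur_match;
		uInt len = 0;

		while (len < MAX_MATCH && scan[len] == match[len])
			len++;
		if (len > best_len) {
			best_len = len;
			s->match_start = cur_match;
			if (len >= nice)
				break;
		}
	} while ((cur_match = s->prev[cur_match & s->w_mask]) > limit &&
		 --chain_length != 0);
	return best_len <= s->lookahead ? best_len : s->lookahead;
}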
#ifdef DEBUG_ZLIB
/* ===========================================================================
* Check that the match at match_start is indeed a match.
*/
static void check_match(
deflate_state *s,
IPos start,
IPos match,
int length
)
{
/* check that the match is indeed a match */
if (memcmp((char *)s->window + match,
(char *)s->window + start, length) != EQUAL) {
fprintf(stderr, " start %u, match %u, length %d\n",
start, match, length);
do {
fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
} while (--length != 0);
z_error("invalid match");
}
if (z_verbose > 1) {
fprintf(stderr,"\\[%d,%d]", start-match, length);
do { putc(s->window[start++], stderr); } while (--length != 0);
}
}
#else
# define check_match(s, start, match, length)
#endif
/* ===========================================================================
* Fill the window when the lookahead becomes insufficient.
* Updates strstart and lookahead.
*
* IN assertion: lookahead < MIN_LOOKAHEAD
* OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
* At least one byte has been read, or avail_in == 0; reads are
* performed for at least two bytes (required for the zip translate_eol
* option -- not supported here).
*/
static void fill_window(
deflate_state *s
)
{
register unsigned n, m;
register Pos *p;
unsigned more; /* Amount of free space at the end of the window. */
uInt wsize = s->w_size;
do {
more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
/* Deal with !@#$% 64K limit: */
if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
more = wsize;
} else if (more == (unsigned)(-1)) {
/* Very unlikely, but possible on 16 bit machine if strstart == 0
* and lookahead == 1 (input done one byte at time)
*/
more--;
/* If the window is almost full and there is insufficient lookahead,
* move the upper half to the lower one to make room in the upper half.
*/
} else if (s->strstart >= wsize+MAX_DIST(s)) {
memcpy((char *)s->window, (char *)s->window+wsize,
(unsigned)wsize);
s->match_start -= wsize;
s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
s->block_start -= (long) wsize;
/* Slide the hash table (could be avoided with 32 bit values
at the expense of memory usage). We slide even when level == 0
to keep the hash table consistent if we switch back to level > 0
later. (Using level 0 permanently is not an optimal usage of
zlib, so we don't care about this pathological case.)
*/
n = s->hash_size;
p = &s->head[n];
do {
m = *--p;
*p = (Pos)(m >= wsize ? m-wsize : NIL);
} while (--n);
n = wsize;
p = &s->prev[n];
do {
m = *--p;
*p = (Pos)(m >= wsize ? m-wsize : NIL);
/* If n is not on any hash chain, prev[n] is garbage but
* its value will never be used.
*/
} while (--n);
more += wsize;
}
if (s->strm->avail_in == 0) return;
/* If there was no sliding:
* strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
* more == window_size - lookahead - strstart
* => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
* => more >= window_size - 2*WSIZE + 2
* In the BIG_MEM or MMAP case (not yet supported),
* window_size == input_size + MIN_LOOKAHEAD &&
* strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
* Otherwise, window_size == 2*WSIZE so more >= 2.
* If there was sliding, more >= WSIZE. So in all cases, more >= 2.
*/
Assert(more >= 2, "more < 2");
n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
s->lookahead += n;
/* Initialize the hash value now that we have some input: */
if (s->lookahead >= MIN_MATCH) {
s->ins_h = s->window[s->strstart];
UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
#if MIN_MATCH != 3
Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
}
/* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
* but this is not important since only literal bytes will be emitted.
*/
} while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
}
/* ===========================================================================
* Flush the current block, with given end-of-file flag.
* IN assertion: strstart is set to the end of the current match.
*/
#define FLUSH_BLOCK_ONLY(s, eof) { \
zlib_tr_flush_block(s, (s->block_start >= 0L ? \
(char *)&s->window[(unsigned)s->block_start] : \
NULL), \
(ulg)((long)s->strstart - s->block_start), \
(eof)); \
s->block_start = s->strstart; \
flush_pending(s->strm); \
Tracev((stderr,"[FLUSH]")); \
}
/* Same but force premature exit if necessary. */
#define FLUSH_BLOCK(s, eof) { \
FLUSH_BLOCK_ONLY(s, eof); \
if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \
}
/* ===========================================================================
* Copy without compression as much as possible from the input stream, return
* the current block state.
* This function does not insert new strings in the dictionary since
* uncompressible data is probably not useful. This function is used
* only for the level=0 compression option.
* NOTE: this function should be optimized to avoid extra copying from
* window to pending_buf.
*/
static block_state deflate_stored(
deflate_state *s,
int flush
)
{
/* Stored blocks are limited to 0xffff bytes, pending_buf is limited
* to pending_buf_size, and each stored block has a 5 byte header:
*/
ulg max_block_size = 0xffff;
ulg max_start;
if (max_block_size > s->pending_buf_size - 5) {
max_block_size = s->pending_buf_size - 5;
}
/* Copy as much as possible from input to output: */
for (;;) {
/* Fill the window as much as possible: */
if (s->lookahead <= 1) {
Assert(s->strstart < s->w_size+MAX_DIST(s) ||
s->block_start >= (long)s->w_size, "slide too late");
fill_window(s);
if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;
if (s->lookahead == 0) break; /* flush the current block */
}
Assert(s->block_start >= 0L, "block gone");
s->strstart += s->lookahead;
s->lookahead = 0;
/* Emit a stored block if pending_buf will be full: */
max_start = s->block_start + max_block_size;
if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
/* strstart == 0 is possible when wraparound on 16-bit machine */
s->lookahead = (uInt)(s->strstart - max_start);
s->strstart = (uInt)max_start;
FLUSH_BLOCK(s, 0);
}
/* Flush if we may have to slide, otherwise block_start may become
* negative and the data will be gone:
*/
if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
FLUSH_BLOCK(s, 0);
}
}
FLUSH_BLOCK(s, flush == Z_FINISH);
return flush == Z_FINISH ? finish_done : block_done;
}
/* ===========================================================================
* Compress as much as possible from the input stream, return the current
* block state.
* This function does not perform lazy evaluation of matches and inserts
* new strings in the dictionary only for unmatched strings or for short
* matches. It is used only for the fast compression options.
*/
static block_state deflate_fast(
deflate_state *s,
int flush
)
{
IPos hash_head = NIL; /* head of the hash chain */
int bflush; /* set if current block must be flushed */
for (;;) {
/* Make sure that we always have enough lookahead, except
* at the end of the input file. We need MAX_MATCH bytes
* for the next match, plus MIN_MATCH bytes to insert the
* string following the next match.
*/
if (s->lookahead < MIN_LOOKAHEAD) {
fill_window(s);
if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
return need_more;
}
if (s->lookahead == 0) break; /* flush the current block */
}
/* Insert the string window[strstart .. strstart+2] in the
* dictionary, and set hash_head to the head of the hash chain:
*/
if (s->lookahead >= MIN_MATCH) {
INSERT_STRING(s, s->strstart, hash_head);
}
/* Find the longest match, discarding those <= prev_length.
* At this point we have always match_length < MIN_MATCH
*/
if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
/* To simplify the code, we prevent matches with the string
* of window index 0 (in particular we have to avoid a match
* of the string with itself at the start of the input file).
*/
if (s->strategy != Z_HUFFMAN_ONLY) {
s->match_length = longest_match (s, hash_head);
}
/* longest_match() sets match_start */
}
if (s->match_length >= MIN_MATCH) {
check_match(s, s->strstart, s->match_start, s->match_length);
bflush = zlib_tr_tally(s, s->strstart - s->match_start,
s->match_length - MIN_MATCH);
s->lookahead -= s->match_length;
/* Insert new strings in the hash table only if the match length
* is not too large. This saves time but degrades compression.
*/
if (s->match_length <= s->max_insert_length &&
s->lookahead >= MIN_MATCH) {
s->match_length--; /* string at strstart already in hash table */
do {
s->strstart++;
INSERT_STRING(s, s->strstart, hash_head);
/* strstart never exceeds WSIZE-MAX_MATCH, so there are
* always MIN_MATCH bytes ahead.
*/
} while (--s->match_length != 0);
s->strstart++;
} else {
s->strstart += s->match_length;
s->match_length = 0;
s->ins_h = s->window[s->strstart];
UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
#if MIN_MATCH != 3
Call UPDATE_HASH() MIN_MATCH-3 more times
#endif
/* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
* matter since it will be recomputed at next deflate call.
*/
}
} else {
/* No match, output a literal byte */
Tracevv((stderr,"%c", s->window[s->strstart]));
bflush = zlib_tr_tally (s, 0, s->window[s->strstart]);
s->lookahead--;
s->strstart++;
}
if (bflush) FLUSH_BLOCK(s, 0);
}
FLUSH_BLOCK(s, flush == Z_FINISH);
return flush == Z_FINISH ? finish_done : block_done;
}
/* ===========================================================================
* Same as above, but achieves better compression. We use a lazy
* evaluation for matches: a match is finally adopted only if there is
* no better match at the next window position.
*/
static block_state deflate_slow(
deflate_state *s,
int flush
)
{
IPos hash_head = NIL; /* head of hash chain */
int bflush; /* set if current block must be flushed */
/* Process the input block. */
for (;;) {
/* Make sure that we always have enough lookahead, except
* at the end of the input file. We need MAX_MATCH bytes
* for the next match, plus MIN_MATCH bytes to insert the
* string following the next match.
*/
if (s->lookahead < MIN_LOOKAHEAD) {
fill_window(s);
if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
return need_more;
}
if (s->lookahead == 0) break; /* flush the current block */
}
/* Insert the string window[strstart .. strstart+2] in the
* dictionary, and set hash_head to the head of the hash chain:
*/
if (s->lookahead >= MIN_MATCH) {
INSERT_STRING(s, s->strstart, hash_head);
}
/* Find the longest match, discarding those <= prev_length.
*/
s->prev_length = s->match_length, s->prev_match = s->match_start;
s->match_length = MIN_MATCH-1;
if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
s->strstart - hash_head <= MAX_DIST(s)) {
/* To simplify the code, we prevent matches with the string
* of window index 0 (in particular we have to avoid a match
* of the string with itself at the start of the input file).
*/
if (s->strategy != Z_HUFFMAN_ONLY) {
s->match_length = longest_match (s, hash_head);
}
/* longest_match() sets match_start */
if (s->match_length <= 5 && (s->strategy == Z_FILTERED ||
(s->match_length == MIN_MATCH &&
s->strstart - s->match_start > TOO_FAR))) {
/* If prev_match is also MIN_MATCH, match_start is garbage
* but we will ignore the current match anyway.
*/
s->match_length = MIN_MATCH-1;
}
}
/* If there was a match at the previous step and the current
* match is not better, output the previous match:
*/
if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
/* Do not insert strings in hash table beyond this. */
check_match(s, s->strstart-1, s->prev_match, s->prev_length);
bflush = zlib_tr_tally(s, s->strstart -1 - s->prev_match,
s->prev_length - MIN_MATCH);
/* Insert in hash table all strings up to the end of the match.
* strstart-1 and strstart are already inserted. If there is not
* enough lookahead, the last two strings are not inserted in
* the hash table.
*/
s->lookahead -= s->prev_length-1;
s->prev_length -= 2;
do {
if (++s->strstart <= max_insert) {
INSERT_STRING(s, s->strstart, hash_head);
}
} while (--s->prev_length != 0);
s->match_available = 0;
s->match_length = MIN_MATCH-1;
s->strstart++;
if (bflush) FLUSH_BLOCK(s, 0);
} else if (s->match_available) {
/* If there was no match at the previous position, output a
* single literal. If there was a match but the current match
* is longer, truncate the previous match to a single literal.
*/
Tracevv((stderr,"%c", s->window[s->strstart-1]));
if (zlib_tr_tally (s, 0, s->window[s->strstart-1])) {
FLUSH_BLOCK_ONLY(s, 0);
}
s->strstart++;
s->lookahead--;
if (s->strm->avail_out == 0) return need_more;
} else {
/* There is no previous match to compare with, wait for
* the next step to decide.
*/
s->match_available = 1;
s->strstart++;
s->lookahead--;
}
}
Assert (flush != Z_NO_FLUSH, "no flush?");
if (s->match_available) {
Tracevv((stderr,"%c", s->window[s->strstart-1]));
zlib_tr_tally (s, 0, s->window[s->strstart-1]);
s->match_available = 0;
}
FLUSH_BLOCK(s, flush == Z_FINISH);
return flush == Z_FINISH ? finish_done : block_done;
}
int zlib_deflate_workspacesize(int windowBits, int memLevel)
{
if (windowBits < 0) /* undocumented feature: suppress zlib header */
windowBits = -windowBits;
/* Since the return value is typically passed to vmalloc() unchecked... */
BUG_ON(memLevel < 1 || memLevel > MAX_MEM_LEVEL || windowBits < 9 ||
windowBits > 15);
return sizeof(deflate_workspace)
+ zlib_deflate_window_memsize(windowBits)
+ zlib_deflate_prev_memsize(windowBits)
+ zlib_deflate_head_memsize(memLevel)
+ zlib_deflate_overlay_memsize(memLevel);
}
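/* Typical caller pattern (an illustrative sketch, not from the original
 * source; error handling trimmed).  The workspace must be allocated by the
 * caller and hung off the stream before zlib_deflateInit2():
 *
 *	z_stream strm;
 *
 *	strm.workspace = vzalloc(zlib_deflate_workspacesize(MAX_WBITS,
 *							    MAX_MEM_LEVEL));
 *	if (strm.workspace &&
 *	    zlib_deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
 *			      MAX_WBITS, MAX_MEM_LEVEL,
 *			      Z_DEFAULT_STRATEGY) == Z_OK) {
 *		... set next_in/avail_in and next_out/avail_out, then
 *		... call zlib_deflate(&strm, Z_FINISH) until done
 *		zlib_deflateEnd(&strm);
 *	}
 *	vfree(strm.workspace);
 */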
int zlib_deflate_dfltcc_enabled(void)
{
return DEFLATE_DFLTCC_ENABLED();
}
| linux-master | lib/zlib_deflate/deflate.c |
/* +++ trees.c */
/* trees.c -- output deflated data using Huffman coding
* Copyright (C) 1995-1996 Jean-loup Gailly
* For conditions of distribution and use, see copyright notice in zlib.h
*/
/*
* ALGORITHM
*
* The "deflation" process uses several Huffman trees. The more
* common source values are represented by shorter bit sequences.
*
* Each code tree is stored in a compressed form which is itself
* a Huffman encoding of the lengths of all the code strings (in
* ascending order by source values). The actual code strings are
* reconstructed from the lengths in the inflate process, as described
* in the deflate specification.
*
* REFERENCES
*
 * Deutsch, L.P., "'Deflate' Compressed Data Format Specification".
* Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc
*
* Storer, James A.
* Data Compression: Methods and Theory, pp. 49-50.
* Computer Science Press, 1988. ISBN 0-7167-8156-5.
*
* Sedgewick, R.
* Algorithms, p290.
* Addison-Wesley, 1983. ISBN 0-201-06672-6.
*/
/* From: trees.c,v 1.11 1996/07/24 13:41:06 me Exp $ */
/* #include "deflate.h" */
#include <linux/zutil.h>
#include <linux/bitrev.h>
#include "defutil.h"
#ifdef DEBUG_ZLIB
# include <ctype.h>
#endif
/* ===========================================================================
* Constants
*/
#define MAX_BL_BITS 7
/* Bit length codes must not exceed MAX_BL_BITS bits */
#define END_BLOCK 256
/* end of block literal code */
#define REP_3_6 16
/* repeat previous bit length 3-6 times (2 bits of repeat count) */
#define REPZ_3_10 17
/* repeat a zero length 3-10 times (3 bits of repeat count) */
#define REPZ_11_138 18
/* repeat a zero length 11-138 times (7 bits of repeat count) */
static const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */
= {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0};
static const int extra_dbits[D_CODES] /* extra bits for each distance code */
= {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
static const int extra_blbits[BL_CODES]/* extra bits for each bit length code */
= {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7};
static const uch bl_order[BL_CODES]
= {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15};
/* The lengths of the bit length codes are sent in order of decreasing
* probability, to avoid transmitting the lengths for unused bit length codes.
*/
/* ===========================================================================
* Local data. These are initialized only once.
*/
static ct_data static_ltree[L_CODES+2];
/* The static literal tree. Since the bit lengths are imposed, there is no
* need for the L_CODES extra codes used during heap construction. However
* The codes 286 and 287 are needed to build a canonical tree (see zlib_tr_init
* below).
*/
static ct_data static_dtree[D_CODES];
/* The static distance tree. (Actually a trivial tree since all codes use
* 5 bits.)
*/
static uch dist_code[512];
/* distance codes. The first 256 values correspond to the distances
* 3 .. 258, the last 256 values correspond to the top 8 bits of
* the 15 bit distances.
*/
static uch length_code[MAX_MATCH-MIN_MATCH+1];
/* length code for each normalized match length (0 == MIN_MATCH) */
static int base_length[LENGTH_CODES];
/* First normalized length for each code (0 = MIN_MATCH) */
static int base_dist[D_CODES];
/* First normalized distance for each code (0 = distance of 1) */
struct static_tree_desc_s {
const ct_data *static_tree; /* static tree or NULL */
const int *extra_bits; /* extra bits for each code or NULL */
int extra_base; /* base index for extra_bits */
int elems; /* max number of elements in the tree */
int max_length; /* max bit length for the codes */
};
static static_tree_desc static_l_desc =
{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS};
static static_tree_desc static_d_desc =
{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS};
static static_tree_desc static_bl_desc =
{(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS};
/* ===========================================================================
* Local (static) routines in this file.
*/
static void tr_static_init (void);
static void init_block (deflate_state *s);
static void pqdownheap (deflate_state *s, ct_data *tree, int k);
static void gen_bitlen (deflate_state *s, tree_desc *desc);
static void gen_codes (ct_data *tree, int max_code, ush *bl_count);
static void build_tree (deflate_state *s, tree_desc *desc);
static void scan_tree (deflate_state *s, ct_data *tree, int max_code);
static void send_tree (deflate_state *s, ct_data *tree, int max_code);
static int build_bl_tree (deflate_state *s);
static void send_all_trees (deflate_state *s, int lcodes, int dcodes,
int blcodes);
static void compress_block (deflate_state *s, ct_data *ltree,
ct_data *dtree);
static void set_data_type (deflate_state *s);
static void bi_flush (deflate_state *s);
static void copy_block (deflate_state *s, char *buf, unsigned len,
int header);
#ifndef DEBUG_ZLIB
# define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len)
/* Send a code of the given tree. c and tree must not have side effects */
#else /* DEBUG_ZLIB */
# define send_code(s, c, tree) \
{ if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \
send_bits(s, tree[c].Code, tree[c].Len); }
#endif
#define d_code(dist) \
((dist) < 256 ? dist_code[dist] : dist_code[256+((dist)>>7)])
/* Mapping from a distance to a distance code. dist is the distance - 1 and
* must not have side effects. dist_code[256] and dist_code[257] are never
* used.
*/
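/* Worked example (illustrative): a match distance of 300 gives dist = 299.
 * Since 299 >= 256 the upper half of the table is used:
 * dist_code[256 + (299 >> 7)] = dist_code[258] = 16, i.e. distance code 16,
 * which covers distances 257..384 and carries extra_dbits[16] = 7 extra
 * bits.
 */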
/* ===========================================================================
* Initialize the various 'constant' tables. In a multi-threaded environment,
* this function may be called by two threads concurrently, but this is
* harmless since both invocations do exactly the same thing.
*/
static void tr_static_init(void)
{
static int static_init_done;
int n; /* iterates over tree elements */
int bits; /* bit counter */
int length; /* length value */
int code; /* code value */
int dist; /* distance index */
ush bl_count[MAX_BITS+1];
/* number of codes at each bit length for an optimal tree */
if (static_init_done) return;
/* Initialize the mapping length (0..255) -> length code (0..28) */
length = 0;
for (code = 0; code < LENGTH_CODES-1; code++) {
base_length[code] = length;
for (n = 0; n < (1<<extra_lbits[code]); n++) {
length_code[length++] = (uch)code;
}
}
Assert (length == 256, "tr_static_init: length != 256");
/* Note that the length 255 (match length 258) can be represented
* in two different ways: code 284 + 5 bits or code 285, so we
* overwrite length_code[255] to use the best encoding:
*/
length_code[length-1] = (uch)code;
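    /* Worked example (illustrative): a match length of 10 is normalized to
     * 10 - MIN_MATCH = 7, and length_code[7] = 7, so the emitted
     * literal/length symbol is 7 + LITERALS + 1 = 264; this matches
     * RFC 1951, where symbol 264 encodes length 10 with no extra bits.
     */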
/* Initialize the mapping dist (0..32K) -> dist code (0..29) */
dist = 0;
for (code = 0 ; code < 16; code++) {
base_dist[code] = dist;
for (n = 0; n < (1<<extra_dbits[code]); n++) {
dist_code[dist++] = (uch)code;
}
}
Assert (dist == 256, "tr_static_init: dist != 256");
dist >>= 7; /* from now on, all distances are divided by 128 */
for ( ; code < D_CODES; code++) {
base_dist[code] = dist << 7;
for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {
dist_code[256 + dist++] = (uch)code;
}
}
Assert (dist == 256, "tr_static_init: 256+dist != 512");
/* Construct the codes of the static literal tree */
for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0;
n = 0;
while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++;
while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++;
while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++;
while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++;
/* Codes 286 and 287 do not exist, but we must include them in the
* tree construction to get a canonical Huffman tree (longest code
* all ones)
*/
gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count);
/* The static distance tree is trivial: */
for (n = 0; n < D_CODES; n++) {
static_dtree[n].Len = 5;
static_dtree[n].Code = bitrev32((u32)n) >> (32 - 5);
}
static_init_done = 1;
}
/* ===========================================================================
* Initialize the tree data structures for a new zlib stream.
*/
void zlib_tr_init(
deflate_state *s
)
{
tr_static_init();
s->compressed_len = 0L;
s->l_desc.dyn_tree = s->dyn_ltree;
s->l_desc.stat_desc = &static_l_desc;
s->d_desc.dyn_tree = s->dyn_dtree;
s->d_desc.stat_desc = &static_d_desc;
s->bl_desc.dyn_tree = s->bl_tree;
s->bl_desc.stat_desc = &static_bl_desc;
s->bi_buf = 0;
s->bi_valid = 0;
s->last_eob_len = 8; /* enough lookahead for inflate */
#ifdef DEBUG_ZLIB
s->bits_sent = 0L;
#endif
/* Initialize the first block of the first file: */
init_block(s);
}
/* ===========================================================================
* Initialize a new block.
*/
static void init_block(
deflate_state *s
)
{
int n; /* iterates over tree elements */
/* Initialize the trees. */
for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0;
for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0;
for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0;
s->dyn_ltree[END_BLOCK].Freq = 1;
s->opt_len = s->static_len = 0L;
s->last_lit = s->matches = 0;
}
#define SMALLEST 1
/* Index within the heap array of least frequent node in the Huffman tree */
/* ===========================================================================
* Remove the smallest element from the heap and recreate the heap with
* one less element. Updates heap and heap_len.
*/
#define pqremove(s, tree, top) \
{\
top = s->heap[SMALLEST]; \
s->heap[SMALLEST] = s->heap[s->heap_len--]; \
pqdownheap(s, tree, SMALLEST); \
}
/* ===========================================================================
 * Compares two subtrees, using the tree depth as tie breaker when
* the subtrees have equal frequency. This minimizes the worst case length.
*/
#define smaller(tree, n, m, depth) \
(tree[n].Freq < tree[m].Freq || \
(tree[n].Freq == tree[m].Freq && depth[n] <= depth[m]))
/* ===========================================================================
* Restore the heap property by moving down the tree starting at node k,
* exchanging a node with the smallest of its two sons if necessary, stopping
* when the heap property is re-established (each father smaller than its
* two sons).
*/
static void pqdownheap(
deflate_state *s,
ct_data *tree, /* the tree to restore */
int k /* node to move down */
)
{
int v = s->heap[k];
int j = k << 1; /* left son of k */
while (j <= s->heap_len) {
/* Set j to the smallest of the two sons: */
if (j < s->heap_len &&
smaller(tree, s->heap[j+1], s->heap[j], s->depth)) {
j++;
}
/* Exit if v is smaller than both sons */
if (smaller(tree, v, s->heap[j], s->depth)) break;
/* Exchange v with the smallest son */
s->heap[k] = s->heap[j]; k = j;
/* And continue down the tree, setting j to the left son of k */
j <<= 1;
}
s->heap[k] = v;
}
/* ===========================================================================
* Compute the optimal bit lengths for a tree and update the total bit length
* for the current block.
* IN assertion: the fields freq and dad are set, heap[heap_max] and
* above are the tree nodes sorted by increasing frequency.
* OUT assertions: the field len is set to the optimal bit length, the
* array bl_count contains the frequencies for each bit length.
* The length opt_len is updated; static_len is also updated if stree is
* not null.
*/
static void gen_bitlen(
deflate_state *s,
tree_desc *desc /* the tree descriptor */
)
{
ct_data *tree = desc->dyn_tree;
int max_code = desc->max_code;
const ct_data *stree = desc->stat_desc->static_tree;
const int *extra = desc->stat_desc->extra_bits;
int base = desc->stat_desc->extra_base;
int max_length = desc->stat_desc->max_length;
int h; /* heap index */
int n, m; /* iterate over the tree elements */
int bits; /* bit length */
int xbits; /* extra bits */
ush f; /* frequency */
int overflow = 0; /* number of elements with bit length too large */
for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0;
/* In a first pass, compute the optimal bit lengths (which may
* overflow in the case of the bit length tree).
*/
tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */
for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
n = s->heap[h];
bits = tree[tree[n].Dad].Len + 1;
if (bits > max_length) bits = max_length, overflow++;
tree[n].Len = (ush)bits;
/* We overwrite tree[n].Dad which is no longer needed */
if (n > max_code) continue; /* not a leaf node */
s->bl_count[bits]++;
xbits = 0;
if (n >= base) xbits = extra[n-base];
f = tree[n].Freq;
s->opt_len += (ulg)f * (bits + xbits);
if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits);
}
if (overflow == 0) return;
Trace((stderr,"\nbit length overflow\n"));
/* This happens for example on obj2 and pic of the Calgary corpus */
/* Find the first bit length which could increase: */
do {
bits = max_length-1;
while (s->bl_count[bits] == 0) bits--;
s->bl_count[bits]--; /* move one leaf down the tree */
s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
s->bl_count[max_length]--;
/* The brother of the overflow item also moves one step up,
* but this does not affect bl_count[max_length]
*/
overflow -= 2;
} while (overflow > 0);
/* Now recompute all bit lengths, scanning in increasing frequency.
* h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
* lengths instead of fixing only the wrong ones. This idea is taken
* from 'ar' written by Haruhiko Okumura.)
*/
for (bits = max_length; bits != 0; bits--) {
n = s->bl_count[bits];
while (n != 0) {
m = s->heap[--h];
if (m > max_code) continue;
if (tree[m].Len != (unsigned) bits) {
Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
s->opt_len += ((long)bits - (long)tree[m].Len)
*(long)tree[m].Freq;
tree[m].Len = (ush)bits;
}
n--;
}
}
}
/* ===========================================================================
* Generate the codes for a given tree and bit counts (which need not be
* optimal).
* IN assertion: the array bl_count contains the bit length statistics for
* the given tree and the field len is set for all tree elements.
* OUT assertion: the field code is set for all tree elements of non
* zero code length.
*/
static void gen_codes(
ct_data *tree, /* the tree to decorate */
int max_code, /* largest code with non zero frequency */
ush *bl_count /* number of codes at each bit length */
)
{
ush next_code[MAX_BITS+1]; /* next code value for each bit length */
ush code = 0; /* running code value */
int bits; /* bit index */
int n; /* code index */
/* The distribution counts are first used to generate the code values
* without bit reversal.
*/
for (bits = 1; bits <= MAX_BITS; bits++) {
next_code[bits] = code = (code + bl_count[bits-1]) << 1;
}
/* Check that the bit counts in bl_count are consistent. The last code
* must be all ones.
*/
Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
"inconsistent bit counts");
Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
for (n = 0; n <= max_code; n++) {
int len = tree[n].Len;
if (len == 0) continue;
/* Now reverse the bits */
tree[n].Code = bitrev32((u32)(next_code[len]++)) >> (32 - len);
Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
}
}
/* ===========================================================================
 * Construct one Huffman tree and assign the code bit strings and lengths.
* Update the total bit length for the current block.
* IN assertion: the field freq is set for all tree elements.
* OUT assertions: the fields len and code are set to the optimal bit length
* and corresponding code. The length opt_len is updated; static_len is
* also updated if stree is not null. The field max_code is set.
*/
static void build_tree(
deflate_state *s,
tree_desc *desc /* the tree descriptor */
)
{
ct_data *tree = desc->dyn_tree;
const ct_data *stree = desc->stat_desc->static_tree;
int elems = desc->stat_desc->elems;
int n, m; /* iterate over heap elements */
int max_code = -1; /* largest code with non zero frequency */
int node; /* new node being created */
/* Construct the initial heap, with least frequent element in
* heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
* heap[0] is not used.
*/
s->heap_len = 0, s->heap_max = HEAP_SIZE;
for (n = 0; n < elems; n++) {
if (tree[n].Freq != 0) {
s->heap[++(s->heap_len)] = max_code = n;
s->depth[n] = 0;
} else {
tree[n].Len = 0;
}
}
/* The pkzip format requires that at least one distance code exists,
* and that at least one bit should be sent even if there is only one
* possible code. So to avoid special checks later on we force at least
* two codes of non zero frequency.
*/
while (s->heap_len < 2) {
node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
tree[node].Freq = 1;
s->depth[node] = 0;
s->opt_len--; if (stree) s->static_len -= stree[node].Len;
/* node is 0 or 1 so it does not have extra bits */
}
desc->max_code = max_code;
/* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
* establish sub-heaps of increasing lengths:
*/
for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);
/* Construct the Huffman tree by repeatedly combining the least two
* frequent nodes.
*/
node = elems; /* next internal node of the tree */
do {
pqremove(s, tree, n); /* n = node of least frequency */
m = s->heap[SMALLEST]; /* m = node of next least frequency */
s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */
s->heap[--(s->heap_max)] = m;
/* Create a new node father of n and m */
tree[node].Freq = tree[n].Freq + tree[m].Freq;
s->depth[node] = (uch) (max(s->depth[n], s->depth[m]) + 1);
tree[n].Dad = tree[m].Dad = (ush)node;
#ifdef DUMP_BL_TREE
if (tree == s->bl_tree) {
fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)",
node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
}
#endif
/* and insert the new node in the heap */
s->heap[SMALLEST] = node++;
pqdownheap(s, tree, SMALLEST);
} while (s->heap_len >= 2);
s->heap[--(s->heap_max)] = s->heap[SMALLEST];
/* At this point, the fields freq and dad are set. We can now
* generate the bit lengths.
*/
gen_bitlen(s, (tree_desc *)desc);
/* The field len is now set, we can generate the bit codes */
gen_codes ((ct_data *)tree, max_code, s->bl_count);
}
/* ===========================================================================
* Scan a literal or distance tree to determine the frequencies of the codes
* in the bit length tree.
*/
static void scan_tree(
deflate_state *s,
ct_data *tree, /* the tree to be scanned */
int max_code /* and its largest code of non zero frequency */
)
{
int n; /* iterates over all tree elements */
int prevlen = -1; /* last emitted length */
int curlen; /* length of current code */
int nextlen = tree[0].Len; /* length of next code */
int count = 0; /* repeat count of the current code */
int max_count = 7; /* max repeat count */
int min_count = 4; /* min repeat count */
if (nextlen == 0) max_count = 138, min_count = 3;
tree[max_code+1].Len = (ush)0xffff; /* guard */
for (n = 0; n <= max_code; n++) {
curlen = nextlen; nextlen = tree[n+1].Len;
if (++count < max_count && curlen == nextlen) {
continue;
} else if (count < min_count) {
s->bl_tree[curlen].Freq += count;
} else if (curlen != 0) {
if (curlen != prevlen) s->bl_tree[curlen].Freq++;
s->bl_tree[REP_3_6].Freq++;
} else if (count <= 10) {
s->bl_tree[REPZ_3_10].Freq++;
} else {
s->bl_tree[REPZ_11_138].Freq++;
}
count = 0; prevlen = curlen;
if (nextlen == 0) {
max_count = 138, min_count = 3;
} else if (curlen == nextlen) {
max_count = 6, min_count = 3;
} else {
max_count = 7, min_count = 4;
}
}
}
/* ===========================================================================
* Send a literal or distance tree in compressed form, using the codes in
* bl_tree.
*/
static void send_tree(
deflate_state *s,
ct_data *tree, /* the tree to be scanned */
int max_code /* and its largest code of non zero frequency */
)
{
int n; /* iterates over all tree elements */
int prevlen = -1; /* last emitted length */
int curlen; /* length of current code */
int nextlen = tree[0].Len; /* length of next code */
int count = 0; /* repeat count of the current code */
int max_count = 7; /* max repeat count */
int min_count = 4; /* min repeat count */
/* tree[max_code+1].Len = -1; */ /* guard already set */
if (nextlen == 0) max_count = 138, min_count = 3;
for (n = 0; n <= max_code; n++) {
curlen = nextlen; nextlen = tree[n+1].Len;
if (++count < max_count && curlen == nextlen) {
continue;
} else if (count < min_count) {
do { send_code(s, curlen, s->bl_tree); } while (--count != 0);
} else if (curlen != 0) {
if (curlen != prevlen) {
send_code(s, curlen, s->bl_tree); count--;
}
Assert(count >= 3 && count <= 6, " 3_6?");
send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2);
} else if (count <= 10) {
send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3);
} else {
send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7);
}
count = 0; prevlen = curlen;
if (nextlen == 0) {
max_count = 138, min_count = 3;
} else if (curlen == nextlen) {
max_count = 6, min_count = 3;
} else {
max_count = 7, min_count = 4;
}
}
}
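/* Worked example (illustrative): eight consecutive code lengths of 5 are
 * emitted by the loop above as the length 5 itself, then REP_3_6 with a
 * repeat count of 6, then one final 5 (the last run is too short for a
 * repeat code and is sent literally).
 */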
/* ===========================================================================
* Construct the Huffman tree for the bit lengths and return the index in
* bl_order of the last bit length code to send.
*/
static int build_bl_tree(
deflate_state *s
)
{
int max_blindex; /* index of last bit length code of non zero freq */
/* Determine the bit length frequencies for literal and distance trees */
scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code);
scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code);
/* Build the bit length tree: */
build_tree(s, (tree_desc *)(&(s->bl_desc)));
/* opt_len now includes the length of the tree representations, except
* the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
*/
/* Determine the number of bit length codes to send. The pkzip format
* requires that at least 4 bit length codes be sent. (appnote.txt says
* 3 but the actual value used is 4.)
*/
for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) {
if (s->bl_tree[bl_order[max_blindex]].Len != 0) break;
}
/* Update opt_len to include the bit length tree and counts */
s->opt_len += 3*(max_blindex+1) + 5+5+4;
Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
s->opt_len, s->static_len));
return max_blindex;
}
/* ===========================================================================
* Send the header for a block using dynamic Huffman trees: the counts, the
* lengths of the bit length codes, the literal tree and the distance tree.
* IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
*/
static void send_all_trees(
deflate_state *s,
    int lcodes,  /* number of literal/length codes to send */
    int dcodes,  /* number of distance codes to send */
    int blcodes  /* number of bit length codes to send */
)
{
int rank; /* index in bl_order */
Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
"too many codes");
Tracev((stderr, "\nbl counts: "));
send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */
send_bits(s, dcodes-1, 5);
send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */
for (rank = 0; rank < blcodes; rank++) {
Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
}
Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));
send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */
Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));
send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */
Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
}
/* ===========================================================================
* Send a stored block
*/
void zlib_tr_stored_block(
deflate_state *s,
char *buf, /* input block */
ulg stored_len, /* length of input block */
int eof /* true if this is the last block for a file */
)
{
send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */
s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
s->compressed_len += (stored_len + 4) << 3;
copy_block(s, buf, (unsigned)stored_len, 1); /* with header */
}
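/* Accounting note (illustrative): the (s->compressed_len + 3 + 7) & ~7
 * expression above rounds past the 3-bit block header to the next byte
 * boundary; (stored_len + 4) << 3 then counts the 2-byte LEN, the 2-byte
 * NLEN one's complement and the stored bytes themselves, all in bits.
 */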
/* Send just the `stored block' type code without any length bytes or data.
*/
void zlib_tr_stored_type_only(
deflate_state *s
)
{
send_bits(s, (STORED_BLOCK << 1), 3);
bi_windup(s);
s->compressed_len = (s->compressed_len + 3) & ~7L;
}
/* ===========================================================================
* Send one empty static block to give enough lookahead for inflate.
* This takes 10 bits, of which 7 may remain in the bit buffer.
* The current inflate code requires 9 bits of lookahead. If the
* last two codes for the previous block (real code plus EOB) were coded
* on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
* the last real code. In this case we send two empty static blocks instead
* of one. (There are no problems if the previous block is stored or fixed.)
* To simplify the code, we assume the worst case of last real code encoded
* on one bit only.
*/
void zlib_tr_align(
deflate_state *s
)
{
send_bits(s, STATIC_TREES<<1, 3);
send_code(s, END_BLOCK, static_ltree);
s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
bi_flush(s);
/* Of the 10 bits for the empty block, we have already sent
* (10 - bi_valid) bits. The lookahead for the last real code (before
* the EOB of the previous block) was thus at least one plus the length
* of the EOB plus what we have just sent of the empty static block.
*/
if (1 + s->last_eob_len + 10 - s->bi_valid < 9) {
send_bits(s, STATIC_TREES<<1, 3);
send_code(s, END_BLOCK, static_ltree);
s->compressed_len += 10L;
bi_flush(s);
}
s->last_eob_len = 7;
}
/* ===========================================================================
* Determine the best encoding for the current block: dynamic trees, static
* trees or store, and output the encoded block to the zip file. This function
* returns the total compressed length for the file so far.
*/
ulg zlib_tr_flush_block(
deflate_state *s,
char *buf, /* input block, or NULL if too old */
ulg stored_len, /* length of input block */
int eof /* true if this is the last block for a file */
)
{
ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */
int max_blindex = 0; /* index of last bit length code of non zero freq */
/* Build the Huffman trees unless a stored block is forced */
if (s->level > 0) {
/* Check if the file is ascii or binary */
if (s->data_type == Z_UNKNOWN) set_data_type(s);
/* Construct the literal and distance trees */
build_tree(s, (tree_desc *)(&(s->l_desc)));
Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
s->static_len));
build_tree(s, (tree_desc *)(&(s->d_desc)));
Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
s->static_len));
/* At this point, opt_len and static_len are the total bit lengths of
* the compressed block data, excluding the tree representations.
*/
/* Build the bit length tree for the above two trees, and get the index
* in bl_order of the last bit length code to send.
*/
max_blindex = build_bl_tree(s);
/* Determine the best encoding. Compute first the block length in bytes*/
opt_lenb = (s->opt_len+3+7)>>3;
static_lenb = (s->static_len+3+7)>>3;
Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
s->last_lit));
if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
} else {
Assert(buf != (char*)0, "lost buf");
opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
}
/* If compression failed and this is the first and last block,
* and if the .zip file can be seeked (to rewrite the local header),
* the whole file is transformed into a stored file:
*/
#ifdef STORED_FILE_OK
# ifdef FORCE_STORED_FILE
if (eof && s->compressed_len == 0L) { /* force stored file */
# else
if (stored_len <= opt_lenb && eof && s->compressed_len==0L && seekable()) {
# endif
/* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */
if (buf == (char*)0) error ("block vanished");
copy_block(s, buf, (unsigned)stored_len, 0); /* without header */
s->compressed_len = stored_len << 3;
s->method = STORED;
} else
#endif /* STORED_FILE_OK */
#ifdef FORCE_STORED
if (buf != (char*)0) { /* force stored block */
#else
if (stored_len+4 <= opt_lenb && buf != (char*)0) {
/* 4: two words for the lengths */
#endif
/* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
* Otherwise we can't have processed more than WSIZE input bytes since
* the last block flush, because compression would have been
* successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
* transform a block into a stored block.
*/
zlib_tr_stored_block(s, buf, stored_len, eof);
#ifdef FORCE_STATIC
} else if (static_lenb >= 0) { /* force static trees */
#else
} else if (static_lenb == opt_lenb) {
#endif
send_bits(s, (STATIC_TREES<<1)+eof, 3);
compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree);
s->compressed_len += 3 + s->static_len;
} else {
send_bits(s, (DYN_TREES<<1)+eof, 3);
send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
max_blindex+1);
compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree);
s->compressed_len += 3 + s->opt_len;
}
Assert (s->compressed_len == s->bits_sent, "bad compressed size");
init_block(s);
if (eof) {
bi_windup(s);
s->compressed_len += 7; /* align on byte boundary */
}
Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
s->compressed_len-7*eof));
return s->compressed_len >> 3;
}
/* ===========================================================================
* Save the match info and tally the frequency counts. Return true if
* the current block must be flushed.
*/
int zlib_tr_tally(
deflate_state *s,
unsigned dist, /* distance of matched string */
unsigned lc /* match length-MIN_MATCH or unmatched char (if dist==0) */
)
{
s->d_buf[s->last_lit] = (ush)dist;
s->l_buf[s->last_lit++] = (uch)lc;
if (dist == 0) {
/* lc is the unmatched char */
s->dyn_ltree[lc].Freq++;
} else {
s->matches++;
/* Here, lc is the match length - MIN_MATCH */
dist--; /* dist = match distance - 1 */
Assert((ush)dist < (ush)MAX_DIST(s) &&
(ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
(ush)d_code(dist) < (ush)D_CODES, "zlib_tr_tally: bad match");
s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++;
s->dyn_dtree[d_code(dist)].Freq++;
}
/* Try to guess if it is profitable to stop the current block here */
if ((s->last_lit & 0xfff) == 0 && s->level > 2) {
/* Compute an upper bound for the compressed length */
ulg out_length = (ulg)s->last_lit*8L;
ulg in_length = (ulg)((long)s->strstart - s->block_start);
int dcode;
for (dcode = 0; dcode < D_CODES; dcode++) {
out_length += (ulg)s->dyn_dtree[dcode].Freq *
(5L+extra_dbits[dcode]);
}
out_length >>= 3;
Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
s->last_lit, in_length, out_length,
100L - out_length*100L/in_length));
if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;
}
return (s->last_lit == s->lit_bufsize-1);
/* We avoid equality with lit_bufsize because of wraparound at 64K
* on 16 bit machines and because stored blocks are restricted to
* 64K-1 bytes.
*/
}
/* ===========================================================================
* Send the block data compressed using the given Huffman trees
*/
static void compress_block(
deflate_state *s,
ct_data *ltree, /* literal tree */
ct_data *dtree /* distance tree */
)
{
unsigned dist; /* distance of matched string */
int lc; /* match length or unmatched char (if dist == 0) */
unsigned lx = 0; /* running index in l_buf */
unsigned code; /* the code to send */
int extra; /* number of extra bits to send */
if (s->last_lit != 0) do {
dist = s->d_buf[lx];
lc = s->l_buf[lx++];
if (dist == 0) {
send_code(s, lc, ltree); /* send a literal byte */
Tracecv(isgraph(lc), (stderr," '%c' ", lc));
} else {
/* Here, lc is the match length - MIN_MATCH */
code = length_code[lc];
send_code(s, code+LITERALS+1, ltree); /* send the length code */
extra = extra_lbits[code];
if (extra != 0) {
lc -= base_length[code];
send_bits(s, lc, extra); /* send the extra length bits */
}
dist--; /* dist is now the match distance - 1 */
code = d_code(dist);
Assert (code < D_CODES, "bad d_code");
send_code(s, code, dtree); /* send the distance code */
extra = extra_dbits[code];
if (extra != 0) {
dist -= base_dist[code];
send_bits(s, dist, extra); /* send the extra distance bits */
}
} /* literal or match pair ? */
/* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow");
} while (lx < s->last_lit);
send_code(s, END_BLOCK, ltree);
s->last_eob_len = ltree[END_BLOCK].Len;
}
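/* Worked example (illustrative): a match of length 10 at distance 300 is
 * emitted by the loop above as literal/length symbol 264 (length code 7,
 * no extra length bits), then distance code 16 followed by 7 extra bits
 * holding dist - base_dist[16] = 299 - 256 = 43.
 */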
/* ===========================================================================
* Set the data type to ASCII or BINARY, using a crude approximation:
* binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
* IN assertion: the fields freq of dyn_ltree are set and the total of all
* frequencies does not exceed 64K (to fit in an int on 16 bit machines).
*/
static void set_data_type(
deflate_state *s
)
{
int n = 0;
unsigned ascii_freq = 0;
unsigned bin_freq = 0;
while (n < 7) bin_freq += s->dyn_ltree[n++].Freq;
while (n < 128) ascii_freq += s->dyn_ltree[n++].Freq;
while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq;
s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
}
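/* Note (illustrative): the 20% threshold is implemented above as
 * bin_freq > ascii_freq / 4, since bin > ascii / 4 is equivalent to
 * bin / (bin + ascii) > 1/5.
 */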
/* ===========================================================================
* Copy a stored block, storing first the length and its
* one's complement if requested.
*/
static void copy_block(
deflate_state *s,
char *buf, /* the input data */
unsigned len, /* its length */
int header /* true if block header must be written */
)
{
bi_windup(s); /* align on byte boundary */
s->last_eob_len = 8; /* enough lookahead for inflate */
if (header) {
put_short(s, (ush)len);
put_short(s, (ush)~len);
#ifdef DEBUG_ZLIB
s->bits_sent += 2*16;
#endif
}
#ifdef DEBUG_ZLIB
s->bits_sent += (ulg)len<<3;
#endif
/* bundle up the put_byte(s, *buf++) calls */
memcpy(&s->pending_buf[s->pending], buf, len);
s->pending += len;
}
| linux-master | lib/zlib_deflate/deftree.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/lib/zlib_deflate/deflate_syms.c
*
* Exported symbols for the deflate functionality.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/zlib.h>
EXPORT_SYMBOL(zlib_deflate_workspacesize);
EXPORT_SYMBOL(zlib_deflate_dfltcc_enabled);
EXPORT_SYMBOL(zlib_deflate);
EXPORT_SYMBOL(zlib_deflateInit2);
EXPORT_SYMBOL(zlib_deflateEnd);
EXPORT_SYMBOL(zlib_deflateReset);
MODULE_LICENSE("GPL");
| linux-master | lib/zlib_deflate/deflate_syms.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017-2019 Linaro Ltd <[email protected]>
*/
#include <crypto/aes.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <asm/unaligned.h>
/*
* Emit the sbox as volatile const to prevent the compiler from doing
* constant folding on sbox references involving fixed indexes.
*/
static volatile const u8 __cacheline_aligned aes_sbox[] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16,
};
static volatile const u8 __cacheline_aligned aes_inv_sbox[] = {
0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38,
0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d,
0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2,
0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,
0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda,
0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a,
0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,
0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea,
0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85,
0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,
0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20,
0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31,
0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,
0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0,
0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26,
0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d,
};
extern const u8 crypto_aes_sbox[256] __alias(aes_sbox);
extern const u8 crypto_aes_inv_sbox[256] __alias(aes_inv_sbox);
EXPORT_SYMBOL(crypto_aes_sbox);
EXPORT_SYMBOL(crypto_aes_inv_sbox);
static u32 mul_by_x(u32 w)
{
u32 x = w & 0x7f7f7f7f;
u32 y = w & 0x80808080;
/* multiply by polynomial 'x' (0b10) in GF(2^8) */
return (x << 1) ^ (y >> 7) * 0x1b;
}
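/* Worked example (illustrative): applied to the single byte 0x57,
 * mul_by_x() returns 0xae (a plain shift, since the top bit was clear),
 * while 0x80 becomes 0x1b because the shifted-out bit is reduced modulo
 * the AES polynomial x^8 + x^4 + x^3 + x + 1; both agree with the xtime()
 * examples in FIPS-197.
 */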
static u32 mul_by_x2(u32 w)
{
u32 x = w & 0x3f3f3f3f;
u32 y = w & 0x80808080;
u32 z = w & 0x40404040;
/* multiply by polynomial 'x^2' (0b100) in GF(2^8) */
return (x << 2) ^ (y >> 7) * 0x36 ^ (z >> 6) * 0x1b;
}
static u32 mix_columns(u32 x)
{
/*
* Perform the following matrix multiplication in GF(2^8)
*
* | 0x2 0x3 0x1 0x1 | | x[0] |
* | 0x1 0x2 0x3 0x1 | | x[1] |
* | 0x1 0x1 0x2 0x3 | x | x[2] |
* | 0x3 0x1 0x1 0x2 | | x[3] |
*/
u32 y = mul_by_x(x) ^ ror32(x, 16);
return y ^ ror32(x ^ y, 8);
}
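/* Derivation sketch (illustrative): with byte i of the word holding state
 * byte a[i] (indices mod 4),
 *
 *	y = mul_by_x(x) ^ ror32(x, 16)	has byte i = 2*a[i] ^ a[i+2]
 *	x ^ y				has byte i = 3*a[i] ^ a[i+2]
 *	ror32(x ^ y, 8)			has byte i = 3*a[i+1] ^ a[i+3]
 *
 * so y ^ ror32(x ^ y, 8) has byte i = 2*a[i] ^ 3*a[i+1] ^ a[i+2] ^ a[i+3],
 * which is row i of the MixColumns matrix shown above.
 */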
static u32 inv_mix_columns(u32 x)
{
/*
* Perform the following matrix multiplication in GF(2^8)
*
* | 0xe 0xb 0xd 0x9 | | x[0] |
* | 0x9 0xe 0xb 0xd | | x[1] |
* | 0xd 0x9 0xe 0xb | x | x[2] |
* | 0xb 0xd 0x9 0xe | | x[3] |
*
* which can conveniently be reduced to
*
* | 0x2 0x3 0x1 0x1 | | 0x5 0x0 0x4 0x0 | | x[0] |
* | 0x1 0x2 0x3 0x1 | | 0x0 0x5 0x0 0x4 | | x[1] |
* | 0x1 0x1 0x2 0x3 | x | 0x4 0x0 0x5 0x0 | x | x[2] |
* | 0x3 0x1 0x1 0x2 | | 0x0 0x4 0x0 0x5 | | x[3] |
*/
u32 y = mul_by_x2(x);
return mix_columns(x ^ y ^ ror32(y, 16));
}
static __always_inline u32 subshift(u32 in[], int pos)
{
return (aes_sbox[in[pos] & 0xff]) ^
(aes_sbox[(in[(pos + 1) % 4] >> 8) & 0xff] << 8) ^
(aes_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^
(aes_sbox[(in[(pos + 3) % 4] >> 24) & 0xff] << 24);
}
static __always_inline u32 inv_subshift(u32 in[], int pos)
{
return (aes_inv_sbox[in[pos] & 0xff]) ^
(aes_inv_sbox[(in[(pos + 3) % 4] >> 8) & 0xff] << 8) ^
(aes_inv_sbox[(in[(pos + 2) % 4] >> 16) & 0xff] << 16) ^
(aes_inv_sbox[(in[(pos + 1) % 4] >> 24) & 0xff] << 24);
}
static u32 subw(u32 in)
{
return (aes_sbox[in & 0xff]) ^
(aes_sbox[(in >> 8) & 0xff] << 8) ^
(aes_sbox[(in >> 16) & 0xff] << 16) ^
(aes_sbox[(in >> 24) & 0xff] << 24);
}
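/* Illustrative: subw(0x00000000) = 0x63636363, since aes_sbox[0x00] = 0x63. */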
/**
* aes_expandkey - Expands the AES key as described in FIPS-197
* @ctx: The location where the computed key will be stored.
* @in_key: The supplied key.
* @key_len: The length of the supplied key.
*
* Returns 0 on success. The function fails only if an invalid key size (or
* pointer) is supplied.
 * The expanded key size is 240 bytes (a maximum of 14 rounds, each with a
 * unique 16-byte round key, plus the 16-byte key used before the first
 * round).
* The decryption key is prepared for the "Equivalent Inverse Cipher" as
* described in FIPS-197. The first slot (16 bytes) of each key (enc or dec) is
* for the initial combination, the second slot for the first round and so on.
*/
int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len)
{
u32 kwords = key_len / sizeof(u32);
u32 rc, i, j;
int err;
err = aes_check_keylen(key_len);
if (err)
return err;
ctx->key_length = key_len;
for (i = 0; i < kwords; i++)
ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));
for (i = 0, rc = 1; i < 10; i++, rc = mul_by_x(rc)) {
u32 *rki = ctx->key_enc + (i * kwords);
u32 *rko = rki + kwords;
rko[0] = ror32(subw(rki[kwords - 1]), 8) ^ rc ^ rki[0];
rko[1] = rko[0] ^ rki[1];
rko[2] = rko[1] ^ rki[2];
rko[3] = rko[2] ^ rki[3];
if (key_len == AES_KEYSIZE_192) {
if (i >= 7)
break;
rko[4] = rko[3] ^ rki[4];
rko[5] = rko[4] ^ rki[5];
} else if (key_len == AES_KEYSIZE_256) {
if (i >= 6)
break;
rko[4] = subw(rko[3]) ^ rki[4];
rko[5] = rko[4] ^ rki[5];
rko[6] = rko[5] ^ rki[6];
rko[7] = rko[6] ^ rki[7];
}
}
/*
* Generate the decryption keys for the Equivalent Inverse Cipher.
* This involves reversing the order of the round keys, and applying
* the Inverse Mix Columns transformation to all but the first and
* the last one.
*/
ctx->key_dec[0] = ctx->key_enc[key_len + 24];
ctx->key_dec[1] = ctx->key_enc[key_len + 25];
ctx->key_dec[2] = ctx->key_enc[key_len + 26];
ctx->key_dec[3] = ctx->key_enc[key_len + 27];
for (i = 4, j = key_len + 20; j > 0; i += 4, j -= 4) {
ctx->key_dec[i] = inv_mix_columns(ctx->key_enc[j]);
ctx->key_dec[i + 1] = inv_mix_columns(ctx->key_enc[j + 1]);
ctx->key_dec[i + 2] = inv_mix_columns(ctx->key_enc[j + 2]);
ctx->key_dec[i + 3] = inv_mix_columns(ctx->key_enc[j + 3]);
}
ctx->key_dec[i] = ctx->key_enc[0];
ctx->key_dec[i + 1] = ctx->key_enc[1];
ctx->key_dec[i + 2] = ctx->key_enc[2];
ctx->key_dec[i + 3] = ctx->key_enc[3];
return 0;
}
EXPORT_SYMBOL(aes_expandkey);
/**
* aes_encrypt - Encrypt a single AES block
* @ctx: Context struct containing the key schedule
* @out: Buffer to store the ciphertext
* @in: Buffer containing the plaintext
*/
void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in)
{
const u32 *rkp = ctx->key_enc + 4;
int rounds = 6 + ctx->key_length / 4;
u32 st0[4], st1[4];
int round;
st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
st0[1] = ctx->key_enc[1] ^ get_unaligned_le32(in + 4);
st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);
st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);
/*
* Force the compiler to emit data independent Sbox references,
* by xoring the input with Sbox values that are known to add up
* to zero. This pulls the entire Sbox into the D-cache before any
* data dependent lookups are done.
*/
st0[0] ^= aes_sbox[ 0] ^ aes_sbox[ 64] ^ aes_sbox[134] ^ aes_sbox[195];
st0[1] ^= aes_sbox[16] ^ aes_sbox[ 82] ^ aes_sbox[158] ^ aes_sbox[221];
st0[2] ^= aes_sbox[32] ^ aes_sbox[ 96] ^ aes_sbox[160] ^ aes_sbox[234];
st0[3] ^= aes_sbox[48] ^ aes_sbox[112] ^ aes_sbox[186] ^ aes_sbox[241];
for (round = 0;; round += 2, rkp += 8) {
st1[0] = mix_columns(subshift(st0, 0)) ^ rkp[0];
st1[1] = mix_columns(subshift(st0, 1)) ^ rkp[1];
st1[2] = mix_columns(subshift(st0, 2)) ^ rkp[2];
st1[3] = mix_columns(subshift(st0, 3)) ^ rkp[3];
if (round == rounds - 2)
break;
st0[0] = mix_columns(subshift(st1, 0)) ^ rkp[4];
st0[1] = mix_columns(subshift(st1, 1)) ^ rkp[5];
st0[2] = mix_columns(subshift(st1, 2)) ^ rkp[6];
st0[3] = mix_columns(subshift(st1, 3)) ^ rkp[7];
}
put_unaligned_le32(subshift(st1, 0) ^ rkp[4], out);
put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4);
put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8);
put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12);
}
EXPORT_SYMBOL(aes_encrypt);
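/* Usage sketch (illustrative, not part of the original source): expanding a
 * key and encrypting one block reproduces the AES-128 example of FIPS-197
 * appendix C.1:
 *
 *	static const u8 key[16] = {
 *		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
 *		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
 *	};
 *	static const u8 pt[16] = {
 *		0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
 *		0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
 *	};
 *	struct crypto_aes_ctx ctx;
 *	u8 ct[16];
 *
 *	if (!aes_expandkey(&ctx, key, sizeof(key)))
 *		aes_encrypt(&ctx, ct, pt);
 *
 * ct then holds 69 c4 e0 d8 6a 7b 04 30 d8 cd b7 80 70 b4 c5 5a.
 */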
/**
* aes_decrypt - Decrypt a single AES block
* @ctx: Context struct containing the key schedule
* @out: Buffer to store the plaintext
* @in: Buffer containing the ciphertext
*/
void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in)
{
const u32 *rkp = ctx->key_dec + 4;
int rounds = 6 + ctx->key_length / 4;
u32 st0[4], st1[4];
int round;
st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
st0[1] = ctx->key_dec[1] ^ get_unaligned_le32(in + 4);
st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8);
st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12);
/*
* Force the compiler to emit data independent Sbox references,
* by xoring the input with Sbox values that are known to add up
* to zero. This pulls the entire Sbox into the D-cache before any
* data dependent lookups are done.
*/
st0[0] ^= aes_inv_sbox[ 0] ^ aes_inv_sbox[ 64] ^ aes_inv_sbox[129] ^ aes_inv_sbox[200];
st0[1] ^= aes_inv_sbox[16] ^ aes_inv_sbox[ 83] ^ aes_inv_sbox[150] ^ aes_inv_sbox[212];
st0[2] ^= aes_inv_sbox[32] ^ aes_inv_sbox[ 96] ^ aes_inv_sbox[160] ^ aes_inv_sbox[236];
st0[3] ^= aes_inv_sbox[48] ^ aes_inv_sbox[112] ^ aes_inv_sbox[187] ^ aes_inv_sbox[247];
for (round = 0;; round += 2, rkp += 8) {
st1[0] = inv_mix_columns(inv_subshift(st0, 0)) ^ rkp[0];
st1[1] = inv_mix_columns(inv_subshift(st0, 1)) ^ rkp[1];
st1[2] = inv_mix_columns(inv_subshift(st0, 2)) ^ rkp[2];
st1[3] = inv_mix_columns(inv_subshift(st0, 3)) ^ rkp[3];
if (round == rounds - 2)
break;
st0[0] = inv_mix_columns(inv_subshift(st1, 0)) ^ rkp[4];
st0[1] = inv_mix_columns(inv_subshift(st1, 1)) ^ rkp[5];
st0[2] = inv_mix_columns(inv_subshift(st1, 2)) ^ rkp[6];
st0[3] = inv_mix_columns(inv_subshift(st1, 3)) ^ rkp[7];
}
put_unaligned_le32(inv_subshift(st1, 0) ^ rkp[4], out);
put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4);
put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8);
put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12);
}
EXPORT_SYMBOL(aes_decrypt);
MODULE_DESCRIPTION("Generic AES library");
MODULE_AUTHOR("Ard Biesheuvel <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | lib/crypto/aes.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include <crypto/internal/blake2s.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/string.h>
/*
* blake2s_testvecs[] generated with the program below (using libb2-dev and
* libssl-dev [OpenSSL])
*
* #include <blake2.h>
* #include <stdint.h>
* #include <stdio.h>
*
* #include <openssl/evp.h>
*
* #define BLAKE2S_TESTVEC_COUNT 256
*
* static void print_vec(const uint8_t vec[], int len)
* {
* int i;
*
* printf(" { ");
* for (i = 0; i < len; i++) {
* if (i && (i % 12) == 0)
* printf("\n ");
* printf("0x%02x, ", vec[i]);
* }
* printf("},\n");
* }
*
* int main(void)
* {
* uint8_t key[BLAKE2S_KEYBYTES];
* uint8_t buf[BLAKE2S_TESTVEC_COUNT];
* uint8_t hash[BLAKE2S_OUTBYTES];
* int i, j;
*
* key[0] = key[1] = 1;
* for (i = 2; i < BLAKE2S_KEYBYTES; ++i)
* key[i] = key[i - 2] + key[i - 1];
*
* for (i = 0; i < BLAKE2S_TESTVEC_COUNT; ++i)
* buf[i] = (uint8_t)i;
*
* printf("static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {\n");
*
* for (i = 0; i < BLAKE2S_TESTVEC_COUNT; ++i) {
* int outlen = 1 + i % BLAKE2S_OUTBYTES;
* int keylen = (13 * i) % (BLAKE2S_KEYBYTES + 1);
*
* blake2s(hash, buf, key + BLAKE2S_KEYBYTES - keylen, outlen, i,
* keylen);
* print_vec(hash, outlen);
* }
* printf("};\n\n");
*
* return 0;
*}
*/
static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {
{ 0xa1, },
{ 0x7c, 0x89, },
{ 0x74, 0x0e, 0xd4, },
{ 0x47, 0x0c, 0x21, 0x15, },
{ 0x18, 0xd6, 0x9c, 0xa6, 0xc4, },
{ 0x13, 0x5d, 0x16, 0x63, 0x2e, 0xf9, },
{ 0x2c, 0xb5, 0x04, 0xb7, 0x99, 0xe2, 0x73, },
{ 0x9a, 0x0f, 0xd2, 0x39, 0xd6, 0x68, 0x1b, 0x92, },
{ 0xc8, 0xde, 0x7a, 0xea, 0x2f, 0xf4, 0xd2, 0xe3, 0x2b, },
{ 0x5b, 0xf9, 0x43, 0x52, 0x0c, 0x12, 0xba, 0xb5, 0x93, 0x9f, },
{ 0xc6, 0x2c, 0x4e, 0x80, 0xfc, 0x32, 0x5b, 0x33, 0xb8, 0xb8, 0x0a, },
{ 0xa7, 0x5c, 0xfd, 0x3a, 0xcc, 0xbf, 0x90, 0xca, 0xb7, 0x97, 0xde, 0xd8, },
{ 0x66, 0xca, 0x3c, 0xc4, 0x19, 0xef, 0x92, 0x66, 0x3f, 0x21, 0x8f, 0xda,
0xb7, },
{ 0xba, 0xe5, 0xbb, 0x30, 0x25, 0x94, 0x6d, 0xc3, 0x89, 0x09, 0xc4, 0x25,
0x52, 0x3e, },
{ 0xa2, 0xef, 0x0e, 0x52, 0x0b, 0x5f, 0xa2, 0x01, 0x6d, 0x0a, 0x25, 0xbc,
0x57, 0xe2, 0x27, },
{ 0x4f, 0xe0, 0xf9, 0x52, 0x12, 0xda, 0x84, 0xb7, 0xab, 0xae, 0xb0, 0xa6,
0x47, 0x2a, 0xc7, 0xf5, },
{ 0x56, 0xe7, 0xa8, 0x1c, 0x4c, 0xca, 0xed, 0x90, 0x31, 0xec, 0x87, 0x43,
0xe7, 0x72, 0x08, 0xec, 0xbe, },
{ 0x7e, 0xdf, 0x80, 0x1c, 0x93, 0x33, 0xfd, 0x53, 0x44, 0xba, 0xfd, 0x96,
0xe1, 0xbb, 0xb5, 0x65, 0xa5, 0x00, },
{ 0xec, 0x6b, 0xed, 0xf7, 0x7b, 0x62, 0x1d, 0x7d, 0xf4, 0x82, 0xf3, 0x1e,
0x18, 0xff, 0x2b, 0xc4, 0x06, 0x20, 0x2a, },
{ 0x74, 0x98, 0xd7, 0x68, 0x63, 0xed, 0x87, 0xe4, 0x5d, 0x8d, 0x9e, 0x1d,
0xfd, 0x2a, 0xbb, 0x86, 0xac, 0xe9, 0x2a, 0x89, },
{ 0x89, 0xc3, 0x88, 0xce, 0x2b, 0x33, 0x1e, 0x10, 0xd1, 0x37, 0x20, 0x86,
0x28, 0x43, 0x70, 0xd9, 0xfb, 0x96, 0xd9, 0xb5, 0xd3, },
{ 0xcb, 0x56, 0x74, 0x41, 0x8d, 0x80, 0x01, 0x9a, 0x6b, 0x38, 0xe1, 0x41,
0xad, 0x9c, 0x62, 0x74, 0xce, 0x35, 0xd5, 0x6c, 0x89, 0x6e, },
{ 0x79, 0xaf, 0x94, 0x59, 0x99, 0x26, 0xe1, 0xc9, 0x34, 0xfe, 0x7c, 0x22,
0xf7, 0x43, 0xd7, 0x65, 0xd4, 0x48, 0x18, 0xac, 0x3d, 0xfd, 0x93, },
{ 0x85, 0x0d, 0xff, 0xb8, 0x3e, 0x87, 0x41, 0xb0, 0x95, 0xd3, 0x3d, 0x00,
0x47, 0x55, 0x9e, 0xd2, 0x69, 0xea, 0xbf, 0xe9, 0x7a, 0x2d, 0x61, 0x45, },
{ 0x03, 0xe0, 0x85, 0xec, 0x54, 0xb5, 0x16, 0x53, 0xa8, 0xc4, 0x71, 0xe9,
0x6a, 0xe7, 0xcb, 0xc4, 0x15, 0x02, 0xfc, 0x34, 0xa4, 0xa4, 0x28, 0x13,
0xd1, },
{ 0xe3, 0x34, 0x4b, 0xe1, 0xd0, 0x4b, 0x55, 0x61, 0x8f, 0xc0, 0x24, 0x05,
0xe6, 0xe0, 0x3d, 0x70, 0x24, 0x4d, 0xda, 0xb8, 0x91, 0x05, 0x29, 0x07,
0x01, 0x3e, },
{ 0x61, 0xff, 0x01, 0x72, 0xb1, 0x4d, 0xf6, 0xfe, 0xd1, 0xd1, 0x08, 0x74,
0xe6, 0x91, 0x44, 0xeb, 0x61, 0xda, 0x40, 0xaf, 0xfc, 0x8c, 0x91, 0x6b,
0xec, 0x13, 0xed, },
{ 0xd4, 0x40, 0xd2, 0xa0, 0x7f, 0xc1, 0x58, 0x0c, 0x85, 0xa0, 0x86, 0xc7,
0x86, 0xb9, 0x61, 0xc9, 0xea, 0x19, 0x86, 0x1f, 0xab, 0x07, 0xce, 0x37,
0x72, 0x67, 0x09, 0xfc, },
{ 0x9e, 0xf8, 0x18, 0x67, 0x93, 0x10, 0x9b, 0x39, 0x75, 0xe8, 0x8b, 0x38,
0x82, 0x7d, 0xb8, 0xb7, 0xa5, 0xaf, 0xe6, 0x6a, 0x22, 0x5e, 0x1f, 0x9c,
0x95, 0x29, 0x19, 0xf2, 0x4b, },
{ 0xc8, 0x62, 0x25, 0xf5, 0x98, 0xc9, 0xea, 0xe5, 0x29, 0x3a, 0xd3, 0x22,
0xeb, 0xeb, 0x07, 0x7c, 0x15, 0x07, 0xee, 0x15, 0x61, 0xbb, 0x05, 0x30,
0x99, 0x7f, 0x11, 0xf6, 0x0a, 0x1d, },
{ 0x68, 0x70, 0xf7, 0x90, 0xa1, 0x8b, 0x1f, 0x0f, 0xbb, 0xce, 0xd2, 0x0e,
0x33, 0x1f, 0x7f, 0xa9, 0x78, 0xa8, 0xa6, 0x81, 0x66, 0xab, 0x8d, 0xcd,
0x58, 0x55, 0x3a, 0x0b, 0x7a, 0xdb, 0xb5, },
{ 0xdd, 0x35, 0xd2, 0xb4, 0xf6, 0xc7, 0xea, 0xab, 0x64, 0x24, 0x4e, 0xfe,
0xe5, 0x3d, 0x4e, 0x95, 0x8b, 0x6d, 0x6c, 0xbc, 0xb0, 0xf8, 0x88, 0x61,
0x09, 0xb7, 0x78, 0xa3, 0x31, 0xfe, 0xd9, 0x2f, },
{ 0x0a, },
{ 0x6e, 0xd4, },
{ 0x64, 0xe9, 0xd1, },
{ 0x30, 0xdd, 0x71, 0xef, },
{ 0x11, 0xb5, 0x0c, 0x87, 0xc9, },
{ 0x06, 0x1c, 0x6d, 0x04, 0x82, 0xd0, },
{ 0x5c, 0x42, 0x0b, 0xee, 0xc5, 0x9c, 0xb2, },
{ 0xe8, 0x29, 0xd6, 0xb4, 0x5d, 0xf7, 0x2b, 0x93, },
{ 0x18, 0xca, 0x27, 0x72, 0x43, 0x39, 0x16, 0xbc, 0x6a, },
{ 0x39, 0x8f, 0xfd, 0x64, 0xf5, 0x57, 0x23, 0xb0, 0x45, 0xf8, },
{ 0xbb, 0x3a, 0x78, 0x6b, 0x02, 0x1d, 0x0b, 0x16, 0xe3, 0xb2, 0x9a, },
{ 0xb8, 0xb4, 0x0b, 0xe5, 0xd4, 0x1d, 0x0d, 0x85, 0x49, 0x91, 0x35, 0xfa, },
{ 0x6d, 0x48, 0x2a, 0x0c, 0x42, 0x08, 0xbd, 0xa9, 0x78, 0x6f, 0x18, 0xaf,
0xe2, },
{ 0x10, 0x45, 0xd4, 0x58, 0x88, 0xec, 0x4e, 0x1e, 0xf6, 0x14, 0x92, 0x64,
0x7e, 0xb0, },
{ 0x8b, 0x0b, 0x95, 0xee, 0x92, 0xc6, 0x3b, 0x91, 0xf1, 0x1e, 0xeb, 0x51,
0x98, 0x0a, 0x8d, },
{ 0xa3, 0x50, 0x4d, 0xa5, 0x1d, 0x03, 0x68, 0xe9, 0x57, 0x78, 0xd6, 0x04,
0xf1, 0xc3, 0x94, 0xd8, },
{ 0xb8, 0x66, 0x6e, 0xdd, 0x46, 0x15, 0xae, 0x3d, 0x83, 0x7e, 0xcf, 0xe7,
0x2c, 0xe8, 0x8f, 0xc7, 0x34, },
{ 0x2e, 0xc0, 0x1f, 0x29, 0xea, 0xf6, 0xb9, 0xe2, 0xc2, 0x93, 0xeb, 0x41,
0x0d, 0xf0, 0x0a, 0x13, 0x0e, 0xa2, },
{ 0x71, 0xb8, 0x33, 0xa9, 0x1b, 0xac, 0xf1, 0xb5, 0x42, 0x8f, 0x5e, 0x81,
0x34, 0x43, 0xb7, 0xa4, 0x18, 0x5c, 0x47, },
{ 0xda, 0x45, 0xb8, 0x2e, 0x82, 0x1e, 0xc0, 0x59, 0x77, 0x9d, 0xfa, 0xb4,
0x1c, 0x5e, 0xa0, 0x2b, 0x33, 0x96, 0x5a, 0x58, },
{ 0xe3, 0x09, 0x05, 0xa9, 0xeb, 0x48, 0x13, 0xad, 0x71, 0x88, 0x81, 0x9a,
0x3e, 0x2c, 0xe1, 0x23, 0x99, 0x13, 0x35, 0x9f, 0xb5, },
{ 0xb7, 0x86, 0x2d, 0x16, 0xe1, 0x04, 0x00, 0x47, 0x47, 0x61, 0x31, 0xfb,
0x14, 0xac, 0xd8, 0xe9, 0xe3, 0x49, 0xbd, 0xf7, 0x9c, 0x3f, },
{ 0x7f, 0xd9, 0x95, 0xa8, 0xa7, 0xa0, 0xcc, 0xba, 0xef, 0xb1, 0x0a, 0xa9,
0x21, 0x62, 0x08, 0x0f, 0x1b, 0xff, 0x7b, 0x9d, 0xae, 0xb2, 0x95, },
{ 0x85, 0x99, 0xea, 0x33, 0xe0, 0x56, 0xff, 0x13, 0xc6, 0x61, 0x8c, 0xf9,
0x57, 0x05, 0x03, 0x11, 0xf9, 0xfb, 0x3a, 0xf7, 0xce, 0xbb, 0x52, 0x30, },
{ 0xb2, 0x72, 0x9c, 0xf8, 0x77, 0x4e, 0x8f, 0x6b, 0x01, 0x6c, 0xff, 0x4e,
0x4f, 0x02, 0xd2, 0xbc, 0xeb, 0x51, 0x28, 0x99, 0x50, 0xab, 0xc4, 0x42,
0xe3, },
{ 0x8b, 0x0a, 0xb5, 0x90, 0x8f, 0xf5, 0x7b, 0xdd, 0xba, 0x47, 0x37, 0xc9,
0x2a, 0xd5, 0x4b, 0x25, 0x08, 0x8b, 0x02, 0x17, 0xa7, 0x9e, 0x6b, 0x6e,
0xe3, 0x90, },
{ 0x90, 0xdd, 0xf7, 0x75, 0xa7, 0xa3, 0x99, 0x5e, 0x5b, 0x7d, 0x75, 0xc3,
0x39, 0x6b, 0xa0, 0xe2, 0x44, 0x53, 0xb1, 0x9e, 0xc8, 0xf1, 0x77, 0x10,
0x58, 0x06, 0x9a, },
{ 0x99, 0x52, 0xf0, 0x49, 0xa8, 0x8c, 0xec, 0xa6, 0x97, 0x32, 0x13, 0xb5,
0xf7, 0xa3, 0x8e, 0xfb, 0x4b, 0x59, 0x31, 0x3d, 0x01, 0x59, 0x98, 0x5d,
0x53, 0x03, 0x1a, 0x39, },
{ 0x9f, 0xe0, 0xc2, 0xe5, 0x5d, 0x93, 0xd6, 0x9b, 0x47, 0x8f, 0x9b, 0xe0,
0x26, 0x35, 0x84, 0x20, 0x1d, 0xc5, 0x53, 0x10, 0x0f, 0x22, 0xb9, 0xb5,
0xd4, 0x36, 0xb1, 0xac, 0x73, },
{ 0x30, 0x32, 0x20, 0x3b, 0x10, 0x28, 0xec, 0x1f, 0x4f, 0x9b, 0x47, 0x59,
0xeb, 0x7b, 0xee, 0x45, 0xfb, 0x0c, 0x49, 0xd8, 0x3d, 0x69, 0xbd, 0x90,
0x2c, 0xf0, 0x9e, 0x8d, 0xbf, 0xd5, },
{ 0x2a, 0x37, 0x73, 0x7f, 0xf9, 0x96, 0x19, 0xaa, 0x25, 0xd8, 0x13, 0x28,
0x01, 0x29, 0x89, 0xdf, 0x6e, 0x0c, 0x9b, 0x43, 0x44, 0x51, 0xe9, 0x75,
0x26, 0x0c, 0xb7, 0x87, 0x66, 0x0b, 0x5f, },
{ 0x23, 0xdf, 0x96, 0x68, 0x91, 0x86, 0xd0, 0x93, 0x55, 0x33, 0x24, 0xf6,
0xba, 0x08, 0x75, 0x5b, 0x59, 0x11, 0x69, 0xb8, 0xb9, 0xe5, 0x2c, 0x77,
0x02, 0xf6, 0x47, 0xee, 0x81, 0xdd, 0xb9, 0x06, },
{ 0x9d, },
{ 0x9d, 0x7d, },
{ 0xfd, 0xc3, 0xda, },
{ 0xe8, 0x82, 0xcd, 0x21, },
{ 0xc3, 0x1d, 0x42, 0x4c, 0x74, },
{ 0xe9, 0xda, 0xf1, 0xa2, 0xe5, 0x7c, },
{ 0x52, 0xb8, 0x6f, 0x81, 0x5c, 0x3a, 0x4c, },
{ 0x5b, 0x39, 0x26, 0xfc, 0x92, 0x5e, 0xe0, 0x49, },
{ 0x59, 0xe4, 0x7c, 0x93, 0x1c, 0xf9, 0x28, 0x93, 0xde, },
{ 0xde, 0xdf, 0xb2, 0x43, 0x61, 0x0b, 0x86, 0x16, 0x4c, 0x2e, },
{ 0x14, 0x8f, 0x75, 0x51, 0xaf, 0xb9, 0xee, 0x51, 0x5a, 0xae, 0x23, },
{ 0x43, 0x5f, 0x50, 0xd5, 0x70, 0xb0, 0x5b, 0x87, 0xf5, 0xd9, 0xb3, 0x6d, },
{ 0x66, 0x0a, 0x64, 0x93, 0x79, 0x71, 0x94, 0x40, 0xb7, 0x68, 0x2d, 0xd3,
0x63, },
{ 0x15, 0x00, 0xc4, 0x0c, 0x7d, 0x1b, 0x10, 0xa9, 0x73, 0x1b, 0x90, 0x6f,
0xe6, 0xa9, },
{ 0x34, 0x75, 0xf3, 0x86, 0x8f, 0x56, 0xcf, 0x2a, 0x0a, 0xf2, 0x62, 0x0a,
0xf6, 0x0e, 0x20, },
{ 0xb1, 0xde, 0xc9, 0xf5, 0xdb, 0xf3, 0x2f, 0x4c, 0xd6, 0x41, 0x7d, 0x39,
0x18, 0x3e, 0xc7, 0xc3, },
{ 0xc5, 0x89, 0xb2, 0xf8, 0xb8, 0xc0, 0xa3, 0xb9, 0x3b, 0x10, 0x6d, 0x7c,
0x92, 0xfc, 0x7f, 0x34, 0x41, },
{ 0xc4, 0xd8, 0xef, 0xba, 0xef, 0xd2, 0xaa, 0xc5, 0x6c, 0x8e, 0x3e, 0xbb,
0x12, 0xfc, 0x0f, 0x72, 0xbf, 0x0f, },
{ 0xdd, 0x91, 0xd1, 0x15, 0x9e, 0x7d, 0xf8, 0xc1, 0xb9, 0x14, 0x63, 0x96,
0xb5, 0xcb, 0x83, 0x1d, 0x35, 0x1c, 0xec, },
{ 0xa9, 0xf8, 0x52, 0xc9, 0x67, 0x76, 0x2b, 0xad, 0xfb, 0xd8, 0x3a, 0xa6,
0x74, 0x02, 0xae, 0xb8, 0x25, 0x2c, 0x63, 0x49, },
{ 0x77, 0x1f, 0x66, 0x70, 0xfd, 0x50, 0x29, 0xaa, 0xeb, 0xdc, 0xee, 0xba,
0x75, 0x98, 0xdc, 0x93, 0x12, 0x3f, 0xdc, 0x7c, 0x38, },
{ 0xe2, 0xe1, 0x89, 0x5c, 0x37, 0x38, 0x6a, 0xa3, 0x40, 0xac, 0x3f, 0xb0,
0xca, 0xfc, 0xa7, 0xf3, 0xea, 0xf9, 0x0f, 0x5d, 0x8e, 0x39, },
{ 0x0f, 0x67, 0xc8, 0x38, 0x01, 0xb1, 0xb7, 0xb8, 0xa2, 0xe7, 0x0a, 0x6d,
0xd2, 0x63, 0x69, 0x9e, 0xcc, 0xf0, 0xf2, 0xbe, 0x9b, 0x98, 0xdd, },
{ 0x13, 0xe1, 0x36, 0x30, 0xfe, 0xc6, 0x01, 0x8a, 0xa1, 0x63, 0x96, 0x59,
0xc2, 0xa9, 0x68, 0x3f, 0x58, 0xd4, 0x19, 0x0c, 0x40, 0xf3, 0xde, 0x02, },
{ 0xa3, 0x9e, 0xce, 0xda, 0x42, 0xee, 0x8c, 0x6c, 0x5a, 0x7d, 0xdc, 0x89,
0x02, 0x77, 0xdd, 0xe7, 0x95, 0xbb, 0xff, 0x0d, 0xa4, 0xb5, 0x38, 0x1e,
0xaf, },
{ 0x9a, 0xf6, 0xb5, 0x9a, 0x4f, 0xa9, 0x4f, 0x2c, 0x35, 0x3c, 0x24, 0xdc,
0x97, 0x6f, 0xd9, 0xa1, 0x7d, 0x1a, 0x85, 0x0b, 0xf5, 0xda, 0x2e, 0xe7,
0xb1, 0x1d, },
{ 0x84, 0x1e, 0x8e, 0x3d, 0x45, 0xa5, 0xf2, 0x27, 0xf3, 0x31, 0xfe, 0xb9,
0xfb, 0xc5, 0x45, 0x99, 0x99, 0xdd, 0x93, 0x43, 0x02, 0xee, 0x58, 0xaf,
0xee, 0x6a, 0xbe, },
{ 0x07, 0x2f, 0xc0, 0xa2, 0x04, 0xc4, 0xab, 0x7c, 0x26, 0xbb, 0xa8, 0xd8,
0xe3, 0x1c, 0x75, 0x15, 0x64, 0x5d, 0x02, 0x6a, 0xf0, 0x86, 0xe9, 0xcd,
0x5c, 0xef, 0xa3, 0x25, },
{ 0x2f, 0x3b, 0x1f, 0xb5, 0x91, 0x8f, 0x86, 0xe0, 0xdc, 0x31, 0x48, 0xb6,
0xa1, 0x8c, 0xfd, 0x75, 0xbb, 0x7d, 0x3d, 0xc1, 0xf0, 0x10, 0x9a, 0xd8,
0x4b, 0x0e, 0xe3, 0x94, 0x9f, },
{ 0x29, 0xbb, 0x8f, 0x6c, 0xd1, 0xf2, 0xb6, 0xaf, 0xe5, 0xe3, 0x2d, 0xdc,
0x6f, 0xa4, 0x53, 0x88, 0xd8, 0xcf, 0x4d, 0x45, 0x42, 0x62, 0xdb, 0xdf,
0xf8, 0x45, 0xc2, 0x13, 0xec, 0x35, },
{ 0x06, 0x3c, 0xe3, 0x2c, 0x15, 0xc6, 0x43, 0x03, 0x81, 0xfb, 0x08, 0x76,
0x33, 0xcb, 0x02, 0xc1, 0xba, 0x33, 0xe5, 0xe0, 0xd1, 0x92, 0xa8, 0x46,
0x28, 0x3f, 0x3e, 0x9d, 0x2c, 0x44, 0x54, },
{ 0xea, 0xbb, 0x96, 0xf8, 0xd1, 0x8b, 0x04, 0x11, 0x40, 0x78, 0x42, 0x02,
0x19, 0xd1, 0xbc, 0x65, 0x92, 0xd3, 0xc3, 0xd6, 0xd9, 0x19, 0xe7, 0xc3,
0x40, 0x97, 0xbd, 0xd4, 0xed, 0xfa, 0x5e, 0x28, },
{ 0x02, },
{ 0x52, 0xa8, },
{ 0x38, 0x25, 0x0d, },
{ 0xe3, 0x04, 0xd4, 0x92, },
{ 0x97, 0xdb, 0xf7, 0x81, 0xca, },
{ 0x8a, 0x56, 0x9d, 0x62, 0x56, 0xcc, },
{ 0xa1, 0x8e, 0x3c, 0x72, 0x8f, 0x63, 0x03, },
{ 0xf7, 0xf3, 0x39, 0x09, 0x0a, 0xa1, 0xbb, 0x23, },
{ 0x6b, 0x03, 0xc0, 0xe9, 0xd9, 0x83, 0x05, 0x22, 0x01, },
{ 0x1b, 0x4b, 0xf5, 0xd6, 0x4f, 0x05, 0x75, 0x91, 0x4c, 0x7f, },
{ 0x4c, 0x8c, 0x25, 0x20, 0x21, 0xcb, 0xc2, 0x4b, 0x3a, 0x5b, 0x8d, },
{ 0x56, 0xe2, 0x77, 0xa0, 0xb6, 0x9f, 0x81, 0xec, 0x83, 0x75, 0xc4, 0xf9, },
{ 0x71, 0x70, 0x0f, 0xad, 0x4d, 0x35, 0x81, 0x9d, 0x88, 0x69, 0xf9, 0xaa,
0xd3, },
{ 0x50, 0x6e, 0x86, 0x6e, 0x43, 0xc0, 0xc2, 0x44, 0xc2, 0xe2, 0xa0, 0x1c,
0xb7, 0x9a, },
{ 0xe4, 0x7e, 0x72, 0xc6, 0x12, 0x8e, 0x7c, 0xfc, 0xbd, 0xe2, 0x08, 0x31,
0x3d, 0x47, 0x3d, },
{ 0x08, 0x97, 0x5b, 0x80, 0xae, 0xc4, 0x1d, 0x50, 0x77, 0xdf, 0x1f, 0xd0,
0x24, 0xf0, 0x17, 0xc0, },
{ 0x01, 0xb6, 0x29, 0xf4, 0xaf, 0x78, 0x5f, 0xb6, 0x91, 0xdd, 0x76, 0x76,
0xd2, 0xfd, 0x0c, 0x47, 0x40, },
{ 0xa1, 0xd8, 0x09, 0x97, 0x7a, 0xa6, 0xc8, 0x94, 0xf6, 0x91, 0x7b, 0xae,
0x2b, 0x9f, 0x0d, 0x83, 0x48, 0xf7, },
{ 0x12, 0xd5, 0x53, 0x7d, 0x9a, 0xb0, 0xbe, 0xd9, 0xed, 0xe9, 0x9e, 0xee,
0x61, 0x5b, 0x42, 0xf2, 0xc0, 0x73, 0xc0, },
{ 0xd5, 0x77, 0xd6, 0x5c, 0x6e, 0xa5, 0x69, 0x2b, 0x3b, 0x8c, 0xd6, 0x7d,
0x1d, 0xbe, 0x2c, 0xa1, 0x02, 0x21, 0xcd, 0x29, },
{ 0xa4, 0x98, 0x80, 0xca, 0x22, 0xcf, 0x6a, 0xab, 0x5e, 0x40, 0x0d, 0x61,
0x08, 0x21, 0xef, 0xc0, 0x6c, 0x52, 0xb4, 0xb0, 0x53, },
{ 0xbf, 0xaf, 0x8f, 0x3b, 0x7a, 0x97, 0x33, 0xe5, 0xca, 0x07, 0x37, 0xfd,
0x15, 0xdf, 0xce, 0x26, 0x2a, 0xb1, 0xa7, 0x0b, 0xb3, 0xac, },
{ 0x16, 0x22, 0xe1, 0xbc, 0x99, 0x4e, 0x01, 0xf0, 0xfa, 0xff, 0x8f, 0xa5,
0x0c, 0x61, 0xb0, 0xad, 0xcc, 0xb1, 0xe1, 0x21, 0x46, 0xfa, 0x2e, },
{ 0x11, 0x5b, 0x0b, 0x2b, 0xe6, 0x14, 0xc1, 0xd5, 0x4d, 0x71, 0x5e, 0x17,
0xea, 0x23, 0xdd, 0x6c, 0xbd, 0x1d, 0xbe, 0x12, 0x1b, 0xee, 0x4c, 0x1a, },
{ 0x40, 0x88, 0x22, 0xf3, 0x20, 0x6c, 0xed, 0xe1, 0x36, 0x34, 0x62, 0x2c,
0x98, 0x83, 0x52, 0xe2, 0x25, 0xee, 0xe9, 0xf5, 0xe1, 0x17, 0xf0, 0x5c,
0xae, },
{ 0xc3, 0x76, 0x37, 0xde, 0x95, 0x8c, 0xca, 0x2b, 0x0c, 0x23, 0xe7, 0xb5,
0x38, 0x70, 0x61, 0xcc, 0xff, 0xd3, 0x95, 0x7b, 0xf3, 0xff, 0x1f, 0x9d,
0x59, 0x00, },
{ 0x0c, 0x19, 0x52, 0x05, 0x22, 0x53, 0xcb, 0x48, 0xd7, 0x10, 0x0e, 0x7e,
0x14, 0x69, 0xb5, 0xa2, 0x92, 0x43, 0xa3, 0x9e, 0x4b, 0x8f, 0x51, 0x2c,
0x5a, 0x2c, 0x3b, },
{ 0xe1, 0x9d, 0x70, 0x70, 0x28, 0xec, 0x86, 0x40, 0x55, 0x33, 0x56, 0xda,
0x88, 0xca, 0xee, 0xc8, 0x6a, 0x20, 0xb1, 0xe5, 0x3d, 0x57, 0xf8, 0x3c,
0x10, 0x07, 0x2a, 0xc4, },
{ 0x0b, 0xae, 0xf1, 0xc4, 0x79, 0xee, 0x1b, 0x3d, 0x27, 0x35, 0x8d, 0x14,
0xd6, 0xae, 0x4e, 0x3c, 0xe9, 0x53, 0x50, 0xb5, 0xcc, 0x0c, 0xf7, 0xdf,
0xee, 0xa1, 0x74, 0xd6, 0x71, },
{ 0xe6, 0xa4, 0xf4, 0x99, 0x98, 0xb9, 0x80, 0xea, 0x96, 0x7f, 0x4f, 0x33,
0xcf, 0x74, 0x25, 0x6f, 0x17, 0x6c, 0xbf, 0xf5, 0x5c, 0x38, 0xd0, 0xff,
0x96, 0xcb, 0x13, 0xf9, 0xdf, 0xfd, },
{ 0xbe, 0x92, 0xeb, 0xba, 0x44, 0x2c, 0x24, 0x74, 0xd4, 0x03, 0x27, 0x3c,
0x5d, 0x5b, 0x03, 0x30, 0x87, 0x63, 0x69, 0xe0, 0xb8, 0x94, 0xf4, 0x44,
0x7e, 0xad, 0xcd, 0x20, 0x12, 0x16, 0x79, },
{ 0x30, 0xf1, 0xc4, 0x8e, 0x05, 0x90, 0x2a, 0x97, 0x63, 0x94, 0x46, 0xff,
0xce, 0xd8, 0x67, 0xa7, 0xac, 0x33, 0x8c, 0x95, 0xb7, 0xcd, 0xa3, 0x23,
0x98, 0x9d, 0x76, 0x6c, 0x9d, 0xa8, 0xd6, 0x8a, },
{ 0xbe, },
{ 0x17, 0x6c, },
{ 0x1a, 0x42, 0x4f, },
{ 0xba, 0xaf, 0xb7, 0x65, },
{ 0xc2, 0x63, 0x43, 0x6a, 0xea, },
{ 0xe4, 0x4d, 0xad, 0xf2, 0x0b, 0x02, },
{ 0x04, 0xc7, 0xc4, 0x7f, 0xa9, 0x2b, 0xce, },
{ 0x66, 0xf6, 0x67, 0xcb, 0x03, 0x53, 0xc8, 0xf1, },
{ 0x56, 0xa3, 0x60, 0x78, 0xc9, 0x5f, 0x70, 0x1b, 0x5e, },
{ 0x99, 0xff, 0x81, 0x7c, 0x13, 0x3c, 0x29, 0x79, 0x4b, 0x65, },
{ 0x51, 0x10, 0x50, 0x93, 0x01, 0x93, 0xb7, 0x01, 0xc9, 0x18, 0xb7, },
{ 0x8e, 0x3c, 0x42, 0x1e, 0x5e, 0x7d, 0xc1, 0x50, 0x70, 0x1f, 0x00, 0x98, },
{ 0x5f, 0xd9, 0x9b, 0xc8, 0xd7, 0xb2, 0x72, 0x62, 0x1a, 0x1e, 0xba, 0x92,
0xe9, },
{ 0x70, 0x2b, 0xba, 0xfe, 0xad, 0x5d, 0x96, 0x3f, 0x27, 0xc2, 0x41, 0x6d,
0xc4, 0xb3, },
{ 0xae, 0xe0, 0xd5, 0xd4, 0xc7, 0xae, 0x15, 0x5e, 0xdc, 0xdd, 0x33, 0x60,
0xd7, 0xd3, 0x5e, },
{ 0x79, 0x8e, 0xbc, 0x9e, 0x20, 0xb9, 0x19, 0x4b, 0x63, 0x80, 0xf3, 0x16,
0xaf, 0x39, 0xbd, 0x92, },
{ 0xc2, 0x0e, 0x85, 0xa0, 0x0b, 0x9a, 0xb0, 0xec, 0xde, 0x38, 0xd3, 0x10,
0xd9, 0xa7, 0x66, 0x27, 0xcf, },
{ 0x0e, 0x3b, 0x75, 0x80, 0x67, 0x14, 0x0c, 0x02, 0x90, 0xd6, 0xb3, 0x02,
0x81, 0xf6, 0xa6, 0x87, 0xce, 0x58, },
{ 0x79, 0xb5, 0xe9, 0x5d, 0x52, 0x4d, 0xf7, 0x59, 0xf4, 0x2e, 0x27, 0xdd,
0xb3, 0xed, 0x57, 0x5b, 0x82, 0xea, 0x6f, },
{ 0xa2, 0x97, 0xf5, 0x80, 0x02, 0x3d, 0xde, 0xa3, 0xf9, 0xf6, 0xab, 0xe3,
0x57, 0x63, 0x7b, 0x9b, 0x10, 0x42, 0x6f, 0xf2, },
{ 0x12, 0x7a, 0xfc, 0xb7, 0x67, 0x06, 0x0c, 0x78, 0x1a, 0xfe, 0x88, 0x4f,
0xc6, 0xac, 0x52, 0x96, 0x64, 0x28, 0x97, 0x84, 0x06, },
{ 0xc5, 0x04, 0x44, 0x6b, 0xb2, 0xa5, 0xa4, 0x66, 0xe1, 0x76, 0xa2, 0x51,
0xf9, 0x59, 0x69, 0x97, 0x56, 0x0b, 0xbf, 0x50, 0xb3, 0x34, },
{ 0x21, 0x32, 0x6b, 0x42, 0xb5, 0xed, 0x71, 0x8d, 0xf7, 0x5a, 0x35, 0xe3,
0x90, 0xe2, 0xee, 0xaa, 0x89, 0xf6, 0xc9, 0x9c, 0x4d, 0x73, 0xf4, },
{ 0x4c, 0xa6, 0x09, 0xf4, 0x48, 0xe7, 0x46, 0xbc, 0x49, 0xfc, 0xe5, 0xda,
0xd1, 0x87, 0x13, 0x17, 0x4c, 0x59, 0x71, 0x26, 0x5b, 0x2c, 0x42, 0xb7, },
{ 0x13, 0x63, 0xf3, 0x40, 0x02, 0xe5, 0xa3, 0x3a, 0x5e, 0x8e, 0xf8, 0xb6,
0x8a, 0x49, 0x60, 0x76, 0x34, 0x72, 0x94, 0x73, 0xf6, 0xd9, 0x21, 0x6a,
0x26, },
{ 0xdf, 0x75, 0x16, 0x10, 0x1b, 0x5e, 0x81, 0xc3, 0xc8, 0xde, 0x34, 0x24,
0xb0, 0x98, 0xeb, 0x1b, 0x8f, 0xa1, 0x9b, 0x05, 0xee, 0xa5, 0xe9, 0x35,
0xf4, 0x1d, },
{ 0xcd, 0x21, 0x93, 0x6e, 0x5b, 0xa0, 0x26, 0x2b, 0x21, 0x0e, 0xa0, 0xb9,
0x1c, 0xb5, 0xbb, 0xb8, 0xf8, 0x1e, 0xff, 0x5c, 0xa8, 0xf9, 0x39, 0x46,
0x4e, 0x29, 0x26, },
{ 0x73, 0x7f, 0x0e, 0x3b, 0x0b, 0x5c, 0xf9, 0x60, 0xaa, 0x88, 0xa1, 0x09,
0xb1, 0x5d, 0x38, 0x7b, 0x86, 0x8f, 0x13, 0x7a, 0x8d, 0x72, 0x7a, 0x98,
0x1a, 0x5b, 0xff, 0xc9, },
{ 0xd3, 0x3c, 0x61, 0x71, 0x44, 0x7e, 0x31, 0x74, 0x98, 0x9d, 0x9a, 0xd2,
0x27, 0xf3, 0x46, 0x43, 0x42, 0x51, 0xd0, 0x5f, 0xe9, 0x1c, 0x5c, 0x69,
0xbf, 0xf6, 0xbe, 0x3c, 0x40, },
{ 0x31, 0x99, 0x31, 0x9f, 0xaa, 0x43, 0x2e, 0x77, 0x3e, 0x74, 0x26, 0x31,
0x5e, 0x61, 0xf1, 0x87, 0xe2, 0xeb, 0x9b, 0xcd, 0xd0, 0x3a, 0xee, 0x20,
0x7e, 0x10, 0x0a, 0x0b, 0x7e, 0xfa, },
{ 0xa4, 0x27, 0x80, 0x67, 0x81, 0x2a, 0xa7, 0x62, 0xf7, 0x6e, 0xda, 0xd4,
0x5c, 0x39, 0x74, 0xad, 0x7e, 0xbe, 0xad, 0xa5, 0x84, 0x7f, 0xa9, 0x30,
0x5d, 0xdb, 0xe2, 0x05, 0x43, 0xf7, 0x1b, },
{ 0x0b, 0x37, 0xd8, 0x02, 0xe1, 0x83, 0xd6, 0x80, 0xf2, 0x35, 0xc2, 0xb0,
0x37, 0xef, 0xef, 0x5e, 0x43, 0x93, 0xf0, 0x49, 0x45, 0x0a, 0xef, 0xb5,
0x76, 0x70, 0x12, 0x44, 0xc4, 0xdb, 0xf5, 0x7a, },
{ 0x1f, },
{ 0x82, 0x60, },
{ 0xcc, 0xe3, 0x08, },
{ 0x56, 0x17, 0xe4, 0x59, },
{ 0xe2, 0xd7, 0x9e, 0xc4, 0x4c, },
{ 0xb2, 0xad, 0xd3, 0x78, 0x58, 0x5a, },
{ 0xce, 0x43, 0xb4, 0x02, 0x96, 0xab, 0x3c, },
{ 0xe6, 0x05, 0x1a, 0x73, 0x22, 0x32, 0xbb, 0x77, },
{ 0x23, 0xe7, 0xda, 0xfe, 0x2c, 0xef, 0x8c, 0x22, 0xec, },
{ 0xe9, 0x8e, 0x55, 0x38, 0xd1, 0xd7, 0x35, 0x23, 0x98, 0xc7, },
{ 0xb5, 0x81, 0x1a, 0xe5, 0xb5, 0xa5, 0xd9, 0x4d, 0xca, 0x41, 0xe7, },
{ 0x41, 0x16, 0x16, 0x95, 0x8d, 0x9e, 0x0c, 0xea, 0x8c, 0x71, 0x9a, 0xc1, },
{ 0x7c, 0x33, 0xc0, 0xa4, 0x00, 0x62, 0xea, 0x60, 0x67, 0xe4, 0x20, 0xbc,
0x5b, },
{ 0xdb, 0xb1, 0xdc, 0xfd, 0x08, 0xc0, 0xde, 0x82, 0xd1, 0xde, 0x38, 0xc0,
0x90, 0x48, },
{ 0x37, 0x18, 0x2e, 0x0d, 0x61, 0xaa, 0x61, 0xd7, 0x86, 0x20, 0x16, 0x60,
0x04, 0xd9, 0xd5, },
{ 0xb0, 0xcf, 0x2c, 0x4c, 0x5e, 0x5b, 0x4f, 0x2a, 0x23, 0x25, 0x58, 0x47,
0xe5, 0x31, 0x06, 0x70, },
{ 0x91, 0xa0, 0xa3, 0x86, 0x4e, 0xe0, 0x72, 0x38, 0x06, 0x67, 0x59, 0x5c,
0x70, 0x25, 0xdb, 0x33, 0x27, },
{ 0x44, 0x58, 0x66, 0xb8, 0x58, 0xc7, 0x13, 0xed, 0x4c, 0xc0, 0xf4, 0x9a,
0x1e, 0x67, 0x75, 0x33, 0xb6, 0xb8, },
{ 0x7f, 0x98, 0x4a, 0x8e, 0x50, 0xa2, 0x5c, 0xcd, 0x59, 0xde, 0x72, 0xb3,
0x9d, 0xc3, 0x09, 0x8a, 0xab, 0x56, 0xf1, },
{ 0x80, 0x96, 0x49, 0x1a, 0x59, 0xa2, 0xc5, 0xd5, 0xa7, 0x20, 0x8a, 0xb7,
0x27, 0x62, 0x84, 0x43, 0xc6, 0xe1, 0x1b, 0x5d, },
{ 0x6b, 0xb7, 0x2b, 0x26, 0x62, 0x14, 0x70, 0x19, 0x3d, 0x4d, 0xac, 0xac,
0x63, 0x58, 0x5e, 0x94, 0xb5, 0xb7, 0xe8, 0xe8, 0xa2, },
{ 0x20, 0xa8, 0xc0, 0xfd, 0x63, 0x3d, 0x6e, 0x98, 0xcf, 0x0c, 0x49, 0x98,
0xe4, 0x5a, 0xfe, 0x8c, 0xaa, 0x70, 0x82, 0x1c, 0x7b, 0x74, },
{ 0xc8, 0xe8, 0xdd, 0xdf, 0x69, 0x30, 0x01, 0xc2, 0x0f, 0x7e, 0x2f, 0x11,
0xcc, 0x3e, 0x17, 0xa5, 0x69, 0x40, 0x3f, 0x0e, 0x79, 0x7f, 0xcf, },
{ 0xdb, 0x61, 0xc0, 0xe2, 0x2e, 0x49, 0x07, 0x31, 0x1d, 0x91, 0x42, 0x8a,
0xfc, 0x5e, 0xd3, 0xf8, 0x56, 0x1f, 0x2b, 0x73, 0xfd, 0x9f, 0xb2, 0x8e, },
{ 0x0c, 0x89, 0x55, 0x0c, 0x1f, 0x59, 0x2c, 0x9d, 0x1b, 0x29, 0x1d, 0x41,
0x1d, 0xe6, 0x47, 0x8f, 0x8c, 0x2b, 0xea, 0x8f, 0xf0, 0xff, 0x21, 0x70,
0x88, },
{ 0x12, 0x18, 0x95, 0xa6, 0x59, 0xb1, 0x31, 0x24, 0x45, 0x67, 0x55, 0xa4,
0x1a, 0x2d, 0x48, 0x67, 0x1b, 0x43, 0x88, 0x2d, 0x8e, 0xa0, 0x70, 0xb3,
0xc6, 0xbb, },
{ 0xe7, 0xb1, 0x1d, 0xb2, 0x76, 0x4d, 0x68, 0x68, 0x68, 0x23, 0x02, 0x55,
0x3a, 0xe2, 0xe5, 0xd5, 0x4b, 0x43, 0xf9, 0x34, 0x77, 0x5c, 0xa1, 0xf5,
0x55, 0xfd, 0x4f, },
{ 0x8c, 0x87, 0x5a, 0x08, 0x3a, 0x73, 0xad, 0x61, 0xe1, 0xe7, 0x99, 0x7e,
0xf0, 0x5d, 0xe9, 0x5d, 0x16, 0x43, 0x80, 0x2f, 0xd0, 0x66, 0x34, 0xe2,
0x42, 0x64, 0x3b, 0x1a, },
{ 0x39, 0xc1, 0x99, 0xcf, 0x22, 0xbf, 0x16, 0x8f, 0x9f, 0x80, 0x7f, 0x95,
0x0a, 0x05, 0x67, 0x27, 0xe7, 0x15, 0xdf, 0x9d, 0xb2, 0xfe, 0x1c, 0xb5,
0x1d, 0x60, 0x8f, 0x8a, 0x1d, },
{ 0x9b, 0x6e, 0x08, 0x09, 0x06, 0x73, 0xab, 0x68, 0x02, 0x62, 0x1a, 0xe4,
0xd4, 0xdf, 0xc7, 0x02, 0x4c, 0x6a, 0x5f, 0xfd, 0x23, 0xac, 0xae, 0x6d,
0x43, 0xa4, 0x7a, 0x50, 0x60, 0x3c, },
{ 0x1d, 0xb4, 0xc6, 0xe1, 0xb1, 0x4b, 0xe3, 0xf2, 0xe2, 0x1a, 0x73, 0x1b,
0xa0, 0x92, 0xa7, 0xf5, 0xff, 0x8f, 0x8b, 0x5d, 0xdf, 0xa8, 0x04, 0xb3,
0xb0, 0xf7, 0xcc, 0x12, 0xfa, 0x35, 0x46, },
{ 0x49, 0x45, 0x97, 0x11, 0x0f, 0x1c, 0x60, 0x8e, 0xe8, 0x47, 0x30, 0xcf,
0x60, 0xa8, 0x71, 0xc5, 0x1b, 0xe9, 0x39, 0x4d, 0x49, 0xb6, 0x12, 0x1f,
0x24, 0xab, 0x37, 0xff, 0x83, 0xc2, 0xe1, 0x3a, },
{ 0x60, },
{ 0x24, 0x26, },
{ 0x47, 0xeb, 0xc9, },
{ 0x4a, 0xd0, 0xbc, 0xf0, },
{ 0x8e, 0x2b, 0xc9, 0x85, 0x3c, },
{ 0xa2, 0x07, 0x15, 0xb8, 0x12, 0x74, },
{ 0x0f, 0xdb, 0x5b, 0x33, 0x69, 0xfe, 0x4b, },
{ 0xa2, 0x86, 0x54, 0xf4, 0xfd, 0xb2, 0xd4, 0xe6, },
{ 0xbb, 0x84, 0x78, 0x49, 0x27, 0x8e, 0x61, 0xda, 0x60, },
{ 0x04, 0xc3, 0xcd, 0xaa, 0x8f, 0xa7, 0x03, 0xc9, 0xf9, 0xb6, },
{ 0xf8, 0x27, 0x1d, 0x61, 0xdc, 0x21, 0x42, 0xdd, 0xad, 0x92, 0x40, },
{ 0x12, 0x87, 0xdf, 0xc2, 0x41, 0x45, 0x5a, 0x36, 0x48, 0x5b, 0x51, 0x2b, },
{ 0xbb, 0x37, 0x5d, 0x1f, 0xf1, 0x68, 0x7a, 0xc4, 0xa5, 0xd2, 0xa4, 0x91,
0x8d, },
{ 0x5b, 0x27, 0xd1, 0x04, 0x54, 0x52, 0x9f, 0xa3, 0x47, 0x86, 0x33, 0x33,
0xbf, 0xa0, },
{ 0xcf, 0x04, 0xea, 0xf8, 0x03, 0x2a, 0x43, 0xff, 0xa6, 0x68, 0x21, 0x4c,
0xd5, 0x4b, 0xed, },
{ 0xaf, 0xb8, 0xbc, 0x63, 0x0f, 0x18, 0x4d, 0xe2, 0x7a, 0xdd, 0x46, 0x44,
0xc8, 0x24, 0x0a, 0xb7, },
{ 0x3e, 0xdc, 0x36, 0xe4, 0x89, 0xb1, 0xfa, 0xc6, 0x40, 0x93, 0x2e, 0x75,
0xb2, 0x15, 0xd1, 0xb1, 0x10, },
{ 0x6c, 0xd8, 0x20, 0x3b, 0x82, 0x79, 0xf9, 0xc8, 0xbc, 0x9d, 0xe0, 0x35,
0xbe, 0x1b, 0x49, 0x1a, 0xbc, 0x3a, },
{ 0x78, 0x65, 0x2c, 0xbe, 0x35, 0x67, 0xdc, 0x78, 0xd4, 0x41, 0xf6, 0xc9,
0xde, 0xde, 0x1f, 0x18, 0x13, 0x31, 0x11, },
{ 0x8a, 0x7f, 0xb1, 0x33, 0x8f, 0x0c, 0x3c, 0x0a, 0x06, 0x61, 0xf0, 0x47,
0x29, 0x1b, 0x29, 0xbc, 0x1c, 0x47, 0xef, 0x7a, },
{ 0x65, 0x91, 0xf1, 0xe6, 0xb3, 0x96, 0xd3, 0x8c, 0xc2, 0x4a, 0x59, 0x35,
0x72, 0x8e, 0x0b, 0x9a, 0x87, 0xca, 0x34, 0x7b, 0x63, },
{ 0x5f, 0x08, 0x87, 0x80, 0x56, 0x25, 0x89, 0x77, 0x61, 0x8c, 0x64, 0xa1,
0x59, 0x6d, 0x59, 0x62, 0xe8, 0x4a, 0xc8, 0x58, 0x99, 0xd1, },
{ 0x23, 0x87, 0x1d, 0xed, 0x6f, 0xf2, 0x91, 0x90, 0xe2, 0xfe, 0x43, 0x21,
0xaf, 0x97, 0xc6, 0xbc, 0xd7, 0x15, 0xc7, 0x2d, 0x08, 0x77, 0x91, },
{ 0x90, 0x47, 0x9a, 0x9e, 0x3a, 0xdf, 0xf3, 0xc9, 0x4c, 0x1e, 0xa7, 0xd4,
0x6a, 0x32, 0x90, 0xfe, 0xb7, 0xb6, 0x7b, 0xfa, 0x96, 0x61, 0xfb, 0xa4, },
{ 0xb1, 0x67, 0x60, 0x45, 0xb0, 0x96, 0xc5, 0x15, 0x9f, 0x4d, 0x26, 0xd7,
0x9d, 0xf1, 0xf5, 0x6d, 0x21, 0x00, 0x94, 0x31, 0x64, 0x94, 0xd3, 0xa7,
0xd3, },
{ 0x02, 0x3e, 0xaf, 0xf3, 0x79, 0x73, 0xa5, 0xf5, 0xcc, 0x7a, 0x7f, 0xfb,
0x79, 0x2b, 0x85, 0x8c, 0x88, 0x72, 0x06, 0xbe, 0xfe, 0xaf, 0xc1, 0x16,
0xa6, 0xd6, },
{ 0x2a, 0xb0, 0x1a, 0xe5, 0xaa, 0x6e, 0xb3, 0xae, 0x53, 0x85, 0x33, 0x80,
0x75, 0xae, 0x30, 0xe6, 0xb8, 0x72, 0x42, 0xf6, 0x25, 0x4f, 0x38, 0x88,
0x55, 0xd1, 0xa9, },
{ 0x90, 0xd8, 0x0c, 0xc0, 0x93, 0x4b, 0x4f, 0x9e, 0x65, 0x6c, 0xa1, 0x54,
0xa6, 0xf6, 0x6e, 0xca, 0xd2, 0xbb, 0x7e, 0x6a, 0x1c, 0xd3, 0xce, 0x46,
0xef, 0xb0, 0x00, 0x8d, },
{ 0xed, 0x9c, 0x49, 0xcd, 0xc2, 0xde, 0x38, 0x0e, 0xe9, 0x98, 0x6c, 0xc8,
0x90, 0x9e, 0x3c, 0xd4, 0xd3, 0xeb, 0x88, 0x32, 0xc7, 0x28, 0xe3, 0x94,
0x1c, 0x9f, 0x8b, 0xf3, 0xcb, },
{ 0xac, 0xe7, 0x92, 0x16, 0xb4, 0x14, 0xa0, 0xe4, 0x04, 0x79, 0xa2, 0xf4,
0x31, 0xe6, 0x0c, 0x26, 0xdc, 0xbf, 0x2f, 0x69, 0x1b, 0x55, 0x94, 0x67,
0xda, 0x0c, 0xd7, 0x32, 0x1f, 0xef, },
{ 0x68, 0x63, 0x85, 0x57, 0x95, 0x9e, 0x42, 0x27, 0x41, 0x43, 0x42, 0x02,
0xa5, 0x78, 0xa7, 0xc6, 0x43, 0xc1, 0x6a, 0xba, 0x70, 0x80, 0xcd, 0x04,
0xb6, 0x78, 0x76, 0x29, 0xf3, 0xe8, 0xa0, },
{ 0xe6, 0xac, 0x8d, 0x9d, 0xf0, 0xc0, 0xf7, 0xf7, 0xe3, 0x3e, 0x4e, 0x28,
0x0f, 0x59, 0xb2, 0x67, 0x9e, 0x84, 0x34, 0x42, 0x96, 0x30, 0x2b, 0xca,
0x49, 0xb6, 0xc5, 0x9a, 0x84, 0x59, 0xa7, 0x81, },
{ 0x7e, },
{ 0x1e, 0x21, },
{ 0x26, 0xd3, 0xdd, },
{ 0x2c, 0xd4, 0xb3, 0x3d, },
{ 0x86, 0x7b, 0x76, 0x3c, 0xf0, },
{ 0x12, 0xc3, 0x70, 0x1d, 0x55, 0x18, },
{ 0x96, 0xc2, 0xbd, 0x61, 0x55, 0xf4, 0x24, },
{ 0x20, 0x51, 0xf7, 0x86, 0x58, 0x8f, 0x07, 0x2a, },
{ 0x93, 0x15, 0xa8, 0x1d, 0xda, 0x97, 0xee, 0x0e, 0x6c, },
{ 0x39, 0x93, 0xdf, 0xd5, 0x0e, 0xca, 0xdc, 0x7a, 0x92, 0xce, },
{ 0x60, 0xd5, 0xfd, 0xf5, 0x1b, 0x26, 0x82, 0x26, 0x73, 0x02, 0xbc, },
{ 0x98, 0xf2, 0x34, 0xe1, 0xf5, 0xfb, 0x00, 0xac, 0x10, 0x4a, 0x38, 0x9f, },
{ 0xda, 0x3a, 0x92, 0x8a, 0xd0, 0xcd, 0x12, 0xcd, 0x15, 0xbb, 0xab, 0x77,
0x66, },
{ 0xa2, 0x92, 0x1a, 0xe5, 0xca, 0x0c, 0x30, 0x75, 0xeb, 0xaf, 0x00, 0x31,
0x55, 0x66, },
{ 0x06, 0xea, 0xfd, 0x3e, 0x86, 0x38, 0x62, 0x4e, 0xa9, 0x12, 0xa4, 0x12,
0x43, 0xbf, 0xa1, },
{ 0xe4, 0x71, 0x7b, 0x94, 0xdb, 0xa0, 0xd2, 0xff, 0x9b, 0xeb, 0xad, 0x8e,
0x95, 0x8a, 0xc5, 0xed, },
{ 0x25, 0x5a, 0x77, 0x71, 0x41, 0x0e, 0x7a, 0xe9, 0xed, 0x0c, 0x10, 0xef,
0xf6, 0x2b, 0x3a, 0xba, 0x60, },
{ 0xee, 0xe2, 0xa3, 0x67, 0x64, 0x1d, 0xc6, 0x04, 0xc4, 0xe1, 0x68, 0xd2,
0x6e, 0xd2, 0x91, 0x75, 0x53, 0x07, },
{ 0xe0, 0xf6, 0x4d, 0x8f, 0x68, 0xfc, 0x06, 0x7e, 0x18, 0x79, 0x7f, 0x2b,
0x6d, 0xef, 0x46, 0x7f, 0xab, 0xb2, 0xad, },
{ 0x3d, 0x35, 0x88, 0x9f, 0x2e, 0xcf, 0x96, 0x45, 0x07, 0x60, 0x71, 0x94,
0x00, 0x8d, 0xbf, 0xf4, 0xef, 0x46, 0x2e, 0x3c, },
{ 0x43, 0xcf, 0x98, 0xf7, 0x2d, 0xf4, 0x17, 0xe7, 0x8c, 0x05, 0x2d, 0x9b,
0x24, 0xfb, 0x4d, 0xea, 0x4a, 0xec, 0x01, 0x25, 0x29, },
{ 0x8e, 0x73, 0x9a, 0x78, 0x11, 0xfe, 0x48, 0xa0, 0x3b, 0x1a, 0x26, 0xdf,
0x25, 0xe9, 0x59, 0x1c, 0x70, 0x07, 0x9f, 0xdc, 0xa0, 0xa6, },
{ 0xe8, 0x47, 0x71, 0xc7, 0x3e, 0xdf, 0xb5, 0x13, 0xb9, 0x85, 0x13, 0xa8,
0x54, 0x47, 0x6e, 0x59, 0x96, 0x09, 0x13, 0x5f, 0x82, 0x16, 0x0b, },
{ 0xfb, 0xc0, 0x8c, 0x03, 0x21, 0xb3, 0xc4, 0xb5, 0x43, 0x32, 0x6c, 0xea,
0x7f, 0xa8, 0x43, 0x91, 0xe8, 0x4e, 0x3f, 0xbf, 0x45, 0x58, 0x6a, 0xa3, },
{ 0x55, 0xf8, 0xf3, 0x00, 0x76, 0x09, 0xef, 0x69, 0x5d, 0xd2, 0x8a, 0xf2,
0x65, 0xc3, 0xcb, 0x9b, 0x43, 0xfd, 0xb1, 0x7e, 0x7f, 0xa1, 0x94, 0xb0,
0xd7, },
{ 0xaa, 0x13, 0xc1, 0x51, 0x40, 0x6d, 0x8d, 0x4c, 0x0a, 0x95, 0x64, 0x7b,
0xd1, 0x96, 0xb6, 0x56, 0xb4, 0x5b, 0xcf, 0xd6, 0xd9, 0x15, 0x97, 0xdd,
0xb6, 0xef, },
{ 0xaf, 0xb7, 0x36, 0xb0, 0x04, 0xdb, 0xd7, 0x9c, 0x9a, 0x44, 0xc4, 0xf6,
0x1f, 0x12, 0x21, 0x2d, 0x59, 0x30, 0x54, 0xab, 0x27, 0x61, 0xa3, 0x57,
0xef, 0xf8, 0x53, },
{ 0x97, 0x34, 0x45, 0x3e, 0xce, 0x7c, 0x35, 0xa2, 0xda, 0x9f, 0x4b, 0x46,
0x6c, 0x11, 0x67, 0xff, 0x2f, 0x76, 0x58, 0x15, 0x71, 0xfa, 0x44, 0x89,
0x89, 0xfd, 0xf7, 0x99, },
{ 0x1f, 0xb1, 0x62, 0xeb, 0x83, 0xc5, 0x9c, 0x89, 0xf9, 0x2c, 0xd2, 0x03,
0x61, 0xbc, 0xbb, 0xa5, 0x74, 0x0e, 0x9b, 0x7e, 0x82, 0x3e, 0x70, 0x0a,
0xa9, 0x8f, 0x2b, 0x59, 0xfb, },
{ 0xf8, 0xca, 0x5e, 0x3a, 0x4f, 0x9e, 0x10, 0x69, 0x10, 0xd5, 0x4c, 0xeb,
0x1a, 0x0f, 0x3c, 0x6a, 0x98, 0xf5, 0xb0, 0x97, 0x5b, 0x37, 0x2f, 0x0d,
0xbd, 0x42, 0x4b, 0x69, 0xa1, 0x82, },
{ 0x12, 0x8c, 0x6d, 0x52, 0x08, 0xef, 0x74, 0xb2, 0xe6, 0xaa, 0xd3, 0xb0,
0x26, 0xb0, 0xd9, 0x94, 0xb6, 0x11, 0x45, 0x0e, 0x36, 0x71, 0x14, 0x2d,
0x41, 0x8c, 0x21, 0x53, 0x31, 0xe9, 0x68, },
{ 0xee, 0xea, 0x0d, 0x89, 0x47, 0x7e, 0x72, 0xd1, 0xd8, 0xce, 0x58, 0x4c,
0x94, 0x1f, 0x0d, 0x51, 0x08, 0xa3, 0xb6, 0x3d, 0xe7, 0x82, 0x46, 0x92,
0xd6, 0x98, 0x6b, 0x07, 0x10, 0x65, 0x52, 0x65, },
};
static bool __init noinline_for_stack blake2s_digest_test(void)
{
u8 key[BLAKE2S_KEY_SIZE];
u8 buf[ARRAY_SIZE(blake2s_testvecs)];
u8 hash[BLAKE2S_HASH_SIZE];
struct blake2s_state state;
bool success = true;
int i, l;
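/* Fill the key with a Fibonacci-style byte sequence (1, 1, 2, 3, 5, ...
 * mod 256) and the buffer with 0, 1, 2, ...: deterministic but
 * non-trivial inputs for the precomputed test vectors. */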
key[0] = key[1] = 1;
for (i = 2; i < sizeof(key); ++i)
key[i] = key[i - 2] + key[i - 1];
for (i = 0; i < sizeof(buf); ++i)
buf[i] = (u8)i;
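/* Iteration i hashes the first i bytes of buf with an output length
 * cycling through 1..BLAKE2S_HASH_SIZE and a key length cycling
 * through 0..BLAKE2S_KEY_SIZE, comparing against the precomputed
 * vector; l steps pseudo-randomly to vary the update() split point
 * for the incremental-API check further down. */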
for (i = l = 0; i < ARRAY_SIZE(blake2s_testvecs); l = (l + 37) % ++i) {
int outlen = 1 + i % BLAKE2S_HASH_SIZE;
int keylen = (13 * i) % (BLAKE2S_KEY_SIZE + 1);
blake2s(hash, buf, key + BLAKE2S_KEY_SIZE - keylen, outlen, i,
keylen);
if (memcmp(hash, blake2s_testvecs[i], outlen)) {
pr_err("blake2s self-test %d: FAIL\n", i + 1);
success = false;
}
if (!keylen)
blake2s_init(&state, outlen);
else
blake2s_init_key(&state, outlen,
key + BLAKE2S_KEY_SIZE - keylen,
keylen);
blake2s_update(&state, buf, l);
blake2s_update(&state, buf + l, i - l);
blake2s_final(&state, hash);
if (memcmp(hash, blake2s_testvecs[i], outlen)) {
pr_err("blake2s init/update/final self-test %d: FAIL\n",
i + 1);
success = false;
}
}
return success;
}
static bool __init noinline_for_stack blake2s_random_test(void)
{
struct blake2s_state state;
bool success = true;
int i, l;
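/* Check that the arch-specific blake2s_compress() (when one is built
 * in) agrees with the generic implementation on random states and
 * blocks, and that compression is insensitive to input alignment. */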
for (i = 0; i < 32; ++i) {
enum { TEST_ALIGNMENT = 16 };
u8 blocks[BLAKE2S_BLOCK_SIZE * 2 + TEST_ALIGNMENT - 1]
__aligned(TEST_ALIGNMENT);
u8 *unaligned_block = blocks + BLAKE2S_BLOCK_SIZE;
struct blake2s_state state1, state2;
get_random_bytes(blocks, sizeof(blocks));
get_random_bytes(&state, sizeof(state));
#if defined(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) && \
defined(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S)
memcpy(&state1, &state, sizeof(state1));
memcpy(&state2, &state, sizeof(state2));
blake2s_compress(&state1, blocks, 2, BLAKE2S_BLOCK_SIZE);
blake2s_compress_generic(&state2, blocks, 2, BLAKE2S_BLOCK_SIZE);
if (memcmp(&state1, &state2, sizeof(state1))) {
pr_err("blake2s random compress self-test %d: FAIL\n",
i + 1);
success = false;
}
#endif
memcpy(&state1, &state, sizeof(state1));
blake2s_compress(&state1, blocks, 1, BLAKE2S_BLOCK_SIZE);
for (l = 1; l < TEST_ALIGNMENT; ++l) {
memcpy(unaligned_block + l, blocks,
BLAKE2S_BLOCK_SIZE);
memcpy(&state2, &state, sizeof(state2));
blake2s_compress(&state2, unaligned_block + l, 1,
BLAKE2S_BLOCK_SIZE);
if (memcmp(&state1, &state2, sizeof(state1))) {
pr_err("blake2s random compress align %d self-test %d: FAIL\n",
l, i + 1);
success = false;
}
}
}
return success;
}
bool __init blake2s_selftest(void)
{
bool success;
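/* &= rather than && so both tests always run and report failures. */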
success = blake2s_digest_test();
success &= blake2s_random_test();
return success;
}
| linux-master | lib/crypto/blake2s-selftest.c |
/* gf128mul.c - GF(2^128) multiplication functions
*
* Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
* Copyright (c) 2006, Rik Snel <[email protected]>
*
* Based on Dr Brian Gladman's (GPL'd) work published at
* http://gladman.plushost.co.uk/oldsite/cryptography_technology/index.php
* See the original copyright notice below.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
/*
---------------------------------------------------------------------------
Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved.
LICENSE TERMS
The free distribution and use of this software in both source and binary
form is allowed (with or without changes) provided that:
1. distributions of this source code include the above copyright
notice, this list of conditions and the following disclaimer;
2. distributions in binary form include the above copyright
notice, this list of conditions and the following disclaimer
in the documentation and/or other associated materials;
3. the copyright holder's name is not used to endorse products
built using this software without specific written permission.
ALTERNATIVELY, provided that this notice is retained in full, this product
may be distributed under the terms of the GNU General Public License (GPL),
in which case the provisions of the GPL apply INSTEAD OF those given above.
DISCLAIMER
This software is provided 'as is' with no explicit or implied warranties
in respect of its properties, including, but not limited to, correctness
and/or fitness for purpose.
---------------------------------------------------------------------------
Issue 31/01/2006
This file provides fast multiplication in GF(2^128) as required by several
cryptographic authentication modes
*/
#include <crypto/gf128mul.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#define gf128mul_dat(q) { \
q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
q(0x48), q(0x49), q(0x4a), q(0x4b), q(0x4c), q(0x4d), q(0x4e), q(0x4f),\
q(0x50), q(0x51), q(0x52), q(0x53), q(0x54), q(0x55), q(0x56), q(0x57),\
q(0x58), q(0x59), q(0x5a), q(0x5b), q(0x5c), q(0x5d), q(0x5e), q(0x5f),\
q(0x60), q(0x61), q(0x62), q(0x63), q(0x64), q(0x65), q(0x66), q(0x67),\
q(0x68), q(0x69), q(0x6a), q(0x6b), q(0x6c), q(0x6d), q(0x6e), q(0x6f),\
q(0x70), q(0x71), q(0x72), q(0x73), q(0x74), q(0x75), q(0x76), q(0x77),\
q(0x78), q(0x79), q(0x7a), q(0x7b), q(0x7c), q(0x7d), q(0x7e), q(0x7f),\
q(0x80), q(0x81), q(0x82), q(0x83), q(0x84), q(0x85), q(0x86), q(0x87),\
q(0x88), q(0x89), q(0x8a), q(0x8b), q(0x8c), q(0x8d), q(0x8e), q(0x8f),\
q(0x90), q(0x91), q(0x92), q(0x93), q(0x94), q(0x95), q(0x96), q(0x97),\
q(0x98), q(0x99), q(0x9a), q(0x9b), q(0x9c), q(0x9d), q(0x9e), q(0x9f),\
q(0xa0), q(0xa1), q(0xa2), q(0xa3), q(0xa4), q(0xa5), q(0xa6), q(0xa7),\
q(0xa8), q(0xa9), q(0xaa), q(0xab), q(0xac), q(0xad), q(0xae), q(0xaf),\
q(0xb0), q(0xb1), q(0xb2), q(0xb3), q(0xb4), q(0xb5), q(0xb6), q(0xb7),\
q(0xb8), q(0xb9), q(0xba), q(0xbb), q(0xbc), q(0xbd), q(0xbe), q(0xbf),\
q(0xc0), q(0xc1), q(0xc2), q(0xc3), q(0xc4), q(0xc5), q(0xc6), q(0xc7),\
q(0xc8), q(0xc9), q(0xca), q(0xcb), q(0xcc), q(0xcd), q(0xce), q(0xcf),\
q(0xd0), q(0xd1), q(0xd2), q(0xd3), q(0xd4), q(0xd5), q(0xd6), q(0xd7),\
q(0xd8), q(0xd9), q(0xda), q(0xdb), q(0xdc), q(0xdd), q(0xde), q(0xdf),\
q(0xe0), q(0xe1), q(0xe2), q(0xe3), q(0xe4), q(0xe5), q(0xe6), q(0xe7),\
q(0xe8), q(0xe9), q(0xea), q(0xeb), q(0xec), q(0xed), q(0xee), q(0xef),\
q(0xf0), q(0xf1), q(0xf2), q(0xf3), q(0xf4), q(0xf5), q(0xf6), q(0xf7),\
q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) \
}
/*
* Given a value i in 0..255 as the byte overflow when a field element
* in GF(2^128) is multiplied by x^8, the following macro returns the
* 16-bit value that must be XOR-ed into the low-degree end of the
* product to reduce it modulo the polynomial x^128 + x^7 + x^2 + x + 1.
*
* There are two versions of the macro, and hence two tables: one for
* the "be" convention where the highest-order bit is the coefficient of
* the highest-degree polynomial term, and one for the "le" convention
* where the highest-order bit is the coefficient of the lowest-degree
* polynomial term. In both cases the values are stored in CPU byte
* endianness such that the coefficients are ordered consistently across
* bytes, i.e. in the "be" table bits 15..0 of the stored value
* correspond to the coefficients of x^15..x^0, and in the "le" table
* bits 15..0 correspond to the coefficients of x^0..x^15.
*
* Therefore, provided that the appropriate byte endianness conversions
* are done by the multiplication functions (and these must be in place
* anyway to support both little endian and big endian CPUs), the "be"
* table can be used for multiplications of both "bbe" and "ble"
* elements, and the "le" table can be used for multiplications of both
* "lle" and "lbe" elements.
*/
#define xda_be(i) ( \
(i & 0x80 ? 0x4380 : 0) ^ (i & 0x40 ? 0x21c0 : 0) ^ \
(i & 0x20 ? 0x10e0 : 0) ^ (i & 0x10 ? 0x0870 : 0) ^ \
(i & 0x08 ? 0x0438 : 0) ^ (i & 0x04 ? 0x021c : 0) ^ \
(i & 0x02 ? 0x010e : 0) ^ (i & 0x01 ? 0x0087 : 0) \
)
#define xda_le(i) ( \
(i & 0x80 ? 0xe100 : 0) ^ (i & 0x40 ? 0x7080 : 0) ^ \
(i & 0x20 ? 0x3840 : 0) ^ (i & 0x10 ? 0x1c20 : 0) ^ \
(i & 0x08 ? 0x0e10 : 0) ^ (i & 0x04 ? 0x0708 : 0) ^ \
(i & 0x02 ? 0x0384 : 0) ^ (i & 0x01 ? 0x01c2 : 0) \
)
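/* For example, xda_be(0x01) == 0x0087 and xda_le(0x80) == 0xe100 both
 * encode the reduction x^128 = x^7 + x^2 + x + 1, expressed in the
 * respective bit orders described above. */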
static const u16 gf128mul_table_le[256] = gf128mul_dat(xda_le);
static const u16 gf128mul_table_be[256] = gf128mul_dat(xda_be);
/*
* The following functions multiply a field element by x^8 in
* the polynomial field representation. They use 64-bit word operations
* to gain speed but compensate for machine endianness and hence work
* correctly on both styles of machine.
*/
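/* The byte shifted out is exactly the overflow described above: it
 * indexes the matching table, and the returned 16-bit value is XORed
 * back into the low-degree end of the shifted result. */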
static void gf128mul_x8_lle(be128 *x)
{
u64 a = be64_to_cpu(x->a);
u64 b = be64_to_cpu(x->b);
u64 _tt = gf128mul_table_le[b & 0xff];
x->b = cpu_to_be64((b >> 8) | (a << 56));
x->a = cpu_to_be64((a >> 8) ^ (_tt << 48));
}
/* time invariant version of gf128mul_x8_lle */
static void gf128mul_x8_lle_ti(be128 *x)
{
u64 a = be64_to_cpu(x->a);
u64 b = be64_to_cpu(x->b);
u64 _tt = xda_le(b & 0xff); /* avoid table lookup */
x->b = cpu_to_be64((b >> 8) | (a << 56));
x->a = cpu_to_be64((a >> 8) ^ (_tt << 48));
}
static void gf128mul_x8_bbe(be128 *x)
{
u64 a = be64_to_cpu(x->a);
u64 b = be64_to_cpu(x->b);
u64 _tt = gf128mul_table_be[a >> 56];
x->a = cpu_to_be64((a << 8) | (b >> 56));
x->b = cpu_to_be64((b << 8) ^ _tt);
}
void gf128mul_x8_ble(le128 *r, const le128 *x)
{
u64 a = le64_to_cpu(x->a);
u64 b = le64_to_cpu(x->b);
u64 _tt = gf128mul_table_be[a >> 56];
r->a = cpu_to_le64((a << 8) | (b >> 56));
r->b = cpu_to_le64((b << 8) ^ _tt);
}
EXPORT_SYMBOL(gf128mul_x8_ble);
void gf128mul_lle(be128 *r, const be128 *b)
{
/*
* The p array should be aligned to twice the size of its element type,
* so that every even/odd pair is guaranteed to share a cacheline
* (assuming a cacheline size of 32 bytes or more, which is by far the
* most common). This ensures that each be128_xor() call in the loop
* takes the same amount of time regardless of the value of 'ch', which
* is derived from function parameter 'b', which is commonly used as a
* key, e.g., for GHASH. The odd array elements are all set to zero,
* making each be128_xor() a NOP if its associated bit in 'ch' is not
* set, and this is equivalent to calling be128_xor() conditionally.
* This approach aims to avoid leaking information about such keys
* through execution time variances.
*
* Unfortunately, __aligned(16) or higher does not work on x86 for
* variables on the stack so we need to perform the alignment by hand.
*/
be128 array[16 + 3] = {};
be128 *p = PTR_ALIGN(&array[0], 2 * sizeof(be128));
int i;
p[0] = *r;
for (i = 0; i < 7; ++i)
gf128mul_x_lle(&p[2 * i + 2], &p[2 * i]);
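/* The even slots now hold p[2 * i] = r * x^i for i = 0..7; the odd
 * slots stay zero. */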
memset(r, 0, sizeof(*r));
for (i = 0;;) {
u8 ch = ((u8 *)b)[15 - i];
be128_xor(r, r, &p[ 0 + !(ch & 0x80)]);
be128_xor(r, r, &p[ 2 + !(ch & 0x40)]);
be128_xor(r, r, &p[ 4 + !(ch & 0x20)]);
be128_xor(r, r, &p[ 6 + !(ch & 0x10)]);
be128_xor(r, r, &p[ 8 + !(ch & 0x08)]);
be128_xor(r, r, &p[10 + !(ch & 0x04)]);
be128_xor(r, r, &p[12 + !(ch & 0x02)]);
be128_xor(r, r, &p[14 + !(ch & 0x01)]);
if (++i >= 16)
break;
gf128mul_x8_lle_ti(r); /* use the time invariant version */
}
}
EXPORT_SYMBOL(gf128mul_lle);
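/* Unlike gf128mul_lle() above, this variant branches on the bits of b,
 * so it is not constant-time. */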
void gf128mul_bbe(be128 *r, const be128 *b)
{
be128 p[8];
int i;
p[0] = *r;
for (i = 0; i < 7; ++i)
gf128mul_x_bbe(&p[i + 1], &p[i]);
memset(r, 0, sizeof(*r));
for (i = 0;;) {
u8 ch = ((u8 *)b)[i];
if (ch & 0x80)
be128_xor(r, r, &p[7]);
if (ch & 0x40)
be128_xor(r, r, &p[6]);
if (ch & 0x20)
be128_xor(r, r, &p[5]);
if (ch & 0x10)
be128_xor(r, r, &p[4]);
if (ch & 0x08)
be128_xor(r, r, &p[3]);
if (ch & 0x04)
be128_xor(r, r, &p[2]);
if (ch & 0x02)
be128_xor(r, r, &p[1]);
if (ch & 0x01)
be128_xor(r, r, &p[0]);
if (++i >= 16)
break;
gf128mul_x8_bbe(r);
}
}
EXPORT_SYMBOL(gf128mul_bbe);
/* This version uses 64k bytes of table space.
A 16 byte buffer has to be multiplied by a 16 byte key
value in GF(2^128). If we consider a GF(2^128) value in
the buffer's lowest byte, we can construct a table of
the 256 16 byte values that result from the 256 values
of this byte. This requires 4096 bytes. But we also
need tables for each of the 15 higher bytes in the
buffer as well, which makes 64 kbytes in total.
*/
/* additional explanation
* t[0][BYTE] contains g*BYTE
* t[1][BYTE] contains g*x^8*BYTE
* ..
* t[15][BYTE] contains g*x^120*BYTE */
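/* Since byte 15 of a bbe value holds the lowest-degree coefficients,
 * a * g = t[0][a[15]] ^ t[1][a[14]] ^ ... ^ t[15][a[0]],
 * which is exactly the sum gf128mul_64k_bbe() computes below. */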
struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g)
{
struct gf128mul_64k *t;
int i, j, k;
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (!t)
goto out;
for (i = 0; i < 16; i++) {
t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
if (!t->t[i]) {
gf128mul_free_64k(t);
t = NULL;
goto out;
}
}
t->t[0]->t[1] = *g;
for (j = 1; j <= 64; j <<= 1)
gf128mul_x_bbe(&t->t[0]->t[j + j], &t->t[0]->t[j]);
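/* t->t[0]->t[j] now holds g * x^k for each power-of-two index j = 2^k;
 * multiplication is linear over GF(2), so every remaining entry is
 * filled in below as the XOR of the entries for its set bits. */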
for (i = 0;;) {
for (j = 2; j < 256; j += j)
for (k = 1; k < j; ++k)
be128_xor(&t->t[i]->t[j + k],
&t->t[i]->t[j], &t->t[i]->t[k]);
if (++i >= 16)
break;
for (j = 128; j > 0; j >>= 1) {
t->t[i]->t[j] = t->t[i - 1]->t[j];
gf128mul_x8_bbe(&t->t[i]->t[j]);
}
}
out:
return t;
}
EXPORT_SYMBOL(gf128mul_init_64k_bbe);
void gf128mul_free_64k(struct gf128mul_64k *t)
{
int i;
for (i = 0; i < 16; i++)
kfree_sensitive(t->t[i]);
kfree_sensitive(t);
}
EXPORT_SYMBOL(gf128mul_free_64k);
void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t)
{
u8 *ap = (u8 *)a;
be128 r[1];
int i;
*r = t->t[0]->t[ap[15]];
for (i = 1; i < 16; ++i)
be128_xor(r, r, &t->t[i]->t[ap[15 - i]]);
*a = *r;
}
EXPORT_SYMBOL(gf128mul_64k_bbe);
/* This version uses 4k bytes of table space.
A 16 byte buffer has to be multiplied by a 16 byte key
value in GF(2^128). If we consider a GF(2^128) value in a
single byte, we can construct a table of the 256 16 byte
values that result from the 256 values of this byte.
This requires 4096 bytes. If we take the highest byte in
the buffer and use this table to get the result, we then
have to multiply by x^120 to get the final value. For the
next highest byte the result has to be multiplied by x^112
and so on. But we can do this by accumulating the result
in an accumulator starting with the result for the top
byte. We repeatedly multiply the accumulator value by
x^8 and then add in (i.e. xor) the 16 bytes of the next
lower byte in the buffer, stopping when we reach the
lowest byte. This requires a 4096 byte table.
*/
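/* In other words a Horner scheme; for the lle case below:
 *	r = t[a[15]];
 *	for (i = 14; i >= 0; i--)
 *		r = (r * x^8) ^ t[a[i]];
 * which is what gf128mul_4k_lle() implements. */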
struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g)
{
struct gf128mul_4k *t;
int j, k;
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (!t)
goto out;
t->t[128] = *g;
for (j = 64; j > 0; j >>= 1)
gf128mul_x_lle(&t->t[j], &t->t[j+j]);
for (j = 2; j < 256; j += j)
for (k = 1; k < j; ++k)
be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
out:
return t;
}
EXPORT_SYMBOL(gf128mul_init_4k_lle);
struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g)
{
struct gf128mul_4k *t;
int j, k;
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (!t)
goto out;
t->t[1] = *g;
for (j = 1; j <= 64; j <<= 1)
gf128mul_x_bbe(&t->t[j + j], &t->t[j]);
for (j = 2; j < 256; j += j)
for (k = 1; k < j; ++k)
be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
out:
return t;
}
EXPORT_SYMBOL(gf128mul_init_4k_bbe);
void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t)
{
u8 *ap = (u8 *)a;
be128 r[1];
int i = 15;
*r = t->t[ap[15]];
while (i--) {
gf128mul_x8_lle(r);
be128_xor(r, r, &t->t[ap[i]]);
}
*a = *r;
}
EXPORT_SYMBOL(gf128mul_4k_lle);
void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t)
{
u8 *ap = (u8 *)a;
be128 r[1];
int i = 0;
*r = t->t[ap[0]];
while (++i < 16) {
gf128mul_x8_bbe(r);
be128_xor(r, r, &t->t[ap[i]]);
}
*a = *r;
}
EXPORT_SYMBOL(gf128mul_4k_bbe);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)");
| linux-master | lib/crypto/gf128mul.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*
* This is an implementation of the BLAKE2s hash and PRF functions.
*
* Information: https://blake2.net/
*
*/
#include <crypto/internal/blake2s.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <asm/unaligned.h>
static const u8 blake2s_sigma[10][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
};
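/* blake2s_sigma[r][2 * i] and blake2s_sigma[r][2 * i + 1] select the
 * two message words mixed into the i-th G call of round r. */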
static inline void blake2s_increment_counter(struct blake2s_state *state,
const u32 inc)
{
state->t[0] += inc;
state->t[1] += (state->t[0] < inc);
}
void blake2s_compress(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc)
__weak __alias(blake2s_compress_generic);
void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
size_t nblocks, const u32 inc)
{
u32 m[16];
u32 v[16];
int i;
WARN_ON(IS_ENABLED(DEBUG) &&
(nblocks > 1 && inc != BLAKE2S_BLOCK_SIZE));
while (nblocks > 0) {
blake2s_increment_counter(state, inc);
memcpy(m, block, BLAKE2S_BLOCK_SIZE);
le32_to_cpu_array(m, ARRAY_SIZE(m));
memcpy(v, state->h, 32);
v[ 8] = BLAKE2S_IV0;
v[ 9] = BLAKE2S_IV1;
v[10] = BLAKE2S_IV2;
v[11] = BLAKE2S_IV3;
v[12] = BLAKE2S_IV4 ^ state->t[0];
v[13] = BLAKE2S_IV5 ^ state->t[1];
v[14] = BLAKE2S_IV6 ^ state->f[0];
v[15] = BLAKE2S_IV7 ^ state->f[1];
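/* G is the BLAKE2s quarter-round: two add-xor-rotate steps per message
 * word, applied to one column or diagonal of v, with rotation
 * constants 16, 12, 8 and 7. */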
#define G(r, i, a, b, c, d) do { \
a += b + m[blake2s_sigma[r][2 * i + 0]]; \
d = ror32(d ^ a, 16); \
c += d; \
b = ror32(b ^ c, 12); \
a += b + m[blake2s_sigma[r][2 * i + 1]]; \
d = ror32(d ^ a, 8); \
c += d; \
b = ror32(b ^ c, 7); \
} while (0)
#define ROUND(r) do { \
G(r, 0, v[0], v[ 4], v[ 8], v[12]); \
G(r, 1, v[1], v[ 5], v[ 9], v[13]); \
G(r, 2, v[2], v[ 6], v[10], v[14]); \
G(r, 3, v[3], v[ 7], v[11], v[15]); \
G(r, 4, v[0], v[ 5], v[10], v[15]); \
G(r, 5, v[1], v[ 6], v[11], v[12]); \
G(r, 6, v[2], v[ 7], v[ 8], v[13]); \
G(r, 7, v[3], v[ 4], v[ 9], v[14]); \
} while (0)
ROUND(0);
ROUND(1);
ROUND(2);
ROUND(3);
ROUND(4);
ROUND(5);
ROUND(6);
ROUND(7);
ROUND(8);
ROUND(9);
#undef G
#undef ROUND
for (i = 0; i < 8; ++i)
state->h[i] ^= v[i] ^ v[i + 8];
block += BLAKE2S_BLOCK_SIZE;
--nblocks;
}
}
EXPORT_SYMBOL(blake2s_compress_generic);
| linux-master | lib/crypto/blake2s-generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* DES & Triple DES EDE Cipher Algorithms.
*
* Copyright (c) 2005 Dag Arne Osvik <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/fips.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/unaligned.h>
#include <crypto/des.h>
#include <crypto/internal/des.h>
#define ROL(x, r) ((x) = rol32((x), (r)))
#define ROR(x, r) ((x) = ror32((x), (r)))
/* Lookup tables for key expansion */
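/* pc1 and pc2 correspond to the DES PC-1 and PC-2 permuted choices in
 * precomputed, byte-indexed form, with rs serving the per-round
 * rotations of the 28-bit key halves. */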
static const u8 pc1[256] = {
0x00, 0x00, 0x40, 0x04, 0x10, 0x10, 0x50, 0x14,
0x04, 0x40, 0x44, 0x44, 0x14, 0x50, 0x54, 0x54,
0x02, 0x02, 0x42, 0x06, 0x12, 0x12, 0x52, 0x16,
0x06, 0x42, 0x46, 0x46, 0x16, 0x52, 0x56, 0x56,
0x80, 0x08, 0xc0, 0x0c, 0x90, 0x18, 0xd0, 0x1c,
0x84, 0x48, 0xc4, 0x4c, 0x94, 0x58, 0xd4, 0x5c,
0x82, 0x0a, 0xc2, 0x0e, 0x92, 0x1a, 0xd2, 0x1e,
0x86, 0x4a, 0xc6, 0x4e, 0x96, 0x5a, 0xd6, 0x5e,
0x20, 0x20, 0x60, 0x24, 0x30, 0x30, 0x70, 0x34,
0x24, 0x60, 0x64, 0x64, 0x34, 0x70, 0x74, 0x74,
0x22, 0x22, 0x62, 0x26, 0x32, 0x32, 0x72, 0x36,
0x26, 0x62, 0x66, 0x66, 0x36, 0x72, 0x76, 0x76,
0xa0, 0x28, 0xe0, 0x2c, 0xb0, 0x38, 0xf0, 0x3c,
0xa4, 0x68, 0xe4, 0x6c, 0xb4, 0x78, 0xf4, 0x7c,
0xa2, 0x2a, 0xe2, 0x2e, 0xb2, 0x3a, 0xf2, 0x3e,
0xa6, 0x6a, 0xe6, 0x6e, 0xb6, 0x7a, 0xf6, 0x7e,
0x08, 0x80, 0x48, 0x84, 0x18, 0x90, 0x58, 0x94,
0x0c, 0xc0, 0x4c, 0xc4, 0x1c, 0xd0, 0x5c, 0xd4,
0x0a, 0x82, 0x4a, 0x86, 0x1a, 0x92, 0x5a, 0x96,
0x0e, 0xc2, 0x4e, 0xc6, 0x1e, 0xd2, 0x5e, 0xd6,
0x88, 0x88, 0xc8, 0x8c, 0x98, 0x98, 0xd8, 0x9c,
0x8c, 0xc8, 0xcc, 0xcc, 0x9c, 0xd8, 0xdc, 0xdc,
0x8a, 0x8a, 0xca, 0x8e, 0x9a, 0x9a, 0xda, 0x9e,
0x8e, 0xca, 0xce, 0xce, 0x9e, 0xda, 0xde, 0xde,
0x28, 0xa0, 0x68, 0xa4, 0x38, 0xb0, 0x78, 0xb4,
0x2c, 0xe0, 0x6c, 0xe4, 0x3c, 0xf0, 0x7c, 0xf4,
0x2a, 0xa2, 0x6a, 0xa6, 0x3a, 0xb2, 0x7a, 0xb6,
0x2e, 0xe2, 0x6e, 0xe6, 0x3e, 0xf2, 0x7e, 0xf6,
0xa8, 0xa8, 0xe8, 0xac, 0xb8, 0xb8, 0xf8, 0xbc,
0xac, 0xe8, 0xec, 0xec, 0xbc, 0xf8, 0xfc, 0xfc,
0xaa, 0xaa, 0xea, 0xae, 0xba, 0xba, 0xfa, 0xbe,
0xae, 0xea, 0xee, 0xee, 0xbe, 0xfa, 0xfe, 0xfe
};
static const u8 rs[256] = {
0x00, 0x00, 0x80, 0x80, 0x02, 0x02, 0x82, 0x82,
0x04, 0x04, 0x84, 0x84, 0x06, 0x06, 0x86, 0x86,
0x08, 0x08, 0x88, 0x88, 0x0a, 0x0a, 0x8a, 0x8a,
0x0c, 0x0c, 0x8c, 0x8c, 0x0e, 0x0e, 0x8e, 0x8e,
0x10, 0x10, 0x90, 0x90, 0x12, 0x12, 0x92, 0x92,
0x14, 0x14, 0x94, 0x94, 0x16, 0x16, 0x96, 0x96,
0x18, 0x18, 0x98, 0x98, 0x1a, 0x1a, 0x9a, 0x9a,
0x1c, 0x1c, 0x9c, 0x9c, 0x1e, 0x1e, 0x9e, 0x9e,
0x20, 0x20, 0xa0, 0xa0, 0x22, 0x22, 0xa2, 0xa2,
0x24, 0x24, 0xa4, 0xa4, 0x26, 0x26, 0xa6, 0xa6,
0x28, 0x28, 0xa8, 0xa8, 0x2a, 0x2a, 0xaa, 0xaa,
0x2c, 0x2c, 0xac, 0xac, 0x2e, 0x2e, 0xae, 0xae,
0x30, 0x30, 0xb0, 0xb0, 0x32, 0x32, 0xb2, 0xb2,
0x34, 0x34, 0xb4, 0xb4, 0x36, 0x36, 0xb6, 0xb6,
0x38, 0x38, 0xb8, 0xb8, 0x3a, 0x3a, 0xba, 0xba,
0x3c, 0x3c, 0xbc, 0xbc, 0x3e, 0x3e, 0xbe, 0xbe,
0x40, 0x40, 0xc0, 0xc0, 0x42, 0x42, 0xc2, 0xc2,
0x44, 0x44, 0xc4, 0xc4, 0x46, 0x46, 0xc6, 0xc6,
0x48, 0x48, 0xc8, 0xc8, 0x4a, 0x4a, 0xca, 0xca,
0x4c, 0x4c, 0xcc, 0xcc, 0x4e, 0x4e, 0xce, 0xce,
0x50, 0x50, 0xd0, 0xd0, 0x52, 0x52, 0xd2, 0xd2,
0x54, 0x54, 0xd4, 0xd4, 0x56, 0x56, 0xd6, 0xd6,
0x58, 0x58, 0xd8, 0xd8, 0x5a, 0x5a, 0xda, 0xda,
0x5c, 0x5c, 0xdc, 0xdc, 0x5e, 0x5e, 0xde, 0xde,
0x60, 0x60, 0xe0, 0xe0, 0x62, 0x62, 0xe2, 0xe2,
0x64, 0x64, 0xe4, 0xe4, 0x66, 0x66, 0xe6, 0xe6,
0x68, 0x68, 0xe8, 0xe8, 0x6a, 0x6a, 0xea, 0xea,
0x6c, 0x6c, 0xec, 0xec, 0x6e, 0x6e, 0xee, 0xee,
0x70, 0x70, 0xf0, 0xf0, 0x72, 0x72, 0xf2, 0xf2,
0x74, 0x74, 0xf4, 0xf4, 0x76, 0x76, 0xf6, 0xf6,
0x78, 0x78, 0xf8, 0xf8, 0x7a, 0x7a, 0xfa, 0xfa,
0x7c, 0x7c, 0xfc, 0xfc, 0x7e, 0x7e, 0xfe, 0xfe
};
static const u32 pc2[1024] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00040000, 0x00000000, 0x04000000, 0x00100000,
0x00400000, 0x00000008, 0x00000800, 0x40000000,
0x00440000, 0x00000008, 0x04000800, 0x40100000,
0x00000400, 0x00000020, 0x08000000, 0x00000100,
0x00040400, 0x00000020, 0x0c000000, 0x00100100,
0x00400400, 0x00000028, 0x08000800, 0x40000100,
0x00440400, 0x00000028, 0x0c000800, 0x40100100,
0x80000000, 0x00000010, 0x00000000, 0x00800000,
0x80040000, 0x00000010, 0x04000000, 0x00900000,
0x80400000, 0x00000018, 0x00000800, 0x40800000,
0x80440000, 0x00000018, 0x04000800, 0x40900000,
0x80000400, 0x00000030, 0x08000000, 0x00800100,
0x80040400, 0x00000030, 0x0c000000, 0x00900100,
0x80400400, 0x00000038, 0x08000800, 0x40800100,
0x80440400, 0x00000038, 0x0c000800, 0x40900100,
0x10000000, 0x00000000, 0x00200000, 0x00001000,
0x10040000, 0x00000000, 0x04200000, 0x00101000,
0x10400000, 0x00000008, 0x00200800, 0x40001000,
0x10440000, 0x00000008, 0x04200800, 0x40101000,
0x10000400, 0x00000020, 0x08200000, 0x00001100,
0x10040400, 0x00000020, 0x0c200000, 0x00101100,
0x10400400, 0x00000028, 0x08200800, 0x40001100,
0x10440400, 0x00000028, 0x0c200800, 0x40101100,
0x90000000, 0x00000010, 0x00200000, 0x00801000,
0x90040000, 0x00000010, 0x04200000, 0x00901000,
0x90400000, 0x00000018, 0x00200800, 0x40801000,
0x90440000, 0x00000018, 0x04200800, 0x40901000,
0x90000400, 0x00000030, 0x08200000, 0x00801100,
0x90040400, 0x00000030, 0x0c200000, 0x00901100,
0x90400400, 0x00000038, 0x08200800, 0x40801100,
0x90440400, 0x00000038, 0x0c200800, 0x40901100,
0x00000200, 0x00080000, 0x00000000, 0x00000004,
0x00040200, 0x00080000, 0x04000000, 0x00100004,
0x00400200, 0x00080008, 0x00000800, 0x40000004,
0x00440200, 0x00080008, 0x04000800, 0x40100004,
0x00000600, 0x00080020, 0x08000000, 0x00000104,
0x00040600, 0x00080020, 0x0c000000, 0x00100104,
0x00400600, 0x00080028, 0x08000800, 0x40000104,
0x00440600, 0x00080028, 0x0c000800, 0x40100104,
0x80000200, 0x00080010, 0x00000000, 0x00800004,
0x80040200, 0x00080010, 0x04000000, 0x00900004,
0x80400200, 0x00080018, 0x00000800, 0x40800004,
0x80440200, 0x00080018, 0x04000800, 0x40900004,
0x80000600, 0x00080030, 0x08000000, 0x00800104,
0x80040600, 0x00080030, 0x0c000000, 0x00900104,
0x80400600, 0x00080038, 0x08000800, 0x40800104,
0x80440600, 0x00080038, 0x0c000800, 0x40900104,
0x10000200, 0x00080000, 0x00200000, 0x00001004,
0x10040200, 0x00080000, 0x04200000, 0x00101004,
0x10400200, 0x00080008, 0x00200800, 0x40001004,
0x10440200, 0x00080008, 0x04200800, 0x40101004,
0x10000600, 0x00080020, 0x08200000, 0x00001104,
0x10040600, 0x00080020, 0x0c200000, 0x00101104,
0x10400600, 0x00080028, 0x08200800, 0x40001104,
0x10440600, 0x00080028, 0x0c200800, 0x40101104,
0x90000200, 0x00080010, 0x00200000, 0x00801004,
0x90040200, 0x00080010, 0x04200000, 0x00901004,
0x90400200, 0x00080018, 0x00200800, 0x40801004,
0x90440200, 0x00080018, 0x04200800, 0x40901004,
0x90000600, 0x00080030, 0x08200000, 0x00801104,
0x90040600, 0x00080030, 0x0c200000, 0x00901104,
0x90400600, 0x00080038, 0x08200800, 0x40801104,
0x90440600, 0x00080038, 0x0c200800, 0x40901104,
0x00000002, 0x00002000, 0x20000000, 0x00000001,
0x00040002, 0x00002000, 0x24000000, 0x00100001,
0x00400002, 0x00002008, 0x20000800, 0x40000001,
0x00440002, 0x00002008, 0x24000800, 0x40100001,
0x00000402, 0x00002020, 0x28000000, 0x00000101,
0x00040402, 0x00002020, 0x2c000000, 0x00100101,
0x00400402, 0x00002028, 0x28000800, 0x40000101,
0x00440402, 0x00002028, 0x2c000800, 0x40100101,
0x80000002, 0x00002010, 0x20000000, 0x00800001,
0x80040002, 0x00002010, 0x24000000, 0x00900001,
0x80400002, 0x00002018, 0x20000800, 0x40800001,
0x80440002, 0x00002018, 0x24000800, 0x40900001,
0x80000402, 0x00002030, 0x28000000, 0x00800101,
0x80040402, 0x00002030, 0x2c000000, 0x00900101,
0x80400402, 0x00002038, 0x28000800, 0x40800101,
0x80440402, 0x00002038, 0x2c000800, 0x40900101,
0x10000002, 0x00002000, 0x20200000, 0x00001001,
0x10040002, 0x00002000, 0x24200000, 0x00101001,
0x10400002, 0x00002008, 0x20200800, 0x40001001,
0x10440002, 0x00002008, 0x24200800, 0x40101001,
0x10000402, 0x00002020, 0x28200000, 0x00001101,
0x10040402, 0x00002020, 0x2c200000, 0x00101101,
0x10400402, 0x00002028, 0x28200800, 0x40001101,
0x10440402, 0x00002028, 0x2c200800, 0x40101101,
0x90000002, 0x00002010, 0x20200000, 0x00801001,
0x90040002, 0x00002010, 0x24200000, 0x00901001,
0x90400002, 0x00002018, 0x20200800, 0x40801001,
0x90440002, 0x00002018, 0x24200800, 0x40901001,
0x90000402, 0x00002030, 0x28200000, 0x00801101,
0x90040402, 0x00002030, 0x2c200000, 0x00901101,
0x90400402, 0x00002038, 0x28200800, 0x40801101,
0x90440402, 0x00002038, 0x2c200800, 0x40901101,
0x00000202, 0x00082000, 0x20000000, 0x00000005,
0x00040202, 0x00082000, 0x24000000, 0x00100005,
0x00400202, 0x00082008, 0x20000800, 0x40000005,
0x00440202, 0x00082008, 0x24000800, 0x40100005,
0x00000602, 0x00082020, 0x28000000, 0x00000105,
0x00040602, 0x00082020, 0x2c000000, 0x00100105,
0x00400602, 0x00082028, 0x28000800, 0x40000105,
0x00440602, 0x00082028, 0x2c000800, 0x40100105,
0x80000202, 0x00082010, 0x20000000, 0x00800005,
0x80040202, 0x00082010, 0x24000000, 0x00900005,
0x80400202, 0x00082018, 0x20000800, 0x40800005,
0x80440202, 0x00082018, 0x24000800, 0x40900005,
0x80000602, 0x00082030, 0x28000000, 0x00800105,
0x80040602, 0x00082030, 0x2c000000, 0x00900105,
0x80400602, 0x00082038, 0x28000800, 0x40800105,
0x80440602, 0x00082038, 0x2c000800, 0x40900105,
0x10000202, 0x00082000, 0x20200000, 0x00001005,
0x10040202, 0x00082000, 0x24200000, 0x00101005,
0x10400202, 0x00082008, 0x20200800, 0x40001005,
0x10440202, 0x00082008, 0x24200800, 0x40101005,
0x10000602, 0x00082020, 0x28200000, 0x00001105,
0x10040602, 0x00082020, 0x2c200000, 0x00101105,
0x10400602, 0x00082028, 0x28200800, 0x40001105,
0x10440602, 0x00082028, 0x2c200800, 0x40101105,
0x90000202, 0x00082010, 0x20200000, 0x00801005,
0x90040202, 0x00082010, 0x24200000, 0x00901005,
0x90400202, 0x00082018, 0x20200800, 0x40801005,
0x90440202, 0x00082018, 0x24200800, 0x40901005,
0x90000602, 0x00082030, 0x28200000, 0x00801105,
0x90040602, 0x00082030, 0x2c200000, 0x00901105,
0x90400602, 0x00082038, 0x28200800, 0x40801105,
0x90440602, 0x00082038, 0x2c200800, 0x40901105,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000008, 0x00080000, 0x10000000,
0x02000000, 0x00000000, 0x00000080, 0x00001000,
0x02000000, 0x00000008, 0x00080080, 0x10001000,
0x00004000, 0x00000000, 0x00000040, 0x00040000,
0x00004000, 0x00000008, 0x00080040, 0x10040000,
0x02004000, 0x00000000, 0x000000c0, 0x00041000,
0x02004000, 0x00000008, 0x000800c0, 0x10041000,
0x00020000, 0x00008000, 0x08000000, 0x00200000,
0x00020000, 0x00008008, 0x08080000, 0x10200000,
0x02020000, 0x00008000, 0x08000080, 0x00201000,
0x02020000, 0x00008008, 0x08080080, 0x10201000,
0x00024000, 0x00008000, 0x08000040, 0x00240000,
0x00024000, 0x00008008, 0x08080040, 0x10240000,
0x02024000, 0x00008000, 0x080000c0, 0x00241000,
0x02024000, 0x00008008, 0x080800c0, 0x10241000,
0x00000000, 0x01000000, 0x00002000, 0x00000020,
0x00000000, 0x01000008, 0x00082000, 0x10000020,
0x02000000, 0x01000000, 0x00002080, 0x00001020,
0x02000000, 0x01000008, 0x00082080, 0x10001020,
0x00004000, 0x01000000, 0x00002040, 0x00040020,
0x00004000, 0x01000008, 0x00082040, 0x10040020,
0x02004000, 0x01000000, 0x000020c0, 0x00041020,
0x02004000, 0x01000008, 0x000820c0, 0x10041020,
0x00020000, 0x01008000, 0x08002000, 0x00200020,
0x00020000, 0x01008008, 0x08082000, 0x10200020,
0x02020000, 0x01008000, 0x08002080, 0x00201020,
0x02020000, 0x01008008, 0x08082080, 0x10201020,
0x00024000, 0x01008000, 0x08002040, 0x00240020,
0x00024000, 0x01008008, 0x08082040, 0x10240020,
0x02024000, 0x01008000, 0x080020c0, 0x00241020,
0x02024000, 0x01008008, 0x080820c0, 0x10241020,
0x00000400, 0x04000000, 0x00100000, 0x00000004,
0x00000400, 0x04000008, 0x00180000, 0x10000004,
0x02000400, 0x04000000, 0x00100080, 0x00001004,
0x02000400, 0x04000008, 0x00180080, 0x10001004,
0x00004400, 0x04000000, 0x00100040, 0x00040004,
0x00004400, 0x04000008, 0x00180040, 0x10040004,
0x02004400, 0x04000000, 0x001000c0, 0x00041004,
0x02004400, 0x04000008, 0x001800c0, 0x10041004,
0x00020400, 0x04008000, 0x08100000, 0x00200004,
0x00020400, 0x04008008, 0x08180000, 0x10200004,
0x02020400, 0x04008000, 0x08100080, 0x00201004,
0x02020400, 0x04008008, 0x08180080, 0x10201004,
0x00024400, 0x04008000, 0x08100040, 0x00240004,
0x00024400, 0x04008008, 0x08180040, 0x10240004,
0x02024400, 0x04008000, 0x081000c0, 0x00241004,
0x02024400, 0x04008008, 0x081800c0, 0x10241004,
0x00000400, 0x05000000, 0x00102000, 0x00000024,
0x00000400, 0x05000008, 0x00182000, 0x10000024,
0x02000400, 0x05000000, 0x00102080, 0x00001024,
0x02000400, 0x05000008, 0x00182080, 0x10001024,
0x00004400, 0x05000000, 0x00102040, 0x00040024,
0x00004400, 0x05000008, 0x00182040, 0x10040024,
0x02004400, 0x05000000, 0x001020c0, 0x00041024,
0x02004400, 0x05000008, 0x001820c0, 0x10041024,
0x00020400, 0x05008000, 0x08102000, 0x00200024,
0x00020400, 0x05008008, 0x08182000, 0x10200024,
0x02020400, 0x05008000, 0x08102080, 0x00201024,
0x02020400, 0x05008008, 0x08182080, 0x10201024,
0x00024400, 0x05008000, 0x08102040, 0x00240024,
0x00024400, 0x05008008, 0x08182040, 0x10240024,
0x02024400, 0x05008000, 0x081020c0, 0x00241024,
0x02024400, 0x05008008, 0x081820c0, 0x10241024,
0x00000800, 0x00010000, 0x20000000, 0x00000010,
0x00000800, 0x00010008, 0x20080000, 0x10000010,
0x02000800, 0x00010000, 0x20000080, 0x00001010,
0x02000800, 0x00010008, 0x20080080, 0x10001010,
0x00004800, 0x00010000, 0x20000040, 0x00040010,
0x00004800, 0x00010008, 0x20080040, 0x10040010,
0x02004800, 0x00010000, 0x200000c0, 0x00041010,
0x02004800, 0x00010008, 0x200800c0, 0x10041010,
0x00020800, 0x00018000, 0x28000000, 0x00200010,
0x00020800, 0x00018008, 0x28080000, 0x10200010,
0x02020800, 0x00018000, 0x28000080, 0x00201010,
0x02020800, 0x00018008, 0x28080080, 0x10201010,
0x00024800, 0x00018000, 0x28000040, 0x00240010,
0x00024800, 0x00018008, 0x28080040, 0x10240010,
0x02024800, 0x00018000, 0x280000c0, 0x00241010,
0x02024800, 0x00018008, 0x280800c0, 0x10241010,
0x00000800, 0x01010000, 0x20002000, 0x00000030,
0x00000800, 0x01010008, 0x20082000, 0x10000030,
0x02000800, 0x01010000, 0x20002080, 0x00001030,
0x02000800, 0x01010008, 0x20082080, 0x10001030,
0x00004800, 0x01010000, 0x20002040, 0x00040030,
0x00004800, 0x01010008, 0x20082040, 0x10040030,
0x02004800, 0x01010000, 0x200020c0, 0x00041030,
0x02004800, 0x01010008, 0x200820c0, 0x10041030,
0x00020800, 0x01018000, 0x28002000, 0x00200030,
0x00020800, 0x01018008, 0x28082000, 0x10200030,
0x02020800, 0x01018000, 0x28002080, 0x00201030,
0x02020800, 0x01018008, 0x28082080, 0x10201030,
0x00024800, 0x01018000, 0x28002040, 0x00240030,
0x00024800, 0x01018008, 0x28082040, 0x10240030,
0x02024800, 0x01018000, 0x280020c0, 0x00241030,
0x02024800, 0x01018008, 0x280820c0, 0x10241030,
0x00000c00, 0x04010000, 0x20100000, 0x00000014,
0x00000c00, 0x04010008, 0x20180000, 0x10000014,
0x02000c00, 0x04010000, 0x20100080, 0x00001014,
0x02000c00, 0x04010008, 0x20180080, 0x10001014,
0x00004c00, 0x04010000, 0x20100040, 0x00040014,
0x00004c00, 0x04010008, 0x20180040, 0x10040014,
0x02004c00, 0x04010000, 0x201000c0, 0x00041014,
0x02004c00, 0x04010008, 0x201800c0, 0x10041014,
0x00020c00, 0x04018000, 0x28100000, 0x00200014,
0x00020c00, 0x04018008, 0x28180000, 0x10200014,
0x02020c00, 0x04018000, 0x28100080, 0x00201014,
0x02020c00, 0x04018008, 0x28180080, 0x10201014,
0x00024c00, 0x04018000, 0x28100040, 0x00240014,
0x00024c00, 0x04018008, 0x28180040, 0x10240014,
0x02024c00, 0x04018000, 0x281000c0, 0x00241014,
0x02024c00, 0x04018008, 0x281800c0, 0x10241014,
0x00000c00, 0x05010000, 0x20102000, 0x00000034,
0x00000c00, 0x05010008, 0x20182000, 0x10000034,
0x02000c00, 0x05010000, 0x20102080, 0x00001034,
0x02000c00, 0x05010008, 0x20182080, 0x10001034,
0x00004c00, 0x05010000, 0x20102040, 0x00040034,
0x00004c00, 0x05010008, 0x20182040, 0x10040034,
0x02004c00, 0x05010000, 0x201020c0, 0x00041034,
0x02004c00, 0x05010008, 0x201820c0, 0x10041034,
0x00020c00, 0x05018000, 0x28102000, 0x00200034,
0x00020c00, 0x05018008, 0x28182000, 0x10200034,
0x02020c00, 0x05018000, 0x28102080, 0x00201034,
0x02020c00, 0x05018008, 0x28182080, 0x10201034,
0x00024c00, 0x05018000, 0x28102040, 0x00240034,
0x00024c00, 0x05018008, 0x28182040, 0x10240034,
0x02024c00, 0x05018000, 0x281020c0, 0x00241034,
0x02024c00, 0x05018008, 0x281820c0, 0x10241034
};
/* S-box lookup tables */
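/* The eight DES S-boxes, stored as 32-bit entries with the round
 * permutation P folded in, so the round function becomes eight table
 * lookups XORed together. */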
static const u32 S1[64] = {
0x01010400, 0x00000000, 0x00010000, 0x01010404,
0x01010004, 0x00010404, 0x00000004, 0x00010000,
0x00000400, 0x01010400, 0x01010404, 0x00000400,
0x01000404, 0x01010004, 0x01000000, 0x00000004,
0x00000404, 0x01000400, 0x01000400, 0x00010400,
0x00010400, 0x01010000, 0x01010000, 0x01000404,
0x00010004, 0x01000004, 0x01000004, 0x00010004,
0x00000000, 0x00000404, 0x00010404, 0x01000000,
0x00010000, 0x01010404, 0x00000004, 0x01010000,
0x01010400, 0x01000000, 0x01000000, 0x00000400,
0x01010004, 0x00010000, 0x00010400, 0x01000004,
0x00000400, 0x00000004, 0x01000404, 0x00010404,
0x01010404, 0x00010004, 0x01010000, 0x01000404,
0x01000004, 0x00000404, 0x00010404, 0x01010400,
0x00000404, 0x01000400, 0x01000400, 0x00000000,
0x00010004, 0x00010400, 0x00000000, 0x01010004
};
static const u32 S2[64] = {
0x80108020, 0x80008000, 0x00008000, 0x00108020,
0x00100000, 0x00000020, 0x80100020, 0x80008020,
0x80000020, 0x80108020, 0x80108000, 0x80000000,
0x80008000, 0x00100000, 0x00000020, 0x80100020,
0x00108000, 0x00100020, 0x80008020, 0x00000000,
0x80000000, 0x00008000, 0x00108020, 0x80100000,
0x00100020, 0x80000020, 0x00000000, 0x00108000,
0x00008020, 0x80108000, 0x80100000, 0x00008020,
0x00000000, 0x00108020, 0x80100020, 0x00100000,
0x80008020, 0x80100000, 0x80108000, 0x00008000,
0x80100000, 0x80008000, 0x00000020, 0x80108020,
0x00108020, 0x00000020, 0x00008000, 0x80000000,
0x00008020, 0x80108000, 0x00100000, 0x80000020,
0x00100020, 0x80008020, 0x80000020, 0x00100020,
0x00108000, 0x00000000, 0x80008000, 0x00008020,
0x80000000, 0x80100020, 0x80108020, 0x00108000
};
static const u32 S3[64] = {
0x00000208, 0x08020200, 0x00000000, 0x08020008,
0x08000200, 0x00000000, 0x00020208, 0x08000200,
0x00020008, 0x08000008, 0x08000008, 0x00020000,
0x08020208, 0x00020008, 0x08020000, 0x00000208,
0x08000000, 0x00000008, 0x08020200, 0x00000200,
0x00020200, 0x08020000, 0x08020008, 0x00020208,
0x08000208, 0x00020200, 0x00020000, 0x08000208,
0x00000008, 0x08020208, 0x00000200, 0x08000000,
0x08020200, 0x08000000, 0x00020008, 0x00000208,
0x00020000, 0x08020200, 0x08000200, 0x00000000,
0x00000200, 0x00020008, 0x08020208, 0x08000200,
0x08000008, 0x00000200, 0x00000000, 0x08020008,
0x08000208, 0x00020000, 0x08000000, 0x08020208,
0x00000008, 0x00020208, 0x00020200, 0x08000008,
0x08020000, 0x08000208, 0x00000208, 0x08020000,
0x00020208, 0x00000008, 0x08020008, 0x00020200
};
static const u32 S4[64] = {
0x00802001, 0x00002081, 0x00002081, 0x00000080,
0x00802080, 0x00800081, 0x00800001, 0x00002001,
0x00000000, 0x00802000, 0x00802000, 0x00802081,
0x00000081, 0x00000000, 0x00800080, 0x00800001,
0x00000001, 0x00002000, 0x00800000, 0x00802001,
0x00000080, 0x00800000, 0x00002001, 0x00002080,
0x00800081, 0x00000001, 0x00002080, 0x00800080,
0x00002000, 0x00802080, 0x00802081, 0x00000081,
0x00800080, 0x00800001, 0x00802000, 0x00802081,
0x00000081, 0x00000000, 0x00000000, 0x00802000,
0x00002080, 0x00800080, 0x00800081, 0x00000001,
0x00802001, 0x00002081, 0x00002081, 0x00000080,
0x00802081, 0x00000081, 0x00000001, 0x00002000,
0x00800001, 0x00002001, 0x00802080, 0x00800081,
0x00002001, 0x00002080, 0x00800000, 0x00802001,
0x00000080, 0x00800000, 0x00002000, 0x00802080
};
static const u32 S5[64] = {
0x00000100, 0x02080100, 0x02080000, 0x42000100,
0x00080000, 0x00000100, 0x40000000, 0x02080000,
0x40080100, 0x00080000, 0x02000100, 0x40080100,
0x42000100, 0x42080000, 0x00080100, 0x40000000,
0x02000000, 0x40080000, 0x40080000, 0x00000000,
0x40000100, 0x42080100, 0x42080100, 0x02000100,
0x42080000, 0x40000100, 0x00000000, 0x42000000,
0x02080100, 0x02000000, 0x42000000, 0x00080100,
0x00080000, 0x42000100, 0x00000100, 0x02000000,
0x40000000, 0x02080000, 0x42000100, 0x40080100,
0x02000100, 0x40000000, 0x42080000, 0x02080100,
0x40080100, 0x00000100, 0x02000000, 0x42080000,
0x42080100, 0x00080100, 0x42000000, 0x42080100,
0x02080000, 0x00000000, 0x40080000, 0x42000000,
0x00080100, 0x02000100, 0x40000100, 0x00080000,
0x00000000, 0x40080000, 0x02080100, 0x40000100
};
static const u32 S6[64] = {
0x20000010, 0x20400000, 0x00004000, 0x20404010,
0x20400000, 0x00000010, 0x20404010, 0x00400000,
0x20004000, 0x00404010, 0x00400000, 0x20000010,
0x00400010, 0x20004000, 0x20000000, 0x00004010,
0x00000000, 0x00400010, 0x20004010, 0x00004000,
0x00404000, 0x20004010, 0x00000010, 0x20400010,
0x20400010, 0x00000000, 0x00404010, 0x20404000,
0x00004010, 0x00404000, 0x20404000, 0x20000000,
0x20004000, 0x00000010, 0x20400010, 0x00404000,
0x20404010, 0x00400000, 0x00004010, 0x20000010,
0x00400000, 0x20004000, 0x20000000, 0x00004010,
0x20000010, 0x20404010, 0x00404000, 0x20400000,
0x00404010, 0x20404000, 0x00000000, 0x20400010,
0x00000010, 0x00004000, 0x20400000, 0x00404010,
0x00004000, 0x00400010, 0x20004010, 0x00000000,
0x20404000, 0x20000000, 0x00400010, 0x20004010
};
static const u32 S7[64] = {
0x00200000, 0x04200002, 0x04000802, 0x00000000,
0x00000800, 0x04000802, 0x00200802, 0x04200800,
0x04200802, 0x00200000, 0x00000000, 0x04000002,
0x00000002, 0x04000000, 0x04200002, 0x00000802,
0x04000800, 0x00200802, 0x00200002, 0x04000800,
0x04000002, 0x04200000, 0x04200800, 0x00200002,
0x04200000, 0x00000800, 0x00000802, 0x04200802,
0x00200800, 0x00000002, 0x04000000, 0x00200800,
0x04000000, 0x00200800, 0x00200000, 0x04000802,
0x04000802, 0x04200002, 0x04200002, 0x00000002,
0x00200002, 0x04000000, 0x04000800, 0x00200000,
0x04200800, 0x00000802, 0x00200802, 0x04200800,
0x00000802, 0x04000002, 0x04200802, 0x04200000,
0x00200800, 0x00000000, 0x00000002, 0x04200802,
0x00000000, 0x00200802, 0x04200000, 0x00000800,
0x04000002, 0x04000800, 0x00000800, 0x00200002
};
static const u32 S8[64] = {
0x10001040, 0x00001000, 0x00040000, 0x10041040,
0x10000000, 0x10001040, 0x00000040, 0x10000000,
0x00040040, 0x10040000, 0x10041040, 0x00041000,
0x10041000, 0x00041040, 0x00001000, 0x00000040,
0x10040000, 0x10000040, 0x10001000, 0x00001040,
0x00041000, 0x00040040, 0x10040040, 0x10041000,
0x00001040, 0x00000000, 0x00000000, 0x10040040,
0x10000040, 0x10001000, 0x00041040, 0x00040000,
0x00041040, 0x00040000, 0x10041000, 0x00001000,
0x00000040, 0x10040040, 0x00001000, 0x00041040,
0x10001000, 0x00000040, 0x10000040, 0x10040000,
0x10040040, 0x10000000, 0x00040000, 0x10001040,
0x00000000, 0x10041040, 0x00040040, 0x10000040,
0x10040000, 0x10001000, 0x10001040, 0x00000000,
0x10041040, 0x00041000, 0x00041000, 0x00001040,
0x00001040, 0x00040040, 0x10000000, 0x10041000
};
/* Encryption components: IP, FP, and round function */
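/*
 * IP and FP are implemented as a fixed sequence of "delta swaps": each
 * five-statement group below (T = L; L ^= R; L &= mask; R ^= L; L ^= T)
 * exchanges the bits selected by the mask between L and R, composing into
 * the full bit permutation without any table lookups.
 */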
#define IP(L, R, T) \
ROL(R, 4); \
T = L; \
L ^= R; \
L &= 0xf0f0f0f0; \
R ^= L; \
L ^= T; \
ROL(R, 12); \
T = L; \
L ^= R; \
L &= 0xffff0000; \
R ^= L; \
L ^= T; \
ROR(R, 14); \
T = L; \
L ^= R; \
L &= 0xcccccccc; \
R ^= L; \
L ^= T; \
ROL(R, 6); \
T = L; \
L ^= R; \
L &= 0xff00ff00; \
R ^= L; \
L ^= T; \
ROR(R, 7); \
T = L; \
L ^= R; \
L &= 0xaaaaaaaa; \
R ^= L; \
L ^= T; \
ROL(L, 1);
#define FP(L, R, T) \
ROR(L, 1); \
T = L; \
L ^= R; \
L &= 0xaaaaaaaa; \
R ^= L; \
L ^= T; \
ROL(R, 7); \
T = L; \
L ^= R; \
L &= 0xff00ff00; \
R ^= L; \
L ^= T; \
ROR(R, 6); \
T = L; \
L ^= R; \
L &= 0xcccccccc; \
R ^= L; \
L ^= T; \
ROL(R, 14); \
T = L; \
L ^= R; \
L &= 0xffff0000; \
R ^= L; \
L ^= T; \
ROR(R, 12); \
T = L; \
L ^= R; \
L &= 0xf0f0f0f0; \
R ^= L; \
L ^= T; \
ROR(R, 4);
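/*
 * One Feistel round: the two subkey words K[0]/K[1] are XORed into R and
 * split into eight 6-bit groups (one per byte of A and B after masking
 * with 0x3f3f3f3f), which index the eight S-boxes and are XORed into L.
 * 'd' advances the subkey pointer: +2 when encrypting, -2 when decrypting.
 */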
#define ROUND(L, R, A, B, K, d) \
B = K[0]; A = K[1]; K += d; \
B ^= R; A ^= R; \
B &= 0x3f3f3f3f; ROR(A, 4); \
L ^= S8[0xff & B]; A &= 0x3f3f3f3f; \
L ^= S6[0xff & (B >> 8)]; B >>= 16; \
L ^= S7[0xff & A]; \
L ^= S5[0xff & (A >> 8)]; A >>= 16; \
L ^= S4[0xff & B]; \
L ^= S2[0xff & (B >> 8)]; \
L ^= S3[0xff & A]; \
L ^= S1[0xff & (A >> 8)];
/*
* PC2 lookup tables are organized as 2 consecutive sets of 4 interleaved
* tables of 128 elements. One set is for C_i and the other for D_i, while
* the 4 interleaved tables correspond to four 7-bit subsets of C_i or D_i.
*
* After PC1 each of the variables a,b,c,d contains a 7-bit subset of C_i
* or D_i in bits 7-1 (bit 0 being the least significant).
*/
#define T1(x) pt[2 * (x) + 0]
#define T2(x) pt[2 * (x) + 1]
#define T3(x) pt[2 * (x) + 2]
#define T4(x) pt[2 * (x) + 3]
#define DES_PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a))
/*
* Encryption key expansion
*
* RFC2451: Weak key checks SHOULD be performed.
*
* FIPS 74:
*
* Keys having duals are keys which produce all zeros, all ones, or
* alternating zero-one patterns in the C and D registers after Permuted
* Choice 1 has operated on the key.
*
*/
static unsigned long des_ekey(u32 *pe, const u8 *k)
{
/* K&R: long is at least 32 bits */
unsigned long a, b, c, d, w;
const u32 *pt = pc2;
d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d];
c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c];
b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b];
a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a];
pe[15 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d];
pe[14 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
pe[13 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
pe[12 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
pe[11 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
pe[10 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
pe[ 9 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
pe[ 8 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c];
pe[ 7 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
pe[ 6 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
pe[ 5 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
pe[ 4 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
pe[ 3 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
pe[ 2 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
pe[ 1 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b];
pe[ 0 * 2 + 0] = DES_PC2(b, c, d, a);
/* Check if first half is weak */
w = (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]);
/* Skip to next table set */
pt += 512;
d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1];
c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1];
b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1];
a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1];
/* Check if second half is weak */
w |= (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]);
pe[15 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d];
pe[14 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
pe[13 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
pe[12 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
pe[11 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
pe[10 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
pe[ 9 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
pe[ 8 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c];
pe[ 7 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
pe[ 6 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
pe[ 5 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
pe[ 4 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
pe[ 3 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
pe[ 2 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
pe[ 1 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b];
pe[ 0 * 2 + 1] = DES_PC2(b, c, d, a);
/* Fixup: 2413 5768 -> 1357 2468 */
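/*
 * (Swap the upper halves of each even/odd subkey word pair and rotate the
 * odd word, converting the bit layout produced by DES_PC2 into the one
 * the ROUND macro consumes.)
 */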
for (d = 0; d < 16; ++d) {
a = pe[2 * d];
b = pe[2 * d + 1];
c = a ^ b;
c &= 0xffff0000;
a ^= c;
b ^= c;
ROL(b, 18);
pe[2 * d] = a;
pe[2 * d + 1] = b;
}
/* Zero if weak key */
return w;
}
int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen)
{
if (keylen != DES_KEY_SIZE)
return -EINVAL;
return des_ekey(ctx->expkey, key) ? 0 : -ENOKEY;
}
EXPORT_SYMBOL_GPL(des_expand_key);
/*
* Decryption key expansion
*
* No weak key checking is performed, as this is only used by triple DES
*
*/
static void dkey(u32 *pe, const u8 *k)
{
/* K&R: long is at least 32 bits */
unsigned long a, b, c, d;
const u32 *pt = pc2;
d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d];
c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c];
b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b];
a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a];
pe[ 0 * 2] = DES_PC2(a, b, c, d); d = rs[d];
pe[ 1 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
pe[ 2 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
pe[ 3 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
pe[ 4 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
pe[ 5 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
pe[ 6 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
pe[ 7 * 2] = DES_PC2(d, a, b, c); c = rs[c];
pe[ 8 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
pe[ 9 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
pe[10 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
pe[11 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
pe[12 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
pe[13 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
pe[14 * 2] = DES_PC2(c, d, a, b); b = rs[b];
pe[15 * 2] = DES_PC2(b, c, d, a);
/* Skip to next table set */
pt += 512;
d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1];
c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1];
b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1];
a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1];
pe[ 0 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d];
pe[ 1 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
pe[ 2 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
pe[ 3 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
pe[ 4 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
pe[ 5 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b];
pe[ 6 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d];
pe[ 7 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c];
pe[ 8 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
pe[ 9 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
pe[10 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
pe[11 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
pe[12 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a];
pe[13 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c];
pe[14 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b];
pe[15 * 2 + 1] = DES_PC2(b, c, d, a);
/* Fixup: 2413 5768 -> 1357 2468 */
for (d = 0; d < 16; ++d) {
a = pe[2 * d];
b = pe[2 * d + 1];
c = a ^ b;
c &= 0xffff0000;
a ^= c;
b ^= c;
ROL(b, 18);
pe[2 * d] = a;
pe[2 * d + 1] = b;
}
}
void des_encrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src)
{
const u32 *K = ctx->expkey;
u32 L, R, A, B;
int i;
L = get_unaligned_le32(src);
R = get_unaligned_le32(src + 4);
IP(L, R, A);
for (i = 0; i < 8; i++) {
ROUND(L, R, A, B, K, 2);
ROUND(R, L, A, B, K, 2);
}
FP(R, L, A);
put_unaligned_le32(R, dst);
put_unaligned_le32(L, dst + 4);
}
EXPORT_SYMBOL_GPL(des_encrypt);
void des_decrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src)
{
const u32 *K = ctx->expkey + DES_EXPKEY_WORDS - 2;
u32 L, R, A, B;
int i;
L = get_unaligned_le32(src);
R = get_unaligned_le32(src + 4);
IP(L, R, A);
for (i = 0; i < 8; i++) {
ROUND(L, R, A, B, K, -2);
ROUND(R, L, A, B, K, -2);
}
FP(R, L, A);
put_unaligned_le32(R, dst);
put_unaligned_le32(L, dst + 4);
}
EXPORT_SYMBOL_GPL(des_decrypt);
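/*
 * Usage sketch (illustrative only; 'key' and 'in' are caller-provided
 * buffers, and single DES should not be used for new designs):
 *
 *	struct des_ctx ctx;
 *	u8 out[DES_BLOCK_SIZE], back[DES_BLOCK_SIZE];
 *	int err = des_expand_key(&ctx, key, DES_KEY_SIZE);
 *
 *	if (err == -ENOKEY)
 *		;	// weak key: the schedule is still valid, but the
 *			// caller decides whether to proceed
 *	des_encrypt(&ctx, out, in);
 *	des_decrypt(&ctx, back, out);	// back now equals in
 */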
int des3_ede_expand_key(struct des3_ede_ctx *ctx, const u8 *key,
unsigned int keylen)
{
u32 *pe = ctx->expkey;
int err;
if (keylen != DES3_EDE_KEY_SIZE)
return -EINVAL;
err = des3_ede_verify_key(key, keylen, true);
if (err && err != -ENOKEY)
return err;
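/*
 * A key that merely fails the weakness check (-ENOKEY) is still
 * expanded below; the error is passed back so the caller can decide
 * whether to reject it.
 */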
des_ekey(pe, key); pe += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
dkey(pe, key); pe += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
des_ekey(pe, key);
return err;
}
EXPORT_SYMBOL_GPL(des3_ede_expand_key);
void des3_ede_encrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src)
{
const u32 *K = dctx->expkey;
u32 L, R, A, B;
int i;
L = get_unaligned_le32(src);
R = get_unaligned_le32(src + 4);
IP(L, R, A);
for (i = 0; i < 8; i++) {
ROUND(L, R, A, B, K, 2);
ROUND(R, L, A, B, K, 2);
}
for (i = 0; i < 8; i++) {
ROUND(R, L, A, B, K, 2);
ROUND(L, R, A, B, K, 2);
}
for (i = 0; i < 8; i++) {
ROUND(L, R, A, B, K, 2);
ROUND(R, L, A, B, K, 2);
}
FP(R, L, A);
put_unaligned_le32(R, dst);
put_unaligned_le32(L, dst + 4);
}
EXPORT_SYMBOL_GPL(des3_ede_encrypt);
void des3_ede_decrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src)
{
const u32 *K = dctx->expkey + DES3_EDE_EXPKEY_WORDS - 2;
u32 L, R, A, B;
int i;
L = get_unaligned_le32(src);
R = get_unaligned_le32(src + 4);
IP(L, R, A);
for (i = 0; i < 8; i++) {
ROUND(L, R, A, B, K, -2);
ROUND(R, L, A, B, K, -2);
}
for (i = 0; i < 8; i++) {
ROUND(R, L, A, B, K, -2);
ROUND(L, R, A, B, K, -2);
}
for (i = 0; i < 8; i++) {
ROUND(L, R, A, B, K, -2);
ROUND(R, L, A, B, K, -2);
}
FP(R, L, A);
put_unaligned_le32(R, dst);
put_unaligned_le32(L, dst + 4);
}
EXPORT_SYMBOL_GPL(des3_ede_decrypt);
MODULE_LICENSE("GPL");
| linux-master | lib/crypto/des.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*
* This is an implementation of the Curve25519 ECDH algorithm, using either
* a 32-bit implementation or a 64-bit implementation with 128-bit integers,
* depending on what is supported by the target compiler.
*
* Information: https://cr.yp.to/ecdh.html
*/
#include <crypto/curve25519.h>
#include <linux/module.h>
const u8 curve25519_null_point[CURVE25519_KEY_SIZE] __aligned(32) = { 0 };
const u8 curve25519_base_point[CURVE25519_KEY_SIZE] __aligned(32) = { 9 };
EXPORT_SYMBOL(curve25519_null_point);
EXPORT_SYMBOL(curve25519_base_point);
EXPORT_SYMBOL(curve25519_generic);
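/*
 * Usage sketch (illustrative; key generation and error handling are up to
 * the caller - the curve25519() wrapper in <crypto/curve25519.h> performs
 * the all-zero check shown here):
 *
 *	u8 pub[CURVE25519_KEY_SIZE], shared[CURVE25519_KEY_SIZE];
 *
 *	curve25519_generic(pub, my_secret, curve25519_base_point);
 *	curve25519_generic(shared, my_secret, their_public);
 *	if (!crypto_memneq(shared, curve25519_null_point, CURVE25519_KEY_SIZE))
 *		;	// low-order input point: reject the shared secret
 */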
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Curve25519 scalar multiplication");
MODULE_AUTHOR("Jason A. Donenfeld <[email protected]>");
| linux-master | lib/crypto/curve25519-generic.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*
* This is based in part on Andrew Moon's poly1305-donna, which is in the
* public domain.
*/
#include <linux/kernel.h>
#include <asm/unaligned.h>
#include <crypto/internal/poly1305.h>
void poly1305_core_setkey(struct poly1305_core_key *key,
const u8 raw_key[POLY1305_BLOCK_SIZE])
{
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff;
key->key.r[1] = (get_unaligned_le32(&raw_key[3]) >> 2) & 0x3ffff03;
key->key.r[2] = (get_unaligned_le32(&raw_key[6]) >> 4) & 0x3ffc0ff;
key->key.r[3] = (get_unaligned_le32(&raw_key[9]) >> 6) & 0x3f03fff;
key->key.r[4] = (get_unaligned_le32(&raw_key[12]) >> 8) & 0x00fffff;
/* s = 5*r */
key->precomputed_s.r[0] = key->key.r[1] * 5;
key->precomputed_s.r[1] = key->key.r[2] * 5;
key->precomputed_s.r[2] = key->key.r[3] * 5;
key->precomputed_s.r[3] = key->key.r[4] * 5;
}
EXPORT_SYMBOL(poly1305_core_setkey);
void poly1305_core_blocks(struct poly1305_state *state,
const struct poly1305_core_key *key, const void *src,
unsigned int nblocks, u32 hibit)
{
const u8 *input = src;
u32 r0, r1, r2, r3, r4;
u32 s1, s2, s3, s4;
u32 h0, h1, h2, h3, h4;
u64 d0, d1, d2, d3, d4;
u32 c;
if (!nblocks)
return;
hibit <<= 24;
r0 = key->key.r[0];
r1 = key->key.r[1];
r2 = key->key.r[2];
r3 = key->key.r[3];
r4 = key->key.r[4];
s1 = key->precomputed_s.r[0];
s2 = key->precomputed_s.r[1];
s3 = key->precomputed_s.r[2];
s4 = key->precomputed_s.r[3];
h0 = state->h[0];
h1 = state->h[1];
h2 = state->h[2];
h3 = state->h[3];
h4 = state->h[4];
do {
/* h += m[i] */
h0 += (get_unaligned_le32(&input[0])) & 0x3ffffff;
h1 += (get_unaligned_le32(&input[3]) >> 2) & 0x3ffffff;
h2 += (get_unaligned_le32(&input[6]) >> 4) & 0x3ffffff;
h3 += (get_unaligned_le32(&input[9]) >> 6) & 0x3ffffff;
h4 += (get_unaligned_le32(&input[12]) >> 8) | hibit;
/* h *= r */
d0 = ((u64)h0 * r0) + ((u64)h1 * s4) +
((u64)h2 * s3) + ((u64)h3 * s2) +
((u64)h4 * s1);
d1 = ((u64)h0 * r1) + ((u64)h1 * r0) +
((u64)h2 * s4) + ((u64)h3 * s3) +
((u64)h4 * s2);
d2 = ((u64)h0 * r2) + ((u64)h1 * r1) +
((u64)h2 * r0) + ((u64)h3 * s4) +
((u64)h4 * s3);
d3 = ((u64)h0 * r3) + ((u64)h1 * r2) +
((u64)h2 * r1) + ((u64)h3 * r0) +
((u64)h4 * s4);
d4 = ((u64)h0 * r4) + ((u64)h1 * r3) +
((u64)h2 * r2) + ((u64)h3 * r1) +
((u64)h4 * r0);
/* (partial) h %= p */
c = (u32)(d0 >> 26);
h0 = (u32)d0 & 0x3ffffff;
d1 += c;
c = (u32)(d1 >> 26);
h1 = (u32)d1 & 0x3ffffff;
d2 += c;
c = (u32)(d2 >> 26);
h2 = (u32)d2 & 0x3ffffff;
d3 += c;
c = (u32)(d3 >> 26);
h3 = (u32)d3 & 0x3ffffff;
d4 += c;
c = (u32)(d4 >> 26);
h4 = (u32)d4 & 0x3ffffff;
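/* fold the carry out of the top limb back in: 2^130 == 5 (mod 2^130 - 5) */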
h0 += c * 5;
c = (h0 >> 26);
h0 = h0 & 0x3ffffff;
h1 += c;
input += POLY1305_BLOCK_SIZE;
} while (--nblocks);
state->h[0] = h0;
state->h[1] = h1;
state->h[2] = h2;
state->h[3] = h3;
state->h[4] = h4;
}
EXPORT_SYMBOL(poly1305_core_blocks);
void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
void *dst)
{
u8 *mac = dst;
u32 h0, h1, h2, h3, h4, c;
u32 g0, g1, g2, g3, g4;
u64 f;
u32 mask;
/* fully carry h */
h0 = state->h[0];
h1 = state->h[1];
h2 = state->h[2];
h3 = state->h[3];
h4 = state->h[4];
c = h1 >> 26;
h1 = h1 & 0x3ffffff;
h2 += c;
c = h2 >> 26;
h2 = h2 & 0x3ffffff;
h3 += c;
c = h3 >> 26;
h3 = h3 & 0x3ffffff;
h4 += c;
c = h4 >> 26;
h4 = h4 & 0x3ffffff;
h0 += c * 5;
c = h0 >> 26;
h0 = h0 & 0x3ffffff;
h1 += c;
/* compute h + -p */
g0 = h0 + 5;
c = g0 >> 26;
g0 &= 0x3ffffff;
g1 = h1 + c;
c = g1 >> 26;
g1 &= 0x3ffffff;
g2 = h2 + c;
c = g2 >> 26;
g2 &= 0x3ffffff;
g3 = h3 + c;
c = g3 >> 26;
g3 &= 0x3ffffff;
g4 = h4 + c - (1UL << 26);
/* select h if h < p, or h + -p if h >= p */
mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1;
g0 &= mask;
g1 &= mask;
g2 &= mask;
g3 &= mask;
g4 &= mask;
mask = ~mask;
h0 = (h0 & mask) | g0;
h1 = (h1 & mask) | g1;
h2 = (h2 & mask) | g2;
h3 = (h3 & mask) | g3;
h4 = (h4 & mask) | g4;
/* h = h % (2^128) */
h0 = ((h0) | (h1 << 26)) & 0xffffffff;
h1 = ((h1 >> 6) | (h2 << 20)) & 0xffffffff;
h2 = ((h2 >> 12) | (h3 << 14)) & 0xffffffff;
h3 = ((h3 >> 18) | (h4 << 8)) & 0xffffffff;
if (likely(nonce)) {
/* mac = (h + nonce) % (2^128) */
f = (u64)h0 + nonce[0];
h0 = (u32)f;
f = (u64)h1 + nonce[1] + (f >> 32);
h1 = (u32)f;
f = (u64)h2 + nonce[2] + (f >> 32);
h2 = (u32)f;
f = (u64)h3 + nonce[3] + (f >> 32);
h3 = (u32)f;
}
put_unaligned_le32(h0, &mac[0]);
put_unaligned_le32(h1, &mac[4]);
put_unaligned_le32(h2, &mac[8]);
put_unaligned_le32(h3, &mac[12]);
}
EXPORT_SYMBOL(poly1305_core_emit);
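/*
 * Usage sketch for the core API (illustrative; assumes a one-shot message
 * that is a whole number of 16-byte blocks, with the 32-byte one-time key
 * split into r (first half, clamped in poly1305_core_setkey()) and s
 * (second half, passed as the nonce words when emitting):
 *
 *	struct poly1305_core_key key;
 *	struct poly1305_state state = {};
 *	u32 s[4];	// get_unaligned_le32() of raw_key[16..31]
 *
 *	poly1305_core_setkey(&key, raw_key);
 *	poly1305_core_blocks(&state, &key, msg, nblocks, 1);
 *	poly1305_core_emit(&state, s, mac);
 */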
| linux-master | lib/crypto/poly1305-donna32.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*
* This is an implementation of the Curve25519 ECDH algorithm, using either
* a 32-bit implementation or a 64-bit implementation with 128-bit integers,
* depending on what is supported by the target compiler.
*
* Information: https://cr.yp.to/ecdh.html
*/
#include <crypto/curve25519.h>
#include <linux/module.h>
#include <linux/init.h>
static int __init curve25519_init(void)
{
if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
WARN_ON(!curve25519_selftest()))
return -ENODEV;
return 0;
}
static void __exit curve25519_exit(void)
{
}
module_init(curve25519_init);
module_exit(curve25519_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Curve25519 scalar multiplication");
MODULE_AUTHOR("Jason A. Donenfeld <[email protected]>");
| linux-master | lib/crypto/curve25519.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SHA1 routine optimized to do word accesses rather than byte accesses,
* and to avoid unnecessary copies into the context array.
*
* This was based on the git SHA1 implementation.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <crypto/sha1.h>
#include <asm/unaligned.h>
/*
* If you have 32 registers or more, the compiler can (and should)
* try to change the array[] accesses into registers. However, on
* machines with less than ~25 registers, that won't really work,
* and at least gcc will make an unholy mess of it.
*
* So to avoid that mess which just slows things down, we force
* the stores to memory to actually happen (we might be better off
* with a 'W(t)=(val);asm("":"+m" (W(t)))' there instead, as
* suggested by Artur Skawina - that will also make gcc unable to
* try to do the silly "optimize away loads" part because it won't
* see what the value will be).
*
* Ben Herrenschmidt reports that on PPC, the C version comes close
* to the optimized asm with this (ie on PPC you don't want that
* 'volatile', since there are lots of registers).
*
* On ARM we get the best code generation by forcing a full memory barrier
* between each SHA_ROUND, otherwise gcc happily gets wild with spilling and
* the stack frame size simply explodes and performance goes down the drain.
*/
#ifdef CONFIG_X86
#define setW(x, val) (*(volatile __u32 *)&W(x) = (val))
#elif defined(CONFIG_ARM)
#define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
#else
#define setW(x, val) (W(x) = (val))
#endif
/* This "rolls" over the 512-bit array */
#define W(x) (array[(x)&15])
/*
* Where do we get the source from? The first 16 iterations get it from
* the input data, the next mix it from the 512-bit array.
*/
#define SHA_SRC(t) get_unaligned_be32((__u32 *)data + t)
#define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1)
#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
__u32 TEMP = input(t); setW(t, TEMP); \
E += TEMP + rol32(A,5) + (fn) + (constant); \
B = ror32(B, 2); \
TEMP = E; E = D; D = C; C = B; B = A; A = TEMP; } while (0)
#define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
#define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E )
#define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E )
#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E )
/**
* sha1_transform - single block SHA1 transform (deprecated)
*
* @digest: 160 bit digest to update
* @data: 512 bits of data to hash
* @array: 16 words of workspace (see note)
*
* This function executes SHA-1's internal compression function. It updates the
* 160-bit internal state (@digest) with a single 512-bit data block (@data).
*
* Don't use this function. SHA-1 is no longer considered secure. And even if
* you do have to use SHA-1, this isn't the correct way to hash something with
* SHA-1 as this doesn't handle padding and finalization.
*
* Note: If the hash is security sensitive, the caller should be sure
* to clear the workspace. This is left to the caller to avoid
* unnecessary clears between chained hashing operations.
*/
void sha1_transform(__u32 *digest, const char *data, __u32 *array)
{
__u32 A, B, C, D, E;
unsigned int i = 0;
A = digest[0];
B = digest[1];
C = digest[2];
D = digest[3];
E = digest[4];
/* Round 1 - iterations 0-15 take their input from 'data' */
for (; i < 16; ++i)
T_0_15(i, A, B, C, D, E);
/* Round 1 - tail. Input from 512-bit mixing array */
for (; i < 20; ++i)
T_16_19(i, A, B, C, D, E);
/* Round 2 */
for (; i < 40; ++i)
T_20_39(i, A, B, C, D, E);
/* Round 3 */
for (; i < 60; ++i)
T_40_59(i, A, B, C, D, E);
/* Round 4 */
for (; i < 80; ++i)
T_60_79(i, A, B, C, D, E);
digest[0] += A;
digest[1] += B;
digest[2] += C;
digest[3] += D;
digest[4] += E;
}
EXPORT_SYMBOL(sha1_transform);
/**
* sha1_init - initialize the vectors for a SHA1 digest
* @buf: vector to initialize
*/
void sha1_init(__u32 *buf)
{
buf[0] = 0x67452301;
buf[1] = 0xefcdab89;
buf[2] = 0x98badcfe;
buf[3] = 0x10325476;
buf[4] = 0xc3d2e1f0;
}
EXPORT_SYMBOL(sha1_init);
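/*
 * Usage sketch (illustrative): hashing the empty message needs exactly one
 * padded block - 0x80, then zeros, with the 64-bit bit count (zero) at the
 * end:
 *
 *	__u32 digest[SHA1_DIGEST_WORDS];
 *	__u32 ws[SHA1_WORKSPACE_WORDS];
 *	char block[SHA1_BLOCK_SIZE] = { 0x80 };
 *
 *	sha1_init(digest);
 *	sha1_transform(digest, block, ws);
 *	// digest now holds da39a3ee 5e6b4b0d 3255bfef 95601890 afd80709
 *	memzero_explicit(ws, sizeof(ws));
 */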
MODULE_LICENSE("GPL");
| linux-master | lib/crypto/sha1.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Minimal library implementation of GCM
*
* Copyright 2022 Google LLC
*/
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/gcm.h>
#include <crypto/ghash.h>
#include <asm/irqflags.h>
static void aesgcm_encrypt_block(const struct crypto_aes_ctx *ctx, void *dst,
const void *src)
{
unsigned long flags;
/*
* In AES-GCM, both the GHASH key derivation and the CTR mode
* encryption operate on known plaintext, making them susceptible to
* timing attacks on the encryption key. The AES library already
* mitigates this risk to some extent by pulling the entire S-box into
* the caches before doing any substitutions, but this strategy is more
* effective when running with interrupts disabled.
*/
local_irq_save(flags);
aes_encrypt(ctx, dst, src);
local_irq_restore(flags);
}
/**
* aesgcm_expandkey - Expands the AES and GHASH keys for the AES-GCM key
* schedule
*
* @ctx: The data structure that will hold the AES-GCM key schedule
* @key: The AES encryption input key
* @keysize: The length in bytes of the input key
* @authsize: The size in bytes of the GCM authentication tag
*
* Returns: 0 on success, or -EINVAL if @keysize or @authsize contain values
* that are not permitted by the GCM specification.
*/
int aesgcm_expandkey(struct aesgcm_ctx *ctx, const u8 *key,
unsigned int keysize, unsigned int authsize)
{
u8 kin[AES_BLOCK_SIZE] = {};
int ret;
ret = crypto_gcm_check_authsize(authsize) ?:
aes_expandkey(&ctx->aes_ctx, key, keysize);
if (ret)
return ret;
ctx->authsize = authsize;
aesgcm_encrypt_block(&ctx->aes_ctx, &ctx->ghash_key, kin);
return 0;
}
EXPORT_SYMBOL(aesgcm_expandkey);
static void aesgcm_ghash(be128 *ghash, const be128 *key, const void *src,
int len)
{
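/*
 * A trailing partial block is implicitly zero-padded, as required by
 * GCM: crypto_xor() only folds min(len, 16) bytes into the accumulator.
 */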
while (len > 0) {
crypto_xor((u8 *)ghash, src, min(len, GHASH_BLOCK_SIZE));
gf128mul_lle(ghash, key);
src += GHASH_BLOCK_SIZE;
len -= GHASH_BLOCK_SIZE;
}
}
static void aesgcm_mac(const struct aesgcm_ctx *ctx, const u8 *src, int src_len,
const u8 *assoc, int assoc_len, __be32 *ctr, u8 *authtag)
{
be128 tail = { cpu_to_be64(assoc_len * 8), cpu_to_be64(src_len * 8) };
u8 buf[AES_BLOCK_SIZE];
be128 ghash = {};
aesgcm_ghash(&ghash, &ctx->ghash_key, assoc, assoc_len);
aesgcm_ghash(&ghash, &ctx->ghash_key, src, src_len);
aesgcm_ghash(&ghash, &ctx->ghash_key, &tail, sizeof(tail));
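/*
 * With a 96-bit IV, GCM defines the initial counter block J0 as
 * IV || 0x00000001; the tag is E_K(J0) XORed with the GHASH result.
 */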
ctr[3] = cpu_to_be32(1);
aesgcm_encrypt_block(&ctx->aes_ctx, buf, ctr);
crypto_xor_cpy(authtag, buf, (u8 *)&ghash, ctx->authsize);
memzero_explicit(&ghash, sizeof(ghash));
memzero_explicit(buf, sizeof(buf));
}
static void aesgcm_crypt(const struct aesgcm_ctx *ctx, u8 *dst, const u8 *src,
int len, __be32 *ctr)
{
u8 buf[AES_BLOCK_SIZE];
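/*
 * Counter value 1 is reserved for computing the tag (see aesgcm_mac()
 * above), so the payload keystream starts at counter value 2.
 */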
unsigned int n = 2;
while (len > 0) {
/*
* The counter increment below must not result in overflow or
* carry into the next 32-bit word, as this could result in
* inadvertent IV reuse, which must be avoided at all cost for
* stream ciphers such as AES-CTR. Given the range of 'int
* len', this cannot happen, so no explicit test is necessary.
*/
ctr[3] = cpu_to_be32(n++);
aesgcm_encrypt_block(&ctx->aes_ctx, buf, ctr);
crypto_xor_cpy(dst, src, buf, min(len, AES_BLOCK_SIZE));
dst += AES_BLOCK_SIZE;
src += AES_BLOCK_SIZE;
len -= AES_BLOCK_SIZE;
}
memzero_explicit(buf, sizeof(buf));
}
/**
* aesgcm_encrypt - Perform AES-GCM encryption on a block of data
*
* @ctx: The AES-GCM key schedule
* @dst: Pointer to the ciphertext output buffer
* @src: Pointer to the plaintext (may equal @dst for encryption in place)
* @crypt_len: The size in bytes of the plaintext and ciphertext.
* @assoc: Pointer to the associated data
* @assoc_len: The size in bytes of the associated data
* @iv: The initialization vector (IV) to use for this block of data
* (must be 12 bytes in size as per the GCM spec recommendation)
* @authtag: The address of the buffer in memory where the authentication
* tag should be stored. The buffer is assumed to have space for
* @ctx->authsize bytes.
*/
void aesgcm_encrypt(const struct aesgcm_ctx *ctx, u8 *dst, const u8 *src,
int crypt_len, const u8 *assoc, int assoc_len,
const u8 iv[GCM_AES_IV_SIZE], u8 *authtag)
{
__be32 ctr[4];
memcpy(ctr, iv, GCM_AES_IV_SIZE);
aesgcm_crypt(ctx, dst, src, crypt_len, ctr);
aesgcm_mac(ctx, dst, crypt_len, assoc, assoc_len, ctr, authtag);
}
EXPORT_SYMBOL(aesgcm_encrypt);
/**
* aesgcm_decrypt - Perform AES-GCM decryption on a block of data
*
* @ctx: The AES-GCM key schedule
* @dst: Pointer to the plaintext output buffer
* @src: Pointer to the ciphertext (may equal @dst for decryption in place)
* @crypt_len: The size in bytes of the plaintext and ciphertext.
* @assoc: Pointer to the associated data
* @assoc_len: The size in bytes of the associated data
* @iv: The initialization vector (IV) to use for this block of data
* (must be 12 bytes in size as per the GCM spec recommendation)
* @authtag: The address of the buffer in memory where the authentication
* tag is stored.
*
* Returns: true on success, or false if the ciphertext failed authentication.
* On failure, no plaintext will be returned.
*/
bool __must_check aesgcm_decrypt(const struct aesgcm_ctx *ctx, u8 *dst,
const u8 *src, int crypt_len, const u8 *assoc,
int assoc_len, const u8 iv[GCM_AES_IV_SIZE],
const u8 *authtag)
{
u8 tagbuf[AES_BLOCK_SIZE];
__be32 ctr[4];
memcpy(ctr, iv, GCM_AES_IV_SIZE);
aesgcm_mac(ctx, src, crypt_len, assoc, assoc_len, ctr, tagbuf);
if (crypto_memneq(authtag, tagbuf, ctx->authsize)) {
memzero_explicit(tagbuf, sizeof(tagbuf));
return false;
}
aesgcm_crypt(ctx, dst, src, crypt_len, ctr);
return true;
}
EXPORT_SYMBOL(aesgcm_decrypt);
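/*
 * Usage sketch (illustrative; buffers, IV uniqueness and error handling
 * are the caller's responsibility):
 *
 *	struct aesgcm_ctx ctx;
 *	u8 tag[16];
 *
 *	if (aesgcm_expandkey(&ctx, key, AES_KEYSIZE_128, sizeof(tag)))
 *		return -EINVAL;
 *	aesgcm_encrypt(&ctx, ct, pt, len, aad, aad_len, iv, tag);
 *	if (!aesgcm_decrypt(&ctx, pt, ct, len, aad, aad_len, iv, tag))
 *		return -EBADMSG;	// authentication failed
 */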
MODULE_DESCRIPTION("Generic AES-GCM library");
MODULE_AUTHOR("Ard Biesheuvel <[email protected]>");
MODULE_LICENSE("GPL");
#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
/*
* Test code below. Vectors taken from crypto/testmgr.h
*/
static const u8 __initconst ctext0[16] =
"\x58\xe2\xfc\xce\xfa\x7e\x30\x61"
"\x36\x7f\x1d\x57\xa4\xe7\x45\x5a";
static const u8 __initconst ptext1[16];
static const u8 __initconst ctext1[32] =
"\x03\x88\xda\xce\x60\xb6\xa3\x92"
"\xf3\x28\xc2\xb9\x71\xb2\xfe\x78"
"\xab\x6e\x47\xd4\x2c\xec\x13\xbd"
"\xf5\x3a\x67\xb2\x12\x57\xbd\xdf";
static const u8 __initconst ptext2[64] =
"\xd9\x31\x32\x25\xf8\x84\x06\xe5"
"\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
"\x86\xa7\xa9\x53\x15\x34\xf7\xda"
"\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
"\x1c\x3c\x0c\x95\x95\x68\x09\x53"
"\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
"\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
"\xba\x63\x7b\x39\x1a\xaf\xd2\x55";
static const u8 __initconst ctext2[80] =
"\x42\x83\x1e\xc2\x21\x77\x74\x24"
"\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
"\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
"\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
"\x21\xd5\x14\xb2\x54\x66\x93\x1c"
"\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
"\x1b\xa3\x0b\x39\x6a\x0a\xac\x97"
"\x3d\x58\xe0\x91\x47\x3f\x59\x85"
"\x4d\x5c\x2a\xf3\x27\xcd\x64\xa6"
"\x2c\xf3\x5a\xbd\x2b\xa6\xfa\xb4";
static const u8 __initconst ptext3[60] =
"\xd9\x31\x32\x25\xf8\x84\x06\xe5"
"\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
"\x86\xa7\xa9\x53\x15\x34\xf7\xda"
"\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
"\x1c\x3c\x0c\x95\x95\x68\x09\x53"
"\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
"\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
"\xba\x63\x7b\x39";
static const u8 __initconst ctext3[76] =
"\x42\x83\x1e\xc2\x21\x77\x74\x24"
"\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
"\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
"\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
"\x21\xd5\x14\xb2\x54\x66\x93\x1c"
"\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
"\x1b\xa3\x0b\x39\x6a\x0a\xac\x97"
"\x3d\x58\xe0\x91"
"\x5b\xc9\x4f\xbc\x32\x21\xa5\xdb"
"\x94\xfa\xe9\x5a\xe7\x12\x1a\x47";
static const u8 __initconst ctext4[16] =
"\xcd\x33\xb2\x8a\xc7\x73\xf7\x4b"
"\xa0\x0e\xd1\xf3\x12\x57\x24\x35";
static const u8 __initconst ctext5[32] =
"\x98\xe7\x24\x7c\x07\xf0\xfe\x41"
"\x1c\x26\x7e\x43\x84\xb0\xf6\x00"
"\x2f\xf5\x8d\x80\x03\x39\x27\xab"
"\x8e\xf4\xd4\x58\x75\x14\xf0\xfb";
static const u8 __initconst ptext6[64] =
"\xd9\x31\x32\x25\xf8\x84\x06\xe5"
"\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
"\x86\xa7\xa9\x53\x15\x34\xf7\xda"
"\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
"\x1c\x3c\x0c\x95\x95\x68\x09\x53"
"\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
"\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
"\xba\x63\x7b\x39\x1a\xaf\xd2\x55";
static const u8 __initconst ctext6[80] =
"\x39\x80\xca\x0b\x3c\x00\xe8\x41"
"\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
"\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
"\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
"\x7d\x77\x3d\x00\xc1\x44\xc5\x25"
"\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
"\x18\xe2\x44\x8b\x2f\xe3\x24\xd9"
"\xcc\xda\x27\x10\xac\xad\xe2\x56"
"\x99\x24\xa7\xc8\x58\x73\x36\xbf"
"\xb1\x18\x02\x4d\xb8\x67\x4a\x14";
static const u8 __initconst ctext7[16] =
"\x53\x0f\x8a\xfb\xc7\x45\x36\xb9"
"\xa9\x63\xb4\xf1\xc4\xcb\x73\x8b";
static const u8 __initconst ctext8[32] =
"\xce\xa7\x40\x3d\x4d\x60\x6b\x6e"
"\x07\x4e\xc5\xd3\xba\xf3\x9d\x18"
"\xd0\xd1\xc8\xa7\x99\x99\x6b\xf0"
"\x26\x5b\x98\xb5\xd4\x8a\xb9\x19";
static const u8 __initconst ptext9[64] =
"\xd9\x31\x32\x25\xf8\x84\x06\xe5"
"\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
"\x86\xa7\xa9\x53\x15\x34\xf7\xda"
"\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
"\x1c\x3c\x0c\x95\x95\x68\x09\x53"
"\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
"\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
"\xba\x63\x7b\x39\x1a\xaf\xd2\x55";
static const u8 __initconst ctext9[80] =
"\x52\x2d\xc1\xf0\x99\x56\x7d\x07"
"\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
"\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9"
"\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
"\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d"
"\xa7\xb0\x8b\x10\x56\x82\x88\x38"
"\xc5\xf6\x1e\x63\x93\xba\x7a\x0a"
"\xbc\xc9\xf6\x62\x89\x80\x15\xad"
"\xb0\x94\xda\xc5\xd9\x34\x71\xbd"
"\xec\x1a\x50\x22\x70\xe3\xcc\x6c";
static const u8 __initconst ptext10[60] =
"\xd9\x31\x32\x25\xf8\x84\x06\xe5"
"\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
"\x86\xa7\xa9\x53\x15\x34\xf7\xda"
"\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
"\x1c\x3c\x0c\x95\x95\x68\x09\x53"
"\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
"\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
"\xba\x63\x7b\x39";
static const u8 __initconst ctext10[76] =
"\x52\x2d\xc1\xf0\x99\x56\x7d\x07"
"\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
"\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9"
"\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
"\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d"
"\xa7\xb0\x8b\x10\x56\x82\x88\x38"
"\xc5\xf6\x1e\x63\x93\xba\x7a\x0a"
"\xbc\xc9\xf6\x62"
"\x76\xfc\x6e\xce\x0f\x4e\x17\x68"
"\xcd\xdf\x88\x53\xbb\x2d\x55\x1b";
static const u8 __initconst ptext11[60] =
"\xd9\x31\x32\x25\xf8\x84\x06\xe5"
"\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
"\x86\xa7\xa9\x53\x15\x34\xf7\xda"
"\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
"\x1c\x3c\x0c\x95\x95\x68\x09\x53"
"\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
"\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
"\xba\x63\x7b\x39";
static const u8 __initconst ctext11[76] =
"\x39\x80\xca\x0b\x3c\x00\xe8\x41"
"\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
"\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
"\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
"\x7d\x77\x3d\x00\xc1\x44\xc5\x25"
"\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
"\x18\xe2\x44\x8b\x2f\xe3\x24\xd9"
"\xcc\xda\x27\x10"
"\x25\x19\x49\x8e\x80\xf1\x47\x8f"
"\x37\xba\x55\xbd\x6d\x27\x61\x8c";
static const u8 __initconst ptext12[719] =
"\x42\xc1\xcc\x08\x48\x6f\x41\x3f"
"\x2f\x11\x66\x8b\x2a\x16\xf0\xe0"
"\x58\x83\xf0\xc3\x70\x14\xc0\x5b"
"\x3f\xec\x1d\x25\x3c\x51\xd2\x03"
"\xcf\x59\x74\x1f\xb2\x85\xb4\x07"
"\xc6\x6a\x63\x39\x8a\x5b\xde\xcb"
"\xaf\x08\x44\xbd\x6f\x91\x15\xe1"
"\xf5\x7a\x6e\x18\xbd\xdd\x61\x50"
"\x59\xa9\x97\xab\xbb\x0e\x74\x5c"
"\x00\xa4\x43\x54\x04\x54\x9b\x3b"
"\x77\xec\xfd\x5c\xa6\xe8\x7b\x08"
"\xae\xe6\x10\x3f\x32\x65\xd1\xfc"
"\xa4\x1d\x2c\x31\xfb\x33\x7a\xb3"
"\x35\x23\xf4\x20\x41\xd4\xad\x82"
"\x8b\xa4\xad\x96\x1c\x20\x53\xbe"
"\x0e\xa6\xf4\xdc\x78\x49\x3e\x72"
"\xb1\xa9\xb5\x83\xcb\x08\x54\xb7"
"\xad\x49\x3a\xae\x98\xce\xa6\x66"
"\x10\x30\x90\x8c\x55\x83\xd7\x7c"
"\x8b\xe6\x53\xde\xd2\x6e\x18\x21"
"\x01\x52\xd1\x9f\x9d\xbb\x9c\x73"
"\x57\xcc\x89\x09\x75\x9b\x78\x70"
"\xed\x26\x97\x4d\xb4\xe4\x0c\xa5"
"\xfa\x70\x04\x70\xc6\x96\x1c\x7d"
"\x54\x41\x77\xa8\xe3\xb0\x7e\x96"
"\x82\xd9\xec\xa2\x87\x68\x55\xf9"
"\x8f\x9e\x73\x43\x47\x6a\x08\x36"
"\x93\x67\xa8\x2d\xde\xac\x41\xa9"
"\x5c\x4d\x73\x97\x0f\x70\x68\xfa"
"\x56\x4d\x00\xc2\x3b\x1f\xc8\xb9"
"\x78\x1f\x51\x07\xe3\x9a\x13\x4e"
"\xed\x2b\x2e\xa3\xf7\x44\xb2\xe7"
"\xab\x19\x37\xd9\xba\x76\x5e\xd2"
"\xf2\x53\x15\x17\x4c\x6b\x16\x9f"
"\x02\x66\x49\xca\x7c\x91\x05\xf2"
"\x45\x36\x1e\xf5\x77\xad\x1f\x46"
"\xa8\x13\xfb\x63\xb6\x08\x99\x63"
"\x82\xa2\xed\xb3\xac\xdf\x43\x19"
"\x45\xea\x78\x73\xd9\xb7\x39\x11"
"\xa3\x13\x7c\xf8\x3f\xf7\xad\x81"
"\x48\x2f\xa9\x5c\x5f\xa0\xf0\x79"
"\xa4\x47\x7d\x80\x20\x26\xfd\x63"
"\x0a\xc7\x7e\x6d\x75\x47\xff\x76"
"\x66\x2e\x8a\x6c\x81\x35\xaf\x0b"
"\x2e\x6a\x49\x60\xc1\x10\xe1\xe1"
"\x54\x03\xa4\x09\x0c\x37\x7a\x15"
"\x23\x27\x5b\x8b\x4b\xa5\x64\x97"
"\xae\x4a\x50\x73\x1f\x66\x1c\x5c"
"\x03\x25\x3c\x8d\x48\x58\x71\x34"
"\x0e\xec\x4e\x55\x1a\x03\x6a\xe5"
"\xb6\x19\x2b\x84\x2a\x20\xd1\xea"
"\x80\x6f\x96\x0e\x05\x62\xc7\x78"
"\x87\x79\x60\x38\x46\xb4\x25\x57"
"\x6e\x16\x63\xf8\xad\x6e\xd7\x42"
"\x69\xe1\x88\xef\x6e\xd5\xb4\x9a"
"\x3c\x78\x6c\x3b\xe5\xa0\x1d\x22"
"\x86\x5c\x74\x3a\xeb\x24\x26\xc7"
"\x09\xfc\x91\x96\x47\x87\x4f\x1a"
"\xd6\x6b\x2c\x18\x47\xc0\xb8\x24"
"\xa8\x5a\x4a\x9e\xcb\x03\xe7\x2a"
"\x09\xe6\x4d\x9c\x6d\x86\x60\xf5"
"\x2f\x48\x69\x37\x9f\xf2\xd2\xcb"
"\x0e\x5a\xdd\x6e\x8a\xfb\x6a\xfe"
"\x0b\x63\xde\x87\x42\x79\x8a\x68"
"\x51\x28\x9b\x7a\xeb\xaf\xb8\x2f"
"\x9d\xd1\xc7\x45\x90\x08\xc9\x83"
"\xe9\x83\x84\xcb\x28\x69\x09\x69"
"\xce\x99\x46\x00\x54\xcb\xd8\x38"
"\xf9\x53\x4a\xbf\x31\xce\x57\x15"
"\x33\xfa\x96\x04\x33\x42\xe3\xc0"
"\xb7\x54\x4a\x65\x7a\x7c\x02\xe6"
"\x19\x95\xd0\x0e\x82\x07\x63\xf9"
"\xe1\x2b\x2a\xfc\x55\x92\x52\xc9"
"\xb5\x9f\x23\x28\x60\xe7\x20\x51"
"\x10\xd3\xed\x6d\x9b\xab\xb8\xe2"
"\x5d\x9a\x34\xb3\xbe\x9c\x64\xcb"
"\x78\xc6\x91\x22\x40\x91\x80\xbe"
"\xd7\x78\x5c\x0e\x0a\xdc\x08\xe9"
"\x67\x10\xa4\x83\x98\x79\x23\xe7"
"\x92\xda\xa9\x22\x16\xb1\xe7\x78"
"\xa3\x1c\x6c\x8f\x35\x7c\x4d\x37"
"\x2f\x6e\x0b\x50\x5c\x34\xb9\xf9"
"\xe6\x3d\x91\x0d\x32\x95\xaa\x3d"
"\x48\x11\x06\xbb\x2d\xf2\x63\x88"
"\x3f\x73\x09\xe2\x45\x56\x31\x51"
"\xfa\x5e\x4e\x62\xf7\x90\xf9\xa9"
"\x7d\x7b\x1b\xb1\xc8\x26\x6e\x66"
"\xf6\x90\x9a\x7f\xf2\x57\xcc\x23"
"\x59\xfa\xfa\xaa\x44\x04\x01\xa7"
"\xa4\x78\xdb\x74\x3d\x8b\xb5";
static const u8 __initconst ctext12[735] =
"\x84\x0b\xdb\xd5\xb7\xa8\xfe\x20"
"\xbb\xb1\x12\x7f\x41\xea\xb3\xc0"
"\xa2\xb4\x37\x19\x11\x58\xb6\x0b"
"\x4c\x1d\x38\x05\x54\xd1\x16\x73"
"\x8e\x1c\x20\x90\xa2\x9a\xb7\x74"
"\x47\xe6\xd8\xfc\x18\x3a\xb4\xea"
"\xd5\x16\x5a\x2c\x53\x01\x46\xb3"
"\x18\x33\x74\x6c\x50\xf2\xe8\xc0"
"\x73\xda\x60\x22\xeb\xe3\xe5\x9b"
"\x20\x93\x6c\x4b\x37\x99\xb8\x23"
"\x3b\x4e\xac\xe8\x5b\xe8\x0f\xb7"
"\xc3\x8f\xfb\x4a\x37\xd9\x39\x95"
"\x34\xf1\xdb\x8f\x71\xd9\xc7\x0b"
"\x02\xf1\x63\xfc\x9b\xfc\xc5\xab"
"\xb9\x14\x13\x21\xdf\xce\xaa\x88"
"\x44\x30\x1e\xce\x26\x01\x92\xf8"
"\x9f\x00\x4b\x0c\x4b\xf7\x5f\xe0"
"\x89\xca\x94\x66\x11\x21\x97\xca"
"\x3e\x83\x74\x2d\xdb\x4d\x11\xeb"
"\x97\xc2\x14\xff\x9e\x1e\xa0\x6b"
"\x08\xb4\x31\x2b\x85\xc6\x85\x6c"
"\x90\xec\x39\xc0\xec\xb3\xb5\x4e"
"\xf3\x9c\xe7\x83\x3a\x77\x0a\xf4"
"\x56\xfe\xce\x18\x33\x6d\x0b\x2d"
"\x33\xda\xc8\x05\x5c\xb4\x09\x2a"
"\xde\x6b\x52\x98\x01\xef\x36\x3d"
"\xbd\xf9\x8f\xa8\x3e\xaa\xcd\xd1"
"\x01\x2d\x42\x49\xc3\xb6\x84\xbb"
"\x48\x96\xe0\x90\x93\x6c\x48\x64"
"\xd4\xfa\x7f\x93\x2c\xa6\x21\xc8"
"\x7a\x23\x7b\xaa\x20\x56\x12\xae"
"\x16\x9d\x94\x0f\x54\xa1\xec\xca"
"\x51\x4e\xf2\x39\xf4\xf8\x5f\x04"
"\x5a\x0d\xbf\xf5\x83\xa1\x15\xe1"
"\xf5\x3c\xd8\x62\xa3\xed\x47\x89"
"\x85\x4c\xe5\xdb\xac\x9e\x17\x1d"
"\x0c\x09\xe3\x3e\x39\x5b\x4d\x74"
"\x0e\xf5\x34\xee\x70\x11\x4c\xfd"
"\xdb\x34\xb1\xb5\x10\x3f\x73\xb7"
"\xf5\xfa\xed\xb0\x1f\xa5\xcd\x3c"
"\x8d\x35\x83\xd4\x11\x44\x6e\x6c"
"\x5b\xe0\x0e\x69\xa5\x39\xe5\xbb"
"\xa9\x57\x24\x37\xe6\x1f\xdd\xcf"
"\x16\x2a\x13\xf9\x6a\x2d\x90\xa0"
"\x03\x60\x7a\xed\x69\xd5\x00\x8b"
"\x7e\x4f\xcb\xb9\xfa\x91\xb9\x37"
"\xc1\x26\xce\x90\x97\x22\x64\x64"
"\xc1\x72\x43\x1b\xf6\xac\xc1\x54"
"\x8a\x10\x9c\xdd\x8d\xd5\x8e\xb2"
"\xe4\x85\xda\xe0\x20\x5f\xf4\xb4"
"\x15\xb5\xa0\x8d\x12\x74\x49\x23"
"\x3a\xdf\x4a\xd3\xf0\x3b\x89\xeb"
"\xf8\xcc\x62\x7b\xfb\x93\x07\x41"
"\x61\x26\x94\x58\x70\xa6\x3c\xe4"
"\xff\x58\xc4\x13\x3d\xcb\x36\x6b"
"\x32\xe5\xb2\x6d\x03\x74\x6f\x76"
"\x93\x77\xde\x48\xc4\xfa\x30\x4a"
"\xda\x49\x80\x77\x0f\x1c\xbe\x11"
"\xc8\x48\xb1\xe5\xbb\xf2\x8a\xe1"
"\x96\x2f\x9f\xd1\x8e\x8a\x5c\xe2"
"\xf7\xd7\xd8\x54\xf3\x3f\xc4\x91"
"\xb8\xfb\x86\xdc\x46\x24\x91\x60"
"\x6c\x2f\xc9\x41\x37\x51\x49\x54"
"\x09\x81\x21\xf3\x03\x9f\x2b\xe3"
"\x1f\x39\x63\xaf\xf4\xd7\x53\x60"
"\xa7\xc7\x54\xf9\xee\xb1\xb1\x7d"
"\x75\x54\x65\x93\xfe\xb1\x68\x6b"
"\x57\x02\xf9\xbb\x0e\xf9\xf8\xbf"
"\x01\x12\x27\xb4\xfe\xe4\x79\x7a"
"\x40\x5b\x51\x4b\xdf\x38\xec\xb1"
"\x6a\x56\xff\x35\x4d\x42\x33\xaa"
"\x6f\x1b\xe4\xdc\xe0\xdb\x85\x35"
"\x62\x10\xd4\xec\xeb\xc5\x7e\x45"
"\x1c\x6f\x17\xca\x3b\x8e\x2d\x66"
"\x4f\x4b\x36\x56\xcd\x1b\x59\xaa"
"\xd2\x9b\x17\xb9\x58\xdf\x7b\x64"
"\x8a\xff\x3b\x9c\xa6\xb5\x48\x9e"
"\xaa\xe2\x5d\x09\x71\x32\x5f\xb6"
"\x29\xbe\xe7\xc7\x52\x7e\x91\x82"
"\x6b\x6d\x33\xe1\x34\x06\x36\x21"
"\x5e\xbe\x1e\x2f\x3e\xc1\xfb\xea"
"\x49\x2c\xb5\xca\xf7\xb0\x37\xea"
"\x1f\xed\x10\x04\xd9\x48\x0d\x1a"
"\x1c\xfb\xe7\x84\x0e\x83\x53\x74"
"\xc7\x65\xe2\x5c\xe5\xba\x73\x4c"
"\x0e\xe1\xb5\x11\x45\x61\x43\x46"
"\xaa\x25\x8f\xbd\x85\x08\xfa\x4c"
"\x15\xc1\xc0\xd8\xf5\xdc\x16\xbb"
"\x7b\x1d\xe3\x87\x57\xa7\x2a\x1d"
"\x38\x58\x9e\x8a\x43\xdc\x57"
"\xd1\x81\x7d\x2b\xe9\xff\x99\x3a"
"\x4b\x24\x52\x58\x55\xe1\x49\x14";
static struct {
const u8 *ptext;
const u8 *ctext;
u8 key[AES_MAX_KEY_SIZE];
u8 iv[GCM_AES_IV_SIZE];
u8 assoc[20];
int klen;
int clen;
int plen;
int alen;
} const aesgcm_tv[] __initconst = {
{ /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
.klen = 16,
.ctext = ctext0,
.clen = sizeof(ctext0),
}, {
.klen = 16,
.ptext = ptext1,
.plen = sizeof(ptext1),
.ctext = ctext1,
.clen = sizeof(ctext1),
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08",
.klen = 16,
.iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
"\xde\xca\xf8\x88",
.ptext = ptext2,
.plen = sizeof(ptext2),
.ctext = ctext2,
.clen = sizeof(ctext2),
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08",
.klen = 16,
.iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
"\xde\xca\xf8\x88",
.ptext = ptext3,
.plen = sizeof(ptext3),
.assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
"\xfe\xed\xfa\xce\xde\xad\xbe\xef"
"\xab\xad\xda\xd2",
.alen = 20,
.ctext = ctext3,
.clen = sizeof(ctext3),
}, {
.klen = 24,
.ctext = ctext4,
.clen = sizeof(ctext4),
}, {
.klen = 24,
.ptext = ptext1,
.plen = sizeof(ptext1),
.ctext = ctext5,
.clen = sizeof(ctext5),
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08"
"\xfe\xff\xe9\x92\x86\x65\x73\x1c",
.klen = 24,
.iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
"\xde\xca\xf8\x88",
.ptext = ptext6,
.plen = sizeof(ptext6),
.ctext = ctext6,
.clen = sizeof(ctext6),
}, {
.klen = 32,
.ctext = ctext7,
.clen = sizeof(ctext7),
}, {
.klen = 32,
.ptext = ptext1,
.plen = sizeof(ptext1),
.ctext = ctext8,
.clen = sizeof(ctext8),
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08"
"\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08",
.klen = 32,
.iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
"\xde\xca\xf8\x88",
.ptext = ptext9,
.plen = sizeof(ptext9),
.ctext = ctext9,
.clen = sizeof(ctext9),
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08"
"\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08",
.klen = 32,
.iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
"\xde\xca\xf8\x88",
.ptext = ptext10,
.plen = sizeof(ptext10),
.assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
"\xfe\xed\xfa\xce\xde\xad\xbe\xef"
"\xab\xad\xda\xd2",
.alen = 20,
.ctext = ctext10,
.clen = sizeof(ctext10),
}, {
.key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
"\x6d\x6a\x8f\x94\x67\x30\x83\x08"
"\xfe\xff\xe9\x92\x86\x65\x73\x1c",
.klen = 24,
.iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
"\xde\xca\xf8\x88",
.ptext = ptext11,
.plen = sizeof(ptext11),
.assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
"\xfe\xed\xfa\xce\xde\xad\xbe\xef"
"\xab\xad\xda\xd2",
.alen = 20,
.ctext = ctext11,
.clen = sizeof(ctext11),
}, {
.key = "\x62\x35\xf8\x95\xfc\xa5\xeb\xf6"
"\x0e\x92\x12\x04\xd3\xa1\x3f\x2e"
"\x8b\x32\xcf\xe7\x44\xed\x13\x59"
"\x04\x38\x77\xb0\xb9\xad\xb4\x38",
.klen = 32,
.iv = "\x00\xff\xff\xff\xff\x00\x00\xff"
"\xff\xff\x00\xff",
.ptext = ptext12,
.plen = sizeof(ptext12),
.ctext = ctext12,
.clen = sizeof(ctext12),
}
};
static int __init libaesgcm_init(void)
{
for (int i = 0; i < ARRAY_SIZE(aesgcm_tv); i++) {
u8 tagbuf[AES_BLOCK_SIZE];
int plen = aesgcm_tv[i].plen;
struct aesgcm_ctx ctx;
u8 buf[sizeof(ptext12)];
if (aesgcm_expandkey(&ctx, aesgcm_tv[i].key, aesgcm_tv[i].klen,
aesgcm_tv[i].clen - plen)) {
pr_err("aesgcm_expandkey() failed on vector %d\n", i);
return -ENODEV;
}
if (!aesgcm_decrypt(&ctx, buf, aesgcm_tv[i].ctext, plen,
aesgcm_tv[i].assoc, aesgcm_tv[i].alen,
aesgcm_tv[i].iv, aesgcm_tv[i].ctext + plen)
|| memcmp(buf, aesgcm_tv[i].ptext, plen)) {
pr_err("aesgcm_decrypt() #1 failed on vector %d\n", i);
return -ENODEV;
}
/* encrypt in place */
aesgcm_encrypt(&ctx, buf, buf, plen, aesgcm_tv[i].assoc,
aesgcm_tv[i].alen, aesgcm_tv[i].iv, tagbuf);
if (memcmp(buf, aesgcm_tv[i].ctext, plen)) {
pr_err("aesgcm_encrypt() failed on vector %d\n", i);
return -ENODEV;
}
/* decrypt in place */
if (!aesgcm_decrypt(&ctx, buf, buf, plen, aesgcm_tv[i].assoc,
aesgcm_tv[i].alen, aesgcm_tv[i].iv, tagbuf)
|| memcmp(buf, aesgcm_tv[i].ptext, plen)) {
pr_err("aesgcm_decrypt() #2 failed on vector %d\n", i);
return -ENODEV;
}
}
return 0;
}
module_init(libaesgcm_init);
static void __exit libaesgcm_exit(void)
{
}
module_exit(libaesgcm_exit);
#endif
| linux-master | lib/crypto/aesgcm.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2016 The fiat-crypto Authors.
* Copyright (C) 2018-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*
* This is a machine-generated formally verified implementation of Curve25519
* ECDH from: <https://github.com/mit-plv/fiat-crypto>. Though originally
* machine generated, it has been tweaked to be suitable for use in the kernel.
* It is optimized for 32-bit machines and machines that cannot work efficiently
* with 128-bit integer types.
*/
#include <asm/unaligned.h>
#include <crypto/curve25519.h>
#include <linux/string.h>
/* fe means field element. Here the field is \Z/(2^255-19). An element t,
* entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
* t[3]+2^102 t[4]+...+2^230 t[9].
* fe limbs are bounded by 1.125*2^26,1.125*2^25,1.125*2^26,1.125*2^25,etc.
* Multiplication and carrying produce fe from fe_loose.
*/
typedef struct fe { u32 v[10]; } fe;
/* fe_loose limbs are bounded by 3.375*2^26,3.375*2^25,3.375*2^26,3.375*2^25,etc
* Addition and subtraction produce fe_loose from (fe, fe).
*/
typedef struct fe_loose { u32 v[10]; } fe_loose;
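/*
 * For example, the integer 1 is the fe {1, 0, 0, ...}; fe_frombytes()
 * below splits a 32-byte little-endian string into this alternating
 * 26-/25-bit limb pattern.
 */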
static __always_inline void fe_frombytes_impl(u32 h[10], const u8 *s)
{
/* Ignores top bit of s. */
u32 a0 = get_unaligned_le32(s);
u32 a1 = get_unaligned_le32(s+4);
u32 a2 = get_unaligned_le32(s+8);
u32 a3 = get_unaligned_le32(s+12);
u32 a4 = get_unaligned_le32(s+16);
u32 a5 = get_unaligned_le32(s+20);
u32 a6 = get_unaligned_le32(s+24);
u32 a7 = get_unaligned_le32(s+28);
h[0] = a0&((1<<26)-1); /* 26 used, 32-26 left. 26 */
h[1] = (a0>>26) | ((a1&((1<<19)-1))<< 6); /* (32-26) + 19 = 6+19 = 25 */
h[2] = (a1>>19) | ((a2&((1<<13)-1))<<13); /* (32-19) + 13 = 13+13 = 26 */
h[3] = (a2>>13) | ((a3&((1<< 6)-1))<<19); /* (32-13) + 6 = 19+ 6 = 25 */
h[4] = (a3>> 6); /* (32- 6) = 26 */
h[5] = a4&((1<<25)-1); /* 25 */
h[6] = (a4>>25) | ((a5&((1<<19)-1))<< 7); /* (32-25) + 19 = 7+19 = 26 */
h[7] = (a5>>19) | ((a6&((1<<12)-1))<<13); /* (32-19) + 12 = 13+12 = 25 */
h[8] = (a6>>12) | ((a7&((1<< 6)-1))<<20); /* (32-12) + 6 = 20+ 6 = 26 */
h[9] = (a7>> 6)&((1<<25)-1); /* 25 */
}
static __always_inline void fe_frombytes(fe *h, const u8 *s)
{
fe_frombytes_impl(h->v, s);
}
static __always_inline u8 /*bool*/
addcarryx_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
{
/* This function extracts 25 bits of result and 1 bit of carry
* (26 total), so a 32-bit intermediate is sufficient.
*/
u32 x = a + b + c;
*low = x & ((1 << 25) - 1);
return (x >> 25) & 1;
}
static __always_inline u8 /*bool*/
addcarryx_u26(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
{
/* This function extracts 26 bits of result and 1 bit of carry
* (27 total), so a 32-bit intermediate is sufficient.
*/
u32 x = a + b + c;
*low = x & ((1 << 26) - 1);
return (x >> 26) & 1;
}
static __always_inline u8 /*bool*/
subborrow_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
{
/* This function extracts 25 bits of result and 1 bit of borrow
* (26 total), so a 32-bit intermediate is sufficient.
*/
u32 x = a - b - c;
*low = x & ((1 << 25) - 1);
return x >> 31;
}
static __always_inline u8 /*bool*/
subborrow_u26(u8 /*bool*/ c, u32 a, u32 b, u32 *low)
{
/* This function extracts 26 bits of result and 1 bit of borrow
* (27 total), so a 32-bit intermediate is sufficient.
*/
u32 x = a - b - c;
*low = x & ((1 << 26) - 1);
return x >> 31;
}
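/* Constant-time select: returns nz when t is nonzero, z otherwise. */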
static __always_inline u32 cmovznz32(u32 t, u32 z, u32 nz)
{
t = -!!t; /* all set if nonzero, 0 if 0 */
return (t&nz) | ((~t)&z);
}
static __always_inline void fe_freeze(u32 out[10], const u32 in1[10])
{
{ const u32 x17 = in1[9];
{ const u32 x18 = in1[8];
{ const u32 x16 = in1[7];
{ const u32 x14 = in1[6];
{ const u32 x12 = in1[5];
{ const u32 x10 = in1[4];
{ const u32 x8 = in1[3];
{ const u32 x6 = in1[2];
{ const u32 x4 = in1[1];
{ const u32 x2 = in1[0];
{ u32 x20; u8/*bool*/ x21 = subborrow_u26(0x0, x2, 0x3ffffed, &x20);
{ u32 x23; u8/*bool*/ x24 = subborrow_u25(x21, x4, 0x1ffffff, &x23);
{ u32 x26; u8/*bool*/ x27 = subborrow_u26(x24, x6, 0x3ffffff, &x26);
{ u32 x29; u8/*bool*/ x30 = subborrow_u25(x27, x8, 0x1ffffff, &x29);
{ u32 x32; u8/*bool*/ x33 = subborrow_u26(x30, x10, 0x3ffffff, &x32);
{ u32 x35; u8/*bool*/ x36 = subborrow_u25(x33, x12, 0x1ffffff, &x35);
{ u32 x38; u8/*bool*/ x39 = subborrow_u26(x36, x14, 0x3ffffff, &x38);
{ u32 x41; u8/*bool*/ x42 = subborrow_u25(x39, x16, 0x1ffffff, &x41);
{ u32 x44; u8/*bool*/ x45 = subborrow_u26(x42, x18, 0x3ffffff, &x44);
{ u32 x47; u8/*bool*/ x48 = subborrow_u25(x45, x17, 0x1ffffff, &x47);
{ u32 x49 = cmovznz32(x48, 0x0, 0xffffffff);
{ u32 x50 = (x49 & 0x3ffffed);
{ u32 x52; u8/*bool*/ x53 = addcarryx_u26(0x0, x20, x50, &x52);
{ u32 x54 = (x49 & 0x1ffffff);
{ u32 x56; u8/*bool*/ x57 = addcarryx_u25(x53, x23, x54, &x56);
{ u32 x58 = (x49 & 0x3ffffff);
{ u32 x60; u8/*bool*/ x61 = addcarryx_u26(x57, x26, x58, &x60);
{ u32 x62 = (x49 & 0x1ffffff);
{ u32 x64; u8/*bool*/ x65 = addcarryx_u25(x61, x29, x62, &x64);
{ u32 x66 = (x49 & 0x3ffffff);
{ u32 x68; u8/*bool*/ x69 = addcarryx_u26(x65, x32, x66, &x68);
{ u32 x70 = (x49 & 0x1ffffff);
{ u32 x72; u8/*bool*/ x73 = addcarryx_u25(x69, x35, x70, &x72);
{ u32 x74 = (x49 & 0x3ffffff);
{ u32 x76; u8/*bool*/ x77 = addcarryx_u26(x73, x38, x74, &x76);
{ u32 x78 = (x49 & 0x1ffffff);
{ u32 x80; u8/*bool*/ x81 = addcarryx_u25(x77, x41, x78, &x80);
{ u32 x82 = (x49 & 0x3ffffff);
{ u32 x84; u8/*bool*/ x85 = addcarryx_u26(x81, x44, x82, &x84);
{ u32 x86 = (x49 & 0x1ffffff);
{ u32 x88; addcarryx_u25(x85, x47, x86, &x88);
out[0] = x52;
out[1] = x56;
out[2] = x60;
out[3] = x64;
out[4] = x68;
out[5] = x72;
out[6] = x76;
out[7] = x80;
out[8] = x84;
out[9] = x88;
}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
}
static __always_inline void fe_tobytes(u8 s[32], const fe *f)
{
u32 h[10];
fe_freeze(h, f->v);
s[0] = h[0] >> 0;
s[1] = h[0] >> 8;
s[2] = h[0] >> 16;
s[3] = (h[0] >> 24) | (h[1] << 2);
s[4] = h[1] >> 6;
s[5] = h[1] >> 14;
s[6] = (h[1] >> 22) | (h[2] << 3);
s[7] = h[2] >> 5;
s[8] = h[2] >> 13;
s[9] = (h[2] >> 21) | (h[3] << 5);
s[10] = h[3] >> 3;
s[11] = h[3] >> 11;
s[12] = (h[3] >> 19) | (h[4] << 6);
s[13] = h[4] >> 2;
s[14] = h[4] >> 10;
s[15] = h[4] >> 18;
s[16] = h[5] >> 0;
s[17] = h[5] >> 8;
s[18] = h[5] >> 16;
s[19] = (h[5] >> 24) | (h[6] << 1);
s[20] = h[6] >> 7;
s[21] = h[6] >> 15;
s[22] = (h[6] >> 23) | (h[7] << 3);
s[23] = h[7] >> 5;
s[24] = h[7] >> 13;
s[25] = (h[7] >> 21) | (h[8] << 4);
s[26] = h[8] >> 4;
s[27] = h[8] >> 12;
s[28] = (h[8] >> 20) | (h[9] << 6);
s[29] = h[9] >> 2;
s[30] = h[9] >> 10;
s[31] = h[9] >> 18;
}
/* h = f */
static __always_inline void fe_copy(fe *h, const fe *f)
{
memmove(h, f, sizeof(u32) * 10);
}
static __always_inline void fe_copy_lt(fe_loose *h, const fe *f)
{
memmove(h, f, sizeof(u32) * 10);
}
/* h = 0 */
static __always_inline void fe_0(fe *h)
{
memset(h, 0, sizeof(u32) * 10);
}
/* h = 1 */
static __always_inline void fe_1(fe *h)
{
memset(h, 0, sizeof(u32) * 10);
h->v[0] = 1;
}
static noinline void fe_add_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
{
{ const u32 x20 = in1[9];
{ const u32 x21 = in1[8];
{ const u32 x19 = in1[7];
{ const u32 x17 = in1[6];
{ const u32 x15 = in1[5];
{ const u32 x13 = in1[4];
{ const u32 x11 = in1[3];
{ const u32 x9 = in1[2];
{ const u32 x7 = in1[1];
{ const u32 x5 = in1[0];
{ const u32 x38 = in2[9];
{ const u32 x39 = in2[8];
{ const u32 x37 = in2[7];
{ const u32 x35 = in2[6];
{ const u32 x33 = in2[5];
{ const u32 x31 = in2[4];
{ const u32 x29 = in2[3];
{ const u32 x27 = in2[2];
{ const u32 x25 = in2[1];
{ const u32 x23 = in2[0];
out[0] = (x5 + x23);
out[1] = (x7 + x25);
out[2] = (x9 + x27);
out[3] = (x11 + x29);
out[4] = (x13 + x31);
out[5] = (x15 + x33);
out[6] = (x17 + x35);
out[7] = (x19 + x37);
out[8] = (x21 + x39);
out[9] = (x20 + x38);
}}}}}}}}}}}}}}}}}}}}
}
/* h = f + g
* Can overlap h with f or g.
*/
static __always_inline void fe_add(fe_loose *h, const fe *f, const fe *g)
{
fe_add_impl(h->v, f->v, g->v);
}
static noinline void fe_sub_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
{
{ const u32 x20 = in1[9];
{ const u32 x21 = in1[8];
{ const u32 x19 = in1[7];
{ const u32 x17 = in1[6];
{ const u32 x15 = in1[5];
{ const u32 x13 = in1[4];
{ const u32 x11 = in1[3];
{ const u32 x9 = in1[2];
{ const u32 x7 = in1[1];
{ const u32 x5 = in1[0];
{ const u32 x38 = in2[9];
{ const u32 x39 = in2[8];
{ const u32 x37 = in2[7];
{ const u32 x35 = in2[6];
{ const u32 x33 = in2[5];
{ const u32 x31 = in2[4];
{ const u32 x29 = in2[3];
{ const u32 x27 = in2[2];
{ const u32 x25 = in2[1];
{ const u32 x23 = in2[0];
out[0] = ((0x7ffffda + x5) - x23);
out[1] = ((0x3fffffe + x7) - x25);
out[2] = ((0x7fffffe + x9) - x27);
out[3] = ((0x3fffffe + x11) - x29);
out[4] = ((0x7fffffe + x13) - x31);
out[5] = ((0x3fffffe + x15) - x33);
out[6] = ((0x7fffffe + x17) - x35);
out[7] = ((0x3fffffe + x19) - x37);
out[8] = ((0x7fffffe + x21) - x39);
out[9] = ((0x3fffffe + x20) - x38);
}}}}}}}}}}}}}}}}}}}}
}
/* h = f - g
* Can overlap h with f or g.
*/
static __always_inline void fe_sub(fe_loose *h, const fe *f, const fe *g)
{
fe_sub_impl(h->v, f->v, g->v);
}
static noinline void fe_mul_impl(u32 out[10], const u32 in1[10], const u32 in2[10])
{
{ const u32 x20 = in1[9];
{ const u32 x21 = in1[8];
{ const u32 x19 = in1[7];
{ const u32 x17 = in1[6];
{ const u32 x15 = in1[5];
{ const u32 x13 = in1[4];
{ const u32 x11 = in1[3];
{ const u32 x9 = in1[2];
{ const u32 x7 = in1[1];
{ const u32 x5 = in1[0];
{ const u32 x38 = in2[9];
{ const u32 x39 = in2[8];
{ const u32 x37 = in2[7];
{ const u32 x35 = in2[6];
{ const u32 x33 = in2[5];
{ const u32 x31 = in2[4];
{ const u32 x29 = in2[3];
{ const u32 x27 = in2[2];
{ const u32 x25 = in2[1];
{ const u32 x23 = in2[0];
{ u64 x40 = ((u64)x23 * x5);
{ u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5));
{ u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5));
{ u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5));
{ u64 x44 = (((((u64)x27 * x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13)) + ((u64)x31 * x5));
{ u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u64)x23 * x15)) + ((u64)x33 * x5));
{ u64 x46 = (((((0x2 * ((((u64)x29 * x11) + ((u64)x25 * x15)) + ((u64)x33 * x7))) + ((u64)x27 * x13)) + ((u64)x31 * x9)) + ((u64)x23 * x17)) + ((u64)x35 * x5));
{ u64 x47 = (((((((((u64)x29 * x13) + ((u64)x31 * x11)) + ((u64)x27 * x15)) + ((u64)x33 * x9)) + ((u64)x25 * x17)) + ((u64)x35 * x7)) + ((u64)x23 * x19)) + ((u64)x37 * x5));
{ u64 x48 = (((((((u64)x31 * x13) + (0x2 * (((((u64)x29 * x15) + ((u64)x33 * x11)) + ((u64)x25 * x19)) + ((u64)x37 * x7)))) + ((u64)x27 * x17)) + ((u64)x35 * x9)) + ((u64)x23 * x21)) + ((u64)x39 * x5));
{ u64 x49 = (((((((((((u64)x31 * x15) + ((u64)x33 * x13)) + ((u64)x29 * x17)) + ((u64)x35 * x11)) + ((u64)x27 * x19)) + ((u64)x37 * x9)) + ((u64)x25 * x21)) + ((u64)x39 * x7)) + ((u64)x23 * x20)) + ((u64)x38 * x5));
{ u64 x50 = (((((0x2 * ((((((u64)x33 * x15) + ((u64)x29 * x19)) + ((u64)x37 * x11)) + ((u64)x25 * x20)) + ((u64)x38 * x7))) + ((u64)x31 * x17)) + ((u64)x35 * x13)) + ((u64)x27 * x21)) + ((u64)x39 * x9));
{ u64 x51 = (((((((((u64)x33 * x17) + ((u64)x35 * x15)) + ((u64)x31 * x19)) + ((u64)x37 * x13)) + ((u64)x29 * x21)) + ((u64)x39 * x11)) + ((u64)x27 * x20)) + ((u64)x38 * x9));
{ u64 x52 = (((((u64)x35 * x17) + (0x2 * (((((u64)x33 * x19) + ((u64)x37 * x15)) + ((u64)x29 * x20)) + ((u64)x38 * x11)))) + ((u64)x31 * x21)) + ((u64)x39 * x13));
{ u64 x53 = (((((((u64)x35 * x19) + ((u64)x37 * x17)) + ((u64)x33 * x21)) + ((u64)x39 * x15)) + ((u64)x31 * x20)) + ((u64)x38 * x13));
{ u64 x54 = (((0x2 * ((((u64)x37 * x19) + ((u64)x33 * x20)) + ((u64)x38 * x15))) + ((u64)x35 * x21)) + ((u64)x39 * x17));
{ u64 x55 = (((((u64)x37 * x21) + ((u64)x39 * x19)) + ((u64)x35 * x20)) + ((u64)x38 * x17));
{ u64 x56 = (((u64)x39 * x21) + (0x2 * (((u64)x37 * x20) + ((u64)x38 * x19))));
{ u64 x57 = (((u64)x39 * x20) + ((u64)x38 * x21));
{ u64 x58 = ((u64)(0x2 * x38) * x20);
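/*
 * Reduce: x50..x58 carry weight 2^255 and above, and 2^255 == 19 mod p,
 * so fold each back into the low limbs multiplied by 19, computed
 * branch-free as (x << 4) + (x << 1) + x.
 */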
{ u64 x59 = (x48 + (x58 << 0x4));
{ u64 x60 = (x59 + (x58 << 0x1));
{ u64 x61 = (x60 + x58);
{ u64 x62 = (x47 + (x57 << 0x4));
{ u64 x63 = (x62 + (x57 << 0x1));
{ u64 x64 = (x63 + x57);
{ u64 x65 = (x46 + (x56 << 0x4));
{ u64 x66 = (x65 + (x56 << 0x1));
{ u64 x67 = (x66 + x56);
{ u64 x68 = (x45 + (x55 << 0x4));
{ u64 x69 = (x68 + (x55 << 0x1));
{ u64 x70 = (x69 + x55);
{ u64 x71 = (x44 + (x54 << 0x4));
{ u64 x72 = (x71 + (x54 << 0x1));
{ u64 x73 = (x72 + x54);
{ u64 x74 = (x43 + (x53 << 0x4));
{ u64 x75 = (x74 + (x53 << 0x1));
{ u64 x76 = (x75 + x53);
{ u64 x77 = (x42 + (x52 << 0x4));
{ u64 x78 = (x77 + (x52 << 0x1));
{ u64 x79 = (x78 + x52);
{ u64 x80 = (x41 + (x51 << 0x4));
{ u64 x81 = (x80 + (x51 << 0x1));
{ u64 x82 = (x81 + x51);
{ u64 x83 = (x40 + (x50 << 0x4));
{ u64 x84 = (x83 + (x50 << 0x1));
{ u64 x85 = (x84 + x50);
{ u64 x86 = (x85 >> 0x1a);
{ u32 x87 = ((u32)x85 & 0x3ffffff);
{ u64 x88 = (x86 + x82);
{ u64 x89 = (x88 >> 0x19);
{ u32 x90 = ((u32)x88 & 0x1ffffff);
{ u64 x91 = (x89 + x79);
{ u64 x92 = (x91 >> 0x1a);
{ u32 x93 = ((u32)x91 & 0x3ffffff);
{ u64 x94 = (x92 + x76);
{ u64 x95 = (x94 >> 0x19);
{ u32 x96 = ((u32)x94 & 0x1ffffff);
{ u64 x97 = (x95 + x73);
{ u64 x98 = (x97 >> 0x1a);
{ u32 x99 = ((u32)x97 & 0x3ffffff);
{ u64 x100 = (x98 + x70);
{ u64 x101 = (x100 >> 0x19);
{ u32 x102 = ((u32)x100 & 0x1ffffff);
{ u64 x103 = (x101 + x67);
{ u64 x104 = (x103 >> 0x1a);
{ u32 x105 = ((u32)x103 & 0x3ffffff);
{ u64 x106 = (x104 + x64);
{ u64 x107 = (x106 >> 0x19);
{ u32 x108 = ((u32)x106 & 0x1ffffff);
{ u64 x109 = (x107 + x61);
{ u64 x110 = (x109 >> 0x1a);
{ u32 x111 = ((u32)x109 & 0x3ffffff);
{ u64 x112 = (x110 + x49);
{ u64 x113 = (x112 >> 0x19);
{ u32 x114 = ((u32)x112 & 0x1ffffff);
{ u64 x115 = (x87 + (0x13 * x113));
{ u32 x116 = (u32) (x115 >> 0x1a);
{ u32 x117 = ((u32)x115 & 0x3ffffff);
{ u32 x118 = (x116 + x90);
{ u32 x119 = (x118 >> 0x19);
{ u32 x120 = (x118 & 0x1ffffff);
out[0] = x117;
out[1] = x120;
out[2] = (x119 + x93);
out[3] = x96;
out[4] = x99;
out[5] = x102;
out[6] = x105;
out[7] = x108;
out[8] = x111;
out[9] = x114;
}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
}
static __always_inline void fe_mul_ttt(fe *h, const fe *f, const fe *g)
{
fe_mul_impl(h->v, f->v, g->v);
}
static __always_inline void fe_mul_tlt(fe *h, const fe_loose *f, const fe *g)
{
fe_mul_impl(h->v, f->v, g->v);
}
static __always_inline void
fe_mul_tll(fe *h, const fe_loose *f, const fe_loose *g)
{
fe_mul_impl(h->v, f->v, g->v);
}
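/*
 * Squaring specialization of fe_mul_impl: symmetric cross terms are
 * computed once and doubled, roughly halving the multiplications; the
 * 19-fold reduction and carry chain mirror fe_mul_impl.
 */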
static noinline void fe_sqr_impl(u32 out[10], const u32 in1[10])
{
{ const u32 x17 = in1[9];
{ const u32 x18 = in1[8];
{ const u32 x16 = in1[7];
{ const u32 x14 = in1[6];
{ const u32 x12 = in1[5];
{ const u32 x10 = in1[4];
{ const u32 x8 = in1[3];
{ const u32 x6 = in1[2];
{ const u32 x4 = in1[1];
{ const u32 x2 = in1[0];
{ u64 x19 = ((u64)x2 * x2);
{ u64 x20 = ((u64)(0x2 * x2) * x4);
{ u64 x21 = (0x2 * (((u64)x4 * x4) + ((u64)x2 * x6)));
{ u64 x22 = (0x2 * (((u64)x4 * x6) + ((u64)x2 * x8)));
{ u64 x23 = ((((u64)x6 * x6) + ((u64)(0x4 * x4) * x8)) + ((u64)(0x2 * x2) * x10));
{ u64 x24 = (0x2 * ((((u64)x6 * x8) + ((u64)x4 * x10)) + ((u64)x2 * x12)));
{ u64 x25 = (0x2 * (((((u64)x8 * x8) + ((u64)x6 * x10)) + ((u64)x2 * x14)) + ((u64)(0x2 * x4) * x12)));
{ u64 x26 = (0x2 * (((((u64)x8 * x10) + ((u64)x6 * x12)) + ((u64)x4 * x14)) + ((u64)x2 * x16)));
{ u64 x27 = (((u64)x10 * x10) + (0x2 * ((((u64)x6 * x14) + ((u64)x2 * x18)) + (0x2 * (((u64)x4 * x16) + ((u64)x8 * x12))))));
{ u64 x28 = (0x2 * ((((((u64)x10 * x12) + ((u64)x8 * x14)) + ((u64)x6 * x16)) + ((u64)x4 * x18)) + ((u64)x2 * x17)));
{ u64 x29 = (0x2 * (((((u64)x12 * x12) + ((u64)x10 * x14)) + ((u64)x6 * x18)) + (0x2 * (((u64)x8 * x16) + ((u64)x4 * x17)))));
{ u64 x30 = (0x2 * (((((u64)x12 * x14) + ((u64)x10 * x16)) + ((u64)x8 * x18)) + ((u64)x6 * x17)));
{ u64 x31 = (((u64)x14 * x14) + (0x2 * (((u64)x10 * x18) + (0x2 * (((u64)x12 * x16) + ((u64)x8 * x17))))));
{ u64 x32 = (0x2 * ((((u64)x14 * x16) + ((u64)x12 * x18)) + ((u64)x10 * x17)));
{ u64 x33 = (0x2 * ((((u64)x16 * x16) + ((u64)x14 * x18)) + ((u64)(0x2 * x12) * x17)));
{ u64 x34 = (0x2 * (((u64)x16 * x18) + ((u64)x14 * x17)));
{ u64 x35 = (((u64)x18 * x18) + ((u64)(0x4 * x16) * x17));
{ u64 x36 = ((u64)(0x2 * x18) * x17);
{ u64 x37 = ((u64)(0x2 * x17) * x17);
{ u64 x38 = (x27 + (x37 << 0x4));
{ u64 x39 = (x38 + (x37 << 0x1));
{ u64 x40 = (x39 + x37);
{ u64 x41 = (x26 + (x36 << 0x4));
{ u64 x42 = (x41 + (x36 << 0x1));
{ u64 x43 = (x42 + x36);
{ u64 x44 = (x25 + (x35 << 0x4));
{ u64 x45 = (x44 + (x35 << 0x1));
{ u64 x46 = (x45 + x35);
{ u64 x47 = (x24 + (x34 << 0x4));
{ u64 x48 = (x47 + (x34 << 0x1));
{ u64 x49 = (x48 + x34);
{ u64 x50 = (x23 + (x33 << 0x4));
{ u64 x51 = (x50 + (x33 << 0x1));
{ u64 x52 = (x51 + x33);
{ u64 x53 = (x22 + (x32 << 0x4));
{ u64 x54 = (x53 + (x32 << 0x1));
{ u64 x55 = (x54 + x32);
{ u64 x56 = (x21 + (x31 << 0x4));
{ u64 x57 = (x56 + (x31 << 0x1));
{ u64 x58 = (x57 + x31);
{ u64 x59 = (x20 + (x30 << 0x4));
{ u64 x60 = (x59 + (x30 << 0x1));
{ u64 x61 = (x60 + x30);
{ u64 x62 = (x19 + (x29 << 0x4));
{ u64 x63 = (x62 + (x29 << 0x1));
{ u64 x64 = (x63 + x29);
{ u64 x65 = (x64 >> 0x1a);
{ u32 x66 = ((u32)x64 & 0x3ffffff);
{ u64 x67 = (x65 + x61);
{ u64 x68 = (x67 >> 0x19);
{ u32 x69 = ((u32)x67 & 0x1ffffff);
{ u64 x70 = (x68 + x58);
{ u64 x71 = (x70 >> 0x1a);
{ u32 x72 = ((u32)x70 & 0x3ffffff);
{ u64 x73 = (x71 + x55);
{ u64 x74 = (x73 >> 0x19);
{ u32 x75 = ((u32)x73 & 0x1ffffff);
{ u64 x76 = (x74 + x52);
{ u64 x77 = (x76 >> 0x1a);
{ u32 x78 = ((u32)x76 & 0x3ffffff);
{ u64 x79 = (x77 + x49);
{ u64 x80 = (x79 >> 0x19);
{ u32 x81 = ((u32)x79 & 0x1ffffff);
{ u64 x82 = (x80 + x46);
{ u64 x83 = (x82 >> 0x1a);
{ u32 x84 = ((u32)x82 & 0x3ffffff);
{ u64 x85 = (x83 + x43);
{ u64 x86 = (x85 >> 0x19);
{ u32 x87 = ((u32)x85 & 0x1ffffff);
{ u64 x88 = (x86 + x40);
{ u64 x89 = (x88 >> 0x1a);
{ u32 x90 = ((u32)x88 & 0x3ffffff);
{ u64 x91 = (x89 + x28);
{ u64 x92 = (x91 >> 0x19);
{ u32 x93 = ((u32)x91 & 0x1ffffff);
{ u64 x94 = (x66 + (0x13 * x92));
{ u32 x95 = (u32) (x94 >> 0x1a);
{ u32 x96 = ((u32)x94 & 0x3ffffff);
{ u32 x97 = (x95 + x69);
{ u32 x98 = (x97 >> 0x19);
{ u32 x99 = (x97 & 0x1ffffff);
out[0] = x96;
out[1] = x99;
out[2] = (x98 + x72);
out[3] = x75;
out[4] = x78;
out[5] = x81;
out[6] = x84;
out[7] = x87;
out[8] = x90;
out[9] = x93;
}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
}
static __always_inline void fe_sq_tl(fe *h, const fe_loose *f)
{
fe_sqr_impl(h->v, f->v);
}
static __always_inline void fe_sq_tt(fe *h, const fe *f)
{
fe_sqr_impl(h->v, f->v);
}
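/*
 * Inversion via Fermat's little theorem: out = z^(p - 2) mod p with
 * p = 2^255 - 19, using the standard fixed chain of 254 squarings and
 * 11 multiplications, so the running time does not depend on z.
 */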
static __always_inline void fe_loose_invert(fe *out, const fe_loose *z)
{
fe t0;
fe t1;
fe t2;
fe t3;
int i;
fe_sq_tl(&t0, z);
fe_sq_tt(&t1, &t0);
for (i = 1; i < 2; ++i)
fe_sq_tt(&t1, &t1);
fe_mul_tlt(&t1, z, &t1);
fe_mul_ttt(&t0, &t0, &t1);
fe_sq_tt(&t2, &t0);
fe_mul_ttt(&t1, &t1, &t2);
fe_sq_tt(&t2, &t1);
for (i = 1; i < 5; ++i)
fe_sq_tt(&t2, &t2);
fe_mul_ttt(&t1, &t2, &t1);
fe_sq_tt(&t2, &t1);
for (i = 1; i < 10; ++i)
fe_sq_tt(&t2, &t2);
fe_mul_ttt(&t2, &t2, &t1);
fe_sq_tt(&t3, &t2);
for (i = 1; i < 20; ++i)
fe_sq_tt(&t3, &t3);
fe_mul_ttt(&t2, &t3, &t2);
fe_sq_tt(&t2, &t2);
for (i = 1; i < 10; ++i)
fe_sq_tt(&t2, &t2);
fe_mul_ttt(&t1, &t2, &t1);
fe_sq_tt(&t2, &t1);
for (i = 1; i < 50; ++i)
fe_sq_tt(&t2, &t2);
fe_mul_ttt(&t2, &t2, &t1);
fe_sq_tt(&t3, &t2);
for (i = 1; i < 100; ++i)
fe_sq_tt(&t3, &t3);
fe_mul_ttt(&t2, &t3, &t2);
fe_sq_tt(&t2, &t2);
for (i = 1; i < 50; ++i)
fe_sq_tt(&t2, &t2);
fe_mul_ttt(&t1, &t2, &t1);
fe_sq_tt(&t1, &t1);
for (i = 1; i < 5; ++i)
fe_sq_tt(&t1, &t1);
fe_mul_ttt(out, &t1, &t0);
}
static __always_inline void fe_invert(fe *out, const fe *z)
{
fe_loose l;
fe_copy_lt(&l, z);
fe_loose_invert(out, &l);
}
/* Replace (f,g) with (g,f) if b == 1;
* replace (f,g) with (f,g) if b == 0.
*
* Preconditions: b in {0,1}
*/
static noinline void fe_cswap(fe *f, fe *g, unsigned int b)
{
unsigned i;
b = 0 - b;
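/* b is 0 or 1, so 0 - b is an all-zero or all-one mask; the masked
 * XORs below swap without branching on secret data.
 */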
for (i = 0; i < 10; i++) {
u32 x = f->v[i] ^ g->v[i];
x &= b;
f->v[i] ^= x;
g->v[i] ^= x;
}
}
/* NOTE: based on fiat-crypto fe_mul, edited for in2=121666, 0, 0. */
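/* 121666 = (A + 2)/4 with A = 486662: the a24 constant of RFC 7748,
 * used in the ladder's doubling formula.
 */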
static __always_inline void fe_mul_121666_impl(u32 out[10], const u32 in1[10])
{
{ const u32 x20 = in1[9];
{ const u32 x21 = in1[8];
{ const u32 x19 = in1[7];
{ const u32 x17 = in1[6];
{ const u32 x15 = in1[5];
{ const u32 x13 = in1[4];
{ const u32 x11 = in1[3];
{ const u32 x9 = in1[2];
{ const u32 x7 = in1[1];
{ const u32 x5 = in1[0];
{ const u32 x38 = 0;
{ const u32 x39 = 0;
{ const u32 x37 = 0;
{ const u32 x35 = 0;
{ const u32 x33 = 0;
{ const u32 x31 = 0;
{ const u32 x29 = 0;
{ const u32 x27 = 0;
{ const u32 x25 = 0;
{ const u32 x23 = 121666;
{ u64 x40 = ((u64)x23 * x5);
{ u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5));
{ u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5));
{ u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5));
{ u64 x44 = (((((u64)x27 * x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13)) + ((u64)x31 * x5));
{ u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u64)x23 * x15)) + ((u64)x33 * x5));
{ u64 x46 = (((((0x2 * ((((u64)x29 * x11) + ((u64)x25 * x15)) + ((u64)x33 * x7))) + ((u64)x27 * x13)) + ((u64)x31 * x9)) + ((u64)x23 * x17)) + ((u64)x35 * x5));
{ u64 x47 = (((((((((u64)x29 * x13) + ((u64)x31 * x11)) + ((u64)x27 * x15)) + ((u64)x33 * x9)) + ((u64)x25 * x17)) + ((u64)x35 * x7)) + ((u64)x23 * x19)) + ((u64)x37 * x5));
{ u64 x48 = (((((((u64)x31 * x13) + (0x2 * (((((u64)x29 * x15) + ((u64)x33 * x11)) + ((u64)x25 * x19)) + ((u64)x37 * x7)))) + ((u64)x27 * x17)) + ((u64)x35 * x9)) + ((u64)x23 * x21)) + ((u64)x39 * x5));
{ u64 x49 = (((((((((((u64)x31 * x15) + ((u64)x33 * x13)) + ((u64)x29 * x17)) + ((u64)x35 * x11)) + ((u64)x27 * x19)) + ((u64)x37 * x9)) + ((u64)x25 * x21)) + ((u64)x39 * x7)) + ((u64)x23 * x20)) + ((u64)x38 * x5));
{ u64 x50 = (((((0x2 * ((((((u64)x33 * x15) + ((u64)x29 * x19)) + ((u64)x37 * x11)) + ((u64)x25 * x20)) + ((u64)x38 * x7))) + ((u64)x31 * x17)) + ((u64)x35 * x13)) + ((u64)x27 * x21)) + ((u64)x39 * x9));
{ u64 x51 = (((((((((u64)x33 * x17) + ((u64)x35 * x15)) + ((u64)x31 * x19)) + ((u64)x37 * x13)) + ((u64)x29 * x21)) + ((u64)x39 * x11)) + ((u64)x27 * x20)) + ((u64)x38 * x9));
{ u64 x52 = (((((u64)x35 * x17) + (0x2 * (((((u64)x33 * x19) + ((u64)x37 * x15)) + ((u64)x29 * x20)) + ((u64)x38 * x11)))) + ((u64)x31 * x21)) + ((u64)x39 * x13));
{ u64 x53 = (((((((u64)x35 * x19) + ((u64)x37 * x17)) + ((u64)x33 * x21)) + ((u64)x39 * x15)) + ((u64)x31 * x20)) + ((u64)x38 * x13));
{ u64 x54 = (((0x2 * ((((u64)x37 * x19) + ((u64)x33 * x20)) + ((u64)x38 * x15))) + ((u64)x35 * x21)) + ((u64)x39 * x17));
{ u64 x55 = (((((u64)x37 * x21) + ((u64)x39 * x19)) + ((u64)x35 * x20)) + ((u64)x38 * x17));
{ u64 x56 = (((u64)x39 * x21) + (0x2 * (((u64)x37 * x20) + ((u64)x38 * x19))));
{ u64 x57 = (((u64)x39 * x20) + ((u64)x38 * x21));
{ u64 x58 = ((u64)(0x2 * x38) * x20);
{ u64 x59 = (x48 + (x58 << 0x4));
{ u64 x60 = (x59 + (x58 << 0x1));
{ u64 x61 = (x60 + x58);
{ u64 x62 = (x47 + (x57 << 0x4));
{ u64 x63 = (x62 + (x57 << 0x1));
{ u64 x64 = (x63 + x57);
{ u64 x65 = (x46 + (x56 << 0x4));
{ u64 x66 = (x65 + (x56 << 0x1));
{ u64 x67 = (x66 + x56);
{ u64 x68 = (x45 + (x55 << 0x4));
{ u64 x69 = (x68 + (x55 << 0x1));
{ u64 x70 = (x69 + x55);
{ u64 x71 = (x44 + (x54 << 0x4));
{ u64 x72 = (x71 + (x54 << 0x1));
{ u64 x73 = (x72 + x54);
{ u64 x74 = (x43 + (x53 << 0x4));
{ u64 x75 = (x74 + (x53 << 0x1));
{ u64 x76 = (x75 + x53);
{ u64 x77 = (x42 + (x52 << 0x4));
{ u64 x78 = (x77 + (x52 << 0x1));
{ u64 x79 = (x78 + x52);
{ u64 x80 = (x41 + (x51 << 0x4));
{ u64 x81 = (x80 + (x51 << 0x1));
{ u64 x82 = (x81 + x51);
{ u64 x83 = (x40 + (x50 << 0x4));
{ u64 x84 = (x83 + (x50 << 0x1));
{ u64 x85 = (x84 + x50);
{ u64 x86 = (x85 >> 0x1a);
{ u32 x87 = ((u32)x85 & 0x3ffffff);
{ u64 x88 = (x86 + x82);
{ u64 x89 = (x88 >> 0x19);
{ u32 x90 = ((u32)x88 & 0x1ffffff);
{ u64 x91 = (x89 + x79);
{ u64 x92 = (x91 >> 0x1a);
{ u32 x93 = ((u32)x91 & 0x3ffffff);
{ u64 x94 = (x92 + x76);
{ u64 x95 = (x94 >> 0x19);
{ u32 x96 = ((u32)x94 & 0x1ffffff);
{ u64 x97 = (x95 + x73);
{ u64 x98 = (x97 >> 0x1a);
{ u32 x99 = ((u32)x97 & 0x3ffffff);
{ u64 x100 = (x98 + x70);
{ u64 x101 = (x100 >> 0x19);
{ u32 x102 = ((u32)x100 & 0x1ffffff);
{ u64 x103 = (x101 + x67);
{ u64 x104 = (x103 >> 0x1a);
{ u32 x105 = ((u32)x103 & 0x3ffffff);
{ u64 x106 = (x104 + x64);
{ u64 x107 = (x106 >> 0x19);
{ u32 x108 = ((u32)x106 & 0x1ffffff);
{ u64 x109 = (x107 + x61);
{ u64 x110 = (x109 >> 0x1a);
{ u32 x111 = ((u32)x109 & 0x3ffffff);
{ u64 x112 = (x110 + x49);
{ u64 x113 = (x112 >> 0x19);
{ u32 x114 = ((u32)x112 & 0x1ffffff);
{ u64 x115 = (x87 + (0x13 * x113));
{ u32 x116 = (u32) (x115 >> 0x1a);
{ u32 x117 = ((u32)x115 & 0x3ffffff);
{ u32 x118 = (x116 + x90);
{ u32 x119 = (x118 >> 0x19);
{ u32 x120 = (x118 & 0x1ffffff);
out[0] = x117;
out[1] = x120;
out[2] = (x119 + x93);
out[3] = x96;
out[4] = x99;
out[5] = x102;
out[6] = x105;
out[7] = x108;
out[8] = x111;
out[9] = x114;
}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}
}
static __always_inline void fe_mul121666(fe *h, const fe_loose *f)
{
fe_mul_121666_impl(h->v, f->v);
}
void curve25519_generic(u8 out[CURVE25519_KEY_SIZE],
const u8 scalar[CURVE25519_KEY_SIZE],
const u8 point[CURVE25519_KEY_SIZE])
{
fe x1, x2, z2, x3, z3;
fe_loose x2l, z2l, x3l;
unsigned swap = 0;
int pos;
u8 e[32];
memcpy(e, scalar, 32);
curve25519_clamp_secret(e);
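/* Clamp per RFC 7748: clear the three low bits (making the scalar a
 * multiple of the cofactor 8), clear bit 255 and set bit 254.
 */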
/* The following implementation was transcribed to Coq and proven to
* correspond to unary scalar multiplication in affine coordinates given
* that x1 != 0 is the x coordinate of some point on the curve. It was
* also checked in Coq that doing a ladderstep with x1 = x3 = 0 gives
* z2' = z3' = 0, and z2 = z3 = 0 gives z2' = z3' = 0. The statement was
* quantified over the underlying field, so it applies to Curve25519
* itself and the quadratic twist of Curve25519. It was not proven in
* Coq that prime-field arithmetic correctly simulates extension-field
* arithmetic on prime-field values. The decoding of the byte array
* representation of e was not considered.
*
* Specification of Montgomery curves in affine coordinates:
* <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Spec/MontgomeryCurve.v#L27>
*
* Proof that these form a group that is isomorphic to a Weierstrass
* curve:
* <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/AffineProofs.v#L35>
*
* Coq transcription and correctness proof of the loop
* (where scalarbits=255):
* <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZ.v#L118>
* <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L278>
* preconditions: 0 <= e < 2^255 (not necessarily e < order),
* fe_invert(0) = 0
*/
fe_frombytes(&x1, point);
fe_1(&x2);
fe_0(&z2);
fe_copy(&x3, &x1);
fe_1(&z3);
for (pos = 254; pos >= 0; --pos) {
fe tmp0, tmp1;
fe_loose tmp0l, tmp1l;
/* loop invariant as of right before the test, for the case
* where x1 != 0:
* pos >= -1; if z2 = 0 then x2 is nonzero; if z3 = 0 then x3
* is nonzero
* let r := e >> (pos+1) in the following equalities of
* projective points:
* to_xz (r*P) === if swap then (x3, z3) else (x2, z2)
* to_xz ((r+1)*P) === if swap then (x2, z2) else (x3, z3)
* x1 is the nonzero x coordinate of the nonzero
* point (r*P-(r+1)*P)
*/
unsigned b = 1 & (e[pos / 8] >> (pos & 7));
swap ^= b;
fe_cswap(&x2, &x3, swap);
fe_cswap(&z2, &z3, swap);
swap = b;
/* Coq transcription of ladderstep formula (called from
* transcribed loop):
* <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZ.v#L89>
* <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L131>
* x1 != 0 <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L217>
* x1 = 0 <https://github.com/mit-plv/fiat-crypto/blob/2456d821825521f7e03e65882cc3521795b0320f/src/Curves/Montgomery/XZProofs.v#L147>
*/
fe_sub(&tmp0l, &x3, &z3);
fe_sub(&tmp1l, &x2, &z2);
fe_add(&x2l, &x2, &z2);
fe_add(&z2l, &x3, &z3);
fe_mul_tll(&z3, &tmp0l, &x2l);
fe_mul_tll(&z2, &z2l, &tmp1l);
fe_sq_tl(&tmp0, &tmp1l);
fe_sq_tl(&tmp1, &x2l);
fe_add(&x3l, &z3, &z2);
fe_sub(&z2l, &z3, &z2);
fe_mul_ttt(&x2, &tmp1, &tmp0);
fe_sub(&tmp1l, &tmp1, &tmp0);
fe_sq_tl(&z2, &z2l);
fe_mul121666(&z3, &tmp1l);
fe_sq_tl(&x3, &x3l);
fe_add(&tmp0l, &tmp0, &z3);
fe_mul_ttt(&z3, &x1, &z2);
fe_mul_tll(&z2, &tmp1l, &tmp0l);
}
/* here pos=-1, so r=e, so to_xz (e*P) === if swap then (x3, z3)
* else (x2, z2)
*/
fe_cswap(&x2, &x3, swap);
fe_cswap(&z2, &z3, swap);
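/* Recover affine x = X2/Z2; fe_invert(0) = 0 (see the precondition
 * above), so low-order inputs that drive Z2 to zero yield all zeros.
 */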
fe_invert(&z2, &z2);
fe_mul_ttt(&x2, &x2, &z2);
fe_tobytes(out, &x2);
memzero_explicit(&x1, sizeof(x1));
memzero_explicit(&x2, sizeof(x2));
memzero_explicit(&z2, sizeof(z2));
memzero_explicit(&x3, sizeof(x3));
memzero_explicit(&z3, sizeof(z3));
memzero_explicit(&x2l, sizeof(x2l));
memzero_explicit(&z2l, sizeof(z2l));
memzero_explicit(&x3l, sizeof(x3l));
memzero_explicit(&e, sizeof(e));
}
| linux-master | lib/crypto/curve25519-fiat32.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Crypto library utility functions
*
* Copyright (c) 2006 Herbert Xu <[email protected]>
*/
#include <asm/unaligned.h>
#include <crypto/utils.h>
#include <linux/module.h>
/*
* XOR @len bytes from @src1 and @src2 together, writing the result to @dst
* (which may alias one of the sources). Don't call this directly; call
* crypto_xor() or crypto_xor_cpy() instead.
*/
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
{
int relalign = 0;
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
int size = sizeof(unsigned long);
int d = (((unsigned long)dst ^ (unsigned long)src1) |
((unsigned long)dst ^ (unsigned long)src2)) &
(size - 1);
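/*
 * d collects the bits in which the three pointers' low addresses
 * differ; its lowest set bit bounds the largest stride at which
 * dst, src1 and src2 remain mutually aligned.
 */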
relalign = d ? 1 << __ffs(d) : size;
/*
* If we care about alignment, process as many bytes as
* needed to advance dst and src to values whose alignments
* equal their relative alignment. This will allow us to
* process the remainder of the input using optimal strides.
*/
while (((unsigned long)dst & (relalign - 1)) && len > 0) {
*dst++ = *src1++ ^ *src2++;
len--;
}
}
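/* XOR in the widest chunks relalign permits: 8, then 4, then 2 bytes,
 * then single bytes.
 */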
while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
u64 l = get_unaligned((u64 *)src1) ^
get_unaligned((u64 *)src2);
put_unaligned(l, (u64 *)dst);
} else {
*(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
}
dst += 8;
src1 += 8;
src2 += 8;
len -= 8;
}
while (len >= 4 && !(relalign & 3)) {
if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
u32 l = get_unaligned((u32 *)src1) ^
get_unaligned((u32 *)src2);
put_unaligned(l, (u32 *)dst);
} else {
*(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
}
dst += 4;
src1 += 4;
src2 += 4;
len -= 4;
}
while (len >= 2 && !(relalign & 1)) {
if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
u16 l = get_unaligned((u16 *)src1) ^
get_unaligned((u16 *)src2);
put_unaligned(l, (u16 *)dst);
} else {
*(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
}
dst += 2;
src1 += 2;
src2 += 2;
len -= 2;
}
while (len--)
*dst++ = *src1++ ^ *src2++;
}
EXPORT_SYMBOL_GPL(__crypto_xor);
MODULE_LICENSE("GPL");
| linux-master | lib/crypto/utils.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include <crypto/curve25519.h>
struct curve25519_test_vector {
u8 private[CURVE25519_KEY_SIZE];
u8 public[CURVE25519_KEY_SIZE];
u8 result[CURVE25519_KEY_SIZE];
bool valid;
};
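/* valid is the expected outcome of the exchange: false marks inputs,
 * such as low-order points, whose all-zero result must be rejected.
 */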
static const struct curve25519_test_vector curve25519_test_vectors[] __initconst = {
{
.private = { 0x77, 0x07, 0x6d, 0x0a, 0x73, 0x18, 0xa5, 0x7d,
0x3c, 0x16, 0xc1, 0x72, 0x51, 0xb2, 0x66, 0x45,
0xdf, 0x4c, 0x2f, 0x87, 0xeb, 0xc0, 0x99, 0x2a,
0xb1, 0x77, 0xfb, 0xa5, 0x1d, 0xb9, 0x2c, 0x2a },
.public = { 0xde, 0x9e, 0xdb, 0x7d, 0x7b, 0x7d, 0xc1, 0xb4,
0xd3, 0x5b, 0x61, 0xc2, 0xec, 0xe4, 0x35, 0x37,
0x3f, 0x83, 0x43, 0xc8, 0x5b, 0x78, 0x67, 0x4d,
0xad, 0xfc, 0x7e, 0x14, 0x6f, 0x88, 0x2b, 0x4f },
.result = { 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1,
0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25,
0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33,
0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 },
.valid = true
},
{
.private = { 0x5d, 0xab, 0x08, 0x7e, 0x62, 0x4a, 0x8a, 0x4b,
0x79, 0xe1, 0x7f, 0x8b, 0x83, 0x80, 0x0e, 0xe6,
0x6f, 0x3b, 0xb1, 0x29, 0x26, 0x18, 0xb6, 0xfd,
0x1c, 0x2f, 0x8b, 0x27, 0xff, 0x88, 0xe0, 0xeb },
.public = { 0x85, 0x20, 0xf0, 0x09, 0x89, 0x30, 0xa7, 0x54,
0x74, 0x8b, 0x7d, 0xdc, 0xb4, 0x3e, 0xf7, 0x5a,
0x0d, 0xbf, 0x3a, 0x0d, 0x26, 0x38, 0x1a, 0xf4,
0xeb, 0xa4, 0xa9, 0x8e, 0xaa, 0x9b, 0x4e, 0x6a },
.result = { 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1,
0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25,
0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33,
0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 },
.valid = true
},
{
.private = { 1 },
.public = { 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.result = { 0x3c, 0x77, 0x77, 0xca, 0xf9, 0x97, 0xb2, 0x64,
0x41, 0x60, 0x77, 0x66, 0x5b, 0x4e, 0x22, 0x9d,
0x0b, 0x95, 0x48, 0xdc, 0x0c, 0xd8, 0x19, 0x98,
0xdd, 0xcd, 0xc5, 0xc8, 0x53, 0x3c, 0x79, 0x7f },
.valid = true
},
{
.private = { 1 },
.public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
.result = { 0xb3, 0x2d, 0x13, 0x62, 0xc2, 0x48, 0xd6, 0x2f,
0xe6, 0x26, 0x19, 0xcf, 0xf0, 0x4d, 0xd4, 0x3d,
0xb7, 0x3f, 0xfc, 0x1b, 0x63, 0x08, 0xed, 0xe3,
0x0b, 0x78, 0xd8, 0x73, 0x80, 0xf1, 0xe8, 0x34 },
.valid = true
},
{
.private = { 0xa5, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d,
0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd,
0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18,
0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0xc4 },
.public = { 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb,
0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c,
0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b,
0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c },
.result = { 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90,
0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f,
0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7,
0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 },
.valid = true
},
{
.private = { 1, 2, 3, 4 },
.public = { 0 },
.result = { 0 },
.valid = false
},
{
.private = { 2, 4, 6, 8 },
.public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae,
0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a,
0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd,
0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8 },
.result = { 0 },
.valid = false
},
{
.private = { 0xff, 0xff, 0xff, 0xff, 0x0a, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
.public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x0a, 0x00, 0xfb, 0x9f },
.result = { 0x77, 0x52, 0xb6, 0x18, 0xc1, 0x2d, 0x48, 0xd2,
0xc6, 0x93, 0x46, 0x83, 0x81, 0x7c, 0xc6, 0x57,
0xf3, 0x31, 0x03, 0x19, 0x49, 0x48, 0x20, 0x05,
0x42, 0x2b, 0x4e, 0xae, 0x8d, 0x1d, 0x43, 0x23 },
.valid = true
},
{
.private = { 0x8e, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x06 },
.result = { 0x5a, 0xdf, 0xaa, 0x25, 0x86, 0x8e, 0x32, 0x3d,
0xae, 0x49, 0x62, 0xc1, 0x01, 0x5c, 0xb3, 0x12,
0xe1, 0xc5, 0xc7, 0x9e, 0x95, 0x3f, 0x03, 0x99,
0xb0, 0xba, 0x16, 0x22, 0xf3, 0xb6, 0xf7, 0x0c },
.valid = true
},
/* wycheproof - normal case */
{
.private = { 0x48, 0x52, 0x83, 0x4d, 0x9d, 0x6b, 0x77, 0xda,
0xde, 0xab, 0xaa, 0xf2, 0xe1, 0x1d, 0xca, 0x66,
0xd1, 0x9f, 0xe7, 0x49, 0x93, 0xa7, 0xbe, 0xc3,
0x6c, 0x6e, 0x16, 0xa0, 0x98, 0x3f, 0xea, 0xba },
.public = { 0x9c, 0x64, 0x7d, 0x9a, 0xe5, 0x89, 0xb9, 0xf5,
0x8f, 0xdc, 0x3c, 0xa4, 0x94, 0x7e, 0xfb, 0xc9,
0x15, 0xc4, 0xb2, 0xe0, 0x8e, 0x74, 0x4a, 0x0e,
0xdf, 0x46, 0x9d, 0xac, 0x59, 0xc8, 0xf8, 0x5a },
.result = { 0x87, 0xb7, 0xf2, 0x12, 0xb6, 0x27, 0xf7, 0xa5,
0x4c, 0xa5, 0xe0, 0xbc, 0xda, 0xdd, 0xd5, 0x38,
0x9d, 0x9d, 0xe6, 0x15, 0x6c, 0xdb, 0xcf, 0x8e,
0xbe, 0x14, 0xff, 0xbc, 0xfb, 0x43, 0x65, 0x51 },
.valid = true
},
/* wycheproof - public key on twist */
{
.private = { 0x58, 0x8c, 0x06, 0x1a, 0x50, 0x80, 0x4a, 0xc4,
0x88, 0xad, 0x77, 0x4a, 0xc7, 0x16, 0xc3, 0xf5,
0xba, 0x71, 0x4b, 0x27, 0x12, 0xe0, 0x48, 0x49,
0x13, 0x79, 0xa5, 0x00, 0x21, 0x19, 0x98, 0xa8 },
.public = { 0x63, 0xaa, 0x40, 0xc6, 0xe3, 0x83, 0x46, 0xc5,
0xca, 0xf2, 0x3a, 0x6d, 0xf0, 0xa5, 0xe6, 0xc8,
0x08, 0x89, 0xa0, 0x86, 0x47, 0xe5, 0x51, 0xb3,
0x56, 0x34, 0x49, 0xbe, 0xfc, 0xfc, 0x97, 0x33 },
.result = { 0xb1, 0xa7, 0x07, 0x51, 0x94, 0x95, 0xff, 0xff,
0xb2, 0x98, 0xff, 0x94, 0x17, 0x16, 0xb0, 0x6d,
0xfa, 0xb8, 0x7c, 0xf8, 0xd9, 0x11, 0x23, 0xfe,
0x2b, 0xe9, 0xa2, 0x33, 0xdd, 0xa2, 0x22, 0x12 },
.valid = true
},
/* wycheproof - public key on twist */
{
.private = { 0xb0, 0x5b, 0xfd, 0x32, 0xe5, 0x53, 0x25, 0xd9,
0xfd, 0x64, 0x8c, 0xb3, 0x02, 0x84, 0x80, 0x39,
0x00, 0x0b, 0x39, 0x0e, 0x44, 0xd5, 0x21, 0xe5,
0x8a, 0xab, 0x3b, 0x29, 0xa6, 0x96, 0x0b, 0xa8 },
.public = { 0x0f, 0x83, 0xc3, 0x6f, 0xde, 0xd9, 0xd3, 0x2f,
0xad, 0xf4, 0xef, 0xa3, 0xae, 0x93, 0xa9, 0x0b,
0xb5, 0xcf, 0xa6, 0x68, 0x93, 0xbc, 0x41, 0x2c,
0x43, 0xfa, 0x72, 0x87, 0xdb, 0xb9, 0x97, 0x79 },
.result = { 0x67, 0xdd, 0x4a, 0x6e, 0x16, 0x55, 0x33, 0x53,
0x4c, 0x0e, 0x3f, 0x17, 0x2e, 0x4a, 0xb8, 0x57,
0x6b, 0xca, 0x92, 0x3a, 0x5f, 0x07, 0xb2, 0xc0,
0x69, 0xb4, 0xc3, 0x10, 0xff, 0x2e, 0x93, 0x5b },
.valid = true
},
/* wycheproof - public key on twist */
{
.private = { 0x70, 0xe3, 0x4b, 0xcb, 0xe1, 0xf4, 0x7f, 0xbc,
0x0f, 0xdd, 0xfd, 0x7c, 0x1e, 0x1a, 0xa5, 0x3d,
0x57, 0xbf, 0xe0, 0xf6, 0x6d, 0x24, 0x30, 0x67,
0xb4, 0x24, 0xbb, 0x62, 0x10, 0xbe, 0xd1, 0x9c },
.public = { 0x0b, 0x82, 0x11, 0xa2, 0xb6, 0x04, 0x90, 0x97,
0xf6, 0x87, 0x1c, 0x6c, 0x05, 0x2d, 0x3c, 0x5f,
0xc1, 0xba, 0x17, 0xda, 0x9e, 0x32, 0xae, 0x45,
0x84, 0x03, 0xb0, 0x5b, 0xb2, 0x83, 0x09, 0x2a },
.result = { 0x4a, 0x06, 0x38, 0xcf, 0xaa, 0x9e, 0xf1, 0x93,
0x3b, 0x47, 0xf8, 0x93, 0x92, 0x96, 0xa6, 0xb2,
0x5b, 0xe5, 0x41, 0xef, 0x7f, 0x70, 0xe8, 0x44,
0xc0, 0xbc, 0xc0, 0x0b, 0x13, 0x4d, 0xe6, 0x4a },
.valid = true
},
/* wycheproof - public key on twist */
{
.private = { 0x68, 0xc1, 0xf3, 0xa6, 0x53, 0xa4, 0xcd, 0xb1,
0xd3, 0x7b, 0xba, 0x94, 0x73, 0x8f, 0x8b, 0x95,
0x7a, 0x57, 0xbe, 0xb2, 0x4d, 0x64, 0x6e, 0x99,
0x4d, 0xc2, 0x9a, 0x27, 0x6a, 0xad, 0x45, 0x8d },
.public = { 0x34, 0x3a, 0xc2, 0x0a, 0x3b, 0x9c, 0x6a, 0x27,
0xb1, 0x00, 0x81, 0x76, 0x50, 0x9a, 0xd3, 0x07,
0x35, 0x85, 0x6e, 0xc1, 0xc8, 0xd8, 0xfc, 0xae,
0x13, 0x91, 0x2d, 0x08, 0xd1, 0x52, 0xf4, 0x6c },
.result = { 0x39, 0x94, 0x91, 0xfc, 0xe8, 0xdf, 0xab, 0x73,
0xb4, 0xf9, 0xf6, 0x11, 0xde, 0x8e, 0xa0, 0xb2,
0x7b, 0x28, 0xf8, 0x59, 0x94, 0x25, 0x0b, 0x0f,
0x47, 0x5d, 0x58, 0x5d, 0x04, 0x2a, 0xc2, 0x07 },
.valid = true
},
/* wycheproof - public key on twist */
{
.private = { 0xd8, 0x77, 0xb2, 0x6d, 0x06, 0xdf, 0xf9, 0xd9,
0xf7, 0xfd, 0x4c, 0x5b, 0x37, 0x69, 0xf8, 0xcd,
0xd5, 0xb3, 0x05, 0x16, 0xa5, 0xab, 0x80, 0x6b,
0xe3, 0x24, 0xff, 0x3e, 0xb6, 0x9e, 0xa0, 0xb2 },
.public = { 0xfa, 0x69, 0x5f, 0xc7, 0xbe, 0x8d, 0x1b, 0xe5,
0xbf, 0x70, 0x48, 0x98, 0xf3, 0x88, 0xc4, 0x52,
0xba, 0xfd, 0xd3, 0xb8, 0xea, 0xe8, 0x05, 0xf8,
0x68, 0x1a, 0x8d, 0x15, 0xc2, 0xd4, 0xe1, 0x42 },
.result = { 0x2c, 0x4f, 0xe1, 0x1d, 0x49, 0x0a, 0x53, 0x86,
0x17, 0x76, 0xb1, 0x3b, 0x43, 0x54, 0xab, 0xd4,
0xcf, 0x5a, 0x97, 0x69, 0x9d, 0xb6, 0xe6, 0xc6,
0x8c, 0x16, 0x26, 0xd0, 0x76, 0x62, 0xf7, 0x58 },
.valid = true
},
/* wycheproof - public key = 0 */
{
.private = { 0x20, 0x74, 0x94, 0x03, 0x8f, 0x2b, 0xb8, 0x11,
0xd4, 0x78, 0x05, 0xbc, 0xdf, 0x04, 0xa2, 0xac,
0x58, 0x5a, 0xda, 0x7f, 0x2f, 0x23, 0x38, 0x9b,
0xfd, 0x46, 0x58, 0xf9, 0xdd, 0xd4, 0xde, 0xbc },
.public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.valid = false
},
/* wycheproof - public key = 1 */
{
.private = { 0x20, 0x2e, 0x89, 0x72, 0xb6, 0x1c, 0x7e, 0x61,
0x93, 0x0e, 0xb9, 0x45, 0x0b, 0x50, 0x70, 0xea,
0xe1, 0xc6, 0x70, 0x47, 0x56, 0x85, 0x54, 0x1f,
0x04, 0x76, 0x21, 0x7e, 0x48, 0x18, 0xcf, 0xab },
.public = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.valid = false
},
/* wycheproof - edge case on twist */
{
.private = { 0x38, 0xdd, 0xe9, 0xf3, 0xe7, 0xb7, 0x99, 0x04,
0x5f, 0x9a, 0xc3, 0x79, 0x3d, 0x4a, 0x92, 0x77,
0xda, 0xde, 0xad, 0xc4, 0x1b, 0xec, 0x02, 0x90,
0xf8, 0x1f, 0x74, 0x4f, 0x73, 0x77, 0x5f, 0x84 },
.public = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.result = { 0x9a, 0x2c, 0xfe, 0x84, 0xff, 0x9c, 0x4a, 0x97,
0x39, 0x62, 0x5c, 0xae, 0x4a, 0x3b, 0x82, 0xa9,
0x06, 0x87, 0x7a, 0x44, 0x19, 0x46, 0xf8, 0xd7,
0xb3, 0xd7, 0x95, 0xfe, 0x8f, 0x5d, 0x16, 0x39 },
.valid = true
},
/* wycheproof - edge case on twist */
{
.private = { 0x98, 0x57, 0xa9, 0x14, 0xe3, 0xc2, 0x90, 0x36,
0xfd, 0x9a, 0x44, 0x2b, 0xa5, 0x26, 0xb5, 0xcd,
0xcd, 0xf2, 0x82, 0x16, 0x15, 0x3e, 0x63, 0x6c,
0x10, 0x67, 0x7a, 0xca, 0xb6, 0xbd, 0x6a, 0xa5 },
.public = { 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.result = { 0x4d, 0xa4, 0xe0, 0xaa, 0x07, 0x2c, 0x23, 0x2e,
0xe2, 0xf0, 0xfa, 0x4e, 0x51, 0x9a, 0xe5, 0x0b,
0x52, 0xc1, 0xed, 0xd0, 0x8a, 0x53, 0x4d, 0x4e,
0xf3, 0x46, 0xc2, 0xe1, 0x06, 0xd2, 0x1d, 0x60 },
.valid = true
},
/* wycheproof - edge case on twist */
{
.private = { 0x48, 0xe2, 0x13, 0x0d, 0x72, 0x33, 0x05, 0xed,
0x05, 0xe6, 0xe5, 0x89, 0x4d, 0x39, 0x8a, 0x5e,
0x33, 0x36, 0x7a, 0x8c, 0x6a, 0xac, 0x8f, 0xcd,
0xf0, 0xa8, 0x8e, 0x4b, 0x42, 0x82, 0x0d, 0xb7 },
.public = { 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0xf8, 0xff,
0xff, 0x1f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, 0x07, 0x00,
0x00, 0xf0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00 },
.result = { 0x9e, 0xd1, 0x0c, 0x53, 0x74, 0x7f, 0x64, 0x7f,
0x82, 0xf4, 0x51, 0x25, 0xd3, 0xde, 0x15, 0xa1,
0xe6, 0xb8, 0x24, 0x49, 0x6a, 0xb4, 0x04, 0x10,
0xff, 0xcc, 0x3c, 0xfe, 0x95, 0x76, 0x0f, 0x3b },
.valid = true
},
/* wycheproof - edge case on twist */
{
.private = { 0x28, 0xf4, 0x10, 0x11, 0x69, 0x18, 0x51, 0xb3,
0xa6, 0x2b, 0x64, 0x15, 0x53, 0xb3, 0x0d, 0x0d,
0xfd, 0xdc, 0xb8, 0xff, 0xfc, 0xf5, 0x37, 0x00,
0xa7, 0xbe, 0x2f, 0x6a, 0x87, 0x2e, 0x9f, 0xb0 },
.public = { 0x00, 0x00, 0x00, 0xfc, 0xff, 0xff, 0x07, 0x00,
0x00, 0xe0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0xf8, 0xff,
0xff, 0x0f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0x7f },
.result = { 0xcf, 0x72, 0xb4, 0xaa, 0x6a, 0xa1, 0xc9, 0xf8,
0x94, 0xf4, 0x16, 0x5b, 0x86, 0x10, 0x9a, 0xa4,
0x68, 0x51, 0x76, 0x48, 0xe1, 0xf0, 0xcc, 0x70,
0xe1, 0xab, 0x08, 0x46, 0x01, 0x76, 0x50, 0x6b },
.valid = true
},
/* wycheproof - edge case on twist */
{
.private = { 0x18, 0xa9, 0x3b, 0x64, 0x99, 0xb9, 0xf6, 0xb3,
0x22, 0x5c, 0xa0, 0x2f, 0xef, 0x41, 0x0e, 0x0a,
0xde, 0xc2, 0x35, 0x32, 0x32, 0x1d, 0x2d, 0x8e,
0xf1, 0xa6, 0xd6, 0x02, 0xa8, 0xc6, 0x5b, 0x83 },
.public = { 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x7f },
.result = { 0x5d, 0x50, 0xb6, 0x28, 0x36, 0xbb, 0x69, 0x57,
0x94, 0x10, 0x38, 0x6c, 0xf7, 0xbb, 0x81, 0x1c,
0x14, 0xbf, 0x85, 0xb1, 0xc7, 0xb1, 0x7e, 0x59,
0x24, 0xc7, 0xff, 0xea, 0x91, 0xef, 0x9e, 0x12 },
.valid = true
},
/* wycheproof - edge case on twist */
{
.private = { 0xc0, 0x1d, 0x13, 0x05, 0xa1, 0x33, 0x8a, 0x1f,
0xca, 0xc2, 0xba, 0x7e, 0x2e, 0x03, 0x2b, 0x42,
0x7e, 0x0b, 0x04, 0x90, 0x31, 0x65, 0xac, 0xa9,
0x57, 0xd8, 0xd0, 0x55, 0x3d, 0x87, 0x17, 0xb0 },
.public = { 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
.result = { 0x19, 0x23, 0x0e, 0xb1, 0x48, 0xd5, 0xd6, 0x7c,
0x3c, 0x22, 0xab, 0x1d, 0xae, 0xff, 0x80, 0xa5,
0x7e, 0xae, 0x42, 0x65, 0xce, 0x28, 0x72, 0x65,
0x7b, 0x2c, 0x80, 0x99, 0xfc, 0x69, 0x8e, 0x50 },
.valid = true
},
/* wycheproof - edge case for public key */
{
.private = { 0x38, 0x6f, 0x7f, 0x16, 0xc5, 0x07, 0x31, 0xd6,
0x4f, 0x82, 0xe6, 0xa1, 0x70, 0xb1, 0x42, 0xa4,
0xe3, 0x4f, 0x31, 0xfd, 0x77, 0x68, 0xfc, 0xb8,
0x90, 0x29, 0x25, 0xe7, 0xd1, 0xe2, 0x1a, 0xbe },
.public = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.result = { 0x0f, 0xca, 0xb5, 0xd8, 0x42, 0xa0, 0x78, 0xd7,
0xa7, 0x1f, 0xc5, 0x9b, 0x57, 0xbf, 0xb4, 0xca,
0x0b, 0xe6, 0x87, 0x3b, 0x49, 0xdc, 0xdb, 0x9f,
0x44, 0xe1, 0x4a, 0xe8, 0xfb, 0xdf, 0xa5, 0x42 },
.valid = true
},
/* wycheproof - edge case for public key */
{
.private = { 0xe0, 0x23, 0xa2, 0x89, 0xbd, 0x5e, 0x90, 0xfa,
0x28, 0x04, 0xdd, 0xc0, 0x19, 0xa0, 0x5e, 0xf3,
0xe7, 0x9d, 0x43, 0x4b, 0xb6, 0xea, 0x2f, 0x52,
0x2e, 0xcb, 0x64, 0x3a, 0x75, 0x29, 0x6e, 0x95 },
.public = { 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 },
.result = { 0x54, 0xce, 0x8f, 0x22, 0x75, 0xc0, 0x77, 0xe3,
0xb1, 0x30, 0x6a, 0x39, 0x39, 0xc5, 0xe0, 0x3e,
0xef, 0x6b, 0xbb, 0x88, 0x06, 0x05, 0x44, 0x75,
0x8d, 0x9f, 0xef, 0x59, 0xb0, 0xbc, 0x3e, 0x4f },
.valid = true
},
/* wycheproof - edge case for public key */
{
.private = { 0x68, 0xf0, 0x10, 0xd6, 0x2e, 0xe8, 0xd9, 0x26,
0x05, 0x3a, 0x36, 0x1c, 0x3a, 0x75, 0xc6, 0xea,
0x4e, 0xbd, 0xc8, 0x60, 0x6a, 0xb2, 0x85, 0x00,
0x3a, 0x6f, 0x8f, 0x40, 0x76, 0xb0, 0x1e, 0x83 },
.public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 },
.result = { 0xf1, 0x36, 0x77, 0x5c, 0x5b, 0xeb, 0x0a, 0xf8,
0x11, 0x0a, 0xf1, 0x0b, 0x20, 0x37, 0x23, 0x32,
0x04, 0x3c, 0xab, 0x75, 0x24, 0x19, 0x67, 0x87,
0x75, 0xa2, 0x23, 0xdf, 0x57, 0xc9, 0xd3, 0x0d },
.valid = true
},
/* wycheproof - edge case for public key */
{
.private = { 0x58, 0xeb, 0xcb, 0x35, 0xb0, 0xf8, 0x84, 0x5c,
0xaf, 0x1e, 0xc6, 0x30, 0xf9, 0x65, 0x76, 0xb6,
0x2c, 0x4b, 0x7b, 0x6c, 0x36, 0xb2, 0x9d, 0xeb,
0x2c, 0xb0, 0x08, 0x46, 0x51, 0x75, 0x5c, 0x96 },
.public = { 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfb, 0xff,
0xff, 0xdf, 0xff, 0xff, 0xdf, 0xff, 0xff, 0xff,
0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf7, 0xff,
0xff, 0xf7, 0xff, 0xff, 0xbf, 0xff, 0xff, 0x3f },
.result = { 0xbf, 0x9a, 0xff, 0xd0, 0x6b, 0x84, 0x40, 0x85,
0x58, 0x64, 0x60, 0x96, 0x2e, 0xf2, 0x14, 0x6f,
0xf3, 0xd4, 0x53, 0x3d, 0x94, 0x44, 0xaa, 0xb0,
0x06, 0xeb, 0x88, 0xcc, 0x30, 0x54, 0x40, 0x7d },
.valid = true
},
/* wycheproof - edge case for public key */
{
.private = { 0x18, 0x8c, 0x4b, 0xc5, 0xb9, 0xc4, 0x4b, 0x38,
0xbb, 0x65, 0x8b, 0x9b, 0x2a, 0xe8, 0x2d, 0x5b,
0x01, 0x01, 0x5e, 0x09, 0x31, 0x84, 0xb1, 0x7c,
0xb7, 0x86, 0x35, 0x03, 0xa7, 0x83, 0xe1, 0xbb },
.public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
.result = { 0xd4, 0x80, 0xde, 0x04, 0xf6, 0x99, 0xcb, 0x3b,
0xe0, 0x68, 0x4a, 0x9c, 0xc2, 0xe3, 0x12, 0x81,
0xea, 0x0b, 0xc5, 0xa9, 0xdc, 0xc1, 0x57, 0xd3,
0xd2, 0x01, 0x58, 0xd4, 0x6c, 0xa5, 0x24, 0x6d },
.valid = true
},
/* wycheproof - edge case for public key */
{
.private = { 0xe0, 0x6c, 0x11, 0xbb, 0x2e, 0x13, 0xce, 0x3d,
0xc7, 0x67, 0x3f, 0x67, 0xf5, 0x48, 0x22, 0x42,
0x90, 0x94, 0x23, 0xa9, 0xae, 0x95, 0xee, 0x98,
0x6a, 0x98, 0x8d, 0x98, 0xfa, 0xee, 0x23, 0xa2 },
.public = { 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f },
.result = { 0x4c, 0x44, 0x01, 0xcc, 0xe6, 0xb5, 0x1e, 0x4c,
0xb1, 0x8f, 0x27, 0x90, 0x24, 0x6c, 0x9b, 0xf9,
0x14, 0xdb, 0x66, 0x77, 0x50, 0xa1, 0xcb, 0x89,
0x06, 0x90, 0x92, 0xaf, 0x07, 0x29, 0x22, 0x76 },
.valid = true
},
/* wycheproof - edge case for public key */
{
.private = { 0xc0, 0x65, 0x8c, 0x46, 0xdd, 0xe1, 0x81, 0x29,
0x29, 0x38, 0x77, 0x53, 0x5b, 0x11, 0x62, 0xb6,
0xf9, 0xf5, 0x41, 0x4a, 0x23, 0xcf, 0x4d, 0x2c,
0xbc, 0x14, 0x0a, 0x4d, 0x99, 0xda, 0x2b, 0x8f },
.public = { 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
.result = { 0x57, 0x8b, 0xa8, 0xcc, 0x2d, 0xbd, 0xc5, 0x75,
0xaf, 0xcf, 0x9d, 0xf2, 0xb3, 0xee, 0x61, 0x89,
0xf5, 0x33, 0x7d, 0x68, 0x54, 0xc7, 0x9b, 0x4c,
0xe1, 0x65, 0xea, 0x12, 0x29, 0x3b, 0x3a, 0x0f },
.valid = true
},
/* wycheproof - public key with low order */
{
.private = { 0x10, 0x25, 0x5c, 0x92, 0x30, 0xa9, 0x7a, 0x30,
0xa4, 0x58, 0xca, 0x28, 0x4a, 0x62, 0x96, 0x69,
0x29, 0x3a, 0x31, 0x89, 0x0c, 0xda, 0x9d, 0x14,
0x7f, 0xeb, 0xc7, 0xd1, 0xe2, 0x2d, 0x6b, 0xb1 },
.public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae,
0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a,
0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd,
0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x00 },
.result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.valid = false
},
/* wycheproof - public key with low order */
{
.private = { 0x78, 0xf1, 0xe8, 0xed, 0xf1, 0x44, 0x81, 0xb3,
0x89, 0x44, 0x8d, 0xac, 0x8f, 0x59, 0xc7, 0x0b,
0x03, 0x8e, 0x7c, 0xf9, 0x2e, 0xf2, 0xc7, 0xef,
0xf5, 0x7a, 0x72, 0x46, 0x6e, 0x11, 0x52, 0x96 },
.public = { 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24,
0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b,
0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86,
0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0x57 },
.result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.valid = false
},
/* wycheproof - public key with low order */
{
.private = { 0xa0, 0xa0, 0x5a, 0x3e, 0x8f, 0x9f, 0x44, 0x20,
0x4d, 0x5f, 0x80, 0x59, 0xa9, 0x4a, 0xc7, 0xdf,
0xc3, 0x9a, 0x49, 0xac, 0x01, 0x6d, 0xd7, 0x43,
0xdb, 0xfa, 0x43, 0xc5, 0xd6, 0x71, 0xfd, 0x88 },
.public = { 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
.result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.valid = false
},
/* wycheproof - public key with low order */
{
.private = { 0xd0, 0xdb, 0xb3, 0xed, 0x19, 0x06, 0x66, 0x3f,
0x15, 0x42, 0x0a, 0xf3, 0x1f, 0x4e, 0xaf, 0x65,
0x09, 0xd9, 0xa9, 0x94, 0x97, 0x23, 0x50, 0x06,
0x05, 0xad, 0x7c, 0x1c, 0x6e, 0x74, 0x50, 0xa9 },
.public = { 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
.result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.valid = false
},
/* wycheproof - public key with low order */
{
.private = { 0xc0, 0xb1, 0xd0, 0xeb, 0x22, 0xb2, 0x44, 0xfe,
0x32, 0x91, 0x14, 0x00, 0x72, 0xcd, 0xd9, 0xd9,
0x89, 0xb5, 0xf0, 0xec, 0xd9, 0x6c, 0x10, 0x0f,
0xeb, 0x5b, 0xca, 0x24, 0x1c, 0x1d, 0x9f, 0x8f },
.public = { 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
.result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.valid = false
},
/* wycheproof - public key with low order */
{
.private = { 0x48, 0x0b, 0xf4, 0x5f, 0x59, 0x49, 0x42, 0xa8,
0xbc, 0x0f, 0x33, 0x53, 0xc6, 0xe8, 0xb8, 0x85,
0x3d, 0x77, 0xf3, 0x51, 0xf1, 0xc2, 0xca, 0x6c,
0x2d, 0x1a, 0xbf, 0x8a, 0x00, 0xb4, 0x22, 0x9c },
.public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
.result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.valid = false
},
/* wycheproof - public key with low order */
{
.private = { 0x30, 0xf9, 0x93, 0xfc, 0xf8, 0x51, 0x4f, 0xc8,
0x9b, 0xd8, 0xdb, 0x14, 0xcd, 0x43, 0xba, 0x0d,
0x4b, 0x25, 0x30, 0xe7, 0x3c, 0x42, 0x76, 0xa0,
0x5e, 0x1b, 0x14, 0x5d, 0x42, 0x0c, 0xed, 0xb4 },
.public = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
.result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.valid = false
},
/* wycheproof - public key with low order */
{
.private = { 0xc0, 0x49, 0x74, 0xb7, 0x58, 0x38, 0x0e, 0x2a,
0x5b, 0x5d, 0xf6, 0xeb, 0x09, 0xbb, 0x2f, 0x6b,
0x34, 0x34, 0xf9, 0x82, 0x72, 0x2a, 0x8e, 0x67,
0x6d, 0x3d, 0xa2, 0x51, 0xd1, 0xb3, 0xde, 0x83 },
.public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae,
0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a,
0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd,
0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x80 },
.result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.valid = false
},
/* wycheproof - public key with low order */
{
.private = { 0x50, 0x2a, 0x31, 0x37, 0x3d, 0xb3, 0x24, 0x46,
0x84, 0x2f, 0xe5, 0xad, 0xd3, 0xe0, 0x24, 0x02,
0x2e, 0xa5, 0x4f, 0x27, 0x41, 0x82, 0xaf, 0xc3,
0xd9, 0xf1, 0xbb, 0x3d, 0x39, 0x53, 0x4e, 0xb5 },
.public = { 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24,
0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b,
0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86,
0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0xd7 },
.result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.valid = false
},
/* wycheproof - public key with low order */
{
.private = { 0x90, 0xfa, 0x64, 0x17, 0xb0, 0xe3, 0x70, 0x30,
0xfd, 0x6e, 0x43, 0xef, 0xf2, 0xab, 0xae, 0xf1,
0x4c, 0x67, 0x93, 0x11, 0x7a, 0x03, 0x9c, 0xf6,
0x21, 0x31, 0x8b, 0xa9, 0x0f, 0x4e, 0x98, 0xbe },
.public = { 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
.result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.valid = false
},
/* wycheproof - public key with low order */
{
.private = { 0x78, 0xad, 0x3f, 0x26, 0x02, 0x7f, 0x1c, 0x9f,
0xdd, 0x97, 0x5a, 0x16, 0x13, 0xb9, 0x47, 0x77,
0x9b, 0xad, 0x2c, 0xf2, 0xb7, 0x41, 0xad, 0xe0,
0x18, 0x40, 0x88, 0x5a, 0x30, 0xbb, 0x97, 0x9c },
.public = { 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
.result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.valid = false
},
/* wycheproof - public key with low order */
{
.private = { 0x98, 0xe2, 0x3d, 0xe7, 0xb1, 0xe0, 0x92, 0x6e,
0xd9, 0xc8, 0x7e, 0x7b, 0x14, 0xba, 0xf5, 0x5f,
0x49, 0x7a, 0x1d, 0x70, 0x96, 0xf9, 0x39, 0x77,
0x68, 0x0e, 0x44, 0xdc, 0x1c, 0x7b, 0x7b, 0x8b },
.public = { 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
.result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.valid = false
},
/* wycheproof - public key >= p */
{
.private = { 0xf0, 0x1e, 0x48, 0xda, 0xfa, 0xc9, 0xd7, 0xbc,
0xf5, 0x89, 0xcb, 0xc3, 0x82, 0xc8, 0x78, 0xd1,
0x8b, 0xda, 0x35, 0x50, 0x58, 0x9f, 0xfb, 0x5d,
0x50, 0xb5, 0x23, 0xbe, 0xbe, 0x32, 0x9d, 0xae },
.public = { 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
.result = { 0xbd, 0x36, 0xa0, 0x79, 0x0e, 0xb8, 0x83, 0x09,
0x8c, 0x98, 0x8b, 0x21, 0x78, 0x67, 0x73, 0xde,
0x0b, 0x3a, 0x4d, 0xf1, 0x62, 0x28, 0x2c, 0xf1,
0x10, 0xde, 0x18, 0xdd, 0x48, 0x4c, 0xe7, 0x4b },
.valid = true
},
/* wycheproof - public key >= p */
{
.private = { 0x28, 0x87, 0x96, 0xbc, 0x5a, 0xff, 0x4b, 0x81,
0xa3, 0x75, 0x01, 0x75, 0x7b, 0xc0, 0x75, 0x3a,
0x3c, 0x21, 0x96, 0x47, 0x90, 0xd3, 0x86, 0x99,
0x30, 0x8d, 0xeb, 0xc1, 0x7a, 0x6e, 0xaf, 0x8d },
.public = { 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
.result = { 0xb4, 0xe0, 0xdd, 0x76, 0xda, 0x7b, 0x07, 0x17,
0x28, 0xb6, 0x1f, 0x85, 0x67, 0x71, 0xaa, 0x35,
0x6e, 0x57, 0xed, 0xa7, 0x8a, 0x5b, 0x16, 0x55,
0xcc, 0x38, 0x20, 0xfb, 0x5f, 0x85, 0x4c, 0x5c },
.valid = true
},
/* wycheproof - public key >= p */
{
.private = { 0x98, 0xdf, 0x84, 0x5f, 0x66, 0x51, 0xbf, 0x11,
0x38, 0x22, 0x1f, 0x11, 0x90, 0x41, 0xf7, 0x2b,
0x6d, 0xbc, 0x3c, 0x4a, 0xce, 0x71, 0x43, 0xd9,
0x9f, 0xd5, 0x5a, 0xd8, 0x67, 0x48, 0x0d, 0xa8 },
.public = { 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
.result = { 0x6f, 0xdf, 0x6c, 0x37, 0x61, 0x1d, 0xbd, 0x53,
0x04, 0xdc, 0x0f, 0x2e, 0xb7, 0xc9, 0x51, 0x7e,
0xb3, 0xc5, 0x0e, 0x12, 0xfd, 0x05, 0x0a, 0xc6,
0xde, 0xc2, 0x70, 0x71, 0xd4, 0xbf, 0xc0, 0x34 },
.valid = true
},
/* wycheproof - public key >= p */
{
.private = { 0xf0, 0x94, 0x98, 0xe4, 0x6f, 0x02, 0xf8, 0x78,
0x82, 0x9e, 0x78, 0xb8, 0x03, 0xd3, 0x16, 0xa2,
0xed, 0x69, 0x5d, 0x04, 0x98, 0xa0, 0x8a, 0xbd,
0xf8, 0x27, 0x69, 0x30, 0xe2, 0x4e, 0xdc, 0xb0 },
.public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
.result = { 0x4c, 0x8f, 0xc4, 0xb1, 0xc6, 0xab, 0x88, 0xfb,
0x21, 0xf1, 0x8f, 0x6d, 0x4c, 0x81, 0x02, 0x40,
0xd4, 0xe9, 0x46, 0x51, 0xba, 0x44, 0xf7, 0xa2,
0xc8, 0x63, 0xce, 0xc7, 0xdc, 0x56, 0x60, 0x2d },
.valid = true
},
/* wycheproof - public key >= p */
{
.private = { 0x18, 0x13, 0xc1, 0x0a, 0x5c, 0x7f, 0x21, 0xf9,
0x6e, 0x17, 0xf2, 0x88, 0xc0, 0xcc, 0x37, 0x60,
0x7c, 0x04, 0xc5, 0xf5, 0xae, 0xa2, 0xdb, 0x13,
0x4f, 0x9e, 0x2f, 0xfc, 0x66, 0xbd, 0x9d, 0xb8 },
.public = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
.result = { 0x1c, 0xd0, 0xb2, 0x82, 0x67, 0xdc, 0x54, 0x1c,
0x64, 0x2d, 0x6d, 0x7d, 0xca, 0x44, 0xa8, 0xb3,
0x8a, 0x63, 0x73, 0x6e, 0xef, 0x5c, 0x4e, 0x65,
0x01, 0xff, 0xbb, 0xb1, 0x78, 0x0c, 0x03, 0x3c },
.valid = true
},
/* wycheproof - public key >= p */
{
.private = { 0x78, 0x57, 0xfb, 0x80, 0x86, 0x53, 0x64, 0x5a,
0x0b, 0xeb, 0x13, 0x8a, 0x64, 0xf5, 0xf4, 0xd7,
0x33, 0xa4, 0x5e, 0xa8, 0x4c, 0x3c, 0xda, 0x11,
0xa9, 0xc0, 0x6f, 0x7e, 0x71, 0x39, 0x14, 0x9e },
.public = { 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
.result = { 0x87, 0x55, 0xbe, 0x01, 0xc6, 0x0a, 0x7e, 0x82,
0x5c, 0xff, 0x3e, 0x0e, 0x78, 0xcb, 0x3a, 0xa4,
0x33, 0x38, 0x61, 0x51, 0x6a, 0xa5, 0x9b, 0x1c,
0x51, 0xa8, 0xb2, 0xa5, 0x43, 0xdf, 0xa8, 0x22 },
.valid = true
},
/* wycheproof - public key >= p */
{
.private = { 0xe0, 0x3a, 0xa8, 0x42, 0xe2, 0xab, 0xc5, 0x6e,
0x81, 0xe8, 0x7b, 0x8b, 0x9f, 0x41, 0x7b, 0x2a,
0x1e, 0x59, 0x13, 0xc7, 0x23, 0xee, 0xd2, 0x8d,
0x75, 0x2f, 0x8d, 0x47, 0xa5, 0x9f, 0x49, 0x8f },
.public = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 },
.result = { 0x54, 0xc9, 0xa1, 0xed, 0x95, 0xe5, 0x46, 0xd2,
0x78, 0x22, 0xa3, 0x60, 0x93, 0x1d, 0xda, 0x60,
0xa1, 0xdf, 0x04, 0x9d, 0xa6, 0xf9, 0x04, 0x25,
0x3c, 0x06, 0x12, 0xbb, 0xdc, 0x08, 0x74, 0x76 },
.valid = true
},
/* wycheproof - public key >= p */
{
.private = { 0xf8, 0xf7, 0x07, 0xb7, 0x99, 0x9b, 0x18, 0xcb,
0x0d, 0x6b, 0x96, 0x12, 0x4f, 0x20, 0x45, 0x97,
0x2c, 0xa2, 0x74, 0xbf, 0xc1, 0x54, 0xad, 0x0c,
0x87, 0x03, 0x8c, 0x24, 0xc6, 0xd0, 0xd4, 0xb2 },
.public = { 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
.result = { 0xcc, 0x1f, 0x40, 0xd7, 0x43, 0xcd, 0xc2, 0x23,
0x0e, 0x10, 0x43, 0xda, 0xba, 0x8b, 0x75, 0xe8,
0x10, 0xf1, 0xfb, 0xab, 0x7f, 0x25, 0x52, 0x69,
0xbd, 0x9e, 0xbb, 0x29, 0xe6, 0xbf, 0x49, 0x4f },
.valid = true
},
/* wycheproof - public key >= p */
{
.private = { 0xa0, 0x34, 0xf6, 0x84, 0xfa, 0x63, 0x1e, 0x1a,
0x34, 0x81, 0x18, 0xc1, 0xce, 0x4c, 0x98, 0x23,
0x1f, 0x2d, 0x9e, 0xec, 0x9b, 0xa5, 0x36, 0x5b,
0x4a, 0x05, 0xd6, 0x9a, 0x78, 0x5b, 0x07, 0x96 },
.public = { 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
.result = { 0x54, 0x99, 0x8e, 0xe4, 0x3a, 0x5b, 0x00, 0x7b,
0xf4, 0x99, 0xf0, 0x78, 0xe7, 0x36, 0x52, 0x44,
0x00, 0xa8, 0xb5, 0xc7, 0xe9, 0xb9, 0xb4, 0x37,
0x71, 0x74, 0x8c, 0x7c, 0xdf, 0x88, 0x04, 0x12 },
.valid = true
},
/* wycheproof - public key >= p */
{
.private = { 0x30, 0xb6, 0xc6, 0xa0, 0xf2, 0xff, 0xa6, 0x80,
0x76, 0x8f, 0x99, 0x2b, 0xa8, 0x9e, 0x15, 0x2d,
0x5b, 0xc9, 0x89, 0x3d, 0x38, 0xc9, 0x11, 0x9b,
0xe4, 0xf7, 0x67, 0xbf, 0xab, 0x6e, 0x0c, 0xa5 },
.public = { 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
.result = { 0xea, 0xd9, 0xb3, 0x8e, 0xfd, 0xd7, 0x23, 0x63,
0x79, 0x34, 0xe5, 0x5a, 0xb7, 0x17, 0xa7, 0xae,
0x09, 0xeb, 0x86, 0xa2, 0x1d, 0xc3, 0x6a, 0x3f,
0xee, 0xb8, 0x8b, 0x75, 0x9e, 0x39, 0x1e, 0x09 },
.valid = true
},
/* wycheproof - public key >= p */
{
.private = { 0x90, 0x1b, 0x9d, 0xcf, 0x88, 0x1e, 0x01, 0xe0,
0x27, 0x57, 0x50, 0x35, 0xd4, 0x0b, 0x43, 0xbd,
0xc1, 0xc5, 0x24, 0x2e, 0x03, 0x08, 0x47, 0x49,
0x5b, 0x0c, 0x72, 0x86, 0x46, 0x9b, 0x65, 0x91 },
.public = { 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
.result = { 0x60, 0x2f, 0xf4, 0x07, 0x89, 0xb5, 0x4b, 0x41,
0x80, 0x59, 0x15, 0xfe, 0x2a, 0x62, 0x21, 0xf0,
0x7a, 0x50, 0xff, 0xc2, 0xc3, 0xfc, 0x94, 0xcf,
0x61, 0xf1, 0x3d, 0x79, 0x04, 0xe8, 0x8e, 0x0e },
.valid = true
},
/* wycheproof - public key >= p */
{
.private = { 0x80, 0x46, 0x67, 0x7c, 0x28, 0xfd, 0x82, 0xc9,
0xa1, 0xbd, 0xb7, 0x1a, 0x1a, 0x1a, 0x34, 0xfa,
0xba, 0x12, 0x25, 0xe2, 0x50, 0x7f, 0xe3, 0xf5,
0x4d, 0x10, 0xbd, 0x5b, 0x0d, 0x86, 0x5f, 0x8e },
.public = { 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
.result = { 0xe0, 0x0a, 0xe8, 0xb1, 0x43, 0x47, 0x12, 0x47,
0xba, 0x24, 0xf1, 0x2c, 0x88, 0x55, 0x36, 0xc3,
0xcb, 0x98, 0x1b, 0x58, 0xe1, 0xe5, 0x6b, 0x2b,
0xaf, 0x35, 0xc1, 0x2a, 0xe1, 0xf7, 0x9c, 0x26 },
.valid = true
},
/* wycheproof - public key >= p */
{
.private = { 0x60, 0x2f, 0x7e, 0x2f, 0x68, 0xa8, 0x46, 0xb8,
0x2c, 0xc2, 0x69, 0xb1, 0xd4, 0x8e, 0x93, 0x98,
0x86, 0xae, 0x54, 0xfd, 0x63, 0x6c, 0x1f, 0xe0,
0x74, 0xd7, 0x10, 0x12, 0x7d, 0x47, 0x24, 0x91 },
.public = { 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
.result = { 0x98, 0xcb, 0x9b, 0x50, 0xdd, 0x3f, 0xc2, 0xb0,
0xd4, 0xf2, 0xd2, 0xbf, 0x7c, 0x5c, 0xfd, 0xd1,
0x0c, 0x8f, 0xcd, 0x31, 0xfc, 0x40, 0xaf, 0x1a,
0xd4, 0x4f, 0x47, 0xc1, 0x31, 0x37, 0x63, 0x62 },
.valid = true
},
/* wycheproof - public key >= p */
{
.private = { 0x60, 0x88, 0x7b, 0x3d, 0xc7, 0x24, 0x43, 0x02,
0x6e, 0xbe, 0xdb, 0xbb, 0xb7, 0x06, 0x65, 0xf4,
0x2b, 0x87, 0xad, 0xd1, 0x44, 0x0e, 0x77, 0x68,
0xfb, 0xd7, 0xe8, 0xe2, 0xce, 0x5f, 0x63, 0x9d },
.public = { 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
.result = { 0x38, 0xd6, 0x30, 0x4c, 0x4a, 0x7e, 0x6d, 0x9f,
0x79, 0x59, 0x33, 0x4f, 0xb5, 0x24, 0x5b, 0xd2,
0xc7, 0x54, 0x52, 0x5d, 0x4c, 0x91, 0xdb, 0x95,
0x02, 0x06, 0x92, 0x62, 0x34, 0xc1, 0xf6, 0x33 },
.valid = true
},
/* wycheproof - public key >= p */
{
.private = { 0x78, 0xd3, 0x1d, 0xfa, 0x85, 0x44, 0x97, 0xd7,
0x2d, 0x8d, 0xef, 0x8a, 0x1b, 0x7f, 0xb0, 0x06,
0xce, 0xc2, 0xd8, 0xc4, 0x92, 0x46, 0x47, 0xc9,
0x38, 0x14, 0xae, 0x56, 0xfa, 0xed, 0xa4, 0x95 },
.public = { 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
.result = { 0x78, 0x6c, 0xd5, 0x49, 0x96, 0xf0, 0x14, 0xa5,
0xa0, 0x31, 0xec, 0x14, 0xdb, 0x81, 0x2e, 0xd0,
0x83, 0x55, 0x06, 0x1f, 0xdb, 0x5d, 0xe6, 0x80,
0xa8, 0x00, 0xac, 0x52, 0x1f, 0x31, 0x8e, 0x23 },
.valid = true
},
/* wycheproof - public key >= p */
{
.private = { 0xc0, 0x4c, 0x5b, 0xae, 0xfa, 0x83, 0x02, 0xdd,
0xde, 0xd6, 0xa4, 0xbb, 0x95, 0x77, 0x61, 0xb4,
0xeb, 0x97, 0xae, 0xfa, 0x4f, 0xc3, 0xb8, 0x04,
0x30, 0x85, 0xf9, 0x6a, 0x56, 0x59, 0xb3, 0xa5 },
.public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
.result = { 0x29, 0xae, 0x8b, 0xc7, 0x3e, 0x9b, 0x10, 0xa0,
0x8b, 0x4f, 0x68, 0x1c, 0x43, 0xc3, 0xe0, 0xac,
0x1a, 0x17, 0x1d, 0x31, 0xb3, 0x8f, 0x1a, 0x48,
0xef, 0xba, 0x29, 0xae, 0x63, 0x9e, 0xa1, 0x34 },
.valid = true
},
/* wycheproof - RFC 7748 */
{
.private = { 0xa0, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d,
0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd,
0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18,
0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0x44 },
.public = { 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb,
0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c,
0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b,
0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c },
.result = { 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90,
0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f,
0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7,
0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 },
.valid = true
},
/* wycheproof - RFC 7748 */
{
.private = { 0x48, 0x66, 0xe9, 0xd4, 0xd1, 0xb4, 0x67, 0x3c,
0x5a, 0xd2, 0x26, 0x91, 0x95, 0x7d, 0x6a, 0xf5,
0xc1, 0x1b, 0x64, 0x21, 0xe0, 0xea, 0x01, 0xd4,
0x2c, 0xa4, 0x16, 0x9e, 0x79, 0x18, 0xba, 0x4d },
.public = { 0xe5, 0x21, 0x0f, 0x12, 0x78, 0x68, 0x11, 0xd3,
0xf4, 0xb7, 0x95, 0x9d, 0x05, 0x38, 0xae, 0x2c,
0x31, 0xdb, 0xe7, 0x10, 0x6f, 0xc0, 0x3c, 0x3e,
0xfc, 0x4c, 0xd5, 0x49, 0xc7, 0x15, 0xa4, 0x13 },
.result = { 0x95, 0xcb, 0xde, 0x94, 0x76, 0xe8, 0x90, 0x7d,
0x7a, 0xad, 0xe4, 0x5c, 0xb4, 0xb8, 0x73, 0xf8,
0x8b, 0x59, 0x5a, 0x68, 0x79, 0x9f, 0xa1, 0x52,
0xe6, 0xf8, 0xf7, 0x64, 0x7a, 0xac, 0x79, 0x57 },
.valid = true
},
/* wycheproof - edge case for shared secret */
{
.private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
.public = { 0x0a, 0xb4, 0xe7, 0x63, 0x80, 0xd8, 0x4d, 0xde,
0x4f, 0x68, 0x33, 0xc5, 0x8f, 0x2a, 0x9f, 0xb8,
0xf8, 0x3b, 0xb0, 0x16, 0x9b, 0x17, 0x2b, 0xe4,
0xb6, 0xe0, 0x59, 0x28, 0x87, 0x74, 0x1a, 0x36 },
.result = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.valid = true
},
/* wycheproof - edge case for shared secret */
{
.private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
.public = { 0x89, 0xe1, 0x0d, 0x57, 0x01, 0xb4, 0x33, 0x7d,
0x2d, 0x03, 0x21, 0x81, 0x53, 0x8b, 0x10, 0x64,
0xbd, 0x40, 0x84, 0x40, 0x1c, 0xec, 0xa1, 0xfd,
0x12, 0x66, 0x3a, 0x19, 0x59, 0x38, 0x80, 0x00 },
.result = { 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.valid = true
},
/* wycheproof - edge case for shared secret */
{
.private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
.public = { 0x2b, 0x55, 0xd3, 0xaa, 0x4a, 0x8f, 0x80, 0xc8,
0xc0, 0xb2, 0xae, 0x5f, 0x93, 0x3e, 0x85, 0xaf,
0x49, 0xbe, 0xac, 0x36, 0xc2, 0xfa, 0x73, 0x94,
0xba, 0xb7, 0x6c, 0x89, 0x33, 0xf8, 0xf8, 0x1d },
.result = { 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
.valid = true
},
/* wycheproof - edge case for shared secret */
{
.private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
.public = { 0x63, 0xe5, 0xb1, 0xfe, 0x96, 0x01, 0xfe, 0x84,
0x38, 0x5d, 0x88, 0x66, 0xb0, 0x42, 0x12, 0x62,
0xf7, 0x8f, 0xbf, 0xa5, 0xaf, 0xf9, 0x58, 0x5e,
0x62, 0x66, 0x79, 0xb1, 0x85, 0x47, 0xd9, 0x59 },
.result = { 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
.valid = true
},
/* wycheproof - edge case for shared secret */
{
.private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
.public = { 0xe4, 0x28, 0xf3, 0xda, 0xc1, 0x78, 0x09, 0xf8,
0x27, 0xa5, 0x22, 0xce, 0x32, 0x35, 0x50, 0x58,
0xd0, 0x73, 0x69, 0x36, 0x4a, 0xa7, 0x89, 0x02,
0xee, 0x10, 0x13, 0x9b, 0x9f, 0x9d, 0xd6, 0x53 },
.result = { 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
.valid = true
},
/* wycheproof - edge case for shared secret */
{
.private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
.public = { 0xb3, 0xb5, 0x0e, 0x3e, 0xd3, 0xa4, 0x07, 0xb9,
0x5d, 0xe9, 0x42, 0xef, 0x74, 0x57, 0x5b, 0x5a,
0xb8, 0xa1, 0x0c, 0x09, 0xee, 0x10, 0x35, 0x44,
0xd6, 0x0b, 0xdf, 0xed, 0x81, 0x38, 0xab, 0x2b },
.result = { 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
.valid = true
},
/* wycheproof - edge case for shared secret */
{
.private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
.public = { 0x21, 0x3f, 0xff, 0xe9, 0x3d, 0x5e, 0xa8, 0xcd,
0x24, 0x2e, 0x46, 0x28, 0x44, 0x02, 0x99, 0x22,
0xc4, 0x3c, 0x77, 0xc9, 0xe3, 0xe4, 0x2f, 0x56,
0x2f, 0x48, 0x5d, 0x24, 0xc5, 0x01, 0xa2, 0x0b },
.result = { 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f },
.valid = true
},
/* wycheproof - edge case for shared secret */
{
.private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
.public = { 0x91, 0xb2, 0x32, 0xa1, 0x78, 0xb3, 0xcd, 0x53,
0x09, 0x32, 0x44, 0x1e, 0x61, 0x39, 0x41, 0x8f,
0x72, 0x17, 0x22, 0x92, 0xf1, 0xda, 0x4c, 0x18,
0x34, 0xfc, 0x5e, 0xbf, 0xef, 0xb5, 0x1e, 0x3f },
.result = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 },
.valid = true
},
/* wycheproof - edge case for shared secret */
{
.private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
.public = { 0x04, 0x5c, 0x6e, 0x11, 0xc5, 0xd3, 0x32, 0x55,
0x6c, 0x78, 0x22, 0xfe, 0x94, 0xeb, 0xf8, 0x9b,
0x56, 0xa3, 0x87, 0x8d, 0xc2, 0x7c, 0xa0, 0x79,
0x10, 0x30, 0x58, 0x84, 0x9f, 0xab, 0xcb, 0x4f },
.result = { 0xe5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
.valid = true
},
/* wycheproof - edge case for shared secret */
{
.private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
.public = { 0x1c, 0xa2, 0x19, 0x0b, 0x71, 0x16, 0x35, 0x39,
0x06, 0x3c, 0x35, 0x77, 0x3b, 0xda, 0x0c, 0x9c,
0x92, 0x8e, 0x91, 0x36, 0xf0, 0x62, 0x0a, 0xeb,
0x09, 0x3f, 0x09, 0x91, 0x97, 0xb7, 0xf7, 0x4e },
.result = { 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
.valid = true
},
/* wycheproof - edge case for shared secret */
{
.private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
.public = { 0xf7, 0x6e, 0x90, 0x10, 0xac, 0x33, 0xc5, 0x04,
0x3b, 0x2d, 0x3b, 0x76, 0xa8, 0x42, 0x17, 0x10,
0x00, 0xc4, 0x91, 0x62, 0x22, 0xe9, 0xe8, 0x58,
0x97, 0xa0, 0xae, 0xc7, 0xf6, 0x35, 0x0b, 0x3c },
.result = { 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
.valid = true
},
/* wycheproof - edge case for shared secret */
{
.private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
.public = { 0xbb, 0x72, 0x68, 0x8d, 0x8f, 0x8a, 0xa7, 0xa3,
0x9c, 0xd6, 0x06, 0x0c, 0xd5, 0xc8, 0x09, 0x3c,
0xde, 0xc6, 0xfe, 0x34, 0x19, 0x37, 0xc3, 0x88,
0x6a, 0x99, 0x34, 0x6c, 0xd0, 0x7f, 0xaa, 0x55 },
.result = { 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f },
.valid = true
},
/* wycheproof - edge case for shared secret */
{
.private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
.public = { 0x88, 0xfd, 0xde, 0xa1, 0x93, 0x39, 0x1c, 0x6a,
0x59, 0x33, 0xef, 0x9b, 0x71, 0x90, 0x15, 0x49,
0x44, 0x72, 0x05, 0xaa, 0xe9, 0xda, 0x92, 0x8a,
0x6b, 0x91, 0xa3, 0x52, 0xba, 0x10, 0xf4, 0x1f },
.result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 },
.valid = true
},
/* wycheproof - edge case for shared secret */
{
.private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4,
0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3,
0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc,
0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 },
.public = { 0x30, 0x3b, 0x39, 0x2f, 0x15, 0x31, 0x16, 0xca,
0xd9, 0xcc, 0x68, 0x2a, 0x00, 0xcc, 0xc4, 0x4c,
0x95, 0xff, 0x0d, 0x3b, 0xbe, 0x56, 0x8b, 0xeb,
0x6c, 0x4e, 0x73, 0x9b, 0xaf, 0xdc, 0x2c, 0x68 },
.result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00 },
.valid = true
},
/* wycheproof - checking for overflow */
{
.private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
.public = { 0xfd, 0x30, 0x0a, 0xeb, 0x40, 0xe1, 0xfa, 0x58,
0x25, 0x18, 0x41, 0x2b, 0x49, 0xb2, 0x08, 0xa7,
0x84, 0x2b, 0x1e, 0x1f, 0x05, 0x6a, 0x04, 0x01,
0x78, 0xea, 0x41, 0x41, 0x53, 0x4f, 0x65, 0x2d },
.result = { 0xb7, 0x34, 0x10, 0x5d, 0xc2, 0x57, 0x58, 0x5d,
0x73, 0xb5, 0x66, 0xcc, 0xb7, 0x6f, 0x06, 0x27,
0x95, 0xcc, 0xbe, 0xc8, 0x91, 0x28, 0xe5, 0x2b,
0x02, 0xf3, 0xe5, 0x96, 0x39, 0xf1, 0x3c, 0x46 },
.valid = true
},
/* wycheproof - checking for overflow */
{
.private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
.public = { 0xc8, 0xef, 0x79, 0xb5, 0x14, 0xd7, 0x68, 0x26,
0x77, 0xbc, 0x79, 0x31, 0xe0, 0x6e, 0xe5, 0xc2,
0x7c, 0x9b, 0x39, 0x2b, 0x4a, 0xe9, 0x48, 0x44,
0x73, 0xf5, 0x54, 0xe6, 0x67, 0x8e, 0xcc, 0x2e },
.result = { 0x64, 0x7a, 0x46, 0xb6, 0xfc, 0x3f, 0x40, 0xd6,
0x21, 0x41, 0xee, 0x3c, 0xee, 0x70, 0x6b, 0x4d,
0x7a, 0x92, 0x71, 0x59, 0x3a, 0x7b, 0x14, 0x3e,
0x8e, 0x2e, 0x22, 0x79, 0x88, 0x3e, 0x45, 0x50 },
.valid = true
},
/* wycheproof - checking for overflow */
{
.private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
.public = { 0x64, 0xae, 0xac, 0x25, 0x04, 0x14, 0x48, 0x61,
0x53, 0x2b, 0x7b, 0xbc, 0xb6, 0xc8, 0x7d, 0x67,
0xdd, 0x4c, 0x1f, 0x07, 0xeb, 0xc2, 0xe0, 0x6e,
0xff, 0xb9, 0x5a, 0xec, 0xc6, 0x17, 0x0b, 0x2c },
.result = { 0x4f, 0xf0, 0x3d, 0x5f, 0xb4, 0x3c, 0xd8, 0x65,
0x7a, 0x3c, 0xf3, 0x7c, 0x13, 0x8c, 0xad, 0xce,
0xcc, 0xe5, 0x09, 0xe4, 0xeb, 0xa0, 0x89, 0xd0,
0xef, 0x40, 0xb4, 0xe4, 0xfb, 0x94, 0x61, 0x55 },
.valid = true
},
/* wycheproof - checking for overflow */
{
.private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
.public = { 0xbf, 0x68, 0xe3, 0x5e, 0x9b, 0xdb, 0x7e, 0xee,
0x1b, 0x50, 0x57, 0x02, 0x21, 0x86, 0x0f, 0x5d,
0xcd, 0xad, 0x8a, 0xcb, 0xab, 0x03, 0x1b, 0x14,
0x97, 0x4c, 0xc4, 0x90, 0x13, 0xc4, 0x98, 0x31 },
.result = { 0x21, 0xce, 0xe5, 0x2e, 0xfd, 0xbc, 0x81, 0x2e,
0x1d, 0x02, 0x1a, 0x4a, 0xf1, 0xe1, 0xd8, 0xbc,
0x4d, 0xb3, 0xc4, 0x00, 0xe4, 0xd2, 0xa2, 0xc5,
0x6a, 0x39, 0x26, 0xdb, 0x4d, 0x99, 0xc6, 0x5b },
.valid = true
},
/* wycheproof - checking for overflow */
{
.private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d,
0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d,
0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c,
0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 },
.public = { 0x53, 0x47, 0xc4, 0x91, 0x33, 0x1a, 0x64, 0xb4,
0x3d, 0xdc, 0x68, 0x30, 0x34, 0xe6, 0x77, 0xf5,
0x3d, 0xc3, 0x2b, 0x52, 0xa5, 0x2a, 0x57, 0x7c,
0x15, 0xa8, 0x3b, 0xf2, 0x98, 0xe9, 0x9f, 0x19 },
.result = { 0x18, 0xcb, 0x89, 0xe4, 0xe2, 0x0c, 0x0c, 0x2b,
0xd3, 0x24, 0x30, 0x52, 0x45, 0x26, 0x6c, 0x93,
0x27, 0x69, 0x0b, 0xbe, 0x79, 0xac, 0xb8, 0x8f,
0x5b, 0x8f, 0xb3, 0xf7, 0x4e, 0xca, 0x3e, 0x52 },
.valid = true
},
/* wycheproof - private key == -1 (mod order) */
{
.private = { 0xa0, 0x23, 0xcd, 0xd0, 0x83, 0xef, 0x5b, 0xb8,
0x2f, 0x10, 0xd6, 0x2e, 0x59, 0xe1, 0x5a, 0x68,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50 },
.public = { 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e,
0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57,
0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f,
0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 },
.result = { 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e,
0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57,
0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f,
0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 },
.valid = true
},
/* wycheproof - private key == 1 (mod order) on twist */
{
.private = { 0x58, 0x08, 0x3d, 0xd2, 0x61, 0xad, 0x91, 0xef,
0xf9, 0x52, 0x32, 0x2e, 0xc8, 0x24, 0xc6, 0x82,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f },
.public = { 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f,
0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6,
0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64,
0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 },
.result = { 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f,
0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6,
0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64,
0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 },
.valid = true
}
};
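/*
 * Run every Curve25519 test vector above, then cross-check the optimized
 * basepoint path (curve25519_generate_public() and curve25519() with the
 * basepoint 9) against curve25519_generic() on a few random scalars.
 */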
bool __init curve25519_selftest(void)
{
bool success = true, ret, ret2;
size_t i = 0, j;
u8 in[CURVE25519_KEY_SIZE];
u8 out[CURVE25519_KEY_SIZE], out2[CURVE25519_KEY_SIZE],
out3[CURVE25519_KEY_SIZE];
for (i = 0; i < ARRAY_SIZE(curve25519_test_vectors); ++i) {
memset(out, 0, CURVE25519_KEY_SIZE);
ret = curve25519(out, curve25519_test_vectors[i].private,
curve25519_test_vectors[i].public);
if (ret != curve25519_test_vectors[i].valid ||
memcmp(out, curve25519_test_vectors[i].result,
CURVE25519_KEY_SIZE)) {
pr_err("curve25519 self-test %zu: FAIL\n", i + 1);
success = false;
}
}
for (i = 0; i < 5; ++i) {
get_random_bytes(in, sizeof(in));
ret = curve25519_generate_public(out, in);
ret2 = curve25519(out2, in, (u8[CURVE25519_KEY_SIZE]){ 9 });
curve25519_generic(out3, in, (u8[CURVE25519_KEY_SIZE]){ 9 });
if (ret != ret2 ||
memcmp(out, out2, CURVE25519_KEY_SIZE) ||
memcmp(out, out3, CURVE25519_KEY_SIZE)) {
pr_err("curve25519 basepoint self-test %zu: FAIL: input - 0x",
i + 1);
for (j = CURVE25519_KEY_SIZE; j-- > 0;)
printk(KERN_CONT "%02x", in[j]);
printk(KERN_CONT "\n");
success = false;
}
}
return success;
}
| linux-master | lib/crypto/curve25519-selftest.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* The "hash function" used as the core of the ChaCha stream cipher (RFC7539)
*
* Copyright (C) 2015 Martin Willi
*/
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <asm/unaligned.h>
#include <crypto/chacha.h>
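/*
 * The bare ChaCha permutation. Each pass through the loop below performs one
 * "double round": four quarter-rounds down the columns of the 4x4 word
 * matrix, followed by four quarter-rounds along its diagonals.
 */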
static void chacha_permute(u32 *x, int nrounds)
{
int i;
/* whitelist the allowed round counts */
WARN_ON_ONCE(nrounds != 20 && nrounds != 12);
for (i = 0; i < nrounds; i += 2) {
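/* Column round: quarter-rounds on (0,4,8,12) through (3,7,11,15) */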
x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16);
x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 16);
x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 16);
x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 16);
x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 12);
x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 12);
x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 12);
x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 12);
x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 8);
x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 8);
x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 8);
x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 8);
x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 7);
x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 7);
x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 7);
x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 7);
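/* Diagonal round: quarter-rounds on (0,5,10,15) through (3,4,9,14) */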
x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 16);
x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 16);
x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 16);
x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 16);
x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 12);
x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 12);
x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 12);
x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 12);
x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 8);
x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 8);
x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 8);
x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 8);
x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 7);
x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 7);
x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 7);
x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 7);
}
}
/**
* chacha_block_generic - generate one keystream block and increment block counter
* @state: input state matrix (16 32-bit words)
* @stream: output keystream block (64 bytes)
* @nrounds: number of rounds (20 or 12; 20 is recommended)
*
* This is the ChaCha core, a function from 64-byte strings to 64-byte strings.
* The caller has already converted the endianness of the input. This function
* also handles incrementing the block counter in the input matrix.
*/
void chacha_block_generic(u32 *state, u8 *stream, int nrounds)
{
u32 x[16];
int i;
memcpy(x, state, 64);
chacha_permute(x, nrounds);
for (i = 0; i < ARRAY_SIZE(x); i++)
put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]);
state[12]++;
}
EXPORT_SYMBOL(chacha_block_generic);
/**
* hchacha_block_generic - abbreviated ChaCha core, for XChaCha
* @state: input state matrix (16 32-bit words)
* @stream: output (8 32-bit words)
* @nrounds: number of rounds (20 or 12; 20 is recommended)
*
* HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step
* towards XChaCha (see https://cr.yp.to/snuffle/xsalsa-20081128.pdf). HChaCha
* skips the final addition of the initial state, and outputs only certain words
* of the state. It should not be used for streaming directly.
*/
void hchacha_block_generic(const u32 *state, u32 *stream, int nrounds)
{
u32 x[16];
memcpy(x, state, 64);
chacha_permute(x, nrounds);
memcpy(&stream[0], &x[0], 16);
memcpy(&stream[4], &x[12], 16);
}
EXPORT_SYMBOL(hchacha_block_generic);
| linux-master | lib/crypto/chacha.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SHA-256, as specified in
* http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
*
* SHA-256 code by Jean-Luc Cooke <[email protected]>.
*
* Copyright (c) Jean-Luc Cooke <[email protected]>
* Copyright (c) Andrew McDonald <[email protected]>
* Copyright (c) 2002 James Morris <[email protected]>
* Copyright (c) 2014 Red Hat Inc.
*/
#include <asm/unaligned.h>
#include <crypto/sha256_base.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
static const u32 SHA256_K[] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
};
static inline u32 Ch(u32 x, u32 y, u32 z)
{
return z ^ (x & (y ^ z));
}
static inline u32 Maj(u32 x, u32 y, u32 z)
{
return (x & y) | (z & (x | y));
}
#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))
#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3))
#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10))
static inline void LOAD_OP(int I, u32 *W, const u8 *input)
{
W[I] = get_unaligned_be32((__u32 *)input + I);
}
static inline void BLEND_OP(int I, u32 *W)
{
W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
}
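/*
 * One round of the SHA-256 compression function. Callers pass the eight
 * working variables rotated by one position each round, so they can stay in
 * registers instead of being shuffled through memory.
 */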
#define SHA256_ROUND(i, a, b, c, d, e, f, g, h) do { \
u32 t1, t2; \
t1 = h + e1(e) + Ch(e, f, g) + SHA256_K[i] + W[i]; \
t2 = e0(a) + Maj(a, b, c); \
d += t1; \
h = t1 + t2; \
} while (0)
static void sha256_transform(u32 *state, const u8 *input, u32 *W)
{
u32 a, b, c, d, e, f, g, h;
int i;
/* load the input */
for (i = 0; i < 16; i += 8) {
LOAD_OP(i + 0, W, input);
LOAD_OP(i + 1, W, input);
LOAD_OP(i + 2, W, input);
LOAD_OP(i + 3, W, input);
LOAD_OP(i + 4, W, input);
LOAD_OP(i + 5, W, input);
LOAD_OP(i + 6, W, input);
LOAD_OP(i + 7, W, input);
}
/* now blend */
for (i = 16; i < 64; i += 8) {
BLEND_OP(i + 0, W);
BLEND_OP(i + 1, W);
BLEND_OP(i + 2, W);
BLEND_OP(i + 3, W);
BLEND_OP(i + 4, W);
BLEND_OP(i + 5, W);
BLEND_OP(i + 6, W);
BLEND_OP(i + 7, W);
}
/* load the state into our registers */
a = state[0]; b = state[1]; c = state[2]; d = state[3];
e = state[4]; f = state[5]; g = state[6]; h = state[7];
/* now iterate */
for (i = 0; i < 64; i += 8) {
SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h);
SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g);
SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f);
SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e);
SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d);
SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c);
SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b);
SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a);
}
state[0] += a; state[1] += b; state[2] += c; state[3] += d;
state[4] += e; state[5] += f; state[6] += g; state[7] += h;
}
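/*
 * Hash a run of full blocks back to back, reusing a single stack-allocated
 * message schedule and wiping it afterwards so no message-derived data
 * lingers on the stack.
 */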
static void sha256_transform_blocks(struct sha256_state *sctx,
const u8 *input, int blocks)
{
u32 W[64];
do {
sha256_transform(sctx->state, input, W);
input += SHA256_BLOCK_SIZE;
} while (--blocks);
memzero_explicit(W, sizeof(W));
}
void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
{
lib_sha256_base_do_update(sctx, data, len, sha256_transform_blocks);
}
EXPORT_SYMBOL(sha256_update);
static void __sha256_final(struct sha256_state *sctx, u8 *out, int digest_size)
{
lib_sha256_base_do_finalize(sctx, sha256_transform_blocks);
lib_sha256_base_finish(sctx, out, digest_size);
}
void sha256_final(struct sha256_state *sctx, u8 *out)
{
__sha256_final(sctx, out, 32);
}
EXPORT_SYMBOL(sha256_final);
void sha224_final(struct sha256_state *sctx, u8 *out)
{
__sha256_final(sctx, out, 28);
}
EXPORT_SYMBOL(sha224_final);
void sha256(const u8 *data, unsigned int len, u8 *out)
{
struct sha256_state sctx;
sha256_init(&sctx);
sha256_update(&sctx, data, len);
sha256_final(&sctx, out);
}
EXPORT_SYMBOL(sha256);
MODULE_LICENSE("GPL");
| linux-master | lib/crypto/sha256.c |
/*
* Constant-time equality testing of memory regions.
*
* Authors:
*
* James Yonan <[email protected]>
* Daniel Borkmann <[email protected]>
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* BSD LICENSE
*
* Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of OpenVPN Technologies nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <linux/module.h>
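/*
 * OPTIMIZER_HIDE_VAR() is applied after every accumulation below so the
 * compiler cannot observe that "neq" is already non-zero and short-circuit
 * the remaining comparisons, which would make the runtime depend on where
 * the two buffers first differ.
 */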
/* Generic path for arbitrary size */
static inline unsigned long
__crypto_memneq_generic(const void *a, const void *b, size_t size)
{
unsigned long neq = 0;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
while (size >= sizeof(unsigned long)) {
neq |= get_unaligned((unsigned long *)a) ^
get_unaligned((unsigned long *)b);
OPTIMIZER_HIDE_VAR(neq);
a += sizeof(unsigned long);
b += sizeof(unsigned long);
size -= sizeof(unsigned long);
}
#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
while (size > 0) {
neq |= *(unsigned char *)a ^ *(unsigned char *)b;
OPTIMIZER_HIDE_VAR(neq);
a += 1;
b += 1;
size -= 1;
}
return neq;
}
/* Loop-free fast-path for frequently used 16-byte size */
static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
{
unsigned long neq = 0;
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (sizeof(unsigned long) == 8) {
neq |= get_unaligned((unsigned long *)a) ^
get_unaligned((unsigned long *)b);
OPTIMIZER_HIDE_VAR(neq);
neq |= get_unaligned((unsigned long *)(a + 8)) ^
get_unaligned((unsigned long *)(b + 8));
OPTIMIZER_HIDE_VAR(neq);
} else if (sizeof(unsigned int) == 4) {
neq |= get_unaligned((unsigned int *)a) ^
get_unaligned((unsigned int *)b);
OPTIMIZER_HIDE_VAR(neq);
neq |= get_unaligned((unsigned int *)(a + 4)) ^
get_unaligned((unsigned int *)(b + 4));
OPTIMIZER_HIDE_VAR(neq);
neq |= get_unaligned((unsigned int *)(a + 8)) ^
get_unaligned((unsigned int *)(b + 8));
OPTIMIZER_HIDE_VAR(neq);
neq |= get_unaligned((unsigned int *)(a + 12)) ^
get_unaligned((unsigned int *)(b + 12));
OPTIMIZER_HIDE_VAR(neq);
} else
#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
{
neq |= *(unsigned char *)(a) ^ *(unsigned char *)(b);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+1) ^ *(unsigned char *)(b+1);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+2) ^ *(unsigned char *)(b+2);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+3) ^ *(unsigned char *)(b+3);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+4) ^ *(unsigned char *)(b+4);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+5) ^ *(unsigned char *)(b+5);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+6) ^ *(unsigned char *)(b+6);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+7) ^ *(unsigned char *)(b+7);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+8) ^ *(unsigned char *)(b+8);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+9) ^ *(unsigned char *)(b+9);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+10) ^ *(unsigned char *)(b+10);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+11) ^ *(unsigned char *)(b+11);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+12) ^ *(unsigned char *)(b+12);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+13) ^ *(unsigned char *)(b+13);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+14) ^ *(unsigned char *)(b+14);
OPTIMIZER_HIDE_VAR(neq);
neq |= *(unsigned char *)(a+15) ^ *(unsigned char *)(b+15);
OPTIMIZER_HIDE_VAR(neq);
}
return neq;
}
/* Compare two areas of memory without leaking timing information,
* and with special optimizations for common sizes. Users should
* not call this function directly, but should instead use
* crypto_memneq defined in crypto/algapi.h.
*/
noinline unsigned long __crypto_memneq(const void *a, const void *b,
size_t size)
{
switch (size) {
case 16:
return __crypto_memneq_16(a, b);
default:
return __crypto_memneq_generic(a, b, size);
}
}
EXPORT_SYMBOL(__crypto_memneq);
| linux-master | lib/crypto/memneq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API
*
* ARC4 Cipher Algorithm
*
* Jon Oberheide <[email protected]>
*/
#include <crypto/arc4.h>
#include <linux/module.h>
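/*
 * The RC4 key-scheduling algorithm (KSA): initialize S to the identity
 * permutation, then swap entries driven by the key bytes, cycling through
 * the key as many times as needed to cover all 256 entries.
 */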
int arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len)
{
int i, j = 0, k = 0;
ctx->x = 1;
ctx->y = 0;
for (i = 0; i < 256; i++)
ctx->S[i] = i;
for (i = 0; i < 256; i++) {
u32 a = ctx->S[i];
j = (j + in_key[k] + a) & 0xff;
ctx->S[i] = ctx->S[j];
ctx->S[j] = a;
if (++k >= key_len)
k = 0;
}
return 0;
}
EXPORT_SYMBOL(arc4_setkey);
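/*
 * The RC4 pseudo-random generation algorithm (PRGA), software-pipelined:
 * the table loads for the next iteration (ta, ty, tb) are issued before the
 * current keystream byte S[a] is consumed.
 */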
void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len)
{
u32 *const S = ctx->S;
u32 x, y, a, b;
u32 ty, ta, tb;
if (len == 0)
return;
x = ctx->x;
y = ctx->y;
a = S[x];
y = (y + a) & 0xff;
b = S[y];
do {
S[y] = a;
a = (a + b) & 0xff;
S[x] = b;
x = (x + 1) & 0xff;
ta = S[x];
ty = (y + ta) & 0xff;
tb = S[ty];
*out++ = *in++ ^ S[a];
if (--len == 0)
break;
y = ty;
a = ta;
b = tb;
} while (true);
ctx->x = x;
ctx->y = y;
}
EXPORT_SYMBOL(arc4_crypt);
MODULE_LICENSE("GPL");
| linux-master | lib/crypto/arc4.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*
* This is an implementation of the BLAKE2s hash and PRF functions.
*
* Information: https://blake2.net/
*
*/
#include <crypto/internal/blake2s.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bug.h>
static inline void blake2s_set_lastblock(struct blake2s_state *state)
{
state->f[0] = -1;
}
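/*
 * Buffer input so that blake2s_compress() only ever sees full blocks, while
 * always keeping at least the final block in state->buf: BLAKE2s needs to
 * know which block is last, since blake2s_final() compresses it with the
 * last-block flag set.
 */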
void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
{
const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
if (unlikely(!inlen))
return;
if (inlen > fill) {
memcpy(state->buf + state->buflen, in, fill);
blake2s_compress(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
state->buflen = 0;
in += fill;
inlen -= fill;
}
if (inlen > BLAKE2S_BLOCK_SIZE) {
const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
blake2s_compress(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
}
memcpy(state->buf + state->buflen, in, inlen);
state->buflen += inlen;
}
EXPORT_SYMBOL(blake2s_update);
void blake2s_final(struct blake2s_state *state, u8 *out)
{
WARN_ON(IS_ENABLED(DEBUG) && !out);
blake2s_set_lastblock(state);
memset(state->buf + state->buflen, 0,
BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
blake2s_compress(state, state->buf, 1, state->buflen);
cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
memcpy(out, state->h, state->outlen);
memzero_explicit(state, sizeof(*state));
}
EXPORT_SYMBOL(blake2s_final);
static int __init blake2s_mod_init(void)
{
if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
WARN_ON(!blake2s_selftest()))
return -ENODEV;
return 0;
}
module_init(blake2s_mod_init);
MODULE_DESCRIPTION("BLAKE2s hash function");
MODULE_AUTHOR("Jason A. Donenfeld <[email protected]>");
| linux-master | lib/crypto/blake2s.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Poly1305 authenticator algorithm, RFC7539
*
* Copyright (C) 2015 Martin Willi
*
* Based on public domain code by Andrew Moon and Daniel J. Bernstein.
*/
#include <crypto/internal/poly1305.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/unaligned.h>
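/*
 * The 32-byte one-time key is split per RFC7539: the first half becomes the
 * clamped multiplier "r", handled by poly1305_core_setkey(), and the second
 * half is the value "s" added to the accumulator when the tag is emitted.
 */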
void poly1305_init_generic(struct poly1305_desc_ctx *desc,
const u8 key[POLY1305_KEY_SIZE])
{
poly1305_core_setkey(&desc->core_r, key);
desc->s[0] = get_unaligned_le32(key + 16);
desc->s[1] = get_unaligned_le32(key + 20);
desc->s[2] = get_unaligned_le32(key + 24);
desc->s[3] = get_unaligned_le32(key + 28);
poly1305_core_init(&desc->h);
desc->buflen = 0;
desc->sset = true;
desc->rset = 2;
}
EXPORT_SYMBOL_GPL(poly1305_init_generic);
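/*
 * Partial 16-byte blocks are carried in desc->buf across calls. Full blocks
 * are hashed with the high bit set (final argument 1), while the explicitly
 * padded final block in poly1305_final_generic() is hashed with it clear.
 */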
void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src,
unsigned int nbytes)
{
unsigned int bytes;
if (unlikely(desc->buflen)) {
bytes = min(nbytes, POLY1305_BLOCK_SIZE - desc->buflen);
memcpy(desc->buf + desc->buflen, src, bytes);
src += bytes;
nbytes -= bytes;
desc->buflen += bytes;
if (desc->buflen == POLY1305_BLOCK_SIZE) {
poly1305_core_blocks(&desc->h, &desc->core_r, desc->buf,
1, 1);
desc->buflen = 0;
}
}
if (likely(nbytes >= POLY1305_BLOCK_SIZE)) {
poly1305_core_blocks(&desc->h, &desc->core_r, src,
nbytes / POLY1305_BLOCK_SIZE, 1);
src += nbytes - (nbytes % POLY1305_BLOCK_SIZE);
nbytes %= POLY1305_BLOCK_SIZE;
}
if (unlikely(nbytes)) {
desc->buflen = nbytes;
memcpy(desc->buf, src, nbytes);
}
}
EXPORT_SYMBOL_GPL(poly1305_update_generic);
void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *dst)
{
if (unlikely(desc->buflen)) {
desc->buf[desc->buflen++] = 1;
memset(desc->buf + desc->buflen, 0,
POLY1305_BLOCK_SIZE - desc->buflen);
poly1305_core_blocks(&desc->h, &desc->core_r, desc->buf, 1, 0);
}
poly1305_core_emit(&desc->h, desc->s, dst);
*desc = (struct poly1305_desc_ctx){};
}
EXPORT_SYMBOL_GPL(poly1305_final_generic);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <[email protected]>");
| linux-master | lib/crypto/poly1305.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* The ChaCha stream cipher (RFC7539)
*
* Copyright (C) 2015 Martin Willi
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/module.h>
#include <crypto/algapi.h> // for crypto_xor_cpy
#include <crypto/chacha.h>
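/*
 * Portable ChaCha en/decryption: generate the keystream one 64-byte block at
 * a time and XOR it into the data, handling any trailing partial block with
 * a final short crypto_xor_cpy().
 */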
void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
{
/* aligned to potentially speed up crypto_xor() */
u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long));
while (bytes >= CHACHA_BLOCK_SIZE) {
chacha_block_generic(state, stream, nrounds);
crypto_xor_cpy(dst, src, stream, CHACHA_BLOCK_SIZE);
bytes -= CHACHA_BLOCK_SIZE;
dst += CHACHA_BLOCK_SIZE;
src += CHACHA_BLOCK_SIZE;
}
if (bytes) {
chacha_block_generic(state, stream, nrounds);
crypto_xor_cpy(dst, src, stream, bytes);
}
}
EXPORT_SYMBOL(chacha_crypt_generic);
MODULE_LICENSE("GPL");
| linux-master | lib/crypto/libchacha.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*
* This is an implementation of the ChaCha20Poly1305 AEAD construction.
*
* Information: https://tools.ietf.org/html/rfc8439
*/
#include <crypto/algapi.h>
#include <crypto/chacha20poly1305.h>
#include <crypto/chacha.h>
#include <crypto/poly1305.h>
#include <crypto/scatterwalk.h>
#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#define CHACHA_KEY_WORDS (CHACHA_KEY_SIZE / sizeof(u32))
static void chacha_load_key(u32 *k, const u8 *in)
{
k[0] = get_unaligned_le32(in);
k[1] = get_unaligned_le32(in + 4);
k[2] = get_unaligned_le32(in + 8);
k[3] = get_unaligned_le32(in + 12);
k[4] = get_unaligned_le32(in + 16);
k[5] = get_unaligned_le32(in + 20);
k[6] = get_unaligned_le32(in + 24);
k[7] = get_unaligned_le32(in + 28);
}
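/*
 * XChaCha nonce extension: run HChaCha on the key and the first 16 nonce
 * bytes to derive a subkey, then key regular ChaCha with that subkey and an
 * IV built from 8 zero bytes followed by the remaining 8 nonce bytes.
 */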
static void xchacha_init(u32 *chacha_state, const u8 *key, const u8 *nonce)
{
u32 k[CHACHA_KEY_WORDS];
u8 iv[CHACHA_IV_SIZE];
memset(iv, 0, 8);
memcpy(iv + 8, nonce + 16, 8);
chacha_load_key(k, key);
/* Compute the subkey given the original key and first 128 nonce bits */
chacha_init(chacha_state, k, nonce);
hchacha_block(chacha_state, k, 20);
chacha_init(chacha_state, k, iv);
memzero_explicit(k, sizeof(k));
memzero_explicit(iv, sizeof(iv));
}
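/*
 * RFC8439 AEAD layout: the Poly1305 one-time key is the first 32 bytes of
 * ChaCha20 keystream (block counter 0); the MAC then covers the AD and the
 * ciphertext, each zero-padded to a 16-byte boundary, followed by both
 * lengths as little-endian 64-bit words.
 */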
static void
__chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 *ad, const size_t ad_len, u32 *chacha_state)
{
const u8 *pad0 = page_address(ZERO_PAGE(0));
struct poly1305_desc_ctx poly1305_state;
union {
u8 block0[POLY1305_KEY_SIZE];
__le64 lens[2];
} b;
chacha20_crypt(chacha_state, b.block0, pad0, sizeof(b.block0));
poly1305_init(&poly1305_state, b.block0);
poly1305_update(&poly1305_state, ad, ad_len);
if (ad_len & 0xf)
poly1305_update(&poly1305_state, pad0, 0x10 - (ad_len & 0xf));
chacha20_crypt(chacha_state, dst, src, src_len);
poly1305_update(&poly1305_state, dst, src_len);
if (src_len & 0xf)
poly1305_update(&poly1305_state, pad0, 0x10 - (src_len & 0xf));
b.lens[0] = cpu_to_le64(ad_len);
b.lens[1] = cpu_to_le64(src_len);
poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens));
poly1305_final(&poly1305_state, dst + src_len);
memzero_explicit(chacha_state, CHACHA_STATE_WORDS * sizeof(u32));
memzero_explicit(&b, sizeof(b));
}
void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 *ad, const size_t ad_len,
const u64 nonce,
const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
u32 chacha_state[CHACHA_STATE_WORDS];
u32 k[CHACHA_KEY_WORDS];
__le64 iv[2];
chacha_load_key(k, key);
iv[0] = 0;
iv[1] = cpu_to_le64(nonce);
chacha_init(chacha_state, k, (u8 *)iv);
__chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, chacha_state);
memzero_explicit(iv, sizeof(iv));
memzero_explicit(k, sizeof(k));
}
EXPORT_SYMBOL(chacha20poly1305_encrypt);
void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 *ad, const size_t ad_len,
const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
u32 chacha_state[CHACHA_STATE_WORDS];
xchacha_init(chacha_state, key, nonce);
__chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, chacha_state);
}
EXPORT_SYMBOL(xchacha20poly1305_encrypt);
static bool
__chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 *ad, const size_t ad_len, u32 *chacha_state)
{
const u8 *pad0 = page_address(ZERO_PAGE(0));
struct poly1305_desc_ctx poly1305_state;
size_t dst_len;
int ret;
union {
u8 block0[POLY1305_KEY_SIZE];
u8 mac[POLY1305_DIGEST_SIZE];
__le64 lens[2];
} b;
if (unlikely(src_len < POLY1305_DIGEST_SIZE))
return false;
chacha20_crypt(chacha_state, b.block0, pad0, sizeof(b.block0));
poly1305_init(&poly1305_state, b.block0);
poly1305_update(&poly1305_state, ad, ad_len);
if (ad_len & 0xf)
poly1305_update(&poly1305_state, pad0, 0x10 - (ad_len & 0xf));
dst_len = src_len - POLY1305_DIGEST_SIZE;
poly1305_update(&poly1305_state, src, dst_len);
if (dst_len & 0xf)
poly1305_update(&poly1305_state, pad0, 0x10 - (dst_len & 0xf));
b.lens[0] = cpu_to_le64(ad_len);
b.lens[1] = cpu_to_le64(dst_len);
poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens));
poly1305_final(&poly1305_state, b.mac);
ret = crypto_memneq(b.mac, src + dst_len, POLY1305_DIGEST_SIZE);
if (likely(!ret))
chacha20_crypt(chacha_state, dst, src, dst_len);
memzero_explicit(&b, sizeof(b));
return !ret;
}
bool chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 *ad, const size_t ad_len,
const u64 nonce,
const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
u32 chacha_state[CHACHA_STATE_WORDS];
u32 k[CHACHA_KEY_WORDS];
__le64 iv[2];
bool ret;
chacha_load_key(k, key);
iv[0] = 0;
iv[1] = cpu_to_le64(nonce);
chacha_init(chacha_state, k, (u8 *)iv);
ret = __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len,
chacha_state);
memzero_explicit(chacha_state, sizeof(chacha_state));
memzero_explicit(iv, sizeof(iv));
memzero_explicit(k, sizeof(k));
return ret;
}
EXPORT_SYMBOL(chacha20poly1305_decrypt);
bool xchacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 *ad, const size_t ad_len,
const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
u32 chacha_state[CHACHA_STATE_WORDS];
xchacha_init(chacha_state, key, nonce);
return __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len,
chacha_state);
}
EXPORT_SYMBOL(xchacha20poly1305_decrypt);
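/*
 * In-place en/decryption of a scatterlist. The list is walked with an sg
 * mapping iterator; keystream left over when an entry ends mid-block is kept
 * in b.chacha_stream and consumed at the start of the next entry. The tag is
 * written or verified directly when the last mapped segment extends a full
 * tag past the ciphertext, otherwise via scatterwalk_map_and_copy().
 */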
static
bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src,
const size_t src_len,
const u8 *ad, const size_t ad_len,
const u64 nonce,
const u8 key[CHACHA20POLY1305_KEY_SIZE],
int encrypt)
{
const u8 *pad0 = page_address(ZERO_PAGE(0));
struct poly1305_desc_ctx poly1305_state;
u32 chacha_state[CHACHA_STATE_WORDS];
struct sg_mapping_iter miter;
size_t partial = 0;
unsigned int flags;
bool ret = true;
int sl;
union {
struct {
u32 k[CHACHA_KEY_WORDS];
__le64 iv[2];
};
u8 block0[POLY1305_KEY_SIZE];
u8 chacha_stream[CHACHA_BLOCK_SIZE];
struct {
u8 mac[2][POLY1305_DIGEST_SIZE];
};
__le64 lens[2];
} b __aligned(16);
if (WARN_ON(src_len > INT_MAX))
return false;
chacha_load_key(b.k, key);
b.iv[0] = 0;
b.iv[1] = cpu_to_le64(nonce);
chacha_init(chacha_state, b.k, (u8 *)b.iv);
chacha20_crypt(chacha_state, b.block0, pad0, sizeof(b.block0));
poly1305_init(&poly1305_state, b.block0);
if (unlikely(ad_len)) {
poly1305_update(&poly1305_state, ad, ad_len);
if (ad_len & 0xf)
poly1305_update(&poly1305_state, pad0, 0x10 - (ad_len & 0xf));
}
flags = SG_MITER_TO_SG | SG_MITER_ATOMIC;
sg_miter_start(&miter, src, sg_nents(src), flags);
for (sl = src_len; sl > 0 && sg_miter_next(&miter); sl -= miter.length) {
u8 *addr = miter.addr;
size_t length = min_t(size_t, sl, miter.length);
if (!encrypt)
poly1305_update(&poly1305_state, addr, length);
if (unlikely(partial)) {
size_t l = min(length, CHACHA_BLOCK_SIZE - partial);
crypto_xor(addr, b.chacha_stream + partial, l);
partial = (partial + l) & (CHACHA_BLOCK_SIZE - 1);
addr += l;
length -= l;
}
if (likely(length >= CHACHA_BLOCK_SIZE || length == sl)) {
size_t l = length;
if (unlikely(length < sl))
l &= ~(CHACHA_BLOCK_SIZE - 1);
chacha20_crypt(chacha_state, addr, addr, l);
addr += l;
length -= l;
}
if (unlikely(length > 0)) {
chacha20_crypt(chacha_state, b.chacha_stream, pad0,
CHACHA_BLOCK_SIZE);
crypto_xor(addr, b.chacha_stream, length);
partial = length;
}
if (encrypt)
poly1305_update(&poly1305_state, miter.addr,
min_t(size_t, sl, miter.length));
}
if (src_len & 0xf)
poly1305_update(&poly1305_state, pad0, 0x10 - (src_len & 0xf));
b.lens[0] = cpu_to_le64(ad_len);
b.lens[1] = cpu_to_le64(src_len);
poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens));
if (likely(sl <= -POLY1305_DIGEST_SIZE)) {
if (encrypt) {
poly1305_final(&poly1305_state,
miter.addr + miter.length + sl);
ret = true;
} else {
poly1305_final(&poly1305_state, b.mac[0]);
ret = !crypto_memneq(b.mac[0],
miter.addr + miter.length + sl,
POLY1305_DIGEST_SIZE);
}
}
sg_miter_stop(&miter);
if (unlikely(sl > -POLY1305_DIGEST_SIZE)) {
poly1305_final(&poly1305_state, b.mac[1]);
scatterwalk_map_and_copy(b.mac[encrypt], src, src_len,
sizeof(b.mac[1]), encrypt);
ret = encrypt ||
!crypto_memneq(b.mac[0], b.mac[1], POLY1305_DIGEST_SIZE);
}
memzero_explicit(chacha_state, sizeof(chacha_state));
memzero_explicit(&b, sizeof(b));
return ret;
}
bool chacha20poly1305_encrypt_sg_inplace(struct scatterlist *src, size_t src_len,
const u8 *ad, const size_t ad_len,
const u64 nonce,
const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
return chacha20poly1305_crypt_sg_inplace(src, src_len, ad, ad_len,
nonce, key, 1);
}
EXPORT_SYMBOL(chacha20poly1305_encrypt_sg_inplace);
bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len,
const u8 *ad, const size_t ad_len,
const u64 nonce,
const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
if (unlikely(src_len < POLY1305_DIGEST_SIZE))
return false;
return chacha20poly1305_crypt_sg_inplace(src,
src_len - POLY1305_DIGEST_SIZE,
ad, ad_len, nonce, key, 0);
}
EXPORT_SYMBOL(chacha20poly1305_decrypt_sg_inplace);
static int __init chacha20poly1305_init(void)
{
if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
WARN_ON(!chacha20poly1305_selftest()))
return -ENODEV;
return 0;
}
static void __exit chacha20poly1305_exit(void)
{
}
module_init(chacha20poly1305_init);
module_exit(chacha20poly1305_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ChaCha20Poly1305 AEAD construction");
MODULE_AUTHOR("Jason A. Donenfeld <[email protected]>");
| linux-master | lib/crypto/chacha20poly1305.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include <crypto/chacha20poly1305.h>
#include <crypto/chacha.h>
#include <crypto/poly1305.h>
#include <asm/unaligned.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
struct chacha20poly1305_testvec {
const u8 *input, *output, *assoc, *nonce, *key;
size_t ilen, alen, nlen;
bool failure;
};
/* The first of these are the ChaCha20-Poly1305 AEAD test vectors from RFC7539
 * 2.8.2. The vectors after those were generated by reference implementations.
 * The final, marked ones are taken from wycheproof, but we only use those on
 * the encrypt side, because they mostly stress the primitives rather than the
 * actual chapoly construction.
 */
static const u8 enc_input001[] __initconst = {
0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
0x9d
};
static const u8 enc_output001[] __initconst = {
0x64, 0xa0, 0x86, 0x15, 0x75, 0x86, 0x1a, 0xf4,
0x60, 0xf0, 0x62, 0xc7, 0x9b, 0xe6, 0x43, 0xbd,
0x5e, 0x80, 0x5c, 0xfd, 0x34, 0x5c, 0xf3, 0x89,
0xf1, 0x08, 0x67, 0x0a, 0xc7, 0x6c, 0x8c, 0xb2,
0x4c, 0x6c, 0xfc, 0x18, 0x75, 0x5d, 0x43, 0xee,
0xa0, 0x9e, 0xe9, 0x4e, 0x38, 0x2d, 0x26, 0xb0,
0xbd, 0xb7, 0xb7, 0x3c, 0x32, 0x1b, 0x01, 0x00,
0xd4, 0xf0, 0x3b, 0x7f, 0x35, 0x58, 0x94, 0xcf,
0x33, 0x2f, 0x83, 0x0e, 0x71, 0x0b, 0x97, 0xce,
0x98, 0xc8, 0xa8, 0x4a, 0xbd, 0x0b, 0x94, 0x81,
0x14, 0xad, 0x17, 0x6e, 0x00, 0x8d, 0x33, 0xbd,
0x60, 0xf9, 0x82, 0xb1, 0xff, 0x37, 0xc8, 0x55,
0x97, 0x97, 0xa0, 0x6e, 0xf4, 0xf0, 0xef, 0x61,
0xc1, 0x86, 0x32, 0x4e, 0x2b, 0x35, 0x06, 0x38,
0x36, 0x06, 0x90, 0x7b, 0x6a, 0x7c, 0x02, 0xb0,
0xf9, 0xf6, 0x15, 0x7b, 0x53, 0xc8, 0x67, 0xe4,
0xb9, 0x16, 0x6c, 0x76, 0x7b, 0x80, 0x4d, 0x46,
0xa5, 0x9b, 0x52, 0x16, 0xcd, 0xe7, 0xa4, 0xe9,
0x90, 0x40, 0xc5, 0xa4, 0x04, 0x33, 0x22, 0x5e,
0xe2, 0x82, 0xa1, 0xb0, 0xa0, 0x6c, 0x52, 0x3e,
0xaf, 0x45, 0x34, 0xd7, 0xf8, 0x3f, 0xa1, 0x15,
0x5b, 0x00, 0x47, 0x71, 0x8c, 0xbc, 0x54, 0x6a,
0x0d, 0x07, 0x2b, 0x04, 0xb3, 0x56, 0x4e, 0xea,
0x1b, 0x42, 0x22, 0x73, 0xf5, 0x48, 0x27, 0x1a,
0x0b, 0xb2, 0x31, 0x60, 0x53, 0xfa, 0x76, 0x99,
0x19, 0x55, 0xeb, 0xd6, 0x31, 0x59, 0x43, 0x4e,
0xce, 0xbb, 0x4e, 0x46, 0x6d, 0xae, 0x5a, 0x10,
0x73, 0xa6, 0x72, 0x76, 0x27, 0x09, 0x7a, 0x10,
0x49, 0xe6, 0x17, 0xd9, 0x1d, 0x36, 0x10, 0x94,
0xfa, 0x68, 0xf0, 0xff, 0x77, 0x98, 0x71, 0x30,
0x30, 0x5b, 0xea, 0xba, 0x2e, 0xda, 0x04, 0xdf,
0x99, 0x7b, 0x71, 0x4d, 0x6c, 0x6f, 0x2c, 0x29,
0xa6, 0xad, 0x5c, 0xb4, 0x02, 0x2b, 0x02, 0x70,
0x9b, 0xee, 0xad, 0x9d, 0x67, 0x89, 0x0c, 0xbb,
0x22, 0x39, 0x23, 0x36, 0xfe, 0xa1, 0x85, 0x1f,
0x38
};
static const u8 enc_assoc001[] __initconst = {
0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x4e, 0x91
};
static const u8 enc_nonce001[] __initconst = {
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08
};
static const u8 enc_key001[] __initconst = {
0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
};
static const u8 enc_input002[] __initconst = { };
static const u8 enc_output002[] __initconst = {
0xea, 0xe0, 0x1e, 0x9e, 0x2c, 0x91, 0xaa, 0xe1,
0xdb, 0x5d, 0x99, 0x3f, 0x8a, 0xf7, 0x69, 0x92
};
static const u8 enc_assoc002[] __initconst = { };
static const u8 enc_nonce002[] __initconst = {
0xca, 0xbf, 0x33, 0x71, 0x32, 0x45, 0x77, 0x8e
};
static const u8 enc_key002[] __initconst = {
0x4c, 0xf5, 0x96, 0x83, 0x38, 0xe6, 0xae, 0x7f,
0x2d, 0x29, 0x25, 0x76, 0xd5, 0x75, 0x27, 0x86,
0x91, 0x9a, 0x27, 0x7a, 0xfb, 0x46, 0xc5, 0xef,
0x94, 0x81, 0x79, 0x57, 0x14, 0x59, 0x40, 0x68
};
static const u8 enc_input003[] __initconst = { };
static const u8 enc_output003[] __initconst = {
0xdd, 0x6b, 0x3b, 0x82, 0xce, 0x5a, 0xbd, 0xd6,
0xa9, 0x35, 0x83, 0xd8, 0x8c, 0x3d, 0x85, 0x77
};
static const u8 enc_assoc003[] __initconst = {
0x33, 0x10, 0x41, 0x12, 0x1f, 0xf3, 0xd2, 0x6b
};
static const u8 enc_nonce003[] __initconst = {
0x3d, 0x86, 0xb5, 0x6b, 0xc8, 0xa3, 0x1f, 0x1d
};
static const u8 enc_key003[] __initconst = {
0x2d, 0xb0, 0x5d, 0x40, 0xc8, 0xed, 0x44, 0x88,
0x34, 0xd1, 0x13, 0xaf, 0x57, 0xa1, 0xeb, 0x3a,
0x2a, 0x80, 0x51, 0x36, 0xec, 0x5b, 0xbc, 0x08,
0x93, 0x84, 0x21, 0xb5, 0x13, 0x88, 0x3c, 0x0d
};
static const u8 enc_input004[] __initconst = {
0xa4
};
static const u8 enc_output004[] __initconst = {
0xb7, 0x1b, 0xb0, 0x73, 0x59, 0xb0, 0x84, 0xb2,
0x6d, 0x8e, 0xab, 0x94, 0x31, 0xa1, 0xae, 0xac,
0x89
};
static const u8 enc_assoc004[] __initconst = {
0x6a, 0xe2, 0xad, 0x3f, 0x88, 0x39, 0x5a, 0x40
};
static const u8 enc_nonce004[] __initconst = {
0xd2, 0x32, 0x1f, 0x29, 0x28, 0xc6, 0xc4, 0xc4
};
static const u8 enc_key004[] __initconst = {
0x4b, 0x28, 0x4b, 0xa3, 0x7b, 0xbe, 0xe9, 0xf8,
0x31, 0x80, 0x82, 0xd7, 0xd8, 0xe8, 0xb5, 0xa1,
0xe2, 0x18, 0x18, 0x8a, 0x9c, 0xfa, 0xa3, 0x3d,
0x25, 0x71, 0x3e, 0x40, 0xbc, 0x54, 0x7a, 0x3e
};
static const u8 enc_input005[] __initconst = {
0x2d
};
static const u8 enc_output005[] __initconst = {
0xbf, 0xe1, 0x5b, 0x0b, 0xdb, 0x6b, 0xf5, 0x5e,
0x6c, 0x5d, 0x84, 0x44, 0x39, 0x81, 0xc1, 0x9c,
0xac
};
static const u8 enc_assoc005[] __initconst = { };
static const u8 enc_nonce005[] __initconst = {
0x20, 0x1c, 0xaa, 0x5f, 0x9c, 0xbf, 0x92, 0x30
};
static const u8 enc_key005[] __initconst = {
0x66, 0xca, 0x9c, 0x23, 0x2a, 0x4b, 0x4b, 0x31,
0x0e, 0x92, 0x89, 0x8b, 0xf4, 0x93, 0xc7, 0x87,
0x98, 0xa3, 0xd8, 0x39, 0xf8, 0xf4, 0xa7, 0x01,
0xc0, 0x2e, 0x0a, 0xa6, 0x7e, 0x5a, 0x78, 0x87
};
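/* Vector 006: 129-byte plaintext, 7-byte AAD; 145-byte output. */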
static const u8 enc_input006[] __initconst = {
0x33, 0x2f, 0x94, 0xc1, 0xa4, 0xef, 0xcc, 0x2a,
0x5b, 0xa6, 0xe5, 0x8f, 0x1d, 0x40, 0xf0, 0x92,
0x3c, 0xd9, 0x24, 0x11, 0xa9, 0x71, 0xf9, 0x37,
0x14, 0x99, 0xfa, 0xbe, 0xe6, 0x80, 0xde, 0x50,
0xc9, 0x96, 0xd4, 0xb0, 0xec, 0x9e, 0x17, 0xec,
0xd2, 0x5e, 0x72, 0x99, 0xfc, 0x0a, 0xe1, 0xcb,
0x48, 0xd2, 0x85, 0xdd, 0x2f, 0x90, 0xe0, 0x66,
0x3b, 0xe6, 0x20, 0x74, 0xbe, 0x23, 0x8f, 0xcb,
0xb4, 0xe4, 0xda, 0x48, 0x40, 0xa6, 0xd1, 0x1b,
0xc7, 0x42, 0xce, 0x2f, 0x0c, 0xa6, 0x85, 0x6e,
0x87, 0x37, 0x03, 0xb1, 0x7c, 0x25, 0x96, 0xa3,
0x05, 0xd8, 0xb0, 0xf4, 0xed, 0xea, 0xc2, 0xf0,
0x31, 0x98, 0x6c, 0xd1, 0x14, 0x25, 0xc0, 0xcb,
0x01, 0x74, 0xd0, 0x82, 0xf4, 0x36, 0xf5, 0x41,
0xd5, 0xdc, 0xca, 0xc5, 0xbb, 0x98, 0xfe, 0xfc,
0x69, 0x21, 0x70, 0xd8, 0xa4, 0x4b, 0xc8, 0xde,
0x8f
};
static const u8 enc_output006[] __initconst = {
0x8b, 0x06, 0xd3, 0x31, 0xb0, 0x93, 0x45, 0xb1,
0x75, 0x6e, 0x26, 0xf9, 0x67, 0xbc, 0x90, 0x15,
0x81, 0x2c, 0xb5, 0xf0, 0xc6, 0x2b, 0xc7, 0x8c,
0x56, 0xd1, 0xbf, 0x69, 0x6c, 0x07, 0xa0, 0xda,
0x65, 0x27, 0xc9, 0x90, 0x3d, 0xef, 0x4b, 0x11,
0x0f, 0x19, 0x07, 0xfd, 0x29, 0x92, 0xd9, 0xc8,
0xf7, 0x99, 0x2e, 0x4a, 0xd0, 0xb8, 0x2c, 0xdc,
0x93, 0xf5, 0x9e, 0x33, 0x78, 0xd1, 0x37, 0xc3,
0x66, 0xd7, 0x5e, 0xbc, 0x44, 0xbf, 0x53, 0xa5,
0xbc, 0xc4, 0xcb, 0x7b, 0x3a, 0x8e, 0x7f, 0x02,
0xbd, 0xbb, 0xe7, 0xca, 0xa6, 0x6c, 0x6b, 0x93,
0x21, 0x93, 0x10, 0x61, 0xe7, 0x69, 0xd0, 0x78,
0xf3, 0x07, 0x5a, 0x1a, 0x8f, 0x73, 0xaa, 0xb1,
0x4e, 0xd3, 0xda, 0x4f, 0xf3, 0x32, 0xe1, 0x66,
0x3e, 0x6c, 0xc6, 0x13, 0xba, 0x06, 0x5b, 0xfc,
0x6a, 0xe5, 0x6f, 0x60, 0xfb, 0x07, 0x40, 0xb0,
0x8c, 0x9d, 0x84, 0x43, 0x6b, 0xc1, 0xf7, 0x8d,
0x8d, 0x31, 0xf7, 0x7a, 0x39, 0x4d, 0x8f, 0x9a,
0xeb
};
static const u8 enc_assoc006[] __initconst = {
0x70, 0xd3, 0x33, 0xf3, 0x8b, 0x18, 0x0b
};
static const u8 enc_nonce006[] __initconst = {
0xdf, 0x51, 0x84, 0x82, 0x42, 0x0c, 0x75, 0x9c
};
static const u8 enc_key006[] __initconst = {
0x68, 0x7b, 0x8d, 0x8e, 0xe3, 0xc4, 0xdd, 0xae,
0xdf, 0x72, 0x7f, 0x53, 0x72, 0x25, 0x1e, 0x78,
0x91, 0xcb, 0x69, 0x76, 0x1f, 0x49, 0x93, 0xf9,
0x6f, 0x21, 0xcc, 0x39, 0x9c, 0xad, 0xb1, 0x01
};
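/* Vector 007: 256-byte plaintext, no AAD; 272-byte output. */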
static const u8 enc_input007[] __initconst = {
0x9b, 0x18, 0xdb, 0xdd, 0x9a, 0x0f, 0x3e, 0xa5,
0x15, 0x17, 0xde, 0xdf, 0x08, 0x9d, 0x65, 0x0a,
0x67, 0x30, 0x12, 0xe2, 0x34, 0x77, 0x4b, 0xc1,
0xd9, 0xc6, 0x1f, 0xab, 0xc6, 0x18, 0x50, 0x17,
0xa7, 0x9d, 0x3c, 0xa6, 0xc5, 0x35, 0x8c, 0x1c,
0xc0, 0xa1, 0x7c, 0x9f, 0x03, 0x89, 0xca, 0xe1,
0xe6, 0xe9, 0xd4, 0xd3, 0x88, 0xdb, 0xb4, 0x51,
0x9d, 0xec, 0xb4, 0xfc, 0x52, 0xee, 0x6d, 0xf1,
0x75, 0x42, 0xc6, 0xfd, 0xbd, 0x7a, 0x8e, 0x86,
0xfc, 0x44, 0xb3, 0x4f, 0xf3, 0xea, 0x67, 0x5a,
0x41, 0x13, 0xba, 0xb0, 0xdc, 0xe1, 0xd3, 0x2a,
0x7c, 0x22, 0xb3, 0xca, 0xac, 0x6a, 0x37, 0x98,
0x3e, 0x1d, 0x40, 0x97, 0xf7, 0x9b, 0x1d, 0x36,
0x6b, 0xb3, 0x28, 0xbd, 0x60, 0x82, 0x47, 0x34,
0xaa, 0x2f, 0x7d, 0xe9, 0xa8, 0x70, 0x81, 0x57,
0xd4, 0xb9, 0x77, 0x0a, 0x9d, 0x29, 0xa7, 0x84,
0x52, 0x4f, 0xc2, 0x4a, 0x40, 0x3b, 0x3c, 0xd4,
0xc9, 0x2a, 0xdb, 0x4a, 0x53, 0xc4, 0xbe, 0x80,
0xe9, 0x51, 0x7f, 0x8f, 0xc7, 0xa2, 0xce, 0x82,
0x5c, 0x91, 0x1e, 0x74, 0xd9, 0xd0, 0xbd, 0xd5,
0xf3, 0xfd, 0xda, 0x4d, 0x25, 0xb4, 0xbb, 0x2d,
0xac, 0x2f, 0x3d, 0x71, 0x85, 0x7b, 0xcf, 0x3c,
0x7b, 0x3e, 0x0e, 0x22, 0x78, 0x0c, 0x29, 0xbf,
0xe4, 0xf4, 0x57, 0xb3, 0xcb, 0x49, 0xa0, 0xfc,
0x1e, 0x05, 0x4e, 0x16, 0xbc, 0xd5, 0xa8, 0xa3,
0xee, 0x05, 0x35, 0xc6, 0x7c, 0xab, 0x60, 0x14,
0x55, 0x1a, 0x8e, 0xc5, 0x88, 0x5d, 0xd5, 0x81,
0xc2, 0x81, 0xa5, 0xc4, 0x60, 0xdb, 0xaf, 0x77,
0x91, 0xe1, 0xce, 0xa2, 0x7e, 0x7f, 0x42, 0xe3,
0xb0, 0x13, 0x1c, 0x1f, 0x25, 0x60, 0x21, 0xe2,
0x40, 0x5f, 0x99, 0xb7, 0x73, 0xec, 0x9b, 0x2b,
0xf0, 0x65, 0x11, 0xc8, 0xd0, 0x0a, 0x9f, 0xd3
};
static const u8 enc_output007[] __initconst = {
0x85, 0x04, 0xc2, 0xed, 0x8d, 0xfd, 0x97, 0x5c,
0xd2, 0xb7, 0xe2, 0xc1, 0x6b, 0xa3, 0xba, 0xf8,
0xc9, 0x50, 0xc3, 0xc6, 0xa5, 0xe3, 0xa4, 0x7c,
0xc3, 0x23, 0x49, 0x5e, 0xa9, 0xb9, 0x32, 0xeb,
0x8a, 0x7c, 0xca, 0xe5, 0xec, 0xfb, 0x7c, 0xc0,
0xcb, 0x7d, 0xdc, 0x2c, 0x9d, 0x92, 0x55, 0x21,
0x0a, 0xc8, 0x43, 0x63, 0x59, 0x0a, 0x31, 0x70,
0x82, 0x67, 0x41, 0x03, 0xf8, 0xdf, 0xf2, 0xac,
0xa7, 0x02, 0xd4, 0xd5, 0x8a, 0x2d, 0xc8, 0x99,
0x19, 0x66, 0xd0, 0xf6, 0x88, 0x2c, 0x77, 0xd9,
0xd4, 0x0d, 0x6c, 0xbd, 0x98, 0xde, 0xe7, 0x7f,
0xad, 0x7e, 0x8a, 0xfb, 0xe9, 0x4b, 0xe5, 0xf7,
0xe5, 0x50, 0xa0, 0x90, 0x3f, 0xd6, 0x22, 0x53,
0xe3, 0xfe, 0x1b, 0xcc, 0x79, 0x3b, 0xec, 0x12,
0x47, 0x52, 0xa7, 0xd6, 0x04, 0xe3, 0x52, 0xe6,
0x93, 0x90, 0x91, 0x32, 0x73, 0x79, 0xb8, 0xd0,
0x31, 0xde, 0x1f, 0x9f, 0x2f, 0x05, 0x38, 0x54,
0x2f, 0x35, 0x04, 0x39, 0xe0, 0xa7, 0xba, 0xc6,
0x52, 0xf6, 0x37, 0x65, 0x4c, 0x07, 0xa9, 0x7e,
0xb3, 0x21, 0x6f, 0x74, 0x8c, 0xc9, 0xde, 0xdb,
0x65, 0x1b, 0x9b, 0xaa, 0x60, 0xb1, 0x03, 0x30,
0x6b, 0xb2, 0x03, 0xc4, 0x1c, 0x04, 0xf8, 0x0f,
0x64, 0xaf, 0x46, 0xe4, 0x65, 0x99, 0x49, 0xe2,
0xea, 0xce, 0x78, 0x00, 0xd8, 0x8b, 0xd5, 0x2e,
0xcf, 0xfc, 0x40, 0x49, 0xe8, 0x58, 0xdc, 0x34,
0x9c, 0x8c, 0x61, 0xbf, 0x0a, 0x8e, 0xec, 0x39,
0xa9, 0x30, 0x05, 0x5a, 0xd2, 0x56, 0x01, 0xc7,
0xda, 0x8f, 0x4e, 0xbb, 0x43, 0xa3, 0x3a, 0xf9,
0x15, 0x2a, 0xd0, 0xa0, 0x7a, 0x87, 0x34, 0x82,
0xfe, 0x8a, 0xd1, 0x2d, 0x5e, 0xc7, 0xbf, 0x04,
0x53, 0x5f, 0x3b, 0x36, 0xd4, 0x25, 0x5c, 0x34,
0x7a, 0x8d, 0xd5, 0x05, 0xce, 0x72, 0xca, 0xef,
0x7a, 0x4b, 0xbc, 0xb0, 0x10, 0x5c, 0x96, 0x42,
0x3a, 0x00, 0x98, 0xcd, 0x15, 0xe8, 0xb7, 0x53
};
static const u8 enc_assoc007[] __initconst = { };
static const u8 enc_nonce007[] __initconst = {
0xde, 0x7b, 0xef, 0xc3, 0x65, 0x1b, 0x68, 0xb0
};
static const u8 enc_key007[] __initconst = {
0x8d, 0xb8, 0x91, 0x48, 0xf0, 0xe7, 0x0a, 0xbd,
0xf9, 0x3f, 0xcd, 0xd9, 0xa0, 0x1e, 0x42, 0x4c,
0xe7, 0xde, 0x25, 0x3d, 0xa3, 0xd7, 0x05, 0x80,
0x8d, 0xf2, 0x82, 0xac, 0x44, 0x16, 0x51, 0x01
};
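/* Vector 008: 512-byte plaintext, no AAD; 528-byte output. */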
static const u8 enc_input008[] __initconst = {
0xc3, 0x09, 0x94, 0x62, 0xe6, 0x46, 0x2e, 0x10,
0xbe, 0x00, 0xe4, 0xfc, 0xf3, 0x40, 0xa3, 0xe2,
0x0f, 0xc2, 0x8b, 0x28, 0xdc, 0xba, 0xb4, 0x3c,
0xe4, 0x21, 0x58, 0x61, 0xcd, 0x8b, 0xcd, 0xfb,
0xac, 0x94, 0xa1, 0x45, 0xf5, 0x1c, 0xe1, 0x12,
0xe0, 0x3b, 0x67, 0x21, 0x54, 0x5e, 0x8c, 0xaa,
0xcf, 0xdb, 0xb4, 0x51, 0xd4, 0x13, 0xda, 0xe6,
0x83, 0x89, 0xb6, 0x92, 0xe9, 0x21, 0x76, 0xa4,
0x93, 0x7d, 0x0e, 0xfd, 0x96, 0x36, 0x03, 0x91,
0x43, 0x5c, 0x92, 0x49, 0x62, 0x61, 0x7b, 0xeb,
0x43, 0x89, 0xb8, 0x12, 0x20, 0x43, 0xd4, 0x47,
0x06, 0x84, 0xee, 0x47, 0xe9, 0x8a, 0x73, 0x15,
0x0f, 0x72, 0xcf, 0xed, 0xce, 0x96, 0xb2, 0x7f,
0x21, 0x45, 0x76, 0xeb, 0x26, 0x28, 0x83, 0x6a,
0xad, 0xaa, 0xa6, 0x81, 0xd8, 0x55, 0xb1, 0xa3,
0x85, 0xb3, 0x0c, 0xdf, 0xf1, 0x69, 0x2d, 0x97,
0x05, 0x2a, 0xbc, 0x7c, 0x7b, 0x25, 0xf8, 0x80,
0x9d, 0x39, 0x25, 0xf3, 0x62, 0xf0, 0x66, 0x5e,
0xf4, 0xa0, 0xcf, 0xd8, 0xfd, 0x4f, 0xb1, 0x1f,
0x60, 0x3a, 0x08, 0x47, 0xaf, 0xe1, 0xf6, 0x10,
0x77, 0x09, 0xa7, 0x27, 0x8f, 0x9a, 0x97, 0x5a,
0x26, 0xfa, 0xfe, 0x41, 0x32, 0x83, 0x10, 0xe0,
0x1d, 0xbf, 0x64, 0x0d, 0xf4, 0x1c, 0x32, 0x35,
0xe5, 0x1b, 0x36, 0xef, 0xd4, 0x4a, 0x93, 0x4d,
0x00, 0x7c, 0xec, 0x02, 0x07, 0x8b, 0x5d, 0x7d,
0x1b, 0x0e, 0xd1, 0xa6, 0xa5, 0x5d, 0x7d, 0x57,
0x88, 0xa8, 0xcc, 0x81, 0xb4, 0x86, 0x4e, 0xb4,
0x40, 0xe9, 0x1d, 0xc3, 0xb1, 0x24, 0x3e, 0x7f,
0xcc, 0x8a, 0x24, 0x9b, 0xdf, 0x6d, 0xf0, 0x39,
0x69, 0x3e, 0x4c, 0xc0, 0x96, 0xe4, 0x13, 0xda,
0x90, 0xda, 0xf4, 0x95, 0x66, 0x8b, 0x17, 0x17,
0xfe, 0x39, 0x43, 0x25, 0xaa, 0xda, 0xa0, 0x43,
0x3c, 0xb1, 0x41, 0x02, 0xa3, 0xf0, 0xa7, 0x19,
0x59, 0xbc, 0x1d, 0x7d, 0x6c, 0x6d, 0x91, 0x09,
0x5c, 0xb7, 0x5b, 0x01, 0xd1, 0x6f, 0x17, 0x21,
0x97, 0xbf, 0x89, 0x71, 0xa5, 0xb0, 0x6e, 0x07,
0x45, 0xfd, 0x9d, 0xea, 0x07, 0xf6, 0x7a, 0x9f,
0x10, 0x18, 0x22, 0x30, 0x73, 0xac, 0xd4, 0x6b,
0x72, 0x44, 0xed, 0xd9, 0x19, 0x9b, 0x2d, 0x4a,
0x41, 0xdd, 0xd1, 0x85, 0x5e, 0x37, 0x19, 0xed,
0xd2, 0x15, 0x8f, 0x5e, 0x91, 0xdb, 0x33, 0xf2,
0xe4, 0xdb, 0xff, 0x98, 0xfb, 0xa3, 0xb5, 0xca,
0x21, 0x69, 0x08, 0xe7, 0x8a, 0xdf, 0x90, 0xff,
0x3e, 0xe9, 0x20, 0x86, 0x3c, 0xe9, 0xfc, 0x0b,
0xfe, 0x5c, 0x61, 0xaa, 0x13, 0x92, 0x7f, 0x7b,
0xec, 0xe0, 0x6d, 0xa8, 0x23, 0x22, 0xf6, 0x6b,
0x77, 0xc4, 0xfe, 0x40, 0x07, 0x3b, 0xb6, 0xf6,
0x8e, 0x5f, 0xd4, 0xb9, 0xb7, 0x0f, 0x21, 0x04,
0xef, 0x83, 0x63, 0x91, 0x69, 0x40, 0xa3, 0x48,
0x5c, 0xd2, 0x60, 0xf9, 0x4f, 0x6c, 0x47, 0x8b,
0x3b, 0xb1, 0x9f, 0x8e, 0xee, 0x16, 0x8a, 0x13,
0xfc, 0x46, 0x17, 0xc3, 0xc3, 0x32, 0x56, 0xf8,
0x3c, 0x85, 0x3a, 0xb6, 0x3e, 0xaa, 0x89, 0x4f,
0xb3, 0xdf, 0x38, 0xfd, 0xf1, 0xe4, 0x3a, 0xc0,
0xe6, 0x58, 0xb5, 0x8f, 0xc5, 0x29, 0xa2, 0x92,
0x4a, 0xb6, 0xa0, 0x34, 0x7f, 0xab, 0xb5, 0x8a,
0x90, 0xa1, 0xdb, 0x4d, 0xca, 0xb6, 0x2c, 0x41,
0x3c, 0xf7, 0x2b, 0x21, 0xc3, 0xfd, 0xf4, 0x17,
0x5c, 0xb5, 0x33, 0x17, 0x68, 0x2b, 0x08, 0x30,
0xf3, 0xf7, 0x30, 0x3c, 0x96, 0xe6, 0x6a, 0x20,
0x97, 0xe7, 0x4d, 0x10, 0x5f, 0x47, 0x5f, 0x49,
0x96, 0x09, 0xf0, 0x27, 0x91, 0xc8, 0xf8, 0x5a,
0x2e, 0x79, 0xb5, 0xe2, 0xb8, 0xe8, 0xb9, 0x7b,
0xd5, 0x10, 0xcb, 0xff, 0x5d, 0x14, 0x73, 0xf3
};
static const u8 enc_output008[] __initconst = {
0x14, 0xf6, 0x41, 0x37, 0xa6, 0xd4, 0x27, 0xcd,
0xdb, 0x06, 0x3e, 0x9a, 0x4e, 0xab, 0xd5, 0xb1,
0x1e, 0x6b, 0xd2, 0xbc, 0x11, 0xf4, 0x28, 0x93,
0x63, 0x54, 0xef, 0xbb, 0x5e, 0x1d, 0x3a, 0x1d,
0x37, 0x3c, 0x0a, 0x6c, 0x1e, 0xc2, 0xd1, 0x2c,
0xb5, 0xa3, 0xb5, 0x7b, 0xb8, 0x8f, 0x25, 0xa6,
0x1b, 0x61, 0x1c, 0xec, 0x28, 0x58, 0x26, 0xa4,
0xa8, 0x33, 0x28, 0x25, 0x5c, 0x45, 0x05, 0xe5,
0x6c, 0x99, 0xe5, 0x45, 0xc4, 0xa2, 0x03, 0x84,
0x03, 0x73, 0x1e, 0x8c, 0x49, 0xac, 0x20, 0xdd,
0x8d, 0xb3, 0xc4, 0xf5, 0xe7, 0x4f, 0xf1, 0xed,
0xa1, 0x98, 0xde, 0xa4, 0x96, 0xdd, 0x2f, 0xab,
0xab, 0x97, 0xcf, 0x3e, 0xd2, 0x9e, 0xb8, 0x13,
0x07, 0x28, 0x29, 0x19, 0xaf, 0xfd, 0xf2, 0x49,
0x43, 0xea, 0x49, 0x26, 0x91, 0xc1, 0x07, 0xd6,
0xbb, 0x81, 0x75, 0x35, 0x0d, 0x24, 0x7f, 0xc8,
0xda, 0xd4, 0xb7, 0xeb, 0xe8, 0x5c, 0x09, 0xa2,
0x2f, 0xdc, 0x28, 0x7d, 0x3a, 0x03, 0xfa, 0x94,
0xb5, 0x1d, 0x17, 0x99, 0x36, 0xc3, 0x1c, 0x18,
0x34, 0xe3, 0x9f, 0xf5, 0x55, 0x7c, 0xb0, 0x60,
0x9d, 0xff, 0xac, 0xd4, 0x61, 0xf2, 0xad, 0xf8,
0xce, 0xc7, 0xbe, 0x5c, 0xd2, 0x95, 0xa8, 0x4b,
0x77, 0x13, 0x19, 0x59, 0x26, 0xc9, 0xb7, 0x8f,
0x6a, 0xcb, 0x2d, 0x37, 0x91, 0xea, 0x92, 0x9c,
0x94, 0x5b, 0xda, 0x0b, 0xce, 0xfe, 0x30, 0x20,
0xf8, 0x51, 0xad, 0xf2, 0xbe, 0xe7, 0xc7, 0xff,
0xb3, 0x33, 0x91, 0x6a, 0xc9, 0x1a, 0x41, 0xc9,
0x0f, 0xf3, 0x10, 0x0e, 0xfd, 0x53, 0xff, 0x6c,
0x16, 0x52, 0xd9, 0xf3, 0xf7, 0x98, 0x2e, 0xc9,
0x07, 0x31, 0x2c, 0x0c, 0x72, 0xd7, 0xc5, 0xc6,
0x08, 0x2a, 0x7b, 0xda, 0xbd, 0x7e, 0x02, 0xea,
0x1a, 0xbb, 0xf2, 0x04, 0x27, 0x61, 0x28, 0x8e,
0xf5, 0x04, 0x03, 0x1f, 0x4c, 0x07, 0x55, 0x82,
0xec, 0x1e, 0xd7, 0x8b, 0x2f, 0x65, 0x56, 0xd1,
0xd9, 0x1e, 0x3c, 0xe9, 0x1f, 0x5e, 0x98, 0x70,
0x38, 0x4a, 0x8c, 0x49, 0xc5, 0x43, 0xa0, 0xa1,
0x8b, 0x74, 0x9d, 0x4c, 0x62, 0x0d, 0x10, 0x0c,
0xf4, 0x6c, 0x8f, 0xe0, 0xaa, 0x9a, 0x8d, 0xb7,
0xe0, 0xbe, 0x4c, 0x87, 0xf1, 0x98, 0x2f, 0xcc,
0xed, 0xc0, 0x52, 0x29, 0xdc, 0x83, 0xf8, 0xfc,
0x2c, 0x0e, 0xa8, 0x51, 0x4d, 0x80, 0x0d, 0xa3,
0xfe, 0xd8, 0x37, 0xe7, 0x41, 0x24, 0xfc, 0xfb,
0x75, 0xe3, 0x71, 0x7b, 0x57, 0x45, 0xf5, 0x97,
0x73, 0x65, 0x63, 0x14, 0x74, 0xb8, 0x82, 0x9f,
0xf8, 0x60, 0x2f, 0x8a, 0xf2, 0x4e, 0xf1, 0x39,
0xda, 0x33, 0x91, 0xf8, 0x36, 0xe0, 0x8d, 0x3f,
0x1f, 0x3b, 0x56, 0xdc, 0xa0, 0x8f, 0x3c, 0x9d,
0x71, 0x52, 0xa7, 0xb8, 0xc0, 0xa5, 0xc6, 0xa2,
0x73, 0xda, 0xf4, 0x4b, 0x74, 0x5b, 0x00, 0x3d,
0x99, 0xd7, 0x96, 0xba, 0xe6, 0xe1, 0xa6, 0x96,
0x38, 0xad, 0xb3, 0xc0, 0xd2, 0xba, 0x91, 0x6b,
0xf9, 0x19, 0xdd, 0x3b, 0xbe, 0xbe, 0x9c, 0x20,
0x50, 0xba, 0xa1, 0xd0, 0xce, 0x11, 0xbd, 0x95,
0xd8, 0xd1, 0xdd, 0x33, 0x85, 0x74, 0xdc, 0xdb,
0x66, 0x76, 0x44, 0xdc, 0x03, 0x74, 0x48, 0x35,
0x98, 0xb1, 0x18, 0x47, 0x94, 0x7d, 0xff, 0x62,
0xe4, 0x58, 0x78, 0xab, 0xed, 0x95, 0x36, 0xd9,
0x84, 0x91, 0x82, 0x64, 0x41, 0xbb, 0x58, 0xe6,
0x1c, 0x20, 0x6d, 0x15, 0x6b, 0x13, 0x96, 0xe8,
0x35, 0x7f, 0xdc, 0x40, 0x2c, 0xe9, 0xbc, 0x8a,
0x4f, 0x92, 0xec, 0x06, 0x2d, 0x50, 0xdf, 0x93,
0x5d, 0x65, 0x5a, 0xa8, 0xfc, 0x20, 0x50, 0x14,
0xa9, 0x8a, 0x7e, 0x1d, 0x08, 0x1f, 0xe2, 0x99,
0xd0, 0xbe, 0xfb, 0x3a, 0x21, 0x9d, 0xad, 0x86,
0x54, 0xfd, 0x0d, 0x98, 0x1c, 0x5a, 0x6f, 0x1f,
0x9a, 0x40, 0xcd, 0xa2, 0xff, 0x6a, 0xf1, 0x54
};
static const u8 enc_assoc008[] __initconst = { };
static const u8 enc_nonce008[] __initconst = {
0x0e, 0x0d, 0x57, 0xbb, 0x7b, 0x40, 0x54, 0x02
};
static const u8 enc_key008[] __initconst = {
0xf2, 0xaa, 0x4f, 0x99, 0xfd, 0x3e, 0xa8, 0x53,
0xc1, 0x44, 0xe9, 0x81, 0x18, 0xdc, 0xf5, 0xf0,
0x3e, 0x44, 0x15, 0x59, 0xe0, 0xc5, 0x44, 0x86,
0xc3, 0x91, 0xa8, 0x75, 0xc0, 0x12, 0x46, 0xba
};
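/* Vector 009: 513-byte plaintext, 9-byte AAD; 529-byte output. */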
static const u8 enc_input009[] __initconst = {
0xe6, 0xc3, 0xdb, 0x63, 0x55, 0x15, 0xe3, 0x5b,
0xb7, 0x4b, 0x27, 0x8b, 0x5a, 0xdd, 0xc2, 0xe8,
0x3a, 0x6b, 0xd7, 0x81, 0x96, 0x35, 0x97, 0xca,
0xd7, 0x68, 0xe8, 0xef, 0xce, 0xab, 0xda, 0x09,
0x6e, 0xd6, 0x8e, 0xcb, 0x55, 0xb5, 0xe1, 0xe5,
0x57, 0xfd, 0xc4, 0xe3, 0xe0, 0x18, 0x4f, 0x85,
0xf5, 0x3f, 0x7e, 0x4b, 0x88, 0xc9, 0x52, 0x44,
0x0f, 0xea, 0xaf, 0x1f, 0x71, 0x48, 0x9f, 0x97,
0x6d, 0xb9, 0x6f, 0x00, 0xa6, 0xde, 0x2b, 0x77,
0x8b, 0x15, 0xad, 0x10, 0xa0, 0x2b, 0x7b, 0x41,
0x90, 0x03, 0x2d, 0x69, 0xae, 0xcc, 0x77, 0x7c,
0xa5, 0x9d, 0x29, 0x22, 0xc2, 0xea, 0xb4, 0x00,
0x1a, 0xd2, 0x7a, 0x98, 0x8a, 0xf9, 0xf7, 0x82,
0xb0, 0xab, 0xd8, 0xa6, 0x94, 0x8d, 0x58, 0x2f,
0x01, 0x9e, 0x00, 0x20, 0xfc, 0x49, 0xdc, 0x0e,
0x03, 0xe8, 0x45, 0x10, 0xd6, 0xa8, 0xda, 0x55,
0x10, 0x9a, 0xdf, 0x67, 0x22, 0x8b, 0x43, 0xab,
0x00, 0xbb, 0x02, 0xc8, 0xdd, 0x7b, 0x97, 0x17,
0xd7, 0x1d, 0x9e, 0x02, 0x5e, 0x48, 0xde, 0x8e,
0xcf, 0x99, 0x07, 0x95, 0x92, 0x3c, 0x5f, 0x9f,
0xc5, 0x8a, 0xc0, 0x23, 0xaa, 0xd5, 0x8c, 0x82,
0x6e, 0x16, 0x92, 0xb1, 0x12, 0x17, 0x07, 0xc3,
0xfb, 0x36, 0xf5, 0x6c, 0x35, 0xd6, 0x06, 0x1f,
0x9f, 0xa7, 0x94, 0xa2, 0x38, 0x63, 0x9c, 0xb0,
0x71, 0xb3, 0xa5, 0xd2, 0xd8, 0xba, 0x9f, 0x08,
0x01, 0xb3, 0xff, 0x04, 0x97, 0x73, 0x45, 0x1b,
0xd5, 0xa9, 0x9c, 0x80, 0xaf, 0x04, 0x9a, 0x85,
0xdb, 0x32, 0x5b, 0x5d, 0x1a, 0xc1, 0x36, 0x28,
0x10, 0x79, 0xf1, 0x3c, 0xbf, 0x1a, 0x41, 0x5c,
0x4e, 0xdf, 0xb2, 0x7c, 0x79, 0x3b, 0x7a, 0x62,
0x3d, 0x4b, 0xc9, 0x9b, 0x2a, 0x2e, 0x7c, 0xa2,
0xb1, 0x11, 0x98, 0xa7, 0x34, 0x1a, 0x00, 0xf3,
0xd1, 0xbc, 0x18, 0x22, 0xba, 0x02, 0x56, 0x62,
0x31, 0x10, 0x11, 0x6d, 0xe0, 0x54, 0x9d, 0x40,
0x1f, 0x26, 0x80, 0x41, 0xca, 0x3f, 0x68, 0x0f,
0x32, 0x1d, 0x0a, 0x8e, 0x79, 0xd8, 0xa4, 0x1b,
0x29, 0x1c, 0x90, 0x8e, 0xc5, 0xe3, 0xb4, 0x91,
0x37, 0x9a, 0x97, 0x86, 0x99, 0xd5, 0x09, 0xc5,
0xbb, 0xa3, 0x3f, 0x21, 0x29, 0x82, 0x14, 0x5c,
0xab, 0x25, 0xfb, 0xf2, 0x4f, 0x58, 0x26, 0xd4,
0x83, 0xaa, 0x66, 0x89, 0x67, 0x7e, 0xc0, 0x49,
0xe1, 0x11, 0x10, 0x7f, 0x7a, 0xda, 0x29, 0x04,
0xff, 0xf0, 0xcb, 0x09, 0x7c, 0x9d, 0xfa, 0x03,
0x6f, 0x81, 0x09, 0x31, 0x60, 0xfb, 0x08, 0xfa,
0x74, 0xd3, 0x64, 0x44, 0x7c, 0x55, 0x85, 0xec,
0x9c, 0x6e, 0x25, 0xb7, 0x6c, 0xc5, 0x37, 0xb6,
0x83, 0x87, 0x72, 0x95, 0x8b, 0x9d, 0xe1, 0x69,
0x5c, 0x31, 0x95, 0x42, 0xa6, 0x2c, 0xd1, 0x36,
0x47, 0x1f, 0xec, 0x54, 0xab, 0xa2, 0x1c, 0xd8,
0x00, 0xcc, 0xbc, 0x0d, 0x65, 0xe2, 0x67, 0xbf,
0xbc, 0xea, 0xee, 0x9e, 0xe4, 0x36, 0x95, 0xbe,
0x73, 0xd9, 0xa6, 0xd9, 0x0f, 0xa0, 0xcc, 0x82,
0x76, 0x26, 0xad, 0x5b, 0x58, 0x6c, 0x4e, 0xab,
0x29, 0x64, 0xd3, 0xd9, 0xa9, 0x08, 0x8c, 0x1d,
0xa1, 0x4f, 0x80, 0xd8, 0x3f, 0x94, 0xfb, 0xd3,
0x7b, 0xfc, 0xd1, 0x2b, 0xc3, 0x21, 0xeb, 0xe5,
0x1c, 0x84, 0x23, 0x7f, 0x4b, 0xfa, 0xdb, 0x34,
0x18, 0xa2, 0xc2, 0xe5, 0x13, 0xfe, 0x6c, 0x49,
0x81, 0xd2, 0x73, 0xe7, 0xe2, 0xd7, 0xe4, 0x4f,
0x4b, 0x08, 0x6e, 0xb1, 0x12, 0x22, 0x10, 0x9d,
0xac, 0x51, 0x1e, 0x17, 0xd9, 0x8a, 0x0b, 0x42,
0x88, 0x16, 0x81, 0x37, 0x7c, 0x6a, 0xf7, 0xef,
0x2d, 0xe3, 0xd9, 0xf8, 0x5f, 0xe0, 0x53, 0x27,
0x74, 0xb9, 0xe2, 0xd6, 0x1c, 0x80, 0x2c, 0x52,
0x65
};
static const u8 enc_output009[] __initconst = {
0xfd, 0x81, 0x8d, 0xd0, 0x3d, 0xb4, 0xd5, 0xdf,
0xd3, 0x42, 0x47, 0x5a, 0x6d, 0x19, 0x27, 0x66,
0x4b, 0x2e, 0x0c, 0x27, 0x9c, 0x96, 0x4c, 0x72,
0x02, 0xa3, 0x65, 0xc3, 0xb3, 0x6f, 0x2e, 0xbd,
0x63, 0x8a, 0x4a, 0x5d, 0x29, 0xa2, 0xd0, 0x28,
0x48, 0xc5, 0x3d, 0x98, 0xa3, 0xbc, 0xe0, 0xbe,
0x3b, 0x3f, 0xe6, 0x8a, 0xa4, 0x7f, 0x53, 0x06,
0xfa, 0x7f, 0x27, 0x76, 0x72, 0x31, 0xa1, 0xf5,
0xd6, 0x0c, 0x52, 0x47, 0xba, 0xcd, 0x4f, 0xd7,
0xeb, 0x05, 0x48, 0x0d, 0x7c, 0x35, 0x4a, 0x09,
0xc9, 0x76, 0x71, 0x02, 0xa3, 0xfb, 0xb7, 0x1a,
0x65, 0xb7, 0xed, 0x98, 0xc6, 0x30, 0x8a, 0x00,
0xae, 0xa1, 0x31, 0xe5, 0xb5, 0x9e, 0x6d, 0x62,
0xda, 0xda, 0x07, 0x0f, 0x38, 0x38, 0xd3, 0xcb,
0xc1, 0xb0, 0xad, 0xec, 0x72, 0xec, 0xb1, 0xa2,
0x7b, 0x59, 0xf3, 0x3d, 0x2b, 0xef, 0xcd, 0x28,
0x5b, 0x83, 0xcc, 0x18, 0x91, 0x88, 0xb0, 0x2e,
0xf9, 0x29, 0x31, 0x18, 0xf9, 0x4e, 0xe9, 0x0a,
0x91, 0x92, 0x9f, 0xae, 0x2d, 0xad, 0xf4, 0xe6,
0x1a, 0xe2, 0xa4, 0xee, 0x47, 0x15, 0xbf, 0x83,
0x6e, 0xd7, 0x72, 0x12, 0x3b, 0x2d, 0x24, 0xe9,
0xb2, 0x55, 0xcb, 0x3c, 0x10, 0xf0, 0x24, 0x8a,
0x4a, 0x02, 0xea, 0x90, 0x25, 0xf0, 0xb4, 0x79,
0x3a, 0xef, 0x6e, 0xf5, 0x52, 0xdf, 0xb0, 0x0a,
0xcd, 0x24, 0x1c, 0xd3, 0x2e, 0x22, 0x74, 0xea,
0x21, 0x6f, 0xe9, 0xbd, 0xc8, 0x3e, 0x36, 0x5b,
0x19, 0xf1, 0xca, 0x99, 0x0a, 0xb4, 0xa7, 0x52,
0x1a, 0x4e, 0xf2, 0xad, 0x8d, 0x56, 0x85, 0xbb,
0x64, 0x89, 0xba, 0x26, 0xf9, 0xc7, 0xe1, 0x89,
0x19, 0x22, 0x77, 0xc3, 0xa8, 0xfc, 0xff, 0xad,
0xfe, 0xb9, 0x48, 0xae, 0x12, 0x30, 0x9f, 0x19,
0xfb, 0x1b, 0xef, 0x14, 0x87, 0x8a, 0x78, 0x71,
0xf3, 0xf4, 0xb7, 0x00, 0x9c, 0x1d, 0xb5, 0x3d,
0x49, 0x00, 0x0c, 0x06, 0xd4, 0x50, 0xf9, 0x54,
0x45, 0xb2, 0x5b, 0x43, 0xdb, 0x6d, 0xcf, 0x1a,
0xe9, 0x7a, 0x7a, 0xcf, 0xfc, 0x8a, 0x4e, 0x4d,
0x0b, 0x07, 0x63, 0x28, 0xd8, 0xe7, 0x08, 0x95,
0xdf, 0xa6, 0x72, 0x93, 0x2e, 0xbb, 0xa0, 0x42,
0x89, 0x16, 0xf1, 0xd9, 0x0c, 0xf9, 0xa1, 0x16,
0xfd, 0xd9, 0x03, 0xb4, 0x3b, 0x8a, 0xf5, 0xf6,
0xe7, 0x6b, 0x2e, 0x8e, 0x4c, 0x3d, 0xe2, 0xaf,
0x08, 0x45, 0x03, 0xff, 0x09, 0xb6, 0xeb, 0x2d,
0xc6, 0x1b, 0x88, 0x94, 0xac, 0x3e, 0xf1, 0x9f,
0x0e, 0x0e, 0x2b, 0xd5, 0x00, 0x4d, 0x3f, 0x3b,
0x53, 0xae, 0xaf, 0x1c, 0x33, 0x5f, 0x55, 0x6e,
0x8d, 0xaf, 0x05, 0x7a, 0x10, 0x34, 0xc9, 0xf4,
0x66, 0xcb, 0x62, 0x12, 0xa6, 0xee, 0xe8, 0x1c,
0x5d, 0x12, 0x86, 0xdb, 0x6f, 0x1c, 0x33, 0xc4,
0x1c, 0xda, 0x82, 0x2d, 0x3b, 0x59, 0xfe, 0xb1,
0xa4, 0x59, 0x41, 0x86, 0xd0, 0xef, 0xae, 0xfb,
0xda, 0x6d, 0x11, 0xb8, 0xca, 0xe9, 0x6e, 0xff,
0xf7, 0xa9, 0xd9, 0x70, 0x30, 0xfc, 0x53, 0xe2,
0xd7, 0xa2, 0x4e, 0xc7, 0x91, 0xd9, 0x07, 0x06,
0xaa, 0xdd, 0xb0, 0x59, 0x28, 0x1d, 0x00, 0x66,
0xc5, 0x54, 0xc2, 0xfc, 0x06, 0xda, 0x05, 0x90,
0x52, 0x1d, 0x37, 0x66, 0xee, 0xf0, 0xb2, 0x55,
0x8a, 0x5d, 0xd2, 0x38, 0x86, 0x94, 0x9b, 0xfc,
0x10, 0x4c, 0xa1, 0xb9, 0x64, 0x3e, 0x44, 0xb8,
0x5f, 0xb0, 0x0c, 0xec, 0xe0, 0xc9, 0xe5, 0x62,
0x75, 0x3f, 0x09, 0xd5, 0xf5, 0xd9, 0x26, 0xba,
0x9e, 0xd2, 0xf4, 0xb9, 0x48, 0x0a, 0xbc, 0xa2,
0xd6, 0x7c, 0x36, 0x11, 0x7d, 0x26, 0x81, 0x89,
0xcf, 0xa4, 0xad, 0x73, 0x0e, 0xee, 0xcc, 0x06,
0xa9, 0xdb, 0xb1, 0xfd, 0xfb, 0x09, 0x7f, 0x90,
0x42, 0x37, 0x2f, 0xe1, 0x9c, 0x0f, 0x6f, 0xcf,
0x43, 0xb5, 0xd9, 0x90, 0xe1, 0x85, 0xf5, 0xa8,
0xae
};
static const u8 enc_assoc009[] __initconst = {
0x5a, 0x27, 0xff, 0xeb, 0xdf, 0x84, 0xb2, 0x9e,
0xef
};
static const u8 enc_nonce009[] __initconst = {
0xef, 0x2d, 0x63, 0xee, 0x6b, 0x80, 0x8b, 0x78
};
static const u8 enc_key009[] __initconst = {
0xea, 0xbc, 0x56, 0x99, 0xe3, 0x50, 0xff, 0xc5,
0xcc, 0x1a, 0xd7, 0xc1, 0x57, 0x72, 0xea, 0x86,
0x5b, 0x89, 0x88, 0x61, 0x3d, 0x2f, 0x9b, 0xb2,
0xe7, 0x9c, 0xec, 0x74, 0x6e, 0x3e, 0xf4, 0x3b
};
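/* Vector 010: 1024-byte plaintext, 16-byte AAD; 1040-byte output. */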
static const u8 enc_input010[] __initconst = {
0x42, 0x93, 0xe4, 0xeb, 0x97, 0xb0, 0x57, 0xbf,
0x1a, 0x8b, 0x1f, 0xe4, 0x5f, 0x36, 0x20, 0x3c,
0xef, 0x0a, 0xa9, 0x48, 0x5f, 0x5f, 0x37, 0x22,
0x3a, 0xde, 0xe3, 0xae, 0xbe, 0xad, 0x07, 0xcc,
0xb1, 0xf6, 0xf5, 0xf9, 0x56, 0xdd, 0xe7, 0x16,
0x1e, 0x7f, 0xdf, 0x7a, 0x9e, 0x75, 0xb7, 0xc7,
0xbe, 0xbe, 0x8a, 0x36, 0x04, 0xc0, 0x10, 0xf4,
0x95, 0x20, 0x03, 0xec, 0xdc, 0x05, 0xa1, 0x7d,
0xc4, 0xa9, 0x2c, 0x82, 0xd0, 0xbc, 0x8b, 0xc5,
0xc7, 0x45, 0x50, 0xf6, 0xa2, 0x1a, 0xb5, 0x46,
0x3b, 0x73, 0x02, 0xa6, 0x83, 0x4b, 0x73, 0x82,
0x58, 0x5e, 0x3b, 0x65, 0x2f, 0x0e, 0xfd, 0x2b,
0x59, 0x16, 0xce, 0xa1, 0x60, 0x9c, 0xe8, 0x3a,
0x99, 0xed, 0x8d, 0x5a, 0xcf, 0xf6, 0x83, 0xaf,
0xba, 0xd7, 0x73, 0x73, 0x40, 0x97, 0x3d, 0xca,
0xef, 0x07, 0x57, 0xe6, 0xd9, 0x70, 0x0e, 0x95,
0xae, 0xa6, 0x8d, 0x04, 0xcc, 0xee, 0xf7, 0x09,
0x31, 0x77, 0x12, 0xa3, 0x23, 0x97, 0x62, 0xb3,
0x7b, 0x32, 0xfb, 0x80, 0x14, 0x48, 0x81, 0xc3,
0xe5, 0xea, 0x91, 0x39, 0x52, 0x81, 0xa2, 0x4f,
0xe4, 0xb3, 0x09, 0xff, 0xde, 0x5e, 0xe9, 0x58,
0x84, 0x6e, 0xf9, 0x3d, 0xdf, 0x25, 0xea, 0xad,
0xae, 0xe6, 0x9a, 0xd1, 0x89, 0x55, 0xd3, 0xde,
0x6c, 0x52, 0xdb, 0x70, 0xfe, 0x37, 0xce, 0x44,
0x0a, 0xa8, 0x25, 0x5f, 0x92, 0xc1, 0x33, 0x4a,
0x4f, 0x9b, 0x62, 0x35, 0xff, 0xce, 0xc0, 0xa9,
0x60, 0xce, 0x52, 0x00, 0x97, 0x51, 0x35, 0x26,
0x2e, 0xb9, 0x36, 0xa9, 0x87, 0x6e, 0x1e, 0xcc,
0x91, 0x78, 0x53, 0x98, 0x86, 0x5b, 0x9c, 0x74,
0x7d, 0x88, 0x33, 0xe1, 0xdf, 0x37, 0x69, 0x2b,
0xbb, 0xf1, 0x4d, 0xf4, 0xd1, 0xf1, 0x39, 0x93,
0x17, 0x51, 0x19, 0xe3, 0x19, 0x1e, 0x76, 0x37,
0x25, 0xfb, 0x09, 0x27, 0x6a, 0xab, 0x67, 0x6f,
0x14, 0x12, 0x64, 0xe7, 0xc4, 0x07, 0xdf, 0x4d,
0x17, 0xbb, 0x6d, 0xe0, 0xe9, 0xb9, 0xab, 0xca,
0x10, 0x68, 0xaf, 0x7e, 0xb7, 0x33, 0x54, 0x73,
0x07, 0x6e, 0xf7, 0x81, 0x97, 0x9c, 0x05, 0x6f,
0x84, 0x5f, 0xd2, 0x42, 0xfb, 0x38, 0xcf, 0xd1,
0x2f, 0x14, 0x30, 0x88, 0x98, 0x4d, 0x5a, 0xa9,
0x76, 0xd5, 0x4f, 0x3e, 0x70, 0x6c, 0x85, 0x76,
0xd7, 0x01, 0xa0, 0x1a, 0xc8, 0x4e, 0xaa, 0xac,
0x78, 0xfe, 0x46, 0xde, 0x6a, 0x05, 0x46, 0xa7,
0x43, 0x0c, 0xb9, 0xde, 0xb9, 0x68, 0xfb, 0xce,
0x42, 0x99, 0x07, 0x4d, 0x0b, 0x3b, 0x5a, 0x30,
0x35, 0xa8, 0xf9, 0x3a, 0x73, 0xef, 0x0f, 0xdb,
0x1e, 0x16, 0x42, 0xc4, 0xba, 0xae, 0x58, 0xaa,
0xf8, 0xe5, 0x75, 0x2f, 0x1b, 0x15, 0x5c, 0xfd,
0x0a, 0x97, 0xd0, 0xe4, 0x37, 0x83, 0x61, 0x5f,
0x43, 0xa6, 0xc7, 0x3f, 0x38, 0x59, 0xe6, 0xeb,
0xa3, 0x90, 0xc3, 0xaa, 0xaa, 0x5a, 0xd3, 0x34,
0xd4, 0x17, 0xc8, 0x65, 0x3e, 0x57, 0xbc, 0x5e,
0xdd, 0x9e, 0xb7, 0xf0, 0x2e, 0x5b, 0xb2, 0x1f,
0x8a, 0x08, 0x0d, 0x45, 0x91, 0x0b, 0x29, 0x53,
0x4f, 0x4c, 0x5a, 0x73, 0x56, 0xfe, 0xaf, 0x41,
0x01, 0x39, 0x0a, 0x24, 0x3c, 0x7e, 0xbe, 0x4e,
0x53, 0xf3, 0xeb, 0x06, 0x66, 0x51, 0x28, 0x1d,
0xbd, 0x41, 0x0a, 0x01, 0xab, 0x16, 0x47, 0x27,
0x47, 0x47, 0xf7, 0xcb, 0x46, 0x0a, 0x70, 0x9e,
0x01, 0x9c, 0x09, 0xe1, 0x2a, 0x00, 0x1a, 0xd8,
0xd4, 0x79, 0x9d, 0x80, 0x15, 0x8e, 0x53, 0x2a,
0x65, 0x83, 0x78, 0x3e, 0x03, 0x00, 0x07, 0x12,
0x1f, 0x33, 0x3e, 0x7b, 0x13, 0x37, 0xf1, 0xc3,
0xef, 0xb7, 0xc1, 0x20, 0x3c, 0x3e, 0x67, 0x66,
0x5d, 0x88, 0xa7, 0x7d, 0x33, 0x50, 0x77, 0xb0,
0x28, 0x8e, 0xe7, 0x2c, 0x2e, 0x7a, 0xf4, 0x3c,
0x8d, 0x74, 0x83, 0xaf, 0x8e, 0x87, 0x0f, 0xe4,
0x50, 0xff, 0x84, 0x5c, 0x47, 0x0c, 0x6a, 0x49,
0xbf, 0x42, 0x86, 0x77, 0x15, 0x48, 0xa5, 0x90,
0x5d, 0x93, 0xd6, 0x2a, 0x11, 0xd5, 0xd5, 0x11,
0xaa, 0xce, 0xe7, 0x6f, 0xa5, 0xb0, 0x09, 0x2c,
0x8d, 0xd3, 0x92, 0xf0, 0x5a, 0x2a, 0xda, 0x5b,
0x1e, 0xd5, 0x9a, 0xc4, 0xc4, 0xf3, 0x49, 0x74,
0x41, 0xca, 0xe8, 0xc1, 0xf8, 0x44, 0xd6, 0x3c,
0xae, 0x6c, 0x1d, 0x9a, 0x30, 0x04, 0x4d, 0x27,
0x0e, 0xb1, 0x5f, 0x59, 0xa2, 0x24, 0xe8, 0xe1,
0x98, 0xc5, 0x6a, 0x4c, 0xfe, 0x41, 0xd2, 0x27,
0x42, 0x52, 0xe1, 0xe9, 0x7d, 0x62, 0xe4, 0x88,
0x0f, 0xad, 0xb2, 0x70, 0xcb, 0x9d, 0x4c, 0x27,
0x2e, 0x76, 0x1e, 0x1a, 0x63, 0x65, 0xf5, 0x3b,
0xf8, 0x57, 0x69, 0xeb, 0x5b, 0x38, 0x26, 0x39,
0x33, 0x25, 0x45, 0x3e, 0x91, 0xb8, 0xd8, 0xc7,
0xd5, 0x42, 0xc0, 0x22, 0x31, 0x74, 0xf4, 0xbc,
0x0c, 0x23, 0xf1, 0xca, 0xc1, 0x8d, 0xd7, 0xbe,
0xc9, 0x62, 0xe4, 0x08, 0x1a, 0xcf, 0x36, 0xd5,
0xfe, 0x55, 0x21, 0x59, 0x91, 0x87, 0x87, 0xdf,
0x06, 0xdb, 0xdf, 0x96, 0x45, 0x58, 0xda, 0x05,
0xcd, 0x50, 0x4d, 0xd2, 0x7d, 0x05, 0x18, 0x73,
0x6a, 0x8d, 0x11, 0x85, 0xa6, 0x88, 0xe8, 0xda,
0xe6, 0x30, 0x33, 0xa4, 0x89, 0x31, 0x75, 0xbe,
0x69, 0x43, 0x84, 0x43, 0x50, 0x87, 0xdd, 0x71,
0x36, 0x83, 0xc3, 0x78, 0x74, 0x24, 0x0a, 0xed,
0x7b, 0xdb, 0xa4, 0x24, 0x0b, 0xb9, 0x7e, 0x5d,
0xff, 0xde, 0xb1, 0xef, 0x61, 0x5a, 0x45, 0x33,
0xf6, 0x17, 0x07, 0x08, 0x98, 0x83, 0x92, 0x0f,
0x23, 0x6d, 0xe6, 0xaa, 0x17, 0x54, 0xad, 0x6a,
0xc8, 0xdb, 0x26, 0xbe, 0xb8, 0xb6, 0x08, 0xfa,
0x68, 0xf1, 0xd7, 0x79, 0x6f, 0x18, 0xb4, 0x9e,
0x2d, 0x3f, 0x1b, 0x64, 0xaf, 0x8d, 0x06, 0x0e,
0x49, 0x28, 0xe0, 0x5d, 0x45, 0x68, 0x13, 0x87,
0xfa, 0xde, 0x40, 0x7b, 0xd2, 0xc3, 0x94, 0xd5,
0xe1, 0xd9, 0xc2, 0xaf, 0x55, 0x89, 0xeb, 0xb4,
0x12, 0x59, 0xa8, 0xd4, 0xc5, 0x29, 0x66, 0x38,
0xe6, 0xac, 0x22, 0x22, 0xd9, 0x64, 0x9b, 0x34,
0x0a, 0x32, 0x9f, 0xc2, 0xbf, 0x17, 0x6c, 0x3f,
0x71, 0x7a, 0x38, 0x6b, 0x98, 0xfb, 0x49, 0x36,
0x89, 0xc9, 0xe2, 0xd6, 0xc7, 0x5d, 0xd0, 0x69,
0x5f, 0x23, 0x35, 0xc9, 0x30, 0xe2, 0xfd, 0x44,
0x58, 0x39, 0xd7, 0x97, 0xfb, 0x5c, 0x00, 0xd5,
0x4f, 0x7a, 0x1a, 0x95, 0x8b, 0x62, 0x4b, 0xce,
0xe5, 0x91, 0x21, 0x7b, 0x30, 0x00, 0xd6, 0xdd,
0x6d, 0x02, 0x86, 0x49, 0x0f, 0x3c, 0x1a, 0x27,
0x3c, 0xd3, 0x0e, 0x71, 0xf2, 0xff, 0xf5, 0x2f,
0x87, 0xac, 0x67, 0x59, 0x81, 0xa3, 0xf7, 0xf8,
0xd6, 0x11, 0x0c, 0x84, 0xa9, 0x03, 0xee, 0x2a,
0xc4, 0xf3, 0x22, 0xab, 0x7c, 0xe2, 0x25, 0xf5,
0x67, 0xa3, 0xe4, 0x11, 0xe0, 0x59, 0xb3, 0xca,
0x87, 0xa0, 0xae, 0xc9, 0xa6, 0x62, 0x1b, 0x6e,
0x4d, 0x02, 0x6b, 0x07, 0x9d, 0xfd, 0xd0, 0x92,
0x06, 0xe1, 0xb2, 0x9a, 0x4a, 0x1f, 0x1f, 0x13,
0x49, 0x99, 0x97, 0x08, 0xde, 0x7f, 0x98, 0xaf,
0x51, 0x98, 0xee, 0x2c, 0xcb, 0xf0, 0x0b, 0xc6,
0xb6, 0xb7, 0x2d, 0x9a, 0xb1, 0xac, 0xa6, 0xe3,
0x15, 0x77, 0x9d, 0x6b, 0x1a, 0xe4, 0xfc, 0x8b,
0xf2, 0x17, 0x59, 0x08, 0x04, 0x58, 0x81, 0x9d,
0x1b, 0x1b, 0x69, 0x55, 0xc2, 0xb4, 0x3c, 0x1f,
0x50, 0xf1, 0x7f, 0x77, 0x90, 0x4c, 0x66, 0x40,
0x5a, 0xc0, 0x33, 0x1f, 0xcb, 0x05, 0x6d, 0x5c,
0x06, 0x87, 0x52, 0xa2, 0x8f, 0x26, 0xd5, 0x4f
};
static const u8 enc_output010[] __initconst = {
0xe5, 0x26, 0xa4, 0x3d, 0xbd, 0x33, 0xd0, 0x4b,
0x6f, 0x05, 0xa7, 0x6e, 0x12, 0x7a, 0xd2, 0x74,
0xa6, 0xdd, 0xbd, 0x95, 0xeb, 0xf9, 0xa4, 0xf1,
0x59, 0x93, 0x91, 0x70, 0xd9, 0xfe, 0x9a, 0xcd,
0x53, 0x1f, 0x3a, 0xab, 0xa6, 0x7c, 0x9f, 0xa6,
0x9e, 0xbd, 0x99, 0xd9, 0xb5, 0x97, 0x44, 0xd5,
0x14, 0x48, 0x4d, 0x9d, 0xc0, 0xd0, 0x05, 0x96,
0xeb, 0x4c, 0x78, 0x55, 0x09, 0x08, 0x01, 0x02,
0x30, 0x90, 0x7b, 0x96, 0x7a, 0x7b, 0x5f, 0x30,
0x41, 0x24, 0xce, 0x68, 0x61, 0x49, 0x86, 0x57,
0x82, 0xdd, 0x53, 0x1c, 0x51, 0x28, 0x2b, 0x53,
0x6e, 0x2d, 0xc2, 0x20, 0x4c, 0xdd, 0x8f, 0x65,
0x10, 0x20, 0x50, 0xdd, 0x9d, 0x50, 0xe5, 0x71,
0x40, 0x53, 0x69, 0xfc, 0x77, 0x48, 0x11, 0xb9,
0xde, 0xa4, 0x8d, 0x58, 0xe4, 0xa6, 0x1a, 0x18,
0x47, 0x81, 0x7e, 0xfc, 0xdd, 0xf6, 0xef, 0xce,
0x2f, 0x43, 0x68, 0xd6, 0x06, 0xe2, 0x74, 0x6a,
0xad, 0x90, 0xf5, 0x37, 0xf3, 0x3d, 0x82, 0x69,
0x40, 0xe9, 0x6b, 0xa7, 0x3d, 0xa8, 0x1e, 0xd2,
0x02, 0x7c, 0xb7, 0x9b, 0xe4, 0xda, 0x8f, 0x95,
0x06, 0xc5, 0xdf, 0x73, 0xa3, 0x20, 0x9a, 0x49,
0xde, 0x9c, 0xbc, 0xee, 0x14, 0x3f, 0x81, 0x5e,
0xf8, 0x3b, 0x59, 0x3c, 0xe1, 0x68, 0x12, 0x5a,
0x3a, 0x76, 0x3a, 0x3f, 0xf7, 0x87, 0x33, 0x0a,
0x01, 0xb8, 0xd4, 0xed, 0xb6, 0xbe, 0x94, 0x5e,
0x70, 0x40, 0x56, 0x67, 0x1f, 0x50, 0x44, 0x19,
0xce, 0x82, 0x70, 0x10, 0x87, 0x13, 0x20, 0x0b,
0x4c, 0x5a, 0xb6, 0xf6, 0xa7, 0xae, 0x81, 0x75,
0x01, 0x81, 0xe6, 0x4b, 0x57, 0x7c, 0xdd, 0x6d,
0xf8, 0x1c, 0x29, 0x32, 0xf7, 0xda, 0x3c, 0x2d,
0xf8, 0x9b, 0x25, 0x6e, 0x00, 0xb4, 0xf7, 0x2f,
0xf7, 0x04, 0xf7, 0xa1, 0x56, 0xac, 0x4f, 0x1a,
0x64, 0xb8, 0x47, 0x55, 0x18, 0x7b, 0x07, 0x4d,
0xbd, 0x47, 0x24, 0x80, 0x5d, 0xa2, 0x70, 0xc5,
0xdd, 0x8e, 0x82, 0xd4, 0xeb, 0xec, 0xb2, 0x0c,
0x39, 0xd2, 0x97, 0xc1, 0xcb, 0xeb, 0xf4, 0x77,
0x59, 0xb4, 0x87, 0xef, 0xcb, 0x43, 0x2d, 0x46,
0x54, 0xd1, 0xa7, 0xd7, 0x15, 0x99, 0x0a, 0x43,
0xa1, 0xe0, 0x99, 0x33, 0x71, 0xc1, 0xed, 0xfe,
0x72, 0x46, 0x33, 0x8e, 0x91, 0x08, 0x9f, 0xc8,
0x2e, 0xca, 0xfa, 0xdc, 0x59, 0xd5, 0xc3, 0x76,
0x84, 0x9f, 0xa3, 0x37, 0x68, 0xc3, 0xf0, 0x47,
0x2c, 0x68, 0xdb, 0x5e, 0xc3, 0x49, 0x4c, 0xe8,
0x92, 0x85, 0xe2, 0x23, 0xd3, 0x3f, 0xad, 0x32,
0xe5, 0x2b, 0x82, 0xd7, 0x8f, 0x99, 0x0a, 0x59,
0x5c, 0x45, 0xd9, 0xb4, 0x51, 0x52, 0xc2, 0xae,
0xbf, 0x80, 0xcf, 0xc9, 0xc9, 0x51, 0x24, 0x2a,
0x3b, 0x3a, 0x4d, 0xae, 0xeb, 0xbd, 0x22, 0xc3,
0x0e, 0x0f, 0x59, 0x25, 0x92, 0x17, 0xe9, 0x74,
0xc7, 0x8b, 0x70, 0x70, 0x36, 0x55, 0x95, 0x75,
0x4b, 0xad, 0x61, 0x2b, 0x09, 0xbc, 0x82, 0xf2,
0x6e, 0x94, 0x43, 0xae, 0xc3, 0xd5, 0xcd, 0x8e,
0xfe, 0x5b, 0x9a, 0x88, 0x43, 0x01, 0x75, 0xb2,
0x23, 0x09, 0xf7, 0x89, 0x83, 0xe7, 0xfa, 0xf9,
0xb4, 0x9b, 0xf8, 0xef, 0xbd, 0x1c, 0x92, 0xc1,
0xda, 0x7e, 0xfe, 0x05, 0xba, 0x5a, 0xcd, 0x07,
0x6a, 0x78, 0x9e, 0x5d, 0xfb, 0x11, 0x2f, 0x79,
0x38, 0xb6, 0xc2, 0x5b, 0x6b, 0x51, 0xb4, 0x71,
0xdd, 0xf7, 0x2a, 0xe4, 0xf4, 0x72, 0x76, 0xad,
0xc2, 0xdd, 0x64, 0x5d, 0x79, 0xb6, 0xf5, 0x7a,
0x77, 0x20, 0x05, 0x3d, 0x30, 0x06, 0xd4, 0x4c,
0x0a, 0x2c, 0x98, 0x5a, 0xb9, 0xd4, 0x98, 0xa9,
0x3f, 0xc6, 0x12, 0xea, 0x3b, 0x4b, 0xc5, 0x79,
0x64, 0x63, 0x6b, 0x09, 0x54, 0x3b, 0x14, 0x27,
0xba, 0x99, 0x80, 0xc8, 0x72, 0xa8, 0x12, 0x90,
0x29, 0xba, 0x40, 0x54, 0x97, 0x2b, 0x7b, 0xfe,
0xeb, 0xcd, 0x01, 0x05, 0x44, 0x72, 0xdb, 0x99,
0xe4, 0x61, 0xc9, 0x69, 0xd6, 0xb9, 0x28, 0xd1,
0x05, 0x3e, 0xf9, 0x0b, 0x49, 0x0a, 0x49, 0xe9,
0x8d, 0x0e, 0xa7, 0x4a, 0x0f, 0xaf, 0x32, 0xd0,
0xe0, 0xb2, 0x3a, 0x55, 0x58, 0xfe, 0x5c, 0x28,
0x70, 0x51, 0x23, 0xb0, 0x7b, 0x6a, 0x5f, 0x1e,
0xb8, 0x17, 0xd7, 0x94, 0x15, 0x8f, 0xee, 0x20,
0xc7, 0x42, 0x25, 0x3e, 0x9a, 0x14, 0xd7, 0x60,
0x72, 0x39, 0x47, 0x48, 0xa9, 0xfe, 0xdd, 0x47,
0x0a, 0xb1, 0xe6, 0x60, 0x28, 0x8c, 0x11, 0x68,
0xe1, 0xff, 0xd7, 0xce, 0xc8, 0xbe, 0xb3, 0xfe,
0x27, 0x30, 0x09, 0x70, 0xd7, 0xfa, 0x02, 0x33,
0x3a, 0x61, 0x2e, 0xc7, 0xff, 0xa4, 0x2a, 0xa8,
0x6e, 0xb4, 0x79, 0x35, 0x6d, 0x4c, 0x1e, 0x38,
0xf8, 0xee, 0xd4, 0x84, 0x4e, 0x6e, 0x28, 0xa7,
0xce, 0xc8, 0xc1, 0xcf, 0x80, 0x05, 0xf3, 0x04,
0xef, 0xc8, 0x18, 0x28, 0x2e, 0x8d, 0x5e, 0x0c,
0xdf, 0xb8, 0x5f, 0x96, 0xe8, 0xc6, 0x9c, 0x2f,
0xe5, 0xa6, 0x44, 0xd7, 0xe7, 0x99, 0x44, 0x0c,
0xec, 0xd7, 0x05, 0x60, 0x97, 0xbb, 0x74, 0x77,
0x58, 0xd5, 0xbb, 0x48, 0xde, 0x5a, 0xb2, 0x54,
0x7f, 0x0e, 0x46, 0x70, 0x6a, 0x6f, 0x78, 0xa5,
0x08, 0x89, 0x05, 0x4e, 0x7e, 0xa0, 0x69, 0xb4,
0x40, 0x60, 0x55, 0x77, 0x75, 0x9b, 0x19, 0xf2,
0xd5, 0x13, 0x80, 0x77, 0xf9, 0x4b, 0x3f, 0x1e,
0xee, 0xe6, 0x76, 0x84, 0x7b, 0x8c, 0xe5, 0x27,
0xa8, 0x0a, 0x91, 0x01, 0x68, 0x71, 0x8a, 0x3f,
0x06, 0xab, 0xf6, 0xa9, 0xa5, 0xe6, 0x72, 0x92,
0xe4, 0x67, 0xe2, 0xa2, 0x46, 0x35, 0x84, 0x55,
0x7d, 0xca, 0xa8, 0x85, 0xd0, 0xf1, 0x3f, 0xbe,
0xd7, 0x34, 0x64, 0xfc, 0xae, 0xe3, 0xe4, 0x04,
0x9f, 0x66, 0x02, 0xb9, 0x88, 0x10, 0xd9, 0xc4,
0x4c, 0x31, 0x43, 0x7a, 0x93, 0xe2, 0x9b, 0x56,
0x43, 0x84, 0xdc, 0xdc, 0xde, 0x1d, 0xa4, 0x02,
0x0e, 0xc2, 0xef, 0xc3, 0xf8, 0x78, 0xd1, 0xb2,
0x6b, 0x63, 0x18, 0xc9, 0xa9, 0xe5, 0x72, 0xd8,
0xf3, 0xb9, 0xd1, 0x8a, 0xc7, 0x1a, 0x02, 0x27,
0x20, 0x77, 0x10, 0xe5, 0xc8, 0xd4, 0x4a, 0x47,
0xe5, 0xdf, 0x5f, 0x01, 0xaa, 0xb0, 0xd4, 0x10,
0xbb, 0x69, 0xe3, 0x36, 0xc8, 0xe1, 0x3d, 0x43,
0xfb, 0x86, 0xcd, 0xcc, 0xbf, 0xf4, 0x88, 0xe0,
0x20, 0xca, 0xb7, 0x1b, 0xf1, 0x2f, 0x5c, 0xee,
0xd4, 0xd3, 0xa3, 0xcc, 0xa4, 0x1e, 0x1c, 0x47,
0xfb, 0xbf, 0xfc, 0xa2, 0x41, 0x55, 0x9d, 0xf6,
0x5a, 0x5e, 0x65, 0x32, 0x34, 0x7b, 0x52, 0x8d,
0xd5, 0xd0, 0x20, 0x60, 0x03, 0xab, 0x3f, 0x8c,
0xd4, 0x21, 0xea, 0x2a, 0xd9, 0xc4, 0xd0, 0xd3,
0x65, 0xd8, 0x7a, 0x13, 0x28, 0x62, 0x32, 0x4b,
0x2c, 0x87, 0x93, 0xa8, 0xb4, 0x52, 0x45, 0x09,
0x44, 0xec, 0xec, 0xc3, 0x17, 0xdb, 0x9a, 0x4d,
0x5c, 0xa9, 0x11, 0xd4, 0x7d, 0xaf, 0x9e, 0xf1,
0x2d, 0xb2, 0x66, 0xc5, 0x1d, 0xed, 0xb7, 0xcd,
0x0b, 0x25, 0x5e, 0x30, 0x47, 0x3f, 0x40, 0xf4,
0xa1, 0xa0, 0x00, 0x94, 0x10, 0xc5, 0x6a, 0x63,
0x1a, 0xd5, 0x88, 0x92, 0x8e, 0x82, 0x39, 0x87,
0x3c, 0x78, 0x65, 0x58, 0x42, 0x75, 0x5b, 0xdd,
0x77, 0x3e, 0x09, 0x4e, 0x76, 0x5b, 0xe6, 0x0e,
0x4d, 0x38, 0xb2, 0xc0, 0xb8, 0x95, 0x01, 0x7a,
0x10, 0xe0, 0xfb, 0x07, 0xf2, 0xab, 0x2d, 0x8c,
0x32, 0xed, 0x2b, 0xc0, 0x46, 0xc2, 0xf5, 0x38,
0x83, 0xf0, 0x17, 0xec, 0xc1, 0x20, 0x6a, 0x9a,
0x0b, 0x00, 0xa0, 0x98, 0x22, 0x50, 0x23, 0xd5,
0x80, 0x6b, 0xf6, 0x1f, 0xc3, 0xcc, 0x97, 0xc9,
0x24, 0x9f, 0xf3, 0xaf, 0x43, 0x14, 0xd5, 0xa0
};
static const u8 enc_assoc010[] __initconst = {
0xd2, 0xa1, 0x70, 0xdb, 0x7a, 0xf8, 0xfa, 0x27,
0xba, 0x73, 0x0f, 0xbf, 0x3d, 0x1e, 0x82, 0xb2
};
static const u8 enc_nonce010[] __initconst = {
0xdb, 0x92, 0x0f, 0x7f, 0x17, 0x54, 0x0c, 0x30
};
static const u8 enc_key010[] __initconst = {
0x47, 0x11, 0xeb, 0x86, 0x2b, 0x2c, 0xab, 0x44,
0x34, 0xda, 0x7f, 0x57, 0x03, 0x39, 0x0c, 0xaf,
0x2c, 0x14, 0xfd, 0x65, 0x23, 0xe9, 0x8e, 0x74,
0xd5, 0x08, 0x68, 0x08, 0xe7, 0xb4, 0x72, 0xd7
};
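/* Vector 011: 1933-byte plaintext, 7-byte AAD; 1949-byte output. */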
static const u8 enc_input011[] __initconst = {
0x7a, 0x57, 0xf2, 0xc7, 0x06, 0x3f, 0x50, 0x7b,
0x36, 0x1a, 0x66, 0x5c, 0xb9, 0x0e, 0x5e, 0x3b,
0x45, 0x60, 0xbe, 0x9a, 0x31, 0x9f, 0xff, 0x5d,
0x66, 0x34, 0xb4, 0xdc, 0xfb, 0x9d, 0x8e, 0xee,
0x6a, 0x33, 0xa4, 0x07, 0x3c, 0xf9, 0x4c, 0x30,
0xa1, 0x24, 0x52, 0xf9, 0x50, 0x46, 0x88, 0x20,
0x02, 0x32, 0x3a, 0x0e, 0x99, 0x63, 0xaf, 0x1f,
0x15, 0x28, 0x2a, 0x05, 0xff, 0x57, 0x59, 0x5e,
0x18, 0xa1, 0x1f, 0xd0, 0x92, 0x5c, 0x88, 0x66,
0x1b, 0x00, 0x64, 0xa5, 0x93, 0x8d, 0x06, 0x46,
0xb0, 0x64, 0x8b, 0x8b, 0xef, 0x99, 0x05, 0x35,
0x85, 0xb3, 0xf3, 0x33, 0xbb, 0xec, 0x66, 0xb6,
0x3d, 0x57, 0x42, 0xe3, 0xb4, 0xc6, 0xaa, 0xb0,
0x41, 0x2a, 0xb9, 0x59, 0xa9, 0xf6, 0x3e, 0x15,
0x26, 0x12, 0x03, 0x21, 0x4c, 0x74, 0x43, 0x13,
0x2a, 0x03, 0x27, 0x09, 0xb4, 0xfb, 0xe7, 0xb7,
0x40, 0xff, 0x5e, 0xce, 0x48, 0x9a, 0x60, 0xe3,
0x8b, 0x80, 0x8c, 0x38, 0x2d, 0xcb, 0x93, 0x37,
0x74, 0x05, 0x52, 0x6f, 0x73, 0x3e, 0xc3, 0xbc,
0xca, 0x72, 0x0a, 0xeb, 0xf1, 0x3b, 0xa0, 0x95,
0xdc, 0x8a, 0xc4, 0xa9, 0xdc, 0xca, 0x44, 0xd8,
0x08, 0x63, 0x6a, 0x36, 0xd3, 0x3c, 0xb8, 0xac,
0x46, 0x7d, 0xfd, 0xaa, 0xeb, 0x3e, 0x0f, 0x45,
0x8f, 0x49, 0xda, 0x2b, 0xf2, 0x12, 0xbd, 0xaf,
0x67, 0x8a, 0x63, 0x48, 0x4b, 0x55, 0x5f, 0x6d,
0x8c, 0xb9, 0x76, 0x34, 0x84, 0xae, 0xc2, 0xfc,
0x52, 0x64, 0x82, 0xf7, 0xb0, 0x06, 0xf0, 0x45,
0x73, 0x12, 0x50, 0x30, 0x72, 0xea, 0x78, 0x9a,
0xa8, 0xaf, 0xb5, 0xe3, 0xbb, 0x77, 0x52, 0xec,
0x59, 0x84, 0xbf, 0x6b, 0x8f, 0xce, 0x86, 0x5e,
0x1f, 0x23, 0xe9, 0xfb, 0x08, 0x86, 0xf7, 0x10,
0xb9, 0xf2, 0x44, 0x96, 0x44, 0x63, 0xa9, 0xa8,
0x78, 0x00, 0x23, 0xd6, 0xc7, 0xe7, 0x6e, 0x66,
0x4f, 0xcc, 0xee, 0x15, 0xb3, 0xbd, 0x1d, 0xa0,
0xe5, 0x9c, 0x1b, 0x24, 0x2c, 0x4d, 0x3c, 0x62,
0x35, 0x9c, 0x88, 0x59, 0x09, 0xdd, 0x82, 0x1b,
0xcf, 0x0a, 0x83, 0x6b, 0x3f, 0xae, 0x03, 0xc4,
0xb4, 0xdd, 0x7e, 0x5b, 0x28, 0x76, 0x25, 0x96,
0xd9, 0xc9, 0x9d, 0x5f, 0x86, 0xfa, 0xf6, 0xd7,
0xd2, 0xe6, 0x76, 0x1d, 0x0f, 0xa1, 0xdc, 0x74,
0x05, 0x1b, 0x1d, 0xe0, 0xcd, 0x16, 0xb0, 0xa8,
0x8a, 0x34, 0x7b, 0x15, 0x11, 0x77, 0xe5, 0x7b,
0x7e, 0x20, 0xf7, 0xda, 0x38, 0xda, 0xce, 0x70,
0xe9, 0xf5, 0x6c, 0xd9, 0xbe, 0x0c, 0x4c, 0x95,
0x4c, 0xc2, 0x9b, 0x34, 0x55, 0x55, 0xe1, 0xf3,
0x46, 0x8e, 0x48, 0x74, 0x14, 0x4f, 0x9d, 0xc9,
0xf5, 0xe8, 0x1a, 0xf0, 0x11, 0x4a, 0xc1, 0x8d,
0xe0, 0x93, 0xa0, 0xbe, 0x09, 0x1c, 0x2b, 0x4e,
0x0f, 0xb2, 0x87, 0x8b, 0x84, 0xfe, 0x92, 0x32,
0x14, 0xd7, 0x93, 0xdf, 0xe7, 0x44, 0xbc, 0xc5,
0xae, 0x53, 0x69, 0xd8, 0xb3, 0x79, 0x37, 0x80,
0xe3, 0x17, 0x5c, 0xec, 0x53, 0x00, 0x9a, 0xe3,
0x8e, 0xdc, 0x38, 0xb8, 0x66, 0xf0, 0xd3, 0xad,
0x1d, 0x02, 0x96, 0x86, 0x3e, 0x9d, 0x3b, 0x5d,
0xa5, 0x7f, 0x21, 0x10, 0xf1, 0x1f, 0x13, 0x20,
0xf9, 0x57, 0x87, 0x20, 0xf5, 0x5f, 0xf1, 0x17,
0x48, 0x0a, 0x51, 0x5a, 0xcd, 0x19, 0x03, 0xa6,
0x5a, 0xd1, 0x12, 0x97, 0xe9, 0x48, 0xe2, 0x1d,
0x83, 0x75, 0x50, 0xd9, 0x75, 0x7d, 0x6a, 0x82,
0xa1, 0xf9, 0x4e, 0x54, 0x87, 0x89, 0xc9, 0x0c,
0xb7, 0x5b, 0x6a, 0x91, 0xc1, 0x9c, 0xb2, 0xa9,
0xdc, 0x9a, 0xa4, 0x49, 0x0a, 0x6d, 0x0d, 0xbb,
0xde, 0x86, 0x44, 0xdd, 0x5d, 0x89, 0x2b, 0x96,
0x0f, 0x23, 0x95, 0xad, 0xcc, 0xa2, 0xb3, 0xb9,
0x7e, 0x74, 0x38, 0xba, 0x9f, 0x73, 0xae, 0x5f,
0xf8, 0x68, 0xa2, 0xe0, 0xa9, 0xce, 0xbd, 0x40,
0xd4, 0x4c, 0x6b, 0xd2, 0x56, 0x62, 0xb0, 0xcc,
0x63, 0x7e, 0x5b, 0xd3, 0xae, 0xd1, 0x75, 0xce,
0xbb, 0xb4, 0x5b, 0xa8, 0xf8, 0xb4, 0xac, 0x71,
0x75, 0xaa, 0xc9, 0x9f, 0xbb, 0x6c, 0xad, 0x0f,
0x55, 0x5d, 0xe8, 0x85, 0x7d, 0xf9, 0x21, 0x35,
0xea, 0x92, 0x85, 0x2b, 0x00, 0xec, 0x84, 0x90,
0x0a, 0x63, 0x96, 0xe4, 0x6b, 0xa9, 0x77, 0xb8,
0x91, 0xf8, 0x46, 0x15, 0x72, 0x63, 0x70, 0x01,
0x40, 0xa3, 0xa5, 0x76, 0x62, 0x2b, 0xbf, 0xf1,
0xe5, 0x8d, 0x9f, 0xa3, 0xfa, 0x9b, 0x03, 0xbe,
0xfe, 0x65, 0x6f, 0xa2, 0x29, 0x0d, 0x54, 0xb4,
0x71, 0xce, 0xa9, 0xd6, 0x3d, 0x88, 0xf9, 0xaf,
0x6b, 0xa8, 0x9e, 0xf4, 0x16, 0x96, 0x36, 0xb9,
0x00, 0xdc, 0x10, 0xab, 0xb5, 0x08, 0x31, 0x1f,
0x00, 0xb1, 0x3c, 0xd9, 0x38, 0x3e, 0xc6, 0x04,
0xa7, 0x4e, 0xe8, 0xae, 0xed, 0x98, 0xc2, 0xf7,
0xb9, 0x00, 0x5f, 0x8c, 0x60, 0xd1, 0xe5, 0x15,
0xf7, 0xae, 0x1e, 0x84, 0x88, 0xd1, 0xf6, 0xbc,
0x3a, 0x89, 0x35, 0x22, 0x83, 0x7c, 0xca, 0xf0,
0x33, 0x82, 0x4c, 0x79, 0x3c, 0xfd, 0xb1, 0xae,
0x52, 0x62, 0x55, 0xd2, 0x41, 0x60, 0xc6, 0xbb,
0xfa, 0x0e, 0x59, 0xd6, 0xa8, 0xfe, 0x5d, 0xed,
0x47, 0x3d, 0xe0, 0xea, 0x1f, 0x6e, 0x43, 0x51,
0xec, 0x10, 0x52, 0x56, 0x77, 0x42, 0x6b, 0x52,
0x87, 0xd8, 0xec, 0xe0, 0xaa, 0x76, 0xa5, 0x84,
0x2a, 0x22, 0x24, 0xfd, 0x92, 0x40, 0x88, 0xd5,
0x85, 0x1c, 0x1f, 0x6b, 0x47, 0xa0, 0xc4, 0xe4,
0xef, 0xf4, 0xea, 0xd7, 0x59, 0xac, 0x2a, 0x9e,
0x8c, 0xfa, 0x1f, 0x42, 0x08, 0xfe, 0x4f, 0x74,
0xa0, 0x26, 0xf5, 0xb3, 0x84, 0xf6, 0x58, 0x5f,
0x26, 0x66, 0x3e, 0xd7, 0xe4, 0x22, 0x91, 0x13,
0xc8, 0xac, 0x25, 0x96, 0x23, 0xd8, 0x09, 0xea,
0x45, 0x75, 0x23, 0xb8, 0x5f, 0xc2, 0x90, 0x8b,
0x09, 0xc4, 0xfc, 0x47, 0x6c, 0x6d, 0x0a, 0xef,
0x69, 0xa4, 0x38, 0x19, 0xcf, 0x7d, 0xf9, 0x09,
0x73, 0x9b, 0x60, 0x5a, 0xf7, 0x37, 0xb5, 0xfe,
0x9f, 0xe3, 0x2b, 0x4c, 0x0d, 0x6e, 0x19, 0xf1,
0xd6, 0xc0, 0x70, 0xf3, 0x9d, 0x22, 0x3c, 0xf9,
0x49, 0xce, 0x30, 0x8e, 0x44, 0xb5, 0x76, 0x15,
0x8f, 0x52, 0xfd, 0xa5, 0x04, 0xb8, 0x55, 0x6a,
0x36, 0x59, 0x7c, 0xc4, 0x48, 0xb8, 0xd7, 0xab,
0x05, 0x66, 0xe9, 0x5e, 0x21, 0x6f, 0x6b, 0x36,
0x29, 0xbb, 0xe9, 0xe3, 0xa2, 0x9a, 0xa8, 0xcd,
0x55, 0x25, 0x11, 0xba, 0x5a, 0x58, 0xa0, 0xde,
0xae, 0x19, 0x2a, 0x48, 0x5a, 0xff, 0x36, 0xcd,
0x6d, 0x16, 0x7a, 0x73, 0x38, 0x46, 0xe5, 0x47,
0x59, 0xc8, 0xa2, 0xf6, 0xe2, 0x6c, 0x83, 0xc5,
0x36, 0x2c, 0x83, 0x7d, 0xb4, 0x01, 0x05, 0x69,
0xe7, 0xaf, 0x5c, 0xc4, 0x64, 0x82, 0x12, 0x21,
0xef, 0xf7, 0xd1, 0x7d, 0xb8, 0x8d, 0x8c, 0x98,
0x7c, 0x5f, 0x7d, 0x92, 0x88, 0xb9, 0x94, 0x07,
0x9c, 0xd8, 0xe9, 0x9c, 0x17, 0x38, 0xe3, 0x57,
0x6c, 0xe0, 0xdc, 0xa5, 0x92, 0x42, 0xb3, 0xbd,
0x50, 0xa2, 0x7e, 0xb5, 0xb1, 0x52, 0x72, 0x03,
0x97, 0xd8, 0xaa, 0x9a, 0x1e, 0x75, 0x41, 0x11,
0xa3, 0x4f, 0xcc, 0xd4, 0xe3, 0x73, 0xad, 0x96,
0xdc, 0x47, 0x41, 0x9f, 0xb0, 0xbe, 0x79, 0x91,
0xf5, 0xb6, 0x18, 0xfe, 0xc2, 0x83, 0x18, 0x7d,
0x73, 0xd9, 0x4f, 0x83, 0x84, 0x03, 0xb3, 0xf0,
0x77, 0x66, 0x3d, 0x83, 0x63, 0x2e, 0x2c, 0xf9,
0xdd, 0xa6, 0x1f, 0x89, 0x82, 0xb8, 0x23, 0x42,
0xeb, 0xe2, 0xca, 0x70, 0x82, 0x61, 0x41, 0x0a,
0x6d, 0x5f, 0x75, 0xc5, 0xe2, 0xc4, 0x91, 0x18,
0x44, 0x22, 0xfa, 0x34, 0x10, 0xf5, 0x20, 0xdc,
0xb7, 0xdd, 0x2a, 0x20, 0x77, 0xf5, 0xf9, 0xce,
0xdb, 0xa0, 0x0a, 0x52, 0x2a, 0x4e, 0xdd, 0xcc,
0x97, 0xdf, 0x05, 0xe4, 0x5e, 0xb7, 0xaa, 0xf0,
0xe2, 0x80, 0xff, 0xba, 0x1a, 0x0f, 0xac, 0xdf,
0x02, 0x32, 0xe6, 0xf7, 0xc7, 0x17, 0x13, 0xb7,
0xfc, 0x98, 0x48, 0x8c, 0x0d, 0x82, 0xc9, 0x80,
0x7a, 0xe2, 0x0a, 0xc5, 0xb4, 0xde, 0x7c, 0x3c,
0x79, 0x81, 0x0e, 0x28, 0x65, 0x79, 0x67, 0x82,
0x69, 0x44, 0x66, 0x09, 0xf7, 0x16, 0x1a, 0xf9,
0x7d, 0x80, 0xa1, 0x79, 0x14, 0xa9, 0xc8, 0x20,
0xfb, 0xa2, 0x46, 0xbe, 0x08, 0x35, 0x17, 0x58,
0xc1, 0x1a, 0xda, 0x2a, 0x6b, 0x2e, 0x1e, 0xe6,
0x27, 0x55, 0x7b, 0x19, 0xe2, 0xfb, 0x64, 0xfc,
0x5e, 0x15, 0x54, 0x3c, 0xe7, 0xc2, 0x11, 0x50,
0x30, 0xb8, 0x72, 0x03, 0x0b, 0x1a, 0x9f, 0x86,
0x27, 0x11, 0x5c, 0x06, 0x2b, 0xbd, 0x75, 0x1a,
0x0a, 0xda, 0x01, 0xfa, 0x5c, 0x4a, 0xc1, 0x80,
0x3a, 0x6e, 0x30, 0xc8, 0x2c, 0xeb, 0x56, 0xec,
0x89, 0xfa, 0x35, 0x7b, 0xb2, 0xf0, 0x97, 0x08,
0x86, 0x53, 0xbe, 0xbd, 0x40, 0x41, 0x38, 0x1c,
0xb4, 0x8b, 0x79, 0x2e, 0x18, 0x96, 0x94, 0xde,
0xe8, 0xca, 0xe5, 0x9f, 0x92, 0x9f, 0x15, 0x5d,
0x56, 0x60, 0x5c, 0x09, 0xf9, 0x16, 0xf4, 0x17,
0x0f, 0xf6, 0x4c, 0xda, 0xe6, 0x67, 0x89, 0x9f,
0xca, 0x6c, 0xe7, 0x9b, 0x04, 0x62, 0x0e, 0x26,
0xa6, 0x52, 0xbd, 0x29, 0xff, 0xc7, 0xa4, 0x96,
0xe6, 0x6a, 0x02, 0xa5, 0x2e, 0x7b, 0xfe, 0x97,
0x68, 0x3e, 0x2e, 0x5f, 0x3b, 0x0f, 0x36, 0xd6,
0x98, 0x19, 0x59, 0x48, 0xd2, 0xc6, 0xe1, 0x55,
0x1a, 0x6e, 0xd6, 0xed, 0x2c, 0xba, 0xc3, 0x9e,
0x64, 0xc9, 0x95, 0x86, 0x35, 0x5e, 0x3e, 0x88,
0x69, 0x99, 0x4b, 0xee, 0xbe, 0x9a, 0x99, 0xb5,
0x6e, 0x58, 0xae, 0xdd, 0x22, 0xdb, 0xdd, 0x6b,
0xfc, 0xaf, 0x90, 0xa3, 0x3d, 0xa4, 0xc1, 0x15,
0x92, 0x18, 0x8d, 0xd2, 0x4b, 0x7b, 0x06, 0xd1,
0x37, 0xb5, 0xe2, 0x7c, 0x2c, 0xf0, 0x25, 0xe4,
0x94, 0x2a, 0xbd, 0xe3, 0x82, 0x70, 0x78, 0xa3,
0x82, 0x10, 0x5a, 0x90, 0xd7, 0xa4, 0xfa, 0xaf,
0x1a, 0x88, 0x59, 0xdc, 0x74, 0x12, 0xb4, 0x8e,
0xd7, 0x19, 0x46, 0xf4, 0x84, 0x69, 0x9f, 0xbb,
0x70, 0xa8, 0x4c, 0x52, 0x81, 0xa9, 0xff, 0x76,
0x1c, 0xae, 0xd8, 0x11, 0x3d, 0x7f, 0x7d, 0xc5,
0x12, 0x59, 0x28, 0x18, 0xc2, 0xa2, 0xb7, 0x1c,
0x88, 0xf8, 0xd6, 0x1b, 0xa6, 0x7d, 0x9e, 0xde,
0x29, 0xf8, 0xed, 0xff, 0xeb, 0x92, 0x24, 0x4f,
0x05, 0xaa, 0xd9, 0x49, 0xba, 0x87, 0x59, 0x51,
0xc9, 0x20, 0x5c, 0x9b, 0x74, 0xcf, 0x03, 0xd9,
0x2d, 0x34, 0xc7, 0x5b, 0xa5, 0x40, 0xb2, 0x99,
0xf5, 0xcb, 0xb4, 0xf6, 0xb7, 0x72, 0x4a, 0xd6,
0xbd, 0xb0, 0xf3, 0x93, 0xe0, 0x1b, 0xa8, 0x04,
0x1e, 0x35, 0xd4, 0x80, 0x20, 0xf4, 0x9c, 0x31,
0x6b, 0x45, 0xb9, 0x15, 0xb0, 0x5e, 0xdd, 0x0a,
0x33, 0x9c, 0x83, 0xcd, 0x58, 0x89, 0x50, 0x56,
0xbb, 0x81, 0x00, 0x91, 0x32, 0xf3, 0x1b, 0x3e,
0xcf, 0x45, 0xe1, 0xf9, 0xe1, 0x2c, 0x26, 0x78,
0x93, 0x9a, 0x60, 0x46, 0xc9, 0xb5, 0x5e, 0x6a,
0x28, 0x92, 0x87, 0x3f, 0x63, 0x7b, 0xdb, 0xf7,
0xd0, 0x13, 0x9d, 0x32, 0x40, 0x5e, 0xcf, 0xfb,
0x79, 0x68, 0x47, 0x4c, 0xfd, 0x01, 0x17, 0xe6,
0x97, 0x93, 0x78, 0xbb, 0xa6, 0x27, 0xa3, 0xe8,
0x1a, 0xe8, 0x94, 0x55, 0x7d, 0x08, 0xe5, 0xdc,
0x66, 0xa3, 0x69, 0xc8, 0xca, 0xc5, 0xa1, 0x84,
0x55, 0xde, 0x08, 0x91, 0x16, 0x3a, 0x0c, 0x86,
0xab, 0x27, 0x2b, 0x64, 0x34, 0x02, 0x6c, 0x76,
0x8b, 0xc6, 0xaf, 0xcc, 0xe1, 0xd6, 0x8c, 0x2a,
0x18, 0x3d, 0xa6, 0x1b, 0x37, 0x75, 0x45, 0x73,
0xc2, 0x75, 0xd7, 0x53, 0x78, 0x3a, 0xd6, 0xe8,
0x29, 0xd2, 0x4a, 0xa8, 0x1e, 0x82, 0xf6, 0xb6,
0x81, 0xde, 0x21, 0xed, 0x2b, 0x56, 0xbb, 0xf2,
0xd0, 0x57, 0xc1, 0x7c, 0xd2, 0x6a, 0xd2, 0x56,
0xf5, 0x13, 0x5f, 0x1c, 0x6a, 0x0b, 0x74, 0xfb,
0xe9, 0xfe, 0x9e, 0xea, 0x95, 0xb2, 0x46, 0xab,
0x0a, 0xfc, 0xfd, 0xf3, 0xbb, 0x04, 0x2b, 0x76,
0x1b, 0xa4, 0x74, 0xb0, 0xc1, 0x78, 0xc3, 0x69,
0xe2, 0xb0, 0x01, 0xe1, 0xde, 0x32, 0x4c, 0x8d,
0x1a, 0xb3, 0x38, 0x08, 0xd5, 0xfc, 0x1f, 0xdc,
0x0e, 0x2c, 0x9c, 0xb1, 0xa1, 0x63, 0x17, 0x22,
0xf5, 0x6c, 0x93, 0x70, 0x74, 0x00, 0xf8, 0x39,
0x01, 0x94, 0xd1, 0x32, 0x23, 0x56, 0x5d, 0xa6,
0x02, 0x76, 0x76, 0x93, 0xce, 0x2f, 0x19, 0xe9,
0x17, 0x52, 0xae, 0x6e, 0x2c, 0x6d, 0x61, 0x7f,
0x3b, 0xaa, 0xe0, 0x52, 0x85, 0xc5, 0x65, 0xc1,
0xbb, 0x8e, 0x5b, 0x21, 0xd5, 0xc9, 0x78, 0x83,
0x07, 0x97, 0x4c, 0x62, 0x61, 0x41, 0xd4, 0xfc,
0xc9, 0x39, 0xe3, 0x9b, 0xd0, 0xcc, 0x75, 0xc4,
0x97, 0xe6, 0xdd, 0x2a, 0x5f, 0xa6, 0xe8, 0x59,
0x6c, 0x98, 0xb9, 0x02, 0xe2, 0xa2, 0xd6, 0x68,
0xee, 0x3b, 0x1d, 0xe3, 0x4d, 0x5b, 0x30, 0xef,
0x03, 0xf2, 0xeb, 0x18, 0x57, 0x36, 0xe8, 0xa1,
0xf4, 0x47, 0xfb, 0xcb, 0x8f, 0xcb, 0xc8, 0xf3,
0x4f, 0x74, 0x9d, 0x9d, 0xb1, 0x8d, 0x14, 0x44,
0xd9, 0x19, 0xb4, 0x54, 0x4f, 0x75, 0x19, 0x09,
0xa0, 0x75, 0xbc, 0x3b, 0x82, 0xc6, 0x3f, 0xb8,
0x83, 0x19, 0x6e, 0xd6, 0x37, 0xfe, 0x6e, 0x8a,
0x4e, 0xe0, 0x4a, 0xab, 0x7b, 0xc8, 0xb4, 0x1d,
0xf4, 0xed, 0x27, 0x03, 0x65, 0xa2, 0xa1, 0xae,
0x11, 0xe7, 0x98, 0x78, 0x48, 0x91, 0xd2, 0xd2,
0xd4, 0x23, 0x78, 0x50, 0xb1, 0x5b, 0x85, 0x10,
0x8d, 0xca, 0x5f, 0x0f, 0x71, 0xae, 0x72, 0x9a,
0xf6, 0x25, 0x19, 0x60, 0x06, 0xf7, 0x10, 0x34,
0x18, 0x0d, 0xc9, 0x9f, 0x7b, 0x0c, 0x9b, 0x8f,
0x91, 0x1b, 0x9f, 0xcd, 0x10, 0xee, 0x75, 0xf9,
0x97, 0x66, 0xfc, 0x4d, 0x33, 0x6e, 0x28, 0x2b,
0x92, 0x85, 0x4f, 0xab, 0x43, 0x8d, 0x8f, 0x7d,
0x86, 0xa7, 0xc7, 0xd8, 0xd3, 0x0b, 0x8b, 0x57,
0xb6, 0x1d, 0x95, 0x0d, 0xe9, 0xbc, 0xd9, 0x03,
0xd9, 0x10, 0x19, 0xc3, 0x46, 0x63, 0x55, 0x87,
0x61, 0x79, 0x6c, 0x95, 0x0e, 0x9c, 0xdd, 0xca,
0xc3, 0xf3, 0x64, 0xf0, 0x7d, 0x76, 0xb7, 0x53,
0x67, 0x2b, 0x1e, 0x44, 0x56, 0x81, 0xea, 0x8f,
0x5c, 0x42, 0x16, 0xb8, 0x28, 0xeb, 0x1b, 0x61,
0x10, 0x1e, 0xbf, 0xec, 0xa8
};
static const u8 enc_output011[] __initconst = {
0x6a, 0xfc, 0x4b, 0x25, 0xdf, 0xc0, 0xe4, 0xe8,
0x17, 0x4d, 0x4c, 0xc9, 0x7e, 0xde, 0x3a, 0xcc,
0x3c, 0xba, 0x6a, 0x77, 0x47, 0xdb, 0xe3, 0x74,
0x7a, 0x4d, 0x5f, 0x8d, 0x37, 0x55, 0x80, 0x73,
0x90, 0x66, 0x5d, 0x3a, 0x7d, 0x5d, 0x86, 0x5e,
0x8d, 0xfd, 0x83, 0xff, 0x4e, 0x74, 0x6f, 0xf9,
0xe6, 0x70, 0x17, 0x70, 0x3e, 0x96, 0xa7, 0x7e,
0xcb, 0xab, 0x8f, 0x58, 0x24, 0x9b, 0x01, 0xfd,
0xcb, 0xe6, 0x4d, 0x9b, 0xf0, 0x88, 0x94, 0x57,
0x66, 0xef, 0x72, 0x4c, 0x42, 0x6e, 0x16, 0x19,
0x15, 0xea, 0x70, 0x5b, 0xac, 0x13, 0xdb, 0x9f,
0x18, 0xe2, 0x3c, 0x26, 0x97, 0xbc, 0xdc, 0x45,
0x8c, 0x6c, 0x24, 0x69, 0x9c, 0xf7, 0x65, 0x1e,
0x18, 0x59, 0x31, 0x7c, 0xe4, 0x73, 0xbc, 0x39,
0x62, 0xc6, 0x5c, 0x9f, 0xbf, 0xfa, 0x90, 0x03,
0xc9, 0x72, 0x26, 0xb6, 0x1b, 0xc2, 0xb7, 0x3f,
0xf2, 0x13, 0x77, 0xf2, 0x8d, 0xb9, 0x47, 0xd0,
0x53, 0xdd, 0xc8, 0x91, 0x83, 0x8b, 0xb1, 0xce,
0xa3, 0xfe, 0xcd, 0xd9, 0xdd, 0x92, 0x7b, 0xdb,
0xb8, 0xfb, 0xc9, 0x2d, 0x01, 0x59, 0x39, 0x52,
0xad, 0x1b, 0xec, 0xcf, 0xd7, 0x70, 0x13, 0x21,
0xf5, 0x47, 0xaa, 0x18, 0x21, 0x5c, 0xc9, 0x9a,
0xd2, 0x6b, 0x05, 0x9c, 0x01, 0xa1, 0xda, 0x35,
0x5d, 0xb3, 0x70, 0xe6, 0xa9, 0x80, 0x8b, 0x91,
0xb7, 0xb3, 0x5f, 0x24, 0x9a, 0xb7, 0xd1, 0x6b,
0xa1, 0x1c, 0x50, 0xba, 0x49, 0xe0, 0xee, 0x2e,
0x75, 0xac, 0x69, 0xc0, 0xeb, 0x03, 0xdd, 0x19,
0xe5, 0xf6, 0x06, 0xdd, 0xc3, 0xd7, 0x2b, 0x07,
0x07, 0x30, 0xa7, 0x19, 0x0c, 0xbf, 0xe6, 0x18,
0xcc, 0xb1, 0x01, 0x11, 0x85, 0x77, 0x1d, 0x96,
0xa7, 0xa3, 0x00, 0x84, 0x02, 0xa2, 0x83, 0x68,
0xda, 0x17, 0x27, 0xc8, 0x7f, 0x23, 0xb7, 0xf4,
0x13, 0x85, 0xcf, 0xdd, 0x7a, 0x7d, 0x24, 0x57,
0xfe, 0x05, 0x93, 0xf5, 0x74, 0xce, 0xed, 0x0c,
0x20, 0x98, 0x8d, 0x92, 0x30, 0xa1, 0x29, 0x23,
0x1a, 0xa0, 0x4f, 0x69, 0x56, 0x4c, 0xe1, 0xc8,
0xce, 0xf6, 0x9a, 0x0c, 0xa4, 0xfa, 0x04, 0xf6,
0x62, 0x95, 0xf2, 0xfa, 0xc7, 0x40, 0x68, 0x40,
0x8f, 0x41, 0xda, 0xb4, 0x26, 0x6f, 0x70, 0xab,
0x40, 0x61, 0xa4, 0x0e, 0x75, 0xfb, 0x86, 0xeb,
0x9d, 0x9a, 0x1f, 0xec, 0x76, 0x99, 0xe7, 0xea,
0xaa, 0x1e, 0x2d, 0xb5, 0xd4, 0xa6, 0x1a, 0xb8,
0x61, 0x0a, 0x1d, 0x16, 0x5b, 0x98, 0xc2, 0x31,
0x40, 0xe7, 0x23, 0x1d, 0x66, 0x99, 0xc8, 0xc0,
0xd7, 0xce, 0xf3, 0x57, 0x40, 0x04, 0x3f, 0xfc,
0xea, 0xb3, 0xfc, 0xd2, 0xd3, 0x99, 0xa4, 0x94,
0x69, 0xa0, 0xef, 0xd1, 0x85, 0xb3, 0xa6, 0xb1,
0x28, 0xbf, 0x94, 0x67, 0x22, 0xc3, 0x36, 0x46,
0xf8, 0xd2, 0x0f, 0x5f, 0xf4, 0x59, 0x80, 0xe6,
0x2d, 0x43, 0x08, 0x7d, 0x19, 0x09, 0x97, 0xa7,
0x4c, 0x3d, 0x8d, 0xba, 0x65, 0x62, 0xa3, 0x71,
0x33, 0x29, 0x62, 0xdb, 0xc1, 0x33, 0x34, 0x1a,
0x63, 0x33, 0x16, 0xb6, 0x64, 0x7e, 0xab, 0x33,
0xf0, 0xe6, 0x26, 0x68, 0xba, 0x1d, 0x2e, 0x38,
0x08, 0xe6, 0x02, 0xd3, 0x25, 0x2c, 0x47, 0x23,
0x58, 0x34, 0x0f, 0x9d, 0x63, 0x4f, 0x63, 0xbb,
0x7f, 0x3b, 0x34, 0x38, 0xa7, 0xb5, 0x8d, 0x65,
0xd9, 0x9f, 0x79, 0x55, 0x3e, 0x4d, 0xe7, 0x73,
0xd8, 0xf6, 0x98, 0x97, 0x84, 0x60, 0x9c, 0xc8,
0xa9, 0x3c, 0xf6, 0xdc, 0x12, 0x5c, 0xe1, 0xbb,
0x0b, 0x8b, 0x98, 0x9c, 0x9d, 0x26, 0x7c, 0x4a,
0xe6, 0x46, 0x36, 0x58, 0x21, 0x4a, 0xee, 0xca,
0xd7, 0x3b, 0xc2, 0x6c, 0x49, 0x2f, 0xe5, 0xd5,
0x03, 0x59, 0x84, 0x53, 0xcb, 0xfe, 0x92, 0x71,
0x2e, 0x7c, 0x21, 0xcc, 0x99, 0x85, 0x7f, 0xb8,
0x74, 0x90, 0x13, 0x42, 0x3f, 0xe0, 0x6b, 0x1d,
0xf2, 0x4d, 0x54, 0xd4, 0xfc, 0x3a, 0x05, 0xe6,
0x74, 0xaf, 0xa6, 0xa0, 0x2a, 0x20, 0x23, 0x5d,
0x34, 0x5c, 0xd9, 0x3e, 0x4e, 0xfa, 0x93, 0xe7,
0xaa, 0xe9, 0x6f, 0x08, 0x43, 0x67, 0x41, 0xc5,
0xad, 0xfb, 0x31, 0x95, 0x82, 0x73, 0x32, 0xd8,
0xa6, 0xa3, 0xed, 0x0e, 0x2d, 0xf6, 0x5f, 0xfd,
0x80, 0xa6, 0x7a, 0xe0, 0xdf, 0x78, 0x15, 0x29,
0x74, 0x33, 0xd0, 0x9e, 0x83, 0x86, 0x72, 0x22,
0x57, 0x29, 0xb9, 0x9e, 0x5d, 0xd3, 0x1a, 0xb5,
0x96, 0x72, 0x41, 0x3d, 0xf1, 0x64, 0x43, 0x67,
0xee, 0xaa, 0x5c, 0xd3, 0x9a, 0x96, 0x13, 0x11,
0x5d, 0xf3, 0x0c, 0x87, 0x82, 0x1e, 0x41, 0x9e,
0xd0, 0x27, 0xd7, 0x54, 0x3b, 0x67, 0x73, 0x09,
0x91, 0xe9, 0xd5, 0x36, 0xa7, 0xb5, 0x55, 0xe4,
0xf3, 0x21, 0x51, 0x49, 0x22, 0x07, 0x55, 0x4f,
0x44, 0x4b, 0xd2, 0x15, 0x93, 0x17, 0x2a, 0xfa,
0x4d, 0x4a, 0x57, 0xdb, 0x4c, 0xa6, 0xeb, 0xec,
0x53, 0x25, 0x6c, 0x21, 0xed, 0x00, 0x4c, 0x3b,
0xca, 0x14, 0x57, 0xa9, 0xd6, 0x6a, 0xcd, 0x8d,
0x5e, 0x74, 0xac, 0x72, 0xc1, 0x97, 0xe5, 0x1b,
0x45, 0x4e, 0xda, 0xfc, 0xcc, 0x40, 0xe8, 0x48,
0x88, 0x0b, 0xa3, 0xe3, 0x8d, 0x83, 0x42, 0xc3,
0x23, 0xfd, 0x68, 0xb5, 0x8e, 0xf1, 0x9d, 0x63,
0x77, 0xe9, 0xa3, 0x8e, 0x8c, 0x26, 0x6b, 0xbd,
0x72, 0x73, 0x35, 0x0c, 0x03, 0xf8, 0x43, 0x78,
0x52, 0x71, 0x15, 0x1f, 0x71, 0x5d, 0x6e, 0xed,
0xb9, 0xcc, 0x86, 0x30, 0xdb, 0x2b, 0xd3, 0x82,
0x88, 0x23, 0x71, 0x90, 0x53, 0x5c, 0xa9, 0x2f,
0x76, 0x01, 0xb7, 0x9a, 0xfe, 0x43, 0x55, 0xa3,
0x04, 0x9b, 0x0e, 0xe4, 0x59, 0xdf, 0xc9, 0xe9,
0xb1, 0xea, 0x29, 0x28, 0x3c, 0x5c, 0xae, 0x72,
0x84, 0xb6, 0xc6, 0xeb, 0x0c, 0x27, 0x07, 0x74,
0x90, 0x0d, 0x31, 0xb0, 0x00, 0x77, 0xe9, 0x40,
0x70, 0x6f, 0x68, 0xa7, 0xfd, 0x06, 0xec, 0x4b,
0xc0, 0xb7, 0xac, 0xbc, 0x33, 0xb7, 0x6d, 0x0a,
0xbd, 0x12, 0x1b, 0x59, 0xcb, 0xdd, 0x32, 0xf5,
0x1d, 0x94, 0x57, 0x76, 0x9e, 0x0c, 0x18, 0x98,
0x71, 0xd7, 0x2a, 0xdb, 0x0b, 0x7b, 0xa7, 0x71,
0xb7, 0x67, 0x81, 0x23, 0x96, 0xae, 0xb9, 0x7e,
0x32, 0x43, 0x92, 0x8a, 0x19, 0xa0, 0xc4, 0xd4,
0x3b, 0x57, 0xf9, 0x4a, 0x2c, 0xfb, 0x51, 0x46,
0xbb, 0xcb, 0x5d, 0xb3, 0xef, 0x13, 0x93, 0x6e,
0x68, 0x42, 0x54, 0x57, 0xd3, 0x6a, 0x3a, 0x8f,
0x9d, 0x66, 0xbf, 0xbd, 0x36, 0x23, 0xf5, 0x93,
0x83, 0x7b, 0x9c, 0xc0, 0xdd, 0xc5, 0x49, 0xc0,
0x64, 0xed, 0x07, 0x12, 0xb3, 0xe6, 0xe4, 0xe5,
0x38, 0x95, 0x23, 0xb1, 0xa0, 0x3b, 0x1a, 0x61,
0xda, 0x17, 0xac, 0xc3, 0x58, 0xdd, 0x74, 0x64,
0x22, 0x11, 0xe8, 0x32, 0x1d, 0x16, 0x93, 0x85,
0x99, 0xa5, 0x9c, 0x34, 0x55, 0xb1, 0xe9, 0x20,
0x72, 0xc9, 0x28, 0x7b, 0x79, 0x00, 0xa1, 0xa6,
0xa3, 0x27, 0x40, 0x18, 0x8a, 0x54, 0xe0, 0xcc,
0xe8, 0x4e, 0x8e, 0x43, 0x96, 0xe7, 0x3f, 0xc8,
0xe9, 0xb2, 0xf9, 0xc9, 0xda, 0x04, 0x71, 0x50,
0x47, 0xe4, 0xaa, 0xce, 0xa2, 0x30, 0xc8, 0xe4,
0xac, 0xc7, 0x0d, 0x06, 0x2e, 0xe6, 0xe8, 0x80,
0x36, 0x29, 0x9e, 0x01, 0xb8, 0xc3, 0xf0, 0xa0,
0x5d, 0x7a, 0xca, 0x4d, 0xa0, 0x57, 0xbd, 0x2a,
0x45, 0xa7, 0x7f, 0x9c, 0x93, 0x07, 0x8f, 0x35,
0x67, 0x92, 0xe3, 0xe9, 0x7f, 0xa8, 0x61, 0x43,
0x9e, 0x25, 0x4f, 0x33, 0x76, 0x13, 0x6e, 0x12,
0xb9, 0xdd, 0xa4, 0x7c, 0x08, 0x9f, 0x7c, 0xe7,
0x0a, 0x8d, 0x84, 0x06, 0xa4, 0x33, 0x17, 0x34,
0x5e, 0x10, 0x7c, 0xc0, 0xa8, 0x3d, 0x1f, 0x42,
0x20, 0x51, 0x65, 0x5d, 0x09, 0xc3, 0xaa, 0xc0,
0xc8, 0x0d, 0xf0, 0x79, 0xbc, 0x20, 0x1b, 0x95,
0xe7, 0x06, 0x7d, 0x47, 0x20, 0x03, 0x1a, 0x74,
0xdd, 0xe2, 0xd4, 0xae, 0x38, 0x71, 0x9b, 0xf5,
0x80, 0xec, 0x08, 0x4e, 0x56, 0xba, 0x76, 0x12,
0x1a, 0xdf, 0x48, 0xf3, 0xae, 0xb3, 0xe6, 0xe6,
0xbe, 0xc0, 0x91, 0x2e, 0x01, 0xb3, 0x01, 0x86,
0xa2, 0xb9, 0x52, 0xd1, 0x21, 0xae, 0xd4, 0x97,
0x1d, 0xef, 0x41, 0x12, 0x95, 0x3d, 0x48, 0x45,
0x1c, 0x56, 0x32, 0x8f, 0xb8, 0x43, 0xbb, 0x19,
0xf3, 0xca, 0xe9, 0xeb, 0x6d, 0x84, 0xbe, 0x86,
0x06, 0xe2, 0x36, 0xb2, 0x62, 0x9d, 0xd3, 0x4c,
0x48, 0x18, 0x54, 0x13, 0x4e, 0xcf, 0xfd, 0xba,
0x84, 0xb9, 0x30, 0x53, 0xcf, 0xfb, 0xb9, 0x29,
0x8f, 0xdc, 0x9f, 0xef, 0x60, 0x0b, 0x64, 0xf6,
0x8b, 0xee, 0xa6, 0x91, 0xc2, 0x41, 0x6c, 0xf6,
0xfa, 0x79, 0x67, 0x4b, 0xc1, 0x3f, 0xaf, 0x09,
0x81, 0xd4, 0x5d, 0xcb, 0x09, 0xdf, 0x36, 0x31,
0xc0, 0x14, 0x3c, 0x7c, 0x0e, 0x65, 0x95, 0x99,
0x6d, 0xa3, 0xf4, 0xd7, 0x38, 0xee, 0x1a, 0x2b,
0x37, 0xe2, 0xa4, 0x3b, 0x4b, 0xd0, 0x65, 0xca,
0xf8, 0xc3, 0xe8, 0x15, 0x20, 0xef, 0xf2, 0x00,
0xfd, 0x01, 0x09, 0xc5, 0xc8, 0x17, 0x04, 0x93,
0xd0, 0x93, 0x03, 0x55, 0xc5, 0xfe, 0x32, 0xa3,
0x3e, 0x28, 0x2d, 0x3b, 0x93, 0x8a, 0xcc, 0x07,
0x72, 0x80, 0x8b, 0x74, 0x16, 0x24, 0xbb, 0xda,
0x94, 0x39, 0x30, 0x8f, 0xb1, 0xcd, 0x4a, 0x90,
0x92, 0x7c, 0x14, 0x8f, 0x95, 0x4e, 0xac, 0x9b,
0xd8, 0x8f, 0x1a, 0x87, 0xa4, 0x32, 0x27, 0x8a,
0xba, 0xf7, 0x41, 0xcf, 0x84, 0x37, 0x19, 0xe6,
0x06, 0xf5, 0x0e, 0xcf, 0x36, 0xf5, 0x9e, 0x6c,
0xde, 0xbc, 0xff, 0x64, 0x7e, 0x4e, 0x59, 0x57,
0x48, 0xfe, 0x14, 0xf7, 0x9c, 0x93, 0x5d, 0x15,
0xad, 0xcc, 0x11, 0xb1, 0x17, 0x18, 0xb2, 0x7e,
0xcc, 0xab, 0xe9, 0xce, 0x7d, 0x77, 0x5b, 0x51,
0x1b, 0x1e, 0x20, 0xa8, 0x32, 0x06, 0x0e, 0x75,
0x93, 0xac, 0xdb, 0x35, 0x37, 0x1f, 0xe9, 0x19,
0x1d, 0xb4, 0x71, 0x97, 0xd6, 0x4e, 0x2c, 0x08,
0xa5, 0x13, 0xf9, 0x0e, 0x7e, 0x78, 0x6e, 0x14,
0xe0, 0xa9, 0xb9, 0x96, 0x4c, 0x80, 0x82, 0xba,
0x17, 0xb3, 0x9d, 0x69, 0xb0, 0x84, 0x46, 0xff,
0xf9, 0x52, 0x79, 0x94, 0x58, 0x3a, 0x62, 0x90,
0x15, 0x35, 0x71, 0x10, 0x37, 0xed, 0xa1, 0x8e,
0x53, 0x6e, 0xf4, 0x26, 0x57, 0x93, 0x15, 0x93,
0xf6, 0x81, 0x2c, 0x5a, 0x10, 0xda, 0x92, 0xad,
0x2f, 0xdb, 0x28, 0x31, 0x2d, 0x55, 0x04, 0xd2,
0x06, 0x28, 0x8c, 0x1e, 0xdc, 0xea, 0x54, 0xac,
0xff, 0xb7, 0x6c, 0x30, 0x15, 0xd4, 0xb4, 0x0d,
0x00, 0x93, 0x57, 0xdd, 0xd2, 0x07, 0x07, 0x06,
0xd9, 0x43, 0x9b, 0xcd, 0x3a, 0xf4, 0x7d, 0x4c,
0x36, 0x5d, 0x23, 0xa2, 0xcc, 0x57, 0x40, 0x91,
0xe9, 0x2c, 0x2f, 0x2c, 0xd5, 0x30, 0x9b, 0x17,
0xb0, 0xc9, 0xf7, 0xa7, 0x2f, 0xd1, 0x93, 0x20,
0x6b, 0xc6, 0xc1, 0xe4, 0x6f, 0xcb, 0xd1, 0xe7,
0x09, 0x0f, 0x9e, 0xdc, 0xaa, 0x9f, 0x2f, 0xdf,
0x56, 0x9f, 0xd4, 0x33, 0x04, 0xaf, 0xd3, 0x6c,
0x58, 0x61, 0xf0, 0x30, 0xec, 0xf2, 0x7f, 0xf2,
0x9c, 0xdf, 0x39, 0xbb, 0x6f, 0xa2, 0x8c, 0x7e,
0xc4, 0x22, 0x51, 0x71, 0xc0, 0x4d, 0x14, 0x1a,
0xc4, 0xcd, 0x04, 0xd9, 0x87, 0x08, 0x50, 0x05,
0xcc, 0xaf, 0xf6, 0xf0, 0x8f, 0x92, 0x54, 0x58,
0xc2, 0xc7, 0x09, 0x7a, 0x59, 0x02, 0x05, 0xe8,
0xb0, 0x86, 0xd9, 0xbf, 0x7b, 0x35, 0x51, 0x4d,
0xaf, 0x08, 0x97, 0x2c, 0x65, 0xda, 0x2a, 0x71,
0x3a, 0xa8, 0x51, 0xcc, 0xf2, 0x73, 0x27, 0xc3,
0xfd, 0x62, 0xcf, 0xe3, 0xb2, 0xca, 0xcb, 0xbe,
0x1a, 0x0a, 0xa1, 0x34, 0x7b, 0x77, 0xc4, 0x62,
0x68, 0x78, 0x5f, 0x94, 0x07, 0x04, 0x65, 0x16,
0x4b, 0x61, 0xcb, 0xff, 0x75, 0x26, 0x50, 0x66,
0x1f, 0x6e, 0x93, 0xf8, 0xc5, 0x51, 0xeb, 0xa4,
0x4a, 0x48, 0x68, 0x6b, 0xe2, 0x5e, 0x44, 0xb2,
0x50, 0x2c, 0x6c, 0xae, 0x79, 0x4e, 0x66, 0x35,
0x81, 0x50, 0xac, 0xbc, 0x3f, 0xb1, 0x0c, 0xf3,
0x05, 0x3c, 0x4a, 0xa3, 0x6c, 0x2a, 0x79, 0xb4,
0xb7, 0xab, 0xca, 0xc7, 0x9b, 0x8e, 0xcd, 0x5f,
0x11, 0x03, 0xcb, 0x30, 0xa3, 0xab, 0xda, 0xfe,
0x64, 0xb9, 0xbb, 0xd8, 0x5e, 0x3a, 0x1a, 0x56,
0xe5, 0x05, 0x48, 0x90, 0x1e, 0x61, 0x69, 0x1b,
0x22, 0xe6, 0x1a, 0x3c, 0x75, 0xad, 0x1f, 0x37,
0x28, 0xdc, 0xe4, 0x6d, 0xbd, 0x42, 0xdc, 0xd3,
0xc8, 0xb6, 0x1c, 0x48, 0xfe, 0x94, 0x77, 0x7f,
0xbd, 0x62, 0xac, 0xa3, 0x47, 0x27, 0xcf, 0x5f,
0xd9, 0xdb, 0xaf, 0xec, 0xf7, 0x5e, 0xc1, 0xb0,
0x9d, 0x01, 0x26, 0x99, 0x7e, 0x8f, 0x03, 0x70,
0xb5, 0x42, 0xbe, 0x67, 0x28, 0x1b, 0x7c, 0xbd,
0x61, 0x21, 0x97, 0xcc, 0x5c, 0xe1, 0x97, 0x8f,
0x8d, 0xde, 0x2b, 0xaa, 0xa7, 0x71, 0x1d, 0x1e,
0x02, 0x73, 0x70, 0x58, 0x32, 0x5b, 0x1d, 0x67,
0x3d, 0xe0, 0x74, 0x4f, 0x03, 0xf2, 0x70, 0x51,
0x79, 0xf1, 0x61, 0x70, 0x15, 0x74, 0x9d, 0x23,
0x89, 0xde, 0xac, 0xfd, 0xde, 0xd0, 0x1f, 0xc3,
0x87, 0x44, 0x35, 0x4b, 0xe5, 0xb0, 0x60, 0xc5,
0x22, 0xe4, 0x9e, 0xca, 0xeb, 0xd5, 0x3a, 0x09,
0x45, 0xa4, 0xdb, 0xfa, 0x3f, 0xeb, 0x1b, 0xc7,
0xc8, 0x14, 0x99, 0x51, 0x92, 0x10, 0xed, 0xed,
0x28, 0xe0, 0xa1, 0xf8, 0x26, 0xcf, 0xcd, 0xcb,
0x63, 0xa1, 0x3b, 0xe3, 0xdf, 0x7e, 0xfe, 0xa6,
0xf0, 0x81, 0x9a, 0xbf, 0x55, 0xde, 0x54, 0xd5,
0x56, 0x60, 0x98, 0x10, 0x68, 0xf4, 0x38, 0x96,
0x8e, 0x6f, 0x1d, 0x44, 0x7f, 0xd6, 0x2f, 0xfe,
0x55, 0xfb, 0x0c, 0x7e, 0x67, 0xe2, 0x61, 0x44,
0xed, 0xf2, 0x35, 0x30, 0x5d, 0xe9, 0xc7, 0xd6,
0x6d, 0xe0, 0xa0, 0xed, 0xf3, 0xfc, 0xd8, 0x3e,
0x0a, 0x7b, 0xcd, 0xaf, 0x65, 0x68, 0x18, 0xc0,
0xec, 0x04, 0x1c, 0x74, 0x6d, 0xe2, 0x6e, 0x79,
0xd4, 0x11, 0x2b, 0x62, 0xd5, 0x27, 0xad, 0x4f,
0x01, 0x59, 0x73, 0xcc, 0x6a, 0x53, 0xfb, 0x2d,
0xd5, 0x4e, 0x99, 0x21, 0x65, 0x4d, 0xf5, 0x82,
0xf7, 0xd8, 0x42, 0xce, 0x6f, 0x3d, 0x36, 0x47,
0xf1, 0x05, 0x16, 0xe8, 0x1b, 0x6a, 0x8f, 0x93,
0xf2, 0x8f, 0x37, 0x40, 0x12, 0x28, 0xa3, 0xe6,
0xb9, 0x17, 0x4a, 0x1f, 0xb1, 0xd1, 0x66, 0x69,
0x86, 0xc4, 0xfc, 0x97, 0xae, 0x3f, 0x8f, 0x1e,
0x2b, 0xdf, 0xcd, 0xf9, 0x3c
};
static const u8 enc_assoc011[] __initconst = {
0xd6, 0x31, 0xda, 0x5d, 0x42, 0x5e, 0xd7
};
static const u8 enc_nonce011[] __initconst = {
0xfd, 0x87, 0xd4, 0xd8, 0x62, 0xfd, 0xec, 0xaa
};
static const u8 enc_key011[] __initconst = {
0x35, 0x4e, 0xb5, 0x70, 0x50, 0x42, 0x8a, 0x85,
0xf2, 0xfb, 0xed, 0x7b, 0xd0, 0x9e, 0x97, 0xca,
0xfa, 0x98, 0x66, 0x63, 0xee, 0x37, 0xcc, 0x52,
0xfe, 0xd1, 0xdf, 0x95, 0x15, 0x34, 0x29, 0x38
};
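/* Vector 012: a still larger plaintext input. */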
static const u8 enc_input012[] __initconst = {
0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0,
0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5,
0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57,
0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff,
0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5,
0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b,
0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46,
0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b,
0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71,
0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0,
0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b,
0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d,
0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f,
0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24,
0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23,
0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e,
0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14,
0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d,
0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb,
0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4,
0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf,
0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e,
0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6,
0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33,
0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb,
0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0,
0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe,
0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00,
0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d,
0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b,
0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50,
0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e,
0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4,
0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28,
0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8,
0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b,
0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86,
0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67,
0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff,
0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59,
0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe,
0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6,
0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e,
0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b,
0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50,
0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39,
0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02,
0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9,
0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a,
0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38,
0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9,
0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65,
0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb,
0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2,
0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae,
0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee,
0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00,
0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c,
0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8,
0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31,
0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68,
0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4,
0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0,
0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11,
0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7,
0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39,
0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1,
0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1,
0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2,
0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66,
0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49,
0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2,
0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5,
0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3,
0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c,
0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa,
0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00,
0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54,
0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87,
0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03,
0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39,
0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40,
0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6,
0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22,
0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5,
0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e,
0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32,
0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53,
0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42,
0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c,
0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68,
0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48,
0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c,
0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce,
0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd,
0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa,
0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69,
0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8,
0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58,
0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0,
0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45,
0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb,
0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33,
0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c,
0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23,
0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80,
0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1,
0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff,
0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24,
0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9,
0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46,
0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8,
0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20,
0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35,
0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63,
0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb,
0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36,
0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a,
0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c,
0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f,
0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02,
0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03,
0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa,
0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16,
0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d,
0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5,
0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7,
0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac,
0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47,
0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3,
0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35,
0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e,
0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6,
0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74,
0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e,
0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a,
0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0,
0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4,
0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8,
0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16,
0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32,
0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65,
0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06,
0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a,
0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7,
0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85,
0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb,
0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46,
0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e,
0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61,
0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb,
0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d,
0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00,
0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5,
0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6,
0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1,
0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a,
0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7,
0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63,
0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38,
0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3,
0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed,
0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49,
0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42,
0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0,
0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f,
0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1,
0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd,
0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d,
0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88,
0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1,
0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25,
0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22,
0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28,
0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f,
0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53,
0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28,
0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8,
0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc,
0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8,
0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb,
0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3,
0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3,
0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac,
0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2,
0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a,
0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad,
0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e,
0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd,
0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf,
0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba,
0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41,
0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91,
0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d,
0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6,
0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf,
0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92,
0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e,
0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72,
0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04,
0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46,
0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55,
0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84,
0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61,
0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d,
0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8,
0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d,
0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87,
0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70,
0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94,
0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f,
0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb,
0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90,
0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31,
0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06,
0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05,
0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7,
0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e,
0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae,
0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2,
0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21,
0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0,
0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d,
0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0,
0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6,
0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5,
0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9,
0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8,
0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57,
0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1,
0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c,
0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b,
0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69,
0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d,
0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d,
0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19,
0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82,
0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20,
0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f,
0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e,
0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f,
0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47,
0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b,
0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4,
0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b,
0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4,
0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9,
0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3,
0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0,
0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16,
0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d,
0x78, 0xec, 0x00
};
static const u8 enc_output012[] __initconst = {
0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3,
0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf,
0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1,
0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f,
0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e,
0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5,
0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b,
0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b,
0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2,
0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1,
0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74,
0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e,
0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae,
0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd,
0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04,
0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55,
0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef,
0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b,
0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74,
0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26,
0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f,
0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64,
0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd,
0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad,
0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b,
0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e,
0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e,
0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0,
0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f,
0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50,
0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97,
0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03,
0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a,
0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15,
0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb,
0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34,
0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47,
0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86,
0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24,
0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c,
0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9,
0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7,
0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48,
0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b,
0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e,
0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61,
0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75,
0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26,
0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74,
0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43,
0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1,
0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79,
0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3,
0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5,
0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9,
0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d,
0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8,
0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26,
0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5,
0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d,
0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29,
0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57,
0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92,
0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9,
0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc,
0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd,
0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57,
0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3,
0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4,
0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c,
0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27,
0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c,
0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5,
0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14,
0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94,
0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b,
0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99,
0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84,
0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a,
0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa,
0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75,
0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74,
0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40,
0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72,
0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f,
0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92,
0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8,
0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c,
0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f,
0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb,
0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a,
0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b,
0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d,
0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c,
0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4,
0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00,
0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b,
0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4,
0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84,
0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba,
0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47,
0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4,
0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88,
0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81,
0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1,
0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a,
0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e,
0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1,
0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07,
0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24,
0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f,
0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a,
0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9,
0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9,
0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51,
0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1,
0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c,
0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53,
0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40,
0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a,
0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2,
0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2,
0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8,
0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07,
0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9,
0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d,
0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde,
0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f,
0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d,
0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d,
0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56,
0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c,
0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3,
0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d,
0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26,
0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10,
0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c,
0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11,
0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf,
0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c,
0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb,
0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79,
0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa,
0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80,
0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08,
0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c,
0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc,
0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab,
0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6,
0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9,
0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7,
0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2,
0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33,
0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2,
0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e,
0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c,
0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b,
0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66,
0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6,
0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44,
0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74,
0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6,
0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f,
0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24,
0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1,
0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2,
0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5,
0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d,
0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0,
0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b,
0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3,
0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0,
0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3,
0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c,
0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b,
0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5,
0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51,
0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71,
0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68,
0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb,
0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e,
0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b,
0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8,
0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb,
0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54,
0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7,
0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff,
0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd,
0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde,
0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c,
0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1,
0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8,
0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14,
0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c,
0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4,
0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06,
0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52,
0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d,
0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c,
0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6,
0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5,
0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f,
0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e,
0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98,
0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8,
0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb,
0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b,
0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79,
0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11,
0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d,
0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10,
0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23,
0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23,
0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90,
0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4,
0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1,
0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7,
0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11,
0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50,
0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8,
0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97,
0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38,
0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f,
0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33,
0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f,
0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75,
0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21,
0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90,
0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8,
0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91,
0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1,
0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f,
0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3,
0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc,
0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a,
0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62,
0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55,
0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23,
0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6,
0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac,
0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12,
0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a,
0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7,
0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec,
0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28,
0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88,
0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4,
0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17,
0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2,
0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33,
0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a,
0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28,
0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62,
0x70, 0xcf, 0xd6
};
static const u8 enc_assoc012[] __initconst = {
0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8,
0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce,
0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c,
0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc,
0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e,
0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f,
0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b,
0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9
};
static const u8 enc_nonce012[] __initconst = {
0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06
};
static const u8 enc_key012[] __initconst = {
0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e,
0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d,
0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e,
0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64
};
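/*
 * Note on vector layout (observed from the data below): each test is a
 * five-tuple of enc_input (plaintext), enc_output (ciphertext with the
 * 16-byte Poly1305 tag appended, so always input length + 16),
 * enc_assoc (additional authenticated data), enc_nonce and enc_key.
 * Vectors with an 8-byte nonce appear to exercise the 64-bit-nonce
 * construction; 12-byte nonces follow the RFC 7539 AEAD layout.
 */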
/* wycheproof - rfc7539 */
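/*
 * This is the AEAD example from RFC 7539 section 2.8.2: the plaintext
 * is the ASCII "Ladies and Gentlemen of the class of '99..." message,
 * with the 0x80..0x9f key, the 07 00 00 00 / 40..47 nonce and the
 * 50..53 / c0..c7 AAD from that example.
 */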
static const u8 enc_input013[] __initconst = {
0x4c, 0x61, 0x64, 0x69, 0x65, 0x73, 0x20, 0x61,
0x6e, 0x64, 0x20, 0x47, 0x65, 0x6e, 0x74, 0x6c,
0x65, 0x6d, 0x65, 0x6e, 0x20, 0x6f, 0x66, 0x20,
0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x61, 0x73,
0x73, 0x20, 0x6f, 0x66, 0x20, 0x27, 0x39, 0x39,
0x3a, 0x20, 0x49, 0x66, 0x20, 0x49, 0x20, 0x63,
0x6f, 0x75, 0x6c, 0x64, 0x20, 0x6f, 0x66, 0x66,
0x65, 0x72, 0x20, 0x79, 0x6f, 0x75, 0x20, 0x6f,
0x6e, 0x6c, 0x79, 0x20, 0x6f, 0x6e, 0x65, 0x20,
0x74, 0x69, 0x70, 0x20, 0x66, 0x6f, 0x72, 0x20,
0x74, 0x68, 0x65, 0x20, 0x66, 0x75, 0x74, 0x75,
0x72, 0x65, 0x2c, 0x20, 0x73, 0x75, 0x6e, 0x73,
0x63, 0x72, 0x65, 0x65, 0x6e, 0x20, 0x77, 0x6f,
0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x69,
0x74, 0x2e
};
static const u8 enc_output013[] __initconst = {
0xd3, 0x1a, 0x8d, 0x34, 0x64, 0x8e, 0x60, 0xdb,
0x7b, 0x86, 0xaf, 0xbc, 0x53, 0xef, 0x7e, 0xc2,
0xa4, 0xad, 0xed, 0x51, 0x29, 0x6e, 0x08, 0xfe,
0xa9, 0xe2, 0xb5, 0xa7, 0x36, 0xee, 0x62, 0xd6,
0x3d, 0xbe, 0xa4, 0x5e, 0x8c, 0xa9, 0x67, 0x12,
0x82, 0xfa, 0xfb, 0x69, 0xda, 0x92, 0x72, 0x8b,
0x1a, 0x71, 0xde, 0x0a, 0x9e, 0x06, 0x0b, 0x29,
0x05, 0xd6, 0xa5, 0xb6, 0x7e, 0xcd, 0x3b, 0x36,
0x92, 0xdd, 0xbd, 0x7f, 0x2d, 0x77, 0x8b, 0x8c,
0x98, 0x03, 0xae, 0xe3, 0x28, 0x09, 0x1b, 0x58,
0xfa, 0xb3, 0x24, 0xe4, 0xfa, 0xd6, 0x75, 0x94,
0x55, 0x85, 0x80, 0x8b, 0x48, 0x31, 0xd7, 0xbc,
0x3f, 0xf4, 0xde, 0xf0, 0x8e, 0x4b, 0x7a, 0x9d,
0xe5, 0x76, 0xd2, 0x65, 0x86, 0xce, 0xc6, 0x4b,
0x61, 0x16, 0x1a, 0xe1, 0x0b, 0x59, 0x4f, 0x09,
0xe2, 0x6a, 0x7e, 0x90, 0x2e, 0xcb, 0xd0, 0x60,
0x06, 0x91
};
static const u8 enc_assoc013[] __initconst = {
0x50, 0x51, 0x52, 0x53, 0xc0, 0xc1, 0xc2, 0xc3,
0xc4, 0xc5, 0xc6, 0xc7
};
static const u8 enc_nonce013[] __initconst = {
0x07, 0x00, 0x00, 0x00, 0x40, 0x41, 0x42, 0x43,
0x44, 0x45, 0x46, 0x47
};
static const u8 enc_key013[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
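/*
 * Vectors 014 onward sweep the plaintext length through and around the
 * 16-byte Poly1305 block boundary (0, 1, 2, 7, 8, 12, 15, 16, 17, 24,
 * 31, 32, 47, 64 and 97 bytes), in pairs: one vector with empty AAD
 * and one with 8 bytes of AAD. For the empty-plaintext vectors the
 * 16-byte output is the authentication tag alone.
 */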
static const u8 enc_input014[] __initconst = { };
static const u8 enc_output014[] __initconst = {
0x76, 0xac, 0xb3, 0x42, 0xcf, 0x31, 0x66, 0xa5,
0xb6, 0x3c, 0x0c, 0x0e, 0xa1, 0x38, 0x3c, 0x8d
};
static const u8 enc_assoc014[] __initconst = { };
static const u8 enc_nonce014[] __initconst = {
0x4d, 0xa5, 0xbf, 0x8d, 0xfd, 0x58, 0x52, 0xc1,
0xea, 0x12, 0x37, 0x9d
};
static const u8 enc_key014[] __initconst = {
0x80, 0xba, 0x31, 0x92, 0xc8, 0x03, 0xce, 0x96,
0x5e, 0xa3, 0x71, 0xd5, 0xff, 0x07, 0x3c, 0xf0,
0xf4, 0x3b, 0x6a, 0x2a, 0xb5, 0x76, 0xb2, 0x08,
0x42, 0x6e, 0x11, 0x40, 0x9c, 0x09, 0xb9, 0xb0
};
/* wycheproof - misc */
static const u8 enc_input015[] __initconst = { };
static const u8 enc_output015[] __initconst = {
0x90, 0x6f, 0xa6, 0x28, 0x4b, 0x52, 0xf8, 0x7b,
0x73, 0x59, 0xcb, 0xaa, 0x75, 0x63, 0xc7, 0x09
};
static const u8 enc_assoc015[] __initconst = {
0xbd, 0x50, 0x67, 0x64, 0xf2, 0xd2, 0xc4, 0x10
};
static const u8 enc_nonce015[] __initconst = {
0xa9, 0x2e, 0xf0, 0xac, 0x99, 0x1d, 0xd5, 0x16,
0xa3, 0xc6, 0xf6, 0x89
};
static const u8 enc_key015[] __initconst = {
0x7a, 0x4c, 0xd7, 0x59, 0x17, 0x2e, 0x02, 0xeb,
0x20, 0x4d, 0xb2, 0xc3, 0xf5, 0xc7, 0x46, 0x22,
0x7d, 0xf5, 0x84, 0xfc, 0x13, 0x45, 0x19, 0x63,
0x91, 0xdb, 0xb9, 0x57, 0x7a, 0x25, 0x07, 0x42
};
/* wycheproof - misc */
static const u8 enc_input016[] __initconst = {
0x2a
};
static const u8 enc_output016[] __initconst = {
0x3a, 0xca, 0xc2, 0x7d, 0xec, 0x09, 0x68, 0x80,
0x1e, 0x9f, 0x6e, 0xde, 0xd6, 0x9d, 0x80, 0x75,
0x22
};
static const u8 enc_assoc016[] __initconst = { };
static const u8 enc_nonce016[] __initconst = {
0x99, 0xe2, 0x3e, 0xc4, 0x89, 0x85, 0xbc, 0xcd,
0xee, 0xab, 0x60, 0xf1
};
static const u8 enc_key016[] __initconst = {
0xcc, 0x56, 0xb6, 0x80, 0x55, 0x2e, 0xb7, 0x50,
0x08, 0xf5, 0x48, 0x4b, 0x4c, 0xb8, 0x03, 0xfa,
0x50, 0x63, 0xeb, 0xd6, 0xea, 0xb9, 0x1f, 0x6a,
0xb6, 0xae, 0xf4, 0x91, 0x6a, 0x76, 0x62, 0x73
};
/* wycheproof - misc */
static const u8 enc_input017[] __initconst = {
0x51
};
static const u8 enc_output017[] __initconst = {
0xc4, 0x16, 0x83, 0x10, 0xca, 0x45, 0xb1, 0xf7,
0xc6, 0x6c, 0xad, 0x4e, 0x99, 0xe4, 0x3f, 0x72,
0xb9
};
static const u8 enc_assoc017[] __initconst = {
0x91, 0xca, 0x6c, 0x59, 0x2c, 0xbc, 0xca, 0x53
};
static const u8 enc_nonce017[] __initconst = {
0xab, 0x0d, 0xca, 0x71, 0x6e, 0xe0, 0x51, 0xd2,
0x78, 0x2f, 0x44, 0x03
};
static const u8 enc_key017[] __initconst = {
0x46, 0xf0, 0x25, 0x49, 0x65, 0xf7, 0x69, 0xd5,
0x2b, 0xdb, 0x4a, 0x70, 0xb4, 0x43, 0x19, 0x9f,
0x8e, 0xf2, 0x07, 0x52, 0x0d, 0x12, 0x20, 0xc5,
0x5e, 0x4b, 0x70, 0xf0, 0xfd, 0xa6, 0x20, 0xee
};
/* wycheproof - misc */
static const u8 enc_input018[] __initconst = {
0x5c, 0x60
};
static const u8 enc_output018[] __initconst = {
0x4d, 0x13, 0x91, 0xe8, 0xb6, 0x1e, 0xfb, 0x39,
0xc1, 0x22, 0x19, 0x54, 0x53, 0x07, 0x7b, 0x22,
0xe5, 0xe2
};
static const u8 enc_assoc018[] __initconst = { };
static const u8 enc_nonce018[] __initconst = {
0x46, 0x1a, 0xf1, 0x22, 0xe9, 0xf2, 0xe0, 0x34,
0x7e, 0x03, 0xf2, 0xdb
};
static const u8 enc_key018[] __initconst = {
0x2f, 0x7f, 0x7e, 0x4f, 0x59, 0x2b, 0xb3, 0x89,
0x19, 0x49, 0x89, 0x74, 0x35, 0x07, 0xbf, 0x3e,
0xe9, 0xcb, 0xde, 0x17, 0x86, 0xb6, 0x69, 0x5f,
0xe6, 0xc0, 0x25, 0xfd, 0x9b, 0xa4, 0xc1, 0x00
};
/* wycheproof - misc */
static const u8 enc_input019[] __initconst = {
0xdd, 0xf2
};
static const u8 enc_output019[] __initconst = {
0xb6, 0x0d, 0xea, 0xd0, 0xfd, 0x46, 0x97, 0xec,
0x2e, 0x55, 0x58, 0x23, 0x77, 0x19, 0xd0, 0x24,
0x37, 0xa2
};
static const u8 enc_assoc019[] __initconst = {
0x88, 0x36, 0x4f, 0xc8, 0x06, 0x05, 0x18, 0xbf
};
static const u8 enc_nonce019[] __initconst = {
0x61, 0x54, 0x6b, 0xa5, 0xf1, 0x72, 0x05, 0x90,
0xb6, 0x04, 0x0a, 0xc6
};
static const u8 enc_key019[] __initconst = {
0xc8, 0x83, 0x3d, 0xce, 0x5e, 0xa9, 0xf2, 0x48,
0xaa, 0x20, 0x30, 0xea, 0xcf, 0xe7, 0x2b, 0xff,
0xe6, 0x9a, 0x62, 0x0c, 0xaf, 0x79, 0x33, 0x44,
0xe5, 0x71, 0x8f, 0xe0, 0xd7, 0xab, 0x1a, 0x58
};
/* wycheproof - misc */
static const u8 enc_input020[] __initconst = {
0xab, 0x85, 0xe9, 0xc1, 0x57, 0x17, 0x31
};
static const u8 enc_output020[] __initconst = {
0x5d, 0xfe, 0x34, 0x40, 0xdb, 0xb3, 0xc3, 0xed,
0x7a, 0x43, 0x4e, 0x26, 0x02, 0xd3, 0x94, 0x28,
0x1e, 0x0a, 0xfa, 0x9f, 0xb7, 0xaa, 0x42
};
static const u8 enc_assoc020[] __initconst = { };
static const u8 enc_nonce020[] __initconst = {
0x3c, 0x4e, 0x65, 0x4d, 0x66, 0x3f, 0xa4, 0x59,
0x6d, 0xc5, 0x5b, 0xb7
};
static const u8 enc_key020[] __initconst = {
0x55, 0x56, 0x81, 0x58, 0xd3, 0xa6, 0x48, 0x3f,
0x1f, 0x70, 0x21, 0xea, 0xb6, 0x9b, 0x70, 0x3f,
0x61, 0x42, 0x51, 0xca, 0xdc, 0x1a, 0xf5, 0xd3,
0x4a, 0x37, 0x4f, 0xdb, 0xfc, 0x5a, 0xda, 0xc7
};
/* wycheproof - misc */
static const u8 enc_input021[] __initconst = {
0x4e, 0xe5, 0xcd, 0xa2, 0x0d, 0x42, 0x90
};
static const u8 enc_output021[] __initconst = {
0x4b, 0xd4, 0x72, 0x12, 0x94, 0x1c, 0xe3, 0x18,
0x5f, 0x14, 0x08, 0xee, 0x7f, 0xbf, 0x18, 0xf5,
0xab, 0xad, 0x6e, 0x22, 0x53, 0xa1, 0xba
};
static const u8 enc_assoc021[] __initconst = {
0x84, 0xe4, 0x6b, 0xe8, 0xc0, 0x91, 0x90, 0x53
};
static const u8 enc_nonce021[] __initconst = {
0x58, 0x38, 0x93, 0x75, 0xc6, 0x9e, 0xe3, 0x98,
0xde, 0x94, 0x83, 0x96
};
static const u8 enc_key021[] __initconst = {
0xe3, 0xc0, 0x9e, 0x7f, 0xab, 0x1a, 0xef, 0xb5,
0x16, 0xda, 0x6a, 0x33, 0x02, 0x2a, 0x1d, 0xd4,
0xeb, 0x27, 0x2c, 0x80, 0xd5, 0x40, 0xc5, 0xda,
0x52, 0xa7, 0x30, 0xf3, 0x4d, 0x84, 0x0d, 0x7f
};
/* wycheproof - misc */
static const u8 enc_input022[] __initconst = {
0xbe, 0x33, 0x08, 0xf7, 0x2a, 0x2c, 0x6a, 0xed
};
static const u8 enc_output022[] __initconst = {
0x8e, 0x94, 0x39, 0xa5, 0x6e, 0xee, 0xc8, 0x17,
0xfb, 0xe8, 0xa6, 0xed, 0x8f, 0xab, 0xb1, 0x93,
0x75, 0x39, 0xdd, 0x6c, 0x00, 0xe9, 0x00, 0x21
};
static const u8 enc_assoc022[] __initconst = { };
static const u8 enc_nonce022[] __initconst = {
0x4f, 0x07, 0xaf, 0xed, 0xfd, 0xc3, 0xb6, 0xc2,
0x36, 0x18, 0x23, 0xd3
};
static const u8 enc_key022[] __initconst = {
0x51, 0xe4, 0xbf, 0x2b, 0xad, 0x92, 0xb7, 0xaf,
0xf1, 0xa4, 0xbc, 0x05, 0x55, 0x0b, 0xa8, 0x1d,
0xf4, 0xb9, 0x6f, 0xab, 0xf4, 0x1c, 0x12, 0xc7,
0xb0, 0x0e, 0x60, 0xe4, 0x8d, 0xb7, 0xe1, 0x52
};
/* wycheproof - misc */
static const u8 enc_input023[] __initconst = {
0xa4, 0xc9, 0xc2, 0x80, 0x1b, 0x71, 0xf7, 0xdf
};
static const u8 enc_output023[] __initconst = {
0xb9, 0xb9, 0x10, 0x43, 0x3a, 0xf0, 0x52, 0xb0,
0x45, 0x30, 0xf5, 0x1a, 0xee, 0xe0, 0x24, 0xe0,
0xa4, 0x45, 0xa6, 0x32, 0x8f, 0xa6, 0x7a, 0x18
};
static const u8 enc_assoc023[] __initconst = {
0x66, 0xc0, 0xae, 0x70, 0x07, 0x6c, 0xb1, 0x4d
};
static const u8 enc_nonce023[] __initconst = {
0xb4, 0xea, 0x66, 0x6e, 0xe1, 0x19, 0x56, 0x33,
0x66, 0x48, 0x4a, 0x78
};
static const u8 enc_key023[] __initconst = {
0x11, 0x31, 0xc1, 0x41, 0x85, 0x77, 0xa0, 0x54,
0xde, 0x7a, 0x4a, 0xc5, 0x51, 0x95, 0x0f, 0x1a,
0x05, 0x3f, 0x9a, 0xe4, 0x6e, 0x5b, 0x75, 0xfe,
0x4a, 0xbd, 0x56, 0x08, 0xd7, 0xcd, 0xda, 0xdd
};
/* wycheproof - misc */
static const u8 enc_input024[] __initconst = {
0x42, 0xba, 0xae, 0x59, 0x78, 0xfe, 0xaf, 0x5c,
0x36, 0x8d, 0x14, 0xe0
};
static const u8 enc_output024[] __initconst = {
0xff, 0x7d, 0xc2, 0x03, 0xb2, 0x6c, 0x46, 0x7a,
0x6b, 0x50, 0xdb, 0x33, 0x57, 0x8c, 0x0f, 0x27,
0x58, 0xc2, 0xe1, 0x4e, 0x36, 0xd4, 0xfc, 0x10,
0x6d, 0xcb, 0x29, 0xb4
};
static const u8 enc_assoc024[] __initconst = { };
static const u8 enc_nonce024[] __initconst = {
0x9a, 0x59, 0xfc, 0xe2, 0x6d, 0xf0, 0x00, 0x5e,
0x07, 0x53, 0x86, 0x56
};
static const u8 enc_key024[] __initconst = {
0x99, 0xb6, 0x2b, 0xd5, 0xaf, 0xbe, 0x3f, 0xb0,
0x15, 0xbd, 0xe9, 0x3f, 0x0a, 0xbf, 0x48, 0x39,
0x57, 0xa1, 0xc3, 0xeb, 0x3c, 0xa5, 0x9c, 0xb5,
0x0b, 0x39, 0xf7, 0xf8, 0xa9, 0xcc, 0x51, 0xbe
};
/* wycheproof - misc */
static const u8 enc_input025[] __initconst = {
0xfd, 0xc8, 0x5b, 0x94, 0xa4, 0xb2, 0xa6, 0xb7,
0x59, 0xb1, 0xa0, 0xda
};
static const u8 enc_output025[] __initconst = {
0x9f, 0x88, 0x16, 0xde, 0x09, 0x94, 0xe9, 0x38,
0xd9, 0xe5, 0x3f, 0x95, 0xd0, 0x86, 0xfc, 0x6c,
0x9d, 0x8f, 0xa9, 0x15, 0xfd, 0x84, 0x23, 0xa7,
0xcf, 0x05, 0x07, 0x2f
};
static const u8 enc_assoc025[] __initconst = {
0xa5, 0x06, 0xe1, 0xa5, 0xc6, 0x90, 0x93, 0xf9
};
static const u8 enc_nonce025[] __initconst = {
0x58, 0xdb, 0xd4, 0xad, 0x2c, 0x4a, 0xd3, 0x5d,
0xd9, 0x06, 0xe9, 0xce
};
static const u8 enc_key025[] __initconst = {
0x85, 0xf3, 0x5b, 0x62, 0x82, 0xcf, 0xf4, 0x40,
0xbc, 0x10, 0x20, 0xc8, 0x13, 0x6f, 0xf2, 0x70,
0x31, 0x11, 0x0f, 0xa6, 0x3e, 0xc1, 0x6f, 0x1e,
0x82, 0x51, 0x18, 0xb0, 0x06, 0xb9, 0x12, 0x57
};
/* wycheproof - misc */
static const u8 enc_input026[] __initconst = {
0x51, 0xf8, 0xc1, 0xf7, 0x31, 0xea, 0x14, 0xac,
0xdb, 0x21, 0x0a, 0x6d, 0x97, 0x3e, 0x07
};
static const u8 enc_output026[] __initconst = {
0x0b, 0x29, 0x63, 0x8e, 0x1f, 0xbd, 0xd6, 0xdf,
0x53, 0x97, 0x0b, 0xe2, 0x21, 0x00, 0x42, 0x2a,
0x91, 0x34, 0x08, 0x7d, 0x67, 0xa4, 0x6e, 0x79,
0x17, 0x8d, 0x0a, 0x93, 0xf5, 0xe1, 0xd2
};
static const u8 enc_assoc026[] __initconst = { };
static const u8 enc_nonce026[] __initconst = {
0x68, 0xab, 0x7f, 0xdb, 0xf6, 0x19, 0x01, 0xda,
0xd4, 0x61, 0xd2, 0x3c
};
static const u8 enc_key026[] __initconst = {
0x67, 0x11, 0x96, 0x27, 0xbd, 0x98, 0x8e, 0xda,
0x90, 0x62, 0x19, 0xe0, 0x8c, 0x0d, 0x0d, 0x77,
0x9a, 0x07, 0xd2, 0x08, 0xce, 0x8a, 0x4f, 0xe0,
0x70, 0x9a, 0xf7, 0x55, 0xee, 0xec, 0x6d, 0xcb
};
/* wycheproof - misc */
static const u8 enc_input027[] __initconst = {
0x97, 0x46, 0x9d, 0xa6, 0x67, 0xd6, 0x11, 0x0f,
0x9c, 0xbd, 0xa1, 0xd1, 0xa2, 0x06, 0x73
};
static const u8 enc_output027[] __initconst = {
0x32, 0xdb, 0x66, 0xc4, 0xa3, 0x81, 0x9d, 0x81,
0x55, 0x74, 0x55, 0xe5, 0x98, 0x0f, 0xed, 0xfe,
0xae, 0x30, 0xde, 0xc9, 0x4e, 0x6a, 0xd3, 0xa9,
0xee, 0xa0, 0x6a, 0x0d, 0x70, 0x39, 0x17
};
static const u8 enc_assoc027[] __initconst = {
0x64, 0x53, 0xa5, 0x33, 0x84, 0x63, 0x22, 0x12
};
static const u8 enc_nonce027[] __initconst = {
0xd9, 0x5b, 0x32, 0x43, 0xaf, 0xae, 0xf7, 0x14,
0xc5, 0x03, 0x5b, 0x6a
};
static const u8 enc_key027[] __initconst = {
0xe6, 0xf1, 0x11, 0x8d, 0x41, 0xe4, 0xb4, 0x3f,
0xb5, 0x82, 0x21, 0xb7, 0xed, 0x79, 0x67, 0x38,
0x34, 0xe0, 0xd8, 0xac, 0x5c, 0x4f, 0xa6, 0x0b,
0xbc, 0x8b, 0xc4, 0x89, 0x3a, 0x58, 0x89, 0x4d
};
/* wycheproof - misc */
static const u8 enc_input028[] __initconst = {
0x54, 0x9b, 0x36, 0x5a, 0xf9, 0x13, 0xf3, 0xb0,
0x81, 0x13, 0x1c, 0xcb, 0x6b, 0x82, 0x55, 0x88
};
static const u8 enc_output028[] __initconst = {
0xe9, 0x11, 0x0e, 0x9f, 0x56, 0xab, 0x3c, 0xa4,
0x83, 0x50, 0x0c, 0xea, 0xba, 0xb6, 0x7a, 0x13,
0x83, 0x6c, 0xca, 0xbf, 0x15, 0xa6, 0xa2, 0x2a,
0x51, 0xc1, 0x07, 0x1c, 0xfa, 0x68, 0xfa, 0x0c
};
static const u8 enc_assoc028[] __initconst = { };
static const u8 enc_nonce028[] __initconst = {
0x2f, 0xcb, 0x1b, 0x38, 0xa9, 0x9e, 0x71, 0xb8,
0x47, 0x40, 0xad, 0x9b
};
static const u8 enc_key028[] __initconst = {
0x59, 0xd4, 0xea, 0xfb, 0x4d, 0xe0, 0xcf, 0xc7,
0xd3, 0xdb, 0x99, 0xa8, 0xf5, 0x4b, 0x15, 0xd7,
0xb3, 0x9f, 0x0a, 0xcc, 0x8d, 0xa6, 0x97, 0x63,
0xb0, 0x19, 0xc1, 0x69, 0x9f, 0x87, 0x67, 0x4a
};
/* wycheproof - misc */
static const u8 enc_input029[] __initconst = {
0x55, 0xa4, 0x65, 0x64, 0x4f, 0x5b, 0x65, 0x09,
0x28, 0xcb, 0xee, 0x7c, 0x06, 0x32, 0x14, 0xd6
};
static const u8 enc_output029[] __initconst = {
0xe4, 0xb1, 0x13, 0xcb, 0x77, 0x59, 0x45, 0xf3,
0xd3, 0xa8, 0xae, 0x9e, 0xc1, 0x41, 0xc0, 0x0c,
0x7c, 0x43, 0xf1, 0x6c, 0xe0, 0x96, 0xd0, 0xdc,
0x27, 0xc9, 0x58, 0x49, 0xdc, 0x38, 0x3b, 0x7d
};
static const u8 enc_assoc029[] __initconst = {
0x03, 0x45, 0x85, 0x62, 0x1a, 0xf8, 0xd7, 0xff
};
static const u8 enc_nonce029[] __initconst = {
0x11, 0x8a, 0x69, 0x64, 0xc2, 0xd3, 0xe3, 0x80,
0x07, 0x1f, 0x52, 0x66
};
static const u8 enc_key029[] __initconst = {
0xb9, 0x07, 0xa4, 0x50, 0x75, 0x51, 0x3f, 0xe8,
0xa8, 0x01, 0x9e, 0xde, 0xe3, 0xf2, 0x59, 0x14,
0x87, 0xb2, 0xa0, 0x30, 0xb0, 0x3c, 0x6e, 0x1d,
0x77, 0x1c, 0x86, 0x25, 0x71, 0xd2, 0xea, 0x1e
};
/* wycheproof - misc */
static const u8 enc_input030[] __initconst = {
0x3f, 0xf1, 0x51, 0x4b, 0x1c, 0x50, 0x39, 0x15,
0x91, 0x8f, 0x0c, 0x0c, 0x31, 0x09, 0x4a, 0x6e,
0x1f
};
static const u8 enc_output030[] __initconst = {
0x02, 0xcc, 0x3a, 0xcb, 0x5e, 0xe1, 0xfc, 0xdd,
0x12, 0xa0, 0x3b, 0xb8, 0x57, 0x97, 0x64, 0x74,
0xd3, 0xd8, 0x3b, 0x74, 0x63, 0xa2, 0xc3, 0x80,
0x0f, 0xe9, 0x58, 0xc2, 0x8e, 0xaa, 0x29, 0x08,
0x13
};
static const u8 enc_assoc030[] __initconst = { };
static const u8 enc_nonce030[] __initconst = {
0x45, 0xaa, 0xa3, 0xe5, 0xd1, 0x6d, 0x2d, 0x42,
0xdc, 0x03, 0x44, 0x5d
};
static const u8 enc_key030[] __initconst = {
0x3b, 0x24, 0x58, 0xd8, 0x17, 0x6e, 0x16, 0x21,
0xc0, 0xcc, 0x24, 0xc0, 0xc0, 0xe2, 0x4c, 0x1e,
0x80, 0xd7, 0x2f, 0x7e, 0xe9, 0x14, 0x9a, 0x4b,
0x16, 0x61, 0x76, 0x62, 0x96, 0x16, 0xd0, 0x11
};
/* wycheproof - misc */
static const u8 enc_input031[] __initconst = {
0x63, 0x85, 0x8c, 0xa3, 0xe2, 0xce, 0x69, 0x88,
0x7b, 0x57, 0x8a, 0x3c, 0x16, 0x7b, 0x42, 0x1c,
0x9c
};
static const u8 enc_output031[] __initconst = {
0x35, 0x76, 0x64, 0x88, 0xd2, 0xbc, 0x7c, 0x2b,
0x8d, 0x17, 0xcb, 0xbb, 0x9a, 0xbf, 0xad, 0x9e,
0x6d, 0x1f, 0x39, 0x1e, 0x65, 0x7b, 0x27, 0x38,
0xdd, 0xa0, 0x84, 0x48, 0xcb, 0xa2, 0x81, 0x1c,
0xeb
};
static const u8 enc_assoc031[] __initconst = {
0x9a, 0xaf, 0x29, 0x9e, 0xee, 0xa7, 0x8f, 0x79
};
static const u8 enc_nonce031[] __initconst = {
0xf0, 0x38, 0x4f, 0xb8, 0x76, 0x12, 0x14, 0x10,
0x63, 0x3d, 0x99, 0x3d
};
static const u8 enc_key031[] __initconst = {
0xf6, 0x0c, 0x6a, 0x1b, 0x62, 0x57, 0x25, 0xf7,
0x6c, 0x70, 0x37, 0xb4, 0x8f, 0xe3, 0x57, 0x7f,
0xa7, 0xf7, 0xb8, 0x7b, 0x1b, 0xd5, 0xa9, 0x82,
0x17, 0x6d, 0x18, 0x23, 0x06, 0xff, 0xb8, 0x70
};
/* wycheproof - misc */
static const u8 enc_input032[] __initconst = {
0x10, 0xf1, 0xec, 0xf9, 0xc6, 0x05, 0x84, 0x66,
0x5d, 0x9a, 0xe5, 0xef, 0xe2, 0x79, 0xe7, 0xf7,
0x37, 0x7e, 0xea, 0x69, 0x16, 0xd2, 0xb1, 0x11
};
static const u8 enc_output032[] __initconst = {
0x42, 0xf2, 0x6c, 0x56, 0xcb, 0x4b, 0xe2, 0x1d,
0x9d, 0x8d, 0x0c, 0x80, 0xfc, 0x99, 0xdd, 0xe0,
0x0d, 0x75, 0xf3, 0x80, 0x74, 0xbf, 0xe7, 0x64,
0x54, 0xaa, 0x7e, 0x13, 0xd4, 0x8f, 0xff, 0x7d,
0x75, 0x57, 0x03, 0x94, 0x57, 0x04, 0x0a, 0x3a
};
static const u8 enc_assoc032[] __initconst = { };
static const u8 enc_nonce032[] __initconst = {
0xe6, 0xb1, 0xad, 0xf2, 0xfd, 0x58, 0xa8, 0x76,
0x2c, 0x65, 0xf3, 0x1b
};
static const u8 enc_key032[] __initconst = {
0x02, 0x12, 0xa8, 0xde, 0x50, 0x07, 0xed, 0x87,
0xb3, 0x3f, 0x1a, 0x70, 0x90, 0xb6, 0x11, 0x4f,
0x9e, 0x08, 0xce, 0xfd, 0x96, 0x07, 0xf2, 0xc2,
0x76, 0xbd, 0xcf, 0xdb, 0xc5, 0xce, 0x9c, 0xd7
};
/* wycheproof - misc */
static const u8 enc_input033[] __initconst = {
0x92, 0x22, 0xf9, 0x01, 0x8e, 0x54, 0xfd, 0x6d,
0xe1, 0x20, 0x08, 0x06, 0xa9, 0xee, 0x8e, 0x4c,
0xc9, 0x04, 0xd2, 0x9f, 0x25, 0xcb, 0xa1, 0x93
};
static const u8 enc_output033[] __initconst = {
0x12, 0x30, 0x32, 0x43, 0x7b, 0x4b, 0xfd, 0x69,
0x20, 0xe8, 0xf7, 0xe7, 0xe0, 0x08, 0x7a, 0xe4,
0x88, 0x9e, 0xbe, 0x7a, 0x0a, 0xd0, 0xe9, 0x00,
0x3c, 0xf6, 0x8f, 0x17, 0x95, 0x50, 0xda, 0x63,
0xd3, 0xb9, 0x6c, 0x2d, 0x55, 0x41, 0x18, 0x65
};
static const u8 enc_assoc033[] __initconst = {
0x3e, 0x8b, 0xc5, 0xad, 0xe1, 0x82, 0xff, 0x08
};
static const u8 enc_nonce033[] __initconst = {
0x6b, 0x28, 0x2e, 0xbe, 0xcc, 0x54, 0x1b, 0xcd,
0x78, 0x34, 0xed, 0x55
};
static const u8 enc_key033[] __initconst = {
0xc5, 0xbc, 0x09, 0x56, 0x56, 0x46, 0xe7, 0xed,
0xda, 0x95, 0x4f, 0x1f, 0x73, 0x92, 0x23, 0xda,
0xda, 0x20, 0xb9, 0x5c, 0x44, 0xab, 0x03, 0x3d,
0x0f, 0xae, 0x4b, 0x02, 0x83, 0xd1, 0x8b, 0xe3
};
/* wycheproof - misc */
static const u8 enc_input034[] __initconst = {
0xb0, 0x53, 0x99, 0x92, 0x86, 0xa2, 0x82, 0x4f,
0x42, 0xcc, 0x8c, 0x20, 0x3a, 0xb2, 0x4e, 0x2c,
0x97, 0xa6, 0x85, 0xad, 0xcc, 0x2a, 0xd3, 0x26,
0x62, 0x55, 0x8e, 0x55, 0xa5, 0xc7, 0x29
};
static const u8 enc_output034[] __initconst = {
0x45, 0xc7, 0xd6, 0xb5, 0x3a, 0xca, 0xd4, 0xab,
0xb6, 0x88, 0x76, 0xa6, 0xe9, 0x6a, 0x48, 0xfb,
0x59, 0x52, 0x4d, 0x2c, 0x92, 0xc9, 0xd8, 0xa1,
0x89, 0xc9, 0xfd, 0x2d, 0xb9, 0x17, 0x46, 0x56,
0x6d, 0x3c, 0xa1, 0x0e, 0x31, 0x1b, 0x69, 0x5f,
0x3e, 0xae, 0x15, 0x51, 0x65, 0x24, 0x93
};
static const u8 enc_assoc034[] __initconst = { };
static const u8 enc_nonce034[] __initconst = {
0x04, 0xa9, 0xbe, 0x03, 0x50, 0x8a, 0x5f, 0x31,
0x37, 0x1a, 0x6f, 0xd2
};
static const u8 enc_key034[] __initconst = {
0x2e, 0xb5, 0x1c, 0x46, 0x9a, 0xa8, 0xeb, 0x9e,
0x6c, 0x54, 0xa8, 0x34, 0x9b, 0xae, 0x50, 0xa2,
0x0f, 0x0e, 0x38, 0x27, 0x11, 0xbb, 0xa1, 0x15,
0x2c, 0x42, 0x4f, 0x03, 0xb6, 0x67, 0x1d, 0x71
};
/* wycheproof - misc */
static const u8 enc_input035[] __initconst = {
0xf4, 0x52, 0x06, 0xab, 0xc2, 0x55, 0x52, 0xb2,
0xab, 0xc9, 0xab, 0x7f, 0xa2, 0x43, 0x03, 0x5f,
0xed, 0xaa, 0xdd, 0xc3, 0xb2, 0x29, 0x39, 0x56,
0xf1, 0xea, 0x6e, 0x71, 0x56, 0xe7, 0xeb
};
static const u8 enc_output035[] __initconst = {
0x46, 0xa8, 0x0c, 0x41, 0x87, 0x02, 0x47, 0x20,
0x08, 0x46, 0x27, 0x58, 0x00, 0x80, 0xdd, 0xe5,
0xa3, 0xf4, 0xa1, 0x10, 0x93, 0xa7, 0x07, 0x6e,
0xd6, 0xf3, 0xd3, 0x26, 0xbc, 0x7b, 0x70, 0x53,
0x4d, 0x4a, 0xa2, 0x83, 0x5a, 0x52, 0xe7, 0x2d,
0x14, 0xdf, 0x0e, 0x4f, 0x47, 0xf2, 0x5f
};
static const u8 enc_assoc035[] __initconst = {
0x37, 0x46, 0x18, 0xa0, 0x6e, 0xa9, 0x8a, 0x48
};
static const u8 enc_nonce035[] __initconst = {
0x47, 0x0a, 0x33, 0x9e, 0xcb, 0x32, 0x19, 0xb8,
0xb8, 0x1a, 0x1f, 0x8b
};
static const u8 enc_key035[] __initconst = {
0x7f, 0x5b, 0x74, 0xc0, 0x7e, 0xd1, 0xb4, 0x0f,
0xd1, 0x43, 0x58, 0xfe, 0x2f, 0xf2, 0xa7, 0x40,
0xc1, 0x16, 0xc7, 0x70, 0x65, 0x10, 0xe6, 0xa4,
0x37, 0xf1, 0x9e, 0xa4, 0x99, 0x11, 0xce, 0xc4
};
/* wycheproof - misc */
static const u8 enc_input036[] __initconst = {
0xb9, 0xc5, 0x54, 0xcb, 0xc3, 0x6a, 0xc1, 0x8a,
0xe8, 0x97, 0xdf, 0x7b, 0xee, 0xca, 0xc1, 0xdb,
0xeb, 0x4e, 0xaf, 0xa1, 0x56, 0xbb, 0x60, 0xce,
0x2e, 0x5d, 0x48, 0xf0, 0x57, 0x15, 0xe6, 0x78
};
static const u8 enc_output036[] __initconst = {
0xea, 0x29, 0xaf, 0xa4, 0x9d, 0x36, 0xe8, 0x76,
0x0f, 0x5f, 0xe1, 0x97, 0x23, 0xb9, 0x81, 0x1e,
0xd5, 0xd5, 0x19, 0x93, 0x4a, 0x44, 0x0f, 0x50,
0x81, 0xac, 0x43, 0x0b, 0x95, 0x3b, 0x0e, 0x21,
0x22, 0x25, 0x41, 0xaf, 0x46, 0xb8, 0x65, 0x33,
0xc6, 0xb6, 0x8d, 0x2f, 0xf1, 0x08, 0xa7, 0xea
};
static const u8 enc_assoc036[] __initconst = { };
static const u8 enc_nonce036[] __initconst = {
0x72, 0xcf, 0xd9, 0x0e, 0xf3, 0x02, 0x6c, 0xa2,
0x2b, 0x7e, 0x6e, 0x6a
};
static const u8 enc_key036[] __initconst = {
0xe1, 0x73, 0x1d, 0x58, 0x54, 0xe1, 0xb7, 0x0c,
0xb3, 0xff, 0xe8, 0xb7, 0x86, 0xa2, 0xb3, 0xeb,
0xf0, 0x99, 0x43, 0x70, 0x95, 0x47, 0x57, 0xb9,
0xdc, 0x8c, 0x7b, 0xc5, 0x35, 0x46, 0x34, 0xa3
};
/* wycheproof - misc */
static const u8 enc_input037[] __initconst = {
0x6b, 0x26, 0x04, 0x99, 0x6c, 0xd3, 0x0c, 0x14,
0xa1, 0x3a, 0x52, 0x57, 0xed, 0x6c, 0xff, 0xd3,
0xbc, 0x5e, 0x29, 0xd6, 0xb9, 0x7e, 0xb1, 0x79,
0x9e, 0xb3, 0x35, 0xe2, 0x81, 0xea, 0x45, 0x1e
};
static const u8 enc_output037[] __initconst = {
0x6d, 0xad, 0x63, 0x78, 0x97, 0x54, 0x4d, 0x8b,
0xf6, 0xbe, 0x95, 0x07, 0xed, 0x4d, 0x1b, 0xb2,
0xe9, 0x54, 0xbc, 0x42, 0x7e, 0x5d, 0xe7, 0x29,
0xda, 0xf5, 0x07, 0x62, 0x84, 0x6f, 0xf2, 0xf4,
0x7b, 0x99, 0x7d, 0x93, 0xc9, 0x82, 0x18, 0x9d,
0x70, 0x95, 0xdc, 0x79, 0x4c, 0x74, 0x62, 0x32
};
static const u8 enc_assoc037[] __initconst = {
0x23, 0x33, 0xe5, 0xce, 0x0f, 0x93, 0xb0, 0x59
};
static const u8 enc_nonce037[] __initconst = {
0x26, 0x28, 0x80, 0xd4, 0x75, 0xf3, 0xda, 0xc5,
0x34, 0x0d, 0xd1, 0xb8
};
static const u8 enc_key037[] __initconst = {
0x27, 0xd8, 0x60, 0x63, 0x1b, 0x04, 0x85, 0xa4,
0x10, 0x70, 0x2f, 0xea, 0x61, 0xbc, 0x87, 0x3f,
0x34, 0x42, 0x26, 0x0c, 0xad, 0xed, 0x4a, 0xbd,
0xe2, 0x5b, 0x78, 0x6a, 0x2d, 0x97, 0xf1, 0x45
};
/* wycheproof - misc */
static const u8 enc_input038[] __initconst = {
0x97, 0x3d, 0x0c, 0x75, 0x38, 0x26, 0xba, 0xe4,
0x66, 0xcf, 0x9a, 0xbb, 0x34, 0x93, 0x15, 0x2e,
0x9d, 0xe7, 0x81, 0x9e, 0x2b, 0xd0, 0xc7, 0x11,
0x71, 0x34, 0x6b, 0x4d, 0x2c, 0xeb, 0xf8, 0x04,
0x1a, 0xa3, 0xce, 0xdc, 0x0d, 0xfd, 0x7b, 0x46,
0x7e, 0x26, 0x22, 0x8b, 0xc8, 0x6c, 0x9a
};
static const u8 enc_output038[] __initconst = {
0xfb, 0xa7, 0x8a, 0xe4, 0xf9, 0xd8, 0x08, 0xa6,
0x2e, 0x3d, 0xa4, 0x0b, 0xe2, 0xcb, 0x77, 0x00,
0xc3, 0x61, 0x3d, 0x9e, 0xb2, 0xc5, 0x29, 0xc6,
0x52, 0xe7, 0x6a, 0x43, 0x2c, 0x65, 0x8d, 0x27,
0x09, 0x5f, 0x0e, 0xb8, 0xf9, 0x40, 0xc3, 0x24,
0x98, 0x1e, 0xa9, 0x35, 0xe5, 0x07, 0xf9, 0x8f,
0x04, 0x69, 0x56, 0xdb, 0x3a, 0x51, 0x29, 0x08,
0xbd, 0x7a, 0xfc, 0x8f, 0x2a, 0xb0, 0xa9
};
static const u8 enc_assoc038[] __initconst = { };
static const u8 enc_nonce038[] __initconst = {
0xe7, 0x4a, 0x51, 0x5e, 0x7e, 0x21, 0x02, 0xb9,
0x0b, 0xef, 0x55, 0xd2
};
static const u8 enc_key038[] __initconst = {
0xcf, 0x0d, 0x40, 0xa4, 0x64, 0x4e, 0x5f, 0x51,
0x81, 0x51, 0x65, 0xd5, 0x30, 0x1b, 0x22, 0x63,
0x1f, 0x45, 0x44, 0xc4, 0x9a, 0x18, 0x78, 0xe3,
0xa0, 0xa5, 0xe8, 0xe1, 0xaa, 0xe0, 0xf2, 0x64
};
/* wycheproof - misc */
static const u8 enc_input039[] __initconst = {
0xa9, 0x89, 0x95, 0x50, 0x4d, 0xf1, 0x6f, 0x74,
0x8b, 0xfb, 0x77, 0x85, 0xff, 0x91, 0xee, 0xb3,
0xb6, 0x60, 0xea, 0x9e, 0xd3, 0x45, 0x0c, 0x3d,
0x5e, 0x7b, 0x0e, 0x79, 0xef, 0x65, 0x36, 0x59,
0xa9, 0x97, 0x8d, 0x75, 0x54, 0x2e, 0xf9, 0x1c,
0x45, 0x67, 0x62, 0x21, 0x56, 0x40, 0xb9
};
static const u8 enc_output039[] __initconst = {
0xa1, 0xff, 0xed, 0x80, 0x76, 0x18, 0x29, 0xec,
0xce, 0x24, 0x2e, 0x0e, 0x88, 0xb1, 0x38, 0x04,
0x90, 0x16, 0xbc, 0xa0, 0x18, 0xda, 0x2b, 0x6e,
0x19, 0x98, 0x6b, 0x3e, 0x31, 0x8c, 0xae, 0x8d,
0x80, 0x61, 0x98, 0xfb, 0x4c, 0x52, 0x7c, 0xc3,
0x93, 0x50, 0xeb, 0xdd, 0xea, 0xc5, 0x73, 0xc4,
0xcb, 0xf0, 0xbe, 0xfd, 0xa0, 0xb7, 0x02, 0x42,
0xc6, 0x40, 0xd7, 0xcd, 0x02, 0xd7, 0xa3
};
static const u8 enc_assoc039[] __initconst = {
0xb3, 0xe4, 0x06, 0x46, 0x83, 0xb0, 0x2d, 0x84
};
static const u8 enc_nonce039[] __initconst = {
0xd4, 0xd8, 0x07, 0x34, 0x16, 0x83, 0x82, 0x5b,
0x31, 0xcd, 0x4d, 0x95
};
static const u8 enc_key039[] __initconst = {
0x6c, 0xbf, 0xd7, 0x1c, 0x64, 0x5d, 0x18, 0x4c,
0xf5, 0xd2, 0x3c, 0x40, 0x2b, 0xdb, 0x0d, 0x25,
0xec, 0x54, 0x89, 0x8c, 0x8a, 0x02, 0x73, 0xd4,
0x2e, 0xb5, 0xbe, 0x10, 0x9f, 0xdc, 0xb2, 0xac
};
/* wycheproof - misc */
static const u8 enc_input040[] __initconst = {
0xd0, 0x96, 0x80, 0x31, 0x81, 0xbe, 0xef, 0x9e,
0x00, 0x8f, 0xf8, 0x5d, 0x5d, 0xdc, 0x38, 0xdd,
0xac, 0xf0, 0xf0, 0x9e, 0xe5, 0xf7, 0xe0, 0x7f,
0x1e, 0x40, 0x79, 0xcb, 0x64, 0xd0, 0xdc, 0x8f,
0x5e, 0x67, 0x11, 0xcd, 0x49, 0x21, 0xa7, 0x88,
0x7d, 0xe7, 0x6e, 0x26, 0x78, 0xfd, 0xc6, 0x76,
0x18, 0xf1, 0x18, 0x55, 0x86, 0xbf, 0xea, 0x9d,
0x4c, 0x68, 0x5d, 0x50, 0xe4, 0xbb, 0x9a, 0x82
};
static const u8 enc_output040[] __initconst = {
0x9a, 0x4e, 0xf2, 0x2b, 0x18, 0x16, 0x77, 0xb5,
0x75, 0x5c, 0x08, 0xf7, 0x47, 0xc0, 0xf8, 0xd8,
0xe8, 0xd4, 0xc1, 0x8a, 0x9c, 0xc2, 0x40, 0x5c,
0x12, 0xbb, 0x51, 0xbb, 0x18, 0x72, 0xc8, 0xe8,
0xb8, 0x77, 0x67, 0x8b, 0xec, 0x44, 0x2c, 0xfc,
0xbb, 0x0f, 0xf4, 0x64, 0xa6, 0x4b, 0x74, 0x33,
0x2c, 0xf0, 0x72, 0x89, 0x8c, 0x7e, 0x0e, 0xdd,
0xf6, 0x23, 0x2e, 0xa6, 0xe2, 0x7e, 0xfe, 0x50,
0x9f, 0xf3, 0x42, 0x7a, 0x0f, 0x32, 0xfa, 0x56,
0x6d, 0x9c, 0xa0, 0xa7, 0x8a, 0xef, 0xc0, 0x13
};
static const u8 enc_assoc040[] __initconst = { };
static const u8 enc_nonce040[] __initconst = {
0xd6, 0x10, 0x40, 0xa3, 0x13, 0xed, 0x49, 0x28,
0x23, 0xcc, 0x06, 0x5b
};
static const u8 enc_key040[] __initconst = {
0x5b, 0x1d, 0x10, 0x35, 0xc0, 0xb1, 0x7e, 0xe0,
0xb0, 0x44, 0x47, 0x67, 0xf8, 0x0a, 0x25, 0xb8,
0xc1, 0xb7, 0x41, 0xf4, 0xb5, 0x0a, 0x4d, 0x30,
0x52, 0x22, 0x6b, 0xaa, 0x1c, 0x6f, 0xb7, 0x01
};
/* wycheproof - misc */
static const u8 enc_input041[] __initconst = {
0x94, 0xee, 0x16, 0x6d, 0x6d, 0x6e, 0xcf, 0x88,
0x32, 0x43, 0x71, 0x36, 0xb4, 0xae, 0x80, 0x5d,
0x42, 0x88, 0x64, 0x35, 0x95, 0x86, 0xd9, 0x19,
0x3a, 0x25, 0x01, 0x62, 0x93, 0xed, 0xba, 0x44,
0x3c, 0x58, 0xe0, 0x7e, 0x7b, 0x71, 0x95, 0xec,
0x5b, 0xd8, 0x45, 0x82, 0xa9, 0xd5, 0x6c, 0x8d,
0x4a, 0x10, 0x8c, 0x7d, 0x7c, 0xe3, 0x4e, 0x6c,
0x6f, 0x8e, 0xa1, 0xbe, 0xc0, 0x56, 0x73, 0x17
};
static const u8 enc_output041[] __initconst = {
0x5f, 0xbb, 0xde, 0xcc, 0x34, 0xbe, 0x20, 0x16,
0x14, 0xf6, 0x36, 0x03, 0x1e, 0xeb, 0x42, 0xf1,
0xca, 0xce, 0x3c, 0x79, 0xa1, 0x2c, 0xff, 0xd8,
0x71, 0xee, 0x8e, 0x73, 0x82, 0x0c, 0x82, 0x97,
0x49, 0xf1, 0xab, 0xb4, 0x29, 0x43, 0x67, 0x84,
0x9f, 0xb6, 0xc2, 0xaa, 0x56, 0xbd, 0xa8, 0xa3,
0x07, 0x8f, 0x72, 0x3d, 0x7c, 0x1c, 0x85, 0x20,
0x24, 0xb0, 0x17, 0xb5, 0x89, 0x73, 0xfb, 0x1e,
0x09, 0x26, 0x3d, 0xa7, 0xb4, 0xcb, 0x92, 0x14,
0x52, 0xf9, 0x7d, 0xca, 0x40, 0xf5, 0x80, 0xec
};
static const u8 enc_assoc041[] __initconst = {
0x71, 0x93, 0xf6, 0x23, 0x66, 0x33, 0x21, 0xa2
};
static const u8 enc_nonce041[] __initconst = {
0xd3, 0x1c, 0x21, 0xab, 0xa1, 0x75, 0xb7, 0x0d,
0xe4, 0xeb, 0xb1, 0x9c
};
static const u8 enc_key041[] __initconst = {
0x97, 0xd6, 0x35, 0xc4, 0xf4, 0x75, 0x74, 0xd9,
0x99, 0x8a, 0x90, 0x87, 0x5d, 0xa1, 0xd3, 0xa2,
0x84, 0xb7, 0x55, 0xb2, 0xd3, 0x92, 0x97, 0xa5,
0x72, 0x52, 0x35, 0x19, 0x0e, 0x10, 0xa9, 0x7e
};
/* wycheproof - misc */
static const u8 enc_input042[] __initconst = {
0xb4, 0x29, 0xeb, 0x80, 0xfb, 0x8f, 0xe8, 0xba,
0xed, 0xa0, 0xc8, 0x5b, 0x9c, 0x33, 0x34, 0x58,
0xe7, 0xc2, 0x99, 0x2e, 0x55, 0x84, 0x75, 0x06,
0x9d, 0x12, 0xd4, 0x5c, 0x22, 0x21, 0x75, 0x64,
0x12, 0x15, 0x88, 0x03, 0x22, 0x97, 0xef, 0xf5,
0x67, 0x83, 0x74, 0x2a, 0x5f, 0xc2, 0x2d, 0x74,
0x10, 0xff, 0xb2, 0x9d, 0x66, 0x09, 0x86, 0x61,
0xd7, 0x6f, 0x12, 0x6c, 0x3c, 0x27, 0x68, 0x9e,
0x43, 0xb3, 0x72, 0x67, 0xca, 0xc5, 0xa3, 0xa6,
0xd3, 0xab, 0x49, 0xe3, 0x91, 0xda, 0x29, 0xcd,
0x30, 0x54, 0xa5, 0x69, 0x2e, 0x28, 0x07, 0xe4,
0xc3, 0xea, 0x46, 0xc8, 0x76, 0x1d, 0x50, 0xf5,
0x92
};
static const u8 enc_output042[] __initconst = {
0xd0, 0x10, 0x2f, 0x6c, 0x25, 0x8b, 0xf4, 0x97,
0x42, 0xce, 0xc3, 0x4c, 0xf2, 0xd0, 0xfe, 0xdf,
0x23, 0xd1, 0x05, 0xfb, 0x4c, 0x84, 0xcf, 0x98,
0x51, 0x5e, 0x1b, 0xc9, 0xa6, 0x4f, 0x8a, 0xd5,
0xbe, 0x8f, 0x07, 0x21, 0xbd, 0xe5, 0x06, 0x45,
0xd0, 0x00, 0x83, 0xc3, 0xa2, 0x63, 0xa3, 0x10,
0x53, 0xb7, 0x60, 0x24, 0x5f, 0x52, 0xae, 0x28,
0x66, 0xa5, 0xec, 0x83, 0xb1, 0x9f, 0x61, 0xbe,
0x1d, 0x30, 0xd5, 0xc5, 0xd9, 0xfe, 0xcc, 0x4c,
0xbb, 0xe0, 0x8f, 0xd3, 0x85, 0x81, 0x3a, 0x2a,
0xa3, 0x9a, 0x00, 0xff, 0x9c, 0x10, 0xf7, 0xf2,
0x37, 0x02, 0xad, 0xd1, 0xe4, 0xb2, 0xff, 0xa3,
0x1c, 0x41, 0x86, 0x5f, 0xc7, 0x1d, 0xe1, 0x2b,
0x19, 0x61, 0x21, 0x27, 0xce, 0x49, 0x99, 0x3b,
0xb0
};
static const u8 enc_assoc042[] __initconst = { };
static const u8 enc_nonce042[] __initconst = {
0x17, 0xc8, 0x6a, 0x8a, 0xbb, 0xb7, 0xe0, 0x03,
0xac, 0xde, 0x27, 0x99
};
static const u8 enc_key042[] __initconst = {
0xfe, 0x6e, 0x55, 0xbd, 0xae, 0xd1, 0xf7, 0x28,
0x4c, 0xa5, 0xfc, 0x0f, 0x8c, 0x5f, 0x2b, 0x8d,
0xf5, 0x6d, 0xc0, 0xf4, 0x9e, 0x8c, 0xa6, 0x6a,
0x41, 0x99, 0x5e, 0x78, 0x33, 0x51, 0xf9, 0x01
};
/* wycheproof - misc */
static const u8 enc_input043[] __initconst = {
0xce, 0xb5, 0x34, 0xce, 0x50, 0xdc, 0x23, 0xff,
0x63, 0x8a, 0xce, 0x3e, 0xf6, 0x3a, 0xb2, 0xcc,
0x29, 0x73, 0xee, 0xad, 0xa8, 0x07, 0x85, 0xfc,
0x16, 0x5d, 0x06, 0xc2, 0xf5, 0x10, 0x0f, 0xf5,
0xe8, 0xab, 0x28, 0x82, 0xc4, 0x75, 0xaf, 0xcd,
0x05, 0xcc, 0xd4, 0x9f, 0x2e, 0x7d, 0x8f, 0x55,
0xef, 0x3a, 0x72, 0xe3, 0xdc, 0x51, 0xd6, 0x85,
0x2b, 0x8e, 0x6b, 0x9e, 0x7a, 0xec, 0xe5, 0x7b,
0xe6, 0x55, 0x6b, 0x0b, 0x6d, 0x94, 0x13, 0xe3,
0x3f, 0xc5, 0xfc, 0x24, 0xa9, 0xa2, 0x05, 0xad,
0x59, 0x57, 0x4b, 0xb3, 0x9d, 0x94, 0x4a, 0x92,
0xdc, 0x47, 0x97, 0x0d, 0x84, 0xa6, 0xad, 0x31,
0x76
};
static const u8 enc_output043[] __initconst = {
0x75, 0x45, 0x39, 0x1b, 0x51, 0xde, 0x01, 0xd5,
0xc5, 0x3d, 0xfa, 0xca, 0x77, 0x79, 0x09, 0x06,
0x3e, 0x58, 0xed, 0xee, 0x4b, 0xb1, 0x22, 0x7e,
0x71, 0x10, 0xac, 0x4d, 0x26, 0x20, 0xc2, 0xae,
0xc2, 0xf8, 0x48, 0xf5, 0x6d, 0xee, 0xb0, 0x37,
0xa8, 0xdc, 0xed, 0x75, 0xaf, 0xa8, 0xa6, 0xc8,
0x90, 0xe2, 0xde, 0xe4, 0x2f, 0x95, 0x0b, 0xb3,
0x3d, 0x9e, 0x24, 0x24, 0xd0, 0x8a, 0x50, 0x5d,
0x89, 0x95, 0x63, 0x97, 0x3e, 0xd3, 0x88, 0x70,
0xf3, 0xde, 0x6e, 0xe2, 0xad, 0xc7, 0xfe, 0x07,
0x2c, 0x36, 0x6c, 0x14, 0xe2, 0xcf, 0x7c, 0xa6,
0x2f, 0xb3, 0xd3, 0x6b, 0xee, 0x11, 0x68, 0x54,
0x61, 0xb7, 0x0d, 0x44, 0xef, 0x8c, 0x66, 0xc5,
0xc7, 0xbb, 0xf1, 0x0d, 0xca, 0xdd, 0x7f, 0xac,
0xf6
};
static const u8 enc_assoc043[] __initconst = {
0xa1, 0x1c, 0x40, 0xb6, 0x03, 0x76, 0x73, 0x30
};
static const u8 enc_nonce043[] __initconst = {
0x46, 0x36, 0x2f, 0x45, 0xd6, 0x37, 0x9e, 0x63,
0xe5, 0x22, 0x94, 0x60
};
static const u8 enc_key043[] __initconst = {
0xaa, 0xbc, 0x06, 0x34, 0x74, 0xe6, 0x5c, 0x4c,
0x3e, 0x9b, 0xdc, 0x48, 0x0d, 0xea, 0x97, 0xb4,
0x51, 0x10, 0xc8, 0x61, 0x88, 0x46, 0xff, 0x6b,
0x15, 0xbd, 0xd2, 0xa4, 0xa5, 0x68, 0x2c, 0x4e
};
/* wycheproof - misc */
static const u8 enc_input044[] __initconst = {
0xe5, 0xcc, 0xaa, 0x44, 0x1b, 0xc8, 0x14, 0x68,
0x8f, 0x8f, 0x6e, 0x8f, 0x28, 0xb5, 0x00, 0xb2
};
static const u8 enc_output044[] __initconst = {
0x7e, 0x72, 0xf5, 0xa1, 0x85, 0xaf, 0x16, 0xa6,
0x11, 0x92, 0x1b, 0x43, 0x8f, 0x74, 0x9f, 0x0b,
0x12, 0x42, 0xc6, 0x70, 0x73, 0x23, 0x34, 0x02,
0x9a, 0xdf, 0xe1, 0xc5, 0x00, 0x16, 0x51, 0xe4
};
static const u8 enc_assoc044[] __initconst = {
0x02
};
static const u8 enc_nonce044[] __initconst = {
0x87, 0x34, 0x5f, 0x10, 0x55, 0xfd, 0x9e, 0x21,
0x02, 0xd5, 0x06, 0x56
};
static const u8 enc_key044[] __initconst = {
0x7d, 0x00, 0xb4, 0x80, 0x95, 0xad, 0xfa, 0x32,
0x72, 0x05, 0x06, 0x07, 0xb2, 0x64, 0x18, 0x50,
0x02, 0xba, 0x99, 0x95, 0x7c, 0x49, 0x8b, 0xe0,
0x22, 0x77, 0x0f, 0x2c, 0xe2, 0xf3, 0x14, 0x3c
};
/* wycheproof - misc */
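/*
 * Vectors 044-049 hold the plaintext at 16 bytes and instead vary the
 * AAD length (1, 2, 15, 16, 17 and 30 bytes) around the Poly1305
 * block boundary.
 */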
static const u8 enc_input045[] __initconst = {
0x02, 0xcd, 0xe1, 0x68, 0xfb, 0xa3, 0xf5, 0x44,
0xbb, 0xd0, 0x33, 0x2f, 0x7a, 0xde, 0xad, 0xa8
};
static const u8 enc_output045[] __initconst = {
0x85, 0xf2, 0x9a, 0x71, 0x95, 0x57, 0xcd, 0xd1,
0x4d, 0x1f, 0x8f, 0xff, 0xab, 0x6d, 0x9e, 0x60,
0x73, 0x2c, 0xa3, 0x2b, 0xec, 0xd5, 0x15, 0xa1,
0xed, 0x35, 0x3f, 0x54, 0x2e, 0x99, 0x98, 0x58
};
static const u8 enc_assoc045[] __initconst = {
0xb6, 0x48
};
static const u8 enc_nonce045[] __initconst = {
0x87, 0xa3, 0x16, 0x3e, 0xc0, 0x59, 0x8a, 0xd9,
0x5b, 0x3a, 0xa7, 0x13
};
static const u8 enc_key045[] __initconst = {
0x64, 0x32, 0x71, 0x7f, 0x1d, 0xb8, 0x5e, 0x41,
0xac, 0x78, 0x36, 0xbc, 0xe2, 0x51, 0x85, 0xa0,
0x80, 0xd5, 0x76, 0x2b, 0x9e, 0x2b, 0x18, 0x44,
0x4b, 0x6e, 0xc7, 0x2c, 0x3b, 0xd8, 0xe4, 0xdc
};
/* wycheproof - misc */
static const u8 enc_input046[] __initconst = {
0x16, 0xdd, 0xd2, 0x3f, 0xf5, 0x3f, 0x3d, 0x23,
0xc0, 0x63, 0x34, 0x48, 0x70, 0x40, 0xeb, 0x47
};
static const u8 enc_output046[] __initconst = {
0xc1, 0xb2, 0x95, 0x93, 0x6d, 0x56, 0xfa, 0xda,
0xc0, 0x3e, 0x5f, 0x74, 0x2b, 0xff, 0x73, 0xa1,
0x39, 0xc4, 0x57, 0xdb, 0xab, 0x66, 0x38, 0x2b,
0xab, 0xb3, 0xb5, 0x58, 0x00, 0xcd, 0xa5, 0xb8
};
static const u8 enc_assoc046[] __initconst = {
0xbd, 0x4c, 0xd0, 0x2f, 0xc7, 0x50, 0x2b, 0xbd,
0xbd, 0xf6, 0xc9, 0xa3, 0xcb, 0xe8, 0xf0
};
static const u8 enc_nonce046[] __initconst = {
0x6f, 0x57, 0x3a, 0xa8, 0x6b, 0xaa, 0x49, 0x2b,
0xa4, 0x65, 0x96, 0xdf
};
static const u8 enc_key046[] __initconst = {
0x8e, 0x34, 0xcf, 0x73, 0xd2, 0x45, 0xa1, 0x08,
0x2a, 0x92, 0x0b, 0x86, 0x36, 0x4e, 0xb8, 0x96,
0xc4, 0x94, 0x64, 0x67, 0xbc, 0xb3, 0xd5, 0x89,
0x29, 0xfc, 0xb3, 0x66, 0x90, 0xe6, 0x39, 0x4f
};
/* wycheproof - misc */
static const u8 enc_input047[] __initconst = {
0x62, 0x3b, 0x78, 0x50, 0xc3, 0x21, 0xe2, 0xcf,
0x0c, 0x6f, 0xbc, 0xc8, 0xdf, 0xd1, 0xaf, 0xf2
};
static const u8 enc_output047[] __initconst = {
0xc8, 0x4c, 0x9b, 0xb7, 0xc6, 0x1c, 0x1b, 0xcb,
0x17, 0x77, 0x2a, 0x1c, 0x50, 0x0c, 0x50, 0x95,
0xdb, 0xad, 0xf7, 0xa5, 0x13, 0x8c, 0xa0, 0x34,
0x59, 0xa2, 0xcd, 0x65, 0x83, 0x1e, 0x09, 0x2f
};
static const u8 enc_assoc047[] __initconst = {
0x89, 0xcc, 0xe9, 0xfb, 0x47, 0x44, 0x1d, 0x07,
0xe0, 0x24, 0x5a, 0x66, 0xfe, 0x8b, 0x77, 0x8b
};
static const u8 enc_nonce047[] __initconst = {
0x1a, 0x65, 0x18, 0xf0, 0x2e, 0xde, 0x1d, 0xa6,
0x80, 0x92, 0x66, 0xd9
};
static const u8 enc_key047[] __initconst = {
0xcb, 0x55, 0x75, 0xf5, 0xc7, 0xc4, 0x5c, 0x91,
0xcf, 0x32, 0x0b, 0x13, 0x9f, 0xb5, 0x94, 0x23,
0x75, 0x60, 0xd0, 0xa3, 0xe6, 0xf8, 0x65, 0xa6,
0x7d, 0x4f, 0x63, 0x3f, 0x2c, 0x08, 0xf0, 0x16
};
/* wycheproof - misc */
static const u8 enc_input048[] __initconst = {
0x87, 0xb3, 0xa4, 0xd7, 0xb2, 0x6d, 0x8d, 0x32,
0x03, 0xa0, 0xde, 0x1d, 0x64, 0xef, 0x82, 0xe3
};
static const u8 enc_output048[] __initconst = {
0x94, 0xbc, 0x80, 0x62, 0x1e, 0xd1, 0xe7, 0x1b,
0x1f, 0xd2, 0xb5, 0xc3, 0xa1, 0x5e, 0x35, 0x68,
0x33, 0x35, 0x11, 0x86, 0x17, 0x96, 0x97, 0x84,
0x01, 0x59, 0x8b, 0x96, 0x37, 0x22, 0xf5, 0xb3
};
static const u8 enc_assoc048[] __initconst = {
0xd1, 0x9f, 0x2d, 0x98, 0x90, 0x95, 0xf7, 0xab,
0x03, 0xa5, 0xfd, 0xe8, 0x44, 0x16, 0xe0, 0x0c,
0x0e
};
static const u8 enc_nonce048[] __initconst = {
0x56, 0x4d, 0xee, 0x49, 0xab, 0x00, 0xd2, 0x40,
0xfc, 0x10, 0x68, 0xc3
};
static const u8 enc_key048[] __initconst = {
0xa5, 0x56, 0x9e, 0x72, 0x9a, 0x69, 0xb2, 0x4b,
0xa6, 0xe0, 0xff, 0x15, 0xc4, 0x62, 0x78, 0x97,
0x43, 0x68, 0x24, 0xc9, 0x41, 0xe9, 0xd0, 0x0b,
0x2e, 0x93, 0xfd, 0xdc, 0x4b, 0xa7, 0x76, 0x57
};
/* wycheproof - misc */
static const u8 enc_input049[] __initconst = {
0xe6, 0x01, 0xb3, 0x85, 0x57, 0x79, 0x7d, 0xa2,
0xf8, 0xa4, 0x10, 0x6a, 0x08, 0x9d, 0x1d, 0xa6
};
static const u8 enc_output049[] __initconst = {
0x29, 0x9b, 0x5d, 0x3f, 0x3d, 0x03, 0xc0, 0x87,
0x20, 0x9a, 0x16, 0xe2, 0x85, 0x14, 0x31, 0x11,
0x4b, 0x45, 0x4e, 0xd1, 0x98, 0xde, 0x11, 0x7e,
0x83, 0xec, 0x49, 0xfa, 0x8d, 0x85, 0x08, 0xd6
};
static const u8 enc_assoc049[] __initconst = {
0x5e, 0x64, 0x70, 0xfa, 0xcd, 0x99, 0xc1, 0xd8,
0x1e, 0x37, 0xcd, 0x44, 0x01, 0x5f, 0xe1, 0x94,
0x80, 0xa2, 0xa4, 0xd3, 0x35, 0x2a, 0x4f, 0xf5,
0x60, 0xc0, 0x64, 0x0f, 0xdb, 0xda
};
static const u8 enc_nonce049[] __initconst = {
0xdf, 0x87, 0x13, 0xe8, 0x7e, 0xc3, 0xdb, 0xcf,
0xad, 0x14, 0xd5, 0x3e
};
static const u8 enc_key049[] __initconst = {
0x56, 0x20, 0x74, 0x65, 0xb4, 0xe4, 0x8e, 0x6d,
0x04, 0x63, 0x0f, 0x4a, 0x42, 0xf3, 0x5c, 0xfc,
0x16, 0x3a, 0xb2, 0x89, 0xc2, 0x2a, 0x2b, 0x47,
0x84, 0xf6, 0xf9, 0x29, 0x03, 0x30, 0xbe, 0xe0
};
/* wycheproof - misc */
static const u8 enc_input050[] __initconst = {
0xdc, 0x9e, 0x9e, 0xaf, 0x11, 0xe3, 0x14, 0x18,
0x2d, 0xf6, 0xa4, 0xeb, 0xa1, 0x7a, 0xec, 0x9c
};
static const u8 enc_output050[] __initconst = {
0x60, 0x5b, 0xbf, 0x90, 0xae, 0xb9, 0x74, 0xf6,
0x60, 0x2b, 0xc7, 0x78, 0x05, 0x6f, 0x0d, 0xca,
0x38, 0xea, 0x23, 0xd9, 0x90, 0x54, 0xb4, 0x6b,
0x42, 0xff, 0xe0, 0x04, 0x12, 0x9d, 0x22, 0x04
};
static const u8 enc_assoc050[] __initconst = {
0xba, 0x44, 0x6f, 0x6f, 0x9a, 0x0c, 0xed, 0x22,
0x45, 0x0f, 0xeb, 0x10, 0x73, 0x7d, 0x90, 0x07,
0xfd, 0x69, 0xab, 0xc1, 0x9b, 0x1d, 0x4d, 0x90,
0x49, 0xa5, 0x55, 0x1e, 0x86, 0xec, 0x2b, 0x37
};
static const u8 enc_nonce050[] __initconst = {
0x8d, 0xf4, 0xb1, 0x5a, 0x88, 0x8c, 0x33, 0x28,
0x6a, 0x7b, 0x76, 0x51
};
static const u8 enc_key050[] __initconst = {
0x39, 0x37, 0x98, 0x6a, 0xf8, 0x6d, 0xaf, 0xc1,
0xba, 0x0c, 0x46, 0x72, 0xd8, 0xab, 0xc4, 0x6c,
0x20, 0x70, 0x62, 0x68, 0x2d, 0x9c, 0x26, 0x4a,
0xb0, 0x6d, 0x6c, 0x58, 0x07, 0x20, 0x51, 0x30
};
/* wycheproof - misc */
static const u8 enc_input051[] __initconst = {
0x81, 0xce, 0x84, 0xed, 0xe9, 0xb3, 0x58, 0x59,
0xcc, 0x8c, 0x49, 0xa8, 0xf6, 0xbe, 0x7d, 0xc6
};
static const u8 enc_output051[] __initconst = {
0x7b, 0x7c, 0xe0, 0xd8, 0x24, 0x80, 0x9a, 0x70,
0xde, 0x32, 0x56, 0x2c, 0xcf, 0x2c, 0x2b, 0xbd,
0x15, 0xd4, 0x4a, 0x00, 0xce, 0x0d, 0x19, 0xb4,
0x23, 0x1f, 0x92, 0x1e, 0x22, 0xbc, 0x0a, 0x43
};
static const u8 enc_assoc051[] __initconst = {
0xd4, 0x1a, 0x82, 0x8d, 0x5e, 0x71, 0x82, 0x92,
0x47, 0x02, 0x19, 0x05, 0x40, 0x2e, 0xa2, 0x57,
0xdc, 0xcb, 0xc3, 0xb8, 0x0f, 0xcd, 0x56, 0x75,
0x05, 0x6b, 0x68, 0xbb, 0x59, 0xe6, 0x2e, 0x88,
0x73
};
static const u8 enc_nonce051[] __initconst = {
0xbe, 0x40, 0xe5, 0xf1, 0xa1, 0x18, 0x17, 0xa0,
0xa8, 0xfa, 0x89, 0x49
};
static const u8 enc_key051[] __initconst = {
0x36, 0x37, 0x2a, 0xbc, 0xdb, 0x78, 0xe0, 0x27,
0x96, 0x46, 0xac, 0x3d, 0x17, 0x6b, 0x96, 0x74,
0xe9, 0x15, 0x4e, 0xec, 0xf0, 0xd5, 0x46, 0x9c,
0x65, 0x1e, 0xc7, 0xe1, 0x6b, 0x4c, 0x11, 0x99
};
/* wycheproof - misc */
static const u8 enc_input052[] __initconst = {
0xa6, 0x67, 0x47, 0xc8, 0x9e, 0x85, 0x7a, 0xf3,
0xa1, 0x8e, 0x2c, 0x79, 0x50, 0x00, 0x87, 0xed
};
static const u8 enc_output052[] __initconst = {
0xca, 0x82, 0xbf, 0xf3, 0xe2, 0xf3, 0x10, 0xcc,
0xc9, 0x76, 0x67, 0x2c, 0x44, 0x15, 0xe6, 0x9b,
0x57, 0x63, 0x8c, 0x62, 0xa5, 0xd8, 0x5d, 0xed,
0x77, 0x4f, 0x91, 0x3c, 0x81, 0x3e, 0xa0, 0x32
};
static const u8 enc_assoc052[] __initconst = {
0x3f, 0x2d, 0xd4, 0x9b, 0xbf, 0x09, 0xd6, 0x9a,
0x78, 0xa3, 0xd8, 0x0e, 0xa2, 0x56, 0x66, 0x14,
0xfc, 0x37, 0x94, 0x74, 0x19, 0x6c, 0x1a, 0xae,
0x84, 0x58, 0x3d, 0xa7, 0x3d, 0x7f, 0xf8, 0x5c,
0x6f, 0x42, 0xca, 0x42, 0x05, 0x6a, 0x97, 0x92,
0xcc, 0x1b, 0x9f, 0xb3, 0xc7, 0xd2, 0x61
};
static const u8 enc_nonce052[] __initconst = {
0x84, 0xc8, 0x7d, 0xae, 0x4e, 0xee, 0x27, 0x73,
0x0e, 0xc3, 0x5d, 0x12
};
static const u8 enc_key052[] __initconst = {
0x9f, 0x14, 0x79, 0xed, 0x09, 0x7d, 0x7f, 0xe5,
0x29, 0xc1, 0x1f, 0x2f, 0x5a, 0xdd, 0x9a, 0xaf,
0xf4, 0xa1, 0xca, 0x0b, 0x68, 0x99, 0x7a, 0x2c,
0xb7, 0xf7, 0x97, 0x49, 0xbd, 0x90, 0xaa, 0xf4
};
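/*
 * Annotation (inferred from the data, not from Wycheproof's own notes):
 * vectors 053-073 below switch to an 8-byte nonce and the 0x80..0x9f
 * key, with plaintexts apparently chosen so that the ciphertext comes
 * out as a constant 32-bit word pattern (all 0x00, all 0xff,
 * 0x00000080, 0x7fffffff, alternating words, ...), and with associated
 * data that repeats the same 16-byte pattern.
 */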
/* wycheproof - misc */
static const u8 enc_input053[] __initconst = {
0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83,
0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8,
0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b,
0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe
};
static const u8 enc_output053[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xe6, 0xd3, 0xd7, 0x32, 0x4a, 0x1c, 0xbb, 0xa7,
0x77, 0xbb, 0xb0, 0xec, 0xdd, 0xa3, 0x78, 0x07
};
static const u8 enc_assoc053[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static const u8 enc_nonce053[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key053[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
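/*
 * Illustrative sketch, not part of the original test data: one way a
 * harness might check an 8-byte-nonce vector such as 053 above, using
 * the chacha20poly1305_encrypt() library call declared in
 * <crypto/chacha20poly1305.h>, with get_unaligned_le64() and memcmp()
 * assumed available. check_vector053() is a hypothetical helper name,
 * and the block is compiled out so it cannot affect the real tables.
 */
#if 0
static bool __init check_vector053(void)
{
	u8 out[sizeof(enc_output053)];

	/*
	 * Encrypt the plaintext into out[]; the final 16 bytes of out[]
	 * receive the Poly1305 tag, so out[] is input-length + 16 bytes.
	 */
	chacha20poly1305_encrypt(out, enc_input053, sizeof(enc_input053),
				 enc_assoc053, sizeof(enc_assoc053),
				 get_unaligned_le64(enc_nonce053),
				 enc_key053);

	/* The expected output array is ciphertext || tag. */
	return !memcmp(out, enc_output053, sizeof(enc_output053));
}
#endif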
/* wycheproof - misc */
static const u8 enc_input054[] __initconst = {
0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83,
0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8,
0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b,
0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe,
0xe3, 0xbc, 0xdb, 0x5b, 0x1e, 0xde, 0xfc, 0xfe,
0x8b, 0xcd, 0xa1, 0xb6, 0xa1, 0x5c, 0x8c, 0x2b,
0x08, 0x69, 0xff, 0xd2, 0xec, 0x5e, 0x26, 0xe5,
0x53, 0xb7, 0xb2, 0x27, 0xfe, 0x87, 0xfd, 0xbd
};
static const u8 enc_output054[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x06, 0x2d, 0xe6, 0x79, 0x5f, 0x27, 0x4f, 0xd2,
0xa3, 0x05, 0xd7, 0x69, 0x80, 0xbc, 0x9c, 0xce
};
static const u8 enc_assoc054[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static const u8 enc_nonce054[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key054[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input055[] __initconst = {
0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83,
0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8,
0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b,
0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe,
0xe3, 0xbc, 0xdb, 0x5b, 0x1e, 0xde, 0xfc, 0xfe,
0x8b, 0xcd, 0xa1, 0xb6, 0xa1, 0x5c, 0x8c, 0x2b,
0x08, 0x69, 0xff, 0xd2, 0xec, 0x5e, 0x26, 0xe5,
0x53, 0xb7, 0xb2, 0x27, 0xfe, 0x87, 0xfd, 0xbd,
0x7a, 0xda, 0x44, 0x42, 0x42, 0x69, 0xbf, 0xfa,
0x55, 0x27, 0xf2, 0x70, 0xac, 0xf6, 0x85, 0x02,
0xb7, 0x4c, 0x5a, 0xe2, 0xe6, 0x0c, 0x05, 0x80,
0x98, 0x1a, 0x49, 0x38, 0x45, 0x93, 0x92, 0xc4,
0x9b, 0xb2, 0xf2, 0x84, 0xb6, 0x46, 0xef, 0xc7,
0xf3, 0xf0, 0xb1, 0x36, 0x1d, 0xc3, 0x48, 0xed,
0x77, 0xd3, 0x0b, 0xc5, 0x76, 0x92, 0xed, 0x38,
0xfb, 0xac, 0x01, 0x88, 0x38, 0x04, 0x88, 0xc7
};
static const u8 enc_output055[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xd8, 0xb4, 0x79, 0x02, 0xba, 0xae, 0xaf, 0xb3,
0x42, 0x03, 0x05, 0x15, 0x29, 0xaf, 0x28, 0x2e
};
static const u8 enc_assoc055[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static const u8 enc_nonce055[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key055[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input056[] __initconst = {
0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c,
0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17,
0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84,
0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41
};
static const u8 enc_output056[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xb3, 0x89, 0x1c, 0x84, 0x9c, 0xb5, 0x2c, 0x27,
0x74, 0x7e, 0xdf, 0xcf, 0x31, 0x21, 0x3b, 0xb6
};
static const u8 enc_assoc056[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce056[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key056[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input057[] __initconst = {
0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c,
0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17,
0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84,
0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41,
0x1c, 0x43, 0x24, 0xa4, 0xe1, 0x21, 0x03, 0x01,
0x74, 0x32, 0x5e, 0x49, 0x5e, 0xa3, 0x73, 0xd4,
0xf7, 0x96, 0x00, 0x2d, 0x13, 0xa1, 0xd9, 0x1a,
0xac, 0x48, 0x4d, 0xd8, 0x01, 0x78, 0x02, 0x42
};
static const u8 enc_output057[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xf0, 0xc1, 0x2d, 0x26, 0xef, 0x03, 0x02, 0x9b,
0x62, 0xc0, 0x08, 0xda, 0x27, 0xc5, 0xdc, 0x68
};
static const u8 enc_assoc057[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce057[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key057[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input058[] __initconst = {
0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c,
0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17,
0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84,
0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41,
0x1c, 0x43, 0x24, 0xa4, 0xe1, 0x21, 0x03, 0x01,
0x74, 0x32, 0x5e, 0x49, 0x5e, 0xa3, 0x73, 0xd4,
0xf7, 0x96, 0x00, 0x2d, 0x13, 0xa1, 0xd9, 0x1a,
0xac, 0x48, 0x4d, 0xd8, 0x01, 0x78, 0x02, 0x42,
0x85, 0x25, 0xbb, 0xbd, 0xbd, 0x96, 0x40, 0x05,
0xaa, 0xd8, 0x0d, 0x8f, 0x53, 0x09, 0x7a, 0xfd,
0x48, 0xb3, 0xa5, 0x1d, 0x19, 0xf3, 0xfa, 0x7f,
0x67, 0xe5, 0xb6, 0xc7, 0xba, 0x6c, 0x6d, 0x3b,
0x64, 0x4d, 0x0d, 0x7b, 0x49, 0xb9, 0x10, 0x38,
0x0c, 0x0f, 0x4e, 0xc9, 0xe2, 0x3c, 0xb7, 0x12,
0x88, 0x2c, 0xf4, 0x3a, 0x89, 0x6d, 0x12, 0xc7,
0x04, 0x53, 0xfe, 0x77, 0xc7, 0xfb, 0x77, 0x38
};
static const u8 enc_output058[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xee, 0x65, 0x78, 0x30, 0x01, 0xc2, 0x56, 0x91,
0xfa, 0x28, 0xd0, 0xf5, 0xf1, 0xc1, 0xd7, 0x62
};
static const u8 enc_assoc058[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce058[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key058[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input059[] __initconst = {
0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03,
0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68,
0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb,
0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e
};
static const u8 enc_output059[] __initconst = {
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x79, 0xba, 0x7a, 0x29, 0xf5, 0xa7, 0xbb, 0x75,
0x79, 0x7a, 0xf8, 0x7a, 0x61, 0x01, 0x29, 0xa4
};
static const u8 enc_assoc059[] __initconst = {
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80
};
static const u8 enc_nonce059[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key059[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input060[] __initconst = {
0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03,
0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68,
0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb,
0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e,
0xe3, 0xbc, 0xdb, 0xdb, 0x1e, 0xde, 0xfc, 0x7e,
0x8b, 0xcd, 0xa1, 0x36, 0xa1, 0x5c, 0x8c, 0xab,
0x08, 0x69, 0xff, 0x52, 0xec, 0x5e, 0x26, 0x65,
0x53, 0xb7, 0xb2, 0xa7, 0xfe, 0x87, 0xfd, 0x3d
};
static const u8 enc_output060[] __initconst = {
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x36, 0xb1, 0x74, 0x38, 0x19, 0xe1, 0xb9, 0xba,
0x15, 0x51, 0xe8, 0xed, 0x92, 0x2a, 0x95, 0x9a
};
static const u8 enc_assoc060[] __initconst = {
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80
};
static const u8 enc_nonce060[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key060[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input061[] __initconst = {
0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03,
0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68,
0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb,
0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e,
0xe3, 0xbc, 0xdb, 0xdb, 0x1e, 0xde, 0xfc, 0x7e,
0x8b, 0xcd, 0xa1, 0x36, 0xa1, 0x5c, 0x8c, 0xab,
0x08, 0x69, 0xff, 0x52, 0xec, 0x5e, 0x26, 0x65,
0x53, 0xb7, 0xb2, 0xa7, 0xfe, 0x87, 0xfd, 0x3d,
0x7a, 0xda, 0x44, 0xc2, 0x42, 0x69, 0xbf, 0x7a,
0x55, 0x27, 0xf2, 0xf0, 0xac, 0xf6, 0x85, 0x82,
0xb7, 0x4c, 0x5a, 0x62, 0xe6, 0x0c, 0x05, 0x00,
0x98, 0x1a, 0x49, 0xb8, 0x45, 0x93, 0x92, 0x44,
0x9b, 0xb2, 0xf2, 0x04, 0xb6, 0x46, 0xef, 0x47,
0xf3, 0xf0, 0xb1, 0xb6, 0x1d, 0xc3, 0x48, 0x6d,
0x77, 0xd3, 0x0b, 0x45, 0x76, 0x92, 0xed, 0xb8,
0xfb, 0xac, 0x01, 0x08, 0x38, 0x04, 0x88, 0x47
};
static const u8 enc_output061[] __initconst = {
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0xfe, 0xac, 0x49, 0x55, 0x55, 0x4e, 0x80, 0x6f,
0x3a, 0x19, 0x02, 0xe2, 0x44, 0x32, 0xc0, 0x8a
};
static const u8 enc_assoc061[] __initconst = {
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80
};
static const u8 enc_nonce061[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key061[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input062[] __initconst = {
0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc,
0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97,
0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04,
0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1
};
static const u8 enc_output062[] __initconst = {
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0x20, 0xa3, 0x79, 0x8d, 0xf1, 0x29, 0x2c, 0x59,
0x72, 0xbf, 0x97, 0x41, 0xae, 0xc3, 0x8a, 0x19
};
static const u8 enc_assoc062[] __initconst = {
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f
};
static const u8 enc_nonce062[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key062[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input063[] __initconst = {
0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc,
0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97,
0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04,
0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1,
0x1c, 0x43, 0x24, 0x24, 0xe1, 0x21, 0x03, 0x81,
0x74, 0x32, 0x5e, 0xc9, 0x5e, 0xa3, 0x73, 0x54,
0xf7, 0x96, 0x00, 0xad, 0x13, 0xa1, 0xd9, 0x9a,
0xac, 0x48, 0x4d, 0x58, 0x01, 0x78, 0x02, 0xc2
};
static const u8 enc_output063[] __initconst = {
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xc0, 0x3d, 0x9f, 0x67, 0x35, 0x4a, 0x97, 0xb2,
0xf0, 0x74, 0xf7, 0x55, 0x15, 0x57, 0xe4, 0x9c
};
static const u8 enc_assoc063[] __initconst = {
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f
};
static const u8 enc_nonce063[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key063[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input064[] __initconst = {
0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc,
0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97,
0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04,
0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1,
0x1c, 0x43, 0x24, 0x24, 0xe1, 0x21, 0x03, 0x81,
0x74, 0x32, 0x5e, 0xc9, 0x5e, 0xa3, 0x73, 0x54,
0xf7, 0x96, 0x00, 0xad, 0x13, 0xa1, 0xd9, 0x9a,
0xac, 0x48, 0x4d, 0x58, 0x01, 0x78, 0x02, 0xc2,
0x85, 0x25, 0xbb, 0x3d, 0xbd, 0x96, 0x40, 0x85,
0xaa, 0xd8, 0x0d, 0x0f, 0x53, 0x09, 0x7a, 0x7d,
0x48, 0xb3, 0xa5, 0x9d, 0x19, 0xf3, 0xfa, 0xff,
0x67, 0xe5, 0xb6, 0x47, 0xba, 0x6c, 0x6d, 0xbb,
0x64, 0x4d, 0x0d, 0xfb, 0x49, 0xb9, 0x10, 0xb8,
0x0c, 0x0f, 0x4e, 0x49, 0xe2, 0x3c, 0xb7, 0x92,
0x88, 0x2c, 0xf4, 0xba, 0x89, 0x6d, 0x12, 0x47,
0x04, 0x53, 0xfe, 0xf7, 0xc7, 0xfb, 0x77, 0xb8
};
static const u8 enc_output064[] __initconst = {
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xc8, 0x6d, 0xa8, 0xdd, 0x65, 0x22, 0x86, 0xd5,
0x02, 0x13, 0xd3, 0x28, 0xd6, 0x3e, 0x40, 0x06
};
static const u8 enc_assoc064[] __initconst = {
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f
};
static const u8 enc_nonce064[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key064[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input065[] __initconst = {
0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c,
0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17,
0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84,
0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41
};
static const u8 enc_output065[] __initconst = {
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0xbe, 0xde, 0x90, 0x83, 0xce, 0xb3, 0x6d, 0xdf,
0xe5, 0xfa, 0x81, 0x1f, 0x95, 0x47, 0x1c, 0x67
};
static const u8 enc_assoc065[] __initconst = {
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff
};
static const u8 enc_nonce065[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key065[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input066[] __initconst = {
0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c,
0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17,
0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84,
0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41,
0x9c, 0x43, 0x24, 0xa4, 0x61, 0x21, 0x03, 0x01,
0xf4, 0x32, 0x5e, 0x49, 0xde, 0xa3, 0x73, 0xd4,
0x77, 0x96, 0x00, 0x2d, 0x93, 0xa1, 0xd9, 0x1a,
0x2c, 0x48, 0x4d, 0xd8, 0x81, 0x78, 0x02, 0x42
};
static const u8 enc_output066[] __initconst = {
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x30, 0x08, 0x74, 0xbb, 0x06, 0x92, 0xb6, 0x89,
0xde, 0xad, 0x9a, 0xe1, 0x5b, 0x06, 0x73, 0x90
};
static const u8 enc_assoc066[] __initconst = {
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff
};
static const u8 enc_nonce066[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key066[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input067[] __initconst = {
0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c,
0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17,
0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84,
0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41,
0x9c, 0x43, 0x24, 0xa4, 0x61, 0x21, 0x03, 0x01,
0xf4, 0x32, 0x5e, 0x49, 0xde, 0xa3, 0x73, 0xd4,
0x77, 0x96, 0x00, 0x2d, 0x93, 0xa1, 0xd9, 0x1a,
0x2c, 0x48, 0x4d, 0xd8, 0x81, 0x78, 0x02, 0x42,
0x05, 0x25, 0xbb, 0xbd, 0x3d, 0x96, 0x40, 0x05,
0x2a, 0xd8, 0x0d, 0x8f, 0xd3, 0x09, 0x7a, 0xfd,
0xc8, 0xb3, 0xa5, 0x1d, 0x99, 0xf3, 0xfa, 0x7f,
0xe7, 0xe5, 0xb6, 0xc7, 0x3a, 0x6c, 0x6d, 0x3b,
0xe4, 0x4d, 0x0d, 0x7b, 0xc9, 0xb9, 0x10, 0x38,
0x8c, 0x0f, 0x4e, 0xc9, 0x62, 0x3c, 0xb7, 0x12,
0x08, 0x2c, 0xf4, 0x3a, 0x09, 0x6d, 0x12, 0xc7,
0x84, 0x53, 0xfe, 0x77, 0x47, 0xfb, 0x77, 0x38
};
static const u8 enc_output067[] __initconst = {
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x99, 0xca, 0xd8, 0x5f, 0x45, 0xca, 0x40, 0x94,
0x2d, 0x0d, 0x4d, 0x5e, 0x95, 0x0a, 0xde, 0x22
};
static const u8 enc_assoc067[] __initconst = {
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff
};
static const u8 enc_nonce067[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key067[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input068[] __initconst = {
0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c,
0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17,
0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84,
0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41
};
static const u8 enc_output068[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x8b, 0xbe, 0x14, 0x52, 0x72, 0xe7, 0xc2, 0xd9,
0xa1, 0x89, 0x1a, 0x3a, 0xb0, 0x98, 0x3d, 0x9d
};
static const u8 enc_assoc068[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce068[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key068[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input069[] __initconst = {
0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c,
0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17,
0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84,
0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41,
0xe3, 0xbc, 0xdb, 0x5b, 0xe1, 0x21, 0x03, 0x01,
0x8b, 0xcd, 0xa1, 0xb6, 0x5e, 0xa3, 0x73, 0xd4,
0x08, 0x69, 0xff, 0xd2, 0x13, 0xa1, 0xd9, 0x1a,
0x53, 0xb7, 0xb2, 0x27, 0x01, 0x78, 0x02, 0x42
};
static const u8 enc_output069[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x3b, 0x41, 0x86, 0x19, 0x13, 0xa8, 0xf6, 0xde,
0x7f, 0x61, 0xe2, 0x25, 0x63, 0x1b, 0xc3, 0x82
};
static const u8 enc_assoc069[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce069[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key069[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input070[] __initconst = {
0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c,
0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17,
0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84,
0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41,
0xe3, 0xbc, 0xdb, 0x5b, 0xe1, 0x21, 0x03, 0x01,
0x8b, 0xcd, 0xa1, 0xb6, 0x5e, 0xa3, 0x73, 0xd4,
0x08, 0x69, 0xff, 0xd2, 0x13, 0xa1, 0xd9, 0x1a,
0x53, 0xb7, 0xb2, 0x27, 0x01, 0x78, 0x02, 0x42,
0x7a, 0xda, 0x44, 0x42, 0xbd, 0x96, 0x40, 0x05,
0x55, 0x27, 0xf2, 0x70, 0x53, 0x09, 0x7a, 0xfd,
0xb7, 0x4c, 0x5a, 0xe2, 0x19, 0xf3, 0xfa, 0x7f,
0x98, 0x1a, 0x49, 0x38, 0xba, 0x6c, 0x6d, 0x3b,
0x9b, 0xb2, 0xf2, 0x84, 0x49, 0xb9, 0x10, 0x38,
0xf3, 0xf0, 0xb1, 0x36, 0xe2, 0x3c, 0xb7, 0x12,
0x77, 0xd3, 0x0b, 0xc5, 0x89, 0x6d, 0x12, 0xc7,
0xfb, 0xac, 0x01, 0x88, 0xc7, 0xfb, 0x77, 0x38
};
static const u8 enc_output070[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x84, 0x28, 0xbc, 0xf0, 0x23, 0xec, 0x6b, 0xf3,
0x1f, 0xd9, 0xef, 0xb2, 0x03, 0xff, 0x08, 0x71
};
static const u8 enc_assoc070[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce070[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key070[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input071[] __initconst = {
0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83,
0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8,
0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b,
0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe
};
static const u8 enc_output071[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0x13, 0x9f, 0xdf, 0x64, 0x74, 0xea, 0x24, 0xf5,
0x49, 0xb0, 0x75, 0x82, 0x5f, 0x2c, 0x76, 0x20
};
static const u8 enc_assoc071[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
};
static const u8 enc_nonce071[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key071[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input072[] __initconst = {
0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83,
0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8,
0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b,
0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe,
0x1c, 0x43, 0x24, 0xa4, 0x1e, 0xde, 0xfc, 0xfe,
0x74, 0x32, 0x5e, 0x49, 0xa1, 0x5c, 0x8c, 0x2b,
0xf7, 0x96, 0x00, 0x2d, 0xec, 0x5e, 0x26, 0xe5,
0xac, 0x48, 0x4d, 0xd8, 0xfe, 0x87, 0xfd, 0xbd
};
static const u8 enc_output072[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xbb, 0xad, 0x8d, 0x86, 0x3b, 0x83, 0x5a, 0x8e,
0x86, 0x64, 0xfd, 0x1d, 0x45, 0x66, 0xb6, 0xb4
};
static const u8 enc_assoc072[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
};
static const u8 enc_nonce072[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key072[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - misc */
static const u8 enc_input073[] __initconst = {
0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83,
0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8,
0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b,
0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe,
0x1c, 0x43, 0x24, 0xa4, 0x1e, 0xde, 0xfc, 0xfe,
0x74, 0x32, 0x5e, 0x49, 0xa1, 0x5c, 0x8c, 0x2b,
0xf7, 0x96, 0x00, 0x2d, 0xec, 0x5e, 0x26, 0xe5,
0xac, 0x48, 0x4d, 0xd8, 0xfe, 0x87, 0xfd, 0xbd,
0x85, 0x25, 0xbb, 0xbd, 0x42, 0x69, 0xbf, 0xfa,
0xaa, 0xd8, 0x0d, 0x8f, 0xac, 0xf6, 0x85, 0x02,
0x48, 0xb3, 0xa5, 0x1d, 0xe6, 0x0c, 0x05, 0x80,
0x67, 0xe5, 0xb6, 0xc7, 0x45, 0x93, 0x92, 0xc4,
0x64, 0x4d, 0x0d, 0x7b, 0xb6, 0x46, 0xef, 0xc7,
0x0c, 0x0f, 0x4e, 0xc9, 0x1d, 0xc3, 0x48, 0xed,
0x88, 0x2c, 0xf4, 0x3a, 0x76, 0x92, 0xed, 0x38,
0x04, 0x53, 0xfe, 0x77, 0x38, 0x04, 0x88, 0xc7
};
static const u8 enc_output073[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0x42, 0xf2, 0x35, 0x42, 0x97, 0x84, 0x9a, 0x51,
0x1d, 0x53, 0xe5, 0x57, 0x17, 0x72, 0xf7, 0x1f
};
static const u8 enc_assoc073[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
};
static const u8 enc_nonce073[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00
};
static const u8 enc_key073[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
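/*
 * Annotation: the vectors in the following group carry Wycheproof's
 * "checking for int overflows" label and use 128-byte plaintexts with
 * 64 bytes of all-0xff associated data, presumably sized to push the
 * Poly1305 length fields and block counters toward word-size
 * boundaries. Note the mix of 12-byte and 8-byte nonces within the
 * group.
 */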
/* wycheproof - checking for int overflows */
static const u8 enc_input074[] __initconst = {
0xd4, 0x50, 0x0b, 0xf0, 0x09, 0x49, 0x35, 0x51,
0xc3, 0x80, 0xad, 0xf5, 0x2c, 0x57, 0x3a, 0x69,
0xdf, 0x7e, 0x8b, 0x76, 0x24, 0x63, 0x33, 0x0f,
0xac, 0xc1, 0x6a, 0x57, 0x26, 0xbe, 0x71, 0x90,
0xc6, 0x3c, 0x5a, 0x1c, 0x92, 0x65, 0x84, 0xa0,
0x96, 0x75, 0x68, 0x28, 0xdc, 0xdc, 0x64, 0xac,
0xdf, 0x96, 0x3d, 0x93, 0x1b, 0xf1, 0xda, 0xe2,
0x38, 0xf3, 0xf1, 0x57, 0x22, 0x4a, 0xc4, 0xb5,
0x42, 0xd7, 0x85, 0xb0, 0xdd, 0x84, 0xdb, 0x6b,
0xe3, 0xbc, 0x5a, 0x36, 0x63, 0xe8, 0x41, 0x49,
0xff, 0xbe, 0xd0, 0x9e, 0x54, 0xf7, 0x8f, 0x16,
0xa8, 0x22, 0x3b, 0x24, 0xcb, 0x01, 0x9f, 0x58,
0xb2, 0x1b, 0x0e, 0x55, 0x1e, 0x7a, 0xa0, 0x73,
0x27, 0x62, 0x95, 0x51, 0x37, 0x6c, 0xcb, 0xc3,
0x93, 0x76, 0x71, 0xa0, 0x62, 0x9b, 0xd9, 0x5c,
0x99, 0x15, 0xc7, 0x85, 0x55, 0x77, 0x1e, 0x7a
};
static const u8 enc_output074[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x0b, 0x30, 0x0d, 0x8d, 0xa5, 0x6c, 0x21, 0x85,
0x75, 0x52, 0x79, 0x55, 0x3c, 0x4c, 0x82, 0xca
};
static const u8 enc_assoc074[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce074[] __initconst = {
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x00, 0x02, 0x50, 0x6e
};
static const u8 enc_key074[] __initconst = {
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
};
/* wycheproof - checking for int overflows */
static const u8 enc_input075[] __initconst = {
0x7d, 0xe8, 0x7f, 0x67, 0x29, 0x94, 0x52, 0x75,
0xd0, 0x65, 0x5d, 0xa4, 0xc7, 0xfd, 0xe4, 0x56,
0x9e, 0x16, 0xf1, 0x11, 0xb5, 0xeb, 0x26, 0xc2,
0x2d, 0x85, 0x9e, 0x3f, 0xf8, 0x22, 0xec, 0xed,
0x3a, 0x6d, 0xd9, 0xa6, 0x0f, 0x22, 0x95, 0x7f,
0x7b, 0x7c, 0x85, 0x7e, 0x88, 0x22, 0xeb, 0x9f,
0xe0, 0xb8, 0xd7, 0x02, 0x21, 0x41, 0xf2, 0xd0,
0xb4, 0x8f, 0x4b, 0x56, 0x12, 0xd3, 0x22, 0xa8,
0x8d, 0xd0, 0xfe, 0x0b, 0x4d, 0x91, 0x79, 0x32,
0x4f, 0x7c, 0x6c, 0x9e, 0x99, 0x0e, 0xfb, 0xd8,
0x0e, 0x5e, 0xd6, 0x77, 0x58, 0x26, 0x49, 0x8b,
0x1e, 0xfe, 0x0f, 0x71, 0xa0, 0xf3, 0xec, 0x5b,
0x29, 0xcb, 0x28, 0xc2, 0x54, 0x0a, 0x7d, 0xcd,
0x51, 0xb7, 0xda, 0xae, 0xe0, 0xff, 0x4a, 0x7f,
0x3a, 0xc1, 0xee, 0x54, 0xc2, 0x9e, 0xe4, 0xc1,
0x70, 0xde, 0x40, 0x8f, 0x66, 0x69, 0x21, 0x94
};
static const u8 enc_output075[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xc5, 0x78, 0xe2, 0xaa, 0x44, 0xd3, 0x09, 0xb7,
0xb6, 0xa5, 0x19, 0x3b, 0xdc, 0x61, 0x18, 0xf5
};
static const u8 enc_assoc075[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce075[] __initconst = {
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x00, 0x03, 0x18, 0xa5
};
static const u8 enc_key075[] __initconst = {
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
};
/* wycheproof - checking for int overflows */
static const u8 enc_input076[] __initconst = {
0x1b, 0x99, 0x6f, 0x9a, 0x3c, 0xcc, 0x67, 0x85,
0xde, 0x22, 0xff, 0x5b, 0x8a, 0xdd, 0x95, 0x02,
0xce, 0x03, 0xa0, 0xfa, 0xf5, 0x99, 0x2a, 0x09,
0x52, 0x2c, 0xdd, 0x12, 0x06, 0xd2, 0x20, 0xb8,
0xf8, 0xbd, 0x07, 0xd1, 0xf1, 0xf5, 0xa1, 0xbd,
0x9a, 0x71, 0xd1, 0x1c, 0x7f, 0x57, 0x9b, 0x85,
0x58, 0x18, 0xc0, 0x8d, 0x4d, 0xe0, 0x36, 0x39,
0x31, 0x83, 0xb7, 0xf5, 0x90, 0xb3, 0x35, 0xae,
0xd8, 0xde, 0x5b, 0x57, 0xb1, 0x3c, 0x5f, 0xed,
0xe2, 0x44, 0x1c, 0x3e, 0x18, 0x4a, 0xa9, 0xd4,
0x6e, 0x61, 0x59, 0x85, 0x06, 0xb3, 0xe1, 0x1c,
0x43, 0xc6, 0x2c, 0xbc, 0xac, 0xec, 0xed, 0x33,
0x19, 0x08, 0x75, 0xb0, 0x12, 0x21, 0x8b, 0x19,
0x30, 0xfb, 0x7c, 0x38, 0xec, 0x45, 0xac, 0x11,
0xc3, 0x53, 0xd0, 0xcf, 0x93, 0x8d, 0xcc, 0xb9,
0xef, 0xad, 0x8f, 0xed, 0xbe, 0x46, 0xda, 0xa5
};
static const u8 enc_output076[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x4b, 0x0b, 0xda, 0x8a, 0xd0, 0x43, 0x83, 0x0d,
0x83, 0x19, 0xab, 0x82, 0xc5, 0x0c, 0x76, 0x63
};
static const u8 enc_assoc076[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce076[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xb4, 0xf0
};
static const u8 enc_key076[] __initconst = {
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
};
/* wycheproof - checking for int overflows */
static const u8 enc_input077[] __initconst = {
0x86, 0xcb, 0xac, 0xae, 0x4d, 0x3f, 0x74, 0xae,
0x01, 0x21, 0x3e, 0x05, 0x51, 0xcc, 0x15, 0x16,
0x0e, 0xa1, 0xbe, 0x84, 0x08, 0xe3, 0xd5, 0xd7,
0x4f, 0x01, 0x46, 0x49, 0x95, 0xa6, 0x9e, 0x61,
0x76, 0xcb, 0x9e, 0x02, 0xb2, 0x24, 0x7e, 0xd2,
0x99, 0x89, 0x2f, 0x91, 0x82, 0xa4, 0x5c, 0xaf,
0x4c, 0x69, 0x40, 0x56, 0x11, 0x76, 0x6e, 0xdf,
0xaf, 0xdc, 0x28, 0x55, 0x19, 0xea, 0x30, 0x48,
0x0c, 0x44, 0xf0, 0x5e, 0x78, 0x1e, 0xac, 0xf8,
0xfc, 0xec, 0xc7, 0x09, 0x0a, 0xbb, 0x28, 0xfa,
0x5f, 0xd5, 0x85, 0xac, 0x8c, 0xda, 0x7e, 0x87,
0x72, 0xe5, 0x94, 0xe4, 0xce, 0x6c, 0x88, 0x32,
0x81, 0x93, 0x2e, 0x0f, 0x89, 0xf8, 0x77, 0xa1,
0xf0, 0x4d, 0x9c, 0x32, 0xb0, 0x6c, 0xf9, 0x0b,
0x0e, 0x76, 0x2b, 0x43, 0x0c, 0x4d, 0x51, 0x7c,
0x97, 0x10, 0x70, 0x68, 0xf4, 0x98, 0xef, 0x7f
};
static const u8 enc_output077[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x4b, 0xc9, 0x8f, 0x72, 0xc4, 0x94, 0xc2, 0xa4,
0x3c, 0x2b, 0x15, 0xa1, 0x04, 0x3f, 0x1c, 0xfa
};
static const u8 enc_assoc077[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce077[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0xfb, 0x66
};
static const u8 enc_key077[] __initconst = {
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
};
/* wycheproof - checking for int overflows */
static const u8 enc_input078[] __initconst = {
0xfa, 0xb1, 0xcd, 0xdf, 0x4f, 0xe1, 0x98, 0xef,
0x63, 0xad, 0xd8, 0x81, 0xd6, 0xea, 0xd6, 0xc5,
0x76, 0x37, 0xbb, 0xe9, 0x20, 0x18, 0xca, 0x7c,
0x0b, 0x96, 0xfb, 0xa0, 0x87, 0x1e, 0x93, 0x2d,
0xb1, 0xfb, 0xf9, 0x07, 0x61, 0xbe, 0x25, 0xdf,
0x8d, 0xfa, 0xf9, 0x31, 0xce, 0x57, 0x57, 0xe6,
0x17, 0xb3, 0xd7, 0xa9, 0xf0, 0xbf, 0x0f, 0xfe,
0x5d, 0x59, 0x1a, 0x33, 0xc1, 0x43, 0xb8, 0xf5,
0x3f, 0xd0, 0xb5, 0xa1, 0x96, 0x09, 0xfd, 0x62,
0xe5, 0xc2, 0x51, 0xa4, 0x28, 0x1a, 0x20, 0x0c,
0xfd, 0xc3, 0x4f, 0x28, 0x17, 0x10, 0x40, 0x6f,
0x4e, 0x37, 0x62, 0x54, 0x46, 0xff, 0x6e, 0xf2,
0x24, 0x91, 0x3d, 0xeb, 0x0d, 0x89, 0xaf, 0x33,
0x71, 0x28, 0xe3, 0xd1, 0x55, 0xd1, 0x6d, 0x3e,
0xc3, 0x24, 0x60, 0x41, 0x43, 0x21, 0x43, 0xe9,
0xab, 0x3a, 0x6d, 0x2c, 0xcc, 0x2f, 0x4d, 0x62
};
static const u8 enc_output078[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xf7, 0xe9, 0xe1, 0x51, 0xb0, 0x25, 0x33, 0xc7,
0x46, 0x58, 0xbf, 0xc7, 0x73, 0x7c, 0x68, 0x0d
};
static const u8 enc_assoc078[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce078[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0xbb, 0x90
};
static const u8 enc_key078[] __initconst = {
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
};
/* wycheproof - checking for int overflows */
static const u8 enc_input079[] __initconst = {
0x22, 0x72, 0x02, 0xbe, 0x7f, 0x35, 0x15, 0xe9,
0xd1, 0xc0, 0x2e, 0xea, 0x2f, 0x19, 0x50, 0xb6,
0x48, 0x1b, 0x04, 0x8a, 0x4c, 0x91, 0x50, 0x6c,
0xb4, 0x0d, 0x50, 0x4e, 0x6c, 0x94, 0x9f, 0x82,
0xd1, 0x97, 0xc2, 0x5a, 0xd1, 0x7d, 0xc7, 0x21,
0x65, 0x11, 0x25, 0x78, 0x2a, 0xc7, 0xa7, 0x12,
0x47, 0xfe, 0xae, 0xf3, 0x2f, 0x1f, 0x25, 0x0c,
0xe4, 0xbb, 0x8f, 0x79, 0xac, 0xaa, 0x17, 0x9d,
0x45, 0xa7, 0xb0, 0x54, 0x5f, 0x09, 0x24, 0x32,
0x5e, 0xfa, 0x87, 0xd5, 0xe4, 0x41, 0xd2, 0x84,
0x78, 0xc6, 0x1f, 0x22, 0x23, 0xee, 0x67, 0xc3,
0xb4, 0x1f, 0x43, 0x94, 0x53, 0x5e, 0x2a, 0x24,
0x36, 0x9a, 0x2e, 0x16, 0x61, 0x3c, 0x45, 0x94,
0x90, 0xc1, 0x4f, 0xb1, 0xd7, 0x55, 0xfe, 0x53,
0xfb, 0xe1, 0xee, 0x45, 0xb1, 0xb2, 0x1f, 0x71,
0x62, 0xe2, 0xfc, 0xaa, 0x74, 0x2a, 0xbe, 0xfd
};
static const u8 enc_output079[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x79, 0x5b, 0xcf, 0xf6, 0x47, 0xc5, 0x53, 0xc2,
0xe4, 0xeb, 0x6e, 0x0e, 0xaf, 0xd9, 0xe0, 0x4e
};
static const u8 enc_assoc079[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce079[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x48, 0x4a
};
static const u8 enc_key079[] __initconst = {
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
};
/* wycheproof - checking for int overflows */
static const u8 enc_input080[] __initconst = {
0xfa, 0xe5, 0x83, 0x45, 0xc1, 0x6c, 0xb0, 0xf5,
0xcc, 0x53, 0x7f, 0x2b, 0x1b, 0x34, 0x69, 0xc9,
0x69, 0x46, 0x3b, 0x3e, 0xa7, 0x1b, 0xcf, 0x6b,
0x98, 0xd6, 0x69, 0xa8, 0xe6, 0x0e, 0x04, 0xfc,
0x08, 0xd5, 0xfd, 0x06, 0x9c, 0x36, 0x26, 0x38,
0xe3, 0x40, 0x0e, 0xf4, 0xcb, 0x24, 0x2e, 0x27,
0xe2, 0x24, 0x5e, 0x68, 0xcb, 0x9e, 0xc5, 0x83,
0xda, 0x53, 0x40, 0xb1, 0x2e, 0xdf, 0x42, 0x3b,
0x73, 0x26, 0xad, 0x20, 0xfe, 0xeb, 0x57, 0xda,
0xca, 0x2e, 0x04, 0x67, 0xa3, 0x28, 0x99, 0xb4,
0x2d, 0xf8, 0xe5, 0x6d, 0x84, 0xe0, 0x06, 0xbc,
0x8a, 0x7a, 0xcc, 0x73, 0x1e, 0x7c, 0x1f, 0x6b,
0xec, 0xb5, 0x71, 0x9f, 0x70, 0x77, 0xf0, 0xd4,
0xf4, 0xc6, 0x1a, 0xb1, 0x1e, 0xba, 0xc1, 0x00,
0x18, 0x01, 0xce, 0x33, 0xc4, 0xe4, 0xa7, 0x7d,
0x83, 0x1d, 0x3c, 0xe3, 0x4e, 0x84, 0x10, 0xe1
};
static const u8 enc_output080[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x19, 0x46, 0xd6, 0x53, 0x96, 0x0f, 0x94, 0x7a,
0x74, 0xd3, 0xe8, 0x09, 0x3c, 0xf4, 0x85, 0x02
};
static const u8 enc_assoc080[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce080[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x2f, 0x40
};
static const u8 enc_key080[] __initconst = {
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
};
/* wycheproof - checking for int overflows */
static const u8 enc_input081[] __initconst = {
0xeb, 0xb2, 0x16, 0xdd, 0xd7, 0xca, 0x70, 0x92,
0x15, 0xf5, 0x03, 0xdf, 0x9c, 0xe6, 0x3c, 0x5c,
0xd2, 0x19, 0x4e, 0x7d, 0x90, 0x99, 0xe8, 0xa9,
0x0b, 0x2a, 0xfa, 0xad, 0x5e, 0xba, 0x35, 0x06,
0x99, 0x25, 0xa6, 0x03, 0xfd, 0xbc, 0x34, 0x1a,
0xae, 0xd4, 0x15, 0x05, 0xb1, 0x09, 0x41, 0xfa,
0x38, 0x56, 0xa7, 0xe2, 0x47, 0xb1, 0x04, 0x07,
0x09, 0x74, 0x6c, 0xfc, 0x20, 0x96, 0xca, 0xa6,
0x31, 0xb2, 0xff, 0xf4, 0x1c, 0x25, 0x05, 0x06,
0xd8, 0x89, 0xc1, 0xc9, 0x06, 0x71, 0xad, 0xe8,
0x53, 0xee, 0x63, 0x94, 0xc1, 0x91, 0x92, 0xa5,
0xcf, 0x37, 0x10, 0xd1, 0x07, 0x30, 0x99, 0xe5,
0xbc, 0x94, 0x65, 0x82, 0xfc, 0x0f, 0xab, 0x9f,
0x54, 0x3c, 0x71, 0x6a, 0xe2, 0x48, 0x6a, 0x86,
0x83, 0xfd, 0xca, 0x39, 0xd2, 0xe1, 0x4f, 0x23,
0xd0, 0x0a, 0x58, 0x26, 0x64, 0xf4, 0xec, 0xb1
};
static const u8 enc_output081[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x36, 0xc3, 0x00, 0x29, 0x85, 0xdd, 0x21, 0xba,
0xf8, 0x95, 0xd6, 0x33, 0x57, 0x3f, 0x12, 0xc0
};
static const u8 enc_assoc081[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce081[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x00, 0xe2, 0x93, 0x35
};
static const u8 enc_key081[] __initconst = {
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30
};
/* wycheproof - checking for int overflows */
static const u8 enc_input082[] __initconst = {
0x40, 0x8a, 0xe6, 0xef, 0x1c, 0x7e, 0xf0, 0xfb,
0x2c, 0x2d, 0x61, 0x08, 0x16, 0xfc, 0x78, 0x49,
0xef, 0xa5, 0x8f, 0x78, 0x27, 0x3f, 0x5f, 0x16,
0x6e, 0xa6, 0x5f, 0x81, 0xb5, 0x75, 0x74, 0x7d,
0x03, 0x5b, 0x30, 0x40, 0xfe, 0xde, 0x1e, 0xb9,
0x45, 0x97, 0x88, 0x66, 0x97, 0x88, 0x40, 0x8e,
0x00, 0x41, 0x3b, 0x3e, 0x37, 0x6d, 0x15, 0x2d,
0x20, 0x4a, 0xa2, 0xb7, 0xa8, 0x35, 0x58, 0xfc,
0xd4, 0x8a, 0x0e, 0xf7, 0xa2, 0x6b, 0x1c, 0xd6,
0xd3, 0x5d, 0x23, 0xb3, 0xf5, 0xdf, 0xe0, 0xca,
0x77, 0xa4, 0xce, 0x32, 0xb9, 0x4a, 0xbf, 0x83,
0xda, 0x2a, 0xef, 0xca, 0xf0, 0x68, 0x38, 0x08,
0x79, 0xe8, 0x9f, 0xb0, 0xa3, 0x82, 0x95, 0x95,
0xcf, 0x44, 0xc3, 0x85, 0x2a, 0xe2, 0xcc, 0x66,
0x2b, 0x68, 0x9f, 0x93, 0x55, 0xd9, 0xc1, 0x83,
0x80, 0x1f, 0x6a, 0xcc, 0x31, 0x3f, 0x89, 0x07
};
static const u8 enc_output082[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x65, 0x14, 0x51, 0x8e, 0x0a, 0x26, 0x41, 0x42,
0xe0, 0xb7, 0x35, 0x1f, 0x96, 0x7f, 0xc2, 0xae
};
static const u8 enc_assoc082[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce082[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xf7, 0xd5
};
static const u8 enc_key082[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - checking for int overflows */
static const u8 enc_input083[] __initconst = {
0x0a, 0x0a, 0x24, 0x49, 0x9b, 0xca, 0xde, 0x58,
0xcf, 0x15, 0x76, 0xc3, 0x12, 0xac, 0xa9, 0x84,
0x71, 0x8c, 0xb4, 0xcc, 0x7e, 0x01, 0x53, 0xf5,
0xa9, 0x01, 0x58, 0x10, 0x85, 0x96, 0x44, 0xdf,
0xc0, 0x21, 0x17, 0x4e, 0x0b, 0x06, 0x0a, 0x39,
0x74, 0x48, 0xde, 0x8b, 0x48, 0x4a, 0x86, 0x03,
0xbe, 0x68, 0x0a, 0x69, 0x34, 0xc0, 0x90, 0x6f,
0x30, 0xdd, 0x17, 0xea, 0xe2, 0xd4, 0xc5, 0xfa,
0xa7, 0x77, 0xf8, 0xca, 0x53, 0x37, 0x0e, 0x08,
0x33, 0x1b, 0x88, 0xc3, 0x42, 0xba, 0xc9, 0x59,
0x78, 0x7b, 0xbb, 0x33, 0x93, 0x0e, 0x3b, 0x56,
0xbe, 0x86, 0xda, 0x7f, 0x2a, 0x6e, 0xb1, 0xf9,
0x40, 0x89, 0xd1, 0xd1, 0x81, 0x07, 0x4d, 0x43,
0x02, 0xf8, 0xe0, 0x55, 0x2d, 0x0d, 0xe1, 0xfa,
0xb3, 0x06, 0xa2, 0x1b, 0x42, 0xd4, 0xc3, 0xba,
0x6e, 0x6f, 0x0c, 0xbc, 0xc8, 0x1e, 0x87, 0x7a
};
static const u8 enc_output083[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x4c, 0x19, 0x4d, 0xa6, 0xa9, 0x9f, 0xd6, 0x5b,
0x40, 0xe9, 0xca, 0xd7, 0x98, 0xf4, 0x4b, 0x19
};
static const u8 enc_assoc083[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce083[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xfc, 0xe4
};
static const u8 enc_key083[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - checking for int overflows */
static const u8 enc_input084[] __initconst = {
0x4a, 0x0a, 0xaf, 0xf8, 0x49, 0x47, 0x29, 0x18,
0x86, 0x91, 0x70, 0x13, 0x40, 0xf3, 0xce, 0x2b,
0x8a, 0x78, 0xee, 0xd3, 0xa0, 0xf0, 0x65, 0x99,
0x4b, 0x72, 0x48, 0x4e, 0x79, 0x91, 0xd2, 0x5c,
0x29, 0xaa, 0x07, 0x5e, 0xb1, 0xfc, 0x16, 0xde,
0x93, 0xfe, 0x06, 0x90, 0x58, 0x11, 0x2a, 0xb2,
0x84, 0xa3, 0xed, 0x18, 0x78, 0x03, 0x26, 0xd1,
0x25, 0x8a, 0x47, 0x22, 0x2f, 0xa6, 0x33, 0xd8,
0xb2, 0x9f, 0x3b, 0xd9, 0x15, 0x0b, 0x23, 0x9b,
0x15, 0x46, 0xc2, 0xbb, 0x9b, 0x9f, 0x41, 0x0f,
0xeb, 0xea, 0xd3, 0x96, 0x00, 0x0e, 0xe4, 0x77,
0x70, 0x15, 0x32, 0xc3, 0xd0, 0xf5, 0xfb, 0xf8,
0x95, 0xd2, 0x80, 0x19, 0x6d, 0x2f, 0x73, 0x7c,
0x5e, 0x9f, 0xec, 0x50, 0xd9, 0x2b, 0xb0, 0xdf,
0x5d, 0x7e, 0x51, 0x3b, 0xe5, 0xb8, 0xea, 0x97,
0x13, 0x10, 0xd5, 0xbf, 0x16, 0xba, 0x7a, 0xee
};
static const u8 enc_output084[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xc8, 0xae, 0x77, 0x88, 0xcd, 0x28, 0x74, 0xab,
0xc1, 0x38, 0x54, 0x1e, 0x11, 0xfd, 0x05, 0x87
};
static const u8 enc_assoc084[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce084[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x01, 0x84, 0x86, 0xa8
};
static const u8 enc_key084[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - checking for int overflows */
static const u8 enc_input085[] __initconst = {
0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
0x78, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
0x9c, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
0xd4, 0xd2, 0x06, 0x61, 0x6f, 0x92, 0x93, 0xf6,
0x5b, 0x45, 0xdb, 0xbc, 0x74, 0xe7, 0xc2, 0xed,
0xfb, 0xcb, 0xbf, 0x1c, 0xfb, 0x67, 0x9b, 0xb7,
0x39, 0xa5, 0x86, 0x2d, 0xe2, 0xbc, 0xb9, 0x37,
0xf7, 0x4d, 0x5b, 0xf8, 0x67, 0x1c, 0x5a, 0x8a,
0x50, 0x92, 0xf6, 0x1d, 0x54, 0xc9, 0xaa, 0x5b
};
static const u8 enc_output085[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x93, 0x3a, 0x51, 0x63, 0xc7, 0xf6, 0x23, 0x68,
0x32, 0x7b, 0x3f, 0xbc, 0x10, 0x36, 0xc9, 0x43
};
static const u8 enc_assoc085[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce085[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key085[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
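/*
 * A hedged sketch (not part of the vector data) of how one of the
 * 8-byte-nonce vectors above could be checked. The struct and helper
 * below are hypothetical illustrations, not the selftest's actual
 * harness, which drives all vectors from a single table. Assumes
 * chacha20poly1305_encrypt() from <crypto/chacha20poly1305.h>,
 * POLY1305_DIGEST_SIZE from <crypto/poly1305.h>, memcmp() from
 * <linux/string.h>, and get_unaligned_le64() for the nonce, which
 * these vectors store as 8 little-endian bytes.
 */
struct wycheproof_enc_vec {
	const u8 *key, *nonce, *assoc, *input, *output;
	size_t assoc_len, input_len;
};

static bool __init check_enc_vec(const struct wycheproof_enc_vec *v)
{
	/* large enough for the vectors above (at most 128 + 16 bytes) */
	u8 out[192];

	chacha20poly1305_encrypt(out, v->input, v->input_len,
				 v->assoc, v->assoc_len,
				 get_unaligned_le64(v->nonce), v->key);
	/* expected output holds ciphertext || 16-byte tag */
	return !memcmp(out, v->output, v->input_len + POLY1305_DIGEST_SIZE);
}
/*
 * Usage, e.g. for vector 085:
 *	check_enc_vec(&(struct wycheproof_enc_vec){
 *		.key = enc_key085, .nonce = enc_nonce085,
 *		.assoc = enc_assoc085, .assoc_len = sizeof(enc_assoc085),
 *		.input = enc_input085, .input_len = sizeof(enc_input085),
 *		.output = enc_output085 });
 */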
/*
 * wycheproof - special case tag. In tests 086-092 the plaintext,
 * nonce and key are held fixed while the 32-byte AAD is chosen so
 * that the expected Poly1305 tag takes a degenerate pattern: the
 * 00 01 02 .. 0f counting sequence here, then all-zero, all-0xff,
 * and repeated 0x80000000 / 0x7fffffff / 0x00000001 / 0xffffffff
 * little-endian words in the following vectors. Such tags catch
 * comparison or serialization shortcuts that misbehave on extreme
 * tag values.
 */
static const u8 enc_input086[] __initconst = {
0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
};
static const u8 enc_output086[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
};
static const u8 enc_assoc086[] __initconst = {
0x85, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xa6, 0x90, 0x2f, 0xcb, 0xc8, 0x83, 0xbb, 0xc1,
0x80, 0xb2, 0x56, 0xae, 0x34, 0xad, 0x7f, 0x00
};
static const u8 enc_nonce086[] __initconst = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b
};
static const u8 enc_key086[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
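/*
 * Note on nonces: tests 086-092 switch from the 8-byte nonce of the
 * overflow vectors above to the 12-byte RFC 7539 nonce. The in-kernel
 * library API exposes only the 64-bit-nonce variant
 * (chacha20poly1305_encrypt) and the 24-byte XChaCha variant
 * (xchacha20poly1305_encrypt), so exercising these particular vectors
 * requires composing the ChaCha20 and Poly1305 primitives directly
 * with the full 96-bit nonce; the sketch above does not apply to them
 * unchanged.
 */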
/* wycheproof - special case tag */
static const u8 enc_input087[] __initconst = {
0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
};
static const u8 enc_output087[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static const u8 enc_assoc087[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x24, 0x7e, 0x50, 0x64, 0x2a, 0x1c, 0x0a, 0x2f,
0x8f, 0x77, 0x21, 0x96, 0x09, 0xdb, 0xa9, 0x58
};
static const u8 enc_nonce087[] __initconst = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b
};
static const u8 enc_key087[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - special case tag */
static const u8 enc_input088[] __initconst = {
0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
};
static const u8 enc_output088[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 enc_assoc088[] __initconst = {
0x7c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xd9, 0xe7, 0x2c, 0x06, 0x4a, 0xc8, 0x96, 0x1f,
0x3f, 0xa5, 0x85, 0xe0, 0xe2, 0xab, 0xd6, 0x00
};
static const u8 enc_nonce088[] __initconst = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b
};
static const u8 enc_key088[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - special case tag */
static const u8 enc_input089[] __initconst = {
0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
};
static const u8 enc_output089[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80,
0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80
};
static const u8 enc_assoc089[] __initconst = {
0x65, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x95, 0xaf, 0x0f, 0x4d, 0x0b, 0x68, 0x6e, 0xae,
0xcc, 0xca, 0x43, 0x07, 0xd5, 0x96, 0xf5, 0x02
};
static const u8 enc_nonce089[] __initconst = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b
};
static const u8 enc_key089[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - special case tag */
static const u8 enc_input090[] __initconst = {
0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
};
static const u8 enc_output090[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f,
0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f
};
static const u8 enc_assoc090[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x85, 0x40, 0xb4, 0x64, 0x35, 0x77, 0x07, 0xbe,
0x3a, 0x39, 0xd5, 0x5c, 0x34, 0xf8, 0xbc, 0xb3
};
static const u8 enc_nonce090[] __initconst = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b
};
static const u8 enc_key090[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - special case tag */
static const u8 enc_input091[] __initconst = {
0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
};
static const u8 enc_output091[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00
};
static const u8 enc_assoc091[] __initconst = {
0x4f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x66, 0x23, 0xd9, 0x90, 0xb8, 0x98, 0xd8, 0x30,
0xd2, 0x12, 0xaf, 0x23, 0x83, 0x33, 0x07, 0x01
};
static const u8 enc_nonce091[] __initconst = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b
};
static const u8 enc_key091[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - special case tag */
static const u8 enc_input092[] __initconst = {
0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6,
0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd,
0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b,
0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2,
0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19,
0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4,
0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63,
0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d
};
static const u8 enc_output092[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static const u8 enc_assoc092[] __initconst = {
0x83, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x5f, 0x16, 0xd0, 0x9f, 0x17, 0x78, 0x72, 0x11,
0xb7, 0xd4, 0x84, 0xe0, 0x24, 0xf8, 0x97, 0x01
};
static const u8 enc_nonce092[] __initconst = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b
};
static const u8 enc_key092[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/*
 * wycheproof - edge case intermediate sums in poly1305. From test 093
 * onwards the plaintext is chosen so that selected ciphertext blocks
 * repeat (the same 16-byte runs appear twice in each expected output
 * below), presumably driving the Poly1305 accumulator through
 * edge-case intermediate sums between blocks.
 */
static const u8 enc_input093[] __initconst = {
0x00, 0x52, 0x35, 0xd2, 0xa9, 0x19, 0xf2, 0x8d,
0x3d, 0xb7, 0x66, 0x4a, 0x34, 0xae, 0x6b, 0x44,
0x4d, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
0x5b, 0x8b, 0x94, 0x50, 0x9e, 0x2b, 0x74, 0xa3,
0x6d, 0x34, 0x6e, 0x33, 0xd5, 0x72, 0x65, 0x9b,
0xa9, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
0x83, 0xdc, 0xe9, 0xf3, 0x07, 0x3e, 0xfa, 0xdb,
0x7d, 0x23, 0xb8, 0x7a, 0xce, 0x35, 0x16, 0x8c
};
static const u8 enc_output093[] __initconst = {
0x00, 0x39, 0xe2, 0xfd, 0x2f, 0xd3, 0x12, 0x14,
0x9e, 0x98, 0x98, 0x80, 0x88, 0x48, 0x13, 0xe7,
0xca, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x3b, 0x0e, 0x86, 0x9a, 0xaa, 0x8e, 0xa4, 0x96,
0x32, 0xff, 0xff, 0x37, 0xb9, 0xe8, 0xce, 0x00,
0xca, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x3b, 0x0e, 0x86, 0x9a, 0xaa, 0x8e, 0xa4, 0x96,
0x32, 0xff, 0xff, 0x37, 0xb9, 0xe8, 0xce, 0x00,
0xa5, 0x19, 0xac, 0x1a, 0x35, 0xb4, 0xa5, 0x77,
0x87, 0x51, 0x0a, 0xf7, 0x8d, 0x8d, 0x20, 0x0a
};
static const u8 enc_assoc093[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce093[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key093[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
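/*
 * A hedged sketch of the decrypt direction for the 8-byte-nonce
 * vectors in this group, reusing the hypothetical struct above. Tag
 * verification happens on this path, which the special-case tags of
 * tests 086-092 target specifically (those use 12-byte nonces, so
 * this helper covers only the present group). Assumes
 * chacha20poly1305_decrypt(), which takes ciphertext || tag and
 * returns false on authentication failure.
 */
static bool __init check_dec_vec(const struct wycheproof_enc_vec *v)
{
	/* large enough for the plaintexts in this group */
	u8 pt[192];

	if (!chacha20poly1305_decrypt(pt, v->output,
				      v->input_len + POLY1305_DIGEST_SIZE,
				      v->assoc, v->assoc_len,
				      get_unaligned_le64(v->nonce), v->key))
		return false;	/* tag rejected */
	return !memcmp(pt, v->input, v->input_len);
}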
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input094[] __initconst = {
0xd3, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
0xe5, 0xda, 0x78, 0x76, 0x6f, 0xa1, 0x92, 0x90,
0xc0, 0x31, 0xf7, 0x52, 0x08, 0x50, 0x67, 0x45,
0xae, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
0x49, 0x6d, 0xde, 0xb0, 0x55, 0x09, 0xc6, 0xef,
0xff, 0xab, 0x75, 0xeb, 0x2d, 0xf4, 0xab, 0x09,
0x76, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
0x01, 0x49, 0xef, 0x50, 0x4b, 0x71, 0xb1, 0x20,
0xca, 0x4f, 0xf3, 0x95, 0x19, 0xc2, 0xc2, 0x10
};
static const u8 enc_output094[] __initconst = {
0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x62, 0x18, 0xb2, 0x7f, 0x83, 0xb8, 0xb4, 0x66,
0x02, 0xf6, 0xe1, 0xd8, 0x34, 0x20, 0x7b, 0x02,
0xce, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x2a, 0x64, 0x16, 0xce, 0xdb, 0x1c, 0xdd, 0x29,
0x6e, 0xf5, 0xd7, 0xd6, 0x92, 0xda, 0xff, 0x02,
0xce, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x2a, 0x64, 0x16, 0xce, 0xdb, 0x1c, 0xdd, 0x29,
0x6e, 0xf5, 0xd7, 0xd6, 0x92, 0xda, 0xff, 0x02,
0x30, 0x2f, 0xe8, 0x2a, 0xb0, 0xa0, 0x9a, 0xf6,
0x44, 0x00, 0xd0, 0x15, 0xae, 0x83, 0xd9, 0xcc
};
static const u8 enc_assoc094[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce094[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key094[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input095[] __initconst = {
0xe9, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
0x6d, 0xf1, 0x39, 0x4e, 0xdc, 0x53, 0x9b, 0x5b,
0x3a, 0x09, 0x57, 0xbe, 0x0f, 0xb8, 0x59, 0x46,
0x80, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
0xd1, 0x76, 0x9f, 0xe8, 0x06, 0xbb, 0xfe, 0xb6,
0xf5, 0x90, 0x95, 0x0f, 0x2e, 0xac, 0x9e, 0x0a,
0x58, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
0x99, 0x52, 0xae, 0x08, 0x18, 0xc3, 0x89, 0x79,
0xc0, 0x74, 0x13, 0x71, 0x1a, 0x9a, 0xf7, 0x13
};
static const u8 enc_output095[] __initconst = {
0xe9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xea, 0x33, 0xf3, 0x47, 0x30, 0x4a, 0xbd, 0xad,
0xf8, 0xce, 0x41, 0x34, 0x33, 0xc8, 0x45, 0x01,
0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xb2, 0x7f, 0x57, 0x96, 0x88, 0xae, 0xe5, 0x70,
0x64, 0xce, 0x37, 0x32, 0x91, 0x82, 0xca, 0x01,
0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xb2, 0x7f, 0x57, 0x96, 0x88, 0xae, 0xe5, 0x70,
0x64, 0xce, 0x37, 0x32, 0x91, 0x82, 0xca, 0x01,
0x98, 0xa7, 0xe8, 0x36, 0xe0, 0xee, 0x4d, 0x02,
0x35, 0x00, 0xd0, 0x55, 0x7e, 0xc2, 0xcb, 0xe0
};
static const u8 enc_assoc095[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce095[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key095[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input096[] __initconst = {
0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
0x64, 0xf9, 0x0f, 0x5b, 0x26, 0x92, 0xb8, 0x60,
0xd4, 0x59, 0x6f, 0xf4, 0xb3, 0x40, 0x2c, 0x5c,
0x00, 0xb9, 0xbb, 0x53, 0x70, 0x7a, 0xa6, 0x67,
0xd3, 0x56, 0xfe, 0x50, 0xc7, 0x19, 0x96, 0x94,
0x03, 0x35, 0x61, 0xe7, 0xca, 0xca, 0x6d, 0x94,
0x1d, 0xc3, 0xcd, 0x69, 0x14, 0xad, 0x69, 0x04
};
static const u8 enc_output096[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xe3, 0x3b, 0xc5, 0x52, 0xca, 0x8b, 0x9e, 0x96,
0x16, 0x9e, 0x79, 0x7e, 0x8f, 0x30, 0x30, 0x1b,
0x60, 0x3c, 0xa9, 0x99, 0x44, 0xdf, 0x76, 0x52,
0x8c, 0x9d, 0x6f, 0x54, 0xab, 0x83, 0x3d, 0x0f,
0x60, 0x3c, 0xa9, 0x99, 0x44, 0xdf, 0x76, 0x52,
0x8c, 0x9d, 0x6f, 0x54, 0xab, 0x83, 0x3d, 0x0f,
0x6a, 0xb8, 0xdc, 0xe2, 0xc5, 0x9d, 0xa4, 0x73,
0x71, 0x30, 0xb0, 0x25, 0x2f, 0x68, 0xa8, 0xd8
};
static const u8 enc_assoc096[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce096[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key096[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input097[] __initconst = {
0x68, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
0xb0, 0x8f, 0x25, 0x67, 0x5b, 0x9b, 0xcb, 0xf6,
0xe3, 0x84, 0x07, 0xde, 0x2e, 0xc7, 0x5a, 0x47,
0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
0x2d, 0x2a, 0xf7, 0xcd, 0x6b, 0x08, 0x05, 0x01,
0xd3, 0x1b, 0xa5, 0x4f, 0xb2, 0xeb, 0x75, 0x96,
0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
0x65, 0x0e, 0xc6, 0x2d, 0x75, 0x70, 0x72, 0xce,
0xe6, 0xff, 0x23, 0x31, 0x86, 0xdd, 0x1c, 0x8f
};
static const u8 enc_output097[] __initconst = {
0x68, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x37, 0x4d, 0xef, 0x6e, 0xb7, 0x82, 0xed, 0x00,
0x21, 0x43, 0x11, 0x54, 0x12, 0xb7, 0x46, 0x00,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x4e, 0x23, 0x3f, 0xb3, 0xe5, 0x1d, 0x1e, 0xc7,
0x42, 0x45, 0x07, 0x72, 0x0d, 0xc5, 0x21, 0x9d,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x4e, 0x23, 0x3f, 0xb3, 0xe5, 0x1d, 0x1e, 0xc7,
0x42, 0x45, 0x07, 0x72, 0x0d, 0xc5, 0x21, 0x9d,
0x04, 0x4d, 0xea, 0x60, 0x88, 0x80, 0x41, 0x2b,
0xfd, 0xff, 0xcf, 0x35, 0x57, 0x9e, 0x9b, 0x26
};
static const u8 enc_assoc097[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce097[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key097[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input098[] __initconst = {
0x6d, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
0xa1, 0x61, 0xb5, 0xab, 0x04, 0x09, 0x00, 0x62,
0x9e, 0xfe, 0xff, 0x78, 0xd7, 0xd8, 0x6b, 0x45,
0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
0xc6, 0xf8, 0x07, 0x8c, 0xc8, 0xef, 0x12, 0xa0,
0xff, 0x65, 0x7d, 0x6d, 0x08, 0xdb, 0x10, 0xb8,
0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
0x8e, 0xdc, 0x36, 0x6c, 0xd6, 0x97, 0x65, 0x6f,
0xca, 0x81, 0xfb, 0x13, 0x3c, 0xed, 0x79, 0xa1
};
static const u8 enc_output098[] __initconst = {
0x6d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x26, 0xa3, 0x7f, 0xa2, 0xe8, 0x10, 0x26, 0x94,
0x5c, 0x39, 0xe9, 0xf2, 0xeb, 0xa8, 0x77, 0x02,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xa5, 0xf1, 0xcf, 0xf2, 0x46, 0xfa, 0x09, 0x66,
0x6e, 0x3b, 0xdf, 0x50, 0xb7, 0xf5, 0x44, 0xb3,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xa5, 0xf1, 0xcf, 0xf2, 0x46, 0xfa, 0x09, 0x66,
0x6e, 0x3b, 0xdf, 0x50, 0xb7, 0xf5, 0x44, 0xb3,
0x1e, 0x6b, 0xea, 0x63, 0x14, 0x54, 0x2e, 0x2e,
0xf9, 0xff, 0xcf, 0x45, 0x0b, 0x2e, 0x98, 0x2b
};
static const u8 enc_assoc098[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce098[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key098[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input099[] __initconst = {
0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
0xfc, 0x01, 0xb8, 0x91, 0xe5, 0xf0, 0xf9, 0x12,
0x8d, 0x7d, 0x1c, 0x57, 0x91, 0x92, 0xb6, 0x98,
0x63, 0x41, 0x44, 0x15, 0xb6, 0x99, 0x68, 0x95,
0x9a, 0x72, 0x91, 0xb7, 0xa5, 0xaf, 0x13, 0x48,
0x60, 0xcd, 0x9e, 0xa1, 0x0c, 0x29, 0xa3, 0x66,
0x54, 0xe7, 0xa2, 0x8e, 0x76, 0x1b, 0xec, 0xd8
};
static const u8 enc_output099[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x7b, 0xc3, 0x72, 0x98, 0x09, 0xe9, 0xdf, 0xe4,
0x4f, 0xba, 0x0a, 0xdd, 0xad, 0xe2, 0xaa, 0xdf,
0x03, 0xc4, 0x56, 0xdf, 0x82, 0x3c, 0xb8, 0xa0,
0xc5, 0xb9, 0x00, 0xb3, 0xc9, 0x35, 0xb8, 0xd3,
0x03, 0xc4, 0x56, 0xdf, 0x82, 0x3c, 0xb8, 0xa0,
0xc5, 0xb9, 0x00, 0xb3, 0xc9, 0x35, 0xb8, 0xd3,
0xed, 0x20, 0x17, 0xc8, 0xdb, 0xa4, 0x77, 0x56,
0x29, 0x04, 0x9d, 0x78, 0x6e, 0x3b, 0xce, 0xb1
};
static const u8 enc_assoc099[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce099[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key099[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input100[] __initconst = {
0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
0x6b, 0x6d, 0xc9, 0xd2, 0x1a, 0x81, 0x9e, 0x70,
0xb5, 0x77, 0xf4, 0x41, 0x37, 0xd3, 0xd6, 0xbd,
0x13, 0x35, 0xf5, 0xeb, 0x44, 0x49, 0x40, 0x77,
0xb2, 0x64, 0x49, 0xa5, 0x4b, 0x6c, 0x7c, 0x75,
0x10, 0xb9, 0x2f, 0x5f, 0xfe, 0xf9, 0x8b, 0x84,
0x7c, 0xf1, 0x7a, 0x9c, 0x98, 0xd8, 0x83, 0xe5
};
static const u8 enc_output100[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xec, 0xaf, 0x03, 0xdb, 0xf6, 0x98, 0xb8, 0x86,
0x77, 0xb0, 0xe2, 0xcb, 0x0b, 0xa3, 0xca, 0xfa,
0x73, 0xb0, 0xe7, 0x21, 0x70, 0xec, 0x90, 0x42,
0xed, 0xaf, 0xd8, 0xa1, 0x27, 0xf6, 0xd7, 0xee,
0x73, 0xb0, 0xe7, 0x21, 0x70, 0xec, 0x90, 0x42,
0xed, 0xaf, 0xd8, 0xa1, 0x27, 0xf6, 0xd7, 0xee,
0x07, 0x3f, 0x17, 0xcb, 0x67, 0x78, 0x64, 0x59,
0x25, 0x04, 0x9d, 0x88, 0x22, 0xcb, 0xca, 0xb6
};
static const u8 enc_assoc100[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce100[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key100[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input101[] __initconst = {
0xff, 0xcb, 0x2b, 0x11, 0x06, 0xf8, 0x23, 0x4c,
0x5e, 0x99, 0xd4, 0xdb, 0x4c, 0x70, 0x48, 0xde,
0x32, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
0x16, 0xe9, 0x88, 0x4a, 0x11, 0x4f, 0x0e, 0x92,
0x66, 0xce, 0xa3, 0x88, 0x5f, 0xe3, 0x6b, 0x9f,
0xd6, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
0xce, 0xbe, 0xf5, 0xe9, 0x88, 0x5a, 0x80, 0xea,
0x76, 0xd9, 0x75, 0xc1, 0x44, 0xa4, 0x18, 0x88
};
static const u8 enc_output101[] __initconst = {
0xff, 0xa0, 0xfc, 0x3e, 0x80, 0x32, 0xc3, 0xd5,
0xfd, 0xb6, 0x2a, 0x11, 0xf0, 0x96, 0x30, 0x7d,
0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x76, 0x6c, 0x9a, 0x80, 0x25, 0xea, 0xde, 0xa7,
0x39, 0x05, 0x32, 0x8c, 0x33, 0x79, 0xc0, 0x04,
0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x76, 0x6c, 0x9a, 0x80, 0x25, 0xea, 0xde, 0xa7,
0x39, 0x05, 0x32, 0x8c, 0x33, 0x79, 0xc0, 0x04,
0x8b, 0x9b, 0xb4, 0xb4, 0x86, 0x12, 0x89, 0x65,
0x8c, 0x69, 0x6a, 0x83, 0x40, 0x15, 0x04, 0x05
};
static const u8 enc_assoc101[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce101[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key101[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input102[] __initconst = {
0x6f, 0x9e, 0x70, 0xed, 0x3b, 0x8b, 0xac, 0xa0,
0x26, 0xe4, 0x6a, 0x5a, 0x09, 0x43, 0x15, 0x8d,
0x21, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
0x0c, 0x61, 0x2c, 0x5e, 0x8d, 0x89, 0xa8, 0x73,
0xdb, 0xca, 0xad, 0x5b, 0x73, 0x46, 0x42, 0x9b,
0xc5, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
0xd4, 0x36, 0x51, 0xfd, 0x14, 0x9c, 0x26, 0x0b,
0xcb, 0xdd, 0x7b, 0x12, 0x68, 0x01, 0x31, 0x8c
};
static const u8 enc_output102[] __initconst = {
0x6f, 0xf5, 0xa7, 0xc2, 0xbd, 0x41, 0x4c, 0x39,
0x85, 0xcb, 0x94, 0x90, 0xb5, 0xa5, 0x6d, 0x2e,
0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x6c, 0xe4, 0x3e, 0x94, 0xb9, 0x2c, 0x78, 0x46,
0x84, 0x01, 0x3c, 0x5f, 0x1f, 0xdc, 0xe9, 0x00,
0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x6c, 0xe4, 0x3e, 0x94, 0xb9, 0x2c, 0x78, 0x46,
0x84, 0x01, 0x3c, 0x5f, 0x1f, 0xdc, 0xe9, 0x00,
0x8b, 0x3b, 0xbd, 0x51, 0x64, 0x44, 0x59, 0x56,
0x8d, 0x81, 0xca, 0x1f, 0xa7, 0x2c, 0xe4, 0x04
};
static const u8 enc_assoc102[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce102[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key102[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input103[] __initconst = {
0x41, 0x2b, 0x08, 0x0a, 0x3e, 0x19, 0xc1, 0x0d,
0x44, 0xa1, 0xaf, 0x1e, 0xab, 0xde, 0xb4, 0xce,
0x35, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
0x6b, 0x83, 0x94, 0x33, 0x09, 0x21, 0x48, 0x6c,
0xa1, 0x1d, 0x29, 0x1c, 0x3e, 0x97, 0xee, 0x9a,
0xd1, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
0xb3, 0xd4, 0xe9, 0x90, 0x90, 0x34, 0xc6, 0x14,
0xb1, 0x0a, 0xff, 0x55, 0x25, 0xd0, 0x9d, 0x8d
};
static const u8 enc_output103[] __initconst = {
0x41, 0x40, 0xdf, 0x25, 0xb8, 0xd3, 0x21, 0x94,
0xe7, 0x8e, 0x51, 0xd4, 0x17, 0x38, 0xcc, 0x6d,
0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x0b, 0x06, 0x86, 0xf9, 0x3d, 0x84, 0x98, 0x59,
0xfe, 0xd6, 0xb8, 0x18, 0x52, 0x0d, 0x45, 0x01,
0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x0b, 0x06, 0x86, 0xf9, 0x3d, 0x84, 0x98, 0x59,
0xfe, 0xd6, 0xb8, 0x18, 0x52, 0x0d, 0x45, 0x01,
0x86, 0xfb, 0xab, 0x2b, 0x4a, 0x94, 0xf4, 0x7a,
0xa5, 0x6f, 0x0a, 0xea, 0x65, 0xd1, 0x10, 0x08
};
static const u8 enc_assoc103[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce103[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key103[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input104[] __initconst = {
0xb2, 0x47, 0xa7, 0x47, 0x23, 0x49, 0x1a, 0xac,
0xac, 0xaa, 0xd7, 0x09, 0xc9, 0x1e, 0x93, 0x2b,
0x31, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
0x9a, 0xde, 0x04, 0xe7, 0x5b, 0xb7, 0x01, 0xd9,
0x66, 0x06, 0x01, 0xb3, 0x47, 0x65, 0xde, 0x98,
0xd5, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
0x42, 0x89, 0x79, 0x44, 0xc2, 0xa2, 0x8f, 0xa1,
0x76, 0x11, 0xd7, 0xfa, 0x5c, 0x22, 0xad, 0x8f
};
static const u8 enc_output104[] __initconst = {
0xb2, 0x2c, 0x70, 0x68, 0xa5, 0x83, 0xfa, 0x35,
0x0f, 0x85, 0x29, 0xc3, 0x75, 0xf8, 0xeb, 0x88,
0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xfa, 0x5b, 0x16, 0x2d, 0x6f, 0x12, 0xd1, 0xec,
0x39, 0xcd, 0x90, 0xb7, 0x2b, 0xff, 0x75, 0x03,
0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xfa, 0x5b, 0x16, 0x2d, 0x6f, 0x12, 0xd1, 0xec,
0x39, 0xcd, 0x90, 0xb7, 0x2b, 0xff, 0x75, 0x03,
0xa0, 0x19, 0xac, 0x2e, 0xd6, 0x67, 0xe1, 0x7d,
0xa1, 0x6f, 0x0a, 0xfa, 0x19, 0x61, 0x0d, 0x0d
};
static const u8 enc_assoc104[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce104[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key104[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input105[] __initconst = {
0x74, 0x0f, 0x9e, 0x49, 0xf6, 0x10, 0xef, 0xa5,
0x85, 0xb6, 0x59, 0xca, 0x6e, 0xd8, 0xb4, 0x99,
0x2d, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
0x41, 0x2d, 0x96, 0xaf, 0xbe, 0x80, 0xec, 0x3e,
0x79, 0xd4, 0x51, 0xb0, 0x0a, 0x2d, 0xb2, 0x9a,
0xc9, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
0x99, 0x7a, 0xeb, 0x0c, 0x27, 0x95, 0x62, 0x46,
0x69, 0xc3, 0x87, 0xf9, 0x11, 0x6a, 0xc1, 0x8d
};
static const u8 enc_output105[] __initconst = {
0x74, 0x64, 0x49, 0x66, 0x70, 0xda, 0x0f, 0x3c,
0x26, 0x99, 0xa7, 0x00, 0xd2, 0x3e, 0xcc, 0x3a,
0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x21, 0xa8, 0x84, 0x65, 0x8a, 0x25, 0x3c, 0x0b,
0x26, 0x1f, 0xc0, 0xb4, 0x66, 0xb7, 0x19, 0x01,
0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x21, 0xa8, 0x84, 0x65, 0x8a, 0x25, 0x3c, 0x0b,
0x26, 0x1f, 0xc0, 0xb4, 0x66, 0xb7, 0x19, 0x01,
0x73, 0x6e, 0x18, 0x18, 0x16, 0x96, 0xa5, 0x88,
0x9c, 0x31, 0x59, 0xfa, 0xab, 0xab, 0x20, 0xfd
};
static const u8 enc_assoc105[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce105[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key105[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input106[] __initconst = {
0xad, 0xba, 0x5d, 0x10, 0x5b, 0xc8, 0xaa, 0x06,
0x2c, 0x23, 0x36, 0xcb, 0x88, 0x9d, 0xdb, 0xd5,
0x37, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
0x17, 0x7c, 0x5f, 0xfe, 0x28, 0x75, 0xf4, 0x68,
0xf6, 0xc2, 0x96, 0x57, 0x48, 0xf3, 0x59, 0x9a,
0xd3, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
0xcf, 0x2b, 0x22, 0x5d, 0xb1, 0x60, 0x7a, 0x10,
0xe6, 0xd5, 0x40, 0x1e, 0x53, 0xb4, 0x2a, 0x8d
};
static const u8 enc_output106[] __initconst = {
0xad, 0xd1, 0x8a, 0x3f, 0xdd, 0x02, 0x4a, 0x9f,
0x8f, 0x0c, 0xc8, 0x01, 0x34, 0x7b, 0xa3, 0x76,
0xb0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x77, 0xf9, 0x4d, 0x34, 0x1c, 0xd0, 0x24, 0x5d,
0xa9, 0x09, 0x07, 0x53, 0x24, 0x69, 0xf2, 0x01,
0xb0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x77, 0xf9, 0x4d, 0x34, 0x1c, 0xd0, 0x24, 0x5d,
0xa9, 0x09, 0x07, 0x53, 0x24, 0x69, 0xf2, 0x01,
0xba, 0xd5, 0x8f, 0x10, 0xa9, 0x1e, 0x6a, 0x88,
0x9a, 0xba, 0x32, 0xfd, 0x17, 0xd8, 0x33, 0x1a
};
static const u8 enc_assoc106[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce106[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key106[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input107[] __initconst = {
0xfe, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
0xc0, 0x01, 0xed, 0xc5, 0xda, 0x44, 0x2e, 0x71,
0x9b, 0xce, 0x9a, 0xbe, 0x27, 0x3a, 0xf1, 0x44,
0xb4, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
0x48, 0x02, 0x5f, 0x41, 0xfa, 0x4e, 0x33, 0x6c,
0x78, 0x69, 0x57, 0xa2, 0xa7, 0xc4, 0x93, 0x0a,
0x6c, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
0x00, 0x26, 0x6e, 0xa1, 0xe4, 0x36, 0x44, 0xa3,
0x4d, 0x8d, 0xd1, 0xdc, 0x93, 0xf2, 0xfa, 0x13
};
static const u8 enc_output107[] __initconst = {
0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x47, 0xc3, 0x27, 0xcc, 0x36, 0x5d, 0x08, 0x87,
0x59, 0x09, 0x8c, 0x34, 0x1b, 0x4a, 0xed, 0x03,
0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x2b, 0x0b, 0x97, 0x3f, 0x74, 0x5b, 0x28, 0xaa,
0xe9, 0x37, 0xf5, 0x9f, 0x18, 0xea, 0xc7, 0x01,
0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x2b, 0x0b, 0x97, 0x3f, 0x74, 0x5b, 0x28, 0xaa,
0xe9, 0x37, 0xf5, 0x9f, 0x18, 0xea, 0xc7, 0x01,
0xd6, 0x8c, 0xe1, 0x74, 0x07, 0x9a, 0xdd, 0x02,
0x8d, 0xd0, 0x5c, 0xf8, 0x14, 0x63, 0x04, 0x88
};
static const u8 enc_assoc107[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce107[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key107[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input108[] __initconst = {
0xb5, 0x13, 0xb0, 0x6a, 0xb9, 0xac, 0x14, 0x43,
0x5a, 0xcb, 0x8a, 0xa3, 0xa3, 0x7a, 0xfd, 0xb6,
0x54, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
0x61, 0x95, 0x01, 0x93, 0xb1, 0xbf, 0x03, 0x11,
0xff, 0x11, 0x79, 0x89, 0xae, 0xd9, 0xa9, 0x99,
0xb0, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
0xb9, 0xc2, 0x7c, 0x30, 0x28, 0xaa, 0x8d, 0x69,
0xef, 0x06, 0xaf, 0xc0, 0xb5, 0x9e, 0xda, 0x8e
};
static const u8 enc_output108[] __initconst = {
0xb5, 0x78, 0x67, 0x45, 0x3f, 0x66, 0xf4, 0xda,
0xf9, 0xe4, 0x74, 0x69, 0x1f, 0x9c, 0x85, 0x15,
0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x01, 0x10, 0x13, 0x59, 0x85, 0x1a, 0xd3, 0x24,
0xa0, 0xda, 0xe8, 0x8d, 0xc2, 0x43, 0x02, 0x02,
0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x01, 0x10, 0x13, 0x59, 0x85, 0x1a, 0xd3, 0x24,
0xa0, 0xda, 0xe8, 0x8d, 0xc2, 0x43, 0x02, 0x02,
0xaa, 0x48, 0xa3, 0x88, 0x7d, 0x4b, 0x05, 0x96,
0x99, 0xc2, 0xfd, 0xf9, 0xc6, 0x78, 0x7e, 0x0a
};
static const u8 enc_assoc108[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce108[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key108[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input109[] __initconst = {
0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
0xd4, 0xf1, 0x09, 0xe8, 0x14, 0xce, 0xa8, 0x5a,
0x08, 0xc0, 0x11, 0xd8, 0x50, 0xdd, 0x1d, 0xcb,
0xcf, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
0x53, 0x40, 0xb8, 0x5a, 0x9a, 0xa0, 0x82, 0x96,
0xb7, 0x7a, 0x5f, 0xc3, 0x96, 0x1f, 0x66, 0x0f,
0x17, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
0x1b, 0x64, 0x89, 0xba, 0x84, 0xd8, 0xf5, 0x59,
0x82, 0x9e, 0xd9, 0xbd, 0xa2, 0x29, 0x0f, 0x16
};
static const u8 enc_output109[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x53, 0x33, 0xc3, 0xe1, 0xf8, 0xd7, 0x8e, 0xac,
0xca, 0x07, 0x07, 0x52, 0x6c, 0xad, 0x01, 0x8c,
0xaf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x30, 0x49, 0x70, 0x24, 0x14, 0xb5, 0x99, 0x50,
0x26, 0x24, 0xfd, 0xfe, 0x29, 0x31, 0x32, 0x04,
0xaf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x30, 0x49, 0x70, 0x24, 0x14, 0xb5, 0x99, 0x50,
0x26, 0x24, 0xfd, 0xfe, 0x29, 0x31, 0x32, 0x04,
0xb9, 0x36, 0xa8, 0x17, 0xf2, 0x21, 0x1a, 0xf1,
0x29, 0xe2, 0xcf, 0x16, 0x0f, 0xd4, 0x2b, 0xcb
};
static const u8 enc_assoc109[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce109[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key109[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input110[] __initconst = {
0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
0xdf, 0x4c, 0x62, 0x03, 0x2d, 0x41, 0x19, 0xb5,
0x88, 0x47, 0x7e, 0x99, 0x92, 0x5a, 0x56, 0xd9,
0xd6, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
0xfa, 0x84, 0xf0, 0x64, 0x55, 0x36, 0x42, 0x1b,
0x2b, 0xb9, 0x24, 0x6e, 0xc2, 0x19, 0xed, 0x0b,
0x0e, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
0xb2, 0xa0, 0xc1, 0x84, 0x4b, 0x4e, 0x35, 0xd4,
0x1e, 0x5d, 0xa2, 0x10, 0xf6, 0x2f, 0x84, 0x12
};
static const u8 enc_output110[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x58, 0x8e, 0xa8, 0x0a, 0xc1, 0x58, 0x3f, 0x43,
0x4a, 0x80, 0x68, 0x13, 0xae, 0x2a, 0x4a, 0x9e,
0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x99, 0x8d, 0x38, 0x1a, 0xdb, 0x23, 0x59, 0xdd,
0xba, 0xe7, 0x86, 0x53, 0x7d, 0x37, 0xb9, 0x00,
0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x99, 0x8d, 0x38, 0x1a, 0xdb, 0x23, 0x59, 0xdd,
0xba, 0xe7, 0x86, 0x53, 0x7d, 0x37, 0xb9, 0x00,
0x9f, 0x7a, 0xc4, 0x35, 0x1f, 0x6b, 0x91, 0xe6,
0x30, 0x97, 0xa7, 0x13, 0x11, 0x5d, 0x05, 0xbe
};
static const u8 enc_assoc110[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce110[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key110[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input111[] __initconst = {
0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
0x13, 0xf8, 0x0a, 0x00, 0x6d, 0xc1, 0xbb, 0xda,
0xd6, 0x39, 0xa9, 0x2f, 0xc7, 0xec, 0xa6, 0x55,
0xf7, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
0x63, 0x48, 0xb8, 0xfd, 0x29, 0xbf, 0x96, 0xd5,
0x63, 0xa5, 0x17, 0xe2, 0x7d, 0x7b, 0xfc, 0x0f,
0x2f, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
0x2b, 0x6c, 0x89, 0x1d, 0x37, 0xc7, 0xe1, 0x1a,
0x56, 0x41, 0x91, 0x9c, 0x49, 0x4d, 0x95, 0x16
};
static const u8 enc_output111[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x94, 0x3a, 0xc0, 0x09, 0x81, 0xd8, 0x9d, 0x2c,
0x14, 0xfe, 0xbf, 0xa5, 0xfb, 0x9c, 0xba, 0x12,
0x97, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, 0x41, 0x70, 0x83, 0xa7, 0xaa, 0x8d, 0x13,
0xf2, 0xfb, 0xb5, 0xdf, 0xc2, 0x55, 0xa8, 0x04,
0x97, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, 0x41, 0x70, 0x83, 0xa7, 0xaa, 0x8d, 0x13,
0xf2, 0xfb, 0xb5, 0xdf, 0xc2, 0x55, 0xa8, 0x04,
0x9a, 0x18, 0xa8, 0x28, 0x07, 0x02, 0x69, 0xf4,
0x47, 0x00, 0xd0, 0x09, 0xe7, 0x17, 0x1c, 0xc9
};
static const u8 enc_assoc111[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce111[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key111[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input112[] __initconst = {
0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
0x82, 0xe5, 0x9b, 0x45, 0x82, 0x91, 0x50, 0x38,
0xf9, 0x33, 0x81, 0x1e, 0x65, 0x2d, 0xc6, 0x6a,
0xfc, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
0xb6, 0x71, 0xc8, 0xca, 0xc2, 0x70, 0xc2, 0x65,
0xa0, 0xac, 0x2f, 0x53, 0x57, 0x99, 0x88, 0x0a,
0x24, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
0xfe, 0x55, 0xf9, 0x2a, 0xdc, 0x08, 0xb5, 0xaa,
0x95, 0x48, 0xa9, 0x2d, 0x63, 0xaf, 0xe1, 0x13
};
static const u8 enc_output112[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x05, 0x27, 0x51, 0x4c, 0x6e, 0x88, 0x76, 0xce,
0x3b, 0xf4, 0x97, 0x94, 0x59, 0x5d, 0xda, 0x2d,
0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xd5, 0x78, 0x00, 0xb4, 0x4c, 0x65, 0xd9, 0xa3,
0x31, 0xf2, 0x8d, 0x6e, 0xe8, 0xb7, 0xdc, 0x01,
0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xd5, 0x78, 0x00, 0xb4, 0x4c, 0x65, 0xd9, 0xa3,
0x31, 0xf2, 0x8d, 0x6e, 0xe8, 0xb7, 0xdc, 0x01,
0xb4, 0x36, 0xa8, 0x2b, 0x93, 0xd5, 0x55, 0xf7,
0x43, 0x00, 0xd0, 0x19, 0x9b, 0xa7, 0x18, 0xce
};
static const u8 enc_assoc112[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce112[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key112[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input113[] __initconst = {
0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
0xf1, 0xd1, 0x28, 0x87, 0xb7, 0x21, 0x69, 0x86,
0xa1, 0x2d, 0x79, 0x09, 0x8b, 0x6d, 0xe6, 0x0f,
0xc0, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
0xa7, 0xc7, 0x58, 0x99, 0xf3, 0xe6, 0x0a, 0xf1,
0xfc, 0xb6, 0xc7, 0x30, 0x7d, 0x87, 0x59, 0x0f,
0x18, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
0xef, 0xe3, 0x69, 0x79, 0xed, 0x9e, 0x7d, 0x3e,
0xc9, 0x52, 0x41, 0x4e, 0x49, 0xb1, 0x30, 0x16
};
static const u8 enc_output113[] __initconst = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x76, 0x13, 0xe2, 0x8e, 0x5b, 0x38, 0x4f, 0x70,
0x63, 0xea, 0x6f, 0x83, 0xb7, 0x1d, 0xfa, 0x48,
0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xc4, 0xce, 0x90, 0xe7, 0x7d, 0xf3, 0x11, 0x37,
0x6d, 0xe8, 0x65, 0x0d, 0xc2, 0xa9, 0x0d, 0x04,
0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xc4, 0xce, 0x90, 0xe7, 0x7d, 0xf3, 0x11, 0x37,
0x6d, 0xe8, 0x65, 0x0d, 0xc2, 0xa9, 0x0d, 0x04,
0xce, 0x54, 0xa8, 0x2e, 0x1f, 0xa9, 0x42, 0xfa,
0x3f, 0x00, 0xd0, 0x29, 0x4f, 0x37, 0x15, 0xd3
};
static const u8 enc_assoc113[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce113[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key113[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input114[] __initconst = {
0xcb, 0xf1, 0xda, 0x9e, 0x0b, 0xa9, 0x37, 0x73,
0x74, 0xe6, 0x9e, 0x1c, 0x0e, 0x60, 0x0c, 0xfc,
0x34, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
0xbe, 0x3f, 0xa6, 0x6b, 0x6c, 0xe7, 0x80, 0x8a,
0xa3, 0xe4, 0x59, 0x49, 0xf9, 0x44, 0x64, 0x9f,
0xd0, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
0x66, 0x68, 0xdb, 0xc8, 0xf5, 0xf2, 0x0e, 0xf2,
0xb3, 0xf3, 0x8f, 0x00, 0xe2, 0x03, 0x17, 0x88
};
static const u8 enc_output114[] __initconst = {
0xcb, 0x9a, 0x0d, 0xb1, 0x8d, 0x63, 0xd7, 0xea,
0xd7, 0xc9, 0x60, 0xd6, 0xb2, 0x86, 0x74, 0x5f,
0xb3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xde, 0xba, 0xb4, 0xa1, 0x58, 0x42, 0x50, 0xbf,
0xfc, 0x2f, 0xc8, 0x4d, 0x95, 0xde, 0xcf, 0x04,
0xb3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xde, 0xba, 0xb4, 0xa1, 0x58, 0x42, 0x50, 0xbf,
0xfc, 0x2f, 0xc8, 0x4d, 0x95, 0xde, 0xcf, 0x04,
0x23, 0x83, 0xab, 0x0b, 0x79, 0x92, 0x05, 0x69,
0x9b, 0x51, 0x0a, 0xa7, 0x09, 0xbf, 0x31, 0xf1
};
static const u8 enc_assoc114[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce114[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key114[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input115[] __initconst = {
0x8f, 0x27, 0x86, 0x94, 0xc4, 0xe9, 0xda, 0xeb,
0xd5, 0x8d, 0x3e, 0x5b, 0x96, 0x6e, 0x8b, 0x68,
0x42, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09,
0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8,
0x06, 0x53, 0xe7, 0xa3, 0x31, 0x71, 0x88, 0x33,
0xac, 0xc3, 0xb9, 0xad, 0xff, 0x1c, 0x31, 0x98,
0xa6, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39,
0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4,
0xde, 0x04, 0x9a, 0x00, 0xa8, 0x64, 0x06, 0x4b,
0xbc, 0xd4, 0x6f, 0xe4, 0xe4, 0x5b, 0x42, 0x8f
};
static const u8 enc_output115[] __initconst = {
0x8f, 0x4c, 0x51, 0xbb, 0x42, 0x23, 0x3a, 0x72,
0x76, 0xa2, 0xc0, 0x91, 0x2a, 0x88, 0xf3, 0xcb,
0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x66, 0xd6, 0xf5, 0x69, 0x05, 0xd4, 0x58, 0x06,
0xf3, 0x08, 0x28, 0xa9, 0x93, 0x86, 0x9a, 0x03,
0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x66, 0xd6, 0xf5, 0x69, 0x05, 0xd4, 0x58, 0x06,
0xf3, 0x08, 0x28, 0xa9, 0x93, 0x86, 0x9a, 0x03,
0x8b, 0xfb, 0xab, 0x17, 0xa9, 0xe0, 0xb8, 0x74,
0x8b, 0x51, 0x0a, 0xe7, 0xd9, 0xfd, 0x23, 0x05
};
static const u8 enc_assoc115[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce115[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key115[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input116[] __initconst = {
0xd5, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
0x9a, 0x22, 0xd7, 0x0a, 0x48, 0xe2, 0x4f, 0xdd,
0xcd, 0xd4, 0x41, 0x9d, 0xe6, 0x4c, 0x8f, 0x44,
0xfc, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
0x77, 0xb5, 0xc9, 0x07, 0xd9, 0xc9, 0xe1, 0xea,
0x51, 0x85, 0x1a, 0x20, 0x4a, 0xad, 0x9f, 0x0a,
0x24, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
0x3f, 0x91, 0xf8, 0xe7, 0xc7, 0xb1, 0x96, 0x25,
0x64, 0x61, 0x9c, 0x5e, 0x7e, 0x9b, 0xf6, 0x13
};
static const u8 enc_output116[] __initconst = {
0xd5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x1d, 0xe0, 0x1d, 0x03, 0xa4, 0xfb, 0x69, 0x2b,
0x0f, 0x13, 0x57, 0x17, 0xda, 0x3c, 0x93, 0x03,
0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x14, 0xbc, 0x01, 0x79, 0x57, 0xdc, 0xfa, 0x2c,
0xc0, 0xdb, 0xb8, 0x1d, 0xf5, 0x83, 0xcb, 0x01,
0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x14, 0xbc, 0x01, 0x79, 0x57, 0xdc, 0xfa, 0x2c,
0xc0, 0xdb, 0xb8, 0x1d, 0xf5, 0x83, 0xcb, 0x01,
0x49, 0xbc, 0x6e, 0x9f, 0xc5, 0x1c, 0x4d, 0x50,
0x30, 0x36, 0x64, 0x4d, 0x84, 0x27, 0x73, 0xd2
};
static const u8 enc_assoc116[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce116[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key116[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input117[] __initconst = {
0xdb, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
0x75, 0xd5, 0x64, 0x3a, 0xa5, 0xaf, 0x93, 0x4d,
0x8c, 0xce, 0x39, 0x2c, 0xc3, 0xee, 0xdb, 0x47,
0xc0, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
0x60, 0x1b, 0x5a, 0xd2, 0x06, 0x7f, 0x28, 0x06,
0x6a, 0x8f, 0x32, 0x81, 0x71, 0x5b, 0xa8, 0x08,
0x18, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
0x28, 0x3f, 0x6b, 0x32, 0x18, 0x07, 0x5f, 0xc9,
0x5f, 0x6b, 0xb4, 0xff, 0x45, 0x6d, 0xc1, 0x11
};
static const u8 enc_output117[] __initconst = {
0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xf2, 0x17, 0xae, 0x33, 0x49, 0xb6, 0xb5, 0xbb,
0x4e, 0x09, 0x2f, 0xa6, 0xff, 0x9e, 0xc7, 0x00,
0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x03, 0x12, 0x92, 0xac, 0x88, 0x6a, 0x33, 0xc0,
0xfb, 0xd1, 0x90, 0xbc, 0xce, 0x75, 0xfc, 0x03,
0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x03, 0x12, 0x92, 0xac, 0x88, 0x6a, 0x33, 0xc0,
0xfb, 0xd1, 0x90, 0xbc, 0xce, 0x75, 0xfc, 0x03,
0x63, 0xda, 0x6e, 0xa2, 0x51, 0xf0, 0x39, 0x53,
0x2c, 0x36, 0x64, 0x5d, 0x38, 0xb7, 0x6f, 0xd7
};
static const u8 enc_assoc117[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce117[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key117[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
/* wycheproof - edge case intermediate sums in poly1305 */
static const u8 enc_input118[] __initconst = {
0x93, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66,
0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c,
0x62, 0x48, 0x39, 0x60, 0x42, 0x16, 0xe4, 0x03,
0xeb, 0xcc, 0x6a, 0xf5, 0x59, 0xec, 0x8b, 0x43,
0x97, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca,
0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64,
0xd8, 0xc8, 0xc3, 0xfa, 0x1a, 0x9e, 0x47, 0x4a,
0xbe, 0x52, 0xd0, 0x2c, 0x81, 0x87, 0xe9, 0x0f,
0x4f, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2,
0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73,
0x90, 0xec, 0xf2, 0x1a, 0x04, 0xe6, 0x30, 0x85,
0x8b, 0xb6, 0x56, 0x52, 0xb5, 0xb1, 0x80, 0x16
};
static const u8 enc_output118[] __initconst = {
0x93, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xe5, 0x8a, 0xf3, 0x69, 0xae, 0x0f, 0xc2, 0xf5,
0x29, 0x0b, 0x7c, 0x7f, 0x65, 0x9c, 0x97, 0x04,
0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xbb, 0xc1, 0x0b, 0x84, 0x94, 0x8b, 0x5c, 0x8c,
0x2f, 0x0c, 0x72, 0x11, 0x3e, 0xa9, 0xbd, 0x04,
0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xbb, 0xc1, 0x0b, 0x84, 0x94, 0x8b, 0x5c, 0x8c,
0x2f, 0x0c, 0x72, 0x11, 0x3e, 0xa9, 0xbd, 0x04,
0x73, 0xeb, 0x27, 0x24, 0xb5, 0xc4, 0x05, 0xf0,
0x4d, 0x00, 0xd0, 0xf1, 0x58, 0x40, 0xa1, 0xc1
};
static const u8 enc_assoc118[] __initconst = {
0xff, 0xff, 0xff, 0xff
};
static const u8 enc_nonce118[] __initconst = {
0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52
};
static const u8 enc_key118[] __initconst = {
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f
};
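/*
 * Note (editorial, not in the original file): the wycheproof "edge case
 * intermediate sums in poly1305" vectors above deliberately reuse one
 * key (0x80..0x9f), one nonce, and the same 4-byte associated data
 * (0xff 0xff 0xff 0xff); only the plaintexts differ, so the poly1305
 * accumulator is steered through edge-case intermediate sums while all
 * other inputs stay fixed.
 */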
static const struct chacha20poly1305_testvec
chacha20poly1305_enc_vectors[] __initconst = {
{ enc_input001, enc_output001, enc_assoc001, enc_nonce001, enc_key001,
sizeof(enc_input001), sizeof(enc_assoc001), sizeof(enc_nonce001) },
{ enc_input002, enc_output002, enc_assoc002, enc_nonce002, enc_key002,
sizeof(enc_input002), sizeof(enc_assoc002), sizeof(enc_nonce002) },
{ enc_input003, enc_output003, enc_assoc003, enc_nonce003, enc_key003,
sizeof(enc_input003), sizeof(enc_assoc003), sizeof(enc_nonce003) },
{ enc_input004, enc_output004, enc_assoc004, enc_nonce004, enc_key004,
sizeof(enc_input004), sizeof(enc_assoc004), sizeof(enc_nonce004) },
{ enc_input005, enc_output005, enc_assoc005, enc_nonce005, enc_key005,
sizeof(enc_input005), sizeof(enc_assoc005), sizeof(enc_nonce005) },
{ enc_input006, enc_output006, enc_assoc006, enc_nonce006, enc_key006,
sizeof(enc_input006), sizeof(enc_assoc006), sizeof(enc_nonce006) },
{ enc_input007, enc_output007, enc_assoc007, enc_nonce007, enc_key007,
sizeof(enc_input007), sizeof(enc_assoc007), sizeof(enc_nonce007) },
{ enc_input008, enc_output008, enc_assoc008, enc_nonce008, enc_key008,
sizeof(enc_input008), sizeof(enc_assoc008), sizeof(enc_nonce008) },
{ enc_input009, enc_output009, enc_assoc009, enc_nonce009, enc_key009,
sizeof(enc_input009), sizeof(enc_assoc009), sizeof(enc_nonce009) },
{ enc_input010, enc_output010, enc_assoc010, enc_nonce010, enc_key010,
sizeof(enc_input010), sizeof(enc_assoc010), sizeof(enc_nonce010) },
{ enc_input011, enc_output011, enc_assoc011, enc_nonce011, enc_key011,
sizeof(enc_input011), sizeof(enc_assoc011), sizeof(enc_nonce011) },
{ enc_input012, enc_output012, enc_assoc012, enc_nonce012, enc_key012,
sizeof(enc_input012), sizeof(enc_assoc012), sizeof(enc_nonce012) },
{ enc_input013, enc_output013, enc_assoc013, enc_nonce013, enc_key013,
sizeof(enc_input013), sizeof(enc_assoc013), sizeof(enc_nonce013) },
{ enc_input014, enc_output014, enc_assoc014, enc_nonce014, enc_key014,
sizeof(enc_input014), sizeof(enc_assoc014), sizeof(enc_nonce014) },
{ enc_input015, enc_output015, enc_assoc015, enc_nonce015, enc_key015,
sizeof(enc_input015), sizeof(enc_assoc015), sizeof(enc_nonce015) },
{ enc_input016, enc_output016, enc_assoc016, enc_nonce016, enc_key016,
sizeof(enc_input016), sizeof(enc_assoc016), sizeof(enc_nonce016) },
{ enc_input017, enc_output017, enc_assoc017, enc_nonce017, enc_key017,
sizeof(enc_input017), sizeof(enc_assoc017), sizeof(enc_nonce017) },
{ enc_input018, enc_output018, enc_assoc018, enc_nonce018, enc_key018,
sizeof(enc_input018), sizeof(enc_assoc018), sizeof(enc_nonce018) },
{ enc_input019, enc_output019, enc_assoc019, enc_nonce019, enc_key019,
sizeof(enc_input019), sizeof(enc_assoc019), sizeof(enc_nonce019) },
{ enc_input020, enc_output020, enc_assoc020, enc_nonce020, enc_key020,
sizeof(enc_input020), sizeof(enc_assoc020), sizeof(enc_nonce020) },
{ enc_input021, enc_output021, enc_assoc021, enc_nonce021, enc_key021,
sizeof(enc_input021), sizeof(enc_assoc021), sizeof(enc_nonce021) },
{ enc_input022, enc_output022, enc_assoc022, enc_nonce022, enc_key022,
sizeof(enc_input022), sizeof(enc_assoc022), sizeof(enc_nonce022) },
{ enc_input023, enc_output023, enc_assoc023, enc_nonce023, enc_key023,
sizeof(enc_input023), sizeof(enc_assoc023), sizeof(enc_nonce023) },
{ enc_input024, enc_output024, enc_assoc024, enc_nonce024, enc_key024,
sizeof(enc_input024), sizeof(enc_assoc024), sizeof(enc_nonce024) },
{ enc_input025, enc_output025, enc_assoc025, enc_nonce025, enc_key025,
sizeof(enc_input025), sizeof(enc_assoc025), sizeof(enc_nonce025) },
{ enc_input026, enc_output026, enc_assoc026, enc_nonce026, enc_key026,
sizeof(enc_input026), sizeof(enc_assoc026), sizeof(enc_nonce026) },
{ enc_input027, enc_output027, enc_assoc027, enc_nonce027, enc_key027,
sizeof(enc_input027), sizeof(enc_assoc027), sizeof(enc_nonce027) },
{ enc_input028, enc_output028, enc_assoc028, enc_nonce028, enc_key028,
sizeof(enc_input028), sizeof(enc_assoc028), sizeof(enc_nonce028) },
{ enc_input029, enc_output029, enc_assoc029, enc_nonce029, enc_key029,
sizeof(enc_input029), sizeof(enc_assoc029), sizeof(enc_nonce029) },
{ enc_input030, enc_output030, enc_assoc030, enc_nonce030, enc_key030,
sizeof(enc_input030), sizeof(enc_assoc030), sizeof(enc_nonce030) },
{ enc_input031, enc_output031, enc_assoc031, enc_nonce031, enc_key031,
sizeof(enc_input031), sizeof(enc_assoc031), sizeof(enc_nonce031) },
{ enc_input032, enc_output032, enc_assoc032, enc_nonce032, enc_key032,
sizeof(enc_input032), sizeof(enc_assoc032), sizeof(enc_nonce032) },
{ enc_input033, enc_output033, enc_assoc033, enc_nonce033, enc_key033,
sizeof(enc_input033), sizeof(enc_assoc033), sizeof(enc_nonce033) },
{ enc_input034, enc_output034, enc_assoc034, enc_nonce034, enc_key034,
sizeof(enc_input034), sizeof(enc_assoc034), sizeof(enc_nonce034) },
{ enc_input035, enc_output035, enc_assoc035, enc_nonce035, enc_key035,
sizeof(enc_input035), sizeof(enc_assoc035), sizeof(enc_nonce035) },
{ enc_input036, enc_output036, enc_assoc036, enc_nonce036, enc_key036,
sizeof(enc_input036), sizeof(enc_assoc036), sizeof(enc_nonce036) },
{ enc_input037, enc_output037, enc_assoc037, enc_nonce037, enc_key037,
sizeof(enc_input037), sizeof(enc_assoc037), sizeof(enc_nonce037) },
{ enc_input038, enc_output038, enc_assoc038, enc_nonce038, enc_key038,
sizeof(enc_input038), sizeof(enc_assoc038), sizeof(enc_nonce038) },
{ enc_input039, enc_output039, enc_assoc039, enc_nonce039, enc_key039,
sizeof(enc_input039), sizeof(enc_assoc039), sizeof(enc_nonce039) },
{ enc_input040, enc_output040, enc_assoc040, enc_nonce040, enc_key040,
sizeof(enc_input040), sizeof(enc_assoc040), sizeof(enc_nonce040) },
{ enc_input041, enc_output041, enc_assoc041, enc_nonce041, enc_key041,
sizeof(enc_input041), sizeof(enc_assoc041), sizeof(enc_nonce041) },
{ enc_input042, enc_output042, enc_assoc042, enc_nonce042, enc_key042,
sizeof(enc_input042), sizeof(enc_assoc042), sizeof(enc_nonce042) },
{ enc_input043, enc_output043, enc_assoc043, enc_nonce043, enc_key043,
sizeof(enc_input043), sizeof(enc_assoc043), sizeof(enc_nonce043) },
{ enc_input044, enc_output044, enc_assoc044, enc_nonce044, enc_key044,
sizeof(enc_input044), sizeof(enc_assoc044), sizeof(enc_nonce044) },
{ enc_input045, enc_output045, enc_assoc045, enc_nonce045, enc_key045,
sizeof(enc_input045), sizeof(enc_assoc045), sizeof(enc_nonce045) },
{ enc_input046, enc_output046, enc_assoc046, enc_nonce046, enc_key046,
sizeof(enc_input046), sizeof(enc_assoc046), sizeof(enc_nonce046) },
{ enc_input047, enc_output047, enc_assoc047, enc_nonce047, enc_key047,
sizeof(enc_input047), sizeof(enc_assoc047), sizeof(enc_nonce047) },
{ enc_input048, enc_output048, enc_assoc048, enc_nonce048, enc_key048,
sizeof(enc_input048), sizeof(enc_assoc048), sizeof(enc_nonce048) },
{ enc_input049, enc_output049, enc_assoc049, enc_nonce049, enc_key049,
sizeof(enc_input049), sizeof(enc_assoc049), sizeof(enc_nonce049) },
{ enc_input050, enc_output050, enc_assoc050, enc_nonce050, enc_key050,
sizeof(enc_input050), sizeof(enc_assoc050), sizeof(enc_nonce050) },
{ enc_input051, enc_output051, enc_assoc051, enc_nonce051, enc_key051,
sizeof(enc_input051), sizeof(enc_assoc051), sizeof(enc_nonce051) },
{ enc_input052, enc_output052, enc_assoc052, enc_nonce052, enc_key052,
sizeof(enc_input052), sizeof(enc_assoc052), sizeof(enc_nonce052) },
{ enc_input053, enc_output053, enc_assoc053, enc_nonce053, enc_key053,
sizeof(enc_input053), sizeof(enc_assoc053), sizeof(enc_nonce053) },
{ enc_input054, enc_output054, enc_assoc054, enc_nonce054, enc_key054,
sizeof(enc_input054), sizeof(enc_assoc054), sizeof(enc_nonce054) },
{ enc_input055, enc_output055, enc_assoc055, enc_nonce055, enc_key055,
sizeof(enc_input055), sizeof(enc_assoc055), sizeof(enc_nonce055) },
{ enc_input056, enc_output056, enc_assoc056, enc_nonce056, enc_key056,
sizeof(enc_input056), sizeof(enc_assoc056), sizeof(enc_nonce056) },
{ enc_input057, enc_output057, enc_assoc057, enc_nonce057, enc_key057,
sizeof(enc_input057), sizeof(enc_assoc057), sizeof(enc_nonce057) },
{ enc_input058, enc_output058, enc_assoc058, enc_nonce058, enc_key058,
sizeof(enc_input058), sizeof(enc_assoc058), sizeof(enc_nonce058) },
{ enc_input059, enc_output059, enc_assoc059, enc_nonce059, enc_key059,
sizeof(enc_input059), sizeof(enc_assoc059), sizeof(enc_nonce059) },
{ enc_input060, enc_output060, enc_assoc060, enc_nonce060, enc_key060,
sizeof(enc_input060), sizeof(enc_assoc060), sizeof(enc_nonce060) },
{ enc_input061, enc_output061, enc_assoc061, enc_nonce061, enc_key061,
sizeof(enc_input061), sizeof(enc_assoc061), sizeof(enc_nonce061) },
{ enc_input062, enc_output062, enc_assoc062, enc_nonce062, enc_key062,
sizeof(enc_input062), sizeof(enc_assoc062), sizeof(enc_nonce062) },
{ enc_input063, enc_output063, enc_assoc063, enc_nonce063, enc_key063,
sizeof(enc_input063), sizeof(enc_assoc063), sizeof(enc_nonce063) },
{ enc_input064, enc_output064, enc_assoc064, enc_nonce064, enc_key064,
sizeof(enc_input064), sizeof(enc_assoc064), sizeof(enc_nonce064) },
{ enc_input065, enc_output065, enc_assoc065, enc_nonce065, enc_key065,
sizeof(enc_input065), sizeof(enc_assoc065), sizeof(enc_nonce065) },
{ enc_input066, enc_output066, enc_assoc066, enc_nonce066, enc_key066,
sizeof(enc_input066), sizeof(enc_assoc066), sizeof(enc_nonce066) },
{ enc_input067, enc_output067, enc_assoc067, enc_nonce067, enc_key067,
sizeof(enc_input067), sizeof(enc_assoc067), sizeof(enc_nonce067) },
{ enc_input068, enc_output068, enc_assoc068, enc_nonce068, enc_key068,
sizeof(enc_input068), sizeof(enc_assoc068), sizeof(enc_nonce068) },
{ enc_input069, enc_output069, enc_assoc069, enc_nonce069, enc_key069,
sizeof(enc_input069), sizeof(enc_assoc069), sizeof(enc_nonce069) },
{ enc_input070, enc_output070, enc_assoc070, enc_nonce070, enc_key070,
sizeof(enc_input070), sizeof(enc_assoc070), sizeof(enc_nonce070) },
{ enc_input071, enc_output071, enc_assoc071, enc_nonce071, enc_key071,
sizeof(enc_input071), sizeof(enc_assoc071), sizeof(enc_nonce071) },
{ enc_input072, enc_output072, enc_assoc072, enc_nonce072, enc_key072,
sizeof(enc_input072), sizeof(enc_assoc072), sizeof(enc_nonce072) },
{ enc_input073, enc_output073, enc_assoc073, enc_nonce073, enc_key073,
sizeof(enc_input073), sizeof(enc_assoc073), sizeof(enc_nonce073) },
{ enc_input074, enc_output074, enc_assoc074, enc_nonce074, enc_key074,
sizeof(enc_input074), sizeof(enc_assoc074), sizeof(enc_nonce074) },
{ enc_input075, enc_output075, enc_assoc075, enc_nonce075, enc_key075,
sizeof(enc_input075), sizeof(enc_assoc075), sizeof(enc_nonce075) },
{ enc_input076, enc_output076, enc_assoc076, enc_nonce076, enc_key076,
sizeof(enc_input076), sizeof(enc_assoc076), sizeof(enc_nonce076) },
{ enc_input077, enc_output077, enc_assoc077, enc_nonce077, enc_key077,
sizeof(enc_input077), sizeof(enc_assoc077), sizeof(enc_nonce077) },
{ enc_input078, enc_output078, enc_assoc078, enc_nonce078, enc_key078,
sizeof(enc_input078), sizeof(enc_assoc078), sizeof(enc_nonce078) },
{ enc_input079, enc_output079, enc_assoc079, enc_nonce079, enc_key079,
sizeof(enc_input079), sizeof(enc_assoc079), sizeof(enc_nonce079) },
{ enc_input080, enc_output080, enc_assoc080, enc_nonce080, enc_key080,
sizeof(enc_input080), sizeof(enc_assoc080), sizeof(enc_nonce080) },
{ enc_input081, enc_output081, enc_assoc081, enc_nonce081, enc_key081,
sizeof(enc_input081), sizeof(enc_assoc081), sizeof(enc_nonce081) },
{ enc_input082, enc_output082, enc_assoc082, enc_nonce082, enc_key082,
sizeof(enc_input082), sizeof(enc_assoc082), sizeof(enc_nonce082) },
{ enc_input083, enc_output083, enc_assoc083, enc_nonce083, enc_key083,
sizeof(enc_input083), sizeof(enc_assoc083), sizeof(enc_nonce083) },
{ enc_input084, enc_output084, enc_assoc084, enc_nonce084, enc_key084,
sizeof(enc_input084), sizeof(enc_assoc084), sizeof(enc_nonce084) },
{ enc_input085, enc_output085, enc_assoc085, enc_nonce085, enc_key085,
sizeof(enc_input085), sizeof(enc_assoc085), sizeof(enc_nonce085) },
{ enc_input086, enc_output086, enc_assoc086, enc_nonce086, enc_key086,
sizeof(enc_input086), sizeof(enc_assoc086), sizeof(enc_nonce086) },
{ enc_input087, enc_output087, enc_assoc087, enc_nonce087, enc_key087,
sizeof(enc_input087), sizeof(enc_assoc087), sizeof(enc_nonce087) },
{ enc_input088, enc_output088, enc_assoc088, enc_nonce088, enc_key088,
sizeof(enc_input088), sizeof(enc_assoc088), sizeof(enc_nonce088) },
{ enc_input089, enc_output089, enc_assoc089, enc_nonce089, enc_key089,
sizeof(enc_input089), sizeof(enc_assoc089), sizeof(enc_nonce089) },
{ enc_input090, enc_output090, enc_assoc090, enc_nonce090, enc_key090,
sizeof(enc_input090), sizeof(enc_assoc090), sizeof(enc_nonce090) },
{ enc_input091, enc_output091, enc_assoc091, enc_nonce091, enc_key091,
sizeof(enc_input091), sizeof(enc_assoc091), sizeof(enc_nonce091) },
{ enc_input092, enc_output092, enc_assoc092, enc_nonce092, enc_key092,
sizeof(enc_input092), sizeof(enc_assoc092), sizeof(enc_nonce092) },
{ enc_input093, enc_output093, enc_assoc093, enc_nonce093, enc_key093,
sizeof(enc_input093), sizeof(enc_assoc093), sizeof(enc_nonce093) },
{ enc_input094, enc_output094, enc_assoc094, enc_nonce094, enc_key094,
sizeof(enc_input094), sizeof(enc_assoc094), sizeof(enc_nonce094) },
{ enc_input095, enc_output095, enc_assoc095, enc_nonce095, enc_key095,
sizeof(enc_input095), sizeof(enc_assoc095), sizeof(enc_nonce095) },
{ enc_input096, enc_output096, enc_assoc096, enc_nonce096, enc_key096,
sizeof(enc_input096), sizeof(enc_assoc096), sizeof(enc_nonce096) },
{ enc_input097, enc_output097, enc_assoc097, enc_nonce097, enc_key097,
sizeof(enc_input097), sizeof(enc_assoc097), sizeof(enc_nonce097) },
{ enc_input098, enc_output098, enc_assoc098, enc_nonce098, enc_key098,
sizeof(enc_input098), sizeof(enc_assoc098), sizeof(enc_nonce098) },
{ enc_input099, enc_output099, enc_assoc099, enc_nonce099, enc_key099,
sizeof(enc_input099), sizeof(enc_assoc099), sizeof(enc_nonce099) },
{ enc_input100, enc_output100, enc_assoc100, enc_nonce100, enc_key100,
sizeof(enc_input100), sizeof(enc_assoc100), sizeof(enc_nonce100) },
{ enc_input101, enc_output101, enc_assoc101, enc_nonce101, enc_key101,
sizeof(enc_input101), sizeof(enc_assoc101), sizeof(enc_nonce101) },
{ enc_input102, enc_output102, enc_assoc102, enc_nonce102, enc_key102,
sizeof(enc_input102), sizeof(enc_assoc102), sizeof(enc_nonce102) },
{ enc_input103, enc_output103, enc_assoc103, enc_nonce103, enc_key103,
sizeof(enc_input103), sizeof(enc_assoc103), sizeof(enc_nonce103) },
{ enc_input104, enc_output104, enc_assoc104, enc_nonce104, enc_key104,
sizeof(enc_input104), sizeof(enc_assoc104), sizeof(enc_nonce104) },
{ enc_input105, enc_output105, enc_assoc105, enc_nonce105, enc_key105,
sizeof(enc_input105), sizeof(enc_assoc105), sizeof(enc_nonce105) },
{ enc_input106, enc_output106, enc_assoc106, enc_nonce106, enc_key106,
sizeof(enc_input106), sizeof(enc_assoc106), sizeof(enc_nonce106) },
{ enc_input107, enc_output107, enc_assoc107, enc_nonce107, enc_key107,
sizeof(enc_input107), sizeof(enc_assoc107), sizeof(enc_nonce107) },
{ enc_input108, enc_output108, enc_assoc108, enc_nonce108, enc_key108,
sizeof(enc_input108), sizeof(enc_assoc108), sizeof(enc_nonce108) },
{ enc_input109, enc_output109, enc_assoc109, enc_nonce109, enc_key109,
sizeof(enc_input109), sizeof(enc_assoc109), sizeof(enc_nonce109) },
{ enc_input110, enc_output110, enc_assoc110, enc_nonce110, enc_key110,
sizeof(enc_input110), sizeof(enc_assoc110), sizeof(enc_nonce110) },
{ enc_input111, enc_output111, enc_assoc111, enc_nonce111, enc_key111,
sizeof(enc_input111), sizeof(enc_assoc111), sizeof(enc_nonce111) },
{ enc_input112, enc_output112, enc_assoc112, enc_nonce112, enc_key112,
sizeof(enc_input112), sizeof(enc_assoc112), sizeof(enc_nonce112) },
{ enc_input113, enc_output113, enc_assoc113, enc_nonce113, enc_key113,
sizeof(enc_input113), sizeof(enc_assoc113), sizeof(enc_nonce113) },
{ enc_input114, enc_output114, enc_assoc114, enc_nonce114, enc_key114,
sizeof(enc_input114), sizeof(enc_assoc114), sizeof(enc_nonce114) },
{ enc_input115, enc_output115, enc_assoc115, enc_nonce115, enc_key115,
sizeof(enc_input115), sizeof(enc_assoc115), sizeof(enc_nonce115) },
{ enc_input116, enc_output116, enc_assoc116, enc_nonce116, enc_key116,
sizeof(enc_input116), sizeof(enc_assoc116), sizeof(enc_nonce116) },
{ enc_input117, enc_output117, enc_assoc117, enc_nonce117, enc_key117,
sizeof(enc_input117), sizeof(enc_assoc117), sizeof(enc_nonce117) },
{ enc_input118, enc_output118, enc_assoc118, enc_nonce118, enc_key118,
sizeof(enc_input118), sizeof(enc_assoc118), sizeof(enc_nonce118) }
};
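/*
 * Illustrative sketch, not part of the original selftest: one way the
 * encryption vectors above could be exercised.  The field names below
 * (input, output, assoc, nonce, key, ilen, alen) are hypothetical,
 * read off the initializer order in the table above; the
 * chacha20poly1305_encrypt() call, get_unaligned_le64(), ARRAY_SIZE()
 * and POLY1305_DIGEST_SIZE are assumed from <crypto/chacha20poly1305.h>,
 * <asm/unaligned.h>, <linux/kernel.h> and <crypto/poly1305.h>.  Each
 * expected output is the ciphertext followed by the 16-byte poly1305
 * tag, hence the "+ POLY1305_DIGEST_SIZE" length below.
 *
 *	static bool __init check_enc_vectors(void)
 *	{
 *		u8 computed[1024];	// large enough for every vector above
 *		size_t i;
 *
 *		for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) {
 *			const struct chacha20poly1305_testvec *v =
 *				&chacha20poly1305_enc_vectors[i];
 *
 *			if (v->ilen + POLY1305_DIGEST_SIZE > sizeof(computed))
 *				return false;
 *			chacha20poly1305_encrypt(computed, v->input, v->ilen,
 *						 v->assoc, v->alen,
 *						 get_unaligned_le64(v->nonce),
 *						 v->key);
 *			if (memcmp(computed, v->output,
 *				   v->ilen + POLY1305_DIGEST_SIZE))
 *				return false;
 *		}
 *		return true;
 *	}
 */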
static const u8 dec_input001[] __initconst = {
0x64, 0xa0, 0x86, 0x15, 0x75, 0x86, 0x1a, 0xf4,
0x60, 0xf0, 0x62, 0xc7, 0x9b, 0xe6, 0x43, 0xbd,
0x5e, 0x80, 0x5c, 0xfd, 0x34, 0x5c, 0xf3, 0x89,
0xf1, 0x08, 0x67, 0x0a, 0xc7, 0x6c, 0x8c, 0xb2,
0x4c, 0x6c, 0xfc, 0x18, 0x75, 0x5d, 0x43, 0xee,
0xa0, 0x9e, 0xe9, 0x4e, 0x38, 0x2d, 0x26, 0xb0,
0xbd, 0xb7, 0xb7, 0x3c, 0x32, 0x1b, 0x01, 0x00,
0xd4, 0xf0, 0x3b, 0x7f, 0x35, 0x58, 0x94, 0xcf,
0x33, 0x2f, 0x83, 0x0e, 0x71, 0x0b, 0x97, 0xce,
0x98, 0xc8, 0xa8, 0x4a, 0xbd, 0x0b, 0x94, 0x81,
0x14, 0xad, 0x17, 0x6e, 0x00, 0x8d, 0x33, 0xbd,
0x60, 0xf9, 0x82, 0xb1, 0xff, 0x37, 0xc8, 0x55,
0x97, 0x97, 0xa0, 0x6e, 0xf4, 0xf0, 0xef, 0x61,
0xc1, 0x86, 0x32, 0x4e, 0x2b, 0x35, 0x06, 0x38,
0x36, 0x06, 0x90, 0x7b, 0x6a, 0x7c, 0x02, 0xb0,
0xf9, 0xf6, 0x15, 0x7b, 0x53, 0xc8, 0x67, 0xe4,
0xb9, 0x16, 0x6c, 0x76, 0x7b, 0x80, 0x4d, 0x46,
0xa5, 0x9b, 0x52, 0x16, 0xcd, 0xe7, 0xa4, 0xe9,
0x90, 0x40, 0xc5, 0xa4, 0x04, 0x33, 0x22, 0x5e,
0xe2, 0x82, 0xa1, 0xb0, 0xa0, 0x6c, 0x52, 0x3e,
0xaf, 0x45, 0x34, 0xd7, 0xf8, 0x3f, 0xa1, 0x15,
0x5b, 0x00, 0x47, 0x71, 0x8c, 0xbc, 0x54, 0x6a,
0x0d, 0x07, 0x2b, 0x04, 0xb3, 0x56, 0x4e, 0xea,
0x1b, 0x42, 0x22, 0x73, 0xf5, 0x48, 0x27, 0x1a,
0x0b, 0xb2, 0x31, 0x60, 0x53, 0xfa, 0x76, 0x99,
0x19, 0x55, 0xeb, 0xd6, 0x31, 0x59, 0x43, 0x4e,
0xce, 0xbb, 0x4e, 0x46, 0x6d, 0xae, 0x5a, 0x10,
0x73, 0xa6, 0x72, 0x76, 0x27, 0x09, 0x7a, 0x10,
0x49, 0xe6, 0x17, 0xd9, 0x1d, 0x36, 0x10, 0x94,
0xfa, 0x68, 0xf0, 0xff, 0x77, 0x98, 0x71, 0x30,
0x30, 0x5b, 0xea, 0xba, 0x2e, 0xda, 0x04, 0xdf,
0x99, 0x7b, 0x71, 0x4d, 0x6c, 0x6f, 0x2c, 0x29,
0xa6, 0xad, 0x5c, 0xb4, 0x02, 0x2b, 0x02, 0x70,
0x9b, 0xee, 0xad, 0x9d, 0x67, 0x89, 0x0c, 0xbb,
0x22, 0x39, 0x23, 0x36, 0xfe, 0xa1, 0x85, 0x1f,
0x38
};
static const u8 dec_output001[] __initconst = {
0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
0x9d
};
static const u8 dec_assoc001[] __initconst = {
0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x4e, 0x91
};
static const u8 dec_nonce001[] __initconst = {
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08
};
static const u8 dec_key001[] __initconst = {
0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
};
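/*
 * Note (editorial): dec_key001, dec_nonce001 and dec_assoc001 above
 * correspond to the RFC 7539 section A.5 decryption test, and
 * dec_output001 is its "Internet-Drafts are draft documents ..."
 * plaintext; dec_input001 carries the ciphertext with the 16-byte tag
 * appended.  A minimal illustrative check for this one vector, assuming
 * the bool-returning chacha20poly1305_decrypt() from
 * <crypto/chacha20poly1305.h>, might read:
 *
 *	u8 computed[sizeof(dec_output001)];
 *
 *	if (!chacha20poly1305_decrypt(computed, dec_input001,
 *				      sizeof(dec_input001),
 *				      dec_assoc001, sizeof(dec_assoc001),
 *				      get_unaligned_le64(dec_nonce001),
 *				      dec_key001))
 *		return false;	// authentication (tag) failure
 *	return !memcmp(computed, dec_output001, sizeof(dec_output001));
 */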
static const u8 dec_input002[] __initconst = {
0xea, 0xe0, 0x1e, 0x9e, 0x2c, 0x91, 0xaa, 0xe1,
0xdb, 0x5d, 0x99, 0x3f, 0x8a, 0xf7, 0x69, 0x92
};
static const u8 dec_output002[] __initconst = { };
static const u8 dec_assoc002[] __initconst = { };
static const u8 dec_nonce002[] __initconst = {
0xca, 0xbf, 0x33, 0x71, 0x32, 0x45, 0x77, 0x8e
};
static const u8 dec_key002[] __initconst = {
0x4c, 0xf5, 0x96, 0x83, 0x38, 0xe6, 0xae, 0x7f,
0x2d, 0x29, 0x25, 0x76, 0xd5, 0x75, 0x27, 0x86,
0x91, 0x9a, 0x27, 0x7a, 0xfb, 0x46, 0xc5, 0xef,
0x94, 0x81, 0x79, 0x57, 0x14, 0x59, 0x40, 0x68
};
static const u8 dec_input003[] __initconst = {
0xdd, 0x6b, 0x3b, 0x82, 0xce, 0x5a, 0xbd, 0xd6,
0xa9, 0x35, 0x83, 0xd8, 0x8c, 0x3d, 0x85, 0x77
};
static const u8 dec_output003[] __initconst = { };
static const u8 dec_assoc003[] __initconst = {
0x33, 0x10, 0x41, 0x12, 0x1f, 0xf3, 0xd2, 0x6b
};
static const u8 dec_nonce003[] __initconst = {
0x3d, 0x86, 0xb5, 0x6b, 0xc8, 0xa3, 0x1f, 0x1d
};
static const u8 dec_key003[] __initconst = {
0x2d, 0xb0, 0x5d, 0x40, 0xc8, 0xed, 0x44, 0x88,
0x34, 0xd1, 0x13, 0xaf, 0x57, 0xa1, 0xeb, 0x3a,
0x2a, 0x80, 0x51, 0x36, 0xec, 0x5b, 0xbc, 0x08,
0x93, 0x84, 0x21, 0xb5, 0x13, 0x88, 0x3c, 0x0d
};
static const u8 dec_input004[] __initconst = {
0xb7, 0x1b, 0xb0, 0x73, 0x59, 0xb0, 0x84, 0xb2,
0x6d, 0x8e, 0xab, 0x94, 0x31, 0xa1, 0xae, 0xac,
0x89
};
static const u8 dec_output004[] __initconst = {
0xa4
};
static const u8 dec_assoc004[] __initconst = {
0x6a, 0xe2, 0xad, 0x3f, 0x88, 0x39, 0x5a, 0x40
};
static const u8 dec_nonce004[] __initconst = {
0xd2, 0x32, 0x1f, 0x29, 0x28, 0xc6, 0xc4, 0xc4
};
static const u8 dec_key004[] __initconst = {
0x4b, 0x28, 0x4b, 0xa3, 0x7b, 0xbe, 0xe9, 0xf8,
0x31, 0x80, 0x82, 0xd7, 0xd8, 0xe8, 0xb5, 0xa1,
0xe2, 0x18, 0x18, 0x8a, 0x9c, 0xfa, 0xa3, 0x3d,
0x25, 0x71, 0x3e, 0x40, 0xbc, 0x54, 0x7a, 0x3e
};
static const u8 dec_input005[] __initconst = {
0xbf, 0xe1, 0x5b, 0x0b, 0xdb, 0x6b, 0xf5, 0x5e,
0x6c, 0x5d, 0x84, 0x44, 0x39, 0x81, 0xc1, 0x9c,
0xac
};
static const u8 dec_output005[] __initconst = {
0x2d
};
static const u8 dec_assoc005[] __initconst = { };
static const u8 dec_nonce005[] __initconst = {
0x20, 0x1c, 0xaa, 0x5f, 0x9c, 0xbf, 0x92, 0x30
};
static const u8 dec_key005[] __initconst = {
0x66, 0xca, 0x9c, 0x23, 0x2a, 0x4b, 0x4b, 0x31,
0x0e, 0x92, 0x89, 0x8b, 0xf4, 0x93, 0xc7, 0x87,
0x98, 0xa3, 0xd8, 0x39, 0xf8, 0xf4, 0xa7, 0x01,
0xc0, 0x2e, 0x0a, 0xa6, 0x7e, 0x5a, 0x78, 0x87
};
static const u8 dec_input006[] __initconst = {
0x8b, 0x06, 0xd3, 0x31, 0xb0, 0x93, 0x45, 0xb1,
0x75, 0x6e, 0x26, 0xf9, 0x67, 0xbc, 0x90, 0x15,
0x81, 0x2c, 0xb5, 0xf0, 0xc6, 0x2b, 0xc7, 0x8c,
0x56, 0xd1, 0xbf, 0x69, 0x6c, 0x07, 0xa0, 0xda,
0x65, 0x27, 0xc9, 0x90, 0x3d, 0xef, 0x4b, 0x11,
0x0f, 0x19, 0x07, 0xfd, 0x29, 0x92, 0xd9, 0xc8,
0xf7, 0x99, 0x2e, 0x4a, 0xd0, 0xb8, 0x2c, 0xdc,
0x93, 0xf5, 0x9e, 0x33, 0x78, 0xd1, 0x37, 0xc3,
0x66, 0xd7, 0x5e, 0xbc, 0x44, 0xbf, 0x53, 0xa5,
0xbc, 0xc4, 0xcb, 0x7b, 0x3a, 0x8e, 0x7f, 0x02,
0xbd, 0xbb, 0xe7, 0xca, 0xa6, 0x6c, 0x6b, 0x93,
0x21, 0x93, 0x10, 0x61, 0xe7, 0x69, 0xd0, 0x78,
0xf3, 0x07, 0x5a, 0x1a, 0x8f, 0x73, 0xaa, 0xb1,
0x4e, 0xd3, 0xda, 0x4f, 0xf3, 0x32, 0xe1, 0x66,
0x3e, 0x6c, 0xc6, 0x13, 0xba, 0x06, 0x5b, 0xfc,
0x6a, 0xe5, 0x6f, 0x60, 0xfb, 0x07, 0x40, 0xb0,
0x8c, 0x9d, 0x84, 0x43, 0x6b, 0xc1, 0xf7, 0x8d,
0x8d, 0x31, 0xf7, 0x7a, 0x39, 0x4d, 0x8f, 0x9a,
0xeb
};
static const u8 dec_output006[] __initconst = {
0x33, 0x2f, 0x94, 0xc1, 0xa4, 0xef, 0xcc, 0x2a,
0x5b, 0xa6, 0xe5, 0x8f, 0x1d, 0x40, 0xf0, 0x92,
0x3c, 0xd9, 0x24, 0x11, 0xa9, 0x71, 0xf9, 0x37,
0x14, 0x99, 0xfa, 0xbe, 0xe6, 0x80, 0xde, 0x50,
0xc9, 0x96, 0xd4, 0xb0, 0xec, 0x9e, 0x17, 0xec,
0xd2, 0x5e, 0x72, 0x99, 0xfc, 0x0a, 0xe1, 0xcb,
0x48, 0xd2, 0x85, 0xdd, 0x2f, 0x90, 0xe0, 0x66,
0x3b, 0xe6, 0x20, 0x74, 0xbe, 0x23, 0x8f, 0xcb,
0xb4, 0xe4, 0xda, 0x48, 0x40, 0xa6, 0xd1, 0x1b,
0xc7, 0x42, 0xce, 0x2f, 0x0c, 0xa6, 0x85, 0x6e,
0x87, 0x37, 0x03, 0xb1, 0x7c, 0x25, 0x96, 0xa3,
0x05, 0xd8, 0xb0, 0xf4, 0xed, 0xea, 0xc2, 0xf0,
0x31, 0x98, 0x6c, 0xd1, 0x14, 0x25, 0xc0, 0xcb,
0x01, 0x74, 0xd0, 0x82, 0xf4, 0x36, 0xf5, 0x41,
0xd5, 0xdc, 0xca, 0xc5, 0xbb, 0x98, 0xfe, 0xfc,
0x69, 0x21, 0x70, 0xd8, 0xa4, 0x4b, 0xc8, 0xde,
0x8f
};
static const u8 dec_assoc006[] __initconst = {
0x70, 0xd3, 0x33, 0xf3, 0x8b, 0x18, 0x0b
};
static const u8 dec_nonce006[] __initconst = {
0xdf, 0x51, 0x84, 0x82, 0x42, 0x0c, 0x75, 0x9c
};
static const u8 dec_key006[] __initconst = {
0x68, 0x7b, 0x8d, 0x8e, 0xe3, 0xc4, 0xdd, 0xae,
0xdf, 0x72, 0x7f, 0x53, 0x72, 0x25, 0x1e, 0x78,
0x91, 0xcb, 0x69, 0x76, 0x1f, 0x49, 0x93, 0xf9,
0x6f, 0x21, 0xcc, 0x39, 0x9c, 0xad, 0xb1, 0x01
};
static const u8 dec_input007[] __initconst = {
0x85, 0x04, 0xc2, 0xed, 0x8d, 0xfd, 0x97, 0x5c,
0xd2, 0xb7, 0xe2, 0xc1, 0x6b, 0xa3, 0xba, 0xf8,
0xc9, 0x50, 0xc3, 0xc6, 0xa5, 0xe3, 0xa4, 0x7c,
0xc3, 0x23, 0x49, 0x5e, 0xa9, 0xb9, 0x32, 0xeb,
0x8a, 0x7c, 0xca, 0xe5, 0xec, 0xfb, 0x7c, 0xc0,
0xcb, 0x7d, 0xdc, 0x2c, 0x9d, 0x92, 0x55, 0x21,
0x0a, 0xc8, 0x43, 0x63, 0x59, 0x0a, 0x31, 0x70,
0x82, 0x67, 0x41, 0x03, 0xf8, 0xdf, 0xf2, 0xac,
0xa7, 0x02, 0xd4, 0xd5, 0x8a, 0x2d, 0xc8, 0x99,
0x19, 0x66, 0xd0, 0xf6, 0x88, 0x2c, 0x77, 0xd9,
0xd4, 0x0d, 0x6c, 0xbd, 0x98, 0xde, 0xe7, 0x7f,
0xad, 0x7e, 0x8a, 0xfb, 0xe9, 0x4b, 0xe5, 0xf7,
0xe5, 0x50, 0xa0, 0x90, 0x3f, 0xd6, 0x22, 0x53,
0xe3, 0xfe, 0x1b, 0xcc, 0x79, 0x3b, 0xec, 0x12,
0x47, 0x52, 0xa7, 0xd6, 0x04, 0xe3, 0x52, 0xe6,
0x93, 0x90, 0x91, 0x32, 0x73, 0x79, 0xb8, 0xd0,
0x31, 0xde, 0x1f, 0x9f, 0x2f, 0x05, 0x38, 0x54,
0x2f, 0x35, 0x04, 0x39, 0xe0, 0xa7, 0xba, 0xc6,
0x52, 0xf6, 0x37, 0x65, 0x4c, 0x07, 0xa9, 0x7e,
0xb3, 0x21, 0x6f, 0x74, 0x8c, 0xc9, 0xde, 0xdb,
0x65, 0x1b, 0x9b, 0xaa, 0x60, 0xb1, 0x03, 0x30,
0x6b, 0xb2, 0x03, 0xc4, 0x1c, 0x04, 0xf8, 0x0f,
0x64, 0xaf, 0x46, 0xe4, 0x65, 0x99, 0x49, 0xe2,
0xea, 0xce, 0x78, 0x00, 0xd8, 0x8b, 0xd5, 0x2e,
0xcf, 0xfc, 0x40, 0x49, 0xe8, 0x58, 0xdc, 0x34,
0x9c, 0x8c, 0x61, 0xbf, 0x0a, 0x8e, 0xec, 0x39,
0xa9, 0x30, 0x05, 0x5a, 0xd2, 0x56, 0x01, 0xc7,
0xda, 0x8f, 0x4e, 0xbb, 0x43, 0xa3, 0x3a, 0xf9,
0x15, 0x2a, 0xd0, 0xa0, 0x7a, 0x87, 0x34, 0x82,
0xfe, 0x8a, 0xd1, 0x2d, 0x5e, 0xc7, 0xbf, 0x04,
0x53, 0x5f, 0x3b, 0x36, 0xd4, 0x25, 0x5c, 0x34,
0x7a, 0x8d, 0xd5, 0x05, 0xce, 0x72, 0xca, 0xef,
0x7a, 0x4b, 0xbc, 0xb0, 0x10, 0x5c, 0x96, 0x42,
0x3a, 0x00, 0x98, 0xcd, 0x15, 0xe8, 0xb7, 0x53
};
static const u8 dec_output007[] __initconst = {
0x9b, 0x18, 0xdb, 0xdd, 0x9a, 0x0f, 0x3e, 0xa5,
0x15, 0x17, 0xde, 0xdf, 0x08, 0x9d, 0x65, 0x0a,
0x67, 0x30, 0x12, 0xe2, 0x34, 0x77, 0x4b, 0xc1,
0xd9, 0xc6, 0x1f, 0xab, 0xc6, 0x18, 0x50, 0x17,
0xa7, 0x9d, 0x3c, 0xa6, 0xc5, 0x35, 0x8c, 0x1c,
0xc0, 0xa1, 0x7c, 0x9f, 0x03, 0x89, 0xca, 0xe1,
0xe6, 0xe9, 0xd4, 0xd3, 0x88, 0xdb, 0xb4, 0x51,
0x9d, 0xec, 0xb4, 0xfc, 0x52, 0xee, 0x6d, 0xf1,
0x75, 0x42, 0xc6, 0xfd, 0xbd, 0x7a, 0x8e, 0x86,
0xfc, 0x44, 0xb3, 0x4f, 0xf3, 0xea, 0x67, 0x5a,
0x41, 0x13, 0xba, 0xb0, 0xdc, 0xe1, 0xd3, 0x2a,
0x7c, 0x22, 0xb3, 0xca, 0xac, 0x6a, 0x37, 0x98,
0x3e, 0x1d, 0x40, 0x97, 0xf7, 0x9b, 0x1d, 0x36,
0x6b, 0xb3, 0x28, 0xbd, 0x60, 0x82, 0x47, 0x34,
0xaa, 0x2f, 0x7d, 0xe9, 0xa8, 0x70, 0x81, 0x57,
0xd4, 0xb9, 0x77, 0x0a, 0x9d, 0x29, 0xa7, 0x84,
0x52, 0x4f, 0xc2, 0x4a, 0x40, 0x3b, 0x3c, 0xd4,
0xc9, 0x2a, 0xdb, 0x4a, 0x53, 0xc4, 0xbe, 0x80,
0xe9, 0x51, 0x7f, 0x8f, 0xc7, 0xa2, 0xce, 0x82,
0x5c, 0x91, 0x1e, 0x74, 0xd9, 0xd0, 0xbd, 0xd5,
0xf3, 0xfd, 0xda, 0x4d, 0x25, 0xb4, 0xbb, 0x2d,
0xac, 0x2f, 0x3d, 0x71, 0x85, 0x7b, 0xcf, 0x3c,
0x7b, 0x3e, 0x0e, 0x22, 0x78, 0x0c, 0x29, 0xbf,
0xe4, 0xf4, 0x57, 0xb3, 0xcb, 0x49, 0xa0, 0xfc,
0x1e, 0x05, 0x4e, 0x16, 0xbc, 0xd5, 0xa8, 0xa3,
0xee, 0x05, 0x35, 0xc6, 0x7c, 0xab, 0x60, 0x14,
0x55, 0x1a, 0x8e, 0xc5, 0x88, 0x5d, 0xd5, 0x81,
0xc2, 0x81, 0xa5, 0xc4, 0x60, 0xdb, 0xaf, 0x77,
0x91, 0xe1, 0xce, 0xa2, 0x7e, 0x7f, 0x42, 0xe3,
0xb0, 0x13, 0x1c, 0x1f, 0x25, 0x60, 0x21, 0xe2,
0x40, 0x5f, 0x99, 0xb7, 0x73, 0xec, 0x9b, 0x2b,
0xf0, 0x65, 0x11, 0xc8, 0xd0, 0x0a, 0x9f, 0xd3
};
static const u8 dec_assoc007[] __initconst = { };
static const u8 dec_nonce007[] __initconst = {
0xde, 0x7b, 0xef, 0xc3, 0x65, 0x1b, 0x68, 0xb0
};
static const u8 dec_key007[] __initconst = {
0x8d, 0xb8, 0x91, 0x48, 0xf0, 0xe7, 0x0a, 0xbd,
0xf9, 0x3f, 0xcd, 0xd9, 0xa0, 0x1e, 0x42, 0x4c,
0xe7, 0xde, 0x25, 0x3d, 0xa3, 0xd7, 0x05, 0x80,
0x8d, 0xf2, 0x82, 0xac, 0x44, 0x16, 0x51, 0x01
};
static const u8 dec_input008[] __initconst = {
0x14, 0xf6, 0x41, 0x37, 0xa6, 0xd4, 0x27, 0xcd,
0xdb, 0x06, 0x3e, 0x9a, 0x4e, 0xab, 0xd5, 0xb1,
0x1e, 0x6b, 0xd2, 0xbc, 0x11, 0xf4, 0x28, 0x93,
0x63, 0x54, 0xef, 0xbb, 0x5e, 0x1d, 0x3a, 0x1d,
0x37, 0x3c, 0x0a, 0x6c, 0x1e, 0xc2, 0xd1, 0x2c,
0xb5, 0xa3, 0xb5, 0x7b, 0xb8, 0x8f, 0x25, 0xa6,
0x1b, 0x61, 0x1c, 0xec, 0x28, 0x58, 0x26, 0xa4,
0xa8, 0x33, 0x28, 0x25, 0x5c, 0x45, 0x05, 0xe5,
0x6c, 0x99, 0xe5, 0x45, 0xc4, 0xa2, 0x03, 0x84,
0x03, 0x73, 0x1e, 0x8c, 0x49, 0xac, 0x20, 0xdd,
0x8d, 0xb3, 0xc4, 0xf5, 0xe7, 0x4f, 0xf1, 0xed,
0xa1, 0x98, 0xde, 0xa4, 0x96, 0xdd, 0x2f, 0xab,
0xab, 0x97, 0xcf, 0x3e, 0xd2, 0x9e, 0xb8, 0x13,
0x07, 0x28, 0x29, 0x19, 0xaf, 0xfd, 0xf2, 0x49,
0x43, 0xea, 0x49, 0x26, 0x91, 0xc1, 0x07, 0xd6,
0xbb, 0x81, 0x75, 0x35, 0x0d, 0x24, 0x7f, 0xc8,
0xda, 0xd4, 0xb7, 0xeb, 0xe8, 0x5c, 0x09, 0xa2,
0x2f, 0xdc, 0x28, 0x7d, 0x3a, 0x03, 0xfa, 0x94,
0xb5, 0x1d, 0x17, 0x99, 0x36, 0xc3, 0x1c, 0x18,
0x34, 0xe3, 0x9f, 0xf5, 0x55, 0x7c, 0xb0, 0x60,
0x9d, 0xff, 0xac, 0xd4, 0x61, 0xf2, 0xad, 0xf8,
0xce, 0xc7, 0xbe, 0x5c, 0xd2, 0x95, 0xa8, 0x4b,
0x77, 0x13, 0x19, 0x59, 0x26, 0xc9, 0xb7, 0x8f,
0x6a, 0xcb, 0x2d, 0x37, 0x91, 0xea, 0x92, 0x9c,
0x94, 0x5b, 0xda, 0x0b, 0xce, 0xfe, 0x30, 0x20,
0xf8, 0x51, 0xad, 0xf2, 0xbe, 0xe7, 0xc7, 0xff,
0xb3, 0x33, 0x91, 0x6a, 0xc9, 0x1a, 0x41, 0xc9,
0x0f, 0xf3, 0x10, 0x0e, 0xfd, 0x53, 0xff, 0x6c,
0x16, 0x52, 0xd9, 0xf3, 0xf7, 0x98, 0x2e, 0xc9,
0x07, 0x31, 0x2c, 0x0c, 0x72, 0xd7, 0xc5, 0xc6,
0x08, 0x2a, 0x7b, 0xda, 0xbd, 0x7e, 0x02, 0xea,
0x1a, 0xbb, 0xf2, 0x04, 0x27, 0x61, 0x28, 0x8e,
0xf5, 0x04, 0x03, 0x1f, 0x4c, 0x07, 0x55, 0x82,
0xec, 0x1e, 0xd7, 0x8b, 0x2f, 0x65, 0x56, 0xd1,
0xd9, 0x1e, 0x3c, 0xe9, 0x1f, 0x5e, 0x98, 0x70,
0x38, 0x4a, 0x8c, 0x49, 0xc5, 0x43, 0xa0, 0xa1,
0x8b, 0x74, 0x9d, 0x4c, 0x62, 0x0d, 0x10, 0x0c,
0xf4, 0x6c, 0x8f, 0xe0, 0xaa, 0x9a, 0x8d, 0xb7,
0xe0, 0xbe, 0x4c, 0x87, 0xf1, 0x98, 0x2f, 0xcc,
0xed, 0xc0, 0x52, 0x29, 0xdc, 0x83, 0xf8, 0xfc,
0x2c, 0x0e, 0xa8, 0x51, 0x4d, 0x80, 0x0d, 0xa3,
0xfe, 0xd8, 0x37, 0xe7, 0x41, 0x24, 0xfc, 0xfb,
0x75, 0xe3, 0x71, 0x7b, 0x57, 0x45, 0xf5, 0x97,
0x73, 0x65, 0x63, 0x14, 0x74, 0xb8, 0x82, 0x9f,
0xf8, 0x60, 0x2f, 0x8a, 0xf2, 0x4e, 0xf1, 0x39,
0xda, 0x33, 0x91, 0xf8, 0x36, 0xe0, 0x8d, 0x3f,
0x1f, 0x3b, 0x56, 0xdc, 0xa0, 0x8f, 0x3c, 0x9d,
0x71, 0x52, 0xa7, 0xb8, 0xc0, 0xa5, 0xc6, 0xa2,
0x73, 0xda, 0xf4, 0x4b, 0x74, 0x5b, 0x00, 0x3d,
0x99, 0xd7, 0x96, 0xba, 0xe6, 0xe1, 0xa6, 0x96,
0x38, 0xad, 0xb3, 0xc0, 0xd2, 0xba, 0x91, 0x6b,
0xf9, 0x19, 0xdd, 0x3b, 0xbe, 0xbe, 0x9c, 0x20,
0x50, 0xba, 0xa1, 0xd0, 0xce, 0x11, 0xbd, 0x95,
0xd8, 0xd1, 0xdd, 0x33, 0x85, 0x74, 0xdc, 0xdb,
0x66, 0x76, 0x44, 0xdc, 0x03, 0x74, 0x48, 0x35,
0x98, 0xb1, 0x18, 0x47, 0x94, 0x7d, 0xff, 0x62,
0xe4, 0x58, 0x78, 0xab, 0xed, 0x95, 0x36, 0xd9,
0x84, 0x91, 0x82, 0x64, 0x41, 0xbb, 0x58, 0xe6,
0x1c, 0x20, 0x6d, 0x15, 0x6b, 0x13, 0x96, 0xe8,
0x35, 0x7f, 0xdc, 0x40, 0x2c, 0xe9, 0xbc, 0x8a,
0x4f, 0x92, 0xec, 0x06, 0x2d, 0x50, 0xdf, 0x93,
0x5d, 0x65, 0x5a, 0xa8, 0xfc, 0x20, 0x50, 0x14,
0xa9, 0x8a, 0x7e, 0x1d, 0x08, 0x1f, 0xe2, 0x99,
0xd0, 0xbe, 0xfb, 0x3a, 0x21, 0x9d, 0xad, 0x86,
0x54, 0xfd, 0x0d, 0x98, 0x1c, 0x5a, 0x6f, 0x1f,
0x9a, 0x40, 0xcd, 0xa2, 0xff, 0x6a, 0xf1, 0x54
};
static const u8 dec_output008[] __initconst = {
0xc3, 0x09, 0x94, 0x62, 0xe6, 0x46, 0x2e, 0x10,
0xbe, 0x00, 0xe4, 0xfc, 0xf3, 0x40, 0xa3, 0xe2,
0x0f, 0xc2, 0x8b, 0x28, 0xdc, 0xba, 0xb4, 0x3c,
0xe4, 0x21, 0x58, 0x61, 0xcd, 0x8b, 0xcd, 0xfb,
0xac, 0x94, 0xa1, 0x45, 0xf5, 0x1c, 0xe1, 0x12,
0xe0, 0x3b, 0x67, 0x21, 0x54, 0x5e, 0x8c, 0xaa,
0xcf, 0xdb, 0xb4, 0x51, 0xd4, 0x13, 0xda, 0xe6,
0x83, 0x89, 0xb6, 0x92, 0xe9, 0x21, 0x76, 0xa4,
0x93, 0x7d, 0x0e, 0xfd, 0x96, 0x36, 0x03, 0x91,
0x43, 0x5c, 0x92, 0x49, 0x62, 0x61, 0x7b, 0xeb,
0x43, 0x89, 0xb8, 0x12, 0x20, 0x43, 0xd4, 0x47,
0x06, 0x84, 0xee, 0x47, 0xe9, 0x8a, 0x73, 0x15,
0x0f, 0x72, 0xcf, 0xed, 0xce, 0x96, 0xb2, 0x7f,
0x21, 0x45, 0x76, 0xeb, 0x26, 0x28, 0x83, 0x6a,
0xad, 0xaa, 0xa6, 0x81, 0xd8, 0x55, 0xb1, 0xa3,
0x85, 0xb3, 0x0c, 0xdf, 0xf1, 0x69, 0x2d, 0x97,
0x05, 0x2a, 0xbc, 0x7c, 0x7b, 0x25, 0xf8, 0x80,
0x9d, 0x39, 0x25, 0xf3, 0x62, 0xf0, 0x66, 0x5e,
0xf4, 0xa0, 0xcf, 0xd8, 0xfd, 0x4f, 0xb1, 0x1f,
0x60, 0x3a, 0x08, 0x47, 0xaf, 0xe1, 0xf6, 0x10,
0x77, 0x09, 0xa7, 0x27, 0x8f, 0x9a, 0x97, 0x5a,
0x26, 0xfa, 0xfe, 0x41, 0x32, 0x83, 0x10, 0xe0,
0x1d, 0xbf, 0x64, 0x0d, 0xf4, 0x1c, 0x32, 0x35,
0xe5, 0x1b, 0x36, 0xef, 0xd4, 0x4a, 0x93, 0x4d,
0x00, 0x7c, 0xec, 0x02, 0x07, 0x8b, 0x5d, 0x7d,
0x1b, 0x0e, 0xd1, 0xa6, 0xa5, 0x5d, 0x7d, 0x57,
0x88, 0xa8, 0xcc, 0x81, 0xb4, 0x86, 0x4e, 0xb4,
0x40, 0xe9, 0x1d, 0xc3, 0xb1, 0x24, 0x3e, 0x7f,
0xcc, 0x8a, 0x24, 0x9b, 0xdf, 0x6d, 0xf0, 0x39,
0x69, 0x3e, 0x4c, 0xc0, 0x96, 0xe4, 0x13, 0xda,
0x90, 0xda, 0xf4, 0x95, 0x66, 0x8b, 0x17, 0x17,
0xfe, 0x39, 0x43, 0x25, 0xaa, 0xda, 0xa0, 0x43,
0x3c, 0xb1, 0x41, 0x02, 0xa3, 0xf0, 0xa7, 0x19,
0x59, 0xbc, 0x1d, 0x7d, 0x6c, 0x6d, 0x91, 0x09,
0x5c, 0xb7, 0x5b, 0x01, 0xd1, 0x6f, 0x17, 0x21,
0x97, 0xbf, 0x89, 0x71, 0xa5, 0xb0, 0x6e, 0x07,
0x45, 0xfd, 0x9d, 0xea, 0x07, 0xf6, 0x7a, 0x9f,
0x10, 0x18, 0x22, 0x30, 0x73, 0xac, 0xd4, 0x6b,
0x72, 0x44, 0xed, 0xd9, 0x19, 0x9b, 0x2d, 0x4a,
0x41, 0xdd, 0xd1, 0x85, 0x5e, 0x37, 0x19, 0xed,
0xd2, 0x15, 0x8f, 0x5e, 0x91, 0xdb, 0x33, 0xf2,
0xe4, 0xdb, 0xff, 0x98, 0xfb, 0xa3, 0xb5, 0xca,
0x21, 0x69, 0x08, 0xe7, 0x8a, 0xdf, 0x90, 0xff,
0x3e, 0xe9, 0x20, 0x86, 0x3c, 0xe9, 0xfc, 0x0b,
0xfe, 0x5c, 0x61, 0xaa, 0x13, 0x92, 0x7f, 0x7b,
0xec, 0xe0, 0x6d, 0xa8, 0x23, 0x22, 0xf6, 0x6b,
0x77, 0xc4, 0xfe, 0x40, 0x07, 0x3b, 0xb6, 0xf6,
0x8e, 0x5f, 0xd4, 0xb9, 0xb7, 0x0f, 0x21, 0x04,
0xef, 0x83, 0x63, 0x91, 0x69, 0x40, 0xa3, 0x48,
0x5c, 0xd2, 0x60, 0xf9, 0x4f, 0x6c, 0x47, 0x8b,
0x3b, 0xb1, 0x9f, 0x8e, 0xee, 0x16, 0x8a, 0x13,
0xfc, 0x46, 0x17, 0xc3, 0xc3, 0x32, 0x56, 0xf8,
0x3c, 0x85, 0x3a, 0xb6, 0x3e, 0xaa, 0x89, 0x4f,
0xb3, 0xdf, 0x38, 0xfd, 0xf1, 0xe4, 0x3a, 0xc0,
0xe6, 0x58, 0xb5, 0x8f, 0xc5, 0x29, 0xa2, 0x92,
0x4a, 0xb6, 0xa0, 0x34, 0x7f, 0xab, 0xb5, 0x8a,
0x90, 0xa1, 0xdb, 0x4d, 0xca, 0xb6, 0x2c, 0x41,
0x3c, 0xf7, 0x2b, 0x21, 0xc3, 0xfd, 0xf4, 0x17,
0x5c, 0xb5, 0x33, 0x17, 0x68, 0x2b, 0x08, 0x30,
0xf3, 0xf7, 0x30, 0x3c, 0x96, 0xe6, 0x6a, 0x20,
0x97, 0xe7, 0x4d, 0x10, 0x5f, 0x47, 0x5f, 0x49,
0x96, 0x09, 0xf0, 0x27, 0x91, 0xc8, 0xf8, 0x5a,
0x2e, 0x79, 0xb5, 0xe2, 0xb8, 0xe8, 0xb9, 0x7b,
0xd5, 0x10, 0xcb, 0xff, 0x5d, 0x14, 0x73, 0xf3
};
static const u8 dec_assoc008[] __initconst = { };
static const u8 dec_nonce008[] __initconst = {
0x0e, 0x0d, 0x57, 0xbb, 0x7b, 0x40, 0x54, 0x02
};
static const u8 dec_key008[] __initconst = {
0xf2, 0xaa, 0x4f, 0x99, 0xfd, 0x3e, 0xa8, 0x53,
0xc1, 0x44, 0xe9, 0x81, 0x18, 0xdc, 0xf5, 0xf0,
0x3e, 0x44, 0x15, 0x59, 0xe0, 0xc5, 0x44, 0x86,
0xc3, 0x91, 0xa8, 0x75, 0xc0, 0x12, 0x46, 0xba
};
static const u8 dec_input009[] __initconst = {
0xfd, 0x81, 0x8d, 0xd0, 0x3d, 0xb4, 0xd5, 0xdf,
0xd3, 0x42, 0x47, 0x5a, 0x6d, 0x19, 0x27, 0x66,
0x4b, 0x2e, 0x0c, 0x27, 0x9c, 0x96, 0x4c, 0x72,
0x02, 0xa3, 0x65, 0xc3, 0xb3, 0x6f, 0x2e, 0xbd,
0x63, 0x8a, 0x4a, 0x5d, 0x29, 0xa2, 0xd0, 0x28,
0x48, 0xc5, 0x3d, 0x98, 0xa3, 0xbc, 0xe0, 0xbe,
0x3b, 0x3f, 0xe6, 0x8a, 0xa4, 0x7f, 0x53, 0x06,
0xfa, 0x7f, 0x27, 0x76, 0x72, 0x31, 0xa1, 0xf5,
0xd6, 0x0c, 0x52, 0x47, 0xba, 0xcd, 0x4f, 0xd7,
0xeb, 0x05, 0x48, 0x0d, 0x7c, 0x35, 0x4a, 0x09,
0xc9, 0x76, 0x71, 0x02, 0xa3, 0xfb, 0xb7, 0x1a,
0x65, 0xb7, 0xed, 0x98, 0xc6, 0x30, 0x8a, 0x00,
0xae, 0xa1, 0x31, 0xe5, 0xb5, 0x9e, 0x6d, 0x62,
0xda, 0xda, 0x07, 0x0f, 0x38, 0x38, 0xd3, 0xcb,
0xc1, 0xb0, 0xad, 0xec, 0x72, 0xec, 0xb1, 0xa2,
0x7b, 0x59, 0xf3, 0x3d, 0x2b, 0xef, 0xcd, 0x28,
0x5b, 0x83, 0xcc, 0x18, 0x91, 0x88, 0xb0, 0x2e,
0xf9, 0x29, 0x31, 0x18, 0xf9, 0x4e, 0xe9, 0x0a,
0x91, 0x92, 0x9f, 0xae, 0x2d, 0xad, 0xf4, 0xe6,
0x1a, 0xe2, 0xa4, 0xee, 0x47, 0x15, 0xbf, 0x83,
0x6e, 0xd7, 0x72, 0x12, 0x3b, 0x2d, 0x24, 0xe9,
0xb2, 0x55, 0xcb, 0x3c, 0x10, 0xf0, 0x24, 0x8a,
0x4a, 0x02, 0xea, 0x90, 0x25, 0xf0, 0xb4, 0x79,
0x3a, 0xef, 0x6e, 0xf5, 0x52, 0xdf, 0xb0, 0x0a,
0xcd, 0x24, 0x1c, 0xd3, 0x2e, 0x22, 0x74, 0xea,
0x21, 0x6f, 0xe9, 0xbd, 0xc8, 0x3e, 0x36, 0x5b,
0x19, 0xf1, 0xca, 0x99, 0x0a, 0xb4, 0xa7, 0x52,
0x1a, 0x4e, 0xf2, 0xad, 0x8d, 0x56, 0x85, 0xbb,
0x64, 0x89, 0xba, 0x26, 0xf9, 0xc7, 0xe1, 0x89,
0x19, 0x22, 0x77, 0xc3, 0xa8, 0xfc, 0xff, 0xad,
0xfe, 0xb9, 0x48, 0xae, 0x12, 0x30, 0x9f, 0x19,
0xfb, 0x1b, 0xef, 0x14, 0x87, 0x8a, 0x78, 0x71,
0xf3, 0xf4, 0xb7, 0x00, 0x9c, 0x1d, 0xb5, 0x3d,
0x49, 0x00, 0x0c, 0x06, 0xd4, 0x50, 0xf9, 0x54,
0x45, 0xb2, 0x5b, 0x43, 0xdb, 0x6d, 0xcf, 0x1a,
0xe9, 0x7a, 0x7a, 0xcf, 0xfc, 0x8a, 0x4e, 0x4d,
0x0b, 0x07, 0x63, 0x28, 0xd8, 0xe7, 0x08, 0x95,
0xdf, 0xa6, 0x72, 0x93, 0x2e, 0xbb, 0xa0, 0x42,
0x89, 0x16, 0xf1, 0xd9, 0x0c, 0xf9, 0xa1, 0x16,
0xfd, 0xd9, 0x03, 0xb4, 0x3b, 0x8a, 0xf5, 0xf6,
0xe7, 0x6b, 0x2e, 0x8e, 0x4c, 0x3d, 0xe2, 0xaf,
0x08, 0x45, 0x03, 0xff, 0x09, 0xb6, 0xeb, 0x2d,
0xc6, 0x1b, 0x88, 0x94, 0xac, 0x3e, 0xf1, 0x9f,
0x0e, 0x0e, 0x2b, 0xd5, 0x00, 0x4d, 0x3f, 0x3b,
0x53, 0xae, 0xaf, 0x1c, 0x33, 0x5f, 0x55, 0x6e,
0x8d, 0xaf, 0x05, 0x7a, 0x10, 0x34, 0xc9, 0xf4,
0x66, 0xcb, 0x62, 0x12, 0xa6, 0xee, 0xe8, 0x1c,
0x5d, 0x12, 0x86, 0xdb, 0x6f, 0x1c, 0x33, 0xc4,
0x1c, 0xda, 0x82, 0x2d, 0x3b, 0x59, 0xfe, 0xb1,
0xa4, 0x59, 0x41, 0x86, 0xd0, 0xef, 0xae, 0xfb,
0xda, 0x6d, 0x11, 0xb8, 0xca, 0xe9, 0x6e, 0xff,
0xf7, 0xa9, 0xd9, 0x70, 0x30, 0xfc, 0x53, 0xe2,
0xd7, 0xa2, 0x4e, 0xc7, 0x91, 0xd9, 0x07, 0x06,
0xaa, 0xdd, 0xb0, 0x59, 0x28, 0x1d, 0x00, 0x66,
0xc5, 0x54, 0xc2, 0xfc, 0x06, 0xda, 0x05, 0x90,
0x52, 0x1d, 0x37, 0x66, 0xee, 0xf0, 0xb2, 0x55,
0x8a, 0x5d, 0xd2, 0x38, 0x86, 0x94, 0x9b, 0xfc,
0x10, 0x4c, 0xa1, 0xb9, 0x64, 0x3e, 0x44, 0xb8,
0x5f, 0xb0, 0x0c, 0xec, 0xe0, 0xc9, 0xe5, 0x62,
0x75, 0x3f, 0x09, 0xd5, 0xf5, 0xd9, 0x26, 0xba,
0x9e, 0xd2, 0xf4, 0xb9, 0x48, 0x0a, 0xbc, 0xa2,
0xd6, 0x7c, 0x36, 0x11, 0x7d, 0x26, 0x81, 0x89,
0xcf, 0xa4, 0xad, 0x73, 0x0e, 0xee, 0xcc, 0x06,
0xa9, 0xdb, 0xb1, 0xfd, 0xfb, 0x09, 0x7f, 0x90,
0x42, 0x37, 0x2f, 0xe1, 0x9c, 0x0f, 0x6f, 0xcf,
0x43, 0xb5, 0xd9, 0x90, 0xe1, 0x85, 0xf5, 0xa8,
0xae
};
static const u8 dec_output009[] __initconst = {
0xe6, 0xc3, 0xdb, 0x63, 0x55, 0x15, 0xe3, 0x5b,
0xb7, 0x4b, 0x27, 0x8b, 0x5a, 0xdd, 0xc2, 0xe8,
0x3a, 0x6b, 0xd7, 0x81, 0x96, 0x35, 0x97, 0xca,
0xd7, 0x68, 0xe8, 0xef, 0xce, 0xab, 0xda, 0x09,
0x6e, 0xd6, 0x8e, 0xcb, 0x55, 0xb5, 0xe1, 0xe5,
0x57, 0xfd, 0xc4, 0xe3, 0xe0, 0x18, 0x4f, 0x85,
0xf5, 0x3f, 0x7e, 0x4b, 0x88, 0xc9, 0x52, 0x44,
0x0f, 0xea, 0xaf, 0x1f, 0x71, 0x48, 0x9f, 0x97,
0x6d, 0xb9, 0x6f, 0x00, 0xa6, 0xde, 0x2b, 0x77,
0x8b, 0x15, 0xad, 0x10, 0xa0, 0x2b, 0x7b, 0x41,
0x90, 0x03, 0x2d, 0x69, 0xae, 0xcc, 0x77, 0x7c,
0xa5, 0x9d, 0x29, 0x22, 0xc2, 0xea, 0xb4, 0x00,
0x1a, 0xd2, 0x7a, 0x98, 0x8a, 0xf9, 0xf7, 0x82,
0xb0, 0xab, 0xd8, 0xa6, 0x94, 0x8d, 0x58, 0x2f,
0x01, 0x9e, 0x00, 0x20, 0xfc, 0x49, 0xdc, 0x0e,
0x03, 0xe8, 0x45, 0x10, 0xd6, 0xa8, 0xda, 0x55,
0x10, 0x9a, 0xdf, 0x67, 0x22, 0x8b, 0x43, 0xab,
0x00, 0xbb, 0x02, 0xc8, 0xdd, 0x7b, 0x97, 0x17,
0xd7, 0x1d, 0x9e, 0x02, 0x5e, 0x48, 0xde, 0x8e,
0xcf, 0x99, 0x07, 0x95, 0x92, 0x3c, 0x5f, 0x9f,
0xc5, 0x8a, 0xc0, 0x23, 0xaa, 0xd5, 0x8c, 0x82,
0x6e, 0x16, 0x92, 0xb1, 0x12, 0x17, 0x07, 0xc3,
0xfb, 0x36, 0xf5, 0x6c, 0x35, 0xd6, 0x06, 0x1f,
0x9f, 0xa7, 0x94, 0xa2, 0x38, 0x63, 0x9c, 0xb0,
0x71, 0xb3, 0xa5, 0xd2, 0xd8, 0xba, 0x9f, 0x08,
0x01, 0xb3, 0xff, 0x04, 0x97, 0x73, 0x45, 0x1b,
0xd5, 0xa9, 0x9c, 0x80, 0xaf, 0x04, 0x9a, 0x85,
0xdb, 0x32, 0x5b, 0x5d, 0x1a, 0xc1, 0x36, 0x28,
0x10, 0x79, 0xf1, 0x3c, 0xbf, 0x1a, 0x41, 0x5c,
0x4e, 0xdf, 0xb2, 0x7c, 0x79, 0x3b, 0x7a, 0x62,
0x3d, 0x4b, 0xc9, 0x9b, 0x2a, 0x2e, 0x7c, 0xa2,
0xb1, 0x11, 0x98, 0xa7, 0x34, 0x1a, 0x00, 0xf3,
0xd1, 0xbc, 0x18, 0x22, 0xba, 0x02, 0x56, 0x62,
0x31, 0x10, 0x11, 0x6d, 0xe0, 0x54, 0x9d, 0x40,
0x1f, 0x26, 0x80, 0x41, 0xca, 0x3f, 0x68, 0x0f,
0x32, 0x1d, 0x0a, 0x8e, 0x79, 0xd8, 0xa4, 0x1b,
0x29, 0x1c, 0x90, 0x8e, 0xc5, 0xe3, 0xb4, 0x91,
0x37, 0x9a, 0x97, 0x86, 0x99, 0xd5, 0x09, 0xc5,
0xbb, 0xa3, 0x3f, 0x21, 0x29, 0x82, 0x14, 0x5c,
0xab, 0x25, 0xfb, 0xf2, 0x4f, 0x58, 0x26, 0xd4,
0x83, 0xaa, 0x66, 0x89, 0x67, 0x7e, 0xc0, 0x49,
0xe1, 0x11, 0x10, 0x7f, 0x7a, 0xda, 0x29, 0x04,
0xff, 0xf0, 0xcb, 0x09, 0x7c, 0x9d, 0xfa, 0x03,
0x6f, 0x81, 0x09, 0x31, 0x60, 0xfb, 0x08, 0xfa,
0x74, 0xd3, 0x64, 0x44, 0x7c, 0x55, 0x85, 0xec,
0x9c, 0x6e, 0x25, 0xb7, 0x6c, 0xc5, 0x37, 0xb6,
0x83, 0x87, 0x72, 0x95, 0x8b, 0x9d, 0xe1, 0x69,
0x5c, 0x31, 0x95, 0x42, 0xa6, 0x2c, 0xd1, 0x36,
0x47, 0x1f, 0xec, 0x54, 0xab, 0xa2, 0x1c, 0xd8,
0x00, 0xcc, 0xbc, 0x0d, 0x65, 0xe2, 0x67, 0xbf,
0xbc, 0xea, 0xee, 0x9e, 0xe4, 0x36, 0x95, 0xbe,
0x73, 0xd9, 0xa6, 0xd9, 0x0f, 0xa0, 0xcc, 0x82,
0x76, 0x26, 0xad, 0x5b, 0x58, 0x6c, 0x4e, 0xab,
0x29, 0x64, 0xd3, 0xd9, 0xa9, 0x08, 0x8c, 0x1d,
0xa1, 0x4f, 0x80, 0xd8, 0x3f, 0x94, 0xfb, 0xd3,
0x7b, 0xfc, 0xd1, 0x2b, 0xc3, 0x21, 0xeb, 0xe5,
0x1c, 0x84, 0x23, 0x7f, 0x4b, 0xfa, 0xdb, 0x34,
0x18, 0xa2, 0xc2, 0xe5, 0x13, 0xfe, 0x6c, 0x49,
0x81, 0xd2, 0x73, 0xe7, 0xe2, 0xd7, 0xe4, 0x4f,
0x4b, 0x08, 0x6e, 0xb1, 0x12, 0x22, 0x10, 0x9d,
0xac, 0x51, 0x1e, 0x17, 0xd9, 0x8a, 0x0b, 0x42,
0x88, 0x16, 0x81, 0x37, 0x7c, 0x6a, 0xf7, 0xef,
0x2d, 0xe3, 0xd9, 0xf8, 0x5f, 0xe0, 0x53, 0x27,
0x74, 0xb9, 0xe2, 0xd6, 0x1c, 0x80, 0x2c, 0x52,
0x65
};
static const u8 dec_assoc009[] __initconst = {
0x5a, 0x27, 0xff, 0xeb, 0xdf, 0x84, 0xb2, 0x9e,
0xef
};
static const u8 dec_nonce009[] __initconst = {
0xef, 0x2d, 0x63, 0xee, 0x6b, 0x80, 0x8b, 0x78
};
static const u8 dec_key009[] __initconst = {
0xea, 0xbc, 0x56, 0x99, 0xe3, 0x50, 0xff, 0xc5,
0xcc, 0x1a, 0xd7, 0xc1, 0x57, 0x72, 0xea, 0x86,
0x5b, 0x89, 0x88, 0x61, 0x3d, 0x2f, 0x9b, 0xb2,
0xe7, 0x9c, 0xec, 0x74, 0x6e, 0x3e, 0xf4, 0x3b
};
static const u8 dec_input010[] __initconst = {
0xe5, 0x26, 0xa4, 0x3d, 0xbd, 0x33, 0xd0, 0x4b,
0x6f, 0x05, 0xa7, 0x6e, 0x12, 0x7a, 0xd2, 0x74,
0xa6, 0xdd, 0xbd, 0x95, 0xeb, 0xf9, 0xa4, 0xf1,
0x59, 0x93, 0x91, 0x70, 0xd9, 0xfe, 0x9a, 0xcd,
0x53, 0x1f, 0x3a, 0xab, 0xa6, 0x7c, 0x9f, 0xa6,
0x9e, 0xbd, 0x99, 0xd9, 0xb5, 0x97, 0x44, 0xd5,
0x14, 0x48, 0x4d, 0x9d, 0xc0, 0xd0, 0x05, 0x96,
0xeb, 0x4c, 0x78, 0x55, 0x09, 0x08, 0x01, 0x02,
0x30, 0x90, 0x7b, 0x96, 0x7a, 0x7b, 0x5f, 0x30,
0x41, 0x24, 0xce, 0x68, 0x61, 0x49, 0x86, 0x57,
0x82, 0xdd, 0x53, 0x1c, 0x51, 0x28, 0x2b, 0x53,
0x6e, 0x2d, 0xc2, 0x20, 0x4c, 0xdd, 0x8f, 0x65,
0x10, 0x20, 0x50, 0xdd, 0x9d, 0x50, 0xe5, 0x71,
0x40, 0x53, 0x69, 0xfc, 0x77, 0x48, 0x11, 0xb9,
0xde, 0xa4, 0x8d, 0x58, 0xe4, 0xa6, 0x1a, 0x18,
0x47, 0x81, 0x7e, 0xfc, 0xdd, 0xf6, 0xef, 0xce,
0x2f, 0x43, 0x68, 0xd6, 0x06, 0xe2, 0x74, 0x6a,
0xad, 0x90, 0xf5, 0x37, 0xf3, 0x3d, 0x82, 0x69,
0x40, 0xe9, 0x6b, 0xa7, 0x3d, 0xa8, 0x1e, 0xd2,
0x02, 0x7c, 0xb7, 0x9b, 0xe4, 0xda, 0x8f, 0x95,
0x06, 0xc5, 0xdf, 0x73, 0xa3, 0x20, 0x9a, 0x49,
0xde, 0x9c, 0xbc, 0xee, 0x14, 0x3f, 0x81, 0x5e,
0xf8, 0x3b, 0x59, 0x3c, 0xe1, 0x68, 0x12, 0x5a,
0x3a, 0x76, 0x3a, 0x3f, 0xf7, 0x87, 0x33, 0x0a,
0x01, 0xb8, 0xd4, 0xed, 0xb6, 0xbe, 0x94, 0x5e,
0x70, 0x40, 0x56, 0x67, 0x1f, 0x50, 0x44, 0x19,
0xce, 0x82, 0x70, 0x10, 0x87, 0x13, 0x20, 0x0b,
0x4c, 0x5a, 0xb6, 0xf6, 0xa7, 0xae, 0x81, 0x75,
0x01, 0x81, 0xe6, 0x4b, 0x57, 0x7c, 0xdd, 0x6d,
0xf8, 0x1c, 0x29, 0x32, 0xf7, 0xda, 0x3c, 0x2d,
0xf8, 0x9b, 0x25, 0x6e, 0x00, 0xb4, 0xf7, 0x2f,
0xf7, 0x04, 0xf7, 0xa1, 0x56, 0xac, 0x4f, 0x1a,
0x64, 0xb8, 0x47, 0x55, 0x18, 0x7b, 0x07, 0x4d,
0xbd, 0x47, 0x24, 0x80, 0x5d, 0xa2, 0x70, 0xc5,
0xdd, 0x8e, 0x82, 0xd4, 0xeb, 0xec, 0xb2, 0x0c,
0x39, 0xd2, 0x97, 0xc1, 0xcb, 0xeb, 0xf4, 0x77,
0x59, 0xb4, 0x87, 0xef, 0xcb, 0x43, 0x2d, 0x46,
0x54, 0xd1, 0xa7, 0xd7, 0x15, 0x99, 0x0a, 0x43,
0xa1, 0xe0, 0x99, 0x33, 0x71, 0xc1, 0xed, 0xfe,
0x72, 0x46, 0x33, 0x8e, 0x91, 0x08, 0x9f, 0xc8,
0x2e, 0xca, 0xfa, 0xdc, 0x59, 0xd5, 0xc3, 0x76,
0x84, 0x9f, 0xa3, 0x37, 0x68, 0xc3, 0xf0, 0x47,
0x2c, 0x68, 0xdb, 0x5e, 0xc3, 0x49, 0x4c, 0xe8,
0x92, 0x85, 0xe2, 0x23, 0xd3, 0x3f, 0xad, 0x32,
0xe5, 0x2b, 0x82, 0xd7, 0x8f, 0x99, 0x0a, 0x59,
0x5c, 0x45, 0xd9, 0xb4, 0x51, 0x52, 0xc2, 0xae,
0xbf, 0x80, 0xcf, 0xc9, 0xc9, 0x51, 0x24, 0x2a,
0x3b, 0x3a, 0x4d, 0xae, 0xeb, 0xbd, 0x22, 0xc3,
0x0e, 0x0f, 0x59, 0x25, 0x92, 0x17, 0xe9, 0x74,
0xc7, 0x8b, 0x70, 0x70, 0x36, 0x55, 0x95, 0x75,
0x4b, 0xad, 0x61, 0x2b, 0x09, 0xbc, 0x82, 0xf2,
0x6e, 0x94, 0x43, 0xae, 0xc3, 0xd5, 0xcd, 0x8e,
0xfe, 0x5b, 0x9a, 0x88, 0x43, 0x01, 0x75, 0xb2,
0x23, 0x09, 0xf7, 0x89, 0x83, 0xe7, 0xfa, 0xf9,
0xb4, 0x9b, 0xf8, 0xef, 0xbd, 0x1c, 0x92, 0xc1,
0xda, 0x7e, 0xfe, 0x05, 0xba, 0x5a, 0xcd, 0x07,
0x6a, 0x78, 0x9e, 0x5d, 0xfb, 0x11, 0x2f, 0x79,
0x38, 0xb6, 0xc2, 0x5b, 0x6b, 0x51, 0xb4, 0x71,
0xdd, 0xf7, 0x2a, 0xe4, 0xf4, 0x72, 0x76, 0xad,
0xc2, 0xdd, 0x64, 0x5d, 0x79, 0xb6, 0xf5, 0x7a,
0x77, 0x20, 0x05, 0x3d, 0x30, 0x06, 0xd4, 0x4c,
0x0a, 0x2c, 0x98, 0x5a, 0xb9, 0xd4, 0x98, 0xa9,
0x3f, 0xc6, 0x12, 0xea, 0x3b, 0x4b, 0xc5, 0x79,
0x64, 0x63, 0x6b, 0x09, 0x54, 0x3b, 0x14, 0x27,
0xba, 0x99, 0x80, 0xc8, 0x72, 0xa8, 0x12, 0x90,
0x29, 0xba, 0x40, 0x54, 0x97, 0x2b, 0x7b, 0xfe,
0xeb, 0xcd, 0x01, 0x05, 0x44, 0x72, 0xdb, 0x99,
0xe4, 0x61, 0xc9, 0x69, 0xd6, 0xb9, 0x28, 0xd1,
0x05, 0x3e, 0xf9, 0x0b, 0x49, 0x0a, 0x49, 0xe9,
0x8d, 0x0e, 0xa7, 0x4a, 0x0f, 0xaf, 0x32, 0xd0,
0xe0, 0xb2, 0x3a, 0x55, 0x58, 0xfe, 0x5c, 0x28,
0x70, 0x51, 0x23, 0xb0, 0x7b, 0x6a, 0x5f, 0x1e,
0xb8, 0x17, 0xd7, 0x94, 0x15, 0x8f, 0xee, 0x20,
0xc7, 0x42, 0x25, 0x3e, 0x9a, 0x14, 0xd7, 0x60,
0x72, 0x39, 0x47, 0x48, 0xa9, 0xfe, 0xdd, 0x47,
0x0a, 0xb1, 0xe6, 0x60, 0x28, 0x8c, 0x11, 0x68,
0xe1, 0xff, 0xd7, 0xce, 0xc8, 0xbe, 0xb3, 0xfe,
0x27, 0x30, 0x09, 0x70, 0xd7, 0xfa, 0x02, 0x33,
0x3a, 0x61, 0x2e, 0xc7, 0xff, 0xa4, 0x2a, 0xa8,
0x6e, 0xb4, 0x79, 0x35, 0x6d, 0x4c, 0x1e, 0x38,
0xf8, 0xee, 0xd4, 0x84, 0x4e, 0x6e, 0x28, 0xa7,
0xce, 0xc8, 0xc1, 0xcf, 0x80, 0x05, 0xf3, 0x04,
0xef, 0xc8, 0x18, 0x28, 0x2e, 0x8d, 0x5e, 0x0c,
0xdf, 0xb8, 0x5f, 0x96, 0xe8, 0xc6, 0x9c, 0x2f,
0xe5, 0xa6, 0x44, 0xd7, 0xe7, 0x99, 0x44, 0x0c,
0xec, 0xd7, 0x05, 0x60, 0x97, 0xbb, 0x74, 0x77,
0x58, 0xd5, 0xbb, 0x48, 0xde, 0x5a, 0xb2, 0x54,
0x7f, 0x0e, 0x46, 0x70, 0x6a, 0x6f, 0x78, 0xa5,
0x08, 0x89, 0x05, 0x4e, 0x7e, 0xa0, 0x69, 0xb4,
0x40, 0x60, 0x55, 0x77, 0x75, 0x9b, 0x19, 0xf2,
0xd5, 0x13, 0x80, 0x77, 0xf9, 0x4b, 0x3f, 0x1e,
0xee, 0xe6, 0x76, 0x84, 0x7b, 0x8c, 0xe5, 0x27,
0xa8, 0x0a, 0x91, 0x01, 0x68, 0x71, 0x8a, 0x3f,
0x06, 0xab, 0xf6, 0xa9, 0xa5, 0xe6, 0x72, 0x92,
0xe4, 0x67, 0xe2, 0xa2, 0x46, 0x35, 0x84, 0x55,
0x7d, 0xca, 0xa8, 0x85, 0xd0, 0xf1, 0x3f, 0xbe,
0xd7, 0x34, 0x64, 0xfc, 0xae, 0xe3, 0xe4, 0x04,
0x9f, 0x66, 0x02, 0xb9, 0x88, 0x10, 0xd9, 0xc4,
0x4c, 0x31, 0x43, 0x7a, 0x93, 0xe2, 0x9b, 0x56,
0x43, 0x84, 0xdc, 0xdc, 0xde, 0x1d, 0xa4, 0x02,
0x0e, 0xc2, 0xef, 0xc3, 0xf8, 0x78, 0xd1, 0xb2,
0x6b, 0x63, 0x18, 0xc9, 0xa9, 0xe5, 0x72, 0xd8,
0xf3, 0xb9, 0xd1, 0x8a, 0xc7, 0x1a, 0x02, 0x27,
0x20, 0x77, 0x10, 0xe5, 0xc8, 0xd4, 0x4a, 0x47,
0xe5, 0xdf, 0x5f, 0x01, 0xaa, 0xb0, 0xd4, 0x10,
0xbb, 0x69, 0xe3, 0x36, 0xc8, 0xe1, 0x3d, 0x43,
0xfb, 0x86, 0xcd, 0xcc, 0xbf, 0xf4, 0x88, 0xe0,
0x20, 0xca, 0xb7, 0x1b, 0xf1, 0x2f, 0x5c, 0xee,
0xd4, 0xd3, 0xa3, 0xcc, 0xa4, 0x1e, 0x1c, 0x47,
0xfb, 0xbf, 0xfc, 0xa2, 0x41, 0x55, 0x9d, 0xf6,
0x5a, 0x5e, 0x65, 0x32, 0x34, 0x7b, 0x52, 0x8d,
0xd5, 0xd0, 0x20, 0x60, 0x03, 0xab, 0x3f, 0x8c,
0xd4, 0x21, 0xea, 0x2a, 0xd9, 0xc4, 0xd0, 0xd3,
0x65, 0xd8, 0x7a, 0x13, 0x28, 0x62, 0x32, 0x4b,
0x2c, 0x87, 0x93, 0xa8, 0xb4, 0x52, 0x45, 0x09,
0x44, 0xec, 0xec, 0xc3, 0x17, 0xdb, 0x9a, 0x4d,
0x5c, 0xa9, 0x11, 0xd4, 0x7d, 0xaf, 0x9e, 0xf1,
0x2d, 0xb2, 0x66, 0xc5, 0x1d, 0xed, 0xb7, 0xcd,
0x0b, 0x25, 0x5e, 0x30, 0x47, 0x3f, 0x40, 0xf4,
0xa1, 0xa0, 0x00, 0x94, 0x10, 0xc5, 0x6a, 0x63,
0x1a, 0xd5, 0x88, 0x92, 0x8e, 0x82, 0x39, 0x87,
0x3c, 0x78, 0x65, 0x58, 0x42, 0x75, 0x5b, 0xdd,
0x77, 0x3e, 0x09, 0x4e, 0x76, 0x5b, 0xe6, 0x0e,
0x4d, 0x38, 0xb2, 0xc0, 0xb8, 0x95, 0x01, 0x7a,
0x10, 0xe0, 0xfb, 0x07, 0xf2, 0xab, 0x2d, 0x8c,
0x32, 0xed, 0x2b, 0xc0, 0x46, 0xc2, 0xf5, 0x38,
0x83, 0xf0, 0x17, 0xec, 0xc1, 0x20, 0x6a, 0x9a,
0x0b, 0x00, 0xa0, 0x98, 0x22, 0x50, 0x23, 0xd5,
0x80, 0x6b, 0xf6, 0x1f, 0xc3, 0xcc, 0x97, 0xc9,
0x24, 0x9f, 0xf3, 0xaf, 0x43, 0x14, 0xd5, 0xa0
};
static const u8 dec_output010[] __initconst = {
0x42, 0x93, 0xe4, 0xeb, 0x97, 0xb0, 0x57, 0xbf,
0x1a, 0x8b, 0x1f, 0xe4, 0x5f, 0x36, 0x20, 0x3c,
0xef, 0x0a, 0xa9, 0x48, 0x5f, 0x5f, 0x37, 0x22,
0x3a, 0xde, 0xe3, 0xae, 0xbe, 0xad, 0x07, 0xcc,
0xb1, 0xf6, 0xf5, 0xf9, 0x56, 0xdd, 0xe7, 0x16,
0x1e, 0x7f, 0xdf, 0x7a, 0x9e, 0x75, 0xb7, 0xc7,
0xbe, 0xbe, 0x8a, 0x36, 0x04, 0xc0, 0x10, 0xf4,
0x95, 0x20, 0x03, 0xec, 0xdc, 0x05, 0xa1, 0x7d,
0xc4, 0xa9, 0x2c, 0x82, 0xd0, 0xbc, 0x8b, 0xc5,
0xc7, 0x45, 0x50, 0xf6, 0xa2, 0x1a, 0xb5, 0x46,
0x3b, 0x73, 0x02, 0xa6, 0x83, 0x4b, 0x73, 0x82,
0x58, 0x5e, 0x3b, 0x65, 0x2f, 0x0e, 0xfd, 0x2b,
0x59, 0x16, 0xce, 0xa1, 0x60, 0x9c, 0xe8, 0x3a,
0x99, 0xed, 0x8d, 0x5a, 0xcf, 0xf6, 0x83, 0xaf,
0xba, 0xd7, 0x73, 0x73, 0x40, 0x97, 0x3d, 0xca,
0xef, 0x07, 0x57, 0xe6, 0xd9, 0x70, 0x0e, 0x95,
0xae, 0xa6, 0x8d, 0x04, 0xcc, 0xee, 0xf7, 0x09,
0x31, 0x77, 0x12, 0xa3, 0x23, 0x97, 0x62, 0xb3,
0x7b, 0x32, 0xfb, 0x80, 0x14, 0x48, 0x81, 0xc3,
0xe5, 0xea, 0x91, 0x39, 0x52, 0x81, 0xa2, 0x4f,
0xe4, 0xb3, 0x09, 0xff, 0xde, 0x5e, 0xe9, 0x58,
0x84, 0x6e, 0xf9, 0x3d, 0xdf, 0x25, 0xea, 0xad,
0xae, 0xe6, 0x9a, 0xd1, 0x89, 0x55, 0xd3, 0xde,
0x6c, 0x52, 0xdb, 0x70, 0xfe, 0x37, 0xce, 0x44,
0x0a, 0xa8, 0x25, 0x5f, 0x92, 0xc1, 0x33, 0x4a,
0x4f, 0x9b, 0x62, 0x35, 0xff, 0xce, 0xc0, 0xa9,
0x60, 0xce, 0x52, 0x00, 0x97, 0x51, 0x35, 0x26,
0x2e, 0xb9, 0x36, 0xa9, 0x87, 0x6e, 0x1e, 0xcc,
0x91, 0x78, 0x53, 0x98, 0x86, 0x5b, 0x9c, 0x74,
0x7d, 0x88, 0x33, 0xe1, 0xdf, 0x37, 0x69, 0x2b,
0xbb, 0xf1, 0x4d, 0xf4, 0xd1, 0xf1, 0x39, 0x93,
0x17, 0x51, 0x19, 0xe3, 0x19, 0x1e, 0x76, 0x37,
0x25, 0xfb, 0x09, 0x27, 0x6a, 0xab, 0x67, 0x6f,
0x14, 0x12, 0x64, 0xe7, 0xc4, 0x07, 0xdf, 0x4d,
0x17, 0xbb, 0x6d, 0xe0, 0xe9, 0xb9, 0xab, 0xca,
0x10, 0x68, 0xaf, 0x7e, 0xb7, 0x33, 0x54, 0x73,
0x07, 0x6e, 0xf7, 0x81, 0x97, 0x9c, 0x05, 0x6f,
0x84, 0x5f, 0xd2, 0x42, 0xfb, 0x38, 0xcf, 0xd1,
0x2f, 0x14, 0x30, 0x88, 0x98, 0x4d, 0x5a, 0xa9,
0x76, 0xd5, 0x4f, 0x3e, 0x70, 0x6c, 0x85, 0x76,
0xd7, 0x01, 0xa0, 0x1a, 0xc8, 0x4e, 0xaa, 0xac,
0x78, 0xfe, 0x46, 0xde, 0x6a, 0x05, 0x46, 0xa7,
0x43, 0x0c, 0xb9, 0xde, 0xb9, 0x68, 0xfb, 0xce,
0x42, 0x99, 0x07, 0x4d, 0x0b, 0x3b, 0x5a, 0x30,
0x35, 0xa8, 0xf9, 0x3a, 0x73, 0xef, 0x0f, 0xdb,
0x1e, 0x16, 0x42, 0xc4, 0xba, 0xae, 0x58, 0xaa,
0xf8, 0xe5, 0x75, 0x2f, 0x1b, 0x15, 0x5c, 0xfd,
0x0a, 0x97, 0xd0, 0xe4, 0x37, 0x83, 0x61, 0x5f,
0x43, 0xa6, 0xc7, 0x3f, 0x38, 0x59, 0xe6, 0xeb,
0xa3, 0x90, 0xc3, 0xaa, 0xaa, 0x5a, 0xd3, 0x34,
0xd4, 0x17, 0xc8, 0x65, 0x3e, 0x57, 0xbc, 0x5e,
0xdd, 0x9e, 0xb7, 0xf0, 0x2e, 0x5b, 0xb2, 0x1f,
0x8a, 0x08, 0x0d, 0x45, 0x91, 0x0b, 0x29, 0x53,
0x4f, 0x4c, 0x5a, 0x73, 0x56, 0xfe, 0xaf, 0x41,
0x01, 0x39, 0x0a, 0x24, 0x3c, 0x7e, 0xbe, 0x4e,
0x53, 0xf3, 0xeb, 0x06, 0x66, 0x51, 0x28, 0x1d,
0xbd, 0x41, 0x0a, 0x01, 0xab, 0x16, 0x47, 0x27,
0x47, 0x47, 0xf7, 0xcb, 0x46, 0x0a, 0x70, 0x9e,
0x01, 0x9c, 0x09, 0xe1, 0x2a, 0x00, 0x1a, 0xd8,
0xd4, 0x79, 0x9d, 0x80, 0x15, 0x8e, 0x53, 0x2a,
0x65, 0x83, 0x78, 0x3e, 0x03, 0x00, 0x07, 0x12,
0x1f, 0x33, 0x3e, 0x7b, 0x13, 0x37, 0xf1, 0xc3,
0xef, 0xb7, 0xc1, 0x20, 0x3c, 0x3e, 0x67, 0x66,
0x5d, 0x88, 0xa7, 0x7d, 0x33, 0x50, 0x77, 0xb0,
0x28, 0x8e, 0xe7, 0x2c, 0x2e, 0x7a, 0xf4, 0x3c,
0x8d, 0x74, 0x83, 0xaf, 0x8e, 0x87, 0x0f, 0xe4,
0x50, 0xff, 0x84, 0x5c, 0x47, 0x0c, 0x6a, 0x49,
0xbf, 0x42, 0x86, 0x77, 0x15, 0x48, 0xa5, 0x90,
0x5d, 0x93, 0xd6, 0x2a, 0x11, 0xd5, 0xd5, 0x11,
0xaa, 0xce, 0xe7, 0x6f, 0xa5, 0xb0, 0x09, 0x2c,
0x8d, 0xd3, 0x92, 0xf0, 0x5a, 0x2a, 0xda, 0x5b,
0x1e, 0xd5, 0x9a, 0xc4, 0xc4, 0xf3, 0x49, 0x74,
0x41, 0xca, 0xe8, 0xc1, 0xf8, 0x44, 0xd6, 0x3c,
0xae, 0x6c, 0x1d, 0x9a, 0x30, 0x04, 0x4d, 0x27,
0x0e, 0xb1, 0x5f, 0x59, 0xa2, 0x24, 0xe8, 0xe1,
0x98, 0xc5, 0x6a, 0x4c, 0xfe, 0x41, 0xd2, 0x27,
0x42, 0x52, 0xe1, 0xe9, 0x7d, 0x62, 0xe4, 0x88,
0x0f, 0xad, 0xb2, 0x70, 0xcb, 0x9d, 0x4c, 0x27,
0x2e, 0x76, 0x1e, 0x1a, 0x63, 0x65, 0xf5, 0x3b,
0xf8, 0x57, 0x69, 0xeb, 0x5b, 0x38, 0x26, 0x39,
0x33, 0x25, 0x45, 0x3e, 0x91, 0xb8, 0xd8, 0xc7,
0xd5, 0x42, 0xc0, 0x22, 0x31, 0x74, 0xf4, 0xbc,
0x0c, 0x23, 0xf1, 0xca, 0xc1, 0x8d, 0xd7, 0xbe,
0xc9, 0x62, 0xe4, 0x08, 0x1a, 0xcf, 0x36, 0xd5,
0xfe, 0x55, 0x21, 0x59, 0x91, 0x87, 0x87, 0xdf,
0x06, 0xdb, 0xdf, 0x96, 0x45, 0x58, 0xda, 0x05,
0xcd, 0x50, 0x4d, 0xd2, 0x7d, 0x05, 0x18, 0x73,
0x6a, 0x8d, 0x11, 0x85, 0xa6, 0x88, 0xe8, 0xda,
0xe6, 0x30, 0x33, 0xa4, 0x89, 0x31, 0x75, 0xbe,
0x69, 0x43, 0x84, 0x43, 0x50, 0x87, 0xdd, 0x71,
0x36, 0x83, 0xc3, 0x78, 0x74, 0x24, 0x0a, 0xed,
0x7b, 0xdb, 0xa4, 0x24, 0x0b, 0xb9, 0x7e, 0x5d,
0xff, 0xde, 0xb1, 0xef, 0x61, 0x5a, 0x45, 0x33,
0xf6, 0x17, 0x07, 0x08, 0x98, 0x83, 0x92, 0x0f,
0x23, 0x6d, 0xe6, 0xaa, 0x17, 0x54, 0xad, 0x6a,
0xc8, 0xdb, 0x26, 0xbe, 0xb8, 0xb6, 0x08, 0xfa,
0x68, 0xf1, 0xd7, 0x79, 0x6f, 0x18, 0xb4, 0x9e,
0x2d, 0x3f, 0x1b, 0x64, 0xaf, 0x8d, 0x06, 0x0e,
0x49, 0x28, 0xe0, 0x5d, 0x45, 0x68, 0x13, 0x87,
0xfa, 0xde, 0x40, 0x7b, 0xd2, 0xc3, 0x94, 0xd5,
0xe1, 0xd9, 0xc2, 0xaf, 0x55, 0x89, 0xeb, 0xb4,
0x12, 0x59, 0xa8, 0xd4, 0xc5, 0x29, 0x66, 0x38,
0xe6, 0xac, 0x22, 0x22, 0xd9, 0x64, 0x9b, 0x34,
0x0a, 0x32, 0x9f, 0xc2, 0xbf, 0x17, 0x6c, 0x3f,
0x71, 0x7a, 0x38, 0x6b, 0x98, 0xfb, 0x49, 0x36,
0x89, 0xc9, 0xe2, 0xd6, 0xc7, 0x5d, 0xd0, 0x69,
0x5f, 0x23, 0x35, 0xc9, 0x30, 0xe2, 0xfd, 0x44,
0x58, 0x39, 0xd7, 0x97, 0xfb, 0x5c, 0x00, 0xd5,
0x4f, 0x7a, 0x1a, 0x95, 0x8b, 0x62, 0x4b, 0xce,
0xe5, 0x91, 0x21, 0x7b, 0x30, 0x00, 0xd6, 0xdd,
0x6d, 0x02, 0x86, 0x49, 0x0f, 0x3c, 0x1a, 0x27,
0x3c, 0xd3, 0x0e, 0x71, 0xf2, 0xff, 0xf5, 0x2f,
0x87, 0xac, 0x67, 0x59, 0x81, 0xa3, 0xf7, 0xf8,
0xd6, 0x11, 0x0c, 0x84, 0xa9, 0x03, 0xee, 0x2a,
0xc4, 0xf3, 0x22, 0xab, 0x7c, 0xe2, 0x25, 0xf5,
0x67, 0xa3, 0xe4, 0x11, 0xe0, 0x59, 0xb3, 0xca,
0x87, 0xa0, 0xae, 0xc9, 0xa6, 0x62, 0x1b, 0x6e,
0x4d, 0x02, 0x6b, 0x07, 0x9d, 0xfd, 0xd0, 0x92,
0x06, 0xe1, 0xb2, 0x9a, 0x4a, 0x1f, 0x1f, 0x13,
0x49, 0x99, 0x97, 0x08, 0xde, 0x7f, 0x98, 0xaf,
0x51, 0x98, 0xee, 0x2c, 0xcb, 0xf0, 0x0b, 0xc6,
0xb6, 0xb7, 0x2d, 0x9a, 0xb1, 0xac, 0xa6, 0xe3,
0x15, 0x77, 0x9d, 0x6b, 0x1a, 0xe4, 0xfc, 0x8b,
0xf2, 0x17, 0x59, 0x08, 0x04, 0x58, 0x81, 0x9d,
0x1b, 0x1b, 0x69, 0x55, 0xc2, 0xb4, 0x3c, 0x1f,
0x50, 0xf1, 0x7f, 0x77, 0x90, 0x4c, 0x66, 0x40,
0x5a, 0xc0, 0x33, 0x1f, 0xcb, 0x05, 0x6d, 0x5c,
0x06, 0x87, 0x52, 0xa2, 0x8f, 0x26, 0xd5, 0x4f
};
static const u8 dec_assoc010[] __initconst = {
0xd2, 0xa1, 0x70, 0xdb, 0x7a, 0xf8, 0xfa, 0x27,
0xba, 0x73, 0x0f, 0xbf, 0x3d, 0x1e, 0x82, 0xb2
};
static const u8 dec_nonce010[] __initconst = {
0xdb, 0x92, 0x0f, 0x7f, 0x17, 0x54, 0x0c, 0x30
};
static const u8 dec_key010[] __initconst = {
0x47, 0x11, 0xeb, 0x86, 0x2b, 0x2c, 0xab, 0x44,
0x34, 0xda, 0x7f, 0x57, 0x03, 0x39, 0x0c, 0xaf,
0x2c, 0x14, 0xfd, 0x65, 0x23, 0xe9, 0x8e, 0x74,
0xd5, 0x08, 0x68, 0x08, 0xe7, 0xb4, 0x72, 0xd7
};
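/*
 * A minimal sketch of how a vector group like 010 might be exercised,
 * assuming the kernel's chacha20poly1305_decrypt() helper from
 * <crypto/chacha20poly1305.h> and get_unaligned_le64() from
 * <asm/unaligned.h>; the actual selftest harness may differ:
 *
 *	u8 computed[sizeof(dec_output010)];
 *	bool ok = chacha20poly1305_decrypt(computed, dec_input010,
 *					   sizeof(dec_input010),
 *					   dec_assoc010,
 *					   sizeof(dec_assoc010),
 *					   get_unaligned_le64(dec_nonce010),
 *					   dec_key010);
 *	WARN_ON(!ok ||
 *		memcmp(computed, dec_output010, sizeof(computed)));
 */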
static const u8 dec_input011[] __initconst = {
0x6a, 0xfc, 0x4b, 0x25, 0xdf, 0xc0, 0xe4, 0xe8,
0x17, 0x4d, 0x4c, 0xc9, 0x7e, 0xde, 0x3a, 0xcc,
0x3c, 0xba, 0x6a, 0x77, 0x47, 0xdb, 0xe3, 0x74,
0x7a, 0x4d, 0x5f, 0x8d, 0x37, 0x55, 0x80, 0x73,
0x90, 0x66, 0x5d, 0x3a, 0x7d, 0x5d, 0x86, 0x5e,
0x8d, 0xfd, 0x83, 0xff, 0x4e, 0x74, 0x6f, 0xf9,
0xe6, 0x70, 0x17, 0x70, 0x3e, 0x96, 0xa7, 0x7e,
0xcb, 0xab, 0x8f, 0x58, 0x24, 0x9b, 0x01, 0xfd,
0xcb, 0xe6, 0x4d, 0x9b, 0xf0, 0x88, 0x94, 0x57,
0x66, 0xef, 0x72, 0x4c, 0x42, 0x6e, 0x16, 0x19,
0x15, 0xea, 0x70, 0x5b, 0xac, 0x13, 0xdb, 0x9f,
0x18, 0xe2, 0x3c, 0x26, 0x97, 0xbc, 0xdc, 0x45,
0x8c, 0x6c, 0x24, 0x69, 0x9c, 0xf7, 0x65, 0x1e,
0x18, 0x59, 0x31, 0x7c, 0xe4, 0x73, 0xbc, 0x39,
0x62, 0xc6, 0x5c, 0x9f, 0xbf, 0xfa, 0x90, 0x03,
0xc9, 0x72, 0x26, 0xb6, 0x1b, 0xc2, 0xb7, 0x3f,
0xf2, 0x13, 0x77, 0xf2, 0x8d, 0xb9, 0x47, 0xd0,
0x53, 0xdd, 0xc8, 0x91, 0x83, 0x8b, 0xb1, 0xce,
0xa3, 0xfe, 0xcd, 0xd9, 0xdd, 0x92, 0x7b, 0xdb,
0xb8, 0xfb, 0xc9, 0x2d, 0x01, 0x59, 0x39, 0x52,
0xad, 0x1b, 0xec, 0xcf, 0xd7, 0x70, 0x13, 0x21,
0xf5, 0x47, 0xaa, 0x18, 0x21, 0x5c, 0xc9, 0x9a,
0xd2, 0x6b, 0x05, 0x9c, 0x01, 0xa1, 0xda, 0x35,
0x5d, 0xb3, 0x70, 0xe6, 0xa9, 0x80, 0x8b, 0x91,
0xb7, 0xb3, 0x5f, 0x24, 0x9a, 0xb7, 0xd1, 0x6b,
0xa1, 0x1c, 0x50, 0xba, 0x49, 0xe0, 0xee, 0x2e,
0x75, 0xac, 0x69, 0xc0, 0xeb, 0x03, 0xdd, 0x19,
0xe5, 0xf6, 0x06, 0xdd, 0xc3, 0xd7, 0x2b, 0x07,
0x07, 0x30, 0xa7, 0x19, 0x0c, 0xbf, 0xe6, 0x18,
0xcc, 0xb1, 0x01, 0x11, 0x85, 0x77, 0x1d, 0x96,
0xa7, 0xa3, 0x00, 0x84, 0x02, 0xa2, 0x83, 0x68,
0xda, 0x17, 0x27, 0xc8, 0x7f, 0x23, 0xb7, 0xf4,
0x13, 0x85, 0xcf, 0xdd, 0x7a, 0x7d, 0x24, 0x57,
0xfe, 0x05, 0x93, 0xf5, 0x74, 0xce, 0xed, 0x0c,
0x20, 0x98, 0x8d, 0x92, 0x30, 0xa1, 0x29, 0x23,
0x1a, 0xa0, 0x4f, 0x69, 0x56, 0x4c, 0xe1, 0xc8,
0xce, 0xf6, 0x9a, 0x0c, 0xa4, 0xfa, 0x04, 0xf6,
0x62, 0x95, 0xf2, 0xfa, 0xc7, 0x40, 0x68, 0x40,
0x8f, 0x41, 0xda, 0xb4, 0x26, 0x6f, 0x70, 0xab,
0x40, 0x61, 0xa4, 0x0e, 0x75, 0xfb, 0x86, 0xeb,
0x9d, 0x9a, 0x1f, 0xec, 0x76, 0x99, 0xe7, 0xea,
0xaa, 0x1e, 0x2d, 0xb5, 0xd4, 0xa6, 0x1a, 0xb8,
0x61, 0x0a, 0x1d, 0x16, 0x5b, 0x98, 0xc2, 0x31,
0x40, 0xe7, 0x23, 0x1d, 0x66, 0x99, 0xc8, 0xc0,
0xd7, 0xce, 0xf3, 0x57, 0x40, 0x04, 0x3f, 0xfc,
0xea, 0xb3, 0xfc, 0xd2, 0xd3, 0x99, 0xa4, 0x94,
0x69, 0xa0, 0xef, 0xd1, 0x85, 0xb3, 0xa6, 0xb1,
0x28, 0xbf, 0x94, 0x67, 0x22, 0xc3, 0x36, 0x46,
0xf8, 0xd2, 0x0f, 0x5f, 0xf4, 0x59, 0x80, 0xe6,
0x2d, 0x43, 0x08, 0x7d, 0x19, 0x09, 0x97, 0xa7,
0x4c, 0x3d, 0x8d, 0xba, 0x65, 0x62, 0xa3, 0x71,
0x33, 0x29, 0x62, 0xdb, 0xc1, 0x33, 0x34, 0x1a,
0x63, 0x33, 0x16, 0xb6, 0x64, 0x7e, 0xab, 0x33,
0xf0, 0xe6, 0x26, 0x68, 0xba, 0x1d, 0x2e, 0x38,
0x08, 0xe6, 0x02, 0xd3, 0x25, 0x2c, 0x47, 0x23,
0x58, 0x34, 0x0f, 0x9d, 0x63, 0x4f, 0x63, 0xbb,
0x7f, 0x3b, 0x34, 0x38, 0xa7, 0xb5, 0x8d, 0x65,
0xd9, 0x9f, 0x79, 0x55, 0x3e, 0x4d, 0xe7, 0x73,
0xd8, 0xf6, 0x98, 0x97, 0x84, 0x60, 0x9c, 0xc8,
0xa9, 0x3c, 0xf6, 0xdc, 0x12, 0x5c, 0xe1, 0xbb,
0x0b, 0x8b, 0x98, 0x9c, 0x9d, 0x26, 0x7c, 0x4a,
0xe6, 0x46, 0x36, 0x58, 0x21, 0x4a, 0xee, 0xca,
0xd7, 0x3b, 0xc2, 0x6c, 0x49, 0x2f, 0xe5, 0xd5,
0x03, 0x59, 0x84, 0x53, 0xcb, 0xfe, 0x92, 0x71,
0x2e, 0x7c, 0x21, 0xcc, 0x99, 0x85, 0x7f, 0xb8,
0x74, 0x90, 0x13, 0x42, 0x3f, 0xe0, 0x6b, 0x1d,
0xf2, 0x4d, 0x54, 0xd4, 0xfc, 0x3a, 0x05, 0xe6,
0x74, 0xaf, 0xa6, 0xa0, 0x2a, 0x20, 0x23, 0x5d,
0x34, 0x5c, 0xd9, 0x3e, 0x4e, 0xfa, 0x93, 0xe7,
0xaa, 0xe9, 0x6f, 0x08, 0x43, 0x67, 0x41, 0xc5,
0xad, 0xfb, 0x31, 0x95, 0x82, 0x73, 0x32, 0xd8,
0xa6, 0xa3, 0xed, 0x0e, 0x2d, 0xf6, 0x5f, 0xfd,
0x80, 0xa6, 0x7a, 0xe0, 0xdf, 0x78, 0x15, 0x29,
0x74, 0x33, 0xd0, 0x9e, 0x83, 0x86, 0x72, 0x22,
0x57, 0x29, 0xb9, 0x9e, 0x5d, 0xd3, 0x1a, 0xb5,
0x96, 0x72, 0x41, 0x3d, 0xf1, 0x64, 0x43, 0x67,
0xee, 0xaa, 0x5c, 0xd3, 0x9a, 0x96, 0x13, 0x11,
0x5d, 0xf3, 0x0c, 0x87, 0x82, 0x1e, 0x41, 0x9e,
0xd0, 0x27, 0xd7, 0x54, 0x3b, 0x67, 0x73, 0x09,
0x91, 0xe9, 0xd5, 0x36, 0xa7, 0xb5, 0x55, 0xe4,
0xf3, 0x21, 0x51, 0x49, 0x22, 0x07, 0x55, 0x4f,
0x44, 0x4b, 0xd2, 0x15, 0x93, 0x17, 0x2a, 0xfa,
0x4d, 0x4a, 0x57, 0xdb, 0x4c, 0xa6, 0xeb, 0xec,
0x53, 0x25, 0x6c, 0x21, 0xed, 0x00, 0x4c, 0x3b,
0xca, 0x14, 0x57, 0xa9, 0xd6, 0x6a, 0xcd, 0x8d,
0x5e, 0x74, 0xac, 0x72, 0xc1, 0x97, 0xe5, 0x1b,
0x45, 0x4e, 0xda, 0xfc, 0xcc, 0x40, 0xe8, 0x48,
0x88, 0x0b, 0xa3, 0xe3, 0x8d, 0x83, 0x42, 0xc3,
0x23, 0xfd, 0x68, 0xb5, 0x8e, 0xf1, 0x9d, 0x63,
0x77, 0xe9, 0xa3, 0x8e, 0x8c, 0x26, 0x6b, 0xbd,
0x72, 0x73, 0x35, 0x0c, 0x03, 0xf8, 0x43, 0x78,
0x52, 0x71, 0x15, 0x1f, 0x71, 0x5d, 0x6e, 0xed,
0xb9, 0xcc, 0x86, 0x30, 0xdb, 0x2b, 0xd3, 0x82,
0x88, 0x23, 0x71, 0x90, 0x53, 0x5c, 0xa9, 0x2f,
0x76, 0x01, 0xb7, 0x9a, 0xfe, 0x43, 0x55, 0xa3,
0x04, 0x9b, 0x0e, 0xe4, 0x59, 0xdf, 0xc9, 0xe9,
0xb1, 0xea, 0x29, 0x28, 0x3c, 0x5c, 0xae, 0x72,
0x84, 0xb6, 0xc6, 0xeb, 0x0c, 0x27, 0x07, 0x74,
0x90, 0x0d, 0x31, 0xb0, 0x00, 0x77, 0xe9, 0x40,
0x70, 0x6f, 0x68, 0xa7, 0xfd, 0x06, 0xec, 0x4b,
0xc0, 0xb7, 0xac, 0xbc, 0x33, 0xb7, 0x6d, 0x0a,
0xbd, 0x12, 0x1b, 0x59, 0xcb, 0xdd, 0x32, 0xf5,
0x1d, 0x94, 0x57, 0x76, 0x9e, 0x0c, 0x18, 0x98,
0x71, 0xd7, 0x2a, 0xdb, 0x0b, 0x7b, 0xa7, 0x71,
0xb7, 0x67, 0x81, 0x23, 0x96, 0xae, 0xb9, 0x7e,
0x32, 0x43, 0x92, 0x8a, 0x19, 0xa0, 0xc4, 0xd4,
0x3b, 0x57, 0xf9, 0x4a, 0x2c, 0xfb, 0x51, 0x46,
0xbb, 0xcb, 0x5d, 0xb3, 0xef, 0x13, 0x93, 0x6e,
0x68, 0x42, 0x54, 0x57, 0xd3, 0x6a, 0x3a, 0x8f,
0x9d, 0x66, 0xbf, 0xbd, 0x36, 0x23, 0xf5, 0x93,
0x83, 0x7b, 0x9c, 0xc0, 0xdd, 0xc5, 0x49, 0xc0,
0x64, 0xed, 0x07, 0x12, 0xb3, 0xe6, 0xe4, 0xe5,
0x38, 0x95, 0x23, 0xb1, 0xa0, 0x3b, 0x1a, 0x61,
0xda, 0x17, 0xac, 0xc3, 0x58, 0xdd, 0x74, 0x64,
0x22, 0x11, 0xe8, 0x32, 0x1d, 0x16, 0x93, 0x85,
0x99, 0xa5, 0x9c, 0x34, 0x55, 0xb1, 0xe9, 0x20,
0x72, 0xc9, 0x28, 0x7b, 0x79, 0x00, 0xa1, 0xa6,
0xa3, 0x27, 0x40, 0x18, 0x8a, 0x54, 0xe0, 0xcc,
0xe8, 0x4e, 0x8e, 0x43, 0x96, 0xe7, 0x3f, 0xc8,
0xe9, 0xb2, 0xf9, 0xc9, 0xda, 0x04, 0x71, 0x50,
0x47, 0xe4, 0xaa, 0xce, 0xa2, 0x30, 0xc8, 0xe4,
0xac, 0xc7, 0x0d, 0x06, 0x2e, 0xe6, 0xe8, 0x80,
0x36, 0x29, 0x9e, 0x01, 0xb8, 0xc3, 0xf0, 0xa0,
0x5d, 0x7a, 0xca, 0x4d, 0xa0, 0x57, 0xbd, 0x2a,
0x45, 0xa7, 0x7f, 0x9c, 0x93, 0x07, 0x8f, 0x35,
0x67, 0x92, 0xe3, 0xe9, 0x7f, 0xa8, 0x61, 0x43,
0x9e, 0x25, 0x4f, 0x33, 0x76, 0x13, 0x6e, 0x12,
0xb9, 0xdd, 0xa4, 0x7c, 0x08, 0x9f, 0x7c, 0xe7,
0x0a, 0x8d, 0x84, 0x06, 0xa4, 0x33, 0x17, 0x34,
0x5e, 0x10, 0x7c, 0xc0, 0xa8, 0x3d, 0x1f, 0x42,
0x20, 0x51, 0x65, 0x5d, 0x09, 0xc3, 0xaa, 0xc0,
0xc8, 0x0d, 0xf0, 0x79, 0xbc, 0x20, 0x1b, 0x95,
0xe7, 0x06, 0x7d, 0x47, 0x20, 0x03, 0x1a, 0x74,
0xdd, 0xe2, 0xd4, 0xae, 0x38, 0x71, 0x9b, 0xf5,
0x80, 0xec, 0x08, 0x4e, 0x56, 0xba, 0x76, 0x12,
0x1a, 0xdf, 0x48, 0xf3, 0xae, 0xb3, 0xe6, 0xe6,
0xbe, 0xc0, 0x91, 0x2e, 0x01, 0xb3, 0x01, 0x86,
0xa2, 0xb9, 0x52, 0xd1, 0x21, 0xae, 0xd4, 0x97,
0x1d, 0xef, 0x41, 0x12, 0x95, 0x3d, 0x48, 0x45,
0x1c, 0x56, 0x32, 0x8f, 0xb8, 0x43, 0xbb, 0x19,
0xf3, 0xca, 0xe9, 0xeb, 0x6d, 0x84, 0xbe, 0x86,
0x06, 0xe2, 0x36, 0xb2, 0x62, 0x9d, 0xd3, 0x4c,
0x48, 0x18, 0x54, 0x13, 0x4e, 0xcf, 0xfd, 0xba,
0x84, 0xb9, 0x30, 0x53, 0xcf, 0xfb, 0xb9, 0x29,
0x8f, 0xdc, 0x9f, 0xef, 0x60, 0x0b, 0x64, 0xf6,
0x8b, 0xee, 0xa6, 0x91, 0xc2, 0x41, 0x6c, 0xf6,
0xfa, 0x79, 0x67, 0x4b, 0xc1, 0x3f, 0xaf, 0x09,
0x81, 0xd4, 0x5d, 0xcb, 0x09, 0xdf, 0x36, 0x31,
0xc0, 0x14, 0x3c, 0x7c, 0x0e, 0x65, 0x95, 0x99,
0x6d, 0xa3, 0xf4, 0xd7, 0x38, 0xee, 0x1a, 0x2b,
0x37, 0xe2, 0xa4, 0x3b, 0x4b, 0xd0, 0x65, 0xca,
0xf8, 0xc3, 0xe8, 0x15, 0x20, 0xef, 0xf2, 0x00,
0xfd, 0x01, 0x09, 0xc5, 0xc8, 0x17, 0x04, 0x93,
0xd0, 0x93, 0x03, 0x55, 0xc5, 0xfe, 0x32, 0xa3,
0x3e, 0x28, 0x2d, 0x3b, 0x93, 0x8a, 0xcc, 0x07,
0x72, 0x80, 0x8b, 0x74, 0x16, 0x24, 0xbb, 0xda,
0x94, 0x39, 0x30, 0x8f, 0xb1, 0xcd, 0x4a, 0x90,
0x92, 0x7c, 0x14, 0x8f, 0x95, 0x4e, 0xac, 0x9b,
0xd8, 0x8f, 0x1a, 0x87, 0xa4, 0x32, 0x27, 0x8a,
0xba, 0xf7, 0x41, 0xcf, 0x84, 0x37, 0x19, 0xe6,
0x06, 0xf5, 0x0e, 0xcf, 0x36, 0xf5, 0x9e, 0x6c,
0xde, 0xbc, 0xff, 0x64, 0x7e, 0x4e, 0x59, 0x57,
0x48, 0xfe, 0x14, 0xf7, 0x9c, 0x93, 0x5d, 0x15,
0xad, 0xcc, 0x11, 0xb1, 0x17, 0x18, 0xb2, 0x7e,
0xcc, 0xab, 0xe9, 0xce, 0x7d, 0x77, 0x5b, 0x51,
0x1b, 0x1e, 0x20, 0xa8, 0x32, 0x06, 0x0e, 0x75,
0x93, 0xac, 0xdb, 0x35, 0x37, 0x1f, 0xe9, 0x19,
0x1d, 0xb4, 0x71, 0x97, 0xd6, 0x4e, 0x2c, 0x08,
0xa5, 0x13, 0xf9, 0x0e, 0x7e, 0x78, 0x6e, 0x14,
0xe0, 0xa9, 0xb9, 0x96, 0x4c, 0x80, 0x82, 0xba,
0x17, 0xb3, 0x9d, 0x69, 0xb0, 0x84, 0x46, 0xff,
0xf9, 0x52, 0x79, 0x94, 0x58, 0x3a, 0x62, 0x90,
0x15, 0x35, 0x71, 0x10, 0x37, 0xed, 0xa1, 0x8e,
0x53, 0x6e, 0xf4, 0x26, 0x57, 0x93, 0x15, 0x93,
0xf6, 0x81, 0x2c, 0x5a, 0x10, 0xda, 0x92, 0xad,
0x2f, 0xdb, 0x28, 0x31, 0x2d, 0x55, 0x04, 0xd2,
0x06, 0x28, 0x8c, 0x1e, 0xdc, 0xea, 0x54, 0xac,
0xff, 0xb7, 0x6c, 0x30, 0x15, 0xd4, 0xb4, 0x0d,
0x00, 0x93, 0x57, 0xdd, 0xd2, 0x07, 0x07, 0x06,
0xd9, 0x43, 0x9b, 0xcd, 0x3a, 0xf4, 0x7d, 0x4c,
0x36, 0x5d, 0x23, 0xa2, 0xcc, 0x57, 0x40, 0x91,
0xe9, 0x2c, 0x2f, 0x2c, 0xd5, 0x30, 0x9b, 0x17,
0xb0, 0xc9, 0xf7, 0xa7, 0x2f, 0xd1, 0x93, 0x20,
0x6b, 0xc6, 0xc1, 0xe4, 0x6f, 0xcb, 0xd1, 0xe7,
0x09, 0x0f, 0x9e, 0xdc, 0xaa, 0x9f, 0x2f, 0xdf,
0x56, 0x9f, 0xd4, 0x33, 0x04, 0xaf, 0xd3, 0x6c,
0x58, 0x61, 0xf0, 0x30, 0xec, 0xf2, 0x7f, 0xf2,
0x9c, 0xdf, 0x39, 0xbb, 0x6f, 0xa2, 0x8c, 0x7e,
0xc4, 0x22, 0x51, 0x71, 0xc0, 0x4d, 0x14, 0x1a,
0xc4, 0xcd, 0x04, 0xd9, 0x87, 0x08, 0x50, 0x05,
0xcc, 0xaf, 0xf6, 0xf0, 0x8f, 0x92, 0x54, 0x58,
0xc2, 0xc7, 0x09, 0x7a, 0x59, 0x02, 0x05, 0xe8,
0xb0, 0x86, 0xd9, 0xbf, 0x7b, 0x35, 0x51, 0x4d,
0xaf, 0x08, 0x97, 0x2c, 0x65, 0xda, 0x2a, 0x71,
0x3a, 0xa8, 0x51, 0xcc, 0xf2, 0x73, 0x27, 0xc3,
0xfd, 0x62, 0xcf, 0xe3, 0xb2, 0xca, 0xcb, 0xbe,
0x1a, 0x0a, 0xa1, 0x34, 0x7b, 0x77, 0xc4, 0x62,
0x68, 0x78, 0x5f, 0x94, 0x07, 0x04, 0x65, 0x16,
0x4b, 0x61, 0xcb, 0xff, 0x75, 0x26, 0x50, 0x66,
0x1f, 0x6e, 0x93, 0xf8, 0xc5, 0x51, 0xeb, 0xa4,
0x4a, 0x48, 0x68, 0x6b, 0xe2, 0x5e, 0x44, 0xb2,
0x50, 0x2c, 0x6c, 0xae, 0x79, 0x4e, 0x66, 0x35,
0x81, 0x50, 0xac, 0xbc, 0x3f, 0xb1, 0x0c, 0xf3,
0x05, 0x3c, 0x4a, 0xa3, 0x6c, 0x2a, 0x79, 0xb4,
0xb7, 0xab, 0xca, 0xc7, 0x9b, 0x8e, 0xcd, 0x5f,
0x11, 0x03, 0xcb, 0x30, 0xa3, 0xab, 0xda, 0xfe,
0x64, 0xb9, 0xbb, 0xd8, 0x5e, 0x3a, 0x1a, 0x56,
0xe5, 0x05, 0x48, 0x90, 0x1e, 0x61, 0x69, 0x1b,
0x22, 0xe6, 0x1a, 0x3c, 0x75, 0xad, 0x1f, 0x37,
0x28, 0xdc, 0xe4, 0x6d, 0xbd, 0x42, 0xdc, 0xd3,
0xc8, 0xb6, 0x1c, 0x48, 0xfe, 0x94, 0x77, 0x7f,
0xbd, 0x62, 0xac, 0xa3, 0x47, 0x27, 0xcf, 0x5f,
0xd9, 0xdb, 0xaf, 0xec, 0xf7, 0x5e, 0xc1, 0xb0,
0x9d, 0x01, 0x26, 0x99, 0x7e, 0x8f, 0x03, 0x70,
0xb5, 0x42, 0xbe, 0x67, 0x28, 0x1b, 0x7c, 0xbd,
0x61, 0x21, 0x97, 0xcc, 0x5c, 0xe1, 0x97, 0x8f,
0x8d, 0xde, 0x2b, 0xaa, 0xa7, 0x71, 0x1d, 0x1e,
0x02, 0x73, 0x70, 0x58, 0x32, 0x5b, 0x1d, 0x67,
0x3d, 0xe0, 0x74, 0x4f, 0x03, 0xf2, 0x70, 0x51,
0x79, 0xf1, 0x61, 0x70, 0x15, 0x74, 0x9d, 0x23,
0x89, 0xde, 0xac, 0xfd, 0xde, 0xd0, 0x1f, 0xc3,
0x87, 0x44, 0x35, 0x4b, 0xe5, 0xb0, 0x60, 0xc5,
0x22, 0xe4, 0x9e, 0xca, 0xeb, 0xd5, 0x3a, 0x09,
0x45, 0xa4, 0xdb, 0xfa, 0x3f, 0xeb, 0x1b, 0xc7,
0xc8, 0x14, 0x99, 0x51, 0x92, 0x10, 0xed, 0xed,
0x28, 0xe0, 0xa1, 0xf8, 0x26, 0xcf, 0xcd, 0xcb,
0x63, 0xa1, 0x3b, 0xe3, 0xdf, 0x7e, 0xfe, 0xa6,
0xf0, 0x81, 0x9a, 0xbf, 0x55, 0xde, 0x54, 0xd5,
0x56, 0x60, 0x98, 0x10, 0x68, 0xf4, 0x38, 0x96,
0x8e, 0x6f, 0x1d, 0x44, 0x7f, 0xd6, 0x2f, 0xfe,
0x55, 0xfb, 0x0c, 0x7e, 0x67, 0xe2, 0x61, 0x44,
0xed, 0xf2, 0x35, 0x30, 0x5d, 0xe9, 0xc7, 0xd6,
0x6d, 0xe0, 0xa0, 0xed, 0xf3, 0xfc, 0xd8, 0x3e,
0x0a, 0x7b, 0xcd, 0xaf, 0x65, 0x68, 0x18, 0xc0,
0xec, 0x04, 0x1c, 0x74, 0x6d, 0xe2, 0x6e, 0x79,
0xd4, 0x11, 0x2b, 0x62, 0xd5, 0x27, 0xad, 0x4f,
0x01, 0x59, 0x73, 0xcc, 0x6a, 0x53, 0xfb, 0x2d,
0xd5, 0x4e, 0x99, 0x21, 0x65, 0x4d, 0xf5, 0x82,
0xf7, 0xd8, 0x42, 0xce, 0x6f, 0x3d, 0x36, 0x47,
0xf1, 0x05, 0x16, 0xe8, 0x1b, 0x6a, 0x8f, 0x93,
0xf2, 0x8f, 0x37, 0x40, 0x12, 0x28, 0xa3, 0xe6,
0xb9, 0x17, 0x4a, 0x1f, 0xb1, 0xd1, 0x66, 0x69,
0x86, 0xc4, 0xfc, 0x97, 0xae, 0x3f, 0x8f, 0x1e,
0x2b, 0xdf, 0xcd, 0xf9, 0x3c
};
static const u8 dec_output011[] __initconst = {
0x7a, 0x57, 0xf2, 0xc7, 0x06, 0x3f, 0x50, 0x7b,
0x36, 0x1a, 0x66, 0x5c, 0xb9, 0x0e, 0x5e, 0x3b,
0x45, 0x60, 0xbe, 0x9a, 0x31, 0x9f, 0xff, 0x5d,
0x66, 0x34, 0xb4, 0xdc, 0xfb, 0x9d, 0x8e, 0xee,
0x6a, 0x33, 0xa4, 0x07, 0x3c, 0xf9, 0x4c, 0x30,
0xa1, 0x24, 0x52, 0xf9, 0x50, 0x46, 0x88, 0x20,
0x02, 0x32, 0x3a, 0x0e, 0x99, 0x63, 0xaf, 0x1f,
0x15, 0x28, 0x2a, 0x05, 0xff, 0x57, 0x59, 0x5e,
0x18, 0xa1, 0x1f, 0xd0, 0x92, 0x5c, 0x88, 0x66,
0x1b, 0x00, 0x64, 0xa5, 0x93, 0x8d, 0x06, 0x46,
0xb0, 0x64, 0x8b, 0x8b, 0xef, 0x99, 0x05, 0x35,
0x85, 0xb3, 0xf3, 0x33, 0xbb, 0xec, 0x66, 0xb6,
0x3d, 0x57, 0x42, 0xe3, 0xb4, 0xc6, 0xaa, 0xb0,
0x41, 0x2a, 0xb9, 0x59, 0xa9, 0xf6, 0x3e, 0x15,
0x26, 0x12, 0x03, 0x21, 0x4c, 0x74, 0x43, 0x13,
0x2a, 0x03, 0x27, 0x09, 0xb4, 0xfb, 0xe7, 0xb7,
0x40, 0xff, 0x5e, 0xce, 0x48, 0x9a, 0x60, 0xe3,
0x8b, 0x80, 0x8c, 0x38, 0x2d, 0xcb, 0x93, 0x37,
0x74, 0x05, 0x52, 0x6f, 0x73, 0x3e, 0xc3, 0xbc,
0xca, 0x72, 0x0a, 0xeb, 0xf1, 0x3b, 0xa0, 0x95,
0xdc, 0x8a, 0xc4, 0xa9, 0xdc, 0xca, 0x44, 0xd8,
0x08, 0x63, 0x6a, 0x36, 0xd3, 0x3c, 0xb8, 0xac,
0x46, 0x7d, 0xfd, 0xaa, 0xeb, 0x3e, 0x0f, 0x45,
0x8f, 0x49, 0xda, 0x2b, 0xf2, 0x12, 0xbd, 0xaf,
0x67, 0x8a, 0x63, 0x48, 0x4b, 0x55, 0x5f, 0x6d,
0x8c, 0xb9, 0x76, 0x34, 0x84, 0xae, 0xc2, 0xfc,
0x52, 0x64, 0x82, 0xf7, 0xb0, 0x06, 0xf0, 0x45,
0x73, 0x12, 0x50, 0x30, 0x72, 0xea, 0x78, 0x9a,
0xa8, 0xaf, 0xb5, 0xe3, 0xbb, 0x77, 0x52, 0xec,
0x59, 0x84, 0xbf, 0x6b, 0x8f, 0xce, 0x86, 0x5e,
0x1f, 0x23, 0xe9, 0xfb, 0x08, 0x86, 0xf7, 0x10,
0xb9, 0xf2, 0x44, 0x96, 0x44, 0x63, 0xa9, 0xa8,
0x78, 0x00, 0x23, 0xd6, 0xc7, 0xe7, 0x6e, 0x66,
0x4f, 0xcc, 0xee, 0x15, 0xb3, 0xbd, 0x1d, 0xa0,
0xe5, 0x9c, 0x1b, 0x24, 0x2c, 0x4d, 0x3c, 0x62,
0x35, 0x9c, 0x88, 0x59, 0x09, 0xdd, 0x82, 0x1b,
0xcf, 0x0a, 0x83, 0x6b, 0x3f, 0xae, 0x03, 0xc4,
0xb4, 0xdd, 0x7e, 0x5b, 0x28, 0x76, 0x25, 0x96,
0xd9, 0xc9, 0x9d, 0x5f, 0x86, 0xfa, 0xf6, 0xd7,
0xd2, 0xe6, 0x76, 0x1d, 0x0f, 0xa1, 0xdc, 0x74,
0x05, 0x1b, 0x1d, 0xe0, 0xcd, 0x16, 0xb0, 0xa8,
0x8a, 0x34, 0x7b, 0x15, 0x11, 0x77, 0xe5, 0x7b,
0x7e, 0x20, 0xf7, 0xda, 0x38, 0xda, 0xce, 0x70,
0xe9, 0xf5, 0x6c, 0xd9, 0xbe, 0x0c, 0x4c, 0x95,
0x4c, 0xc2, 0x9b, 0x34, 0x55, 0x55, 0xe1, 0xf3,
0x46, 0x8e, 0x48, 0x74, 0x14, 0x4f, 0x9d, 0xc9,
0xf5, 0xe8, 0x1a, 0xf0, 0x11, 0x4a, 0xc1, 0x8d,
0xe0, 0x93, 0xa0, 0xbe, 0x09, 0x1c, 0x2b, 0x4e,
0x0f, 0xb2, 0x87, 0x8b, 0x84, 0xfe, 0x92, 0x32,
0x14, 0xd7, 0x93, 0xdf, 0xe7, 0x44, 0xbc, 0xc5,
0xae, 0x53, 0x69, 0xd8, 0xb3, 0x79, 0x37, 0x80,
0xe3, 0x17, 0x5c, 0xec, 0x53, 0x00, 0x9a, 0xe3,
0x8e, 0xdc, 0x38, 0xb8, 0x66, 0xf0, 0xd3, 0xad,
0x1d, 0x02, 0x96, 0x86, 0x3e, 0x9d, 0x3b, 0x5d,
0xa5, 0x7f, 0x21, 0x10, 0xf1, 0x1f, 0x13, 0x20,
0xf9, 0x57, 0x87, 0x20, 0xf5, 0x5f, 0xf1, 0x17,
0x48, 0x0a, 0x51, 0x5a, 0xcd, 0x19, 0x03, 0xa6,
0x5a, 0xd1, 0x12, 0x97, 0xe9, 0x48, 0xe2, 0x1d,
0x83, 0x75, 0x50, 0xd9, 0x75, 0x7d, 0x6a, 0x82,
0xa1, 0xf9, 0x4e, 0x54, 0x87, 0x89, 0xc9, 0x0c,
0xb7, 0x5b, 0x6a, 0x91, 0xc1, 0x9c, 0xb2, 0xa9,
0xdc, 0x9a, 0xa4, 0x49, 0x0a, 0x6d, 0x0d, 0xbb,
0xde, 0x86, 0x44, 0xdd, 0x5d, 0x89, 0x2b, 0x96,
0x0f, 0x23, 0x95, 0xad, 0xcc, 0xa2, 0xb3, 0xb9,
0x7e, 0x74, 0x38, 0xba, 0x9f, 0x73, 0xae, 0x5f,
0xf8, 0x68, 0xa2, 0xe0, 0xa9, 0xce, 0xbd, 0x40,
0xd4, 0x4c, 0x6b, 0xd2, 0x56, 0x62, 0xb0, 0xcc,
0x63, 0x7e, 0x5b, 0xd3, 0xae, 0xd1, 0x75, 0xce,
0xbb, 0xb4, 0x5b, 0xa8, 0xf8, 0xb4, 0xac, 0x71,
0x75, 0xaa, 0xc9, 0x9f, 0xbb, 0x6c, 0xad, 0x0f,
0x55, 0x5d, 0xe8, 0x85, 0x7d, 0xf9, 0x21, 0x35,
0xea, 0x92, 0x85, 0x2b, 0x00, 0xec, 0x84, 0x90,
0x0a, 0x63, 0x96, 0xe4, 0x6b, 0xa9, 0x77, 0xb8,
0x91, 0xf8, 0x46, 0x15, 0x72, 0x63, 0x70, 0x01,
0x40, 0xa3, 0xa5, 0x76, 0x62, 0x2b, 0xbf, 0xf1,
0xe5, 0x8d, 0x9f, 0xa3, 0xfa, 0x9b, 0x03, 0xbe,
0xfe, 0x65, 0x6f, 0xa2, 0x29, 0x0d, 0x54, 0xb4,
0x71, 0xce, 0xa9, 0xd6, 0x3d, 0x88, 0xf9, 0xaf,
0x6b, 0xa8, 0x9e, 0xf4, 0x16, 0x96, 0x36, 0xb9,
0x00, 0xdc, 0x10, 0xab, 0xb5, 0x08, 0x31, 0x1f,
0x00, 0xb1, 0x3c, 0xd9, 0x38, 0x3e, 0xc6, 0x04,
0xa7, 0x4e, 0xe8, 0xae, 0xed, 0x98, 0xc2, 0xf7,
0xb9, 0x00, 0x5f, 0x8c, 0x60, 0xd1, 0xe5, 0x15,
0xf7, 0xae, 0x1e, 0x84, 0x88, 0xd1, 0xf6, 0xbc,
0x3a, 0x89, 0x35, 0x22, 0x83, 0x7c, 0xca, 0xf0,
0x33, 0x82, 0x4c, 0x79, 0x3c, 0xfd, 0xb1, 0xae,
0x52, 0x62, 0x55, 0xd2, 0x41, 0x60, 0xc6, 0xbb,
0xfa, 0x0e, 0x59, 0xd6, 0xa8, 0xfe, 0x5d, 0xed,
0x47, 0x3d, 0xe0, 0xea, 0x1f, 0x6e, 0x43, 0x51,
0xec, 0x10, 0x52, 0x56, 0x77, 0x42, 0x6b, 0x52,
0x87, 0xd8, 0xec, 0xe0, 0xaa, 0x76, 0xa5, 0x84,
0x2a, 0x22, 0x24, 0xfd, 0x92, 0x40, 0x88, 0xd5,
0x85, 0x1c, 0x1f, 0x6b, 0x47, 0xa0, 0xc4, 0xe4,
0xef, 0xf4, 0xea, 0xd7, 0x59, 0xac, 0x2a, 0x9e,
0x8c, 0xfa, 0x1f, 0x42, 0x08, 0xfe, 0x4f, 0x74,
0xa0, 0x26, 0xf5, 0xb3, 0x84, 0xf6, 0x58, 0x5f,
0x26, 0x66, 0x3e, 0xd7, 0xe4, 0x22, 0x91, 0x13,
0xc8, 0xac, 0x25, 0x96, 0x23, 0xd8, 0x09, 0xea,
0x45, 0x75, 0x23, 0xb8, 0x5f, 0xc2, 0x90, 0x8b,
0x09, 0xc4, 0xfc, 0x47, 0x6c, 0x6d, 0x0a, 0xef,
0x69, 0xa4, 0x38, 0x19, 0xcf, 0x7d, 0xf9, 0x09,
0x73, 0x9b, 0x60, 0x5a, 0xf7, 0x37, 0xb5, 0xfe,
0x9f, 0xe3, 0x2b, 0x4c, 0x0d, 0x6e, 0x19, 0xf1,
0xd6, 0xc0, 0x70, 0xf3, 0x9d, 0x22, 0x3c, 0xf9,
0x49, 0xce, 0x30, 0x8e, 0x44, 0xb5, 0x76, 0x15,
0x8f, 0x52, 0xfd, 0xa5, 0x04, 0xb8, 0x55, 0x6a,
0x36, 0x59, 0x7c, 0xc4, 0x48, 0xb8, 0xd7, 0xab,
0x05, 0x66, 0xe9, 0x5e, 0x21, 0x6f, 0x6b, 0x36,
0x29, 0xbb, 0xe9, 0xe3, 0xa2, 0x9a, 0xa8, 0xcd,
0x55, 0x25, 0x11, 0xba, 0x5a, 0x58, 0xa0, 0xde,
0xae, 0x19, 0x2a, 0x48, 0x5a, 0xff, 0x36, 0xcd,
0x6d, 0x16, 0x7a, 0x73, 0x38, 0x46, 0xe5, 0x47,
0x59, 0xc8, 0xa2, 0xf6, 0xe2, 0x6c, 0x83, 0xc5,
0x36, 0x2c, 0x83, 0x7d, 0xb4, 0x01, 0x05, 0x69,
0xe7, 0xaf, 0x5c, 0xc4, 0x64, 0x82, 0x12, 0x21,
0xef, 0xf7, 0xd1, 0x7d, 0xb8, 0x8d, 0x8c, 0x98,
0x7c, 0x5f, 0x7d, 0x92, 0x88, 0xb9, 0x94, 0x07,
0x9c, 0xd8, 0xe9, 0x9c, 0x17, 0x38, 0xe3, 0x57,
0x6c, 0xe0, 0xdc, 0xa5, 0x92, 0x42, 0xb3, 0xbd,
0x50, 0xa2, 0x7e, 0xb5, 0xb1, 0x52, 0x72, 0x03,
0x97, 0xd8, 0xaa, 0x9a, 0x1e, 0x75, 0x41, 0x11,
0xa3, 0x4f, 0xcc, 0xd4, 0xe3, 0x73, 0xad, 0x96,
0xdc, 0x47, 0x41, 0x9f, 0xb0, 0xbe, 0x79, 0x91,
0xf5, 0xb6, 0x18, 0xfe, 0xc2, 0x83, 0x18, 0x7d,
0x73, 0xd9, 0x4f, 0x83, 0x84, 0x03, 0xb3, 0xf0,
0x77, 0x66, 0x3d, 0x83, 0x63, 0x2e, 0x2c, 0xf9,
0xdd, 0xa6, 0x1f, 0x89, 0x82, 0xb8, 0x23, 0x42,
0xeb, 0xe2, 0xca, 0x70, 0x82, 0x61, 0x41, 0x0a,
0x6d, 0x5f, 0x75, 0xc5, 0xe2, 0xc4, 0x91, 0x18,
0x44, 0x22, 0xfa, 0x34, 0x10, 0xf5, 0x20, 0xdc,
0xb7, 0xdd, 0x2a, 0x20, 0x77, 0xf5, 0xf9, 0xce,
0xdb, 0xa0, 0x0a, 0x52, 0x2a, 0x4e, 0xdd, 0xcc,
0x97, 0xdf, 0x05, 0xe4, 0x5e, 0xb7, 0xaa, 0xf0,
0xe2, 0x80, 0xff, 0xba, 0x1a, 0x0f, 0xac, 0xdf,
0x02, 0x32, 0xe6, 0xf7, 0xc7, 0x17, 0x13, 0xb7,
0xfc, 0x98, 0x48, 0x8c, 0x0d, 0x82, 0xc9, 0x80,
0x7a, 0xe2, 0x0a, 0xc5, 0xb4, 0xde, 0x7c, 0x3c,
0x79, 0x81, 0x0e, 0x28, 0x65, 0x79, 0x67, 0x82,
0x69, 0x44, 0x66, 0x09, 0xf7, 0x16, 0x1a, 0xf9,
0x7d, 0x80, 0xa1, 0x79, 0x14, 0xa9, 0xc8, 0x20,
0xfb, 0xa2, 0x46, 0xbe, 0x08, 0x35, 0x17, 0x58,
0xc1, 0x1a, 0xda, 0x2a, 0x6b, 0x2e, 0x1e, 0xe6,
0x27, 0x55, 0x7b, 0x19, 0xe2, 0xfb, 0x64, 0xfc,
0x5e, 0x15, 0x54, 0x3c, 0xe7, 0xc2, 0x11, 0x50,
0x30, 0xb8, 0x72, 0x03, 0x0b, 0x1a, 0x9f, 0x86,
0x27, 0x11, 0x5c, 0x06, 0x2b, 0xbd, 0x75, 0x1a,
0x0a, 0xda, 0x01, 0xfa, 0x5c, 0x4a, 0xc1, 0x80,
0x3a, 0x6e, 0x30, 0xc8, 0x2c, 0xeb, 0x56, 0xec,
0x89, 0xfa, 0x35, 0x7b, 0xb2, 0xf0, 0x97, 0x08,
0x86, 0x53, 0xbe, 0xbd, 0x40, 0x41, 0x38, 0x1c,
0xb4, 0x8b, 0x79, 0x2e, 0x18, 0x96, 0x94, 0xde,
0xe8, 0xca, 0xe5, 0x9f, 0x92, 0x9f, 0x15, 0x5d,
0x56, 0x60, 0x5c, 0x09, 0xf9, 0x16, 0xf4, 0x17,
0x0f, 0xf6, 0x4c, 0xda, 0xe6, 0x67, 0x89, 0x9f,
0xca, 0x6c, 0xe7, 0x9b, 0x04, 0x62, 0x0e, 0x26,
0xa6, 0x52, 0xbd, 0x29, 0xff, 0xc7, 0xa4, 0x96,
0xe6, 0x6a, 0x02, 0xa5, 0x2e, 0x7b, 0xfe, 0x97,
0x68, 0x3e, 0x2e, 0x5f, 0x3b, 0x0f, 0x36, 0xd6,
0x98, 0x19, 0x59, 0x48, 0xd2, 0xc6, 0xe1, 0x55,
0x1a, 0x6e, 0xd6, 0xed, 0x2c, 0xba, 0xc3, 0x9e,
0x64, 0xc9, 0x95, 0x86, 0x35, 0x5e, 0x3e, 0x88,
0x69, 0x99, 0x4b, 0xee, 0xbe, 0x9a, 0x99, 0xb5,
0x6e, 0x58, 0xae, 0xdd, 0x22, 0xdb, 0xdd, 0x6b,
0xfc, 0xaf, 0x90, 0xa3, 0x3d, 0xa4, 0xc1, 0x15,
0x92, 0x18, 0x8d, 0xd2, 0x4b, 0x7b, 0x06, 0xd1,
0x37, 0xb5, 0xe2, 0x7c, 0x2c, 0xf0, 0x25, 0xe4,
0x94, 0x2a, 0xbd, 0xe3, 0x82, 0x70, 0x78, 0xa3,
0x82, 0x10, 0x5a, 0x90, 0xd7, 0xa4, 0xfa, 0xaf,
0x1a, 0x88, 0x59, 0xdc, 0x74, 0x12, 0xb4, 0x8e,
0xd7, 0x19, 0x46, 0xf4, 0x84, 0x69, 0x9f, 0xbb,
0x70, 0xa8, 0x4c, 0x52, 0x81, 0xa9, 0xff, 0x76,
0x1c, 0xae, 0xd8, 0x11, 0x3d, 0x7f, 0x7d, 0xc5,
0x12, 0x59, 0x28, 0x18, 0xc2, 0xa2, 0xb7, 0x1c,
0x88, 0xf8, 0xd6, 0x1b, 0xa6, 0x7d, 0x9e, 0xde,
0x29, 0xf8, 0xed, 0xff, 0xeb, 0x92, 0x24, 0x4f,
0x05, 0xaa, 0xd9, 0x49, 0xba, 0x87, 0x59, 0x51,
0xc9, 0x20, 0x5c, 0x9b, 0x74, 0xcf, 0x03, 0xd9,
0x2d, 0x34, 0xc7, 0x5b, 0xa5, 0x40, 0xb2, 0x99,
0xf5, 0xcb, 0xb4, 0xf6, 0xb7, 0x72, 0x4a, 0xd6,
0xbd, 0xb0, 0xf3, 0x93, 0xe0, 0x1b, 0xa8, 0x04,
0x1e, 0x35, 0xd4, 0x80, 0x20, 0xf4, 0x9c, 0x31,
0x6b, 0x45, 0xb9, 0x15, 0xb0, 0x5e, 0xdd, 0x0a,
0x33, 0x9c, 0x83, 0xcd, 0x58, 0x89, 0x50, 0x56,
0xbb, 0x81, 0x00, 0x91, 0x32, 0xf3, 0x1b, 0x3e,
0xcf, 0x45, 0xe1, 0xf9, 0xe1, 0x2c, 0x26, 0x78,
0x93, 0x9a, 0x60, 0x46, 0xc9, 0xb5, 0x5e, 0x6a,
0x28, 0x92, 0x87, 0x3f, 0x63, 0x7b, 0xdb, 0xf7,
0xd0, 0x13, 0x9d, 0x32, 0x40, 0x5e, 0xcf, 0xfb,
0x79, 0x68, 0x47, 0x4c, 0xfd, 0x01, 0x17, 0xe6,
0x97, 0x93, 0x78, 0xbb, 0xa6, 0x27, 0xa3, 0xe8,
0x1a, 0xe8, 0x94, 0x55, 0x7d, 0x08, 0xe5, 0xdc,
0x66, 0xa3, 0x69, 0xc8, 0xca, 0xc5, 0xa1, 0x84,
0x55, 0xde, 0x08, 0x91, 0x16, 0x3a, 0x0c, 0x86,
0xab, 0x27, 0x2b, 0x64, 0x34, 0x02, 0x6c, 0x76,
0x8b, 0xc6, 0xaf, 0xcc, 0xe1, 0xd6, 0x8c, 0x2a,
0x18, 0x3d, 0xa6, 0x1b, 0x37, 0x75, 0x45, 0x73,
0xc2, 0x75, 0xd7, 0x53, 0x78, 0x3a, 0xd6, 0xe8,
0x29, 0xd2, 0x4a, 0xa8, 0x1e, 0x82, 0xf6, 0xb6,
0x81, 0xde, 0x21, 0xed, 0x2b, 0x56, 0xbb, 0xf2,
0xd0, 0x57, 0xc1, 0x7c, 0xd2, 0x6a, 0xd2, 0x56,
0xf5, 0x13, 0x5f, 0x1c, 0x6a, 0x0b, 0x74, 0xfb,
0xe9, 0xfe, 0x9e, 0xea, 0x95, 0xb2, 0x46, 0xab,
0x0a, 0xfc, 0xfd, 0xf3, 0xbb, 0x04, 0x2b, 0x76,
0x1b, 0xa4, 0x74, 0xb0, 0xc1, 0x78, 0xc3, 0x69,
0xe2, 0xb0, 0x01, 0xe1, 0xde, 0x32, 0x4c, 0x8d,
0x1a, 0xb3, 0x38, 0x08, 0xd5, 0xfc, 0x1f, 0xdc,
0x0e, 0x2c, 0x9c, 0xb1, 0xa1, 0x63, 0x17, 0x22,
0xf5, 0x6c, 0x93, 0x70, 0x74, 0x00, 0xf8, 0x39,
0x01, 0x94, 0xd1, 0x32, 0x23, 0x56, 0x5d, 0xa6,
0x02, 0x76, 0x76, 0x93, 0xce, 0x2f, 0x19, 0xe9,
0x17, 0x52, 0xae, 0x6e, 0x2c, 0x6d, 0x61, 0x7f,
0x3b, 0xaa, 0xe0, 0x52, 0x85, 0xc5, 0x65, 0xc1,
0xbb, 0x8e, 0x5b, 0x21, 0xd5, 0xc9, 0x78, 0x83,
0x07, 0x97, 0x4c, 0x62, 0x61, 0x41, 0xd4, 0xfc,
0xc9, 0x39, 0xe3, 0x9b, 0xd0, 0xcc, 0x75, 0xc4,
0x97, 0xe6, 0xdd, 0x2a, 0x5f, 0xa6, 0xe8, 0x59,
0x6c, 0x98, 0xb9, 0x02, 0xe2, 0xa2, 0xd6, 0x68,
0xee, 0x3b, 0x1d, 0xe3, 0x4d, 0x5b, 0x30, 0xef,
0x03, 0xf2, 0xeb, 0x18, 0x57, 0x36, 0xe8, 0xa1,
0xf4, 0x47, 0xfb, 0xcb, 0x8f, 0xcb, 0xc8, 0xf3,
0x4f, 0x74, 0x9d, 0x9d, 0xb1, 0x8d, 0x14, 0x44,
0xd9, 0x19, 0xb4, 0x54, 0x4f, 0x75, 0x19, 0x09,
0xa0, 0x75, 0xbc, 0x3b, 0x82, 0xc6, 0x3f, 0xb8,
0x83, 0x19, 0x6e, 0xd6, 0x37, 0xfe, 0x6e, 0x8a,
0x4e, 0xe0, 0x4a, 0xab, 0x7b, 0xc8, 0xb4, 0x1d,
0xf4, 0xed, 0x27, 0x03, 0x65, 0xa2, 0xa1, 0xae,
0x11, 0xe7, 0x98, 0x78, 0x48, 0x91, 0xd2, 0xd2,
0xd4, 0x23, 0x78, 0x50, 0xb1, 0x5b, 0x85, 0x10,
0x8d, 0xca, 0x5f, 0x0f, 0x71, 0xae, 0x72, 0x9a,
0xf6, 0x25, 0x19, 0x60, 0x06, 0xf7, 0x10, 0x34,
0x18, 0x0d, 0xc9, 0x9f, 0x7b, 0x0c, 0x9b, 0x8f,
0x91, 0x1b, 0x9f, 0xcd, 0x10, 0xee, 0x75, 0xf9,
0x97, 0x66, 0xfc, 0x4d, 0x33, 0x6e, 0x28, 0x2b,
0x92, 0x85, 0x4f, 0xab, 0x43, 0x8d, 0x8f, 0x7d,
0x86, 0xa7, 0xc7, 0xd8, 0xd3, 0x0b, 0x8b, 0x57,
0xb6, 0x1d, 0x95, 0x0d, 0xe9, 0xbc, 0xd9, 0x03,
0xd9, 0x10, 0x19, 0xc3, 0x46, 0x63, 0x55, 0x87,
0x61, 0x79, 0x6c, 0x95, 0x0e, 0x9c, 0xdd, 0xca,
0xc3, 0xf3, 0x64, 0xf0, 0x7d, 0x76, 0xb7, 0x53,
0x67, 0x2b, 0x1e, 0x44, 0x56, 0x81, 0xea, 0x8f,
0x5c, 0x42, 0x16, 0xb8, 0x28, 0xeb, 0x1b, 0x61,
0x10, 0x1e, 0xbf, 0xec, 0xa8
};
static const u8 dec_assoc011[] __initconst = {
0xd6, 0x31, 0xda, 0x5d, 0x42, 0x5e, 0xd7
};
static const u8 dec_nonce011[] __initconst = {
0xfd, 0x87, 0xd4, 0xd8, 0x62, 0xfd, 0xec, 0xaa
};
static const u8 dec_key011[] __initconst = {
0x35, 0x4e, 0xb5, 0x70, 0x50, 0x42, 0x8a, 0x85,
0xf2, 0xfb, 0xed, 0x7b, 0xd0, 0x9e, 0x97, 0xca,
0xfa, 0x98, 0x66, 0x63, 0xee, 0x37, 0xcc, 0x52,
0xfe, 0xd1, 0xdf, 0x95, 0x15, 0x34, 0x29, 0x38
};
static const u8 dec_input012[] __initconst = {
0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3,
0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf,
0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1,
0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f,
0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e,
0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5,
0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b,
0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b,
0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2,
0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1,
0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74,
0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e,
0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae,
0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd,
0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04,
0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55,
0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef,
0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b,
0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74,
0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26,
0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f,
0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64,
0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd,
0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad,
0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b,
0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e,
0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e,
0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0,
0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f,
0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50,
0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97,
0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03,
0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a,
0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15,
0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb,
0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34,
0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47,
0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86,
0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24,
0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c,
0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9,
0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7,
0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48,
0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b,
0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e,
0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61,
0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75,
0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26,
0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74,
0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43,
0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1,
0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79,
0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3,
0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5,
0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9,
0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d,
0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8,
0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26,
0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5,
0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d,
0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29,
0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57,
0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92,
0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9,
0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc,
0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd,
0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57,
0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3,
0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4,
0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c,
0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27,
0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c,
0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5,
0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14,
0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94,
0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b,
0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99,
0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84,
0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a,
0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa,
0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75,
0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74,
0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40,
0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72,
0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f,
0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92,
0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8,
0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c,
0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f,
0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb,
0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a,
0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b,
0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d,
0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c,
0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4,
0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00,
0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b,
0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4,
0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84,
0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba,
0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47,
0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4,
0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88,
0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81,
0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1,
0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a,
0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e,
0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1,
0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07,
0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24,
0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f,
0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a,
0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9,
0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9,
0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51,
0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1,
0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c,
0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53,
0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40,
0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a,
0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2,
0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2,
0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8,
0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07,
0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9,
0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d,
0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde,
0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f,
0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d,
0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d,
0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56,
0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c,
0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3,
0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d,
0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26,
0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10,
0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c,
0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11,
0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf,
0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c,
0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb,
0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79,
0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa,
0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80,
0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08,
0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c,
0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc,
0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab,
0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6,
0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9,
0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7,
0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2,
0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33,
0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2,
0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e,
0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c,
0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b,
0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66,
0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6,
0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44,
0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74,
0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6,
0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f,
0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24,
0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1,
0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2,
0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5,
0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d,
0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0,
0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b,
0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3,
0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0,
0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3,
0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c,
0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b,
0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5,
0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51,
0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71,
0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68,
0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb,
0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e,
0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b,
0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8,
0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb,
0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54,
0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7,
0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff,
0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd,
0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde,
0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c,
0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1,
0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8,
0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14,
0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c,
0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4,
0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06,
0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52,
0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d,
0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c,
0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6,
0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5,
0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f,
0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e,
0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98,
0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8,
0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb,
0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b,
0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79,
0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11,
0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d,
0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10,
0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23,
0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23,
0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90,
0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4,
0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1,
0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7,
0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11,
0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50,
0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8,
0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97,
0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38,
0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f,
0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33,
0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f,
0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75,
0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21,
0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90,
0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8,
0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91,
0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1,
0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f,
0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3,
0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc,
0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a,
0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62,
0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55,
0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23,
0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6,
0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac,
0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12,
0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a,
0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7,
0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec,
0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28,
0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88,
0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4,
0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17,
0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2,
0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33,
0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a,
0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28,
0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62,
0x70, 0xcf, 0xd6
};
static const u8 dec_output012[] __initconst = {
0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0,
0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5,
0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57,
0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff,
0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5,
0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b,
0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46,
0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b,
0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71,
0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0,
0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b,
0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d,
0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f,
0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24,
0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23,
0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e,
0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14,
0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d,
0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb,
0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4,
0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf,
0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e,
0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6,
0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33,
0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb,
0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0,
0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe,
0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00,
0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d,
0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b,
0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50,
0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e,
0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4,
0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28,
0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8,
0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b,
0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86,
0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67,
0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff,
0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59,
0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe,
0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6,
0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e,
0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b,
0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50,
0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39,
0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02,
0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9,
0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a,
0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38,
0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9,
0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65,
0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb,
0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2,
0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae,
0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee,
0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00,
0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c,
0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8,
0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31,
0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68,
0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4,
0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0,
0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11,
0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7,
0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39,
0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1,
0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1,
0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2,
0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66,
0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49,
0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2,
0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5,
0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3,
0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c,
0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa,
0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00,
0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54,
0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87,
0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03,
0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39,
0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40,
0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6,
0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22,
0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5,
0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e,
0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32,
0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53,
0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42,
0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c,
0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68,
0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48,
0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c,
0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce,
0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd,
0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa,
0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69,
0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8,
0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58,
0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0,
0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45,
0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb,
0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33,
0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c,
0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23,
0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80,
0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1,
0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff,
0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24,
0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9,
0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46,
0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8,
0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20,
0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35,
0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63,
0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb,
0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36,
0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a,
0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c,
0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f,
0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02,
0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03,
0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa,
0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16,
0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d,
0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5,
0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7,
0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac,
0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47,
0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3,
0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35,
0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e,
0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6,
0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74,
0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e,
0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a,
0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0,
0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4,
0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8,
0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16,
0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32,
0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65,
0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06,
0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a,
0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7,
0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85,
0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb,
0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46,
0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e,
0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61,
0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb,
0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d,
0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00,
0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5,
0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6,
0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1,
0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a,
0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7,
0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63,
0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38,
0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3,
0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed,
0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49,
0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42,
0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0,
0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f,
0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1,
0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd,
0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d,
0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88,
0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1,
0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25,
0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22,
0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28,
0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f,
0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53,
0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28,
0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8,
0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc,
0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8,
0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb,
0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3,
0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3,
0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac,
0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2,
0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a,
0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad,
0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e,
0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd,
0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf,
0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba,
0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41,
0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91,
0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d,
0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6,
0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf,
0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92,
0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e,
0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72,
0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04,
0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46,
0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55,
0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84,
0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61,
0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d,
0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8,
0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d,
0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87,
0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70,
0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94,
0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f,
0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb,
0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90,
0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31,
0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06,
0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05,
0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7,
0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e,
0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae,
0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2,
0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21,
0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0,
0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d,
0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0,
0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6,
0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5,
0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9,
0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8,
0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57,
0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1,
0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c,
0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b,
0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69,
0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d,
0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d,
0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19,
0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82,
0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20,
0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f,
0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e,
0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f,
0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47,
0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b,
0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4,
0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b,
0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4,
0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9,
0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3,
0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0,
0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16,
0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d,
0x78, 0xec, 0x00
};
static const u8 dec_assoc012[] __initconst = {
0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8,
0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce,
0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c,
0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc,
0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e,
0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f,
0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b,
0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9
};
static const u8 dec_nonce012[] __initconst = {
0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06
};
static const u8 dec_key012[] __initconst = {
0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e,
0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d,
0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e,
0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64
};
static const u8 dec_input013[] __initconst = {
0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3,
0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf,
0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1,
0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f,
0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e,
0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5,
0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b,
0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b,
0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2,
0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1,
0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74,
0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e,
0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae,
0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd,
0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04,
0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55,
0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef,
0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b,
0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74,
0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26,
0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f,
0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64,
0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd,
0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad,
0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b,
0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e,
0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e,
0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0,
0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f,
0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50,
0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97,
0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03,
0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a,
0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15,
0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb,
0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34,
0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47,
0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86,
0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24,
0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c,
0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9,
0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7,
0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48,
0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b,
0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e,
0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61,
0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75,
0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26,
0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74,
0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43,
0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1,
0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79,
0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3,
0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5,
0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9,
0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d,
0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8,
0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26,
0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5,
0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d,
0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29,
0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57,
0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92,
0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9,
0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc,
0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd,
0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57,
0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3,
0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4,
0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c,
0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27,
0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c,
0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5,
0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14,
0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94,
0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b,
0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99,
0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84,
0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a,
0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa,
0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75,
0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74,
0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40,
0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72,
0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f,
0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92,
0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8,
0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c,
0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f,
0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb,
0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a,
0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b,
0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d,
0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c,
0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4,
0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00,
0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b,
0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4,
0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84,
0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba,
0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47,
0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4,
0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88,
0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81,
0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1,
0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a,
0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e,
0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1,
0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07,
0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24,
0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f,
0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a,
0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9,
0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9,
0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51,
0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1,
0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c,
0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53,
0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40,
0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a,
0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2,
0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2,
0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8,
0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07,
0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9,
0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d,
0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde,
0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f,
0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d,
0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d,
0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56,
0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c,
0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3,
0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d,
0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26,
0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10,
0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c,
0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11,
0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf,
0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c,
0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb,
0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79,
0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa,
0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80,
0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08,
0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c,
0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc,
0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab,
0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6,
0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9,
0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7,
0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2,
0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33,
0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2,
0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e,
0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c,
0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b,
0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66,
0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6,
0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44,
0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74,
0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6,
0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f,
0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24,
0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1,
0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2,
0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5,
0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d,
0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0,
0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b,
0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3,
0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0,
0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3,
0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c,
0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b,
0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5,
0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51,
0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71,
0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68,
0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb,
0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e,
0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b,
0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8,
0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb,
0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54,
0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7,
0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff,
0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd,
0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde,
0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c,
0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1,
0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8,
0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14,
0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c,
0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4,
0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06,
0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52,
0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d,
0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c,
0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6,
0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5,
0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f,
0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e,
0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98,
0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8,
0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb,
0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b,
0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79,
0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11,
0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d,
0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10,
0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23,
0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23,
0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90,
0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4,
0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1,
0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7,
0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11,
0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50,
0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8,
0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97,
0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38,
0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f,
0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33,
0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f,
0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75,
0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21,
0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90,
0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8,
0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91,
0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1,
0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f,
0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3,
0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc,
0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a,
0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62,
0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55,
0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23,
0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6,
0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac,
0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12,
0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a,
0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7,
0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec,
0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28,
0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88,
0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4,
0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17,
0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2,
0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33,
0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a,
0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28,
0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62,
0x70, 0xcf, 0xd7
};
static const u8 dec_output013[] __initconst = {
0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0,
0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5,
0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57,
0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff,
0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5,
0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b,
0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46,
0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b,
0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71,
0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0,
0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b,
0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d,
0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f,
0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24,
0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23,
0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e,
0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14,
0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d,
0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb,
0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4,
0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf,
0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e,
0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6,
0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33,
0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb,
0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0,
0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe,
0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00,
0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d,
0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b,
0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50,
0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e,
0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4,
0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28,
0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8,
0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b,
0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86,
0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67,
0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff,
0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59,
0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe,
0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6,
0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e,
0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b,
0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50,
0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39,
0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02,
0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9,
0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a,
0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38,
0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9,
0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65,
0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb,
0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2,
0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae,
0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee,
0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00,
0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c,
0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8,
0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31,
0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68,
0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4,
0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0,
0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11,
0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7,
0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39,
0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1,
0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1,
0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2,
0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66,
0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49,
0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2,
0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5,
0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3,
0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c,
0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa,
0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00,
0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54,
0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87,
0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03,
0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39,
0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40,
0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6,
0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22,
0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5,
0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e,
0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32,
0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53,
0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42,
0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c,
0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68,
0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48,
0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c,
0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce,
0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd,
0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa,
0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69,
0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8,
0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58,
0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0,
0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45,
0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb,
0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33,
0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c,
0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23,
0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80,
0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1,
0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff,
0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24,
0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9,
0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46,
0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8,
0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20,
0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35,
0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63,
0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb,
0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36,
0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a,
0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c,
0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f,
0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02,
0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03,
0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa,
0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16,
0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d,
0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5,
0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7,
0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac,
0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47,
0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3,
0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35,
0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e,
0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6,
0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74,
0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e,
0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a,
0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0,
0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4,
0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8,
0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16,
0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32,
0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65,
0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06,
0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a,
0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7,
0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85,
0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb,
0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46,
0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e,
0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61,
0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb,
0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d,
0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00,
0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5,
0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6,
0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1,
0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a,
0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7,
0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63,
0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38,
0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3,
0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed,
0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49,
0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42,
0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0,
0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f,
0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1,
0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd,
0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d,
0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88,
0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1,
0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25,
0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22,
0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28,
0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f,
0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53,
0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28,
0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8,
0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc,
0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8,
0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb,
0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3,
0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3,
0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac,
0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2,
0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a,
0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad,
0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e,
0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd,
0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf,
0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba,
0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41,
0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91,
0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d,
0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6,
0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf,
0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92,
0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e,
0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72,
0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04,
0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46,
0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55,
0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84,
0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61,
0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d,
0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8,
0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d,
0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87,
0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70,
0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94,
0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f,
0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb,
0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90,
0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31,
0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06,
0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05,
0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7,
0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e,
0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae,
0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2,
0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21,
0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0,
0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d,
0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0,
0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6,
0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5,
0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9,
0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8,
0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57,
0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1,
0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c,
0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b,
0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69,
0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d,
0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d,
0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19,
0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82,
0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20,
0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f,
0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e,
0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f,
0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47,
0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b,
0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4,
0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b,
0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4,
0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9,
0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3,
0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0,
0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16,
0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d,
0x78, 0xec, 0x00
};
static const u8 dec_assoc013[] __initconst = {
0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8,
0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce,
0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c,
0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc,
0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e,
0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f,
0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b,
0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9
};
static const u8 dec_nonce013[] __initconst = {
0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06
};
static const u8 dec_key013[] __initconst = {
0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e,
0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d,
0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e,
0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64
};
static const struct chacha20poly1305_testvec
chacha20poly1305_dec_vectors[] __initconst = {
{ dec_input001, dec_output001, dec_assoc001, dec_nonce001, dec_key001,
sizeof(dec_input001), sizeof(dec_assoc001), sizeof(dec_nonce001) },
{ dec_input002, dec_output002, dec_assoc002, dec_nonce002, dec_key002,
sizeof(dec_input002), sizeof(dec_assoc002), sizeof(dec_nonce002) },
{ dec_input003, dec_output003, dec_assoc003, dec_nonce003, dec_key003,
sizeof(dec_input003), sizeof(dec_assoc003), sizeof(dec_nonce003) },
{ dec_input004, dec_output004, dec_assoc004, dec_nonce004, dec_key004,
sizeof(dec_input004), sizeof(dec_assoc004), sizeof(dec_nonce004) },
{ dec_input005, dec_output005, dec_assoc005, dec_nonce005, dec_key005,
sizeof(dec_input005), sizeof(dec_assoc005), sizeof(dec_nonce005) },
{ dec_input006, dec_output006, dec_assoc006, dec_nonce006, dec_key006,
sizeof(dec_input006), sizeof(dec_assoc006), sizeof(dec_nonce006) },
{ dec_input007, dec_output007, dec_assoc007, dec_nonce007, dec_key007,
sizeof(dec_input007), sizeof(dec_assoc007), sizeof(dec_nonce007) },
{ dec_input008, dec_output008, dec_assoc008, dec_nonce008, dec_key008,
sizeof(dec_input008), sizeof(dec_assoc008), sizeof(dec_nonce008) },
{ dec_input009, dec_output009, dec_assoc009, dec_nonce009, dec_key009,
sizeof(dec_input009), sizeof(dec_assoc009), sizeof(dec_nonce009) },
{ dec_input010, dec_output010, dec_assoc010, dec_nonce010, dec_key010,
sizeof(dec_input010), sizeof(dec_assoc010), sizeof(dec_nonce010) },
{ dec_input011, dec_output011, dec_assoc011, dec_nonce011, dec_key011,
sizeof(dec_input011), sizeof(dec_assoc011), sizeof(dec_nonce011) },
{ dec_input012, dec_output012, dec_assoc012, dec_nonce012, dec_key012,
sizeof(dec_input012), sizeof(dec_assoc012), sizeof(dec_nonce012) },
{ dec_input013, dec_output013, dec_assoc013, dec_nonce013, dec_key013,
sizeof(dec_input013), sizeof(dec_assoc013), sizeof(dec_nonce013),
true }
};
static const u8 xenc_input001[] __initconst = {
0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
0x9d
};
static const u8 xenc_output001[] __initconst = {
0x1a, 0x6e, 0x3a, 0xd9, 0xfd, 0x41, 0x3f, 0x77,
0x54, 0x72, 0x0a, 0x70, 0x9a, 0xa0, 0x29, 0x92,
0x2e, 0xed, 0x93, 0xcf, 0x0f, 0x71, 0x88, 0x18,
0x7a, 0x9d, 0x2d, 0x24, 0xe0, 0xf5, 0xea, 0x3d,
0x55, 0x64, 0xd7, 0xad, 0x2a, 0x1a, 0x1f, 0x7e,
0x86, 0x6d, 0xb0, 0xce, 0x80, 0x41, 0x72, 0x86,
0x26, 0xee, 0x84, 0xd7, 0xef, 0x82, 0x9e, 0xe2,
0x60, 0x9d, 0x5a, 0xfc, 0xf0, 0xe4, 0x19, 0x85,
0xea, 0x09, 0xc6, 0xfb, 0xb3, 0xa9, 0x50, 0x09,
0xec, 0x5e, 0x11, 0x90, 0xa1, 0xc5, 0x4e, 0x49,
0xef, 0x50, 0xd8, 0x8f, 0xe0, 0x78, 0xd7, 0xfd,
0xb9, 0x3b, 0xc9, 0xf2, 0x91, 0xc8, 0x25, 0xc8,
0xa7, 0x63, 0x60, 0xce, 0x10, 0xcd, 0xc6, 0x7f,
0xf8, 0x16, 0xf8, 0xe1, 0x0a, 0xd9, 0xde, 0x79,
0x50, 0x33, 0xf2, 0x16, 0x0f, 0x17, 0xba, 0xb8,
0x5d, 0xd8, 0xdf, 0x4e, 0x51, 0xa8, 0x39, 0xd0,
0x85, 0xca, 0x46, 0x6a, 0x10, 0xa7, 0xa3, 0x88,
0xef, 0x79, 0xb9, 0xf8, 0x24, 0xf3, 0xe0, 0x71,
0x7b, 0x76, 0x28, 0x46, 0x3a, 0x3a, 0x1b, 0x91,
0xb6, 0xd4, 0x3e, 0x23, 0xe5, 0x44, 0x15, 0xbf,
0x60, 0x43, 0x9d, 0xa4, 0xbb, 0xd5, 0x5f, 0x89,
0xeb, 0xef, 0x8e, 0xfd, 0xdd, 0xb4, 0x0d, 0x46,
0xf0, 0x69, 0x23, 0x63, 0xae, 0x94, 0xf5, 0x5e,
0xa5, 0xad, 0x13, 0x1c, 0x41, 0x76, 0xe6, 0x90,
0xd6, 0x6d, 0xa2, 0x8f, 0x97, 0x4c, 0xa8, 0x0b,
0xcf, 0x8d, 0x43, 0x2b, 0x9c, 0x9b, 0xc5, 0x58,
0xa5, 0xb6, 0x95, 0x9a, 0xbf, 0x81, 0xc6, 0x54,
0xc9, 0x66, 0x0c, 0xe5, 0x4f, 0x6a, 0x53, 0xa1,
0xe5, 0x0c, 0xba, 0x31, 0xde, 0x34, 0x64, 0x73,
0x8a, 0x3b, 0xbd, 0x92, 0x01, 0xdb, 0x71, 0x69,
0xf3, 0x58, 0x99, 0xbc, 0xd1, 0xcb, 0x4a, 0x05,
0xe2, 0x58, 0x9c, 0x25, 0x17, 0xcd, 0xdc, 0x83,
0xb7, 0xff, 0xfb, 0x09, 0x61, 0xad, 0xbf, 0x13,
0x5b, 0x5e, 0xed, 0x46, 0x82, 0x6f, 0x22, 0xd8,
0x93, 0xa6, 0x85, 0x5b, 0x40, 0x39, 0x5c, 0xc5,
0x9c
};
static const u8 xenc_assoc001[] __initconst = {
0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x4e, 0x91
};
static const u8 xenc_nonce001[] __initconst = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
};
static const u8 xenc_key001[] __initconst = {
0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
};
static const struct chacha20poly1305_testvec
xchacha20poly1305_enc_vectors[] __initconst = {
{ xenc_input001, xenc_output001, xenc_assoc001, xenc_nonce001, xenc_key001,
sizeof(xenc_input001), sizeof(xenc_assoc001), sizeof(xenc_nonce001) }
};
static const u8 xdec_input001[] __initconst = {
0x1a, 0x6e, 0x3a, 0xd9, 0xfd, 0x41, 0x3f, 0x77,
0x54, 0x72, 0x0a, 0x70, 0x9a, 0xa0, 0x29, 0x92,
0x2e, 0xed, 0x93, 0xcf, 0x0f, 0x71, 0x88, 0x18,
0x7a, 0x9d, 0x2d, 0x24, 0xe0, 0xf5, 0xea, 0x3d,
0x55, 0x64, 0xd7, 0xad, 0x2a, 0x1a, 0x1f, 0x7e,
0x86, 0x6d, 0xb0, 0xce, 0x80, 0x41, 0x72, 0x86,
0x26, 0xee, 0x84, 0xd7, 0xef, 0x82, 0x9e, 0xe2,
0x60, 0x9d, 0x5a, 0xfc, 0xf0, 0xe4, 0x19, 0x85,
0xea, 0x09, 0xc6, 0xfb, 0xb3, 0xa9, 0x50, 0x09,
0xec, 0x5e, 0x11, 0x90, 0xa1, 0xc5, 0x4e, 0x49,
0xef, 0x50, 0xd8, 0x8f, 0xe0, 0x78, 0xd7, 0xfd,
0xb9, 0x3b, 0xc9, 0xf2, 0x91, 0xc8, 0x25, 0xc8,
0xa7, 0x63, 0x60, 0xce, 0x10, 0xcd, 0xc6, 0x7f,
0xf8, 0x16, 0xf8, 0xe1, 0x0a, 0xd9, 0xde, 0x79,
0x50, 0x33, 0xf2, 0x16, 0x0f, 0x17, 0xba, 0xb8,
0x5d, 0xd8, 0xdf, 0x4e, 0x51, 0xa8, 0x39, 0xd0,
0x85, 0xca, 0x46, 0x6a, 0x10, 0xa7, 0xa3, 0x88,
0xef, 0x79, 0xb9, 0xf8, 0x24, 0xf3, 0xe0, 0x71,
0x7b, 0x76, 0x28, 0x46, 0x3a, 0x3a, 0x1b, 0x91,
0xb6, 0xd4, 0x3e, 0x23, 0xe5, 0x44, 0x15, 0xbf,
0x60, 0x43, 0x9d, 0xa4, 0xbb, 0xd5, 0x5f, 0x89,
0xeb, 0xef, 0x8e, 0xfd, 0xdd, 0xb4, 0x0d, 0x46,
0xf0, 0x69, 0x23, 0x63, 0xae, 0x94, 0xf5, 0x5e,
0xa5, 0xad, 0x13, 0x1c, 0x41, 0x76, 0xe6, 0x90,
0xd6, 0x6d, 0xa2, 0x8f, 0x97, 0x4c, 0xa8, 0x0b,
0xcf, 0x8d, 0x43, 0x2b, 0x9c, 0x9b, 0xc5, 0x58,
0xa5, 0xb6, 0x95, 0x9a, 0xbf, 0x81, 0xc6, 0x54,
0xc9, 0x66, 0x0c, 0xe5, 0x4f, 0x6a, 0x53, 0xa1,
0xe5, 0x0c, 0xba, 0x31, 0xde, 0x34, 0x64, 0x73,
0x8a, 0x3b, 0xbd, 0x92, 0x01, 0xdb, 0x71, 0x69,
0xf3, 0x58, 0x99, 0xbc, 0xd1, 0xcb, 0x4a, 0x05,
0xe2, 0x58, 0x9c, 0x25, 0x17, 0xcd, 0xdc, 0x83,
0xb7, 0xff, 0xfb, 0x09, 0x61, 0xad, 0xbf, 0x13,
0x5b, 0x5e, 0xed, 0x46, 0x82, 0x6f, 0x22, 0xd8,
0x93, 0xa6, 0x85, 0x5b, 0x40, 0x39, 0x5c, 0xc5,
0x9c
};
static const u8 xdec_output001[] __initconst = {
0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74,
0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20,
0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66,
0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69,
0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20,
0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20,
0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d,
0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e,
0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65,
0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63,
0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f,
0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64,
0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65,
0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61,
0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e,
0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69,
0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72,
0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20,
0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65,
0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61,
0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72,
0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61,
0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20,
0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65,
0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20,
0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20,
0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b,
0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67,
0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80,
0x9d
};
static const u8 xdec_assoc001[] __initconst = {
0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x4e, 0x91
};
static const u8 xdec_nonce001[] __initconst = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
};
static const u8 xdec_key001[] __initconst = {
0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a,
0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0,
0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09,
0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0
};
static const struct chacha20poly1305_testvec
xchacha20poly1305_dec_vectors[] __initconst = {
{ xdec_input001, xdec_output001, xdec_assoc001, xdec_nonce001, xdec_key001,
sizeof(xdec_input001), sizeof(xdec_assoc001), sizeof(xdec_nonce001) }
};
/* This is for the selftests only, since it is useful solely for testing the
 * underlying primitives and their interactions.
 */
static void __init
chacha20poly1305_encrypt_bignonce(u8 *dst, const u8 *src, const size_t src_len,
const u8 *ad, const size_t ad_len,
const u8 nonce[12],
const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
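/* pad0 points at the shared zero page; it supplies the zero bytes that pad
 * the AD and the ciphertext out to 16-byte Poly1305 blocks.
 */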
const u8 *pad0 = page_address(ZERO_PAGE(0));
struct poly1305_desc_ctx poly1305_state;
u32 chacha20_state[CHACHA_STATE_WORDS];
union {
u8 block0[POLY1305_KEY_SIZE];
__le64 lens[2];
} b = {{ 0 }};
u8 bottom_row[16] = { 0 };
u32 le_key[8];
int i;
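/* IETF ChaCha20 counter row: a zero 32-bit block counter in the first word,
 * followed by the 96-bit nonce.
 */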
memcpy(&bottom_row[4], nonce, 12);
for (i = 0; i < 8; ++i)
le_key[i] = get_unaligned_le32(key + sizeof(le_key[i]) * i);
chacha_init(chacha20_state, le_key, bottom_row);
chacha20_crypt(chacha20_state, b.block0, b.block0, sizeof(b.block0));
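/* Keystream block 0 becomes the one-time Poly1305 key; the payload below is
 * encrypted starting from block 1.
 */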
poly1305_init(&poly1305_state, b.block0);
poly1305_update(&poly1305_state, ad, ad_len);
poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf);
chacha20_crypt(chacha20_state, dst, src, src_len);
poly1305_update(&poly1305_state, dst, src_len);
poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf);
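/* The MAC also covers the AD and ciphertext lengths, as two little-endian
 * 64-bit words.
 */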
b.lens[0] = cpu_to_le64(ad_len);
b.lens[1] = cpu_to_le64(src_len);
poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens));
poly1305_final(&poly1305_state, dst + src_len);
}
static void __init
chacha20poly1305_selftest_encrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 *ad, const size_t ad_len,
const u8 *nonce, const size_t nonce_len,
const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
if (nonce_len == 8)
chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len,
get_unaligned_le64(nonce), key);
else if (nonce_len == 12)
chacha20poly1305_encrypt_bignonce(dst, src, src_len, ad,
ad_len, nonce, key);
else
BUG();
}
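/* A decryption vector passes if it was expected to fail and was rejected, or
 * if it was accepted and the recovered plaintext matches.
 */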
static bool __init
decryption_success(bool func_ret, bool expect_failure, int memcmp_result)
{
if (expect_failure)
return !func_ret;
return func_ret && !memcmp_result;
}
bool __init chacha20poly1305_selftest(void)
{
enum { MAXIMUM_TEST_BUFFER_LEN = 1UL << 12 };
size_t i, j, k, total_len;
u8 *computed_output = NULL, *input = NULL;
bool success = true, ret;
struct scatterlist sg_src[3];
computed_output = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL);
input = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL);
if (!computed_output || !input) {
pr_err("chacha20poly1305 self-test malloc: FAIL\n");
success = false;
goto out;
}
for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) {
memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN);
chacha20poly1305_selftest_encrypt(computed_output,
chacha20poly1305_enc_vectors[i].input,
chacha20poly1305_enc_vectors[i].ilen,
chacha20poly1305_enc_vectors[i].assoc,
chacha20poly1305_enc_vectors[i].alen,
chacha20poly1305_enc_vectors[i].nonce,
chacha20poly1305_enc_vectors[i].nlen,
chacha20poly1305_enc_vectors[i].key);
if (memcmp(computed_output,
chacha20poly1305_enc_vectors[i].output,
chacha20poly1305_enc_vectors[i].ilen +
POLY1305_DIGEST_SIZE)) {
pr_err("chacha20poly1305 encryption self-test %zu: FAIL\n",
i + 1);
success = false;
}
}
for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) {
if (chacha20poly1305_enc_vectors[i].nlen != 8)
continue;
memcpy(computed_output, chacha20poly1305_enc_vectors[i].input,
chacha20poly1305_enc_vectors[i].ilen);
sg_init_one(sg_src, computed_output,
chacha20poly1305_enc_vectors[i].ilen + POLY1305_DIGEST_SIZE);
ret = chacha20poly1305_encrypt_sg_inplace(sg_src,
chacha20poly1305_enc_vectors[i].ilen,
chacha20poly1305_enc_vectors[i].assoc,
chacha20poly1305_enc_vectors[i].alen,
get_unaligned_le64(chacha20poly1305_enc_vectors[i].nonce),
chacha20poly1305_enc_vectors[i].key);
if (!ret || memcmp(computed_output,
chacha20poly1305_enc_vectors[i].output,
chacha20poly1305_enc_vectors[i].ilen +
POLY1305_DIGEST_SIZE)) {
pr_err("chacha20poly1305 sg encryption self-test %zu: FAIL\n",
i + 1);
success = false;
}
}
for (i = 0; i < ARRAY_SIZE(chacha20poly1305_dec_vectors); ++i) {
memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN);
ret = chacha20poly1305_decrypt(computed_output,
chacha20poly1305_dec_vectors[i].input,
chacha20poly1305_dec_vectors[i].ilen,
chacha20poly1305_dec_vectors[i].assoc,
chacha20poly1305_dec_vectors[i].alen,
get_unaligned_le64(chacha20poly1305_dec_vectors[i].nonce),
chacha20poly1305_dec_vectors[i].key);
if (!decryption_success(ret,
chacha20poly1305_dec_vectors[i].failure,
memcmp(computed_output,
chacha20poly1305_dec_vectors[i].output,
chacha20poly1305_dec_vectors[i].ilen -
POLY1305_DIGEST_SIZE))) {
pr_err("chacha20poly1305 decryption self-test %zu: FAIL\n",
i + 1);
success = false;
}
}
for (i = 0; i < ARRAY_SIZE(chacha20poly1305_dec_vectors); ++i) {
memcpy(computed_output, chacha20poly1305_dec_vectors[i].input,
chacha20poly1305_dec_vectors[i].ilen);
sg_init_one(sg_src, computed_output,
chacha20poly1305_dec_vectors[i].ilen);
ret = chacha20poly1305_decrypt_sg_inplace(sg_src,
chacha20poly1305_dec_vectors[i].ilen,
chacha20poly1305_dec_vectors[i].assoc,
chacha20poly1305_dec_vectors[i].alen,
get_unaligned_le64(chacha20poly1305_dec_vectors[i].nonce),
chacha20poly1305_dec_vectors[i].key);
if (!decryption_success(ret,
chacha20poly1305_dec_vectors[i].failure,
memcmp(computed_output, chacha20poly1305_dec_vectors[i].output,
chacha20poly1305_dec_vectors[i].ilen -
POLY1305_DIGEST_SIZE))) {
pr_err("chacha20poly1305 sg decryption self-test %zu: FAIL\n",
i + 1);
success = false;
}
}
for (i = 0; i < ARRAY_SIZE(xchacha20poly1305_enc_vectors); ++i) {
memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN);
xchacha20poly1305_encrypt(computed_output,
xchacha20poly1305_enc_vectors[i].input,
xchacha20poly1305_enc_vectors[i].ilen,
xchacha20poly1305_enc_vectors[i].assoc,
xchacha20poly1305_enc_vectors[i].alen,
xchacha20poly1305_enc_vectors[i].nonce,
xchacha20poly1305_enc_vectors[i].key);
if (memcmp(computed_output,
xchacha20poly1305_enc_vectors[i].output,
xchacha20poly1305_enc_vectors[i].ilen +
POLY1305_DIGEST_SIZE)) {
pr_err("xchacha20poly1305 encryption self-test %zu: FAIL\n",
i + 1);
success = false;
}
}
for (i = 0; i < ARRAY_SIZE(xchacha20poly1305_dec_vectors); ++i) {
memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN);
ret = xchacha20poly1305_decrypt(computed_output,
xchacha20poly1305_dec_vectors[i].input,
xchacha20poly1305_dec_vectors[i].ilen,
xchacha20poly1305_dec_vectors[i].assoc,
xchacha20poly1305_dec_vectors[i].alen,
xchacha20poly1305_dec_vectors[i].nonce,
xchacha20poly1305_dec_vectors[i].key);
if (!decryption_success(ret,
xchacha20poly1305_dec_vectors[i].failure,
memcmp(computed_output,
xchacha20poly1305_dec_vectors[i].output,
xchacha20poly1305_dec_vectors[i].ilen -
POLY1305_DIGEST_SIZE))) {
pr_err("xchacha20poly1305 decryption self-test %zu: FAIL\n",
i + 1);
success = false;
}
}
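/* Chunked self-test, compiled out unless DEBUG_CHACHA20POLY1305_SLOW_CHUNK_TEST
 * is set: every split of each length across up to three scatterlist segments
 * must round-trip against the linear API.
 */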
for (total_len = POLY1305_DIGEST_SIZE; IS_ENABLED(DEBUG_CHACHA20POLY1305_SLOW_CHUNK_TEST)
&& total_len <= 1 << 10; ++total_len) {
for (i = 0; i <= total_len; ++i) {
for (j = i; j <= total_len; ++j) {
k = 0;
sg_init_table(sg_src, 3);
if (i)
sg_set_buf(&sg_src[k++], input, i);
if (j - i)
sg_set_buf(&sg_src[k++], input + i, j - i);
if (total_len - j)
sg_set_buf(&sg_src[k++], input + j, total_len - j);
sg_init_marker(sg_src, k);
memset(computed_output, 0, total_len);
memset(input, 0, total_len);
if (!chacha20poly1305_encrypt_sg_inplace(sg_src,
total_len - POLY1305_DIGEST_SIZE, NULL, 0,
0, enc_key001))
goto chunkfail;
chacha20poly1305_encrypt(computed_output,
computed_output,
total_len - POLY1305_DIGEST_SIZE, NULL, 0, 0,
enc_key001);
if (memcmp(computed_output, input, total_len))
goto chunkfail;
if (!chacha20poly1305_decrypt(computed_output,
input, total_len, NULL, 0, 0, enc_key001))
goto chunkfail;
for (k = 0; k < total_len - POLY1305_DIGEST_SIZE; ++k) {
if (computed_output[k])
goto chunkfail;
}
if (!chacha20poly1305_decrypt_sg_inplace(sg_src,
total_len, NULL, 0, 0, enc_key001))
goto chunkfail;
for (k = 0; k < total_len - POLY1305_DIGEST_SIZE; ++k) {
if (input[k])
goto chunkfail;
}
continue;
chunkfail:
pr_err("chacha20poly1305 chunked self-test %zu/%zu/%zu: FAIL\n",
total_len, i, j);
success = false;
}
}
}
out:
kfree(computed_output);
kfree(input);
return success;
}
| linux-master | lib/crypto/chacha20poly1305-selftest.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*
* This is based in part on Andrew Moon's poly1305-donna, which is in the
* public domain.
*/
#include <linux/kernel.h>
#include <asm/unaligned.h>
#include <crypto/internal/poly1305.h>
void poly1305_core_setkey(struct poly1305_core_key *key,
const u8 raw_key[POLY1305_BLOCK_SIZE])
{
u64 t0, t1;
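/* The clamped key r and the accumulator h are held as three limbs of 44, 44
 * and 42 bits.
 */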
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
t0 = get_unaligned_le64(&raw_key[0]);
t1 = get_unaligned_le64(&raw_key[8]);
key->key.r64[0] = t0 & 0xffc0fffffffULL;
key->key.r64[1] = ((t0 >> 44) | (t1 << 20)) & 0xfffffc0ffffULL;
key->key.r64[2] = ((t1 >> 24)) & 0x00ffffffc0fULL;
/* s = 20*r */
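/* The limb weights are 2^0, 2^44 and 2^88, and p = 2^130 - 5, so product
 * terms overflowing past 2^132 fold back with a factor of 4 * 5 = 20.
 */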
key->precomputed_s.r64[0] = key->key.r64[1] * 20;
key->precomputed_s.r64[1] = key->key.r64[2] * 20;
}
EXPORT_SYMBOL(poly1305_core_setkey);
void poly1305_core_blocks(struct poly1305_state *state,
const struct poly1305_core_key *key, const void *src,
unsigned int nblocks, u32 hibit)
{
const u8 *input = src;
u64 hibit64;
u64 r0, r1, r2;
u64 s1, s2;
u64 h0, h1, h2;
u64 c;
u128 d0, d1, d2, d;
if (!nblocks)
return;
hibit64 = ((u64)hibit) << 40;
r0 = key->key.r64[0];
r1 = key->key.r64[1];
r2 = key->key.r64[2];
h0 = state->h64[0];
h1 = state->h64[1];
h2 = state->h64[2];
s1 = key->precomputed_s.r64[0];
s2 = key->precomputed_s.r64[1];
do {
u64 t0, t1;
/* h += m[i] */
t0 = get_unaligned_le64(&input[0]);
t1 = get_unaligned_le64(&input[8]);
h0 += t0 & 0xfffffffffffULL;
h1 += ((t0 >> 44) | (t1 << 20)) & 0xfffffffffffULL;
h2 += (((t1 >> 24)) & 0x3ffffffffffULL) | hibit64;
/* h *= r */
d0 = (u128)h0 * r0;
d = (u128)h1 * s2;
d0 += d;
d = (u128)h2 * s1;
d0 += d;
d1 = (u128)h0 * r1;
d = (u128)h1 * r0;
d1 += d;
d = (u128)h2 * s2;
d1 += d;
d2 = (u128)h0 * r2;
d = (u128)h1 * r1;
d2 += d;
d = (u128)h2 * r0;
d2 += d;
/* (partial) h %= p */
c = (u64)(d0 >> 44);
h0 = (u64)d0 & 0xfffffffffffULL;
d1 += c;
c = (u64)(d1 >> 44);
h1 = (u64)d1 & 0xfffffffffffULL;
d2 += c;
c = (u64)(d2 >> 42);
h2 = (u64)d2 & 0x3ffffffffffULL;
h0 += c * 5;
c = h0 >> 44;
h0 = h0 & 0xfffffffffffULL;
h1 += c;
input += POLY1305_BLOCK_SIZE;
} while (--nblocks);
state->h64[0] = h0;
state->h64[1] = h1;
state->h64[2] = h2;
}
EXPORT_SYMBOL(poly1305_core_blocks);
void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
void *dst)
{
u8 *mac = dst;
u64 h0, h1, h2, c;
u64 g0, g1, g2;
u64 t0, t1;
/* fully carry h */
h0 = state->h64[0];
h1 = state->h64[1];
h2 = state->h64[2];
c = h1 >> 44;
h1 &= 0xfffffffffffULL;
h2 += c;
c = h2 >> 42;
h2 &= 0x3ffffffffffULL;
h0 += c * 5;
c = h0 >> 44;
h0 &= 0xfffffffffffULL;
h1 += c;
c = h1 >> 44;
h1 &= 0xfffffffffffULL;
h2 += c;
c = h2 >> 42;
h2 &= 0x3ffffffffffULL;
h0 += c * 5;
c = h0 >> 44;
h0 &= 0xfffffffffffULL;
h1 += c;
/* compute h + -p */
g0 = h0 + 5;
c = g0 >> 44;
g0 &= 0xfffffffffffULL;
g1 = h1 + c;
c = g1 >> 44;
g1 &= 0xfffffffffffULL;
g2 = h2 + c - (1ULL << 42);
/* select h if h < p, or h + -p if h >= p */
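/* The top bit of g2 is the borrow out of h - p; c expands it into an
 * all-ones or all-zero mask so the selection is branchless.
 */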
c = (g2 >> ((sizeof(u64) * 8) - 1)) - 1;
g0 &= c;
g1 &= c;
g2 &= c;
c = ~c;
h0 = (h0 & c) | g0;
h1 = (h1 & c) | g1;
h2 = (h2 & c) | g2;
if (likely(nonce)) {
/* h = (h + nonce) */
t0 = ((u64)nonce[1] << 32) | nonce[0];
t1 = ((u64)nonce[3] << 32) | nonce[2];
h0 += t0 & 0xfffffffffffULL;
c = h0 >> 44;
h0 &= 0xfffffffffffULL;
h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffffULL) + c;
c = h1 >> 44;
h1 &= 0xfffffffffffULL;
h2 += (((t1 >> 24)) & 0x3ffffffffffULL) + c;
h2 &= 0x3ffffffffffULL;
}
/* mac = h % (2^128) */
h0 = h0 | (h1 << 44);
h1 = (h1 >> 20) | (h2 << 24);
put_unaligned_le64(h0, &mac[0]);
put_unaligned_le64(h1, &mac[8]);
}
EXPORT_SYMBOL(poly1305_core_emit);
| linux-master | lib/crypto/poly1305-donna64.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright (C) 2016-2017 INRIA and Microsoft Corporation.
* Copyright (C) 2018-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*
* This is a machine-generated formally verified implementation of Curve25519
* ECDH from: <https://github.com/mitls/hacl-star>. Though originally machine
* generated, it has been tweaked to be suitable for use in the kernel. It is
* optimized for 64-bit machines that can efficiently work with 128-bit
* integer types.
*/
#include <asm/unaligned.h>
#include <crypto/curve25519.h>
#include <linux/string.h>
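/* Constant-time comparison helpers: each returns an all-ones u64 when its
 * predicate (a == b, a >= b) holds and 0 otherwise, with no data-dependent
 * branches.
 */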
static __always_inline u64 u64_eq_mask(u64 a, u64 b)
{
u64 x = a ^ b;
u64 minus_x = ~x + (u64)1U;
u64 x_or_minus_x = x | minus_x;
u64 xnx = x_or_minus_x >> (u32)63U;
u64 c = xnx - (u64)1U;
return c;
}
static __always_inline u64 u64_gte_mask(u64 a, u64 b)
{
u64 x = a;
u64 y = b;
u64 x_xor_y = x ^ y;
u64 x_sub_y = x - y;
u64 x_sub_y_xor_y = x_sub_y ^ y;
u64 q = x_xor_y | x_sub_y_xor_y;
u64 x_xor_q = x ^ q;
u64 x_xor_q_ = x_xor_q >> (u32)63U;
u64 c = x_xor_q_ - (u64)1U;
return c;
}
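/* Field elements use five 51-bit limbs; since 2^255 == 19 (mod p), the carry
 * out of the top limb re-enters the bottom limb multiplied by 19.
 */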
static __always_inline void modulo_carry_top(u64 *b)
{
u64 b4 = b[4];
u64 b0 = b[0];
u64 b4_ = b4 & 0x7ffffffffffffLLU;
u64 b0_ = b0 + 19 * (b4 >> 51);
b[4] = b4_;
b[0] = b0_;
}
static __always_inline void fproduct_copy_from_wide_(u64 *output, u128 *input)
{
{
u128 xi = input[0];
output[0] = ((u64)(xi));
}
{
u128 xi = input[1];
output[1] = ((u64)(xi));
}
{
u128 xi = input[2];
output[2] = ((u64)(xi));
}
{
u128 xi = input[3];
output[3] = ((u64)(xi));
}
{
u128 xi = input[4];
output[4] = ((u64)(xi));
}
}
static __always_inline void
fproduct_sum_scalar_multiplication_(u128 *output, u64 *input, u64 s)
{
output[0] += (u128)input[0] * s;
output[1] += (u128)input[1] * s;
output[2] += (u128)input[2] * s;
output[3] += (u128)input[3] * s;
output[4] += (u128)input[4] * s;
}
static __always_inline void fproduct_carry_wide_(u128 *tmp)
{
{
u32 ctr = 0;
u128 tctr = tmp[ctr];
u128 tctrp1 = tmp[ctr + 1];
u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU;
u128 c = ((tctr) >> (51));
tmp[ctr] = ((u128)(r0));
tmp[ctr + 1] = ((tctrp1) + (c));
}
{
u32 ctr = 1;
u128 tctr = tmp[ctr];
u128 tctrp1 = tmp[ctr + 1];
u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU;
u128 c = ((tctr) >> (51));
tmp[ctr] = ((u128)(r0));
tmp[ctr + 1] = ((tctrp1) + (c));
}
{
u32 ctr = 2;
u128 tctr = tmp[ctr];
u128 tctrp1 = tmp[ctr + 1];
u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU;
u128 c = ((tctr) >> (51));
tmp[ctr] = ((u128)(r0));
tmp[ctr + 1] = ((tctrp1) + (c));
}
{
u32 ctr = 3;
u128 tctr = tmp[ctr];
u128 tctrp1 = tmp[ctr + 1];
u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU;
u128 c = ((tctr) >> (51));
tmp[ctr] = ((u128)(r0));
tmp[ctr + 1] = ((tctrp1) + (c));
}
}
static __always_inline void fmul_shift_reduce(u64 *output)
{
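/* Multiply by 2^51: rotate the limbs up one position and fold the old top
 * limb back into limb 0 with the reduction factor 19.
 */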
u64 tmp = output[4];
u64 b0;
{
u32 ctr = 5 - 0 - 1;
u64 z = output[ctr - 1];
output[ctr] = z;
}
{
u32 ctr = 5 - 1 - 1;
u64 z = output[ctr - 1];
output[ctr] = z;
}
{
u32 ctr = 5 - 2 - 1;
u64 z = output[ctr - 1];
output[ctr] = z;
}
{
u32 ctr = 5 - 3 - 1;
u64 z = output[ctr - 1];
output[ctr] = z;
}
output[0] = tmp;
b0 = output[0];
output[0] = 19 * b0;
}
static __always_inline void fmul_mul_shift_reduce_(u128 *output, u64 *input,
u64 *input21)
{
u32 i;
u64 input2i;
{
u64 input2i = input21[0];
fproduct_sum_scalar_multiplication_(output, input, input2i);
fmul_shift_reduce(input);
}
{
u64 input2i = input21[1];
fproduct_sum_scalar_multiplication_(output, input, input2i);
fmul_shift_reduce(input);
}
{
u64 input2i = input21[2];
fproduct_sum_scalar_multiplication_(output, input, input2i);
fmul_shift_reduce(input);
}
{
u64 input2i = input21[3];
fproduct_sum_scalar_multiplication_(output, input, input2i);
fmul_shift_reduce(input);
}
i = 4;
input2i = input21[i];
fproduct_sum_scalar_multiplication_(output, input, input2i);
}
static __always_inline void fmul_fmul(u64 *output, u64 *input, u64 *input21)
{
u64 tmp[5] = { input[0], input[1], input[2], input[3], input[4] };
{
u128 b4;
u128 b0;
u128 b4_;
u128 b0_;
u64 i0;
u64 i1;
u64 i0_;
u64 i1_;
u128 t[5] = { 0 };
fmul_mul_shift_reduce_(t, tmp, input21);
fproduct_carry_wide_(t);
b4 = t[4];
b0 = t[0];
b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU))));
b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51))))))));
t[4] = b4_;
t[0] = b0_;
fproduct_copy_from_wide_(output, t);
i0 = output[0];
i1 = output[1];
i0_ = i0 & 0x7ffffffffffffLLU;
i1_ = i1 + (i0 >> 51);
output[0] = i0_;
output[1] = i1_;
}
}
static __always_inline void fsquare_fsquare__(u128 *tmp, u64 *output)
{
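/* Schoolbook squaring in radix 2^51: cross terms appear twice (the d*
 * doubles) and terms wrapping past 2^255 carry the reduction factor 19.
 */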
u64 r0 = output[0];
u64 r1 = output[1];
u64 r2 = output[2];
u64 r3 = output[3];
u64 r4 = output[4];
u64 d0 = r0 * 2;
u64 d1 = r1 * 2;
u64 d2 = r2 * 2 * 19;
u64 d419 = r4 * 19;
u64 d4 = d419 * 2;
u128 s0 = ((((((u128)(r0) * (r0))) + (((u128)(d4) * (r1))))) +
(((u128)(d2) * (r3))));
u128 s1 = ((((((u128)(d0) * (r1))) + (((u128)(d4) * (r2))))) +
(((u128)(r3 * 19) * (r3))));
u128 s2 = ((((((u128)(d0) * (r2))) + (((u128)(r1) * (r1))))) +
(((u128)(d4) * (r3))));
u128 s3 = ((((((u128)(d0) * (r3))) + (((u128)(d1) * (r2))))) +
(((u128)(r4) * (d419))));
u128 s4 = ((((((u128)(d0) * (r4))) + (((u128)(d1) * (r3))))) +
(((u128)(r2) * (r2))));
tmp[0] = s0;
tmp[1] = s1;
tmp[2] = s2;
tmp[3] = s3;
tmp[4] = s4;
}
static __always_inline void fsquare_fsquare_(u128 *tmp, u64 *output)
{
u128 b4;
u128 b0;
u128 b4_;
u128 b0_;
u64 i0;
u64 i1;
u64 i0_;
u64 i1_;
fsquare_fsquare__(tmp, output);
fproduct_carry_wide_(tmp);
b4 = tmp[4];
b0 = tmp[0];
b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU))));
b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51))))))));
tmp[4] = b4_;
tmp[0] = b0_;
fproduct_copy_from_wide_(output, tmp);
i0 = output[0];
i1 = output[1];
i0_ = i0 & 0x7ffffffffffffLLU;
i1_ = i1 + (i0 >> 51);
output[0] = i0_;
output[1] = i1_;
}
static __always_inline void fsquare_fsquare_times_(u64 *output, u128 *tmp,
u32 count1)
{
u32 i;
fsquare_fsquare_(tmp, output);
for (i = 1; i < count1; ++i)
fsquare_fsquare_(tmp, output);
}
static __always_inline void fsquare_fsquare_times(u64 *output, u64 *input,
u32 count1)
{
u128 t[5];
memcpy(output, input, 5 * sizeof(*input));
fsquare_fsquare_times_(output, t, count1);
}
static __always_inline void fsquare_fsquare_times_inplace(u64 *output,
u32 count1)
{
u128 t[5];
fsquare_fsquare_times_(output, t, count1);
}
static __always_inline void crecip_crecip(u64 *out, u64 *z)
{
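/* Fermat inversion: raise z to p - 2 = 2^255 - 21 with a fixed
 * square-and-multiply addition chain, keeping the operation constant time.
 */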
u64 buf[20] = { 0 };
u64 *a0 = buf;
u64 *t00 = buf + 5;
u64 *b0 = buf + 10;
u64 *t01;
u64 *b1;
u64 *c0;
u64 *a;
u64 *t0;
u64 *b;
u64 *c;
fsquare_fsquare_times(a0, z, 1);
fsquare_fsquare_times(t00, a0, 2);
fmul_fmul(b0, t00, z);
fmul_fmul(a0, b0, a0);
fsquare_fsquare_times(t00, a0, 1);
fmul_fmul(b0, t00, b0);
fsquare_fsquare_times(t00, b0, 5);
t01 = buf + 5;
b1 = buf + 10;
c0 = buf + 15;
fmul_fmul(b1, t01, b1);
fsquare_fsquare_times(t01, b1, 10);
fmul_fmul(c0, t01, b1);
fsquare_fsquare_times(t01, c0, 20);
fmul_fmul(t01, t01, c0);
fsquare_fsquare_times_inplace(t01, 10);
fmul_fmul(b1, t01, b1);
fsquare_fsquare_times(t01, b1, 50);
a = buf;
t0 = buf + 5;
b = buf + 10;
c = buf + 15;
fmul_fmul(c, t0, b);
fsquare_fsquare_times(t0, c, 100);
fmul_fmul(t0, t0, c);
fsquare_fsquare_times_inplace(t0, 50);
fmul_fmul(t0, t0, b);
fsquare_fsquare_times_inplace(t0, 5);
fmul_fmul(out, t0, a);
}
static __always_inline void fsum(u64 *a, u64 *b)
{
a[0] += b[0];
a[1] += b[1];
a[2] += b[2];
a[3] += b[3];
a[4] += b[4];
}
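/*
 * Computes a = b - a. The constants added to b's limbs below are the
 * limbs of 8*p (p = 2^255 - 19), so every per-limb difference stays
 * non-negative while the value mod p is unchanged.
 */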
static __always_inline void fdifference(u64 *a, u64 *b)
{
u64 tmp[5] = { 0 };
u64 b0;
u64 b1;
u64 b2;
u64 b3;
u64 b4;
memcpy(tmp, b, 5 * sizeof(*b));
b0 = tmp[0];
b1 = tmp[1];
b2 = tmp[2];
b3 = tmp[3];
b4 = tmp[4];
tmp[0] = b0 + 0x3fffffffffff68LLU;
tmp[1] = b1 + 0x3ffffffffffff8LLU;
tmp[2] = b2 + 0x3ffffffffffff8LLU;
tmp[3] = b3 + 0x3ffffffffffff8LLU;
tmp[4] = b4 + 0x3ffffffffffff8LLU;
{
u64 xi = a[0];
u64 yi = tmp[0];
a[0] = yi - xi;
}
{
u64 xi = a[1];
u64 yi = tmp[1];
a[1] = yi - xi;
}
{
u64 xi = a[2];
u64 yi = tmp[2];
a[2] = yi - xi;
}
{
u64 xi = a[3];
u64 yi = tmp[3];
a[3] = yi - xi;
}
{
u64 xi = a[4];
u64 yi = tmp[4];
a[4] = yi - xi;
}
}
static __always_inline void fscalar(u64 *output, u64 *b, u64 s)
{
u128 tmp[5];
u128 b4;
u128 b0;
u128 b4_;
u128 b0_;
{
u64 xi = b[0];
tmp[0] = ((u128)(xi) * (s));
}
{
u64 xi = b[1];
tmp[1] = ((u128)(xi) * (s));
}
{
u64 xi = b[2];
tmp[2] = ((u128)(xi) * (s));
}
{
u64 xi = b[3];
tmp[3] = ((u128)(xi) * (s));
}
{
u64 xi = b[4];
tmp[4] = ((u128)(xi) * (s));
}
fproduct_carry_wide_(tmp);
b4 = tmp[4];
b0 = tmp[0];
b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU))));
b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51))))))));
tmp[4] = b4_;
tmp[0] = b0_;
fproduct_copy_from_wide_(output, tmp);
}
static __always_inline void fmul(u64 *output, u64 *a, u64 *b)
{
fmul_fmul(output, a, b);
}
static __always_inline void crecip(u64 *output, u64 *input)
{
crecip_crecip(output, input);
}
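/*
 * Constant-time conditional swap of one limb: swap1 must be all-zero
 * or all-one bits, so x is either 0 or (ai ^ bi) and the exchange
 * happens without any data-dependent branch.
 */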
static __always_inline void point_swap_conditional_step(u64 *a, u64 *b,
u64 swap1, u32 ctr)
{
u32 i = ctr - 1;
u64 ai = a[i];
u64 bi = b[i];
u64 x = swap1 & (ai ^ bi);
u64 ai1 = ai ^ x;
u64 bi1 = bi ^ x;
a[i] = ai1;
b[i] = bi1;
}
static __always_inline void point_swap_conditional5(u64 *a, u64 *b, u64 swap1)
{
point_swap_conditional_step(a, b, swap1, 5);
point_swap_conditional_step(a, b, swap1, 4);
point_swap_conditional_step(a, b, swap1, 3);
point_swap_conditional_step(a, b, swap1, 2);
point_swap_conditional_step(a, b, swap1, 1);
}
static __always_inline void point_swap_conditional(u64 *a, u64 *b, u64 iswap)
{
u64 swap1 = 0 - iswap;
point_swap_conditional5(a, b, swap1);
point_swap_conditional5(a + 5, b + 5, swap1);
}
static __always_inline void point_copy(u64 *output, u64 *input)
{
memcpy(output, input, 5 * sizeof(*input));
memcpy(output + 5, input + 5, 5 * sizeof(*input));
}
static __always_inline void addanddouble_fmonty(u64 *pp, u64 *ppq, u64 *p,
u64 *pq, u64 *qmqp)
{
u64 *qx = qmqp;
u64 *x2 = pp;
u64 *z2 = pp + 5;
u64 *x3 = ppq;
u64 *z3 = ppq + 5;
u64 *x = p;
u64 *z = p + 5;
u64 *xprime = pq;
u64 *zprime = pq + 5;
u64 buf[40] = { 0 };
u64 *origx = buf;
u64 *origxprime0 = buf + 5;
u64 *xxprime0;
u64 *zzprime0;
u64 *origxprime;
xxprime0 = buf + 25;
zzprime0 = buf + 30;
memcpy(origx, x, 5 * sizeof(*x));
fsum(x, z);
fdifference(z, origx);
memcpy(origxprime0, xprime, 5 * sizeof(*xprime));
fsum(xprime, zprime);
fdifference(zprime, origxprime0);
fmul(xxprime0, xprime, z);
fmul(zzprime0, x, zprime);
origxprime = buf + 5;
{
u64 *xx0;
u64 *zz0;
u64 *xxprime;
u64 *zzprime;
u64 *zzzprime;
xx0 = buf + 15;
zz0 = buf + 20;
xxprime = buf + 25;
zzprime = buf + 30;
zzzprime = buf + 35;
memcpy(origxprime, xxprime, 5 * sizeof(*xxprime));
fsum(xxprime, zzprime);
fdifference(zzprime, origxprime);
fsquare_fsquare_times(x3, xxprime, 1);
fsquare_fsquare_times(zzzprime, zzprime, 1);
fmul(z3, zzzprime, qx);
fsquare_fsquare_times(xx0, x, 1);
fsquare_fsquare_times(zz0, z, 1);
{
u64 *zzz;
u64 *xx;
u64 *zz;
u64 scalar;
zzz = buf + 10;
xx = buf + 15;
zz = buf + 20;
fmul(x2, xx, zz);
fdifference(zz, xx);
scalar = 121665;
fscalar(zzz, zz, scalar);
fsum(zzz, xx);
fmul(z2, zzz, zz);
}
}
}
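/*
 * One Montgomery-ladder step: conditionally swap (nq, nqpq) on the
 * top bit of byt, run the combined point double-and-add (fmonty),
 * then swap back with the same bit, so identical code executes for
 * every bit of the scalar.
 */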
static __always_inline void
ladder_smallloop_cmult_small_loop_step(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2,
u64 *q, u8 byt)
{
u64 bit0 = (u64)(byt >> 7);
u64 bit;
point_swap_conditional(nq, nqpq, bit0);
addanddouble_fmonty(nq2, nqpq2, nq, nqpq, q);
bit = (u64)(byt >> 7);
point_swap_conditional(nq2, nqpq2, bit);
}
static __always_inline void
ladder_smallloop_cmult_small_loop_double_step(u64 *nq, u64 *nqpq, u64 *nq2,
u64 *nqpq2, u64 *q, u8 byt)
{
u8 byt1;
ladder_smallloop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt);
byt1 = byt << 1;
ladder_smallloop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1);
}
static __always_inline void
ladder_smallloop_cmult_small_loop(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2,
u64 *q, u8 byt, u32 i)
{
while (i--) {
ladder_smallloop_cmult_small_loop_double_step(nq, nqpq, nq2,
nqpq2, q, byt);
byt <<= 2;
}
}
static __always_inline void ladder_bigloop_cmult_big_loop(u8 *n1, u64 *nq,
u64 *nqpq, u64 *nq2,
u64 *nqpq2, u64 *q,
u32 i)
{
while (i--) {
u8 byte = n1[i];
ladder_smallloop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q,
byte, 4);
}
}
static void ladder_cmult(u64 *result, u8 *n1, u64 *q)
{
u64 point_buf[40] = { 0 };
u64 *nq = point_buf;
u64 *nqpq = point_buf + 10;
u64 *nq2 = point_buf + 20;
u64 *nqpq2 = point_buf + 30;
point_copy(nqpq, q);
nq[0] = 1;
ladder_bigloop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, 32);
point_copy(result, nq);
}
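/*
 * Unpack a 32-byte little-endian value into five 51-bit limbs. The
 * five 64-bit loads overlap on purpose: after the shift and mask,
 * each extracts the next 51 bits (bit offsets 0, 51, 102, 153, 204).
 */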
static __always_inline void format_fexpand(u64 *output, const u8 *input)
{
const u8 *x00 = input + 6;
const u8 *x01 = input + 12;
const u8 *x02 = input + 19;
const u8 *x0 = input + 24;
u64 i0, i1, i2, i3, i4, output0, output1, output2, output3, output4;
i0 = get_unaligned_le64(input);
i1 = get_unaligned_le64(x00);
i2 = get_unaligned_le64(x01);
i3 = get_unaligned_le64(x02);
i4 = get_unaligned_le64(x0);
output0 = i0 & 0x7ffffffffffffLLU;
output1 = i1 >> 3 & 0x7ffffffffffffLLU;
output2 = i2 >> 6 & 0x7ffffffffffffLLU;
output3 = i3 >> 1 & 0x7ffffffffffffLLU;
output4 = i4 >> 12 & 0x7ffffffffffffLLU;
output[0] = output0;
output[1] = output1;
output[2] = output2;
output[3] = output3;
output[4] = output4;
}
static __always_inline void format_fcontract_first_carry_pass(u64 *input)
{
u64 t0 = input[0];
u64 t1 = input[1];
u64 t2 = input[2];
u64 t3 = input[3];
u64 t4 = input[4];
u64 t1_ = t1 + (t0 >> 51);
u64 t0_ = t0 & 0x7ffffffffffffLLU;
u64 t2_ = t2 + (t1_ >> 51);
u64 t1__ = t1_ & 0x7ffffffffffffLLU;
u64 t3_ = t3 + (t2_ >> 51);
u64 t2__ = t2_ & 0x7ffffffffffffLLU;
u64 t4_ = t4 + (t3_ >> 51);
u64 t3__ = t3_ & 0x7ffffffffffffLLU;
input[0] = t0_;
input[1] = t1__;
input[2] = t2__;
input[3] = t3__;
input[4] = t4_;
}
static __always_inline void format_fcontract_first_carry_full(u64 *input)
{
format_fcontract_first_carry_pass(input);
modulo_carry_top(input);
}
static __always_inline void format_fcontract_second_carry_pass(u64 *input)
{
u64 t0 = input[0];
u64 t1 = input[1];
u64 t2 = input[2];
u64 t3 = input[3];
u64 t4 = input[4];
u64 t1_ = t1 + (t0 >> 51);
u64 t0_ = t0 & 0x7ffffffffffffLLU;
u64 t2_ = t2 + (t1_ >> 51);
u64 t1__ = t1_ & 0x7ffffffffffffLLU;
u64 t3_ = t3 + (t2_ >> 51);
u64 t2__ = t2_ & 0x7ffffffffffffLLU;
u64 t4_ = t4 + (t3_ >> 51);
u64 t3__ = t3_ & 0x7ffffffffffffLLU;
input[0] = t0_;
input[1] = t1__;
input[2] = t2__;
input[3] = t3__;
input[4] = t4_;
}
static __always_inline void format_fcontract_second_carry_full(u64 *input)
{
u64 i0;
u64 i1;
u64 i0_;
u64 i1_;
format_fcontract_second_carry_pass(input);
modulo_carry_top(input);
i0 = input[0];
i1 = input[1];
i0_ = i0 & 0x7ffffffffffffLLU;
i1_ = i1 + (i0 >> 51);
input[0] = i0_;
input[1] = i1_;
}
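/*
 * Final reduction: if the value is >= p = 2^255 - 19 (limb 0 at least
 * 2^51 - 19 and all higher limbs equal to 2^51 - 1), subtract p once,
 * using masks instead of branches, so the result is fully reduced
 * before serialization.
 */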
static __always_inline void format_fcontract_trim(u64 *input)
{
u64 a0 = input[0];
u64 a1 = input[1];
u64 a2 = input[2];
u64 a3 = input[3];
u64 a4 = input[4];
u64 mask0 = u64_gte_mask(a0, 0x7ffffffffffedLLU);
u64 mask1 = u64_eq_mask(a1, 0x7ffffffffffffLLU);
u64 mask2 = u64_eq_mask(a2, 0x7ffffffffffffLLU);
u64 mask3 = u64_eq_mask(a3, 0x7ffffffffffffLLU);
u64 mask4 = u64_eq_mask(a4, 0x7ffffffffffffLLU);
u64 mask = (((mask0 & mask1) & mask2) & mask3) & mask4;
u64 a0_ = a0 - (0x7ffffffffffedLLU & mask);
u64 a1_ = a1 - (0x7ffffffffffffLLU & mask);
u64 a2_ = a2 - (0x7ffffffffffffLLU & mask);
u64 a3_ = a3 - (0x7ffffffffffffLLU & mask);
u64 a4_ = a4 - (0x7ffffffffffffLLU & mask);
input[0] = a0_;
input[1] = a1_;
input[2] = a2_;
input[3] = a3_;
input[4] = a4_;
}
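/*
 * Pack five 51-bit limbs into four little-endian 64-bit words: each
 * output word combines the remaining high bits of one limb with the
 * low bits of the next (5 * 51 = 255 bits plus a zero top bit).
 */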
static __always_inline void format_fcontract_store(u8 *output, u64 *input)
{
u64 t0 = input[0];
u64 t1 = input[1];
u64 t2 = input[2];
u64 t3 = input[3];
u64 t4 = input[4];
u64 o0 = t1 << 51 | t0;
u64 o1 = t2 << 38 | t1 >> 13;
u64 o2 = t3 << 25 | t2 >> 26;
u64 o3 = t4 << 12 | t3 >> 39;
u8 *b0 = output;
u8 *b1 = output + 8;
u8 *b2 = output + 16;
u8 *b3 = output + 24;
put_unaligned_le64(o0, b0);
put_unaligned_le64(o1, b1);
put_unaligned_le64(o2, b2);
put_unaligned_le64(o3, b3);
}
static __always_inline void format_fcontract(u8 *output, u64 *input)
{
format_fcontract_first_carry_full(input);
format_fcontract_second_carry_full(input);
format_fcontract_trim(input);
format_fcontract_store(output, input);
}
static __always_inline void format_scalar_of_point(u8 *scalar, u64 *point)
{
u64 *x = point;
u64 *z = point + 5;
u64 buf[10] __aligned(32) = { 0 };
u64 *zmone = buf;
u64 *sc = buf + 5;
crecip(zmone, z);
fmul(sc, x, zmone);
format_fcontract(scalar, sc);
}
void curve25519_generic(u8 mypublic[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE],
const u8 basepoint[CURVE25519_KEY_SIZE])
{
u64 buf0[10] __aligned(32) = { 0 };
u64 *x0 = buf0;
u64 *z = buf0 + 5;
u64 *q;
format_fexpand(x0, basepoint);
z[0] = 1;
q = buf0;
{
u8 e[32] __aligned(32) = { 0 };
u8 *scalar;
memcpy(e, secret, 32);
curve25519_clamp_secret(e);
scalar = e;
{
u64 buf[15] = { 0 };
u64 *nq = buf;
u64 *x = nq;
x[0] = 1;
ladder_cmult(nq, scalar, q);
format_scalar_of_point(mypublic, nq);
memzero_explicit(buf, sizeof(buf));
}
memzero_explicit(e, sizeof(e));
}
memzero_explicit(buf0, sizeof(buf0));
}
| linux-master | lib/crypto/curve25519-hacl64.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* mpi-pow.c - MPI functions
* Copyright (C) 1994, 1996, 1998, 2000 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
* The GNU MP Library itself is published under the LGPL;
* however I decided to publish this code under the plain GPL.
*/
#include <linux/sched.h>
#include <linux/string.h>
#include "mpi-internal.h"
#include "longlong.h"
/****************
* RES = BASE ^ EXP mod MOD
*/
int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
{
mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL;
struct karatsuba_ctx karactx = {};
mpi_ptr_t xp_marker = NULL;
mpi_ptr_t tspace = NULL;
mpi_ptr_t rp, ep, mp, bp;
mpi_size_t esize, msize, bsize, rsize;
int msign, bsign, rsign;
mpi_size_t size;
int mod_shift_cnt;
int negative_result;
int assign_rp = 0;
mpi_size_t tsize = 0; /* to avoid compiler warning */
/* fixme: we should check that the warning is void */
int rc = -ENOMEM;
esize = exp->nlimbs;
msize = mod->nlimbs;
size = 2 * msize;
msign = mod->sign;
rp = res->d;
ep = exp->d;
if (!msize)
return -EINVAL;
if (!esize) {
/* Exponent is zero, result is 1 mod MOD, i.e. 1 or 0
 * depending on whether MOD equals 1. */
res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
if (res->nlimbs) {
if (mpi_resize(res, 1) < 0)
goto enomem;
rp = res->d;
rp[0] = 1;
}
res->sign = 0;
goto leave;
}
/* Normalize MOD (i.e. make its most significant bit set) as required by
* mpn_divrem. This will make the intermediate values in the calculation
* slightly larger, but the correct result is obtained after a final
* reduction using the original MOD value. */
mp = mp_marker = mpi_alloc_limb_space(msize);
if (!mp)
goto enomem;
mod_shift_cnt = count_leading_zeros(mod->d[msize - 1]);
if (mod_shift_cnt)
mpihelp_lshift(mp, mod->d, msize, mod_shift_cnt);
else
MPN_COPY(mp, mod->d, msize);
bsize = base->nlimbs;
bsign = base->sign;
if (bsize > msize) { /* The base is larger than the module. Reduce it. */
/* Allocate (BSIZE + 1) with space for remainder and quotient.
* (The quotient is (bsize - msize + 1) limbs.) */
bp = bp_marker = mpi_alloc_limb_space(bsize + 1);
if (!bp)
goto enomem;
MPN_COPY(bp, base->d, bsize);
/* We don't care about the quotient, store it above the remainder,
* at BP + MSIZE. */
mpihelp_divrem(bp + msize, 0, bp, bsize, mp, msize);
bsize = msize;
/* Canonicalize the base, since we are going to multiply with it
* quite a few times. */
MPN_NORMALIZE(bp, bsize);
} else
bp = base->d;
if (!bsize) {
res->nlimbs = 0;
res->sign = 0;
goto leave;
}
if (res->alloced < size) {
/* We have to allocate more space for RES. If any of the input
* parameters are identical to RES, defer deallocation of the old
* space. */
if (rp == ep || rp == mp || rp == bp) {
rp = mpi_alloc_limb_space(size);
if (!rp)
goto enomem;
assign_rp = 1;
} else {
if (mpi_resize(res, size) < 0)
goto enomem;
rp = res->d;
}
} else { /* Make BASE, EXP and MOD not overlap with RES. */
if (rp == bp) {
/* RES and BASE are identical. Allocate temp. space for BASE. */
BUG_ON(bp_marker);
bp = bp_marker = mpi_alloc_limb_space(bsize);
if (!bp)
goto enomem;
MPN_COPY(bp, rp, bsize);
}
if (rp == ep) {
/* RES and EXP are identical. Allocate temp. space for EXP. */
ep = ep_marker = mpi_alloc_limb_space(esize);
if (!ep)
goto enomem;
MPN_COPY(ep, rp, esize);
}
if (rp == mp) {
/* RES and MOD are identical. Allocate temporary space for MOD. */
BUG_ON(mp_marker);
mp = mp_marker = mpi_alloc_limb_space(msize);
if (!mp)
goto enomem;
MPN_COPY(mp, rp, msize);
}
}
MPN_COPY(rp, bp, bsize);
rsize = bsize;
rsign = bsign;
{
mpi_size_t i;
mpi_ptr_t xp;
int c;
mpi_limb_t e;
mpi_limb_t carry_limb;
xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1));
if (!xp)
goto enomem;
negative_result = (ep[0] & 1) && base->sign;
i = esize - 1;
e = ep[i];
c = count_leading_zeros(e);
e = (e << c) << 1; /* shift the exp bits to the left, lose msb */
c = BITS_PER_MPI_LIMB - 1 - c;
/* Main loop.
*
* Make the result be pointed to alternately by XP and RP. This
* helps us avoid block copying, which would otherwise be necessary
* with the overlap restrictions of mpihelp_divrem. With 50% probability
* the result after this loop will be in the area originally pointed
* by RP (==RES->d), and with 50% probability in the area originally
* pointed to by XP.
*/
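/*
 * Classic left-to-right binary exponentiation: E holds the remaining
 * exponent bits left-aligned in the limb. Each iteration squares the
 * accumulator and, when the top bit of E is set (the signed
 * comparison below), also multiplies by the base.
 */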
for (;;) {
while (c) {
mpi_ptr_t tp;
mpi_size_t xsize;
/*if (mpihelp_mul_n(xp, rp, rp, rsize) < 0) goto enomem */
if (rsize < KARATSUBA_THRESHOLD)
mpih_sqr_n_basecase(xp, rp, rsize);
else {
if (!tspace) {
tsize = 2 * rsize;
tspace =
mpi_alloc_limb_space(tsize);
if (!tspace)
goto enomem;
} else if (tsize < (2 * rsize)) {
mpi_free_limb_space(tspace);
tsize = 2 * rsize;
tspace =
mpi_alloc_limb_space(tsize);
if (!tspace)
goto enomem;
}
mpih_sqr_n(xp, rp, rsize, tspace);
}
xsize = 2 * rsize;
if (xsize > msize) {
mpihelp_divrem(xp + msize, 0, xp, xsize,
mp, msize);
xsize = msize;
}
tp = rp;
rp = xp;
xp = tp;
rsize = xsize;
if ((mpi_limb_signed_t) e < 0) {
/*mpihelp_mul( xp, rp, rsize, bp, bsize ); */
if (bsize < KARATSUBA_THRESHOLD) {
mpi_limb_t tmp;
if (mpihelp_mul
(xp, rp, rsize, bp, bsize,
&tmp) < 0)
goto enomem;
} else {
if (mpihelp_mul_karatsuba_case
(xp, rp, rsize, bp, bsize,
&karactx) < 0)
goto enomem;
}
xsize = rsize + bsize;
if (xsize > msize) {
mpihelp_divrem(xp + msize, 0,
xp, xsize, mp,
msize);
xsize = msize;
}
tp = rp;
rp = xp;
xp = tp;
rsize = xsize;
}
e <<= 1;
c--;
cond_resched();
}
i--;
if (i < 0)
break;
e = ep[i];
c = BITS_PER_MPI_LIMB;
}
/* We shifted MOD, the modulo reduction argument, left MOD_SHIFT_CNT
* steps. Adjust the result by reducing it with the original MOD.
*
* Also make sure the result is put in RES->d (where it already
* might be, see above).
*/
if (mod_shift_cnt) {
carry_limb =
mpihelp_lshift(res->d, rp, rsize, mod_shift_cnt);
rp = res->d;
if (carry_limb) {
rp[rsize] = carry_limb;
rsize++;
}
} else {
MPN_COPY(res->d, rp, rsize);
rp = res->d;
}
if (rsize >= msize) {
mpihelp_divrem(rp + msize, 0, rp, rsize, mp, msize);
rsize = msize;
}
/* Remove any leading zero words from the result. */
if (mod_shift_cnt)
mpihelp_rshift(rp, rp, rsize, mod_shift_cnt);
MPN_NORMALIZE(rp, rsize);
}
if (negative_result && rsize) {
if (mod_shift_cnt)
mpihelp_rshift(mp, mp, msize, mod_shift_cnt);
mpihelp_sub(rp, mp, msize, rp, rsize);
rsize = msize;
rsign = msign;
MPN_NORMALIZE(rp, rsize);
}
res->nlimbs = rsize;
res->sign = rsign;
leave:
rc = 0;
enomem:
mpihelp_release_karatsuba_ctx(&karactx);
if (assign_rp)
mpi_assign_limb_space(res, rp, size);
if (mp_marker)
mpi_free_limb_space(mp_marker);
if (bp_marker)
mpi_free_limb_space(bp_marker);
if (ep_marker)
mpi_free_limb_space(ep_marker);
if (xp_marker)
mpi_free_limb_space(xp_marker);
if (tspace)
mpi_free_limb_space(tspace);
return rc;
}
EXPORT_SYMBOL_GPL(mpi_powm);
| linux-master | lib/crypto/mpi/mpi-pow.c |
/* mpicoder.c - Coder for the external representation of MPIs
* Copyright (C) 1998, 1999 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* GnuPG is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* GnuPG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include <linux/bitops.h>
#include <linux/count_zeros.h>
#include <linux/byteorder/generic.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include "mpi-internal.h"
#define MAX_EXTERN_SCAN_BYTES (16*1024*1024)
#define MAX_EXTERN_MPI_BITS 16384
/**
* mpi_read_raw_data - Read a raw byte stream as a positive integer
* @xbuffer: The data to read
* @nbytes: The amount of data to read
*/
MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes)
{
const uint8_t *buffer = xbuffer;
int i, j;
unsigned nbits, nlimbs;
mpi_limb_t a;
MPI val = NULL;
while (nbytes > 0 && buffer[0] == 0) {
buffer++;
nbytes--;
}
nbits = nbytes * 8;
if (nbits > MAX_EXTERN_MPI_BITS) {
pr_info("MPI: mpi too large (%u bits)\n", nbits);
return NULL;
}
if (nbytes > 0)
nbits -= count_leading_zeros(buffer[0]) - (BITS_PER_LONG - 8);
nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
val = mpi_alloc(nlimbs);
if (!val)
return NULL;
val->nbits = nbits;
val->sign = 0;
val->nlimbs = nlimbs;
if (nbytes > 0) {
i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
i %= BYTES_PER_MPI_LIMB;
for (j = nlimbs; j > 0; j--) {
a = 0;
for (; i < BYTES_PER_MPI_LIMB; i++) {
a <<= 8;
a |= *buffer++;
}
i = 0;
val->d[j - 1] = a;
}
}
return val;
}
EXPORT_SYMBOL_GPL(mpi_read_raw_data);
MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread)
{
const uint8_t *buffer = xbuffer;
unsigned int nbits, nbytes;
MPI val;
if (*ret_nread < 2)
return ERR_PTR(-EINVAL);
nbits = buffer[0] << 8 | buffer[1];
if (nbits > MAX_EXTERN_MPI_BITS) {
pr_info("MPI: mpi too large (%u bits)\n", nbits);
return ERR_PTR(-EINVAL);
}
nbytes = DIV_ROUND_UP(nbits, 8);
if (nbytes + 2 > *ret_nread) {
pr_info("MPI: mpi larger than buffer nbytes=%u ret_nread=%u\n",
nbytes, *ret_nread);
return ERR_PTR(-EINVAL);
}
val = mpi_read_raw_data(buffer + 2, nbytes);
if (!val)
return ERR_PTR(-ENOMEM);
*ret_nread = nbytes + 2;
return val;
}
EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
/****************
* Fill the mpi VAL from the hex string in STR.
*/
int mpi_fromstr(MPI val, const char *str)
{
int sign = 0;
int prepend_zero = 0;
int i, j, c, c1, c2;
unsigned int nbits, nbytes, nlimbs;
mpi_limb_t a;
if (*str == '-') {
sign = 1;
str++;
}
/* Skip optional hex prefix. */
if (*str == '0' && str[1] == 'x')
str += 2;
nbits = strlen(str);
if (nbits > MAX_EXTERN_SCAN_BYTES) {
mpi_clear(val);
return -EINVAL;
}
nbits *= 4;
if ((nbits % 8))
prepend_zero = 1;
nbytes = (nbits+7) / 8;
nlimbs = (nbytes+BYTES_PER_MPI_LIMB-1) / BYTES_PER_MPI_LIMB;
if (val->alloced < nlimbs)
mpi_resize(val, nlimbs);
i = BYTES_PER_MPI_LIMB - (nbytes % BYTES_PER_MPI_LIMB);
i %= BYTES_PER_MPI_LIMB;
j = val->nlimbs = nlimbs;
val->sign = sign;
for (; j > 0; j--) {
a = 0;
for (; i < BYTES_PER_MPI_LIMB; i++) {
if (prepend_zero) {
c1 = '0';
prepend_zero = 0;
} else
c1 = *str++;
if (!c1) {
mpi_clear(val);
return -EINVAL;
}
c2 = *str++;
if (!c2) {
mpi_clear(val);
return -EINVAL;
}
if (c1 >= '0' && c1 <= '9')
c = c1 - '0';
else if (c1 >= 'a' && c1 <= 'f')
c = c1 - 'a' + 10;
else if (c1 >= 'A' && c1 <= 'F')
c = c1 - 'A' + 10;
else {
mpi_clear(val);
return -EINVAL;
}
c <<= 4;
if (c2 >= '0' && c2 <= '9')
c |= c2 - '0';
else if (c2 >= 'a' && c2 <= 'f')
c |= c2 - 'a' + 10;
else if (c2 >= 'A' && c2 <= 'F')
c |= c2 - 'A' + 10;
else {
mpi_clear(val);
return -EINVAL;
}
a <<= 8;
a |= c;
}
i = 0;
val->d[j-1] = a;
}
return 0;
}
EXPORT_SYMBOL_GPL(mpi_fromstr);
MPI mpi_scanval(const char *string)
{
MPI a;
a = mpi_alloc(0);
if (!a)
return NULL;
if (mpi_fromstr(a, string)) {
mpi_free(a);
return NULL;
}
mpi_normalize(a);
return a;
}
EXPORT_SYMBOL_GPL(mpi_scanval);
static int count_lzeros(MPI a)
{
mpi_limb_t alimb;
int i, lzeros = 0;
for (i = a->nlimbs - 1; i >= 0; i--) {
alimb = a->d[i];
if (alimb == 0) {
lzeros += sizeof(mpi_limb_t);
} else {
lzeros += count_leading_zeros(alimb) / 8;
break;
}
}
return lzeros;
}
/**
* mpi_read_buffer() - read MPI to a buffer provided by user (msb first)
*
* @a: a multi precision integer
* @buf: buffer to which the output will be written to. Needs to be at
* least mpi_get_size(a) long.
* @buf_len: size of the buf.
* @nbytes: receives the actual length of the data written on success and
* the data to-be-written on -EOVERFLOW in case buf_len was too
* small.
* @sign: if not NULL, it will be set to the sign of a.
*
* Return: 0 on success or error code in case of error
*/
int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
int *sign)
{
uint8_t *p;
#if BYTES_PER_MPI_LIMB == 4
__be32 alimb;
#elif BYTES_PER_MPI_LIMB == 8
__be64 alimb;
#else
#error please implement for this limb size.
#endif
unsigned int n = mpi_get_size(a);
int i, lzeros;
if (!buf || !nbytes)
return -EINVAL;
if (sign)
*sign = a->sign;
lzeros = count_lzeros(a);
if (buf_len < n - lzeros) {
*nbytes = n - lzeros;
return -EOVERFLOW;
}
p = buf;
*nbytes = n - lzeros;
for (i = a->nlimbs - 1 - lzeros / BYTES_PER_MPI_LIMB,
lzeros %= BYTES_PER_MPI_LIMB;
i >= 0; i--) {
#if BYTES_PER_MPI_LIMB == 4
alimb = cpu_to_be32(a->d[i]);
#elif BYTES_PER_MPI_LIMB == 8
alimb = cpu_to_be64(a->d[i]);
#else
#error please implement for this limb size.
#endif
memcpy(p, (u8 *)&alimb + lzeros, BYTES_PER_MPI_LIMB - lzeros);
p += BYTES_PER_MPI_LIMB - lzeros;
lzeros = 0;
}
return 0;
}
EXPORT_SYMBOL_GPL(mpi_read_buffer);
/*
* mpi_get_buffer() - Returns an allocated buffer with the MPI (msb first).
* Caller must free the return string.
* This function does return a 0 byte buffer with nbytes set to zero if the
* value of A is zero.
*
* @a: a multi precision integer.
* @nbytes: receives the length of this buffer.
* @sign: if not NULL, it will be set to the sign of the a.
*
* Return: Pointer to MPI buffer or NULL on error
*/
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
{
uint8_t *buf;
unsigned int n;
int ret;
if (!nbytes)
return NULL;
n = mpi_get_size(a);
if (!n)
n++;
buf = kmalloc(n, GFP_KERNEL);
if (!buf)
return NULL;
ret = mpi_read_buffer(a, buf, n, nbytes, sign);
if (ret) {
kfree(buf);
return NULL;
}
return buf;
}
EXPORT_SYMBOL_GPL(mpi_get_buffer);
/**
* mpi_write_to_sgl() - Function exports MPI to an sgl (msb first)
*
* This function works in the same way as mpi_read_buffer, but it
* takes an sgl instead of u8 * buf.
*
* @a: a multi precision integer
* @sgl: scatterlist to write to. Needs to be at least
* mpi_get_size(a) long.
* @nbytes: the number of bytes to write. Leading bytes will be
* filled with zero.
* @sign: if not NULL, it will be set to the sign of a.
*
* Return: 0 on success or error code in case of error
*/
int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned nbytes,
int *sign)
{
u8 *p, *p2;
#if BYTES_PER_MPI_LIMB == 4
__be32 alimb;
#elif BYTES_PER_MPI_LIMB == 8
__be64 alimb;
#else
#error please implement for this limb size.
#endif
unsigned int n = mpi_get_size(a);
struct sg_mapping_iter miter;
int i, x, buf_len;
int nents;
if (sign)
*sign = a->sign;
if (nbytes < n)
return -EOVERFLOW;
nents = sg_nents_for_len(sgl, nbytes);
if (nents < 0)
return -EINVAL;
sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);
sg_miter_next(&miter);
buf_len = miter.length;
p2 = miter.addr;
while (nbytes > n) {
i = min_t(unsigned, nbytes - n, buf_len);
memset(p2, 0, i);
p2 += i;
nbytes -= i;
buf_len -= i;
if (!buf_len) {
sg_miter_next(&miter);
buf_len = miter.length;
p2 = miter.addr;
}
}
for (i = a->nlimbs - 1; i >= 0; i--) {
#if BYTES_PER_MPI_LIMB == 4
alimb = a->d[i] ? cpu_to_be32(a->d[i]) : 0;
#elif BYTES_PER_MPI_LIMB == 8
alimb = a->d[i] ? cpu_to_be64(a->d[i]) : 0;
#else
#error please implement for this limb size.
#endif
p = (u8 *)&alimb;
for (x = 0; x < sizeof(alimb); x++) {
*p2++ = *p++;
if (!--buf_len) {
sg_miter_next(&miter);
buf_len = miter.length;
p2 = miter.addr;
}
}
}
sg_miter_stop(&miter);
return 0;
}
EXPORT_SYMBOL_GPL(mpi_write_to_sgl);
/*
* mpi_read_raw_from_sgl() - Function allocates an MPI and populates it with
* data from the sgl
*
* This function works in the same way as mpi_read_raw_data, but it
* takes an sgl instead of a void * buffer, i.e. it allocates
* a new MPI and reads the content of the sgl to the MPI.
*
* @sgl: scatterlist to read from
* @nbytes: number of bytes to read
*
* Return: Pointer to a new MPI or NULL on error
*/
MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
{
struct sg_mapping_iter miter;
unsigned int nbits, nlimbs;
int x, j, z, lzeros, ents;
unsigned int len;
const u8 *buff;
mpi_limb_t a;
MPI val = NULL;
ents = sg_nents_for_len(sgl, nbytes);
if (ents < 0)
return NULL;
sg_miter_start(&miter, sgl, ents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
lzeros = 0;
len = 0;
while (nbytes > 0) {
while (len && !*buff) {
lzeros++;
len--;
buff++;
}
if (len && *buff)
break;
sg_miter_next(&miter);
buff = miter.addr;
len = miter.length;
nbytes -= lzeros;
lzeros = 0;
}
miter.consumed = lzeros;
nbytes -= lzeros;
nbits = nbytes * 8;
if (nbits > MAX_EXTERN_MPI_BITS) {
sg_miter_stop(&miter);
pr_info("MPI: mpi too large (%u bits)\n", nbits);
return NULL;
}
if (nbytes > 0)
nbits -= count_leading_zeros(*buff) - (BITS_PER_LONG - 8);
sg_miter_stop(&miter);
nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
val = mpi_alloc(nlimbs);
if (!val)
return NULL;
val->nbits = nbits;
val->sign = 0;
val->nlimbs = nlimbs;
if (nbytes == 0)
return val;
j = nlimbs - 1;
a = 0;
z = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
z %= BYTES_PER_MPI_LIMB;
while (sg_miter_next(&miter)) {
buff = miter.addr;
len = min_t(unsigned, miter.length, nbytes);
nbytes -= len;
for (x = 0; x < len; x++) {
a <<= 8;
a |= *buff++;
if (((z + x + 1) % BYTES_PER_MPI_LIMB) == 0) {
val->d[j--] = a;
a = 0;
}
}
z += x;
}
return val;
}
EXPORT_SYMBOL_GPL(mpi_read_raw_from_sgl);
/* Perform a two's complement operation on buffer P of size N bytes. */
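/*
 * Scanning from the least significant end, the lowest non-zero byte
 * is negated in a way that preserves its lowest set bit and the
 * trailing zero bits (x -> ~x + 1), and every more significant byte
 * is simply inverted.
 */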
static void twocompl(unsigned char *p, unsigned int n)
{
int i;
for (i = n-1; i >= 0 && !p[i]; i--)
;
if (i >= 0) {
if ((p[i] & 0x01))
p[i] = (((p[i] ^ 0xfe) | 0x01) & 0xff);
else if ((p[i] & 0x02))
p[i] = (((p[i] ^ 0xfc) | 0x02) & 0xfe);
else if ((p[i] & 0x04))
p[i] = (((p[i] ^ 0xf8) | 0x04) & 0xfc);
else if ((p[i] & 0x08))
p[i] = (((p[i] ^ 0xf0) | 0x08) & 0xf8);
else if ((p[i] & 0x10))
p[i] = (((p[i] ^ 0xe0) | 0x10) & 0xf0);
else if ((p[i] & 0x20))
p[i] = (((p[i] ^ 0xc0) | 0x20) & 0xe0);
else if ((p[i] & 0x40))
p[i] = (((p[i] ^ 0x80) | 0x40) & 0xc0);
else
p[i] = 0x80;
for (i--; i >= 0; i--)
p[i] ^= 0xff;
}
}
int mpi_print(enum gcry_mpi_format format, unsigned char *buffer,
size_t buflen, size_t *nwritten, MPI a)
{
unsigned int nbits = mpi_get_nbits(a);
size_t len;
size_t dummy_nwritten;
int negative;
if (!nwritten)
nwritten = &dummy_nwritten;
/* Libgcrypt does not always care to clear the sign if the value
 * is 0. For printing this is a bit of a surprise, in particular
 * because some of the formats don't support negative numbers but
 * should still be able to print a zero. Thus we need this extra
 * test for a negative number.
 */
if (a->sign && mpi_cmp_ui(a, 0))
negative = 1;
else
negative = 0;
len = buflen;
*nwritten = 0;
if (format == GCRYMPI_FMT_STD) {
unsigned char *tmp;
int extra = 0;
unsigned int n;
tmp = mpi_get_buffer(a, &n, NULL);
if (!tmp)
return -EINVAL;
if (negative) {
twocompl(tmp, n);
if (!(*tmp & 0x80)) {
/* Need to extend the sign. */
n++;
extra = 2;
}
} else if (n && (*tmp & 0x80)) {
/* Positive but the high bit of the returned buffer is set.
* Thus we need to print an extra leading 0x00 so that the
* output is interpreted as a positive number.
*/
n++;
extra = 1;
}
if (buffer && n > len) {
/* The provided buffer is too short. */
kfree(tmp);
return -E2BIG;
}
if (buffer) {
unsigned char *s = buffer;
if (extra == 1)
*s++ = 0;
else if (extra)
*s++ = 0xff;
memcpy(s, tmp, n-!!extra);
}
kfree(tmp);
*nwritten = n;
return 0;
} else if (format == GCRYMPI_FMT_USG) {
unsigned int n = (nbits + 7)/8;
/* Note: We ignore the sign for this format. */
/* FIXME: for performance reasons we should put this into
* mpi_aprint because we can then use the buffer directly.
*/
if (buffer && n > len)
return -E2BIG;
if (buffer) {
unsigned char *tmp;
tmp = mpi_get_buffer(a, &n, NULL);
if (!tmp)
return -EINVAL;
memcpy(buffer, tmp, n);
kfree(tmp);
}
*nwritten = n;
return 0;
} else if (format == GCRYMPI_FMT_PGP) {
unsigned int n = (nbits + 7)/8;
/* The PGP format can only handle unsigned integers. */
if (negative)
return -EINVAL;
if (buffer && n+2 > len)
return -E2BIG;
if (buffer) {
unsigned char *tmp;
unsigned char *s = buffer;
s[0] = nbits >> 8;
s[1] = nbits;
tmp = mpi_get_buffer(a, &n, NULL);
if (!tmp)
return -EINVAL;
memcpy(s+2, tmp, n);
kfree(tmp);
}
*nwritten = n+2;
return 0;
} else if (format == GCRYMPI_FMT_SSH) {
unsigned char *tmp;
int extra = 0;
unsigned int n;
tmp = mpi_get_buffer(a, &n, NULL);
if (!tmp)
return -EINVAL;
if (negative) {
twocompl(tmp, n);
if (!(*tmp & 0x80)) {
/* Need to extend the sign. */
n++;
extra = 2;
}
} else if (n && (*tmp & 0x80)) {
n++;
extra = 1;
}
if (buffer && n+4 > len) {
kfree(tmp);
return -E2BIG;
}
if (buffer) {
unsigned char *s = buffer;
*s++ = n >> 24;
*s++ = n >> 16;
*s++ = n >> 8;
*s++ = n;
if (extra == 1)
*s++ = 0;
else if (extra)
*s++ = 0xff;
memcpy(s, tmp, n-!!extra);
}
kfree(tmp);
*nwritten = 4+n;
return 0;
} else if (format == GCRYMPI_FMT_HEX) {
unsigned char *tmp;
int i;
int extra = 0;
unsigned int n = 0;
tmp = mpi_get_buffer(a, &n, NULL);
if (!tmp)
return -EINVAL;
if (!n || (*tmp & 0x80))
extra = 2;
if (buffer && 2*n + extra + negative + 1 > len) {
kfree(tmp);
return -E2BIG;
}
if (buffer) {
unsigned char *s = buffer;
if (negative)
*s++ = '-';
if (extra) {
*s++ = '0';
*s++ = '0';
}
for (i = 0; i < n; i++) {
unsigned int c = tmp[i];
*s++ = (c >> 4) < 10 ? '0'+(c>>4) : 'A'+(c>>4)-10;
c &= 15;
*s++ = c < 10 ? '0'+c : 'A'+c-10;
}
*s++ = 0;
*nwritten = s - buffer;
} else {
*nwritten = 2*n + extra + negative + 1;
}
kfree(tmp);
return 0;
} else
return -EINVAL;
}
EXPORT_SYMBOL_GPL(mpi_print);
| linux-master | lib/crypto/mpi/mpicoder.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* mpi-sub-ui.c - Subtract an unsigned integer from an MPI.
*
* Copyright 1991, 1993, 1994, 1996, 1999-2002, 2004, 2012, 2013, 2015
* Free Software Foundation, Inc.
*
* This file was based on the GNU MP Library source file:
* https://gmplib.org/repo/gmp-6.2/file/510b83519d1c/mpz/aors_ui.h
*
* The GNU MP Library is free software; you can redistribute it and/or modify
* it under the terms of either:
*
* * the GNU Lesser General Public License as published by the Free
* Software Foundation; either version 3 of the License, or (at your
* option) any later version.
*
* or
*
* * the GNU General Public License as published by the Free Software
* Foundation; either version 2 of the License, or (at your option) any
* later version.
*
* or both in parallel, as here.
*
* The GNU MP Library is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received copies of the GNU General Public License and the
* GNU Lesser General Public License along with the GNU MP Library. If not,
* see https://www.gnu.org/licenses/.
*/
#include "mpi-internal.h"
int mpi_sub_ui(MPI w, MPI u, unsigned long vval)
{
if (u->nlimbs == 0) {
if (mpi_resize(w, 1) < 0)
return -ENOMEM;
w->d[0] = vval;
w->nlimbs = (vval != 0);
w->sign = (vval != 0);
return 0;
}
/* If not space for W (and possible carry), increase space. */
if (mpi_resize(w, u->nlimbs + 1))
return -ENOMEM;
if (u->sign) {
mpi_limb_t cy;
cy = mpihelp_add_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval);
w->d[u->nlimbs] = cy;
w->nlimbs = u->nlimbs + cy;
w->sign = 1;
} else {
/* The signs are different. Need exact comparison to determine
* which operand to subtract from which.
*/
if (u->nlimbs == 1 && u->d[0] < vval) {
w->d[0] = vval - u->d[0];
w->nlimbs = 1;
w->sign = 1;
} else {
mpihelp_sub_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval);
/* Size can decrease with at most one limb. */
w->nlimbs = (u->nlimbs - (w->d[u->nlimbs - 1] == 0));
w->sign = 0;
}
}
mpi_normalize(w);
return 0;
}
EXPORT_SYMBOL_GPL(mpi_sub_ui);
| linux-master | lib/crypto/mpi/mpi-sub-ui.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* mpihelp-div.c - MPI helper functions
* Copyright (C) 1994, 1996 Free Software Foundation, Inc.
* Copyright (C) 1998, 1999 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
* The GNU MP Library itself is published under the LGPL;
* however I decided to publish this code under the plain GPL.
*/
#include "mpi-internal.h"
#include "longlong.h"
#ifndef UMUL_TIME
#define UMUL_TIME 1
#endif
#ifndef UDIV_TIME
#define UDIV_TIME UMUL_TIME
#endif
mpi_limb_t
mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
mpi_limb_t divisor_limb)
{
mpi_size_t i;
mpi_limb_t n1, n0, r;
mpi_limb_t dummy __maybe_unused;
/* Botch: Should this be handled at all? Rely on callers? */
if (!dividend_size)
return 0;
/* If multiplication is much faster than division, and the
* dividend is large, pre-invert the divisor, and use
* only multiplications in the inner loop.
*
* This test should be read:
* Does it ever help to use udiv_qrnnd_preinv?
* && Does what we save compensate for the inversion overhead?
*/
if (UDIV_TIME > (2 * UMUL_TIME + 6)
&& (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
int normalization_steps;
normalization_steps = count_leading_zeros(divisor_limb);
if (normalization_steps) {
mpi_limb_t divisor_limb_inverted;
divisor_limb <<= normalization_steps;
/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
* result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
* most significant bit (with weight 2**N) implicit.
*
* Special case for DIVISOR_LIMB == 100...000.
*/
if (!(divisor_limb << 1))
divisor_limb_inverted = ~(mpi_limb_t)0;
else
udiv_qrnnd(divisor_limb_inverted, dummy,
-divisor_limb, 0, divisor_limb);
n1 = dividend_ptr[dividend_size - 1];
r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
/* Possible optimization:
* if (r == 0
* && divisor_limb > ((n1 << normalization_steps)
* | (dividend_ptr[dividend_size - 2] >> ...)))
* ...one division less...
*/
for (i = dividend_size - 2; i >= 0; i--) {
n0 = dividend_ptr[i];
UDIV_QRNND_PREINV(dummy, r, r,
((n1 << normalization_steps)
| (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
divisor_limb, divisor_limb_inverted);
n1 = n0;
}
UDIV_QRNND_PREINV(dummy, r, r,
n1 << normalization_steps,
divisor_limb, divisor_limb_inverted);
return r >> normalization_steps;
} else {
mpi_limb_t divisor_limb_inverted;
/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
* result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
* most significant bit (with weight 2**N) implicit.
*
* Special case for DIVISOR_LIMB == 100...000.
*/
if (!(divisor_limb << 1))
divisor_limb_inverted = ~(mpi_limb_t)0;
else
udiv_qrnnd(divisor_limb_inverted, dummy,
-divisor_limb, 0, divisor_limb);
i = dividend_size - 1;
r = dividend_ptr[i];
if (r >= divisor_limb)
r = 0;
else
i--;
for ( ; i >= 0; i--) {
n0 = dividend_ptr[i];
UDIV_QRNND_PREINV(dummy, r, r,
n0, divisor_limb, divisor_limb_inverted);
}
return r;
}
} else {
if (UDIV_NEEDS_NORMALIZATION) {
int normalization_steps;
normalization_steps = count_leading_zeros(divisor_limb);
if (normalization_steps) {
divisor_limb <<= normalization_steps;
n1 = dividend_ptr[dividend_size - 1];
r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
/* Possible optimization:
* if (r == 0
* && divisor_limb > ((n1 << normalization_steps)
* | (dividend_ptr[dividend_size - 2] >> ...)))
* ...one division less...
*/
for (i = dividend_size - 2; i >= 0; i--) {
n0 = dividend_ptr[i];
udiv_qrnnd(dummy, r, r,
((n1 << normalization_steps)
| (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
divisor_limb);
n1 = n0;
}
udiv_qrnnd(dummy, r, r,
n1 << normalization_steps,
divisor_limb);
return r >> normalization_steps;
}
}
/* No normalization needed, either because udiv_qrnnd doesn't require
* it, or because DIVISOR_LIMB is already normalized.
*/
i = dividend_size - 1;
r = dividend_ptr[i];
if (r >= divisor_limb)
r = 0;
else
i--;
for (; i >= 0; i--) {
n0 = dividend_ptr[i];
udiv_qrnnd(dummy, r, r, n0, divisor_limb);
}
return r;
}
}
/* Divide num (NP/NSIZE) by den (DP/DSIZE) and write
* the NSIZE-DSIZE least significant quotient limbs at QP
* and the DSIZE long remainder at NP. If QEXTRA_LIMBS is
* non-zero, generate that many fraction bits and append them after the
* other quotient limbs.
* Return the most significant limb of the quotient, this is always 0 or 1.
*
* Preconditions:
* 0. NSIZE >= DSIZE.
* 1. The most significant bit of the divisor must be set.
* 2. QP must either not overlap with the input operands at all, or
* QP + DSIZE >= NP must hold true. (This means that it's
* possible to put the quotient in the high part of NUM, right after the
* remainder in NUM.)
* 3. NSIZE >= DSIZE, even if QEXTRA_LIMBS is non-zero.
*/
mpi_limb_t
mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
mpi_ptr_t np, mpi_size_t nsize, mpi_ptr_t dp, mpi_size_t dsize)
{
mpi_limb_t most_significant_q_limb = 0;
switch (dsize) {
case 0:
/* We are asked to divide by zero, so go ahead and do it! (To make
the compiler not remove this statement, return the value.) */
/*
* existing clients of this function have been modified
* not to call it with dsize == 0, so this should not happen
*/
return 1 / dsize;
case 1:
{
mpi_size_t i;
mpi_limb_t n1;
mpi_limb_t d;
d = dp[0];
n1 = np[nsize - 1];
if (n1 >= d) {
n1 -= d;
most_significant_q_limb = 1;
}
qp += qextra_limbs;
for (i = nsize - 2; i >= 0; i--)
udiv_qrnnd(qp[i], n1, n1, np[i], d);
qp -= qextra_limbs;
for (i = qextra_limbs - 1; i >= 0; i--)
udiv_qrnnd(qp[i], n1, n1, 0, d);
np[0] = n1;
}
break;
case 2:
{
mpi_size_t i;
mpi_limb_t n1, n0, n2;
mpi_limb_t d1, d0;
np += nsize - 2;
d1 = dp[1];
d0 = dp[0];
n1 = np[1];
n0 = np[0];
if (n1 >= d1 && (n1 > d1 || n0 >= d0)) {
sub_ddmmss(n1, n0, n1, n0, d1, d0);
most_significant_q_limb = 1;
}
for (i = qextra_limbs + nsize - 2 - 1; i >= 0; i--) {
mpi_limb_t q;
mpi_limb_t r;
if (i >= qextra_limbs)
np--;
else
np[0] = 0;
if (n1 == d1) {
/* Q should be either 111..111 or 111..110. Need special
* treatment of this rare case as normal division would
* give overflow. */
q = ~(mpi_limb_t) 0;
r = n0 + d1;
if (r < d1) { /* Carry in the addition? */
add_ssaaaa(n1, n0, r - d0,
np[0], 0, d0);
qp[i] = q;
continue;
}
n1 = d0 - (d0 != 0 ? 1 : 0);
n0 = -d0;
} else {
udiv_qrnnd(q, r, n1, n0, d1);
umul_ppmm(n1, n0, d0, q);
}
n2 = np[0];
q_test:
if (n1 > r || (n1 == r && n0 > n2)) {
/* The estimated Q was too large. */
q--;
sub_ddmmss(n1, n0, n1, n0, 0, d0);
r += d1;
if (r >= d1) /* If not carry, test Q again. */
goto q_test;
}
qp[i] = q;
sub_ddmmss(n1, n0, r, n2, n1, n0);
}
np[1] = n1;
np[0] = n0;
}
break;
default:
{
mpi_size_t i;
mpi_limb_t dX, d1, n0;
np += nsize - dsize;
dX = dp[dsize - 1];
d1 = dp[dsize - 2];
n0 = np[dsize - 1];
if (n0 >= dX) {
if (n0 > dX
|| mpihelp_cmp(np, dp, dsize - 1) >= 0) {
mpihelp_sub_n(np, np, dp, dsize);
n0 = np[dsize - 1];
most_significant_q_limb = 1;
}
}
for (i = qextra_limbs + nsize - dsize - 1; i >= 0; i--) {
mpi_limb_t q;
mpi_limb_t n1, n2;
mpi_limb_t cy_limb;
if (i >= qextra_limbs) {
np--;
n2 = np[dsize];
} else {
n2 = np[dsize - 1];
MPN_COPY_DECR(np + 1, np, dsize - 1);
np[0] = 0;
}
if (n0 == dX) {
/* This might over-estimate q, but it's probably not worth
* the extra code here to find out. */
q = ~(mpi_limb_t) 0;
} else {
mpi_limb_t r;
udiv_qrnnd(q, r, n0, np[dsize - 1], dX);
umul_ppmm(n1, n0, d1, q);
while (n1 > r
|| (n1 == r
&& n0 > np[dsize - 2])) {
q--;
r += dX;
if (r < dX) /* I.e. "carry in previous addition?" */
break;
n1 -= n0 < d1;
n0 -= d1;
}
}
/* Possible optimization: We already have (q * n0) and (1 * n1)
* after the calculation of q. Taking advantage of that, we
* could make this loop make two iterations less. */
cy_limb = mpihelp_submul_1(np, dp, dsize, q);
if (n2 != cy_limb) {
mpihelp_add_n(np, np, dp, dsize);
q--;
}
qp[i] = q;
n0 = np[dsize - 1];
}
}
}
return most_significant_q_limb;
}
/****************
* Divide (DIVIDEND_PTR,,DIVIDEND_SIZE) by DIVISOR_LIMB.
* Write DIVIDEND_SIZE limbs of quotient at QUOT_PTR.
* Return the single-limb remainder.
* There are no constraints on the value of the divisor.
*
* QUOT_PTR and DIVIDEND_PTR might point to the same limb.
*/
mpi_limb_t
mpihelp_divmod_1(mpi_ptr_t quot_ptr,
mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
mpi_limb_t divisor_limb)
{
mpi_size_t i;
mpi_limb_t n1, n0, r;
mpi_limb_t dummy __maybe_unused;
if (!dividend_size)
return 0;
/* If multiplication is much faster than division, and the
* dividend is large, pre-invert the divisor, and use
* only multiplications in the inner loop.
*
* This test should be read:
* Does it ever help to use udiv_qrnnd_preinv?
* && Does what we save compensate for the inversion overhead?
*/
if (UDIV_TIME > (2 * UMUL_TIME + 6)
&& (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
int normalization_steps;
normalization_steps = count_leading_zeros(divisor_limb);
if (normalization_steps) {
mpi_limb_t divisor_limb_inverted;
divisor_limb <<= normalization_steps;
/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
* result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
* most significant bit (with weight 2**N) implicit.
*/
/* Special case for DIVISOR_LIMB == 100...000. */
if (!(divisor_limb << 1))
divisor_limb_inverted = ~(mpi_limb_t)0;
else
udiv_qrnnd(divisor_limb_inverted, dummy,
-divisor_limb, 0, divisor_limb);
n1 = dividend_ptr[dividend_size - 1];
r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
/* Possible optimization:
* if (r == 0
* && divisor_limb > ((n1 << normalization_steps)
* | (dividend_ptr[dividend_size - 2] >> ...)))
* ...one division less...
*/
for (i = dividend_size - 2; i >= 0; i--) {
n0 = dividend_ptr[i];
UDIV_QRNND_PREINV(quot_ptr[i + 1], r, r,
((n1 << normalization_steps)
| (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
divisor_limb, divisor_limb_inverted);
n1 = n0;
}
UDIV_QRNND_PREINV(quot_ptr[0], r, r,
n1 << normalization_steps,
divisor_limb, divisor_limb_inverted);
return r >> normalization_steps;
} else {
mpi_limb_t divisor_limb_inverted;
/* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
* result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
* most significant bit (with weight 2**N) implicit.
*/
/* Special case for DIVISOR_LIMB == 100...000. */
if (!(divisor_limb << 1))
divisor_limb_inverted = ~(mpi_limb_t) 0;
else
udiv_qrnnd(divisor_limb_inverted, dummy,
-divisor_limb, 0, divisor_limb);
i = dividend_size - 1;
r = dividend_ptr[i];
if (r >= divisor_limb)
r = 0;
else
quot_ptr[i--] = 0;
for ( ; i >= 0; i--) {
n0 = dividend_ptr[i];
UDIV_QRNND_PREINV(quot_ptr[i], r, r,
n0, divisor_limb, divisor_limb_inverted);
}
return r;
}
} else {
if (UDIV_NEEDS_NORMALIZATION) {
int normalization_steps;
normalization_steps = count_leading_zeros(divisor_limb);
if (normalization_steps) {
divisor_limb <<= normalization_steps;
n1 = dividend_ptr[dividend_size - 1];
r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
/* Possible optimization:
* if (r == 0
* && divisor_limb > ((n1 << normalization_steps)
* | (dividend_ptr[dividend_size - 2] >> ...)))
* ...one division less...
*/
for (i = dividend_size - 2; i >= 0; i--) {
n0 = dividend_ptr[i];
udiv_qrnnd(quot_ptr[i + 1], r, r,
((n1 << normalization_steps)
| (n0 >> (BITS_PER_MPI_LIMB - normalization_steps))),
divisor_limb);
n1 = n0;
}
udiv_qrnnd(quot_ptr[0], r, r,
n1 << normalization_steps,
divisor_limb);
return r >> normalization_steps;
}
}
/* No normalization needed, either because udiv_qrnnd doesn't require
* it, or because DIVISOR_LIMB is already normalized.
*/
i = dividend_size - 1;
r = dividend_ptr[i];
if (r >= divisor_limb)
r = 0;
else
quot_ptr[i--] = 0;
for (; i >= 0; i--) {
n0 = dividend_ptr[i];
udiv_qrnnd(quot_ptr[i], r, r, n0, divisor_limb);
}
return r;
}
}
| linux-master | lib/crypto/mpi/mpih-div.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* mpihelp-mul_2.c - MPI helper functions
* Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
* The GNU MP Library itself is published under the LGPL;
* however I decided to publish this code under the plain GPL.
*/
#include "mpi-internal.h"
#include "longlong.h"
mpi_limb_t
mpihelp_addmul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb)
{
mpi_limb_t cy_limb;
mpi_size_t j;
mpi_limb_t prod_high, prod_low;
mpi_limb_t x;
/* The loop counter and index J goes from -SIZE to -1. This way
* the loop becomes faster. */
j = -s1_size;
res_ptr -= j;
s1_ptr -= j;
cy_limb = 0;
do {
umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb);
prod_low += cy_limb;
cy_limb = (prod_low < cy_limb ? 1 : 0) + prod_high;
x = res_ptr[j];
prod_low = x + prod_low;
cy_limb += prod_low < x ? 1 : 0;
res_ptr[j] = prod_low;
} while (++j);
return cy_limb;
}
| linux-master | lib/crypto/mpi/generic_mpih-mul2.c |
/* mpi-div.c - MPI functions
* Copyright (C) 1994, 1996, 1998, 2001, 2002,
* 2003 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "mpi-internal.h"
#include "longlong.h"
void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den);
void mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor);
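/*
 * Floored division: when dividend and divisor have different signs
 * and the truncated remainder is non-zero, adding the divisor once
 * moves the remainder into the interval required by floored
 * (round-toward-negative-infinity) semantics.
 */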
void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor)
{
int divisor_sign = divisor->sign;
MPI temp_divisor = NULL;
/* We need the original value of the divisor after the remainder has been
* preliminarily calculated. We have to copy it to temporary space if it's
* the same variable as REM.
*/
if (rem == divisor) {
temp_divisor = mpi_copy(divisor);
divisor = temp_divisor;
}
mpi_tdiv_r(rem, dividend, divisor);
if (((divisor_sign?1:0) ^ (dividend->sign?1:0)) && rem->nlimbs)
mpi_add(rem, rem, divisor);
if (temp_divisor)
mpi_free(temp_divisor);
}
void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor)
{
MPI tmp = mpi_alloc(mpi_get_nlimbs(quot));
mpi_fdiv_qr(quot, tmp, dividend, divisor);
mpi_free(tmp);
}
void mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor)
{
int divisor_sign = divisor->sign;
MPI temp_divisor = NULL;
if (quot == divisor || rem == divisor) {
temp_divisor = mpi_copy(divisor);
divisor = temp_divisor;
}
mpi_tdiv_qr(quot, rem, dividend, divisor);
if ((divisor_sign ^ dividend->sign) && rem->nlimbs) {
mpi_sub_ui(quot, quot, 1);
mpi_add(rem, rem, divisor);
}
if (temp_divisor)
mpi_free(temp_divisor);
}
/* If den == quot, den needs temporary storage.
* If den == rem, den needs temporary storage.
* If num == quot, num needs temporary storage.
* If den has temporary storage, it can be normalized while being copied,
* i.e. no extra storage should be allocated.
*/
void mpi_tdiv_r(MPI rem, MPI num, MPI den)
{
mpi_tdiv_qr(NULL, rem, num, den);
}
void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
{
mpi_ptr_t np, dp;
mpi_ptr_t qp, rp;
mpi_size_t nsize = num->nlimbs;
mpi_size_t dsize = den->nlimbs;
mpi_size_t qsize, rsize;
mpi_size_t sign_remainder = num->sign;
mpi_size_t sign_quotient = num->sign ^ den->sign;
unsigned int normalization_steps;
mpi_limb_t q_limb;
mpi_ptr_t marker[5];
int markidx = 0;
/* Ensure space is enough for quotient and remainder.
* We need space for an extra limb in the remainder, because it's
* up-shifted (normalized) below.
*/
rsize = nsize + 1;
mpi_resize(rem, rsize);
qsize = rsize - dsize; /* qsize cannot be bigger than this. */
if (qsize <= 0) {
if (num != rem) {
rem->nlimbs = num->nlimbs;
rem->sign = num->sign;
MPN_COPY(rem->d, num->d, nsize);
}
if (quot) {
/* This needs to follow the assignment to rem, in case the
* numerator and quotient are the same.
*/
quot->nlimbs = 0;
quot->sign = 0;
}
return;
}
if (quot)
mpi_resize(quot, qsize);
/* Read pointers here, when reallocation is finished. */
np = num->d;
dp = den->d;
rp = rem->d;
/* Optimize division by a single-limb divisor. */
if (dsize == 1) {
mpi_limb_t rlimb;
if (quot) {
qp = quot->d;
rlimb = mpihelp_divmod_1(qp, np, nsize, dp[0]);
qsize -= qp[qsize - 1] == 0;
quot->nlimbs = qsize;
quot->sign = sign_quotient;
} else
rlimb = mpihelp_mod_1(np, nsize, dp[0]);
rp[0] = rlimb;
rsize = rlimb != 0?1:0;
rem->nlimbs = rsize;
rem->sign = sign_remainder;
return;
}
if (quot) {
qp = quot->d;
/* Make sure QP and NP point to different objects. Otherwise the
* numerator would be gradually overwritten by the quotient limbs.
*/
if (qp == np) { /* Copy NP object to temporary space. */
np = marker[markidx++] = mpi_alloc_limb_space(nsize);
MPN_COPY(np, qp, nsize);
}
} else /* Put quotient at top of remainder. */
qp = rp + dsize;
normalization_steps = count_leading_zeros(dp[dsize - 1]);
/* Normalize the denominator, i.e. make its most significant bit set by
* shifting it NORMALIZATION_STEPS bits to the left. Also shift the
* numerator the same number of steps (to keep the quotient the same!).
*/
if (normalization_steps) {
mpi_ptr_t tp;
mpi_limb_t nlimb;
/* Shift up the denominator setting the most significant bit of
* the most significant word. Use temporary storage not to clobber
* the original contents of the denominator.
*/
tp = marker[markidx++] = mpi_alloc_limb_space(dsize);
mpihelp_lshift(tp, dp, dsize, normalization_steps);
dp = tp;
/* Shift up the numerator, possibly introducing a new most
* significant word. Move the shifted numerator in the remainder
* meanwhile.
*/
nlimb = mpihelp_lshift(rp, np, nsize, normalization_steps);
if (nlimb) {
rp[nsize] = nlimb;
rsize = nsize + 1;
} else
rsize = nsize;
} else {
/* The denominator is already normalized, as required. Copy it to
* temporary space if it overlaps with the quotient or remainder.
*/
if (dp == rp || (quot && (dp == qp))) {
mpi_ptr_t tp;
tp = marker[markidx++] = mpi_alloc_limb_space(dsize);
MPN_COPY(tp, dp, dsize);
dp = tp;
}
/* Move the numerator to the remainder. */
if (rp != np)
MPN_COPY(rp, np, nsize);
rsize = nsize;
}
q_limb = mpihelp_divrem(qp, 0, rp, rsize, dp, dsize);
if (quot) {
qsize = rsize - dsize;
if (q_limb) {
qp[qsize] = q_limb;
qsize += 1;
}
quot->nlimbs = qsize;
quot->sign = sign_quotient;
}
rsize = dsize;
MPN_NORMALIZE(rp, rsize);
if (normalization_steps && rsize) {
mpihelp_rshift(rp, rp, rsize, normalization_steps);
rsize -= rp[rsize - 1] == 0?1:0;
}
rem->nlimbs = rsize;
rem->sign = sign_remainder;
while (markidx) {
markidx--;
mpi_free_limb_space(marker[markidx]);
}
}
| linux-master | lib/crypto/mpi/mpi-div.c |
/* mpi-inv.c - MPI functions
* Copyright (C) 1998, 2001, 2002, 2003 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "mpi-internal.h"
/****************
* Calculate the multiplicative inverse X of A mod N
* That is: Find the solution x for
* 1 = (a*x) mod n
*/
int mpi_invm(MPI x, MPI a, MPI n)
{
/* Extended Euclid's algorithm (See TAOCP Vol II, 4.5.2, Alg X)
* modified according to Michael Penk's solution for Exercise 35
* with further enhancement
*/
MPI u, v, u1, u2 = NULL, u3, v1, v2 = NULL, v3, t1, t2 = NULL, t3;
unsigned int k;
int sign;
int odd;
if (!mpi_cmp_ui(a, 0))
return 0; /* Inverse does not exists. */
if (!mpi_cmp_ui(n, 1))
return 0; /* Inverse does not exists. */
u = mpi_copy(a);
v = mpi_copy(n);
for (k = 0; !mpi_test_bit(u, 0) && !mpi_test_bit(v, 0); k++) {
mpi_rshift(u, u, 1);
mpi_rshift(v, v, 1);
}
odd = mpi_test_bit(v, 0);
u1 = mpi_alloc_set_ui(1);
if (!odd)
u2 = mpi_alloc_set_ui(0);
u3 = mpi_copy(u);
v1 = mpi_copy(v);
if (!odd) {
v2 = mpi_alloc(mpi_get_nlimbs(u));
mpi_sub(v2, u1, u); /* U is used as const 1 */
}
v3 = mpi_copy(v);
if (mpi_test_bit(u, 0)) { /* u is odd */
t1 = mpi_alloc_set_ui(0);
if (!odd) {
t2 = mpi_alloc_set_ui(1);
t2->sign = 1;
}
t3 = mpi_copy(v);
t3->sign = !t3->sign;
goto Y4;
} else {
t1 = mpi_alloc_set_ui(1);
if (!odd)
t2 = mpi_alloc_set_ui(0);
t3 = mpi_copy(u);
}
do {
do {
if (!odd) {
if (mpi_test_bit(t1, 0) || mpi_test_bit(t2, 0)) {
/* one is odd */
mpi_add(t1, t1, v);
mpi_sub(t2, t2, u);
}
mpi_rshift(t1, t1, 1);
mpi_rshift(t2, t2, 1);
mpi_rshift(t3, t3, 1);
} else {
if (mpi_test_bit(t1, 0))
mpi_add(t1, t1, v);
mpi_rshift(t1, t1, 1);
mpi_rshift(t3, t3, 1);
}
Y4:
;
} while (!mpi_test_bit(t3, 0)); /* while t3 is even */
if (!t3->sign) {
mpi_set(u1, t1);
if (!odd)
mpi_set(u2, t2);
mpi_set(u3, t3);
} else {
mpi_sub(v1, v, t1);
sign = u->sign; u->sign = !u->sign;
if (!odd)
mpi_sub(v2, u, t2);
u->sign = sign;
sign = t3->sign; t3->sign = !t3->sign;
mpi_set(v3, t3);
t3->sign = sign;
}
mpi_sub(t1, u1, v1);
if (!odd)
mpi_sub(t2, u2, v2);
mpi_sub(t3, u3, v3);
if (t1->sign) {
mpi_add(t1, t1, v);
if (!odd)
mpi_sub(t2, t2, u);
}
} while (mpi_cmp_ui(t3, 0)); /* while t3 != 0 */
/* mpi_lshift( u3, k ); */
mpi_set(x, u1);
mpi_free(u1);
mpi_free(v1);
mpi_free(t1);
if (!odd) {
mpi_free(u2);
mpi_free(v2);
mpi_free(t2);
}
mpi_free(u3);
mpi_free(v3);
mpi_free(t3);
mpi_free(u);
mpi_free(v);
return 1;
}
EXPORT_SYMBOL_GPL(mpi_invm);
| linux-master | lib/crypto/mpi/mpi-inv.c |
/* ec.c - Elliptic Curve functions
* Copyright (C) 2007 Free Software Foundation, Inc.
* Copyright (C) 2013 g10 Code GmbH
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "mpi-internal.h"
#include "longlong.h"
#define point_init(a) mpi_point_init((a))
#define point_free(a) mpi_point_free_parts((a))
#define log_error(fmt, ...) pr_err(fmt, ##__VA_ARGS__)
#define log_fatal(fmt, ...) pr_err(fmt, ##__VA_ARGS__)
#define DIM(v) (sizeof(v)/sizeof((v)[0]))
/* Create a new point object. NBITS gives the size in bits of one
* coordinate; it is only used to pre-allocate some resources and
* might also be passed as 0 to use a default value.
*/
MPI_POINT mpi_point_new(unsigned int nbits)
{
MPI_POINT p;
(void)nbits; /* Currently not used. */
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (p)
mpi_point_init(p);
return p;
}
EXPORT_SYMBOL_GPL(mpi_point_new);
/* Release the point object P. P may be NULL. */
void mpi_point_release(MPI_POINT p)
{
if (p) {
mpi_point_free_parts(p);
kfree(p);
}
}
EXPORT_SYMBOL_GPL(mpi_point_release);
/* Initialize the fields of a point object. gcry_mpi_point_free_parts
* may be used to release the fields.
*/
void mpi_point_init(MPI_POINT p)
{
p->x = mpi_new(0);
p->y = mpi_new(0);
p->z = mpi_new(0);
}
EXPORT_SYMBOL_GPL(mpi_point_init);
/* Release the parts of a point object. */
void mpi_point_free_parts(MPI_POINT p)
{
mpi_free(p->x); p->x = NULL;
mpi_free(p->y); p->y = NULL;
mpi_free(p->z); p->z = NULL;
}
EXPORT_SYMBOL_GPL(mpi_point_free_parts);
/* Set the value from S into D. */
static void point_set(MPI_POINT d, MPI_POINT s)
{
mpi_set(d->x, s->x);
mpi_set(d->y, s->y);
mpi_set(d->z, s->z);
}
static void point_resize(MPI_POINT p, struct mpi_ec_ctx *ctx)
{
size_t nlimbs = ctx->p->nlimbs;
mpi_resize(p->x, nlimbs);
p->x->nlimbs = nlimbs;
mpi_resize(p->z, nlimbs);
p->z->nlimbs = nlimbs;
if (ctx->model != MPI_EC_MONTGOMERY) {
mpi_resize(p->y, nlimbs);
p->y->nlimbs = nlimbs;
}
}
static void point_swap_cond(MPI_POINT d, MPI_POINT s, unsigned long swap,
struct mpi_ec_ctx *ctx)
{
mpi_swap_cond(d->x, s->x, swap);
if (ctx->model != MPI_EC_MONTGOMERY)
mpi_swap_cond(d->y, s->y, swap);
mpi_swap_cond(d->z, s->z, swap);
}
/* W = W mod P. */
static void ec_mod(MPI w, struct mpi_ec_ctx *ec)
{
if (ec->t.p_barrett)
mpi_mod_barrett(w, w, ec->t.p_barrett);
else
mpi_mod(w, w, ec->p);
}
static void ec_addm(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
{
mpi_add(w, u, v);
ec_mod(w, ctx);
}
static void ec_subm(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ec)
{
mpi_sub(w, u, v);
while (w->sign)
mpi_add(w, w, ec->p);
/*ec_mod(w, ec);*/
}
static void ec_mulm(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
{
mpi_mul(w, u, v);
ec_mod(w, ctx);
}
/* W = 2 * U mod P. */
static void ec_mul2(MPI w, MPI u, struct mpi_ec_ctx *ctx)
{
mpi_lshift(w, u, 1);
ec_mod(w, ctx);
}
static void ec_powm(MPI w, const MPI b, const MPI e,
struct mpi_ec_ctx *ctx)
{
mpi_powm(w, b, e, ctx->p);
/* mpi_abs(w); */
}
/* Shortcut for
* ec_powm(B, B, mpi_const(MPI_C_TWO), ctx);
* for easier optimization.
*/
static void ec_pow2(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
{
/* Using mpi_mul is slightly faster (at least on amd64). */
/* mpi_powm(w, b, mpi_const(MPI_C_TWO), ctx->p); */
ec_mulm(w, b, b, ctx);
}
/* Shortcut for
* ec_powm(B, B, mpi_const(MPI_C_THREE), ctx);
* for easier optimization.
*/
static void ec_pow3(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
{
mpi_powm(w, b, mpi_const(MPI_C_THREE), ctx->p);
}
static void ec_invm(MPI x, MPI a, struct mpi_ec_ctx *ctx)
{
if (!mpi_invm(x, a, ctx->p))
log_error("ec_invm: inverse does not exist:\n");
}
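/* Constant-time conditional copy: when SET is 1, MASK is all-ones and
 * UP is copied into WP; when SET is 0, MASK is all-zeroes and WP is
 * left unchanged. Both cases execute the same loads, stores and XORs,
 * so nothing about SET leaks through the timing or memory access
 * pattern. (Descriptive comment added; not in the original file.)
 */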
static void mpih_set_cond(mpi_ptr_t wp, mpi_ptr_t up,
mpi_size_t usize, unsigned long set)
{
mpi_size_t i;
mpi_limb_t mask = ((mpi_limb_t)0) - set;
mpi_limb_t x;
for (i = 0; i < usize; i++) {
x = mask & (wp[i] ^ up[i]);
wp[i] = wp[i] ^ x;
}
}
/* Routines for 2^255 - 19. */
#define LIMB_SIZE_25519 ((256+BITS_PER_MPI_LIMB-1)/BITS_PER_MPI_LIMB)
static void ec_addm_25519(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
{
mpi_ptr_t wp, up, vp;
mpi_size_t wsize = LIMB_SIZE_25519;
mpi_limb_t n[LIMB_SIZE_25519];
mpi_limb_t borrow;
if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
log_bug("addm_25519: different sizes\n");
memset(n, 0, sizeof(n));
up = u->d;
vp = v->d;
wp = w->d;
mpihelp_add_n(wp, up, vp, wsize);
borrow = mpihelp_sub_n(wp, wp, ctx->p->d, wsize);
mpih_set_cond(n, ctx->p->d, wsize, (borrow != 0UL));
mpihelp_add_n(wp, wp, n, wsize);
wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
}
static void ec_subm_25519(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
{
mpi_ptr_t wp, up, vp;
mpi_size_t wsize = LIMB_SIZE_25519;
mpi_limb_t n[LIMB_SIZE_25519];
mpi_limb_t borrow;
if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
log_bug("subm_25519: different sizes\n");
memset(n, 0, sizeof(n));
up = u->d;
vp = v->d;
wp = w->d;
borrow = mpihelp_sub_n(wp, up, vp, wsize);
mpih_set_cond(n, ctx->p->d, wsize, (borrow != 0UL));
mpihelp_add_n(wp, wp, n, wsize);
wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
}
static void ec_mulm_25519(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
{
mpi_ptr_t wp, up, vp;
mpi_size_t wsize = LIMB_SIZE_25519;
mpi_limb_t n[LIMB_SIZE_25519*2];
mpi_limb_t m[LIMB_SIZE_25519+1];
mpi_limb_t cy;
int msb;
(void)ctx;
if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
log_bug("mulm_25519: different sizes\n");
up = u->d;
vp = v->d;
wp = w->d;
mpihelp_mul_n(n, up, vp, wsize);
memcpy(wp, n, wsize * BYTES_PER_MPI_LIMB);
wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
memcpy(m, n+LIMB_SIZE_25519-1, (wsize+1) * BYTES_PER_MPI_LIMB);
mpihelp_rshift(m, m, LIMB_SIZE_25519+1, (255 % BITS_PER_MPI_LIMB));
memcpy(n, m, wsize * BYTES_PER_MPI_LIMB);
cy = mpihelp_lshift(m, m, LIMB_SIZE_25519, 4);
m[LIMB_SIZE_25519] = cy;
cy = mpihelp_add_n(m, m, n, wsize);
m[LIMB_SIZE_25519] += cy;
cy = mpihelp_add_n(m, m, n, wsize);
m[LIMB_SIZE_25519] += cy;
cy = mpihelp_add_n(m, m, n, wsize);
m[LIMB_SIZE_25519] += cy;
cy = mpihelp_add_n(wp, wp, m, wsize);
m[LIMB_SIZE_25519] += cy;
memset(m, 0, wsize * BYTES_PER_MPI_LIMB);
msb = (wp[LIMB_SIZE_25519-1] >> (255 % BITS_PER_MPI_LIMB));
m[0] = (m[LIMB_SIZE_25519] * 2 + msb) * 19;
wp[LIMB_SIZE_25519-1] &= ~((mpi_limb_t)1 << (255 % BITS_PER_MPI_LIMB));
mpihelp_add_n(wp, wp, m, wsize);
m[0] = 0;
cy = mpihelp_sub_n(wp, wp, ctx->p->d, wsize);
mpih_set_cond(m, ctx->p->d, wsize, (cy != 0UL));
mpihelp_add_n(wp, wp, m, wsize);
}
static void ec_mul2_25519(MPI w, MPI u, struct mpi_ec_ctx *ctx)
{
ec_addm_25519(w, u, u, ctx);
}
static void ec_pow2_25519(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
{
ec_mulm_25519(w, b, b, ctx);
}
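/* Illustration only (not from the original file): the folding identity
 * behind the 25519 routines above. For a pseudo-Mersenne prime
 * p = 2^k - c we have 2^k == c (mod p), so a wide value
 * x = hi * 2^k + lo reduces as x == hi * c + lo (mod p). Toy version
 * for p = 2^31 - 1 (k = 31, c = 1); the real code does the same with
 * k = 255, c = 19 across multiple limbs.
 */
static unsigned long long __maybe_unused fold_mod_m31(unsigned long long x)
{
	const unsigned long long p = (1ULL << 31) - 1;

	while (x >> 31)			/* fold the high bits down */
		x = (x >> 31) + (x & p);
	return x == p ? 0 : x;		/* final conditional subtract */
}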
/* Routines for 2^448 - 2^224 - 1. */
#define LIMB_SIZE_448 ((448+BITS_PER_MPI_LIMB-1)/BITS_PER_MPI_LIMB)
#define LIMB_SIZE_HALF_448 ((LIMB_SIZE_448+1)/2)
static void ec_addm_448(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
{
mpi_ptr_t wp, up, vp;
mpi_size_t wsize = LIMB_SIZE_448;
mpi_limb_t n[LIMB_SIZE_448];
mpi_limb_t cy;
if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
log_bug("addm_448: different sizes\n");
memset(n, 0, sizeof(n));
up = u->d;
vp = v->d;
wp = w->d;
cy = mpihelp_add_n(wp, up, vp, wsize);
mpih_set_cond(n, ctx->p->d, wsize, (cy != 0UL));
mpihelp_sub_n(wp, wp, n, wsize);
}
static void ec_subm_448(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
{
mpi_ptr_t wp, up, vp;
mpi_size_t wsize = LIMB_SIZE_448;
mpi_limb_t n[LIMB_SIZE_448];
mpi_limb_t borrow;
if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
log_bug("subm_448: different sizes\n");
memset(n, 0, sizeof(n));
up = u->d;
vp = v->d;
wp = w->d;
borrow = mpihelp_sub_n(wp, up, vp, wsize);
mpih_set_cond(n, ctx->p->d, wsize, (borrow != 0UL));
mpihelp_add_n(wp, wp, n, wsize);
}
static void ec_mulm_448(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx)
{
mpi_ptr_t wp, up, vp;
mpi_size_t wsize = LIMB_SIZE_448;
mpi_limb_t n[LIMB_SIZE_448*2];
mpi_limb_t a2[LIMB_SIZE_HALF_448];
mpi_limb_t a3[LIMB_SIZE_HALF_448];
mpi_limb_t b0[LIMB_SIZE_HALF_448];
mpi_limb_t b1[LIMB_SIZE_HALF_448];
mpi_limb_t cy;
int i;
#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
mpi_limb_t b1_rest, a3_rest;
#endif
if (w->nlimbs != wsize || u->nlimbs != wsize || v->nlimbs != wsize)
log_bug("mulm_448: different sizes\n");
up = u->d;
vp = v->d;
wp = w->d;
mpihelp_mul_n(n, up, vp, wsize);
for (i = 0; i < (wsize + 1) / 2; i++) {
b0[i] = n[i];
b1[i] = n[i+wsize/2];
a2[i] = n[i+wsize];
a3[i] = n[i+wsize+wsize/2];
}
#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
b0[LIMB_SIZE_HALF_448-1] &= ((mpi_limb_t)1UL << 32)-1;
a2[LIMB_SIZE_HALF_448-1] &= ((mpi_limb_t)1UL << 32)-1;
b1_rest = 0;
a3_rest = 0;
for (i = (wsize + 1) / 2 - 1; i >= 0; i--) {
mpi_limb_t b1v, a3v;
b1v = b1[i];
a3v = a3[i];
b1[i] = (b1_rest << 32) | (b1v >> 32);
a3[i] = (a3_rest << 32) | (a3v >> 32);
b1_rest = b1v & (((mpi_limb_t)1UL << 32)-1);
a3_rest = a3v & (((mpi_limb_t)1UL << 32)-1);
}
#endif
cy = mpihelp_add_n(b0, b0, a2, LIMB_SIZE_HALF_448);
cy += mpihelp_add_n(b0, b0, a3, LIMB_SIZE_HALF_448);
for (i = 0; i < (wsize + 1) / 2; i++)
wp[i] = b0[i];
#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
wp[LIMB_SIZE_HALF_448-1] &= (((mpi_limb_t)1UL << 32)-1);
#endif
#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
cy = b0[LIMB_SIZE_HALF_448-1] >> 32;
#endif
cy = mpihelp_add_1(b1, b1, LIMB_SIZE_HALF_448, cy);
cy += mpihelp_add_n(b1, b1, a2, LIMB_SIZE_HALF_448);
cy += mpihelp_add_n(b1, b1, a3, LIMB_SIZE_HALF_448);
cy += mpihelp_add_n(b1, b1, a3, LIMB_SIZE_HALF_448);
#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
b1_rest = 0;
for (i = (wsize + 1) / 2 - 1; i >= 0; i--) {
mpi_limb_t b1v = b1[i];
b1[i] = (b1_rest << 32) | (b1v >> 32);
b1_rest = b1v & (((mpi_limb_t)1UL << 32)-1);
}
wp[LIMB_SIZE_HALF_448-1] |= (b1_rest << 32);
#endif
for (i = 0; i < wsize / 2; i++)
wp[i+(wsize + 1) / 2] = b1[i];
#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
cy = b1[LIMB_SIZE_HALF_448-1];
#endif
memset(n, 0, wsize * BYTES_PER_MPI_LIMB);
#if (LIMB_SIZE_HALF_448 > LIMB_SIZE_448/2)
n[LIMB_SIZE_HALF_448-1] = cy << 32;
#else
n[LIMB_SIZE_HALF_448] = cy;
#endif
n[0] = cy;
mpihelp_add_n(wp, wp, n, wsize);
memset(n, 0, wsize * BYTES_PER_MPI_LIMB);
cy = mpihelp_sub_n(wp, wp, ctx->p->d, wsize);
mpih_set_cond(n, ctx->p->d, wsize, (cy != 0UL));
mpihelp_add_n(wp, wp, n, wsize);
}
static void ec_mul2_448(MPI w, MPI u, struct mpi_ec_ctx *ctx)
{
ec_addm_448(w, u, u, ctx);
}
static void ec_pow2_448(MPI w, const MPI b, struct mpi_ec_ctx *ctx)
{
ec_mulm_448(w, b, b, ctx);
}
struct field_table {
const char *p;
/* computation routines for the field. */
void (*addm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
void (*subm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
void (*mulm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
void (*mul2)(MPI w, MPI u, struct mpi_ec_ctx *ctx);
void (*pow2)(MPI w, const MPI b, struct mpi_ec_ctx *ctx);
};
static const struct field_table field_table[] = {
{
"0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFED",
ec_addm_25519,
ec_subm_25519,
ec_mulm_25519,
ec_mul2_25519,
ec_pow2_25519
},
{
"0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE"
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
ec_addm_448,
ec_subm_448,
ec_mulm_448,
ec_mul2_448,
ec_pow2_448
},
{ NULL, NULL, NULL, NULL, NULL, NULL },
};
/* Force recomputation of all helper variables. */
static void mpi_ec_get_reset(struct mpi_ec_ctx *ec)
{
ec->t.valid.a_is_pminus3 = 0;
ec->t.valid.two_inv_p = 0;
}
/* Accessor for helper variable. */
static int ec_get_a_is_pminus3(struct mpi_ec_ctx *ec)
{
MPI tmp;
if (!ec->t.valid.a_is_pminus3) {
ec->t.valid.a_is_pminus3 = 1;
tmp = mpi_alloc_like(ec->p);
mpi_sub_ui(tmp, ec->p, 3);
ec->t.a_is_pminus3 = !mpi_cmp(ec->a, tmp);
mpi_free(tmp);
}
return ec->t.a_is_pminus3;
}
/* Accessor for helper variable. */
static MPI ec_get_two_inv_p(struct mpi_ec_ctx *ec)
{
if (!ec->t.valid.two_inv_p) {
ec->t.valid.two_inv_p = 1;
if (!ec->t.two_inv_p)
ec->t.two_inv_p = mpi_alloc(0);
ec_invm(ec->t.two_inv_p, mpi_const(MPI_C_TWO), ec);
}
return ec->t.two_inv_p;
}
static const char *const curve25519_bad_points[] = {
"0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffed",
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000001",
"0x00b8495f16056286fdb1329ceb8d09da6ac49ff1fae35616aeb8413b7c7aebe0",
"0x57119fd0dd4e22d8868e1c58c45c44045bef839c55b1d0b1248c50a3bc959c5f",
"0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffec",
"0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee",
NULL
};
static const char *const curve448_bad_points[] = {
"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
"0x00000000000000000000000000000000000000000000000000000000"
"00000000000000000000000000000000000000000000000000000000",
"0x00000000000000000000000000000000000000000000000000000000"
"00000000000000000000000000000000000000000000000000000001",
"0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffe"
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffe",
"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
"00000000000000000000000000000000000000000000000000000000",
NULL
};
static const char *const *bad_points_table[] = {
curve25519_bad_points,
curve448_bad_points,
};
static void mpi_ec_coefficient_normalize(MPI a, MPI p)
{
if (a->sign) {
mpi_resize(a, p->nlimbs);
mpihelp_sub_n(a->d, p->d, a->d, p->nlimbs);
a->nlimbs = p->nlimbs;
a->sign = 0;
}
}
/* This function initializes a context for an elliptic curve based on the
* field GF(p). P is the prime specifying this field, A is the first
* coefficient. CTX is expected to be zeroized.
*/
void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
enum ecc_dialects dialect,
int flags, MPI p, MPI a, MPI b)
{
int i;
static int use_barrett = -1 /* TODO: 1 or -1 */;
mpi_ec_coefficient_normalize(a, p);
mpi_ec_coefficient_normalize(b, p);
/* Fixme: Do we want to check some constraints? e.g. a < p */
ctx->model = model;
ctx->dialect = dialect;
ctx->flags = flags;
if (dialect == ECC_DIALECT_ED25519)
ctx->nbits = 256;
else
ctx->nbits = mpi_get_nbits(p);
ctx->p = mpi_copy(p);
ctx->a = mpi_copy(a);
ctx->b = mpi_copy(b);
ctx->t.p_barrett = use_barrett > 0 ? mpi_barrett_init(ctx->p, 0) : NULL;
mpi_ec_get_reset(ctx);
if (model == MPI_EC_MONTGOMERY) {
for (i = 0; i < DIM(bad_points_table); i++) {
MPI p_candidate = mpi_scanval(bad_points_table[i][0]);
int match_p = !mpi_cmp(ctx->p, p_candidate);
int j;
mpi_free(p_candidate);
if (!match_p)
continue;
			for (j = 0; j < DIM(ctx->t.scratch) && bad_points_table[i][j]; j++)
ctx->t.scratch[j] = mpi_scanval(bad_points_table[i][j]);
}
} else {
/* Allocate scratch variables. */
for (i = 0; i < DIM(ctx->t.scratch); i++)
ctx->t.scratch[i] = mpi_alloc_like(ctx->p);
}
ctx->addm = ec_addm;
ctx->subm = ec_subm;
ctx->mulm = ec_mulm;
ctx->mul2 = ec_mul2;
ctx->pow2 = ec_pow2;
for (i = 0; field_table[i].p; i++) {
MPI f_p;
f_p = mpi_scanval(field_table[i].p);
if (!f_p)
break;
if (!mpi_cmp(p, f_p)) {
ctx->addm = field_table[i].addm;
ctx->subm = field_table[i].subm;
ctx->mulm = field_table[i].mulm;
ctx->mul2 = field_table[i].mul2;
ctx->pow2 = field_table[i].pow2;
mpi_free(f_p);
mpi_resize(ctx->a, ctx->p->nlimbs);
ctx->a->nlimbs = ctx->p->nlimbs;
mpi_resize(ctx->b, ctx->p->nlimbs);
ctx->b->nlimbs = ctx->p->nlimbs;
for (i = 0; i < DIM(ctx->t.scratch) && ctx->t.scratch[i]; i++)
ctx->t.scratch[i]->nlimbs = ctx->p->nlimbs;
break;
}
mpi_free(f_p);
}
}
EXPORT_SYMBOL_GPL(mpi_ec_init);
void mpi_ec_deinit(struct mpi_ec_ctx *ctx)
{
int i;
mpi_barrett_free(ctx->t.p_barrett);
/* Domain parameter. */
mpi_free(ctx->p);
mpi_free(ctx->a);
mpi_free(ctx->b);
mpi_point_release(ctx->G);
mpi_free(ctx->n);
/* The key. */
mpi_point_release(ctx->Q);
mpi_free(ctx->d);
/* Private data of ec.c. */
mpi_free(ctx->t.two_inv_p);
for (i = 0; i < DIM(ctx->t.scratch); i++)
mpi_free(ctx->t.scratch[i]);
}
EXPORT_SYMBOL_GPL(mpi_ec_deinit);
/* Compute the affine coordinates from the projective coordinates in
* POINT. Set them into X and Y. If one coordinate is not required,
* X or Y may be passed as NULL. CTX is the usual context. Returns: 0
* on success or !0 if POINT is at infinity.
*/
int mpi_ec_get_affine(MPI x, MPI y, MPI_POINT point, struct mpi_ec_ctx *ctx)
{
if (!mpi_cmp_ui(point->z, 0))
return -1;
switch (ctx->model) {
case MPI_EC_WEIERSTRASS: /* Using Jacobian coordinates. */
{
MPI z1, z2, z3;
z1 = mpi_new(0);
z2 = mpi_new(0);
ec_invm(z1, point->z, ctx); /* z1 = z^(-1) mod p */
ec_mulm(z2, z1, z1, ctx); /* z2 = z^(-2) mod p */
if (x)
ec_mulm(x, point->x, z2, ctx);
if (y) {
z3 = mpi_new(0);
ec_mulm(z3, z2, z1, ctx); /* z3 = z^(-3) mod p */
ec_mulm(y, point->y, z3, ctx);
mpi_free(z3);
}
mpi_free(z2);
mpi_free(z1);
}
return 0;
case MPI_EC_MONTGOMERY:
{
if (x)
mpi_set(x, point->x);
if (y) {
log_fatal("%s: Getting Y-coordinate on %s is not supported\n",
"mpi_ec_get_affine", "Montgomery");
return -1;
}
}
return 0;
case MPI_EC_EDWARDS:
{
MPI z;
z = mpi_new(0);
ec_invm(z, point->z, ctx);
mpi_resize(z, ctx->p->nlimbs);
z->nlimbs = ctx->p->nlimbs;
if (x) {
mpi_resize(x, ctx->p->nlimbs);
x->nlimbs = ctx->p->nlimbs;
ctx->mulm(x, point->x, z, ctx);
}
if (y) {
mpi_resize(y, ctx->p->nlimbs);
y->nlimbs = ctx->p->nlimbs;
ctx->mulm(y, point->y, z, ctx);
}
mpi_free(z);
}
return 0;
default:
return -1;
}
}
EXPORT_SYMBOL_GPL(mpi_ec_get_affine);
/* RESULT = 2 * POINT (Weierstrass version). */
static void dup_point_weierstrass(MPI_POINT result,
MPI_POINT point, struct mpi_ec_ctx *ctx)
{
#define x3 (result->x)
#define y3 (result->y)
#define z3 (result->z)
#define t1 (ctx->t.scratch[0])
#define t2 (ctx->t.scratch[1])
#define t3 (ctx->t.scratch[2])
#define l1 (ctx->t.scratch[3])
#define l2 (ctx->t.scratch[4])
#define l3 (ctx->t.scratch[5])
if (!mpi_cmp_ui(point->y, 0) || !mpi_cmp_ui(point->z, 0)) {
/* P_y == 0 || P_z == 0 => [1:1:0] */
mpi_set_ui(x3, 1);
mpi_set_ui(y3, 1);
mpi_set_ui(z3, 0);
} else {
if (ec_get_a_is_pminus3(ctx)) {
/* Use the faster case. */
/* L1 = 3(X - Z^2)(X + Z^2) */
/* T1: used for Z^2. */
/* T2: used for the right term. */
ec_pow2(t1, point->z, ctx);
ec_subm(l1, point->x, t1, ctx);
ec_mulm(l1, l1, mpi_const(MPI_C_THREE), ctx);
ec_addm(t2, point->x, t1, ctx);
ec_mulm(l1, l1, t2, ctx);
} else {
/* Standard case. */
/* L1 = 3X^2 + aZ^4 */
/* T1: used for aZ^4. */
ec_pow2(l1, point->x, ctx);
ec_mulm(l1, l1, mpi_const(MPI_C_THREE), ctx);
ec_powm(t1, point->z, mpi_const(MPI_C_FOUR), ctx);
ec_mulm(t1, t1, ctx->a, ctx);
ec_addm(l1, l1, t1, ctx);
}
/* Z3 = 2YZ */
ec_mulm(z3, point->y, point->z, ctx);
ec_mul2(z3, z3, ctx);
/* L2 = 4XY^2 */
/* T2: used for Y2; required later. */
ec_pow2(t2, point->y, ctx);
ec_mulm(l2, t2, point->x, ctx);
ec_mulm(l2, l2, mpi_const(MPI_C_FOUR), ctx);
/* X3 = L1^2 - 2L2 */
/* T1: used for L2^2. */
ec_pow2(x3, l1, ctx);
ec_mul2(t1, l2, ctx);
ec_subm(x3, x3, t1, ctx);
/* L3 = 8Y^4 */
/* T2: taken from above. */
ec_pow2(t2, t2, ctx);
ec_mulm(l3, t2, mpi_const(MPI_C_EIGHT), ctx);
/* Y3 = L1(L2 - X3) - L3 */
ec_subm(y3, l2, x3, ctx);
ec_mulm(y3, y3, l1, ctx);
ec_subm(y3, y3, l3, ctx);
}
#undef x3
#undef y3
#undef z3
#undef t1
#undef t2
#undef t3
#undef l1
#undef l2
#undef l3
}
/* RESULT = 2 * POINT (Montgomery version). */
static void dup_point_montgomery(MPI_POINT result,
MPI_POINT point, struct mpi_ec_ctx *ctx)
{
(void)result;
(void)point;
(void)ctx;
log_fatal("%s: %s not yet supported\n",
"mpi_ec_dup_point", "Montgomery");
}
/* RESULT = 2 * POINT (Twisted Edwards version). */
static void dup_point_edwards(MPI_POINT result,
MPI_POINT point, struct mpi_ec_ctx *ctx)
{
#define X1 (point->x)
#define Y1 (point->y)
#define Z1 (point->z)
#define X3 (result->x)
#define Y3 (result->y)
#define Z3 (result->z)
#define B (ctx->t.scratch[0])
#define C (ctx->t.scratch[1])
#define D (ctx->t.scratch[2])
#define E (ctx->t.scratch[3])
#define F (ctx->t.scratch[4])
#define H (ctx->t.scratch[5])
#define J (ctx->t.scratch[6])
/* Compute: (X_3 : Y_3 : Z_3) = 2( X_1 : Y_1 : Z_1 ) */
/* B = (X_1 + Y_1)^2 */
ctx->addm(B, X1, Y1, ctx);
ctx->pow2(B, B, ctx);
/* C = X_1^2 */
/* D = Y_1^2 */
ctx->pow2(C, X1, ctx);
ctx->pow2(D, Y1, ctx);
/* E = aC */
if (ctx->dialect == ECC_DIALECT_ED25519)
ctx->subm(E, ctx->p, C, ctx);
else
ctx->mulm(E, ctx->a, C, ctx);
/* F = E + D */
ctx->addm(F, E, D, ctx);
/* H = Z_1^2 */
ctx->pow2(H, Z1, ctx);
/* J = F - 2H */
ctx->mul2(J, H, ctx);
ctx->subm(J, F, J, ctx);
/* X_3 = (B - C - D) · J */
ctx->subm(X3, B, C, ctx);
ctx->subm(X3, X3, D, ctx);
ctx->mulm(X3, X3, J, ctx);
/* Y_3 = F · (E - D) */
ctx->subm(Y3, E, D, ctx);
ctx->mulm(Y3, Y3, F, ctx);
/* Z_3 = F · J */
ctx->mulm(Z3, F, J, ctx);
#undef X1
#undef Y1
#undef Z1
#undef X3
#undef Y3
#undef Z3
#undef B
#undef C
#undef D
#undef E
#undef F
#undef H
#undef J
}
/* RESULT = 2 * POINT */
static void
mpi_ec_dup_point(MPI_POINT result, MPI_POINT point, struct mpi_ec_ctx *ctx)
{
switch (ctx->model) {
case MPI_EC_WEIERSTRASS:
dup_point_weierstrass(result, point, ctx);
break;
case MPI_EC_MONTGOMERY:
dup_point_montgomery(result, point, ctx);
break;
case MPI_EC_EDWARDS:
dup_point_edwards(result, point, ctx);
break;
}
}
/* RESULT = P1 + P2 (Weierstrass version).*/
static void add_points_weierstrass(MPI_POINT result,
MPI_POINT p1, MPI_POINT p2,
struct mpi_ec_ctx *ctx)
{
#define x1 (p1->x)
#define y1 (p1->y)
#define z1 (p1->z)
#define x2 (p2->x)
#define y2 (p2->y)
#define z2 (p2->z)
#define x3 (result->x)
#define y3 (result->y)
#define z3 (result->z)
#define l1 (ctx->t.scratch[0])
#define l2 (ctx->t.scratch[1])
#define l3 (ctx->t.scratch[2])
#define l4 (ctx->t.scratch[3])
#define l5 (ctx->t.scratch[4])
#define l6 (ctx->t.scratch[5])
#define l7 (ctx->t.scratch[6])
#define l8 (ctx->t.scratch[7])
#define l9 (ctx->t.scratch[8])
#define t1 (ctx->t.scratch[9])
#define t2 (ctx->t.scratch[10])
if ((!mpi_cmp(x1, x2)) && (!mpi_cmp(y1, y2)) && (!mpi_cmp(z1, z2))) {
/* Same point; need to call the duplicate function. */
mpi_ec_dup_point(result, p1, ctx);
} else if (!mpi_cmp_ui(z1, 0)) {
/* P1 is at infinity. */
mpi_set(x3, p2->x);
mpi_set(y3, p2->y);
mpi_set(z3, p2->z);
} else if (!mpi_cmp_ui(z2, 0)) {
/* P2 is at infinity. */
mpi_set(x3, p1->x);
mpi_set(y3, p1->y);
mpi_set(z3, p1->z);
} else {
int z1_is_one = !mpi_cmp_ui(z1, 1);
int z2_is_one = !mpi_cmp_ui(z2, 1);
/* l1 = x1 z2^2 */
/* l2 = x2 z1^2 */
if (z2_is_one)
mpi_set(l1, x1);
else {
ec_pow2(l1, z2, ctx);
ec_mulm(l1, l1, x1, ctx);
}
if (z1_is_one)
mpi_set(l2, x2);
else {
ec_pow2(l2, z1, ctx);
ec_mulm(l2, l2, x2, ctx);
}
/* l3 = l1 - l2 */
ec_subm(l3, l1, l2, ctx);
/* l4 = y1 z2^3 */
ec_powm(l4, z2, mpi_const(MPI_C_THREE), ctx);
ec_mulm(l4, l4, y1, ctx);
/* l5 = y2 z1^3 */
ec_powm(l5, z1, mpi_const(MPI_C_THREE), ctx);
ec_mulm(l5, l5, y2, ctx);
/* l6 = l4 - l5 */
ec_subm(l6, l4, l5, ctx);
if (!mpi_cmp_ui(l3, 0)) {
if (!mpi_cmp_ui(l6, 0)) {
/* P1 and P2 are the same - use duplicate function. */
mpi_ec_dup_point(result, p1, ctx);
} else {
/* P1 is the inverse of P2. */
mpi_set_ui(x3, 1);
mpi_set_ui(y3, 1);
mpi_set_ui(z3, 0);
}
} else {
/* l7 = l1 + l2 */
ec_addm(l7, l1, l2, ctx);
/* l8 = l4 + l5 */
ec_addm(l8, l4, l5, ctx);
/* z3 = z1 z2 l3 */
ec_mulm(z3, z1, z2, ctx);
ec_mulm(z3, z3, l3, ctx);
/* x3 = l6^2 - l7 l3^2 */
ec_pow2(t1, l6, ctx);
ec_pow2(t2, l3, ctx);
ec_mulm(t2, t2, l7, ctx);
ec_subm(x3, t1, t2, ctx);
/* l9 = l7 l3^2 - 2 x3 */
ec_mul2(t1, x3, ctx);
ec_subm(l9, t2, t1, ctx);
/* y3 = (l9 l6 - l8 l3^3)/2 */
ec_mulm(l9, l9, l6, ctx);
ec_powm(t1, l3, mpi_const(MPI_C_THREE), ctx); /* fixme: Use saved value*/
ec_mulm(t1, t1, l8, ctx);
ec_subm(y3, l9, t1, ctx);
ec_mulm(y3, y3, ec_get_two_inv_p(ctx), ctx);
}
}
#undef x1
#undef y1
#undef z1
#undef x2
#undef y2
#undef z2
#undef x3
#undef y3
#undef z3
#undef l1
#undef l2
#undef l3
#undef l4
#undef l5
#undef l6
#undef l7
#undef l8
#undef l9
#undef t1
#undef t2
}
/* RESULT = P1 + P2 (Montgomery version).*/
static void add_points_montgomery(MPI_POINT result,
MPI_POINT p1, MPI_POINT p2,
struct mpi_ec_ctx *ctx)
{
(void)result;
(void)p1;
(void)p2;
(void)ctx;
log_fatal("%s: %s not yet supported\n",
"mpi_ec_add_points", "Montgomery");
}
/* RESULT = P1 + P2 (Twisted Edwards version).*/
static void add_points_edwards(MPI_POINT result,
MPI_POINT p1, MPI_POINT p2,
struct mpi_ec_ctx *ctx)
{
#define X1 (p1->x)
#define Y1 (p1->y)
#define Z1 (p1->z)
#define X2 (p2->x)
#define Y2 (p2->y)
#define Z2 (p2->z)
#define X3 (result->x)
#define Y3 (result->y)
#define Z3 (result->z)
#define A (ctx->t.scratch[0])
#define B (ctx->t.scratch[1])
#define C (ctx->t.scratch[2])
#define D (ctx->t.scratch[3])
#define E (ctx->t.scratch[4])
#define F (ctx->t.scratch[5])
#define G (ctx->t.scratch[6])
#define tmp (ctx->t.scratch[7])
point_resize(result, ctx);
	/* Compute: (X_3 : Y_3 : Z_3) = (X_1 : Y_1 : Z_1) + (X_2 : Y_2 : Z_2) */
/* A = Z1 · Z2 */
ctx->mulm(A, Z1, Z2, ctx);
/* B = A^2 */
ctx->pow2(B, A, ctx);
/* C = X1 · X2 */
ctx->mulm(C, X1, X2, ctx);
/* D = Y1 · Y2 */
ctx->mulm(D, Y1, Y2, ctx);
/* E = d · C · D */
ctx->mulm(E, ctx->b, C, ctx);
ctx->mulm(E, E, D, ctx);
/* F = B - E */
ctx->subm(F, B, E, ctx);
/* G = B + E */
ctx->addm(G, B, E, ctx);
/* X_3 = A · F · ((X_1 + Y_1) · (X_2 + Y_2) - C - D) */
ctx->addm(tmp, X1, Y1, ctx);
ctx->addm(X3, X2, Y2, ctx);
ctx->mulm(X3, X3, tmp, ctx);
ctx->subm(X3, X3, C, ctx);
ctx->subm(X3, X3, D, ctx);
ctx->mulm(X3, X3, F, ctx);
ctx->mulm(X3, X3, A, ctx);
/* Y_3 = A · G · (D - aC) */
if (ctx->dialect == ECC_DIALECT_ED25519) {
ctx->addm(Y3, D, C, ctx);
} else {
ctx->mulm(Y3, ctx->a, C, ctx);
ctx->subm(Y3, D, Y3, ctx);
}
ctx->mulm(Y3, Y3, G, ctx);
ctx->mulm(Y3, Y3, A, ctx);
/* Z_3 = F · G */
ctx->mulm(Z3, F, G, ctx);
#undef X1
#undef Y1
#undef Z1
#undef X2
#undef Y2
#undef Z2
#undef X3
#undef Y3
#undef Z3
#undef A
#undef B
#undef C
#undef D
#undef E
#undef F
#undef G
#undef tmp
}
/* Compute a step of Montgomery Ladder (only use X and Z in the point).
 * Inputs: P1, P2, and the x-coordinate of DIF = P1 - P2.
* Outputs: PRD = 2 * P1 and SUM = P1 + P2.
*/
static void montgomery_ladder(MPI_POINT prd, MPI_POINT sum,
MPI_POINT p1, MPI_POINT p2, MPI dif_x,
struct mpi_ec_ctx *ctx)
{
ctx->addm(sum->x, p2->x, p2->z, ctx);
ctx->subm(p2->z, p2->x, p2->z, ctx);
ctx->addm(prd->x, p1->x, p1->z, ctx);
ctx->subm(p1->z, p1->x, p1->z, ctx);
ctx->mulm(p2->x, p1->z, sum->x, ctx);
ctx->mulm(p2->z, prd->x, p2->z, ctx);
ctx->pow2(p1->x, prd->x, ctx);
ctx->pow2(p1->z, p1->z, ctx);
ctx->addm(sum->x, p2->x, p2->z, ctx);
ctx->subm(p2->z, p2->x, p2->z, ctx);
ctx->mulm(prd->x, p1->x, p1->z, ctx);
ctx->subm(p1->z, p1->x, p1->z, ctx);
ctx->pow2(sum->x, sum->x, ctx);
ctx->pow2(sum->z, p2->z, ctx);
ctx->mulm(prd->z, p1->z, ctx->a, ctx); /* CTX->A: (a-2)/4 */
ctx->mulm(sum->z, sum->z, dif_x, ctx);
ctx->addm(prd->z, p1->x, prd->z, ctx);
ctx->mulm(prd->z, prd->z, p1->z, ctx);
}
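/* Sketch (illustration only, not part of the original file): the
 * ladder's control flow on plain integers, where point addition is +
 * and doubling is *2, so the result is scalar * value. The invariant
 * r1 - r0 == value holds on every iteration; in the curve version this
 * fixed difference is exactly the DIF input that makes the x-only
 * formulas work. BITS_PER_LONG comes from the kernel headers.
 */
static unsigned long __maybe_unused ladder_sketch(unsigned long scalar,
						  unsigned long value)
{
	unsigned long r0 = 0, r1 = value;
	int j;

	for (j = BITS_PER_LONG - 1; j >= 0; j--) {
		if (scalar & (1UL << j)) {
			r0 = r0 + r1;	/* add */
			r1 = 2 * r1;	/* double */
		} else {
			r1 = r0 + r1;
			r0 = 2 * r0;
		}
	}
	return r0;
}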
/* RESULT = P1 + P2 */
void mpi_ec_add_points(MPI_POINT result,
MPI_POINT p1, MPI_POINT p2,
struct mpi_ec_ctx *ctx)
{
switch (ctx->model) {
case MPI_EC_WEIERSTRASS:
add_points_weierstrass(result, p1, p2, ctx);
break;
case MPI_EC_MONTGOMERY:
add_points_montgomery(result, p1, p2, ctx);
break;
case MPI_EC_EDWARDS:
add_points_edwards(result, p1, p2, ctx);
break;
}
}
EXPORT_SYMBOL_GPL(mpi_ec_add_points);
/* Scalar point multiplication - the main function for ECC. It takes
* an integer SCALAR and a POINT as well as the usual context CTX.
* RESULT will be set to the resulting point.
*/
void mpi_ec_mul_point(MPI_POINT result,
MPI scalar, MPI_POINT point,
struct mpi_ec_ctx *ctx)
{
MPI x1, y1, z1, k, h, yy;
unsigned int i, loops;
struct gcry_mpi_point p1, p2, p1inv;
if (ctx->model == MPI_EC_EDWARDS) {
/* Simple left to right binary method. Algorithm 3.27 from
* {author={Hankerson, Darrel and Menezes, Alfred J. and Vanstone, Scott},
* title = {Guide to Elliptic Curve Cryptography},
* year = {2003}, isbn = {038795273X},
* url = {http://www.cacr.math.uwaterloo.ca/ecc/},
* publisher = {Springer-Verlag New York, Inc.}}
*/
unsigned int nbits;
int j;
if (mpi_cmp(scalar, ctx->p) >= 0)
nbits = mpi_get_nbits(scalar);
else
nbits = mpi_get_nbits(ctx->p);
mpi_set_ui(result->x, 0);
mpi_set_ui(result->y, 1);
mpi_set_ui(result->z, 1);
		point_resize(point, ctx);
		point_resize(result, ctx);
for (j = nbits-1; j >= 0; j--) {
mpi_ec_dup_point(result, result, ctx);
if (mpi_test_bit(scalar, j))
mpi_ec_add_points(result, result, point, ctx);
}
return;
} else if (ctx->model == MPI_EC_MONTGOMERY) {
unsigned int nbits;
int j;
struct gcry_mpi_point p1_, p2_;
MPI_POINT q1, q2, prd, sum;
unsigned long sw;
mpi_size_t rsize;
/* Compute scalar point multiplication with Montgomery Ladder.
		 * Note that we don't use the Y-coordinate in the points at all.
		 * RESULT->Y will be filled with zero.
*/
nbits = mpi_get_nbits(scalar);
point_init(&p1);
point_init(&p2);
point_init(&p1_);
point_init(&p2_);
mpi_set_ui(p1.x, 1);
mpi_free(p2.x);
p2.x = mpi_copy(point->x);
mpi_set_ui(p2.z, 1);
point_resize(&p1, ctx);
point_resize(&p2, ctx);
point_resize(&p1_, ctx);
point_resize(&p2_, ctx);
mpi_resize(point->x, ctx->p->nlimbs);
point->x->nlimbs = ctx->p->nlimbs;
q1 = &p1;
q2 = &p2;
prd = &p1_;
sum = &p2_;
for (j = nbits-1; j >= 0; j--) {
MPI_POINT t;
sw = mpi_test_bit(scalar, j);
point_swap_cond(q1, q2, sw, ctx);
montgomery_ladder(prd, sum, q1, q2, point->x, ctx);
point_swap_cond(prd, sum, sw, ctx);
t = q1; q1 = prd; prd = t;
t = q2; q2 = sum; sum = t;
}
mpi_clear(result->y);
sw = (nbits & 1);
point_swap_cond(&p1, &p1_, sw, ctx);
rsize = p1.z->nlimbs;
MPN_NORMALIZE(p1.z->d, rsize);
if (rsize == 0) {
mpi_set_ui(result->x, 1);
mpi_set_ui(result->z, 0);
} else {
z1 = mpi_new(0);
ec_invm(z1, p1.z, ctx);
ec_mulm(result->x, p1.x, z1, ctx);
mpi_set_ui(result->z, 1);
mpi_free(z1);
}
point_free(&p1);
point_free(&p2);
point_free(&p1_);
point_free(&p2_);
return;
}
x1 = mpi_alloc_like(ctx->p);
y1 = mpi_alloc_like(ctx->p);
h = mpi_alloc_like(ctx->p);
k = mpi_copy(scalar);
yy = mpi_copy(point->y);
if (mpi_has_sign(k)) {
k->sign = 0;
ec_invm(yy, yy, ctx);
}
if (!mpi_cmp_ui(point->z, 1)) {
mpi_set(x1, point->x);
mpi_set(y1, yy);
} else {
MPI z2, z3;
z2 = mpi_alloc_like(ctx->p);
z3 = mpi_alloc_like(ctx->p);
ec_mulm(z2, point->z, point->z, ctx);
ec_mulm(z3, point->z, z2, ctx);
ec_invm(z2, z2, ctx);
ec_mulm(x1, point->x, z2, ctx);
ec_invm(z3, z3, ctx);
ec_mulm(y1, yy, z3, ctx);
mpi_free(z2);
mpi_free(z3);
}
z1 = mpi_copy(mpi_const(MPI_C_ONE));
mpi_mul(h, k, mpi_const(MPI_C_THREE)); /* h = 3k */
loops = mpi_get_nbits(h);
if (loops < 2) {
		/* If SCALAR is zero, the above mpi_mul sets H to zero and thus
		 * LOOPS will be zero. To avoid an underflow of I in the main
		 * loop we set LOOPS to 2 and the result to (0,0,0).
		 */
loops = 2;
mpi_clear(result->x);
mpi_clear(result->y);
mpi_clear(result->z);
} else {
mpi_set(result->x, point->x);
mpi_set(result->y, yy);
mpi_set(result->z, point->z);
}
mpi_free(yy); yy = NULL;
p1.x = x1; x1 = NULL;
p1.y = y1; y1 = NULL;
p1.z = z1; z1 = NULL;
point_init(&p2);
point_init(&p1inv);
/* Invert point: y = p - y mod p */
point_set(&p1inv, &p1);
ec_subm(p1inv.y, ctx->p, p1inv.y, ctx);
for (i = loops-2; i > 0; i--) {
mpi_ec_dup_point(result, result, ctx);
if (mpi_test_bit(h, i) == 1 && mpi_test_bit(k, i) == 0) {
point_set(&p2, result);
mpi_ec_add_points(result, &p2, &p1, ctx);
}
if (mpi_test_bit(h, i) == 0 && mpi_test_bit(k, i) == 1) {
point_set(&p2, result);
mpi_ec_add_points(result, &p2, &p1inv, ctx);
}
}
point_free(&p1);
point_free(&p2);
point_free(&p1inv);
mpi_free(h);
mpi_free(k);
}
EXPORT_SYMBOL_GPL(mpi_ec_mul_point);
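/* Sketch (illustration only, not part of the original file): the
 * left-to-right binary method used by the Edwards branch above, shown
 * on plain integers where "add" is + and "double" is *2 -- i.e. it
 * computes scalar * value. BITS_PER_LONG comes from the kernel headers.
 */
static unsigned long __maybe_unused dbl_and_add_sketch(unsigned long scalar,
						       unsigned long value)
{
	unsigned long acc = 0;			/* the neutral element */
	int j;

	for (j = BITS_PER_LONG - 1; j >= 0; j--) {
		acc += acc;			/* mpi_ec_dup_point() */
		if (scalar & (1UL << j))
			acc += value;		/* mpi_ec_add_points() */
	}
	return acc;
}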
/* Return true if POINT is on the curve described by CTX. */
int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx)
{
int res = 0;
MPI x, y, w;
x = mpi_new(0);
y = mpi_new(0);
w = mpi_new(0);
/* Check that the point is in range. This needs to be done here and
* not after conversion to affine coordinates.
*/
if (mpi_cmpabs(point->x, ctx->p) >= 0)
goto leave;
if (mpi_cmpabs(point->y, ctx->p) >= 0)
goto leave;
if (mpi_cmpabs(point->z, ctx->p) >= 0)
goto leave;
switch (ctx->model) {
case MPI_EC_WEIERSTRASS:
{
MPI xxx;
if (mpi_ec_get_affine(x, y, point, ctx))
goto leave;
xxx = mpi_new(0);
/* y^2 == x^3 + a·x + b */
ec_pow2(y, y, ctx);
ec_pow3(xxx, x, ctx);
ec_mulm(w, ctx->a, x, ctx);
ec_addm(w, w, ctx->b, ctx);
ec_addm(w, w, xxx, ctx);
if (!mpi_cmp(y, w))
res = 1;
mpi_free(xxx);
}
break;
case MPI_EC_MONTGOMERY:
{
#define xx y
/* With Montgomery curve, only X-coordinate is valid. */
if (mpi_ec_get_affine(x, NULL, point, ctx))
goto leave;
/* The equation is: b * y^2 == x^3 + a · x^2 + x */
/* We check if right hand is quadratic residue or not by
* Euler's criterion.
*/
/* CTX->A has (a-2)/4 and CTX->B has b^-1 */
ec_mulm(w, ctx->a, mpi_const(MPI_C_FOUR), ctx);
ec_addm(w, w, mpi_const(MPI_C_TWO), ctx);
ec_mulm(w, w, x, ctx);
ec_pow2(xx, x, ctx);
ec_addm(w, w, xx, ctx);
ec_addm(w, w, mpi_const(MPI_C_ONE), ctx);
ec_mulm(w, w, x, ctx);
ec_mulm(w, w, ctx->b, ctx);
#undef xx
/* Compute Euler's criterion: w^(p-1)/2 */
#define p_minus1 y
ec_subm(p_minus1, ctx->p, mpi_const(MPI_C_ONE), ctx);
mpi_rshift(p_minus1, p_minus1, 1);
ec_powm(w, w, p_minus1, ctx);
res = !mpi_cmp_ui(w, 1);
#undef p_minus1
}
break;
case MPI_EC_EDWARDS:
{
if (mpi_ec_get_affine(x, y, point, ctx))
goto leave;
mpi_resize(w, ctx->p->nlimbs);
w->nlimbs = ctx->p->nlimbs;
/* a · x^2 + y^2 - 1 - b · x^2 · y^2 == 0 */
ctx->pow2(x, x, ctx);
ctx->pow2(y, y, ctx);
if (ctx->dialect == ECC_DIALECT_ED25519)
ctx->subm(w, ctx->p, x, ctx);
else
ctx->mulm(w, ctx->a, x, ctx);
ctx->addm(w, w, y, ctx);
ctx->mulm(x, x, y, ctx);
ctx->mulm(x, x, ctx->b, ctx);
ctx->subm(w, w, x, ctx);
if (!mpi_cmp_ui(w, 1))
res = 1;
}
break;
}
leave:
mpi_free(w);
mpi_free(x);
mpi_free(y);
return res;
}
EXPORT_SYMBOL_GPL(mpi_ec_curve_point);
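/* Sketch (illustration only, not part of the original file): Euler's
 * criterion as used in the Montgomery branch above, on small integers.
 * For an odd prime p, w is a quadratic residue mod p iff
 * w^((p-1)/2) == 1 (mod p). Naive square-and-multiply; (r * w) can
 * overflow for large p, so this is strictly a toy.
 */
static int __maybe_unused is_qr_sketch(unsigned long w, unsigned long p)
{
	unsigned long e = (p - 1) / 2, r = 1;

	w %= p;
	while (e) {
		if (e & 1)
			r = (r * w) % p;
		w = (w * w) % p;
		e >>= 1;
	}
	return r == 1;
}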
| linux-master | lib/crypto/mpi/ec.c |
/* mpi-mod.c - Modular reduction
* Copyright (C) 1998, 1999, 2001, 2002, 2003,
* 2007 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*/
#include "mpi-internal.h"
#include "longlong.h"
/* Context used with Barrett reduction. */
struct barrett_ctx_s {
MPI m; /* The modulus - may not be modified. */
int m_copied; /* If true, M needs to be released. */
int k;
MPI y;
MPI r1; /* Helper MPI. */
MPI r2; /* Helper MPI. */
MPI r3; /* Helper MPI allocated on demand. */
};
void mpi_mod(MPI rem, MPI dividend, MPI divisor)
{
mpi_fdiv_r(rem, dividend, divisor);
}
/* This function returns a new context for Barrett based operations on
 * the modulus M. This context needs to be released using
 * mpi_barrett_free. If COPY is true, M is copied into the context and
 * the caller may change M afterwards. If COPY is false, M may not be
 * changed until mpi_barrett_free has been called.
 */
mpi_barrett_t mpi_barrett_init(MPI m, int copy)
{
mpi_barrett_t ctx;
MPI tmp;
mpi_normalize(m);
ctx = kcalloc(1, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return NULL;
if (copy) {
ctx->m = mpi_copy(m);
ctx->m_copied = 1;
} else
ctx->m = m;
ctx->k = mpi_get_nlimbs(m);
tmp = mpi_alloc(ctx->k + 1);
/* Barrett precalculation: y = floor(b^(2k) / m). */
mpi_set_ui(tmp, 1);
mpi_lshift_limbs(tmp, 2 * ctx->k);
mpi_fdiv_q(tmp, tmp, m);
ctx->y = tmp;
ctx->r1 = mpi_alloc(2 * ctx->k + 1);
ctx->r2 = mpi_alloc(2 * ctx->k + 1);
return ctx;
}
void mpi_barrett_free(mpi_barrett_t ctx)
{
if (ctx) {
mpi_free(ctx->y);
mpi_free(ctx->r1);
mpi_free(ctx->r2);
if (ctx->r3)
mpi_free(ctx->r3);
if (ctx->m_copied)
mpi_free(ctx->m);
kfree(ctx);
}
}
/* R = X mod M
*
 * Using Barrett reduction. Before using this function
 * mpi_barrett_init must have been called to do the
 * precalculations. CTX is the context created by that precalculation
 * and also conveys M. If the Barrett reduction cannot be done, a
 * straightforward reduction method is used instead.
*
* We assume that these conditions are met:
* Input: x =(x_2k-1 ...x_0)_b
* m =(m_k-1 ....m_0)_b with m_k-1 != 0
* Output: r = x mod m
*/
void mpi_mod_barrett(MPI r, MPI x, mpi_barrett_t ctx)
{
MPI m = ctx->m;
int k = ctx->k;
MPI y = ctx->y;
MPI r1 = ctx->r1;
MPI r2 = ctx->r2;
int sign;
mpi_normalize(x);
if (mpi_get_nlimbs(x) > 2*k) {
mpi_mod(r, x, m);
return;
}
sign = x->sign;
x->sign = 0;
/* 1. q1 = floor( x / b^k-1)
* q2 = q1 * y
* q3 = floor( q2 / b^k+1 )
* Actually, we don't need qx, we can work direct on r2
*/
mpi_set(r2, x);
mpi_rshift_limbs(r2, k-1);
mpi_mul(r2, r2, y);
mpi_rshift_limbs(r2, k+1);
/* 2. r1 = x mod b^k+1
* r2 = q3 * m mod b^k+1
* r = r1 - r2
* 3. if r < 0 then r = r + b^k+1
*/
mpi_set(r1, x);
if (r1->nlimbs > k+1) /* Quick modulo operation. */
r1->nlimbs = k+1;
mpi_mul(r2, r2, m);
if (r2->nlimbs > k+1) /* Quick modulo operation. */
r2->nlimbs = k+1;
mpi_sub(r, r1, r2);
if (mpi_has_sign(r)) {
if (!ctx->r3) {
ctx->r3 = mpi_alloc(k + 2);
mpi_set_ui(ctx->r3, 1);
mpi_lshift_limbs(ctx->r3, k + 1);
}
mpi_add(r, r, ctx->r3);
}
/* 4. while r >= m do r = r - m */
while (mpi_cmp(r, m) >= 0)
mpi_sub(r, r, m);
x->sign = sign;
}
void mpi_mul_barrett(MPI w, MPI u, MPI v, mpi_barrett_t ctx)
{
mpi_mul(w, u, v);
mpi_mod_barrett(w, w, ctx);
}
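/* Usage sketch (illustration only, not part of the original file): the
 * intended calling sequence for the Barrett helpers above, for a
 * caller that reduces many values by the same modulus M. With COPY = 0
 * the caller must keep M unchanged until mpi_barrett_free().
 */
static void __maybe_unused barrett_usage_sketch(MPI r, MPI x, MPI m)
{
	mpi_barrett_t ctx = mpi_barrett_init(m, 0);

	if (!ctx)
		return;			/* could fall back to mpi_mod() */
	mpi_mod_barrett(r, x, ctx);	/* r = x mod m */
	mpi_barrett_free(ctx);
}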
| linux-master | lib/crypto/mpi/mpi-mod.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* mpihelp-mul_1.c - MPI helper functions
* Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
* The GNU MP Library itself is published under the LGPL;
* however I decided to publish this code under the plain GPL.
*/
#include "mpi-internal.h"
#include "longlong.h"
mpi_limb_t
mpihelp_mul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size,
mpi_limb_t s2_limb)
{
mpi_limb_t cy_limb;
mpi_size_t j;
mpi_limb_t prod_high, prod_low;
/* The loop counter and index J goes from -S1_SIZE to -1. This way
* the loop becomes faster. */
j = -s1_size;
/* Offset the base pointers to compensate for the negative indices. */
s1_ptr -= j;
res_ptr -= j;
cy_limb = 0;
do {
umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb);
prod_low += cy_limb;
cy_limb = (prod_low < cy_limb ? 1 : 0) + prod_high;
res_ptr[j] = prod_low;
} while (++j);
return cy_limb;
}
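/* Illustration only (not from the original file): what umul_ppmm()
 * from longlong.h computes -- the double-width product of two limbs --
 * sketched with a 128-bit type. Assumes 64-bit limbs and compiler
 * support for unsigned __int128.
 */
static inline void __maybe_unused umul_ppmm_sketch(mpi_limb_t *hi,
						   mpi_limb_t *lo,
						   mpi_limb_t a, mpi_limb_t b)
{
	unsigned __int128 p = (unsigned __int128)a * b;

	*hi = (mpi_limb_t)(p >> 64);
	*lo = (mpi_limb_t)p;
}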
| linux-master | lib/crypto/mpi/generic_mpih-mul1.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* mpihelp-add_2.c - MPI helper functions
* Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
* The GNU MP Library itself is published under the LGPL;
* however I decided to publish this code under the plain GPL.
*/
#include "mpi-internal.h"
#include "longlong.h"
mpi_limb_t
mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_ptr_t s2_ptr, mpi_size_t size)
{
mpi_limb_t x, y, cy;
mpi_size_t j;
/* The loop counter and index J goes from -SIZE to -1. This way
the loop becomes faster. */
j = -size;
/* Offset the base pointers to compensate for the negative indices. */
s1_ptr -= j;
s2_ptr -= j;
res_ptr -= j;
cy = 0;
do {
y = s2_ptr[j];
x = s1_ptr[j];
y += cy; /* add previous carry to subtrahend */
cy = y < cy; /* get out carry from that addition */
y = x - y; /* main subtract */
cy += y > x; /* get out carry from the subtract, combine */
res_ptr[j] = y;
} while (++j);
return cy;
}
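/* Worked example (illustration only, not part of the original file):
 * limb arrays are least-significant limb first, so this computes
 * (2^B + 5) - (2^B + 7) where B = BITS_PER_MPI_LIMB. The result -2 is
 * stored modulo 2^(2B) and the borrow of 1 flags the wrap-around.
 */
static void __maybe_unused sub_n_example(void)
{
	mpi_limb_t s1[2] = { 5, 1 };
	mpi_limb_t s2[2] = { 7, 1 };
	mpi_limb_t res[2];
	mpi_limb_t borrow = mpihelp_sub_n(res, s1, s2, 2);

	/* borrow == 1; res == { (mpi_limb_t)-2, (mpi_limb_t)-1 }. */
	(void)borrow;
}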
| linux-master | lib/crypto/mpi/generic_mpih-sub1.c |
/* mpiutil.c - Utility functions for MPI
* Copyright (C) 1998, 1999 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* GnuPG is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* GnuPG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "mpi-internal.h"
/* Constants allocated right away at startup. */
static MPI constants[MPI_NUMBER_OF_CONSTANTS];
/* Initialize the MPI subsystem. This is called early so that the
 * initialization can be done without having to worry about threading
 * issues.
*/
static int __init mpi_init(void)
{
int idx;
unsigned long value;
for (idx = 0; idx < MPI_NUMBER_OF_CONSTANTS; idx++) {
switch (idx) {
case MPI_C_ZERO:
value = 0;
break;
case MPI_C_ONE:
value = 1;
break;
case MPI_C_TWO:
value = 2;
break;
case MPI_C_THREE:
value = 3;
break;
case MPI_C_FOUR:
value = 4;
break;
case MPI_C_EIGHT:
value = 8;
break;
default:
pr_err("MPI: invalid mpi_const selector %d\n", idx);
return -EFAULT;
}
constants[idx] = mpi_alloc_set_ui(value);
constants[idx]->flags = (16|32);
}
return 0;
}
postcore_initcall(mpi_init);
/* Return a constant MPI described by NO which is one of the
* MPI_C_xxx macros. There is no need to copy this returned value; it
* may be used directly.
*/
MPI mpi_const(enum gcry_mpi_constants no)
{
	if ((int)no < 0 || no >= MPI_NUMBER_OF_CONSTANTS)
pr_err("MPI: invalid mpi_const selector %d\n", no);
if (!constants[no])
pr_err("MPI: MPI subsystem not initialized\n");
return constants[no];
}
EXPORT_SYMBOL_GPL(mpi_const);
/****************
 * Note: It was a bad idea to use the number of limbs to allocate
 * because on an Alpha the limbs are large but we normally need
 * integers of n bits - so we should change this to bits (or bytes).
*
* But mpi_alloc is used in a lot of places :-)
*/
MPI mpi_alloc(unsigned nlimbs)
{
MPI a;
a = kmalloc(sizeof *a, GFP_KERNEL);
if (!a)
return a;
if (nlimbs) {
a->d = mpi_alloc_limb_space(nlimbs);
if (!a->d) {
kfree(a);
return NULL;
}
} else {
a->d = NULL;
}
a->alloced = nlimbs;
a->nlimbs = 0;
a->sign = 0;
a->flags = 0;
a->nbits = 0;
return a;
}
EXPORT_SYMBOL_GPL(mpi_alloc);
mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs)
{
size_t len = nlimbs * sizeof(mpi_limb_t);
if (!len)
return NULL;
return kmalloc(len, GFP_KERNEL);
}
void mpi_free_limb_space(mpi_ptr_t a)
{
if (!a)
return;
kfree_sensitive(a);
}
void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs)
{
mpi_free_limb_space(a->d);
a->d = ap;
a->alloced = nlimbs;
}
/****************
 * Resize the limb array of A to NLIMBS. The additional space is
 * cleared (set to 0).
*/
int mpi_resize(MPI a, unsigned nlimbs)
{
void *p;
if (nlimbs <= a->alloced)
return 0; /* no need to do it */
if (a->d) {
p = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
if (!p)
return -ENOMEM;
memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
kfree_sensitive(a->d);
a->d = p;
} else {
a->d = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
if (!a->d)
return -ENOMEM;
}
a->alloced = nlimbs;
return 0;
}
void mpi_clear(MPI a)
{
if (!a)
return;
a->nlimbs = 0;
a->flags = 0;
}
EXPORT_SYMBOL_GPL(mpi_clear);
void mpi_free(MPI a)
{
if (!a)
return;
if (a->flags & 4)
kfree_sensitive(a->d);
else
mpi_free_limb_space(a->d);
if (a->flags & ~7)
pr_info("invalid flag value in mpi\n");
kfree(a);
}
EXPORT_SYMBOL_GPL(mpi_free);
/****************
* Note: This copy function should not interpret the MPI
* but copy it transparently.
*/
MPI mpi_copy(MPI a)
{
int i;
MPI b;
if (a) {
b = mpi_alloc(a->nlimbs);
b->nlimbs = a->nlimbs;
b->sign = a->sign;
b->flags = a->flags;
b->flags &= ~(16|32); /* Reset the immutable and constant flags. */
for (i = 0; i < b->nlimbs; i++)
b->d[i] = a->d[i];
} else
b = NULL;
return b;
}
/****************
* This function allocates an MPI which is optimized to hold
* a value as large as the one given in the argument and allocates it
* with the same flags as A.
*/
MPI mpi_alloc_like(MPI a)
{
MPI b;
if (a) {
b = mpi_alloc(a->nlimbs);
b->nlimbs = 0;
b->sign = 0;
b->flags = a->flags;
} else
b = NULL;
return b;
}
/* Set U into W and release U. If W is NULL only U will be released. */
void mpi_snatch(MPI w, MPI u)
{
if (w) {
mpi_assign_limb_space(w, u->d, u->alloced);
w->nlimbs = u->nlimbs;
w->sign = u->sign;
w->flags = u->flags;
u->alloced = 0;
u->nlimbs = 0;
u->d = NULL;
}
mpi_free(u);
}
MPI mpi_set(MPI w, MPI u)
{
mpi_ptr_t wp, up;
mpi_size_t usize = u->nlimbs;
int usign = u->sign;
if (!w)
w = mpi_alloc(mpi_get_nlimbs(u));
RESIZE_IF_NEEDED(w, usize);
wp = w->d;
up = u->d;
MPN_COPY(wp, up, usize);
w->nlimbs = usize;
w->flags = u->flags;
w->flags &= ~(16|32); /* Reset the immutable and constant flags. */
w->sign = usign;
return w;
}
EXPORT_SYMBOL_GPL(mpi_set);
MPI mpi_set_ui(MPI w, unsigned long u)
{
if (!w)
w = mpi_alloc(1);
	/* FIXME: If U is 0 there is no need to resize and thus possibly
	 * allocate the limbs.
	 */
RESIZE_IF_NEEDED(w, 1);
w->d[0] = u;
w->nlimbs = u ? 1 : 0;
w->sign = 0;
w->flags = 0;
return w;
}
EXPORT_SYMBOL_GPL(mpi_set_ui);
MPI mpi_alloc_set_ui(unsigned long u)
{
MPI w = mpi_alloc(1);
w->d[0] = u;
w->nlimbs = u ? 1 : 0;
w->sign = 0;
return w;
}
/****************
* Swap the value of A and B, when SWAP is 1.
* Leave the value when SWAP is 0.
* This implementation should be constant-time regardless of SWAP.
*/
void mpi_swap_cond(MPI a, MPI b, unsigned long swap)
{
mpi_size_t i;
mpi_size_t nlimbs;
mpi_limb_t mask = ((mpi_limb_t)0) - swap;
mpi_limb_t x;
if (a->alloced > b->alloced)
nlimbs = b->alloced;
else
nlimbs = a->alloced;
if (a->nlimbs > nlimbs || b->nlimbs > nlimbs)
return;
for (i = 0; i < nlimbs; i++) {
x = mask & (a->d[i] ^ b->d[i]);
a->d[i] = a->d[i] ^ x;
b->d[i] = b->d[i] ^ x;
}
x = mask & (a->nlimbs ^ b->nlimbs);
a->nlimbs = a->nlimbs ^ x;
b->nlimbs = b->nlimbs ^ x;
x = mask & (a->sign ^ b->sign);
a->sign = a->sign ^ x;
b->sign = b->sign ^ x;
}
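/* Sketch (illustration only, not part of the original file): the same
 * masked-XOR trick on two plain words. MASK is all-ones when SWAP is 1
 * and all-zeroes when SWAP is 0, so both cases execute identical
 * instructions and the choice does not show up in the timing.
 */
static void __maybe_unused word_swap_cond(unsigned long *a,
					  unsigned long *b,
					  unsigned long swap)
{
	unsigned long mask = 0UL - swap;
	unsigned long x = mask & (*a ^ *b);

	*a ^= x;
	*b ^= x;
}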
MODULE_DESCRIPTION("Multiprecision maths library");
MODULE_LICENSE("GPL");
| linux-master | lib/crypto/mpi/mpiutil.c |
/* mpi-mul.c - MPI functions
* Copyright (C) 1994, 1996, 1998, 2001, 2002,
* 2003 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "mpi-internal.h"
void mpi_mul(MPI w, MPI u, MPI v)
{
mpi_size_t usize, vsize, wsize;
mpi_ptr_t up, vp, wp;
mpi_limb_t cy;
int usign, vsign, sign_product;
int assign_wp = 0;
mpi_ptr_t tmp_limb = NULL;
if (u->nlimbs < v->nlimbs) {
/* Swap U and V. */
usize = v->nlimbs;
usign = v->sign;
up = v->d;
vsize = u->nlimbs;
vsign = u->sign;
vp = u->d;
} else {
usize = u->nlimbs;
usign = u->sign;
up = u->d;
vsize = v->nlimbs;
vsign = v->sign;
vp = v->d;
}
sign_product = usign ^ vsign;
wp = w->d;
/* Ensure W has space enough to store the result. */
wsize = usize + vsize;
if (w->alloced < wsize) {
if (wp == up || wp == vp) {
wp = mpi_alloc_limb_space(wsize);
assign_wp = 1;
} else {
mpi_resize(w, wsize);
wp = w->d;
}
} else { /* Make U and V not overlap with W. */
if (wp == up) {
/* W and U are identical. Allocate temporary space for U. */
up = tmp_limb = mpi_alloc_limb_space(usize);
/* Is V identical too? Keep it identical with U. */
if (wp == vp)
vp = up;
/* Copy to the temporary space. */
MPN_COPY(up, wp, usize);
} else if (wp == vp) {
/* W and V are identical. Allocate temporary space for V. */
vp = tmp_limb = mpi_alloc_limb_space(vsize);
/* Copy to the temporary space. */
MPN_COPY(vp, wp, vsize);
}
}
if (!vsize)
wsize = 0;
else {
mpihelp_mul(wp, up, usize, vp, vsize, &cy);
wsize -= cy ? 0:1;
}
if (assign_wp)
mpi_assign_limb_space(w, wp, wsize);
w->nlimbs = wsize;
w->sign = sign_product;
if (tmp_limb)
mpi_free_limb_space(tmp_limb);
}
EXPORT_SYMBOL_GPL(mpi_mul);
void mpi_mulm(MPI w, MPI u, MPI v, MPI m)
{
mpi_mul(w, u, v);
mpi_tdiv_r(w, w, m);
}
EXPORT_SYMBOL_GPL(mpi_mulm);
| linux-master | lib/crypto/mpi/mpi-mul.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* mpihelp-mul.c - MPI helper functions
* Copyright (C) 1994, 1996, 1998, 1999,
* 2000 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
* The GNU MP Library itself is published under the LGPL;
* however I decided to publish this code under the plain GPL.
*/
#include <linux/string.h>
#include "mpi-internal.h"
#include "longlong.h"
#define MPN_MUL_N_RECURSE(prodp, up, vp, size, tspace)		\
	do {							\
		if ((size) < KARATSUBA_THRESHOLD)		\
			mul_n_basecase(prodp, up, vp, size);	\
		else						\
			mul_n(prodp, up, vp, size, tspace);	\
	} while (0)
#define MPN_SQR_N_RECURSE(prodp, up, size, tspace)		\
	do {							\
		if ((size) < KARATSUBA_THRESHOLD)		\
			mpih_sqr_n_basecase(prodp, up, size);	\
		else						\
			mpih_sqr_n(prodp, up, size, tspace);	\
	} while (0)
/* Multiply the natural numbers u (pointed to by UP) and v (pointed to by VP),
* both with SIZE limbs, and store the result at PRODP. 2 * SIZE limbs are
* always stored. Return the most significant limb.
*
* Argument constraints:
* 1. PRODP != UP and PRODP != VP, i.e. the destination
* must be distinct from the multiplier and the multiplicand.
*
*
* Handle simple cases with traditional multiplication.
*
* This is the most critical code of multiplication. All multiplies rely
* on this, both small and huge. Small ones arrive here immediately. Huge
* ones arrive here as this is the base case for Karatsuba's recursive
* algorithm below.
*/
static mpi_limb_t
mul_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size)
{
mpi_size_t i;
mpi_limb_t cy;
mpi_limb_t v_limb;
/* Multiply by the first limb in V separately, as the result can be
* stored (not added) to PROD. We also avoid a loop for zeroing. */
v_limb = vp[0];
if (v_limb <= 1) {
if (v_limb == 1)
MPN_COPY(prodp, up, size);
else
MPN_ZERO(prodp, size);
cy = 0;
} else
cy = mpihelp_mul_1(prodp, up, size, v_limb);
prodp[size] = cy;
prodp++;
/* For each iteration in the outer loop, multiply one limb from
* U with one limb from V, and add it to PROD. */
for (i = 1; i < size; i++) {
v_limb = vp[i];
if (v_limb <= 1) {
cy = 0;
if (v_limb == 1)
cy = mpihelp_add_n(prodp, prodp, up, size);
} else
cy = mpihelp_addmul_1(prodp, up, size, v_limb);
prodp[size] = cy;
prodp++;
}
return cy;
}
static void
mul_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp,
mpi_size_t size, mpi_ptr_t tspace)
{
if (size & 1) {
/* The size is odd, and the code below doesn't handle that.
* Multiply the least significant (size - 1) limbs with a recursive
* call, and handle the most significant limb of S1 and S2
* separately.
* A slightly faster way to do this would be to make the Karatsuba
* code below behave as if the size were even, and let it check for
* odd size in the end. I.e., in essence move this code to the end.
* Doing so would save us a recursive call, and potentially make the
* stack grow a lot less.
*/
mpi_size_t esize = size - 1; /* even size */
mpi_limb_t cy_limb;
MPN_MUL_N_RECURSE(prodp, up, vp, esize, tspace);
cy_limb = mpihelp_addmul_1(prodp + esize, up, esize, vp[esize]);
prodp[esize + esize] = cy_limb;
cy_limb = mpihelp_addmul_1(prodp + esize, vp, size, up[esize]);
prodp[esize + size] = cy_limb;
} else {
/* Anatolij Alekseevich Karatsuba's divide-and-conquer algorithm.
*
* Split U in two pieces, U1 and U0, such that
* U = U0 + U1*(B**n),
* and V in V1 and V0, such that
* V = V0 + V1*(B**n).
*
* UV is then computed recursively using the identity
*
* 2n n n n
* UV = (B + B )U V + B (U -U )(V -V ) + (B + 1)U V
* 1 1 1 0 0 1 0 0
*
* Where B = 2**BITS_PER_MP_LIMB.
*/
mpi_size_t hsize = size >> 1;
mpi_limb_t cy;
int negflg;
/* Product H. ________________ ________________
* |_____U1 x V1____||____U0 x V0_____|
* Put result in upper part of PROD and pass low part of TSPACE
* as new TSPACE.
*/
MPN_MUL_N_RECURSE(prodp + size, up + hsize, vp + hsize, hsize,
tspace);
/* Product M. ________________
* |_(U1-U0)(V0-V1)_|
*/
if (mpihelp_cmp(up + hsize, up, hsize) >= 0) {
mpihelp_sub_n(prodp, up + hsize, up, hsize);
negflg = 0;
} else {
mpihelp_sub_n(prodp, up, up + hsize, hsize);
negflg = 1;
}
if (mpihelp_cmp(vp + hsize, vp, hsize) >= 0) {
mpihelp_sub_n(prodp + hsize, vp + hsize, vp, hsize);
negflg ^= 1;
} else {
mpihelp_sub_n(prodp + hsize, vp, vp + hsize, hsize);
/* No change of NEGFLG. */
}
/* Read temporary operands from low part of PROD.
* Put result in low part of TSPACE using upper part of TSPACE
* as new TSPACE.
*/
MPN_MUL_N_RECURSE(tspace, prodp, prodp + hsize, hsize,
tspace + size);
/* Add/copy product H. */
MPN_COPY(prodp + hsize, prodp + size, hsize);
cy = mpihelp_add_n(prodp + size, prodp + size,
prodp + size + hsize, hsize);
/* Add product M (if NEGFLG M is a negative number) */
if (negflg)
cy -=
mpihelp_sub_n(prodp + hsize, prodp + hsize, tspace,
size);
else
cy +=
mpihelp_add_n(prodp + hsize, prodp + hsize, tspace,
size);
/* Product L. ________________ ________________
* |________________||____U0 x V0_____|
* Read temporary operands from low part of PROD.
* Put result in low part of TSPACE using upper part of TSPACE
* as new TSPACE.
*/
MPN_MUL_N_RECURSE(tspace, up, vp, hsize, tspace + size);
/* Add/copy Product L (twice) */
cy += mpihelp_add_n(prodp + hsize, prodp + hsize, tspace, size);
if (cy)
mpihelp_add_1(prodp + hsize + size,
prodp + hsize + size, hsize, cy);
MPN_COPY(prodp, tspace, hsize);
cy = mpihelp_add_n(prodp + hsize, prodp + hsize, tspace + hsize,
hsize);
if (cy)
mpihelp_add_1(prodp + size, prodp + size, size, 1);
}
}
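/* Illustration only (not from the original file): the same
 * three-multiplication Karatsuba identity on a single 64x64 -> 128 bit
 * product, splitting each operand into 32-bit halves (B = 2^32).
 * Assumes compiler support for unsigned __int128.
 */
static unsigned __int128 __maybe_unused karatsuba64_sketch(u64 u, u64 v)
{
	u64 u1 = u >> 32, u0 = (u32)u;
	u64 v1 = v >> 32, v0 = (u32)v;
	unsigned __int128 hh = (unsigned __int128)u1 * v1;	/* U1*V1 */
	unsigned __int128 ll = (unsigned __int128)u0 * v0;	/* U0*V0 */
	/* Middle term (U1-U0)(V0-V1) may be negative; use signed 128. */
	__int128 mid = (__int128)((s64)u1 - (s64)u0) *
		       ((s64)v0 - (s64)v1);

	/* UV = hh*B^2 + (hh + ll + mid)*B + ll. */
	return (hh << 64) + ((hh + ll + mid) << 32) + ll;
}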
void mpih_sqr_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size)
{
mpi_size_t i;
mpi_limb_t cy_limb;
mpi_limb_t v_limb;
/* Multiply by the first limb in V separately, as the result can be
* stored (not added) to PROD. We also avoid a loop for zeroing. */
v_limb = up[0];
if (v_limb <= 1) {
if (v_limb == 1)
MPN_COPY(prodp, up, size);
else
MPN_ZERO(prodp, size);
cy_limb = 0;
} else
cy_limb = mpihelp_mul_1(prodp, up, size, v_limb);
prodp[size] = cy_limb;
prodp++;
/* For each iteration in the outer loop, multiply one limb from
* U with one limb from V, and add it to PROD. */
for (i = 1; i < size; i++) {
v_limb = up[i];
if (v_limb <= 1) {
cy_limb = 0;
if (v_limb == 1)
cy_limb = mpihelp_add_n(prodp, prodp, up, size);
} else
cy_limb = mpihelp_addmul_1(prodp, up, size, v_limb);
prodp[size] = cy_limb;
prodp++;
}
}
void
mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace)
{
if (size & 1) {
/* The size is odd, and the code below doesn't handle that.
* Multiply the least significant (size - 1) limbs with a recursive
		 * call, and handle the most significant limb of the operand
* separately.
* A slightly faster way to do this would be to make the Karatsuba
* code below behave as if the size were even, and let it check for
* odd size in the end. I.e., in essence move this code to the end.
* Doing so would save us a recursive call, and potentially make the
* stack grow a lot less.
*/
mpi_size_t esize = size - 1; /* even size */
mpi_limb_t cy_limb;
MPN_SQR_N_RECURSE(prodp, up, esize, tspace);
cy_limb = mpihelp_addmul_1(prodp + esize, up, esize, up[esize]);
prodp[esize + esize] = cy_limb;
cy_limb = mpihelp_addmul_1(prodp + esize, up, size, up[esize]);
prodp[esize + size] = cy_limb;
} else {
mpi_size_t hsize = size >> 1;
mpi_limb_t cy;
/* Product H. ________________ ________________
* |_____U1 x U1____||____U0 x U0_____|
* Put result in upper part of PROD and pass low part of TSPACE
* as new TSPACE.
*/
MPN_SQR_N_RECURSE(prodp + size, up + hsize, hsize, tspace);
/* Product M. ________________
* |_(U1-U0)(U0-U1)_|
*/
if (mpihelp_cmp(up + hsize, up, hsize) >= 0)
mpihelp_sub_n(prodp, up + hsize, up, hsize);
else
mpihelp_sub_n(prodp, up, up + hsize, hsize);
/* Read temporary operands from low part of PROD.
* Put result in low part of TSPACE using upper part of TSPACE
* as new TSPACE. */
MPN_SQR_N_RECURSE(tspace, prodp, hsize, tspace + size);
/* Add/copy product H */
MPN_COPY(prodp + hsize, prodp + size, hsize);
cy = mpihelp_add_n(prodp + size, prodp + size,
prodp + size + hsize, hsize);
/* Add product M (if NEGFLG M is a negative number). */
cy -= mpihelp_sub_n(prodp + hsize, prodp + hsize, tspace, size);
/* Product L. ________________ ________________
* |________________||____U0 x U0_____|
* Read temporary operands from low part of PROD.
* Put result in low part of TSPACE using upper part of TSPACE
* as new TSPACE. */
MPN_SQR_N_RECURSE(tspace, up, hsize, tspace + size);
/* Add/copy Product L (twice). */
cy += mpihelp_add_n(prodp + hsize, prodp + hsize, tspace, size);
if (cy)
mpihelp_add_1(prodp + hsize + size,
prodp + hsize + size, hsize, cy);
MPN_COPY(prodp, tspace, hsize);
cy = mpihelp_add_n(prodp + hsize, prodp + hsize, tspace + hsize,
hsize);
if (cy)
mpihelp_add_1(prodp + size, prodp + size, size, 1);
}
}
void mpihelp_mul_n(mpi_ptr_t prodp,
mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size)
{
if (up == vp) {
if (size < KARATSUBA_THRESHOLD)
mpih_sqr_n_basecase(prodp, up, size);
else {
mpi_ptr_t tspace;
tspace = mpi_alloc_limb_space(2 * size);
mpih_sqr_n(prodp, up, size, tspace);
mpi_free_limb_space(tspace);
}
} else {
if (size < KARATSUBA_THRESHOLD)
mul_n_basecase(prodp, up, vp, size);
else {
mpi_ptr_t tspace;
tspace = mpi_alloc_limb_space(2 * size);
mul_n(prodp, up, vp, size, tspace);
mpi_free_limb_space(tspace);
}
}
}
int
mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
mpi_ptr_t up, mpi_size_t usize,
mpi_ptr_t vp, mpi_size_t vsize,
struct karatsuba_ctx *ctx)
{
mpi_limb_t cy;
if (!ctx->tspace || ctx->tspace_size < vsize) {
if (ctx->tspace)
mpi_free_limb_space(ctx->tspace);
ctx->tspace = mpi_alloc_limb_space(2 * vsize);
if (!ctx->tspace)
return -ENOMEM;
ctx->tspace_size = vsize;
}
MPN_MUL_N_RECURSE(prodp, up, vp, vsize, ctx->tspace);
prodp += vsize;
up += vsize;
usize -= vsize;
if (usize >= vsize) {
if (!ctx->tp || ctx->tp_size < vsize) {
if (ctx->tp)
mpi_free_limb_space(ctx->tp);
ctx->tp = mpi_alloc_limb_space(2 * vsize);
if (!ctx->tp) {
if (ctx->tspace)
mpi_free_limb_space(ctx->tspace);
ctx->tspace = NULL;
return -ENOMEM;
}
ctx->tp_size = vsize;
}
do {
MPN_MUL_N_RECURSE(ctx->tp, up, vp, vsize, ctx->tspace);
cy = mpihelp_add_n(prodp, prodp, ctx->tp, vsize);
mpihelp_add_1(prodp + vsize, ctx->tp + vsize, vsize,
cy);
prodp += vsize;
up += vsize;
usize -= vsize;
} while (usize >= vsize);
}
if (usize) {
if (usize < KARATSUBA_THRESHOLD) {
mpi_limb_t tmp;
if (mpihelp_mul(ctx->tspace, vp, vsize, up, usize, &tmp)
< 0)
return -ENOMEM;
} else {
if (!ctx->next) {
ctx->next = kzalloc(sizeof *ctx, GFP_KERNEL);
if (!ctx->next)
return -ENOMEM;
}
if (mpihelp_mul_karatsuba_case(ctx->tspace,
vp, vsize,
up, usize,
ctx->next) < 0)
return -ENOMEM;
}
cy = mpihelp_add_n(prodp, prodp, ctx->tspace, vsize);
mpihelp_add_1(prodp + vsize, ctx->tspace + vsize, usize, cy);
}
return 0;
}
void mpihelp_release_karatsuba_ctx(struct karatsuba_ctx *ctx)
{
struct karatsuba_ctx *ctx2;
if (ctx->tp)
mpi_free_limb_space(ctx->tp);
if (ctx->tspace)
mpi_free_limb_space(ctx->tspace);
for (ctx = ctx->next; ctx; ctx = ctx2) {
ctx2 = ctx->next;
if (ctx->tp)
mpi_free_limb_space(ctx->tp);
if (ctx->tspace)
mpi_free_limb_space(ctx->tspace);
kfree(ctx);
}
}
/* Multiply the natural numbers u (pointed to by UP, with USIZE limbs)
* and v (pointed to by VP, with VSIZE limbs), and store the result at
 * PRODP. USIZE + VSIZE limbs are always stored, but the most
 * significant limb may be zero even when the input operands are
 * normalized. Return the most significant limb of the result.
*
 * NOTE: The space pointed to by PRODP is overwritten before the code is
 * done reading U and V, so any overlap with them is an error.
*
* Argument constraints:
* 1. USIZE >= VSIZE.
* 2. PRODP != UP and PRODP != VP, i.e. the destination
* must be distinct from the multiplier and the multiplicand.
*/
int
mpihelp_mul(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
mpi_ptr_t vp, mpi_size_t vsize, mpi_limb_t *_result)
{
mpi_ptr_t prod_endp = prodp + usize + vsize - 1;
mpi_limb_t cy;
struct karatsuba_ctx ctx;
if (vsize < KARATSUBA_THRESHOLD) {
mpi_size_t i;
mpi_limb_t v_limb;
if (!vsize) {
*_result = 0;
return 0;
}
		/* Multiply by the first limb in V separately, as the result can be
		 * stored (rather than added) directly in PROD. This also avoids a
		 * loop for zeroing. */
v_limb = vp[0];
if (v_limb <= 1) {
if (v_limb == 1)
MPN_COPY(prodp, up, usize);
else
MPN_ZERO(prodp, usize);
cy = 0;
} else
cy = mpihelp_mul_1(prodp, up, usize, v_limb);
prodp[usize] = cy;
prodp++;
		/* Each iteration of the outer loop multiplies U by the next
		 * limb of V and accumulates the product into PROD. */
for (i = 1; i < vsize; i++) {
v_limb = vp[i];
if (v_limb <= 1) {
cy = 0;
if (v_limb == 1)
cy = mpihelp_add_n(prodp, prodp, up,
usize);
} else
cy = mpihelp_addmul_1(prodp, up, usize, v_limb);
prodp[usize] = cy;
prodp++;
}
*_result = cy;
return 0;
}
memset(&ctx, 0, sizeof ctx);
if (mpihelp_mul_karatsuba_case(prodp, up, usize, vp, vsize, &ctx) < 0)
return -ENOMEM;
mpihelp_release_karatsuba_ctx(&ctx);
*_result = *prod_endp;
return 0;
}
| linux-master | lib/crypto/mpi/mpih-mul.c |
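The Karatsuba-style squaring above rests on the identity 2*U1*U0 = U1^2 + U0^2 - (U1 - U0)^2, which trades the cross product for a third squaring. A minimal standalone sketch (plain C99 with hypothetical names, not kernel code) that checks the same identity on a 32-bit value split into two 16-bit half-limbs:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t u = 0xdeadbeef;
	uint64_t u1 = u >> 16, u0 = u & 0xffff;		/* two 16-bit "limbs" */
	uint64_t h = u1 * u1;				/* product H */
	uint64_t l = u0 * u0;				/* product L */
	uint64_t d = u1 > u0 ? u1 - u0 : u0 - u1;	/* |U1 - U0| */
	uint64_t m = h + l - d * d;			/* == 2*u1*u0 */
	uint64_t sq = (h << 32) + (m << 16) + l;

	assert(sq == (uint64_t)u * u);	/* matches direct squaring */
	printf("0x%016" PRIx64 "\n", sq);
	return 0;
}

The add/copy/subtract sequence in mpih_sqr_n performs the same bookkeeping on limb vectors, with explicit carry propagation.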
// SPDX-License-Identifier: GPL-2.0-or-later
/* mpih-rshift.c - MPI helper functions
* Copyright (C) 1994, 1996, 1998, 1999,
* 2000, 2001 Free Software Foundation, Inc.
*
* This file is part of GNUPG
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
* The GNU MP Library itself is published under the LGPL;
* however I decided to publish this code under the plain GPL.
*/
#include "mpi-internal.h"
/* Shift U (pointed to by UP and USIZE limbs long) CNT bits to the right
* and store the USIZE least significant limbs of the result at WP.
* The bits shifted out to the right are returned.
*
* Argument constraints:
* 1. 0 < CNT < BITS_PER_MP_LIMB
* 2. If the result is to be written over the input, WP must be <= UP.
*/
mpi_limb_t
mpihelp_rshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, unsigned cnt)
{
mpi_limb_t high_limb, low_limb;
unsigned sh_1, sh_2;
mpi_size_t i;
mpi_limb_t retval;
sh_1 = cnt;
wp -= 1;
sh_2 = BITS_PER_MPI_LIMB - sh_1;
high_limb = up[0];
retval = high_limb << sh_2;
low_limb = high_limb;
for (i = 1; i < usize; i++) {
high_limb = up[i];
wp[i] = (low_limb >> sh_1) | (high_limb << sh_2);
low_limb = high_limb;
}
wp[i] = low_limb >> sh_1;
return retval;
}
| linux-master | lib/crypto/mpi/generic_mpih-rshift.c |
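Reduced to two 64-bit limbs, the loop above computes the following; a minimal sketch (plain C, hypothetical name, assuming 0 < cnt < 64 per constraint 1):

#include <stdint.h>

static uint64_t rshift2(uint64_t w[2], const uint64_t u[2], unsigned cnt)
{
	uint64_t out = u[0] << (64 - cnt);	/* bits shifted off the low end,
						 * left-justified as above */
	w[0] = (u[0] >> cnt) | (u[1] << (64 - cnt));
	w[1] = u[1] >> cnt;
	return out;
}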
// SPDX-License-Identifier: GPL-2.0-or-later
/* mpihelp-sub.c - MPI helper functions
* Copyright (C) 1994, 1996 Free Software Foundation, Inc.
* Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
* The GNU MP Library itself is published under the LGPL;
* however I decided to publish this code under the plain GPL.
*/
#include "mpi-internal.h"
/****************
* Compare OP1_PTR/OP1_SIZE with OP2_PTR/OP2_SIZE.
* There are no restrictions on the relative sizes of
* the two arguments.
* Return 1 if OP1 > OP2, 0 if they are equal, and -1 if OP1 < OP2.
*/
int mpihelp_cmp(mpi_ptr_t op1_ptr, mpi_ptr_t op2_ptr, mpi_size_t size)
{
mpi_size_t i;
mpi_limb_t op1_word, op2_word;
for (i = size - 1; i >= 0; i--) {
op1_word = op1_ptr[i];
op2_word = op2_ptr[i];
if (op1_word != op2_word)
goto diff;
}
return 0;
diff:
	/* This can *not* be simplified to
	 *   op1_word - op2_word
	 * since that expression might give signed overflow. */
return (op1_word > op2_word) ? 1 : -1;
}
| linux-master | lib/crypto/mpi/mpih-cmp.c |
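A small standalone illustration (plain C, hypothetical names, not kernel code) of the pitfall the comment in mpihelp_cmp warns about: returning the raw limb difference misreports the ordering once the difference no longer fits the signed return type.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t a = 0xffffffffffffffffULL, b = 1;
	int bad = (int)(a - b);		/* truncates; typically -2, claiming a < b */
	int good = (a > b) ? 1 : -1;	/* explicit compare: a > b */

	printf("bad=%d good=%d\n", bad, good);
	return 0;
}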
/* mpi-cmp.c - MPI functions
* Copyright (C) 1998, 1999 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* GnuPG is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* GnuPG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "mpi-internal.h"
int mpi_cmp_ui(MPI u, unsigned long v)
{
mpi_limb_t limb = v;
mpi_normalize(u);
if (u->nlimbs == 0) {
if (v == 0)
return 0;
else
return -1;
}
if (u->sign)
return -1;
if (u->nlimbs > 1)
return 1;
if (u->d[0] == limb)
return 0;
else if (u->d[0] > limb)
return 1;
else
return -1;
}
EXPORT_SYMBOL_GPL(mpi_cmp_ui);
static int do_mpi_cmp(MPI u, MPI v, int absmode)
{
mpi_size_t usize;
mpi_size_t vsize;
int usign;
int vsign;
int cmp;
mpi_normalize(u);
mpi_normalize(v);
usize = u->nlimbs;
vsize = v->nlimbs;
usign = absmode ? 0 : u->sign;
vsign = absmode ? 0 : v->sign;
/* Compare sign bits. */
if (!usign && vsign)
return 1;
if (usign && !vsign)
return -1;
/* U and V are either both positive or both negative. */
if (usize != vsize && !usign && !vsign)
return usize - vsize;
	if (usize != vsize && usign && vsign)
		return vsize - usize;	/* both negative: more limbs means smaller */
if (!usize)
return 0;
cmp = mpihelp_cmp(u->d, v->d, usize);
if (!cmp)
return 0;
if ((cmp < 0?1:0) == (usign?1:0))
return 1;
return -1;
}
int mpi_cmp(MPI u, MPI v)
{
return do_mpi_cmp(u, v, 0);
}
EXPORT_SYMBOL_GPL(mpi_cmp);
int mpi_cmpabs(MPI u, MPI v)
{
return do_mpi_cmp(u, v, 1);
}
EXPORT_SYMBOL_GPL(mpi_cmpabs);
| linux-master | lib/crypto/mpi/mpi-cmp.c |
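A short usage sketch (kernel-style context with <linux/mpi.h>; the helper name is hypothetical) of the range check typically built from mpi_cmp_ui() and mpi_cmp():

#include <linux/errno.h>
#include <linux/mpi.h>

/* Reject s unless 0 < s < m, the usual sanity check on an RSA input. */
static int mpi_check_range(MPI s, MPI m)
{
	if (mpi_cmp_ui(s, 0) <= 0)	/* s <= 0 */
		return -EINVAL;
	if (mpi_cmp(s, m) >= 0)		/* s >= m */
		return -EINVAL;
	return 0;
}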
/* mpi-bit.c - MPI bit level functions
* Copyright (C) 1998, 1999 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* GnuPG is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* GnuPG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "mpi-internal.h"
#include "longlong.h"
#define A_LIMB_1 ((mpi_limb_t) 1)
/****************
 * Sometimes we have MSLs (most significant limbs) which are 0;
 * leaving such non-normalized values in place breaks the assumptions
 * of the other routines, so this function strips them.
*/
void mpi_normalize(MPI a)
{
for (; a->nlimbs && !a->d[a->nlimbs - 1]; a->nlimbs--)
;
}
EXPORT_SYMBOL_GPL(mpi_normalize);
/****************
* Return the number of bits in A.
*/
unsigned mpi_get_nbits(MPI a)
{
unsigned n;
mpi_normalize(a);
if (a->nlimbs) {
mpi_limb_t alimb = a->d[a->nlimbs - 1];
if (alimb)
n = count_leading_zeros(alimb);
else
n = BITS_PER_MPI_LIMB;
n = BITS_PER_MPI_LIMB - n + (a->nlimbs - 1) * BITS_PER_MPI_LIMB;
} else
n = 0;
return n;
}
EXPORT_SYMBOL_GPL(mpi_get_nbits);
/****************
* Test whether bit N is set.
*/
int mpi_test_bit(MPI a, unsigned int n)
{
unsigned int limbno, bitno;
mpi_limb_t limb;
limbno = n / BITS_PER_MPI_LIMB;
bitno = n % BITS_PER_MPI_LIMB;
if (limbno >= a->nlimbs)
return 0; /* too far left: this is a 0 */
limb = a->d[limbno];
return (limb & (A_LIMB_1 << bitno)) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(mpi_test_bit);
/****************
* Set bit N of A.
*/
void mpi_set_bit(MPI a, unsigned int n)
{
unsigned int i, limbno, bitno;
limbno = n / BITS_PER_MPI_LIMB;
bitno = n % BITS_PER_MPI_LIMB;
if (limbno >= a->nlimbs) {
for (i = a->nlimbs; i < a->alloced; i++)
a->d[i] = 0;
mpi_resize(a, limbno+1);
a->nlimbs = limbno+1;
}
a->d[limbno] |= (A_LIMB_1<<bitno);
}
/****************
 * Set bit N of A and clear all bits above it.
*/
void mpi_set_highbit(MPI a, unsigned int n)
{
unsigned int i, limbno, bitno;
limbno = n / BITS_PER_MPI_LIMB;
bitno = n % BITS_PER_MPI_LIMB;
if (limbno >= a->nlimbs) {
for (i = a->nlimbs; i < a->alloced; i++)
a->d[i] = 0;
mpi_resize(a, limbno+1);
a->nlimbs = limbno+1;
}
a->d[limbno] |= (A_LIMB_1<<bitno);
for (bitno++; bitno < BITS_PER_MPI_LIMB; bitno++)
a->d[limbno] &= ~(A_LIMB_1 << bitno);
a->nlimbs = limbno+1;
}
EXPORT_SYMBOL_GPL(mpi_set_highbit);
/****************
 * Clear bit N of A and all bits above it.
*/
void mpi_clear_highbit(MPI a, unsigned int n)
{
unsigned int limbno, bitno;
limbno = n / BITS_PER_MPI_LIMB;
bitno = n % BITS_PER_MPI_LIMB;
if (limbno >= a->nlimbs)
return; /* not allocated, therefore no need to clear bits :-) */
for ( ; bitno < BITS_PER_MPI_LIMB; bitno++)
a->d[limbno] &= ~(A_LIMB_1 << bitno);
a->nlimbs = limbno+1;
}
/****************
* Clear bit N of A.
*/
void mpi_clear_bit(MPI a, unsigned int n)
{
unsigned int limbno, bitno;
limbno = n / BITS_PER_MPI_LIMB;
bitno = n % BITS_PER_MPI_LIMB;
if (limbno >= a->nlimbs)
		return;	/* No need to clear this bit: it lies beyond the allocated limbs. */
a->d[limbno] &= ~(A_LIMB_1 << bitno);
}
EXPORT_SYMBOL_GPL(mpi_clear_bit);
/****************
* Shift A by COUNT limbs to the right
* This is used only within the MPI library
*/
void mpi_rshift_limbs(MPI a, unsigned int count)
{
mpi_ptr_t ap = a->d;
mpi_size_t n = a->nlimbs;
unsigned int i;
if (count >= n) {
a->nlimbs = 0;
return;
}
for (i = 0; i < n - count; i++)
ap[i] = ap[i+count];
ap[i] = 0;
a->nlimbs -= count;
}
/*
* Shift A by N bits to the right.
*/
void mpi_rshift(MPI x, MPI a, unsigned int n)
{
mpi_size_t xsize;
unsigned int i;
unsigned int nlimbs = (n/BITS_PER_MPI_LIMB);
unsigned int nbits = (n%BITS_PER_MPI_LIMB);
if (x == a) {
/* In-place operation. */
if (nlimbs >= x->nlimbs) {
x->nlimbs = 0;
return;
}
if (nlimbs) {
for (i = 0; i < x->nlimbs - nlimbs; i++)
x->d[i] = x->d[i+nlimbs];
x->d[i] = 0;
x->nlimbs -= nlimbs;
}
if (x->nlimbs && nbits)
mpihelp_rshift(x->d, x->d, x->nlimbs, nbits);
} else if (nlimbs) {
		/* Copy and shift by at least as many bits as there are in a limb. */
xsize = a->nlimbs;
x->sign = a->sign;
RESIZE_IF_NEEDED(x, xsize);
x->nlimbs = xsize;
for (i = 0; i < a->nlimbs; i++)
x->d[i] = a->d[i];
x->nlimbs = i;
if (nlimbs >= x->nlimbs) {
x->nlimbs = 0;
return;
}
if (nlimbs) {
for (i = 0; i < x->nlimbs - nlimbs; i++)
x->d[i] = x->d[i+nlimbs];
x->d[i] = 0;
x->nlimbs -= nlimbs;
}
if (x->nlimbs && nbits)
mpihelp_rshift(x->d, x->d, x->nlimbs, nbits);
} else {
		/* Copy and shift by fewer bits than there are in a limb. */
xsize = a->nlimbs;
x->sign = a->sign;
RESIZE_IF_NEEDED(x, xsize);
x->nlimbs = xsize;
if (xsize) {
if (nbits)
mpihelp_rshift(x->d, a->d, x->nlimbs, nbits);
else {
/* The rshift helper function is not specified for
* NBITS==0, thus we do a plain copy here.
*/
for (i = 0; i < x->nlimbs; i++)
x->d[i] = a->d[i];
}
}
}
MPN_NORMALIZE(x->d, x->nlimbs);
}
EXPORT_SYMBOL_GPL(mpi_rshift);
/****************
* Shift A by COUNT limbs to the left
* This is used only within the MPI library
*/
void mpi_lshift_limbs(MPI a, unsigned int count)
{
mpi_ptr_t ap;
int n = a->nlimbs;
int i;
if (!count || !n)
return;
RESIZE_IF_NEEDED(a, n+count);
ap = a->d;
for (i = n-1; i >= 0; i--)
ap[i+count] = ap[i];
for (i = 0; i < count; i++)
ap[i] = 0;
a->nlimbs += count;
}
/*
* Shift A by N bits to the left.
*/
void mpi_lshift(MPI x, MPI a, unsigned int n)
{
unsigned int nlimbs = (n/BITS_PER_MPI_LIMB);
unsigned int nbits = (n%BITS_PER_MPI_LIMB);
if (x == a && !n)
return; /* In-place shift with an amount of zero. */
if (x != a) {
/* Copy A to X. */
unsigned int alimbs = a->nlimbs;
int asign = a->sign;
mpi_ptr_t xp, ap;
RESIZE_IF_NEEDED(x, alimbs+nlimbs+1);
xp = x->d;
ap = a->d;
MPN_COPY(xp, ap, alimbs);
x->nlimbs = alimbs;
x->flags = a->flags;
x->sign = asign;
}
if (nlimbs && !nbits) {
/* Shift a full number of limbs. */
mpi_lshift_limbs(x, nlimbs);
} else if (n) {
		/* We use a very dumb approach: shift left by the number of
		 * limbs plus one, then fix it up with a right shift.
		 */
mpi_lshift_limbs(x, nlimbs+1);
mpi_rshift(x, x, BITS_PER_MPI_LIMB - nbits);
}
MPN_NORMALIZE(x->d, x->nlimbs);
}
| linux-master | lib/crypto/mpi/mpi-bit.c |
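A brief usage sketch of the bit-level helpers above (kernel-style context; the function name is hypothetical):

#include <linux/mpi.h>
#include <linux/printk.h>

/* Log the bit length of A and whether it is odd. */
static void mpi_describe(MPI a)
{
	unsigned int nbits = mpi_get_nbits(a);	/* 0 when A == 0 */
	int odd = mpi_test_bit(a, 0);		/* low bit set? */

	pr_info("mpi: %u bits, %s\n", nbits, odd ? "odd" : "even");
}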
/* mpi-add.c - MPI functions
* Copyright (C) 1994, 1996, 1998, 2001, 2002,
* 2003 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "mpi-internal.h"
/****************
* Add the unsigned integer V to the mpi-integer U and store the
* result in W. U and V may be the same.
*/
void mpi_add_ui(MPI w, MPI u, unsigned long v)
{
mpi_ptr_t wp, up;
mpi_size_t usize, wsize;
int usign, wsign;
usize = u->nlimbs;
usign = u->sign;
wsign = 0;
	/* If there is not enough space for W (and a possible carry), grow it. */
wsize = usize + 1;
if (w->alloced < wsize)
mpi_resize(w, wsize);
/* These must be after realloc (U may be the same as W). */
up = u->d;
wp = w->d;
if (!usize) { /* simple */
wp[0] = v;
wsize = v ? 1:0;
} else if (!usign) { /* mpi is not negative */
mpi_limb_t cy;
cy = mpihelp_add_1(wp, up, usize, v);
wp[usize] = cy;
wsize = usize + cy;
} else {
/* The signs are different. Need exact comparison to determine
* which operand to subtract from which.
*/
if (usize == 1 && up[0] < v) {
wp[0] = v - up[0];
wsize = 1;
} else {
mpihelp_sub_1(wp, up, usize, v);
/* Size can decrease with at most one limb. */
wsize = usize - (wp[usize-1] == 0);
wsign = 1;
}
}
w->nlimbs = wsize;
w->sign = wsign;
}
void mpi_add(MPI w, MPI u, MPI v)
{
mpi_ptr_t wp, up, vp;
mpi_size_t usize, vsize, wsize;
int usign, vsign, wsign;
if (u->nlimbs < v->nlimbs) { /* Swap U and V. */
usize = v->nlimbs;
usign = v->sign;
vsize = u->nlimbs;
vsign = u->sign;
wsize = usize + 1;
RESIZE_IF_NEEDED(w, wsize);
/* These must be after realloc (u or v may be the same as w). */
up = v->d;
vp = u->d;
} else {
usize = u->nlimbs;
usign = u->sign;
vsize = v->nlimbs;
vsign = v->sign;
wsize = usize + 1;
RESIZE_IF_NEEDED(w, wsize);
/* These must be after realloc (u or v may be the same as w). */
up = u->d;
vp = v->d;
}
wp = w->d;
wsign = 0;
if (!vsize) { /* simple */
MPN_COPY(wp, up, usize);
wsize = usize;
wsign = usign;
} else if (usign != vsign) { /* different sign */
/* This test is right since USIZE >= VSIZE */
if (usize != vsize) {
mpihelp_sub(wp, up, usize, vp, vsize);
wsize = usize;
MPN_NORMALIZE(wp, wsize);
wsign = usign;
} else if (mpihelp_cmp(up, vp, usize) < 0) {
mpihelp_sub_n(wp, vp, up, usize);
wsize = usize;
MPN_NORMALIZE(wp, wsize);
if (!usign)
wsign = 1;
} else {
mpihelp_sub_n(wp, up, vp, usize);
wsize = usize;
MPN_NORMALIZE(wp, wsize);
if (usign)
wsign = 1;
}
} else { /* U and V have same sign. Add them. */
mpi_limb_t cy = mpihelp_add(wp, up, usize, vp, vsize);
wp[usize] = cy;
wsize = usize + cy;
if (usign)
wsign = 1;
}
w->nlimbs = wsize;
w->sign = wsign;
}
EXPORT_SYMBOL_GPL(mpi_add);
void mpi_sub(MPI w, MPI u, MPI v)
{
MPI vv = mpi_copy(v);
vv->sign = !vv->sign;
mpi_add(w, u, vv);
mpi_free(vv);
}
EXPORT_SYMBOL_GPL(mpi_sub);
void mpi_addm(MPI w, MPI u, MPI v, MPI m)
{
mpi_add(w, u, v);
mpi_mod(w, w, m);
}
EXPORT_SYMBOL_GPL(mpi_addm);
void mpi_subm(MPI w, MPI u, MPI v, MPI m)
{
mpi_sub(w, u, v);
mpi_mod(w, w, m);
}
EXPORT_SYMBOL_GPL(mpi_subm);
| linux-master | lib/crypto/mpi/mpi-add.c |
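When both operands are already reduced, one conditional subtraction after mpi_add() suffices; a minimal sketch (assuming 0 <= u, v < m and the API above; the helper name is hypothetical):

#include <linux/mpi.h>

/* w = (u + v) mod m, given u, v < m, so u + v < 2m. */
static void mpi_add_mod_reduced(MPI w, MPI u, MPI v, MPI m)
{
	mpi_add(w, u, v);
	if (mpi_cmp(w, m) >= 0)	/* at most one subtraction is needed */
		mpi_sub(w, w, m);
}

mpi_addm() above is the general form: it goes through mpi_mod(), which also copes with unreduced inputs.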
// SPDX-License-Identifier: GPL-2.0-or-later
/* mpihelp-mul_3.c - MPI helper functions
* Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
* The GNU MP Library itself is published under the LGPL;
* however I decided to publish this code under the plain GPL.
*/
#include "mpi-internal.h"
#include "longlong.h"
mpi_limb_t
mpihelp_submul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_size_t s1_size, mpi_limb_t s2_limb)
{
mpi_limb_t cy_limb;
mpi_size_t j;
mpi_limb_t prod_high, prod_low;
mpi_limb_t x;
/* The loop counter and index J goes from -SIZE to -1. This way
* the loop becomes faster. */
j = -s1_size;
res_ptr -= j;
s1_ptr -= j;
cy_limb = 0;
do {
umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb);
prod_low += cy_limb;
cy_limb = (prod_low < cy_limb ? 1 : 0) + prod_high;
x = res_ptr[j];
prod_low = x - prod_low;
cy_limb += prod_low > x ? 1 : 0;
res_ptr[j] = prod_low;
} while (++j);
return cy_limb;
}
| linux-master | lib/crypto/mpi/generic_mpih-mul3.c |
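The negative-index idiom above folds the loop counter and the array index into a single variable, so each iteration costs one increment-and-test. A standalone sketch of the same pattern (plain C, hypothetical names):

#include <stdint.h>

static void add_arrays(uint64_t *res, const uint64_t *a, long size)
{
	long j = -size;		/* runs from -size up to -1 */

	res -= j;		/* bias the bases so res[j] spans */
	a -= j;			/* res[0] .. res[size - 1] */
	do {
		res[j] += a[j];	/* j is both index and counter */
	} while (++j);		/* terminates when j reaches 0 */
}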
// SPDX-License-Identifier: GPL-2.0-or-later
/* mpihelp-lshift.c - MPI helper functions
* Copyright (C) 1994, 1996, 1998, 2001 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
* The GNU MP Library itself is published under the LGPL;
* however I decided to publish this code under the plain GPL.
*/
#include "mpi-internal.h"
/* Shift U (pointed to by UP and USIZE limbs long) CNT bits to the left
 * and store the USIZE least significant limbs of the result at WP.
 * Return the bits shifted out from the most significant limb.
*
* Argument constraints:
* 1. 0 < CNT < BITS_PER_MP_LIMB
* 2. If the result is to be written over the input, WP must be >= UP.
*/
mpi_limb_t
mpihelp_lshift(mpi_ptr_t wp, mpi_ptr_t up, mpi_size_t usize, unsigned int cnt)
{
mpi_limb_t high_limb, low_limb;
unsigned sh_1, sh_2;
mpi_size_t i;
mpi_limb_t retval;
sh_1 = cnt;
wp += 1;
sh_2 = BITS_PER_MPI_LIMB - sh_1;
i = usize - 1;
low_limb = up[i];
retval = low_limb >> sh_2;
high_limb = low_limb;
while (--i >= 0) {
low_limb = up[i];
wp[i] = (high_limb << sh_1) | (low_limb >> sh_2);
high_limb = low_limb;
}
wp[i] = high_limb << sh_1;
return retval;
}
| linux-master | lib/crypto/mpi/generic_mpih-lshift.c |
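As with the right shift, two 64-bit limbs make the data flow easy to see; a minimal sketch (plain C, hypothetical name, assuming 0 < cnt < 64 per constraint 1):

#include <stdint.h>

static uint64_t lshift2(uint64_t w[2], const uint64_t u[2], unsigned cnt)
{
	uint64_t out = u[1] >> (64 - cnt);	/* bits pushed off the top */

	w[1] = (u[1] << cnt) | (u[0] >> (64 - cnt));
	w[0] = u[0] << cnt;
	return out;
}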
// SPDX-License-Identifier: GPL-2.0-or-later
/* mpihelp-add_1.c - MPI helper functions
* Copyright (C) 1994, 1996, 1997, 1998,
* 2000 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
* The GNU MP Library itself is published under the LGPL;
* however I decided to publish this code under the plain GPL.
*/
#include "mpi-internal.h"
#include "longlong.h"
mpi_limb_t
mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
mpi_ptr_t s2_ptr, mpi_size_t size)
{
mpi_limb_t x, y, cy;
mpi_size_t j;
/* The loop counter and index J goes from -SIZE to -1. This way
the loop becomes faster. */
j = -size;
/* Offset the base pointers to compensate for the negative indices. */
s1_ptr -= j;
s2_ptr -= j;
res_ptr -= j;
cy = 0;
do {
y = s2_ptr[j];
x = s1_ptr[j];
y += cy; /* add previous carry to one addend */
cy = y < cy; /* get out carry from that addition */
y += x; /* add other addend */
cy += y < x; /* get out carry from that add, combine */
res_ptr[j] = y;
} while (++j);
return cy;
}
| linux-master | lib/crypto/mpi/generic_mpih-add1.c |
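The branch-free carry recurrence in the loop above can be read as a one-limb helper; a standalone sketch (plain C, hypothetical name):

#include <stdint.h>

static uint64_t add_limb(uint64_t x, uint64_t y, uint64_t cy_in,
			 uint64_t *cy_out)
{
	uint64_t t = y + cy_in;
	uint64_t c = t < cy_in;	/* carry out of y + cy_in */

	t += x;
	c += t < x;		/* carry out of adding x */
	*cy_out = c;		/* always 0 or 1 */
	return t;
}

The two carry tests are mutually exclusive: y + cy_in can only wrap around to 0 (y all-ones, cy_in 1), and adding x to 0 cannot wrap again, so c never exceeds 1.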
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
strcpy(small, LITERAL_LARGE)
#include "test_fortify.h"
| linux-master | lib/test_fortify/write_overflow-strcpy-lit.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
memmove(instance.buf, large_src, sizeof(large_src))
#include "test_fortify.h"
| linux-master | lib/test_fortify/write_overflow-memmove.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
memcpy(instance.buf, large_src, sizeof(large_src))
#include "test_fortify.h"
| linux-master | lib/test_fortify/write_overflow-memcpy.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
memscan(small, 0x7A, sizeof(small) + 1)
#include "test_fortify.h"
| linux-master | lib/test_fortify/read_overflow-memscan.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
strncpy(small, large_src, sizeof(small) + 1)
#include "test_fortify.h"
| linux-master | lib/test_fortify/write_overflow-strncpy-src.c |
// SPDX-License-Identifier: GPL-2.0-only
#define TEST \
memcmp(large, small, sizeof(small) + 1)
#include "test_fortify.h"
| linux-master | lib/test_fortify/read_overflow2-memcmp.c |
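These one-line test files all follow the same pattern: define TEST to a single fortified call that overflows its destination or source, then include the shared harness, which (not shown here) is expected to supply the small/large buffers and turn TEST into a compile-time diagnostic. A standalone sketch of the underlying FORTIFY_SOURCE idea, using the compiler builtin directly (hypothetical names, not the kernel's implementation):

/* The compile-time known size of an object, as fortified helpers
 * query it before deciding whether a copy must be diagnosed. */
#define KNOWN_SIZE(p)	__builtin_object_size(p, 0)

int main(void)
{
	char small[8];

	/* With optimization enabled, KNOWN_SIZE(small) folds to 8, so a
	 * fortified strcpy of a longer string can be rejected at build time. */
	return KNOWN_SIZE(small) == sizeof(small) ? 0 : 1;
}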