// SPDX-License-Identifier: GPL-2.0-or-later
/*
* CMAC: Cipher Block Mode for Authentication
*
* Copyright © 2013 Jussi Kivilinna <[email protected]>
*
* Based on work by:
* Copyright © 2013 Tom St Denis <[email protected]>
* Based on crypto/xcbc.c:
* Copyright © 2006 USAGI/WIDE Project,
* Author: Kazunori Miyazawa <[email protected]>
*/
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
/*
* +------------------------
* | <parent tfm>
* +------------------------
* | cmac_tfm_ctx
* +------------------------
* | consts (block size * 2)
* +------------------------
*/
struct cmac_tfm_ctx {
struct crypto_cipher *child;
u8 ctx[];
};
/*
* +------------------------
* | <shash desc>
* +------------------------
* | cmac_desc_ctx
* +------------------------
* | odds (block size)
* +------------------------
* | prev (block size)
* +------------------------
*/
struct cmac_desc_ctx {
unsigned int len;
u8 ctx[];
};
static int crypto_cmac_digest_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
unsigned long alignmask = crypto_shash_alignmask(parent);
struct cmac_tfm_ctx *ctx = crypto_shash_ctx(parent);
unsigned int bs = crypto_shash_blocksize(parent);
__be64 *consts = PTR_ALIGN((void *)ctx->ctx,
(alignmask | (__alignof__(__be64) - 1)) + 1);
u64 _const[2];
int i, err = 0;
u8 msb_mask, gfmask;
err = crypto_cipher_setkey(ctx->child, inkey, keylen);
if (err)
return err;
/* encrypt the zero block */
memset(consts, 0, bs);
crypto_cipher_encrypt_one(ctx->child, (u8 *)consts, (u8 *)consts);
switch (bs) {
case 16:
gfmask = 0x87;
_const[0] = be64_to_cpu(consts[1]);
_const[1] = be64_to_cpu(consts[0]);
/* gf(2^128) multiply zero-ciphertext with u and u^2 */
for (i = 0; i < 4; i += 2) {
msb_mask = ((s64)_const[1] >> 63) & gfmask;
_const[1] = (_const[1] << 1) | (_const[0] >> 63);
_const[0] = (_const[0] << 1) ^ msb_mask;
consts[i + 0] = cpu_to_be64(_const[1]);
consts[i + 1] = cpu_to_be64(_const[0]);
}
break;
case 8:
gfmask = 0x1B;
_const[0] = be64_to_cpu(consts[0]);
/* gf(2^64) multiply zero-ciphertext with u and u^2 */
for (i = 0; i < 2; i++) {
msb_mask = ((s64)_const[0] >> 63) & gfmask;
_const[0] = (_const[0] << 1) ^ msb_mask;
consts[i] = cpu_to_be64(_const[0]);
}
break;
}
return 0;
}
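/*
 * Illustrative sketch (not part of this file): the GF(2^128) doubling
 * performed above, written out for a plain big-endian 16-byte buffer.
 * If the top bit shifted out is set, the result is reduced by XORing
 * 0x87 into the low byte (x^128 = x^7 + x^2 + x + 1).
 */
static void gf128_mul_x(u8 out[16], const u8 in[16])
{
	u8 carry = (in[0] & 0x80) ? 0x87 : 0x00;
	int i;

	for (i = 0; i < 15; i++)
		out[i] = (in[i] << 1) | (in[i + 1] >> 7);
	out[15] = (in[15] << 1) ^ carry;
}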
static int crypto_cmac_digest_init(struct shash_desc *pdesc)
{
unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm);
struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
int bs = crypto_shash_blocksize(pdesc->tfm);
u8 *prev = PTR_ALIGN((void *)ctx->ctx, alignmask + 1) + bs;
ctx->len = 0;
memset(prev, 0, bs);
return 0;
}
static int crypto_cmac_digest_update(struct shash_desc *pdesc, const u8 *p,
unsigned int len)
{
struct crypto_shash *parent = pdesc->tfm;
unsigned long alignmask = crypto_shash_alignmask(parent);
struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1);
u8 *prev = odds + bs;
/* check whether the data can fill a complete block */
if ((ctx->len + len) <= bs) {
memcpy(odds + ctx->len, p, len);
ctx->len += len;
return 0;
}
/* filling odds with new data and encrypting it */
memcpy(odds + ctx->len, p, bs - ctx->len);
len -= bs - ctx->len;
p += bs - ctx->len;
crypto_xor(prev, odds, bs);
crypto_cipher_encrypt_one(tfm, prev, prev);
/* clearing the length */
ctx->len = 0;
/* encrypting the rest of data */
while (len > bs) {
crypto_xor(prev, p, bs);
crypto_cipher_encrypt_one(tfm, prev, prev);
p += bs;
len -= bs;
}
/* keep the remainder, which is smaller than a block */
if (len) {
memcpy(odds, p, len);
ctx->len = len;
}
return 0;
}
static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
unsigned long alignmask = crypto_shash_alignmask(parent);
struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
u8 *consts = PTR_ALIGN((void *)tctx->ctx,
(alignmask | (__alignof__(__be64) - 1)) + 1);
u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1);
u8 *prev = odds + bs;
unsigned int offset = 0;
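	/*
	 * Per NIST SP 800-38B: a complete final block is XORed with subkey
	 * K1 (consts[0..bs)); a partial block is padded with 0x80 00... and
	 * XORed with subkey K2 (consts[bs..2*bs)), selected below via offset.
	 */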
if (ctx->len != bs) {
unsigned int rlen;
u8 *p = odds + ctx->len;
*p = 0x80;
p++;
rlen = bs - ctx->len - 1;
if (rlen)
memset(p, 0, rlen);
offset += bs;
}
crypto_xor(prev, odds, bs);
crypto_xor(prev, consts + offset, bs);
crypto_cipher_encrypt_one(tfm, out, prev);
return 0;
}
static int cmac_init_tfm(struct crypto_shash *tfm)
{
struct shash_instance *inst = shash_alg_instance(tfm);
struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
struct crypto_cipher_spawn *spawn;
struct crypto_cipher *cipher;
spawn = shash_instance_ctx(inst);
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
return 0;
}
static int cmac_clone_tfm(struct crypto_shash *tfm, struct crypto_shash *otfm)
{
struct cmac_tfm_ctx *octx = crypto_shash_ctx(otfm);
struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
struct crypto_cipher *cipher;
cipher = crypto_clone_cipher(octx->child);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
return 0;
}
static void cmac_exit_tfm(struct crypto_shash *tfm)
{
struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
crypto_free_cipher(ctx->child);
}
static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct shash_instance *inst;
struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
unsigned long alignmask;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = shash_instance_ctx(inst);
err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_cipher_alg(spawn);
switch (alg->cra_blocksize) {
case 16:
case 8:
break;
default:
err = -EINVAL;
goto err_free_inst;
}
err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
if (err)
goto err_free_inst;
alignmask = alg->cra_alignmask;
inst->alg.base.cra_alignmask = alignmask;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.digestsize = alg->cra_blocksize;
inst->alg.descsize =
ALIGN(sizeof(struct cmac_desc_ctx), crypto_tfm_ctx_alignment())
+ (alignmask & ~(crypto_tfm_ctx_alignment() - 1))
+ alg->cra_blocksize * 2;
inst->alg.base.cra_ctxsize =
ALIGN(sizeof(struct cmac_tfm_ctx), crypto_tfm_ctx_alignment())
+ ((alignmask | (__alignof__(__be64) - 1)) &
~(crypto_tfm_ctx_alignment() - 1))
+ alg->cra_blocksize * 2;
inst->alg.init = crypto_cmac_digest_init;
inst->alg.update = crypto_cmac_digest_update;
inst->alg.final = crypto_cmac_digest_final;
inst->alg.setkey = crypto_cmac_digest_setkey;
inst->alg.init_tfm = cmac_init_tfm;
inst->alg.clone_tfm = cmac_clone_tfm;
inst->alg.exit_tfm = cmac_exit_tfm;
inst->free = shash_free_singlespawn_instance;
err = shash_register_instance(tmpl, inst);
if (err) {
err_free_inst:
shash_free_singlespawn_instance(inst);
}
return err;
}
static struct crypto_template crypto_cmac_tmpl = {
.name = "cmac",
.create = cmac_create,
.module = THIS_MODULE,
};
static int __init crypto_cmac_module_init(void)
{
return crypto_register_template(&crypto_cmac_tmpl);
}
static void __exit crypto_cmac_module_exit(void)
{
crypto_unregister_template(&crypto_cmac_tmpl);
}
subsys_initcall(crypto_cmac_module_init);
module_exit(crypto_cmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CMAC keyed hash algorithm");
MODULE_ALIAS_CRYPTO("cmac");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
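/*
 * Usage sketch (illustrative, not part of this file): computing an AES-CMAC
 * over a contiguous buffer through the synchronous shash API.
 */
static int cmac_aes_mac(const u8 *key, unsigned int keylen,
			const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, keylen);
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
	}
	crypto_free_shash(tfm);
	return err;
}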
/* ==== source: linux-master crypto/cmac.c ==== */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* DES & Triple DES EDE Cipher Algorithms.
*
* Copyright (c) 2005 Dag Arne Osvik <[email protected]>
*/
#include <asm/byteorder.h>
#include <crypto/algapi.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <crypto/internal/des.h>
static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct des_ctx *dctx = crypto_tfm_ctx(tfm);
int err;
err = des_expand_key(dctx, key, keylen);
if (err == -ENOKEY) {
if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)
err = -EINVAL;
else
err = 0;
}
if (err)
memset(dctx, 0, sizeof(*dctx));
return err;
}
static void crypto_des_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const struct des_ctx *dctx = crypto_tfm_ctx(tfm);
des_encrypt(dctx, dst, src);
}
static void crypto_des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const struct des_ctx *dctx = crypto_tfm_ctx(tfm);
des_decrypt(dctx, dst, src);
}
static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
int err;
err = des3_ede_expand_key(dctx, key, keylen);
if (err == -ENOKEY) {
if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)
err = -EINVAL;
else
err = 0;
}
if (err)
memset(dctx, 0, sizeof(*dctx));
return err;
}
static void crypto_des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst,
const u8 *src)
{
const struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
des3_ede_encrypt(dctx, dst, src);
}
static void crypto_des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst,
const u8 *src)
{
const struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
des3_ede_decrypt(dctx, dst, src);
}
static struct crypto_alg des_algs[2] = { {
.cra_name = "des",
.cra_driver_name = "des-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des_ctx),
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = DES_KEY_SIZE,
.cia_max_keysize = DES_KEY_SIZE,
.cia_setkey = des_setkey,
.cia_encrypt = crypto_des_encrypt,
.cia_decrypt = crypto_des_decrypt } }
}, {
.cra_name = "des3_ede",
.cra_driver_name = "des3_ede-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des3_ede_ctx),
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = DES3_EDE_KEY_SIZE,
.cia_max_keysize = DES3_EDE_KEY_SIZE,
.cia_setkey = des3_ede_setkey,
.cia_encrypt = crypto_des3_ede_encrypt,
.cia_decrypt = crypto_des3_ede_decrypt } }
} };
static int __init des_generic_mod_init(void)
{
return crypto_register_algs(des_algs, ARRAY_SIZE(des_algs));
}
static void __exit des_generic_mod_fini(void)
{
crypto_unregister_algs(des_algs, ARRAY_SIZE(des_algs));
}
subsys_initcall(des_generic_mod_init);
module_exit(des_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
MODULE_AUTHOR("Dag Arne Osvik <[email protected]>");
MODULE_ALIAS_CRYPTO("des");
MODULE_ALIAS_CRYPTO("des-generic");
MODULE_ALIAS_CRYPTO("des3_ede");
MODULE_ALIAS_CRYPTO("des3_ede-generic");
/* ==== source: linux-master crypto/des_generic.c ==== */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* CTR: Counter mode
*
* (C) Copyright IBM Corp. 2007 - Joy Latten <[email protected]>
*/
#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
struct crypto_rfc3686_ctx {
struct crypto_skcipher *child;
u8 nonce[CTR_RFC3686_NONCE_SIZE];
};
struct crypto_rfc3686_req_ctx {
u8 iv[CTR_RFC3686_BLOCK_SIZE];
struct skcipher_request subreq CRYPTO_MINALIGN_ATTR;
};
static void crypto_ctr_crypt_final(struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
unsigned int bsize = crypto_cipher_blocksize(tfm);
unsigned long alignmask = crypto_cipher_alignmask(tfm);
u8 *ctrblk = walk->iv;
u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
crypto_cipher_encrypt_one(tfm, keystream, ctrblk);
crypto_xor_cpy(dst, keystream, src, nbytes);
crypto_inc(ctrblk, bsize);
}
static int crypto_ctr_crypt_segment(struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
crypto_cipher_alg(tfm)->cia_encrypt;
unsigned int bsize = crypto_cipher_blocksize(tfm);
u8 *ctrblk = walk->iv;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
do {
/* create keystream */
fn(crypto_cipher_tfm(tfm), dst, ctrblk);
crypto_xor(dst, src, bsize);
/* increment counter in counterblock */
crypto_inc(ctrblk, bsize);
src += bsize;
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
}
static int crypto_ctr_crypt_inplace(struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
crypto_cipher_alg(tfm)->cia_encrypt;
unsigned int bsize = crypto_cipher_blocksize(tfm);
unsigned long alignmask = crypto_cipher_alignmask(tfm);
unsigned int nbytes = walk->nbytes;
u8 *ctrblk = walk->iv;
u8 *src = walk->src.virt.addr;
u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
do {
/* create keystream */
fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
crypto_xor(src, keystream, bsize);
/* increment counter in counterblock */
crypto_inc(ctrblk, bsize);
src += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
}
static int crypto_ctr_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
const unsigned int bsize = crypto_cipher_blocksize(cipher);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes >= bsize) {
if (walk.src.virt.addr == walk.dst.virt.addr)
nbytes = crypto_ctr_crypt_inplace(&walk, cipher);
else
nbytes = crypto_ctr_crypt_segment(&walk, cipher);
err = skcipher_walk_done(&walk, nbytes);
}
if (walk.nbytes) {
crypto_ctr_crypt_final(&walk, cipher);
err = skcipher_walk_done(&walk, 0);
}
return err;
}
static int crypto_ctr_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
struct crypto_alg *alg;
int err;
inst = skcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
alg = skcipher_ialg_simple(inst);
/* Block size must be >= 4 bytes. */
err = -EINVAL;
if (alg->cra_blocksize < 4)
goto out_free_inst;
/* If this is false we'd fail the alignment of crypto_inc. */
if (alg->cra_blocksize % 4)
goto out_free_inst;
/* CTR mode is a stream cipher. */
inst->alg.base.cra_blocksize = 1;
/*
* To simplify the implementation, configure the skcipher walk to only
* give a partial block at the very end, never earlier.
*/
inst->alg.chunksize = alg->cra_blocksize;
inst->alg.encrypt = crypto_ctr_crypt;
inst->alg.decrypt = crypto_ctr_crypt;
err = skcipher_register_instance(tmpl, inst);
if (err) {
out_free_inst:
inst->free(inst);
}
return err;
}
static int crypto_rfc3686_setkey(struct crypto_skcipher *parent,
const u8 *key, unsigned int keylen)
{
struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_skcipher *child = ctx->child;
/* the nonce occupies the last CTR_RFC3686_NONCE_SIZE bytes of the key */
if (keylen < CTR_RFC3686_NONCE_SIZE)
return -EINVAL;
memcpy(ctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
CTR_RFC3686_NONCE_SIZE);
keylen -= CTR_RFC3686_NONCE_SIZE;
crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
return crypto_skcipher_setkey(child, key, keylen);
}
static int crypto_rfc3686_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *child = ctx->child;
unsigned long align = crypto_skcipher_alignmask(tfm);
struct crypto_rfc3686_req_ctx *rctx =
(void *)PTR_ALIGN((u8 *)skcipher_request_ctx(req), align + 1);
struct skcipher_request *subreq = &rctx->subreq;
u8 *iv = rctx->iv;
/* set up counter block */
memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);
/* initialize counter portion of counter block */
*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
cpu_to_be32(1);
skcipher_request_set_tfm(subreq, child);
skcipher_request_set_callback(subreq, req->base.flags,
req->base.complete, req->base.data);
skcipher_request_set_crypt(subreq, req->src, req->dst,
req->cryptlen, iv);
return crypto_skcipher_encrypt(subreq);
}
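/*
 * The assembled 16-byte counter block, per RFC 3686:
 *
 *	byte  0        3 4                 11 12           15
 *	     +----------+--------------------+---------------+
 *	     | nonce    | per-request IV     | counter = 1   |
 *	     | (setkey) | (req->iv)          | (big endian)  |
 *	     +----------+--------------------+---------------+
 */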
static int crypto_rfc3686_init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *cipher;
unsigned long align;
unsigned int reqsize;
cipher = crypto_spawn_skcipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
align = crypto_skcipher_alignmask(tfm);
align &= ~(crypto_tfm_ctx_alignment() - 1);
reqsize = align + sizeof(struct crypto_rfc3686_req_ctx) +
crypto_skcipher_reqsize(cipher);
crypto_skcipher_set_reqsize(tfm, reqsize);
return 0;
}
static void crypto_rfc3686_exit_tfm(struct crypto_skcipher *tfm)
{
struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_skcipher(ctx->child);
}
static void crypto_rfc3686_free(struct skcipher_instance *inst)
{
struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
crypto_drop_skcipher(spawn);
kfree(inst);
}
static int crypto_rfc3686_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
struct skcipher_instance *inst;
struct skcipher_alg *alg;
struct crypto_skcipher_spawn *spawn;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = skcipher_instance_ctx(inst);
err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_skcipher_alg(spawn);
/* We only support 16-byte blocks. */
err = -EINVAL;
if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE)
goto err_free_inst;
/* Not a stream cipher? */
if (alg->base.cra_blocksize != 1)
goto err_free_inst;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"rfc3686(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"rfc3686(%s)", alg->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
inst->alg.ivsize = CTR_RFC3686_IV_SIZE;
inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
CTR_RFC3686_NONCE_SIZE;
inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
CTR_RFC3686_NONCE_SIZE;
inst->alg.setkey = crypto_rfc3686_setkey;
inst->alg.encrypt = crypto_rfc3686_crypt;
inst->alg.decrypt = crypto_rfc3686_crypt;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx);
inst->alg.init = crypto_rfc3686_init_tfm;
inst->alg.exit = crypto_rfc3686_exit_tfm;
inst->free = crypto_rfc3686_free;
err = skcipher_register_instance(tmpl, inst);
if (err) {
err_free_inst:
crypto_rfc3686_free(inst);
}
return err;
}
static struct crypto_template crypto_ctr_tmpls[] = {
{
.name = "ctr",
.create = crypto_ctr_create,
.module = THIS_MODULE,
}, {
.name = "rfc3686",
.create = crypto_rfc3686_create,
.module = THIS_MODULE,
},
};
static int __init crypto_ctr_module_init(void)
{
return crypto_register_templates(crypto_ctr_tmpls,
ARRAY_SIZE(crypto_ctr_tmpls));
}
static void __exit crypto_ctr_module_exit(void)
{
crypto_unregister_templates(crypto_ctr_tmpls,
ARRAY_SIZE(crypto_ctr_tmpls));
}
subsys_initcall(crypto_ctr_module_init);
module_exit(crypto_ctr_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CTR block cipher mode of operation");
MODULE_ALIAS_CRYPTO("rfc3686");
MODULE_ALIAS_CRYPTO("ctr");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
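/*
 * Usage sketch (illustrative, not part of this file): encrypting a buffer
 * in place with ctr(aes) through the skcipher API, waiting synchronously
 * for a possibly asynchronous implementation to complete.
 */
static int ctr_aes_encrypt(const u8 *key, unsigned int keylen,
			   u8 *buf, unsigned int len, u8 iv[16])
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return err;
}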
/* ==== source: linux-master crypto/ctr.c ==== */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API for the 842 software compression algorithm.
*
* Copyright (C) IBM Corporation, 2011-2015
*
* Original Authors: Robert Jennings <[email protected]>
* Seth Jennings <[email protected]>
*
* Rewrite: Dan Streetman <[email protected]>
*
* This is the software implementation of compression and decompression using
* the 842 format. This uses the software 842 library at lib/842/ which is
* only a reference implementation, and is very, very slow as compared to other
* software compressors. You probably do not want to use this software
* compression. If you have access to the PowerPC 842 compression hardware, you
* want to use the 842 hardware compression interface, which is at:
* drivers/crypto/nx/nx-842-crypto.c
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/sw842.h>
#include <crypto/internal/scompress.h>
struct crypto842_ctx {
void *wmem; /* working memory for compress */
};
static void *crypto842_alloc_ctx(struct crypto_scomp *tfm)
{
void *ctx;
ctx = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
return ctx;
}
static int crypto842_init(struct crypto_tfm *tfm)
{
struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->wmem = crypto842_alloc_ctx(NULL);
if (IS_ERR(ctx->wmem))
return -ENOMEM;
return 0;
}
static void crypto842_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
kfree(ctx);
}
static void crypto842_exit(struct crypto_tfm *tfm)
{
struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
crypto842_free_ctx(NULL, ctx->wmem);
}
static int crypto842_compress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
return sw842_compress(src, slen, dst, dlen, ctx->wmem);
}
static int crypto842_scompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
return sw842_compress(src, slen, dst, dlen, ctx);
}
static int crypto842_decompress(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
return sw842_decompress(src, slen, dst, dlen);
}
static int crypto842_sdecompress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
return sw842_decompress(src, slen, dst, dlen);
}
static struct crypto_alg alg = {
.cra_name = "842",
.cra_driver_name = "842-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct crypto842_ctx),
.cra_module = THIS_MODULE,
.cra_init = crypto842_init,
.cra_exit = crypto842_exit,
.cra_u = { .compress = {
.coa_compress = crypto842_compress,
.coa_decompress = crypto842_decompress } }
};
static struct scomp_alg scomp = {
.alloc_ctx = crypto842_alloc_ctx,
.free_ctx = crypto842_free_ctx,
.compress = crypto842_scompress,
.decompress = crypto842_sdecompress,
.base = {
.cra_name = "842",
.cra_driver_name = "842-scomp",
.cra_priority = 100,
.cra_module = THIS_MODULE,
}
};
static int __init crypto842_mod_init(void)
{
int ret;
ret = crypto_register_alg(&alg);
if (ret)
return ret;
ret = crypto_register_scomp(&scomp);
if (ret) {
crypto_unregister_alg(&alg);
return ret;
}
return ret;
}
subsys_initcall(crypto842_mod_init);
static void __exit crypto842_mod_exit(void)
{
crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
}
module_exit(crypto842_mod_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("842 Software Compression Algorithm");
MODULE_ALIAS_CRYPTO("842");
MODULE_ALIAS_CRYPTO("842-generic");
MODULE_AUTHOR("Dan Streetman <[email protected]>");
/* ==== source: linux-master crypto/842.c ==== */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* af_alg: User-space algorithm interface
*
* This file provides the user-space API for algorithms.
*
* Copyright (c) 2010 Herbert Xu <[email protected]>
*/
#include <linux/atomic.h>
#include <crypto/if_alg.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/key-type.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/security.h>
#include <linux/string.h>
#include <keys/user-type.h>
#include <keys/trusted-type.h>
#include <keys/encrypted-type.h>
struct alg_type_list {
const struct af_alg_type *type;
struct list_head list;
};
static struct proto alg_proto = {
.name = "ALG",
.owner = THIS_MODULE,
.obj_size = sizeof(struct alg_sock),
};
static LIST_HEAD(alg_types);
static DECLARE_RWSEM(alg_types_sem);
static const struct af_alg_type *alg_get_type(const char *name)
{
const struct af_alg_type *type = ERR_PTR(-ENOENT);
struct alg_type_list *node;
down_read(&alg_types_sem);
list_for_each_entry(node, &alg_types, list) {
if (strcmp(node->type->name, name))
continue;
if (try_module_get(node->type->owner))
type = node->type;
break;
}
up_read(&alg_types_sem);
return type;
}
int af_alg_register_type(const struct af_alg_type *type)
{
struct alg_type_list *node;
int err = -EEXIST;
down_write(&alg_types_sem);
list_for_each_entry(node, &alg_types, list) {
if (!strcmp(node->type->name, type->name))
goto unlock;
}
node = kmalloc(sizeof(*node), GFP_KERNEL);
err = -ENOMEM;
if (!node)
goto unlock;
type->ops->owner = THIS_MODULE;
if (type->ops_nokey)
type->ops_nokey->owner = THIS_MODULE;
node->type = type;
list_add(&node->list, &alg_types);
err = 0;
unlock:
up_write(&alg_types_sem);
return err;
}
EXPORT_SYMBOL_GPL(af_alg_register_type);
int af_alg_unregister_type(const struct af_alg_type *type)
{
struct alg_type_list *node;
int err = -ENOENT;
down_write(&alg_types_sem);
list_for_each_entry(node, &alg_types, list) {
if (strcmp(node->type->name, type->name))
continue;
list_del(&node->list);
kfree(node);
err = 0;
break;
}
up_write(&alg_types_sem);
return err;
}
EXPORT_SYMBOL_GPL(af_alg_unregister_type);
static void alg_do_release(const struct af_alg_type *type, void *private)
{
if (!type)
return;
type->release(private);
module_put(type->owner);
}
int af_alg_release(struct socket *sock)
{
if (sock->sk) {
sock_put(sock->sk);
sock->sk = NULL;
}
return 0;
}
EXPORT_SYMBOL_GPL(af_alg_release);
void af_alg_release_parent(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
unsigned int nokey = atomic_read(&ask->nokey_refcnt);
sk = ask->parent;
ask = alg_sk(sk);
if (nokey)
atomic_dec(&ask->nokey_refcnt);
if (atomic_dec_and_test(&ask->refcnt))
sock_put(sk);
}
EXPORT_SYMBOL_GPL(af_alg_release_parent);
static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sockaddr_alg_new *sa = (void *)uaddr;
const struct af_alg_type *type;
void *private;
int err;
if (sock->state == SS_CONNECTED)
return -EINVAL;
BUILD_BUG_ON(offsetof(struct sockaddr_alg_new, salg_name) !=
offsetof(struct sockaddr_alg, salg_name));
BUILD_BUG_ON(offsetof(struct sockaddr_alg, salg_name) != sizeof(*sa));
if (addr_len < sizeof(*sa) + 1)
return -EINVAL;
/* If the caller passes a flag outside the allowed set, return an error. */
if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
return -EINVAL;
sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
sa->salg_name[addr_len - sizeof(*sa) - 1] = 0;
type = alg_get_type(sa->salg_type);
if (PTR_ERR(type) == -ENOENT) {
request_module("algif-%s", sa->salg_type);
type = alg_get_type(sa->salg_type);
}
if (IS_ERR(type))
return PTR_ERR(type);
private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
if (IS_ERR(private)) {
module_put(type->owner);
return PTR_ERR(private);
}
err = -EBUSY;
lock_sock(sk);
if (atomic_read(&ask->refcnt))
goto unlock;
swap(ask->type, type);
swap(ask->private, private);
err = 0;
unlock:
release_sock(sk);
alg_do_release(type, private);
return err;
}
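/*
 * User-space view of the bind path above (illustrative sketch; error
 * handling omitted):
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "hash",
 *		.salg_name   = "sha256",
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	int opfd;
 *	unsigned char digest[32];
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	opfd = accept(tfmfd, NULL, 0);
 *	write(opfd, "abc", 3);
 *	read(opfd, digest, sizeof(digest));
 */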
static int alg_setkey(struct sock *sk, sockptr_t ukey, unsigned int keylen)
{
struct alg_sock *ask = alg_sk(sk);
const struct af_alg_type *type = ask->type;
u8 *key;
int err;
key = sock_kmalloc(sk, keylen, GFP_KERNEL);
if (!key)
return -ENOMEM;
err = -EFAULT;
if (copy_from_sockptr(key, ukey, keylen))
goto out;
err = type->setkey(ask->private, key, keylen);
out:
sock_kzfree_s(sk, key, keylen);
return err;
}
#ifdef CONFIG_KEYS
static const u8 *key_data_ptr_user(const struct key *key,
unsigned int *datalen)
{
const struct user_key_payload *ukp;
ukp = user_key_payload_locked(key);
if (IS_ERR_OR_NULL(ukp))
return ERR_PTR(-EKEYREVOKED);
*datalen = key->datalen;
return ukp->data;
}
static const u8 *key_data_ptr_encrypted(const struct key *key,
unsigned int *datalen)
{
const struct encrypted_key_payload *ekp;
ekp = dereference_key_locked(key);
if (IS_ERR_OR_NULL(ekp))
return ERR_PTR(-EKEYREVOKED);
*datalen = ekp->decrypted_datalen;
return ekp->decrypted_data;
}
static const u8 *key_data_ptr_trusted(const struct key *key,
unsigned int *datalen)
{
const struct trusted_key_payload *tkp;
tkp = dereference_key_locked(key);
if (IS_ERR_OR_NULL(tkp))
return ERR_PTR(-EKEYREVOKED);
*datalen = tkp->key_len;
return tkp->key;
}
static struct key *lookup_key(key_serial_t serial)
{
key_ref_t key_ref;
key_ref = lookup_user_key(serial, 0, KEY_NEED_SEARCH);
if (IS_ERR(key_ref))
return ERR_CAST(key_ref);
return key_ref_to_ptr(key_ref);
}
static int alg_setkey_by_key_serial(struct alg_sock *ask, sockptr_t optval,
unsigned int optlen)
{
const struct af_alg_type *type = ask->type;
u8 *key_data = NULL;
unsigned int key_datalen;
key_serial_t serial;
struct key *key;
const u8 *ret;
int err;
if (optlen != sizeof(serial))
return -EINVAL;
if (copy_from_sockptr(&serial, optval, optlen))
return -EFAULT;
key = lookup_key(serial);
if (IS_ERR(key))
return PTR_ERR(key);
down_read(&key->sem);
ret = ERR_PTR(-ENOPROTOOPT);
if (!strcmp(key->type->name, "user") ||
!strcmp(key->type->name, "logon")) {
ret = key_data_ptr_user(key, &key_datalen);
} else if (IS_REACHABLE(CONFIG_ENCRYPTED_KEYS) &&
!strcmp(key->type->name, "encrypted")) {
ret = key_data_ptr_encrypted(key, &key_datalen);
} else if (IS_REACHABLE(CONFIG_TRUSTED_KEYS) &&
!strcmp(key->type->name, "trusted")) {
ret = key_data_ptr_trusted(key, &key_datalen);
}
if (IS_ERR(ret)) {
up_read(&key->sem);
key_put(key);
return PTR_ERR(ret);
}
key_data = sock_kmalloc(&ask->sk, key_datalen, GFP_KERNEL);
if (!key_data) {
up_read(&key->sem);
key_put(key);
return -ENOMEM;
}
memcpy(key_data, ret, key_datalen);
up_read(&key->sem);
key_put(key);
err = type->setkey(ask->private, key_data, key_datalen);
sock_kzfree_s(&ask->sk, key_data, key_datalen);
return err;
}
#else
static inline int alg_setkey_by_key_serial(struct alg_sock *ask,
sockptr_t optval,
unsigned int optlen)
{
return -ENOPROTOOPT;
}
#endif
static int alg_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
const struct af_alg_type *type;
int err = -EBUSY;
lock_sock(sk);
if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt))
goto unlock;
type = ask->type;
err = -ENOPROTOOPT;
if (level != SOL_ALG || !type)
goto unlock;
switch (optname) {
case ALG_SET_KEY:
case ALG_SET_KEY_BY_KEY_SERIAL:
if (sock->state == SS_CONNECTED)
goto unlock;
if (!type->setkey)
goto unlock;
if (optname == ALG_SET_KEY_BY_KEY_SERIAL)
err = alg_setkey_by_key_serial(ask, optval, optlen);
else
err = alg_setkey(sk, optval, optlen);
break;
case ALG_SET_AEAD_AUTHSIZE:
if (sock->state == SS_CONNECTED)
goto unlock;
if (!type->setauthsize)
goto unlock;
err = type->setauthsize(ask->private, optlen);
break;
case ALG_SET_DRBG_ENTROPY:
if (sock->state == SS_CONNECTED)
goto unlock;
if (!type->setentropy)
goto unlock;
err = type->setentropy(ask->private, optval, optlen);
}
unlock:
release_sock(sk);
return err;
}
int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
{
struct alg_sock *ask = alg_sk(sk);
const struct af_alg_type *type;
struct sock *sk2;
unsigned int nokey;
int err;
lock_sock(sk);
type = ask->type;
err = -EINVAL;
if (!type)
goto unlock;
sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
err = -ENOMEM;
if (!sk2)
goto unlock;
sock_init_data(newsock, sk2);
security_sock_graft(sk2, newsock);
security_sk_clone(sk, sk2);
/*
* newsock->ops assigned here to allow type->accept call to override
* them when required.
*/
newsock->ops = type->ops;
err = type->accept(ask->private, sk2);
nokey = err == -ENOKEY;
if (nokey && type->accept_nokey)
err = type->accept_nokey(ask->private, sk2);
if (err)
goto unlock;
if (atomic_inc_return_relaxed(&ask->refcnt) == 1)
sock_hold(sk);
if (nokey) {
atomic_inc(&ask->nokey_refcnt);
atomic_set(&alg_sk(sk2)->nokey_refcnt, 1);
}
alg_sk(sk2)->parent = sk;
alg_sk(sk2)->type = type;
newsock->state = SS_CONNECTED;
if (nokey)
newsock->ops = type->ops_nokey;
err = 0;
unlock:
release_sock(sk);
return err;
}
EXPORT_SYMBOL_GPL(af_alg_accept);
static int alg_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
{
return af_alg_accept(sock->sk, newsock, kern);
}
static const struct proto_ops alg_proto_ops = {
.family = PF_ALG,
.owner = THIS_MODULE,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.mmap = sock_no_mmap,
.sendmsg = sock_no_sendmsg,
.recvmsg = sock_no_recvmsg,
.bind = alg_bind,
.release = af_alg_release,
.setsockopt = alg_setsockopt,
.accept = alg_accept,
};
static void alg_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
alg_do_release(ask->type, ask->private);
}
static int alg_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
int err;
if (sock->type != SOCK_SEQPACKET)
return -ESOCKTNOSUPPORT;
if (protocol != 0)
return -EPROTONOSUPPORT;
err = -ENOMEM;
sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto, kern);
if (!sk)
goto out;
sock->ops = &alg_proto_ops;
sock_init_data(sock, sk);
sk->sk_destruct = alg_sock_destruct;
return 0;
out:
return err;
}
static const struct net_proto_family alg_family = {
.family = PF_ALG,
.create = alg_create,
.owner = THIS_MODULE,
};
static void af_alg_link_sg(struct af_alg_sgl *sgl_prev,
struct af_alg_sgl *sgl_new)
{
sg_unmark_end(sgl_prev->sgt.sgl + sgl_prev->sgt.nents - 1);
sg_chain(sgl_prev->sgt.sgl, sgl_prev->sgt.nents + 1, sgl_new->sgt.sgl);
}
void af_alg_free_sg(struct af_alg_sgl *sgl)
{
int i;
if (sgl->sgt.sgl) {
if (sgl->need_unpin)
for (i = 0; i < sgl->sgt.nents; i++)
unpin_user_page(sg_page(&sgl->sgt.sgl[i]));
if (sgl->sgt.sgl != sgl->sgl)
kvfree(sgl->sgt.sgl);
sgl->sgt.sgl = NULL;
}
}
EXPORT_SYMBOL_GPL(af_alg_free_sg);
static int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
{
struct cmsghdr *cmsg;
for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
if (cmsg->cmsg_level != SOL_ALG)
continue;
switch (cmsg->cmsg_type) {
case ALG_SET_IV:
if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv)))
return -EINVAL;
con->iv = (void *)CMSG_DATA(cmsg);
if (cmsg->cmsg_len < CMSG_LEN(con->iv->ivlen +
sizeof(*con->iv)))
return -EINVAL;
break;
case ALG_SET_OP:
if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32)))
return -EINVAL;
con->op = *(u32 *)CMSG_DATA(cmsg);
break;
case ALG_SET_AEAD_ASSOCLEN:
if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32)))
return -EINVAL;
con->aead_assoclen = *(u32 *)CMSG_DATA(cmsg);
break;
default:
return -EINVAL;
}
}
return 0;
}
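/*
 * User-space sketch of the control messages parsed above (illustrative):
 *
 *	char cbuf[CMSG_SPACE(sizeof(__u32))] = { 0 };
 *	struct msghdr msg = {
 *		.msg_control    = cbuf,
 *		.msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_ALG;
 *	cmsg->cmsg_type  = ALG_SET_OP;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(__u32));
 *	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
 *
 * An ALG_SET_IV message carries a struct af_alg_iv header followed by
 * ivlen bytes of IV; ALG_SET_AEAD_ASSOCLEN carries a single __u32.
 */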
/**
* af_alg_alloc_tsgl - allocate the TX SGL
*
* @sk: socket of connection to user space
* Return: 0 upon success, < 0 upon error
*/
static int af_alg_alloc_tsgl(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
struct af_alg_tsgl *sgl;
struct scatterlist *sg = NULL;
sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list);
if (!list_empty(&ctx->tsgl_list))
sg = sgl->sg;
if (!sg || sgl->cur >= MAX_SGL_ENTS) {
sgl = sock_kmalloc(sk,
struct_size(sgl, sg, (MAX_SGL_ENTS + 1)),
GFP_KERNEL);
if (!sgl)
return -ENOMEM;
sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
sgl->cur = 0;
if (sg)
sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
list_add_tail(&sgl->list, &ctx->tsgl_list);
}
return 0;
}
/**
* af_alg_count_tsgl - Count number of TX SG entries
*
* The counting starts from the beginning of the SGL to @bytes. If
* an @offset is provided, the counting of the SG entries starts at the @offset.
*
* @sk: socket of connection to user space
* @bytes: Count the number of SG entries holding given number of bytes.
* @offset: Start the counting of SG entries from the given offset.
* Return: Number of TX SG entries found given the constraints
*/
unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
{
const struct alg_sock *ask = alg_sk(sk);
const struct af_alg_ctx *ctx = ask->private;
const struct af_alg_tsgl *sgl;
unsigned int i;
unsigned int sgl_count = 0;
if (!bytes)
return 0;
list_for_each_entry(sgl, &ctx->tsgl_list, list) {
const struct scatterlist *sg = sgl->sg;
for (i = 0; i < sgl->cur; i++) {
size_t bytes_count;
/* Skip offset */
if (offset >= sg[i].length) {
offset -= sg[i].length;
bytes -= sg[i].length;
continue;
}
bytes_count = sg[i].length - offset;
offset = 0;
sgl_count++;
/* If we have seen requested number of bytes, stop */
if (bytes_count >= bytes)
return sgl_count;
bytes -= bytes_count;
}
}
return sgl_count;
}
EXPORT_SYMBOL_GPL(af_alg_count_tsgl);
/**
* af_alg_pull_tsgl - Release the specified buffers from TX SGL
*
* If @dst is non-null, reassign the pages to @dst. The caller must release
* the pages. If @dst_offset is given only reassign the pages to @dst starting
* at the @dst_offset (byte). The caller must ensure that @dst is large
* enough (e.g. by using af_alg_count_tsgl with the same offset).
*
* @sk: socket of connection to user space
* @used: Number of bytes to pull from TX SGL
* @dst: If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
* caller must release the buffers in dst.
* @dst_offset: Reassign the TX SGL from given offset. All buffers before
* reaching the offset is released.
*/
void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
size_t dst_offset)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
struct af_alg_tsgl *sgl;
struct scatterlist *sg;
unsigned int i, j = 0;
while (!list_empty(&ctx->tsgl_list)) {
sgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl,
list);
sg = sgl->sg;
for (i = 0; i < sgl->cur; i++) {
size_t plen = min_t(size_t, used, sg[i].length);
struct page *page = sg_page(sg + i);
if (!page)
continue;
/*
* Assumption: caller created af_alg_count_tsgl(len)
* SG entries in dst.
*/
if (dst) {
if (dst_offset >= plen) {
/* discard page before offset */
dst_offset -= plen;
} else {
/* reassign page to dst after offset */
get_page(page);
sg_set_page(dst + j, page,
plen - dst_offset,
sg[i].offset + dst_offset);
dst_offset = 0;
j++;
}
}
sg[i].length -= plen;
sg[i].offset += plen;
used -= plen;
ctx->used -= plen;
if (sg[i].length)
return;
put_page(page);
sg_assign_page(sg + i, NULL);
}
list_del(&sgl->list);
sock_kfree_s(sk, sgl, struct_size(sgl, sg, MAX_SGL_ENTS + 1));
}
if (!ctx->used)
ctx->merge = 0;
ctx->init = ctx->more;
}
EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
/**
* af_alg_free_areq_sgls - Release TX and RX SGLs of the request
*
* @areq: Request holding the TX and RX SGL
*/
static void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
{
struct sock *sk = areq->sk;
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
struct af_alg_rsgl *rsgl, *tmp;
struct scatterlist *tsgl;
struct scatterlist *sg;
unsigned int i;
list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused);
af_alg_free_sg(&rsgl->sgl);
list_del(&rsgl->list);
if (rsgl != &areq->first_rsgl)
sock_kfree_s(sk, rsgl, sizeof(*rsgl));
}
tsgl = areq->tsgl;
if (tsgl) {
for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
if (!sg_page(sg))
continue;
put_page(sg_page(sg));
}
sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
}
}
/**
* af_alg_wait_for_wmem - wait for availability of writable memory
*
* @sk: socket of connection to user space
* @flags: If MSG_DONTWAIT is set, then only report if function would sleep
* Return: 0 when writable memory is available, < 0 upon error
*/
static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
int err = -ERESTARTSYS;
long timeout;
if (flags & MSG_DONTWAIT)
return -EAGAIN;
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
add_wait_queue(sk_sleep(sk), &wait);
for (;;) {
if (signal_pending(current))
break;
timeout = MAX_SCHEDULE_TIMEOUT;
if (sk_wait_event(sk, &timeout, af_alg_writable(sk), &wait)) {
err = 0;
break;
}
}
remove_wait_queue(sk_sleep(sk), &wait);
return err;
}
/**
* af_alg_wmem_wakeup - wakeup caller when writable memory is available
*
* @sk: socket of connection to user space
*/
void af_alg_wmem_wakeup(struct sock *sk)
{
struct socket_wq *wq;
if (!af_alg_writable(sk))
return;
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
EPOLLRDNORM |
EPOLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
/**
* af_alg_wait_for_data - wait for availability of TX data
*
* @sk: socket of connection to user space
* @flags: If MSG_DONTWAIT is set, then only report if function would sleep
* @min: Set to minimum request size if partial requests are allowed.
* Return: 0 when data is available, < 0 upon error
*/
int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
long timeout;
int err = -ERESTARTSYS;
if (flags & MSG_DONTWAIT)
return -EAGAIN;
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
add_wait_queue(sk_sleep(sk), &wait);
for (;;) {
if (signal_pending(current))
break;
timeout = MAX_SCHEDULE_TIMEOUT;
if (sk_wait_event(sk, &timeout,
ctx->init && (!ctx->more ||
(min && ctx->used >= min)),
&wait)) {
err = 0;
break;
}
}
remove_wait_queue(sk_sleep(sk), &wait);
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
return err;
}
EXPORT_SYMBOL_GPL(af_alg_wait_for_data);
/**
* af_alg_data_wakeup - wakeup caller when new data can be sent to kernel
*
* @sk: socket of connection to user space
*/
static void af_alg_data_wakeup(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
struct socket_wq *wq;
if (!ctx->used)
return;
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
EPOLLRDNORM |
EPOLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
rcu_read_unlock();
}
/**
* af_alg_sendmsg - implementation of sendmsg system call handler
*
* The sendmsg system call handler obtains the user data and stores it
* in ctx->tsgl_list. This implies allocation of the required numbers of
* struct af_alg_tsgl.
*
* In addition, the ctx is filled with the information sent via CMSG.
*
* @sock: socket of connection to user space
* @msg: message from user space
* @size: size of message from user space
* @ivsize: the size of the IV for the cipher operation to verify that the
* user-space-provided IV has the right size
* Return: the number of copied data upon success, < 0 upon error
*/
int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
unsigned int ivsize)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
struct af_alg_tsgl *sgl;
struct af_alg_control con = {};
long copied = 0;
bool enc = false;
bool init = false;
int err = 0;
if (msg->msg_controllen) {
err = af_alg_cmsg_send(msg, &con);
if (err)
return err;
init = true;
switch (con.op) {
case ALG_OP_ENCRYPT:
enc = true;
break;
case ALG_OP_DECRYPT:
enc = false;
break;
default:
return -EINVAL;
}
if (con.iv && con.iv->ivlen != ivsize)
return -EINVAL;
}
lock_sock(sk);
if (ctx->init && !ctx->more) {
if (ctx->used) {
err = -EINVAL;
goto unlock;
}
pr_info_once(
"%s sent an empty control message without MSG_MORE.\n",
current->comm);
}
ctx->init = true;
if (init) {
ctx->enc = enc;
if (con.iv)
memcpy(ctx->iv, con.iv->iv, ivsize);
ctx->aead_assoclen = con.aead_assoclen;
}
while (size) {
struct scatterlist *sg;
size_t len = size;
ssize_t plen;
/* use the existing memory in an allocated page */
if (ctx->merge && !(msg->msg_flags & MSG_SPLICE_PAGES)) {
sgl = list_entry(ctx->tsgl_list.prev,
struct af_alg_tsgl, list);
sg = sgl->sg + sgl->cur - 1;
len = min_t(size_t, len,
PAGE_SIZE - sg->offset - sg->length);
err = memcpy_from_msg(page_address(sg_page(sg)) +
sg->offset + sg->length,
msg, len);
if (err)
goto unlock;
sg->length += len;
ctx->merge = (sg->offset + sg->length) &
(PAGE_SIZE - 1);
ctx->used += len;
copied += len;
size -= len;
continue;
}
if (!af_alg_writable(sk)) {
err = af_alg_wait_for_wmem(sk, msg->msg_flags);
if (err)
goto unlock;
}
/* allocate a new page */
len = min_t(unsigned long, len, af_alg_sndbuf(sk));
err = af_alg_alloc_tsgl(sk);
if (err)
goto unlock;
sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl,
list);
sg = sgl->sg;
if (sgl->cur)
sg_unmark_end(sg + sgl->cur - 1);
if (msg->msg_flags & MSG_SPLICE_PAGES) {
struct sg_table sgtable = {
.sgl = sg,
.nents = sgl->cur,
.orig_nents = sgl->cur,
};
plen = extract_iter_to_sg(&msg->msg_iter, len, &sgtable,
MAX_SGL_ENTS - sgl->cur, 0);
if (plen < 0) {
err = plen;
goto unlock;
}
for (; sgl->cur < sgtable.nents; sgl->cur++)
get_page(sg_page(&sg[sgl->cur]));
len -= plen;
ctx->used += plen;
copied += plen;
size -= plen;
ctx->merge = 0;
} else {
do {
struct page *pg;
unsigned int i = sgl->cur;
plen = min_t(size_t, len, PAGE_SIZE);
pg = alloc_page(GFP_KERNEL);
if (!pg) {
err = -ENOMEM;
goto unlock;
}
sg_assign_page(sg + i, pg);
err = memcpy_from_msg(
page_address(sg_page(sg + i)),
msg, plen);
if (err) {
__free_page(sg_page(sg + i));
sg_assign_page(sg + i, NULL);
goto unlock;
}
sg[i].length = plen;
len -= plen;
ctx->used += plen;
copied += plen;
size -= plen;
sgl->cur++;
} while (len && sgl->cur < MAX_SGL_ENTS);
ctx->merge = plen & (PAGE_SIZE - 1);
}
if (!size)
sg_mark_end(sg + sgl->cur - 1);
}
err = 0;
ctx->more = msg->msg_flags & MSG_MORE;
unlock:
af_alg_data_wakeup(sk);
release_sock(sk);
return copied ?: err;
}
EXPORT_SYMBOL_GPL(af_alg_sendmsg);
/**
* af_alg_free_resources - release resources required for crypto request
* @areq: Request holding the TX and RX SGL
*/
void af_alg_free_resources(struct af_alg_async_req *areq)
{
struct sock *sk = areq->sk;
af_alg_free_areq_sgls(areq);
sock_kfree_s(sk, areq, areq->areqlen);
}
EXPORT_SYMBOL_GPL(af_alg_free_resources);
/**
* af_alg_async_cb - AIO callback handler
* @data: async request completion data
* @err: if non-zero, error result to be returned via ki_complete();
* otherwise return the AIO output length via ki_complete().
*
* This handler cleans up the struct af_alg_async_req upon completion of the
* AIO operation.
*
* The number of bytes to be generated with the AIO operation must be set
* in areq->outlen before the AIO callback handler is invoked.
*/
void af_alg_async_cb(void *data, int err)
{
struct af_alg_async_req *areq = data;
struct sock *sk = areq->sk;
struct kiocb *iocb = areq->iocb;
unsigned int resultlen;
/* Buffer size written by crypto operation. */
resultlen = areq->outlen;
af_alg_free_resources(areq);
sock_put(sk);
iocb->ki_complete(iocb, err ? err : (int)resultlen);
}
EXPORT_SYMBOL_GPL(af_alg_async_cb);
/**
* af_alg_poll - poll system call handler
* @file: file pointer
* @sock: socket to poll
* @wait: poll_table
*/
__poll_t af_alg_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
__poll_t mask;
sock_poll_wait(file, sock, wait);
mask = 0;
if (!ctx->more || ctx->used)
mask |= EPOLLIN | EPOLLRDNORM;
if (af_alg_writable(sk))
mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
return mask;
}
EXPORT_SYMBOL_GPL(af_alg_poll);
/**
* af_alg_alloc_areq - allocate struct af_alg_async_req
*
* @sk: socket of connection to user space
* @areqlen: size of struct af_alg_async_req + crypto_*_reqsize
* Return: allocated data structure or ERR_PTR upon error
*/
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
unsigned int areqlen)
{
struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
if (unlikely(!areq))
return ERR_PTR(-ENOMEM);
areq->areqlen = areqlen;
areq->sk = sk;
areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl;
areq->last_rsgl = NULL;
INIT_LIST_HEAD(&areq->rsgl_list);
areq->tsgl = NULL;
areq->tsgl_entries = 0;
return areq;
}
EXPORT_SYMBOL_GPL(af_alg_alloc_areq);
/**
* af_alg_get_rsgl - create the RX SGL for the output data from the crypto
* operation
*
* @sk: socket of connection to user space
* @msg: user space message
* @flags: flags used to invoke recvmsg with
* @areq: instance of the cryptographic request that will hold the RX SGL
* @maxsize: maximum number of bytes to be pulled from user space
* @outlen: number of bytes in the RX SGL
* Return: 0 on success, < 0 upon error
*/
int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
struct af_alg_async_req *areq, size_t maxsize,
size_t *outlen)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
size_t len = 0;
while (maxsize > len && msg_data_left(msg)) {
struct af_alg_rsgl *rsgl;
ssize_t err;
size_t seglen;
/* limit the amount of readable buffers */
if (!af_alg_readable(sk))
break;
seglen = min_t(size_t, (maxsize - len),
msg_data_left(msg));
if (list_empty(&areq->rsgl_list)) {
rsgl = &areq->first_rsgl;
} else {
rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
if (unlikely(!rsgl))
return -ENOMEM;
}
rsgl->sgl.need_unpin =
iov_iter_extract_will_pin(&msg->msg_iter);
rsgl->sgl.sgt.sgl = rsgl->sgl.sgl;
rsgl->sgl.sgt.nents = 0;
rsgl->sgl.sgt.orig_nents = 0;
list_add_tail(&rsgl->list, &areq->rsgl_list);
sg_init_table(rsgl->sgl.sgt.sgl, ALG_MAX_PAGES);
err = extract_iter_to_sg(&msg->msg_iter, seglen, &rsgl->sgl.sgt,
ALG_MAX_PAGES, 0);
if (err < 0) {
rsgl->sg_num_bytes = 0;
return err;
}
sg_mark_end(rsgl->sgl.sgt.sgl + rsgl->sgl.sgt.nents - 1);
/* chain the new scatterlist with previous one */
if (areq->last_rsgl)
af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl);
areq->last_rsgl = rsgl;
len += err;
atomic_add(err, &ctx->rcvused);
rsgl->sg_num_bytes = err;
}
*outlen = len;
return 0;
}
EXPORT_SYMBOL_GPL(af_alg_get_rsgl);
static int __init af_alg_init(void)
{
int err = proto_register(&alg_proto, 0);
if (err)
goto out;
err = sock_register(&alg_family);
if (err != 0)
goto out_unregister_proto;
out:
return err;
out_unregister_proto:
proto_unregister(&alg_proto);
goto out;
}
static void __exit af_alg_exit(void)
{
sock_unregister(PF_ALG);
proto_unregister(&alg_proto);
}
module_init(af_alg_init);
module_exit(af_alg_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(AF_ALG);
/* ==== source: linux-master crypto/af_alg.c ==== */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Algorithm testing framework and tests.
*
* Copyright (c) 2002 James Morris <[email protected]>
* Copyright (c) 2002 Jean-Francois Dive <[email protected]>
* Copyright (c) 2007 Nokia Siemens Networks
* Copyright (c) 2008 Herbert Xu <[email protected]>
* Copyright (c) 2019 Google LLC
*
* Updated RFC4106 AES-GCM testing.
* Authors: Aidan O'Mahony ([email protected])
* Adrian Hoban <[email protected]>
* Gabriele Paoloni <[email protected]>
* Tadeusz Struk ([email protected])
* Copyright (c) 2010, Intel Corporation.
*/
#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/once.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uio.h>
#include <crypto/rng.h>
#include <crypto/drbg.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/acompress.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/simd.h>
#include "internal.h"
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
static bool notests;
module_param(notests, bool, 0644);
MODULE_PARM_DESC(notests, "disable crypto self-tests");
static bool panic_on_fail;
module_param(panic_on_fail, bool, 0444);
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
static bool noextratests;
module_param(noextratests, bool, 0644);
MODULE_PARM_DESC(noextratests, "disable expensive crypto self-tests");
static unsigned int fuzz_iterations = 100;
module_param(fuzz_iterations, uint, 0644);
MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations");
#endif
#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
/* a perfect nop */
int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
{
return 0;
}
#else
#include "testmgr.h"
/*
* Need slab memory for testing (size in number of pages).
*/
#define XBUFSIZE 8
/*
* Used by test_cipher()
*/
#define ENCRYPT 1
#define DECRYPT 0
struct aead_test_suite {
const struct aead_testvec *vecs;
unsigned int count;
/*
* Set if trying to decrypt an inauthentic ciphertext with this
* algorithm might result in EINVAL rather than EBADMSG, due to other
* validation the algorithm does on the inputs such as length checks.
*/
unsigned int einval_allowed : 1;
/*
* Set if this algorithm requires that the IV be located at the end of
* the AAD buffer, in addition to being given in the normal way. The
* behavior when the two IV copies differ is implementation-defined.
*/
unsigned int aad_iv : 1;
};
struct cipher_test_suite {
const struct cipher_testvec *vecs;
unsigned int count;
};
struct comp_test_suite {
struct {
const struct comp_testvec *vecs;
unsigned int count;
} comp, decomp;
};
struct hash_test_suite {
const struct hash_testvec *vecs;
unsigned int count;
};
struct cprng_test_suite {
const struct cprng_testvec *vecs;
unsigned int count;
};
struct drbg_test_suite {
const struct drbg_testvec *vecs;
unsigned int count;
};
struct akcipher_test_suite {
const struct akcipher_testvec *vecs;
unsigned int count;
};
struct kpp_test_suite {
const struct kpp_testvec *vecs;
unsigned int count;
};
struct alg_test_desc {
const char *alg;
const char *generic_driver;
int (*test)(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask);
int fips_allowed; /* set if alg is allowed in fips mode */
union {
struct aead_test_suite aead;
struct cipher_test_suite cipher;
struct comp_test_suite comp;
struct hash_test_suite hash;
struct cprng_test_suite cprng;
struct drbg_test_suite drbg;
struct akcipher_test_suite akcipher;
struct kpp_test_suite kpp;
} suite;
};
static void hexdump(unsigned char *buf, unsigned int len)
{
print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
16, 1,
buf, len, false);
}
static int __testmgr_alloc_buf(char *buf[XBUFSIZE], int order)
{
int i;
for (i = 0; i < XBUFSIZE; i++) {
buf[i] = (char *)__get_free_pages(GFP_KERNEL, order);
if (!buf[i])
goto err_free_buf;
}
return 0;
err_free_buf:
while (i-- > 0)
free_pages((unsigned long)buf[i], order);
return -ENOMEM;
}
static int testmgr_alloc_buf(char *buf[XBUFSIZE])
{
return __testmgr_alloc_buf(buf, 0);
}
static void __testmgr_free_buf(char *buf[XBUFSIZE], int order)
{
int i;
for (i = 0; i < XBUFSIZE; i++)
free_pages((unsigned long)buf[i], order);
}
static void testmgr_free_buf(char *buf[XBUFSIZE])
{
__testmgr_free_buf(buf, 0);
}
#define TESTMGR_POISON_BYTE 0xfe
#define TESTMGR_POISON_LEN 16
static inline void testmgr_poison(void *addr, size_t len)
{
memset(addr, TESTMGR_POISON_BYTE, len);
}
/* Is the memory region still fully poisoned? */
static inline bool testmgr_is_poison(const void *addr, size_t len)
{
return memchr_inv(addr, TESTMGR_POISON_BYTE, len) == NULL;
}
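/*
 * Typical use (illustrative): poison the bytes just past the expected
 * output, run the operation, then check that the implementation did not
 * write out of bounds:
 *
 *	testmgr_poison(buf + outlen, TESTMGR_POISON_LEN);
 *	... perform the crypto operation into buf ...
 *	if (!testmgr_is_poison(buf + outlen, TESTMGR_POISON_LEN))
 *		pr_err("buffer overrun detected\n");
 */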
/* flush type for hash algorithms */
enum flush_type {
/* merge with update of previous buffer(s) */
FLUSH_TYPE_NONE = 0,
/* update with previous buffer(s) before doing this one */
FLUSH_TYPE_FLUSH,
/* likewise, but also export and re-import the intermediate state */
FLUSH_TYPE_REIMPORT,
};
/* finalization function for hash algorithms */
enum finalization_type {
FINALIZATION_TYPE_FINAL, /* use final() */
FINALIZATION_TYPE_FINUP, /* use finup() */
FINALIZATION_TYPE_DIGEST, /* use digest() */
};
/*
* Whether the crypto operation will occur in-place, and if so whether the
* source and destination scatterlist pointers will coincide (req->src ==
* req->dst), or whether they'll merely point to two separate scatterlists
* (req->src != req->dst) that reference the same underlying memory.
*
* This is only relevant for algorithm types that support in-place operation.
*/
enum inplace_mode {
OUT_OF_PLACE,
INPLACE_ONE_SGLIST,
INPLACE_TWO_SGLISTS,
};
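
/*
 * Why both in-place cases are tested (illustrative reasoning): an
 * implementation that special-cases req->src == req->dst can behave
 * differently from one that walks two distinct scatterlists which
 * happen to alias the same pages, so a bug may show up in only one of
 * the two modes.
 */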
#define TEST_SG_TOTAL 10000
/**
* struct test_sg_division - description of a scatterlist entry
*
* This struct describes one entry of a scatterlist being constructed to check a
* crypto test vector.
*
* @proportion_of_total: length of this chunk relative to the total length,
* given as a proportion out of TEST_SG_TOTAL so that it
* scales to fit any test vector
* @offset: byte offset into a 2-page buffer at which this chunk will start
* @offset_relative_to_alignmask: if true, add the algorithm's alignmask to the
* @offset
* @flush_type: for hashes, whether an update() should be done now vs.
* continuing to accumulate data
* @nosimd: if doing the pending update(), do it with SIMD disabled?
*/
struct test_sg_division {
unsigned int proportion_of_total;
unsigned int offset;
bool offset_relative_to_alignmask;
enum flush_type flush_type;
bool nosimd;
};
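
/*
 * Worked example (illustrative): src_divs = {
 *	{ .proportion_of_total = 5000 },
 *	{ .proportion_of_total = 5000 },
 * } splits a 31-byte message into 16- and 15-byte scatterlist entries;
 * build_test_sglist() rounds each chunk to the nearest byte and adds
 * any remainder to the last entry, so the lengths always sum to the
 * total.
 */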
/**
* struct testvec_config - configuration for testing a crypto test vector
*
* This struct describes the data layout and other parameters with which each
* crypto test vector can be tested.
*
* @name: name of this config, logged for debugging purposes if a test fails
* @inplace_mode: whether and how to operate on the data in-place, if applicable
* @req_flags: extra request_flags, e.g. CRYPTO_TFM_REQ_MAY_SLEEP
* @src_divs: description of how to arrange the source scatterlist
* @dst_divs: description of how to arrange the dst scatterlist, if applicable
* for the algorithm type. Defaults to @src_divs if unset.
* @iv_offset: misalignment of the IV in the range [0..MAX_ALGAPI_ALIGNMASK+1],
* where 0 is aligned to a 2*(MAX_ALGAPI_ALIGNMASK+1) byte boundary
* @iv_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
* the @iv_offset
* @key_offset: misalignment of the key, where 0 is default alignment
* @key_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
* the @key_offset
* @finalization_type: what finalization function to use for hashes
* @nosimd: execute with SIMD disabled? Requires !CRYPTO_TFM_REQ_MAY_SLEEP.
*/
struct testvec_config {
const char *name;
enum inplace_mode inplace_mode;
u32 req_flags;
struct test_sg_division src_divs[XBUFSIZE];
struct test_sg_division dst_divs[XBUFSIZE];
unsigned int iv_offset;
unsigned int key_offset;
bool iv_offset_relative_to_alignmask;
bool key_offset_relative_to_alignmask;
enum finalization_type finalization_type;
bool nosimd;
};
#define TESTVEC_CONFIG_NAMELEN 192
/*
* The following are the lists of testvec_configs to test for each algorithm
* type when the basic crypto self-tests are enabled, i.e. when
* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is unset. They aim to provide good test
* coverage, while keeping the test time much shorter than the full fuzz tests
* so that the basic tests can be enabled in a wider range of circumstances.
*/
/* Configs for skciphers and aeads */
static const struct testvec_config default_cipher_testvec_configs[] = {
{
.name = "in-place (one sglist)",
.inplace_mode = INPLACE_ONE_SGLIST,
.src_divs = { { .proportion_of_total = 10000 } },
}, {
.name = "in-place (two sglists)",
.inplace_mode = INPLACE_TWO_SGLISTS,
.src_divs = { { .proportion_of_total = 10000 } },
}, {
.name = "out-of-place",
.inplace_mode = OUT_OF_PLACE,
.src_divs = { { .proportion_of_total = 10000 } },
}, {
.name = "unaligned buffer, offset=1",
.src_divs = { { .proportion_of_total = 10000, .offset = 1 } },
.iv_offset = 1,
.key_offset = 1,
}, {
.name = "buffer aligned only to alignmask",
.src_divs = {
{
.proportion_of_total = 10000,
.offset = 1,
.offset_relative_to_alignmask = true,
},
},
.iv_offset = 1,
.iv_offset_relative_to_alignmask = true,
.key_offset = 1,
.key_offset_relative_to_alignmask = true,
}, {
.name = "two even aligned splits",
.src_divs = {
{ .proportion_of_total = 5000 },
{ .proportion_of_total = 5000 },
},
}, {
.name = "one src, two even splits dst",
.inplace_mode = OUT_OF_PLACE,
.src_divs = { { .proportion_of_total = 10000 } },
.dst_divs = {
{ .proportion_of_total = 5000 },
{ .proportion_of_total = 5000 },
},
}, {
.name = "uneven misaligned splits, may sleep",
.req_flags = CRYPTO_TFM_REQ_MAY_SLEEP,
.src_divs = {
{ .proportion_of_total = 1900, .offset = 33 },
{ .proportion_of_total = 3300, .offset = 7 },
{ .proportion_of_total = 4800, .offset = 18 },
},
.iv_offset = 3,
.key_offset = 3,
}, {
.name = "misaligned splits crossing pages, inplace",
.inplace_mode = INPLACE_ONE_SGLIST,
.src_divs = {
{
.proportion_of_total = 7500,
.offset = PAGE_SIZE - 32
}, {
.proportion_of_total = 2500,
.offset = PAGE_SIZE - 7
},
},
}
};
static const struct testvec_config default_hash_testvec_configs[] = {
{
.name = "init+update+final aligned buffer",
.src_divs = { { .proportion_of_total = 10000 } },
.finalization_type = FINALIZATION_TYPE_FINAL,
}, {
.name = "init+finup aligned buffer",
.src_divs = { { .proportion_of_total = 10000 } },
.finalization_type = FINALIZATION_TYPE_FINUP,
}, {
.name = "digest aligned buffer",
.src_divs = { { .proportion_of_total = 10000 } },
.finalization_type = FINALIZATION_TYPE_DIGEST,
}, {
.name = "init+update+final misaligned buffer",
.src_divs = { { .proportion_of_total = 10000, .offset = 1 } },
.finalization_type = FINALIZATION_TYPE_FINAL,
.key_offset = 1,
}, {
.name = "digest buffer aligned only to alignmask",
.src_divs = {
{
.proportion_of_total = 10000,
.offset = 1,
.offset_relative_to_alignmask = true,
},
},
.finalization_type = FINALIZATION_TYPE_DIGEST,
.key_offset = 1,
.key_offset_relative_to_alignmask = true,
}, {
.name = "init+update+update+final two even splits",
.src_divs = {
{ .proportion_of_total = 5000 },
{
.proportion_of_total = 5000,
.flush_type = FLUSH_TYPE_FLUSH,
},
},
.finalization_type = FINALIZATION_TYPE_FINAL,
}, {
.name = "digest uneven misaligned splits, may sleep",
.req_flags = CRYPTO_TFM_REQ_MAY_SLEEP,
.src_divs = {
{ .proportion_of_total = 1900, .offset = 33 },
{ .proportion_of_total = 3300, .offset = 7 },
{ .proportion_of_total = 4800, .offset = 18 },
},
.finalization_type = FINALIZATION_TYPE_DIGEST,
}, {
.name = "digest misaligned splits crossing pages",
.src_divs = {
{
.proportion_of_total = 7500,
.offset = PAGE_SIZE - 32,
}, {
.proportion_of_total = 2500,
.offset = PAGE_SIZE - 7,
},
},
.finalization_type = FINALIZATION_TYPE_DIGEST,
}, {
.name = "import/export",
.src_divs = {
{
.proportion_of_total = 6500,
.flush_type = FLUSH_TYPE_REIMPORT,
}, {
.proportion_of_total = 3500,
.flush_type = FLUSH_TYPE_REIMPORT,
},
},
.finalization_type = FINALIZATION_TYPE_FINAL,
}
};
static unsigned int count_test_sg_divisions(const struct test_sg_division *divs)
{
unsigned int remaining = TEST_SG_TOTAL;
unsigned int ndivs = 0;
do {
remaining -= divs[ndivs++].proportion_of_total;
} while (remaining);
return ndivs;
}
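
/*
 * Note: this assumes a valid division array whose proportions sum to
 * exactly TEST_SG_TOTAL (as enforced via valid_testvec_config()); with
 * an invalid array, 'remaining' would wrap around and the loop would
 * read past the end of 'divs'.
 */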
#define SGDIVS_HAVE_FLUSHES BIT(0)
#define SGDIVS_HAVE_NOSIMD BIT(1)
static bool valid_sg_divisions(const struct test_sg_division *divs,
unsigned int count, int *flags_ret)
{
unsigned int total = 0;
unsigned int i;
for (i = 0; i < count && total != TEST_SG_TOTAL; i++) {
if (divs[i].proportion_of_total <= 0 ||
divs[i].proportion_of_total > TEST_SG_TOTAL - total)
return false;
total += divs[i].proportion_of_total;
if (divs[i].flush_type != FLUSH_TYPE_NONE)
*flags_ret |= SGDIVS_HAVE_FLUSHES;
if (divs[i].nosimd)
*flags_ret |= SGDIVS_HAVE_NOSIMD;
}
return total == TEST_SG_TOTAL &&
memchr_inv(&divs[i], 0, (count - i) * sizeof(divs[0])) == NULL;
}
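
/*
 * The trailing memchr_inv() check requires all entries after those that
 * account for TEST_SG_TOTAL to be zero, so a config can't carry extra
 * divisions that would be silently ignored.
 */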
/*
* Check whether the given testvec_config is valid. This isn't strictly needed
* since every testvec_config should be valid, but check anyway so that people
* don't unknowingly add broken configs that don't do what they wanted.
*/
static bool valid_testvec_config(const struct testvec_config *cfg)
{
int flags = 0;
if (cfg->name == NULL)
return false;
if (!valid_sg_divisions(cfg->src_divs, ARRAY_SIZE(cfg->src_divs),
&flags))
return false;
if (cfg->dst_divs[0].proportion_of_total) {
if (!valid_sg_divisions(cfg->dst_divs,
ARRAY_SIZE(cfg->dst_divs), &flags))
return false;
} else {
if (memchr_inv(cfg->dst_divs, 0, sizeof(cfg->dst_divs)))
return false;
/* defaults to dst_divs=src_divs */
}
if (cfg->iv_offset +
(cfg->iv_offset_relative_to_alignmask ? MAX_ALGAPI_ALIGNMASK : 0) >
MAX_ALGAPI_ALIGNMASK + 1)
return false;
if ((flags & (SGDIVS_HAVE_FLUSHES | SGDIVS_HAVE_NOSIMD)) &&
cfg->finalization_type == FINALIZATION_TYPE_DIGEST)
return false;
if ((cfg->nosimd || (flags & SGDIVS_HAVE_NOSIMD)) &&
(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP))
return false;
return true;
}
struct test_sglist {
char *bufs[XBUFSIZE];
struct scatterlist sgl[XBUFSIZE];
struct scatterlist sgl_saved[XBUFSIZE];
struct scatterlist *sgl_ptr;
unsigned int nents;
};
static int init_test_sglist(struct test_sglist *tsgl)
{
return __testmgr_alloc_buf(tsgl->bufs, 1 /* two pages per buffer */);
}
static void destroy_test_sglist(struct test_sglist *tsgl)
{
return __testmgr_free_buf(tsgl->bufs, 1 /* two pages per buffer */);
}
/**
* build_test_sglist() - build a scatterlist for a crypto test
*
* @tsgl: the scatterlist to build. @tsgl->bufs[] contains an array of 2-page
* buffers which the scatterlist @tsgl->sgl[] will be made to point into.
* @divs: the layout specification on which the scatterlist will be based
* @alignmask: the algorithm's alignmask
* @total_len: the total length of the scatterlist to build in bytes
* @data: if non-NULL, the buffers will be filled with this data until it ends.
* Otherwise the buffers will be poisoned. In both cases, some bytes
* past the end of each buffer will be poisoned to help detect overruns.
* @out_divs: if non-NULL, the test_sg_division to which each scatterlist entry
* corresponds will be returned here. This will match @divs except
* that divisions resolving to a length of 0 are omitted as they are
* not included in the scatterlist.
*
* Return: 0 or a -errno value
*/
static int build_test_sglist(struct test_sglist *tsgl,
const struct test_sg_division *divs,
const unsigned int alignmask,
const unsigned int total_len,
struct iov_iter *data,
const struct test_sg_division *out_divs[XBUFSIZE])
{
struct {
const struct test_sg_division *div;
size_t length;
} partitions[XBUFSIZE];
const unsigned int ndivs = count_test_sg_divisions(divs);
unsigned int len_remaining = total_len;
unsigned int i;
BUILD_BUG_ON(ARRAY_SIZE(partitions) != ARRAY_SIZE(tsgl->sgl));
if (WARN_ON(ndivs > ARRAY_SIZE(partitions)))
return -EINVAL;
/* Calculate the (div, length) pairs */
tsgl->nents = 0;
for (i = 0; i < ndivs; i++) {
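		/*
		 * Round to the nearest byte rather than truncating: e.g.
		 * for total_len == 31 and proportion_of_total == 5000,
		 * (31 * 5000 + 5000) / 10000 == 16, not 15.
		 */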
unsigned int len_this_sg =
min(len_remaining,
(total_len * divs[i].proportion_of_total +
TEST_SG_TOTAL / 2) / TEST_SG_TOTAL);
if (len_this_sg != 0) {
partitions[tsgl->nents].div = &divs[i];
partitions[tsgl->nents].length = len_this_sg;
tsgl->nents++;
len_remaining -= len_this_sg;
}
}
if (tsgl->nents == 0) {
partitions[tsgl->nents].div = &divs[0];
partitions[tsgl->nents].length = 0;
tsgl->nents++;
}
partitions[tsgl->nents - 1].length += len_remaining;
/* Set up the sgl entries and fill the data or poison */
sg_init_table(tsgl->sgl, tsgl->nents);
for (i = 0; i < tsgl->nents; i++) {
unsigned int offset = partitions[i].div->offset;
void *addr;
if (partitions[i].div->offset_relative_to_alignmask)
offset += alignmask;
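		/*
		 * If the requested offset would make this chunk plus its
		 * trailing poison overflow the 2-page buffer, halve the
		 * offset until it fits; e.g. with 4K pages, a 6000-byte
		 * chunk at offset PAGE_SIZE - 7 lands at offset 2044.
		 */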
while (offset + partitions[i].length + TESTMGR_POISON_LEN >
2 * PAGE_SIZE) {
if (WARN_ON(offset <= 0))
return -EINVAL;
offset /= 2;
}
addr = &tsgl->bufs[i][offset];
sg_set_buf(&tsgl->sgl[i], addr, partitions[i].length);
if (out_divs)
out_divs[i] = partitions[i].div;
if (data) {
size_t copy_len, copied;
copy_len = min(partitions[i].length, data->count);
copied = copy_from_iter(addr, copy_len, data);
if (WARN_ON(copied != copy_len))
return -EINVAL;
testmgr_poison(addr + copy_len, partitions[i].length +
TESTMGR_POISON_LEN - copy_len);
} else {
testmgr_poison(addr, partitions[i].length +
TESTMGR_POISON_LEN);
}
}
sg_mark_end(&tsgl->sgl[tsgl->nents - 1]);
tsgl->sgl_ptr = tsgl->sgl;
memcpy(tsgl->sgl_saved, tsgl->sgl, tsgl->nents * sizeof(tsgl->sgl[0]));
return 0;
}
/*
* Verify that a scatterlist crypto operation produced the correct output.
*
* @tsgl: scatterlist containing the actual output
* @expected_output: buffer containing the expected output
* @len_to_check: length of @expected_output in bytes
* @unchecked_prefix_len: number of ignored bytes in @tsgl prior to real result
* @check_poison: verify that the poison bytes after each chunk are intact?
*
* Return: 0 if correct, -EINVAL if incorrect, -EOVERFLOW if buffer overrun.
*/
static int verify_correct_output(const struct test_sglist *tsgl,
const char *expected_output,
unsigned int len_to_check,
unsigned int unchecked_prefix_len,
bool check_poison)
{
unsigned int i;
for (i = 0; i < tsgl->nents; i++) {
struct scatterlist *sg = &tsgl->sgl_ptr[i];
unsigned int len = sg->length;
unsigned int offset = sg->offset;
const char *actual_output;
if (unchecked_prefix_len) {
if (unchecked_prefix_len >= len) {
unchecked_prefix_len -= len;
continue;
}
offset += unchecked_prefix_len;
len -= unchecked_prefix_len;
unchecked_prefix_len = 0;
}
len = min(len, len_to_check);
actual_output = page_address(sg_page(sg)) + offset;
if (memcmp(expected_output, actual_output, len) != 0)
return -EINVAL;
if (check_poison &&
!testmgr_is_poison(actual_output + len, TESTMGR_POISON_LEN))
return -EOVERFLOW;
len_to_check -= len;
expected_output += len;
}
if (WARN_ON(len_to_check != 0))
return -EINVAL;
return 0;
}
static bool is_test_sglist_corrupted(const struct test_sglist *tsgl)
{
unsigned int i;
for (i = 0; i < tsgl->nents; i++) {
if (tsgl->sgl[i].page_link != tsgl->sgl_saved[i].page_link)
return true;
if (tsgl->sgl[i].offset != tsgl->sgl_saved[i].offset)
return true;
if (tsgl->sgl[i].length != tsgl->sgl_saved[i].length)
return true;
}
return false;
}
struct cipher_test_sglists {
struct test_sglist src;
struct test_sglist dst;
};
static struct cipher_test_sglists *alloc_cipher_test_sglists(void)
{
struct cipher_test_sglists *tsgls;
tsgls = kmalloc(sizeof(*tsgls), GFP_KERNEL);
if (!tsgls)
return NULL;
if (init_test_sglist(&tsgls->src) != 0)
goto fail_kfree;
if (init_test_sglist(&tsgls->dst) != 0)
goto fail_destroy_src;
return tsgls;
fail_destroy_src:
destroy_test_sglist(&tsgls->src);
fail_kfree:
kfree(tsgls);
return NULL;
}
static void free_cipher_test_sglists(struct cipher_test_sglists *tsgls)
{
if (tsgls) {
destroy_test_sglist(&tsgls->src);
destroy_test_sglist(&tsgls->dst);
kfree(tsgls);
}
}
/* Build the src and dst scatterlists for an skcipher or AEAD test */
static int build_cipher_test_sglists(struct cipher_test_sglists *tsgls,
const struct testvec_config *cfg,
unsigned int alignmask,
unsigned int src_total_len,
unsigned int dst_total_len,
const struct kvec *inputs,
unsigned int nr_inputs)
{
struct iov_iter input;
int err;
iov_iter_kvec(&input, ITER_SOURCE, inputs, nr_inputs, src_total_len);
err = build_test_sglist(&tsgls->src, cfg->src_divs, alignmask,
cfg->inplace_mode != OUT_OF_PLACE ?
max(dst_total_len, src_total_len) :
src_total_len,
&input, NULL);
if (err)
return err;
/*
* In-place crypto operations can use the same scatterlist for both the
* source and destination (req->src == req->dst), or can use separate
* scatterlists (req->src != req->dst) which point to the same
* underlying memory. Make sure to test both cases.
*/
if (cfg->inplace_mode == INPLACE_ONE_SGLIST) {
tsgls->dst.sgl_ptr = tsgls->src.sgl;
tsgls->dst.nents = tsgls->src.nents;
return 0;
}
if (cfg->inplace_mode == INPLACE_TWO_SGLISTS) {
/*
* For now we keep it simple and only test the case where the
* two scatterlists have identical entries, rather than
* different entries that split up the same memory differently.
*/
memcpy(tsgls->dst.sgl, tsgls->src.sgl,
tsgls->src.nents * sizeof(tsgls->src.sgl[0]));
memcpy(tsgls->dst.sgl_saved, tsgls->src.sgl,
tsgls->src.nents * sizeof(tsgls->src.sgl[0]));
tsgls->dst.sgl_ptr = tsgls->dst.sgl;
tsgls->dst.nents = tsgls->src.nents;
return 0;
}
/* Out of place */
return build_test_sglist(&tsgls->dst,
cfg->dst_divs[0].proportion_of_total ?
cfg->dst_divs : cfg->src_divs,
alignmask, dst_total_len, NULL, NULL);
}
/*
* Support for testing passing a misaligned key to setkey():
*
* If cfg->key_offset is set, copy the key into a new buffer at that offset,
* optionally adding alignmask. Else, just use the key directly.
*/
static int prepare_keybuf(const u8 *key, unsigned int ksize,
const struct testvec_config *cfg,
unsigned int alignmask,
const u8 **keybuf_ret, const u8 **keyptr_ret)
{
unsigned int key_offset = cfg->key_offset;
u8 *keybuf = NULL, *keyptr = (u8 *)key;
if (key_offset != 0) {
if (cfg->key_offset_relative_to_alignmask)
key_offset += alignmask;
keybuf = kmalloc(key_offset + ksize, GFP_KERNEL);
if (!keybuf)
return -ENOMEM;
keyptr = keybuf + key_offset;
memcpy(keyptr, key, ksize);
}
*keybuf_ret = keybuf;
*keyptr_ret = keyptr;
return 0;
}
/* Like setkey_f(tfm, key, ksize), but sometimes misalign the key */
#define do_setkey(setkey_f, tfm, key, ksize, cfg, alignmask) \
({ \
const u8 *keybuf, *keyptr; \
int err; \
\
err = prepare_keybuf((key), (ksize), (cfg), (alignmask), \
&keybuf, &keyptr); \
if (err == 0) { \
err = setkey_f((tfm), keyptr, (ksize)); \
kfree(keybuf); \
} \
err; \
})
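
/*
 * Example (illustrative): the hash tests call
 * do_setkey(crypto_shash_setkey, tfm, vec->key, vec->ksize, cfg,
 * alignmask) instead of calling crypto_shash_setkey() directly, so that
 * configs with a nonzero key_offset also exercise misaligned key
 * buffers.
 */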
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
/*
* The fuzz tests use prandom instead of the normal Linux RNG since they don't
* need cryptographically secure random numbers. This greatly improves the
* performance of these tests, especially if they are run before the Linux RNG
* has been initialized or if they are run on a lockdep-enabled kernel.
*/
static inline void init_rnd_state(struct rnd_state *rng)
{
prandom_seed_state(rng, get_random_u64());
}
static inline u8 prandom_u8(struct rnd_state *rng)
{
return prandom_u32_state(rng);
}
static inline u32 prandom_u32_below(struct rnd_state *rng, u32 ceil)
{
/*
* This is slightly biased for non-power-of-2 values of 'ceil', but this
* isn't important here.
*/
return prandom_u32_state(rng) % ceil;
}
static inline bool prandom_bool(struct rnd_state *rng)
{
return prandom_u32_below(rng, 2);
}
static inline u32 prandom_u32_inclusive(struct rnd_state *rng,
u32 floor, u32 ceil)
{
return floor + prandom_u32_below(rng, ceil - floor + 1);
}
/* Generate a random length in range [0, max_len], but prefer smaller values */
static unsigned int generate_random_length(struct rnd_state *rng,
unsigned int max_len)
{
unsigned int len = prandom_u32_below(rng, max_len + 1);
switch (prandom_u32_below(rng, 4)) {
case 0:
return len % 64;
case 1:
return len % 256;
case 2:
return len % 1024;
default:
return len;
}
}
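
/*
 * Resulting distribution (illustrative): with probability 3/4 the
 * uniform draw from [0, max_len] is reduced mod 64, 256, or 1024, so
 * short lengths are heavily over-represented while the full range
 * remains reachable through the final case.
 */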
/* Flip a random bit in the given nonempty data buffer */
static void flip_random_bit(struct rnd_state *rng, u8 *buf, size_t size)
{
size_t bitpos;
bitpos = prandom_u32_below(rng, size * 8);
buf[bitpos / 8] ^= 1 << (bitpos % 8);
}
/* Flip a random byte in the given nonempty data buffer */
static void flip_random_byte(struct rnd_state *rng, u8 *buf, size_t size)
{
buf[prandom_u32_below(rng, size)] ^= 0xff;
}
/* Sometimes make some random changes to the given nonempty data buffer */
static void mutate_buffer(struct rnd_state *rng, u8 *buf, size_t size)
{
size_t num_flips;
size_t i;
/* Sometimes flip some bits */
if (prandom_u32_below(rng, 4) == 0) {
num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8),
size * 8);
for (i = 0; i < num_flips; i++)
flip_random_bit(rng, buf, size);
}
/* Sometimes flip some bytes */
if (prandom_u32_below(rng, 4) == 0) {
num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8), size);
for (i = 0; i < num_flips; i++)
flip_random_byte(rng, buf, size);
}
}
/* Randomly generate 'count' bytes, but sometimes make them "interesting" */
static void generate_random_bytes(struct rnd_state *rng, u8 *buf, size_t count)
{
u8 b;
u8 increment;
size_t i;
if (count == 0)
return;
switch (prandom_u32_below(rng, 8)) { /* Choose a generation strategy */
case 0:
case 1:
/* All the same byte, plus optional mutations */
switch (prandom_u32_below(rng, 4)) {
case 0:
b = 0x00;
break;
case 1:
b = 0xff;
break;
default:
b = prandom_u8(rng);
break;
}
memset(buf, b, count);
mutate_buffer(rng, buf, count);
break;
case 2:
/* Ascending or descending bytes, plus optional mutations */
increment = prandom_u8(rng);
b = prandom_u8(rng);
for (i = 0; i < count; i++, b += increment)
buf[i] = b;
mutate_buffer(rng, buf, count);
break;
default:
/* Fully random bytes */
prandom_bytes_state(rng, buf, count);
}
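
/*
 * Strategy mix (illustrative): 2/8 of the time the buffer is a single
 * repeated byte, 1/8 of the time an arithmetic byte sequence, and 5/8
 * of the time fully random bytes; the first two are then sometimes
 * mutated to create "almost structured" inputs.
 */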
}
static char *generate_random_sgl_divisions(struct rnd_state *rng,
struct test_sg_division *divs,
size_t max_divs, char *p, char *end,
bool gen_flushes, u32 req_flags)
{
struct test_sg_division *div = divs;
unsigned int remaining = TEST_SG_TOTAL;
do {
unsigned int this_len;
const char *flushtype_str;
if (div == &divs[max_divs - 1] || prandom_bool(rng))
this_len = remaining;
else
this_len = prandom_u32_inclusive(rng, 1, remaining);
div->proportion_of_total = this_len;
if (prandom_u32_below(rng, 4) == 0)
div->offset = prandom_u32_inclusive(rng,
PAGE_SIZE - 128,
PAGE_SIZE - 1);
else if (prandom_bool(rng))
div->offset = prandom_u32_below(rng, 32);
else
div->offset = prandom_u32_below(rng, PAGE_SIZE);
if (prandom_u32_below(rng, 8) == 0)
div->offset_relative_to_alignmask = true;
div->flush_type = FLUSH_TYPE_NONE;
if (gen_flushes) {
switch (prandom_u32_below(rng, 4)) {
case 0:
div->flush_type = FLUSH_TYPE_REIMPORT;
break;
case 1:
div->flush_type = FLUSH_TYPE_FLUSH;
break;
}
}
if (div->flush_type != FLUSH_TYPE_NONE &&
!(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
prandom_bool(rng))
div->nosimd = true;
switch (div->flush_type) {
case FLUSH_TYPE_FLUSH:
if (div->nosimd)
flushtype_str = "<flush,nosimd>";
else
flushtype_str = "<flush>";
break;
case FLUSH_TYPE_REIMPORT:
if (div->nosimd)
flushtype_str = "<reimport,nosimd>";
else
flushtype_str = "<reimport>";
break;
default:
flushtype_str = "";
break;
}
BUILD_BUG_ON(TEST_SG_TOTAL != 10000); /* for "%u.%u%%" */
p += scnprintf(p, end - p, "%s%u.%u%%@%s+%u%s", flushtype_str,
this_len / 100, this_len % 100,
div->offset_relative_to_alignmask ?
"alignmask" : "",
div->offset, this_len == remaining ? "" : ", ");
remaining -= this_len;
div++;
} while (remaining);
return p;
}
/* Generate a random testvec_config for fuzz testing */
static void generate_random_testvec_config(struct rnd_state *rng,
struct testvec_config *cfg,
char *name, size_t max_namelen)
{
char *p = name;
char * const end = name + max_namelen;
memset(cfg, 0, sizeof(*cfg));
cfg->name = name;
p += scnprintf(p, end - p, "random:");
switch (prandom_u32_below(rng, 4)) {
case 0:
case 1:
cfg->inplace_mode = OUT_OF_PLACE;
break;
case 2:
cfg->inplace_mode = INPLACE_ONE_SGLIST;
p += scnprintf(p, end - p, " inplace_one_sglist");
break;
default:
cfg->inplace_mode = INPLACE_TWO_SGLISTS;
p += scnprintf(p, end - p, " inplace_two_sglists");
break;
}
if (prandom_bool(rng)) {
cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
p += scnprintf(p, end - p, " may_sleep");
}
switch (prandom_u32_below(rng, 4)) {
case 0:
cfg->finalization_type = FINALIZATION_TYPE_FINAL;
p += scnprintf(p, end - p, " use_final");
break;
case 1:
cfg->finalization_type = FINALIZATION_TYPE_FINUP;
p += scnprintf(p, end - p, " use_finup");
break;
default:
cfg->finalization_type = FINALIZATION_TYPE_DIGEST;
p += scnprintf(p, end - p, " use_digest");
break;
}
if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) && prandom_bool(rng)) {
cfg->nosimd = true;
p += scnprintf(p, end - p, " nosimd");
}
p += scnprintf(p, end - p, " src_divs=[");
p = generate_random_sgl_divisions(rng, cfg->src_divs,
ARRAY_SIZE(cfg->src_divs), p, end,
(cfg->finalization_type !=
FINALIZATION_TYPE_DIGEST),
cfg->req_flags);
p += scnprintf(p, end - p, "]");
if (cfg->inplace_mode == OUT_OF_PLACE && prandom_bool(rng)) {
p += scnprintf(p, end - p, " dst_divs=[");
p = generate_random_sgl_divisions(rng, cfg->dst_divs,
ARRAY_SIZE(cfg->dst_divs),
p, end, false,
cfg->req_flags);
p += scnprintf(p, end - p, "]");
}
if (prandom_bool(rng)) {
cfg->iv_offset = prandom_u32_inclusive(rng, 1,
MAX_ALGAPI_ALIGNMASK);
p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
}
if (prandom_bool(rng)) {
cfg->key_offset = prandom_u32_inclusive(rng, 1,
MAX_ALGAPI_ALIGNMASK);
p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset);
}
WARN_ON_ONCE(!valid_testvec_config(cfg));
}
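
/*
 * A generated config name might look like (illustrative):
 * "random: may_sleep use_digest src_divs=[100.0%@+18] iv_offset=3",
 * which is printed on failure and is usually enough to understand and
 * reproduce a failing fuzz configuration.
 */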
static void crypto_disable_simd_for_test(void)
{
migrate_disable();
__this_cpu_write(crypto_simd_disabled_for_test, true);
}
static void crypto_reenable_simd_for_test(void)
{
__this_cpu_write(crypto_simd_disabled_for_test, false);
migrate_enable();
}
/*
* Given an algorithm name, build the name of the generic implementation of that
* algorithm, assuming the usual naming convention. Specifically, this appends
* "-generic" to every part of the name that is not a template name. Examples:
*
* aes => aes-generic
* cbc(aes) => cbc(aes-generic)
* cts(cbc(aes)) => cts(cbc(aes-generic))
* rfc7539(chacha20,poly1305) => rfc7539(chacha20-generic,poly1305-generic)
*
* Return: 0 on success, or -ENAMETOOLONG if the generic name would be too long
*/
static int build_generic_driver_name(const char *algname,
char driver_name[CRYPTO_MAX_ALG_NAME])
{
const char *in = algname;
char *out = driver_name;
size_t len = strlen(algname);
if (len >= CRYPTO_MAX_ALG_NAME)
goto too_long;
do {
const char *in_saved = in;
while (*in && *in != '(' && *in != ')' && *in != ',')
*out++ = *in++;
if (*in != '(' && in > in_saved) {
len += 8;
if (len >= CRYPTO_MAX_ALG_NAME)
goto too_long;
memcpy(out, "-generic", 8);
out += 8;
}
} while ((*out++ = *in++) != '\0');
return 0;
too_long:
pr_err("alg: generic driver name for \"%s\" would be too long\n",
algname);
return -ENAMETOOLONG;
}
#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static void crypto_disable_simd_for_test(void)
{
}
static void crypto_reenable_simd_for_test(void)
{
}
#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int build_hash_sglist(struct test_sglist *tsgl,
const struct hash_testvec *vec,
const struct testvec_config *cfg,
unsigned int alignmask,
const struct test_sg_division *divs[XBUFSIZE])
{
struct kvec kv;
struct iov_iter input;
kv.iov_base = (void *)vec->plaintext;
kv.iov_len = vec->psize;
iov_iter_kvec(&input, ITER_SOURCE, &kv, 1, vec->psize);
return build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize,
&input, divs);
}
static int check_hash_result(const char *type,
const u8 *result, unsigned int digestsize,
const struct hash_testvec *vec,
const char *vec_name,
const char *driver,
const struct testvec_config *cfg)
{
if (memcmp(result, vec->digest, digestsize) != 0) {
pr_err("alg: %s: %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
type, driver, vec_name, cfg->name);
return -EINVAL;
}
if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) {
pr_err("alg: %s: %s overran result buffer on test vector %s, cfg=\"%s\"\n",
type, driver, vec_name, cfg->name);
return -EOVERFLOW;
}
return 0;
}
static inline int check_shash_op(const char *op, int err,
const char *driver, const char *vec_name,
const struct testvec_config *cfg)
{
if (err)
pr_err("alg: shash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n",
driver, op, err, vec_name, cfg->name);
return err;
}
/* Test one hash test vector in one configuration, using the shash API */
static int test_shash_vec_cfg(const struct hash_testvec *vec,
const char *vec_name,
const struct testvec_config *cfg,
struct shash_desc *desc,
struct test_sglist *tsgl,
u8 *hashstate)
{
struct crypto_shash *tfm = desc->tfm;
const unsigned int alignmask = crypto_shash_alignmask(tfm);
const unsigned int digestsize = crypto_shash_digestsize(tfm);
const unsigned int statesize = crypto_shash_statesize(tfm);
const char *driver = crypto_shash_driver_name(tfm);
const struct test_sg_division *divs[XBUFSIZE];
unsigned int i;
u8 result[HASH_MAX_DIGESTSIZE + TESTMGR_POISON_LEN];
int err;
/* Set the key, if specified */
if (vec->ksize) {
err = do_setkey(crypto_shash_setkey, tfm, vec->key, vec->ksize,
cfg, alignmask);
if (err) {
if (err == vec->setkey_error)
return 0;
pr_err("alg: shash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
driver, vec_name, vec->setkey_error, err,
crypto_shash_get_flags(tfm));
return err;
}
if (vec->setkey_error) {
pr_err("alg: shash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
driver, vec_name, vec->setkey_error);
return -EINVAL;
}
}
/* Build the scatterlist for the source data */
err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs);
if (err) {
pr_err("alg: shash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n",
driver, vec_name, cfg->name);
return err;
}
/* Do the actual hashing */
testmgr_poison(desc->__ctx, crypto_shash_descsize(tfm));
testmgr_poison(result, digestsize + TESTMGR_POISON_LEN);
if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST ||
vec->digest_error) {
/* Just using digest() */
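		/*
		 * The shash digest() API takes one virtually-contiguous
		 * buffer, so configs that split the data across several
		 * scatterlist entries can't be run here; skip them
		 * rather than fail.
		 */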
if (tsgl->nents != 1)
return 0;
if (cfg->nosimd)
crypto_disable_simd_for_test();
err = crypto_shash_digest(desc, sg_virt(&tsgl->sgl[0]),
tsgl->sgl[0].length, result);
if (cfg->nosimd)
crypto_reenable_simd_for_test();
if (err) {
if (err == vec->digest_error)
return 0;
pr_err("alg: shash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
driver, vec_name, vec->digest_error, err,
cfg->name);
return err;
}
if (vec->digest_error) {
pr_err("alg: shash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
driver, vec_name, vec->digest_error, cfg->name);
return -EINVAL;
}
goto result_ready;
}
/* Using init(), zero or more update(), then final() or finup() */
if (cfg->nosimd)
crypto_disable_simd_for_test();
err = crypto_shash_init(desc);
if (cfg->nosimd)
crypto_reenable_simd_for_test();
err = check_shash_op("init", err, driver, vec_name, cfg);
if (err)
return err;
for (i = 0; i < tsgl->nents; i++) {
if (i + 1 == tsgl->nents &&
cfg->finalization_type == FINALIZATION_TYPE_FINUP) {
if (divs[i]->nosimd)
crypto_disable_simd_for_test();
err = crypto_shash_finup(desc, sg_virt(&tsgl->sgl[i]),
tsgl->sgl[i].length, result);
if (divs[i]->nosimd)
crypto_reenable_simd_for_test();
err = check_shash_op("finup", err, driver, vec_name,
cfg);
if (err)
return err;
goto result_ready;
}
if (divs[i]->nosimd)
crypto_disable_simd_for_test();
err = crypto_shash_update(desc, sg_virt(&tsgl->sgl[i]),
tsgl->sgl[i].length);
if (divs[i]->nosimd)
crypto_reenable_simd_for_test();
err = check_shash_op("update", err, driver, vec_name, cfg);
if (err)
return err;
if (divs[i]->flush_type == FLUSH_TYPE_REIMPORT) {
/* Test ->export() and ->import() */
testmgr_poison(hashstate + statesize,
TESTMGR_POISON_LEN);
err = crypto_shash_export(desc, hashstate);
err = check_shash_op("export", err, driver, vec_name,
cfg);
if (err)
return err;
if (!testmgr_is_poison(hashstate + statesize,
TESTMGR_POISON_LEN)) {
pr_err("alg: shash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n",
driver, vec_name, cfg->name);
return -EOVERFLOW;
}
testmgr_poison(desc->__ctx, crypto_shash_descsize(tfm));
err = crypto_shash_import(desc, hashstate);
err = check_shash_op("import", err, driver, vec_name,
cfg);
if (err)
return err;
}
}
if (cfg->nosimd)
crypto_disable_simd_for_test();
err = crypto_shash_final(desc, result);
if (cfg->nosimd)
crypto_reenable_simd_for_test();
err = check_shash_op("final", err, driver, vec_name, cfg);
if (err)
return err;
result_ready:
return check_hash_result("shash", result, digestsize, vec, vec_name,
driver, cfg);
}
static int do_ahash_op(int (*op)(struct ahash_request *req),
struct ahash_request *req,
struct crypto_wait *wait, bool nosimd)
{
int err;
if (nosimd)
crypto_disable_simd_for_test();
err = op(req);
if (nosimd)
crypto_reenable_simd_for_test();
return crypto_wait_req(err, wait);
}
static int check_nonfinal_ahash_op(const char *op, int err,
u8 *result, unsigned int digestsize,
const char *driver, const char *vec_name,
const struct testvec_config *cfg)
{
if (err) {
pr_err("alg: ahash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n",
driver, op, err, vec_name, cfg->name);
return err;
}
if (!testmgr_is_poison(result, digestsize)) {
pr_err("alg: ahash: %s %s() used result buffer on test vector %s, cfg=\"%s\"\n",
driver, op, vec_name, cfg->name);
return -EINVAL;
}
return 0;
}
/* Test one hash test vector in one configuration, using the ahash API */
static int test_ahash_vec_cfg(const struct hash_testvec *vec,
const char *vec_name,
const struct testvec_config *cfg,
struct ahash_request *req,
struct test_sglist *tsgl,
u8 *hashstate)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
const unsigned int alignmask = crypto_ahash_alignmask(tfm);
const unsigned int digestsize = crypto_ahash_digestsize(tfm);
const unsigned int statesize = crypto_ahash_statesize(tfm);
const char *driver = crypto_ahash_driver_name(tfm);
const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
const struct test_sg_division *divs[XBUFSIZE];
DECLARE_CRYPTO_WAIT(wait);
unsigned int i;
struct scatterlist *pending_sgl;
unsigned int pending_len;
u8 result[HASH_MAX_DIGESTSIZE + TESTMGR_POISON_LEN];
int err;
/* Set the key, if specified */
if (vec->ksize) {
err = do_setkey(crypto_ahash_setkey, tfm, vec->key, vec->ksize,
cfg, alignmask);
if (err) {
if (err == vec->setkey_error)
return 0;
pr_err("alg: ahash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
driver, vec_name, vec->setkey_error, err,
crypto_ahash_get_flags(tfm));
return err;
}
if (vec->setkey_error) {
pr_err("alg: ahash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
driver, vec_name, vec->setkey_error);
return -EINVAL;
}
}
/* Build the scatterlist for the source data */
err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs);
if (err) {
pr_err("alg: ahash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n",
driver, vec_name, cfg->name);
return err;
}
/* Do the actual hashing */
testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
testmgr_poison(result, digestsize + TESTMGR_POISON_LEN);
if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST ||
vec->digest_error) {
/* Just using digest() */
ahash_request_set_callback(req, req_flags, crypto_req_done,
&wait);
ahash_request_set_crypt(req, tsgl->sgl, result, vec->psize);
err = do_ahash_op(crypto_ahash_digest, req, &wait, cfg->nosimd);
if (err) {
if (err == vec->digest_error)
return 0;
pr_err("alg: ahash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
driver, vec_name, vec->digest_error, err,
cfg->name);
return err;
}
if (vec->digest_error) {
pr_err("alg: ahash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
driver, vec_name, vec->digest_error, cfg->name);
return -EINVAL;
}
goto result_ready;
}
/* Using init(), zero or more update(), then final() or finup() */
ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
ahash_request_set_crypt(req, NULL, result, 0);
err = do_ahash_op(crypto_ahash_init, req, &wait, cfg->nosimd);
err = check_nonfinal_ahash_op("init", err, result, digestsize,
driver, vec_name, cfg);
if (err)
return err;
pending_sgl = NULL;
pending_len = 0;
for (i = 0; i < tsgl->nents; i++) {
if (divs[i]->flush_type != FLUSH_TYPE_NONE &&
pending_sgl != NULL) {
/* update() with the pending data */
ahash_request_set_callback(req, req_flags,
crypto_req_done, &wait);
ahash_request_set_crypt(req, pending_sgl, result,
pending_len);
err = do_ahash_op(crypto_ahash_update, req, &wait,
divs[i]->nosimd);
err = check_nonfinal_ahash_op("update", err,
result, digestsize,
driver, vec_name, cfg);
if (err)
return err;
pending_sgl = NULL;
pending_len = 0;
}
if (divs[i]->flush_type == FLUSH_TYPE_REIMPORT) {
/* Test ->export() and ->import() */
testmgr_poison(hashstate + statesize,
TESTMGR_POISON_LEN);
err = crypto_ahash_export(req, hashstate);
err = check_nonfinal_ahash_op("export", err,
result, digestsize,
driver, vec_name, cfg);
if (err)
return err;
if (!testmgr_is_poison(hashstate + statesize,
TESTMGR_POISON_LEN)) {
pr_err("alg: ahash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n",
driver, vec_name, cfg->name);
return -EOVERFLOW;
}
testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
err = crypto_ahash_import(req, hashstate);
err = check_nonfinal_ahash_op("import", err,
result, digestsize,
driver, vec_name, cfg);
if (err)
return err;
}
if (pending_sgl == NULL)
pending_sgl = &tsgl->sgl[i];
pending_len += tsgl->sgl[i].length;
}
ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
ahash_request_set_crypt(req, pending_sgl, result, pending_len);
if (cfg->finalization_type == FINALIZATION_TYPE_FINAL) {
/* finish with update() and final() */
err = do_ahash_op(crypto_ahash_update, req, &wait, cfg->nosimd);
err = check_nonfinal_ahash_op("update", err, result, digestsize,
driver, vec_name, cfg);
if (err)
return err;
err = do_ahash_op(crypto_ahash_final, req, &wait, cfg->nosimd);
if (err) {
pr_err("alg: ahash: %s final() failed with err %d on test vector %s, cfg=\"%s\"\n",
driver, err, vec_name, cfg->name);
return err;
}
} else {
/* finish with finup() */
err = do_ahash_op(crypto_ahash_finup, req, &wait, cfg->nosimd);
if (err) {
pr_err("alg: ahash: %s finup() failed with err %d on test vector %s, cfg=\"%s\"\n",
driver, err, vec_name, cfg->name);
return err;
}
}
result_ready:
return check_hash_result("ahash", result, digestsize, vec, vec_name,
driver, cfg);
}
static int test_hash_vec_cfg(const struct hash_testvec *vec,
const char *vec_name,
const struct testvec_config *cfg,
struct ahash_request *req,
struct shash_desc *desc,
struct test_sglist *tsgl,
u8 *hashstate)
{
int err;
	/*
	 * For algorithms implemented as "shash", most bugs will be detected by
	 * both the shash and ahash tests. Test the shash API first so that
	 * failures involve less indirection and are therefore easier to debug.
	 */
if (desc) {
err = test_shash_vec_cfg(vec, vec_name, cfg, desc, tsgl,
hashstate);
if (err)
return err;
}
return test_ahash_vec_cfg(vec, vec_name, cfg, req, tsgl, hashstate);
}
static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
struct ahash_request *req, struct shash_desc *desc,
struct test_sglist *tsgl, u8 *hashstate)
{
char vec_name[16];
unsigned int i;
int err;
sprintf(vec_name, "%u", vec_num);
for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) {
err = test_hash_vec_cfg(vec, vec_name,
&default_hash_testvec_configs[i],
req, desc, tsgl, hashstate);
if (err)
return err;
}
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
if (!noextratests) {
struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
init_rnd_state(&rng);
for (i = 0; i < fuzz_iterations; i++) {
generate_random_testvec_config(&rng, &cfg, cfgname,
sizeof(cfgname));
err = test_hash_vec_cfg(vec, vec_name, &cfg,
req, desc, tsgl, hashstate);
if (err)
return err;
cond_resched();
}
}
#endif
return 0;
}
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
/*
* Generate a hash test vector from the given implementation.
* Assumes the buffers in 'vec' were already allocated.
*/
static void generate_random_hash_testvec(struct rnd_state *rng,
struct shash_desc *desc,
struct hash_testvec *vec,
unsigned int maxkeysize,
unsigned int maxdatasize,
char *name, size_t max_namelen)
{
/* Data */
vec->psize = generate_random_length(rng, maxdatasize);
generate_random_bytes(rng, (u8 *)vec->plaintext, vec->psize);
	/*
	 * Key: length in range [1, maxkeysize], but usually choose maxkeysize.
	 * If the algorithm is unkeyed, then maxkeysize == 0 and ksize stays 0.
	 */
vec->setkey_error = 0;
vec->ksize = 0;
if (maxkeysize) {
vec->ksize = maxkeysize;
if (prandom_u32_below(rng, 4) == 0)
vec->ksize = prandom_u32_inclusive(rng, 1, maxkeysize);
generate_random_bytes(rng, (u8 *)vec->key, vec->ksize);
vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
vec->ksize);
/* If the key couldn't be set, no need to continue to digest. */
if (vec->setkey_error)
goto done;
}
/* Digest */
vec->digest_error = crypto_shash_digest(desc, vec->plaintext,
vec->psize, (u8 *)vec->digest);
done:
snprintf(name, max_namelen, "\"random: psize=%u ksize=%u\"",
vec->psize, vec->ksize);
}
/*
* Test the hash algorithm represented by @req against the corresponding generic
* implementation, if one is available.
*/
static int test_hash_vs_generic_impl(const char *generic_driver,
unsigned int maxkeysize,
struct ahash_request *req,
struct shash_desc *desc,
struct test_sglist *tsgl,
u8 *hashstate)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
const unsigned int digestsize = crypto_ahash_digestsize(tfm);
const unsigned int blocksize = crypto_ahash_blocksize(tfm);
const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
const char *algname = crypto_hash_alg_common(tfm)->base.cra_name;
const char *driver = crypto_ahash_driver_name(tfm);
struct rnd_state rng;
char _generic_driver[CRYPTO_MAX_ALG_NAME];
struct crypto_shash *generic_tfm = NULL;
struct shash_desc *generic_desc = NULL;
unsigned int i;
struct hash_testvec vec = { 0 };
char vec_name[64];
struct testvec_config *cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
int err;
if (noextratests)
return 0;
init_rnd_state(&rng);
if (!generic_driver) { /* Use default naming convention? */
err = build_generic_driver_name(algname, _generic_driver);
if (err)
return err;
generic_driver = _generic_driver;
}
if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
return 0;
generic_tfm = crypto_alloc_shash(generic_driver, 0, 0);
if (IS_ERR(generic_tfm)) {
err = PTR_ERR(generic_tfm);
if (err == -ENOENT) {
pr_warn("alg: hash: skipping comparison tests for %s because %s is unavailable\n",
driver, generic_driver);
return 0;
}
pr_err("alg: hash: error allocating %s (generic impl of %s): %d\n",
generic_driver, algname, err);
return err;
}
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg) {
err = -ENOMEM;
goto out;
}
generic_desc = kzalloc(sizeof(*desc) +
crypto_shash_descsize(generic_tfm), GFP_KERNEL);
if (!generic_desc) {
err = -ENOMEM;
goto out;
}
generic_desc->tfm = generic_tfm;
/* Check the algorithm properties for consistency. */
if (digestsize != crypto_shash_digestsize(generic_tfm)) {
pr_err("alg: hash: digestsize for %s (%u) doesn't match generic impl (%u)\n",
driver, digestsize,
crypto_shash_digestsize(generic_tfm));
err = -EINVAL;
goto out;
}
if (blocksize != crypto_shash_blocksize(generic_tfm)) {
pr_err("alg: hash: blocksize for %s (%u) doesn't match generic impl (%u)\n",
driver, blocksize, crypto_shash_blocksize(generic_tfm));
err = -EINVAL;
goto out;
}
/*
* Now generate test vectors using the generic implementation, and test
* the other implementation against them.
*/
vec.key = kmalloc(maxkeysize, GFP_KERNEL);
vec.plaintext = kmalloc(maxdatasize, GFP_KERNEL);
vec.digest = kmalloc(digestsize, GFP_KERNEL);
if (!vec.key || !vec.plaintext || !vec.digest) {
err = -ENOMEM;
goto out;
}
for (i = 0; i < fuzz_iterations * 8; i++) {
generate_random_hash_testvec(&rng, generic_desc, &vec,
maxkeysize, maxdatasize,
vec_name, sizeof(vec_name));
generate_random_testvec_config(&rng, cfg, cfgname,
sizeof(cfgname));
err = test_hash_vec_cfg(&vec, vec_name, cfg,
req, desc, tsgl, hashstate);
if (err)
goto out;
cond_resched();
}
err = 0;
out:
kfree(cfg);
kfree(vec.key);
kfree(vec.plaintext);
kfree(vec.digest);
crypto_free_shash(generic_tfm);
kfree_sensitive(generic_desc);
return err;
}
#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_hash_vs_generic_impl(const char *generic_driver,
unsigned int maxkeysize,
struct ahash_request *req,
struct shash_desc *desc,
struct test_sglist *tsgl,
u8 *hashstate)
{
return 0;
}
#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int alloc_shash(const char *driver, u32 type, u32 mask,
struct crypto_shash **tfm_ret,
struct shash_desc **desc_ret)
{
struct crypto_shash *tfm;
struct shash_desc *desc;
tfm = crypto_alloc_shash(driver, type, mask);
if (IS_ERR(tfm)) {
if (PTR_ERR(tfm) == -ENOENT) {
/*
* This algorithm is only available through the ahash
* API, not the shash API, so skip the shash tests.
*/
return 0;
}
pr_err("alg: hash: failed to allocate shash transform for %s: %ld\n",
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
}
desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
if (!desc) {
crypto_free_shash(tfm);
return -ENOMEM;
}
desc->tfm = tfm;
*tfm_ret = tfm;
*desc_ret = desc;
return 0;
}
static int __alg_test_hash(const struct hash_testvec *vecs,
unsigned int num_vecs, const char *driver,
u32 type, u32 mask,
const char *generic_driver, unsigned int maxkeysize)
{
struct crypto_ahash *atfm = NULL;
struct ahash_request *req = NULL;
struct crypto_shash *stfm = NULL;
struct shash_desc *desc = NULL;
struct test_sglist *tsgl = NULL;
u8 *hashstate = NULL;
unsigned int statesize;
unsigned int i;
int err;
/*
* Always test the ahash API. This works regardless of whether the
* algorithm is implemented as ahash or shash.
*/
atfm = crypto_alloc_ahash(driver, type, mask);
if (IS_ERR(atfm)) {
pr_err("alg: hash: failed to allocate transform for %s: %ld\n",
driver, PTR_ERR(atfm));
return PTR_ERR(atfm);
}
driver = crypto_ahash_driver_name(atfm);
req = ahash_request_alloc(atfm, GFP_KERNEL);
if (!req) {
pr_err("alg: hash: failed to allocate request for %s\n",
driver);
err = -ENOMEM;
goto out;
}
	/*
	 * If available, also test the shash API to cover corner cases that may
	 * be missed by testing the ahash API only.
	 */
err = alloc_shash(driver, type, mask, &stfm, &desc);
if (err)
goto out;
tsgl = kmalloc(sizeof(*tsgl), GFP_KERNEL);
if (!tsgl || init_test_sglist(tsgl) != 0) {
pr_err("alg: hash: failed to allocate test buffers for %s\n",
driver);
kfree(tsgl);
tsgl = NULL;
err = -ENOMEM;
goto out;
}
statesize = crypto_ahash_statesize(atfm);
if (stfm)
statesize = max(statesize, crypto_shash_statesize(stfm));
hashstate = kmalloc(statesize + TESTMGR_POISON_LEN, GFP_KERNEL);
if (!hashstate) {
pr_err("alg: hash: failed to allocate hash state buffer for %s\n",
driver);
err = -ENOMEM;
goto out;
}
for (i = 0; i < num_vecs; i++) {
if (fips_enabled && vecs[i].fips_skip)
continue;
err = test_hash_vec(&vecs[i], i, req, desc, tsgl, hashstate);
if (err)
goto out;
cond_resched();
}
err = test_hash_vs_generic_impl(generic_driver, maxkeysize, req,
desc, tsgl, hashstate);
out:
kfree(hashstate);
if (tsgl) {
destroy_test_sglist(tsgl);
kfree(tsgl);
}
kfree(desc);
crypto_free_shash(stfm);
ahash_request_free(req);
crypto_free_ahash(atfm);
return err;
}
static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask)
{
const struct hash_testvec *template = desc->suite.hash.vecs;
unsigned int tcount = desc->suite.hash.count;
unsigned int nr_unkeyed, nr_keyed;
unsigned int maxkeysize = 0;
int err;
/*
* For OPTIONAL_KEY algorithms, we have to do all the unkeyed tests
* first, before setting a key on the tfm. To make this easier, we
* require that the unkeyed test vectors (if any) are listed first.
*/
for (nr_unkeyed = 0; nr_unkeyed < tcount; nr_unkeyed++) {
if (template[nr_unkeyed].ksize)
break;
}
for (nr_keyed = 0; nr_unkeyed + nr_keyed < tcount; nr_keyed++) {
if (!template[nr_unkeyed + nr_keyed].ksize) {
pr_err("alg: hash: test vectors for %s out of order, "
"unkeyed ones must come first\n", desc->alg);
return -EINVAL;
}
maxkeysize = max_t(unsigned int, maxkeysize,
template[nr_unkeyed + nr_keyed].ksize);
}
err = 0;
if (nr_unkeyed) {
err = __alg_test_hash(template, nr_unkeyed, driver, type, mask,
desc->generic_driver, maxkeysize);
template += nr_unkeyed;
}
if (!err && nr_keyed)
err = __alg_test_hash(template, nr_keyed, driver, type, mask,
desc->generic_driver, maxkeysize);
return err;
}
static int test_aead_vec_cfg(int enc, const struct aead_testvec *vec,
const char *vec_name,
const struct testvec_config *cfg,
struct aead_request *req,
struct cipher_test_sglists *tsgls)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
const unsigned int alignmask = crypto_aead_alignmask(tfm);
const unsigned int ivsize = crypto_aead_ivsize(tfm);
const unsigned int authsize = vec->clen - vec->plen;
const char *driver = crypto_aead_driver_name(tfm);
const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
const char *op = enc ? "encryption" : "decryption";
DECLARE_CRYPTO_WAIT(wait);
u8 _iv[3 * (MAX_ALGAPI_ALIGNMASK + 1) + MAX_IVLEN];
u8 *iv = PTR_ALIGN(&_iv[0], 2 * (MAX_ALGAPI_ALIGNMASK + 1)) +
cfg->iv_offset +
(cfg->iv_offset_relative_to_alignmask ? alignmask : 0);
struct kvec input[2];
int err;
/* Set the key */
if (vec->wk)
crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
else
crypto_aead_clear_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
err = do_setkey(crypto_aead_setkey, tfm, vec->key, vec->klen,
cfg, alignmask);
if (err && err != vec->setkey_error) {
pr_err("alg: aead: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
driver, vec_name, vec->setkey_error, err,
crypto_aead_get_flags(tfm));
return err;
}
if (!err && vec->setkey_error) {
pr_err("alg: aead: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
driver, vec_name, vec->setkey_error);
return -EINVAL;
}
/* Set the authentication tag size */
err = crypto_aead_setauthsize(tfm, authsize);
if (err && err != vec->setauthsize_error) {
pr_err("alg: aead: %s setauthsize failed on test vector %s; expected_error=%d, actual_error=%d\n",
driver, vec_name, vec->setauthsize_error, err);
return err;
}
if (!err && vec->setauthsize_error) {
pr_err("alg: aead: %s setauthsize unexpectedly succeeded on test vector %s; expected_error=%d\n",
driver, vec_name, vec->setauthsize_error);
return -EINVAL;
}
if (vec->setkey_error || vec->setauthsize_error)
return 0;
/* The IV must be copied to a buffer, as the algorithm may modify it */
if (WARN_ON(ivsize > MAX_IVLEN))
return -EINVAL;
if (vec->iv)
memcpy(iv, vec->iv, ivsize);
else
memset(iv, 0, ivsize);
/* Build the src/dst scatterlists */
input[0].iov_base = (void *)vec->assoc;
input[0].iov_len = vec->alen;
input[1].iov_base = enc ? (void *)vec->ptext : (void *)vec->ctext;
input[1].iov_len = enc ? vec->plen : vec->clen;
err = build_cipher_test_sglists(tsgls, cfg, alignmask,
vec->alen + (enc ? vec->plen :
vec->clen),
vec->alen + (enc ? vec->clen :
vec->plen),
input, 2);
if (err) {
pr_err("alg: aead: %s %s: error preparing scatterlists for test vector %s, cfg=\"%s\"\n",
driver, op, vec_name, cfg->name);
return err;
}
/* Do the actual encryption or decryption */
testmgr_poison(req->__ctx, crypto_aead_reqsize(tfm));
aead_request_set_callback(req, req_flags, crypto_req_done, &wait);
aead_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
enc ? vec->plen : vec->clen, iv);
aead_request_set_ad(req, vec->alen);
if (cfg->nosimd)
crypto_disable_simd_for_test();
err = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
if (cfg->nosimd)
crypto_reenable_simd_for_test();
err = crypto_wait_req(err, &wait);
/* Check that the algorithm didn't overwrite things it shouldn't have */
if (req->cryptlen != (enc ? vec->plen : vec->clen) ||
req->assoclen != vec->alen ||
req->iv != iv ||
req->src != tsgls->src.sgl_ptr ||
req->dst != tsgls->dst.sgl_ptr ||
crypto_aead_reqtfm(req) != tfm ||
req->base.complete != crypto_req_done ||
req->base.flags != req_flags ||
req->base.data != &wait) {
pr_err("alg: aead: %s %s corrupted request struct on test vector %s, cfg=\"%s\"\n",
driver, op, vec_name, cfg->name);
if (req->cryptlen != (enc ? vec->plen : vec->clen))
pr_err("alg: aead: changed 'req->cryptlen'\n");
if (req->assoclen != vec->alen)
pr_err("alg: aead: changed 'req->assoclen'\n");
if (req->iv != iv)
pr_err("alg: aead: changed 'req->iv'\n");
if (req->src != tsgls->src.sgl_ptr)
pr_err("alg: aead: changed 'req->src'\n");
if (req->dst != tsgls->dst.sgl_ptr)
pr_err("alg: aead: changed 'req->dst'\n");
if (crypto_aead_reqtfm(req) != tfm)
pr_err("alg: aead: changed 'req->base.tfm'\n");
if (req->base.complete != crypto_req_done)
pr_err("alg: aead: changed 'req->base.complete'\n");
if (req->base.flags != req_flags)
pr_err("alg: aead: changed 'req->base.flags'\n");
if (req->base.data != &wait)
pr_err("alg: aead: changed 'req->base.data'\n");
return -EINVAL;
}
if (is_test_sglist_corrupted(&tsgls->src)) {
pr_err("alg: aead: %s %s corrupted src sgl on test vector %s, cfg=\"%s\"\n",
driver, op, vec_name, cfg->name);
return -EINVAL;
}
if (tsgls->dst.sgl_ptr != tsgls->src.sgl &&
is_test_sglist_corrupted(&tsgls->dst)) {
pr_err("alg: aead: %s %s corrupted dst sgl on test vector %s, cfg=\"%s\"\n",
driver, op, vec_name, cfg->name);
return -EINVAL;
}
/* Check for unexpected success or failure, or wrong error code */
if ((err == 0 && vec->novrfy) ||
(err != vec->crypt_error && !(err == -EBADMSG && vec->novrfy))) {
char expected_error[32];
if (vec->novrfy &&
vec->crypt_error != 0 && vec->crypt_error != -EBADMSG)
sprintf(expected_error, "-EBADMSG or %d",
vec->crypt_error);
else if (vec->novrfy)
sprintf(expected_error, "-EBADMSG");
else
sprintf(expected_error, "%d", vec->crypt_error);
if (err) {
pr_err("alg: aead: %s %s failed on test vector %s; expected_error=%s, actual_error=%d, cfg=\"%s\"\n",
driver, op, vec_name, expected_error, err,
cfg->name);
return err;
}
pr_err("alg: aead: %s %s unexpectedly succeeded on test vector %s; expected_error=%s, cfg=\"%s\"\n",
driver, op, vec_name, expected_error, cfg->name);
return -EINVAL;
}
if (err) /* Expectedly failed. */
return 0;
/* Check for the correct output (ciphertext or plaintext) */
err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext,
enc ? vec->clen : vec->plen,
vec->alen,
enc || cfg->inplace_mode == OUT_OF_PLACE);
if (err == -EOVERFLOW) {
pr_err("alg: aead: %s %s overran dst buffer on test vector %s, cfg=\"%s\"\n",
driver, op, vec_name, cfg->name);
return err;
}
if (err) {
pr_err("alg: aead: %s %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
driver, op, vec_name, cfg->name);
return err;
}
return 0;
}
static int test_aead_vec(int enc, const struct aead_testvec *vec,
unsigned int vec_num, struct aead_request *req,
struct cipher_test_sglists *tsgls)
{
char vec_name[16];
unsigned int i;
int err;
if (enc && vec->novrfy)
return 0;
sprintf(vec_name, "%u", vec_num);
for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) {
err = test_aead_vec_cfg(enc, vec, vec_name,
&default_cipher_testvec_configs[i],
req, tsgls);
if (err)
return err;
}
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
if (!noextratests) {
struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
init_rnd_state(&rng);
for (i = 0; i < fuzz_iterations; i++) {
generate_random_testvec_config(&rng, &cfg, cfgname,
sizeof(cfgname));
err = test_aead_vec_cfg(enc, vec, vec_name,
&cfg, req, tsgls);
if (err)
return err;
cond_resched();
}
}
#endif
return 0;
}
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
struct aead_extra_tests_ctx {
struct rnd_state rng;
struct aead_request *req;
struct crypto_aead *tfm;
const struct alg_test_desc *test_desc;
struct cipher_test_sglists *tsgls;
unsigned int maxdatasize;
unsigned int maxkeysize;
struct aead_testvec vec;
char vec_name[64];
char cfgname[TESTVEC_CONFIG_NAMELEN];
struct testvec_config cfg;
};
/*
* Make at least one random change to a (ciphertext, AAD) pair. "Ciphertext"
* here means the full ciphertext including the authentication tag. The
* authentication tag (and hence also the ciphertext) is assumed to be nonempty.
*/
static void mutate_aead_message(struct rnd_state *rng,
struct aead_testvec *vec, bool aad_iv,
unsigned int ivsize)
{
const unsigned int aad_tail_size = aad_iv ? ivsize : 0;
const unsigned int authsize = vec->clen - vec->plen;
if (prandom_bool(rng) && vec->alen > aad_tail_size) {
/* Mutate the AAD */
flip_random_bit(rng, (u8 *)vec->assoc,
vec->alen - aad_tail_size);
if (prandom_bool(rng))
return;
}
if (prandom_bool(rng)) {
/* Mutate auth tag (assuming it's at the end of ciphertext) */
flip_random_bit(rng, (u8 *)vec->ctext + vec->plen, authsize);
} else {
/* Mutate any part of the ciphertext */
flip_random_bit(rng, (u8 *)vec->ctext, vec->clen);
}
}
/*
* Minimum authentication tag size in bytes at which we assume that we can
* reliably generate inauthentic messages, i.e. not generate an authentic
* message by chance.
*/
#define MIN_COLLISION_FREE_AUTHSIZE 8
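
/*
 * Rationale (illustrative): with an 8-byte tag, a mutated message still
 * verifying by chance happens with probability about 2^-64 per attempt,
 * which is negligible over the number of fuzz iterations run here.
 */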
static void generate_aead_message(struct rnd_state *rng,
struct aead_request *req,
const struct aead_test_suite *suite,
struct aead_testvec *vec,
bool prefer_inauthentic)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
const unsigned int ivsize = crypto_aead_ivsize(tfm);
const unsigned int authsize = vec->clen - vec->plen;
const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) &&
(prefer_inauthentic ||
prandom_u32_below(rng, 4) == 0);
/* Generate the AAD. */
generate_random_bytes(rng, (u8 *)vec->assoc, vec->alen);
if (suite->aad_iv && vec->alen >= ivsize)
/* Avoid implementation-defined behavior. */
memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize);
if (inauthentic && prandom_bool(rng)) {
/* Generate a random ciphertext. */
generate_random_bytes(rng, (u8 *)vec->ctext, vec->clen);
} else {
int i = 0;
struct scatterlist src[2], dst;
u8 iv[MAX_IVLEN];
DECLARE_CRYPTO_WAIT(wait);
/* Generate a random plaintext and encrypt it. */
sg_init_table(src, 2);
if (vec->alen)
sg_set_buf(&src[i++], vec->assoc, vec->alen);
if (vec->plen) {
generate_random_bytes(rng, (u8 *)vec->ptext, vec->plen);
sg_set_buf(&src[i++], vec->ptext, vec->plen);
}
sg_init_one(&dst, vec->ctext, vec->alen + vec->clen);
memcpy(iv, vec->iv, ivsize);
aead_request_set_callback(req, 0, crypto_req_done, &wait);
aead_request_set_crypt(req, src, &dst, vec->plen, iv);
aead_request_set_ad(req, vec->alen);
vec->crypt_error = crypto_wait_req(crypto_aead_encrypt(req),
&wait);
/* If encryption failed, we're done. */
if (vec->crypt_error != 0)
return;
memmove((u8 *)vec->ctext, vec->ctext + vec->alen, vec->clen);
if (!inauthentic)
return;
/*
* Mutate the authentic (ciphertext, AAD) pair to get an
* inauthentic one.
*/
mutate_aead_message(rng, vec, suite->aad_iv, ivsize);
}
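/*
* At this point the message is inauthentic: either the ciphertext was
* generated randomly, or an authentic message was mutated above.
*/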
vec->novrfy = 1;
if (suite->einval_allowed)
vec->crypt_error = -EINVAL;
}
/*
* Generate an AEAD test vector 'vec' using the implementation specified by
* 'req'. The buffers in 'vec' must already be allocated.
*
* If 'prefer_inauthentic' is true, then this function will generate inauthentic
* test vectors (i.e. vectors with 'vec->novrfy=1') more often.
*/
static void generate_random_aead_testvec(struct rnd_state *rng,
struct aead_request *req,
struct aead_testvec *vec,
const struct aead_test_suite *suite,
unsigned int maxkeysize,
unsigned int maxdatasize,
char *name, size_t max_namelen,
bool prefer_inauthentic)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
const unsigned int ivsize = crypto_aead_ivsize(tfm);
const unsigned int maxauthsize = crypto_aead_maxauthsize(tfm);
unsigned int authsize;
unsigned int total_len;
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
vec->klen = maxkeysize;
if (prandom_u32_below(rng, 4) == 0)
vec->klen = prandom_u32_below(rng, maxkeysize + 1);
generate_random_bytes(rng, (u8 *)vec->key, vec->klen);
vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
/* IV */
generate_random_bytes(rng, (u8 *)vec->iv, ivsize);
/* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
authsize = maxauthsize;
if (prandom_u32_below(rng, 4) == 0)
authsize = prandom_u32_below(rng, maxauthsize + 1);
if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE)
authsize = MIN_COLLISION_FREE_AUTHSIZE;
if (WARN_ON(authsize > maxdatasize))
authsize = maxdatasize;
maxdatasize -= authsize;
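/* Reserve room for the tag: vec->clen = plen + authsize must still fit
* within the original maxdatasize.
*/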
vec->setauthsize_error = crypto_aead_setauthsize(tfm, authsize);
/* AAD, plaintext, and ciphertext lengths */
total_len = generate_random_length(rng, maxdatasize);
if (prandom_u32_below(rng, 4) == 0)
vec->alen = 0;
else
vec->alen = generate_random_length(rng, total_len);
vec->plen = total_len - vec->alen;
vec->clen = vec->plen + authsize;
/*
* Generate the AAD, plaintext, and ciphertext. Not applicable if the
* key or the authentication tag size couldn't be set.
*/
vec->novrfy = 0;
vec->crypt_error = 0;
if (vec->setkey_error == 0 && vec->setauthsize_error == 0)
generate_aead_message(rng, req, suite, vec, prefer_inauthentic);
snprintf(name, max_namelen,
"\"random: alen=%u plen=%u authsize=%u klen=%u novrfy=%d\"",
vec->alen, vec->plen, authsize, vec->klen, vec->novrfy);
}
static void try_to_generate_inauthentic_testvec(
struct aead_extra_tests_ctx *ctx)
{
int i;
for (i = 0; i < 10; i++) {
generate_random_aead_testvec(&ctx->rng, ctx->req, &ctx->vec,
&ctx->test_desc->suite.aead,
ctx->maxkeysize, ctx->maxdatasize,
ctx->vec_name,
sizeof(ctx->vec_name), true);
if (ctx->vec.novrfy)
return;
}
}
/*
* Generate inauthentic test vectors (i.e. ciphertext, AAD pairs that aren't the
* result of an encryption with the key) and verify that decryption fails.
*/
static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
{
unsigned int i;
int err;
for (i = 0; i < fuzz_iterations * 8; i++) {
/*
* Since this part of the tests isn't comparing the
* implementation to another, there's no point in testing any
* test vectors other than inauthentic ones (vec.novrfy=1) here.
*
* If we're having trouble generating such a test vector, e.g.
* if the algorithm keeps rejecting the generated keys, don't
* retry forever; just continue on.
*/
try_to_generate_inauthentic_testvec(ctx);
if (ctx->vec.novrfy) {
generate_random_testvec_config(&ctx->rng, &ctx->cfg,
ctx->cfgname,
sizeof(ctx->cfgname));
err = test_aead_vec_cfg(DECRYPT, &ctx->vec,
ctx->vec_name, &ctx->cfg,
ctx->req, ctx->tsgls);
if (err)
return err;
}
cond_resched();
}
return 0;
}
/*
* Test the AEAD algorithm against the corresponding generic implementation, if
* one is available.
*/
static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx)
{
struct crypto_aead *tfm = ctx->tfm;
const char *algname = crypto_aead_alg(tfm)->base.cra_name;
const char *driver = crypto_aead_driver_name(tfm);
const char *generic_driver = ctx->test_desc->generic_driver;
char _generic_driver[CRYPTO_MAX_ALG_NAME];
struct crypto_aead *generic_tfm = NULL;
struct aead_request *generic_req = NULL;
unsigned int i;
int err;
if (!generic_driver) { /* Use default naming convention? */
err = build_generic_driver_name(algname, _generic_driver);
if (err)
return err;
generic_driver = _generic_driver;
}
if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
return 0;
generic_tfm = crypto_alloc_aead(generic_driver, 0, 0);
if (IS_ERR(generic_tfm)) {
err = PTR_ERR(generic_tfm);
if (err == -ENOENT) {
pr_warn("alg: aead: skipping comparison tests for %s because %s is unavailable\n",
driver, generic_driver);
return 0;
}
pr_err("alg: aead: error allocating %s (generic impl of %s): %d\n",
generic_driver, algname, err);
return err;
}
generic_req = aead_request_alloc(generic_tfm, GFP_KERNEL);
if (!generic_req) {
err = -ENOMEM;
goto out;
}
/* Check the algorithm properties for consistency. */
if (crypto_aead_maxauthsize(tfm) !=
crypto_aead_maxauthsize(generic_tfm)) {
pr_err("alg: aead: maxauthsize for %s (%u) doesn't match generic impl (%u)\n",
driver, crypto_aead_maxauthsize(tfm),
crypto_aead_maxauthsize(generic_tfm));
err = -EINVAL;
goto out;
}
if (crypto_aead_ivsize(tfm) != crypto_aead_ivsize(generic_tfm)) {
pr_err("alg: aead: ivsize for %s (%u) doesn't match generic impl (%u)\n",
driver, crypto_aead_ivsize(tfm),
crypto_aead_ivsize(generic_tfm));
err = -EINVAL;
goto out;
}
if (crypto_aead_blocksize(tfm) != crypto_aead_blocksize(generic_tfm)) {
pr_err("alg: aead: blocksize for %s (%u) doesn't match generic impl (%u)\n",
driver, crypto_aead_blocksize(tfm),
crypto_aead_blocksize(generic_tfm));
err = -EINVAL;
goto out;
}
/*
* Now generate test vectors using the generic implementation, and test
* the other implementation against them.
*/
for (i = 0; i < fuzz_iterations * 8; i++) {
generate_random_aead_testvec(&ctx->rng, generic_req, &ctx->vec,
&ctx->test_desc->suite.aead,
ctx->maxkeysize, ctx->maxdatasize,
ctx->vec_name,
sizeof(ctx->vec_name), false);
generate_random_testvec_config(&ctx->rng, &ctx->cfg,
ctx->cfgname,
sizeof(ctx->cfgname));
if (!ctx->vec.novrfy) {
err = test_aead_vec_cfg(ENCRYPT, &ctx->vec,
ctx->vec_name, &ctx->cfg,
ctx->req, ctx->tsgls);
if (err)
goto out;
}
if (ctx->vec.crypt_error == 0 || ctx->vec.novrfy) {
err = test_aead_vec_cfg(DECRYPT, &ctx->vec,
ctx->vec_name, &ctx->cfg,
ctx->req, ctx->tsgls);
if (err)
goto out;
}
cond_resched();
}
err = 0;
out:
crypto_free_aead(generic_tfm);
aead_request_free(generic_req);
return err;
}
static int test_aead_extra(const struct alg_test_desc *test_desc,
struct aead_request *req,
struct cipher_test_sglists *tsgls)
{
struct aead_extra_tests_ctx *ctx;
unsigned int i;
int err;
if (noextratests)
return 0;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
init_rnd_state(&ctx->rng);
ctx->req = req;
ctx->tfm = crypto_aead_reqtfm(req);
ctx->test_desc = test_desc;
ctx->tsgls = tsgls;
ctx->maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
ctx->maxkeysize = 0;
for (i = 0; i < test_desc->suite.aead.count; i++)
ctx->maxkeysize = max_t(unsigned int, ctx->maxkeysize,
test_desc->suite.aead.vecs[i].klen);
ctx->vec.key = kmalloc(ctx->maxkeysize, GFP_KERNEL);
ctx->vec.iv = kmalloc(crypto_aead_ivsize(ctx->tfm), GFP_KERNEL);
ctx->vec.assoc = kmalloc(ctx->maxdatasize, GFP_KERNEL);
ctx->vec.ptext = kmalloc(ctx->maxdatasize, GFP_KERNEL);
ctx->vec.ctext = kmalloc(ctx->maxdatasize, GFP_KERNEL);
if (!ctx->vec.key || !ctx->vec.iv || !ctx->vec.assoc ||
!ctx->vec.ptext || !ctx->vec.ctext) {
err = -ENOMEM;
goto out;
}
err = test_aead_vs_generic_impl(ctx);
if (err)
goto out;
err = test_aead_inauthentic_inputs(ctx);
out:
kfree(ctx->vec.key);
kfree(ctx->vec.iv);
kfree(ctx->vec.assoc);
kfree(ctx->vec.ptext);
kfree(ctx->vec.ctext);
kfree(ctx);
return err;
}
#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_aead_extra(const struct alg_test_desc *test_desc,
struct aead_request *req,
struct cipher_test_sglists *tsgls)
{
return 0;
}
#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_aead(int enc, const struct aead_test_suite *suite,
struct aead_request *req,
struct cipher_test_sglists *tsgls)
{
unsigned int i;
int err;
for (i = 0; i < suite->count; i++) {
err = test_aead_vec(enc, &suite->vecs[i], i, req, tsgls);
if (err)
return err;
cond_resched();
}
return 0;
}
static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask)
{
const struct aead_test_suite *suite = &desc->suite.aead;
struct crypto_aead *tfm;
struct aead_request *req = NULL;
struct cipher_test_sglists *tsgls = NULL;
int err;
if (suite->count <= 0) {
pr_err("alg: aead: empty test suite for %s\n", driver);
return -EINVAL;
}
tfm = crypto_alloc_aead(driver, type, mask);
if (IS_ERR(tfm)) {
pr_err("alg: aead: failed to allocate transform for %s: %ld\n",
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
}
driver = crypto_aead_driver_name(tfm);
req = aead_request_alloc(tfm, GFP_KERNEL);
if (!req) {
pr_err("alg: aead: failed to allocate request for %s\n",
driver);
err = -ENOMEM;
goto out;
}
tsgls = alloc_cipher_test_sglists();
if (!tsgls) {
pr_err("alg: aead: failed to allocate test buffers for %s\n",
driver);
err = -ENOMEM;
goto out;
}
err = test_aead(ENCRYPT, suite, req, tsgls);
if (err)
goto out;
err = test_aead(DECRYPT, suite, req, tsgls);
if (err)
goto out;
err = test_aead_extra(desc, req, tsgls);
out:
free_cipher_test_sglists(tsgls);
aead_request_free(req);
crypto_free_aead(tfm);
return err;
}
static int test_cipher(struct crypto_cipher *tfm, int enc,
const struct cipher_testvec *template,
unsigned int tcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
unsigned int i, j, k;
char *q;
const char *e;
const char *input, *result;
void *data;
char *xbuf[XBUFSIZE];
int ret = -ENOMEM;
if (testmgr_alloc_buf(xbuf))
goto out_nobuf;
if (enc == ENCRYPT)
e = "encryption";
else
e = "decryption";
j = 0;
for (i = 0; i < tcount; i++) {
if (fips_enabled && template[i].fips_skip)
continue;
input = enc ? template[i].ptext : template[i].ctext;
result = enc ? template[i].ctext : template[i].ptext;
j++;
ret = -EINVAL;
if (WARN_ON(template[i].len > PAGE_SIZE))
goto out;
data = xbuf[0];
memcpy(data, input, template[i].len);
crypto_cipher_clear_flags(tfm, ~0);
if (template[i].wk)
crypto_cipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
ret = crypto_cipher_setkey(tfm, template[i].key,
template[i].klen);
if (ret) {
if (ret == template[i].setkey_error)
continue;
pr_err("alg: cipher: %s setkey failed on test vector %u; expected_error=%d, actual_error=%d, flags=%#x\n",
algo, j, template[i].setkey_error, ret,
crypto_cipher_get_flags(tfm));
goto out;
}
if (template[i].setkey_error) {
pr_err("alg: cipher: %s setkey unexpectedly succeeded on test vector %u; expected_error=%d\n",
algo, j, template[i].setkey_error);
ret = -EINVAL;
goto out;
}
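/* Single-block cipher API: process the data one block at a time. */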
for (k = 0; k < template[i].len;
k += crypto_cipher_blocksize(tfm)) {
if (enc)
crypto_cipher_encrypt_one(tfm, data + k,
data + k);
else
crypto_cipher_decrypt_one(tfm, data + k,
data + k);
}
q = data;
if (memcmp(q, result, template[i].len)) {
printk(KERN_ERR "alg: cipher: Test %d failed "
"on %s for %s\n", j, e, algo);
hexdump(q, template[i].len);
ret = -EINVAL;
goto out;
}
}
ret = 0;
out:
testmgr_free_buf(xbuf);
out_nobuf:
return ret;
}
static int test_skcipher_vec_cfg(int enc, const struct cipher_testvec *vec,
const char *vec_name,
const struct testvec_config *cfg,
struct skcipher_request *req,
struct cipher_test_sglists *tsgls)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const unsigned int alignmask = crypto_skcipher_alignmask(tfm);
const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
const char *driver = crypto_skcipher_driver_name(tfm);
const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
const char *op = enc ? "encryption" : "decryption";
DECLARE_CRYPTO_WAIT(wait);
u8 _iv[3 * (MAX_ALGAPI_ALIGNMASK + 1) + MAX_IVLEN];
u8 *iv = PTR_ALIGN(&_iv[0], 2 * (MAX_ALGAPI_ALIGNMASK + 1)) +
cfg->iv_offset +
(cfg->iv_offset_relative_to_alignmask ? alignmask : 0);
struct kvec input;
int err;
/* Set the key */
if (vec->wk)
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
else
crypto_skcipher_clear_flags(tfm,
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
err = do_setkey(crypto_skcipher_setkey, tfm, vec->key, vec->klen,
cfg, alignmask);
if (err) {
if (err == vec->setkey_error)
return 0;
pr_err("alg: skcipher: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
driver, vec_name, vec->setkey_error, err,
crypto_skcipher_get_flags(tfm));
return err;
}
if (vec->setkey_error) {
pr_err("alg: skcipher: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
driver, vec_name, vec->setkey_error);
return -EINVAL;
}
/* The IV must be copied to a buffer, as the algorithm may modify it */
if (ivsize) {
if (WARN_ON(ivsize > MAX_IVLEN))
return -EINVAL;
if (vec->generates_iv && !enc)
memcpy(iv, vec->iv_out, ivsize);
else if (vec->iv)
memcpy(iv, vec->iv, ivsize);
else
memset(iv, 0, ivsize);
} else {
if (vec->generates_iv) {
pr_err("alg: skcipher: %s has ivsize=0 but test vector %s generates IV!\n",
driver, vec_name);
return -EINVAL;
}
iv = NULL;
}
/* Build the src/dst scatterlists */
input.iov_base = enc ? (void *)vec->ptext : (void *)vec->ctext;
input.iov_len = vec->len;
err = build_cipher_test_sglists(tsgls, cfg, alignmask,
vec->len, vec->len, &input, 1);
if (err) {
pr_err("alg: skcipher: %s %s: error preparing scatterlists for test vector %s, cfg=\"%s\"\n",
driver, op, vec_name, cfg->name);
return err;
}
/* Do the actual encryption or decryption */
testmgr_poison(req->__ctx, crypto_skcipher_reqsize(tfm));
skcipher_request_set_callback(req, req_flags, crypto_req_done, &wait);
skcipher_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
vec->len, iv);
if (cfg->nosimd)
crypto_disable_simd_for_test();
err = enc ? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
if (cfg->nosimd)
crypto_reenable_simd_for_test();
err = crypto_wait_req(err, &wait);
/* Check that the algorithm didn't overwrite things it shouldn't have */
if (req->cryptlen != vec->len ||
req->iv != iv ||
req->src != tsgls->src.sgl_ptr ||
req->dst != tsgls->dst.sgl_ptr ||
crypto_skcipher_reqtfm(req) != tfm ||
req->base.complete != crypto_req_done ||
req->base.flags != req_flags ||
req->base.data != &wait) {
pr_err("alg: skcipher: %s %s corrupted request struct on test vector %s, cfg=\"%s\"\n",
driver, op, vec_name, cfg->name);
if (req->cryptlen != vec->len)
pr_err("alg: skcipher: changed 'req->cryptlen'\n");
if (req->iv != iv)
pr_err("alg: skcipher: changed 'req->iv'\n");
if (req->src != tsgls->src.sgl_ptr)
pr_err("alg: skcipher: changed 'req->src'\n");
if (req->dst != tsgls->dst.sgl_ptr)
pr_err("alg: skcipher: changed 'req->dst'\n");
if (crypto_skcipher_reqtfm(req) != tfm)
pr_err("alg: skcipher: changed 'req->base.tfm'\n");
if (req->base.complete != crypto_req_done)
pr_err("alg: skcipher: changed 'req->base.complete'\n");
if (req->base.flags != req_flags)
pr_err("alg: skcipher: changed 'req->base.flags'\n");
if (req->base.data != &wait)
pr_err("alg: skcipher: changed 'req->base.data'\n");
return -EINVAL;
}
if (is_test_sglist_corrupted(&tsgls->src)) {
pr_err("alg: skcipher: %s %s corrupted src sgl on test vector %s, cfg=\"%s\"\n",
driver, op, vec_name, cfg->name);
return -EINVAL;
}
if (tsgls->dst.sgl_ptr != tsgls->src.sgl &&
is_test_sglist_corrupted(&tsgls->dst)) {
pr_err("alg: skcipher: %s %s corrupted dst sgl on test vector %s, cfg=\"%s\"\n",
driver, op, vec_name, cfg->name);
return -EINVAL;
}
/* Check for success or failure */
if (err) {
if (err == vec->crypt_error)
return 0;
pr_err("alg: skcipher: %s %s failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
driver, op, vec_name, vec->crypt_error, err, cfg->name);
return err;
}
if (vec->crypt_error) {
pr_err("alg: skcipher: %s %s unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
driver, op, vec_name, vec->crypt_error, cfg->name);
return -EINVAL;
}
/* Check for the correct output (ciphertext or plaintext) */
err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext,
vec->len, 0, true);
if (err == -EOVERFLOW) {
pr_err("alg: skcipher: %s %s overran dst buffer on test vector %s, cfg=\"%s\"\n",
driver, op, vec_name, cfg->name);
return err;
}
if (err) {
pr_err("alg: skcipher: %s %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
driver, op, vec_name, cfg->name);
return err;
}
/* If applicable, check that the algorithm generated the correct IV */
if (vec->iv_out && memcmp(iv, vec->iv_out, ivsize) != 0) {
pr_err("alg: skcipher: %s %s test failed (wrong output IV) on test vector %s, cfg=\"%s\"\n",
driver, op, vec_name, cfg->name);
hexdump(iv, ivsize);
return -EINVAL;
}
return 0;
}
static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
unsigned int vec_num,
struct skcipher_request *req,
struct cipher_test_sglists *tsgls)
{
char vec_name[16];
unsigned int i;
int err;
if (fips_enabled && vec->fips_skip)
return 0;
sprintf(vec_name, "%u", vec_num);
for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) {
err = test_skcipher_vec_cfg(enc, vec, vec_name,
&default_cipher_testvec_configs[i],
req, tsgls);
if (err)
return err;
}
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
if (!noextratests) {
struct rnd_state rng;
struct testvec_config cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
init_rnd_state(&rng);
for (i = 0; i < fuzz_iterations; i++) {
generate_random_testvec_config(&rng, &cfg, cfgname,
sizeof(cfgname));
err = test_skcipher_vec_cfg(enc, vec, vec_name,
&cfg, req, tsgls);
if (err)
return err;
cond_resched();
}
}
#endif
return 0;
}
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
/*
* Generate a symmetric cipher test vector from the given implementation.
* Assumes the buffers in 'vec' were already allocated.
*/
static void generate_random_cipher_testvec(struct rnd_state *rng,
struct skcipher_request *req,
struct cipher_testvec *vec,
unsigned int maxdatasize,
char *name, size_t max_namelen)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const unsigned int maxkeysize = crypto_skcipher_max_keysize(tfm);
const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct scatterlist src, dst;
u8 iv[MAX_IVLEN];
DECLARE_CRYPTO_WAIT(wait);
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
vec->klen = maxkeysize;
if (prandom_u32_below(rng, 4) == 0)
vec->klen = prandom_u32_below(rng, maxkeysize + 1);
generate_random_bytes(rng, (u8 *)vec->key, vec->klen);
vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
/* IV */
generate_random_bytes(rng, (u8 *)vec->iv, ivsize);
/* Plaintext */
vec->len = generate_random_length(rng, maxdatasize);
generate_random_bytes(rng, (u8 *)vec->ptext, vec->len);
/* If the key couldn't be set, no need to continue to encrypt. */
if (vec->setkey_error)
goto done;
/* Ciphertext */
sg_init_one(&src, vec->ptext, vec->len);
sg_init_one(&dst, vec->ctext, vec->len);
memcpy(iv, vec->iv, ivsize);
skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
skcipher_request_set_crypt(req, &src, &dst, vec->len, iv);
vec->crypt_error = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
if (vec->crypt_error != 0) {
/*
* The only acceptable error here is for an invalid length, so
* skcipher decryption should fail with the same error too.
* We'll test for this. But to keep the API usage well-defined,
* explicitly initialize the ciphertext buffer too.
*/
memset((u8 *)vec->ctext, 0, vec->len);
}
done:
snprintf(name, max_namelen, "\"random: len=%u klen=%u\"",
vec->len, vec->klen);
}
/*
* Test the skcipher algorithm represented by @req against the corresponding
* generic implementation, if one is available.
*/
static int test_skcipher_vs_generic_impl(const char *generic_driver,
struct skcipher_request *req,
struct cipher_test_sglists *tsgls)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const unsigned int maxkeysize = crypto_skcipher_max_keysize(tfm);
const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
const unsigned int blocksize = crypto_skcipher_blocksize(tfm);
const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
const char *algname = crypto_skcipher_alg(tfm)->base.cra_name;
const char *driver = crypto_skcipher_driver_name(tfm);
struct rnd_state rng;
char _generic_driver[CRYPTO_MAX_ALG_NAME];
struct crypto_skcipher *generic_tfm = NULL;
struct skcipher_request *generic_req = NULL;
unsigned int i;
struct cipher_testvec vec = { 0 };
char vec_name[64];
struct testvec_config *cfg;
char cfgname[TESTVEC_CONFIG_NAMELEN];
int err;
if (noextratests)
return 0;
/* Keywrap isn't supported here yet as it handles its IV differently. */
if (strncmp(algname, "kw(", 3) == 0)
return 0;
init_rnd_state(&rng);
if (!generic_driver) { /* Use default naming convention? */
err = build_generic_driver_name(algname, _generic_driver);
if (err)
return err;
generic_driver = _generic_driver;
}
if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
return 0;
generic_tfm = crypto_alloc_skcipher(generic_driver, 0, 0);
if (IS_ERR(generic_tfm)) {
err = PTR_ERR(generic_tfm);
if (err == -ENOENT) {
pr_warn("alg: skcipher: skipping comparison tests for %s because %s is unavailable\n",
driver, generic_driver);
return 0;
}
pr_err("alg: skcipher: error allocating %s (generic impl of %s): %d\n",
generic_driver, algname, err);
return err;
}
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg) {
err = -ENOMEM;
goto out;
}
generic_req = skcipher_request_alloc(generic_tfm, GFP_KERNEL);
if (!generic_req) {
err = -ENOMEM;
goto out;
}
/* Check the algorithm properties for consistency. */
if (crypto_skcipher_min_keysize(tfm) !=
crypto_skcipher_min_keysize(generic_tfm)) {
pr_err("alg: skcipher: min keysize for %s (%u) doesn't match generic impl (%u)\n",
driver, crypto_skcipher_min_keysize(tfm),
crypto_skcipher_min_keysize(generic_tfm));
err = -EINVAL;
goto out;
}
if (maxkeysize != crypto_skcipher_max_keysize(generic_tfm)) {
pr_err("alg: skcipher: max keysize for %s (%u) doesn't match generic impl (%u)\n",
driver, maxkeysize,
crypto_skcipher_max_keysize(generic_tfm));
err = -EINVAL;
goto out;
}
if (ivsize != crypto_skcipher_ivsize(generic_tfm)) {
pr_err("alg: skcipher: ivsize for %s (%u) doesn't match generic impl (%u)\n",
driver, ivsize, crypto_skcipher_ivsize(generic_tfm));
err = -EINVAL;
goto out;
}
if (blocksize != crypto_skcipher_blocksize(generic_tfm)) {
pr_err("alg: skcipher: blocksize for %s (%u) doesn't match generic impl (%u)\n",
driver, blocksize,
crypto_skcipher_blocksize(generic_tfm));
err = -EINVAL;
goto out;
}
/*
* Now generate test vectors using the generic implementation, and test
* the other implementation against them.
*/
vec.key = kmalloc(maxkeysize, GFP_KERNEL);
vec.iv = kmalloc(ivsize, GFP_KERNEL);
vec.ptext = kmalloc(maxdatasize, GFP_KERNEL);
vec.ctext = kmalloc(maxdatasize, GFP_KERNEL);
if (!vec.key || !vec.iv || !vec.ptext || !vec.ctext) {
err = -ENOMEM;
goto out;
}
for (i = 0; i < fuzz_iterations * 8; i++) {
generate_random_cipher_testvec(&rng, generic_req, &vec,
maxdatasize,
vec_name, sizeof(vec_name));
generate_random_testvec_config(&rng, cfg, cfgname,
sizeof(cfgname));
err = test_skcipher_vec_cfg(ENCRYPT, &vec, vec_name,
cfg, req, tsgls);
if (err)
goto out;
err = test_skcipher_vec_cfg(DECRYPT, &vec, vec_name,
cfg, req, tsgls);
if (err)
goto out;
cond_resched();
}
err = 0;
out:
kfree(cfg);
kfree(vec.key);
kfree(vec.iv);
kfree(vec.ptext);
kfree(vec.ctext);
crypto_free_skcipher(generic_tfm);
skcipher_request_free(generic_req);
return err;
}
#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_skcipher_vs_generic_impl(const char *generic_driver,
struct skcipher_request *req,
struct cipher_test_sglists *tsgls)
{
return 0;
}
#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
static int test_skcipher(int enc, const struct cipher_test_suite *suite,
struct skcipher_request *req,
struct cipher_test_sglists *tsgls)
{
unsigned int i;
int err;
for (i = 0; i < suite->count; i++) {
err = test_skcipher_vec(enc, &suite->vecs[i], i, req, tsgls);
if (err)
return err;
cond_resched();
}
return 0;
}
static int alg_test_skcipher(const struct alg_test_desc *desc,
const char *driver, u32 type, u32 mask)
{
const struct cipher_test_suite *suite = &desc->suite.cipher;
struct crypto_skcipher *tfm;
struct skcipher_request *req = NULL;
struct cipher_test_sglists *tsgls = NULL;
int err;
if (suite->count <= 0) {
pr_err("alg: skcipher: empty test suite for %s\n", driver);
return -EINVAL;
}
tfm = crypto_alloc_skcipher(driver, type, mask);
if (IS_ERR(tfm)) {
pr_err("alg: skcipher: failed to allocate transform for %s: %ld\n",
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
}
driver = crypto_skcipher_driver_name(tfm);
req = skcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
pr_err("alg: skcipher: failed to allocate request for %s\n",
driver);
err = -ENOMEM;
goto out;
}
tsgls = alloc_cipher_test_sglists();
if (!tsgls) {
pr_err("alg: skcipher: failed to allocate test buffers for %s\n",
driver);
err = -ENOMEM;
goto out;
}
err = test_skcipher(ENCRYPT, suite, req, tsgls);
if (err)
goto out;
err = test_skcipher(DECRYPT, suite, req, tsgls);
if (err)
goto out;
err = test_skcipher_vs_generic_impl(desc->generic_driver, req, tsgls);
out:
free_cipher_test_sglists(tsgls);
skcipher_request_free(req);
crypto_free_skcipher(tfm);
return err;
}
static int test_comp(struct crypto_comp *tfm,
const struct comp_testvec *ctemplate,
const struct comp_testvec *dtemplate,
int ctcount, int dtcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
char *output, *decomp_output;
unsigned int i;
int ret;
output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
if (!output)
return -ENOMEM;
decomp_output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
if (!decomp_output) {
kfree(output);
return -ENOMEM;
}
for (i = 0; i < ctcount; i++) {
int ilen;
unsigned int dlen = COMP_BUF_SIZE;
memset(output, 0, COMP_BUF_SIZE);
memset(decomp_output, 0, COMP_BUF_SIZE);
ilen = ctemplate[i].inlen;
ret = crypto_comp_compress(tfm, ctemplate[i].input,
ilen, output, &dlen);
if (ret) {
printk(KERN_ERR "alg: comp: compression failed "
"on test %d for %s: ret=%d\n", i + 1, algo,
-ret);
goto out;
}
ilen = dlen;
dlen = COMP_BUF_SIZE;
ret = crypto_comp_decompress(tfm, output,
ilen, decomp_output, &dlen);
if (ret) {
pr_err("alg: comp: compression failed: decompress: on test %d for %s failed: ret=%d\n",
i + 1, algo, -ret);
goto out;
}
if (dlen != ctemplate[i].inlen) {
printk(KERN_ERR "alg: comp: Compression test %d "
"failed for %s: output len = %d\n", i + 1, algo,
dlen);
ret = -EINVAL;
goto out;
}
if (memcmp(decomp_output, ctemplate[i].input,
ctemplate[i].inlen)) {
pr_err("alg: comp: compression failed: output differs: on test %d for %s\n",
i + 1, algo);
hexdump(decomp_output, dlen);
ret = -EINVAL;
goto out;
}
}
for (i = 0; i < dtcount; i++) {
int ilen;
unsigned int dlen = COMP_BUF_SIZE;
memset(decomp_output, 0, COMP_BUF_SIZE);
ilen = dtemplate[i].inlen;
ret = crypto_comp_decompress(tfm, dtemplate[i].input,
ilen, decomp_output, &dlen);
if (ret) {
printk(KERN_ERR "alg: comp: decompression failed "
"on test %d for %s: ret=%d\n", i + 1, algo,
-ret);
goto out;
}
if (dlen != dtemplate[i].outlen) {
printk(KERN_ERR "alg: comp: Decompression test %d "
"failed for %s: output len = %d\n", i + 1, algo,
dlen);
ret = -EINVAL;
goto out;
}
if (memcmp(decomp_output, dtemplate[i].output, dlen)) {
printk(KERN_ERR "alg: comp: Decompression test %d "
"failed for %s\n", i + 1, algo);
hexdump(decomp_output, dlen);
ret = -EINVAL;
goto out;
}
}
ret = 0;
out:
kfree(decomp_output);
kfree(output);
return ret;
}
static int test_acomp(struct crypto_acomp *tfm,
const struct comp_testvec *ctemplate,
const struct comp_testvec *dtemplate,
int ctcount, int dtcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
unsigned int i;
char *output, *decomp_out;
int ret;
struct scatterlist src, dst;
struct acomp_req *req;
struct crypto_wait wait;
output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
if (!output)
return -ENOMEM;
decomp_out = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
if (!decomp_out) {
kfree(output);
return -ENOMEM;
}
for (i = 0; i < ctcount; i++) {
unsigned int dlen = COMP_BUF_SIZE;
int ilen = ctemplate[i].inlen;
void *input_vec;
input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL);
if (!input_vec) {
ret = -ENOMEM;
goto out;
}
memset(output, 0, dlen);
crypto_init_wait(&wait);
sg_init_one(&src, input_vec, ilen);
sg_init_one(&dst, output, dlen);
req = acomp_request_alloc(tfm);
if (!req) {
pr_err("alg: acomp: request alloc failed for %s\n",
algo);
kfree(input_vec);
ret = -ENOMEM;
goto out;
}
acomp_request_set_params(req, &src, &dst, ilen, dlen);
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
if (ret) {
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
kfree(input_vec);
acomp_request_free(req);
goto out;
}
ilen = req->dlen;
dlen = COMP_BUF_SIZE;
sg_init_one(&src, output, ilen);
sg_init_one(&dst, decomp_out, dlen);
crypto_init_wait(&wait);
acomp_request_set_params(req, &src, &dst, ilen, dlen);
ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
if (ret) {
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
kfree(input_vec);
acomp_request_free(req);
goto out;
}
if (req->dlen != ctemplate[i].inlen) {
pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
i + 1, algo, req->dlen);
ret = -EINVAL;
kfree(input_vec);
acomp_request_free(req);
goto out;
}
if (memcmp(input_vec, decomp_out, req->dlen)) {
pr_err("alg: acomp: Compression test %d failed for %s\n",
i + 1, algo);
hexdump(decomp_out, req->dlen);
ret = -EINVAL;
kfree(input_vec);
acomp_request_free(req);
goto out;
}
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
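/*
* Extra test: a NULL dst with dlen == 0 asks the implementation to
* allocate the destination buffer itself; this must not crash.
*/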
crypto_init_wait(&wait);
sg_init_one(&src, input_vec, ilen);
acomp_request_set_params(req, &src, NULL, ilen, 0);
ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
if (ret) {
pr_err("alg: acomp: compression failed on NULL dst buffer test %d for %s: ret=%d\n",
i + 1, algo, -ret);
kfree(input_vec);
acomp_request_free(req);
goto out;
}
#endif
kfree(input_vec);
acomp_request_free(req);
}
for (i = 0; i < dtcount; i++) {
unsigned int dlen = COMP_BUF_SIZE;
int ilen = dtemplate[i].inlen;
void *input_vec;
input_vec = kmemdup(dtemplate[i].input, ilen, GFP_KERNEL);
if (!input_vec) {
ret = -ENOMEM;
goto out;
}
memset(output, 0, dlen);
crypto_init_wait(&wait);
sg_init_one(&src, input_vec, ilen);
sg_init_one(&dst, output, dlen);
req = acomp_request_alloc(tfm);
if (!req) {
pr_err("alg: acomp: request alloc failed for %s\n",
algo);
kfree(input_vec);
ret = -ENOMEM;
goto out;
}
acomp_request_set_params(req, &src, &dst, ilen, dlen);
acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
if (ret) {
pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret);
kfree(input_vec);
acomp_request_free(req);
goto out;
}
if (req->dlen != dtemplate[i].outlen) {
pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
i + 1, algo, req->dlen);
ret = -EINVAL;
kfree(input_vec);
acomp_request_free(req);
goto out;
}
if (memcmp(output, dtemplate[i].output, req->dlen)) {
pr_err("alg: acomp: Decompression test %d failed for %s\n",
i + 1, algo);
hexdump(output, req->dlen);
ret = -EINVAL;
kfree(input_vec);
acomp_request_free(req);
goto out;
}
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
crypto_init_wait(&wait);
acomp_request_set_params(req, &src, NULL, ilen, 0);
ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
if (ret) {
pr_err("alg: acomp: decompression failed on NULL dst buffer test %d for %s: ret=%d\n",
i + 1, algo, -ret);
kfree(input_vec);
acomp_request_free(req);
goto out;
}
#endif
kfree(input_vec);
acomp_request_free(req);
}
ret = 0;
out:
kfree(decomp_out);
kfree(output);
return ret;
}
static int test_cprng(struct crypto_rng *tfm,
const struct cprng_testvec *template,
unsigned int tcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
int err = 0, i, j, seedsize;
u8 *seed;
char result[32];
seedsize = crypto_rng_seedsize(tfm);
seed = kmalloc(seedsize, GFP_KERNEL);
if (!seed) {
printk(KERN_ERR "alg: cprng: Failed to allocate seed space "
"for %s\n", algo);
return -ENOMEM;
}
for (i = 0; i < tcount; i++) {
memset(result, 0, 32);
memcpy(seed, template[i].v, template[i].vlen);
memcpy(seed + template[i].vlen, template[i].key,
template[i].klen);
memcpy(seed + template[i].vlen + template[i].klen,
template[i].dt, template[i].dtlen);
err = crypto_rng_reset(tfm, seed, seedsize);
if (err) {
printk(KERN_ERR "alg: cprng: Failed to reset rng "
"for %s\n", algo);
goto out;
}
for (j = 0; j < template[i].loops; j++) {
err = crypto_rng_get_bytes(tfm, result,
template[i].rlen);
if (err < 0) {
printk(KERN_ERR "alg: cprng: Failed to obtain "
"the correct amount of random data for "
"%s (requested %d)\n", algo,
template[i].rlen);
goto out;
}
}
err = memcmp(result, template[i].result,
template[i].rlen);
if (err) {
printk(KERN_ERR "alg: cprng: Test %d failed for %s\n",
i, algo);
hexdump(result, template[i].rlen);
err = -EINVAL;
goto out;
}
}
out:
kfree(seed);
return err;
}
static int alg_test_cipher(const struct alg_test_desc *desc,
const char *driver, u32 type, u32 mask)
{
const struct cipher_test_suite *suite = &desc->suite.cipher;
struct crypto_cipher *tfm;
int err;
tfm = crypto_alloc_cipher(driver, type, mask);
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: cipher: Failed to load transform for "
"%s: %ld\n", driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
}
err = test_cipher(tfm, ENCRYPT, suite->vecs, suite->count);
if (!err)
err = test_cipher(tfm, DECRYPT, suite->vecs, suite->count);
crypto_free_cipher(tfm);
return err;
}
static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask)
{
struct crypto_comp *comp;
struct crypto_acomp *acomp;
int err;
u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
acomp = crypto_alloc_acomp(driver, type, mask);
if (IS_ERR(acomp)) {
pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
driver, PTR_ERR(acomp));
return PTR_ERR(acomp);
}
err = test_acomp(acomp, desc->suite.comp.comp.vecs,
desc->suite.comp.decomp.vecs,
desc->suite.comp.comp.count,
desc->suite.comp.decomp.count);
crypto_free_acomp(acomp);
} else {
comp = crypto_alloc_comp(driver, type, mask);
if (IS_ERR(comp)) {
pr_err("alg: comp: Failed to load transform for %s: %ld\n",
driver, PTR_ERR(comp));
return PTR_ERR(comp);
}
err = test_comp(comp, desc->suite.comp.comp.vecs,
desc->suite.comp.decomp.vecs,
desc->suite.comp.comp.count,
desc->suite.comp.decomp.count);
crypto_free_comp(comp);
}
return err;
}
static int alg_test_crc32c(const struct alg_test_desc *desc,
const char *driver, u32 type, u32 mask)
{
struct crypto_shash *tfm;
__le32 val;
int err;
err = alg_test_hash(desc, driver, type, mask);
if (err)
return err;
tfm = crypto_alloc_shash(driver, type, mask);
if (IS_ERR(tfm)) {
if (PTR_ERR(tfm) == -ENOENT) {
/*
* This crc32c implementation is only available through
* ahash API, not the shash API, so the remaining part
* of the test is not applicable to it.
*/
return 0;
}
printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
}
driver = crypto_shash_driver_name(tfm);
do {
SHASH_DESC_ON_STACK(shash, tfm);
u32 *ctx = (u32 *)shash_desc_ctx(shash);
shash->tfm = tfm;
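/*
* Seed the descriptor with an arbitrary intermediate CRC value; for
* crc32c, final() returns the bitwise inverse of that state, which is
* what the check below expects.
*/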
*ctx = 420553207;
err = crypto_shash_final(shash, (u8 *)&val);
if (err) {
printk(KERN_ERR "alg: crc32c: Operation failed for "
"%s: %d\n", driver, err);
break;
}
if (val != cpu_to_le32(~420553207)) {
pr_err("alg: crc32c: Test failed for %s: %u\n",
driver, le32_to_cpu(val));
err = -EINVAL;
}
} while (0);
crypto_free_shash(tfm);
return err;
}
static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask)
{
struct crypto_rng *rng;
int err;
rng = crypto_alloc_rng(driver, type, mask);
if (IS_ERR(rng)) {
printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(rng));
return PTR_ERR(rng);
}
err = test_cprng(rng, desc->suite.cprng.vecs, desc->suite.cprng.count);
crypto_free_rng(rng);
return err;
}
static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
const char *driver, u32 type, u32 mask)
{
int ret = -EAGAIN;
struct crypto_rng *drng;
struct drbg_test_data test_data;
struct drbg_string addtl, pers, testentropy;
unsigned char *buf = kzalloc(test->expectedlen, GFP_KERNEL);
if (!buf)
return -ENOMEM;
drng = crypto_alloc_rng(driver, type, mask);
if (IS_ERR(drng)) {
printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
"%s\n", driver);
kfree_sensitive(buf);
return -ENOMEM;
}
test_data.testentropy = &testentropy;
drbg_string_fill(&testentropy, test->entropy, test->entropylen);
drbg_string_fill(&pers, test->pers, test->perslen);
ret = crypto_drbg_reset_test(drng, &pers, &test_data);
if (ret) {
printk(KERN_ERR "alg: drbg: Failed to reset rng\n");
goto outbuf;
}
drbg_string_fill(&addtl, test->addtla, test->addtllen);
if (pr) {
drbg_string_fill(&testentropy, test->entpra, test->entprlen);
ret = crypto_drbg_get_bytes_addtl_test(drng,
buf, test->expectedlen, &addtl, &test_data);
} else {
ret = crypto_drbg_get_bytes_addtl(drng,
buf, test->expectedlen, &addtl);
}
if (ret < 0) {
printk(KERN_ERR "alg: drbg: could not obtain random data for "
"driver %s\n", driver);
goto outbuf;
}
drbg_string_fill(&addtl, test->addtlb, test->addtllen);
if (pr) {
drbg_string_fill(&testentropy, test->entprb, test->entprlen);
ret = crypto_drbg_get_bytes_addtl_test(drng,
buf, test->expectedlen, &addtl, &test_data);
} else {
ret = crypto_drbg_get_bytes_addtl(drng,
buf, test->expectedlen, &addtl);
}
if (ret < 0) {
printk(KERN_ERR "alg: drbg: could not obtain random data for "
"driver %s\n", driver);
goto outbuf;
}
ret = memcmp(test->expected, buf, test->expectedlen);
outbuf:
crypto_free_rng(drng);
kfree_sensitive(buf);
return ret;
}
static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask)
{
int err = 0;
int pr = 0;
int i = 0;
const struct drbg_testvec *template = desc->suite.drbg.vecs;
unsigned int tcount = desc->suite.drbg.count;
if (memcmp(driver, "drbg_pr_", 8) == 0)
pr = 1;
for (i = 0; i < tcount; i++) {
err = drbg_cavs_test(&template[i], pr, driver, type, mask);
if (err) {
printk(KERN_ERR "alg: drbg: Test %d failed for %s\n",
i, driver);
err = -EINVAL;
break;
}
}
return err;
}
static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
const char *alg)
{
struct kpp_request *req;
void *input_buf = NULL;
void *output_buf = NULL;
void *a_public = NULL;
void *a_ss = NULL;
void *shared_secret = NULL;
struct crypto_wait wait;
unsigned int out_len_max;
int err = -ENOMEM;
struct scatterlist src, dst;
req = kpp_request_alloc(tfm, GFP_KERNEL);
if (!req)
return err;
crypto_init_wait(&wait);
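/*
* Test flow: set party A's secret, generate A's public key, then
* compute the shared secret from party B's public key. For test
* vectors with generated keys, also compute party B's shared secret
* from A's public key and check that both sides agree.
*/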
err = crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size);
if (err < 0)
goto free_req;
out_len_max = crypto_kpp_maxsize(tfm);
output_buf = kzalloc(out_len_max, GFP_KERNEL);
if (!output_buf) {
err = -ENOMEM;
goto free_req;
}
/* Generating the public key takes no input; the base comes from the algorithm parameters */
kpp_request_set_input(req, NULL, 0);
sg_init_one(&dst, output_buf, out_len_max);
kpp_request_set_output(req, &dst, out_len_max);
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
/* Compute party A's public key */
err = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
if (err) {
pr_err("alg: %s: Party A: generate public key test failed. err %d\n",
alg, err);
goto free_output;
}
if (vec->genkey) {
/* Save party A's public key */
a_public = kmemdup(sg_virt(req->dst), out_len_max, GFP_KERNEL);
if (!a_public) {
err = -ENOMEM;
goto free_output;
}
} else {
/* Verify calculated public key */
if (memcmp(vec->expected_a_public, sg_virt(req->dst),
vec->expected_a_public_size)) {
pr_err("alg: %s: Party A: generate public key test failed. Invalid output\n",
alg);
err = -EINVAL;
goto free_output;
}
}
/* Calculate the shared secret using the counterpart's (party B's) public key. */
input_buf = kmemdup(vec->b_public, vec->b_public_size, GFP_KERNEL);
if (!input_buf) {
err = -ENOMEM;
goto free_output;
}
sg_init_one(&src, input_buf, vec->b_public_size);
sg_init_one(&dst, output_buf, out_len_max);
kpp_request_set_input(req, &src, vec->b_public_size);
kpp_request_set_output(req, &dst, out_len_max);
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
err = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);
if (err) {
pr_err("alg: %s: Party A: compute shared secret test failed. err %d\n",
alg, err);
goto free_all;
}
if (vec->genkey) {
/* Save the shared secret obtained by party A */
a_ss = kmemdup(sg_virt(req->dst), vec->expected_ss_size, GFP_KERNEL);
if (!a_ss) {
err = -ENOMEM;
goto free_all;
}
/*
* Calculate party B's shared secret by using party A's
* public key.
*/
err = crypto_kpp_set_secret(tfm, vec->b_secret,
vec->b_secret_size);
if (err < 0)
goto free_all;
sg_init_one(&src, a_public, vec->expected_a_public_size);
sg_init_one(&dst, output_buf, out_len_max);
kpp_request_set_input(req, &src, vec->expected_a_public_size);
kpp_request_set_output(req, &dst, out_len_max);
kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
err = crypto_wait_req(crypto_kpp_compute_shared_secret(req),
&wait);
if (err) {
pr_err("alg: %s: Party B: compute shared secret failed. err %d\n",
alg, err);
goto free_all;
}
shared_secret = a_ss;
} else {
shared_secret = (void *)vec->expected_ss;
}
/*
* Verify the shared secret, from which the user would derive a secret
* key using whatever hash/KDF they have chosen.
*/
if (memcmp(shared_secret, sg_virt(req->dst),
vec->expected_ss_size)) {
pr_err("alg: %s: compute shared secret test failed. Invalid output\n",
alg);
err = -EINVAL;
}
free_all:
kfree(a_ss);
kfree(input_buf);
free_output:
kfree(a_public);
kfree(output_buf);
free_req:
kpp_request_free(req);
return err;
}
static int test_kpp(struct crypto_kpp *tfm, const char *alg,
const struct kpp_testvec *vecs, unsigned int tcount)
{
int ret, i;
for (i = 0; i < tcount; i++) {
ret = do_test_kpp(tfm, vecs++, alg);
if (ret) {
pr_err("alg: %s: test failed on vector %d, err=%d\n",
alg, i + 1, ret);
return ret;
}
}
return 0;
}
static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask)
{
struct crypto_kpp *tfm;
int err = 0;
tfm = crypto_alloc_kpp(driver, type, mask);
if (IS_ERR(tfm)) {
pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
}
if (desc->suite.kpp.vecs)
err = test_kpp(tfm, desc->alg, desc->suite.kpp.vecs,
desc->suite.kpp.count);
crypto_free_kpp(tfm);
return err;
}
static u8 *test_pack_u32(u8 *dst, u32 val)
{
memcpy(dst, &val, sizeof(val));
return dst + sizeof(val);
}
static int test_akcipher_one(struct crypto_akcipher *tfm,
const struct akcipher_testvec *vecs)
{
char *xbuf[XBUFSIZE];
struct akcipher_request *req;
void *outbuf_enc = NULL;
void *outbuf_dec = NULL;
struct crypto_wait wait;
unsigned int out_len_max, out_len = 0;
int err = -ENOMEM;
struct scatterlist src, dst, src_tab[3];
const char *m, *c;
unsigned int m_size, c_size;
const char *op;
u8 *key, *ptr;
if (testmgr_alloc_buf(xbuf))
return err;
req = akcipher_request_alloc(tfm, GFP_KERNEL);
if (!req)
goto free_xbuf;
crypto_init_wait(&wait);
key = kmalloc(vecs->key_len + sizeof(u32) * 2 + vecs->param_len,
GFP_KERNEL);
if (!key)
goto free_req;
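/*
* Pack the key blob as: raw key bytes, then the algorithm id (u32),
* the parameter length (u32), and the parameters themselves.
*/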
memcpy(key, vecs->key, vecs->key_len);
ptr = key + vecs->key_len;
ptr = test_pack_u32(ptr, vecs->algo);
ptr = test_pack_u32(ptr, vecs->param_len);
memcpy(ptr, vecs->params, vecs->param_len);
if (vecs->public_key_vec)
err = crypto_akcipher_set_pub_key(tfm, key, vecs->key_len);
else
err = crypto_akcipher_set_priv_key(tfm, key, vecs->key_len);
if (err)
goto free_key;
/*
* First run the tests that do not require a private key, such as
* encrypt or verify.
*/
err = -ENOMEM;
out_len_max = crypto_akcipher_maxsize(tfm);
outbuf_enc = kzalloc(out_len_max, GFP_KERNEL);
if (!outbuf_enc)
goto free_key;
if (!vecs->siggen_sigver_test) {
m = vecs->m;
m_size = vecs->m_size;
c = vecs->c;
c_size = vecs->c_size;
op = "encrypt";
} else {
/* Swap args so we can keep the plaintext (digest)
* in vecs->m and the cooked signature in vecs->c.
*/
m = vecs->c; /* signature */
m_size = vecs->c_size;
c = vecs->m; /* digest */
c_size = vecs->m_size;
op = "verify";
}
err = -E2BIG;
if (WARN_ON(m_size > PAGE_SIZE))
goto free_all;
memcpy(xbuf[0], m, m_size);
sg_init_table(src_tab, 3);
sg_set_buf(&src_tab[0], xbuf[0], 8);
sg_set_buf(&src_tab[1], xbuf[0] + 8, m_size - 8);
if (vecs->siggen_sigver_test) {
if (WARN_ON(c_size > PAGE_SIZE))
goto free_all;
memcpy(xbuf[1], c, c_size);
sg_set_buf(&src_tab[2], xbuf[1], c_size);
akcipher_request_set_crypt(req, src_tab, NULL, m_size, c_size);
} else {
sg_init_one(&dst, outbuf_enc, out_len_max);
akcipher_request_set_crypt(req, src_tab, &dst, m_size,
out_len_max);
}
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
err = crypto_wait_req(vecs->siggen_sigver_test ?
/* Run asymmetric signature verification */
crypto_akcipher_verify(req) :
/* Run asymmetric encrypt */
crypto_akcipher_encrypt(req), &wait);
if (err) {
pr_err("alg: akcipher: %s test failed. err %d\n", op, err);
goto free_all;
}
if (!vecs->siggen_sigver_test && c) {
if (req->dst_len != c_size) {
pr_err("alg: akcipher: %s test failed. Invalid output len\n",
op);
err = -EINVAL;
goto free_all;
}
/* Verify that the encrypted message matches the expected ciphertext */
if (memcmp(c, outbuf_enc, c_size) != 0) {
pr_err("alg: akcipher: %s test failed. Invalid output\n",
op);
hexdump(outbuf_enc, c_size);
err = -EINVAL;
goto free_all;
}
}
/*
* Don't invoke the (decrypt or sign) tests, which require a private
* key, for vectors that have only a public key.
*/
if (vecs->public_key_vec) {
err = 0;
goto free_all;
}
outbuf_dec = kzalloc(out_len_max, GFP_KERNEL);
if (!outbuf_dec) {
err = -ENOMEM;
goto free_all;
}
if (!vecs->siggen_sigver_test && !c) {
c = outbuf_enc;
c_size = req->dst_len;
}
err = -E2BIG;
op = vecs->siggen_sigver_test ? "sign" : "decrypt";
if (WARN_ON(c_size > PAGE_SIZE))
goto free_all;
memcpy(xbuf[0], c, c_size);
sg_init_one(&src, xbuf[0], c_size);
sg_init_one(&dst, outbuf_dec, out_len_max);
crypto_init_wait(&wait);
akcipher_request_set_crypt(req, &src, &dst, c_size, out_len_max);
err = crypto_wait_req(vecs->siggen_sigver_test ?
/* Run asymmetric signature generation */
crypto_akcipher_sign(req) :
/* Run asymmetric decrypt */
crypto_akcipher_decrypt(req), &wait);
if (err) {
pr_err("alg: akcipher: %s test failed. err %d\n", op, err);
goto free_all;
}
out_len = req->dst_len;
if (out_len < m_size) {
pr_err("alg: akcipher: %s test failed. Invalid output len %u\n",
op, out_len);
err = -EINVAL;
goto free_all;
}
/* Verify that the decrypted message matches the original message */
if (memchr_inv(outbuf_dec, 0, out_len - m_size) ||
memcmp(m, outbuf_dec + out_len - m_size, m_size)) {
pr_err("alg: akcipher: %s test failed. Invalid output\n", op);
hexdump(outbuf_dec, out_len);
err = -EINVAL;
}
free_all:
kfree(outbuf_dec);
kfree(outbuf_enc);
free_key:
kfree(key);
free_req:
akcipher_request_free(req);
free_xbuf:
testmgr_free_buf(xbuf);
return err;
}
static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
const struct akcipher_testvec *vecs,
unsigned int tcount)
{
const char *algo =
crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
int ret, i;
for (i = 0; i < tcount; i++) {
ret = test_akcipher_one(tfm, vecs++);
if (!ret)
continue;
pr_err("alg: akcipher: test %d failed for %s, err=%d\n",
i + 1, algo, ret);
return ret;
}
return 0;
}
static int alg_test_akcipher(const struct alg_test_desc *desc,
const char *driver, u32 type, u32 mask)
{
struct crypto_akcipher *tfm;
int err = 0;
tfm = crypto_alloc_akcipher(driver, type, mask);
if (IS_ERR(tfm)) {
pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
}
if (desc->suite.akcipher.vecs)
err = test_akcipher(tfm, desc->alg, desc->suite.akcipher.vecs,
desc->suite.akcipher.count);
crypto_free_akcipher(tfm);
return err;
}
static int alg_test_null(const struct alg_test_desc *desc,
const char *driver, u32 type, u32 mask)
{
return 0;
}
#define ____VECS(tv) .vecs = tv, .count = ARRAY_SIZE(tv)
#define __VECS(tv) { ____VECS(tv) }
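/*
* For example, __VECS(aes_cbc_tv_template) expands to
* { .vecs = aes_cbc_tv_template, .count = ARRAY_SIZE(aes_cbc_tv_template) }.
*/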
/* Please keep this list sorted by algorithm name. */
static const struct alg_test_desc alg_test_descs[] = {
{
.alg = "adiantum(xchacha12,aes)",
.generic_driver = "adiantum(xchacha12-generic,aes-generic,nhpoly1305-generic)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(adiantum_xchacha12_aes_tv_template)
},
}, {
.alg = "adiantum(xchacha20,aes)",
.generic_driver = "adiantum(xchacha20-generic,aes-generic,nhpoly1305-generic)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(adiantum_xchacha20_aes_tv_template)
},
}, {
.alg = "aegis128",
.test = alg_test_aead,
.suite = {
.aead = __VECS(aegis128_tv_template)
}
}, {
.alg = "ansi_cprng",
.test = alg_test_cprng,
.suite = {
.cprng = __VECS(ansi_cprng_aes_tv_template)
}
}, {
.alg = "authenc(hmac(md5),ecb(cipher_null))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_md5_ecb_cipher_null_tv_template)
}
}, {
.alg = "authenc(hmac(sha1),cbc(aes))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = __VECS(hmac_sha1_aes_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha1),cbc(des))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha1_des_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha1),cbc(des3_ede))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha1_des3_ede_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha1),ctr(aes))",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha1),ecb(cipher_null))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha1_ecb_cipher_null_tv_temp)
}
}, {
.alg = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha224),cbc(des))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha224_des_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha224),cbc(des3_ede))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha224_des3_ede_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha256),cbc(aes))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = __VECS(hmac_sha256_aes_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha256),cbc(des))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha256_des_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha256),cbc(des3_ede))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha256_des3_ede_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha256),ctr(aes))",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha384),cbc(des))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha384_des_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha384),cbc(des3_ede))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha384_des3_ede_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha384),ctr(aes))",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha512),cbc(aes))",
.fips_allowed = 1,
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha512_aes_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha512),cbc(des))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha512_des_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha512),cbc(des3_ede))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(hmac_sha512_des3_ede_cbc_tv_temp)
}
}, {
.alg = "authenc(hmac(sha512),ctr(aes))",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "blake2b-160",
.test = alg_test_hash,
.fips_allowed = 0,
.suite = {
.hash = __VECS(blake2b_160_tv_template)
}
}, {
.alg = "blake2b-256",
.test = alg_test_hash,
.fips_allowed = 0,
.suite = {
.hash = __VECS(blake2b_256_tv_template)
}
}, {
.alg = "blake2b-384",
.test = alg_test_hash,
.fips_allowed = 0,
.suite = {
.hash = __VECS(blake2b_384_tv_template)
}
}, {
.alg = "blake2b-512",
.test = alg_test_hash,
.fips_allowed = 0,
.suite = {
.hash = __VECS(blake2b_512_tv_template)
}
}, {
.alg = "cbc(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = __VECS(aes_cbc_tv_template)
},
}, {
.alg = "cbc(anubis)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(anubis_cbc_tv_template)
},
}, {
.alg = "cbc(aria)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aria_cbc_tv_template)
},
}, {
.alg = "cbc(blowfish)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(bf_cbc_tv_template)
},
}, {
.alg = "cbc(camellia)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(camellia_cbc_tv_template)
},
}, {
.alg = "cbc(cast5)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(cast5_cbc_tv_template)
},
}, {
.alg = "cbc(cast6)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(cast6_cbc_tv_template)
},
}, {
.alg = "cbc(des)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(des_cbc_tv_template)
},
}, {
.alg = "cbc(des3_ede)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(des3_ede_cbc_tv_template)
},
}, {
/* Same as cbc(aes) except the key is stored in
* hardware secure memory which we reference by index
*/
.alg = "cbc(paes)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
/* Same as cbc(sm4) except the key is stored in
* hardware secure memory which we reference by index
*/
.alg = "cbc(psm4)",
.test = alg_test_null,
}, {
.alg = "cbc(serpent)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(serpent_cbc_tv_template)
},
}, {
.alg = "cbc(sm4)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(sm4_cbc_tv_template)
}
}, {
.alg = "cbc(twofish)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(tf_cbc_tv_template)
},
}, {
#if IS_ENABLED(CONFIG_CRYPTO_PAES_S390)
.alg = "cbc-paes-s390",
.fips_allowed = 1,
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_cbc_tv_template)
}
}, {
#endif
.alg = "cbcmac(aes)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(aes_cbcmac_tv_template)
}
}, {
.alg = "cbcmac(sm4)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(sm4_cbcmac_tv_template)
}
}, {
.alg = "ccm(aes)",
.generic_driver = "ccm_base(ctr(aes-generic),cbcmac(aes-generic))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
____VECS(aes_ccm_tv_template),
.einval_allowed = 1,
}
}
}, {
.alg = "ccm(sm4)",
.generic_driver = "ccm_base(ctr(sm4-generic),cbcmac(sm4-generic))",
.test = alg_test_aead,
.suite = {
.aead = {
____VECS(sm4_ccm_tv_template),
.einval_allowed = 1,
}
}
}, {
.alg = "cfb(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = __VECS(aes_cfb_tv_template)
},
}, {
.alg = "cfb(aria)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aria_cfb_tv_template)
},
}, {
.alg = "cfb(sm4)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(sm4_cfb_tv_template)
}
}, {
.alg = "chacha20",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(chacha20_tv_template)
},
}, {
.alg = "cmac(aes)",
.fips_allowed = 1,
.test = alg_test_hash,
.suite = {
.hash = __VECS(aes_cmac128_tv_template)
}
}, {
.alg = "cmac(camellia)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(camellia_cmac128_tv_template)
}
}, {
.alg = "cmac(des3_ede)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(des3_ede_cmac64_tv_template)
}
}, {
.alg = "cmac(sm4)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(sm4_cmac128_tv_template)
}
}, {
.alg = "compress_null",
.test = alg_test_null,
}, {
.alg = "crc32",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(crc32_tv_template)
}
}, {
.alg = "crc32c",
.test = alg_test_crc32c,
.fips_allowed = 1,
.suite = {
.hash = __VECS(crc32c_tv_template)
}
}, {
.alg = "crc64-rocksoft",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(crc64_rocksoft_tv_template)
}
}, {
.alg = "crct10dif",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(crct10dif_tv_template)
}
}, {
.alg = "ctr(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = __VECS(aes_ctr_tv_template)
}
}, {
.alg = "ctr(aria)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aria_ctr_tv_template)
}
}, {
.alg = "ctr(blowfish)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(bf_ctr_tv_template)
}
}, {
.alg = "ctr(camellia)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(camellia_ctr_tv_template)
}
}, {
.alg = "ctr(cast5)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(cast5_ctr_tv_template)
}
}, {
.alg = "ctr(cast6)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(cast6_ctr_tv_template)
}
}, {
.alg = "ctr(des)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(des_ctr_tv_template)
}
}, {
.alg = "ctr(des3_ede)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(des3_ede_ctr_tv_template)
}
}, {
/* Same as ctr(aes) except the key is stored in
* hardware secure memory which we reference by index
*/
.alg = "ctr(paes)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
/* Same as ctr(sm4) except the key is stored in
* hardware secure memory which we reference by index
*/
.alg = "ctr(psm4)",
.test = alg_test_null,
}, {
.alg = "ctr(serpent)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(serpent_ctr_tv_template)
}
}, {
.alg = "ctr(sm4)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(sm4_ctr_tv_template)
}
}, {
.alg = "ctr(twofish)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(tf_ctr_tv_template)
}
}, {
#if IS_ENABLED(CONFIG_CRYPTO_PAES_S390)
.alg = "ctr-paes-s390",
.fips_allowed = 1,
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_ctr_tv_template)
}
}, {
#endif
.alg = "cts(cbc(aes))",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = __VECS(cts_mode_tv_template)
}
}, {
/* Same as cts(cbc(aes)) except the key is stored in
* hardware secure memory which we reference by index
*/
.alg = "cts(cbc(paes))",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "cts(cbc(sm4))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(sm4_cts_tv_template)
}
}, {
.alg = "curve25519",
.test = alg_test_kpp,
.suite = {
.kpp = __VECS(curve25519_tv_template)
}
}, {
.alg = "deflate",
.test = alg_test_comp,
.fips_allowed = 1,
.suite = {
.comp = {
.comp = __VECS(deflate_comp_tv_template),
.decomp = __VECS(deflate_decomp_tv_template)
}
}
}, {
.alg = "dh",
.test = alg_test_kpp,
.suite = {
.kpp = __VECS(dh_tv_template)
}
}, {
.alg = "digest_null",
.test = alg_test_null,
}, {
.alg = "drbg_nopr_ctr_aes128",
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
.drbg = __VECS(drbg_nopr_ctr_aes128_tv_template)
}
}, {
.alg = "drbg_nopr_ctr_aes192",
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
.drbg = __VECS(drbg_nopr_ctr_aes192_tv_template)
}
}, {
.alg = "drbg_nopr_ctr_aes256",
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
.drbg = __VECS(drbg_nopr_ctr_aes256_tv_template)
}
}, {
/*
* There is no need to specifically test the DRBG with every
* backend cipher -- covered by drbg_nopr_hmac_sha256 test
*/
.alg = "drbg_nopr_hmac_sha1",
.fips_allowed = 1,
.test = alg_test_null,
}, {
.alg = "drbg_nopr_hmac_sha256",
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
.drbg = __VECS(drbg_nopr_hmac_sha256_tv_template)
}
}, {
/* covered by drbg_nopr_hmac_sha256 test */
.alg = "drbg_nopr_hmac_sha384",
.test = alg_test_null,
}, {
.alg = "drbg_nopr_hmac_sha512",
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
.drbg = __VECS(drbg_nopr_hmac_sha512_tv_template)
}
}, {
.alg = "drbg_nopr_sha1",
.fips_allowed = 1,
.test = alg_test_null,
}, {
.alg = "drbg_nopr_sha256",
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
.drbg = __VECS(drbg_nopr_sha256_tv_template)
}
}, {
/* covered by drbg_nopr_sha256 test */
.alg = "drbg_nopr_sha384",
.test = alg_test_null,
}, {
.alg = "drbg_nopr_sha512",
.fips_allowed = 1,
.test = alg_test_null,
}, {
.alg = "drbg_pr_ctr_aes128",
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
.drbg = __VECS(drbg_pr_ctr_aes128_tv_template)
}
}, {
/* covered by drbg_pr_ctr_aes128 test */
.alg = "drbg_pr_ctr_aes192",
.fips_allowed = 1,
.test = alg_test_null,
}, {
.alg = "drbg_pr_ctr_aes256",
.fips_allowed = 1,
.test = alg_test_null,
}, {
.alg = "drbg_pr_hmac_sha1",
.fips_allowed = 1,
.test = alg_test_null,
}, {
.alg = "drbg_pr_hmac_sha256",
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
.drbg = __VECS(drbg_pr_hmac_sha256_tv_template)
}
}, {
/* covered by drbg_pr_hmac_sha256 test */
.alg = "drbg_pr_hmac_sha384",
.test = alg_test_null,
}, {
.alg = "drbg_pr_hmac_sha512",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "drbg_pr_sha1",
.fips_allowed = 1,
.test = alg_test_null,
}, {
.alg = "drbg_pr_sha256",
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
.drbg = __VECS(drbg_pr_sha256_tv_template)
}
}, {
/* covered by drbg_pr_sha256 test */
.alg = "drbg_pr_sha384",
.test = alg_test_null,
}, {
.alg = "drbg_pr_sha512",
.fips_allowed = 1,
.test = alg_test_null,
}, {
.alg = "ecb(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = __VECS(aes_tv_template)
}
}, {
.alg = "ecb(anubis)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(anubis_tv_template)
}
}, {
.alg = "ecb(arc4)",
.generic_driver = "ecb(arc4)-generic",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(arc4_tv_template)
}
}, {
.alg = "ecb(aria)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aria_tv_template)
}
}, {
.alg = "ecb(blowfish)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(bf_tv_template)
}
}, {
.alg = "ecb(camellia)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(camellia_tv_template)
}
}, {
.alg = "ecb(cast5)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(cast5_tv_template)
}
}, {
.alg = "ecb(cast6)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(cast6_tv_template)
}
}, {
.alg = "ecb(cipher_null)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "ecb(des)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(des_tv_template)
}
}, {
.alg = "ecb(des3_ede)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(des3_ede_tv_template)
}
}, {
.alg = "ecb(fcrypt)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.vecs = fcrypt_pcbc_tv_template,
.count = 1
}
}
}, {
.alg = "ecb(khazad)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(khazad_tv_template)
}
}, {
/* Same as ecb(aes) except the key is stored in
* hardware secure memory which we reference by index
*/
.alg = "ecb(paes)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "ecb(seed)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(seed_tv_template)
}
}, {
.alg = "ecb(serpent)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(serpent_tv_template)
}
}, {
.alg = "ecb(sm4)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(sm4_tv_template)
}
}, {
.alg = "ecb(tea)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(tea_tv_template)
}
}, {
.alg = "ecb(twofish)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(tf_tv_template)
}
}, {
.alg = "ecb(xeta)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(xeta_tv_template)
}
}, {
.alg = "ecb(xtea)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(xtea_tv_template)
}
}, {
#if IS_ENABLED(CONFIG_CRYPTO_PAES_S390)
.alg = "ecb-paes-s390",
.fips_allowed = 1,
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_tv_template)
}
}, {
#endif
.alg = "ecdh-nist-p192",
.test = alg_test_kpp,
.suite = {
.kpp = __VECS(ecdh_p192_tv_template)
}
}, {
.alg = "ecdh-nist-p256",
.test = alg_test_kpp,
.fips_allowed = 1,
.suite = {
.kpp = __VECS(ecdh_p256_tv_template)
}
}, {
.alg = "ecdh-nist-p384",
.test = alg_test_kpp,
.fips_allowed = 1,
.suite = {
.kpp = __VECS(ecdh_p384_tv_template)
}
}, {
.alg = "ecdsa-nist-p192",
.test = alg_test_akcipher,
.suite = {
.akcipher = __VECS(ecdsa_nist_p192_tv_template)
}
}, {
.alg = "ecdsa-nist-p256",
.test = alg_test_akcipher,
.fips_allowed = 1,
.suite = {
.akcipher = __VECS(ecdsa_nist_p256_tv_template)
}
}, {
.alg = "ecdsa-nist-p384",
.test = alg_test_akcipher,
.fips_allowed = 1,
.suite = {
.akcipher = __VECS(ecdsa_nist_p384_tv_template)
}
}, {
.alg = "ecrdsa",
.test = alg_test_akcipher,
.suite = {
.akcipher = __VECS(ecrdsa_tv_template)
}
}, {
.alg = "essiv(authenc(hmac(sha256),cbc(aes)),sha256)",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = __VECS(essiv_hmac_sha256_aes_cbc_tv_temp)
}
}, {
.alg = "essiv(cbc(aes),sha256)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = __VECS(essiv_aes_cbc_tv_template)
}
}, {
#if IS_ENABLED(CONFIG_CRYPTO_DH_RFC7919_GROUPS)
.alg = "ffdhe2048(dh)",
.test = alg_test_kpp,
.fips_allowed = 1,
.suite = {
.kpp = __VECS(ffdhe2048_dh_tv_template)
}
}, {
.alg = "ffdhe3072(dh)",
.test = alg_test_kpp,
.fips_allowed = 1,
.suite = {
.kpp = __VECS(ffdhe3072_dh_tv_template)
}
}, {
.alg = "ffdhe4096(dh)",
.test = alg_test_kpp,
.fips_allowed = 1,
.suite = {
.kpp = __VECS(ffdhe4096_dh_tv_template)
}
}, {
.alg = "ffdhe6144(dh)",
.test = alg_test_kpp,
.fips_allowed = 1,
.suite = {
.kpp = __VECS(ffdhe6144_dh_tv_template)
}
}, {
.alg = "ffdhe8192(dh)",
.test = alg_test_kpp,
.fips_allowed = 1,
.suite = {
.kpp = __VECS(ffdhe8192_dh_tv_template)
}
}, {
#endif /* CONFIG_CRYPTO_DH_RFC7919_GROUPS */
.alg = "gcm(aes)",
.generic_driver = "gcm_base(ctr(aes-generic),ghash-generic)",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = __VECS(aes_gcm_tv_template)
}
}, {
.alg = "gcm(aria)",
.generic_driver = "gcm_base(ctr(aria-generic),ghash-generic)",
.test = alg_test_aead,
.suite = {
.aead = __VECS(aria_gcm_tv_template)
}
}, {
.alg = "gcm(sm4)",
.generic_driver = "gcm_base(ctr(sm4-generic),ghash-generic)",
.test = alg_test_aead,
.suite = {
.aead = __VECS(sm4_gcm_tv_template)
}
}, {
.alg = "ghash",
.test = alg_test_hash,
.suite = {
.hash = __VECS(ghash_tv_template)
}
}, {
.alg = "hctr2(aes)",
.generic_driver =
"hctr2_base(xctr(aes-generic),polyval-generic)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_hctr2_tv_template)
}
}, {
.alg = "hmac(md5)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(hmac_md5_tv_template)
}
}, {
.alg = "hmac(rmd160)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(hmac_rmd160_tv_template)
}
}, {
.alg = "hmac(sha1)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(hmac_sha1_tv_template)
}
}, {
.alg = "hmac(sha224)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(hmac_sha224_tv_template)
}
}, {
.alg = "hmac(sha256)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(hmac_sha256_tv_template)
}
}, {
.alg = "hmac(sha3-224)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(hmac_sha3_224_tv_template)
}
}, {
.alg = "hmac(sha3-256)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(hmac_sha3_256_tv_template)
}
}, {
.alg = "hmac(sha3-384)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(hmac_sha3_384_tv_template)
}
}, {
.alg = "hmac(sha3-512)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(hmac_sha3_512_tv_template)
}
}, {
.alg = "hmac(sha384)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(hmac_sha384_tv_template)
}
}, {
.alg = "hmac(sha512)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(hmac_sha512_tv_template)
}
}, {
.alg = "hmac(sm3)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(hmac_sm3_tv_template)
}
}, {
.alg = "hmac(streebog256)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(hmac_streebog256_tv_template)
}
}, {
.alg = "hmac(streebog512)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(hmac_streebog512_tv_template)
}
}, {
.alg = "jitterentropy_rng",
.fips_allowed = 1,
.test = alg_test_null,
}, {
.alg = "kw(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = __VECS(aes_kw_tv_template)
}
}, {
.alg = "lrw(aes)",
.generic_driver = "lrw(ecb(aes-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_lrw_tv_template)
}
}, {
.alg = "lrw(camellia)",
.generic_driver = "lrw(ecb(camellia-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(camellia_lrw_tv_template)
}
}, {
.alg = "lrw(cast6)",
.generic_driver = "lrw(ecb(cast6-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(cast6_lrw_tv_template)
}
}, {
.alg = "lrw(serpent)",
.generic_driver = "lrw(ecb(serpent-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(serpent_lrw_tv_template)
}
}, {
.alg = "lrw(twofish)",
.generic_driver = "lrw(ecb(twofish-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(tf_lrw_tv_template)
}
}, {
.alg = "lz4",
.test = alg_test_comp,
.fips_allowed = 1,
.suite = {
.comp = {
.comp = __VECS(lz4_comp_tv_template),
.decomp = __VECS(lz4_decomp_tv_template)
}
}
}, {
.alg = "lz4hc",
.test = alg_test_comp,
.fips_allowed = 1,
.suite = {
.comp = {
.comp = __VECS(lz4hc_comp_tv_template),
.decomp = __VECS(lz4hc_decomp_tv_template)
}
}
}, {
.alg = "lzo",
.test = alg_test_comp,
.fips_allowed = 1,
.suite = {
.comp = {
.comp = __VECS(lzo_comp_tv_template),
.decomp = __VECS(lzo_decomp_tv_template)
}
}
}, {
.alg = "lzo-rle",
.test = alg_test_comp,
.fips_allowed = 1,
.suite = {
.comp = {
.comp = __VECS(lzorle_comp_tv_template),
.decomp = __VECS(lzorle_decomp_tv_template)
}
}
}, {
.alg = "md4",
.test = alg_test_hash,
.suite = {
.hash = __VECS(md4_tv_template)
}
}, {
.alg = "md5",
.test = alg_test_hash,
.suite = {
.hash = __VECS(md5_tv_template)
}
}, {
.alg = "michael_mic",
.test = alg_test_hash,
.suite = {
.hash = __VECS(michael_mic_tv_template)
}
}, {
.alg = "nhpoly1305",
.test = alg_test_hash,
.suite = {
.hash = __VECS(nhpoly1305_tv_template)
}
}, {
.alg = "ofb(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = __VECS(aes_ofb_tv_template)
}
}, {
/* Same as ofb(aes) except the key is stored in
* hardware secure memory which we reference by index
*/
.alg = "ofb(paes)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "ofb(sm4)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(sm4_ofb_tv_template)
}
}, {
.alg = "pcbc(fcrypt)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(fcrypt_pcbc_tv_template)
}
}, {
.alg = "pkcs1pad(rsa,sha224)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "pkcs1pad(rsa,sha256)",
.test = alg_test_akcipher,
.fips_allowed = 1,
.suite = {
.akcipher = __VECS(pkcs1pad_rsa_tv_template)
}
}, {
.alg = "pkcs1pad(rsa,sha384)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "pkcs1pad(rsa,sha512)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "poly1305",
.test = alg_test_hash,
.suite = {
.hash = __VECS(poly1305_tv_template)
}
}, {
.alg = "polyval",
.test = alg_test_hash,
.suite = {
.hash = __VECS(polyval_tv_template)
}
}, {
.alg = "rfc3686(ctr(aes))",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = __VECS(aes_ctr_rfc3686_tv_template)
}
}, {
.alg = "rfc3686(ctr(sm4))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(sm4_ctr_rfc3686_tv_template)
}
}, {
.alg = "rfc4106(gcm(aes))",
.generic_driver = "rfc4106(gcm_base(ctr(aes-generic),ghash-generic))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
____VECS(aes_gcm_rfc4106_tv_template),
.einval_allowed = 1,
.aad_iv = 1,
}
}
}, {
.alg = "rfc4309(ccm(aes))",
.generic_driver = "rfc4309(ccm_base(ctr(aes-generic),cbcmac(aes-generic)))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
____VECS(aes_ccm_rfc4309_tv_template),
.einval_allowed = 1,
.aad_iv = 1,
}
}
}, {
.alg = "rfc4543(gcm(aes))",
.generic_driver = "rfc4543(gcm_base(ctr(aes-generic),ghash-generic))",
.test = alg_test_aead,
.suite = {
.aead = {
____VECS(aes_gcm_rfc4543_tv_template),
.einval_allowed = 1,
.aad_iv = 1,
}
}
}, {
.alg = "rfc7539(chacha20,poly1305)",
.test = alg_test_aead,
.suite = {
.aead = __VECS(rfc7539_tv_template)
}
}, {
.alg = "rfc7539esp(chacha20,poly1305)",
.test = alg_test_aead,
.suite = {
.aead = {
____VECS(rfc7539esp_tv_template),
.einval_allowed = 1,
.aad_iv = 1,
}
}
}, {
.alg = "rmd160",
.test = alg_test_hash,
.suite = {
.hash = __VECS(rmd160_tv_template)
}
}, {
.alg = "rsa",
.test = alg_test_akcipher,
.fips_allowed = 1,
.suite = {
.akcipher = __VECS(rsa_tv_template)
}
}, {
.alg = "sha1",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(sha1_tv_template)
}
}, {
.alg = "sha224",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(sha224_tv_template)
}
}, {
.alg = "sha256",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(sha256_tv_template)
}
}, {
.alg = "sha3-224",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(sha3_224_tv_template)
}
}, {
.alg = "sha3-256",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(sha3_256_tv_template)
}
}, {
.alg = "sha3-384",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(sha3_384_tv_template)
}
}, {
.alg = "sha3-512",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(sha3_512_tv_template)
}
}, {
.alg = "sha384",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(sha384_tv_template)
}
}, {
.alg = "sha512",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(sha512_tv_template)
}
}, {
.alg = "sm2",
.test = alg_test_akcipher,
.suite = {
.akcipher = __VECS(sm2_tv_template)
}
}, {
.alg = "sm3",
.test = alg_test_hash,
.suite = {
.hash = __VECS(sm3_tv_template)
}
}, {
.alg = "streebog256",
.test = alg_test_hash,
.suite = {
.hash = __VECS(streebog256_tv_template)
}
}, {
.alg = "streebog512",
.test = alg_test_hash,
.suite = {
.hash = __VECS(streebog512_tv_template)
}
}, {
.alg = "vmac64(aes)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(vmac64_aes_tv_template)
}
}, {
.alg = "wp256",
.test = alg_test_hash,
.suite = {
.hash = __VECS(wp256_tv_template)
}
}, {
.alg = "wp384",
.test = alg_test_hash,
.suite = {
.hash = __VECS(wp384_tv_template)
}
}, {
.alg = "wp512",
.test = alg_test_hash,
.suite = {
.hash = __VECS(wp512_tv_template)
}
}, {
.alg = "xcbc(aes)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(aes_xcbc128_tv_template)
}
}, {
.alg = "xcbc(sm4)",
.test = alg_test_hash,
.suite = {
.hash = __VECS(sm4_xcbc128_tv_template)
}
}, {
.alg = "xchacha12",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(xchacha12_tv_template)
},
}, {
.alg = "xchacha20",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(xchacha20_tv_template)
},
}, {
.alg = "xctr(aes)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_xctr_tv_template)
}
}, {
.alg = "xts(aes)",
.generic_driver = "xts(ecb(aes-generic))",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = __VECS(aes_xts_tv_template)
}
}, {
.alg = "xts(camellia)",
.generic_driver = "xts(ecb(camellia-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(camellia_xts_tv_template)
}
}, {
.alg = "xts(cast6)",
.generic_driver = "xts(ecb(cast6-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(cast6_xts_tv_template)
}
}, {
/* Same as xts(aes) except the key is stored in
* hardware secure memory which we reference by index
*/
.alg = "xts(paes)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "xts(serpent)",
.generic_driver = "xts(ecb(serpent-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(serpent_xts_tv_template)
}
}, {
.alg = "xts(sm4)",
.generic_driver = "xts(ecb(sm4-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(sm4_xts_tv_template)
}
}, {
.alg = "xts(twofish)",
.generic_driver = "xts(ecb(twofish-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(tf_xts_tv_template)
}
}, {
#if IS_ENABLED(CONFIG_CRYPTO_PAES_S390)
.alg = "xts-paes-s390",
.fips_allowed = 1,
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_xts_tv_template)
}
}, {
#endif
.alg = "xts4096(paes)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "xts512(paes)",
.test = alg_test_null,
.fips_allowed = 1,
}, {
.alg = "xxhash64",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = __VECS(xxhash64_tv_template)
}
}, {
.alg = "zlib-deflate",
.test = alg_test_comp,
.fips_allowed = 1,
.suite = {
.comp = {
.comp = __VECS(zlib_deflate_comp_tv_template),
.decomp = __VECS(zlib_deflate_decomp_tv_template)
}
}
}, {
.alg = "zstd",
.test = alg_test_comp,
.fips_allowed = 1,
.suite = {
.comp = {
.comp = __VECS(zstd_comp_tv_template),
.decomp = __VECS(zstd_decomp_tv_template)
}
}
}
};
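/*
 * alg_test_descs[] must stay sorted by ->alg and contain no duplicates:
 * alg_find_test() below binary-searches it. This check runs once (via
 * testmgr_onetime_init()) to catch ordering mistakes early.
 */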
static void alg_check_test_descs_order(void)
{
int i;
for (i = 1; i < ARRAY_SIZE(alg_test_descs); i++) {
int diff = strcmp(alg_test_descs[i - 1].alg,
alg_test_descs[i].alg);
if (WARN_ON(diff > 0)) {
pr_warn("testmgr: alg_test_descs entries in wrong order: '%s' before '%s'\n",
alg_test_descs[i - 1].alg,
alg_test_descs[i].alg);
}
if (WARN_ON(diff == 0)) {
pr_warn("testmgr: duplicate alg_test_descs entry: '%s'\n",
alg_test_descs[i].alg);
}
}
}
static void alg_check_testvec_configs(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++)
WARN_ON(!valid_testvec_config(
&default_cipher_testvec_configs[i]));
for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++)
WARN_ON(!valid_testvec_config(
&default_hash_testvec_configs[i]));
}
static void testmgr_onetime_init(void)
{
alg_check_test_descs_order();
alg_check_testvec_configs();
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
pr_warn("alg: extra crypto tests enabled. This is intended for developer use only.\n");
#endif
}
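/*
 * Binary search over the sorted alg_test_descs[]; returns the index of
 * the matching entry, or -1 if there is no test for @alg.
 */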
static int alg_find_test(const char *alg)
{
int start = 0;
int end = ARRAY_SIZE(alg_test_descs);
while (start < end) {
int i = (start + end) / 2;
int diff = strcmp(alg_test_descs[i].alg, alg);
if (diff > 0) {
end = i;
continue;
}
if (diff < 0) {
start = i + 1;
continue;
}
return i;
}
return -1;
}
static int alg_fips_disabled(const char *driver, const char *alg)
{
pr_info("alg: %s (%s) is disabled due to FIPS\n", alg, driver);
return -ECANCELED;
}
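/*
 * Run the self-tests for one algorithm instance: bare ciphers are looked
 * up under an "ecb(...)" wrapper; otherwise both the generic name and the
 * driver name are searched so that driver-specific vectors run in
 * addition to the generic ones. Failures panic in fips or panic_on_fail
 * mode.
 */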
int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
{
int i;
int j;
int rc;
if (!fips_enabled && notests) {
printk_once(KERN_INFO "alg: self-tests disabled\n");
return 0;
}
DO_ONCE(testmgr_onetime_init);
if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) {
char nalg[CRYPTO_MAX_ALG_NAME];
if (snprintf(nalg, sizeof(nalg), "ecb(%s)", alg) >=
sizeof(nalg))
return -ENAMETOOLONG;
i = alg_find_test(nalg);
if (i < 0)
goto notest;
if (fips_enabled && !alg_test_descs[i].fips_allowed)
goto non_fips_alg;
rc = alg_test_cipher(alg_test_descs + i, driver, type, mask);
goto test_done;
}
i = alg_find_test(alg);
j = alg_find_test(driver);
if (i < 0 && j < 0)
goto notest;
if (fips_enabled) {
if (j >= 0 && !alg_test_descs[j].fips_allowed)
return -EINVAL;
if (i >= 0 && !alg_test_descs[i].fips_allowed)
goto non_fips_alg;
}
rc = 0;
if (i >= 0)
rc |= alg_test_descs[i].test(alg_test_descs + i, driver,
type, mask);
if (j >= 0 && j != i)
rc |= alg_test_descs[j].test(alg_test_descs + j, driver,
type, mask);
test_done:
if (rc) {
if (fips_enabled || panic_on_fail) {
fips_fail_notify();
panic("alg: self-tests for %s (%s) failed in %s mode!\n",
driver, alg,
fips_enabled ? "fips" : "panic_on_fail");
}
pr_warn("alg: self-tests for %s using %s failed (rc=%d)",
alg, driver, rc);
WARN(rc != -ENOENT,
"alg: self-tests for %s using %s failed (rc=%d)",
alg, driver, rc);
} else {
if (fips_enabled)
pr_info("alg: self-tests for %s (%s) passed\n",
driver, alg);
}
return rc;
notest:
printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver);
if (type & CRYPTO_ALG_FIPS_INTERNAL)
return alg_fips_disabled(driver, alg);
return 0;
non_fips_alg:
return alg_fips_disabled(driver, alg);
}
#endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */
EXPORT_SYMBOL_GPL(alg_test);
| linux-master | crypto/testmgr.c |
/*
* VMAC: Message Authentication Code using Universal Hashing
*
* Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
*
* Copyright (c) 2009, Intel Corporation.
* Copyright (c) 2018, Google Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
/*
* Derived from:
* VMAC and VHASH Implementation by Ted Krovetz ([email protected]) and Wei Dai.
* This implementation is hereby placed in the public domain.
* The authors offer no warranty. Use at your own risk.
* Last modified: 17 APR 08, 1700 PDT
*/
#include <asm/unaligned.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
/*
* User definable settings.
*/
#define VMAC_TAG_LEN 64
#define VMAC_KEY_SIZE 128 /* Must be 128, 192 or 256 */
#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
#define VMAC_NHBYTES 128 /* Must be 2^i for some 3 < i < 13; standard = 128 */
#define VMAC_NONCEBYTES 16
/* per-transform (per-key) context */
struct vmac_tfm_ctx {
struct crypto_cipher *cipher;
u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
u64 polykey[2*VMAC_TAG_LEN/64];
u64 l3key[2*VMAC_TAG_LEN/64];
};
/* per-request context */
struct vmac_desc_ctx {
union {
u8 partial[VMAC_NHBYTES]; /* partial block */
__le64 partial_words[VMAC_NHBYTES / 8];
};
unsigned int partial_size; /* size of the partial block */
bool first_block_processed;
u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */
union {
u8 bytes[VMAC_NONCEBYTES];
__be64 pads[VMAC_NONCEBYTES / 8];
} nonce;
unsigned int nonce_size; /* nonce bytes filled so far */
};
/*
* Constants and masks
*/
#define UINT64_C(x) x##ULL
static const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */
static const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */
static const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
static const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
static const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
#define pe64_to_cpup le64_to_cpup /* Prefer little endian */
#ifdef __LITTLE_ENDIAN
#define INDEX_HIGH 1
#define INDEX_LOW 0
#else
#define INDEX_HIGH 0
#define INDEX_LOW 1
#endif
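/*
 * Word indexes used by the 32-bit poly_step_func() to address the low and
 * high 32-bit halves of a u64 independently of host endianness.
 */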
/*
* The following routines are used in this implementation. They are
* written via macros to simulate zero-overhead call-by-reference.
*
* MUL64: 64x64->128-bit multiplication
* PMUL64: assumes top bits cleared on inputs
* ADD128: 128x128->128-bit addition
*/
#define ADD128(rh, rl, ih, il) \
do { \
u64 _il = (il); \
(rl) += (_il); \
if ((rl) < (_il)) \
(rh)++; \
(rh) += (ih); \
} while (0)
#define MUL32(i1, i2) ((u64)(u32)(i1)*(u32)(i2))
#define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \
do { \
u64 _i1 = (i1), _i2 = (i2); \
u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \
rh = MUL32(_i1>>32, _i2>>32); \
rl = MUL32(_i1, _i2); \
ADD128(rh, rl, (m >> 32), (m << 32)); \
} while (0)
#define MUL64(rh, rl, i1, i2) \
do { \
u64 _i1 = (i1), _i2 = (i2); \
u64 m1 = MUL32(_i1, _i2>>32); \
u64 m2 = MUL32(_i1>>32, _i2); \
rh = MUL32(_i1>>32, _i2>>32); \
rl = MUL32(_i1, _i2); \
ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \
} while (0)
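/*
 * A worked instance of the schoolbook split behind MUL64: writing
 * i1 = (a << 32) | b and i2 = (c << 32) | d with 32-bit a..d,
 *
 *	i1 * i2 = ((a*c) << 64) + ((a*d + b*c) << 32) + b*d
 *
 * so rh:rl is seeded with a*c : b*d, and the cross products m1 = b*c and
 * m2 = a*d are folded in 32 bits higher via ADD128. PMUL64 merges the
 * two cross products into a single 64-bit sum m, which is why it is only
 * safe when the inputs have enough top bits clear for m not to overflow.
 */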
/*
* For highest performance the L1 NH and L2 polynomial hashes should be
* carefully implemented to take advantage of one's target architecture.
* Here these two hash functions are defined multiple times; once for
* 64-bit architectures and once for the remaining (32-bit)
* architectures.
* For each, nh_16 *must* be defined (works on multiples of 16 bytes).
* Optionally, nh_vmac_nhbytes can be defined (for multiples of
* VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
* NH computations at once).
*/
#ifdef CONFIG_64BIT
#define nh_16(mp, kp, nw, rh, rl) \
do { \
int i; u64 th, tl; \
rh = rl = 0; \
for (i = 0; i < nw; i += 2) { \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
} \
} while (0)
#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \
do { \
int i; u64 th, tl; \
rh1 = rl1 = rh = rl = 0; \
for (i = 0; i < nw; i += 2) { \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
ADD128(rh1, rl1, th, tl); \
} \
} while (0)
#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
do { \
int i; u64 th, tl; \
rh = rl = 0; \
for (i = 0; i < nw; i += 8) { \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
ADD128(rh, rl, th, tl); \
} \
} while (0)
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \
do { \
int i; u64 th, tl; \
rh1 = rl1 = rh = rl = 0; \
for (i = 0; i < nw; i += 8) { \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i], \
pe64_to_cpup((mp)+i+1)+(kp)[i+1]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2], \
pe64_to_cpup((mp)+i+1)+(kp)[i+3]); \
ADD128(rh1, rl1, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2], \
pe64_to_cpup((mp)+i+3)+(kp)[i+3]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4], \
pe64_to_cpup((mp)+i+3)+(kp)[i+5]); \
ADD128(rh1, rl1, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4], \
pe64_to_cpup((mp)+i+5)+(kp)[i+5]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6], \
pe64_to_cpup((mp)+i+5)+(kp)[i+7]); \
ADD128(rh1, rl1, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6], \
pe64_to_cpup((mp)+i+7)+(kp)[i+7]); \
ADD128(rh, rl, th, tl); \
MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8], \
pe64_to_cpup((mp)+i+7)+(kp)[i+9]); \
ADD128(rh1, rl1, th, tl); \
} \
} while (0)
#endif
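/*
 * One step of the L2 polynomial hash:
 * (ah,al) = (ah,al) * (kh,kl) + (mh,ml), partially reduced modulo
 * p127 = 2^127 - 1 (note 2^128 == 2 mod p127, hence the 2*kh term and
 * the doubling of t2h below); mpoly masks enough top bits of the poly
 * key for these partial reductions not to overflow.
 */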
#define poly_step(ah, al, kh, kl, mh, ml) \
do { \
u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \
/* compute ab*cd, put bd into result registers */ \
PMUL64(t3h, t3l, al, kh); \
PMUL64(t2h, t2l, ah, kl); \
PMUL64(t1h, t1l, ah, 2*kh); \
PMUL64(ah, al, al, kl); \
/* add 2 * ac to result */ \
ADD128(ah, al, t1h, t1l); \
/* add together ad + bc */ \
ADD128(t2h, t2l, t3h, t3l); \
/* now (ah,al), (t2l,2*t2h) need summing */ \
/* first add the high registers, carrying into t2h */ \
ADD128(t2h, ah, z, t2l); \
/* double t2h and add top bit of ah */ \
t2h = 2 * t2h + (ah >> 63); \
ah &= m63; \
/* now add the low registers */ \
ADD128(ah, al, mh, ml); \
ADD128(ah, al, z, t2h); \
} while (0)
#else /* ! CONFIG_64BIT */
#ifndef nh_16
#define nh_16(mp, kp, nw, rh, rl) \
do { \
u64 t1, t2, m1, m2, t; \
int i; \
rh = rl = t = 0; \
for (i = 0; i < nw; i += 2) { \
t1 = pe64_to_cpup(mp+i) + kp[i]; \
t2 = pe64_to_cpup(mp+i+1) + kp[i+1]; \
m2 = MUL32(t1 >> 32, t2); \
m1 = MUL32(t1, t2 >> 32); \
ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
MUL32(t1, t2)); \
rh += (u64)(u32)(m1 >> 32) \
+ (u32)(m2 >> 32); \
t += (u64)(u32)m1 + (u32)m2; \
} \
ADD128(rh, rl, (t >> 32), (t << 32)); \
} while (0)
#endif
static void poly_step_func(u64 *ahi, u64 *alo,
const u64 *kh, const u64 *kl,
const u64 *mh, const u64 *ml)
{
#define a0 (*(((u32 *)alo)+INDEX_LOW))
#define a1 (*(((u32 *)alo)+INDEX_HIGH))
#define a2 (*(((u32 *)ahi)+INDEX_LOW))
#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
#define k0 (*(((u32 *)kl)+INDEX_LOW))
#define k1 (*(((u32 *)kl)+INDEX_HIGH))
#define k2 (*(((u32 *)kh)+INDEX_LOW))
#define k3 (*(((u32 *)kh)+INDEX_HIGH))
u64 p, q, t;
u32 t2;
p = MUL32(a3, k3);
p += p;
p += *(u64 *)mh;
p += MUL32(a0, k2);
p += MUL32(a1, k1);
p += MUL32(a2, k0);
t = (u32)(p);
p >>= 32;
p += MUL32(a0, k3);
p += MUL32(a1, k2);
p += MUL32(a2, k1);
p += MUL32(a3, k0);
t |= ((u64)((u32)p & 0x7fffffff)) << 32;
p >>= 31;
p += (u64)(((u32 *)ml)[INDEX_LOW]);
p += MUL32(a0, k0);
q = MUL32(a1, k3);
q += MUL32(a2, k2);
q += MUL32(a3, k1);
q += q;
p += q;
t2 = (u32)(p);
p >>= 32;
p += (u64)(((u32 *)ml)[INDEX_HIGH]);
p += MUL32(a0, k1);
p += MUL32(a1, k0);
q = MUL32(a2, k3);
q += MUL32(a3, k2);
q += q;
p += q;
*(u64 *)(alo) = (p << 32) | t2;
p >>= 32;
*(u64 *)(ahi) = p + t;
#undef a0
#undef a1
#undef a2
#undef a3
#undef k0
#undef k1
#undef k2
#undef k3
}
#define poly_step(ah, al, kh, kl, mh, ml) \
poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))
#endif /* end of specialized NH and poly definitions */
/* At least nh_16 is defined. Define the others as needed here. */
#ifndef nh_16_2
#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \
do { \
nh_16(mp, kp, nw, rh, rl); \
nh_16(mp, ((kp)+2), nw, rh2, rl2); \
} while (0)
#endif
#ifndef nh_vmac_nhbytes
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
nh_16(mp, kp, nw, rh, rl)
#endif
#ifndef nh_vmac_nhbytes_2
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \
do { \
nh_vmac_nhbytes(mp, kp, nw, rh, rl); \
nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \
} while (0)
#endif
static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
u64 rh, rl, t, z = 0;
/* fully reduce (p1,p2)+(len,0) mod p127 */
t = p1 >> 63;
p1 &= m63;
ADD128(p1, p2, len, t);
/* At this point, (p1,p2) is at most 2^127+(len<<64) */
t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
ADD128(p1, p2, z, t);
p1 &= m63;
/* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
t = p1 + (p2 >> 32);
t += (t >> 32);
t += (u32)t > 0xfffffffeu;
p1 += (t >> 32);
p2 += (p1 << 32);
/* compute (p1+k1)%p64 and (p2+k2)%p64 */
p1 += k1;
p1 += (0 - (p1 < k1)) & 257;
p2 += k2;
p2 += (0 - (p2 < k2)) & 257;
/* compute (p1+k1)*(p2+k2)%p64 */
MUL64(rh, rl, p1, p2);
t = rh >> 56;
ADD128(t, rl, z, rh);
rh <<= 8;
ADD128(t, rl, z, rh);
t += t << 8;
rl += t;
rl += (0 - (rl < t)) & 257;
rl += (0 - (rl > p64-1)) & 257;
return rl;
}
/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
struct vmac_desc_ctx *dctx,
const __le64 *mptr, unsigned int blocks)
{
const u64 *kptr = tctx->nhkey;
const u64 pkh = tctx->polykey[0];
const u64 pkl = tctx->polykey[1];
u64 ch = dctx->polytmp[0];
u64 cl = dctx->polytmp[1];
u64 rh, rl;
if (!dctx->first_block_processed) {
dctx->first_block_processed = true;
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
ADD128(ch, cl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
blocks--;
}
while (blocks--) {
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
poly_step(ch, cl, pkh, pkl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
}
dctx->polytmp[0] = ch;
dctx->polytmp[1] = cl;
}
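/*
 * Derive the three VHASH subkeys by encrypting counter blocks under the
 * cipher key: blocks whose first byte is 0x80 yield the NH key, 0xC0 the
 * poly key (masked with mpoly), and 0xE0 the L3 key (rejection-sampled
 * so that both words are < p64).
 */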
static int vmac_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
__be64 out[2];
u8 in[16] = { 0 };
unsigned int i;
int err;
if (keylen != VMAC_KEY_LEN)
return -EINVAL;
err = crypto_cipher_setkey(tctx->cipher, key, keylen);
if (err)
return err;
/* Fill nh key */
in[0] = 0x80;
for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
tctx->nhkey[i] = be64_to_cpu(out[0]);
tctx->nhkey[i+1] = be64_to_cpu(out[1]);
in[15]++;
}
/* Fill poly key */
in[0] = 0xC0;
in[15] = 0;
for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
in[15]++;
}
/* Fill ip key */
in[0] = 0xE0;
in[15] = 0;
for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
do {
crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
tctx->l3key[i] = be64_to_cpu(out[0]);
tctx->l3key[i+1] = be64_to_cpu(out[1]);
in[15]++;
} while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
}
return 0;
}
static int vmac_init(struct shash_desc *desc)
{
const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
dctx->partial_size = 0;
dctx->first_block_processed = false;
memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
dctx->nonce_size = 0;
return 0;
}
static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
unsigned int n;
/* Nonce is passed as first VMAC_NONCEBYTES bytes of data */
if (dctx->nonce_size < VMAC_NONCEBYTES) {
n = min(len, VMAC_NONCEBYTES - dctx->nonce_size);
memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
dctx->nonce_size += n;
p += n;
len -= n;
}
if (dctx->partial_size) {
n = min(len, VMAC_NHBYTES - dctx->partial_size);
memcpy(&dctx->partial[dctx->partial_size], p, n);
dctx->partial_size += n;
p += n;
len -= n;
if (dctx->partial_size == VMAC_NHBYTES) {
vhash_blocks(tctx, dctx, dctx->partial_words, 1);
dctx->partial_size = 0;
}
}
if (len >= VMAC_NHBYTES) {
n = round_down(len, VMAC_NHBYTES);
/* TODO: 'p' may be misaligned here */
vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
p += n;
len -= n;
}
if (len) {
memcpy(dctx->partial, p, len);
dctx->partial_size = len;
}
return 0;
}
static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
struct vmac_desc_ctx *dctx)
{
unsigned int partial = dctx->partial_size;
u64 ch = dctx->polytmp[0];
u64 cl = dctx->polytmp[1];
/* L1 and L2-hash the final block if needed */
if (partial) {
/* Zero-pad to next 128-bit boundary */
unsigned int n = round_up(partial, 16);
u64 rh, rl;
memset(&dctx->partial[partial], 0, n - partial);
nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
rh &= m62;
if (dctx->first_block_processed)
poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
rh, rl);
else
ADD128(ch, cl, rh, rl);
}
/* L3-hash the 128-bit output of L2-hash */
return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
}
static int vmac_final(struct shash_desc *desc, u8 *out)
{
const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
int index;
u64 hash, pad;
if (dctx->nonce_size != VMAC_NONCEBYTES)
return -EINVAL;
/*
* The VMAC specification requires a nonce at least 1 bit shorter than
* the block cipher's block length, so we actually only accept a 127-bit
* nonce. We define the unused bit to be the first one and require that
* it be 0, so the needed prepending of a 0 bit is implicit.
*/
if (dctx->nonce.bytes[0] & 0x80)
return -EINVAL;
/* Finish calculating the VHASH of the message */
hash = vhash_final(tctx, dctx);
/* Generate pseudorandom pad by encrypting the nonce */
BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8));
index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1;
dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1;
crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes,
dctx->nonce.bytes);
pad = be64_to_cpu(dctx->nonce.pads[index]);
/* The VMAC is the sum of VHASH and the pseudorandom pad */
put_unaligned_be64(hash + pad, out);
return 0;
}
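/*
 * Usage sketch (assumptions: process context, an available "vmac64(aes)"
 * implementation, error handling elided):
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("vmac64(aes)", 0, 0);
 *	u8 tag[VMAC_TAG_LEN / 8];
 *
 *	crypto_shash_setkey(tfm, key, VMAC_KEY_LEN);
 *	// The 16-byte nonce must be passed as the *first* bytes of the
 *	// data, with its top bit clear (see vmac_update()/vmac_final()).
 *	crypto_shash_tfm_digest(tfm, nonce_then_msg, 16 + msglen, tag);
 *	crypto_free_shash(tfm);
 */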
static int vmac_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
tctx->cipher = cipher;
return 0;
}
static void vmac_exit_tfm(struct crypto_tfm *tfm)
{
struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(tctx->cipher);
}
static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct shash_instance *inst;
struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = shash_instance_ctx(inst);
err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_cipher_alg(spawn);
err = -EINVAL;
if (alg->cra_blocksize != VMAC_NONCEBYTES)
goto err_free_inst;
err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
if (err)
goto err_free_inst;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_alignmask = alg->cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
inst->alg.base.cra_init = vmac_init_tfm;
inst->alg.base.cra_exit = vmac_exit_tfm;
inst->alg.descsize = sizeof(struct vmac_desc_ctx);
inst->alg.digestsize = VMAC_TAG_LEN / 8;
inst->alg.init = vmac_init;
inst->alg.update = vmac_update;
inst->alg.final = vmac_final;
inst->alg.setkey = vmac_setkey;
inst->free = shash_free_singlespawn_instance;
err = shash_register_instance(tmpl, inst);
if (err) {
err_free_inst:
shash_free_singlespawn_instance(inst);
}
return err;
}
static struct crypto_template vmac64_tmpl = {
.name = "vmac64",
.create = vmac_create,
.module = THIS_MODULE,
};
static int __init vmac_module_init(void)
{
return crypto_register_template(&vmac64_tmpl);
}
static void __exit vmac_module_exit(void)
{
crypto_unregister_template(&vmac64_tmpl);
}
subsys_initcall(vmac_module_init);
module_exit(vmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
MODULE_ALIAS_CRYPTO("vmac64");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
| linux-master | crypto/vmac.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RSA key extract helper
*
* Copyright (c) 2015, Intel Corporation
* Authors: Tadeusz Struk <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <crypto/internal/rsa.h>
#include "rsapubkey.asn1.h"
#include "rsaprivkey.asn1.h"
int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct rsa_key *key = context;
const u8 *ptr = value;
size_t n_sz = vlen;
/* invalid key provided */
if (!value || !vlen)
return -EINVAL;
if (fips_enabled) {
while (n_sz && !*ptr) {
ptr++;
n_sz--;
}
/* In FIPS mode only allow key size 2K and higher */
if (n_sz < 256) {
pr_err("RSA: key size not allowed in FIPS mode\n");
return -EINVAL;
}
}
key->n = value;
key->n_sz = vlen;
return 0;
}
int rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct rsa_key *key = context;
/* invalid key provided */
if (!value || !key->n_sz || !vlen || vlen > key->n_sz)
return -EINVAL;
key->e = value;
key->e_sz = vlen;
return 0;
}
int rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct rsa_key *key = context;
/* invalid key provided */
if (!value || !key->n_sz || !vlen || vlen > key->n_sz)
return -EINVAL;
key->d = value;
key->d_sz = vlen;
return 0;
}
int rsa_get_p(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct rsa_key *key = context;
/* invalid key provided */
if (!value || !vlen || vlen > key->n_sz)
return -EINVAL;
key->p = value;
key->p_sz = vlen;
return 0;
}
int rsa_get_q(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct rsa_key *key = context;
/* invalid key provided */
if (!value || !vlen || vlen > key->n_sz)
return -EINVAL;
key->q = value;
key->q_sz = vlen;
return 0;
}
int rsa_get_dp(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct rsa_key *key = context;
/* invalid key provided */
if (!value || !vlen || vlen > key->n_sz)
return -EINVAL;
key->dp = value;
key->dp_sz = vlen;
return 0;
}
int rsa_get_dq(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct rsa_key *key = context;
/* invalid key provided */
if (!value || !vlen || vlen > key->n_sz)
return -EINVAL;
key->dq = value;
key->dq_sz = vlen;
return 0;
}
int rsa_get_qinv(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct rsa_key *key = context;
/* invalid key provided */
if (!value || !vlen || vlen > key->n_sz)
return -EINVAL;
key->qinv = value;
key->qinv_sz = vlen;
return 0;
}
/**
* rsa_parse_pub_key() - decodes the BER encoded buffer and stores, in the
* provided struct rsa_key, pointers to the raw key as is, so that
* the caller can copy it or MPI parse it, etc.
*
* @rsa_key: struct rsa_key key representation
* @key: key in BER format
* @key_len: length of key
*
* Return: 0 on success or error code in case of error
*/
int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key,
unsigned int key_len)
{
return asn1_ber_decoder(&rsapubkey_decoder, rsa_key, key, key_len);
}
EXPORT_SYMBOL_GPL(rsa_parse_pub_key);
/**
* rsa_parse_priv_key() - decodes the BER encoded buffer and stores, in
* the provided struct rsa_key, pointers to the raw key
* as is, so that the caller can copy it or MPI parse it,
* etc.
*
* @rsa_key: struct rsa_key key representation
* @key: key in BER format
* @key_len: length of key
*
* Return: 0 on success or error code in case of error
*/
int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key,
unsigned int key_len)
{
return asn1_ber_decoder(&rsaprivkey_decoder, rsa_key, key, key_len);
}
EXPORT_SYMBOL_GPL(rsa_parse_priv_key);
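/*
 * Usage sketch (assumption: the BER buffer outlives the struct rsa_key,
 * since only pointers into it are stored):
 *
 *	struct rsa_key raw_key = {};
 *	int err = rsa_parse_pub_key(&raw_key, key, keylen);
 *
 *	if (!err)
 *		n = mpi_read_raw_data(raw_key.n, raw_key.n_sz);
 *
 * This mirrors how crypto/rsa.c consumes the parsed key.
 */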
| linux-master | crypto/rsa_helper.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Crypto user configuration API.
*
* Copyright (C) 2017-2018 Corentin Labbe <[email protected]>
*
*/
#include <crypto/algapi.h>
#include <crypto/internal/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <net/netlink.h>
#include <net/sock.h>
#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
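/* State carried while building a CRYPTO_MSG_GETSTAT reply for one request. */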
struct crypto_dump_info {
struct sk_buff *in_skb;
struct sk_buff *out_skb;
u32 nlmsg_seq;
u16 nlmsg_flags;
};
static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_stat_cipher rcipher;
memset(&rcipher, 0, sizeof(rcipher));
strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}
static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_stat_compress rcomp;
memset(&rcomp, 0, sizeof(rcomp));
strscpy(rcomp.type, "compression", sizeof(rcomp.type));
return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp);
}
static int crypto_reportstat_one(struct crypto_alg *alg,
struct crypto_user_alg *ualg,
struct sk_buff *skb)
{
memset(ualg, 0, sizeof(*ualg));
strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
strscpy(ualg->cru_driver_name, alg->cra_driver_name,
sizeof(ualg->cru_driver_name));
strscpy(ualg->cru_module_name, module_name(alg->cra_module),
sizeof(ualg->cru_module_name));
ualg->cru_type = 0;
ualg->cru_mask = 0;
ualg->cru_flags = alg->cra_flags;
ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
goto nla_put_failure;
if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
struct crypto_stat_larval rl;
memset(&rl, 0, sizeof(rl));
strscpy(rl.type, "larval", sizeof(rl.type));
if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, sizeof(rl), &rl))
goto nla_put_failure;
goto out;
}
if (alg->cra_type && alg->cra_type->report_stat) {
if (alg->cra_type->report_stat(skb, alg))
goto nla_put_failure;
goto out;
}
switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
case CRYPTO_ALG_TYPE_CIPHER:
if (crypto_report_cipher(skb, alg))
goto nla_put_failure;
break;
case CRYPTO_ALG_TYPE_COMPRESS:
if (crypto_report_comp(skb, alg))
goto nla_put_failure;
break;
default:
pr_err("ERROR: Unhandled alg %d in %s\n",
alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL),
__func__);
}
out:
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static int crypto_reportstat_alg(struct crypto_alg *alg,
struct crypto_dump_info *info)
{
struct sk_buff *in_skb = info->in_skb;
struct sk_buff *skb = info->out_skb;
struct nlmsghdr *nlh;
struct crypto_user_alg *ualg;
int err = 0;
nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
CRYPTO_MSG_GETSTAT, sizeof(*ualg), info->nlmsg_flags);
if (!nlh) {
err = -EMSGSIZE;
goto out;
}
ualg = nlmsg_data(nlh);
err = crypto_reportstat_one(alg, ualg, skb);
if (err) {
nlmsg_cancel(skb, nlh);
goto out;
}
nlmsg_end(skb, nlh);
out:
return err;
}
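/*
 * Handle a CRYPTO_MSG_GETSTAT request: look up the algorithm named in the
 * payload, build a single-algorithm reply and unicast it back to the
 * requesting socket.
 */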
int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(in_skb->sk);
struct crypto_user_alg *p = nlmsg_data(in_nlh);
struct crypto_alg *alg;
struct sk_buff *skb;
struct crypto_dump_info info;
int err;
if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
return -EINVAL;
alg = crypto_alg_match(p, 0);
if (!alg)
return -ENOENT;
err = -ENOMEM;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb)
goto drop_alg;
info.in_skb = in_skb;
info.out_skb = skb;
info.nlmsg_seq = in_nlh->nlmsg_seq;
info.nlmsg_flags = 0;
err = crypto_reportstat_alg(alg, &info);
drop_alg:
crypto_mod_put(alg);
if (err) {
kfree_skb(skb);
return err;
}
return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
}
MODULE_LICENSE("GPL");
| linux-master | crypto/crypto_user_stat.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* authencesn.c - AEAD wrapper for IPsec with extended sequence numbers,
* derived from authenc.c
*
* Copyright (C) 2010 secunet Security Networks AG
* Copyright (C) 2010 Steffen Klassert <[email protected]>
* Copyright (c) 2015 Herbert Xu <[email protected]>
*/
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
struct authenc_esn_instance_ctx {
struct crypto_ahash_spawn auth;
struct crypto_skcipher_spawn enc;
};
struct crypto_authenc_esn_ctx {
unsigned int reqoff;
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
struct crypto_sync_skcipher *null;
};
struct authenc_esn_request_ctx {
struct scatterlist src[2];
struct scatterlist dst[2];
char tail[];
};
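/*
 * ->tail holds two digest-size buffers (the computed and the received
 * ICV) followed, at ctx->reqoff, by space for either the ahash or the
 * skcipher sub-request -- see crypto_authenc_esn_init_tfm().
 */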
static void authenc_esn_request_complete(struct aead_request *req, int err)
{
if (err != -EINPROGRESS)
aead_request_complete(req, err);
}
static int crypto_authenc_esn_setauthsize(struct crypto_aead *authenc_esn,
unsigned int authsize)
{
if (authsize > 0 && authsize < 4)
return -EINVAL;
return 0;
}
static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
unsigned int keylen)
{
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct crypto_ahash *auth = ctx->auth;
struct crypto_skcipher *enc = ctx->enc;
struct crypto_authenc_keys keys;
int err = -EINVAL;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto out;
crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) &
CRYPTO_TFM_REQ_MASK);
err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
if (err)
goto out;
crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
out:
memzero_explicit(&keys, sizeof(keys));
return err;
}
static int crypto_authenc_esn_genicv_tail(struct aead_request *req,
unsigned int flags)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_ahash *auth = ctx->auth;
u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail,
crypto_ahash_alignmask(auth) + 1);
unsigned int authsize = crypto_aead_authsize(authenc_esn);
unsigned int assoclen = req->assoclen;
unsigned int cryptlen = req->cryptlen;
struct scatterlist *dst = req->dst;
u32 tmp[2];
/* Move high-order bits of sequence number back. */
scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
scatterwalk_map_and_copy(hash, dst, assoclen + cryptlen, authsize, 1);
return 0;
}
static void authenc_esn_geniv_ahash_done(void *data, int err)
{
struct aead_request *req = data;
err = err ?: crypto_authenc_esn_genicv_tail(req, 0);
aead_request_complete(req, err);
}
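/*
 * Compute the ICV for ESN: the 32 high-order sequence-number bits sit
 * right after the SPI in the associated data, but must be authenticated
 * *after* the payload (RFC 4303), so they are temporarily swapped with
 * the 4 bytes the caller reserved past the ciphertext and the hash is
 * taken from offset 4 onward; crypto_authenc_esn_genicv_tail() undoes
 * the shuffle and stores the ICV.
 */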
static int crypto_authenc_esn_genicv(struct aead_request *req,
unsigned int flags)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct crypto_ahash *auth = ctx->auth;
u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail,
crypto_ahash_alignmask(auth) + 1);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
unsigned int authsize = crypto_aead_authsize(authenc_esn);
unsigned int assoclen = req->assoclen;
unsigned int cryptlen = req->cryptlen;
struct scatterlist *dst = req->dst;
u32 tmp[2];
if (!authsize)
return 0;
/* Move high-order bits of sequence number to the end. */
scatterwalk_map_and_copy(tmp, dst, 0, 8, 0);
scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
ahash_request_set_tfm(ahreq, auth);
ahash_request_set_crypt(ahreq, dst, hash, assoclen + cryptlen);
ahash_request_set_callback(ahreq, flags,
authenc_esn_geniv_ahash_done, req);
return crypto_ahash_digest(ahreq) ?:
crypto_authenc_esn_genicv_tail(req, aead_request_flags(req));
}
static void crypto_authenc_esn_encrypt_done(void *data, int err)
{
struct aead_request *areq = data;
if (!err)
err = crypto_authenc_esn_genicv(areq, 0);
authenc_esn_request_complete(areq, err);
}
static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
skcipher_request_set_sync_tfm(skreq, ctx->null);
skcipher_request_set_callback(skreq, aead_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL);
return crypto_skcipher_encrypt(skreq);
}
static int crypto_authenc_esn_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct skcipher_request *skreq = (void *)(areq_ctx->tail +
ctx->reqoff);
struct crypto_skcipher *enc = ctx->enc;
unsigned int assoclen = req->assoclen;
unsigned int cryptlen = req->cryptlen;
struct scatterlist *src, *dst;
int err;
sg_init_table(areq_ctx->src, 2);
src = scatterwalk_ffwd(areq_ctx->src, req->src, assoclen);
dst = src;
if (req->src != req->dst) {
err = crypto_authenc_esn_copy(req, assoclen);
if (err)
return err;
sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
}
skcipher_request_set_tfm(skreq, enc);
skcipher_request_set_callback(skreq, aead_request_flags(req),
crypto_authenc_esn_encrypt_done, req);
skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
err = crypto_skcipher_encrypt(skreq);
if (err)
return err;
return crypto_authenc_esn_genicv(req, aead_request_flags(req));
}
static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
unsigned int flags)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct skcipher_request *skreq = (void *)(areq_ctx->tail +
ctx->reqoff);
struct crypto_ahash *auth = ctx->auth;
u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail,
crypto_ahash_alignmask(auth) + 1);
unsigned int cryptlen = req->cryptlen - authsize;
unsigned int assoclen = req->assoclen;
struct scatterlist *dst = req->dst;
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
u32 tmp[2];
if (!authsize)
goto decrypt;
/* Move high-order bits of sequence number back. */
scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
if (crypto_memneq(ihash, ohash, authsize))
return -EBADMSG;
decrypt:
sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
skcipher_request_set_tfm(skreq, ctx->enc);
skcipher_request_set_callback(skreq, flags,
req->base.complete, req->base.data);
skcipher_request_set_crypt(skreq, dst, dst, cryptlen, req->iv);
return crypto_skcipher_decrypt(skreq);
}
static void authenc_esn_verify_ahash_done(void *data, int err)
{
struct aead_request *req = data;
err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
authenc_esn_request_complete(req, err);
}
static int crypto_authenc_esn_decrypt(struct aead_request *req)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
unsigned int authsize = crypto_aead_authsize(authenc_esn);
struct crypto_ahash *auth = ctx->auth;
u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail,
crypto_ahash_alignmask(auth) + 1);
unsigned int assoclen = req->assoclen;
unsigned int cryptlen = req->cryptlen;
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
struct scatterlist *dst = req->dst;
u32 tmp[2];
int err;
cryptlen -= authsize;
if (req->src != dst) {
err = crypto_authenc_esn_copy(req, assoclen + cryptlen);
if (err)
return err;
}
scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
authsize, 0);
if (!authsize)
goto tail;
/* Move high-order bits of sequence number to the end. */
scatterwalk_map_and_copy(tmp, dst, 0, 8, 0);
scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
ahash_request_set_tfm(ahreq, auth);
ahash_request_set_crypt(ahreq, dst, ohash, assoclen + cryptlen);
ahash_request_set_callback(ahreq, aead_request_flags(req),
authenc_esn_verify_ahash_done, req);
err = crypto_ahash_digest(ahreq);
if (err)
return err;
tail:
return crypto_authenc_esn_decrypt_tail(req, aead_request_flags(req));
}
static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
{
struct aead_instance *inst = aead_alg_instance(tfm);
struct authenc_esn_instance_ctx *ictx = aead_instance_ctx(inst);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
struct crypto_sync_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
if (IS_ERR(auth))
return PTR_ERR(auth);
enc = crypto_spawn_skcipher(&ictx->enc);
err = PTR_ERR(enc);
if (IS_ERR(enc))
goto err_free_ahash;
null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_skcipher;
ctx->auth = auth;
ctx->enc = enc;
ctx->null = null;
ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth),
crypto_ahash_alignmask(auth) + 1);
crypto_aead_set_reqsize(
tfm,
sizeof(struct authenc_esn_request_ctx) +
ctx->reqoff +
max_t(unsigned int,
crypto_ahash_reqsize(auth) +
sizeof(struct ahash_request),
sizeof(struct skcipher_request) +
crypto_skcipher_reqsize(enc)));
return 0;
err_free_skcipher:
crypto_free_skcipher(enc);
err_free_ahash:
crypto_free_ahash(auth);
return err;
}
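/*
 * Scratch layout implied by the reqoff/reqsize computation above (for
 * illustration; derived from the code itself, not from a separate spec):
 *
 *	areq_ctx->tail + 0 (aligned)	ohash, crypto_ahash_digestsize() bytes
 *	ohash + digestsize		ihash, crypto_ahash_digestsize() bytes
 *	areq_ctx->tail + ctx->reqoff	ahash_request or skcipher_request,
 *					whichever is larger
 */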
static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
{
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
crypto_put_default_null_skcipher();
}
static void crypto_authenc_esn_free(struct aead_instance *inst)
{
struct authenc_esn_instance_ctx *ctx = aead_instance_ctx(inst);
crypto_drop_skcipher(&ctx->enc);
crypto_drop_ahash(&ctx->auth);
kfree(inst);
}
static int crypto_authenc_esn_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
u32 mask;
struct aead_instance *inst;
struct authenc_esn_instance_ctx *ctx;
struct hash_alg_common *auth;
struct crypto_alg *auth_base;
struct skcipher_alg *enc;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
ctx = aead_instance_ctx(inst);
err = crypto_grab_ahash(&ctx->auth, aead_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
auth = crypto_spawn_ahash_alg(&ctx->auth);
auth_base = &auth->base;
err = crypto_grab_skcipher(&ctx->enc, aead_crypto_instance(inst),
crypto_attr_alg_name(tb[2]), 0, mask);
if (err)
goto err_free_inst;
enc = crypto_spawn_skcipher_alg(&ctx->enc);
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"authencesn(%s,%s)", auth_base->cra_name,
enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"authencesn(%s,%s)", auth_base->cra_driver_name,
enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
auth_base->cra_priority;
inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
enc->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
inst->alg.maxauthsize = auth->digestsize;
inst->alg.init = crypto_authenc_esn_init_tfm;
inst->alg.exit = crypto_authenc_esn_exit_tfm;
inst->alg.setkey = crypto_authenc_esn_setkey;
inst->alg.setauthsize = crypto_authenc_esn_setauthsize;
inst->alg.encrypt = crypto_authenc_esn_encrypt;
inst->alg.decrypt = crypto_authenc_esn_decrypt;
inst->free = crypto_authenc_esn_free;
err = aead_register_instance(tmpl, inst);
if (err) {
err_free_inst:
crypto_authenc_esn_free(inst);
}
return err;
}
static struct crypto_template crypto_authenc_esn_tmpl = {
.name = "authencesn",
.create = crypto_authenc_esn_create,
.module = THIS_MODULE,
};
static int __init crypto_authenc_esn_module_init(void)
{
return crypto_register_template(&crypto_authenc_esn_tmpl);
}
static void __exit crypto_authenc_esn_module_exit(void)
{
crypto_unregister_template(&crypto_authenc_esn_tmpl);
}
subsys_initcall(crypto_authenc_esn_module_init);
module_exit(crypto_authenc_esn_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <[email protected]>");
MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers");
MODULE_ALIAS_CRYPTO("authencesn");
| linux-master | crypto/authencesn.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common Twofish algorithm parts shared between the C and assembler
* implementations
*
* Originally Twofish for GPG
* By Matthew Skala <[email protected]>, July 26, 1998
* 256-bit key length added March 20, 1999
* Some modifications to reduce the text size by Werner Koch, April, 1998
* Ported to the kerneli patch by Marc Mutz <[email protected]>
* Ported to CryptoAPI by Colin Slater <[email protected]>
*
* The original author has disclaimed all copyright interest in this
* code and thus put it in the public domain. The subsequent authors
* have put this under the GNU General Public License.
*
* This code is a "clean room" implementation, written from the paper
* _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
* Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
* through http://www.counterpane.com/twofish.html
*
* For background information on multiplication in finite fields, used for
* the matrix operations in the key schedule, see the book _Contemporary
* Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
* Third Edition.
*/
#include <crypto/algapi.h>
#include <crypto/twofish.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
/* The large precomputed tables for the Twofish cipher (twofish.c)
* Taken from the same source as twofish.c
* Marc Mutz <[email protected]>
*/
/* These two tables are the q0 and q1 permutations, exactly as described in
* the Twofish paper. */
static const u8 q0[256] = {
0xA9, 0x67, 0xB3, 0xE8, 0x04, 0xFD, 0xA3, 0x76, 0x9A, 0x92, 0x80, 0x78,
0xE4, 0xDD, 0xD1, 0x38, 0x0D, 0xC6, 0x35, 0x98, 0x18, 0xF7, 0xEC, 0x6C,
0x43, 0x75, 0x37, 0x26, 0xFA, 0x13, 0x94, 0x48, 0xF2, 0xD0, 0x8B, 0x30,
0x84, 0x54, 0xDF, 0x23, 0x19, 0x5B, 0x3D, 0x59, 0xF3, 0xAE, 0xA2, 0x82,
0x63, 0x01, 0x83, 0x2E, 0xD9, 0x51, 0x9B, 0x7C, 0xA6, 0xEB, 0xA5, 0xBE,
0x16, 0x0C, 0xE3, 0x61, 0xC0, 0x8C, 0x3A, 0xF5, 0x73, 0x2C, 0x25, 0x0B,
0xBB, 0x4E, 0x89, 0x6B, 0x53, 0x6A, 0xB4, 0xF1, 0xE1, 0xE6, 0xBD, 0x45,
0xE2, 0xF4, 0xB6, 0x66, 0xCC, 0x95, 0x03, 0x56, 0xD4, 0x1C, 0x1E, 0xD7,
0xFB, 0xC3, 0x8E, 0xB5, 0xE9, 0xCF, 0xBF, 0xBA, 0xEA, 0x77, 0x39, 0xAF,
0x33, 0xC9, 0x62, 0x71, 0x81, 0x79, 0x09, 0xAD, 0x24, 0xCD, 0xF9, 0xD8,
0xE5, 0xC5, 0xB9, 0x4D, 0x44, 0x08, 0x86, 0xE7, 0xA1, 0x1D, 0xAA, 0xED,
0x06, 0x70, 0xB2, 0xD2, 0x41, 0x7B, 0xA0, 0x11, 0x31, 0xC2, 0x27, 0x90,
0x20, 0xF6, 0x60, 0xFF, 0x96, 0x5C, 0xB1, 0xAB, 0x9E, 0x9C, 0x52, 0x1B,
0x5F, 0x93, 0x0A, 0xEF, 0x91, 0x85, 0x49, 0xEE, 0x2D, 0x4F, 0x8F, 0x3B,
0x47, 0x87, 0x6D, 0x46, 0xD6, 0x3E, 0x69, 0x64, 0x2A, 0xCE, 0xCB, 0x2F,
0xFC, 0x97, 0x05, 0x7A, 0xAC, 0x7F, 0xD5, 0x1A, 0x4B, 0x0E, 0xA7, 0x5A,
0x28, 0x14, 0x3F, 0x29, 0x88, 0x3C, 0x4C, 0x02, 0xB8, 0xDA, 0xB0, 0x17,
0x55, 0x1F, 0x8A, 0x7D, 0x57, 0xC7, 0x8D, 0x74, 0xB7, 0xC4, 0x9F, 0x72,
0x7E, 0x15, 0x22, 0x12, 0x58, 0x07, 0x99, 0x34, 0x6E, 0x50, 0xDE, 0x68,
0x65, 0xBC, 0xDB, 0xF8, 0xC8, 0xA8, 0x2B, 0x40, 0xDC, 0xFE, 0x32, 0xA4,
0xCA, 0x10, 0x21, 0xF0, 0xD3, 0x5D, 0x0F, 0x00, 0x6F, 0x9D, 0x36, 0x42,
0x4A, 0x5E, 0xC1, 0xE0
};
static const u8 q1[256] = {
0x75, 0xF3, 0xC6, 0xF4, 0xDB, 0x7B, 0xFB, 0xC8, 0x4A, 0xD3, 0xE6, 0x6B,
0x45, 0x7D, 0xE8, 0x4B, 0xD6, 0x32, 0xD8, 0xFD, 0x37, 0x71, 0xF1, 0xE1,
0x30, 0x0F, 0xF8, 0x1B, 0x87, 0xFA, 0x06, 0x3F, 0x5E, 0xBA, 0xAE, 0x5B,
0x8A, 0x00, 0xBC, 0x9D, 0x6D, 0xC1, 0xB1, 0x0E, 0x80, 0x5D, 0xD2, 0xD5,
0xA0, 0x84, 0x07, 0x14, 0xB5, 0x90, 0x2C, 0xA3, 0xB2, 0x73, 0x4C, 0x54,
0x92, 0x74, 0x36, 0x51, 0x38, 0xB0, 0xBD, 0x5A, 0xFC, 0x60, 0x62, 0x96,
0x6C, 0x42, 0xF7, 0x10, 0x7C, 0x28, 0x27, 0x8C, 0x13, 0x95, 0x9C, 0xC7,
0x24, 0x46, 0x3B, 0x70, 0xCA, 0xE3, 0x85, 0xCB, 0x11, 0xD0, 0x93, 0xB8,
0xA6, 0x83, 0x20, 0xFF, 0x9F, 0x77, 0xC3, 0xCC, 0x03, 0x6F, 0x08, 0xBF,
0x40, 0xE7, 0x2B, 0xE2, 0x79, 0x0C, 0xAA, 0x82, 0x41, 0x3A, 0xEA, 0xB9,
0xE4, 0x9A, 0xA4, 0x97, 0x7E, 0xDA, 0x7A, 0x17, 0x66, 0x94, 0xA1, 0x1D,
0x3D, 0xF0, 0xDE, 0xB3, 0x0B, 0x72, 0xA7, 0x1C, 0xEF, 0xD1, 0x53, 0x3E,
0x8F, 0x33, 0x26, 0x5F, 0xEC, 0x76, 0x2A, 0x49, 0x81, 0x88, 0xEE, 0x21,
0xC4, 0x1A, 0xEB, 0xD9, 0xC5, 0x39, 0x99, 0xCD, 0xAD, 0x31, 0x8B, 0x01,
0x18, 0x23, 0xDD, 0x1F, 0x4E, 0x2D, 0xF9, 0x48, 0x4F, 0xF2, 0x65, 0x8E,
0x78, 0x5C, 0x58, 0x19, 0x8D, 0xE5, 0x98, 0x57, 0x67, 0x7F, 0x05, 0x64,
0xAF, 0x63, 0xB6, 0xFE, 0xF5, 0xB7, 0x3C, 0xA5, 0xCE, 0xE9, 0x68, 0x44,
0xE0, 0x4D, 0x43, 0x69, 0x29, 0x2E, 0xAC, 0x15, 0x59, 0xA8, 0x0A, 0x9E,
0x6E, 0x47, 0xDF, 0x34, 0x35, 0x6A, 0xCF, 0xDC, 0x22, 0xC9, 0xC0, 0x9B,
0x89, 0xD4, 0xED, 0xAB, 0x12, 0xA2, 0x0D, 0x52, 0xBB, 0x02, 0x2F, 0xA9,
0xD7, 0x61, 0x1E, 0xB4, 0x50, 0x04, 0xF6, 0xC2, 0x16, 0x25, 0x86, 0x56,
0x55, 0x09, 0xBE, 0x91
};
/* These MDS tables are actually tables of MDS composed with q0 and q1,
* because it is only ever used that way and we can save some time by
* precomputing. Of course the main saving comes from precomputing the
* GF(2^8) multiplication involved in the MDS matrix multiply; by looking
* things up in these tables we reduce the matrix multiply to four lookups
* and three XORs. Semi-formally, the definition of these tables is:
 * mds[0][i] = MDS (q1[i] 0 0 0)^T
 * mds[1][i] = MDS (0 q0[i] 0 0)^T
 * mds[2][i] = MDS (0 0 q1[i] 0)^T
 * mds[3][i] = MDS (0 0 0 q0[i])^T
* where ^T means "transpose", the matrix multiply is performed in GF(2^8)
* represented as GF(2)[x]/v(x) where v(x)=x^8+x^6+x^5+x^3+1 as described
* by Schneier et al, and I'm casually glossing over the byte/word
* conversion issues. */
static const u32 mds[4][256] = {
{
0xBCBC3275, 0xECEC21F3, 0x202043C6, 0xB3B3C9F4, 0xDADA03DB, 0x02028B7B,
0xE2E22BFB, 0x9E9EFAC8, 0xC9C9EC4A, 0xD4D409D3, 0x18186BE6, 0x1E1E9F6B,
0x98980E45, 0xB2B2387D, 0xA6A6D2E8, 0x2626B74B, 0x3C3C57D6, 0x93938A32,
0x8282EED8, 0x525298FD, 0x7B7BD437, 0xBBBB3771, 0x5B5B97F1, 0x474783E1,
0x24243C30, 0x5151E20F, 0xBABAC6F8, 0x4A4AF31B, 0xBFBF4887, 0x0D0D70FA,
0xB0B0B306, 0x7575DE3F, 0xD2D2FD5E, 0x7D7D20BA, 0x666631AE, 0x3A3AA35B,
0x59591C8A, 0x00000000, 0xCDCD93BC, 0x1A1AE09D, 0xAEAE2C6D, 0x7F7FABC1,
0x2B2BC7B1, 0xBEBEB90E, 0xE0E0A080, 0x8A8A105D, 0x3B3B52D2, 0x6464BAD5,
0xD8D888A0, 0xE7E7A584, 0x5F5FE807, 0x1B1B1114, 0x2C2CC2B5, 0xFCFCB490,
0x3131272C, 0x808065A3, 0x73732AB2, 0x0C0C8173, 0x79795F4C, 0x6B6B4154,
0x4B4B0292, 0x53536974, 0x94948F36, 0x83831F51, 0x2A2A3638, 0xC4C49CB0,
0x2222C8BD, 0xD5D5F85A, 0xBDBDC3FC, 0x48487860, 0xFFFFCE62, 0x4C4C0796,
0x4141776C, 0xC7C7E642, 0xEBEB24F7, 0x1C1C1410, 0x5D5D637C, 0x36362228,
0x6767C027, 0xE9E9AF8C, 0x4444F913, 0x1414EA95, 0xF5F5BB9C, 0xCFCF18C7,
0x3F3F2D24, 0xC0C0E346, 0x7272DB3B, 0x54546C70, 0x29294CCA, 0xF0F035E3,
0x0808FE85, 0xC6C617CB, 0xF3F34F11, 0x8C8CE4D0, 0xA4A45993, 0xCACA96B8,
0x68683BA6, 0xB8B84D83, 0x38382820, 0xE5E52EFF, 0xADAD569F, 0x0B0B8477,
0xC8C81DC3, 0x9999FFCC, 0x5858ED03, 0x19199A6F, 0x0E0E0A08, 0x95957EBF,
0x70705040, 0xF7F730E7, 0x6E6ECF2B, 0x1F1F6EE2, 0xB5B53D79, 0x09090F0C,
0x616134AA, 0x57571682, 0x9F9F0B41, 0x9D9D803A, 0x111164EA, 0x2525CDB9,
0xAFAFDDE4, 0x4545089A, 0xDFDF8DA4, 0xA3A35C97, 0xEAEAD57E, 0x353558DA,
0xEDEDD07A, 0x4343FC17, 0xF8F8CB66, 0xFBFBB194, 0x3737D3A1, 0xFAFA401D,
0xC2C2683D, 0xB4B4CCF0, 0x32325DDE, 0x9C9C71B3, 0x5656E70B, 0xE3E3DA72,
0x878760A7, 0x15151B1C, 0xF9F93AEF, 0x6363BFD1, 0x3434A953, 0x9A9A853E,
0xB1B1428F, 0x7C7CD133, 0x88889B26, 0x3D3DA65F, 0xA1A1D7EC, 0xE4E4DF76,
0x8181942A, 0x91910149, 0x0F0FFB81, 0xEEEEAA88, 0x161661EE, 0xD7D77321,
0x9797F5C4, 0xA5A5A81A, 0xFEFE3FEB, 0x6D6DB5D9, 0x7878AEC5, 0xC5C56D39,
0x1D1DE599, 0x7676A4CD, 0x3E3EDCAD, 0xCBCB6731, 0xB6B6478B, 0xEFEF5B01,
0x12121E18, 0x6060C523, 0x6A6AB0DD, 0x4D4DF61F, 0xCECEE94E, 0xDEDE7C2D,
0x55559DF9, 0x7E7E5A48, 0x2121B24F, 0x03037AF2, 0xA0A02665, 0x5E5E198E,
0x5A5A6678, 0x65654B5C, 0x62624E58, 0xFDFD4519, 0x0606F48D, 0x404086E5,
0xF2F2BE98, 0x3333AC57, 0x17179067, 0x05058E7F, 0xE8E85E05, 0x4F4F7D64,
0x89896AAF, 0x10109563, 0x74742FB6, 0x0A0A75FE, 0x5C5C92F5, 0x9B9B74B7,
0x2D2D333C, 0x3030D6A5, 0x2E2E49CE, 0x494989E9, 0x46467268, 0x77775544,
0xA8A8D8E0, 0x9696044D, 0x2828BD43, 0xA9A92969, 0xD9D97929, 0x8686912E,
0xD1D187AC, 0xF4F44A15, 0x8D8D1559, 0xD6D682A8, 0xB9B9BC0A, 0x42420D9E,
0xF6F6C16E, 0x2F2FB847, 0xDDDD06DF, 0x23233934, 0xCCCC6235, 0xF1F1C46A,
0xC1C112CF, 0x8585EBDC, 0x8F8F9E22, 0x7171A1C9, 0x9090F0C0, 0xAAAA539B,
0x0101F189, 0x8B8BE1D4, 0x4E4E8CED, 0x8E8E6FAB, 0xABABA212, 0x6F6F3EA2,
0xE6E6540D, 0xDBDBF252, 0x92927BBB, 0xB7B7B602, 0x6969CA2F, 0x3939D9A9,
0xD3D30CD7, 0xA7A72361, 0xA2A2AD1E, 0xC3C399B4, 0x6C6C4450, 0x07070504,
0x04047FF6, 0x272746C2, 0xACACA716, 0xD0D07625, 0x50501386, 0xDCDCF756,
0x84841A55, 0xE1E15109, 0x7A7A25BE, 0x1313EF91},
{
0xA9D93939, 0x67901717, 0xB3719C9C, 0xE8D2A6A6, 0x04050707, 0xFD985252,
0xA3658080, 0x76DFE4E4, 0x9A084545, 0x92024B4B, 0x80A0E0E0, 0x78665A5A,
0xE4DDAFAF, 0xDDB06A6A, 0xD1BF6363, 0x38362A2A, 0x0D54E6E6, 0xC6432020,
0x3562CCCC, 0x98BEF2F2, 0x181E1212, 0xF724EBEB, 0xECD7A1A1, 0x6C774141,
0x43BD2828, 0x7532BCBC, 0x37D47B7B, 0x269B8888, 0xFA700D0D, 0x13F94444,
0x94B1FBFB, 0x485A7E7E, 0xF27A0303, 0xD0E48C8C, 0x8B47B6B6, 0x303C2424,
0x84A5E7E7, 0x54416B6B, 0xDF06DDDD, 0x23C56060, 0x1945FDFD, 0x5BA33A3A,
0x3D68C2C2, 0x59158D8D, 0xF321ECEC, 0xAE316666, 0xA23E6F6F, 0x82165757,
0x63951010, 0x015BEFEF, 0x834DB8B8, 0x2E918686, 0xD9B56D6D, 0x511F8383,
0x9B53AAAA, 0x7C635D5D, 0xA63B6868, 0xEB3FFEFE, 0xA5D63030, 0xBE257A7A,
0x16A7ACAC, 0x0C0F0909, 0xE335F0F0, 0x6123A7A7, 0xC0F09090, 0x8CAFE9E9,
0x3A809D9D, 0xF5925C5C, 0x73810C0C, 0x2C273131, 0x2576D0D0, 0x0BE75656,
0xBB7B9292, 0x4EE9CECE, 0x89F10101, 0x6B9F1E1E, 0x53A93434, 0x6AC4F1F1,
0xB499C3C3, 0xF1975B5B, 0xE1834747, 0xE66B1818, 0xBDC82222, 0x450E9898,
0xE26E1F1F, 0xF4C9B3B3, 0xB62F7474, 0x66CBF8F8, 0xCCFF9999, 0x95EA1414,
0x03ED5858, 0x56F7DCDC, 0xD4E18B8B, 0x1C1B1515, 0x1EADA2A2, 0xD70CD3D3,
0xFB2BE2E2, 0xC31DC8C8, 0x8E195E5E, 0xB5C22C2C, 0xE9894949, 0xCF12C1C1,
0xBF7E9595, 0xBA207D7D, 0xEA641111, 0x77840B0B, 0x396DC5C5, 0xAF6A8989,
0x33D17C7C, 0xC9A17171, 0x62CEFFFF, 0x7137BBBB, 0x81FB0F0F, 0x793DB5B5,
0x0951E1E1, 0xADDC3E3E, 0x242D3F3F, 0xCDA47676, 0xF99D5555, 0xD8EE8282,
0xE5864040, 0xC5AE7878, 0xB9CD2525, 0x4D049696, 0x44557777, 0x080A0E0E,
0x86135050, 0xE730F7F7, 0xA1D33737, 0x1D40FAFA, 0xAA346161, 0xED8C4E4E,
0x06B3B0B0, 0x706C5454, 0xB22A7373, 0xD2523B3B, 0x410B9F9F, 0x7B8B0202,
0xA088D8D8, 0x114FF3F3, 0x3167CBCB, 0xC2462727, 0x27C06767, 0x90B4FCFC,
0x20283838, 0xF67F0404, 0x60784848, 0xFF2EE5E5, 0x96074C4C, 0x5C4B6565,
0xB1C72B2B, 0xAB6F8E8E, 0x9E0D4242, 0x9CBBF5F5, 0x52F2DBDB, 0x1BF34A4A,
0x5FA63D3D, 0x9359A4A4, 0x0ABCB9B9, 0xEF3AF9F9, 0x91EF1313, 0x85FE0808,
0x49019191, 0xEE611616, 0x2D7CDEDE, 0x4FB22121, 0x8F42B1B1, 0x3BDB7272,
0x47B82F2F, 0x8748BFBF, 0x6D2CAEAE, 0x46E3C0C0, 0xD6573C3C, 0x3E859A9A,
0x6929A9A9, 0x647D4F4F, 0x2A948181, 0xCE492E2E, 0xCB17C6C6, 0x2FCA6969,
0xFCC3BDBD, 0x975CA3A3, 0x055EE8E8, 0x7AD0EDED, 0xAC87D1D1, 0x7F8E0505,
0xD5BA6464, 0x1AA8A5A5, 0x4BB72626, 0x0EB9BEBE, 0xA7608787, 0x5AF8D5D5,
0x28223636, 0x14111B1B, 0x3FDE7575, 0x2979D9D9, 0x88AAEEEE, 0x3C332D2D,
0x4C5F7979, 0x02B6B7B7, 0xB896CACA, 0xDA583535, 0xB09CC4C4, 0x17FC4343,
0x551A8484, 0x1FF64D4D, 0x8A1C5959, 0x7D38B2B2, 0x57AC3333, 0xC718CFCF,
0x8DF40606, 0x74695353, 0xB7749B9B, 0xC4F59797, 0x9F56ADAD, 0x72DAE3E3,
0x7ED5EAEA, 0x154AF4F4, 0x229E8F8F, 0x12A2ABAB, 0x584E6262, 0x07E85F5F,
0x99E51D1D, 0x34392323, 0x6EC1F6F6, 0x50446C6C, 0xDE5D3232, 0x68724646,
0x6526A0A0, 0xBC93CDCD, 0xDB03DADA, 0xF8C6BABA, 0xC8FA9E9E, 0xA882D6D6,
0x2BCF6E6E, 0x40507070, 0xDCEB8585, 0xFE750A0A, 0x328A9393, 0xA48DDFDF,
0xCA4C2929, 0x10141C1C, 0x2173D7D7, 0xF0CCB4B4, 0xD309D4D4, 0x5D108A8A,
0x0FE25151, 0x00000000, 0x6F9A1919, 0x9DE01A1A, 0x368F9494, 0x42E6C7C7,
0x4AECC9C9, 0x5EFDD2D2, 0xC1AB7F7F, 0xE0D8A8A8},
{
0xBC75BC32, 0xECF3EC21, 0x20C62043, 0xB3F4B3C9, 0xDADBDA03, 0x027B028B,
0xE2FBE22B, 0x9EC89EFA, 0xC94AC9EC, 0xD4D3D409, 0x18E6186B, 0x1E6B1E9F,
0x9845980E, 0xB27DB238, 0xA6E8A6D2, 0x264B26B7, 0x3CD63C57, 0x9332938A,
0x82D882EE, 0x52FD5298, 0x7B377BD4, 0xBB71BB37, 0x5BF15B97, 0x47E14783,
0x2430243C, 0x510F51E2, 0xBAF8BAC6, 0x4A1B4AF3, 0xBF87BF48, 0x0DFA0D70,
0xB006B0B3, 0x753F75DE, 0xD25ED2FD, 0x7DBA7D20, 0x66AE6631, 0x3A5B3AA3,
0x598A591C, 0x00000000, 0xCDBCCD93, 0x1A9D1AE0, 0xAE6DAE2C, 0x7FC17FAB,
0x2BB12BC7, 0xBE0EBEB9, 0xE080E0A0, 0x8A5D8A10, 0x3BD23B52, 0x64D564BA,
0xD8A0D888, 0xE784E7A5, 0x5F075FE8, 0x1B141B11, 0x2CB52CC2, 0xFC90FCB4,
0x312C3127, 0x80A38065, 0x73B2732A, 0x0C730C81, 0x794C795F, 0x6B546B41,
0x4B924B02, 0x53745369, 0x9436948F, 0x8351831F, 0x2A382A36, 0xC4B0C49C,
0x22BD22C8, 0xD55AD5F8, 0xBDFCBDC3, 0x48604878, 0xFF62FFCE, 0x4C964C07,
0x416C4177, 0xC742C7E6, 0xEBF7EB24, 0x1C101C14, 0x5D7C5D63, 0x36283622,
0x672767C0, 0xE98CE9AF, 0x441344F9, 0x149514EA, 0xF59CF5BB, 0xCFC7CF18,
0x3F243F2D, 0xC046C0E3, 0x723B72DB, 0x5470546C, 0x29CA294C, 0xF0E3F035,
0x088508FE, 0xC6CBC617, 0xF311F34F, 0x8CD08CE4, 0xA493A459, 0xCAB8CA96,
0x68A6683B, 0xB883B84D, 0x38203828, 0xE5FFE52E, 0xAD9FAD56, 0x0B770B84,
0xC8C3C81D, 0x99CC99FF, 0x580358ED, 0x196F199A, 0x0E080E0A, 0x95BF957E,
0x70407050, 0xF7E7F730, 0x6E2B6ECF, 0x1FE21F6E, 0xB579B53D, 0x090C090F,
0x61AA6134, 0x57825716, 0x9F419F0B, 0x9D3A9D80, 0x11EA1164, 0x25B925CD,
0xAFE4AFDD, 0x459A4508, 0xDFA4DF8D, 0xA397A35C, 0xEA7EEAD5, 0x35DA3558,
0xED7AEDD0, 0x431743FC, 0xF866F8CB, 0xFB94FBB1, 0x37A137D3, 0xFA1DFA40,
0xC23DC268, 0xB4F0B4CC, 0x32DE325D, 0x9CB39C71, 0x560B56E7, 0xE372E3DA,
0x87A78760, 0x151C151B, 0xF9EFF93A, 0x63D163BF, 0x345334A9, 0x9A3E9A85,
0xB18FB142, 0x7C337CD1, 0x8826889B, 0x3D5F3DA6, 0xA1ECA1D7, 0xE476E4DF,
0x812A8194, 0x91499101, 0x0F810FFB, 0xEE88EEAA, 0x16EE1661, 0xD721D773,
0x97C497F5, 0xA51AA5A8, 0xFEEBFE3F, 0x6DD96DB5, 0x78C578AE, 0xC539C56D,
0x1D991DE5, 0x76CD76A4, 0x3EAD3EDC, 0xCB31CB67, 0xB68BB647, 0xEF01EF5B,
0x1218121E, 0x602360C5, 0x6ADD6AB0, 0x4D1F4DF6, 0xCE4ECEE9, 0xDE2DDE7C,
0x55F9559D, 0x7E487E5A, 0x214F21B2, 0x03F2037A, 0xA065A026, 0x5E8E5E19,
0x5A785A66, 0x655C654B, 0x6258624E, 0xFD19FD45, 0x068D06F4, 0x40E54086,
0xF298F2BE, 0x335733AC, 0x17671790, 0x057F058E, 0xE805E85E, 0x4F644F7D,
0x89AF896A, 0x10631095, 0x74B6742F, 0x0AFE0A75, 0x5CF55C92, 0x9BB79B74,
0x2D3C2D33, 0x30A530D6, 0x2ECE2E49, 0x49E94989, 0x46684672, 0x77447755,
0xA8E0A8D8, 0x964D9604, 0x284328BD, 0xA969A929, 0xD929D979, 0x862E8691,
0xD1ACD187, 0xF415F44A, 0x8D598D15, 0xD6A8D682, 0xB90AB9BC, 0x429E420D,
0xF66EF6C1, 0x2F472FB8, 0xDDDFDD06, 0x23342339, 0xCC35CC62, 0xF16AF1C4,
0xC1CFC112, 0x85DC85EB, 0x8F228F9E, 0x71C971A1, 0x90C090F0, 0xAA9BAA53,
0x018901F1, 0x8BD48BE1, 0x4EED4E8C, 0x8EAB8E6F, 0xAB12ABA2, 0x6FA26F3E,
0xE60DE654, 0xDB52DBF2, 0x92BB927B, 0xB702B7B6, 0x692F69CA, 0x39A939D9,
0xD3D7D30C, 0xA761A723, 0xA21EA2AD, 0xC3B4C399, 0x6C506C44, 0x07040705,
0x04F6047F, 0x27C22746, 0xAC16ACA7, 0xD025D076, 0x50865013, 0xDC56DCF7,
0x8455841A, 0xE109E151, 0x7ABE7A25, 0x139113EF},
{
0xD939A9D9, 0x90176790, 0x719CB371, 0xD2A6E8D2, 0x05070405, 0x9852FD98,
0x6580A365, 0xDFE476DF, 0x08459A08, 0x024B9202, 0xA0E080A0, 0x665A7866,
0xDDAFE4DD, 0xB06ADDB0, 0xBF63D1BF, 0x362A3836, 0x54E60D54, 0x4320C643,
0x62CC3562, 0xBEF298BE, 0x1E12181E, 0x24EBF724, 0xD7A1ECD7, 0x77416C77,
0xBD2843BD, 0x32BC7532, 0xD47B37D4, 0x9B88269B, 0x700DFA70, 0xF94413F9,
0xB1FB94B1, 0x5A7E485A, 0x7A03F27A, 0xE48CD0E4, 0x47B68B47, 0x3C24303C,
0xA5E784A5, 0x416B5441, 0x06DDDF06, 0xC56023C5, 0x45FD1945, 0xA33A5BA3,
0x68C23D68, 0x158D5915, 0x21ECF321, 0x3166AE31, 0x3E6FA23E, 0x16578216,
0x95106395, 0x5BEF015B, 0x4DB8834D, 0x91862E91, 0xB56DD9B5, 0x1F83511F,
0x53AA9B53, 0x635D7C63, 0x3B68A63B, 0x3FFEEB3F, 0xD630A5D6, 0x257ABE25,
0xA7AC16A7, 0x0F090C0F, 0x35F0E335, 0x23A76123, 0xF090C0F0, 0xAFE98CAF,
0x809D3A80, 0x925CF592, 0x810C7381, 0x27312C27, 0x76D02576, 0xE7560BE7,
0x7B92BB7B, 0xE9CE4EE9, 0xF10189F1, 0x9F1E6B9F, 0xA93453A9, 0xC4F16AC4,
0x99C3B499, 0x975BF197, 0x8347E183, 0x6B18E66B, 0xC822BDC8, 0x0E98450E,
0x6E1FE26E, 0xC9B3F4C9, 0x2F74B62F, 0xCBF866CB, 0xFF99CCFF, 0xEA1495EA,
0xED5803ED, 0xF7DC56F7, 0xE18BD4E1, 0x1B151C1B, 0xADA21EAD, 0x0CD3D70C,
0x2BE2FB2B, 0x1DC8C31D, 0x195E8E19, 0xC22CB5C2, 0x8949E989, 0x12C1CF12,
0x7E95BF7E, 0x207DBA20, 0x6411EA64, 0x840B7784, 0x6DC5396D, 0x6A89AF6A,
0xD17C33D1, 0xA171C9A1, 0xCEFF62CE, 0x37BB7137, 0xFB0F81FB, 0x3DB5793D,
0x51E10951, 0xDC3EADDC, 0x2D3F242D, 0xA476CDA4, 0x9D55F99D, 0xEE82D8EE,
0x8640E586, 0xAE78C5AE, 0xCD25B9CD, 0x04964D04, 0x55774455, 0x0A0E080A,
0x13508613, 0x30F7E730, 0xD337A1D3, 0x40FA1D40, 0x3461AA34, 0x8C4EED8C,
0xB3B006B3, 0x6C54706C, 0x2A73B22A, 0x523BD252, 0x0B9F410B, 0x8B027B8B,
0x88D8A088, 0x4FF3114F, 0x67CB3167, 0x4627C246, 0xC06727C0, 0xB4FC90B4,
0x28382028, 0x7F04F67F, 0x78486078, 0x2EE5FF2E, 0x074C9607, 0x4B655C4B,
0xC72BB1C7, 0x6F8EAB6F, 0x0D429E0D, 0xBBF59CBB, 0xF2DB52F2, 0xF34A1BF3,
0xA63D5FA6, 0x59A49359, 0xBCB90ABC, 0x3AF9EF3A, 0xEF1391EF, 0xFE0885FE,
0x01914901, 0x6116EE61, 0x7CDE2D7C, 0xB2214FB2, 0x42B18F42, 0xDB723BDB,
0xB82F47B8, 0x48BF8748, 0x2CAE6D2C, 0xE3C046E3, 0x573CD657, 0x859A3E85,
0x29A96929, 0x7D4F647D, 0x94812A94, 0x492ECE49, 0x17C6CB17, 0xCA692FCA,
0xC3BDFCC3, 0x5CA3975C, 0x5EE8055E, 0xD0ED7AD0, 0x87D1AC87, 0x8E057F8E,
0xBA64D5BA, 0xA8A51AA8, 0xB7264BB7, 0xB9BE0EB9, 0x6087A760, 0xF8D55AF8,
0x22362822, 0x111B1411, 0xDE753FDE, 0x79D92979, 0xAAEE88AA, 0x332D3C33,
0x5F794C5F, 0xB6B702B6, 0x96CAB896, 0x5835DA58, 0x9CC4B09C, 0xFC4317FC,
0x1A84551A, 0xF64D1FF6, 0x1C598A1C, 0x38B27D38, 0xAC3357AC, 0x18CFC718,
0xF4068DF4, 0x69537469, 0x749BB774, 0xF597C4F5, 0x56AD9F56, 0xDAE372DA,
0xD5EA7ED5, 0x4AF4154A, 0x9E8F229E, 0xA2AB12A2, 0x4E62584E, 0xE85F07E8,
0xE51D99E5, 0x39233439, 0xC1F66EC1, 0x446C5044, 0x5D32DE5D, 0x72466872,
0x26A06526, 0x93CDBC93, 0x03DADB03, 0xC6BAF8C6, 0xFA9EC8FA, 0x82D6A882,
0xCF6E2BCF, 0x50704050, 0xEB85DCEB, 0x750AFE75, 0x8A93328A, 0x8DDFA48D,
0x4C29CA4C, 0x141C1014, 0x73D72173, 0xCCB4F0CC, 0x09D4D309, 0x108A5D10,
0xE2510FE2, 0x00000000, 0x9A196F9A, 0xE01A9DE0, 0x8F94368F, 0xE6C742E6,
0xECC94AEC, 0xFDD25EFD, 0xAB7FC1AB, 0xD8A8E0D8}
};
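/*
 * Illustrative sketch (hypothetical helper, unused by the cipher): each
 * 32-bit entry of mds[] packs one MDS column scaled by the permuted index,
 * least significant byte first.  For example, with y = q1[i] and "*"
 * meaning multiplication modulo x^8+x^6+x^5+x^3+1 (0x169):
 *
 *	mds[0][i] == (0xEF*y << 24) | (0xEF*y << 16) | (0x5B*y << 8) | (0x01*y)
 *
 * so mds[0][0] == 0xBCBC3275, since q1[0] = 0x75, 0x5B*0x75 = 0x32 and
 * 0xEF*0x75 = 0xBC.  A multiply routine for such a sanity check might be:
 */
static inline u8 mds_gf_mul(u8 a, u8 b)
{
	u32 r = 0;
	int i;

	/* carry-less multiply of a and b ... */
	for (i = 0; i < 8; i++)
		if (b & (1 << i))
			r ^= (u32)a << i;
	/* ... then reduce modulo x^8+x^6+x^5+x^3+1 */
	for (i = 15; i >= 8; i--)
		if (r & (1U << i))
			r ^= 0x169U << (i - 8);
	return r;
}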
/* The exp_to_poly and poly_to_exp tables are used to perform efficient
* operations in GF(2^8) represented as GF(2)[x]/w(x) where
* w(x)=x^8+x^6+x^3+x^2+1. We care about doing that because it's part of the
* definition of the RS matrix in the key schedule. Elements of that field
* are polynomials of degree not greater than 7 and all coefficients 0 or 1,
* which can be represented naturally by bytes (just substitute x=2). In that
* form, GF(2^8) addition is the same as bitwise XOR, but GF(2^8)
* multiplication is inefficient without hardware support. To multiply
* faster, I make use of the fact x is a generator for the nonzero elements,
* so that every element p of GF(2)[x]/w(x) is either 0 or equal to (x)^n for
* some n in 0..254. Note that caret is exponentiation in GF(2^8),
* *not* polynomial notation. So if I want to compute pq where p and q are
* in GF(2^8), I can just say:
* 1. if p=0 or q=0 then pq=0
* 2. otherwise, find m and n such that p=x^m and q=x^n
 * 3. pq=(x^m)(x^n)=x^(m+n), so add m and n and look up x^(m+n)
* The translations in steps 2 and 3 are looked up in the tables
* poly_to_exp (for step 2) and exp_to_poly (for step 3). To see this
* in action, look at the CALC_S macro. As additional wrinkles, note that
* one of my operands is always a constant, so the poly_to_exp lookup on it
* is done in advance; I included the original values in the comments so
* readers can have some chance of recognizing that this *is* the RS matrix
* from the Twofish paper. I've only included the table entries I actually
* need; I never do a lookup on a variable input of zero and the biggest
* exponents I'll ever see are 254 (variable) and 237 (constant), so they'll
* never sum to more than 491. I'm repeating part of the exp_to_poly table
* so that I don't have to do mod-255 reduction in the exponent arithmetic.
* Since I know my constant operands are never zero, I only have to worry
* about zero values in the variable operand, and I do it with a simple
* conditional branch. I know conditionals are expensive, but I couldn't
* see a non-horrible way of avoiding them, and I did manage to group the
* statements so that each if covers four group multiplications. */
static const u8 poly_to_exp[255] = {
0x00, 0x01, 0x17, 0x02, 0x2E, 0x18, 0x53, 0x03, 0x6A, 0x2F, 0x93, 0x19,
0x34, 0x54, 0x45, 0x04, 0x5C, 0x6B, 0xB6, 0x30, 0xA6, 0x94, 0x4B, 0x1A,
0x8C, 0x35, 0x81, 0x55, 0xAA, 0x46, 0x0D, 0x05, 0x24, 0x5D, 0x87, 0x6C,
0x9B, 0xB7, 0xC1, 0x31, 0x2B, 0xA7, 0xA3, 0x95, 0x98, 0x4C, 0xCA, 0x1B,
0xE6, 0x8D, 0x73, 0x36, 0xCD, 0x82, 0x12, 0x56, 0x62, 0xAB, 0xF0, 0x47,
0x4F, 0x0E, 0xBD, 0x06, 0xD4, 0x25, 0xD2, 0x5E, 0x27, 0x88, 0x66, 0x6D,
0xD6, 0x9C, 0x79, 0xB8, 0x08, 0xC2, 0xDF, 0x32, 0x68, 0x2C, 0xFD, 0xA8,
0x8A, 0xA4, 0x5A, 0x96, 0x29, 0x99, 0x22, 0x4D, 0x60, 0xCB, 0xE4, 0x1C,
0x7B, 0xE7, 0x3B, 0x8E, 0x9E, 0x74, 0xF4, 0x37, 0xD8, 0xCE, 0xF9, 0x83,
0x6F, 0x13, 0xB2, 0x57, 0xE1, 0x63, 0xDC, 0xAC, 0xC4, 0xF1, 0xAF, 0x48,
0x0A, 0x50, 0x42, 0x0F, 0xBA, 0xBE, 0xC7, 0x07, 0xDE, 0xD5, 0x78, 0x26,
0x65, 0xD3, 0xD1, 0x5F, 0xE3, 0x28, 0x21, 0x89, 0x59, 0x67, 0xFC, 0x6E,
0xB1, 0xD7, 0xF8, 0x9D, 0xF3, 0x7A, 0x3A, 0xB9, 0xC6, 0x09, 0x41, 0xC3,
0xAE, 0xE0, 0xDB, 0x33, 0x44, 0x69, 0x92, 0x2D, 0x52, 0xFE, 0x16, 0xA9,
0x0C, 0x8B, 0x80, 0xA5, 0x4A, 0x5B, 0xB5, 0x97, 0xC9, 0x2A, 0xA2, 0x9A,
0xC0, 0x23, 0x86, 0x4E, 0xBC, 0x61, 0xEF, 0xCC, 0x11, 0xE5, 0x72, 0x1D,
0x3D, 0x7C, 0xEB, 0xE8, 0xE9, 0x3C, 0xEA, 0x8F, 0x7D, 0x9F, 0xEC, 0x75,
0x1E, 0xF5, 0x3E, 0x38, 0xF6, 0xD9, 0x3F, 0xCF, 0x76, 0xFA, 0x1F, 0x84,
0xA0, 0x70, 0xED, 0x14, 0x90, 0xB3, 0x7E, 0x58, 0xFB, 0xE2, 0x20, 0x64,
0xD0, 0xDD, 0x77, 0xAD, 0xDA, 0xC5, 0x40, 0xF2, 0x39, 0xB0, 0xF7, 0x49,
0xB4, 0x0B, 0x7F, 0x51, 0x15, 0x43, 0x91, 0x10, 0x71, 0xBB, 0xEE, 0xBF,
0x85, 0xC8, 0xA1
};
static const u8 exp_to_poly[492] = {
0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x4D, 0x9A, 0x79, 0xF2,
0xA9, 0x1F, 0x3E, 0x7C, 0xF8, 0xBD, 0x37, 0x6E, 0xDC, 0xF5, 0xA7, 0x03,
0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0xCD, 0xD7, 0xE3, 0x8B, 0x5B, 0xB6,
0x21, 0x42, 0x84, 0x45, 0x8A, 0x59, 0xB2, 0x29, 0x52, 0xA4, 0x05, 0x0A,
0x14, 0x28, 0x50, 0xA0, 0x0D, 0x1A, 0x34, 0x68, 0xD0, 0xED, 0x97, 0x63,
0xC6, 0xC1, 0xCF, 0xD3, 0xEB, 0x9B, 0x7B, 0xF6, 0xA1, 0x0F, 0x1E, 0x3C,
0x78, 0xF0, 0xAD, 0x17, 0x2E, 0x5C, 0xB8, 0x3D, 0x7A, 0xF4, 0xA5, 0x07,
0x0E, 0x1C, 0x38, 0x70, 0xE0, 0x8D, 0x57, 0xAE, 0x11, 0x22, 0x44, 0x88,
0x5D, 0xBA, 0x39, 0x72, 0xE4, 0x85, 0x47, 0x8E, 0x51, 0xA2, 0x09, 0x12,
0x24, 0x48, 0x90, 0x6D, 0xDA, 0xF9, 0xBF, 0x33, 0x66, 0xCC, 0xD5, 0xE7,
0x83, 0x4B, 0x96, 0x61, 0xC2, 0xC9, 0xDF, 0xF3, 0xAB, 0x1B, 0x36, 0x6C,
0xD8, 0xFD, 0xB7, 0x23, 0x46, 0x8C, 0x55, 0xAA, 0x19, 0x32, 0x64, 0xC8,
0xDD, 0xF7, 0xA3, 0x0B, 0x16, 0x2C, 0x58, 0xB0, 0x2D, 0x5A, 0xB4, 0x25,
0x4A, 0x94, 0x65, 0xCA, 0xD9, 0xFF, 0xB3, 0x2B, 0x56, 0xAC, 0x15, 0x2A,
0x54, 0xA8, 0x1D, 0x3A, 0x74, 0xE8, 0x9D, 0x77, 0xEE, 0x91, 0x6F, 0xDE,
0xF1, 0xAF, 0x13, 0x26, 0x4C, 0x98, 0x7D, 0xFA, 0xB9, 0x3F, 0x7E, 0xFC,
0xB5, 0x27, 0x4E, 0x9C, 0x75, 0xEA, 0x99, 0x7F, 0xFE, 0xB1, 0x2F, 0x5E,
0xBC, 0x35, 0x6A, 0xD4, 0xE5, 0x87, 0x43, 0x86, 0x41, 0x82, 0x49, 0x92,
0x69, 0xD2, 0xE9, 0x9F, 0x73, 0xE6, 0x81, 0x4F, 0x9E, 0x71, 0xE2, 0x89,
0x5F, 0xBE, 0x31, 0x62, 0xC4, 0xC5, 0xC7, 0xC3, 0xCB, 0xDB, 0xFB, 0xBB,
0x3B, 0x76, 0xEC, 0x95, 0x67, 0xCE, 0xD1, 0xEF, 0x93, 0x6B, 0xD6, 0xE1,
0x8F, 0x53, 0xA6, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x4D,
0x9A, 0x79, 0xF2, 0xA9, 0x1F, 0x3E, 0x7C, 0xF8, 0xBD, 0x37, 0x6E, 0xDC,
0xF5, 0xA7, 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0xCD, 0xD7, 0xE3,
0x8B, 0x5B, 0xB6, 0x21, 0x42, 0x84, 0x45, 0x8A, 0x59, 0xB2, 0x29, 0x52,
0xA4, 0x05, 0x0A, 0x14, 0x28, 0x50, 0xA0, 0x0D, 0x1A, 0x34, 0x68, 0xD0,
0xED, 0x97, 0x63, 0xC6, 0xC1, 0xCF, 0xD3, 0xEB, 0x9B, 0x7B, 0xF6, 0xA1,
0x0F, 0x1E, 0x3C, 0x78, 0xF0, 0xAD, 0x17, 0x2E, 0x5C, 0xB8, 0x3D, 0x7A,
0xF4, 0xA5, 0x07, 0x0E, 0x1C, 0x38, 0x70, 0xE0, 0x8D, 0x57, 0xAE, 0x11,
0x22, 0x44, 0x88, 0x5D, 0xBA, 0x39, 0x72, 0xE4, 0x85, 0x47, 0x8E, 0x51,
0xA2, 0x09, 0x12, 0x24, 0x48, 0x90, 0x6D, 0xDA, 0xF9, 0xBF, 0x33, 0x66,
0xCC, 0xD5, 0xE7, 0x83, 0x4B, 0x96, 0x61, 0xC2, 0xC9, 0xDF, 0xF3, 0xAB,
0x1B, 0x36, 0x6C, 0xD8, 0xFD, 0xB7, 0x23, 0x46, 0x8C, 0x55, 0xAA, 0x19,
0x32, 0x64, 0xC8, 0xDD, 0xF7, 0xA3, 0x0B, 0x16, 0x2C, 0x58, 0xB0, 0x2D,
0x5A, 0xB4, 0x25, 0x4A, 0x94, 0x65, 0xCA, 0xD9, 0xFF, 0xB3, 0x2B, 0x56,
0xAC, 0x15, 0x2A, 0x54, 0xA8, 0x1D, 0x3A, 0x74, 0xE8, 0x9D, 0x77, 0xEE,
0x91, 0x6F, 0xDE, 0xF1, 0xAF, 0x13, 0x26, 0x4C, 0x98, 0x7D, 0xFA, 0xB9,
0x3F, 0x7E, 0xFC, 0xB5, 0x27, 0x4E, 0x9C, 0x75, 0xEA, 0x99, 0x7F, 0xFE,
0xB1, 0x2F, 0x5E, 0xBC, 0x35, 0x6A, 0xD4, 0xE5, 0x87, 0x43, 0x86, 0x41,
0x82, 0x49, 0x92, 0x69, 0xD2, 0xE9, 0x9F, 0x73, 0xE6, 0x81, 0x4F, 0x9E,
0x71, 0xE2, 0x89, 0x5F, 0xBE, 0x31, 0x62, 0xC4, 0xC5, 0xC7, 0xC3, 0xCB
};
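/*
 * Illustrative sketch (hypothetical helper, unused): the multiplication
 * described above, with the constant operand already converted to its
 * exponent the way CALC_S receives it.  Constant exponents never exceed
 * 237 and variable ones never exceed 254, so the sum stays inside the
 * 492-entry exp_to_poly table and no mod-255 reduction is needed.
 */
static inline u8 gf_mul_by_const(u8 p, u8 const_exp)
{
	if (p == 0)		/* step 1: a zero factor gives zero */
		return 0;
	/* steps 2 and 3: p = x^m, so p*q = x^(m + const_exp) */
	return exp_to_poly[poly_to_exp[p - 1] + const_exp];
}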
/* The table constants are indices of
* S-box entries, preprocessed through q0 and q1. */
static const u8 calc_sb_tbl[512] = {
0xA9, 0x75, 0x67, 0xF3, 0xB3, 0xC6, 0xE8, 0xF4,
0x04, 0xDB, 0xFD, 0x7B, 0xA3, 0xFB, 0x76, 0xC8,
0x9A, 0x4A, 0x92, 0xD3, 0x80, 0xE6, 0x78, 0x6B,
0xE4, 0x45, 0xDD, 0x7D, 0xD1, 0xE8, 0x38, 0x4B,
0x0D, 0xD6, 0xC6, 0x32, 0x35, 0xD8, 0x98, 0xFD,
0x18, 0x37, 0xF7, 0x71, 0xEC, 0xF1, 0x6C, 0xE1,
0x43, 0x30, 0x75, 0x0F, 0x37, 0xF8, 0x26, 0x1B,
0xFA, 0x87, 0x13, 0xFA, 0x94, 0x06, 0x48, 0x3F,
0xF2, 0x5E, 0xD0, 0xBA, 0x8B, 0xAE, 0x30, 0x5B,
0x84, 0x8A, 0x54, 0x00, 0xDF, 0xBC, 0x23, 0x9D,
0x19, 0x6D, 0x5B, 0xC1, 0x3D, 0xB1, 0x59, 0x0E,
0xF3, 0x80, 0xAE, 0x5D, 0xA2, 0xD2, 0x82, 0xD5,
0x63, 0xA0, 0x01, 0x84, 0x83, 0x07, 0x2E, 0x14,
0xD9, 0xB5, 0x51, 0x90, 0x9B, 0x2C, 0x7C, 0xA3,
0xA6, 0xB2, 0xEB, 0x73, 0xA5, 0x4C, 0xBE, 0x54,
0x16, 0x92, 0x0C, 0x74, 0xE3, 0x36, 0x61, 0x51,
0xC0, 0x38, 0x8C, 0xB0, 0x3A, 0xBD, 0xF5, 0x5A,
0x73, 0xFC, 0x2C, 0x60, 0x25, 0x62, 0x0B, 0x96,
0xBB, 0x6C, 0x4E, 0x42, 0x89, 0xF7, 0x6B, 0x10,
0x53, 0x7C, 0x6A, 0x28, 0xB4, 0x27, 0xF1, 0x8C,
0xE1, 0x13, 0xE6, 0x95, 0xBD, 0x9C, 0x45, 0xC7,
0xE2, 0x24, 0xF4, 0x46, 0xB6, 0x3B, 0x66, 0x70,
0xCC, 0xCA, 0x95, 0xE3, 0x03, 0x85, 0x56, 0xCB,
0xD4, 0x11, 0x1C, 0xD0, 0x1E, 0x93, 0xD7, 0xB8,
0xFB, 0xA6, 0xC3, 0x83, 0x8E, 0x20, 0xB5, 0xFF,
0xE9, 0x9F, 0xCF, 0x77, 0xBF, 0xC3, 0xBA, 0xCC,
0xEA, 0x03, 0x77, 0x6F, 0x39, 0x08, 0xAF, 0xBF,
0x33, 0x40, 0xC9, 0xE7, 0x62, 0x2B, 0x71, 0xE2,
0x81, 0x79, 0x79, 0x0C, 0x09, 0xAA, 0xAD, 0x82,
0x24, 0x41, 0xCD, 0x3A, 0xF9, 0xEA, 0xD8, 0xB9,
0xE5, 0xE4, 0xC5, 0x9A, 0xB9, 0xA4, 0x4D, 0x97,
0x44, 0x7E, 0x08, 0xDA, 0x86, 0x7A, 0xE7, 0x17,
0xA1, 0x66, 0x1D, 0x94, 0xAA, 0xA1, 0xED, 0x1D,
0x06, 0x3D, 0x70, 0xF0, 0xB2, 0xDE, 0xD2, 0xB3,
0x41, 0x0B, 0x7B, 0x72, 0xA0, 0xA7, 0x11, 0x1C,
0x31, 0xEF, 0xC2, 0xD1, 0x27, 0x53, 0x90, 0x3E,
0x20, 0x8F, 0xF6, 0x33, 0x60, 0x26, 0xFF, 0x5F,
0x96, 0xEC, 0x5C, 0x76, 0xB1, 0x2A, 0xAB, 0x49,
0x9E, 0x81, 0x9C, 0x88, 0x52, 0xEE, 0x1B, 0x21,
0x5F, 0xC4, 0x93, 0x1A, 0x0A, 0xEB, 0xEF, 0xD9,
0x91, 0xC5, 0x85, 0x39, 0x49, 0x99, 0xEE, 0xCD,
0x2D, 0xAD, 0x4F, 0x31, 0x8F, 0x8B, 0x3B, 0x01,
0x47, 0x18, 0x87, 0x23, 0x6D, 0xDD, 0x46, 0x1F,
0xD6, 0x4E, 0x3E, 0x2D, 0x69, 0xF9, 0x64, 0x48,
0x2A, 0x4F, 0xCE, 0xF2, 0xCB, 0x65, 0x2F, 0x8E,
0xFC, 0x78, 0x97, 0x5C, 0x05, 0x58, 0x7A, 0x19,
0xAC, 0x8D, 0x7F, 0xE5, 0xD5, 0x98, 0x1A, 0x57,
0x4B, 0x67, 0x0E, 0x7F, 0xA7, 0x05, 0x5A, 0x64,
0x28, 0xAF, 0x14, 0x63, 0x3F, 0xB6, 0x29, 0xFE,
0x88, 0xF5, 0x3C, 0xB7, 0x4C, 0x3C, 0x02, 0xA5,
0xB8, 0xCE, 0xDA, 0xE9, 0xB0, 0x68, 0x17, 0x44,
0x55, 0xE0, 0x1F, 0x4D, 0x8A, 0x43, 0x7D, 0x69,
0x57, 0x29, 0xC7, 0x2E, 0x8D, 0xAC, 0x74, 0x15,
0xB7, 0x59, 0xC4, 0xA8, 0x9F, 0x0A, 0x72, 0x9E,
0x7E, 0x6E, 0x15, 0x47, 0x22, 0xDF, 0x12, 0x34,
0x58, 0x35, 0x07, 0x6A, 0x99, 0xCF, 0x34, 0xDC,
0x6E, 0x22, 0x50, 0xC9, 0xDE, 0xC0, 0x68, 0x9B,
0x65, 0x89, 0xBC, 0xD4, 0xDB, 0xED, 0xF8, 0xAB,
0xC8, 0x12, 0xA8, 0xA2, 0x2B, 0x0D, 0x40, 0x52,
0xDC, 0xBB, 0xFE, 0x02, 0x32, 0x2F, 0xA4, 0xA9,
0xCA, 0xD7, 0x10, 0x61, 0x21, 0x1E, 0xF0, 0xB4,
0xD3, 0x50, 0x5D, 0x04, 0x0F, 0xF6, 0x00, 0xC2,
0x6F, 0x16, 0x9D, 0x25, 0x36, 0x86, 0x42, 0x56,
0x4A, 0x55, 0x5E, 0x09, 0xC1, 0xBE, 0xE0, 0x91
};
/* Macro to perform one column of the RS matrix multiplication. The
* parameters a, b, c, and d are the four bytes of output; i is the index
* of the key bytes, and w, x, y, and z, are the column of constants from
* the RS matrix, preprocessed through the poly_to_exp table. */
#define CALC_S(a, b, c, d, i, w, x, y, z) \
if (key[i]) { \
tmp = poly_to_exp[key[i] - 1]; \
(a) ^= exp_to_poly[tmp + (w)]; \
(b) ^= exp_to_poly[tmp + (x)]; \
(c) ^= exp_to_poly[tmp + (y)]; \
(d) ^= exp_to_poly[tmp + (z)]; \
}
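/*
 * Worked example (for illustration): the first CALC_S invocation in
 * __twofish_setkey() below, CALC_S(sa, sb, sc, sd, 0, 0x00, 0x2D, 0x01,
 * 0x2D), expands to roughly
 *
 *	if (key[0]) {
 *		tmp = poly_to_exp[key[0] - 1];
 *		sa ^= exp_to_poly[tmp + 0x00];	// 0x01 * key[0]
 *		sb ^= exp_to_poly[tmp + 0x2D];	// 0xA4 * key[0]
 *		sc ^= exp_to_poly[tmp + 0x01];	// 0x02 * key[0]
 *		sd ^= exp_to_poly[tmp + 0x2D];	// 0xA4 * key[0]
 *	}
 *
 * since poly_to_exp maps 0x01 -> 0x00, 0xA4 -> 0x2D and 0x02 -> 0x01,
 * i.e. one column of the RS matrix multiplied by one key byte.
 */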
/* Macros to calculate the key-dependent S-boxes for a 128-bit key using
* the S vector from CALC_S. CALC_SB_2 computes a single entry in all
* four S-boxes, where i is the index of the entry to compute, and a and b
* are the index numbers preprocessed through the q0 and q1 tables
* respectively. */
#define CALC_SB_2(i, a, b) \
ctx->s[0][i] = mds[0][q0[(a) ^ sa] ^ se]; \
ctx->s[1][i] = mds[1][q0[(b) ^ sb] ^ sf]; \
ctx->s[2][i] = mds[2][q1[(a) ^ sc] ^ sg]; \
ctx->s[3][i] = mds[3][q1[(b) ^ sd] ^ sh]
/* Macro exactly like CALC_SB_2, but for 192-bit keys. */
#define CALC_SB192_2(i, a, b) \
ctx->s[0][i] = mds[0][q0[q0[(b) ^ sa] ^ se] ^ si]; \
ctx->s[1][i] = mds[1][q0[q1[(b) ^ sb] ^ sf] ^ sj]; \
ctx->s[2][i] = mds[2][q1[q0[(a) ^ sc] ^ sg] ^ sk]; \
ctx->s[3][i] = mds[3][q1[q1[(a) ^ sd] ^ sh] ^ sl];
/* Macro exactly like CALC_SB_2, but for 256-bit keys. */
#define CALC_SB256_2(i, a, b) \
ctx->s[0][i] = mds[0][q0[q0[q1[(b) ^ sa] ^ se] ^ si] ^ sm]; \
ctx->s[1][i] = mds[1][q0[q1[q1[(a) ^ sb] ^ sf] ^ sj] ^ sn]; \
ctx->s[2][i] = mds[2][q1[q0[q0[(a) ^ sc] ^ sg] ^ sk] ^ so]; \
ctx->s[3][i] = mds[3][q1[q1[q0[(b) ^ sd] ^ sh] ^ sl] ^ sp];
/* Macros to calculate the whitening and round subkeys. CALC_K_2 computes the
* last two stages of the h() function for a given index (either 2i or 2i+1).
* a, b, c, and d are the four bytes going into the last two stages. For
* 128-bit keys, this is the entire h() function and a and c are the index
* preprocessed through q0 and q1 respectively; for longer keys they are the
* output of previous stages. j is the index of the first key byte to use.
* CALC_K computes a pair of subkeys for 128-bit Twofish, by calling CALC_K_2
* twice, doing the Pseudo-Hadamard Transform, and doing the necessary
* rotations. Its parameters are: a, the array to write the results into,
* j, the index of the first output entry, k and l, the preprocessed indices
* for index 2i, and m and n, the preprocessed indices for index 2i+1.
* CALC_K192_2 expands CALC_K_2 to handle 192-bit keys, by doing an
* additional lookup-and-XOR stage. The parameters a, b, c and d are the
* four bytes going into the last three stages. For 192-bit keys, c = d
* are the index preprocessed through q0, and a = b are the index
* preprocessed through q1; j is the index of the first key byte to use.
* CALC_K192 is identical to CALC_K but for using the CALC_K192_2 macro
* instead of CALC_K_2.
* CALC_K256_2 expands CALC_K192_2 to handle 256-bit keys, by doing an
* additional lookup-and-XOR stage. The parameters a and b are the index
* preprocessed through q0 and q1 respectively; j is the index of the first
* key byte to use. CALC_K256 is identical to CALC_K but for using the
* CALC_K256_2 macro instead of CALC_K_2. */
#define CALC_K_2(a, b, c, d, j) \
mds[0][q0[a ^ key[(j) + 8]] ^ key[j]] \
^ mds[1][q0[b ^ key[(j) + 9]] ^ key[(j) + 1]] \
^ mds[2][q1[c ^ key[(j) + 10]] ^ key[(j) + 2]] \
^ mds[3][q1[d ^ key[(j) + 11]] ^ key[(j) + 3]]
#define CALC_K(a, j, k, l, m, n) \
x = CALC_K_2 (k, l, k, l, 0); \
y = CALC_K_2 (m, n, m, n, 4); \
y = rol32(y, 8); \
x += y; y += x; ctx->a[j] = x; \
ctx->a[(j) + 1] = rol32(y, 9)
#define CALC_K192_2(a, b, c, d, j) \
CALC_K_2 (q0[a ^ key[(j) + 16]], \
q1[b ^ key[(j) + 17]], \
q0[c ^ key[(j) + 18]], \
q1[d ^ key[(j) + 19]], j)
#define CALC_K192(a, j, k, l, m, n) \
x = CALC_K192_2 (l, l, k, k, 0); \
y = CALC_K192_2 (n, n, m, m, 4); \
y = rol32(y, 8); \
x += y; y += x; ctx->a[j] = x; \
ctx->a[(j) + 1] = rol32(y, 9)
#define CALC_K256_2(a, b, j) \
CALC_K192_2 (q1[b ^ key[(j) + 24]], \
q1[a ^ key[(j) + 25]], \
q0[a ^ key[(j) + 26]], \
q0[b ^ key[(j) + 27]], j)
#define CALC_K256(a, j, k, l, m, n) \
x = CALC_K256_2 (k, l, 0); \
y = CALC_K256_2 (m, n, 4); \
y = rol32(y, 8); \
x += y; y += x; ctx->a[j] = x; \
ctx->a[(j) + 1] = rol32(y, 9)
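/*
 * Illustrative sketch (hypothetical helper, unused): the common tail of
 * CALC_K, CALC_K192 and CALC_K256 -- the rol32(y, 8), the
 * Pseudo-Hadamard Transform and the final rotation applied to each pair
 * of h() outputs.
 */
static inline void twofish_pht_sketch(u32 x, u32 y, u32 *k_even, u32 *k_odd)
{
	y = rol32(y, 8);	/* pre-rotate the odd-index h() output */
	x += y;			/* PHT: (x, y) -> (x + y, x + 2y) */
	y += x;
	*k_even = x;
	*k_odd = rol32(y, 9);
}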
/* Perform the key setup. */
int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key,
unsigned int key_len)
{
int i, j, k;
/* Temporaries for CALC_K. */
u32 x, y;
/* The S vector used to key the S-boxes, split up into individual bytes.
 * 128-bit keys use only sa through sh; 192-bit keys add si through sl;
 * 256-bit keys use all of them. */
u8 sa = 0, sb = 0, sc = 0, sd = 0, se = 0, sf = 0, sg = 0, sh = 0;
u8 si = 0, sj = 0, sk = 0, sl = 0, sm = 0, sn = 0, so = 0, sp = 0;
/* Temporary for CALC_S. */
u8 tmp;
/* Check key length. */
if (key_len % 8)
return -EINVAL; /* unsupported key length */
/* Compute the first two words of the S vector. The magic numbers are
* the entries of the RS matrix, preprocessed through poly_to_exp. The
* numbers in the comments are the original (polynomial form) matrix
* entries. */
CALC_S (sa, sb, sc, sd, 0, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
CALC_S (sa, sb, sc, sd, 1, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
CALC_S (sa, sb, sc, sd, 2, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
CALC_S (sa, sb, sc, sd, 3, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
CALC_S (sa, sb, sc, sd, 4, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
CALC_S (sa, sb, sc, sd, 5, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
CALC_S (sa, sb, sc, sd, 6, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
CALC_S (sa, sb, sc, sd, 7, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
CALC_S (se, sf, sg, sh, 8, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
CALC_S (se, sf, sg, sh, 9, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
CALC_S (se, sf, sg, sh, 10, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
CALC_S (se, sf, sg, sh, 11, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
CALC_S (se, sf, sg, sh, 12, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
CALC_S (se, sf, sg, sh, 13, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
CALC_S (se, sf, sg, sh, 14, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
CALC_S (se, sf, sg, sh, 15, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
if (key_len == 24 || key_len == 32) { /* 192- or 256-bit key */
/* Calculate the third word of the S vector */
CALC_S (si, sj, sk, sl, 16, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
CALC_S (si, sj, sk, sl, 17, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
CALC_S (si, sj, sk, sl, 18, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
CALC_S (si, sj, sk, sl, 19, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
CALC_S (si, sj, sk, sl, 20, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
CALC_S (si, sj, sk, sl, 21, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
CALC_S (si, sj, sk, sl, 22, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
CALC_S (si, sj, sk, sl, 23, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
}
if (key_len == 32) { /* 256-bit key */
/* Calculate the fourth word of the S vector */
CALC_S (sm, sn, so, sp, 24, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
CALC_S (sm, sn, so, sp, 25, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
CALC_S (sm, sn, so, sp, 26, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
CALC_S (sm, sn, so, sp, 27, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
CALC_S (sm, sn, so, sp, 28, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
CALC_S (sm, sn, so, sp, 29, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
CALC_S (sm, sn, so, sp, 30, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
CALC_S (sm, sn, so, sp, 31, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
/* Compute the S-boxes. */
for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
CALC_SB256_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
}
/* CALC_K256/CALC_K192/CALC_K loops were unrolled.
 * Unrolling produced 2.5x more code (+18k on i386)
 * and sped up key setup by 7%:
* unrolled: twofish_setkey/sec: 41128
* loop: twofish_setkey/sec: 38148
* CALC_K256: ~100 insns each
* CALC_K192: ~90 insns
* CALC_K: ~70 insns
*/
/* Calculate whitening and round subkeys */
for ( i = 0; i < 8; i += 2 ) {
CALC_K256 (w, i, q0[i], q1[i], q0[i+1], q1[i+1]);
}
for ( i = 0; i < 32; i += 2 ) {
CALC_K256 (k, i, q0[i+8], q1[i+8], q0[i+9], q1[i+9]);
}
} else if (key_len == 24) { /* 192-bit key */
/* Compute the S-boxes. */
for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
CALC_SB192_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
}
/* Calculate whitening and round subkeys */
for ( i = 0; i < 8; i += 2 ) {
CALC_K192 (w, i, q0[i], q1[i], q0[i+1], q1[i+1]);
}
for ( i = 0; i < 32; i += 2 ) {
CALC_K192 (k, i, q0[i+8], q1[i+8], q0[i+9], q1[i+9]);
}
} else { /* 128-bit key */
/* Compute the S-boxes. */
for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
CALC_SB_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
}
/* Calculate whitening and round subkeys */
for ( i = 0; i < 8; i += 2 ) {
CALC_K (w, i, q0[i], q1[i], q0[i+1], q1[i+1]);
}
for ( i = 0; i < 32; i += 2 ) {
CALC_K (k, i, q0[i+8], q1[i+8], q0[i+9], q1[i+9]);
}
}
return 0;
}
EXPORT_SYMBOL_GPL(__twofish_setkey);
int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
{
return __twofish_setkey(crypto_tfm_ctx(tfm), key, key_len);
}
EXPORT_SYMBOL_GPL(twofish_setkey);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Twofish cipher common functions");
| linux-master | crypto/twofish_common.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* The AEGIS-128 Authenticated-Encryption Algorithm
*
* Copyright (c) 2017-2018 Ondrej Mosnacek <[email protected]>
* Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
*/
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/simd.h>
#include "aegis.h"
#define AEGIS128_NONCE_SIZE 16
#define AEGIS128_STATE_BLOCKS 5
#define AEGIS128_KEY_SIZE 16
#define AEGIS128_MIN_AUTH_SIZE 8
#define AEGIS128_MAX_AUTH_SIZE 16
struct aegis_state {
union aegis_block blocks[AEGIS128_STATE_BLOCKS];
};
struct aegis_ctx {
union aegis_block key;
};
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_simd);
static const union aegis_block crypto_aegis_const[2] = {
{ .words64 = {
cpu_to_le64(U64_C(0x0d08050302010100)),
cpu_to_le64(U64_C(0x6279e99059372215)),
} },
{ .words64 = {
cpu_to_le64(U64_C(0xf12fc26d55183ddb)),
cpu_to_le64(U64_C(0xdd28b57342311120)),
} },
};
static bool aegis128_do_simd(void)
{
#ifdef CONFIG_CRYPTO_AEGIS128_SIMD
if (static_branch_likely(&have_simd))
return crypto_simd_usable();
#endif
return false;
}
static void crypto_aegis128_update(struct aegis_state *state)
{
union aegis_block tmp;
unsigned int i;
tmp = state->blocks[AEGIS128_STATE_BLOCKS - 1];
for (i = AEGIS128_STATE_BLOCKS - 1; i > 0; i--)
crypto_aegis_aesenc(&state->blocks[i], &state->blocks[i - 1],
&state->blocks[i]);
crypto_aegis_aesenc(&state->blocks[0], &tmp, &state->blocks[0]);
}
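/*
 * For illustration: with S[0..4] the five state blocks and AESRound(x, k)
 * one AES round on x followed by XOR with the round key k, the update
 * above computes S'[i] = AESRound(S[(i + 4) % 5], S[i]) for all i.  The
 * _a/_u variants below then XOR the message block into the new S[0],
 * which equals the spec's AESRound(S[4], S[0] ^ m) because the round key
 * is XORed in last.
 */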
static void crypto_aegis128_update_a(struct aegis_state *state,
const union aegis_block *msg,
bool do_simd)
{
if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD) && do_simd) {
crypto_aegis128_update_simd(state, msg);
return;
}
crypto_aegis128_update(state);
crypto_aegis_block_xor(&state->blocks[0], msg);
}
static void crypto_aegis128_update_u(struct aegis_state *state, const void *msg,
bool do_simd)
{
if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD) && do_simd) {
crypto_aegis128_update_simd(state, msg);
return;
}
crypto_aegis128_update(state);
crypto_xor(state->blocks[0].bytes, msg, AEGIS_BLOCK_SIZE);
}
static void crypto_aegis128_init(struct aegis_state *state,
const union aegis_block *key,
const u8 *iv)
{
union aegis_block key_iv;
unsigned int i;
key_iv = *key;
crypto_xor(key_iv.bytes, iv, AEGIS_BLOCK_SIZE);
state->blocks[0] = key_iv;
state->blocks[1] = crypto_aegis_const[1];
state->blocks[2] = crypto_aegis_const[0];
state->blocks[3] = *key;
state->blocks[4] = *key;
crypto_aegis_block_xor(&state->blocks[3], &crypto_aegis_const[0]);
crypto_aegis_block_xor(&state->blocks[4], &crypto_aegis_const[1]);
for (i = 0; i < 5; i++) {
crypto_aegis128_update_a(state, key, false);
crypto_aegis128_update_a(state, &key_iv, false);
}
}
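/*
 * For illustration: after the assignments above the state is
 *	S = { K ^ N, const1, const0, K ^ const0, K ^ const1 }
 * and the loop then mixes it with ten updates alternating K and K ^ N,
 * matching the initialization step of the AEGIS-128 specification.
 */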
static void crypto_aegis128_ad(struct aegis_state *state,
const u8 *src, unsigned int size,
bool do_simd)
{
if (AEGIS_ALIGNED(src)) {
const union aegis_block *src_blk =
(const union aegis_block *)src;
while (size >= AEGIS_BLOCK_SIZE) {
crypto_aegis128_update_a(state, src_blk, do_simd);
size -= AEGIS_BLOCK_SIZE;
src_blk++;
}
} else {
while (size >= AEGIS_BLOCK_SIZE) {
crypto_aegis128_update_u(state, src, do_simd);
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
}
}
}
static void crypto_aegis128_wipe_chunk(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
memzero_explicit(dst, size);
}
static void crypto_aegis128_encrypt_chunk(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
union aegis_block tmp;
if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) {
while (size >= AEGIS_BLOCK_SIZE) {
union aegis_block *dst_blk =
(union aegis_block *)dst;
const union aegis_block *src_blk =
(const union aegis_block *)src;
tmp = state->blocks[2];
crypto_aegis_block_and(&tmp, &state->blocks[3]);
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_aegis_block_xor(&tmp, src_blk);
crypto_aegis128_update_a(state, src_blk, false);
*dst_blk = tmp;
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
dst += AEGIS_BLOCK_SIZE;
}
} else {
while (size >= AEGIS_BLOCK_SIZE) {
tmp = state->blocks[2];
crypto_aegis_block_and(&tmp, &state->blocks[3]);
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE);
crypto_aegis128_update_u(state, src, false);
memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE);
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
dst += AEGIS_BLOCK_SIZE;
}
}
if (size > 0) {
union aegis_block msg = {};
memcpy(msg.bytes, src, size);
tmp = state->blocks[2];
crypto_aegis_block_and(&tmp, &state->blocks[3]);
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_aegis128_update_a(state, &msg, false);
crypto_aegis_block_xor(&msg, &tmp);
memcpy(dst, msg.bytes, size);
}
}
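/*
 * For illustration: both chunk routines derive the keystream block as
 *	z = S[1] ^ S[4] ^ (S[2] & S[3])
 * and XOR it with the input.  Encryption (above) feeds the plaintext
 * into the state update; decryption (below) feeds the recovered
 * plaintext, zero-padding a trailing partial block before the update.
 */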
static void crypto_aegis128_decrypt_chunk(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
union aegis_block tmp;
if (AEGIS_ALIGNED(src) && AEGIS_ALIGNED(dst)) {
while (size >= AEGIS_BLOCK_SIZE) {
union aegis_block *dst_blk =
(union aegis_block *)dst;
const union aegis_block *src_blk =
(const union aegis_block *)src;
tmp = state->blocks[2];
crypto_aegis_block_and(&tmp, &state->blocks[3]);
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_aegis_block_xor(&tmp, src_blk);
crypto_aegis128_update_a(state, &tmp, false);
*dst_blk = tmp;
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
dst += AEGIS_BLOCK_SIZE;
}
} else {
while (size >= AEGIS_BLOCK_SIZE) {
tmp = state->blocks[2];
crypto_aegis_block_and(&tmp, &state->blocks[3]);
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_xor(tmp.bytes, src, AEGIS_BLOCK_SIZE);
crypto_aegis128_update_a(state, &tmp, false);
memcpy(dst, tmp.bytes, AEGIS_BLOCK_SIZE);
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
dst += AEGIS_BLOCK_SIZE;
}
}
if (size > 0) {
union aegis_block msg = {};
memcpy(msg.bytes, src, size);
tmp = state->blocks[2];
crypto_aegis_block_and(&tmp, &state->blocks[3]);
crypto_aegis_block_xor(&tmp, &state->blocks[4]);
crypto_aegis_block_xor(&tmp, &state->blocks[1]);
crypto_aegis_block_xor(&msg, &tmp);
memset(msg.bytes + size, 0, AEGIS_BLOCK_SIZE - size);
crypto_aegis128_update_a(state, &msg, false);
memcpy(dst, msg.bytes, size);
}
}
static void crypto_aegis128_process_ad(struct aegis_state *state,
struct scatterlist *sg_src,
unsigned int assoclen,
bool do_simd)
{
struct scatter_walk walk;
union aegis_block buf;
unsigned int pos = 0;
scatterwalk_start(&walk, sg_src);
while (assoclen != 0) {
unsigned int size = scatterwalk_clamp(&walk, assoclen);
unsigned int left = size;
void *mapped = scatterwalk_map(&walk);
const u8 *src = (const u8 *)mapped;
if (pos + size >= AEGIS_BLOCK_SIZE) {
if (pos > 0) {
unsigned int fill = AEGIS_BLOCK_SIZE - pos;
memcpy(buf.bytes + pos, src, fill);
crypto_aegis128_update_a(state, &buf, do_simd);
pos = 0;
left -= fill;
src += fill;
}
crypto_aegis128_ad(state, src, left, do_simd);
src += left & ~(AEGIS_BLOCK_SIZE - 1);
left &= AEGIS_BLOCK_SIZE - 1;
}
memcpy(buf.bytes + pos, src, left);
pos += left;
assoclen -= size;
scatterwalk_unmap(mapped);
scatterwalk_advance(&walk, size);
scatterwalk_done(&walk, 0, assoclen);
}
if (pos > 0) {
memset(buf.bytes + pos, 0, AEGIS_BLOCK_SIZE - pos);
crypto_aegis128_update_a(state, &buf, do_simd);
}
}
static __always_inline
int crypto_aegis128_process_crypt(struct aegis_state *state,
struct skcipher_walk *walk,
void (*crypt)(struct aegis_state *state,
u8 *dst, const u8 *src,
unsigned int size))
{
int err = 0;
while (walk->nbytes) {
unsigned int nbytes = walk->nbytes;
if (nbytes < walk->total)
nbytes = round_down(nbytes, walk->stride);
crypt(state, walk->dst.virt.addr, walk->src.virt.addr, nbytes);
err = skcipher_walk_done(walk, walk->nbytes - nbytes);
}
return err;
}
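/*
 * Example of the rounding above: with stride == AEGIS_BLOCK_SIZE and a
 * 20-byte intermediate step, only 16 bytes are passed to crypt() and
 * skcipher_walk_done() re-queues the remaining 4 for the next iteration;
 * only the final step (nbytes == walk->total) may end in a partial block.
 */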
static void crypto_aegis128_final(struct aegis_state *state,
union aegis_block *tag_xor,
u64 assoclen, u64 cryptlen)
{
u64 assocbits = assoclen * 8;
u64 cryptbits = cryptlen * 8;
union aegis_block tmp;
unsigned int i;
tmp.words64[0] = cpu_to_le64(assocbits);
tmp.words64[1] = cpu_to_le64(cryptbits);
crypto_aegis_block_xor(&tmp, &state->blocks[3]);
for (i = 0; i < 7; i++)
crypto_aegis128_update_a(state, &tmp, false);
for (i = 0; i < AEGIS128_STATE_BLOCKS; i++)
crypto_aegis_block_xor(tag_xor, &state->blocks[i]);
}
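/*
 * For illustration: finalization absorbs tmp = S[3] ^ (le64(assocbits) ||
 * le64(cryptbits)) for seven update rounds and then XORs all five state
 * blocks into *tag_xor, so a caller can pass in the received tag and
 * check the result against zero (as the decrypt paths below do).
 */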
static int crypto_aegis128_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct aegis_ctx *ctx = crypto_aead_ctx(aead);
if (keylen != AEGIS128_KEY_SIZE)
return -EINVAL;
memcpy(ctx->key.bytes, key, AEGIS128_KEY_SIZE);
return 0;
}
static int crypto_aegis128_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
if (authsize > AEGIS128_MAX_AUTH_SIZE)
return -EINVAL;
if (authsize < AEGIS128_MIN_AUTH_SIZE)
return -EINVAL;
return 0;
}
static int crypto_aegis128_encrypt_generic(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
union aegis_block tag = {};
unsigned int authsize = crypto_aead_authsize(tfm);
struct aegis_ctx *ctx = crypto_aead_ctx(tfm);
unsigned int cryptlen = req->cryptlen;
struct skcipher_walk walk;
struct aegis_state state;
skcipher_walk_aead_encrypt(&walk, req, false);
crypto_aegis128_init(&state, &ctx->key, req->iv);
crypto_aegis128_process_ad(&state, req->src, req->assoclen, false);
crypto_aegis128_process_crypt(&state, &walk,
crypto_aegis128_encrypt_chunk);
crypto_aegis128_final(&state, &tag, req->assoclen, cryptlen);
scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen,
authsize, 1);
return 0;
}
static int crypto_aegis128_decrypt_generic(struct aead_request *req)
{
static const u8 zeros[AEGIS128_MAX_AUTH_SIZE] = {};
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
union aegis_block tag;
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen - authsize;
struct aegis_ctx *ctx = crypto_aead_ctx(tfm);
struct skcipher_walk walk;
struct aegis_state state;
scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen,
authsize, 0);
skcipher_walk_aead_decrypt(&walk, req, false);
crypto_aegis128_init(&state, &ctx->key, req->iv);
crypto_aegis128_process_ad(&state, req->src, req->assoclen, false);
crypto_aegis128_process_crypt(&state, &walk,
crypto_aegis128_decrypt_chunk);
crypto_aegis128_final(&state, &tag, req->assoclen, cryptlen);
if (unlikely(crypto_memneq(tag.bytes, zeros, authsize))) {
/*
* From Chapter 4. 'Security Analysis' of the AEGIS spec [0]
*
* "3. If verification fails, the decrypted plaintext and the
* wrong authentication tag should not be given as output."
*
* [0] https://competitions.cr.yp.to/round3/aegisv11.pdf
*/
skcipher_walk_aead_decrypt(&walk, req, false);
crypto_aegis128_process_crypt(NULL, &walk,
crypto_aegis128_wipe_chunk);
memzero_explicit(&tag, sizeof(tag));
return -EBADMSG;
}
return 0;
}
static int crypto_aegis128_encrypt_simd(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
union aegis_block tag = {};
unsigned int authsize = crypto_aead_authsize(tfm);
struct aegis_ctx *ctx = crypto_aead_ctx(tfm);
unsigned int cryptlen = req->cryptlen;
struct skcipher_walk walk;
struct aegis_state state;
if (!aegis128_do_simd())
return crypto_aegis128_encrypt_generic(req);
skcipher_walk_aead_encrypt(&walk, req, false);
crypto_aegis128_init_simd(&state, &ctx->key, req->iv);
crypto_aegis128_process_ad(&state, req->src, req->assoclen, true);
crypto_aegis128_process_crypt(&state, &walk,
crypto_aegis128_encrypt_chunk_simd);
crypto_aegis128_final_simd(&state, &tag, req->assoclen, cryptlen, 0);
scatterwalk_map_and_copy(tag.bytes, req->dst, req->assoclen + cryptlen,
authsize, 1);
return 0;
}
static int crypto_aegis128_decrypt_simd(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
union aegis_block tag;
unsigned int authsize = crypto_aead_authsize(tfm);
unsigned int cryptlen = req->cryptlen - authsize;
struct aegis_ctx *ctx = crypto_aead_ctx(tfm);
struct skcipher_walk walk;
struct aegis_state state;
if (!aegis128_do_simd())
return crypto_aegis128_decrypt_generic(req);
scatterwalk_map_and_copy(tag.bytes, req->src, req->assoclen + cryptlen,
authsize, 0);
skcipher_walk_aead_decrypt(&walk, req, false);
crypto_aegis128_init_simd(&state, &ctx->key, req->iv);
crypto_aegis128_process_ad(&state, req->src, req->assoclen, true);
crypto_aegis128_process_crypt(&state, &walk,
crypto_aegis128_decrypt_chunk_simd);
if (unlikely(crypto_aegis128_final_simd(&state, &tag, req->assoclen,
cryptlen, authsize))) {
skcipher_walk_aead_decrypt(&walk, req, false);
crypto_aegis128_process_crypt(NULL, &walk,
crypto_aegis128_wipe_chunk);
return -EBADMSG;
}
return 0;
}
static struct aead_alg crypto_aegis128_alg_generic = {
.setkey = crypto_aegis128_setkey,
.setauthsize = crypto_aegis128_setauthsize,
.encrypt = crypto_aegis128_encrypt_generic,
.decrypt = crypto_aegis128_decrypt_generic,
.ivsize = AEGIS128_NONCE_SIZE,
.maxauthsize = AEGIS128_MAX_AUTH_SIZE,
.chunksize = AEGIS_BLOCK_SIZE,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aegis_ctx),
.base.cra_alignmask = 0,
.base.cra_priority = 100,
.base.cra_name = "aegis128",
.base.cra_driver_name = "aegis128-generic",
.base.cra_module = THIS_MODULE,
};
static struct aead_alg crypto_aegis128_alg_simd = {
.setkey = crypto_aegis128_setkey,
.setauthsize = crypto_aegis128_setauthsize,
.encrypt = crypto_aegis128_encrypt_simd,
.decrypt = crypto_aegis128_decrypt_simd,
.ivsize = AEGIS128_NONCE_SIZE,
.maxauthsize = AEGIS128_MAX_AUTH_SIZE,
.chunksize = AEGIS_BLOCK_SIZE,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aegis_ctx),
.base.cra_alignmask = 0,
.base.cra_priority = 200,
.base.cra_name = "aegis128",
.base.cra_driver_name = "aegis128-simd",
.base.cra_module = THIS_MODULE,
};
static int __init crypto_aegis128_module_init(void)
{
int ret;
ret = crypto_register_aead(&crypto_aegis128_alg_generic);
if (ret)
return ret;
if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD) &&
crypto_aegis128_have_simd()) {
ret = crypto_register_aead(&crypto_aegis128_alg_simd);
if (ret) {
crypto_unregister_aead(&crypto_aegis128_alg_generic);
return ret;
}
static_branch_enable(&have_simd);
}
return 0;
}
static void __exit crypto_aegis128_module_exit(void)
{
if (IS_ENABLED(CONFIG_CRYPTO_AEGIS128_SIMD) &&
crypto_aegis128_have_simd())
crypto_unregister_aead(&crypto_aegis128_alg_simd);
crypto_unregister_aead(&crypto_aegis128_alg_generic);
}
subsys_initcall(crypto_aegis128_module_init);
module_exit(crypto_aegis128_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <[email protected]>");
MODULE_DESCRIPTION("AEGIS-128 AEAD algorithm");
MODULE_ALIAS_CRYPTO("aegis128");
MODULE_ALIAS_CRYPTO("aegis128-generic");
MODULE_ALIAS_CRYPTO("aegis128-simd");
| linux-master | crypto/aegis128-core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Symmetric key cipher operations.
*
* Generic encrypt/decrypt wrapper for ciphers, handles operations across
* multiple page boundaries by using temporary blocks. In user context,
* the kernel is given a chance to schedule us once per page.
*
* Copyright (c) 2015 Herbert Xu <[email protected]>
*/
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "internal.h"
enum {
SKCIPHER_WALK_PHYS = 1 << 0,
SKCIPHER_WALK_SLOW = 1 << 1,
SKCIPHER_WALK_COPY = 1 << 2,
SKCIPHER_WALK_DIFF = 1 << 3,
SKCIPHER_WALK_SLEEP = 1 << 4,
};
struct skcipher_walk_buffer {
struct list_head entry;
struct scatter_walk dst;
unsigned int len;
u8 *data;
u8 buffer[];
};
static int skcipher_walk_next(struct skcipher_walk *walk);
static inline void skcipher_map_src(struct skcipher_walk *walk)
{
walk->src.virt.addr = scatterwalk_map(&walk->in);
}
static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
walk->dst.virt.addr = scatterwalk_map(&walk->out);
}
static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
scatterwalk_unmap(walk->src.virt.addr);
}
static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
scatterwalk_unmap(walk->dst.virt.addr);
}
static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}
/* Get a spot of the specified length that does not straddle a page.
* The caller needs to ensure that there is enough space for this operation.
*/
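/*
 * Illustrative worked example (an addition, not in the original source),
 * assuming 4 KiB pages: for start == 0xff8 and len == 16,
 * start + len - 1 == 0x1007, so end_page == 0x1000 and the function
 * returns 0x1000 -- the returned 16-byte spot then lies entirely within
 * the second page.
 */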
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
return max(start, end_page);
}
static inline struct skcipher_alg *__crypto_skcipher_alg(
struct crypto_alg *alg)
{
return container_of(alg, struct skcipher_alg, base);
}
static inline struct crypto_istat_cipher *skcipher_get_stat(
struct skcipher_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
return &alg->stat;
#else
return NULL;
#endif
}
static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
{
struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
return err;
if (err && err != -EINPROGRESS && err != -EBUSY)
atomic64_inc(&istat->err_cnt);
return err;
}
static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
u8 *addr;
addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
addr = skcipher_get_spot(addr, bsize);
scatterwalk_copychunks(addr, &walk->out, bsize,
(walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
return 0;
}
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
unsigned int n = walk->nbytes;
unsigned int nbytes = 0;
if (!n)
goto finish;
if (likely(err >= 0)) {
n -= err;
nbytes = walk->total - n;
}
if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
SKCIPHER_WALK_SLOW |
SKCIPHER_WALK_COPY |
SKCIPHER_WALK_DIFF)))) {
unmap_src:
skcipher_unmap_src(walk);
} else if (walk->flags & SKCIPHER_WALK_DIFF) {
skcipher_unmap_dst(walk);
goto unmap_src;
} else if (walk->flags & SKCIPHER_WALK_COPY) {
skcipher_map_dst(walk);
memcpy(walk->dst.virt.addr, walk->page, n);
skcipher_unmap_dst(walk);
} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
if (err > 0) {
/*
* Didn't process all bytes. Either the algorithm is
* broken, or this was the last step and it turned out
* the message wasn't evenly divisible into blocks but
* the algorithm requires it.
*/
err = -EINVAL;
nbytes = 0;
} else
n = skcipher_done_slow(walk, n);
}
if (err > 0)
err = 0;
walk->total = nbytes;
walk->nbytes = 0;
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
scatterwalk_done(&walk->in, 0, nbytes);
scatterwalk_done(&walk->out, 1, nbytes);
if (nbytes) {
crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
CRYPTO_TFM_REQ_MAY_SLEEP : 0);
return skcipher_walk_next(walk);
}
finish:
/* Short-circuit for the common/fast path. */
if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
goto out;
if (walk->flags & SKCIPHER_WALK_PHYS)
goto out;
if (walk->iv != walk->oiv)
memcpy(walk->oiv, walk->iv, walk->ivsize);
if (walk->buffer != walk->page)
kfree(walk->buffer);
if (walk->page)
free_page((unsigned long)walk->page);
out:
return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
struct skcipher_walk_buffer *p, *tmp;
list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
u8 *data;
if (err)
goto done;
data = p->data;
if (!data) {
data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
data = skcipher_get_spot(data, walk->stride);
}
scatterwalk_copychunks(data, &p->dst, p->len, 1);
if (offset_in_page(p->data) + p->len + walk->stride >
PAGE_SIZE)
free_page((unsigned long)p->data);
done:
list_del(&p->entry);
kfree(p);
}
if (!err && walk->iv != walk->oiv)
memcpy(walk->oiv, walk->iv, walk->ivsize);
if (walk->buffer != walk->page)
kfree(walk->buffer);
if (walk->page)
free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);
static void skcipher_queue_write(struct skcipher_walk *walk,
struct skcipher_walk_buffer *p)
{
p->dst = walk->out;
list_add_tail(&p->entry, &walk->buffers);
}
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
bool phys = walk->flags & SKCIPHER_WALK_PHYS;
unsigned alignmask = walk->alignmask;
struct skcipher_walk_buffer *p;
unsigned a;
unsigned n;
u8 *buffer;
void *v;
if (!phys) {
if (!walk->buffer)
walk->buffer = walk->page;
buffer = walk->buffer;
if (buffer)
goto ok;
}
/* Start with the minimum alignment of kmalloc. */
a = crypto_tfm_ctx_alignment() - 1;
n = bsize;
if (phys) {
/* Calculate the minimum alignment of p->buffer. */
a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
n += sizeof(*p);
}
/* Minimum size to align p->buffer by alignmask. */
n += alignmask & ~a;
/* Minimum size to ensure p->buffer does not straddle a page. */
n += (bsize - 1) & ~(alignmask | a);
v = kzalloc(n, skcipher_walk_gfp(walk));
if (!v)
return skcipher_walk_done(walk, -ENOMEM);
if (phys) {
p = v;
p->len = bsize;
skcipher_queue_write(walk, p);
buffer = p->buffer;
} else {
walk->buffer = v;
buffer = v;
}
ok:
walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
walk->src.virt.addr = walk->dst.virt.addr;
scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
walk->nbytes = bsize;
walk->flags |= SKCIPHER_WALK_SLOW;
return 0;
}
static int skcipher_next_copy(struct skcipher_walk *walk)
{
struct skcipher_walk_buffer *p;
u8 *tmp = walk->page;
skcipher_map_src(walk);
memcpy(tmp, walk->src.virt.addr, walk->nbytes);
skcipher_unmap_src(walk);
walk->src.virt.addr = tmp;
walk->dst.virt.addr = tmp;
if (!(walk->flags & SKCIPHER_WALK_PHYS))
return 0;
p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
if (!p)
return -ENOMEM;
p->data = walk->page;
p->len = walk->nbytes;
skcipher_queue_write(walk, p);
if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
PAGE_SIZE)
walk->page = NULL;
else
walk->page += walk->nbytes;
return 0;
}
static int skcipher_next_fast(struct skcipher_walk *walk)
{
unsigned long diff;
walk->src.phys.page = scatterwalk_page(&walk->in);
walk->src.phys.offset = offset_in_page(walk->in.offset);
walk->dst.phys.page = scatterwalk_page(&walk->out);
walk->dst.phys.offset = offset_in_page(walk->out.offset);
if (walk->flags & SKCIPHER_WALK_PHYS)
return 0;
diff = walk->src.phys.offset - walk->dst.phys.offset;
diff |= walk->src.virt.page - walk->dst.virt.page;
skcipher_map_src(walk);
walk->dst.virt.addr = walk->src.virt.addr;
if (diff) {
walk->flags |= SKCIPHER_WALK_DIFF;
skcipher_map_dst(walk);
}
return 0;
}
static int skcipher_walk_next(struct skcipher_walk *walk)
{
unsigned int bsize;
unsigned int n;
int err;
walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
SKCIPHER_WALK_DIFF);
n = walk->total;
bsize = min(walk->stride, max(n, walk->blocksize));
n = scatterwalk_clamp(&walk->in, n);
n = scatterwalk_clamp(&walk->out, n);
if (unlikely(n < bsize)) {
if (unlikely(walk->total < walk->blocksize))
return skcipher_walk_done(walk, -EINVAL);
slow_path:
err = skcipher_next_slow(walk, bsize);
goto set_phys_lowmem;
}
if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
if (!walk->page) {
gfp_t gfp = skcipher_walk_gfp(walk);
walk->page = (void *)__get_free_page(gfp);
if (!walk->page)
goto slow_path;
}
walk->nbytes = min_t(unsigned, n,
PAGE_SIZE - offset_in_page(walk->page));
walk->flags |= SKCIPHER_WALK_COPY;
err = skcipher_next_copy(walk);
goto set_phys_lowmem;
}
walk->nbytes = n;
return skcipher_next_fast(walk);
set_phys_lowmem:
if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
walk->src.phys.page = virt_to_page(walk->src.virt.addr);
walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
walk->src.phys.offset &= PAGE_SIZE - 1;
walk->dst.phys.offset &= PAGE_SIZE - 1;
}
return err;
}
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
unsigned a = crypto_tfm_ctx_alignment() - 1;
unsigned alignmask = walk->alignmask;
unsigned ivsize = walk->ivsize;
unsigned bs = walk->stride;
unsigned aligned_bs;
unsigned size;
u8 *iv;
aligned_bs = ALIGN(bs, alignmask + 1);
/* Minimum size to align buffer by alignmask. */
size = alignmask & ~a;
if (walk->flags & SKCIPHER_WALK_PHYS)
size += ivsize;
else {
size += aligned_bs + ivsize;
/* Minimum size to ensure buffer does not straddle a page. */
size += (bs - 1) & ~(alignmask | a);
}
walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
if (!walk->buffer)
return -ENOMEM;
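	/*
	 * Layout of walk->buffer: alignment padding, then one aligned
	 * block of scratch space (reusable by skcipher_next_slow()),
	 * then the IV copy placed so it does not straddle a page.
	 */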
iv = PTR_ALIGN(walk->buffer, alignmask + 1);
iv = skcipher_get_spot(iv, bs) + aligned_bs;
walk->iv = memcpy(iv, walk->iv, walk->ivsize);
return 0;
}
static int skcipher_walk_first(struct skcipher_walk *walk)
{
if (WARN_ON_ONCE(in_hardirq()))
return -EDEADLK;
walk->buffer = NULL;
if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
int err = skcipher_copy_iv(walk);
if (err)
return err;
}
walk->page = NULL;
return skcipher_walk_next(walk);
}
static int skcipher_walk_skcipher(struct skcipher_walk *walk,
struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
walk->total = req->cryptlen;
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
if (unlikely(!walk->total))
return 0;
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);
walk->flags &= ~SKCIPHER_WALK_SLEEP;
walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
SKCIPHER_WALK_SLEEP : 0;
walk->blocksize = crypto_skcipher_blocksize(tfm);
walk->stride = crypto_skcipher_walksize(tfm);
walk->ivsize = crypto_skcipher_ivsize(tfm);
walk->alignmask = crypto_skcipher_alignmask(tfm);
return skcipher_walk_first(walk);
}
int skcipher_walk_virt(struct skcipher_walk *walk,
struct skcipher_request *req, bool atomic)
{
int err;
might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
walk->flags &= ~SKCIPHER_WALK_PHYS;
err = skcipher_walk_skcipher(walk, req);
walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;
return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
int skcipher_walk_async(struct skcipher_walk *walk,
struct skcipher_request *req)
{
walk->flags |= SKCIPHER_WALK_PHYS;
INIT_LIST_HEAD(&walk->buffers);
return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
struct aead_request *req, bool atomic)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
int err;
walk->nbytes = 0;
walk->iv = req->iv;
walk->oiv = req->iv;
if (unlikely(!walk->total))
return 0;
walk->flags &= ~SKCIPHER_WALK_PHYS;
scatterwalk_start(&walk->in, req->src);
scatterwalk_start(&walk->out, req->dst);
scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
scatterwalk_done(&walk->in, 0, walk->total);
scatterwalk_done(&walk->out, 0, walk->total);
if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
walk->flags |= SKCIPHER_WALK_SLEEP;
else
walk->flags &= ~SKCIPHER_WALK_SLEEP;
walk->blocksize = crypto_aead_blocksize(tfm);
walk->stride = crypto_aead_chunksize(tfm);
walk->ivsize = crypto_aead_ivsize(tfm);
walk->alignmask = crypto_aead_alignmask(tfm);
err = skcipher_walk_first(walk);
if (atomic)
walk->flags &= ~SKCIPHER_WALK_SLEEP;
return err;
}
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
struct aead_request *req, bool atomic)
{
walk->total = req->cryptlen;
return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
struct aead_request *req, bool atomic)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
walk->total = req->cryptlen - crypto_aead_authsize(tfm);
return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
if (crypto_skcipher_max_keysize(tfm) != 0)
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}
static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
unsigned long alignmask = crypto_skcipher_alignmask(tfm);
struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
u8 *buffer, *alignbuffer;
unsigned long absize;
int ret;
absize = keylen + alignmask;
buffer = kmalloc(absize, GFP_ATOMIC);
if (!buffer)
return -ENOMEM;
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
ret = cipher->setkey(tfm, alignbuffer, keylen);
kfree_sensitive(buffer);
return ret;
}
int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
unsigned long alignmask = crypto_skcipher_alignmask(tfm);
int err;
if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
return -EINVAL;
if ((unsigned long)key & alignmask)
err = skcipher_setkey_unaligned(tfm, key, keylen);
else
err = cipher->setkey(tfm, key, keylen);
if (unlikely(err)) {
skcipher_set_needkey(tfm);
return err;
}
crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);
int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
int ret;
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
atomic64_inc(&istat->encrypt_cnt);
atomic64_add(req->cryptlen, &istat->encrypt_tlen);
}
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
ret = alg->encrypt(req);
return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
int ret;
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
atomic64_inc(&istat->decrypt_cnt);
atomic64_add(req->cryptlen, &istat->decrypt_tlen);
}
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
ret = alg->decrypt(req);
return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
alg->exit(skcipher);
}
static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
skcipher_set_needkey(skcipher);
if (alg->exit)
skcipher->base.exit = crypto_skcipher_exit_tfm;
if (alg->init)
return alg->init(skcipher);
return 0;
}
static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
struct skcipher_instance *skcipher =
container_of(inst, struct skcipher_instance, s.base);
skcipher->free(skcipher);
}
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
seq_printf(m, "type : skcipher\n");
seq_printf(m, "async : %s\n",
alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "min keysize : %u\n", skcipher->min_keysize);
seq_printf(m, "max keysize : %u\n", skcipher->max_keysize);
seq_printf(m, "ivsize : %u\n", skcipher->ivsize);
seq_printf(m, "chunksize : %u\n", skcipher->chunksize);
seq_printf(m, "walksize : %u\n", skcipher->walksize);
}
static int __maybe_unused crypto_skcipher_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
struct crypto_report_blkcipher rblkcipher;
memset(&rblkcipher, 0, sizeof(rblkcipher));
strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = skcipher->min_keysize;
rblkcipher.max_keysize = skcipher->max_keysize;
rblkcipher.ivsize = skcipher->ivsize;
return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
sizeof(rblkcipher), &rblkcipher);
}
static int __maybe_unused crypto_skcipher_report_stat(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
struct crypto_istat_cipher *istat;
struct crypto_stat_cipher rcipher;
istat = skcipher_get_stat(skcipher);
memset(&rcipher, 0, sizeof(rcipher));
strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}
static const struct crypto_type crypto_skcipher_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_skcipher_init_tfm,
.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
.show = crypto_skcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_skcipher_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_skcipher_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.tfmsize = offsetof(struct crypto_skcipher, base),
};
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask)
{
spawn->base.frontend = &crypto_skcipher_type;
return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
u32 type, u32 mask)
{
return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
const char *alg_name, u32 type, u32 mask)
{
struct crypto_skcipher *tfm;
/* Only sync algorithms allowed. */
mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;
tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
/*
* Make sure we do not allocate something that might get used with
* an on-stack request: check the request size.
*/
if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
MAX_SYNC_SKCIPHER_REQSIZE)) {
crypto_free_skcipher(tfm);
return ERR_PTR(-EINVAL);
}
return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
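/*
 * Illustrative usage sketch (an addition, not part of the original file):
 * one-shot in-place encryption with a sync skcipher and an on-stack
 * request. "cbc(aes)" is only an example algorithm name, and the error
 * handling is deliberately minimal.
 */
static int __maybe_unused example_sync_encrypt(const u8 *key,
					       unsigned int keylen,
					       u8 *buf, unsigned int len,
					       u8 *iv)
{
	struct crypto_sync_skcipher *tfm;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		sg_init_one(&sg, buf, len);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}

	crypto_free_sync_skcipher(tfm);
	return err;
}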
int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);
static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
alg->walksize > PAGE_SIZE / 8)
return -EINVAL;
if (!alg->chunksize)
alg->chunksize = base->cra_blocksize;
if (!alg->walksize)
alg->walksize = alg->chunksize;
base->cra_type = &crypto_skcipher_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
memset(istat, 0, sizeof(*istat));
return 0;
}
int crypto_register_skcipher(struct skcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
int err;
err = skcipher_prepare_alg(alg);
if (err)
return err;
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);
void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);
int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_register_skcipher(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_skcipher(&algs[i]);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);
void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);
int skcipher_register_instance(struct crypto_template *tmpl,
struct skcipher_instance *inst)
{
int err;
if (WARN_ON(!inst->free))
return -EINVAL;
err = skcipher_prepare_alg(&inst->alg);
if (err)
return err;
return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);
static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
return crypto_cipher_setkey(cipher, key, keylen);
}
static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->cipher = cipher;
return 0;
}
static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
crypto_free_cipher(ctx->cipher);
}
static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
crypto_drop_cipher(skcipher_instance_ctx(inst));
kfree(inst);
}
/**
* skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
*
* Allocate an skcipher_instance for a simple block cipher mode of operation,
* e.g. cbc or ecb. The instance context will have just a single crypto_spawn,
* that for the underlying cipher. The {min,max}_keysize, ivsize, blocksize,
* alignmask, and priority are set from the underlying cipher but can be
* overridden if needed. The tfm context defaults to skcipher_ctx_simple, and
* default ->setkey(), ->init(), and ->exit() methods are installed.
*
* @tmpl: the template being instantiated
* @tb: the template parameters
*
* Return: a pointer to the new instance, or an ERR_PTR(). The caller still
* needs to register the instance.
*/
struct skcipher_instance *skcipher_alloc_instance_simple(
struct crypto_template *tmpl, struct rtattr **tb)
{
u32 mask;
struct skcipher_instance *inst;
struct crypto_cipher_spawn *spawn;
struct crypto_alg *cipher_alg;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
if (err)
return ERR_PTR(err);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return ERR_PTR(-ENOMEM);
spawn = skcipher_instance_ctx(inst);
err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
cipher_alg = crypto_spawn_cipher_alg(spawn);
err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
cipher_alg);
if (err)
goto err_free_inst;
inst->free = skcipher_free_instance_simple;
/* Default algorithm properties, can be overridden */
inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
inst->alg.base.cra_priority = cipher_alg->cra_priority;
inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
inst->alg.ivsize = cipher_alg->cra_blocksize;
/* Use skcipher_ctx_simple by default, can be overridden */
inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
inst->alg.setkey = skcipher_setkey_simple;
inst->alg.init = skcipher_init_tfm_simple;
inst->alg.exit = skcipher_exit_tfm_simple;
return inst;
err_free_inst:
skcipher_free_instance_simple(inst);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
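/*
 * Illustrative sketch (an addition, not part of the original file): the
 * typical ->create() callback of a simple mode template built on
 * skcipher_alloc_instance_simple(), roughly the shape an ECB-like mode
 * takes. example_encrypt/example_decrypt are hypothetical handlers that
 * a real mode would provide.
 */
static int __maybe_unused example_mode_create(struct crypto_template *tmpl,
					      struct rtattr **tb)
{
	struct skcipher_instance *inst;
	int err;

	inst = skcipher_alloc_instance_simple(tmpl, tb);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	/* Override the defaults as needed; an ECB-like mode has no IV. */
	inst->alg.ivsize = 0;
	inst->alg.encrypt = example_encrypt;	/* hypothetical handler */
	inst->alg.decrypt = example_decrypt;	/* hypothetical handler */

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		inst->free(inst);
	return err;
}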
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
| linux-master | crypto/skcipher.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* LRW: as defined by Cyril Guyot in
* http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
*
* Copyright (c) 2006 Rik Snel <[email protected]>
*
* Based on ecb.c
* Copyright (c) 2006 Herbert Xu <[email protected]>
*/
/* This implementation is checked against the test vectors in the above
* document and by a test vector provided by Ken Buchanan at
* https://www.mail-archive.com/[email protected]/msg00173.html
*
* The test vectors are included in the testing module tcrypt.[ch] */
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#define LRW_BLOCK_SIZE 16
struct lrw_tfm_ctx {
struct crypto_skcipher *child;
/*
* The table optimizes multiplying a random (non-incrementing, as at the
* start of a new sector) value with key2; we could also have
* used 4k optimization tables or no optimization at all. In the
* latter case we would have to store key2 here.
*/
struct gf128mul_64k *table;
/*
* stores:
* key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
* key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
* key2*{ 0,0,...1,1,1,1,1 }, etc
* needed for optimized multiplication of incrementing values
* with key2
*/
be128 mulinc[128];
};
struct lrw_request_ctx {
be128 t;
struct skcipher_request subreq;
};
static inline void lrw_setbit128_bbe(void *b, int bit)
{
__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
BITS_PER_LONG
#else
BITS_PER_BYTE
#endif
), b);
}
static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key,
unsigned int keylen)
{
struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_skcipher *child = ctx->child;
int err, bsize = LRW_BLOCK_SIZE;
const u8 *tweak = key + keylen - bsize;
be128 tmp = { 0 };
int i;
crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
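	/*
	 * The combined key is Key1 || Key2: the leading keylen - 16 bytes
	 * go to the block cipher, the trailing 16 bytes (the tweak) feed
	 * the GF(2^128) multiplication table below.
	 */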
err = crypto_skcipher_setkey(child, key, keylen - bsize);
if (err)
return err;
if (ctx->table)
gf128mul_free_64k(ctx->table);
/* initialize multiplication table for Key2 */
ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
if (!ctx->table)
return -ENOMEM;
/* initialize optimization table */
for (i = 0; i < 128; i++) {
lrw_setbit128_bbe(&tmp, i);
ctx->mulinc[i] = tmp;
gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
}
return 0;
}
/*
* Returns the number of trailing '1' bits in the words of the counter, which is
* represented by 4 32-bit words, arranged from least to most significant.
* At the same time, increments the counter by one.
*
* For example:
*
* u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
* int i = lrw_next_index(&counter);
* // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
*/
static int lrw_next_index(u32 *counter)
{
int i, res = 0;
for (i = 0; i < 4; i++) {
if (counter[i] + 1 != 0)
return res + ffz(counter[i]++);
counter[i] = 0;
res += 32;
}
/*
* If we get here, all 128 counter bits were set and we are incrementing
* the counter from all ones to all zeros. This means we must return
* index 127, i.e. the one corresponding to key2*{ 1,...,1 }.
*/
return 127;
}
/*
* We compute the tweak masks twice (both before and after the ECB encryption or
* decryption) to avoid having to allocate a temporary buffer and/or make
* multiple calls to the 'ecb(..)' instance, which usually would be slower than
* just doing the lrw_next_index() calls again.
*/
static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass)
{
const int bs = LRW_BLOCK_SIZE;
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
be128 t = rctx->t;
struct skcipher_walk w;
__be32 *iv;
u32 counter[4];
int err;
if (second_pass) {
req = &rctx->subreq;
/* set to our TFM to enforce correct alignment: */
skcipher_request_set_tfm(req, tfm);
}
err = skcipher_walk_virt(&w, req, false);
if (err)
return err;
iv = (__be32 *)w.iv;
counter[0] = be32_to_cpu(iv[3]);
counter[1] = be32_to_cpu(iv[2]);
counter[2] = be32_to_cpu(iv[1]);
counter[3] = be32_to_cpu(iv[0]);
while (w.nbytes) {
unsigned int avail = w.nbytes;
be128 *wsrc;
be128 *wdst;
wsrc = w.src.virt.addr;
wdst = w.dst.virt.addr;
do {
be128_xor(wdst++, &t, wsrc++);
/* T <- I*Key2, using the optimization
* discussed in the specification */
be128_xor(&t, &t,
&ctx->mulinc[lrw_next_index(counter)]);
} while ((avail -= bs) >= bs);
if (second_pass && w.nbytes == w.total) {
iv[0] = cpu_to_be32(counter[3]);
iv[1] = cpu_to_be32(counter[2]);
iv[2] = cpu_to_be32(counter[1]);
iv[3] = cpu_to_be32(counter[0]);
}
err = skcipher_walk_done(&w, avail);
}
return err;
}
static int lrw_xor_tweak_pre(struct skcipher_request *req)
{
return lrw_xor_tweak(req, false);
}
static int lrw_xor_tweak_post(struct skcipher_request *req)
{
return lrw_xor_tweak(req, true);
}
static void lrw_crypt_done(void *data, int err)
{
struct skcipher_request *req = data;
if (!err) {
struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
err = lrw_xor_tweak_post(req);
}
skcipher_request_complete(req, err);
}
static void lrw_init_crypt(struct skcipher_request *req)
{
const struct lrw_tfm_ctx *ctx =
crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
skcipher_request_set_tfm(subreq, ctx->child);
skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done,
req);
/* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */
skcipher_request_set_crypt(subreq, req->dst, req->dst,
req->cryptlen, req->iv);
/* calculate first value of T */
memcpy(&rctx->t, req->iv, sizeof(rctx->t));
/* T <- I*Key2 */
gf128mul_64k_bbe(&rctx->t, ctx->table);
}
static int lrw_encrypt(struct skcipher_request *req)
{
struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
lrw_init_crypt(req);
return lrw_xor_tweak_pre(req) ?:
crypto_skcipher_encrypt(subreq) ?:
lrw_xor_tweak_post(req);
}
static int lrw_decrypt(struct skcipher_request *req)
{
struct lrw_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
lrw_init_crypt(req);
return lrw_xor_tweak_pre(req) ?:
crypto_skcipher_decrypt(subreq) ?:
lrw_xor_tweak_post(req);
}
static int lrw_init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *cipher;
cipher = crypto_spawn_skcipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
sizeof(struct lrw_request_ctx));
return 0;
}
static void lrw_exit_tfm(struct crypto_skcipher *tfm)
{
struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
if (ctx->table)
gf128mul_free_64k(ctx->table);
crypto_free_skcipher(ctx->child);
}
static void lrw_free_instance(struct skcipher_instance *inst)
{
crypto_drop_skcipher(skcipher_instance_ctx(inst));
kfree(inst);
}
static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_skcipher_spawn *spawn;
struct skcipher_instance *inst;
struct skcipher_alg *alg;
const char *cipher_name;
char ecb_name[CRYPTO_MAX_ALG_NAME];
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
if (err)
return err;
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
return PTR_ERR(cipher_name);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = skcipher_instance_ctx(inst);
err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
cipher_name, 0, mask);
if (err == -ENOENT) {
err = -ENAMETOOLONG;
if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
cipher_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
err = crypto_grab_skcipher(spawn,
skcipher_crypto_instance(inst),
ecb_name, 0, mask);
}
if (err)
goto err_free_inst;
alg = crypto_skcipher_spawn_alg(spawn);
err = -EINVAL;
if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
goto err_free_inst;
if (crypto_skcipher_alg_ivsize(alg))
goto err_free_inst;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
&alg->base);
if (err)
goto err_free_inst;
err = -EINVAL;
cipher_name = alg->base.cra_name;
/* Alas we screwed up the naming so we have to mangle the
* cipher name.
*/
if (!strncmp(cipher_name, "ecb(", 4)) {
int len;
len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
if (len < 2)
goto err_free_inst;
if (ecb_name[len - 1] != ')')
goto err_free_inst;
ecb_name[len - 1] = 0;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
err = -ENAMETOOLONG;
goto err_free_inst;
}
} else
goto err_free_inst;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
(__alignof__(be128) - 1);
inst->alg.ivsize = LRW_BLOCK_SIZE;
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
LRW_BLOCK_SIZE;
inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
LRW_BLOCK_SIZE;
inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx);
inst->alg.init = lrw_init_tfm;
inst->alg.exit = lrw_exit_tfm;
inst->alg.setkey = lrw_setkey;
inst->alg.encrypt = lrw_encrypt;
inst->alg.decrypt = lrw_decrypt;
inst->free = lrw_free_instance;
err = skcipher_register_instance(tmpl, inst);
if (err) {
err_free_inst:
lrw_free_instance(inst);
}
return err;
}
static struct crypto_template lrw_tmpl = {
.name = "lrw",
.create = lrw_create,
.module = THIS_MODULE,
};
static int __init lrw_module_init(void)
{
return crypto_register_template(&lrw_tmpl);
}
static void __exit lrw_module_exit(void)
{
crypto_unregister_template(&lrw_tmpl);
}
subsys_initcall(lrw_module_init);
module_exit(lrw_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");
MODULE_SOFTDEP("pre: ecb");
| linux-master | crypto/lrw.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Create default crypto algorithm instances.
*
* Copyright (c) 2006 Herbert Xu <[email protected]>
*/
#include <crypto/internal/aead.h>
#include <linux/completion.h>
#include <linux/ctype.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "internal.h"
struct cryptomgr_param {
struct rtattr *tb[CRYPTO_MAX_ATTRS + 2];
struct {
struct rtattr attr;
struct crypto_attr_type data;
} type;
struct {
struct rtattr attr;
struct crypto_attr_alg data;
} attrs[CRYPTO_MAX_ATTRS];
char template[CRYPTO_MAX_ALG_NAME];
struct crypto_larval *larval;
u32 otype;
u32 omask;
};
struct crypto_test_param {
char driver[CRYPTO_MAX_ALG_NAME];
char alg[CRYPTO_MAX_ALG_NAME];
u32 type;
};
static int cryptomgr_probe(void *data)
{
struct cryptomgr_param *param = data;
struct crypto_template *tmpl;
int err;
tmpl = crypto_lookup_template(param->template);
if (!tmpl)
goto out;
do {
err = tmpl->create(tmpl, param->tb);
} while (err == -EAGAIN && !signal_pending(current));
crypto_tmpl_put(tmpl);
out:
complete_all(¶m->larval->completion);
crypto_alg_put(¶m->larval->alg);
kfree(param);
module_put_and_kthread_exit(0);
}
static int cryptomgr_schedule_probe(struct crypto_larval *larval)
{
struct task_struct *thread;
struct cryptomgr_param *param;
const char *name = larval->alg.cra_name;
const char *p;
unsigned int len;
int i;
if (!try_module_get(THIS_MODULE))
goto err;
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param)
goto err_put_module;
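	/*
	 * Parse the algorithm name into a template name and its
	 * parenthesised parameters; e.g. "authenc(hmac(sha256),cbc(aes))"
	 * yields the template "authenc" with the attributes
	 * "hmac(sha256)" and "cbc(aes)".
	 */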
for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++)
;
len = p - name;
if (!len || *p != '(')
goto err_free_param;
memcpy(param->template, name, len);
i = 0;
for (;;) {
name = ++p;
for (; isalnum(*p) || *p == '-' || *p == '_'; p++)
;
if (*p == '(') {
int recursion = 0;
for (;;) {
if (!*++p)
goto err_free_param;
if (*p == '(')
recursion++;
else if (*p == ')' && !recursion--)
break;
}
p++;
}
len = p - name;
if (!len)
goto err_free_param;
param->attrs[i].attr.rta_len = sizeof(param->attrs[i]);
param->attrs[i].attr.rta_type = CRYPTOA_ALG;
memcpy(param->attrs[i].data.name, name, len);
param->tb[i + 1] = ¶m->attrs[i].attr;
i++;
if (i >= CRYPTO_MAX_ATTRS)
goto err_free_param;
if (*p == ')')
break;
if (*p != ',')
goto err_free_param;
}
if (!i)
goto err_free_param;
param->tb[i + 1] = NULL;
param->type.attr.rta_len = sizeof(param->type);
param->type.attr.rta_type = CRYPTOA_TYPE;
param->type.data.type = larval->alg.cra_flags & ~CRYPTO_ALG_TESTED;
param->type.data.mask = larval->mask & ~CRYPTO_ALG_TESTED;
param->tb[0] = ¶m->type.attr;
param->otype = larval->alg.cra_flags;
param->omask = larval->mask;
crypto_alg_get(&larval->alg);
param->larval = larval;
thread = kthread_run(cryptomgr_probe, param, "cryptomgr_probe");
if (IS_ERR(thread))
goto err_put_larval;
return NOTIFY_STOP;
err_put_larval:
crypto_alg_put(&larval->alg);
err_free_param:
kfree(param);
err_put_module:
module_put(THIS_MODULE);
err:
return NOTIFY_OK;
}
static int cryptomgr_test(void *data)
{
struct crypto_test_param *param = data;
u32 type = param->type;
int err;
err = alg_test(param->driver, param->alg, type, CRYPTO_ALG_TESTED);
crypto_alg_tested(param->driver, err);
kfree(param);
module_put_and_kthread_exit(0);
}
static int cryptomgr_schedule_test(struct crypto_alg *alg)
{
struct task_struct *thread;
struct crypto_test_param *param;
if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
return NOTIFY_DONE;
if (!try_module_get(THIS_MODULE))
goto err;
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param)
goto err_put_module;
memcpy(param->driver, alg->cra_driver_name, sizeof(param->driver));
memcpy(param->alg, alg->cra_name, sizeof(param->alg));
param->type = alg->cra_flags;
thread = kthread_run(cryptomgr_test, param, "cryptomgr_test");
if (IS_ERR(thread))
goto err_free_param;
return NOTIFY_STOP;
err_free_param:
kfree(param);
err_put_module:
module_put(THIS_MODULE);
err:
return NOTIFY_OK;
}
static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
void *data)
{
switch (msg) {
case CRYPTO_MSG_ALG_REQUEST:
return cryptomgr_schedule_probe(data);
case CRYPTO_MSG_ALG_REGISTER:
return cryptomgr_schedule_test(data);
case CRYPTO_MSG_ALG_LOADED:
break;
}
return NOTIFY_DONE;
}
static struct notifier_block cryptomgr_notifier = {
.notifier_call = cryptomgr_notify,
};
static int __init cryptomgr_init(void)
{
return crypto_register_notifier(&cryptomgr_notifier);
}
static void __exit cryptomgr_exit(void)
{
int err = crypto_unregister_notifier(&cryptomgr_notifier);
BUG_ON(err);
}
/*
* This is arch_initcall() so that the crypto self-tests are run on algorithms
* registered early by subsys_initcall(). subsys_initcall() is needed for
* generic implementations so that they're available for comparison tests when
* other implementations are registered later by module_init().
*/
arch_initcall(cryptomgr_init);
module_exit(cryptomgr_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto Algorithm Manager");
| linux-master | crypto/algboss.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* echainiv: Encrypted Chain IV Generator
*
* This generator generates an IV based on a sequence number by multiplying
* it with a salt and then encrypting it with the same key as used to encrypt
* the plain text. This algorithm requires that the block size be equal
* to the IV size. It is mainly useful for CBC.
*
* This generator can only be used by algorithms where authentication
* is performed after encryption (i.e., authenc).
*
* Copyright (c) 2015 Herbert Xu <[email protected]>
*/
#include <crypto/internal/geniv.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
static int echainiv_encrypt(struct aead_request *req)
{
struct crypto_aead *geniv = crypto_aead_reqtfm(req);
struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
struct aead_request *subreq = aead_request_ctx(req);
__be64 nseqno;
u64 seqno;
u8 *info;
unsigned int ivsize = crypto_aead_ivsize(geniv);
int err;
if (req->cryptlen < ivsize)
return -EINVAL;
aead_request_set_tfm(subreq, ctx->child);
info = req->iv;
if (req->src != req->dst) {
SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
skcipher_request_set_sync_tfm(nreq, ctx->sknull);
skcipher_request_set_callback(nreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(nreq, req->src, req->dst,
req->assoclen + req->cryptlen,
NULL);
err = crypto_skcipher_encrypt(nreq);
if (err)
return err;
}
aead_request_set_callback(subreq, req->base.flags,
req->base.complete, req->base.data);
aead_request_set_crypt(subreq, req->dst, req->dst,
req->cryptlen, info);
aead_request_set_ad(subreq, req->assoclen);
memcpy(&nseqno, info + ivsize - 8, 8);
seqno = be64_to_cpu(nseqno);
memset(info, 0, ivsize);
scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
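	/*
	 * Derive the IV: each 64-bit word of the salt is forced odd (so
	 * the multiplication is invertible modulo 2^64) and multiplied by
	 * the sequence number taken from the original IV.
	 */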
do {
u64 a;
memcpy(&a, ctx->salt + ivsize - 8, 8);
a |= 1;
a *= seqno;
memcpy(info + ivsize - 8, &a, 8);
} while ((ivsize -= 8));
return crypto_aead_encrypt(subreq);
}
static int echainiv_decrypt(struct aead_request *req)
{
struct crypto_aead *geniv = crypto_aead_reqtfm(req);
struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
struct aead_request *subreq = aead_request_ctx(req);
crypto_completion_t compl;
void *data;
unsigned int ivsize = crypto_aead_ivsize(geniv);
if (req->cryptlen < ivsize)
return -EINVAL;
aead_request_set_tfm(subreq, ctx->child);
compl = req->base.complete;
data = req->base.data;
aead_request_set_callback(subreq, req->base.flags, compl, data);
aead_request_set_crypt(subreq, req->src, req->dst,
req->cryptlen - ivsize, req->iv);
aead_request_set_ad(subreq, req->assoclen + ivsize);
scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
return crypto_aead_decrypt(subreq);
}
static int echainiv_aead_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
struct aead_instance *inst;
int err;
inst = aead_geniv_alloc(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
err = -EINVAL;
if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
goto free_inst;
inst->alg.encrypt = echainiv_encrypt;
inst->alg.decrypt = echainiv_decrypt;
inst->alg.init = aead_init_geniv;
inst->alg.exit = aead_exit_geniv;
inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
inst->alg.base.cra_ctxsize += inst->alg.ivsize;
err = aead_register_instance(tmpl, inst);
if (err) {
free_inst:
inst->free(inst);
}
return err;
}
static struct crypto_template echainiv_tmpl = {
.name = "echainiv",
.create = echainiv_aead_create,
.module = THIS_MODULE,
};
static int __init echainiv_module_init(void)
{
return crypto_register_template(&echainiv_tmpl);
}
static void __exit echainiv_module_exit(void)
{
crypto_unregister_template(&echainiv_tmpl);
}
subsys_initcall(echainiv_module_init);
module_exit(echainiv_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Encrypted Chain IV Generator");
MODULE_ALIAS_CRYPTO("echainiv");
| linux-master | crypto/echainiv.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Kernel cryptographic api.
* cast6.c - Cast6 cipher algorithm [rfc2612].
*
* CAST-256 (*cast6*) is a DES like Substitution-Permutation Network (SPN)
* cryptosystem built upon the CAST-128 (*cast5*) [rfc2144] encryption
* algorithm.
*
* Copyright (C) 2003 Kartikey Mahendra Bhatt <[email protected]>.
*/
#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <crypto/cast6.h>
#define s1 cast_s1
#define s2 cast_s2
#define s3 cast_s3
#define s4 cast_s4
#define F1(D, r, m) ((I = ((m) + (D))), (I = rol32(I, (r))), \
(((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]))
#define F2(D, r, m) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \
(((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]))
#define F3(D, r, m) ((I = ((m) - (D))), (I = rol32(I, (r))), \
(((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]))
static const u32 Tm[24][8] = {
{ 0x5a827999, 0xc95c653a, 0x383650db, 0xa7103c7c, 0x15ea281d,
0x84c413be, 0xf39dff5f, 0x6277eb00 } ,
{ 0xd151d6a1, 0x402bc242, 0xaf05ade3, 0x1ddf9984, 0x8cb98525,
0xfb9370c6, 0x6a6d5c67, 0xd9474808 } ,
{ 0x482133a9, 0xb6fb1f4a, 0x25d50aeb, 0x94aef68c, 0x0388e22d,
0x7262cdce, 0xe13cb96f, 0x5016a510 } ,
{ 0xbef090b1, 0x2dca7c52, 0x9ca467f3, 0x0b7e5394, 0x7a583f35,
0xe9322ad6, 0x580c1677, 0xc6e60218 } ,
{ 0x35bfedb9, 0xa499d95a, 0x1373c4fb, 0x824db09c, 0xf1279c3d,
0x600187de, 0xcedb737f, 0x3db55f20 } ,
{ 0xac8f4ac1, 0x1b693662, 0x8a432203, 0xf91d0da4, 0x67f6f945,
0xd6d0e4e6, 0x45aad087, 0xb484bc28 } ,
{ 0x235ea7c9, 0x9238936a, 0x01127f0b, 0x6fec6aac, 0xdec6564d,
0x4da041ee, 0xbc7a2d8f, 0x2b541930 } ,
{ 0x9a2e04d1, 0x0907f072, 0x77e1dc13, 0xe6bbc7b4, 0x5595b355,
0xc46f9ef6, 0x33498a97, 0xa2237638 } ,
{ 0x10fd61d9, 0x7fd74d7a, 0xeeb1391b, 0x5d8b24bc, 0xcc65105d,
0x3b3efbfe, 0xaa18e79f, 0x18f2d340 } ,
{ 0x87ccbee1, 0xf6a6aa82, 0x65809623, 0xd45a81c4, 0x43346d65,
0xb20e5906, 0x20e844a7, 0x8fc23048 } ,
{ 0xfe9c1be9, 0x6d76078a, 0xdc4ff32b, 0x4b29decc, 0xba03ca6d,
0x28ddb60e, 0x97b7a1af, 0x06918d50 } ,
{ 0x756b78f1, 0xe4456492, 0x531f5033, 0xc1f93bd4, 0x30d32775,
0x9fad1316, 0x0e86feb7, 0x7d60ea58 } ,
{ 0xec3ad5f9, 0x5b14c19a, 0xc9eead3b, 0x38c898dc, 0xa7a2847d,
0x167c701e, 0x85565bbf, 0xf4304760 } ,
{ 0x630a3301, 0xd1e41ea2, 0x40be0a43, 0xaf97f5e4, 0x1e71e185,
0x8d4bcd26, 0xfc25b8c7, 0x6affa468 } ,
{ 0xd9d99009, 0x48b37baa, 0xb78d674b, 0x266752ec, 0x95413e8d,
0x041b2a2e, 0x72f515cf, 0xe1cf0170 } ,
{ 0x50a8ed11, 0xbf82d8b2, 0x2e5cc453, 0x9d36aff4, 0x0c109b95,
0x7aea8736, 0xe9c472d7, 0x589e5e78 } ,
{ 0xc7784a19, 0x365235ba, 0xa52c215b, 0x14060cfc, 0x82dff89d,
0xf1b9e43e, 0x6093cfdf, 0xcf6dbb80 } ,
{ 0x3e47a721, 0xad2192c2, 0x1bfb7e63, 0x8ad56a04, 0xf9af55a5,
0x68894146, 0xd7632ce7, 0x463d1888 } ,
{ 0xb5170429, 0x23f0efca, 0x92cadb6b, 0x01a4c70c, 0x707eb2ad,
0xdf589e4e, 0x4e3289ef, 0xbd0c7590 } ,
{ 0x2be66131, 0x9ac04cd2, 0x099a3873, 0x78742414, 0xe74e0fb5,
0x5627fb56, 0xc501e6f7, 0x33dbd298 } ,
{ 0xa2b5be39, 0x118fa9da, 0x8069957b, 0xef43811c, 0x5e1d6cbd,
0xccf7585e, 0x3bd143ff, 0xaaab2fa0 } ,
{ 0x19851b41, 0x885f06e2, 0xf738f283, 0x6612de24, 0xd4ecc9c5,
0x43c6b566, 0xb2a0a107, 0x217a8ca8 } ,
{ 0x90547849, 0xff2e63ea, 0x6e084f8b, 0xdce23b2c, 0x4bbc26cd,
0xba96126e, 0x296ffe0f, 0x9849e9b0 } ,
{ 0x0723d551, 0x75fdc0f2, 0xe4d7ac93, 0x53b19834, 0xc28b83d5,
0x31656f76, 0xa03f5b17, 0x0f1946b8 }
};
static const u8 Tr[4][8] = {
{ 0x13, 0x04, 0x15, 0x06, 0x17, 0x08, 0x19, 0x0a } ,
{ 0x1b, 0x0c, 0x1d, 0x0e, 0x1f, 0x10, 0x01, 0x12 } ,
{ 0x03, 0x14, 0x05, 0x16, 0x07, 0x18, 0x09, 0x1a } ,
{ 0x0b, 0x1c, 0x0d, 0x1e, 0x0f, 0x00, 0x11, 0x02 }
};
/* forward octave */
static inline void W(u32 *key, unsigned int i)
{
u32 I;
key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]);
key[5] ^= F2(key[6], Tr[i % 4][1], Tm[i][1]);
key[4] ^= F3(key[5], Tr[i % 4][2], Tm[i][2]);
key[3] ^= F1(key[4], Tr[i % 4][3], Tm[i][3]);
key[2] ^= F2(key[3], Tr[i % 4][4], Tm[i][4]);
key[1] ^= F3(key[2], Tr[i % 4][5], Tm[i][5]);
key[0] ^= F1(key[1], Tr[i % 4][6], Tm[i][6]);
key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]);
}
int __cast6_setkey(struct cast6_ctx *c, const u8 *in_key, unsigned int key_len)
{
int i;
u32 key[8];
__be32 p_key[8]; /* padded key */
if (key_len % 4 != 0)
return -EINVAL;
memset(p_key, 0, 32);
memcpy(p_key, in_key, key_len);
key[0] = be32_to_cpu(p_key[0]); /* A */
key[1] = be32_to_cpu(p_key[1]); /* B */
key[2] = be32_to_cpu(p_key[2]); /* C */
key[3] = be32_to_cpu(p_key[3]); /* D */
key[4] = be32_to_cpu(p_key[4]); /* E */
key[5] = be32_to_cpu(p_key[5]); /* F */
key[6] = be32_to_cpu(p_key[6]); /* G */
key[7] = be32_to_cpu(p_key[7]); /* H */
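	/*
	 * Twelve rounds: each consumes two forward octaves, then extracts
	 * the 5-bit rotation keys Kr from the even key words and the
	 * masking keys Km from the odd ones.
	 */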
for (i = 0; i < 12; i++) {
W(key, 2 * i);
W(key, 2 * i + 1);
c->Kr[i][0] = key[0] & 0x1f;
c->Kr[i][1] = key[2] & 0x1f;
c->Kr[i][2] = key[4] & 0x1f;
c->Kr[i][3] = key[6] & 0x1f;
c->Km[i][0] = key[7];
c->Km[i][1] = key[5];
c->Km[i][2] = key[3];
c->Km[i][3] = key[1];
}
return 0;
}
EXPORT_SYMBOL_GPL(__cast6_setkey);
int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
return __cast6_setkey(crypto_tfm_ctx(tfm), key, keylen);
}
EXPORT_SYMBOL_GPL(cast6_setkey);
/*forward quad round*/
static inline void Q(u32 *block, const u8 *Kr, const u32 *Km)
{
u32 I;
block[2] ^= F1(block[3], Kr[0], Km[0]);
block[1] ^= F2(block[2], Kr[1], Km[1]);
block[0] ^= F3(block[1], Kr[2], Km[2]);
block[3] ^= F1(block[0], Kr[3], Km[3]);
}
/*reverse quad round*/
static inline void QBAR(u32 *block, const u8 *Kr, const u32 *Km)
{
u32 I;
block[3] ^= F1(block[0], Kr[3], Km[3]);
block[0] ^= F3(block[1], Kr[2], Km[2]);
block[1] ^= F2(block[2], Kr[1], Km[1]);
block[2] ^= F1(block[3], Kr[0], Km[0]);
}
void __cast6_encrypt(const void *ctx, u8 *outbuf, const u8 *inbuf)
{
const struct cast6_ctx *c = ctx;
u32 block[4];
const u32 *Km;
const u8 *Kr;
block[0] = get_unaligned_be32(inbuf);
block[1] = get_unaligned_be32(inbuf + 4);
block[2] = get_unaligned_be32(inbuf + 8);
block[3] = get_unaligned_be32(inbuf + 12);
Km = c->Km[0]; Kr = c->Kr[0]; Q(block, Kr, Km);
Km = c->Km[1]; Kr = c->Kr[1]; Q(block, Kr, Km);
Km = c->Km[2]; Kr = c->Kr[2]; Q(block, Kr, Km);
Km = c->Km[3]; Kr = c->Kr[3]; Q(block, Kr, Km);
Km = c->Km[4]; Kr = c->Kr[4]; Q(block, Kr, Km);
Km = c->Km[5]; Kr = c->Kr[5]; Q(block, Kr, Km);
Km = c->Km[6]; Kr = c->Kr[6]; QBAR(block, Kr, Km);
Km = c->Km[7]; Kr = c->Kr[7]; QBAR(block, Kr, Km);
Km = c->Km[8]; Kr = c->Kr[8]; QBAR(block, Kr, Km);
Km = c->Km[9]; Kr = c->Kr[9]; QBAR(block, Kr, Km);
Km = c->Km[10]; Kr = c->Kr[10]; QBAR(block, Kr, Km);
Km = c->Km[11]; Kr = c->Kr[11]; QBAR(block, Kr, Km);
put_unaligned_be32(block[0], outbuf);
put_unaligned_be32(block[1], outbuf + 4);
put_unaligned_be32(block[2], outbuf + 8);
put_unaligned_be32(block[3], outbuf + 12);
}
EXPORT_SYMBOL_GPL(__cast6_encrypt);
static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
{
__cast6_encrypt(crypto_tfm_ctx(tfm), outbuf, inbuf);
}
void __cast6_decrypt(const void *ctx, u8 *outbuf, const u8 *inbuf)
{
const struct cast6_ctx *c = ctx;
u32 block[4];
const u32 *Km;
const u8 *Kr;
block[0] = get_unaligned_be32(inbuf);
block[1] = get_unaligned_be32(inbuf + 4);
block[2] = get_unaligned_be32(inbuf + 8);
block[3] = get_unaligned_be32(inbuf + 12);
Km = c->Km[11]; Kr = c->Kr[11]; Q(block, Kr, Km);
Km = c->Km[10]; Kr = c->Kr[10]; Q(block, Kr, Km);
Km = c->Km[9]; Kr = c->Kr[9]; Q(block, Kr, Km);
Km = c->Km[8]; Kr = c->Kr[8]; Q(block, Kr, Km);
Km = c->Km[7]; Kr = c->Kr[7]; Q(block, Kr, Km);
Km = c->Km[6]; Kr = c->Kr[6]; Q(block, Kr, Km);
Km = c->Km[5]; Kr = c->Kr[5]; QBAR(block, Kr, Km);
Km = c->Km[4]; Kr = c->Kr[4]; QBAR(block, Kr, Km);
Km = c->Km[3]; Kr = c->Kr[3]; QBAR(block, Kr, Km);
Km = c->Km[2]; Kr = c->Kr[2]; QBAR(block, Kr, Km);
Km = c->Km[1]; Kr = c->Kr[1]; QBAR(block, Kr, Km);
Km = c->Km[0]; Kr = c->Kr[0]; QBAR(block, Kr, Km);
put_unaligned_be32(block[0], outbuf);
put_unaligned_be32(block[1], outbuf + 4);
put_unaligned_be32(block[2], outbuf + 8);
put_unaligned_be32(block[3], outbuf + 12);
}
EXPORT_SYMBOL_GPL(__cast6_decrypt);
static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
{
__cast6_decrypt(crypto_tfm_ctx(tfm), outbuf, inbuf);
}
static struct crypto_alg alg = {
.cra_name = "cast6",
.cra_driver_name = "cast6-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAST6_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast6_ctx),
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = CAST6_MIN_KEY_SIZE,
.cia_max_keysize = CAST6_MAX_KEY_SIZE,
.cia_setkey = cast6_setkey,
.cia_encrypt = cast6_encrypt,
.cia_decrypt = cast6_decrypt}
}
};
static int __init cast6_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit cast6_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
subsys_initcall(cast6_mod_init);
module_exit(cast6_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cast6 Cipher Algorithm");
MODULE_ALIAS_CRYPTO("cast6");
MODULE_ALIAS_CRYPTO("cast6-generic");
| linux-master | crypto/cast6_generic.c |
/*
* Key Wrapping: RFC3394 / NIST SP800-38F
*
* Copyright (C) 2015, Stephan Mueller <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, in which case the provisions of the GPL2
* are required INSTEAD OF the above restrictions. (This clause is
* necessary due to a potential bad interaction between the GPL and
* the restrictions contained in a BSD-style copyright.)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
* WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/*
* Note for using key wrapping:
*
* * The result of the encryption operation is the ciphertext starting
* with the 2nd semiblock. The first semiblock is provided as the IV.
* The IV used to start the encryption operation is the default IV.
*
* * The input for the decryption is the first semiblock handed in as an
* IV. The ciphertext is the data starting with the 2nd semiblock. The
* return code of the decryption operation will be EBADMSG in case an
* integrity error occurs.
*
 * To obtain the full result of an encryption as expected by SP800-38F, the
 * caller must allocate a buffer of the plaintext size plus 8 bytes:
*
* unsigned int datalen = ptlen + crypto_skcipher_ivsize(tfm);
* u8 data[datalen];
* u8 *iv = data;
* u8 *pt = data + crypto_skcipher_ivsize(tfm);
* <ensure that pt contains the plaintext of size ptlen>
* sg_init_one(&sg, pt, ptlen);
* skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv);
*
* ==> After encryption, data now contains full KW result as per SP800-38F.
*
 * For decryption, the ciphertext already has the expected length and must
 * be segmented appropriately:
*
* unsigned int datalen = CTLEN;
* u8 data[datalen];
* <ensure that data contains full ciphertext>
* u8 *iv = data;
* u8 *ct = data + crypto_skcipher_ivsize(tfm);
* unsigned int ctlen = datalen - crypto_skcipher_ivsize(tfm);
* sg_init_one(&sg, ct, ctlen);
* skcipher_request_set_crypt(req, &sg, &sg, ctlen, iv);
*
* ==> After decryption (which hopefully does not return EBADMSG), the ct
* pointer now points to the plaintext of size ctlen.
*
 * Note 2: KWP is not implemented as it would preclude in-place operation.
 *	   Callers who want to wrap non-aligned data should simply pad the
 *	   input with zeros up to the next 8-byte boundary.
 */
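/*
 * Illustrative sketch (added, not part of the original file): a caller
 * could instantiate this template around AES as follows. Error handling
 * beyond the allocation check and the origin of "key"/"keylen" are
 * elided placeholders.
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("kw(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 */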
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
struct crypto_kw_block {
#define SEMIBSIZE 8
__be64 A;
__be64 R;
};
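/*
 * Note (added): one KW step operates on the 16-byte concatenation A || R,
 * where A carries the running integrity value and R the data semiblock
 * currently being processed; the step counter t is XORed into A, as per
 * the W() / W^-1() functions of SP800-38F.
 */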
/*
 * Fast-forward the SGL to the offset "end" minus SEMIBSIZE, i.e. to the
 * last semiblock within the first "end" bytes. The resulting position in
 * the SGL is returned via the walk variable.
 */
static void crypto_kw_scatterlist_ff(struct scatter_walk *walk,
struct scatterlist *sg,
unsigned int end)
{
unsigned int skip = 0;
/* The caller should only operate on full SEMIBLOCKs. */
BUG_ON(end < SEMIBSIZE);
skip = end - SEMIBSIZE;
while (sg) {
if (sg->length > skip) {
scatterwalk_start(walk, sg);
scatterwalk_advance(walk, skip);
break;
}
skip -= sg->length;
sg = sg_next(sg);
}
}
static int crypto_kw_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
struct crypto_kw_block block;
struct scatterlist *src, *dst;
u64 t = 6 * ((req->cryptlen) >> 3);
unsigned int i;
int ret = 0;
	/*
	 * Require at least 2 semiblocks (note, the 3rd semiblock that is
	 * required by SP800-38F is the IV).
	 */
if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
return -EINVAL;
/* Place the IV into block A */
memcpy(&block.A, req->iv, SEMIBSIZE);
/*
* src scatterlist is read-only. dst scatterlist is r/w. During the
* first loop, src points to req->src and dst to req->dst. For any
* subsequent round, the code operates on req->dst only.
*/
src = req->src;
dst = req->dst;
for (i = 0; i < 6; i++) {
struct scatter_walk src_walk, dst_walk;
unsigned int nbytes = req->cryptlen;
while (nbytes) {
			/* fast-forward to the last semiblock of the first nbytes */
crypto_kw_scatterlist_ff(&src_walk, src, nbytes);
/* get the source block */
scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
false);
/* perform KW operation: modify IV with counter */
block.A ^= cpu_to_be64(t);
t--;
/* perform KW operation: decrypt block */
crypto_cipher_decrypt_one(cipher, (u8 *)&block,
(u8 *)&block);
			/* fast-forward to the same position in the dst SGL */
crypto_kw_scatterlist_ff(&dst_walk, dst, nbytes);
/* Copy block->R into place */
scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
true);
nbytes -= SEMIBSIZE;
}
/* we now start to operate on the dst SGL only */
src = req->dst;
dst = req->dst;
}
/* Perform authentication check */
if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL))
ret = -EBADMSG;
memzero_explicit(&block, sizeof(struct crypto_kw_block));
return ret;
}
static int crypto_kw_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
struct crypto_kw_block block;
struct scatterlist *src, *dst;
u64 t = 1;
unsigned int i;
	/*
	 * Require at least 2 semiblocks (note, the 3rd semiblock that is
	 * required by SP800-38F is the IV that occupies the first semiblock).
	 * This means that the dst memory must be one semiblock larger than
	 * src. Also ensure that the given data is a multiple of the
	 * semiblock size.
	 */
if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE)
return -EINVAL;
	/*
	 * Place the predefined IV into block A -- for encrypt, the caller
	 * does not need to provide an IV, but must fetch the final IV from
	 * req->iv after the operation.
	 */
block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL);
/*
* src scatterlist is read-only. dst scatterlist is r/w. During the
* first loop, src points to req->src and dst to req->dst. For any
* subsequent round, the code operates on req->dst only.
*/
src = req->src;
dst = req->dst;
for (i = 0; i < 6; i++) {
struct scatter_walk src_walk, dst_walk;
unsigned int nbytes = req->cryptlen;
scatterwalk_start(&src_walk, src);
scatterwalk_start(&dst_walk, dst);
while (nbytes) {
/* get the source block */
scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE,
false);
/* perform KW operation: encrypt block */
crypto_cipher_encrypt_one(cipher, (u8 *)&block,
(u8 *)&block);
/* perform KW operation: modify IV with counter */
block.A ^= cpu_to_be64(t);
t++;
/* Copy block->R into place */
scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE,
true);
nbytes -= SEMIBSIZE;
}
/* we now start to operate on the dst SGL only */
src = req->dst;
dst = req->dst;
}
/* establish the IV for the caller to pick up */
memcpy(req->iv, &block.A, SEMIBSIZE);
memzero_explicit(&block, sizeof(struct crypto_kw_block));
return 0;
}
static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
struct crypto_alg *alg;
int err;
inst = skcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
alg = skcipher_ialg_simple(inst);
err = -EINVAL;
/* Section 5.1 requirement for KW */
if (alg->cra_blocksize != sizeof(struct crypto_kw_block))
goto out_free_inst;
inst->alg.base.cra_blocksize = SEMIBSIZE;
inst->alg.base.cra_alignmask = 0;
inst->alg.ivsize = SEMIBSIZE;
inst->alg.encrypt = crypto_kw_encrypt;
inst->alg.decrypt = crypto_kw_decrypt;
err = skcipher_register_instance(tmpl, inst);
if (err) {
out_free_inst:
inst->free(inst);
}
return err;
}
static struct crypto_template crypto_kw_tmpl = {
.name = "kw",
.create = crypto_kw_create,
.module = THIS_MODULE,
};
static int __init crypto_kw_init(void)
{
return crypto_register_template(&crypto_kw_tmpl);
}
static void __exit crypto_kw_exit(void)
{
crypto_unregister_template(&crypto_kw_tmpl);
}
subsys_initcall(crypto_kw_init);
module_exit(crypto_kw_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Stephan Mueller <[email protected]>");
MODULE_DESCRIPTION("Key Wrapping (RFC3394 / NIST SP800-38F)");
MODULE_ALIAS_CRYPTO("kw");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
| linux-master | crypto/keywrap.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* XCTR: XOR Counter mode - Adapted from ctr.c
*
* (C) Copyright IBM Corp. 2007 - Joy Latten <[email protected]>
* Copyright 2021 Google LLC
*/
/*
* XCTR mode is a blockcipher mode of operation used to implement HCTR2. XCTR is
* closely related to the CTR mode of operation; the main difference is that CTR
* generates the keystream using E(CTR + IV) whereas XCTR generates the
* keystream using E(CTR ^ IV). This allows implementations to avoid dealing
* with multi-limb integers (as is required in CTR mode). XCTR is also specified
* using little-endian arithmetic which makes it slightly faster on LE machines.
*
* See the HCTR2 paper for more details:
* Length-preserving encryption with HCTR2
* (https://eprint.iacr.org/2021/1441.pdf)
*/
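/*
 * Minimal sketch (added) of the per-block keystream derivation used
 * below, with the scatterlist walk stripped away. In this implementation
 * the counter is a 32-bit little-endian value XORed into the first four
 * bytes of the IV, and the XOR is undone afterwards so that the IV can
 * be reused for the next block (i is the 0-based block index):
 *
 *	__le32 ctr32 = cpu_to_le32(i + 1);
 *
 *	crypto_xor(iv, (u8 *)&ctr32, sizeof(ctr32));
 *	crypto_cipher_encrypt_one(tfm, keystream, iv);
 *	crypto_xor_cpy(dst, keystream, src, XCTR_BLOCKSIZE);
 *	crypto_xor(iv, (u8 *)&ctr32, sizeof(ctr32));
 */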
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
/* For now this implementation is limited to 16-byte blocks for simplicity */
#define XCTR_BLOCKSIZE 16
static void crypto_xctr_crypt_final(struct skcipher_walk *walk,
struct crypto_cipher *tfm, u32 byte_ctr)
{
u8 keystream[XCTR_BLOCKSIZE];
const u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
__le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1);
crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32));
crypto_cipher_encrypt_one(tfm, keystream, walk->iv);
crypto_xor_cpy(dst, keystream, src, nbytes);
crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32));
}
static int crypto_xctr_crypt_segment(struct skcipher_walk *walk,
struct crypto_cipher *tfm, u32 byte_ctr)
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
crypto_cipher_alg(tfm)->cia_encrypt;
const u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;
__le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1);
do {
crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32));
fn(crypto_cipher_tfm(tfm), dst, walk->iv);
crypto_xor(dst, src, XCTR_BLOCKSIZE);
crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32));
le32_add_cpu(&ctr32, 1);
src += XCTR_BLOCKSIZE;
dst += XCTR_BLOCKSIZE;
} while ((nbytes -= XCTR_BLOCKSIZE) >= XCTR_BLOCKSIZE);
return nbytes;
}
static int crypto_xctr_crypt_inplace(struct skcipher_walk *walk,
struct crypto_cipher *tfm, u32 byte_ctr)
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
crypto_cipher_alg(tfm)->cia_encrypt;
unsigned long alignmask = crypto_cipher_alignmask(tfm);
unsigned int nbytes = walk->nbytes;
u8 *data = walk->src.virt.addr;
u8 tmp[XCTR_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
__le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1);
do {
crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32));
fn(crypto_cipher_tfm(tfm), keystream, walk->iv);
crypto_xor(data, keystream, XCTR_BLOCKSIZE);
crypto_xor(walk->iv, (u8 *)&ctr32, sizeof(ctr32));
le32_add_cpu(&ctr32, 1);
data += XCTR_BLOCKSIZE;
} while ((nbytes -= XCTR_BLOCKSIZE) >= XCTR_BLOCKSIZE);
return nbytes;
}
static int crypto_xctr_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
u32 byte_ctr = 0;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes >= XCTR_BLOCKSIZE) {
if (walk.src.virt.addr == walk.dst.virt.addr)
nbytes = crypto_xctr_crypt_inplace(&walk, cipher,
byte_ctr);
else
nbytes = crypto_xctr_crypt_segment(&walk, cipher,
byte_ctr);
byte_ctr += walk.nbytes - nbytes;
err = skcipher_walk_done(&walk, nbytes);
}
if (walk.nbytes) {
crypto_xctr_crypt_final(&walk, cipher, byte_ctr);
err = skcipher_walk_done(&walk, 0);
}
return err;
}
static int crypto_xctr_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
struct crypto_alg *alg;
int err;
inst = skcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
alg = skcipher_ialg_simple(inst);
/* Block size must be 16 bytes. */
err = -EINVAL;
if (alg->cra_blocksize != XCTR_BLOCKSIZE)
goto out_free_inst;
/* XCTR mode is a stream cipher. */
inst->alg.base.cra_blocksize = 1;
/*
* To simplify the implementation, configure the skcipher walk to only
* give a partial block at the very end, never earlier.
*/
inst->alg.chunksize = alg->cra_blocksize;
inst->alg.encrypt = crypto_xctr_crypt;
inst->alg.decrypt = crypto_xctr_crypt;
err = skcipher_register_instance(tmpl, inst);
if (err) {
out_free_inst:
inst->free(inst);
}
return err;
}
static struct crypto_template crypto_xctr_tmpl = {
.name = "xctr",
.create = crypto_xctr_create,
.module = THIS_MODULE,
};
static int __init crypto_xctr_module_init(void)
{
return crypto_register_template(&crypto_xctr_tmpl);
}
static void __exit crypto_xctr_module_exit(void)
{
crypto_unregister_template(&crypto_xctr_tmpl);
}
subsys_initcall(crypto_xctr_module_init);
module_exit(crypto_xctr_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XCTR block cipher mode of operation");
MODULE_ALIAS_CRYPTO("xctr");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
| linux-master | crypto/xctr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* Serpent Cipher Algorithm.
*
* Copyright (C) 2002 Dag Arne Osvik <[email protected]>
*/
#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <asm/unaligned.h>
#include <linux/types.h>
#include <crypto/serpent.h>
/* Key is padded to the maximum of 256 bits before round key generation.
* Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
*/
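/* Worked example (added): a 128-bit key K[0..15] is padded to
 *
 *	K[0] .. K[15], 0x01, 0x00, ..., 0x00
 *
 * i.e. a single 0x01 marker byte followed by zero bytes up to 32 bytes
 * total; a full 256-bit key is used unchanged.
 */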
#define PHI 0x9e3779b9UL
#define keyiter(a, b, c, d, i, j) \
({ b ^= d; b ^= c; b ^= a; b ^= PHI ^ i; b = rol32(b, 11); k[j] = b; })
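/*
 * Descriptive note (added): keyiter() implements the Serpent prekey
 * recurrence
 *
 *	w[i] = rol32(w[i-8] ^ w[i-5] ^ w[i-3] ^ w[i-1] ^ PHI ^ i, 11)
 *
 * where, at each call site, a holds w[i-8], b holds w[i-5], d holds
 * w[i-3] and c holds w[i-1]; the result is stored at k[j].
 */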
#define loadkeys(x0, x1, x2, x3, i) \
({ x0 = k[i]; x1 = k[i+1]; x2 = k[i+2]; x3 = k[i+3]; })
#define storekeys(x0, x1, x2, x3, i) \
({ k[i] = x0; k[i+1] = x1; k[i+2] = x2; k[i+3] = x3; })
#define store_and_load_keys(x0, x1, x2, x3, s, l) \
({ storekeys(x0, x1, x2, x3, s); loadkeys(x0, x1, x2, x3, l); })
#define K(x0, x1, x2, x3, i) ({ \
x3 ^= k[4*(i)+3]; x2 ^= k[4*(i)+2]; \
x1 ^= k[4*(i)+1]; x0 ^= k[4*(i)+0]; \
})
#define LK(x0, x1, x2, x3, x4, i) ({ \
x0 = rol32(x0, 13);\
x2 = rol32(x2, 3); x1 ^= x0; x4 = x0 << 3; \
x3 ^= x2; x1 ^= x2; \
x1 = rol32(x1, 1); x3 ^= x4; \
x3 = rol32(x3, 7); x4 = x1; \
x0 ^= x1; x4 <<= 7; x2 ^= x3; \
x0 ^= x3; x2 ^= x4; x3 ^= k[4*i+3]; \
x1 ^= k[4*i+1]; x0 = rol32(x0, 5); x2 = rol32(x2, 22);\
x0 ^= k[4*i+0]; x2 ^= k[4*i+2]; \
})
#define KL(x0, x1, x2, x3, x4, i) ({ \
x0 ^= k[4*i+0]; x1 ^= k[4*i+1]; x2 ^= k[4*i+2]; \
x3 ^= k[4*i+3]; x0 = ror32(x0, 5); x2 = ror32(x2, 22);\
x4 = x1; x2 ^= x3; x0 ^= x3; \
x4 <<= 7; x0 ^= x1; x1 = ror32(x1, 1); \
x2 ^= x4; x3 = ror32(x3, 7); x4 = x0 << 3; \
x1 ^= x0; x3 ^= x4; x0 = ror32(x0, 13);\
x1 ^= x2; x3 ^= x2; x2 = ror32(x2, 3); \
})
#define S0(x0, x1, x2, x3, x4) ({ \
x4 = x3; \
x3 |= x0; x0 ^= x4; x4 ^= x2; \
x4 = ~x4; x3 ^= x1; x1 &= x0; \
x1 ^= x4; x2 ^= x0; x0 ^= x3; \
x4 |= x0; x0 ^= x2; x2 &= x1; \
x3 ^= x2; x1 = ~x1; x2 ^= x4; \
x1 ^= x2; \
})
#define S1(x0, x1, x2, x3, x4) ({ \
x4 = x1; \
x1 ^= x0; x0 ^= x3; x3 = ~x3; \
x4 &= x1; x0 |= x1; x3 ^= x2; \
x0 ^= x3; x1 ^= x3; x3 ^= x4; \
x1 |= x4; x4 ^= x2; x2 &= x0; \
x2 ^= x1; x1 |= x0; x0 = ~x0; \
x0 ^= x2; x4 ^= x1; \
})
#define S2(x0, x1, x2, x3, x4) ({ \
x3 = ~x3; \
x1 ^= x0; x4 = x0; x0 &= x2; \
x0 ^= x3; x3 |= x4; x2 ^= x1; \
x3 ^= x1; x1 &= x0; x0 ^= x2; \
x2 &= x3; x3 |= x1; x0 = ~x0; \
x3 ^= x0; x4 ^= x0; x0 ^= x2; \
x1 |= x2; \
})
#define S3(x0, x1, x2, x3, x4) ({ \
x4 = x1; \
x1 ^= x3; x3 |= x0; x4 &= x0; \
x0 ^= x2; x2 ^= x1; x1 &= x3; \
x2 ^= x3; x0 |= x4; x4 ^= x3; \
x1 ^= x0; x0 &= x3; x3 &= x4; \
x3 ^= x2; x4 |= x1; x2 &= x1; \
x4 ^= x3; x0 ^= x3; x3 ^= x2; \
})
#define S4(x0, x1, x2, x3, x4) ({ \
x4 = x3; \
x3 &= x0; x0 ^= x4; \
x3 ^= x2; x2 |= x4; x0 ^= x1; \
x4 ^= x3; x2 |= x0; \
x2 ^= x1; x1 &= x0; \
x1 ^= x4; x4 &= x2; x2 ^= x3; \
x4 ^= x0; x3 |= x1; x1 = ~x1; \
x3 ^= x0; \
})
#define S5(x0, x1, x2, x3, x4) ({ \
x4 = x1; x1 |= x0; \
x2 ^= x1; x3 = ~x3; x4 ^= x0; \
x0 ^= x2; x1 &= x4; x4 |= x3; \
x4 ^= x0; x0 &= x3; x1 ^= x3; \
x3 ^= x2; x0 ^= x1; x2 &= x4; \
x1 ^= x2; x2 &= x0; \
x3 ^= x2; \
})
#define S6(x0, x1, x2, x3, x4) ({ \
x4 = x1; \
x3 ^= x0; x1 ^= x2; x2 ^= x0; \
x0 &= x3; x1 |= x3; x4 = ~x4; \
x0 ^= x1; x1 ^= x2; \
x3 ^= x4; x4 ^= x0; x2 &= x0; \
x4 ^= x1; x2 ^= x3; x3 &= x1; \
x3 ^= x0; x1 ^= x2; \
})
#define S7(x0, x1, x2, x3, x4) ({ \
x1 = ~x1; \
x4 = x1; x0 = ~x0; x1 &= x2; \
x1 ^= x3; x3 |= x4; x4 ^= x2; \
x2 ^= x3; x3 ^= x0; x0 |= x1; \
x2 &= x0; x0 ^= x4; x4 ^= x3; \
x3 &= x0; x4 ^= x1; \
x2 ^= x4; x3 ^= x1; x4 |= x0; \
x4 ^= x1; \
})
#define SI0(x0, x1, x2, x3, x4) ({ \
x4 = x3; x1 ^= x0; \
x3 |= x1; x4 ^= x1; x0 = ~x0; \
x2 ^= x3; x3 ^= x0; x0 &= x1; \
x0 ^= x2; x2 &= x3; x3 ^= x4; \
x2 ^= x3; x1 ^= x3; x3 &= x0; \
x1 ^= x0; x0 ^= x2; x4 ^= x3; \
})
#define SI1(x0, x1, x2, x3, x4) ({ \
x1 ^= x3; x4 = x0; \
x0 ^= x2; x2 = ~x2; x4 |= x1; \
x4 ^= x3; x3 &= x1; x1 ^= x2; \
x2 &= x4; x4 ^= x1; x1 |= x3; \
x3 ^= x0; x2 ^= x0; x0 |= x4; \
x2 ^= x4; x1 ^= x0; \
x4 ^= x1; \
})
#define SI2(x0, x1, x2, x3, x4) ({ \
x2 ^= x1; x4 = x3; x3 = ~x3; \
x3 |= x2; x2 ^= x4; x4 ^= x0; \
x3 ^= x1; x1 |= x2; x2 ^= x0; \
x1 ^= x4; x4 |= x3; x2 ^= x3; \
x4 ^= x2; x2 &= x1; \
x2 ^= x3; x3 ^= x4; x4 ^= x0; \
})
#define SI3(x0, x1, x2, x3, x4) ({ \
x2 ^= x1; \
x4 = x1; x1 &= x2; \
x1 ^= x0; x0 |= x4; x4 ^= x3; \
x0 ^= x3; x3 |= x1; x1 ^= x2; \
x1 ^= x3; x0 ^= x2; x2 ^= x3; \
x3 &= x1; x1 ^= x0; x0 &= x2; \
x4 ^= x3; x3 ^= x0; x0 ^= x1; \
})
#define SI4(x0, x1, x2, x3, x4) ({ \
x2 ^= x3; x4 = x0; x0 &= x1; \
x0 ^= x2; x2 |= x3; x4 = ~x4; \
x1 ^= x0; x0 ^= x2; x2 &= x4; \
x2 ^= x0; x0 |= x4; \
x0 ^= x3; x3 &= x2; \
x4 ^= x3; x3 ^= x1; x1 &= x0; \
x4 ^= x1; x0 ^= x3; \
})
#define SI5(x0, x1, x2, x3, x4) ({ \
x4 = x1; x1 |= x2; \
x2 ^= x4; x1 ^= x3; x3 &= x4; \
x2 ^= x3; x3 |= x0; x0 = ~x0; \
x3 ^= x2; x2 |= x0; x4 ^= x1; \
x2 ^= x4; x4 &= x0; x0 ^= x1; \
x1 ^= x3; x0 &= x2; x2 ^= x3; \
x0 ^= x2; x2 ^= x4; x4 ^= x3; \
})
#define SI6(x0, x1, x2, x3, x4) ({ \
x0 ^= x2; \
x4 = x0; x0 &= x3; x2 ^= x3; \
x0 ^= x2; x3 ^= x1; x2 |= x4; \
x2 ^= x3; x3 &= x0; x0 = ~x0; \
x3 ^= x1; x1 &= x2; x4 ^= x0; \
x3 ^= x4; x4 ^= x2; x0 ^= x1; \
x2 ^= x0; \
})
#define SI7(x0, x1, x2, x3, x4) ({ \
x4 = x3; x3 &= x0; x0 ^= x2; \
x2 |= x4; x4 ^= x1; x0 = ~x0; \
x1 |= x3; x4 ^= x0; x0 &= x2; \
x0 ^= x1; x1 &= x2; x3 ^= x2; \
x4 ^= x3; x2 &= x3; x3 |= x0; \
x1 ^= x4; x3 ^= x4; x4 &= x0; \
x4 ^= x2; \
})
/*
 * Both gcc and clang have misoptimized this function in the past,
* producing horrible object code from spilling temporary variables
* on the stack. Forcing this part out of line avoids that.
*/
static noinline void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2,
u32 r3, u32 r4, u32 *k)
{
k += 100;
S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24);
S4(r1, r2, r4, r3, r0); store_and_load_keys(r2, r4, r3, r0, 24, 20);
S5(r2, r4, r3, r0, r1); store_and_load_keys(r1, r2, r4, r0, 20, 16);
S6(r1, r2, r4, r0, r3); store_and_load_keys(r4, r3, r2, r0, 16, 12);
S7(r4, r3, r2, r0, r1); store_and_load_keys(r1, r2, r0, r4, 12, 8);
S0(r1, r2, r0, r4, r3); store_and_load_keys(r0, r2, r4, r1, 8, 4);
S1(r0, r2, r4, r1, r3); store_and_load_keys(r3, r4, r1, r0, 4, 0);
S2(r3, r4, r1, r0, r2); store_and_load_keys(r2, r4, r3, r0, 0, -4);
S3(r2, r4, r3, r0, r1); store_and_load_keys(r0, r1, r4, r2, -4, -8);
S4(r0, r1, r4, r2, r3); store_and_load_keys(r1, r4, r2, r3, -8, -12);
S5(r1, r4, r2, r3, r0); store_and_load_keys(r0, r1, r4, r3, -12, -16);
S6(r0, r1, r4, r3, r2); store_and_load_keys(r4, r2, r1, r3, -16, -20);
S7(r4, r2, r1, r3, r0); store_and_load_keys(r0, r1, r3, r4, -20, -24);
S0(r0, r1, r3, r4, r2); store_and_load_keys(r3, r1, r4, r0, -24, -28);
k -= 50;
S1(r3, r1, r4, r0, r2); store_and_load_keys(r2, r4, r0, r3, 22, 18);
S2(r2, r4, r0, r3, r1); store_and_load_keys(r1, r4, r2, r3, 18, 14);
S3(r1, r4, r2, r3, r0); store_and_load_keys(r3, r0, r4, r1, 14, 10);
S4(r3, r0, r4, r1, r2); store_and_load_keys(r0, r4, r1, r2, 10, 6);
S5(r0, r4, r1, r2, r3); store_and_load_keys(r3, r0, r4, r2, 6, 2);
S6(r3, r0, r4, r2, r1); store_and_load_keys(r4, r1, r0, r2, 2, -2);
S7(r4, r1, r0, r2, r3); store_and_load_keys(r3, r0, r2, r4, -2, -6);
S0(r3, r0, r2, r4, r1); store_and_load_keys(r2, r0, r4, r3, -6, -10);
S1(r2, r0, r4, r3, r1); store_and_load_keys(r1, r4, r3, r2, -10, -14);
S2(r1, r4, r3, r2, r0); store_and_load_keys(r0, r4, r1, r2, -14, -18);
S3(r0, r4, r1, r2, r3); store_and_load_keys(r2, r3, r4, r0, -18, -22);
k -= 50;
S4(r2, r3, r4, r0, r1); store_and_load_keys(r3, r4, r0, r1, 28, 24);
S5(r3, r4, r0, r1, r2); store_and_load_keys(r2, r3, r4, r1, 24, 20);
S6(r2, r3, r4, r1, r0); store_and_load_keys(r4, r0, r3, r1, 20, 16);
S7(r4, r0, r3, r1, r2); store_and_load_keys(r2, r3, r1, r4, 16, 12);
S0(r2, r3, r1, r4, r0); store_and_load_keys(r1, r3, r4, r2, 12, 8);
S1(r1, r3, r4, r2, r0); store_and_load_keys(r0, r4, r2, r1, 8, 4);
S2(r0, r4, r2, r1, r3); store_and_load_keys(r3, r4, r0, r1, 4, 0);
S3(r3, r4, r0, r1, r2); storekeys(r1, r2, r4, r3, 0);
}
int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
unsigned int keylen)
{
u32 *k = ctx->expkey;
u8 *k8 = (u8 *)k;
u32 r0, r1, r2, r3, r4;
__le32 *lk;
int i;
/* Copy key, add padding */
for (i = 0; i < keylen; ++i)
k8[i] = key[i];
if (i < SERPENT_MAX_KEY_SIZE)
k8[i++] = 1;
while (i < SERPENT_MAX_KEY_SIZE)
k8[i++] = 0;
lk = (__le32 *)k;
k[0] = le32_to_cpu(lk[0]);
k[1] = le32_to_cpu(lk[1]);
k[2] = le32_to_cpu(lk[2]);
k[3] = le32_to_cpu(lk[3]);
k[4] = le32_to_cpu(lk[4]);
k[5] = le32_to_cpu(lk[5]);
k[6] = le32_to_cpu(lk[6]);
k[7] = le32_to_cpu(lk[7]);
/* Expand key using polynomial */
r0 = k[3];
r1 = k[4];
r2 = k[5];
r3 = k[6];
r4 = k[7];
keyiter(k[0], r0, r4, r2, 0, 0);
keyiter(k[1], r1, r0, r3, 1, 1);
keyiter(k[2], r2, r1, r4, 2, 2);
keyiter(k[3], r3, r2, r0, 3, 3);
keyiter(k[4], r4, r3, r1, 4, 4);
keyiter(k[5], r0, r4, r2, 5, 5);
keyiter(k[6], r1, r0, r3, 6, 6);
keyiter(k[7], r2, r1, r4, 7, 7);
keyiter(k[0], r3, r2, r0, 8, 8);
keyiter(k[1], r4, r3, r1, 9, 9);
keyiter(k[2], r0, r4, r2, 10, 10);
keyiter(k[3], r1, r0, r3, 11, 11);
keyiter(k[4], r2, r1, r4, 12, 12);
keyiter(k[5], r3, r2, r0, 13, 13);
keyiter(k[6], r4, r3, r1, 14, 14);
keyiter(k[7], r0, r4, r2, 15, 15);
keyiter(k[8], r1, r0, r3, 16, 16);
keyiter(k[9], r2, r1, r4, 17, 17);
keyiter(k[10], r3, r2, r0, 18, 18);
keyiter(k[11], r4, r3, r1, 19, 19);
keyiter(k[12], r0, r4, r2, 20, 20);
keyiter(k[13], r1, r0, r3, 21, 21);
keyiter(k[14], r2, r1, r4, 22, 22);
keyiter(k[15], r3, r2, r0, 23, 23);
keyiter(k[16], r4, r3, r1, 24, 24);
keyiter(k[17], r0, r4, r2, 25, 25);
keyiter(k[18], r1, r0, r3, 26, 26);
keyiter(k[19], r2, r1, r4, 27, 27);
keyiter(k[20], r3, r2, r0, 28, 28);
keyiter(k[21], r4, r3, r1, 29, 29);
keyiter(k[22], r0, r4, r2, 30, 30);
keyiter(k[23], r1, r0, r3, 31, 31);
k += 50;
keyiter(k[-26], r2, r1, r4, 32, -18);
keyiter(k[-25], r3, r2, r0, 33, -17);
keyiter(k[-24], r4, r3, r1, 34, -16);
keyiter(k[-23], r0, r4, r2, 35, -15);
keyiter(k[-22], r1, r0, r3, 36, -14);
keyiter(k[-21], r2, r1, r4, 37, -13);
keyiter(k[-20], r3, r2, r0, 38, -12);
keyiter(k[-19], r4, r3, r1, 39, -11);
keyiter(k[-18], r0, r4, r2, 40, -10);
keyiter(k[-17], r1, r0, r3, 41, -9);
keyiter(k[-16], r2, r1, r4, 42, -8);
keyiter(k[-15], r3, r2, r0, 43, -7);
keyiter(k[-14], r4, r3, r1, 44, -6);
keyiter(k[-13], r0, r4, r2, 45, -5);
keyiter(k[-12], r1, r0, r3, 46, -4);
keyiter(k[-11], r2, r1, r4, 47, -3);
keyiter(k[-10], r3, r2, r0, 48, -2);
keyiter(k[-9], r4, r3, r1, 49, -1);
keyiter(k[-8], r0, r4, r2, 50, 0);
keyiter(k[-7], r1, r0, r3, 51, 1);
keyiter(k[-6], r2, r1, r4, 52, 2);
keyiter(k[-5], r3, r2, r0, 53, 3);
keyiter(k[-4], r4, r3, r1, 54, 4);
keyiter(k[-3], r0, r4, r2, 55, 5);
keyiter(k[-2], r1, r0, r3, 56, 6);
keyiter(k[-1], r2, r1, r4, 57, 7);
keyiter(k[0], r3, r2, r0, 58, 8);
keyiter(k[1], r4, r3, r1, 59, 9);
keyiter(k[2], r0, r4, r2, 60, 10);
keyiter(k[3], r1, r0, r3, 61, 11);
keyiter(k[4], r2, r1, r4, 62, 12);
keyiter(k[5], r3, r2, r0, 63, 13);
keyiter(k[6], r4, r3, r1, 64, 14);
keyiter(k[7], r0, r4, r2, 65, 15);
keyiter(k[8], r1, r0, r3, 66, 16);
keyiter(k[9], r2, r1, r4, 67, 17);
keyiter(k[10], r3, r2, r0, 68, 18);
keyiter(k[11], r4, r3, r1, 69, 19);
keyiter(k[12], r0, r4, r2, 70, 20);
keyiter(k[13], r1, r0, r3, 71, 21);
keyiter(k[14], r2, r1, r4, 72, 22);
keyiter(k[15], r3, r2, r0, 73, 23);
keyiter(k[16], r4, r3, r1, 74, 24);
keyiter(k[17], r0, r4, r2, 75, 25);
keyiter(k[18], r1, r0, r3, 76, 26);
keyiter(k[19], r2, r1, r4, 77, 27);
keyiter(k[20], r3, r2, r0, 78, 28);
keyiter(k[21], r4, r3, r1, 79, 29);
keyiter(k[22], r0, r4, r2, 80, 30);
keyiter(k[23], r1, r0, r3, 81, 31);
k += 50;
keyiter(k[-26], r2, r1, r4, 82, -18);
keyiter(k[-25], r3, r2, r0, 83, -17);
keyiter(k[-24], r4, r3, r1, 84, -16);
keyiter(k[-23], r0, r4, r2, 85, -15);
keyiter(k[-22], r1, r0, r3, 86, -14);
keyiter(k[-21], r2, r1, r4, 87, -13);
keyiter(k[-20], r3, r2, r0, 88, -12);
keyiter(k[-19], r4, r3, r1, 89, -11);
keyiter(k[-18], r0, r4, r2, 90, -10);
keyiter(k[-17], r1, r0, r3, 91, -9);
keyiter(k[-16], r2, r1, r4, 92, -8);
keyiter(k[-15], r3, r2, r0, 93, -7);
keyiter(k[-14], r4, r3, r1, 94, -6);
keyiter(k[-13], r0, r4, r2, 95, -5);
keyiter(k[-12], r1, r0, r3, 96, -4);
keyiter(k[-11], r2, r1, r4, 97, -3);
keyiter(k[-10], r3, r2, r0, 98, -2);
keyiter(k[-9], r4, r3, r1, 99, -1);
keyiter(k[-8], r0, r4, r2, 100, 0);
keyiter(k[-7], r1, r0, r3, 101, 1);
keyiter(k[-6], r2, r1, r4, 102, 2);
keyiter(k[-5], r3, r2, r0, 103, 3);
keyiter(k[-4], r4, r3, r1, 104, 4);
keyiter(k[-3], r0, r4, r2, 105, 5);
keyiter(k[-2], r1, r0, r3, 106, 6);
keyiter(k[-1], r2, r1, r4, 107, 7);
keyiter(k[0], r3, r2, r0, 108, 8);
keyiter(k[1], r4, r3, r1, 109, 9);
keyiter(k[2], r0, r4, r2, 110, 10);
keyiter(k[3], r1, r0, r3, 111, 11);
keyiter(k[4], r2, r1, r4, 112, 12);
keyiter(k[5], r3, r2, r0, 113, 13);
keyiter(k[6], r4, r3, r1, 114, 14);
keyiter(k[7], r0, r4, r2, 115, 15);
keyiter(k[8], r1, r0, r3, 116, 16);
keyiter(k[9], r2, r1, r4, 117, 17);
keyiter(k[10], r3, r2, r0, 118, 18);
keyiter(k[11], r4, r3, r1, 119, 19);
keyiter(k[12], r0, r4, r2, 120, 20);
keyiter(k[13], r1, r0, r3, 121, 21);
keyiter(k[14], r2, r1, r4, 122, 22);
keyiter(k[15], r3, r2, r0, 123, 23);
keyiter(k[16], r4, r3, r1, 124, 24);
keyiter(k[17], r0, r4, r2, 125, 25);
keyiter(k[18], r1, r0, r3, 126, 26);
keyiter(k[19], r2, r1, r4, 127, 27);
keyiter(k[20], r3, r2, r0, 128, 28);
keyiter(k[21], r4, r3, r1, 129, 29);
keyiter(k[22], r0, r4, r2, 130, 30);
keyiter(k[23], r1, r0, r3, 131, 31);
/* Apply S-boxes */
__serpent_setkey_sbox(r0, r1, r2, r3, r4, ctx->expkey);
return 0;
}
EXPORT_SYMBOL_GPL(__serpent_setkey);
int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
return __serpent_setkey(crypto_tfm_ctx(tfm), key, keylen);
}
EXPORT_SYMBOL_GPL(serpent_setkey);
void __serpent_encrypt(const void *c, u8 *dst, const u8 *src)
{
const struct serpent_ctx *ctx = c;
const u32 *k = ctx->expkey;
u32 r0, r1, r2, r3, r4;
r0 = get_unaligned_le32(src);
r1 = get_unaligned_le32(src + 4);
r2 = get_unaligned_le32(src + 8);
r3 = get_unaligned_le32(src + 12);
K(r0, r1, r2, r3, 0);
S0(r0, r1, r2, r3, r4); LK(r2, r1, r3, r0, r4, 1);
S1(r2, r1, r3, r0, r4); LK(r4, r3, r0, r2, r1, 2);
S2(r4, r3, r0, r2, r1); LK(r1, r3, r4, r2, r0, 3);
S3(r1, r3, r4, r2, r0); LK(r2, r0, r3, r1, r4, 4);
S4(r2, r0, r3, r1, r4); LK(r0, r3, r1, r4, r2, 5);
S5(r0, r3, r1, r4, r2); LK(r2, r0, r3, r4, r1, 6);
S6(r2, r0, r3, r4, r1); LK(r3, r1, r0, r4, r2, 7);
S7(r3, r1, r0, r4, r2); LK(r2, r0, r4, r3, r1, 8);
S0(r2, r0, r4, r3, r1); LK(r4, r0, r3, r2, r1, 9);
S1(r4, r0, r3, r2, r1); LK(r1, r3, r2, r4, r0, 10);
S2(r1, r3, r2, r4, r0); LK(r0, r3, r1, r4, r2, 11);
S3(r0, r3, r1, r4, r2); LK(r4, r2, r3, r0, r1, 12);
S4(r4, r2, r3, r0, r1); LK(r2, r3, r0, r1, r4, 13);
S5(r2, r3, r0, r1, r4); LK(r4, r2, r3, r1, r0, 14);
S6(r4, r2, r3, r1, r0); LK(r3, r0, r2, r1, r4, 15);
S7(r3, r0, r2, r1, r4); LK(r4, r2, r1, r3, r0, 16);
S0(r4, r2, r1, r3, r0); LK(r1, r2, r3, r4, r0, 17);
S1(r1, r2, r3, r4, r0); LK(r0, r3, r4, r1, r2, 18);
S2(r0, r3, r4, r1, r2); LK(r2, r3, r0, r1, r4, 19);
S3(r2, r3, r0, r1, r4); LK(r1, r4, r3, r2, r0, 20);
S4(r1, r4, r3, r2, r0); LK(r4, r3, r2, r0, r1, 21);
S5(r4, r3, r2, r0, r1); LK(r1, r4, r3, r0, r2, 22);
S6(r1, r4, r3, r0, r2); LK(r3, r2, r4, r0, r1, 23);
S7(r3, r2, r4, r0, r1); LK(r1, r4, r0, r3, r2, 24);
S0(r1, r4, r0, r3, r2); LK(r0, r4, r3, r1, r2, 25);
S1(r0, r4, r3, r1, r2); LK(r2, r3, r1, r0, r4, 26);
S2(r2, r3, r1, r0, r4); LK(r4, r3, r2, r0, r1, 27);
S3(r4, r3, r2, r0, r1); LK(r0, r1, r3, r4, r2, 28);
S4(r0, r1, r3, r4, r2); LK(r1, r3, r4, r2, r0, 29);
S5(r1, r3, r4, r2, r0); LK(r0, r1, r3, r2, r4, 30);
S6(r0, r1, r3, r2, r4); LK(r3, r4, r1, r2, r0, 31);
S7(r3, r4, r1, r2, r0); K(r0, r1, r2, r3, 32);
put_unaligned_le32(r0, dst);
put_unaligned_le32(r1, dst + 4);
put_unaligned_le32(r2, dst + 8);
put_unaligned_le32(r3, dst + 12);
}
EXPORT_SYMBOL_GPL(__serpent_encrypt);
static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
__serpent_encrypt(ctx, dst, src);
}
void __serpent_decrypt(const void *c, u8 *dst, const u8 *src)
{
const struct serpent_ctx *ctx = c;
const u32 *k = ctx->expkey;
u32 r0, r1, r2, r3, r4;
r0 = get_unaligned_le32(src);
r1 = get_unaligned_le32(src + 4);
r2 = get_unaligned_le32(src + 8);
r3 = get_unaligned_le32(src + 12);
K(r0, r1, r2, r3, 32);
SI7(r0, r1, r2, r3, r4); KL(r1, r3, r0, r4, r2, 31);
SI6(r1, r3, r0, r4, r2); KL(r0, r2, r4, r1, r3, 30);
SI5(r0, r2, r4, r1, r3); KL(r2, r3, r0, r4, r1, 29);
SI4(r2, r3, r0, r4, r1); KL(r2, r0, r1, r4, r3, 28);
SI3(r2, r0, r1, r4, r3); KL(r1, r2, r3, r4, r0, 27);
SI2(r1, r2, r3, r4, r0); KL(r2, r0, r4, r3, r1, 26);
SI1(r2, r0, r4, r3, r1); KL(r1, r0, r4, r3, r2, 25);
SI0(r1, r0, r4, r3, r2); KL(r4, r2, r0, r1, r3, 24);
SI7(r4, r2, r0, r1, r3); KL(r2, r1, r4, r3, r0, 23);
SI6(r2, r1, r4, r3, r0); KL(r4, r0, r3, r2, r1, 22);
SI5(r4, r0, r3, r2, r1); KL(r0, r1, r4, r3, r2, 21);
SI4(r0, r1, r4, r3, r2); KL(r0, r4, r2, r3, r1, 20);
SI3(r0, r4, r2, r3, r1); KL(r2, r0, r1, r3, r4, 19);
SI2(r2, r0, r1, r3, r4); KL(r0, r4, r3, r1, r2, 18);
SI1(r0, r4, r3, r1, r2); KL(r2, r4, r3, r1, r0, 17);
SI0(r2, r4, r3, r1, r0); KL(r3, r0, r4, r2, r1, 16);
SI7(r3, r0, r4, r2, r1); KL(r0, r2, r3, r1, r4, 15);
SI6(r0, r2, r3, r1, r4); KL(r3, r4, r1, r0, r2, 14);
SI5(r3, r4, r1, r0, r2); KL(r4, r2, r3, r1, r0, 13);
SI4(r4, r2, r3, r1, r0); KL(r4, r3, r0, r1, r2, 12);
SI3(r4, r3, r0, r1, r2); KL(r0, r4, r2, r1, r3, 11);
SI2(r0, r4, r2, r1, r3); KL(r4, r3, r1, r2, r0, 10);
SI1(r4, r3, r1, r2, r0); KL(r0, r3, r1, r2, r4, 9);
SI0(r0, r3, r1, r2, r4); KL(r1, r4, r3, r0, r2, 8);
SI7(r1, r4, r3, r0, r2); KL(r4, r0, r1, r2, r3, 7);
SI6(r4, r0, r1, r2, r3); KL(r1, r3, r2, r4, r0, 6);
SI5(r1, r3, r2, r4, r0); KL(r3, r0, r1, r2, r4, 5);
SI4(r3, r0, r1, r2, r4); KL(r3, r1, r4, r2, r0, 4);
SI3(r3, r1, r4, r2, r0); KL(r4, r3, r0, r2, r1, 3);
SI2(r4, r3, r0, r2, r1); KL(r3, r1, r2, r0, r4, 2);
SI1(r3, r1, r2, r0, r4); KL(r4, r1, r2, r0, r3, 1);
SI0(r4, r1, r2, r0, r3); K(r2, r3, r1, r4, 0);
put_unaligned_le32(r2, dst);
put_unaligned_le32(r3, dst + 4);
put_unaligned_le32(r1, dst + 8);
put_unaligned_le32(r4, dst + 12);
}
EXPORT_SYMBOL_GPL(__serpent_decrypt);
static void serpent_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
__serpent_decrypt(ctx, dst, src);
}
static struct crypto_alg srp_alg = {
.cra_name = "serpent",
.cra_driver_name = "serpent-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SERPENT_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct serpent_ctx),
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = SERPENT_MIN_KEY_SIZE,
.cia_max_keysize = SERPENT_MAX_KEY_SIZE,
.cia_setkey = serpent_setkey,
.cia_encrypt = serpent_encrypt,
.cia_decrypt = serpent_decrypt } }
};
static int __init serpent_mod_init(void)
{
return crypto_register_alg(&srp_alg);
}
static void __exit serpent_mod_fini(void)
{
crypto_unregister_alg(&srp_alg);
}
subsys_initcall(serpent_mod_init);
module_exit(serpent_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serpent Cipher Algorithm");
MODULE_AUTHOR("Dag Arne Osvik <[email protected]>");
MODULE_ALIAS_CRYPTO("serpent");
MODULE_ALIAS_CRYPTO("serpent-generic");
| linux-master | crypto/serpent_generic.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Cryptographic API
*
* Michael MIC (IEEE 802.11i/TKIP) keyed digest
*
* Copyright (c) 2004 Jouni Malinen <[email protected]>
*/
#include <crypto/internal/hash.h>
#include <asm/unaligned.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
struct michael_mic_ctx {
u32 l, r;
};
struct michael_mic_desc_ctx {
__le32 pending;
size_t pending_len;
u32 l, r;
};
static inline u32 xswap(u32 val)
{
return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}
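/*
 * Worked example (added): xswap() swaps the two bytes within each
 * 16-bit half of the word, so xswap(0x12345678) == 0x34127856.
 */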
#define michael_block(l, r) \
do { \
r ^= rol32(l, 17); \
l += r; \
r ^= xswap(l); \
l += r; \
r ^= rol32(l, 3); \
l += r; \
r ^= ror32(l, 2); \
l += r; \
} while (0)
static int michael_init(struct shash_desc *desc)
{
struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc);
struct michael_mic_ctx *ctx = crypto_shash_ctx(desc->tfm);
mctx->pending_len = 0;
mctx->l = ctx->l;
mctx->r = ctx->r;
return 0;
}
static int michael_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc);
if (mctx->pending_len) {
int flen = 4 - mctx->pending_len;
if (flen > len)
flen = len;
memcpy((u8 *)&mctx->pending + mctx->pending_len, data, flen);
mctx->pending_len += flen;
data += flen;
len -= flen;
if (mctx->pending_len < 4)
return 0;
mctx->l ^= le32_to_cpu(mctx->pending);
michael_block(mctx->l, mctx->r);
mctx->pending_len = 0;
}
while (len >= 4) {
mctx->l ^= get_unaligned_le32(data);
michael_block(mctx->l, mctx->r);
data += 4;
len -= 4;
}
if (len > 0) {
mctx->pending_len = len;
memcpy(&mctx->pending, data, len);
}
return 0;
}
static int michael_final(struct shash_desc *desc, u8 *out)
{
struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc);
u8 *data = (u8 *)&mctx->pending;
	/* Last block and padding: append 0x5a, then 4..7 zero bytes */
switch (mctx->pending_len) {
case 0:
mctx->l ^= 0x5a;
break;
case 1:
mctx->l ^= data[0] | 0x5a00;
break;
case 2:
mctx->l ^= data[0] | (data[1] << 8) | 0x5a0000;
break;
case 3:
mctx->l ^= data[0] | (data[1] << 8) | (data[2] << 16) |
0x5a000000;
break;
}
michael_block(mctx->l, mctx->r);
/* l ^= 0; */
michael_block(mctx->l, mctx->r);
put_unaligned_le32(mctx->l, out);
put_unaligned_le32(mctx->r, out + 4);
return 0;
}
static int michael_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
struct michael_mic_ctx *mctx = crypto_shash_ctx(tfm);
if (keylen != 8)
return -EINVAL;
mctx->l = get_unaligned_le32(key);
mctx->r = get_unaligned_le32(key + 4);
return 0;
}
static struct shash_alg alg = {
.digestsize = 8,
.setkey = michael_setkey,
.init = michael_init,
.update = michael_update,
.final = michael_final,
.descsize = sizeof(struct michael_mic_desc_ctx),
.base = {
.cra_name = "michael_mic",
.cra_driver_name = "michael_mic-generic",
.cra_blocksize = 8,
.cra_ctxsize = sizeof(struct michael_mic_ctx),
.cra_module = THIS_MODULE,
}
};
static int __init michael_mic_init(void)
{
return crypto_register_shash(&alg);
}
static void __exit michael_mic_exit(void)
{
crypto_unregister_shash(&alg);
}
subsys_initcall(michael_mic_init);
module_exit(michael_mic_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Michael MIC");
MODULE_AUTHOR("Jouni Malinen <[email protected]>");
MODULE_ALIAS_CRYPTO("michael_mic");
| linux-master | crypto/michael_mic.c |
// SPDX-License-Identifier: GPL-2.0
/*
* CFB: Cipher FeedBack mode
*
* Copyright (c) 2018 [email protected]
*
 * CFB is a stream cipher mode which is layered on top of a block
 * encryption scheme. It works very much like a one-time pad where
 * the pad is generated initially from the encrypted IV and then
 * subsequently from the encrypted previous block of ciphertext. The
 * pad is XORed into the plaintext to get the final ciphertext.
*
 * The scheme of CFB is best described by Wikipedia:
*
* https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#CFB
*
* Note that since the pad for both encryption and decryption is
* generated by an encryption operation, CFB never uses the block
* decryption function.
*/
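/*
 * In equations (clarifying sketch, added), with E_K the block encryption
 * function and C_0 = IV:
 *
 *	encryption: C_i = P_i ^ E_K(C_{i-1})
 *	decryption: P_i = C_i ^ E_K(C_{i-1})
 *
 * which is why only E_K is ever required.
 */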
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
static unsigned int crypto_cfb_bsize(struct crypto_skcipher *tfm)
{
return crypto_cipher_blocksize(skcipher_cipher_simple(tfm));
}
static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm,
const u8 *src, u8 *dst)
{
crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src);
}
/* the final (partial-block) encrypt and decrypt steps are the same */
static void crypto_cfb_final(struct skcipher_walk *walk,
struct crypto_skcipher *tfm)
{
const unsigned long alignmask = crypto_skcipher_alignmask(tfm);
u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *stream = PTR_ALIGN(tmp + 0, alignmask + 1);
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 *iv = walk->iv;
unsigned int nbytes = walk->nbytes;
crypto_cfb_encrypt_one(tfm, iv, stream);
crypto_xor_cpy(dst, stream, src, nbytes);
}
static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
struct crypto_skcipher *tfm)
{
const unsigned int bsize = crypto_cfb_bsize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 *iv = walk->iv;
do {
crypto_cfb_encrypt_one(tfm, iv, dst);
crypto_xor(dst, src, bsize);
iv = dst;
src += bsize;
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
static int crypto_cfb_encrypt_inplace(struct skcipher_walk *walk,
struct crypto_skcipher *tfm)
{
const unsigned int bsize = crypto_cfb_bsize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *iv = walk->iv;
u8 tmp[MAX_CIPHER_BLOCKSIZE];
do {
crypto_cfb_encrypt_one(tfm, iv, tmp);
crypto_xor(src, tmp, bsize);
iv = src;
src += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
static int crypto_cfb_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct skcipher_walk walk;
unsigned int bsize = crypto_cfb_bsize(tfm);
int err;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes >= bsize) {
if (walk.src.virt.addr == walk.dst.virt.addr)
err = crypto_cfb_encrypt_inplace(&walk, tfm);
else
err = crypto_cfb_encrypt_segment(&walk, tfm);
err = skcipher_walk_done(&walk, err);
}
if (walk.nbytes) {
crypto_cfb_final(&walk, tfm);
err = skcipher_walk_done(&walk, 0);
}
return err;
}
static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk,
struct crypto_skcipher *tfm)
{
const unsigned int bsize = crypto_cfb_bsize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 *iv = walk->iv;
do {
crypto_cfb_encrypt_one(tfm, iv, dst);
crypto_xor(dst, src, bsize);
iv = src;
src += bsize;
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
struct crypto_skcipher *tfm)
{
const unsigned int bsize = crypto_cfb_bsize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 * const iv = walk->iv;
u8 tmp[MAX_CIPHER_BLOCKSIZE];
do {
crypto_cfb_encrypt_one(tfm, iv, tmp);
memcpy(iv, src, bsize);
crypto_xor(src, tmp, bsize);
src += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
}
static int crypto_cfb_decrypt_blocks(struct skcipher_walk *walk,
struct crypto_skcipher *tfm)
{
if (walk->src.virt.addr == walk->dst.virt.addr)
return crypto_cfb_decrypt_inplace(walk, tfm);
else
return crypto_cfb_decrypt_segment(walk, tfm);
}
static int crypto_cfb_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct skcipher_walk walk;
const unsigned int bsize = crypto_cfb_bsize(tfm);
int err;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes >= bsize) {
err = crypto_cfb_decrypt_blocks(&walk, tfm);
err = skcipher_walk_done(&walk, err);
}
if (walk.nbytes) {
crypto_cfb_final(&walk, tfm);
err = skcipher_walk_done(&walk, 0);
}
return err;
}
static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
struct crypto_alg *alg;
int err;
inst = skcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
alg = skcipher_ialg_simple(inst);
/* CFB mode is a stream cipher. */
inst->alg.base.cra_blocksize = 1;
/*
* To simplify the implementation, configure the skcipher walk to only
* give a partial block at the very end, never earlier.
*/
inst->alg.chunksize = alg->cra_blocksize;
inst->alg.encrypt = crypto_cfb_encrypt;
inst->alg.decrypt = crypto_cfb_decrypt;
err = skcipher_register_instance(tmpl, inst);
if (err)
inst->free(inst);
return err;
}
static struct crypto_template crypto_cfb_tmpl = {
.name = "cfb",
.create = crypto_cfb_create,
.module = THIS_MODULE,
};
static int __init crypto_cfb_module_init(void)
{
return crypto_register_template(&crypto_cfb_tmpl);
}
static void __exit crypto_cfb_module_exit(void)
{
crypto_unregister_template(&crypto_cfb_tmpl);
}
subsys_initcall(crypto_cfb_module_init);
module_exit(crypto_cfb_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CFB block cipher mode of operation");
MODULE_ALIAS_CRYPTO("cfb");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
| linux-master | crypto/cfb.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* SHA-512 code by Jean-Luc Cooke <[email protected]>
*
* Copyright (c) Jean-Luc Cooke <[email protected]>
* Copyright (c) Andrew McDonald <[email protected]>
* Copyright (c) 2003 Kyle McMartin <[email protected]>
*/
#include <crypto/internal/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/types.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
#include <linux/percpu.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE] = {
0x38, 0xb0, 0x60, 0xa7, 0x51, 0xac, 0x96, 0x38,
0x4c, 0xd9, 0x32, 0x7e, 0xb1, 0xb1, 0xe3, 0x6a,
0x21, 0xfd, 0xb7, 0x11, 0x14, 0xbe, 0x07, 0x43,
0x4c, 0x0c, 0xc7, 0xbf, 0x63, 0xf6, 0xe1, 0xda,
0x27, 0x4e, 0xde, 0xbf, 0xe7, 0x6f, 0x65, 0xfb,
0xd5, 0x1a, 0xd2, 0xf1, 0x48, 0x98, 0xb9, 0x5b
};
EXPORT_SYMBOL_GPL(sha384_zero_message_hash);
const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE] = {
0xcf, 0x83, 0xe1, 0x35, 0x7e, 0xef, 0xb8, 0xbd,
0xf1, 0x54, 0x28, 0x50, 0xd6, 0x6d, 0x80, 0x07,
0xd6, 0x20, 0xe4, 0x05, 0x0b, 0x57, 0x15, 0xdc,
0x83, 0xf4, 0xa9, 0x21, 0xd3, 0x6c, 0xe9, 0xce,
0x47, 0xd0, 0xd1, 0x3c, 0x5d, 0x85, 0xf2, 0xb0,
0xff, 0x83, 0x18, 0xd2, 0x87, 0x7e, 0xec, 0x2f,
0x63, 0xb9, 0x31, 0xbd, 0x47, 0x41, 0x7a, 0x81,
0xa5, 0x38, 0x32, 0x7a, 0xf9, 0x27, 0xda, 0x3e
};
EXPORT_SYMBOL_GPL(sha512_zero_message_hash);
static inline u64 Ch(u64 x, u64 y, u64 z)
{
return z ^ (x & (y ^ z));
}
static inline u64 Maj(u64 x, u64 y, u64 z)
{
return (x & y) | (z & (x | y));
}
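/*
 * Descriptive note (added): Ch is the bitwise "choose" function (each
 * result bit is taken from y where x is 1, and from z where x is 0) and
 * Maj is the bitwise majority of x, y and z, as defined in FIPS 180-4.
 */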
static const u64 sha512_K[80] = {
0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, 0xb5c0fbcfec4d3b2fULL,
0xe9b5dba58189dbbcULL, 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, 0xd807aa98a3030242ULL,
0x12835b0145706fbeULL, 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, 0x9bdc06a725c71235ULL,
0xc19bf174cf692694ULL, 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, 0x2de92c6f592b0275ULL,
0x4a7484aa6ea6e483ULL, 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL, 0xb00327c898fb213fULL,
0xbf597fc7beef0ee4ULL, 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, 0x27b70a8546d22ffcULL,
0x2e1b21385c26c926ULL, 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, 0x81c2c92e47edaee6ULL,
0x92722c851482353bULL, 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, 0xd192e819d6ef5218ULL,
0xd69906245565a910ULL, 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, 0x2748774cdf8eeb99ULL,
0x34b0bcb5e19b48a8ULL, 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL, 0x748f82ee5defb2fcULL,
0x78a5636f43172f60ULL, 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, 0xbef9a3f7b2c67915ULL,
0xc67178f2e372532bULL, 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, 0x06f067aa72176fbaULL,
0x0a637dc5a2c898a6ULL, 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, 0x3c9ebe0a15c9bebcULL,
0x431d67c49c100d4cULL, 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL,
};
#define e0(x) (ror64(x,28) ^ ror64(x,34) ^ ror64(x,39))
#define e1(x) (ror64(x,14) ^ ror64(x,18) ^ ror64(x,41))
#define s0(x) (ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7))
#define s1(x) (ror64(x,19) ^ ror64(x,61) ^ (x >> 6))
static inline void LOAD_OP(int I, u64 *W, const u8 *input)
{
W[I] = get_unaligned_be64((__u64 *)input + I);
}
static inline void BLEND_OP(int I, u64 *W)
{
W[I & 15] += s1(W[(I-2) & 15]) + W[(I-7) & 15] + s0(W[(I-15) & 15]);
}
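/*
 * Descriptive note (added): BLEND_OP computes the SHA-512 message
 * schedule recurrence
 *
 *	W[t] = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16]
 *
 * in a 16-word circular buffer, with all indices reduced modulo 16;
 * W[I & 15] holds W[t-16] on entry.
 */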
static void
sha512_transform(u64 *state, const u8 *input)
{
u64 a, b, c, d, e, f, g, h, t1, t2;
int i;
u64 W[16];
/* load the state into our registers */
a=state[0]; b=state[1]; c=state[2]; d=state[3];
e=state[4]; f=state[5]; g=state[6]; h=state[7];
/* now iterate */
for (i=0; i<80; i+=8) {
if (!(i & 8)) {
int j;
if (i < 16) {
/* load the input */
for (j = 0; j < 16; j++)
LOAD_OP(i + j, W, input);
} else {
for (j = 0; j < 16; j++) {
BLEND_OP(i + j, W);
}
}
}
		t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i] + W[(i & 15)];
t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[(i & 15) + 1];
t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[(i & 15) + 2];
t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[(i & 15) + 3];
t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[(i & 15) + 4];
t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[(i & 15) + 5];
t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[(i & 15) + 6];
t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[(i & 15) + 7];
t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
}
state[0] += a; state[1] += b; state[2] += c; state[3] += d;
state[4] += e; state[5] += f; state[6] += g; state[7] += h;
}
static void sha512_generic_block_fn(struct sha512_state *sst, u8 const *src,
int blocks)
{
while (blocks--) {
sha512_transform(sst->state, src);
src += SHA512_BLOCK_SIZE;
}
}
int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha512_base_do_update(desc, data, len, sha512_generic_block_fn);
}
EXPORT_SYMBOL(crypto_sha512_update);
static int sha512_final(struct shash_desc *desc, u8 *hash)
{
sha512_base_do_finalize(desc, sha512_generic_block_fn);
return sha512_base_finish(desc, hash);
}
int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *hash)
{
sha512_base_do_update(desc, data, len, sha512_generic_block_fn);
return sha512_final(desc, hash);
}
EXPORT_SYMBOL(crypto_sha512_finup);
static struct shash_alg sha512_algs[2] = { {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_base_init,
.update = crypto_sha512_update,
.final = sha512_final,
.finup = crypto_sha512_finup,
.descsize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha512",
.cra_driver_name = "sha512-generic",
.cra_priority = 100,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
}, {
.digestsize = SHA384_DIGEST_SIZE,
.init = sha384_base_init,
.update = crypto_sha512_update,
.final = sha512_final,
.finup = crypto_sha512_finup,
.descsize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha384",
.cra_driver_name = "sha384-generic",
.cra_priority = 100,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
} };
static int __init sha512_generic_mod_init(void)
{
return crypto_register_shashes(sha512_algs, ARRAY_SIZE(sha512_algs));
}
static void __exit sha512_generic_mod_fini(void)
{
crypto_unregister_shashes(sha512_algs, ARRAY_SIZE(sha512_algs));
}
subsys_initcall(sha512_generic_mod_init);
module_exit(sha512_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms");
MODULE_ALIAS_CRYPTO("sha384");
MODULE_ALIAS_CRYPTO("sha384-generic");
MODULE_ALIAS_CRYPTO("sha512");
MODULE_ALIAS_CRYPTO("sha512-generic");
| linux-master | crypto/sha512_generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ECB: Electronic CodeBook mode
*
* Copyright (c) 2006 Herbert Xu <[email protected]>
*/
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
static int crypto_ecb_crypt(struct skcipher_request *req,
struct crypto_cipher *cipher,
void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
const unsigned int bsize = crypto_cipher_blocksize(cipher);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) != 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
do {
fn(crypto_cipher_tfm(cipher), dst, src);
src += bsize;
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int crypto_ecb_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
return crypto_ecb_crypt(req, cipher,
crypto_cipher_alg(cipher)->cia_encrypt);
}
static int crypto_ecb_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
return crypto_ecb_crypt(req, cipher,
crypto_cipher_alg(cipher)->cia_decrypt);
}
static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
int err;
inst = skcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
inst->alg.ivsize = 0; /* ECB mode doesn't take an IV */
inst->alg.encrypt = crypto_ecb_encrypt;
inst->alg.decrypt = crypto_ecb_decrypt;
err = skcipher_register_instance(tmpl, inst);
if (err)
inst->free(inst);
return err;
}
static struct crypto_template crypto_ecb_tmpl = {
.name = "ecb",
.create = crypto_ecb_create,
.module = THIS_MODULE,
};
static int __init crypto_ecb_module_init(void)
{
return crypto_register_template(&crypto_ecb_tmpl);
}
static void __exit crypto_ecb_module_exit(void)
{
crypto_unregister_template(&crypto_ecb_tmpl);
}
subsys_initcall(crypto_ecb_module_init);
module_exit(crypto_ecb_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ECB block cipher mode of operation");
MODULE_ALIAS_CRYPTO("ecb");
| linux-master | crypto/ecb.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2019 Linaro Ltd <[email protected]>
*/
#include <asm/cpufeature.h>
#include <asm/neon.h>
#include "aegis.h"
#include "aegis-neon.h"
int aegis128_have_aes_insn __ro_after_init;
bool crypto_aegis128_have_simd(void)
{
if (cpu_have_feature(cpu_feature(AES))) {
aegis128_have_aes_insn = 1;
return true;
}
return IS_ENABLED(CONFIG_ARM64);
}
void crypto_aegis128_init_simd(struct aegis_state *state,
const union aegis_block *key,
const u8 *iv)
{
kernel_neon_begin();
crypto_aegis128_init_neon(state, key, iv);
kernel_neon_end();
}
void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg)
{
kernel_neon_begin();
crypto_aegis128_update_neon(state, msg);
kernel_neon_end();
}
void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
kernel_neon_begin();
crypto_aegis128_encrypt_chunk_neon(state, dst, src, size);
kernel_neon_end();
}
void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst,
const u8 *src, unsigned int size)
{
kernel_neon_begin();
crypto_aegis128_decrypt_chunk_neon(state, dst, src, size);
kernel_neon_end();
}
int crypto_aegis128_final_simd(struct aegis_state *state,
union aegis_block *tag_xor,
unsigned int assoclen,
unsigned int cryptlen,
unsigned int authsize)
{
int ret;
kernel_neon_begin();
ret = crypto_aegis128_final_neon(state, tag_xor, assoclen, cryptlen,
authsize);
kernel_neon_end();
return ret;
}
| linux-master | crypto/aegis128-neon.c |
/*
* Cryptographic API.
*
* Anubis Algorithm
*
* The Anubis algorithm was developed by Paulo S. L. M. Barreto and
* Vincent Rijmen.
*
* See
*
* P.S.L.M. Barreto, V. Rijmen,
* ``The Anubis block cipher,''
* NESSIE submission, 2000.
*
* This software implements the "tweaked" version of Anubis.
 * Only the S-box and (consequently) the round constants have been
 * changed.
*
* The original authors have disclaimed all copyright interest in this
* code and thus put it in the public domain. The subsequent authors
* have put this under the GNU General Public License.
*
* By Aaron Grothe [email protected], October 28, 2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/types.h>
#define ANUBIS_MIN_KEY_SIZE 16
#define ANUBIS_MAX_KEY_SIZE 40
#define ANUBIS_BLOCK_SIZE 16
#define ANUBIS_MAX_N 10
#define ANUBIS_MAX_ROUNDS (8 + ANUBIS_MAX_N)
struct anubis_ctx {
	int key_len;	/* in bits */
int R;
u32 E[ANUBIS_MAX_ROUNDS + 1][4];
u32 D[ANUBIS_MAX_ROUNDS + 1][4];
};
static const u32 T0[256] = {
0xba69d2bbU, 0x54a84de5U, 0x2f5ebce2U, 0x74e8cd25U,
0x53a651f7U, 0xd3bb6bd0U, 0xd2b96fd6U, 0x4d9a29b3U,
0x50a05dfdU, 0xac458acfU, 0x8d070e09U, 0xbf63c6a5U,
0x70e0dd3dU, 0x52a455f1U, 0x9a29527bU, 0x4c982db5U,
0xeac98f46U, 0xd5b773c4U, 0x97336655U, 0xd1bf63dcU,
0x3366ccaaU, 0x51a259fbU, 0x5bb671c7U, 0xa651a2f3U,
0xdea15ffeU, 0x48903dadU, 0xa84d9ad7U, 0x992f5e71U,
0xdbab4be0U, 0x3264c8acU, 0xb773e695U, 0xfce5d732U,
0xe3dbab70U, 0x9e214263U, 0x913f7e41U, 0x9b2b567dU,
0xe2d9af76U, 0xbb6bd6bdU, 0x4182199bU, 0x6edca579U,
0xa557aef9U, 0xcb8b0b80U, 0x6bd6b167U, 0x95376e59U,
0xa15fbee1U, 0xf3fbeb10U, 0xb17ffe81U, 0x0204080cU,
0xcc851792U, 0xc49537a2U, 0x1d3a744eU, 0x14285078U,
0xc39b2bb0U, 0x63c69157U, 0xdaa94fe6U, 0x5dba69d3U,
0x5fbe61dfU, 0xdca557f2U, 0x7dfae913U, 0xcd871394U,
0x7ffee11fU, 0x5ab475c1U, 0x6cd8ad75U, 0x5cb86dd5U,
0xf7f3fb08U, 0x264c98d4U, 0xffe3db38U, 0xedc79354U,
0xe8cd874aU, 0x9d274e69U, 0x6fdea17fU, 0x8e010203U,
0x19326456U, 0xa05dbae7U, 0xf0fde71aU, 0x890f1e11U,
0x0f1e3c22U, 0x070e1c12U, 0xaf4386c5U, 0xfbebcb20U,
0x08102030U, 0x152a547eU, 0x0d1a342eU, 0x04081018U,
0x01020406U, 0x64c88d45U, 0xdfa35bf8U, 0x76ecc529U,
0x79f2f90bU, 0xdda753f4U, 0x3d7af48eU, 0x162c5874U,
0x3f7efc82U, 0x376edcb2U, 0x6ddaa973U, 0x3870e090U,
0xb96fdeb1U, 0x73e6d137U, 0xe9cf834cU, 0x356ad4beU,
0x55aa49e3U, 0x71e2d93bU, 0x7bf6f107U, 0x8c050a0fU,
0x72e4d531U, 0x880d1a17U, 0xf6f1ff0eU, 0x2a54a8fcU,
0x3e7cf884U, 0x5ebc65d9U, 0x274e9cd2U, 0x468c0589U,
0x0c183028U, 0x65ca8943U, 0x68d0bd6dU, 0x61c2995bU,
0x03060c0aU, 0xc19f23bcU, 0x57ae41efU, 0xd6b17fceU,
0xd9af43ecU, 0x58b07dcdU, 0xd8ad47eaU, 0x66cc8549U,
0xd7b37bc8U, 0x3a74e89cU, 0xc88d078aU, 0x3c78f088U,
0xfae9cf26U, 0x96316253U, 0xa753a6f5U, 0x982d5a77U,
0xecc59752U, 0xb86ddab7U, 0xc7933ba8U, 0xae4182c3U,
0x69d2b96bU, 0x4b9631a7U, 0xab4b96ddU, 0xa94f9ed1U,
0x67ce814fU, 0x0a14283cU, 0x478e018fU, 0xf2f9ef16U,
0xb577ee99U, 0x224488ccU, 0xe5d7b364U, 0xeec19f5eU,
0xbe61c2a3U, 0x2b56acfaU, 0x811f3e21U, 0x1224486cU,
0x831b362dU, 0x1b366c5aU, 0x0e1c3824U, 0x23468ccaU,
0xf5f7f304U, 0x458a0983U, 0x214284c6U, 0xce811f9eU,
0x499239abU, 0x2c58b0e8U, 0xf9efc32cU, 0xe6d1bf6eU,
0xb671e293U, 0x2850a0f0U, 0x172e5c72U, 0x8219322bU,
0x1a34685cU, 0x8b0b161dU, 0xfee1df3eU, 0x8a09121bU,
0x09122436U, 0xc98f038cU, 0x87132635U, 0x4e9c25b9U,
0xe1dfa37cU, 0x2e5cb8e4U, 0xe4d5b762U, 0xe0dda77aU,
0xebcb8b40U, 0x903d7a47U, 0xa455aaffU, 0x1e3c7844U,
0x85172e39U, 0x60c09d5dU, 0x00000000U, 0x254a94deU,
0xf4f5f702U, 0xf1ffe31cU, 0x94356a5fU, 0x0b162c3aU,
0xe7d3bb68U, 0x75eac923U, 0xefc39b58U, 0x3468d0b8U,
0x3162c4a6U, 0xd4b577c2U, 0xd0bd67daU, 0x86112233U,
0x7efce519U, 0xad478ec9U, 0xfde7d334U, 0x2952a4f6U,
0x3060c0a0U, 0x3b76ec9aU, 0x9f234665U, 0xf8edc72aU,
0xc6913faeU, 0x13264c6aU, 0x060c1814U, 0x050a141eU,
0xc59733a4U, 0x11224466U, 0x77eec12fU, 0x7cf8ed15U,
0x7af4f501U, 0x78f0fd0dU, 0x366cd8b4U, 0x1c387048U,
0x3972e496U, 0x59b279cbU, 0x18306050U, 0x56ac45e9U,
0xb37bf68dU, 0xb07dfa87U, 0x244890d8U, 0x204080c0U,
0xb279f28bU, 0x9239724bU, 0xa35bb6edU, 0xc09d27baU,
0x44880d85U, 0x62c49551U, 0x10204060U, 0xb475ea9fU,
0x84152a3fU, 0x43861197U, 0x933b764dU, 0xc2992fb6U,
0x4a9435a1U, 0xbd67cea9U, 0x8f030605U, 0x2d5ab4eeU,
0xbc65caafU, 0x9c254a6fU, 0x6ad4b561U, 0x40801d9dU,
0xcf831b98U, 0xa259b2ebU, 0x801d3a27U, 0x4f9e21bfU,
0x1f3e7c42U, 0xca890f86U, 0xaa4992dbU, 0x42841591U,
};
static const u32 T1[256] = {
0x69babbd2U, 0xa854e54dU, 0x5e2fe2bcU, 0xe87425cdU,
0xa653f751U, 0xbbd3d06bU, 0xb9d2d66fU, 0x9a4db329U,
0xa050fd5dU, 0x45accf8aU, 0x078d090eU, 0x63bfa5c6U,
0xe0703dddU, 0xa452f155U, 0x299a7b52U, 0x984cb52dU,
0xc9ea468fU, 0xb7d5c473U, 0x33975566U, 0xbfd1dc63U,
0x6633aaccU, 0xa251fb59U, 0xb65bc771U, 0x51a6f3a2U,
0xa1defe5fU, 0x9048ad3dU, 0x4da8d79aU, 0x2f99715eU,
0xabdbe04bU, 0x6432acc8U, 0x73b795e6U, 0xe5fc32d7U,
0xdbe370abU, 0x219e6342U, 0x3f91417eU, 0x2b9b7d56U,
0xd9e276afU, 0x6bbbbdd6U, 0x82419b19U, 0xdc6e79a5U,
0x57a5f9aeU, 0x8bcb800bU, 0xd66b67b1U, 0x3795596eU,
0x5fa1e1beU, 0xfbf310ebU, 0x7fb181feU, 0x04020c08U,
0x85cc9217U, 0x95c4a237U, 0x3a1d4e74U, 0x28147850U,
0x9bc3b02bU, 0xc6635791U, 0xa9dae64fU, 0xba5dd369U,
0xbe5fdf61U, 0xa5dcf257U, 0xfa7d13e9U, 0x87cd9413U,
0xfe7f1fe1U, 0xb45ac175U, 0xd86c75adU, 0xb85cd56dU,
0xf3f708fbU, 0x4c26d498U, 0xe3ff38dbU, 0xc7ed5493U,
0xcde84a87U, 0x279d694eU, 0xde6f7fa1U, 0x018e0302U,
0x32195664U, 0x5da0e7baU, 0xfdf01ae7U, 0x0f89111eU,
0x1e0f223cU, 0x0e07121cU, 0x43afc586U, 0xebfb20cbU,
0x10083020U, 0x2a157e54U, 0x1a0d2e34U, 0x08041810U,
0x02010604U, 0xc864458dU, 0xa3dff85bU, 0xec7629c5U,
0xf2790bf9U, 0xa7ddf453U, 0x7a3d8ef4U, 0x2c167458U,
0x7e3f82fcU, 0x6e37b2dcU, 0xda6d73a9U, 0x703890e0U,
0x6fb9b1deU, 0xe67337d1U, 0xcfe94c83U, 0x6a35bed4U,
0xaa55e349U, 0xe2713bd9U, 0xf67b07f1U, 0x058c0f0aU,
0xe47231d5U, 0x0d88171aU, 0xf1f60effU, 0x542afca8U,
0x7c3e84f8U, 0xbc5ed965U, 0x4e27d29cU, 0x8c468905U,
0x180c2830U, 0xca654389U, 0xd0686dbdU, 0xc2615b99U,
0x06030a0cU, 0x9fc1bc23U, 0xae57ef41U, 0xb1d6ce7fU,
0xafd9ec43U, 0xb058cd7dU, 0xadd8ea47U, 0xcc664985U,
0xb3d7c87bU, 0x743a9ce8U, 0x8dc88a07U, 0x783c88f0U,
0xe9fa26cfU, 0x31965362U, 0x53a7f5a6U, 0x2d98775aU,
0xc5ec5297U, 0x6db8b7daU, 0x93c7a83bU, 0x41aec382U,
0xd2696bb9U, 0x964ba731U, 0x4babdd96U, 0x4fa9d19eU,
0xce674f81U, 0x140a3c28U, 0x8e478f01U, 0xf9f216efU,
0x77b599eeU, 0x4422cc88U, 0xd7e564b3U, 0xc1ee5e9fU,
0x61bea3c2U, 0x562bfaacU, 0x1f81213eU, 0x24126c48U,
0x1b832d36U, 0x361b5a6cU, 0x1c0e2438U, 0x4623ca8cU,
0xf7f504f3U, 0x8a458309U, 0x4221c684U, 0x81ce9e1fU,
0x9249ab39U, 0x582ce8b0U, 0xeff92cc3U, 0xd1e66ebfU,
0x71b693e2U, 0x5028f0a0U, 0x2e17725cU, 0x19822b32U,
0x341a5c68U, 0x0b8b1d16U, 0xe1fe3edfU, 0x098a1b12U,
0x12093624U, 0x8fc98c03U, 0x13873526U, 0x9c4eb925U,
0xdfe17ca3U, 0x5c2ee4b8U, 0xd5e462b7U, 0xdde07aa7U,
0xcbeb408bU, 0x3d90477aU, 0x55a4ffaaU, 0x3c1e4478U,
0x1785392eU, 0xc0605d9dU, 0x00000000U, 0x4a25de94U,
0xf5f402f7U, 0xfff11ce3U, 0x35945f6aU, 0x160b3a2cU,
0xd3e768bbU, 0xea7523c9U, 0xc3ef589bU, 0x6834b8d0U,
0x6231a6c4U, 0xb5d4c277U, 0xbdd0da67U, 0x11863322U,
0xfc7e19e5U, 0x47adc98eU, 0xe7fd34d3U, 0x5229f6a4U,
0x6030a0c0U, 0x763b9aecU, 0x239f6546U, 0xedf82ac7U,
0x91c6ae3fU, 0x26136a4cU, 0x0c061418U, 0x0a051e14U,
0x97c5a433U, 0x22116644U, 0xee772fc1U, 0xf87c15edU,
0xf47a01f5U, 0xf0780dfdU, 0x6c36b4d8U, 0x381c4870U,
0x723996e4U, 0xb259cb79U, 0x30185060U, 0xac56e945U,
0x7bb38df6U, 0x7db087faU, 0x4824d890U, 0x4020c080U,
0x79b28bf2U, 0x39924b72U, 0x5ba3edb6U, 0x9dc0ba27U,
0x8844850dU, 0xc4625195U, 0x20106040U, 0x75b49feaU,
0x15843f2aU, 0x86439711U, 0x3b934d76U, 0x99c2b62fU,
0x944aa135U, 0x67bda9ceU, 0x038f0506U, 0x5a2deeb4U,
0x65bcafcaU, 0x259c6f4aU, 0xd46a61b5U, 0x80409d1dU,
0x83cf981bU, 0x59a2ebb2U, 0x1d80273aU, 0x9e4fbf21U,
0x3e1f427cU, 0x89ca860fU, 0x49aadb92U, 0x84429115U,
};
static const u32 T2[256] = {
0xd2bbba69U, 0x4de554a8U, 0xbce22f5eU, 0xcd2574e8U,
0x51f753a6U, 0x6bd0d3bbU, 0x6fd6d2b9U, 0x29b34d9aU,
0x5dfd50a0U, 0x8acfac45U, 0x0e098d07U, 0xc6a5bf63U,
0xdd3d70e0U, 0x55f152a4U, 0x527b9a29U, 0x2db54c98U,
0x8f46eac9U, 0x73c4d5b7U, 0x66559733U, 0x63dcd1bfU,
0xccaa3366U, 0x59fb51a2U, 0x71c75bb6U, 0xa2f3a651U,
0x5ffedea1U, 0x3dad4890U, 0x9ad7a84dU, 0x5e71992fU,
0x4be0dbabU, 0xc8ac3264U, 0xe695b773U, 0xd732fce5U,
0xab70e3dbU, 0x42639e21U, 0x7e41913fU, 0x567d9b2bU,
0xaf76e2d9U, 0xd6bdbb6bU, 0x199b4182U, 0xa5796edcU,
0xaef9a557U, 0x0b80cb8bU, 0xb1676bd6U, 0x6e599537U,
0xbee1a15fU, 0xeb10f3fbU, 0xfe81b17fU, 0x080c0204U,
0x1792cc85U, 0x37a2c495U, 0x744e1d3aU, 0x50781428U,
0x2bb0c39bU, 0x915763c6U, 0x4fe6daa9U, 0x69d35dbaU,
0x61df5fbeU, 0x57f2dca5U, 0xe9137dfaU, 0x1394cd87U,
0xe11f7ffeU, 0x75c15ab4U, 0xad756cd8U, 0x6dd55cb8U,
0xfb08f7f3U, 0x98d4264cU, 0xdb38ffe3U, 0x9354edc7U,
0x874ae8cdU, 0x4e699d27U, 0xa17f6fdeU, 0x02038e01U,
0x64561932U, 0xbae7a05dU, 0xe71af0fdU, 0x1e11890fU,
0x3c220f1eU, 0x1c12070eU, 0x86c5af43U, 0xcb20fbebU,
0x20300810U, 0x547e152aU, 0x342e0d1aU, 0x10180408U,
0x04060102U, 0x8d4564c8U, 0x5bf8dfa3U, 0xc52976ecU,
0xf90b79f2U, 0x53f4dda7U, 0xf48e3d7aU, 0x5874162cU,
0xfc823f7eU, 0xdcb2376eU, 0xa9736ddaU, 0xe0903870U,
0xdeb1b96fU, 0xd13773e6U, 0x834ce9cfU, 0xd4be356aU,
0x49e355aaU, 0xd93b71e2U, 0xf1077bf6U, 0x0a0f8c05U,
0xd53172e4U, 0x1a17880dU, 0xff0ef6f1U, 0xa8fc2a54U,
0xf8843e7cU, 0x65d95ebcU, 0x9cd2274eU, 0x0589468cU,
0x30280c18U, 0x894365caU, 0xbd6d68d0U, 0x995b61c2U,
0x0c0a0306U, 0x23bcc19fU, 0x41ef57aeU, 0x7fced6b1U,
0x43ecd9afU, 0x7dcd58b0U, 0x47ead8adU, 0x854966ccU,
0x7bc8d7b3U, 0xe89c3a74U, 0x078ac88dU, 0xf0883c78U,
0xcf26fae9U, 0x62539631U, 0xa6f5a753U, 0x5a77982dU,
0x9752ecc5U, 0xdab7b86dU, 0x3ba8c793U, 0x82c3ae41U,
0xb96b69d2U, 0x31a74b96U, 0x96ddab4bU, 0x9ed1a94fU,
0x814f67ceU, 0x283c0a14U, 0x018f478eU, 0xef16f2f9U,
0xee99b577U, 0x88cc2244U, 0xb364e5d7U, 0x9f5eeec1U,
0xc2a3be61U, 0xacfa2b56U, 0x3e21811fU, 0x486c1224U,
0x362d831bU, 0x6c5a1b36U, 0x38240e1cU, 0x8cca2346U,
0xf304f5f7U, 0x0983458aU, 0x84c62142U, 0x1f9ece81U,
0x39ab4992U, 0xb0e82c58U, 0xc32cf9efU, 0xbf6ee6d1U,
0xe293b671U, 0xa0f02850U, 0x5c72172eU, 0x322b8219U,
0x685c1a34U, 0x161d8b0bU, 0xdf3efee1U, 0x121b8a09U,
0x24360912U, 0x038cc98fU, 0x26358713U, 0x25b94e9cU,
0xa37ce1dfU, 0xb8e42e5cU, 0xb762e4d5U, 0xa77ae0ddU,
0x8b40ebcbU, 0x7a47903dU, 0xaaffa455U, 0x78441e3cU,
0x2e398517U, 0x9d5d60c0U, 0x00000000U, 0x94de254aU,
0xf702f4f5U, 0xe31cf1ffU, 0x6a5f9435U, 0x2c3a0b16U,
0xbb68e7d3U, 0xc92375eaU, 0x9b58efc3U, 0xd0b83468U,
0xc4a63162U, 0x77c2d4b5U, 0x67dad0bdU, 0x22338611U,
0xe5197efcU, 0x8ec9ad47U, 0xd334fde7U, 0xa4f62952U,
0xc0a03060U, 0xec9a3b76U, 0x46659f23U, 0xc72af8edU,
0x3faec691U, 0x4c6a1326U, 0x1814060cU, 0x141e050aU,
0x33a4c597U, 0x44661122U, 0xc12f77eeU, 0xed157cf8U,
0xf5017af4U, 0xfd0d78f0U, 0xd8b4366cU, 0x70481c38U,
0xe4963972U, 0x79cb59b2U, 0x60501830U, 0x45e956acU,
0xf68db37bU, 0xfa87b07dU, 0x90d82448U, 0x80c02040U,
0xf28bb279U, 0x724b9239U, 0xb6eda35bU, 0x27bac09dU,
0x0d854488U, 0x955162c4U, 0x40601020U, 0xea9fb475U,
0x2a3f8415U, 0x11974386U, 0x764d933bU, 0x2fb6c299U,
0x35a14a94U, 0xcea9bd67U, 0x06058f03U, 0xb4ee2d5aU,
0xcaafbc65U, 0x4a6f9c25U, 0xb5616ad4U, 0x1d9d4080U,
0x1b98cf83U, 0xb2eba259U, 0x3a27801dU, 0x21bf4f9eU,
0x7c421f3eU, 0x0f86ca89U, 0x92dbaa49U, 0x15914284U,
};
static const u32 T3[256] = {
0xbbd269baU, 0xe54da854U, 0xe2bc5e2fU, 0x25cde874U,
0xf751a653U, 0xd06bbbd3U, 0xd66fb9d2U, 0xb3299a4dU,
0xfd5da050U, 0xcf8a45acU, 0x090e078dU, 0xa5c663bfU,
0x3ddde070U, 0xf155a452U, 0x7b52299aU, 0xb52d984cU,
0x468fc9eaU, 0xc473b7d5U, 0x55663397U, 0xdc63bfd1U,
0xaacc6633U, 0xfb59a251U, 0xc771b65bU, 0xf3a251a6U,
0xfe5fa1deU, 0xad3d9048U, 0xd79a4da8U, 0x715e2f99U,
0xe04babdbU, 0xacc86432U, 0x95e673b7U, 0x32d7e5fcU,
0x70abdbe3U, 0x6342219eU, 0x417e3f91U, 0x7d562b9bU,
0x76afd9e2U, 0xbdd66bbbU, 0x9b198241U, 0x79a5dc6eU,
0xf9ae57a5U, 0x800b8bcbU, 0x67b1d66bU, 0x596e3795U,
0xe1be5fa1U, 0x10ebfbf3U, 0x81fe7fb1U, 0x0c080402U,
0x921785ccU, 0xa23795c4U, 0x4e743a1dU, 0x78502814U,
0xb02b9bc3U, 0x5791c663U, 0xe64fa9daU, 0xd369ba5dU,
0xdf61be5fU, 0xf257a5dcU, 0x13e9fa7dU, 0x941387cdU,
0x1fe1fe7fU, 0xc175b45aU, 0x75add86cU, 0xd56db85cU,
0x08fbf3f7U, 0xd4984c26U, 0x38dbe3ffU, 0x5493c7edU,
0x4a87cde8U, 0x694e279dU, 0x7fa1de6fU, 0x0302018eU,
0x56643219U, 0xe7ba5da0U, 0x1ae7fdf0U, 0x111e0f89U,
0x223c1e0fU, 0x121c0e07U, 0xc58643afU, 0x20cbebfbU,
0x30201008U, 0x7e542a15U, 0x2e341a0dU, 0x18100804U,
0x06040201U, 0x458dc864U, 0xf85ba3dfU, 0x29c5ec76U,
0x0bf9f279U, 0xf453a7ddU, 0x8ef47a3dU, 0x74582c16U,
0x82fc7e3fU, 0xb2dc6e37U, 0x73a9da6dU, 0x90e07038U,
0xb1de6fb9U, 0x37d1e673U, 0x4c83cfe9U, 0xbed46a35U,
0xe349aa55U, 0x3bd9e271U, 0x07f1f67bU, 0x0f0a058cU,
0x31d5e472U, 0x171a0d88U, 0x0efff1f6U, 0xfca8542aU,
0x84f87c3eU, 0xd965bc5eU, 0xd29c4e27U, 0x89058c46U,
0x2830180cU, 0x4389ca65U, 0x6dbdd068U, 0x5b99c261U,
0x0a0c0603U, 0xbc239fc1U, 0xef41ae57U, 0xce7fb1d6U,
0xec43afd9U, 0xcd7db058U, 0xea47add8U, 0x4985cc66U,
0xc87bb3d7U, 0x9ce8743aU, 0x8a078dc8U, 0x88f0783cU,
0x26cfe9faU, 0x53623196U, 0xf5a653a7U, 0x775a2d98U,
0x5297c5ecU, 0xb7da6db8U, 0xa83b93c7U, 0xc38241aeU,
0x6bb9d269U, 0xa731964bU, 0xdd964babU, 0xd19e4fa9U,
0x4f81ce67U, 0x3c28140aU, 0x8f018e47U, 0x16eff9f2U,
0x99ee77b5U, 0xcc884422U, 0x64b3d7e5U, 0x5e9fc1eeU,
0xa3c261beU, 0xfaac562bU, 0x213e1f81U, 0x6c482412U,
0x2d361b83U, 0x5a6c361bU, 0x24381c0eU, 0xca8c4623U,
0x04f3f7f5U, 0x83098a45U, 0xc6844221U, 0x9e1f81ceU,
0xab399249U, 0xe8b0582cU, 0x2cc3eff9U, 0x6ebfd1e6U,
0x93e271b6U, 0xf0a05028U, 0x725c2e17U, 0x2b321982U,
0x5c68341aU, 0x1d160b8bU, 0x3edfe1feU, 0x1b12098aU,
0x36241209U, 0x8c038fc9U, 0x35261387U, 0xb9259c4eU,
0x7ca3dfe1U, 0xe4b85c2eU, 0x62b7d5e4U, 0x7aa7dde0U,
0x408bcbebU, 0x477a3d90U, 0xffaa55a4U, 0x44783c1eU,
0x392e1785U, 0x5d9dc060U, 0x00000000U, 0xde944a25U,
0x02f7f5f4U, 0x1ce3fff1U, 0x5f6a3594U, 0x3a2c160bU,
0x68bbd3e7U, 0x23c9ea75U, 0x589bc3efU, 0xb8d06834U,
0xa6c46231U, 0xc277b5d4U, 0xda67bdd0U, 0x33221186U,
0x19e5fc7eU, 0xc98e47adU, 0x34d3e7fdU, 0xf6a45229U,
0xa0c06030U, 0x9aec763bU, 0x6546239fU, 0x2ac7edf8U,
0xae3f91c6U, 0x6a4c2613U, 0x14180c06U, 0x1e140a05U,
0xa43397c5U, 0x66442211U, 0x2fc1ee77U, 0x15edf87cU,
0x01f5f47aU, 0x0dfdf078U, 0xb4d86c36U, 0x4870381cU,
0x96e47239U, 0xcb79b259U, 0x50603018U, 0xe945ac56U,
0x8df67bb3U, 0x87fa7db0U, 0xd8904824U, 0xc0804020U,
0x8bf279b2U, 0x4b723992U, 0xedb65ba3U, 0xba279dc0U,
0x850d8844U, 0x5195c462U, 0x60402010U, 0x9fea75b4U,
0x3f2a1584U, 0x97118643U, 0x4d763b93U, 0xb62f99c2U,
0xa135944aU, 0xa9ce67bdU, 0x0506038fU, 0xeeb45a2dU,
0xafca65bcU, 0x6f4a259cU, 0x61b5d46aU, 0x9d1d8040U,
0x981b83cfU, 0xebb259a2U, 0x273a1d80U, 0xbf219e4fU,
0x427c3e1fU, 0x860f89caU, 0xdb9249aaU, 0x91158442U,
};
static const u32 T4[256] = {
0xbabababaU, 0x54545454U, 0x2f2f2f2fU, 0x74747474U,
0x53535353U, 0xd3d3d3d3U, 0xd2d2d2d2U, 0x4d4d4d4dU,
0x50505050U, 0xacacacacU, 0x8d8d8d8dU, 0xbfbfbfbfU,
0x70707070U, 0x52525252U, 0x9a9a9a9aU, 0x4c4c4c4cU,
0xeaeaeaeaU, 0xd5d5d5d5U, 0x97979797U, 0xd1d1d1d1U,
0x33333333U, 0x51515151U, 0x5b5b5b5bU, 0xa6a6a6a6U,
0xdedededeU, 0x48484848U, 0xa8a8a8a8U, 0x99999999U,
0xdbdbdbdbU, 0x32323232U, 0xb7b7b7b7U, 0xfcfcfcfcU,
0xe3e3e3e3U, 0x9e9e9e9eU, 0x91919191U, 0x9b9b9b9bU,
0xe2e2e2e2U, 0xbbbbbbbbU, 0x41414141U, 0x6e6e6e6eU,
0xa5a5a5a5U, 0xcbcbcbcbU, 0x6b6b6b6bU, 0x95959595U,
0xa1a1a1a1U, 0xf3f3f3f3U, 0xb1b1b1b1U, 0x02020202U,
0xccccccccU, 0xc4c4c4c4U, 0x1d1d1d1dU, 0x14141414U,
0xc3c3c3c3U, 0x63636363U, 0xdadadadaU, 0x5d5d5d5dU,
0x5f5f5f5fU, 0xdcdcdcdcU, 0x7d7d7d7dU, 0xcdcdcdcdU,
0x7f7f7f7fU, 0x5a5a5a5aU, 0x6c6c6c6cU, 0x5c5c5c5cU,
0xf7f7f7f7U, 0x26262626U, 0xffffffffU, 0xededededU,
0xe8e8e8e8U, 0x9d9d9d9dU, 0x6f6f6f6fU, 0x8e8e8e8eU,
0x19191919U, 0xa0a0a0a0U, 0xf0f0f0f0U, 0x89898989U,
0x0f0f0f0fU, 0x07070707U, 0xafafafafU, 0xfbfbfbfbU,
0x08080808U, 0x15151515U, 0x0d0d0d0dU, 0x04040404U,
0x01010101U, 0x64646464U, 0xdfdfdfdfU, 0x76767676U,
0x79797979U, 0xddddddddU, 0x3d3d3d3dU, 0x16161616U,
0x3f3f3f3fU, 0x37373737U, 0x6d6d6d6dU, 0x38383838U,
0xb9b9b9b9U, 0x73737373U, 0xe9e9e9e9U, 0x35353535U,
0x55555555U, 0x71717171U, 0x7b7b7b7bU, 0x8c8c8c8cU,
0x72727272U, 0x88888888U, 0xf6f6f6f6U, 0x2a2a2a2aU,
0x3e3e3e3eU, 0x5e5e5e5eU, 0x27272727U, 0x46464646U,
0x0c0c0c0cU, 0x65656565U, 0x68686868U, 0x61616161U,
0x03030303U, 0xc1c1c1c1U, 0x57575757U, 0xd6d6d6d6U,
0xd9d9d9d9U, 0x58585858U, 0xd8d8d8d8U, 0x66666666U,
0xd7d7d7d7U, 0x3a3a3a3aU, 0xc8c8c8c8U, 0x3c3c3c3cU,
0xfafafafaU, 0x96969696U, 0xa7a7a7a7U, 0x98989898U,
0xececececU, 0xb8b8b8b8U, 0xc7c7c7c7U, 0xaeaeaeaeU,
0x69696969U, 0x4b4b4b4bU, 0xababababU, 0xa9a9a9a9U,
0x67676767U, 0x0a0a0a0aU, 0x47474747U, 0xf2f2f2f2U,
0xb5b5b5b5U, 0x22222222U, 0xe5e5e5e5U, 0xeeeeeeeeU,
0xbebebebeU, 0x2b2b2b2bU, 0x81818181U, 0x12121212U,
0x83838383U, 0x1b1b1b1bU, 0x0e0e0e0eU, 0x23232323U,
0xf5f5f5f5U, 0x45454545U, 0x21212121U, 0xcecececeU,
0x49494949U, 0x2c2c2c2cU, 0xf9f9f9f9U, 0xe6e6e6e6U,
0xb6b6b6b6U, 0x28282828U, 0x17171717U, 0x82828282U,
0x1a1a1a1aU, 0x8b8b8b8bU, 0xfefefefeU, 0x8a8a8a8aU,
0x09090909U, 0xc9c9c9c9U, 0x87878787U, 0x4e4e4e4eU,
0xe1e1e1e1U, 0x2e2e2e2eU, 0xe4e4e4e4U, 0xe0e0e0e0U,
0xebebebebU, 0x90909090U, 0xa4a4a4a4U, 0x1e1e1e1eU,
0x85858585U, 0x60606060U, 0x00000000U, 0x25252525U,
0xf4f4f4f4U, 0xf1f1f1f1U, 0x94949494U, 0x0b0b0b0bU,
0xe7e7e7e7U, 0x75757575U, 0xefefefefU, 0x34343434U,
0x31313131U, 0xd4d4d4d4U, 0xd0d0d0d0U, 0x86868686U,
0x7e7e7e7eU, 0xadadadadU, 0xfdfdfdfdU, 0x29292929U,
0x30303030U, 0x3b3b3b3bU, 0x9f9f9f9fU, 0xf8f8f8f8U,
0xc6c6c6c6U, 0x13131313U, 0x06060606U, 0x05050505U,
0xc5c5c5c5U, 0x11111111U, 0x77777777U, 0x7c7c7c7cU,
0x7a7a7a7aU, 0x78787878U, 0x36363636U, 0x1c1c1c1cU,
0x39393939U, 0x59595959U, 0x18181818U, 0x56565656U,
0xb3b3b3b3U, 0xb0b0b0b0U, 0x24242424U, 0x20202020U,
0xb2b2b2b2U, 0x92929292U, 0xa3a3a3a3U, 0xc0c0c0c0U,
0x44444444U, 0x62626262U, 0x10101010U, 0xb4b4b4b4U,
0x84848484U, 0x43434343U, 0x93939393U, 0xc2c2c2c2U,
0x4a4a4a4aU, 0xbdbdbdbdU, 0x8f8f8f8fU, 0x2d2d2d2dU,
0xbcbcbcbcU, 0x9c9c9c9cU, 0x6a6a6a6aU, 0x40404040U,
0xcfcfcfcfU, 0xa2a2a2a2U, 0x80808080U, 0x4f4f4f4fU,
0x1f1f1f1fU, 0xcacacacaU, 0xaaaaaaaaU, 0x42424242U,
};
static const u32 T5[256] = {
0x00000000U, 0x01020608U, 0x02040c10U, 0x03060a18U,
0x04081820U, 0x050a1e28U, 0x060c1430U, 0x070e1238U,
0x08103040U, 0x09123648U, 0x0a143c50U, 0x0b163a58U,
0x0c182860U, 0x0d1a2e68U, 0x0e1c2470U, 0x0f1e2278U,
0x10206080U, 0x11226688U, 0x12246c90U, 0x13266a98U,
0x142878a0U, 0x152a7ea8U, 0x162c74b0U, 0x172e72b8U,
0x183050c0U, 0x193256c8U, 0x1a345cd0U, 0x1b365ad8U,
0x1c3848e0U, 0x1d3a4ee8U, 0x1e3c44f0U, 0x1f3e42f8U,
0x2040c01dU, 0x2142c615U, 0x2244cc0dU, 0x2346ca05U,
0x2448d83dU, 0x254ade35U, 0x264cd42dU, 0x274ed225U,
0x2850f05dU, 0x2952f655U, 0x2a54fc4dU, 0x2b56fa45U,
0x2c58e87dU, 0x2d5aee75U, 0x2e5ce46dU, 0x2f5ee265U,
0x3060a09dU, 0x3162a695U, 0x3264ac8dU, 0x3366aa85U,
0x3468b8bdU, 0x356abeb5U, 0x366cb4adU, 0x376eb2a5U,
0x387090ddU, 0x397296d5U, 0x3a749ccdU, 0x3b769ac5U,
0x3c7888fdU, 0x3d7a8ef5U, 0x3e7c84edU, 0x3f7e82e5U,
0x40809d3aU, 0x41829b32U, 0x4284912aU, 0x43869722U,
0x4488851aU, 0x458a8312U, 0x468c890aU, 0x478e8f02U,
0x4890ad7aU, 0x4992ab72U, 0x4a94a16aU, 0x4b96a762U,
0x4c98b55aU, 0x4d9ab352U, 0x4e9cb94aU, 0x4f9ebf42U,
0x50a0fdbaU, 0x51a2fbb2U, 0x52a4f1aaU, 0x53a6f7a2U,
0x54a8e59aU, 0x55aae392U, 0x56ace98aU, 0x57aeef82U,
0x58b0cdfaU, 0x59b2cbf2U, 0x5ab4c1eaU, 0x5bb6c7e2U,
0x5cb8d5daU, 0x5dbad3d2U, 0x5ebcd9caU, 0x5fbedfc2U,
0x60c05d27U, 0x61c25b2fU, 0x62c45137U, 0x63c6573fU,
0x64c84507U, 0x65ca430fU, 0x66cc4917U, 0x67ce4f1fU,
0x68d06d67U, 0x69d26b6fU, 0x6ad46177U, 0x6bd6677fU,
0x6cd87547U, 0x6dda734fU, 0x6edc7957U, 0x6fde7f5fU,
0x70e03da7U, 0x71e23bafU, 0x72e431b7U, 0x73e637bfU,
0x74e82587U, 0x75ea238fU, 0x76ec2997U, 0x77ee2f9fU,
0x78f00de7U, 0x79f20befU, 0x7af401f7U, 0x7bf607ffU,
0x7cf815c7U, 0x7dfa13cfU, 0x7efc19d7U, 0x7ffe1fdfU,
0x801d2774U, 0x811f217cU, 0x82192b64U, 0x831b2d6cU,
0x84153f54U, 0x8517395cU, 0x86113344U, 0x8713354cU,
0x880d1734U, 0x890f113cU, 0x8a091b24U, 0x8b0b1d2cU,
0x8c050f14U, 0x8d07091cU, 0x8e010304U, 0x8f03050cU,
0x903d47f4U, 0x913f41fcU, 0x92394be4U, 0x933b4decU,
0x94355fd4U, 0x953759dcU, 0x963153c4U, 0x973355ccU,
0x982d77b4U, 0x992f71bcU, 0x9a297ba4U, 0x9b2b7dacU,
0x9c256f94U, 0x9d27699cU, 0x9e216384U, 0x9f23658cU,
0xa05de769U, 0xa15fe161U, 0xa259eb79U, 0xa35bed71U,
0xa455ff49U, 0xa557f941U, 0xa651f359U, 0xa753f551U,
0xa84dd729U, 0xa94fd121U, 0xaa49db39U, 0xab4bdd31U,
0xac45cf09U, 0xad47c901U, 0xae41c319U, 0xaf43c511U,
0xb07d87e9U, 0xb17f81e1U, 0xb2798bf9U, 0xb37b8df1U,
0xb4759fc9U, 0xb57799c1U, 0xb67193d9U, 0xb77395d1U,
0xb86db7a9U, 0xb96fb1a1U, 0xba69bbb9U, 0xbb6bbdb1U,
0xbc65af89U, 0xbd67a981U, 0xbe61a399U, 0xbf63a591U,
0xc09dba4eU, 0xc19fbc46U, 0xc299b65eU, 0xc39bb056U,
0xc495a26eU, 0xc597a466U, 0xc691ae7eU, 0xc793a876U,
0xc88d8a0eU, 0xc98f8c06U, 0xca89861eU, 0xcb8b8016U,
0xcc85922eU, 0xcd879426U, 0xce819e3eU, 0xcf839836U,
0xd0bddaceU, 0xd1bfdcc6U, 0xd2b9d6deU, 0xd3bbd0d6U,
0xd4b5c2eeU, 0xd5b7c4e6U, 0xd6b1cefeU, 0xd7b3c8f6U,
0xd8adea8eU, 0xd9afec86U, 0xdaa9e69eU, 0xdbabe096U,
0xdca5f2aeU, 0xdda7f4a6U, 0xdea1febeU, 0xdfa3f8b6U,
0xe0dd7a53U, 0xe1df7c5bU, 0xe2d97643U, 0xe3db704bU,
0xe4d56273U, 0xe5d7647bU, 0xe6d16e63U, 0xe7d3686bU,
0xe8cd4a13U, 0xe9cf4c1bU, 0xeac94603U, 0xebcb400bU,
0xecc55233U, 0xedc7543bU, 0xeec15e23U, 0xefc3582bU,
0xf0fd1ad3U, 0xf1ff1cdbU, 0xf2f916c3U, 0xf3fb10cbU,
0xf4f502f3U, 0xf5f704fbU, 0xf6f10ee3U, 0xf7f308ebU,
0xf8ed2a93U, 0xf9ef2c9bU, 0xfae92683U, 0xfbeb208bU,
0xfce532b3U, 0xfde734bbU, 0xfee13ea3U, 0xffe338abU,
};
static const u32 rc[] = {
0xba542f74U, 0x53d3d24dU, 0x50ac8dbfU, 0x70529a4cU,
0xead597d1U, 0x33515ba6U, 0xde48a899U, 0xdb32b7fcU,
0xe39e919bU, 0xe2bb416eU, 0xa5cb6b95U, 0xa1f3b102U,
0xccc41d14U, 0xc363da5dU, 0x5fdc7dcdU, 0x7f5a6c5cU,
0xf726ffedU, 0xe89d6f8eU, 0x19a0f089U,
};
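/*
 * The round constants are not independent data: rc[r] packs four
 * consecutive S-box outputs, S[4r] || S[4r+1] || S[4r+2] || S[4r+3].
 * This can be checked against T4[] above, which replicates S[x] into
 * all four bytes: rc[0] = 0xba542f74 matches T4[0..3] = 0xbabababa,
 * 0x54545454, 0x2f2f2f2f, 0x74747474.
 */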
static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *key = (const __be32 *)in_key;
int N, R, i, r;
u32 kappa[ANUBIS_MAX_N];
u32 inter[ANUBIS_MAX_N];
switch (key_len) {
case 16: case 20: case 24: case 28:
case 32: case 36: case 40:
break;
default:
return -EINVAL;
}
ctx->key_len = key_len * 8;
N = ctx->key_len >> 5;
ctx->R = R = 8 + N;
/*
 * map cipher key to initial key state (mu):
 */
for (i = 0; i < N; i++)
kappa[i] = be32_to_cpu(key[i]);
/*
* generate R + 1 round keys:
*/
for (r = 0; r <= R; r++) {
u32 K0, K1, K2, K3;
/*
* generate r-th round key K^r:
*/
K0 = T4[(kappa[N - 1] >> 24) ];
K1 = T4[(kappa[N - 1] >> 16) & 0xff];
K2 = T4[(kappa[N - 1] >> 8) & 0xff];
K3 = T4[(kappa[N - 1] ) & 0xff];
for (i = N - 2; i >= 0; i--) {
K0 = T4[(kappa[i] >> 24) ] ^
(T5[(K0 >> 24) ] & 0xff000000U) ^
(T5[(K0 >> 16) & 0xff] & 0x00ff0000U) ^
(T5[(K0 >> 8) & 0xff] & 0x0000ff00U) ^
(T5[(K0 ) & 0xff] & 0x000000ffU);
K1 = T4[(kappa[i] >> 16) & 0xff] ^
(T5[(K1 >> 24) ] & 0xff000000U) ^
(T5[(K1 >> 16) & 0xff] & 0x00ff0000U) ^
(T5[(K1 >> 8) & 0xff] & 0x0000ff00U) ^
(T5[(K1 ) & 0xff] & 0x000000ffU);
K2 = T4[(kappa[i] >> 8) & 0xff] ^
(T5[(K2 >> 24) ] & 0xff000000U) ^
(T5[(K2 >> 16) & 0xff] & 0x00ff0000U) ^
(T5[(K2 >> 8) & 0xff] & 0x0000ff00U) ^
(T5[(K2 ) & 0xff] & 0x000000ffU);
K3 = T4[(kappa[i] ) & 0xff] ^
(T5[(K3 >> 24) ] & 0xff000000U) ^
(T5[(K3 >> 16) & 0xff] & 0x00ff0000U) ^
(T5[(K3 >> 8) & 0xff] & 0x0000ff00U) ^
(T5[(K3 ) & 0xff] & 0x000000ffU);
}
ctx->E[r][0] = K0;
ctx->E[r][1] = K1;
ctx->E[r][2] = K2;
ctx->E[r][3] = K3;
/*
* compute kappa^{r+1} from kappa^r:
*/
if (r == R)
break;
for (i = 0; i < N; i++) {
int j = i;
inter[i] = T0[(kappa[j--] >> 24) ];
if (j < 0)
j = N - 1;
inter[i] ^= T1[(kappa[j--] >> 16) & 0xff];
if (j < 0)
j = N - 1;
inter[i] ^= T2[(kappa[j--] >> 8) & 0xff];
if (j < 0)
j = N - 1;
inter[i] ^= T3[(kappa[j ] ) & 0xff];
}
kappa[0] = inter[0] ^ rc[r];
for (i = 1; i < N; i++)
kappa[i] = inter[i];
}
/*
* generate inverse key schedule: K'^0 = K^R, K'^R =
* K^0, K'^r = theta(K^{R-r}):
*/
for (i = 0; i < 4; i++) {
ctx->D[0][i] = ctx->E[R][i];
ctx->D[R][i] = ctx->E[0][i];
}
for (r = 1; r < R; r++) {
for (i = 0; i < 4; i++) {
u32 v = ctx->E[R - r][i];
ctx->D[r][i] =
T0[T4[(v >> 24) ] & 0xff] ^
T1[T4[(v >> 16) & 0xff] & 0xff] ^
T2[T4[(v >> 8) & 0xff] & 0xff] ^
T3[T4[(v ) & 0xff] & 0xff];
}
}
return 0;
}
static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
u8 *ciphertext, const u8 *plaintext, const int R)
{
const __be32 *src = (const __be32 *)plaintext;
__be32 *dst = (__be32 *)ciphertext;
int i, r;
u32 state[4];
u32 inter[4];
/*
* map plaintext block to cipher state (mu)
* and add initial round key (sigma[K^0]):
*/
for (i = 0; i < 4; i++)
state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i];
/*
* R - 1 full rounds:
*/
for (r = 1; r < R; r++) {
inter[0] =
T0[(state[0] >> 24) ] ^
T1[(state[1] >> 24) ] ^
T2[(state[2] >> 24) ] ^
T3[(state[3] >> 24) ] ^
roundKey[r][0];
inter[1] =
T0[(state[0] >> 16) & 0xff] ^
T1[(state[1] >> 16) & 0xff] ^
T2[(state[2] >> 16) & 0xff] ^
T3[(state[3] >> 16) & 0xff] ^
roundKey[r][1];
inter[2] =
T0[(state[0] >> 8) & 0xff] ^
T1[(state[1] >> 8) & 0xff] ^
T2[(state[2] >> 8) & 0xff] ^
T3[(state[3] >> 8) & 0xff] ^
roundKey[r][2];
inter[3] =
T0[(state[0] ) & 0xff] ^
T1[(state[1] ) & 0xff] ^
T2[(state[2] ) & 0xff] ^
T3[(state[3] ) & 0xff] ^
roundKey[r][3];
state[0] = inter[0];
state[1] = inter[1];
state[2] = inter[2];
state[3] = inter[3];
}
/*
* last round:
*/
inter[0] =
(T0[(state[0] >> 24) ] & 0xff000000U) ^
(T1[(state[1] >> 24) ] & 0x00ff0000U) ^
(T2[(state[2] >> 24) ] & 0x0000ff00U) ^
(T3[(state[3] >> 24) ] & 0x000000ffU) ^
roundKey[R][0];
inter[1] =
(T0[(state[0] >> 16) & 0xff] & 0xff000000U) ^
(T1[(state[1] >> 16) & 0xff] & 0x00ff0000U) ^
(T2[(state[2] >> 16) & 0xff] & 0x0000ff00U) ^
(T3[(state[3] >> 16) & 0xff] & 0x000000ffU) ^
roundKey[R][1];
inter[2] =
(T0[(state[0] >> 8) & 0xff] & 0xff000000U) ^
(T1[(state[1] >> 8) & 0xff] & 0x00ff0000U) ^
(T2[(state[2] >> 8) & 0xff] & 0x0000ff00U) ^
(T3[(state[3] >> 8) & 0xff] & 0x000000ffU) ^
roundKey[R][2];
inter[3] =
(T0[(state[0] ) & 0xff] & 0xff000000U) ^
(T1[(state[1] ) & 0xff] & 0x00ff0000U) ^
(T2[(state[2] ) & 0xff] & 0x0000ff00U) ^
(T3[(state[3] ) & 0xff] & 0x000000ffU) ^
roundKey[R][3];
/*
* map cipher state to ciphertext block (mu^{-1}):
*/
for (i = 0; i < 4; i++)
dst[i] = cpu_to_be32(inter[i]);
}
static void anubis_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
anubis_crypt(ctx->E, dst, src, ctx->R);
}
static void anubis_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
anubis_crypt(ctx->D, dst, src, ctx->R);
}
static struct crypto_alg anubis_alg = {
.cra_name = "anubis",
.cra_driver_name = "anubis-generic",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = ANUBIS_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct anubis_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = ANUBIS_MIN_KEY_SIZE,
.cia_max_keysize = ANUBIS_MAX_KEY_SIZE,
.cia_setkey = anubis_setkey,
.cia_encrypt = anubis_encrypt,
.cia_decrypt = anubis_decrypt } }
};
static int __init anubis_mod_init(void)
{
return crypto_register_alg(&anubis_alg);
}
static void __exit anubis_mod_fini(void)
{
crypto_unregister_alg(&anubis_alg);
}
subsys_initcall(anubis_mod_init);
module_exit(anubis_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Anubis Cryptographic Algorithm");
MODULE_ALIAS_CRYPTO("anubis");
| linux-master | crypto/anubis.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Cryptographic API.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/lzo.h>
#include <crypto/internal/scompress.h>
struct lzorle_ctx {
void *lzorle_comp_mem;
};
static void *lzorle_alloc_ctx(struct crypto_scomp *tfm)
{
void *ctx;
ctx = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
return ctx;
}
static int lzorle_init(struct crypto_tfm *tfm)
{
struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->lzorle_comp_mem = lzorle_alloc_ctx(NULL);
if (IS_ERR(ctx->lzorle_comp_mem))
return -ENOMEM;
return 0;
}
static void lzorle_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
kvfree(ctx);
}
static void lzorle_exit(struct crypto_tfm *tfm)
{
struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm);
lzorle_free_ctx(NULL, ctx->lzorle_comp_mem);
}
static int __lzorle_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
int err;
err = lzorle1x_1_compress(src, slen, dst, &tmp_len, ctx);
if (err != LZO_E_OK)
return -EINVAL;
*dlen = tmp_len;
return 0;
}
static int lzorle_compress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm);
return __lzorle_compress(src, slen, dst, dlen, ctx->lzorle_comp_mem);
}
static int lzorle_scompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
{
return __lzorle_compress(src, slen, dst, dlen, ctx);
}
static int __lzorle_decompress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
int err;
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
err = lzo1x_decompress_safe(src, slen, dst, &tmp_len);
if (err != LZO_E_OK)
return -EINVAL;
*dlen = tmp_len;
return 0;
}
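/*
 * Note the in/out convention of *dlen in the two helpers above: on
 * entry it is the caller's destination size, on success it is
 * overwritten with the number of bytes actually produced. Only the
 * safe decompression path enforces the bound; for compression the
 * caller is expected to size dst for the worst case
 * (lzo1x_worst_compress() in <linux/lzo.h>), e.g.
 * 4096 + 4096/16 + 67 = 4419 bytes for a 4096-byte page.
 */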
static int lzorle_decompress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
return __lzorle_decompress(src, slen, dst, dlen);
}
static int lzorle_sdecompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
{
return __lzorle_decompress(src, slen, dst, dlen);
}
static struct crypto_alg alg = {
.cra_name = "lzo-rle",
.cra_driver_name = "lzo-rle-generic",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct lzorle_ctx),
.cra_module = THIS_MODULE,
.cra_init = lzorle_init,
.cra_exit = lzorle_exit,
.cra_u = { .compress = {
.coa_compress = lzorle_compress,
.coa_decompress = lzorle_decompress } }
};
static struct scomp_alg scomp = {
.alloc_ctx = lzorle_alloc_ctx,
.free_ctx = lzorle_free_ctx,
.compress = lzorle_scompress,
.decompress = lzorle_sdecompress,
.base = {
.cra_name = "lzo-rle",
.cra_driver_name = "lzo-rle-scomp",
.cra_module = THIS_MODULE,
}
};
static int __init lzorle_mod_init(void)
{
int ret;
ret = crypto_register_alg(&alg);
if (ret)
return ret;
ret = crypto_register_scomp(&scomp);
if (ret) {
crypto_unregister_alg(&alg);
return ret;
}
return ret;
}
static void __exit lzorle_mod_fini(void)
{
crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
}
subsys_initcall(lzorle_mod_init);
module_exit(lzorle_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZO-RLE Compression Algorithm");
MODULE_ALIAS_CRYPTO("lzo-rle");
| linux-master | crypto/lzo-rle.c |
// SPDX-License-Identifier: (GPL-2.0-only OR Apache-2.0)
/*
* Generic implementation of the BLAKE2b digest algorithm. Based on the BLAKE2b
* reference implementation, but it has been heavily modified for use in the
* kernel. The reference implementation was:
*
* Copyright 2012, Samuel Neves <[email protected]>. You may use this under
* the terms of the CC0, the OpenSSL Licence, or the Apache Public License
* 2.0, at your option. The terms of these licenses can be found at:
*
* - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
* - OpenSSL license : https://www.openssl.org/source/license.html
* - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
*
* More information about BLAKE2 can be found at https://blake2.net.
*/
#include <asm/unaligned.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <crypto/internal/blake2b.h>
#include <crypto/internal/hash.h>
static const u8 blake2b_sigma[12][16] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 },
{ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 },
{ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 },
{ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 },
{ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 },
{ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 },
{ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 },
{ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 },
{ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 },
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
{ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }
};
static void blake2b_increment_counter(struct blake2b_state *S, const u64 inc)
{
S->t[0] += inc;
S->t[1] += (S->t[0] < inc);
}
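/*
 * S->t[] is a 128-bit little-endian count of the bytes compressed so
 * far; (S->t[0] < inc) is the standard unsigned carry-out test. For
 * example, with t[0] = U64_MAX and inc = 128 the addition wraps to
 * t[0] = 127, 127 < 128 holds, and the carry propagates into t[1].
 */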
#define G(r,i,a,b,c,d) \
do { \
a = a + b + m[blake2b_sigma[r][2*i+0]]; \
d = ror64(d ^ a, 32); \
c = c + d; \
b = ror64(b ^ c, 24); \
a = a + b + m[blake2b_sigma[r][2*i+1]]; \
d = ror64(d ^ a, 16); \
c = c + d; \
b = ror64(b ^ c, 63); \
} while (0)
#define ROUND(r) \
do { \
G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \
G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \
G(r,2,v[ 2],v[ 6],v[10],v[14]); \
G(r,3,v[ 3],v[ 7],v[11],v[15]); \
G(r,4,v[ 0],v[ 5],v[10],v[15]); \
G(r,5,v[ 1],v[ 6],v[11],v[12]); \
G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \
G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \
} while (0)
static void blake2b_compress_one_generic(struct blake2b_state *S,
const u8 block[BLAKE2B_BLOCK_SIZE])
{
u64 m[16];
u64 v[16];
size_t i;
for (i = 0; i < 16; ++i)
m[i] = get_unaligned_le64(block + i * sizeof(m[i]));
for (i = 0; i < 8; ++i)
v[i] = S->h[i];
v[ 8] = BLAKE2B_IV0;
v[ 9] = BLAKE2B_IV1;
v[10] = BLAKE2B_IV2;
v[11] = BLAKE2B_IV3;
v[12] = BLAKE2B_IV4 ^ S->t[0];
v[13] = BLAKE2B_IV5 ^ S->t[1];
v[14] = BLAKE2B_IV6 ^ S->f[0];
v[15] = BLAKE2B_IV7 ^ S->f[1];
ROUND(0);
ROUND(1);
ROUND(2);
ROUND(3);
ROUND(4);
ROUND(5);
ROUND(6);
ROUND(7);
ROUND(8);
ROUND(9);
ROUND(10);
ROUND(11);
#ifdef CONFIG_CC_IS_CLANG
#pragma nounroll /* https://bugs.llvm.org/show_bug.cgi?id=45803 */
#endif
for (i = 0; i < 8; ++i)
S->h[i] = S->h[i] ^ v[i] ^ v[i + 8];
}
#undef G
#undef ROUND
void blake2b_compress_generic(struct blake2b_state *state,
const u8 *block, size_t nblocks, u32 inc)
{
do {
blake2b_increment_counter(state, inc);
blake2b_compress_one_generic(state, block);
block += BLAKE2B_BLOCK_SIZE;
} while (--nblocks);
}
EXPORT_SYMBOL(blake2b_compress_generic);
static int crypto_blake2b_update_generic(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
return crypto_blake2b_update(desc, in, inlen, blake2b_compress_generic);
}
static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out)
{
return crypto_blake2b_final(desc, out, blake2b_compress_generic);
}
#define BLAKE2B_ALG(name, driver_name, digest_size) \
{ \
.base.cra_name = name, \
.base.cra_driver_name = driver_name, \
.base.cra_priority = 100, \
.base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \
.base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \
.base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \
.base.cra_module = THIS_MODULE, \
.digestsize = digest_size, \
.setkey = crypto_blake2b_setkey, \
.init = crypto_blake2b_init, \
.update = crypto_blake2b_update_generic, \
.final = crypto_blake2b_final_generic, \
.descsize = sizeof(struct blake2b_state), \
}
static struct shash_alg blake2b_algs[] = {
BLAKE2B_ALG("blake2b-160", "blake2b-160-generic",
BLAKE2B_160_HASH_SIZE),
BLAKE2B_ALG("blake2b-256", "blake2b-256-generic",
BLAKE2B_256_HASH_SIZE),
BLAKE2B_ALG("blake2b-384", "blake2b-384-generic",
BLAKE2B_384_HASH_SIZE),
BLAKE2B_ALG("blake2b-512", "blake2b-512-generic",
BLAKE2B_512_HASH_SIZE),
};
static int __init blake2b_mod_init(void)
{
return crypto_register_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs));
}
static void __exit blake2b_mod_fini(void)
{
crypto_unregister_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs));
}
subsys_initcall(blake2b_mod_init);
module_exit(blake2b_mod_fini);
MODULE_AUTHOR("David Sterba <[email protected]>");
MODULE_DESCRIPTION("BLAKE2b generic implementation");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("blake2b-160");
MODULE_ALIAS_CRYPTO("blake2b-160-generic");
MODULE_ALIAS_CRYPTO("blake2b-256");
MODULE_ALIAS_CRYPTO("blake2b-256-generic");
MODULE_ALIAS_CRYPTO("blake2b-384");
MODULE_ALIAS_CRYPTO("blake2b-384-generic");
MODULE_ALIAS_CRYPTO("blake2b-512");
MODULE_ALIAS_CRYPTO("blake2b-512-generic");
| linux-master | crypto/blake2b_generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Synchronous Compression operations
*
* Copyright 2015 LG Electronics Inc.
* Copyright (c) 2016, Intel Corporation
* Author: Giovanni Cabiddu <[email protected]>
*/
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include "compress.h"
struct scomp_scratch {
spinlock_t lock;
void *src;
void *dst;
};
static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};
static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
static int __maybe_unused crypto_scomp_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_comp rscomp;
memset(&rscomp, 0, sizeof(rscomp));
strscpy(rscomp.type, "scomp", sizeof(rscomp.type));
return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
sizeof(rscomp), &rscomp);
}
static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_puts(m, "type : scomp\n");
}
static void crypto_scomp_free_scratches(void)
{
struct scomp_scratch *scratch;
int i;
for_each_possible_cpu(i) {
scratch = per_cpu_ptr(&scomp_scratch, i);
vfree(scratch->src);
vfree(scratch->dst);
scratch->src = NULL;
scratch->dst = NULL;
}
}
static int crypto_scomp_alloc_scratches(void)
{
struct scomp_scratch *scratch;
int i;
for_each_possible_cpu(i) {
void *mem;
scratch = per_cpu_ptr(&scomp_scratch, i);
mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
if (!mem)
goto error;
scratch->src = mem;
mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
if (!mem)
goto error;
scratch->dst = mem;
}
return 0;
error:
crypto_scomp_free_scratches();
return -ENOMEM;
}
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
int ret = 0;
mutex_lock(&scomp_lock);
if (!scomp_scratch_users++)
ret = crypto_scomp_alloc_scratches();
mutex_unlock(&scomp_lock);
return ret;
}
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
void **tfm_ctx = acomp_tfm_ctx(tfm);
struct crypto_scomp *scomp = *tfm_ctx;
void **ctx = acomp_request_ctx(req);
struct scomp_scratch *scratch;
int ret;
if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
return -EINVAL;
if (req->dst && !req->dlen)
return -EINVAL;
if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
req->dlen = SCOMP_SCRATCH_SIZE;
scratch = raw_cpu_ptr(&scomp_scratch);
spin_lock(&scratch->lock);
scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
if (dir)
ret = crypto_scomp_compress(scomp, scratch->src, req->slen,
scratch->dst, &req->dlen, *ctx);
else
ret = crypto_scomp_decompress(scomp, scratch->src, req->slen,
scratch->dst, &req->dlen, *ctx);
if (!ret) {
if (!req->dst) {
req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
if (!req->dst) {
ret = -ENOMEM;
goto out;
}
}
scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
1);
}
out:
spin_unlock(&scratch->lock);
return ret;
}
static int scomp_acomp_compress(struct acomp_req *req)
{
return scomp_acomp_comp_decomp(req, 1);
}
static int scomp_acomp_decompress(struct acomp_req *req)
{
return scomp_acomp_comp_decomp(req, 0);
}
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
crypto_free_scomp(*ctx);
mutex_lock(&scomp_lock);
if (!--scomp_scratch_users)
crypto_scomp_free_scratches();
mutex_unlock(&scomp_lock);
}
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
struct crypto_alg *calg = tfm->__crt_alg;
struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
struct crypto_scomp *scomp;
if (!crypto_mod_get(calg))
return -EAGAIN;
scomp = crypto_create_tfm(calg, &crypto_scomp_type);
if (IS_ERR(scomp)) {
crypto_mod_put(calg);
return PTR_ERR(scomp);
}
*ctx = scomp;
tfm->exit = crypto_exit_scomp_ops_async;
crt->compress = scomp_acomp_compress;
crt->decompress = scomp_acomp_decompress;
crt->dst_free = sgl_free;
crt->reqsize = sizeof(void *);
return 0;
}
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
struct crypto_scomp *scomp = *tfm_ctx;
void *ctx;
ctx = crypto_scomp_alloc_ctx(scomp);
if (IS_ERR(ctx)) {
kfree(req);
return NULL;
}
*req->__ctx = ctx;
return req;
}
void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
struct crypto_scomp *scomp = *tfm_ctx;
void *ctx = *req->__ctx;
if (ctx)
crypto_scomp_free_ctx(scomp, ctx);
}
static const struct crypto_type crypto_scomp_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_scomp_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_acomp_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SCOMPRESS,
.tfmsize = offsetof(struct crypto_scomp, base),
};
int crypto_register_scomp(struct scomp_alg *alg)
{
struct crypto_alg *base = &alg->calg.base;
comp_prepare_alg(&alg->calg);
base->cra_type = &crypto_scomp_type;
base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);
void crypto_unregister_scomp(struct scomp_alg *alg)
{
crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
int crypto_register_scomps(struct scomp_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_register_scomp(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_scomp(&algs[i]);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);
void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");
| linux-master | crypto/scompress.c |
// SPDX-License-Identifier: GPL-2.0
/*
* NHPoly1305 - ε-almost-∆-universal hash function for Adiantum
*
* Copyright 2018 Google LLC
*/
/*
* "NHPoly1305" is the main component of Adiantum hashing.
* Specifically, it is the calculation
*
* H_L ← Poly1305_{K_L}(NH_{K_N}(pad_{128}(L)))
*
* from the procedure in section 6.4 of the Adiantum paper [1]. It is an
* ε-almost-∆-universal (ε-∆U) hash function for equal-length inputs over
* Z/(2^{128}Z), where the "∆" operation is addition. It hashes 1024-byte
* chunks of the input with the NH hash function [2], reducing the input length
* by 32x. The resulting NH digests are evaluated as a polynomial in
* GF(2^{130}-5), like in the Poly1305 MAC [3]. Note that the polynomial
* evaluation by itself would suffice to achieve the ε-∆U property; NH is used
* for performance since it's over twice as fast as Poly1305.
*
* This is *not* a cryptographic hash function; do not use it as such!
*
* [1] Adiantum: length-preserving encryption for entry-level processors
* (https://eprint.iacr.org/2018/720.pdf)
* [2] UMAC: Fast and Secure Message Authentication
* (https://fastcrypto.org/umac/umac_proc.pdf)
* [3] The Poly1305-AES message-authentication code
* (https://cr.yp.to/mac/poly1305-20050329.pdf)
*/
#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <crypto/nhpoly1305.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
static void nh_generic(const u32 *key, const u8 *message, size_t message_len,
__le64 hash[NH_NUM_PASSES])
{
u64 sums[4] = { 0, 0, 0, 0 };
BUILD_BUG_ON(NH_PAIR_STRIDE != 2);
BUILD_BUG_ON(NH_NUM_PASSES != 4);
while (message_len) {
u32 m0 = get_unaligned_le32(message + 0);
u32 m1 = get_unaligned_le32(message + 4);
u32 m2 = get_unaligned_le32(message + 8);
u32 m3 = get_unaligned_le32(message + 12);
sums[0] += (u64)(u32)(m0 + key[ 0]) * (u32)(m2 + key[ 2]);
sums[1] += (u64)(u32)(m0 + key[ 4]) * (u32)(m2 + key[ 6]);
sums[2] += (u64)(u32)(m0 + key[ 8]) * (u32)(m2 + key[10]);
sums[3] += (u64)(u32)(m0 + key[12]) * (u32)(m2 + key[14]);
sums[0] += (u64)(u32)(m1 + key[ 1]) * (u32)(m3 + key[ 3]);
sums[1] += (u64)(u32)(m1 + key[ 5]) * (u32)(m3 + key[ 7]);
sums[2] += (u64)(u32)(m1 + key[ 9]) * (u32)(m3 + key[11]);
sums[3] += (u64)(u32)(m1 + key[13]) * (u32)(m3 + key[15]);
key += NH_MESSAGE_UNIT / sizeof(key[0]);
message += NH_MESSAGE_UNIT;
message_len -= NH_MESSAGE_UNIT;
}
hash[0] = cpu_to_le64(sums[0]);
hash[1] = cpu_to_le64(sums[1]);
hash[2] = cpu_to_le64(sums[2]);
hash[3] = cpu_to_le64(sums[3]);
}
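/*
 * Written out, the loop above computes for each pass p in 0..3
 *
 *	sums[p] = sum over 16-byte units j of
 *		( (m0_j + k[4j + 4p + 0]) * (m2_j + k[4j + 4p + 2])
 *		+ (m1_j + k[4j + 4p + 1]) * (m3_j + k[4j + 4p + 3]) ) mod 2^64
 *
 * where the inner additions are done mod 2^32 before the widening
 * multiply. The index arithmetic is a sketch of the 4-word-per-unit
 * key stride; the authoritative definition is the UMAC paper cited in
 * the header comment.
 */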
/* Pass the next NH hash value through Poly1305 */
static void process_nh_hash_value(struct nhpoly1305_state *state,
const struct nhpoly1305_key *key)
{
BUILD_BUG_ON(NH_HASH_BYTES % POLY1305_BLOCK_SIZE != 0);
poly1305_core_blocks(&state->poly_state, &key->poly_key, state->nh_hash,
NH_HASH_BYTES / POLY1305_BLOCK_SIZE, 1);
}
/*
* Feed the next portion of the source data, as a whole number of 16-byte
* "NH message units", through NH and Poly1305. Each NH hash is taken over
* 1024 bytes, except possibly the final one which is taken over a multiple of
* 16 bytes up to 1024. Also, in the case where data is passed in misaligned
* chunks, we combine partial hashes; the end result is the same either way.
*/
static void nhpoly1305_units(struct nhpoly1305_state *state,
const struct nhpoly1305_key *key,
const u8 *src, unsigned int srclen, nh_t nh_fn)
{
do {
unsigned int bytes;
if (state->nh_remaining == 0) {
/* Starting a new NH message */
bytes = min_t(unsigned int, srclen, NH_MESSAGE_BYTES);
nh_fn(key->nh_key, src, bytes, state->nh_hash);
state->nh_remaining = NH_MESSAGE_BYTES - bytes;
} else {
/* Continuing a previous NH message */
__le64 tmp_hash[NH_NUM_PASSES];
unsigned int pos;
int i;
pos = NH_MESSAGE_BYTES - state->nh_remaining;
bytes = min(srclen, state->nh_remaining);
nh_fn(&key->nh_key[pos / 4], src, bytes, tmp_hash);
for (i = 0; i < NH_NUM_PASSES; i++)
le64_add_cpu(&state->nh_hash[i],
le64_to_cpu(tmp_hash[i]));
state->nh_remaining -= bytes;
}
if (state->nh_remaining == 0)
process_nh_hash_value(state, key);
src += bytes;
srclen -= bytes;
} while (srclen);
}
int crypto_nhpoly1305_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct nhpoly1305_key *ctx = crypto_shash_ctx(tfm);
int i;
if (keylen != NHPOLY1305_KEY_SIZE)
return -EINVAL;
poly1305_core_setkey(&ctx->poly_key, key);
key += POLY1305_BLOCK_SIZE;
for (i = 0; i < NH_KEY_WORDS; i++)
ctx->nh_key[i] = get_unaligned_le32(key + i * sizeof(u32));
return 0;
}
EXPORT_SYMBOL(crypto_nhpoly1305_setkey);
int crypto_nhpoly1305_init(struct shash_desc *desc)
{
struct nhpoly1305_state *state = shash_desc_ctx(desc);
poly1305_core_init(&state->poly_state);
state->buflen = 0;
state->nh_remaining = 0;
return 0;
}
EXPORT_SYMBOL(crypto_nhpoly1305_init);
int crypto_nhpoly1305_update_helper(struct shash_desc *desc,
const u8 *src, unsigned int srclen,
nh_t nh_fn)
{
struct nhpoly1305_state *state = shash_desc_ctx(desc);
const struct nhpoly1305_key *key = crypto_shash_ctx(desc->tfm);
unsigned int bytes;
if (state->buflen) {
bytes = min(srclen, (int)NH_MESSAGE_UNIT - state->buflen);
memcpy(&state->buffer[state->buflen], src, bytes);
state->buflen += bytes;
if (state->buflen < NH_MESSAGE_UNIT)
return 0;
nhpoly1305_units(state, key, state->buffer, NH_MESSAGE_UNIT,
nh_fn);
state->buflen = 0;
src += bytes;
srclen -= bytes;
}
if (srclen >= NH_MESSAGE_UNIT) {
bytes = round_down(srclen, NH_MESSAGE_UNIT);
nhpoly1305_units(state, key, src, bytes, nh_fn);
src += bytes;
srclen -= bytes;
}
if (srclen) {
memcpy(state->buffer, src, srclen);
state->buflen = srclen;
}
return 0;
}
EXPORT_SYMBOL(crypto_nhpoly1305_update_helper);
int crypto_nhpoly1305_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
return crypto_nhpoly1305_update_helper(desc, src, srclen, nh_generic);
}
EXPORT_SYMBOL(crypto_nhpoly1305_update);
int crypto_nhpoly1305_final_helper(struct shash_desc *desc, u8 *dst, nh_t nh_fn)
{
struct nhpoly1305_state *state = shash_desc_ctx(desc);
const struct nhpoly1305_key *key = crypto_shash_ctx(desc->tfm);
if (state->buflen) {
memset(&state->buffer[state->buflen], 0,
NH_MESSAGE_UNIT - state->buflen);
nhpoly1305_units(state, key, state->buffer, NH_MESSAGE_UNIT,
nh_fn);
}
if (state->nh_remaining)
process_nh_hash_value(state, key);
poly1305_core_emit(&state->poly_state, NULL, dst);
return 0;
}
EXPORT_SYMBOL(crypto_nhpoly1305_final_helper);
int crypto_nhpoly1305_final(struct shash_desc *desc, u8 *dst)
{
return crypto_nhpoly1305_final_helper(desc, dst, nh_generic);
}
EXPORT_SYMBOL(crypto_nhpoly1305_final);
static struct shash_alg nhpoly1305_alg = {
.base.cra_name = "nhpoly1305",
.base.cra_driver_name = "nhpoly1305-generic",
.base.cra_priority = 100,
.base.cra_ctxsize = sizeof(struct nhpoly1305_key),
.base.cra_module = THIS_MODULE,
.digestsize = POLY1305_DIGEST_SIZE,
.init = crypto_nhpoly1305_init,
.update = crypto_nhpoly1305_update,
.final = crypto_nhpoly1305_final,
.setkey = crypto_nhpoly1305_setkey,
.descsize = sizeof(struct nhpoly1305_state),
};
static int __init nhpoly1305_mod_init(void)
{
return crypto_register_shash(&nhpoly1305_alg);
}
static void __exit nhpoly1305_mod_exit(void)
{
crypto_unregister_shash(&nhpoly1305_alg);
}
subsys_initcall(nhpoly1305_mod_init);
module_exit(nhpoly1305_mod_exit);
MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <[email protected]>");
MODULE_ALIAS_CRYPTO("nhpoly1305");
MODULE_ALIAS_CRYPTO("nhpoly1305-generic");
| linux-master | crypto/nhpoly1305.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Adiantum length-preserving encryption mode
*
* Copyright 2018 Google LLC
*/
/*
* Adiantum is a tweakable, length-preserving encryption mode designed for fast
* and secure disk encryption, especially on CPUs without dedicated crypto
* instructions. Adiantum encrypts each sector using the XChaCha12 stream
* cipher, two passes of an ε-almost-∆-universal (ε-∆U) hash function based on
* NH and Poly1305, and an invocation of the AES-256 block cipher on a single
* 16-byte block. See the paper for details:
*
* Adiantum: length-preserving encryption for entry-level processors
* (https://eprint.iacr.org/2018/720.pdf)
*
* For flexibility, this implementation also allows other ciphers:
*
* - Stream cipher: XChaCha12 or XChaCha20
* - Block cipher: any with a 128-bit block size and 256-bit key
*
* This implementation doesn't currently allow other ε-∆U hash functions, i.e.
* HPolyC is not supported. This is because Adiantum is ~20% faster than HPolyC
* but still provably as secure, and also the ε-∆U hash function of HBSH is
* formally defined to take two inputs (tweak, message) which makes it difficult
* to wrap with the crypto_shash API. Rather, some details need to be handled
* here. Nevertheless, if needed in the future, support for other ε-∆U hash
* functions could be added here.
*/
#include <crypto/b128ops.h>
#include <crypto/chacha.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <crypto/internal/skcipher.h>
#include <crypto/nhpoly1305.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
/*
* Size of right-hand part of input data, in bytes; also the size of the block
* cipher's block size and the hash function's output.
*/
#define BLOCKCIPHER_BLOCK_SIZE 16
/* Size of the block cipher key (K_E) in bytes */
#define BLOCKCIPHER_KEY_SIZE 32
/* Size of the hash key (K_H) in bytes */
#define HASH_KEY_SIZE (POLY1305_BLOCK_SIZE + NHPOLY1305_KEY_SIZE)
/*
* The specification allows variable-length tweaks, but Linux's crypto API
* currently only allows algorithms to support a single length. The "natural"
* tweak length for Adiantum is 16, since that fits into one Poly1305 block for
* the best performance. But longer tweaks are useful for fscrypt, to avoid
* needing to derive per-file keys. So instead we use two blocks, or 32 bytes.
*/
#define TWEAK_SIZE 32
struct adiantum_instance_ctx {
struct crypto_skcipher_spawn streamcipher_spawn;
struct crypto_cipher_spawn blockcipher_spawn;
struct crypto_shash_spawn hash_spawn;
};
struct adiantum_tfm_ctx {
struct crypto_skcipher *streamcipher;
struct crypto_cipher *blockcipher;
struct crypto_shash *hash;
struct poly1305_core_key header_hash_key;
};
struct adiantum_request_ctx {
/*
* Buffer for right-hand part of data, i.e.
*
* P_L => P_M => C_M => C_R when encrypting, or
* C_R => C_M => P_M => P_L when decrypting.
*
* Also used to build the IV for the stream cipher.
*/
union {
u8 bytes[XCHACHA_IV_SIZE];
__le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
le128 bignum; /* interpret as element of Z/(2^{128}Z) */
} rbuf;
bool enc; /* true if encrypting, false if decrypting */
/*
* The result of the Poly1305 ε-∆U hash function applied to
* (bulk length, tweak)
*/
le128 header_hash;
/* Sub-requests, must be last */
union {
struct shash_desc hash_desc;
struct skcipher_request streamcipher_req;
} u;
};
/*
* Given the XChaCha stream key K_S, derive the block cipher key K_E and the
* hash key K_H as follows:
*
* K_E || K_H || ... = XChaCha(key=K_S, nonce=1||0^191)
*
* Note that this denotes using bits from the XChaCha keystream, which here we
* get indirectly by encrypting a buffer containing all 0's.
*/
static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct {
u8 iv[XCHACHA_IV_SIZE];
u8 derived_keys[BLOCKCIPHER_KEY_SIZE + HASH_KEY_SIZE];
struct scatterlist sg;
struct crypto_wait wait;
struct skcipher_request req; /* must be last */
} *data;
u8 *keyp;
int err;
/* Set the stream cipher key (K_S) */
crypto_skcipher_clear_flags(tctx->streamcipher, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(tctx->streamcipher,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(tctx->streamcipher, key, keylen);
if (err)
return err;
/* Derive the subkeys */
data = kzalloc(sizeof(*data) +
crypto_skcipher_reqsize(tctx->streamcipher), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->iv[0] = 1;
sg_init_one(&data->sg, data->derived_keys, sizeof(data->derived_keys));
crypto_init_wait(&data->wait);
skcipher_request_set_tfm(&data->req, tctx->streamcipher);
skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &data->wait);
skcipher_request_set_crypt(&data->req, &data->sg, &data->sg,
sizeof(data->derived_keys), data->iv);
err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), &data->wait);
if (err)
goto out;
keyp = data->derived_keys;
/* Set the block cipher key (K_E) */
crypto_cipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(tctx->blockcipher,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(tctx->blockcipher, keyp,
BLOCKCIPHER_KEY_SIZE);
if (err)
goto out;
keyp += BLOCKCIPHER_KEY_SIZE;
/* Set the hash key (K_H) */
poly1305_core_setkey(&tctx->header_hash_key, keyp);
keyp += POLY1305_BLOCK_SIZE;
crypto_shash_clear_flags(tctx->hash, CRYPTO_TFM_REQ_MASK);
crypto_shash_set_flags(tctx->hash, crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_shash_setkey(tctx->hash, keyp, NHPOLY1305_KEY_SIZE);
keyp += NHPOLY1305_KEY_SIZE;
WARN_ON(keyp != &data->derived_keys[ARRAY_SIZE(data->derived_keys)]);
out:
kfree_sensitive(data);
return err;
}
/* Addition in Z/(2^{128}Z) */
static inline void le128_add(le128 *r, const le128 *v1, const le128 *v2)
{
u64 x = le64_to_cpu(v1->b);
u64 y = le64_to_cpu(v2->b);
r->b = cpu_to_le64(x + y);
r->a = cpu_to_le64(le64_to_cpu(v1->a) + le64_to_cpu(v2->a) +
(x + y < x));
}
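/*
 * The (x + y < x) term is the usual unsigned carry-out test: e.g. for
 * x = 2^64 - 1 and y = 2 the low limb wraps to 1, 1 < x holds, and a 1
 * is carried into the high limb. le128_sub() below uses the mirrored
 * borrow test (x - y > x).
 */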
/* Subtraction in Z/(2^{128}Z) */
static inline void le128_sub(le128 *r, const le128 *v1, const le128 *v2)
{
u64 x = le64_to_cpu(v1->b);
u64 y = le64_to_cpu(v2->b);
r->b = cpu_to_le64(x - y);
r->a = cpu_to_le64(le64_to_cpu(v1->a) - le64_to_cpu(v2->a) -
(x - y > x));
}
/*
* Apply the Poly1305 ε-∆U hash function to (bulk length, tweak) and save the
* result to rctx->header_hash. This is the calculation
*
* H_T ← Poly1305_{K_T}(bin_{128}(|L|) || T)
*
* from the procedure in section 6.4 of the Adiantum paper. The resulting value
* is reused in both the first and second hash steps. Specifically, it's added
* to the result of an independently keyed ε-∆U hash function (for equal length
* inputs only) taken over the left-hand part (the "bulk") of the message, to
* give the overall Adiantum hash of the (tweak, left-hand part) pair.
*/
static void adiantum_hash_header(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
struct {
__le64 message_bits;
__le64 padding;
} header = {
.message_bits = cpu_to_le64((u64)bulk_len * 8)
};
struct poly1305_state state;
poly1305_core_init(&state);
BUILD_BUG_ON(sizeof(header) % POLY1305_BLOCK_SIZE != 0);
poly1305_core_blocks(&state, &tctx->header_hash_key,
&header, sizeof(header) / POLY1305_BLOCK_SIZE, 1);
BUILD_BUG_ON(TWEAK_SIZE % POLY1305_BLOCK_SIZE != 0);
poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1);
poly1305_core_emit(&state, NULL, &rctx->header_hash);
}
/* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
static int adiantum_hash_message(struct skcipher_request *req,
struct scatterlist *sgl, le128 *digest)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
struct shash_desc *hash_desc = &rctx->u.hash_desc;
struct sg_mapping_iter miter;
unsigned int i, n;
int err;
hash_desc->tfm = tctx->hash;
err = crypto_shash_init(hash_desc);
if (err)
return err;
sg_miter_start(&miter, sgl, sg_nents(sgl),
SG_MITER_FROM_SG | SG_MITER_ATOMIC);
for (i = 0; i < bulk_len; i += n) {
sg_miter_next(&miter);
n = min_t(unsigned int, miter.length, bulk_len - i);
err = crypto_shash_update(hash_desc, miter.addr, n);
if (err)
break;
}
sg_miter_stop(&miter);
if (err)
return err;
return crypto_shash_final(hash_desc, (u8 *)digest);
}
/* Continue Adiantum encryption/decryption after the stream cipher step */
static int adiantum_finish(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
le128 digest;
int err;
/* If decrypting, decrypt C_M with the block cipher to get P_M */
if (!rctx->enc)
crypto_cipher_decrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
rctx->rbuf.bytes);
/*
* Second hash step
* enc: C_R = C_M - H_{K_H}(T, C_L)
* dec: P_R = P_M - H_{K_H}(T, P_L)
*/
err = adiantum_hash_message(req, req->dst, &digest);
if (err)
return err;
le128_add(&digest, &digest, &rctx->header_hash);
le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->dst,
bulk_len, BLOCKCIPHER_BLOCK_SIZE, 1);
return 0;
}
static void adiantum_streamcipher_done(void *data, int err)
{
struct skcipher_request *req = data;
if (!err)
err = adiantum_finish(req);
skcipher_request_complete(req, err);
}
static int adiantum_crypt(struct skcipher_request *req, bool enc)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
unsigned int stream_len;
le128 digest;
int err;
if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
return -EINVAL;
rctx->enc = enc;
/*
* First hash step
* enc: P_M = P_R + H_{K_H}(T, P_L)
* dec: C_M = C_R + H_{K_H}(T, C_L)
*/
adiantum_hash_header(req);
err = adiantum_hash_message(req, req->src, &digest);
if (err)
return err;
le128_add(&digest, &digest, &rctx->header_hash);
scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->src,
bulk_len, BLOCKCIPHER_BLOCK_SIZE, 0);
le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
/* If encrypting, encrypt P_M with the block cipher to get C_M */
if (enc)
crypto_cipher_encrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
rctx->rbuf.bytes);
/* Initialize the rest of the XChaCha IV (first part is C_M) */
BUILD_BUG_ON(BLOCKCIPHER_BLOCK_SIZE != 16);
BUILD_BUG_ON(XCHACHA_IV_SIZE != 32); /* nonce || stream position */
rctx->rbuf.words[4] = cpu_to_le32(1);
rctx->rbuf.words[5] = 0;
rctx->rbuf.words[6] = 0;
rctx->rbuf.words[7] = 0;
/*
* XChaCha needs to be done on all the data except the last 16 bytes;
* for disk encryption that usually means 4080 or 496 bytes. But ChaCha
* implementations tend to be most efficient when passed a whole number
* of 64-byte ChaCha blocks, or sometimes even a multiple of 256 bytes.
* And here it doesn't matter whether the last 16 bytes are written to,
* as the second hash step will overwrite them. Thus, round the XChaCha
* length up to the next 64-byte boundary if possible.
*/
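	/*
	 * Illustrative arithmetic (added commentary): for a 4096-byte sector,
	 * bulk_len = 4096 - 16 = 4080 and round_up(4080, 64) = 4096 <=
	 * req->cryptlen, so the stream cipher runs over the full 4096 bytes.
	 * Likewise a 512-byte sector's bulk_len of 496 rounds up to 512.
	 * When rounding would overrun the request (e.g. cryptlen = 100,
	 * bulk_len = 84, round_up = 128 > 100), the exact bulk_len is kept.
	 */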
stream_len = bulk_len;
if (round_up(stream_len, CHACHA_BLOCK_SIZE) <= req->cryptlen)
stream_len = round_up(stream_len, CHACHA_BLOCK_SIZE);
skcipher_request_set_tfm(&rctx->u.streamcipher_req, tctx->streamcipher);
skcipher_request_set_crypt(&rctx->u.streamcipher_req, req->src,
req->dst, stream_len, &rctx->rbuf);
skcipher_request_set_callback(&rctx->u.streamcipher_req,
req->base.flags,
adiantum_streamcipher_done, req);
return crypto_skcipher_encrypt(&rctx->u.streamcipher_req) ?:
adiantum_finish(req);
}
static int adiantum_encrypt(struct skcipher_request *req)
{
return adiantum_crypt(req, true);
}
static int adiantum_decrypt(struct skcipher_request *req)
{
return adiantum_crypt(req, false);
}
static int adiantum_init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *streamcipher;
struct crypto_cipher *blockcipher;
struct crypto_shash *hash;
unsigned int subreq_size;
int err;
streamcipher = crypto_spawn_skcipher(&ictx->streamcipher_spawn);
if (IS_ERR(streamcipher))
return PTR_ERR(streamcipher);
blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn);
if (IS_ERR(blockcipher)) {
err = PTR_ERR(blockcipher);
goto err_free_streamcipher;
}
hash = crypto_spawn_shash(&ictx->hash_spawn);
if (IS_ERR(hash)) {
err = PTR_ERR(hash);
goto err_free_blockcipher;
}
tctx->streamcipher = streamcipher;
tctx->blockcipher = blockcipher;
tctx->hash = hash;
BUILD_BUG_ON(offsetofend(struct adiantum_request_ctx, u) !=
sizeof(struct adiantum_request_ctx));
subreq_size = max(sizeof_field(struct adiantum_request_ctx,
u.hash_desc) +
crypto_shash_descsize(hash),
sizeof_field(struct adiantum_request_ctx,
u.streamcipher_req) +
crypto_skcipher_reqsize(streamcipher));
crypto_skcipher_set_reqsize(tfm,
offsetof(struct adiantum_request_ctx, u) +
subreq_size);
return 0;
err_free_blockcipher:
crypto_free_cipher(blockcipher);
err_free_streamcipher:
crypto_free_skcipher(streamcipher);
return err;
}
static void adiantum_exit_tfm(struct crypto_skcipher *tfm)
{
struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
crypto_free_skcipher(tctx->streamcipher);
crypto_free_cipher(tctx->blockcipher);
crypto_free_shash(tctx->hash);
}
static void adiantum_free_instance(struct skcipher_instance *inst)
{
struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
crypto_drop_skcipher(&ictx->streamcipher_spawn);
crypto_drop_cipher(&ictx->blockcipher_spawn);
crypto_drop_shash(&ictx->hash_spawn);
kfree(inst);
}
/*
* Check for a supported set of inner algorithms.
* See the comment at the beginning of this file.
*/
static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg,
struct crypto_alg *blockcipher_alg,
struct shash_alg *hash_alg)
{
if (strcmp(streamcipher_alg->base.cra_name, "xchacha12") != 0 &&
strcmp(streamcipher_alg->base.cra_name, "xchacha20") != 0)
return false;
if (blockcipher_alg->cra_cipher.cia_min_keysize > BLOCKCIPHER_KEY_SIZE ||
blockcipher_alg->cra_cipher.cia_max_keysize < BLOCKCIPHER_KEY_SIZE)
return false;
if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
return false;
if (strcmp(hash_alg->base.cra_name, "nhpoly1305") != 0)
return false;
return true;
}
static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
{
u32 mask;
const char *nhpoly1305_name;
struct skcipher_instance *inst;
struct adiantum_instance_ctx *ictx;
struct skcipher_alg *streamcipher_alg;
struct crypto_alg *blockcipher_alg;
struct shash_alg *hash_alg;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
ictx = skcipher_instance_ctx(inst);
/* Stream cipher, e.g. "xchacha12" */
err = crypto_grab_skcipher(&ictx->streamcipher_spawn,
skcipher_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
/* Block cipher, e.g. "aes" */
err = crypto_grab_cipher(&ictx->blockcipher_spawn,
skcipher_crypto_instance(inst),
crypto_attr_alg_name(tb[2]), 0, mask);
if (err)
goto err_free_inst;
blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn);
/* NHPoly1305 ε-∆U hash function */
nhpoly1305_name = crypto_attr_alg_name(tb[3]);
if (nhpoly1305_name == ERR_PTR(-ENOENT))
nhpoly1305_name = "nhpoly1305";
err = crypto_grab_shash(&ictx->hash_spawn,
skcipher_crypto_instance(inst),
nhpoly1305_name, 0, mask);
if (err)
goto err_free_inst;
hash_alg = crypto_spawn_shash_alg(&ictx->hash_spawn);
/* Check the set of algorithms */
if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg,
hash_alg)) {
pr_warn("Unsupported Adiantum instantiation: (%s,%s,%s)\n",
streamcipher_alg->base.cra_name,
blockcipher_alg->cra_name, hash_alg->base.cra_name);
err = -EINVAL;
goto err_free_inst;
}
/* Instance fields */
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"adiantum(%s,%s)", streamcipher_alg->base.cra_name,
blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"adiantum(%s,%s,%s)",
streamcipher_alg->base.cra_driver_name,
blockcipher_alg->cra_driver_name,
hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask |
hash_alg->base.cra_alignmask;
/*
* The block cipher is only invoked once per message, so for long
* messages (e.g. sectors for disk encryption) its performance doesn't
* matter as much as that of the stream cipher and hash function. Thus,
* weigh the block cipher's ->cra_priority less.
*/
inst->alg.base.cra_priority = (4 * streamcipher_alg->base.cra_priority +
2 * hash_alg->base.cra_priority +
blockcipher_alg->cra_priority) / 7;
inst->alg.setkey = adiantum_setkey;
inst->alg.encrypt = adiantum_encrypt;
inst->alg.decrypt = adiantum_decrypt;
inst->alg.init = adiantum_init_tfm;
inst->alg.exit = adiantum_exit_tfm;
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(streamcipher_alg);
inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(streamcipher_alg);
inst->alg.ivsize = TWEAK_SIZE;
inst->free = adiantum_free_instance;
err = skcipher_register_instance(tmpl, inst);
if (err) {
err_free_inst:
adiantum_free_instance(inst);
}
return err;
}
/* adiantum(streamcipher_name, blockcipher_name [, nhpoly1305_name]) */
static struct crypto_template adiantum_tmpl = {
.name = "adiantum",
.create = adiantum_create,
.module = THIS_MODULE,
};
static int __init adiantum_module_init(void)
{
return crypto_register_template(&adiantum_tmpl);
}
static void __exit adiantum_module_exit(void)
{
crypto_unregister_template(&adiantum_tmpl);
}
subsys_initcall(adiantum_module_init);
module_exit(adiantum_module_exit);
MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <[email protected]>");
MODULE_ALIAS_CRYPTO("adiantum");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
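/*
 * Illustrative sketch (added commentary, not part of the original file):
 * how a kernel caller might drive an "adiantum(xchacha12,aes)" instance
 * through the generic skcipher API.  Assumes <crypto/skcipher.h> and
 * <linux/scatterlist.h> are available; the key and tweak values are
 * placeholders and error handling is abbreviated.
 */
static int __maybe_unused adiantum_usage_sketch(void)
{
	u8 key[32];			/* Adiantum uses the XChaCha key size */
	u8 iv[TWEAK_SIZE];		/* 32-byte tweak, e.g. a sector number */
	u8 buf[64];			/* any message of >= 16 bytes works */
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("adiantum(xchacha12,aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	memset(key, 0x41, sizeof(key));	/* fixed key for illustration only */
	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	memset(buf, 0, sizeof(buf));
	memset(iv, 0, sizeof(iv));
	sg_init_one(&sg, buf, sizeof(buf));
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}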
| linux-master | crypto/adiantum.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C)2006 USAGI/WIDE Project
*
* Author:
* Kazunori Miyazawa <[email protected]>
*/
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
0x02020202, 0x02020202, 0x02020202, 0x02020202,
0x03030303, 0x03030303, 0x03030303, 0x03030303};
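/*
 * Added commentary: these are the key-derivation constants from RFC 3566.
 * The setkey handler below computes K1 = E_K(0x01..01), K2 = E_K(0x02..02)
 * and K3 = E_K(0x03..03); K2 and K3 are stored in 'consts' for use on the
 * final block, and the child cipher is then re-keyed with K1 for the
 * CBC-MAC core.
 */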
/*
* +------------------------
* | <parent tfm>
* +------------------------
* | xcbc_tfm_ctx
* +------------------------
* | consts (block size * 2)
* +------------------------
*/
struct xcbc_tfm_ctx {
struct crypto_cipher *child;
u8 ctx[];
};
/*
* +------------------------
* | <shash desc>
* +------------------------
* | xcbc_desc_ctx
* +------------------------
* | odds (block size)
* +------------------------
* | prev (block size)
* +------------------------
*/
struct xcbc_desc_ctx {
unsigned int len;
u8 ctx[];
};
#define XCBC_BLOCKSIZE 16
static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
unsigned long alignmask = crypto_shash_alignmask(parent);
struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent);
u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
int err = 0;
u8 key1[XCBC_BLOCKSIZE];
int bs = sizeof(key1);
if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen)))
return err;
crypto_cipher_encrypt_one(ctx->child, consts, (u8 *)ks + bs);
crypto_cipher_encrypt_one(ctx->child, consts + bs, (u8 *)ks + bs * 2);
crypto_cipher_encrypt_one(ctx->child, key1, (u8 *)ks);
return crypto_cipher_setkey(ctx->child, key1, bs);
}
static int crypto_xcbc_digest_init(struct shash_desc *pdesc)
{
unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm);
struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
int bs = crypto_shash_blocksize(pdesc->tfm);
u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs;
ctx->len = 0;
memset(prev, 0, bs);
return 0;
}
static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p,
unsigned int len)
{
struct crypto_shash *parent = pdesc->tfm;
unsigned long alignmask = crypto_shash_alignmask(parent);
struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
u8 *prev = odds + bs;
	/* check whether the data still fits within the current block */
if ((ctx->len + len) <= bs) {
memcpy(odds + ctx->len, p, len);
ctx->len += len;
return 0;
}
	/* fill 'odds' with the new data and encrypt the now-complete block */
memcpy(odds + ctx->len, p, bs - ctx->len);
len -= bs - ctx->len;
p += bs - ctx->len;
crypto_xor(prev, odds, bs);
crypto_cipher_encrypt_one(tfm, prev, prev);
/* clearing the length */
ctx->len = 0;
	/* encrypt all complete blocks except the final one */
while (len > bs) {
crypto_xor(prev, p, bs);
crypto_cipher_encrypt_one(tfm, prev, prev);
p += bs;
len -= bs;
}
	/* stash the remainder (at most one block) for the next update/final */
if (len) {
memcpy(odds, p, len);
ctx->len = len;
}
return 0;
}
static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
unsigned long alignmask = crypto_shash_alignmask(parent);
struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_blocksize(parent);
u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1);
u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
u8 *prev = odds + bs;
unsigned int offset = 0;
if (ctx->len != bs) {
unsigned int rlen;
u8 *p = odds + ctx->len;
*p = 0x80;
p++;
		rlen = bs - ctx->len - 1;
if (rlen)
memset(p, 0, rlen);
offset += bs;
}
crypto_xor(prev, odds, bs);
crypto_xor(prev, consts + offset, bs);
crypto_cipher_encrypt_one(tfm, out, prev);
return 0;
}
static int xcbc_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_cipher *cipher;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
return 0;
}
static void xcbc_exit_tfm(struct crypto_tfm *tfm)
{
struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(ctx->child);
}
static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct shash_instance *inst;
struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
unsigned long alignmask;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = shash_instance_ctx(inst);
err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_cipher_alg(spawn);
err = -EINVAL;
if (alg->cra_blocksize != XCBC_BLOCKSIZE)
goto err_free_inst;
err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
if (err)
goto err_free_inst;
alignmask = alg->cra_alignmask | 3;
inst->alg.base.cra_alignmask = alignmask;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.digestsize = alg->cra_blocksize;
inst->alg.descsize = ALIGN(sizeof(struct xcbc_desc_ctx),
crypto_tfm_ctx_alignment()) +
(alignmask &
~(crypto_tfm_ctx_alignment() - 1)) +
alg->cra_blocksize * 2;
inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx),
alignmask + 1) +
alg->cra_blocksize * 2;
inst->alg.base.cra_init = xcbc_init_tfm;
inst->alg.base.cra_exit = xcbc_exit_tfm;
inst->alg.init = crypto_xcbc_digest_init;
inst->alg.update = crypto_xcbc_digest_update;
inst->alg.final = crypto_xcbc_digest_final;
inst->alg.setkey = crypto_xcbc_digest_setkey;
inst->free = shash_free_singlespawn_instance;
err = shash_register_instance(tmpl, inst);
if (err) {
err_free_inst:
shash_free_singlespawn_instance(inst);
}
return err;
}
static struct crypto_template crypto_xcbc_tmpl = {
.name = "xcbc",
.create = xcbc_create,
.module = THIS_MODULE,
};
static int __init crypto_xcbc_module_init(void)
{
return crypto_register_template(&crypto_xcbc_tmpl);
}
static void __exit crypto_xcbc_module_exit(void)
{
crypto_unregister_template(&crypto_xcbc_tmpl);
}
subsys_initcall(crypto_xcbc_module_init);
module_exit(crypto_xcbc_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XCBC keyed hash algorithm");
MODULE_ALIAS_CRYPTO("xcbc");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
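/*
 * Illustrative sketch (added commentary, not part of the original file):
 * computing AES-XCBC-MAC over a flat buffer with the shash API.  Assumes
 * <crypto/hash.h>; 'key' is an AES-128 key and 'mac' receives 16 bytes.
 */
static int __maybe_unused xcbc_usage_sketch(const u8 *key, const u8 *data,
					    unsigned int len, u8 *mac)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("xcbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, 16);
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, data, len, mac);
	}

	crypto_free_shash(tfm);
	return err;
}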
| linux-master | crypto/xcbc.c |
/*
* CTS: Cipher Text Stealing mode
*
* COPYRIGHT (c) 2008
* The Regents of the University of Michigan
* ALL RIGHTS RESERVED
*
* Permission is granted to use, copy, create derivative works
* and redistribute this software and such derivative works
* for any purpose, so long as the name of The University of
* Michigan is not used in any advertising or publicity
* pertaining to the use of distribution of this software
* without specific, written prior authorization. If the
* above copyright notice or any other identification of the
* University of Michigan is included in any copy of any
* portion of this software, then the disclaimer below must
* also be included.
*
* THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
* FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
* PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
* MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
* WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
* REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
* FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
* CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
* OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
* IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGES.
*/
/* Derived from various:
* Copyright (c) 2006 Herbert Xu <[email protected]>
*/
/*
* This is the Cipher Text Stealing mode as described by
* Section 8 of rfc2040 and referenced by rfc3962.
* rfc3962 includes errata information in its Appendix A.
*/
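/*
 * Added commentary, a small worked example: encrypting 25 bytes with
 * cts(cbc(aes)) (16-byte blocks) CBC-encrypts the first 16 bytes normally,
 * then cts_cbc_encrypt() below zero-pads the trailing 9 plaintext bytes,
 * encrypts them chained to the preceding ciphertext block, and emits the
 * last two blocks swapped: the full final block first, then the preceding
 * block truncated to 9 bytes.  The output is still exactly 25 bytes.
 */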
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <linux/slab.h>
#include <linux/compiler.h>
struct crypto_cts_ctx {
struct crypto_skcipher *child;
};
struct crypto_cts_reqctx {
struct scatterlist sg[2];
unsigned offset;
struct skcipher_request subreq;
};
static inline u8 *crypto_cts_reqctx_space(struct skcipher_request *req)
{
struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *child = ctx->child;
return PTR_ALIGN((u8 *)(rctx + 1) + crypto_skcipher_reqsize(child),
crypto_skcipher_alignmask(tfm) + 1);
}
static int crypto_cts_setkey(struct crypto_skcipher *parent, const u8 *key,
unsigned int keylen)
{
struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_skcipher *child = ctx->child;
crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
return crypto_skcipher_setkey(child, key, keylen);
}
static void cts_cbc_crypt_done(void *data, int err)
{
struct skcipher_request *req = data;
if (err == -EINPROGRESS)
return;
skcipher_request_complete(req, err);
}
static int cts_cbc_encrypt(struct skcipher_request *req)
{
struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct skcipher_request *subreq = &rctx->subreq;
int bsize = crypto_skcipher_blocksize(tfm);
u8 d[MAX_CIPHER_BLOCKSIZE * 2] __aligned(__alignof__(u32));
struct scatterlist *sg;
unsigned int offset;
int lastn;
offset = rctx->offset;
lastn = req->cryptlen - offset;
sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize);
scatterwalk_map_and_copy(d + bsize, sg, 0, bsize, 0);
memset(d, 0, bsize);
scatterwalk_map_and_copy(d, req->src, offset, lastn, 0);
scatterwalk_map_and_copy(d, sg, 0, bsize + lastn, 1);
memzero_explicit(d, sizeof(d));
skcipher_request_set_callback(subreq, req->base.flags &
CRYPTO_TFM_REQ_MAY_BACKLOG,
cts_cbc_crypt_done, req);
skcipher_request_set_crypt(subreq, sg, sg, bsize, req->iv);
return crypto_skcipher_encrypt(subreq);
}
static void crypto_cts_encrypt_done(void *data, int err)
{
struct skcipher_request *req = data;
if (err)
goto out;
err = cts_cbc_encrypt(req);
if (err == -EINPROGRESS || err == -EBUSY)
return;
out:
skcipher_request_complete(req, err);
}
static int crypto_cts_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_request *subreq = &rctx->subreq;
int bsize = crypto_skcipher_blocksize(tfm);
unsigned int nbytes = req->cryptlen;
unsigned int offset;
skcipher_request_set_tfm(subreq, ctx->child);
if (nbytes < bsize)
return -EINVAL;
if (nbytes == bsize) {
skcipher_request_set_callback(subreq, req->base.flags,
req->base.complete,
req->base.data);
skcipher_request_set_crypt(subreq, req->src, req->dst, nbytes,
req->iv);
return crypto_skcipher_encrypt(subreq);
}
offset = rounddown(nbytes - 1, bsize);
rctx->offset = offset;
skcipher_request_set_callback(subreq, req->base.flags,
crypto_cts_encrypt_done, req);
skcipher_request_set_crypt(subreq, req->src, req->dst,
offset, req->iv);
return crypto_skcipher_encrypt(subreq) ?:
cts_cbc_encrypt(req);
}
static int cts_cbc_decrypt(struct skcipher_request *req)
{
struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct skcipher_request *subreq = &rctx->subreq;
int bsize = crypto_skcipher_blocksize(tfm);
u8 d[MAX_CIPHER_BLOCKSIZE * 2] __aligned(__alignof__(u32));
struct scatterlist *sg;
unsigned int offset;
u8 *space;
int lastn;
offset = rctx->offset;
lastn = req->cryptlen - offset;
sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize);
/* 1. Decrypt Cn-1 (s) to create Dn */
scatterwalk_map_and_copy(d + bsize, sg, 0, bsize, 0);
space = crypto_cts_reqctx_space(req);
crypto_xor(d + bsize, space, bsize);
/* 2. Pad Cn with zeros at the end to create C of length BB */
memset(d, 0, bsize);
scatterwalk_map_and_copy(d, req->src, offset, lastn, 0);
/* 3. Exclusive-or Dn with C to create Xn */
/* 4. Select the first Ln bytes of Xn to create Pn */
crypto_xor(d + bsize, d, lastn);
/* 5. Append the tail (BB - Ln) bytes of Xn to Cn to create En */
memcpy(d + lastn, d + bsize + lastn, bsize - lastn);
/* 6. Decrypt En to create Pn-1 */
scatterwalk_map_and_copy(d, sg, 0, bsize + lastn, 1);
memzero_explicit(d, sizeof(d));
skcipher_request_set_callback(subreq, req->base.flags &
CRYPTO_TFM_REQ_MAY_BACKLOG,
cts_cbc_crypt_done, req);
skcipher_request_set_crypt(subreq, sg, sg, bsize, space);
return crypto_skcipher_decrypt(subreq);
}
static void crypto_cts_decrypt_done(void *data, int err)
{
struct skcipher_request *req = data;
if (err)
goto out;
err = cts_cbc_decrypt(req);
if (err == -EINPROGRESS || err == -EBUSY)
return;
out:
skcipher_request_complete(req, err);
}
static int crypto_cts_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_request *subreq = &rctx->subreq;
int bsize = crypto_skcipher_blocksize(tfm);
unsigned int nbytes = req->cryptlen;
unsigned int offset;
u8 *space;
skcipher_request_set_tfm(subreq, ctx->child);
if (nbytes < bsize)
return -EINVAL;
if (nbytes == bsize) {
skcipher_request_set_callback(subreq, req->base.flags,
req->base.complete,
req->base.data);
skcipher_request_set_crypt(subreq, req->src, req->dst, nbytes,
req->iv);
return crypto_skcipher_decrypt(subreq);
}
skcipher_request_set_callback(subreq, req->base.flags,
crypto_cts_decrypt_done, req);
space = crypto_cts_reqctx_space(req);
offset = rounddown(nbytes - 1, bsize);
rctx->offset = offset;
if (offset <= bsize)
memcpy(space, req->iv, bsize);
else
scatterwalk_map_and_copy(space, req->src, offset - 2 * bsize,
bsize, 0);
skcipher_request_set_crypt(subreq, req->src, req->dst,
offset, req->iv);
return crypto_skcipher_decrypt(subreq) ?:
cts_cbc_decrypt(req);
}
static int crypto_cts_init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *cipher;
unsigned reqsize;
unsigned bsize;
unsigned align;
cipher = crypto_spawn_skcipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
align = crypto_skcipher_alignmask(tfm);
bsize = crypto_skcipher_blocksize(cipher);
reqsize = ALIGN(sizeof(struct crypto_cts_reqctx) +
crypto_skcipher_reqsize(cipher),
crypto_tfm_ctx_alignment()) +
(align & ~(crypto_tfm_ctx_alignment() - 1)) + bsize;
crypto_skcipher_set_reqsize(tfm, reqsize);
return 0;
}
static void crypto_cts_exit_tfm(struct crypto_skcipher *tfm)
{
struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_skcipher(ctx->child);
}
static void crypto_cts_free(struct skcipher_instance *inst)
{
crypto_drop_skcipher(skcipher_instance_ctx(inst));
kfree(inst);
}
static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_skcipher_spawn *spawn;
struct skcipher_instance *inst;
struct skcipher_alg *alg;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = skcipher_instance_ctx(inst);
err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_skcipher_alg(spawn);
err = -EINVAL;
if (crypto_skcipher_alg_ivsize(alg) != alg->base.cra_blocksize)
goto err_free_inst;
if (strncmp(alg->base.cra_name, "cbc(", 4))
goto err_free_inst;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "cts",
&alg->base);
if (err)
goto err_free_inst;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
inst->alg.ivsize = alg->base.cra_blocksize;
inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
inst->alg.base.cra_ctxsize = sizeof(struct crypto_cts_ctx);
inst->alg.init = crypto_cts_init_tfm;
inst->alg.exit = crypto_cts_exit_tfm;
inst->alg.setkey = crypto_cts_setkey;
inst->alg.encrypt = crypto_cts_encrypt;
inst->alg.decrypt = crypto_cts_decrypt;
inst->free = crypto_cts_free;
err = skcipher_register_instance(tmpl, inst);
if (err) {
err_free_inst:
crypto_cts_free(inst);
}
return err;
}
static struct crypto_template crypto_cts_tmpl = {
.name = "cts",
.create = crypto_cts_create,
.module = THIS_MODULE,
};
static int __init crypto_cts_module_init(void)
{
return crypto_register_template(&crypto_cts_tmpl);
}
static void __exit crypto_cts_module_exit(void)
{
crypto_unregister_template(&crypto_cts_tmpl);
}
subsys_initcall(crypto_cts_module_init);
module_exit(crypto_cts_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("CTS-CBC CipherText Stealing for CBC");
MODULE_ALIAS_CRYPTO("cts");
| linux-master | crypto/cts.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Common lookup tables for CAST-128 (cast5) and CAST-256 (cast6)
*
* Copyright © 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
* Copyright © 2003 Kartikey Mahendra Bhatt <[email protected]>
* Copyright © 2012 Jussi Kivilinna <[email protected]>
*/
#include <linux/module.h>
#include <crypto/cast_common.h>
__visible const u32 cast_s1[256] = {
0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f,
0x9c004dd3, 0x6003e540, 0xcf9fc949,
0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0,
0x15c361d2, 0xc2e7661d, 0x22d4ff8e,
0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3,
0xdf2f8656, 0x887ca41a, 0xa2d2bd2d,
0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1,
0xaa54166b, 0x22568e3a, 0xa2d341d0,
0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac,
0x4a97c1d8, 0x527644b7, 0xb5f437a7,
0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0,
0x90ecf52e, 0x22b0c054, 0xbc8e5935,
0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290,
0xe93b159f, 0xb48ee411, 0x4bff345d,
0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad,
0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50,
0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f,
0xc59c5319, 0xb949e354, 0xb04669fe,
0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5,
0x6a390493, 0xe63d37e0, 0x2a54f6b3,
0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5,
0xf61b1891, 0xbb72275e, 0xaa508167,
0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427,
0xa2d1936b, 0x2ad286af, 0xaa56d291,
0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d,
0x73e2bb14, 0xa0bebc3c, 0x54623779,
0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e,
0x89fe78e6, 0x3fab0950, 0x325ff6c2,
0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf,
0x380782d5, 0xc7fa5cf6, 0x8ac31511,
0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241,
0x051ef495, 0xaa573b04, 0x4a805d8d,
0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b,
0x50afd341, 0xa7c13275, 0x915a0bf5,
0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265,
0xab85c5f3, 0x1b55db94, 0xaad4e324,
0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3,
0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c,
0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6,
0x22513f1e, 0xaa51a79b, 0x2ad344cc,
0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6,
0x032268d4, 0xc9600acc, 0xce387e6d,
0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da,
0x4736f464, 0x5ad328d8, 0xb347cc96,
0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc,
0xbfc5fe4a, 0xa70aec10, 0xac39570a,
0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f,
0x1cacd68d, 0x2ad37c96, 0x0175cb9d,
0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4,
0xb11c3274, 0xdd24cb9e, 0x7e1c54bd,
0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af,
0x51c85f4d, 0x56907596, 0xa5bb15e6,
0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a,
0x3526ffa0, 0xc37b4d09, 0xbc306ed9,
0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf,
0x700b45e1, 0xd5ea50f1, 0x85a92872,
0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198,
0x0cd0ede7, 0x26470db8, 0xf881814c,
0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db,
0xab838653, 0x6e2f1e23, 0x83719c9e,
0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c,
0xe1e696ff, 0xb141ab08, 0x7cca89b9,
0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c,
0x5ac9f049, 0xdd8f0f00, 0x5c8165bf
};
EXPORT_SYMBOL_GPL(cast_s1);
__visible const u32 cast_s2[256] = {
0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a,
0xeec5207a, 0x55889c94, 0x72fc0651,
0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef,
0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3,
0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086,
0xef944459, 0xba83ccb3, 0xe0c3cdfb,
0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb,
0xe4e7ef5b, 0x25a1ff41, 0xe180f806,
0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f,
0x77e83f4e, 0x79929269, 0x24fa9f7b,
0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154,
0x0d554b63, 0x5d681121, 0xc866c359,
0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181,
0x39f7627f, 0x361e3084, 0xe4eb573b,
0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c,
0x99847ab4, 0xa0e3df79, 0xba6cf38c,
0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a,
0x8f458c74, 0xd9e0a227, 0x4ec73a34,
0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c,
0x1d804366, 0x721d9bfd, 0xa58684bb,
0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1,
0x27e19ba5, 0xd5a6c252, 0xe49754bd,
0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9,
0xe0b56714, 0x21f043b7, 0xe5d05860,
0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf,
0x68561be6, 0x83ca6b94, 0x2d6ed23b,
0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c,
0x397bc8d6, 0x5ee22b95, 0x5f0e5304,
0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122,
0xb96726d1, 0x8049a7e8, 0x22b7da7b,
0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402,
0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf,
0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53,
0xe3214517, 0xb4542835, 0x9f63293c,
0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6,
0x30a22c95, 0x31a70850, 0x60930f13,
0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6,
0xa02b1741, 0x7cbad9a2, 0x2180036f,
0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676,
0x25a75e7b, 0xe4e6d1fc, 0x20c710e6,
0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb,
0x846a3bae, 0x8ff77888, 0xee5d60f6,
0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54,
0x157fd7fa, 0xef8579cc, 0xd152de58,
0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5,
0xc242fa0f, 0xa7e3ebb0, 0xc68e4906,
0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8,
0xbec0c560, 0x61a3c9e8, 0xbca8f54d,
0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc,
0x301e16e6, 0x273be979, 0xb0ffeaa6,
0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a,
0xf7e19798, 0x7619b72f, 0x8f1c9ba4,
0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e,
0x1a513742, 0xef6828bc, 0x520365d6,
0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb,
0x5eea29cb, 0x145892f5, 0x91584f7f,
0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4,
0x0d23e0f9, 0x6c387e8a, 0x0ae6d249,
0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3,
0x230eabb0, 0x6438bc87, 0xf0b5b1fa,
0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589,
0xa345415e, 0x5c038323, 0x3e5d3bb9,
0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539,
0x73bfbe70, 0x83877605, 0x4523ecf1
};
EXPORT_SYMBOL_GPL(cast_s2);
__visible const u32 cast_s3[256] = {
0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff,
0x369fe44b, 0x8c1fc644, 0xaececa90,
0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806,
0xf0ad0548, 0xe13c8d83, 0x927010d5,
0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820,
0xfade82e0, 0xa067268b, 0x8272792e,
0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee,
0x825b1bfd, 0x9255c5ed, 0x1257a240,
0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf,
0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5,
0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1,
0x1fb78dfc, 0x8e6bd2c1, 0x437be59b,
0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c,
0x4a012d6e, 0xc5884a28, 0xccc36f71,
0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850,
0xd7c07f7e, 0x02507fbf, 0x5afb9a04,
0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e,
0x727cc3c4, 0x0a0fb402, 0x0f7fef82,
0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0,
0x1eac5790, 0x796fb449, 0x8252dc15,
0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403,
0xe83ec305, 0x4f91751a, 0x925669c2,
0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574,
0x927985b2, 0x8276dbcb, 0x02778176,
0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83,
0x340ce5c8, 0x96bbb682, 0x93b4b148,
0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20,
0x8437aa88, 0x7d29dc96, 0x2756d3dc,
0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e,
0x3cf8209d, 0x6094d1e3, 0xcd9ca341,
0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9,
0xbda8229c, 0x127dadaa, 0x438a074e,
0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff,
0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51,
0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a,
0x76a2e214, 0xb9a40368, 0x925d958f,
0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623,
0x193cbcfa, 0x27627545, 0x825cf47a,
0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7,
0x8272a972, 0x9270c4a8, 0x127de50b,
0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb,
0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b,
0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11,
0x236a5cae, 0x12deca4d, 0x2c3f8cc5,
0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c,
0xb9b6a80c, 0x5c8f82bc, 0x89d36b45,
0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40,
0x7c34671c, 0x02717ef6, 0x4feb5536,
0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1,
0x006e1888, 0xa2e53f55, 0xb9e6d4bc,
0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33,
0xabcc4f33, 0x7688c55d, 0x7b00a6b0,
0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff,
0x856302e0, 0x72dbd92b, 0xee971b69,
0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2,
0x61efc8c2, 0xf1ac2571, 0xcc8239c2,
0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38,
0x0ff0443d, 0x606e6dc6, 0x60543a49,
0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f,
0x68458425, 0x99833be5, 0x600d457d,
0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31,
0x9c305a00, 0x52bce688, 0x1b03588a,
0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636,
0xa133c501, 0xe9d3531c, 0xee353783
};
EXPORT_SYMBOL_GPL(cast_s3);
__visible const u32 cast_s4[256] = {
0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb,
0x64ad8c57, 0x85510443, 0xfa020ed1,
0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43,
0x6497b7b1, 0xf3641f63, 0x241e4adf,
0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30,
0xc0a5374f, 0x1d2d00d9, 0x24147b15,
0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f,
0x0c13fefe, 0x081b08ca, 0x05170121,
0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f,
0x06df4261, 0xbb9e9b8a, 0x7293ea25,
0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400,
0x547eebe6, 0x446d4ca0, 0x6cf3d6f5,
0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061,
0x11b638e1, 0x72500e03, 0xf80eb2bb,
0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400,
0x6920318f, 0x081dbb99, 0xffc304a5,
0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea,
0x9f926f91, 0x9f46222f, 0x3991467d,
0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8,
0x3fb6180c, 0x18f8931e, 0x281658e6,
0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25,
0x79098b02, 0xe4eabb81, 0x28123b23,
0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9,
0x0014377b, 0x041e8ac8, 0x09114003,
0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de,
0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6,
0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0,
0x56c8c391, 0x6b65811c, 0x5e146119,
0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d,
0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24,
0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a,
0xeca1d7c7, 0x041afa32, 0x1d16625a,
0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb,
0xc70b8b46, 0xd9e66a48, 0x56e55a79,
0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3,
0xedda04eb, 0x17a9be04, 0x2c18f4df,
0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254,
0xe5b6a035, 0x213d42f6, 0x2c1c7c26,
0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2,
0x0418f2c8, 0x001a96a6, 0x0d1526ab,
0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86,
0x311170a7, 0x3e9b640c, 0xcc3e10d7,
0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1,
0x1f9af36e, 0xcfcbd12f, 0xc1de8417,
0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca,
0xb4be31cd, 0xd8782806, 0x12a3a4e2,
0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5,
0x9711aac5, 0x001d7b95, 0x82e5e7d2,
0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415,
0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a,
0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7,
0x0ce454a9, 0xd60acd86, 0x015f1919,
0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe,
0x8b75e387, 0xb3c50651, 0xb8a5c3ef,
0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb,
0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876,
0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8,
0x296b299e, 0x492fc295, 0x9266beab,
0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee,
0xf65324e6, 0x6afce36c, 0x0316cc04,
0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979,
0x932bcdf6, 0xb657c34d, 0x4edfd282,
0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0,
0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2
};
EXPORT_SYMBOL_GPL(cast_s4);
MODULE_LICENSE("GPL");
| linux-master | crypto/cast_common.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Scatterlist Cryptographic API.
*
* Procfs information.
*
* Copyright (c) 2002 James Morris <[email protected]>
* Copyright (c) 2005 Herbert Xu <[email protected]>
*/
#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/fips.h>
#include <linux/module.h> /* for module_name() */
#include <linux/rwsem.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "internal.h"
static void *c_start(struct seq_file *m, loff_t *pos)
{
down_read(&crypto_alg_sem);
return seq_list_start(&crypto_alg_list, *pos);
}
static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
return seq_list_next(p, &crypto_alg_list, pos);
}
static void c_stop(struct seq_file *m, void *p)
{
up_read(&crypto_alg_sem);
}
static int c_show(struct seq_file *m, void *p)
{
struct crypto_alg *alg = list_entry(p, struct crypto_alg, cra_list);
seq_printf(m, "name : %s\n", alg->cra_name);
seq_printf(m, "driver : %s\n", alg->cra_driver_name);
seq_printf(m, "module : %s\n", module_name(alg->cra_module));
seq_printf(m, "priority : %d\n", alg->cra_priority);
seq_printf(m, "refcnt : %u\n", refcount_read(&alg->cra_refcnt));
seq_printf(m, "selftest : %s\n",
(alg->cra_flags & CRYPTO_ALG_TESTED) ?
"passed" : "unknown");
seq_printf(m, "internal : %s\n",
(alg->cra_flags & CRYPTO_ALG_INTERNAL) ?
"yes" : "no");
if (fips_enabled) {
seq_printf(m, "fips : %s\n",
(alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL) ?
"no" : "yes");
}
if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
seq_printf(m, "type : larval\n");
seq_printf(m, "flags : 0x%x\n", alg->cra_flags);
goto out;
}
if (alg->cra_type && alg->cra_type->show) {
alg->cra_type->show(m, alg);
goto out;
}
switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_CIPHER:
seq_printf(m, "type : cipher\n");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "min keysize : %u\n",
alg->cra_cipher.cia_min_keysize);
seq_printf(m, "max keysize : %u\n",
alg->cra_cipher.cia_max_keysize);
break;
case CRYPTO_ALG_TYPE_COMPRESS:
seq_printf(m, "type : compression\n");
break;
default:
seq_printf(m, "type : unknown\n");
break;
}
out:
seq_putc(m, '\n');
return 0;
}
static const struct seq_operations crypto_seq_ops = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = c_show
};
void __init crypto_init_proc(void)
{
proc_create_seq("crypto", 0, NULL, &crypto_seq_ops);
}
void __exit crypto_exit_proc(void)
{
remove_proc_entry("crypto", NULL);
}
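/*
 * Added commentary: a representative /proc/crypto entry as emitted by
 * c_show() above for a bare cipher (field values are illustrative):
 *
 *	name         : aes
 *	driver       : aes-generic
 *	module       : kernel
 *	priority     : 100
 *	refcnt       : 1
 *	selftest     : passed
 *	internal     : no
 *	type         : cipher
 *	blocksize    : 16
 *	min keysize  : 16
 *	max keysize  : 32
 */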
| linux-master | crypto/proc.c |
/*
* algif_rng: User-space interface for random number generators
*
* This file provides the user-space API for random number generators.
*
* Copyright (C) 2014, Stephan Mueller <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, in which case the provisions of the GPL2
* are required INSTEAD OF the above restrictions. (This clause is
* necessary due to a potential bad interaction between the GPL and
* the restrictions contained in a BSD-style copyright.)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
* WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <crypto/rng.h>
#include <linux/random.h>
#include <crypto/if_alg.h>
#include <linux/net.h>
#include <net/sock.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <[email protected]>");
MODULE_DESCRIPTION("User-space interface for random number generators");
struct rng_ctx {
#define MAXSIZE 128
unsigned int len;
struct crypto_rng *drng;
u8 *addtl;
size_t addtl_len;
};
struct rng_parent_ctx {
struct crypto_rng *drng;
u8 *entropy;
};
static void rng_reset_addtl(struct rng_ctx *ctx)
{
kfree_sensitive(ctx->addtl);
ctx->addtl = NULL;
ctx->addtl_len = 0;
}
static int _rng_recvmsg(struct crypto_rng *drng, struct msghdr *msg, size_t len,
u8 *addtl, size_t addtl_len)
{
int err = 0;
int genlen = 0;
u8 result[MAXSIZE];
if (len == 0)
return 0;
if (len > MAXSIZE)
len = MAXSIZE;
/*
* although not strictly needed, this is a precaution against coding
* errors
*/
memset(result, 0, len);
/*
* The enforcement of a proper seeding of an RNG is done within an
* RNG implementation. Some RNGs (DRBG, krng) do not need specific
* seeding as they automatically seed. The X9.31 DRNG will return
* an error if it was not seeded properly.
*/
genlen = crypto_rng_generate(drng, addtl, addtl_len, result, len);
if (genlen < 0)
return genlen;
err = memcpy_to_msg(msg, result, len);
memzero_explicit(result, len);
return err ? err : len;
}
static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct rng_ctx *ctx = ask->private;
return _rng_recvmsg(ctx->drng, msg, len, NULL, 0);
}
static int rng_test_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct rng_ctx *ctx = ask->private;
int ret;
lock_sock(sock->sk);
ret = _rng_recvmsg(ctx->drng, msg, len, ctx->addtl, ctx->addtl_len);
rng_reset_addtl(ctx);
release_sock(sock->sk);
return ret;
}
static int rng_test_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
int err;
struct alg_sock *ask = alg_sk(sock->sk);
struct rng_ctx *ctx = ask->private;
lock_sock(sock->sk);
if (len > MAXSIZE) {
err = -EMSGSIZE;
goto unlock;
}
rng_reset_addtl(ctx);
ctx->addtl = kmalloc(len, GFP_KERNEL);
if (!ctx->addtl) {
err = -ENOMEM;
goto unlock;
}
err = memcpy_from_msg(ctx->addtl, msg, len);
if (err) {
rng_reset_addtl(ctx);
goto unlock;
}
ctx->addtl_len = len;
unlock:
release_sock(sock->sk);
return err ? err : len;
}
static struct proto_ops algif_rng_ops = {
.family = PF_ALG,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.accept = sock_no_accept,
.sendmsg = sock_no_sendmsg,
.release = af_alg_release,
.recvmsg = rng_recvmsg,
};
static struct proto_ops __maybe_unused algif_rng_test_ops = {
.family = PF_ALG,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.accept = sock_no_accept,
.release = af_alg_release,
.recvmsg = rng_test_recvmsg,
.sendmsg = rng_test_sendmsg,
};
static void *rng_bind(const char *name, u32 type, u32 mask)
{
struct rng_parent_ctx *pctx;
struct crypto_rng *rng;
pctx = kzalloc(sizeof(*pctx), GFP_KERNEL);
if (!pctx)
return ERR_PTR(-ENOMEM);
rng = crypto_alloc_rng(name, type, mask);
if (IS_ERR(rng)) {
kfree(pctx);
return ERR_CAST(rng);
}
pctx->drng = rng;
return pctx;
}
static void rng_release(void *private)
{
struct rng_parent_ctx *pctx = private;
if (unlikely(!pctx))
return;
crypto_free_rng(pctx->drng);
kfree_sensitive(pctx->entropy);
kfree_sensitive(pctx);
}
static void rng_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct rng_ctx *ctx = ask->private;
rng_reset_addtl(ctx);
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}
static int rng_accept_parent(void *private, struct sock *sk)
{
struct rng_ctx *ctx;
struct rng_parent_ctx *pctx = private;
struct alg_sock *ask = alg_sk(sk);
unsigned int len = sizeof(*ctx);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->len = len;
ctx->addtl = NULL;
ctx->addtl_len = 0;
/*
* No seeding done at that point -- if multiple accepts are
* done on one RNG instance, each resulting FD points to the same
* state of the RNG.
*/
ctx->drng = pctx->drng;
ask->private = ctx;
sk->sk_destruct = rng_sock_destruct;
/*
* Non NULL pctx->entropy means that CAVP test has been initiated on
* this socket, replace proto_ops algif_rng_ops with algif_rng_test_ops.
*/
if (IS_ENABLED(CONFIG_CRYPTO_USER_API_RNG_CAVP) && pctx->entropy)
sk->sk_socket->ops = &algif_rng_test_ops;
return 0;
}
static int rng_setkey(void *private, const u8 *seed, unsigned int seedlen)
{
struct rng_parent_ctx *pctx = private;
/*
* Check whether seedlen is of sufficient size is done in RNG
* implementations.
*/
return crypto_rng_reset(pctx->drng, seed, seedlen);
}
static int __maybe_unused rng_setentropy(void *private, sockptr_t entropy,
unsigned int len)
{
struct rng_parent_ctx *pctx = private;
u8 *kentropy = NULL;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (pctx->entropy)
return -EINVAL;
if (len > MAXSIZE)
return -EMSGSIZE;
if (len) {
kentropy = memdup_sockptr(entropy, len);
if (IS_ERR(kentropy))
return PTR_ERR(kentropy);
}
crypto_rng_alg(pctx->drng)->set_ent(pctx->drng, kentropy, len);
/*
* Since rng doesn't perform any memory management for the entropy
* buffer, save kentropy pointer to pctx now to free it after use.
*/
pctx->entropy = kentropy;
return 0;
}
static const struct af_alg_type algif_type_rng = {
.bind = rng_bind,
.release = rng_release,
.accept = rng_accept_parent,
.setkey = rng_setkey,
#ifdef CONFIG_CRYPTO_USER_API_RNG_CAVP
.setentropy = rng_setentropy,
#endif
.ops = &algif_rng_ops,
.name = "rng",
.owner = THIS_MODULE
};
static int __init rng_init(void)
{
return af_alg_register_type(&algif_type_rng);
}
static void __exit rng_exit(void)
{
int err = af_alg_unregister_type(&algif_type_rng);
BUG_ON(err);
}
module_init(rng_init);
module_exit(rng_exit);
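/*
 * Illustrative sketch (added commentary, not part of the original file):
 * consuming this interface from user space.  "stdrng" resolves to the
 * default registered DRNG; ALG_SET_KEY seeds it via rng_setkey() above,
 * and each read() returns at most MAXSIZE (128) bytes.  Guarded out since
 * this is user-space code.
 */
#if 0
#include <linux/if_alg.h>
#include <sys/socket.h>
#include <unistd.h>

int get_random_via_af_alg(unsigned char *buf, size_t len,
			  const unsigned char *seed, size_t seedlen)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type = "rng",
		.salg_name = "stdrng",
	};
	int tfmfd, opfd, ret = -1;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfmfd < 0)
		return -1;
	if (bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) == 0 &&
	    setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, seed, seedlen) == 0) {
		opfd = accept(tfmfd, NULL, 0);
		if (opfd >= 0) {
			/* len must be <= 128 to be satisfied in one read */
			if (read(opfd, buf, len) == (ssize_t)len)
				ret = 0;
			close(opfd);
		}
	}
	close(tfmfd);
	return ret;
}
#endif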
| linux-master | crypto/algif_rng.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Public Key Encryption
*
* Copyright (c) 2015, Intel Corporation
* Authors: Tadeusz Struk <[email protected]>
*/
#include <crypto/internal/akcipher.h>
#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "internal.h"
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
static int __maybe_unused crypto_akcipher_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_akcipher rakcipher;
memset(&rakcipher, 0, sizeof(rakcipher));
strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
sizeof(rakcipher), &rakcipher);
}
static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_akcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_puts(m, "type : akcipher\n");
}
static void crypto_akcipher_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_akcipher *akcipher = __crypto_akcipher_tfm(tfm);
struct akcipher_alg *alg = crypto_akcipher_alg(akcipher);
alg->exit(akcipher);
}
static int crypto_akcipher_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_akcipher *akcipher = __crypto_akcipher_tfm(tfm);
struct akcipher_alg *alg = crypto_akcipher_alg(akcipher);
if (alg->exit)
akcipher->base.exit = crypto_akcipher_exit_tfm;
if (alg->init)
return alg->init(akcipher);
return 0;
}
static void crypto_akcipher_free_instance(struct crypto_instance *inst)
{
struct akcipher_instance *akcipher = akcipher_instance(inst);
akcipher->free(akcipher);
}
static int __maybe_unused crypto_akcipher_report_stat(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct akcipher_alg *akcipher = __crypto_akcipher_alg(alg);
struct crypto_istat_akcipher *istat;
struct crypto_stat_akcipher rakcipher;
istat = akcipher_get_stat(akcipher);
memset(&rakcipher, 0, sizeof(rakcipher));
strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
rakcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
rakcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
rakcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
rakcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
rakcipher.stat_sign_cnt = atomic64_read(&istat->sign_cnt);
rakcipher.stat_verify_cnt = atomic64_read(&istat->verify_cnt);
rakcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
sizeof(rakcipher), &rakcipher);
}
static const struct crypto_type crypto_akcipher_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_akcipher_init_tfm,
.free = crypto_akcipher_free_instance,
#ifdef CONFIG_PROC_FS
.show = crypto_akcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_akcipher_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_akcipher_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AKCIPHER,
.tfmsize = offsetof(struct crypto_akcipher, base),
};
int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask)
{
spawn->base.frontend = &crypto_akcipher_type;
return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_akcipher);
struct crypto_akcipher *crypto_alloc_akcipher(const char *alg_name, u32 type,
u32 mask)
{
return crypto_alloc_tfm(alg_name, &crypto_akcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_akcipher);
static void akcipher_prepare_alg(struct akcipher_alg *alg)
{
struct crypto_istat_akcipher *istat = akcipher_get_stat(alg);
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_akcipher_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
memset(istat, 0, sizeof(*istat));
}
static int akcipher_default_op(struct akcipher_request *req)
{
return -ENOSYS;
}
static int akcipher_default_set_key(struct crypto_akcipher *tfm,
const void *key, unsigned int keylen)
{
return -ENOSYS;
}
int crypto_register_akcipher(struct akcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
if (!alg->sign)
alg->sign = akcipher_default_op;
if (!alg->verify)
alg->verify = akcipher_default_op;
if (!alg->encrypt)
alg->encrypt = akcipher_default_op;
if (!alg->decrypt)
alg->decrypt = akcipher_default_op;
if (!alg->set_priv_key)
alg->set_priv_key = akcipher_default_set_key;
akcipher_prepare_alg(alg);
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_akcipher);
void crypto_unregister_akcipher(struct akcipher_alg *alg)
{
crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_akcipher);
int akcipher_register_instance(struct crypto_template *tmpl,
struct akcipher_instance *inst)
{
if (WARN_ON(!inst->free))
return -EINVAL;
akcipher_prepare_alg(&inst->alg);
return crypto_register_instance(tmpl, akcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(akcipher_register_instance);
int crypto_akcipher_sync_prep(struct crypto_akcipher_sync_data *data)
{
unsigned int reqsize = crypto_akcipher_reqsize(data->tfm);
struct akcipher_request *req;
struct scatterlist *sg;
unsigned int mlen;
unsigned int len;
u8 *buf;
if (data->dst)
mlen = max(data->slen, data->dlen);
else
mlen = data->slen + data->dlen;
len = sizeof(*req) + reqsize + mlen;
if (len < mlen)
return -EOVERFLOW;
req = kzalloc(len, GFP_KERNEL);
if (!req)
return -ENOMEM;
data->req = req;
akcipher_request_set_tfm(req, data->tfm);
buf = (u8 *)(req + 1) + reqsize;
data->buf = buf;
memcpy(buf, data->src, data->slen);
sg = &data->sg;
sg_init_one(sg, buf, mlen);
akcipher_request_set_crypt(req, sg, data->dst ? sg : NULL,
data->slen, data->dlen);
crypto_init_wait(&data->cwait);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &data->cwait);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_akcipher_sync_prep);
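/*
 * Added commentary: crypto_akcipher_sync_prep() packs everything into a
 * single allocation:
 *
 *	[akcipher_request][driver reqsize][bounce buffer]
 *
 * When a separate destination is supplied, the same scatterlist entry is
 * reused for both input and output, so the bounce buffer must hold
 * whichever of slen and dlen is larger; otherwise it holds both regions
 * back to back.
 */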
int crypto_akcipher_sync_post(struct crypto_akcipher_sync_data *data, int err)
{
err = crypto_wait_req(err, &data->cwait);
if (data->dst)
memcpy(data->dst, data->buf, data->dlen);
data->dlen = data->req->dst_len;
kfree_sensitive(data->req);
return err;
}
EXPORT_SYMBOL_GPL(crypto_akcipher_sync_post);
int crypto_akcipher_sync_encrypt(struct crypto_akcipher *tfm,
const void *src, unsigned int slen,
void *dst, unsigned int dlen)
{
struct crypto_akcipher_sync_data data = {
.tfm = tfm,
.src = src,
.dst = dst,
.slen = slen,
.dlen = dlen,
};
return crypto_akcipher_sync_prep(&data) ?:
crypto_akcipher_sync_post(&data,
crypto_akcipher_encrypt(data.req));
}
EXPORT_SYMBOL_GPL(crypto_akcipher_sync_encrypt);
int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm,
const void *src, unsigned int slen,
void *dst, unsigned int dlen)
{
struct crypto_akcipher_sync_data data = {
.tfm = tfm,
.src = src,
.dst = dst,
.slen = slen,
.dlen = dlen,
};
return crypto_akcipher_sync_prep(&data) ?:
crypto_akcipher_sync_post(&data,
crypto_akcipher_decrypt(data.req)) ?:
data.dlen;
}
EXPORT_SYMBOL_GPL(crypto_akcipher_sync_decrypt);
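/*
 * Usage sketch (editor's addition, not part of the upstream file): one
 * plausible way for a kernel caller to drive the synchronous helpers
 * above. The algorithm name "rsa" and the example_* names are assumptions
 * for illustration only; the key blob must already be in the DER form
 * that crypto_akcipher_set_pub_key() expects.
 */
#if 0
static int example_rsa_sync_encrypt(const u8 *pub_key, unsigned int keylen,
				    const u8 *msg, unsigned int msglen,
				    u8 *out, unsigned int outlen)
{
	struct crypto_akcipher *tfm;
	int err;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_akcipher_set_pub_key(tfm, pub_key, keylen);
	if (!err)
		err = crypto_akcipher_sync_encrypt(tfm, msg, msglen,
						   out, outlen);

	crypto_free_akcipher(tfm);
	return err;
}
#endif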
static void crypto_exit_akcipher_ops_sig(struct crypto_tfm *tfm)
{
struct crypto_akcipher **ctx = crypto_tfm_ctx(tfm);
crypto_free_akcipher(*ctx);
}
int crypto_init_akcipher_ops_sig(struct crypto_tfm *tfm)
{
struct crypto_akcipher **ctx = crypto_tfm_ctx(tfm);
struct crypto_alg *calg = tfm->__crt_alg;
struct crypto_akcipher *akcipher;
if (!crypto_mod_get(calg))
return -EAGAIN;
akcipher = crypto_create_tfm(calg, &crypto_akcipher_type);
if (IS_ERR(akcipher)) {
crypto_mod_put(calg);
return PTR_ERR(akcipher);
}
*ctx = akcipher;
tfm->exit = crypto_exit_akcipher_ops_sig;
return 0;
}
EXPORT_SYMBOL_GPL(crypto_init_akcipher_ops_sig);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic public key cipher type");
| linux-master | crypto/akcipher.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Asynchronous Compression operations
*
* Copyright (c) 2016, Intel Corporation
* Authors: Weigang Li <[email protected]>
* Giovanni Cabiddu <[email protected]>
*/
#include <crypto/internal/acompress.h>
#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "compress.h"
struct crypto_scomp;
static const struct crypto_type crypto_acomp_type;
static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
{
return container_of(alg, struct acomp_alg, calg.base);
}
static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
{
return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
}
static int __maybe_unused crypto_acomp_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_acomp racomp;
memset(&racomp, 0, sizeof(racomp));
strscpy(racomp.type, "acomp", sizeof(racomp.type));
return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
}
static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_puts(m, "type : acomp\n");
}
static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
struct acomp_alg *alg = crypto_acomp_alg(acomp);
alg->exit(acomp);
}
static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
struct acomp_alg *alg = crypto_acomp_alg(acomp);
if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
return crypto_init_scomp_ops_async(tfm);
acomp->compress = alg->compress;
acomp->decompress = alg->decompress;
acomp->dst_free = alg->dst_free;
acomp->reqsize = alg->reqsize;
if (alg->exit)
acomp->base.exit = crypto_acomp_exit_tfm;
if (alg->init)
return alg->init(acomp);
return 0;
}
static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
{
int extsize = crypto_alg_extsize(alg);
if (alg->cra_type != &crypto_acomp_type)
extsize += sizeof(struct crypto_scomp *);
return extsize;
}
static inline int __crypto_acomp_report_stat(struct sk_buff *skb,
struct crypto_alg *alg)
{
struct comp_alg_common *calg = __crypto_comp_alg_common(alg);
struct crypto_istat_compress *istat = comp_get_stat(calg);
struct crypto_stat_compress racomp;
memset(&racomp, 0, sizeof(racomp));
strscpy(racomp.type, "acomp", sizeof(racomp.type));
racomp.stat_compress_cnt = atomic64_read(&istat->compress_cnt);
racomp.stat_compress_tlen = atomic64_read(&istat->compress_tlen);
racomp.stat_decompress_cnt = atomic64_read(&istat->decompress_cnt);
racomp.stat_decompress_tlen = atomic64_read(&istat->decompress_tlen);
racomp.stat_err_cnt = atomic64_read(&istat->err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
}
#ifdef CONFIG_CRYPTO_STATS
int crypto_acomp_report_stat(struct sk_buff *skb, struct crypto_alg *alg)
{
return __crypto_acomp_report_stat(skb, alg);
}
#endif
static const struct crypto_type crypto_acomp_type = {
.extsize = crypto_acomp_extsize,
.init_tfm = crypto_acomp_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_acomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_acomp_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_acomp_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
.type = CRYPTO_ALG_TYPE_ACOMPRESS,
.tfmsize = offsetof(struct crypto_acomp, base),
};
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
u32 mask)
{
return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
u32 mask, int node)
{
return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask,
node);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);
struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
{
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
struct acomp_req *req;
req = __acomp_request_alloc(acomp);
if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
return crypto_acomp_scomp_alloc_ctx(req);
return req;
}
EXPORT_SYMBOL_GPL(acomp_request_alloc);
void acomp_request_free(struct acomp_req *req)
{
struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
crypto_acomp_scomp_free_ctx(req);
if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
acomp->dst_free(req->dst);
req->dst = NULL;
}
__acomp_request_free(req);
}
EXPORT_SYMBOL_GPL(acomp_request_free);
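/*
 * Usage sketch (editor's addition, not part of the upstream file): a
 * synchronous one-shot compression through the acomp API, waiting for
 * completion with crypto_wait_req(). The algorithm name "deflate" and
 * the example_* names are assumptions; linearly mapped buffers and
 * <linux/scatterlist.h> are assumed available.
 */
#if 0
static int example_acomp_compress(const void *src, unsigned int slen,
				  void *dst, unsigned int *dlen)
{
	struct scatterlist sg_src, sg_dst;
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	struct crypto_wait wait;
	int err;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	crypto_init_wait(&wait);
	sg_init_one(&sg_src, src, slen);
	sg_init_one(&sg_dst, dst, *dlen);
	acomp_request_set_params(req, &sg_src, &sg_dst, slen, *dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
	if (!err)
		*dlen = req->dlen;	/* bytes actually produced */

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return err;
}
#endif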
void comp_prepare_alg(struct comp_alg_common *alg)
{
struct crypto_istat_compress *istat = comp_get_stat(alg);
struct crypto_alg *base = &alg->base;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
memset(istat, 0, sizeof(*istat));
}
int crypto_register_acomp(struct acomp_alg *alg)
{
struct crypto_alg *base = &alg->calg.base;
comp_prepare_alg(&alg->calg);
base->cra_type = &crypto_acomp_type;
base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_acomp);
void crypto_unregister_acomp(struct acomp_alg *alg)
{
crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
int crypto_register_acomps(struct acomp_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_register_acomp(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_acomp(&algs[i]);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_acomps);
void crypto_unregister_acomps(struct acomp_alg *algs, int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_unregister_acomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");
| linux-master | crypto/acompress.c |
/*
* Non-physical true random number generator based on timing jitter --
* Linux Kernel Crypto API specific code
*
* Copyright Stephan Mueller <[email protected]>, 2015 - 2023
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, in which case the provisions of the GPL2 are
* required INSTEAD OF the above restrictions. (This clause is
* necessary due to a potential bad interaction between the GPL and
* the restrictions contained in a BSD-style copyright.)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
* WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#include <crypto/hash.h>
#include <crypto/sha3.h>
#include <linux/fips.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <crypto/internal/rng.h>
#include "jitterentropy.h"
#define JENT_CONDITIONING_HASH "sha3-256-generic"
/***************************************************************************
* Helper function
***************************************************************************/
void *jent_zalloc(unsigned int len)
{
return kzalloc(len, GFP_KERNEL);
}
void jent_zfree(void *ptr)
{
kfree_sensitive(ptr);
}
/*
* Obtain a high-resolution time stamp value. The time stamp is used to measure
* the execution time of a given code path and its variations. Hence, the time
* stamp must have a sufficiently high resolution.
*
* Note, if the function returns zero because a given architecture does not
* implement a high-resolution time stamp, the RNG code's runtime test
* will detect it and will not produce output.
*/
void jent_get_nstime(__u64 *out)
{
__u64 tmp = 0;
tmp = random_get_entropy();
/*
* If random_get_entropy does not return a value, i.e. it is not
	 * implemented for a given architecture, fall back to a clock source,
	 * hoping that there are timers we can work with.
*/
if (tmp == 0)
tmp = ktime_get_ns();
*out = tmp;
jent_raw_hires_entropy_store(tmp);
}
int jent_hash_time(void *hash_state, __u64 time, u8 *addtl,
unsigned int addtl_len, __u64 hash_loop_cnt,
unsigned int stuck)
{
struct shash_desc *hash_state_desc = (struct shash_desc *)hash_state;
SHASH_DESC_ON_STACK(desc, hash_state_desc->tfm);
u8 intermediary[SHA3_256_DIGEST_SIZE];
__u64 j = 0;
int ret;
desc->tfm = hash_state_desc->tfm;
if (sizeof(intermediary) != crypto_shash_digestsize(desc->tfm)) {
pr_warn_ratelimited("Unexpected digest size\n");
return -EINVAL;
}
/*
* This loop fills a buffer which is injected into the entropy pool.
* The main reason for this loop is to execute something over which we
* can perform a timing measurement. The injection of the resulting
* data into the pool is performed to ensure the result is used and
* the compiler cannot optimize the loop away in case the result is not
	 * used at all. Yet that data is considered "additional information"
	 * in the terminology of SP800-90A and carries no entropy.
*
* Note, it does not matter which or how much data you inject, we are
	 * interested in the one Keccak1600 compression operation performed by
	 * the crypto_shash_finup() call.
*/
for (j = 0; j < hash_loop_cnt; j++) {
ret = crypto_shash_init(desc) ?:
crypto_shash_update(desc, intermediary,
sizeof(intermediary)) ?:
crypto_shash_finup(desc, addtl, addtl_len, intermediary);
if (ret)
goto err;
}
/*
* Inject the data from the previous loop into the pool. This data is
* not considered to contain any entropy, but it stirs the pool a bit.
*/
ret = crypto_shash_update(desc, intermediary, sizeof(intermediary));
if (ret)
goto err;
/*
* Insert the time stamp into the hash context representing the pool.
*
	 * If the time stamp is stuck, do not insert the value into the
	 * entropy pool at this final step. Although this operation should
	 * not do any harm even when the time stamp has no entropy, SP800-90B
	 * requires that any conditioning operation have an identical amount
	 * of input data
* according to section 3.1.5.
*/
if (!stuck) {
ret = crypto_shash_update(hash_state_desc, (u8 *)&time,
sizeof(__u64));
}
err:
shash_desc_zero(desc);
memzero_explicit(intermediary, sizeof(intermediary));
return ret;
}
int jent_read_random_block(void *hash_state, char *dst, unsigned int dst_len)
{
struct shash_desc *hash_state_desc = (struct shash_desc *)hash_state;
u8 jent_block[SHA3_256_DIGEST_SIZE];
/* Obtain data from entropy pool and re-initialize it */
int ret = crypto_shash_final(hash_state_desc, jent_block) ?:
crypto_shash_init(hash_state_desc) ?:
crypto_shash_update(hash_state_desc, jent_block,
sizeof(jent_block));
if (!ret && dst_len)
memcpy(dst, jent_block, dst_len);
memzero_explicit(jent_block, sizeof(jent_block));
return ret;
}
/***************************************************************************
* Kernel crypto API interface
***************************************************************************/
struct jitterentropy {
spinlock_t jent_lock;
struct rand_data *entropy_collector;
struct crypto_shash *tfm;
struct shash_desc *sdesc;
};
static void jent_kcapi_cleanup(struct crypto_tfm *tfm)
{
struct jitterentropy *rng = crypto_tfm_ctx(tfm);
spin_lock(&rng->jent_lock);
if (rng->sdesc) {
shash_desc_zero(rng->sdesc);
kfree(rng->sdesc);
}
rng->sdesc = NULL;
if (rng->tfm)
crypto_free_shash(rng->tfm);
rng->tfm = NULL;
if (rng->entropy_collector)
jent_entropy_collector_free(rng->entropy_collector);
rng->entropy_collector = NULL;
spin_unlock(&rng->jent_lock);
}
static int jent_kcapi_init(struct crypto_tfm *tfm)
{
struct jitterentropy *rng = crypto_tfm_ctx(tfm);
struct crypto_shash *hash;
struct shash_desc *sdesc;
int size, ret = 0;
spin_lock_init(&rng->jent_lock);
/*
* Use SHA3-256 as conditioner. We allocate only the generic
	 * implementation as we are not interested in high performance. The
* execution time of the SHA3 operation is measured and adds to the
* Jitter RNG's unpredictable behavior. If we have a slower hash
* implementation, the execution timing variations are larger. When
* using a fast implementation, we would need to call it more often
* as its variations are lower.
*/
hash = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0);
if (IS_ERR(hash)) {
pr_err("Cannot allocate conditioning digest\n");
return PTR_ERR(hash);
}
rng->tfm = hash;
size = sizeof(struct shash_desc) + crypto_shash_descsize(hash);
sdesc = kmalloc(size, GFP_KERNEL);
if (!sdesc) {
ret = -ENOMEM;
goto err;
}
sdesc->tfm = hash;
crypto_shash_init(sdesc);
rng->sdesc = sdesc;
rng->entropy_collector = jent_entropy_collector_alloc(1, 0, sdesc);
if (!rng->entropy_collector) {
ret = -ENOMEM;
goto err;
}
spin_lock_init(&rng->jent_lock);
return 0;
err:
jent_kcapi_cleanup(tfm);
return ret;
}
static int jent_kcapi_random(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *rdata, unsigned int dlen)
{
struct jitterentropy *rng = crypto_rng_ctx(tfm);
int ret = 0;
spin_lock(&rng->jent_lock);
ret = jent_read_entropy(rng->entropy_collector, rdata, dlen);
if (ret == -3) {
/* Handle permanent health test error */
/*
* If the kernel was booted with fips=1, it implies that
* the entire kernel acts as a FIPS 140 module. In this case
* an SP800-90B permanent health test error is treated as
* a FIPS module error.
*/
if (fips_enabled)
panic("Jitter RNG permanent health test failure\n");
pr_err("Jitter RNG permanent health test failure\n");
ret = -EFAULT;
} else if (ret == -2) {
/* Handle intermittent health test error */
pr_warn_ratelimited("Reset Jitter RNG due to intermittent health test failure\n");
ret = -EAGAIN;
} else if (ret == -1) {
/* Handle other errors */
ret = -EINVAL;
}
spin_unlock(&rng->jent_lock);
return ret;
}
static int jent_kcapi_reset(struct crypto_rng *tfm,
const u8 *seed, unsigned int slen)
{
return 0;
}
static struct rng_alg jent_alg = {
.generate = jent_kcapi_random,
.seed = jent_kcapi_reset,
.seedsize = 0,
.base = {
.cra_name = "jitterentropy_rng",
.cra_driver_name = "jitterentropy_rng",
.cra_priority = 100,
.cra_ctxsize = sizeof(struct jitterentropy),
.cra_module = THIS_MODULE,
.cra_init = jent_kcapi_init,
.cra_exit = jent_kcapi_cleanup,
}
};
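/*
 * Consumer sketch (editor's addition, not part of the upstream file):
 * once jent_alg is registered, any kernel user can pull random bytes
 * through the generic RNG API. The function and variable names here are
 * made up for illustration.
 */
#if 0
static int example_read_jitterentropy(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int err;

	rng = crypto_alloc_rng("jitterentropy_rng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* May return -EAGAIN on an intermittent health test failure. */
	err = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return err;
}
#endif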
static int __init jent_mod_init(void)
{
SHASH_DESC_ON_STACK(desc, tfm);
struct crypto_shash *tfm;
int ret = 0;
jent_testing_init();
tfm = crypto_alloc_shash(JENT_CONDITIONING_HASH, 0, 0);
if (IS_ERR(tfm)) {
jent_testing_exit();
return PTR_ERR(tfm);
}
desc->tfm = tfm;
crypto_shash_init(desc);
ret = jent_entropy_init(desc);
shash_desc_zero(desc);
crypto_free_shash(tfm);
if (ret) {
/* Handle permanent health test error */
if (fips_enabled)
panic("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret);
jent_testing_exit();
pr_info("jitterentropy: Initialization failed with host not compliant with requirements: %d\n", ret);
return -EFAULT;
}
return crypto_register_rng(&jent_alg);
}
static void __exit jent_mod_exit(void)
{
jent_testing_exit();
crypto_unregister_rng(&jent_alg);
}
module_init(jent_mod_init);
module_exit(jent_mod_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Stephan Mueller <[email protected]>");
MODULE_DESCRIPTION("Non-physical True Random Number Generator based on CPU Jitter");
MODULE_ALIAS_CRYPTO("jitterentropy_rng");
| linux-master | crypto/jitterentropy-kcapi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* Common Blowfish algorithm parts shared between the c and assembler
* implementations.
*
* Blowfish Cipher Algorithm, by Bruce Schneier.
* http://www.counterpane.com/blowfish.html
*
* Adapted from Kerneli implementation.
*
* Copyright (c) Herbert Valerio Riedel <[email protected]>
* Copyright (c) Kyle McMartin <[email protected]>
* Copyright (c) 2002 James Morris <[email protected]>
*/
#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/types.h>
#include <crypto/blowfish.h>
static const u32 bf_pbox[16 + 2] = {
0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344,
0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89,
0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917,
0x9216d5d9, 0x8979fb1b,
};
static const u32 bf_sbox[256 * 4] = {
0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7,
0xb8e1afed, 0x6a267e96, 0xba7c9045, 0xf12c7f99,
0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e,
0x0d95748f, 0x728eb658, 0x718bcd58, 0x82154aee,
0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef,
0x8e79dcb0, 0x603a180e, 0x6c9e0e8b, 0xb01e8a3e,
0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440,
0x55ca396a, 0x2aab10b6, 0xb4cc5c34, 0x1141e8ce,
0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e,
0xafd6ba33, 0x6c24cf5c, 0x7a325381, 0x28958677,
0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032,
0xef845d5d, 0xe98575b1, 0xdc262302, 0xeb651b88,
0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e,
0x21c66842, 0xf6e96c9a, 0x670c9c61, 0xabd388f0,
0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98,
0xa1f1651d, 0x39af0176, 0x66ca593e, 0x82430e88,
0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6,
0x4ed3aa62, 0x363f7706, 0x1bfedf72, 0x429b023d,
0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7,
0xe3fe501a, 0xb6794c3b, 0x976ce0bd, 0x04c006ba,
0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f,
0x6dfc511f, 0x9b30952c, 0xcc814544, 0xaf5ebd09,
0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb,
0x5579c0bd, 0x1a60320a, 0xd6a100c6, 0x402c7279,
0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab,
0x323db5fa, 0xfd238760, 0x53317b48, 0x3e00df82,
0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573,
0x695b27b0, 0xbbca58c8, 0xe1ffa35d, 0xb8f011a0,
0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790,
0xe1ddf2da, 0xa4cb7e33, 0x62fb1341, 0xcee4c6e8,
0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0,
0xd08ed1d0, 0xafc725e0, 0x8e3c5b2f, 0x8e7594b7,
0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad,
0x2f2f2218, 0xbe0e1777, 0xea752dfe, 0x8b021fa1,
0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9,
0x165fa266, 0x80957705, 0x93cc7314, 0x211a1477,
0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49,
0x00250e2d, 0x2071b35e, 0x226800bb, 0x57b8e0af,
0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5,
0x83260376, 0x6295cfa9, 0x11c81968, 0x4e734a41,
0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400,
0x08ba6fb5, 0x571be91f, 0xf296ec6b, 0x2a0dd915,
0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623,
0xad6ea6b0, 0x49a7df7d, 0x9cee60b8, 0x8fedb266,
0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e,
0x3f54989a, 0x5b429d65, 0x6b8fe4d6, 0x99f73fd6,
0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e,
0x09686b3f, 0x3ebaefc9, 0x3c971814, 0x6b6a70a1,
0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8,
0xb03ada37, 0xf0500c0d, 0xf01c1f04, 0x0200b3ff,
0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701,
0x3ae5e581, 0x37c2dadc, 0xc8b57634, 0x9af3dda7,
0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331,
0x4e548b38, 0x4f6db908, 0x6f420d03, 0xf60a04bf,
0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e,
0x5512721f, 0x2e6b7124, 0x501adde6, 0x9f84cd87,
0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2,
0xef1c1847, 0x3215d908, 0xdd433b37, 0x24c2ba16,
0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b,
0x043556f1, 0xd7a3c76b, 0x3c11183b, 0x5924a509,
0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3,
0x771fe71c, 0x4e3d06fa, 0x2965dcb9, 0x99e71d0f,
0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4,
0xf2f74ea7, 0x361d2b3d, 0x1939260f, 0x19c27960,
0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28,
0xc332ddef, 0xbe6c5aa5, 0x65582185, 0x68ab9802,
0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510,
0x13cca830, 0xeb61bd96, 0x0334fe1e, 0xaa0363cf,
0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e,
0x648b1eaf, 0x19bdf0ca, 0xa02369b9, 0x655abb50,
0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8,
0xf837889a, 0x97e32d77, 0x11ed935f, 0x16681281,
0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696,
0xcdb30aeb, 0x532e3054, 0x8fd948e4, 0x6dbc3128,
0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0,
0x45eee2b6, 0xa3aaabea, 0xdb6c4f15, 0xfacb4fd0,
0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250,
0xcf62a1f2, 0x5b8d2646, 0xfc8883a0, 0xc1c7b6a3,
0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00,
0x58428d2a, 0x0c55f5ea, 0x1dadf43e, 0x233f7061,
0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e,
0xa6078084, 0x19f8509e, 0xe8efd855, 0x61d99735,
0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9,
0xdb73dbd3, 0x105588cd, 0x675fda79, 0xe3674340,
0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934,
0x411520f7, 0x7602d4f7, 0xbcf46b2e, 0xd4a20068,
0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840,
0x4d95fc1d, 0x96b591af, 0x70f4ddd3, 0x66a02f45,
0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a,
0x28507825, 0x530429f4, 0x0a2c86da, 0xe9b66dfb,
0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6,
0xaace1e7c, 0xd3375fec, 0xce78a399, 0x406b2a42,
0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2,
0x3a6efa74, 0xdd5b4332, 0x6841e7f7, 0xca7820fb,
0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b,
0x55a867bc, 0xa1159a58, 0xcca92963, 0x99e1db33,
0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3,
0x95c11548, 0xe4c66d22, 0x48c1133f, 0xc70f86dc,
0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564,
0x257b7834, 0x602a9c60, 0xdff8e8a3, 0x1f636c1b,
0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922,
0x85b2a20e, 0xe6ba0d99, 0xde720c8c, 0x2da2f728,
0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e,
0x0a476341, 0x992eff74, 0x3a6f6eab, 0xf4f8fd37,
0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804,
0xf1290dc7, 0xcc00ffa3, 0xb5390f92, 0x690fed0b,
0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb,
0x37392eb3, 0xcc115979, 0x8026e297, 0xf42e312d,
0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350,
0x1a6b1018, 0x11caedfa, 0x3d25bdd8, 0xe2e1c3c9,
0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe,
0x9dbc8057, 0xf0f7c086, 0x60787bf8, 0x6003604d,
0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f,
0x77a057be, 0xbde8ae24, 0x55464299, 0xbf582e61,
0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9,
0x7aeb2661, 0x8b1ddf84, 0x846a0e79, 0x915f95e2,
0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e,
0xb77f19b6, 0xe0a9dc09, 0x662d09a1, 0xc4324633,
0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169,
0xdcb7da83, 0x573906fe, 0xa1e2ce9b, 0x4fcd7f52,
0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5,
0xf0177a28, 0xc0f586e0, 0x006058aa, 0x30dc7d62,
0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76,
0x6f05e409, 0x4b7c0188, 0x39720a3d, 0x7c927c24,
0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4,
0x1e50ef5e, 0xb161e6f8, 0xa28514d9, 0x6c51133c,
0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b,
0x5cb0679e, 0x4fa33742, 0xd3822740, 0x99bc9bbe,
0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4,
0x5748ab2f, 0xbc946e79, 0xc6a376d2, 0x6549c2c8,
0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304,
0xa1fad5f0, 0x6a2d519a, 0x63ef8ce2, 0x9a86ee22,
0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6,
0x2826a2f9, 0xa73a3ae1, 0x4ba99586, 0xef5562e9,
0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593,
0xe990fd5a, 0x9e34d797, 0x2cf0b7d9, 0x022b8b51,
0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c,
0xe029ac71, 0xe019a5e6, 0x47b0acfd, 0xed93fa9b,
0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c,
0x15056dd4, 0x88f46dba, 0x03a16125, 0x0564f0bd,
0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319,
0x7533d928, 0xb155fdf5, 0x03563482, 0x8aba3cbb,
0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991,
0xea7a90c2, 0xfb3e7bce, 0x5121ce64, 0x774fbe32,
0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166,
0xb39a460a, 0x6445c0dd, 0x586cdecf, 0x1c20c8ae,
0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5,
0x72eacea8, 0xfa6484bb, 0x8d6612ae, 0xbf3c6f47,
0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d,
0x4040cb08, 0x4eb4e2cc, 0x34d2466a, 0x0115af84,
0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8,
0x611560b1, 0xe7933fdc, 0xbb3a792b, 0x344525bd,
0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7,
0x1a908749, 0xd44fbd9a, 0xd0dadecb, 0xd50ada38,
0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c,
0xbf97222c, 0x15e6fc2a, 0x0f91fc71, 0x9b941525,
0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442,
0xe0ec6e0e, 0x1698db3b, 0x4c98a0be, 0x3278e964,
0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8,
0xdf359f8d, 0x9b992f2e, 0xe60b6f47, 0x0fe3f11d,
0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299,
0xf523f357, 0xa6327623, 0x93a83531, 0x56cccd02,
0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614,
0xe6c6c7bd, 0x327a140a, 0x45e1d006, 0xc3f27b9a,
0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b,
0x53113ec0, 0x1640e3d3, 0x38abbd60, 0x2547adf0,
0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e,
0x1948c25c, 0x02fb8a8c, 0x01c36ae4, 0xd6ebe1f9,
0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
};
/*
 * Round loop unrolling macros; S is a pointer to an S-box array
 * organized as four u32 entries per row.
*/
#define GET32_3(x) (((x) & 0xff))
#define GET32_2(x) (((x) >> (8)) & (0xff))
#define GET32_1(x) (((x) >> (16)) & (0xff))
#define GET32_0(x) (((x) >> (24)) & (0xff))
#define bf_F(x) (((S[GET32_0(x)] + S[256 + GET32_1(x)]) ^ \
S[512 + GET32_2(x)]) + S[768 + GET32_3(x)])
#define ROUND(a, b, n) ({ b ^= P[n]; a ^= bf_F(b); })
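/*
 * Worked example (editor's note): ROUND(yr, yl, 0) below expands to
 *
 *	yl ^= P[0];
 *	yr ^= (((S[(yl >> 24) & 0xff] + S[256 + ((yl >> 16) & 0xff)]) ^
 *		S[512 + ((yl >> 8) & 0xff)]) + S[768 + (yl & 0xff)]);
 *
 * i.e. one Feistel round: whiten one half with a subkey, then mix the
 * other half with the keyed F function built from the four S-boxes.
 */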
/*
 * The blowfish encipher; processes 64-bit blocks.
 * NOTE: This function MUST NOT perform any endianness conversion,
 * as blowfish_setkey() below relies on it operating on host-endian words.
*/
static void encrypt_block(struct bf_ctx *bctx, u32 *dst, u32 *src)
{
const u32 *P = bctx->p;
const u32 *S = bctx->s;
u32 yl = src[0];
u32 yr = src[1];
ROUND(yr, yl, 0);
ROUND(yl, yr, 1);
ROUND(yr, yl, 2);
ROUND(yl, yr, 3);
ROUND(yr, yl, 4);
ROUND(yl, yr, 5);
ROUND(yr, yl, 6);
ROUND(yl, yr, 7);
ROUND(yr, yl, 8);
ROUND(yl, yr, 9);
ROUND(yr, yl, 10);
ROUND(yl, yr, 11);
ROUND(yr, yl, 12);
ROUND(yl, yr, 13);
ROUND(yr, yl, 14);
ROUND(yl, yr, 15);
yl ^= P[16];
yr ^= P[17];
dst[0] = yr;
dst[1] = yl;
}
/*
* Calculates the blowfish S and P boxes for encryption and decryption.
*/
int blowfish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *P = ctx->p;
u32 *S = ctx->s;
short i, j, count;
u32 data[2], temp;
/* Copy the initialization s-boxes */
for (i = 0, count = 0; i < 256; i++)
for (j = 0; j < 4; j++, count++)
S[count] = bf_sbox[count];
/* Set the p-boxes */
for (i = 0; i < 16 + 2; i++)
P[i] = bf_pbox[i];
/* Actual subkey generation */
for (j = 0, i = 0; i < 16 + 2; i++) {
temp = (((u32)key[j] << 24) |
((u32)key[(j + 1) % keylen] << 16) |
((u32)key[(j + 2) % keylen] << 8) |
((u32)key[(j + 3) % keylen]));
P[i] = P[i] ^ temp;
j = (j + 4) % keylen;
}
data[0] = 0x00000000;
data[1] = 0x00000000;
for (i = 0; i < 16 + 2; i += 2) {
encrypt_block((struct bf_ctx *)ctx, data, data);
P[i] = data[0];
P[i + 1] = data[1];
}
for (i = 0; i < 4; i++) {
for (j = 0, count = i * 256; j < 256; j += 2, count += 2) {
encrypt_block((struct bf_ctx *)ctx, data, data);
S[count] = data[0];
S[count + 1] = data[1];
}
}
/* Bruce says not to bother with the weak key check. */
return 0;
}
EXPORT_SYMBOL_GPL(blowfish_setkey);
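/*
 * Usage sketch (editor's addition, not part of the upstream file): the key
 * schedule above is shared by the C and assembler ciphers; a caller would
 * normally reach it through the single-block cipher API, assuming the
 * generic implementation (crypto/blowfish_generic.c) is registered under
 * the name "blowfish". The example_* names are illustrative only.
 */
#if 0
static int example_blowfish_one_block(const u8 *key, unsigned int keylen,
				      const u8 in[8], u8 out[8])
{
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("blowfish", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, keylen);
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);

	crypto_free_cipher(tfm);
	return err;
}
#endif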
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blowfish Cipher common functions");
| linux-master | crypto/blowfish_common.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2019 Linaro, Ltd. <[email protected]>
*/
#ifdef CONFIG_ARM64
#include <asm/neon-intrinsics.h>
#define AES_ROUND "aese %0.16b, %1.16b \n\t aesmc %0.16b, %0.16b"
#else
#include <arm_neon.h>
#define AES_ROUND "aese.8 %q0, %q1 \n\t aesmc.8 %q0, %q0"
#endif
#define AEGIS_BLOCK_SIZE 16
#include <stddef.h>
#include "aegis-neon.h"
extern int aegis128_have_aes_insn;
void *memcpy(void *dest, const void *src, size_t n);
struct aegis128_state {
uint8x16_t v[5];
};
extern const uint8_t crypto_aes_sbox[];
static struct aegis128_state aegis128_load_state_neon(const void *state)
{
return (struct aegis128_state){ {
vld1q_u8(state),
vld1q_u8(state + 16),
vld1q_u8(state + 32),
vld1q_u8(state + 48),
vld1q_u8(state + 64)
} };
}
static void aegis128_save_state_neon(struct aegis128_state st, void *state)
{
vst1q_u8(state, st.v[0]);
vst1q_u8(state + 16, st.v[1]);
vst1q_u8(state + 32, st.v[2]);
vst1q_u8(state + 48, st.v[3]);
vst1q_u8(state + 64, st.v[4]);
}
static inline __attribute__((always_inline))
uint8x16_t aegis_aes_round(uint8x16_t w)
{
uint8x16_t z = {};
#ifdef CONFIG_ARM64
if (!__builtin_expect(aegis128_have_aes_insn, 1)) {
static const uint8_t shift_rows[] = {
0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3,
0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb,
};
static const uint8_t ror32by8[] = {
0x1, 0x2, 0x3, 0x0, 0x5, 0x6, 0x7, 0x4,
0x9, 0xa, 0xb, 0x8, 0xd, 0xe, 0xf, 0xc,
};
uint8x16_t v;
// shift rows
w = vqtbl1q_u8(w, vld1q_u8(shift_rows));
// sub bytes
#ifndef CONFIG_CC_IS_GCC
v = vqtbl4q_u8(vld1q_u8_x4(crypto_aes_sbox), w);
v = vqtbx4q_u8(v, vld1q_u8_x4(crypto_aes_sbox + 0x40), w - 0x40);
v = vqtbx4q_u8(v, vld1q_u8_x4(crypto_aes_sbox + 0x80), w - 0x80);
v = vqtbx4q_u8(v, vld1q_u8_x4(crypto_aes_sbox + 0xc0), w - 0xc0);
#else
asm("tbl %0.16b, {v16.16b-v19.16b}, %1.16b" : "=w"(v) : "w"(w));
w -= 0x40;
asm("tbx %0.16b, {v20.16b-v23.16b}, %1.16b" : "+w"(v) : "w"(w));
w -= 0x40;
asm("tbx %0.16b, {v24.16b-v27.16b}, %1.16b" : "+w"(v) : "w"(w));
w -= 0x40;
asm("tbx %0.16b, {v28.16b-v31.16b}, %1.16b" : "+w"(v) : "w"(w));
#endif
// mix columns
w = (v << 1) ^ (uint8x16_t)(((int8x16_t)v >> 7) & 0x1b);
w ^= (uint8x16_t)vrev32q_u16((uint16x8_t)v);
w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8));
return w;
}
#endif
/*
* We use inline asm here instead of the vaeseq_u8/vaesmcq_u8 intrinsics
* to force the compiler to issue the aese/aesmc instructions in pairs.
* This is much faster on many cores, where the instruction pair can
* execute in a single cycle.
*/
asm(AES_ROUND : "+w"(w) : "w"(z));
return w;
}
static inline __attribute__((always_inline))
struct aegis128_state aegis128_update_neon(struct aegis128_state st,
uint8x16_t m)
{
m ^= aegis_aes_round(st.v[4]);
st.v[4] ^= aegis_aes_round(st.v[3]);
st.v[3] ^= aegis_aes_round(st.v[2]);
st.v[2] ^= aegis_aes_round(st.v[1]);
st.v[1] ^= aegis_aes_round(st.v[0]);
st.v[0] ^= m;
return st;
}
static inline __attribute__((always_inline))
void preload_sbox(void)
{
if (!IS_ENABLED(CONFIG_ARM64) ||
!IS_ENABLED(CONFIG_CC_IS_GCC) ||
__builtin_expect(aegis128_have_aes_insn, 1))
return;
asm("ld1 {v16.16b-v19.16b}, [%0], #64 \n\t"
"ld1 {v20.16b-v23.16b}, [%0], #64 \n\t"
"ld1 {v24.16b-v27.16b}, [%0], #64 \n\t"
"ld1 {v28.16b-v31.16b}, [%0] \n\t"
:: "r"(crypto_aes_sbox));
}
void crypto_aegis128_init_neon(void *state, const void *key, const void *iv)
{
static const uint8_t const0[] = {
0x00, 0x01, 0x01, 0x02, 0x03, 0x05, 0x08, 0x0d,
0x15, 0x22, 0x37, 0x59, 0x90, 0xe9, 0x79, 0x62,
};
static const uint8_t const1[] = {
0xdb, 0x3d, 0x18, 0x55, 0x6d, 0xc2, 0x2f, 0xf1,
0x20, 0x11, 0x31, 0x42, 0x73, 0xb5, 0x28, 0xdd,
};
uint8x16_t k = vld1q_u8(key);
uint8x16_t kiv = k ^ vld1q_u8(iv);
struct aegis128_state st = {{
kiv,
vld1q_u8(const1),
vld1q_u8(const0),
k ^ vld1q_u8(const0),
k ^ vld1q_u8(const1),
}};
int i;
preload_sbox();
for (i = 0; i < 5; i++) {
st = aegis128_update_neon(st, k);
st = aegis128_update_neon(st, kiv);
}
aegis128_save_state_neon(st, state);
}
void crypto_aegis128_update_neon(void *state, const void *msg)
{
struct aegis128_state st = aegis128_load_state_neon(state);
preload_sbox();
st = aegis128_update_neon(st, vld1q_u8(msg));
aegis128_save_state_neon(st, state);
}
#ifdef CONFIG_ARM
/*
* AArch32 does not provide these intrinsics natively because it does not
* implement the underlying instructions. AArch32 only provides 64-bit
 * wide vtbl.8/vtbx.8 instructions, so use those instead.
*/
static uint8x16_t vqtbl1q_u8(uint8x16_t a, uint8x16_t b)
{
union {
uint8x16_t val;
uint8x8x2_t pair;
} __a = { a };
return vcombine_u8(vtbl2_u8(__a.pair, vget_low_u8(b)),
vtbl2_u8(__a.pair, vget_high_u8(b)));
}
static uint8x16_t vqtbx1q_u8(uint8x16_t v, uint8x16_t a, uint8x16_t b)
{
union {
uint8x16_t val;
uint8x8x2_t pair;
} __a = { a };
return vcombine_u8(vtbx2_u8(vget_low_u8(v), __a.pair, vget_low_u8(b)),
vtbx2_u8(vget_high_u8(v), __a.pair, vget_high_u8(b)));
}
static int8_t vminvq_s8(int8x16_t v)
{
int8x8_t s = vpmin_s8(vget_low_s8(v), vget_high_s8(v));
s = vpmin_s8(s, s);
s = vpmin_s8(s, s);
s = vpmin_s8(s, s);
return vget_lane_s8(s, 0);
}
#endif
static const uint8_t permute[] __aligned(64) = {
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
};
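/*
 * Worked example (editor's note): for a 5-byte tail, permute + 32 - 5
 * supplies the indices { 11, 12, 13, 14, 15, 0xff x 11 }, so vqtbl1q_u8()
 * in the tail handling below moves the last five bytes of the overlapping
 * 16-byte load into lanes 0-4 and zero-fills the rest (out-of-range tbl
 * indices yield 0). On the way out, permute + 5 does the inverse, placing
 * the five result bytes into lanes 11-15 so that the overlapping store at
 * "out + size - AEGIS_BLOCK_SIZE" lands them at the correct offsets.
 */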
void crypto_aegis128_encrypt_chunk_neon(void *state, void *dst, const void *src,
unsigned int size)
{
struct aegis128_state st = aegis128_load_state_neon(state);
const int short_input = size < AEGIS_BLOCK_SIZE;
uint8x16_t msg;
preload_sbox();
while (size >= AEGIS_BLOCK_SIZE) {
uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4];
msg = vld1q_u8(src);
st = aegis128_update_neon(st, msg);
msg ^= s;
vst1q_u8(dst, msg);
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
dst += AEGIS_BLOCK_SIZE;
}
if (size > 0) {
uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4];
uint8_t buf[AEGIS_BLOCK_SIZE];
const void *in = src;
void *out = dst;
uint8x16_t m;
if (__builtin_expect(short_input, 0))
in = out = memcpy(buf + AEGIS_BLOCK_SIZE - size, src, size);
m = vqtbl1q_u8(vld1q_u8(in + size - AEGIS_BLOCK_SIZE),
vld1q_u8(permute + 32 - size));
st = aegis128_update_neon(st, m);
vst1q_u8(out + size - AEGIS_BLOCK_SIZE,
vqtbl1q_u8(m ^ s, vld1q_u8(permute + size)));
if (__builtin_expect(short_input, 0))
memcpy(dst, out, size);
else
vst1q_u8(out - AEGIS_BLOCK_SIZE, msg);
}
aegis128_save_state_neon(st, state);
}
void crypto_aegis128_decrypt_chunk_neon(void *state, void *dst, const void *src,
unsigned int size)
{
struct aegis128_state st = aegis128_load_state_neon(state);
const int short_input = size < AEGIS_BLOCK_SIZE;
uint8x16_t msg;
preload_sbox();
while (size >= AEGIS_BLOCK_SIZE) {
msg = vld1q_u8(src) ^ st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4];
st = aegis128_update_neon(st, msg);
vst1q_u8(dst, msg);
size -= AEGIS_BLOCK_SIZE;
src += AEGIS_BLOCK_SIZE;
dst += AEGIS_BLOCK_SIZE;
}
if (size > 0) {
uint8x16_t s = st.v[1] ^ (st.v[2] & st.v[3]) ^ st.v[4];
uint8_t buf[AEGIS_BLOCK_SIZE];
const void *in = src;
void *out = dst;
uint8x16_t m;
if (__builtin_expect(short_input, 0))
in = out = memcpy(buf + AEGIS_BLOCK_SIZE - size, src, size);
m = s ^ vqtbx1q_u8(s, vld1q_u8(in + size - AEGIS_BLOCK_SIZE),
vld1q_u8(permute + 32 - size));
st = aegis128_update_neon(st, m);
vst1q_u8(out + size - AEGIS_BLOCK_SIZE,
vqtbl1q_u8(m, vld1q_u8(permute + size)));
if (__builtin_expect(short_input, 0))
memcpy(dst, out, size);
else
vst1q_u8(out - AEGIS_BLOCK_SIZE, msg);
}
aegis128_save_state_neon(st, state);
}
int crypto_aegis128_final_neon(void *state, void *tag_xor,
unsigned int assoclen,
unsigned int cryptlen,
unsigned int authsize)
{
struct aegis128_state st = aegis128_load_state_neon(state);
uint8x16_t v;
int i;
preload_sbox();
v = st.v[3] ^ (uint8x16_t)vcombine_u64(vmov_n_u64(8ULL * assoclen),
vmov_n_u64(8ULL * cryptlen));
for (i = 0; i < 7; i++)
st = aegis128_update_neon(st, v);
v = st.v[0] ^ st.v[1] ^ st.v[2] ^ st.v[3] ^ st.v[4];
if (authsize > 0) {
v = vqtbl1q_u8(~vceqq_u8(v, vld1q_u8(tag_xor)),
vld1q_u8(permute + authsize));
return vminvq_s8((int8x16_t)v);
}
vst1q_u8(tag_xor, v);
return 0;
}
| linux-master | crypto/aegis128-neon-inner.c |
/*
* Cryptographic API.
*
* MD5 Message Digest Algorithm (RFC1321).
*
* Derived from cryptoapi implementation, originally based on the
* public domain implementation written by Colin Plumb in 1993.
*
* Copyright (c) Cryptoapi developers.
* Copyright (c) 2002 James Morris <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>
const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = {
0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
};
EXPORT_SYMBOL_GPL(md5_zero_message_hash);
#define F1(x, y, z) (z ^ (x & (y ^ z)))
#define F2(x, y, z) F1(z, x, y)
#define F3(x, y, z) (x ^ y ^ z)
#define F4(x, y, z) (y ^ (x | ~z))
#define MD5STEP(f, w, x, y, z, in, s) \
(w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
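/*
 * Worked example (editor's note): the first round-1 step in
 * md5_transform(), MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7),
 * expands to
 *
 *	a += F1(b, c, d) + in[0] + 0xd76aa478;
 *	a = ((a << 7) | (a >> 25)) + b;
 *
 * which is RFC 1321's a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s).
 */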
static void md5_transform(__u32 *hash, __u32 const *in)
{
u32 a, b, c, d;
a = hash[0];
b = hash[1];
c = hash[2];
d = hash[3];
MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
hash[0] += a;
hash[1] += b;
hash[2] += c;
hash[3] += d;
}
static inline void md5_transform_helper(struct md5_state *ctx)
{
le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32));
md5_transform(ctx->hash, ctx->block);
}
static int md5_init(struct shash_desc *desc)
{
struct md5_state *mctx = shash_desc_ctx(desc);
mctx->hash[0] = MD5_H0;
mctx->hash[1] = MD5_H1;
mctx->hash[2] = MD5_H2;
mctx->hash[3] = MD5_H3;
mctx->byte_count = 0;
return 0;
}
static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
struct md5_state *mctx = shash_desc_ctx(desc);
const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
mctx->byte_count += len;
if (avail > len) {
memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
data, len);
return 0;
}
memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
data, avail);
md5_transform_helper(mctx);
data += avail;
len -= avail;
while (len >= sizeof(mctx->block)) {
memcpy(mctx->block, data, sizeof(mctx->block));
md5_transform_helper(mctx);
data += sizeof(mctx->block);
len -= sizeof(mctx->block);
}
memcpy(mctx->block, data, len);
return 0;
}
static int md5_final(struct shash_desc *desc, u8 *out)
{
struct md5_state *mctx = shash_desc_ctx(desc);
const unsigned int offset = mctx->byte_count & 0x3f;
char *p = (char *)mctx->block + offset;
int padding = 56 - (offset + 1);
*p++ = 0x80;
if (padding < 0) {
memset(p, 0x00, padding + sizeof (u64));
md5_transform_helper(mctx);
p = (char *)mctx->block;
padding = 56;
}
memset(p, 0, padding);
mctx->block[14] = mctx->byte_count << 3;
mctx->block[15] = mctx->byte_count >> 29;
le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
sizeof(u64)) / sizeof(u32));
md5_transform(mctx->hash, mctx->block);
cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32));
memcpy(out, mctx->hash, sizeof(mctx->hash));
memset(mctx, 0, sizeof(*mctx));
return 0;
}
static int md5_export(struct shash_desc *desc, void *out)
{
struct md5_state *ctx = shash_desc_ctx(desc);
memcpy(out, ctx, sizeof(*ctx));
return 0;
}
static int md5_import(struct shash_desc *desc, const void *in)
{
struct md5_state *ctx = shash_desc_ctx(desc);
memcpy(ctx, in, sizeof(*ctx));
return 0;
}
static struct shash_alg alg = {
.digestsize = MD5_DIGEST_SIZE,
.init = md5_init,
.update = md5_update,
.final = md5_final,
.export = md5_export,
.import = md5_import,
.descsize = sizeof(struct md5_state),
.statesize = sizeof(struct md5_state),
.base = {
.cra_name = "md5",
.cra_driver_name = "md5-generic",
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init md5_mod_init(void)
{
return crypto_register_shash(&alg);
}
static void __exit md5_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
subsys_initcall(md5_mod_init);
module_exit(md5_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD5 Message Digest Algorithm");
MODULE_ALIAS_CRYPTO("md5");
| linux-master | crypto/md5.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Asynchronous Cryptographic Hash operations.
*
* This is the asynchronous version of hash.c with notification of
* completion via a callback.
*
* Copyright (c) 2008 Loc Ho <[email protected]>
*/
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "hash.h"
static const struct crypto_type crypto_ahash_type;
struct ahash_request_priv {
crypto_completion_t complete;
void *data;
u8 *result;
u32 flags;
void *ubuf[] CRYPTO_MINALIGN_ATTR;
};
static int hash_walk_next(struct crypto_hash_walk *walk)
{
unsigned int alignmask = walk->alignmask;
unsigned int offset = walk->offset;
unsigned int nbytes = min(walk->entrylen,
((unsigned int)(PAGE_SIZE)) - offset);
walk->data = kmap_local_page(walk->pg);
walk->data += offset;
if (offset & alignmask) {
unsigned int unaligned = alignmask + 1 - (offset & alignmask);
if (nbytes > unaligned)
nbytes = unaligned;
}
walk->entrylen -= nbytes;
return nbytes;
}
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
struct scatterlist *sg;
sg = walk->sg;
walk->offset = sg->offset;
walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
walk->offset = offset_in_page(walk->offset);
walk->entrylen = sg->length;
if (walk->entrylen > walk->total)
walk->entrylen = walk->total;
walk->total -= walk->entrylen;
return hash_walk_next(walk);
}
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
unsigned int alignmask = walk->alignmask;
walk->data -= walk->offset;
if (walk->entrylen && (walk->offset & alignmask) && !err) {
unsigned int nbytes;
walk->offset = ALIGN(walk->offset, alignmask + 1);
nbytes = min(walk->entrylen,
(unsigned int)(PAGE_SIZE - walk->offset));
if (nbytes) {
walk->entrylen -= nbytes;
walk->data += walk->offset;
return nbytes;
}
}
kunmap_local(walk->data);
crypto_yield(walk->flags);
if (err)
return err;
if (walk->entrylen) {
walk->offset = 0;
walk->pg++;
return hash_walk_next(walk);
}
if (!walk->total)
return 0;
walk->sg = sg_next(walk->sg);
return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk)
{
walk->total = req->nbytes;
if (!walk->total) {
walk->entrylen = 0;
return 0;
}
walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
walk->sg = req->src;
walk->flags = req->base.flags;
return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
unsigned long alignmask = crypto_ahash_alignmask(tfm);
int ret;
u8 *buffer, *alignbuffer;
unsigned long absize;
absize = keylen + alignmask;
buffer = kmalloc(absize, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
ret = tfm->setkey(tfm, alignbuffer, keylen);
kfree_sensitive(buffer);
return ret;
}
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
return -ENOSYS;
}
static void ahash_set_needkey(struct crypto_ahash *tfm)
{
const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
if (tfm->setkey != ahash_nosetkey &&
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
unsigned long alignmask = crypto_ahash_alignmask(tfm);
int err;
if ((unsigned long)key & alignmask)
err = ahash_setkey_unaligned(tfm, key, keylen);
else
err = tfm->setkey(tfm, key, keylen);
if (unlikely(err)) {
ahash_set_needkey(tfm);
return err;
}
crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
bool has_state)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
unsigned int ds = crypto_ahash_digestsize(tfm);
struct ahash_request *subreq;
unsigned int subreq_size;
unsigned int reqsize;
u8 *result;
gfp_t gfp;
u32 flags;
subreq_size = sizeof(*subreq);
reqsize = crypto_ahash_reqsize(tfm);
reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
subreq_size += reqsize;
subreq_size += ds;
subreq_size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
flags = ahash_request_flags(req);
gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
subreq = kmalloc(subreq_size, gfp);
if (!subreq)
return -ENOMEM;
ahash_request_set_tfm(subreq, tfm);
ahash_request_set_callback(subreq, flags, cplt, req);
result = (u8 *)(subreq + 1) + reqsize;
result = PTR_ALIGN(result, alignmask + 1);
ahash_request_set_crypt(subreq, req->src, result, req->nbytes);
if (has_state) {
void *state;
state = kmalloc(crypto_ahash_statesize(tfm), gfp);
if (!state) {
kfree(subreq);
return -ENOMEM;
}
crypto_ahash_export(req, state);
crypto_ahash_import(subreq, state);
kfree_sensitive(state);
}
req->priv = subreq;
return 0;
}
static void ahash_restore_req(struct ahash_request *req, int err)
{
struct ahash_request *subreq = req->priv;
if (!err)
memcpy(req->result, subreq->result,
crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
req->priv = NULL;
kfree_sensitive(subreq);
}
static void ahash_op_unaligned_done(void *data, int err)
{
struct ahash_request *areq = data;
if (err == -EINPROGRESS)
goto out;
	/* Copy the result from the sub-request back into the original one. */
ahash_restore_req(areq, err);
out:
/* Complete the ORIGINAL request. */
ahash_request_complete(areq, err);
}
static int ahash_op_unaligned(struct ahash_request *req,
int (*op)(struct ahash_request *),
bool has_state)
{
int err;
err = ahash_save_req(req, ahash_op_unaligned_done, has_state);
if (err)
return err;
err = op(req->priv);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
ahash_restore_req(req, err);
return err;
}
static int crypto_ahash_op(struct ahash_request *req,
int (*op)(struct ahash_request *),
bool has_state)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
unsigned long alignmask = crypto_ahash_alignmask(tfm);
int err;
if ((unsigned long)req->result & alignmask)
err = ahash_op_unaligned(req, op, has_state);
else
err = op(req);
return crypto_hash_errstat(crypto_hash_alg_common(tfm), err);
}
int crypto_ahash_final(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
atomic64_inc(&hash_get_stat(alg)->hash_cnt);
return crypto_ahash_op(req, tfm->final, true);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);
int crypto_ahash_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
struct crypto_istat_hash *istat = hash_get_stat(alg);
atomic64_inc(&istat->hash_cnt);
atomic64_add(req->nbytes, &istat->hash_tlen);
}
return crypto_ahash_op(req, tfm->finup, true);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
struct crypto_istat_hash *istat = hash_get_stat(alg);
atomic64_inc(&istat->hash_cnt);
atomic64_add(req->nbytes, &istat->hash_tlen);
}
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return crypto_hash_errstat(alg, -ENOKEY);
return crypto_ahash_op(req, tfm->digest, false);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
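/*
 * Usage sketch (editor's addition, not part of the upstream file): a
 * synchronous one-shot digest through the ahash API, waiting for async
 * completion via crypto_wait_req(). The algorithm name "sha256" and the
 * example_* names are assumptions; a linearly mapped input buffer is
 * assumed.
 */
#if 0
static int example_ahash_digest(const void *data, unsigned int len, u8 *out)
{
	struct scatterlist sg;
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct crypto_wait wait;
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	crypto_init_wait(&wait);
	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
#endif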
static void ahash_def_finup_done2(void *data, int err)
{
struct ahash_request *areq = data;
if (err == -EINPROGRESS)
return;
ahash_restore_req(areq, err);
ahash_request_complete(areq, err);
}
static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
struct ahash_request *subreq = req->priv;
if (err)
goto out;
subreq->base.complete = ahash_def_finup_done2;
err = crypto_ahash_reqtfm(req)->final(subreq);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
out:
ahash_restore_req(req, err);
return err;
}
static void ahash_def_finup_done1(void *data, int err)
{
struct ahash_request *areq = data;
struct ahash_request *subreq;
if (err == -EINPROGRESS)
goto out;
subreq = areq->priv;
subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
err = ahash_def_finup_finish1(areq, err);
if (err == -EINPROGRESS || err == -EBUSY)
return;
out:
ahash_request_complete(areq, err);
}
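/*
 * Default ->finup() for algorithms that only implement ->update() and
 * ->final(): run both steps on a saved subrequest, resuming from the
 * done1/done2 callbacks if either step completes asynchronously.
 */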
static int ahash_def_finup(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
int err;
err = ahash_save_req(req, ahash_def_finup_done1, true);
if (err)
return err;
err = tfm->update(req->priv);
if (err == -EINPROGRESS || err == -EBUSY)
return err;
return ahash_def_finup_finish1(req, err);
}
static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
struct ahash_alg *alg = crypto_ahash_alg(hash);
alg->exit_tfm(hash);
}
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
struct ahash_alg *alg = crypto_ahash_alg(hash);
hash->setkey = ahash_nosetkey;
crypto_ahash_set_statesize(hash, alg->halg.statesize);
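/* Algorithms implemented as shash are wrapped with async glue here. */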
if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
return crypto_init_shash_ops_async(tfm);
hash->init = alg->init;
hash->update = alg->update;
hash->final = alg->final;
hash->finup = alg->finup ?: ahash_def_finup;
hash->digest = alg->digest;
hash->export = alg->export;
hash->import = alg->import;
if (alg->setkey) {
hash->setkey = alg->setkey;
ahash_set_needkey(hash);
}
if (alg->exit_tfm)
tfm->exit = crypto_ahash_exit_tfm;
return alg->init_tfm ? alg->init_tfm(hash) : 0;
}
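/*
 * An ahash instantiated on top of an shash only needs room for the
 * shash tfm pointer; native ahash algorithms get the usual context
 * size derived from the algorithm itself.
 */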
static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
if (alg->cra_type != &crypto_ahash_type)
return sizeof(struct crypto_shash *);
return crypto_alg_extsize(alg);
}
static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
struct ahash_instance *ahash = ahash_instance(inst);
ahash->free(ahash);
}
static int __maybe_unused crypto_ahash_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
memset(&rhash, 0, sizeof(rhash));
strscpy(rhash.type, "ahash", sizeof(rhash.type));
rhash.blocksize = alg->cra_blocksize;
rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_printf(m, "type : ahash\n");
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
"yes" : "no");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "digestsize : %u\n",
__crypto_hash_alg_common(alg)->digestsize);
}
static int __maybe_unused crypto_ahash_report_stat(
struct sk_buff *skb, struct crypto_alg *alg)
{
return crypto_hash_report_stat(skb, alg, "ahash");
}
static const struct crypto_type crypto_ahash_type = {
.extsize = crypto_ahash_extsize,
.init_tfm = crypto_ahash_init_tfm,
.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
.show = crypto_ahash_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_ahash_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_ahash_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
.type = CRYPTO_ALG_TYPE_AHASH,
.tfmsize = offsetof(struct crypto_ahash, base),
};
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask)
{
spawn->base.frontend = &crypto_ahash_type;
return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
u32 mask)
{
return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
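/*
 * Illustrative caller-side sketch (not part of this file): digest one
 * linear buffer with a named ahash.  "data", "len" and "err" are
 * placeholders; a real caller must also handle -EINPROGRESS/-EBUSY
 * returns from asynchronous implementations, e.g. via crypto_wait_req().
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 out[SHA256_DIGEST_SIZE];
 *	int err;
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	if (req) {
 *		sg_init_one(&sg, data, len);
 *		ahash_request_set_crypt(req, &sg, out, len);
 *		err = crypto_ahash_digest(req);
 *		ahash_request_free(req);
 *	}
 *	crypto_free_ahash(tfm);
 */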
int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);
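/*
 * Clone an ahash transform.  Keyless algorithms can share the
 * underlying tfm via its reference count; keyed algorithms need a real
 * copy so that the clone's key state stays independent.
 */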
struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
{
struct hash_alg_common *halg = crypto_hash_alg_common(hash);
struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
struct crypto_ahash *nhash;
struct ahash_alg *alg;
int err;
if (!crypto_hash_alg_has_setkey(halg)) {
tfm = crypto_tfm_get(tfm);
if (IS_ERR(tfm))
return ERR_CAST(tfm);
return hash;
}
nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);
if (IS_ERR(nhash))
return nhash;
nhash->init = hash->init;
nhash->update = hash->update;
nhash->final = hash->final;
nhash->finup = hash->finup;
nhash->digest = hash->digest;
nhash->export = hash->export;
nhash->import = hash->import;
nhash->setkey = hash->setkey;
nhash->reqsize = hash->reqsize;
nhash->statesize = hash->statesize;
if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
return crypto_clone_shash_ops_async(nhash, hash);
err = -ENOSYS;
alg = crypto_ahash_alg(hash);
if (!alg->clone_tfm)
goto out_free_nhash;
err = alg->clone_tfm(nhash, hash);
if (err)
goto out_free_nhash;
return nhash;
out_free_nhash:
crypto_free_ahash(nhash);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_clone_ahash);
static int ahash_prepare_alg(struct ahash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
int err;
if (alg->halg.statesize == 0)
return -EINVAL;
err = hash_prepare_alg(&alg->halg);
if (err)
return err;
base->cra_type = &crypto_ahash_type;
base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
return 0;
}
int crypto_register_ahash(struct ahash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
int err;
err = ahash_prepare_alg(alg);
if (err)
return err;
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
void crypto_unregister_ahash(struct ahash_alg *alg)
{
crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
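/* Register a batch of ahash algorithms, unwinding on first failure. */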
int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_register_ahash(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_ahash(&algs[i]);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);
void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst)
{
int err;
if (WARN_ON(!inst->free))
return -EINVAL;
err = ahash_prepare_alg(&inst->alg);
if (err)
return err;
return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);
bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
struct crypto_alg *alg = &halg->base;
if (alg->cra_type != &crypto_ahash_type)
return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
| linux-master | crypto/ahash.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API
*
* ARC4 Cipher Algorithm
*
* Jon Oberheide <[email protected]>
*/
#include <crypto/algapi.h>
#include <crypto/arc4.h>
#include <crypto/internal/skcipher.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
static int crypto_arc4_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm);
return arc4_setkey(ctx, in_key, key_len);
}
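/*
 * ARC4 is a stream cipher, so the same routine serves both the encrypt
 * and decrypt directions: walk the scatterlists and transform each
 * mapped chunk in one call.
 */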
static int crypto_arc4_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
int err;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes > 0) {
arc4_crypt(ctx, walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes);
err = skcipher_walk_done(&walk, 0);
}
return err;
}
static int crypto_arc4_init(struct crypto_skcipher *tfm)
{
pr_warn_ratelimited("\"%s\" (%lu) uses obsolete ecb(arc4) skcipher\n",
current->comm, (unsigned long)current->pid);
return 0;
}
static struct skcipher_alg arc4_alg = {
/*
* For legacy reasons, this is named "ecb(arc4)", not "arc4".
* Nevertheless it's actually a stream cipher, not a block cipher.
*/
.base.cra_name = "ecb(arc4)",
.base.cra_driver_name = "ecb(arc4)-generic",
.base.cra_priority = 100,
.base.cra_blocksize = ARC4_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct arc4_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = ARC4_MIN_KEY_SIZE,
.max_keysize = ARC4_MAX_KEY_SIZE,
.setkey = crypto_arc4_setkey,
.encrypt = crypto_arc4_crypt,
.decrypt = crypto_arc4_crypt,
.init = crypto_arc4_init,
};
static int __init arc4_init(void)
{
return crypto_register_skcipher(&arc4_alg);
}
static void __exit arc4_exit(void)
{
crypto_unregister_skcipher(&arc4_alg);
}
subsys_initcall(arc4_init);
module_exit(arc4_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
MODULE_AUTHOR("Jon Oberheide <[email protected]>");
MODULE_ALIAS_CRYPTO("ecb(arc4)");
| linux-master | crypto/arc4.c |
// SPDX-License-Identifier: GPL-2.0+ OR BSD-2-Clause
/*
* Streebog hash function as specified by GOST R 34.11-2012 and
* described at https://tools.ietf.org/html/rfc6986
*
* Copyright (c) 2013 Alexey Degtyarev <[email protected]>
* Copyright (c) 2018 Vitaly Chikunov <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <crypto/internal/hash.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <crypto/streebog.h>
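/*
 * Working constants from RFC 6986 (assumed from the reference
 * algorithm): buffer0 is the all-zero 512-bit vector, and buffer512
 * holds the value 512, the number of bits consumed per full message
 * block; both feed the compression and finalization stages.
 */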
static const struct streebog_uint512 buffer0 = { {
0, 0, 0, 0, 0, 0, 0, 0
} };
static const struct streebog_uint512 buffer512 = { {
cpu_to_le64(0x200), 0, 0, 0, 0, 0, 0, 0
} };
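/*
 * The twelve 512-bit round constants C_1..C_12 from RFC 6986, stored
 * as little-endian 64-bit words.
 */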
static const struct streebog_uint512 C[12] = {
{ {
cpu_to_le64(0xdd806559f2a64507ULL),
cpu_to_le64(0x05767436cc744d23ULL),
cpu_to_le64(0xa2422a08a460d315ULL),
cpu_to_le64(0x4b7ce09192676901ULL),
cpu_to_le64(0x714eb88d7585c4fcULL),
cpu_to_le64(0x2f6a76432e45d016ULL),
cpu_to_le64(0xebcb2f81c0657c1fULL),
cpu_to_le64(0xb1085bda1ecadae9ULL)
} },
{ {
cpu_to_le64(0xe679047021b19bb7ULL),
cpu_to_le64(0x55dda21bd7cbcd56ULL),
cpu_to_le64(0x5cb561c2db0aa7caULL),
cpu_to_le64(0x9ab5176b12d69958ULL),
cpu_to_le64(0x61d55e0f16b50131ULL),
cpu_to_le64(0xf3feea720a232b98ULL),
cpu_to_le64(0x4fe39d460f70b5d7ULL),
cpu_to_le64(0x6fa3b58aa99d2f1aULL)
} },
{ {
cpu_to_le64(0x991e96f50aba0ab2ULL),
cpu_to_le64(0xc2b6f443867adb31ULL),
cpu_to_le64(0xc1c93a376062db09ULL),
cpu_to_le64(0xd3e20fe490359eb1ULL),
cpu_to_le64(0xf2ea7514b1297b7bULL),
cpu_to_le64(0x06f15e5f529c1f8bULL),
cpu_to_le64(0x0a39fc286a3d8435ULL),
cpu_to_le64(0xf574dcac2bce2fc7ULL)
} },
{ {
cpu_to_le64(0x220cbebc84e3d12eULL),
cpu_to_le64(0x3453eaa193e837f1ULL),
cpu_to_le64(0xd8b71333935203beULL),
cpu_to_le64(0xa9d72c82ed03d675ULL),
cpu_to_le64(0x9d721cad685e353fULL),
cpu_to_le64(0x488e857e335c3c7dULL),
cpu_to_le64(0xf948e1a05d71e4ddULL),
cpu_to_le64(0xef1fdfb3e81566d2ULL)
} },
{ {
cpu_to_le64(0x601758fd7c6cfe57ULL),
cpu_to_le64(0x7a56a27ea9ea63f5ULL),
cpu_to_le64(0xdfff00b723271a16ULL),
cpu_to_le64(0xbfcd1747253af5a3ULL),
cpu_to_le64(0x359e35d7800fffbdULL),
cpu_to_le64(0x7f151c1f1686104aULL),
cpu_to_le64(0x9a3f410c6ca92363ULL),
cpu_to_le64(0x4bea6bacad474799ULL)
} },
{ {
cpu_to_le64(0xfa68407a46647d6eULL),
cpu_to_le64(0xbf71c57236904f35ULL),
cpu_to_le64(0x0af21f66c2bec6b6ULL),
cpu_to_le64(0xcffaa6b71c9ab7b4ULL),
cpu_to_le64(0x187f9ab49af08ec6ULL),
cpu_to_le64(0x2d66c4f95142a46cULL),
cpu_to_le64(0x6fa4c33b7a3039c0ULL),
cpu_to_le64(0xae4faeae1d3ad3d9ULL)
} },
{ {
cpu_to_le64(0x8886564d3a14d493ULL),
cpu_to_le64(0x3517454ca23c4af3ULL),
cpu_to_le64(0x06476983284a0504ULL),
cpu_to_le64(0x0992abc52d822c37ULL),
cpu_to_le64(0xd3473e33197a93c9ULL),
cpu_to_le64(0x399ec6c7e6bf87c9ULL),
cpu_to_le64(0x51ac86febf240954ULL),
cpu_to_le64(0xf4c70e16eeaac5ecULL)
} },
{ {
cpu_to_le64(0xa47f0dd4bf02e71eULL),
cpu_to_le64(0x36acc2355951a8d9ULL),
cpu_to_le64(0x69d18d2bd1a5c42fULL),
cpu_to_le64(0xf4892bcb929b0690ULL),
cpu_to_le64(0x89b4443b4ddbc49aULL),
cpu_to_le64(0x4eb7f8719c36de1eULL),
cpu_to_le64(0x03e7aa020c6e4141ULL),
cpu_to_le64(0x9b1f5b424d93c9a7ULL)
} },
{ {
cpu_to_le64(0x7261445183235adbULL),
cpu_to_le64(0x0e38dc92cb1f2a60ULL),
cpu_to_le64(0x7b2b8a9aa6079c54ULL),
cpu_to_le64(0x800a440bdbb2ceb1ULL),
cpu_to_le64(0x3cd955b7e00d0984ULL),
cpu_to_le64(0x3a7d3a1b25894224ULL),
cpu_to_le64(0x944c9ad8ec165fdeULL),
cpu_to_le64(0x378f5a541631229bULL)
} },
{ {
cpu_to_le64(0x74b4c7fb98459cedULL),
cpu_to_le64(0x3698fad1153bb6c3ULL),
cpu_to_le64(0x7a1e6c303b7652f4ULL),
cpu_to_le64(0x9fe76702af69334bULL),
cpu_to_le64(0x1fffe18a1b336103ULL),
cpu_to_le64(0x8941e71cff8a78dbULL),
cpu_to_le64(0x382ae548b2e4f3f3ULL),
cpu_to_le64(0xabbedea680056f52ULL)
} },
{ {
cpu_to_le64(0x6bcaa4cd81f32d1bULL),
cpu_to_le64(0xdea2594ac06fd85dULL),
cpu_to_le64(0xefbacd1d7d476e98ULL),
cpu_to_le64(0x8a1d71efea48b9caULL),
cpu_to_le64(0x2001802114846679ULL),
cpu_to_le64(0xd8fa6bbbebab0761ULL),
cpu_to_le64(0x3002c6cd635afe94ULL),
cpu_to_le64(0x7bcd9ed0efc889fbULL)
} },
{ {
cpu_to_le64(0x48bc924af11bd720ULL),
cpu_to_le64(0xfaf417d5d9b21b99ULL),
cpu_to_le64(0xe71da4aa88e12852ULL),
cpu_to_le64(0x5d80ef9d1891cc86ULL),
cpu_to_le64(0xf82012d430219f9bULL),
cpu_to_le64(0xcda43c32bcdf1d77ULL),
cpu_to_le64(0xd21380b00449b17aULL),
cpu_to_le64(0x378ee767f11631baULL)
} }
};
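/*
 * Precomputed 8x256 lookup tables folding the Streebog S-box (pi), the
 * byte transposition (tau) and the linear transform (l) of RFC 6986
 * into one step, so the LPS round function reduces to eight table
 * lookups and XORs per 64-bit output word.
 */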
static const unsigned long long Ax[8][256] = {
{
0xd01f715b5c7ef8e6ULL, 0x16fa240980778325ULL, 0xa8a42e857ee049c8ULL,
0x6ac1068fa186465bULL, 0x6e417bd7a2e9320bULL, 0x665c8167a437daabULL,
0x7666681aa89617f6ULL, 0x4b959163700bdcf5ULL, 0xf14be6b78df36248ULL,
0xc585bd689a625cffULL, 0x9557d7fca67d82cbULL, 0x89f0b969af6dd366ULL,
0xb0833d48749f6c35ULL, 0xa1998c23b1ecbc7cULL, 0x8d70c431ac02a736ULL,
0xd6dfbc2fd0a8b69eULL, 0x37aeb3e551fa198bULL, 0x0b7d128a40b5cf9cULL,
0x5a8f2008b5780cbcULL, 0xedec882284e333e5ULL, 0xd25fc177d3c7c2ceULL,
0x5e0f5d50b61778ecULL, 0x1d873683c0c24cb9ULL, 0xad040bcbb45d208cULL,
0x2f89a0285b853c76ULL, 0x5732fff6791b8d58ULL, 0x3e9311439ef6ec3fULL,
0xc9183a809fd3c00fULL, 0x83adf3f5260a01eeULL, 0xa6791941f4e8ef10ULL,
0x103ae97d0ca1cd5dULL, 0x2ce948121dee1b4aULL, 0x39738421dbf2bf53ULL,
0x093da2a6cf0cf5b4ULL, 0xcd9847d89cbcb45fULL, 0xf9561c078b2d8ae8ULL,
0x9c6a755a6971777fULL, 0xbc1ebaa0712ef0c5ULL, 0x72e61542abf963a6ULL,
0x78bb5fde229eb12eULL, 0x14ba94250fceb90dULL, 0x844d6697630e5282ULL,
0x98ea08026a1e032fULL, 0xf06bbea144217f5cULL, 0xdb6263d11ccb377aULL,
0x641c314b2b8ee083ULL, 0x320e96ab9b4770cfULL, 0x1ee7deb986a96b85ULL,
0xe96cf57a878c47b5ULL, 0xfdd6615f8842feb8ULL, 0xc83862965601dd1bULL,
0x2ea9f83e92572162ULL, 0xf876441142ff97fcULL, 0xeb2c455608357d9dULL,
0x5612a7e0b0c9904cULL, 0x6c01cbfb2d500823ULL, 0x4548a6a7fa037a2dULL,
0xabc4c6bf388b6ef4ULL, 0xbade77d4fdf8bebdULL, 0x799b07c8eb4cac3aULL,
0x0c9d87e805b19cf0ULL, 0xcb588aac106afa27ULL, 0xea0c1d40c1e76089ULL,
0x2869354a1e816f1aULL, 0xff96d17307fbc490ULL, 0x9f0a9d602f1a5043ULL,
0x96373fc6e016a5f7ULL, 0x5292dab8b3a6e41cULL, 0x9b8ae0382c752413ULL,
0x4f15ec3b7364a8a5ULL, 0x3fb349555724f12bULL, 0xc7c50d4415db66d7ULL,
0x92b7429ee379d1a7ULL, 0xd37f99611a15dfdaULL, 0x231427c05e34a086ULL,
0xa439a96d7b51d538ULL, 0xb403401077f01865ULL, 0xdda2aea5901d7902ULL,
0x0a5d4a9c8967d288ULL, 0xc265280adf660f93ULL, 0x8bb0094520d4e94eULL,
0x2a29856691385532ULL, 0x42a833c5bf072941ULL, 0x73c64d54622b7eb2ULL,
0x07e095624504536cULL, 0x8a905153e906f45aULL, 0x6f6123c16b3b2f1fULL,
0xc6e55552dc097bc3ULL, 0x4468feb133d16739ULL, 0xe211e7f0c7398829ULL,
0xa2f96419f7879b40ULL, 0x19074bdbc3ad38e9ULL, 0xf4ebc3f9474e0b0cULL,
0x43886bd376d53455ULL, 0xd8028beb5aa01046ULL, 0x51f23282f5cdc320ULL,
0xe7b1c2be0d84e16dULL, 0x081dfab006dee8a0ULL, 0x3b33340d544b857bULL,
0x7f5bcabc679ae242ULL, 0x0edd37c48a08a6d8ULL, 0x81ed43d9a9b33bc6ULL,
0xb1a3655ebd4d7121ULL, 0x69a1eeb5e7ed6167ULL, 0xf6ab73d5c8f73124ULL,
0x1a67a3e185c61fd5ULL, 0x2dc91004d43c065eULL, 0x0240b02c8fb93a28ULL,
0x90f7f2b26cc0eb8fULL, 0x3cd3a16f114fd617ULL, 0xaae49ea9f15973e0ULL,
0x06c0cd748cd64e78ULL, 0xda423bc7d5192a6eULL, 0xc345701c16b41287ULL,
0x6d2193ede4821537ULL, 0xfcf639494190e3acULL, 0x7c3b228621f1c57eULL,
0xfb16ac2b0494b0c0ULL, 0xbf7e529a3745d7f9ULL, 0x6881b6a32e3f7c73ULL,
0xca78d2bad9b8e733ULL, 0xbbfe2fc2342aa3a9ULL, 0x0dbddffecc6381e4ULL,
0x70a6a56e2440598eULL, 0xe4d12a844befc651ULL, 0x8c509c2765d0ba22ULL,
0xee8c6018c28814d9ULL, 0x17da7c1f49a59e31ULL, 0x609c4c1328e194d3ULL,
0xb3e3d57232f44b09ULL, 0x91d7aaa4a512f69bULL, 0x0ffd6fd243dabbccULL,
0x50d26a943c1fde34ULL, 0x6be15e9968545b4fULL, 0x94778fea6faf9fdfULL,
0x2b09dd7058ea4826ULL, 0x677cd9716de5c7bfULL, 0x49d5214fffb2e6ddULL,
0x0360e83a466b273cULL, 0x1fc786af4f7b7691ULL, 0xa0b9d435783ea168ULL,
0xd49f0c035f118cb6ULL, 0x01205816c9d21d14ULL, 0xac2453dd7d8f3d98ULL,
0x545217cc3f70aa64ULL, 0x26b4028e9489c9c2ULL, 0xdec2469fd6765e3eULL,
0x04807d58036f7450ULL, 0xe5f17292823ddb45ULL, 0xf30b569b024a5860ULL,
0x62dcfc3fa758aefbULL, 0xe84cad6c4e5e5aa1ULL, 0xccb81fce556ea94bULL,
0x53b282ae7a74f908ULL, 0x1b47fbf74c1402c1ULL, 0x368eebf39828049fULL,
0x7afbeff2ad278b06ULL, 0xbe5e0a8cfe97caedULL, 0xcfd8f7f413058e77ULL,
0xf78b2bc301252c30ULL, 0x4d555c17fcdd928dULL, 0x5f2f05467fc565f8ULL,
0x24f4b2a21b30f3eaULL, 0x860dd6bbecb768aaULL, 0x4c750401350f8f99ULL,
0x0000000000000000ULL, 0xecccd0344d312ef1ULL, 0xb5231806be220571ULL,
0xc105c030990d28afULL, 0x653c695de25cfd97ULL, 0x159acc33c61ca419ULL,
0xb89ec7f872418495ULL, 0xa9847693b73254dcULL, 0x58cf90243ac13694ULL,
0x59efc832f3132b80ULL, 0x5c4fed7c39ae42c4ULL, 0x828dabe3efd81cfaULL,
0xd13f294d95ace5f2ULL, 0x7d1b7a90e823d86aULL, 0xb643f03cf849224dULL,
0x3df3f979d89dcb03ULL, 0x7426d836272f2ddeULL, 0xdfe21e891fa4432aULL,
0x3a136c1b9d99986fULL, 0xfa36f43dcd46add4ULL, 0xc025982650df35bbULL,
0x856d3e81aadc4f96ULL, 0xc4a5e57e53b041ebULL, 0x4708168b75ba4005ULL,
0xaf44bbe73be41aa4ULL, 0x971767d029c4b8e3ULL, 0xb9be9feebb939981ULL,
0x215497ecd18d9aaeULL, 0x316e7e91dd2c57f3ULL, 0xcef8afe2dad79363ULL,
0x3853dc371220a247ULL, 0x35ee03c9de4323a3ULL, 0xe6919aa8c456fc79ULL,
0xe05157dc4880b201ULL, 0x7bdbb7e464f59612ULL, 0x127a59518318f775ULL,
0x332ecebd52956ddbULL, 0x8f30741d23bb9d1eULL, 0xd922d3fd93720d52ULL,
0x7746300c61440ae2ULL, 0x25d4eab4d2e2eefeULL, 0x75068020eefd30caULL,
0x135a01474acaea61ULL, 0x304e268714fe4ae7ULL, 0xa519f17bb283c82cULL,
0xdc82f6b359cf6416ULL, 0x5baf781e7caa11a8ULL, 0xb2c38d64fb26561dULL,
0x34ce5bdf17913eb7ULL, 0x5d6fb56af07c5fd0ULL, 0x182713cd0a7f25fdULL,
0x9e2ac576e6c84d57ULL, 0x9aaab82ee5a73907ULL, 0xa3d93c0f3e558654ULL,
0x7e7b92aaae48ff56ULL, 0x872d8ead256575beULL, 0x41c8dbfff96c0e7dULL,
0x99ca5014a3cc1e3bULL, 0x40e883e930be1369ULL, 0x1ca76e95091051adULL,
0x4e35b42dbab6b5b1ULL, 0x05a0254ecabd6944ULL, 0xe1710fca8152af15ULL,
0xf22b0e8dcb984574ULL, 0xb763a82a319b3f59ULL, 0x63fca4296e8ab3efULL,
0x9d4a2d4ca0a36a6bULL, 0xe331bfe60eeb953dULL, 0xd5bf541596c391a2ULL,
0xf5cb9bef8e9c1618ULL, 0x46284e9dbc685d11ULL, 0x2074cffa185f87baULL,
0xbd3ee2b6b8fcedd1ULL, 0xae64e3f1f23607b0ULL, 0xfeb68965ce29d984ULL,
0x55724fdaf6a2b770ULL, 0x29496d5cd753720eULL, 0xa75941573d3af204ULL,
0x8e102c0bea69800aULL, 0x111ab16bc573d049ULL, 0xd7ffe439197aab8aULL,
0xefac380e0b5a09cdULL, 0x48f579593660fbc9ULL, 0x22347fd697e6bd92ULL,
0x61bc1405e13389c7ULL, 0x4ab5c975b9d9c1e1ULL, 0x80cd1bcf606126d2ULL,
0x7186fd78ed92449aULL, 0x93971a882aabccb3ULL, 0x88d0e17f66bfce72ULL,
0x27945a985d5bd4d6ULL
}, {
0xde553f8c05a811c8ULL, 0x1906b59631b4f565ULL, 0x436e70d6b1964ff7ULL,
0x36d343cb8b1e9d85ULL, 0x843dfacc858aab5aULL, 0xfdfc95c299bfc7f9ULL,
0x0f634bdea1d51fa2ULL, 0x6d458b3b76efb3cdULL, 0x85c3f77cf8593f80ULL,
0x3c91315fbe737cb2ULL, 0x2148b03366ace398ULL, 0x18f8b8264c6761bfULL,
0xc830c1c495c9fb0fULL, 0x981a76102086a0aaULL, 0xaa16012142f35760ULL,
0x35cc54060c763cf6ULL, 0x42907d66cc45db2dULL, 0x8203d44b965af4bcULL,
0x3d6f3cefc3a0e868ULL, 0xbc73ff69d292bda7ULL, 0x8722ed0102e20a29ULL,
0x8f8185e8cd34deb7ULL, 0x9b0561dda7ee01d9ULL, 0x5335a0193227fad6ULL,
0xc9cecc74e81a6fd5ULL, 0x54f5832e5c2431eaULL, 0x99e47ba05d553470ULL,
0xf7bee756acd226ceULL, 0x384e05a5571816fdULL, 0xd1367452a47d0e6aULL,
0xf29fde1c386ad85bULL, 0x320c77316275f7caULL, 0xd0c879e2d9ae9ab0ULL,
0xdb7406c69110ef5dULL, 0x45505e51a2461011ULL, 0xfc029872e46c5323ULL,
0xfa3cb6f5f7bc0cc5ULL, 0x031f17cd8768a173ULL, 0xbd8df2d9af41297dULL,
0x9d3b4f5ab43e5e3fULL, 0x4071671b36feee84ULL, 0x716207e7d3e3b83dULL,
0x48d20ff2f9283a1aULL, 0x27769eb4757cbc7eULL, 0x5c56ebc793f2e574ULL,
0xa48b474f9ef5dc18ULL, 0x52cbada94ff46e0cULL, 0x60c7da982d8199c6ULL,
0x0e9d466edc068b78ULL, 0x4eec2175eaf865fcULL, 0x550b8e9e21f7a530ULL,
0x6b7ba5bc653fec2bULL, 0x5eb7f1ba6949d0ddULL, 0x57ea94e3db4c9099ULL,
0xf640eae6d101b214ULL, 0xdd4a284182c0b0bbULL, 0xff1d8fbf6304f250ULL,
0xb8accb933bf9d7e8ULL, 0xe8867c478eb68c4dULL, 0x3f8e2692391bddc1ULL,
0xcb2fd60912a15a7cULL, 0xaec935dbab983d2fULL, 0xf55ffd2b56691367ULL,
0x80e2ce366ce1c115ULL, 0x179bf3f8edb27e1dULL, 0x01fe0db07dd394daULL,
0xda8a0b76ecc37b87ULL, 0x44ae53e1df9584cbULL, 0xb310b4b77347a205ULL,
0xdfab323c787b8512ULL, 0x3b511268d070b78eULL, 0x65e6e3d2b9396753ULL,
0x6864b271e2574d58ULL, 0x259784c98fc789d7ULL, 0x02e11a7dfabb35a9ULL,
0x8841a6dfa337158bULL, 0x7ade78c39b5dcdd0ULL, 0xb7cf804d9a2cc84aULL,
0x20b6bd831b7f7742ULL, 0x75bd331d3a88d272ULL, 0x418f6aab4b2d7a5eULL,
0xd9951cbb6babdaf4ULL, 0xb6318dfde7ff5c90ULL, 0x1f389b112264aa83ULL,
0x492c024284fbaec0ULL, 0xe33a0363c608f9a0ULL, 0x2688930408af28a4ULL,
0xc7538a1a341ce4adULL, 0x5da8e677ee2171aeULL, 0x8c9e92254a5c7fc4ULL,
0x63d8cd55aae938b5ULL, 0x29ebd8daa97a3706ULL, 0x959827b37be88aa1ULL,
0x1484e4356adadf6eULL, 0xa7945082199d7d6bULL, 0xbf6ce8a455fa1cd4ULL,
0x9cc542eac9edcae5ULL, 0x79c16f0e1c356ca3ULL, 0x89bfab6fdee48151ULL,
0xd4174d1830c5f0ffULL, 0x9258048415eb419dULL, 0x6139d72850520d1cULL,
0x6a85a80c18ec78f1ULL, 0xcd11f88e0171059aULL, 0xcceff53e7ca29140ULL,
0xd229639f2315af19ULL, 0x90b91ef9ef507434ULL, 0x5977d28d074a1be1ULL,
0x311360fce51d56b9ULL, 0xc093a92d5a1f2f91ULL, 0x1a19a25bb6dc5416ULL,
0xeb996b8a09de2d3eULL, 0xfee3820f1ed7668aULL, 0xd7085ad5b7ad518cULL,
0x7fff41890fe53345ULL, 0xec5948bd67dde602ULL, 0x2fd5f65dbaaa68e0ULL,
0xa5754affe32648c2ULL, 0xf8ddac880d07396cULL, 0x6fa491468c548664ULL,
0x0c7c5c1326bdbed1ULL, 0x4a33158f03930fb3ULL, 0x699abfc19f84d982ULL,
0xe4fa2054a80b329cULL, 0x6707f9af438252faULL, 0x08a368e9cfd6d49eULL,
0x47b1442c58fd25b8ULL, 0xbbb3dc5ebc91769bULL, 0x1665fe489061eac7ULL,
0x33f27a811fa66310ULL, 0x93a609346838d547ULL, 0x30ed6d4c98cec263ULL,
0x1dd9816cd8df9f2aULL, 0x94662a03063b1e7bULL, 0x83fdd9fbeb896066ULL,
0x7b207573e68e590aULL, 0x5f49fc0a149a4407ULL, 0x343259b671a5a82cULL,
0xfbc2bb458a6f981fULL, 0xc272b350a0a41a38ULL, 0x3aaf1fd8ada32354ULL,
0x6cbb868b0b3c2717ULL, 0xa2b569c88d2583feULL, 0xf180c9d1bf027928ULL,
0xaf37386bd64ba9f5ULL, 0x12bacab2790a8088ULL, 0x4c0d3b0810435055ULL,
0xb2eeb9070e9436dfULL, 0xc5b29067cea7d104ULL, 0xdcb425f1ff132461ULL,
0x4f122cc5972bf126ULL, 0xac282fa651230886ULL, 0xe7e537992f6393efULL,
0xe61b3a2952b00735ULL, 0x709c0a57ae302ce7ULL, 0xe02514ae416058d3ULL,
0xc44c9dd7b37445deULL, 0x5a68c5408022ba92ULL, 0x1c278cdca50c0bf0ULL,
0x6e5a9cf6f18712beULL, 0x86dce0b17f319ef3ULL, 0x2d34ec2040115d49ULL,
0x4bcd183f7e409b69ULL, 0x2815d56ad4a9a3dcULL, 0x24698979f2141d0dULL,
0x0000000000000000ULL, 0x1ec696a15fb73e59ULL, 0xd86b110b16784e2eULL,
0x8e7f8858b0e74a6dULL, 0x063e2e8713d05fe6ULL, 0xe2c40ed3bbdb6d7aULL,
0xb1f1aeca89fc97acULL, 0xe1db191e3cb3cc09ULL, 0x6418ee62c4eaf389ULL,
0xc6ad87aa49cf7077ULL, 0xd6f65765ca7ec556ULL, 0x9afb6c6dda3d9503ULL,
0x7ce05644888d9236ULL, 0x8d609f95378feb1eULL, 0x23a9aa4e9c17d631ULL,
0x6226c0e5d73aac6fULL, 0x56149953a69f0443ULL, 0xeeb852c09d66d3abULL,
0x2b0ac2a753c102afULL, 0x07c023376e03cb3cULL, 0x2ccae1903dc2c993ULL,
0xd3d76e2f5ec63bc3ULL, 0x9e2458973356ff4cULL, 0xa66a5d32644ee9b1ULL,
0x0a427294356de137ULL, 0x783f62be61e6f879ULL, 0x1344c70204d91452ULL,
0x5b96c8f0fdf12e48ULL, 0xa90916ecc59bf613ULL, 0xbe92e5142829880eULL,
0x727d102a548b194eULL, 0x1be7afebcb0fc0ccULL, 0x3e702b2244c8491bULL,
0xd5e940a84d166425ULL, 0x66f9f41f3e51c620ULL, 0xabe80c913f20c3baULL,
0xf07ec461c2d1edf2ULL, 0xf361d3ac45b94c81ULL, 0x0521394a94b8fe95ULL,
0xadd622162cf09c5cULL, 0xe97871f7f3651897ULL, 0xf4a1f09b2bba87bdULL,
0x095d6559b2054044ULL, 0x0bbc7f2448be75edULL, 0x2af4cf172e129675ULL,
0x157ae98517094bb4ULL, 0x9fda55274e856b96ULL, 0x914713499283e0eeULL,
0xb952c623462a4332ULL, 0x74433ead475b46a8ULL, 0x8b5eb112245fb4f8ULL,
0xa34b6478f0f61724ULL, 0x11a5dd7ffe6221fbULL, 0xc16da49d27ccbb4bULL,
0x76a224d0bde07301ULL, 0x8aa0bca2598c2022ULL, 0x4df336b86d90c48fULL,
0xea67663a740db9e4ULL, 0xef465f70e0b54771ULL, 0x39b008152acb8227ULL,
0x7d1e5bf4f55e06ecULL, 0x105bd0cf83b1b521ULL, 0x775c2960c033e7dbULL,
0x7e014c397236a79fULL, 0x811cc386113255cfULL, 0xeda7450d1a0e72d8ULL,
0x5889df3d7a998f3bULL, 0x2e2bfbedc779fc3aULL, 0xce0eef438619a4e9ULL,
0x372d4e7bf6cd095fULL, 0x04df34fae96b6a4fULL, 0xf923a13870d4adb6ULL,
0xa1aa7e050a4d228dULL, 0xa8f71b5cb84862c9ULL, 0xb52e9a306097fde3ULL,
0x0d8251a35b6e2a0bULL, 0x2257a7fee1c442ebULL, 0x73831d9a29588d94ULL,
0x51d4ba64c89ccf7fULL, 0x502ab7d4b54f5ba5ULL, 0x97793dce8153bf08ULL,
0xe5042de4d5d8a646ULL, 0x9687307efc802bd2ULL, 0xa05473b5779eb657ULL,
0xb4d097801d446939ULL, 0xcff0e2f3fbca3033ULL, 0xc38cbee0dd778ee2ULL,
0x464f499c252eb162ULL, 0xcad1dbb96f72cea6ULL, 0xba4dd1eec142e241ULL,
0xb00fa37af42f0376ULL
}, {
0xcce4cd3aa968b245ULL, 0x089d5484e80b7fafULL, 0x638246c1b3548304ULL,
0xd2fe0ec8c2355492ULL, 0xa7fbdf7ff2374eeeULL, 0x4df1600c92337a16ULL,
0x84e503ea523b12fbULL, 0x0790bbfd53ab0c4aULL, 0x198a780f38f6ea9dULL,
0x2ab30c8f55ec48cbULL, 0xe0f7fed6b2c49db5ULL, 0xb6ecf3f422cadbdcULL,
0x409c9a541358df11ULL, 0xd3ce8a56dfde3fe3ULL, 0xc3e9224312c8c1a0ULL,
0x0d6dfa58816ba507ULL, 0xddf3e1b179952777ULL, 0x04c02a42748bb1d9ULL,
0x94c2abff9f2decb8ULL, 0x4f91752da8f8acf4ULL, 0x78682befb169bf7bULL,
0xe1c77a48af2ff6c4ULL, 0x0c5d7ec69c80ce76ULL, 0x4cc1e4928fd81167ULL,
0xfeed3d24d9997b62ULL, 0x518bb6dfc3a54a23ULL, 0x6dbf2d26151f9b90ULL,
0xb5bc624b05ea664fULL, 0xe86aaa525acfe21aULL, 0x4801ced0fb53a0beULL,
0xc91463e6c00868edULL, 0x1027a815cd16fe43ULL, 0xf67069a0319204cdULL,
0xb04ccc976c8abce7ULL, 0xc0b9b3fc35e87c33ULL, 0xf380c77c58f2de65ULL,
0x50bb3241de4e2152ULL, 0xdf93f490435ef195ULL, 0xf1e0d25d62390887ULL,
0xaf668bfb1a3c3141ULL, 0xbc11b251f00a7291ULL, 0x73a5eed47e427d47ULL,
0x25bee3f6ee4c3b2eULL, 0x43cc0beb34786282ULL, 0xc824e778dde3039cULL,
0xf97d86d98a327728ULL, 0xf2b043e24519b514ULL, 0xe297ebf7880f4b57ULL,
0x3a94a49a98fab688ULL, 0x868516cb68f0c419ULL, 0xeffa11af0964ee50ULL,
0xa4ab4ec0d517f37dULL, 0xa9c6b498547c567aULL, 0x8e18424f80fbbbb6ULL,
0x0bcdc53bcf2bc23cULL, 0x137739aaea3643d0ULL, 0x2c1333ec1bac2ff0ULL,
0x8d48d3f0a7db0625ULL, 0x1e1ac3f26b5de6d7ULL, 0xf520f81f16b2b95eULL,
0x9f0f6ec450062e84ULL, 0x0130849e1deb6b71ULL, 0xd45e31ab8c7533a9ULL,
0x652279a2fd14e43fULL, 0x3209f01e70f1c927ULL, 0xbe71a770cac1a473ULL,
0x0e3d6be7a64b1894ULL, 0x7ec8148cff29d840ULL, 0xcb7476c7fac3be0fULL,
0x72956a4a63a91636ULL, 0x37f95ec21991138fULL, 0x9e3fea5a4ded45f5ULL,
0x7b38ba50964902e8ULL, 0x222e580bbde73764ULL, 0x61e253e0899f55e6ULL,
0xfc8d2805e352ad80ULL, 0x35994be3235ac56dULL, 0x09add01af5e014deULL,
0x5e8659a6780539c6ULL, 0xb17c48097161d796ULL, 0x026015213acbd6e2ULL,
0xd1ae9f77e515e901ULL, 0xb7dc776a3f21b0adULL, 0xaba6a1b96eb78098ULL,
0x9bcf4486248d9f5dULL, 0x582666c536455efdULL, 0xfdbdac9bfeb9c6f1ULL,
0xc47999be4163cdeaULL, 0x765540081722a7efULL, 0x3e548ed8ec710751ULL,
0x3d041f67cb51bac2ULL, 0x7958af71ac82d40aULL, 0x36c9da5c047a78feULL,
0xed9a048e33af38b2ULL, 0x26ee7249c96c86bdULL, 0x900281bdeba65d61ULL,
0x11172c8bd0fd9532ULL, 0xea0abf73600434f8ULL, 0x42fc8f75299309f3ULL,
0x34a9cf7d3eb1ae1cULL, 0x2b838811480723baULL, 0x5ce64c8742ceef24ULL,
0x1adae9b01fd6570eULL, 0x3c349bf9d6bad1b3ULL, 0x82453c891c7b75c0ULL,
0x97923a40b80d512bULL, 0x4a61dbf1c198765cULL, 0xb48ce6d518010d3eULL,
0xcfb45c858e480fd6ULL, 0xd933cbf30d1e96aeULL, 0xd70ea014ab558e3aULL,
0xc189376228031742ULL, 0x9262949cd16d8b83ULL, 0xeb3a3bed7def5f89ULL,
0x49314a4ee6b8cbcfULL, 0xdcc3652f647e4c06ULL, 0xda635a4c2a3e2b3dULL,
0x470c21a940f3d35bULL, 0x315961a157d174b4ULL, 0x6672e81dda3459acULL,
0x5b76f77a1165e36eULL, 0x445cb01667d36ec8ULL, 0xc5491d205c88a69bULL,
0x456c34887a3805b9ULL, 0xffddb9bac4721013ULL, 0x99af51a71e4649bfULL,
0xa15be01cbc7729d5ULL, 0x52db2760e485f7b0ULL, 0x8c78576eba306d54ULL,
0xae560f6507d75a30ULL, 0x95f22f6182c687c9ULL, 0x71c5fbf54489aba5ULL,
0xca44f259e728d57eULL, 0x88b87d2ccebbdc8dULL, 0xbab18d32be4a15aaULL,
0x8be8ec93e99b611eULL, 0x17b713e89ebdf209ULL, 0xb31c5d284baa0174ULL,
0xeeca9531148f8521ULL, 0xb8d198138481c348ULL, 0x8988f9b2d350b7fcULL,
0xb9e11c8d996aa839ULL, 0x5a4673e40c8e881fULL, 0x1687977683569978ULL,
0xbf4123eed72acf02ULL, 0x4ea1f1b3b513c785ULL, 0xe767452be16f91ffULL,
0x7505d1b730021a7cULL, 0xa59bca5ec8fc980cULL, 0xad069eda20f7e7a3ULL,
0x38f4b1bba231606aULL, 0x60d2d77e94743e97ULL, 0x9affc0183966f42cULL,
0x248e6768f3a7505fULL, 0xcdd449a4b483d934ULL, 0x87b59255751baf68ULL,
0x1bea6d2e023d3c7fULL, 0x6b1f12455b5ffcabULL, 0x743555292de9710dULL,
0xd8034f6d10f5fddfULL, 0xc6198c9f7ba81b08ULL, 0xbb8109aca3a17edbULL,
0xfa2d1766ad12cabbULL, 0xc729080166437079ULL, 0x9c5fff7b77269317ULL,
0x0000000000000000ULL, 0x15d706c9a47624ebULL, 0x6fdf38072fd44d72ULL,
0x5fb6dd3865ee52b7ULL, 0xa33bf53d86bcff37ULL, 0xe657c1b5fc84fa8eULL,
0xaa962527735cebe9ULL, 0x39c43525bfda0b1bULL, 0x204e4d2a872ce186ULL,
0x7a083ece8ba26999ULL, 0x554b9c9db72efbfaULL, 0xb22cd9b656416a05ULL,
0x96a2bedea5e63a5aULL, 0x802529a826b0a322ULL, 0x8115ad363b5bc853ULL,
0x8375b81701901eb1ULL, 0x3069e53f4a3a1fc5ULL, 0xbd2136cfede119e0ULL,
0x18bafc91251d81ecULL, 0x1d4a524d4c7d5b44ULL, 0x05f0aedc6960daa8ULL,
0x29e39d3072ccf558ULL, 0x70f57f6b5962c0d4ULL, 0x989fd53903ad22ceULL,
0xf84d024797d91c59ULL, 0x547b1803aac5908bULL, 0xf0d056c37fd263f6ULL,
0xd56eb535919e58d8ULL, 0x1c7ad6d351963035ULL, 0x2e7326cd2167f912ULL,
0xac361a443d1c8cd2ULL, 0x697f076461942a49ULL, 0x4b515f6fdc731d2dULL,
0x8ad8680df4700a6fULL, 0x41ac1eca0eb3b460ULL, 0x7d988533d80965d3ULL,
0xa8f6300649973d0bULL, 0x7765c4960ac9cc9eULL, 0x7ca801adc5e20ea2ULL,
0xdea3700e5eb59ae4ULL, 0xa06b6482a19c42a4ULL, 0x6a2f96db46b497daULL,
0x27def6d7d487edccULL, 0x463ca5375d18b82aULL, 0xa6cb5be1efdc259fULL,
0x53eba3fef96e9cc1ULL, 0xce84d81b93a364a7ULL, 0xf4107c810b59d22fULL,
0x333974806d1aa256ULL, 0x0f0def79bba073e5ULL, 0x231edc95a00c5c15ULL,
0xe437d494c64f2c6cULL, 0x91320523f64d3610ULL, 0x67426c83c7df32ddULL,
0x6eefbc99323f2603ULL, 0x9d6f7be56acdf866ULL, 0x5916e25b2bae358cULL,
0x7ff89012e2c2b331ULL, 0x035091bf2720bd93ULL, 0x561b0d22900e4669ULL,
0x28d319ae6f279e29ULL, 0x2f43a2533c8c9263ULL, 0xd09e1be9f8fe8270ULL,
0xf740ed3e2c796fbcULL, 0xdb53ded237d5404cULL, 0x62b2c25faebfe875ULL,
0x0afd41a5d2c0a94dULL, 0x6412fd3ce0ff8f4eULL, 0xe3a76f6995e42026ULL,
0x6c8fa9b808f4f0e1ULL, 0xc2d9a6dd0f23aad1ULL, 0x8f28c6d19d10d0c7ULL,
0x85d587744fd0798aULL, 0xa20b71a39b579446ULL, 0x684f83fa7c7f4138ULL,
0xe507500adba4471dULL, 0x3f640a46f19a6c20ULL, 0x1247bd34f7dd28a1ULL,
0x2d23b77206474481ULL, 0x93521002cc86e0f2ULL, 0x572b89bc8de52d18ULL,
0xfb1d93f8b0f9a1caULL, 0xe95a2ecc4724896bULL, 0x3ba420048511ddf9ULL,
0xd63e248ab6bee54bULL, 0x5dd6c8195f258455ULL, 0x06a03f634e40673bULL,
0x1f2a476c76b68da6ULL, 0x217ec9b49ac78af7ULL, 0xecaa80102e4453c3ULL,
0x14e78257b99d4f9aULL
}, {
0x20329b2cc87bba05ULL, 0x4f5eb6f86546a531ULL, 0xd4f44775f751b6b1ULL,
0x8266a47b850dfa8bULL, 0xbb986aa15a6ca985ULL, 0xc979eb08f9ae0f99ULL,
0x2da6f447a2375ea1ULL, 0x1e74275dcd7d8576ULL, 0xbc20180a800bc5f8ULL,
0xb4a2f701b2dc65beULL, 0xe726946f981b6d66ULL, 0x48e6c453bf21c94cULL,
0x42cad9930f0a4195ULL, 0xefa47b64aacccd20ULL, 0x71180a8960409a42ULL,
0x8bb3329bf6a44e0cULL, 0xd34c35de2d36daccULL, 0xa92f5b7cbc23dc96ULL,
0xb31a85aa68bb09c3ULL, 0x13e04836a73161d2ULL, 0xb24dfc4129c51d02ULL,
0x8ae44b70b7da5acdULL, 0xe671ed84d96579a7ULL, 0xa4bb3417d66f3832ULL,
0x4572ab38d56d2de8ULL, 0xb1b47761ea47215cULL, 0xe81c09cf70aba15dULL,
0xffbdb872ce7f90acULL, 0xa8782297fd5dc857ULL, 0x0d946f6b6a4ce4a4ULL,
0xe4df1f4f5b995138ULL, 0x9ebc71edca8c5762ULL, 0x0a2c1dc0b02b88d9ULL,
0x3b503c115d9d7b91ULL, 0xc64376a8111ec3a2ULL, 0xcec199a323c963e4ULL,
0xdc76a87ec58616f7ULL, 0x09d596e073a9b487ULL, 0x14583a9d7d560dafULL,
0xf4c6dc593f2a0cb4ULL, 0xdd21d19584f80236ULL, 0x4a4836983ddde1d3ULL,
0xe58866a41ae745f9ULL, 0xf591a5b27e541875ULL, 0x891dc05074586693ULL,
0x5b068c651810a89eULL, 0xa30346bc0c08544fULL, 0x3dbf3751c684032dULL,
0x2a1e86ec785032dcULL, 0xf73f5779fca830eaULL, 0xb60c05ca30204d21ULL,
0x0cc316802b32f065ULL, 0x8770241bdd96be69ULL, 0xb861e18199ee95dbULL,
0xf805cad91418fcd1ULL, 0x29e70dccbbd20e82ULL, 0xc7140f435060d763ULL,
0x0f3a9da0e8b0cc3bULL, 0xa2543f574d76408eULL, 0xbd7761e1c175d139ULL,
0x4b1f4f737ca3f512ULL, 0x6dc2df1f2fc137abULL, 0xf1d05c3967b14856ULL,
0xa742bf3715ed046cULL, 0x654030141d1697edULL, 0x07b872abda676c7dULL,
0x3ce84eba87fa17ecULL, 0xc1fb0403cb79afdfULL, 0x3e46bc7105063f73ULL,
0x278ae987121cd678ULL, 0xa1adb4778ef47cd0ULL, 0x26dd906c5362c2b9ULL,
0x05168060589b44e2ULL, 0xfbfc41f9d79ac08fULL, 0x0e6de44ba9ced8faULL,
0x9feb08068bf243a3ULL, 0x7b341749d06b129bULL, 0x229c69e74a87929aULL,
0xe09ee6c4427c011bULL, 0x5692e30e725c4c3aULL, 0xda99a33e5e9f6e4bULL,
0x353dd85af453a36bULL, 0x25241b4c90e0fee7ULL, 0x5de987258309d022ULL,
0xe230140fc0802984ULL, 0x93281e86a0c0b3c6ULL, 0xf229d719a4337408ULL,
0x6f6c2dd4ad3d1f34ULL, 0x8ea5b2fbae3f0aeeULL, 0x8331dd90c473ee4aULL,
0x346aa1b1b52db7aaULL, 0xdf8f235e06042aa9ULL, 0xcc6f6b68a1354b7bULL,
0x6c95a6f46ebf236aULL, 0x52d31a856bb91c19ULL, 0x1a35ded6d498d555ULL,
0xf37eaef2e54d60c9ULL, 0x72e181a9a3c2a61cULL, 0x98537aad51952fdeULL,
0x16f6c856ffaa2530ULL, 0xd960281e9d1d5215ULL, 0x3a0745fa1ce36f50ULL,
0x0b7b642bf1559c18ULL, 0x59a87eae9aec8001ULL, 0x5e100c05408bec7cULL,
0x0441f98b19e55023ULL, 0xd70dcc5534d38aefULL, 0x927f676de1bea707ULL,
0x9769e70db925e3e5ULL, 0x7a636ea29115065aULL, 0x468b201816ef11b6ULL,
0xab81a9b73edff409ULL, 0xc0ac7de88a07bb1eULL, 0x1f235eb68c0391b7ULL,
0x6056b074458dd30fULL, 0xbe8eeac102f7ed67ULL, 0xcd381283e04b5fbaULL,
0x5cbefecec277c4e3ULL, 0xd21b4c356c48ce0dULL, 0x1019c31664b35d8cULL,
0x247362a7d19eea26ULL, 0xebe582efb3299d03ULL, 0x02aef2cb82fc289fULL,
0x86275df09ce8aaa8ULL, 0x28b07427faac1a43ULL, 0x38a9b7319e1f47cfULL,
0xc82e92e3b8d01b58ULL, 0x06ef0b409b1978bcULL, 0x62f842bfc771fb90ULL,
0x9904034610eb3b1fULL, 0xded85ab5477a3e68ULL, 0x90d195a663428f98ULL,
0x5384636e2ac708d8ULL, 0xcbd719c37b522706ULL, 0xae9729d76644b0ebULL,
0x7c8c65e20a0c7ee6ULL, 0x80c856b007f1d214ULL, 0x8c0b40302cc32271ULL,
0xdbcedad51fe17a8aULL, 0x740e8ae938dbdea0ULL, 0xa615c6dc549310adULL,
0x19cc55f6171ae90bULL, 0x49b1bdb8fe5fdd8dULL, 0xed0a89af2830e5bfULL,
0x6a7aadb4f5a65bd6ULL, 0x7e22972988f05679ULL, 0xf952b3325566e810ULL,
0x39fecedadf61530eULL, 0x6101c99f04f3c7ceULL, 0x2e5f7f6761b562ffULL,
0xf08725d226cf5c97ULL, 0x63af3b54860fef51ULL, 0x8ff2cb10ef411e2fULL,
0x884ab9bb35267252ULL, 0x4df04433e7ba8daeULL, 0x9afd8866d3690741ULL,
0x66b9bb34de94abb3ULL, 0x9baaf18d92171380ULL, 0x543c11c5f0a064a5ULL,
0x17a1b1bdbed431f1ULL, 0xb5f58eeaf3a2717fULL, 0xc355f6c849858740ULL,
0xec5df044694ef17eULL, 0xd83751f5dc6346d4ULL, 0xfc4433520dfdacf2ULL,
0x0000000000000000ULL, 0x5a51f58e596ebc5fULL, 0x3285aaf12e34cf16ULL,
0x8d5c39db6dbd36b0ULL, 0x12b731dde64f7513ULL, 0x94906c2d7aa7dfbbULL,
0x302b583aacc8e789ULL, 0x9d45facd090e6b3cULL, 0x2165e2c78905aec4ULL,
0x68d45f7f775a7349ULL, 0x189b2c1d5664fdcaULL, 0xe1c99f2f030215daULL,
0x6983269436246788ULL, 0x8489af3b1e148237ULL, 0xe94b702431d5b59cULL,
0x33d2d31a6f4adbd7ULL, 0xbfd9932a4389f9a6ULL, 0xb0e30e8aab39359dULL,
0xd1e2c715afcaf253ULL, 0x150f43763c28196eULL, 0xc4ed846393e2eb3dULL,
0x03f98b20c3823c5eULL, 0xfd134ab94c83b833ULL, 0x556b682eb1de7064ULL,
0x36c4537a37d19f35ULL, 0x7559f30279a5ca61ULL, 0x799ae58252973a04ULL,
0x9c12832648707ffdULL, 0x78cd9c6913e92ec5ULL, 0x1d8dac7d0effb928ULL,
0x439da0784e745554ULL, 0x413352b3cc887dcbULL, 0xbacf134a1b12bd44ULL,
0x114ebafd25cd494dULL, 0x2f08068c20cb763eULL, 0x76a07822ba27f63fULL,
0xeab2fb04f25789c2ULL, 0xe3676de481fe3d45ULL, 0x1b62a73d95e6c194ULL,
0x641749ff5c68832cULL, 0xa5ec4dfc97112cf3ULL, 0xf6682e92bdd6242bULL,
0x3f11c59a44782bb2ULL, 0x317c21d1edb6f348ULL, 0xd65ab5be75ad9e2eULL,
0x6b2dd45fb4d84f17ULL, 0xfaab381296e4d44eULL, 0xd0b5befeeeb4e692ULL,
0x0882ef0b32d7a046ULL, 0x512a91a5a83b2047ULL, 0x963e9ee6f85bf724ULL,
0x4e09cf132438b1f0ULL, 0x77f701c9fb59e2feULL, 0x7ddb1c094b726a27ULL,
0x5f4775ee01f5f8bdULL, 0x9186ec4d223c9b59ULL, 0xfeeac1998f01846dULL,
0xac39db1ce4b89874ULL, 0xb75b7c21715e59e0ULL, 0xafc0503c273aa42aULL,
0x6e3b543fec430bf5ULL, 0x704f7362213e8e83ULL, 0x58ff0745db9294c0ULL,
0x67eec2df9feabf72ULL, 0xa0facd9ccf8a6811ULL, 0xb936986ad890811aULL,
0x95c715c63bd9cb7aULL, 0xca8060283a2c33c7ULL, 0x507de84ee9453486ULL,
0x85ded6d05f6a96f6ULL, 0x1cdad5964f81ade9ULL, 0xd5a33e9eb62fa270ULL,
0x40642b588df6690aULL, 0x7f75eec2c98e42b8ULL, 0x2cf18dace3494a60ULL,
0x23cb100c0bf9865bULL, 0xeef3028febb2d9e1ULL, 0x4425d2d394133929ULL,
0xaad6d05c7fa1e0c8ULL, 0xad6ea2f7a5c68cb5ULL, 0xc2028f2308fb9381ULL,
0x819f2f5b468fc6d5ULL, 0xc5bafd88d29cfffcULL, 0x47dc59f357910577ULL,
0x2b49ff07392e261dULL, 0x57c59ae5332258fbULL, 0x73b6f842e2bcb2ddULL,
0xcf96e04862b77725ULL, 0x4ca73dd8a6c4996fULL, 0x015779eb417e14c1ULL,
0x37932a9176af8bf4ULL
}, {
0x190a2c9b249df23eULL, 0x2f62f8b62263e1e9ULL, 0x7a7f754740993655ULL,
0x330b7ba4d5564d9fULL, 0x4c17a16a46672582ULL, 0xb22f08eb7d05f5b8ULL,
0x535f47f40bc148ccULL, 0x3aec5d27d4883037ULL, 0x10ed0a1825438f96ULL,
0x516101f72c233d17ULL, 0x13cc6f949fd04eaeULL, 0x739853c441474bfdULL,
0x653793d90d3f5b1bULL, 0x5240647b96b0fc2fULL, 0x0c84890ad27623e0ULL,
0xd7189b32703aaea3ULL, 0x2685de3523bd9c41ULL, 0x99317c5b11bffefaULL,
0x0d9baa854f079703ULL, 0x70b93648fbd48ac5ULL, 0xa80441fce30bc6beULL,
0x7287704bdc36ff1eULL, 0xb65384ed33dc1f13ULL, 0xd36417343ee34408ULL,
0x39cd38ab6e1bf10fULL, 0x5ab861770a1f3564ULL, 0x0ebacf09f594563bULL,
0xd04572b884708530ULL, 0x3cae9722bdb3af47ULL, 0x4a556b6f2f5cbaf2ULL,
0xe1704f1f76c4bd74ULL, 0x5ec4ed7144c6dfcfULL, 0x16afc01d4c7810e6ULL,
0x283f113cd629ca7aULL, 0xaf59a8761741ed2dULL, 0xeed5a3991e215facULL,
0x3bf37ea849f984d4ULL, 0xe413e096a56ce33cULL, 0x2c439d3a98f020d1ULL,
0x637559dc6404c46bULL, 0x9e6c95d1e5f5d569ULL, 0x24bb9836045fe99aULL,
0x44efa466dac8ecc9ULL, 0xc6eab2a5c80895d6ULL, 0x803b50c035220cc4ULL,
0x0321658cba93c138ULL, 0x8f9ebc465dc7ee1cULL, 0xd15a5137190131d3ULL,
0x0fa5ec8668e5e2d8ULL, 0x91c979578d1037b1ULL, 0x0642ca05693b9f70ULL,
0xefca80168350eb4fULL, 0x38d21b24f36a45ecULL, 0xbeab81e1af73d658ULL,
0x8cbfd9cae7542f24ULL, 0xfd19cc0d81f11102ULL, 0x0ac6430fbb4dbc90ULL,
0x1d76a09d6a441895ULL, 0x2a01573ff1cbbfa1ULL, 0xb572e161894fde2bULL,
0x8124734fa853b827ULL, 0x614b1fdf43e6b1b0ULL, 0x68ac395c4238cc18ULL,
0x21d837bfd7f7b7d2ULL, 0x20c714304a860331ULL, 0x5cfaab726324aa14ULL,
0x74c5ba4eb50d606eULL, 0xf3a3030474654739ULL, 0x23e671bcf015c209ULL,
0x45f087e947b9582aULL, 0xd8bd77b418df4c7bULL, 0xe06f6c90ebb50997ULL,
0x0bd96080263c0873ULL, 0x7e03f9410e40dcfeULL, 0xb8e94be4c6484928ULL,
0xfb5b0608e8ca8e72ULL, 0x1a2b49179e0e3306ULL, 0x4e29e76961855059ULL,
0x4f36c4e6fcf4e4baULL, 0x49740ee395cf7bcaULL, 0xc2963ea386d17f7dULL,
0x90d65ad810618352ULL, 0x12d34c1b02a1fa4dULL, 0xfa44258775bb3a91ULL,
0x18150f14b9ec46ddULL, 0x1491861e6b9a653dULL, 0x9a1019d7ab2c3fc2ULL,
0x3668d42d06fe13d7ULL, 0xdcc1fbb25606a6d0ULL, 0x969490dd795a1c22ULL,
0x3549b1a1bc6dd2efULL, 0xc94f5e23a0ed770eULL, 0xb9f6686b5b39fdcbULL,
0xc4d4f4a6efeae00dULL, 0xe732851a1fff2204ULL, 0x94aad6de5eb869f9ULL,
0x3f8ff2ae07206e7fULL, 0xfe38a9813b62d03aULL, 0xa7a1ad7a8bee2466ULL,
0x7b6056c8dde882b6ULL, 0x302a1e286fc58ca7ULL, 0x8da0fa457a259bc7ULL,
0xb3302b64e074415bULL, 0x5402ae7eff8b635fULL, 0x08f8050c9cafc94bULL,
0xae468bf98a3059ceULL, 0x88c355cca98dc58fULL, 0xb10e6d67c7963480ULL,
0xbad70de7e1aa3cf3ULL, 0xbfb4a26e320262bbULL, 0xcb711820870f02d5ULL,
0xce12b7a954a75c9dULL, 0x563ce87dd8691684ULL, 0x9f73b65e7884618aULL,
0x2b1e74b06cba0b42ULL, 0x47cec1ea605b2df1ULL, 0x1c698312f735ac76ULL,
0x5fdbcefed9b76b2cULL, 0x831a354c8fb1cdfcULL, 0x820516c312c0791fULL,
0xb74ca762aeadabf0ULL, 0xfc06ef821c80a5e1ULL, 0x5723cbf24518a267ULL,
0x9d4df05d5f661451ULL, 0x588627742dfd40bfULL, 0xda8331b73f3d39a0ULL,
0x17b0e392d109a405ULL, 0xf965400bcf28fba9ULL, 0x7c3dbf4229a2a925ULL,
0x023e460327e275dbULL, 0x6cd0b55a0ce126b3ULL, 0xe62da695828e96e7ULL,
0x42ad6e63b3f373b9ULL, 0xe50cc319381d57dfULL, 0xc5cbd729729b54eeULL,
0x46d1e265fd2a9912ULL, 0x6428b056904eeff8ULL, 0x8be23040131e04b7ULL,
0x6709d5da2add2ec0ULL, 0x075de98af44a2b93ULL, 0x8447dcc67bfbe66fULL,
0x6616f655b7ac9a23ULL, 0xd607b8bded4b1a40ULL, 0x0563af89d3a85e48ULL,
0x3db1b4ad20c21ba4ULL, 0x11f22997b8323b75ULL, 0x292032b34b587e99ULL,
0x7f1cdace9331681dULL, 0x8e819fc9c0b65affULL, 0xa1e3677fe2d5bb16ULL,
0xcd33d225ee349da5ULL, 0xd9a2543b85aef898ULL, 0x795e10cbfa0af76dULL,
0x25a4bbb9992e5d79ULL, 0x78413344677b438eULL, 0xf0826688cef68601ULL,
0xd27b34bba392f0ebULL, 0x551d8df162fad7bcULL, 0x1e57c511d0d7d9adULL,
0xdeffbdb171e4d30bULL, 0xf4feea8e802f6caaULL, 0xa480c8f6317de55eULL,
0xa0fc44f07fa40ff5ULL, 0x95b5f551c3c9dd1aULL, 0x22f952336d6476eaULL,
0x0000000000000000ULL, 0xa6be8ef5169f9085ULL, 0xcc2cf1aa73452946ULL,
0x2e7ddb39bf12550aULL, 0xd526dd3157d8db78ULL, 0x486b2d6c08becf29ULL,
0x9b0f3a58365d8b21ULL, 0xac78cdfaadd22c15ULL, 0xbc95c7e28891a383ULL,
0x6a927f5f65dab9c3ULL, 0xc3891d2c1ba0cb9eULL, 0xeaa92f9f50f8b507ULL,
0xcf0d9426c9d6e87eULL, 0xca6e3baf1a7eb636ULL, 0xab25247059980786ULL,
0x69b31ad3df4978fbULL, 0xe2512a93cc577c4cULL, 0xff278a0ea61364d9ULL,
0x71a615c766a53e26ULL, 0x89dc764334fc716cULL, 0xf87a638452594f4aULL,
0xf2bc208be914f3daULL, 0x8766b94ac1682757ULL, 0xbbc82e687cdb8810ULL,
0x626a7a53f9757088ULL, 0xa2c202f358467a2eULL, 0x4d0882e5db169161ULL,
0x09e7268301de7da8ULL, 0xe897699c771ac0dcULL, 0xc8507dac3d9cc3edULL,
0xc0a878a0a1330aa6ULL, 0x978bb352e42ba8c1ULL, 0xe9884a13ea6b743fULL,
0x279afdbabecc28a2ULL, 0x047c8c064ed9eaabULL, 0x507e2278b15289f4ULL,
0x599904fbb08cf45cULL, 0xbd8ae46d15e01760ULL, 0x31353da7f2b43844ULL,
0x8558ff49e68a528cULL, 0x76fbfc4d92ef15b5ULL, 0x3456922e211c660cULL,
0x86799ac55c1993b4ULL, 0x3e90d1219a51da9cULL, 0x2d5cbeb505819432ULL,
0x982e5fd48cce4a19ULL, 0xdb9c1238a24c8d43ULL, 0xd439febecaa96f9bULL,
0x418c0bef0960b281ULL, 0x158ea591f6ebd1deULL, 0x1f48e69e4da66d4eULL,
0x8afd13cf8e6fb054ULL, 0xf5e1c9011d5ed849ULL, 0xe34e091c5126c8afULL,
0xad67ee7530a398f6ULL, 0x43b24dec2e82c75aULL, 0x75da99c1287cd48dULL,
0x92e81cdb3783f689ULL, 0xa3dd217cc537cecdULL, 0x60543c50de970553ULL,
0x93f73f54aaf2426aULL, 0xa91b62737e7a725dULL, 0xf19d4507538732e2ULL,
0x77e4dfc20f9ea156ULL, 0x7d229ccdb4d31dc6ULL, 0x1b346a98037f87e5ULL,
0xedf4c615a4b29e94ULL, 0x4093286094110662ULL, 0xb0114ee85ae78063ULL,
0x6ff1d0d6b672e78bULL, 0x6dcf96d591909250ULL, 0xdfe09e3eec9567e8ULL,
0x3214582b4827f97cULL, 0xb46dc2ee143e6ac8ULL, 0xf6c0ac8da7cd1971ULL,
0xebb60c10cd8901e4ULL, 0xf7df8f023abcad92ULL, 0x9c52d3d2c217a0b2ULL,
0x6b8d5cd0f8ab0d20ULL, 0x3777f7a29b8fa734ULL, 0x011f238f9d71b4e3ULL,
0xc1b75b2f3c42be45ULL, 0x5de588fdfe551ef7ULL, 0x6eeef3592b035368ULL,
0xaa3a07ffc4e9b365ULL, 0xecebe59a39c32a77ULL, 0x5ba742f8976e8187ULL,
0x4b4a48e0b22d0e11ULL, 0xddded83dcb771233ULL, 0xa59feb79ac0c51bdULL,
0xc7f5912a55792135ULL
}, {
0x6d6ae04668a9b08aULL, 0x3ab3f04b0be8c743ULL, 0xe51e166b54b3c908ULL,
0xbe90a9eb35c2f139ULL, 0xb2c7066637f2bec1ULL, 0xaa6945613392202cULL,
0x9a28c36f3b5201ebULL, 0xddce5a93ab536994ULL, 0x0e34133ef6382827ULL,
0x52a02ba1ec55048bULL, 0xa2f88f97c4b2a177ULL, 0x8640e513ca2251a5ULL,
0xcdf1d36258137622ULL, 0xfe6cb708dedf8ddbULL, 0x8a174a9ec8121e5dULL,
0x679896036b81560eULL, 0x59ed033395795feeULL, 0x1dd778ab8b74edafULL,
0xee533ef92d9f926dULL, 0x2a8c79baf8a8d8f5ULL, 0x6bcf398e69b119f6ULL,
0xe20491742fafdd95ULL, 0x276488e0809c2aecULL, 0xea955b82d88f5cceULL,
0x7102c63a99d9e0c4ULL, 0xf9763017a5c39946ULL, 0x429fa2501f151b3dULL,
0x4659c72bea05d59eULL, 0x984b7fdccf5a6634ULL, 0xf742232953fbb161ULL,
0x3041860e08c021c7ULL, 0x747bfd9616cd9386ULL, 0x4bb1367192312787ULL,
0x1b72a1638a6c44d3ULL, 0x4a0e68a6e8359a66ULL, 0x169a5039f258b6caULL,
0xb98a2ef44edee5a4ULL, 0xd9083fe85e43a737ULL, 0x967f6ce239624e13ULL,
0x8874f62d3c1a7982ULL, 0x3c1629830af06e3fULL, 0x9165ebfd427e5a8eULL,
0xb5dd81794ceeaa5cULL, 0x0de8f15a7834f219ULL, 0x70bd98ede3dd5d25ULL,
0xaccc9ca9328a8950ULL, 0x56664eda1945ca28ULL, 0x221db34c0f8859aeULL,
0x26dbd637fa98970dULL, 0x1acdffb4f068f932ULL, 0x4585254f64090fa0ULL,
0x72de245e17d53afaULL, 0x1546b25d7c546cf4ULL, 0x207e0ffffb803e71ULL,
0xfaaad2732bcf4378ULL, 0xb462dfae36ea17bdULL, 0xcf926fd1ac1b11fdULL,
0xe0672dc7dba7ba4aULL, 0xd3fa49ad5d6b41b3ULL, 0x8ba81449b216a3bcULL,
0x14f9ec8a0650d115ULL, 0x40fc1ee3eb1d7ce2ULL, 0x23a2ed9b758ce44fULL,
0x782c521b14fddc7eULL, 0x1c68267cf170504eULL, 0xbcf31558c1ca96e6ULL,
0xa781b43b4ba6d235ULL, 0xf6fd7dfe29ff0c80ULL, 0xb0a4bad5c3fad91eULL,
0xd199f51ea963266cULL, 0x414340349119c103ULL, 0x5405f269ed4dadf7ULL,
0xabd61bb649969dcdULL, 0x6813dbeae7bdc3c8ULL, 0x65fb2ab09f8931d1ULL,
0xf1e7fae152e3181dULL, 0xc1a67cef5a2339daULL, 0x7a4feea8e0f5bba1ULL,
0x1e0b9acf05783791ULL, 0x5b8ebf8061713831ULL, 0x80e53cdbcb3af8d9ULL,
0x7e898bd315e57502ULL, 0xc6bcfbf0213f2d47ULL, 0x95a38e86b76e942dULL,
0x092e94218d243cbaULL, 0x8339debf453622e7ULL, 0xb11be402b9fe64ffULL,
0x57d9100d634177c9ULL, 0xcc4e8db52217cbc3ULL, 0x3b0cae9c71ec7aa2ULL,
0xfb158ca451cbfe99ULL, 0x2b33276d82ac6514ULL, 0x01bf5ed77a04bde1ULL,
0xc5601994af33f779ULL, 0x75c4a3416cc92e67ULL, 0xf3844652a6eb7fc2ULL,
0x3487e375fdd0ef64ULL, 0x18ae430704609eedULL, 0x4d14efb993298efbULL,
0x815a620cb13e4538ULL, 0x125c354207487869ULL, 0x9eeea614ce42cf48ULL,
0xce2d3106d61fac1cULL, 0xbbe99247bad6827bULL, 0x071a871f7b1c149dULL,
0x2e4a1cc10db81656ULL, 0x77a71ff298c149b8ULL, 0x06a5d9c80118a97cULL,
0xad73c27e488e34b1ULL, 0x443a7b981e0db241ULL, 0xe3bbcfa355ab6074ULL,
0x0af276450328e684ULL, 0x73617a896dd1871bULL, 0x58525de4ef7de20fULL,
0xb7be3dcab8e6cd83ULL, 0x19111dd07e64230cULL, 0x842359a03e2a367aULL,
0x103f89f1f3401fb6ULL, 0xdc710444d157d475ULL, 0xb835702334da5845ULL,
0x4320fc876511a6dcULL, 0xd026abc9d3679b8dULL, 0x17250eee885c0b2bULL,
0x90dab52a387ae76fULL, 0x31fed8d972c49c26ULL, 0x89cba8fa461ec463ULL,
0x2ff5421677bcabb7ULL, 0x396f122f85e41d7dULL, 0xa09b332430bac6a8ULL,
0xc888e8ced7070560ULL, 0xaeaf201ac682ee8fULL, 0x1180d7268944a257ULL,
0xf058a43628e7a5fcULL, 0xbd4c4b8fbbce2b07ULL, 0xa1246df34abe7b49ULL,
0x7d5569b79be9af3cULL, 0xa9b5a705bd9efa12ULL, 0xdb6b835baa4bc0e8ULL,
0x05793bac8f147342ULL, 0x21c1512881848390ULL, 0xfdb0556c50d357e5ULL,
0x613d4fcb6a99ff72ULL, 0x03dce2648e0cda3eULL, 0xe949b9e6568386f0ULL,
0xfc0f0bbb2ad7ea04ULL, 0x6a70675913b5a417ULL, 0x7f36d5046fe1c8e3ULL,
0x0c57af8d02304ff8ULL, 0x32223abdfcc84618ULL, 0x0891caf6f720815bULL,
0xa63eeaec31a26fd4ULL, 0x2507345374944d33ULL, 0x49d28ac266394058ULL,
0xf5219f9aa7f3d6beULL, 0x2d96fea583b4cc68ULL, 0x5a31e1571b7585d0ULL,
0x8ed12fe53d02d0feULL, 0xdfade6205f5b0e4bULL, 0x4cabb16ee92d331aULL,
0x04c6657bf510cea3ULL, 0xd73c2cd6a87b8f10ULL, 0xe1d87310a1a307abULL,
0x6cd5be9112ad0d6bULL, 0x97c032354366f3f2ULL, 0xd4e0ceb22677552eULL,
0x0000000000000000ULL, 0x29509bde76a402cbULL, 0xc27a9e8bd42fe3e4ULL,
0x5ef7842cee654b73ULL, 0xaf107ecdbc86536eULL, 0x3fcacbe784fcb401ULL,
0xd55f90655c73e8cfULL, 0xe6c2f40fdabf1336ULL, 0xe8f6e7312c873b11ULL,
0xeb2a0555a28be12fULL, 0xe4a148bc2eb774e9ULL, 0x9b979db84156bc0aULL,
0x6eb60222e6a56ab4ULL, 0x87ffbbc4b026ec44ULL, 0xc703a5275b3b90a6ULL,
0x47e699fc9001687fULL, 0x9c8d1aa73a4aa897ULL, 0x7cea3760e1ed12ddULL,
0x4ec80ddd1d2554c5ULL, 0x13e36b957d4cc588ULL, 0x5d2b66486069914dULL,
0x92b90999cc7280b0ULL, 0x517cc9c56259deb5ULL, 0xc937b619ad03b881ULL,
0xec30824ad997f5b2ULL, 0xa45d565fc5aa080bULL, 0xd6837201d27f32f1ULL,
0x635ef3789e9198adULL, 0x531f75769651b96aULL, 0x4f77530a6721e924ULL,
0x486dd4151c3dfdb9ULL, 0x5f48dafb9461f692ULL, 0x375b011173dc355aULL,
0x3da9775470f4d3deULL, 0x8d0dcd81b30e0ac0ULL, 0x36e45fc609d888bbULL,
0x55baacbe97491016ULL, 0x8cb29356c90ab721ULL, 0x76184125e2c5f459ULL,
0x99f4210bb55edbd5ULL, 0x6f095cf59ca1d755ULL, 0x9f51f8c3b44672a9ULL,
0x3538bda287d45285ULL, 0x50c39712185d6354ULL, 0xf23b1885dcefc223ULL,
0x79930ccc6ef9619fULL, 0xed8fdc9da3934853ULL, 0xcb540aaa590bdf5eULL,
0x5c94389f1a6d2cacULL, 0xe77daad8a0bbaed7ULL, 0x28efc5090ca0bf2aULL,
0xbf2ff73c4fc64cd8ULL, 0xb37858b14df60320ULL, 0xf8c96ec0dfc724a7ULL,
0x828680683f329f06ULL, 0x941cd051cd6a29ccULL, 0xc3c5c05cae2b5e05ULL,
0xb601631dc2e27062ULL, 0xc01922382027843bULL, 0x24b86a840e90f0d2ULL,
0xd245177a276ffc52ULL, 0x0f8b4de98c3c95c6ULL, 0x3e759530fef809e0ULL,
0x0b4d2892792c5b65ULL, 0xc4df4743d5374a98ULL, 0xa5e20888bfaeb5eaULL,
0xba56cc90c0d23f9aULL, 0x38d04cf8ffe0a09cULL, 0x62e1adafe495254cULL,
0x0263bcb3f40867dfULL, 0xcaeb547d230f62bfULL, 0x6082111c109d4293ULL,
0xdad4dd8cd04f7d09ULL, 0xefec602e579b2f8cULL, 0x1fb4c4187f7c8a70ULL,
0xffd3e9dfa4db303aULL, 0x7bf0b07f9af10640ULL, 0xf49ec14dddf76b5fULL,
0x8f6e713247066d1fULL, 0x339d646a86ccfbf9ULL, 0x64447467e58d8c30ULL,
0x2c29a072f9b07189ULL, 0xd8b7613f24471ad6ULL, 0x6627c8d41185ebefULL,
0xa347d140beb61c96ULL, 0xde12b8f7255fb3aaULL, 0x9d324470404e1576ULL,
0x9306574eb6763d51ULL, 0xa80af9d2c79a47f3ULL, 0x859c0777442e8b9bULL,
0x69ac853d9db97e29ULL
}, {
0xc3407dfc2de6377eULL, 0x5b9e93eea4256f77ULL, 0xadb58fdd50c845e0ULL,
0x5219ff11a75bed86ULL, 0x356b61cfd90b1de9ULL, 0xfb8f406e25abe037ULL,
0x7a5a0231c0f60796ULL, 0x9d3cd216e1f5020bULL, 0x0c6550fb6b48d8f3ULL,
0xf57508c427ff1c62ULL, 0x4ad35ffa71cb407dULL, 0x6290a2da1666aa6dULL,
0xe284ec2349355f9fULL, 0xb3c307c53d7c84ecULL, 0x05e23c0468365a02ULL,
0x190bac4d6c9ebfa8ULL, 0x94bbbee9e28b80faULL, 0xa34fc777529cb9b5ULL,
0xcc7b39f095bcd978ULL, 0x2426addb0ce532e3ULL, 0x7e79329312ce4fc7ULL,
0xab09a72eebec2917ULL, 0xf8d15499f6b9d6c2ULL, 0x1a55b8babf8c895dULL,
0xdb8add17fb769a85ULL, 0xb57f2f368658e81bULL, 0x8acd36f18f3f41f6ULL,
0x5ce3b7bba50f11d3ULL, 0x114dcc14d5ee2f0aULL, 0xb91a7fcded1030e8ULL,
0x81d5425fe55de7a1ULL, 0xb6213bc1554adeeeULL, 0x80144ef95f53f5f2ULL,
0x1e7688186db4c10cULL, 0x3b912965db5fe1bcULL, 0xc281715a97e8252dULL,
0x54a5d7e21c7f8171ULL, 0x4b12535ccbc5522eULL, 0x1d289cefbea6f7f9ULL,
0x6ef5f2217d2e729eULL, 0xe6a7dc819b0d17ceULL, 0x1b94b41c05829b0eULL,
0x33d7493c622f711eULL, 0xdcf7f942fa5ce421ULL, 0x600fba8b7f7a8ecbULL,
0x46b60f011a83988eULL, 0x235b898e0dcf4c47ULL, 0x957ab24f588592a9ULL,
0x4354330572b5c28cULL, 0xa5f3ef84e9b8d542ULL, 0x8c711e02341b2d01ULL,
0x0b1874ae6a62a657ULL, 0x1213d8e306fc19ffULL, 0xfe6d7c6a4d9dba35ULL,
0x65ed868f174cd4c9ULL, 0x88522ea0e6236550ULL, 0x899322065c2d7703ULL,
0xc01e690bfef4018bULL, 0x915982ed8abddaf8ULL, 0xbe675b98ec3a4e4cULL,
0xa996bf7f82f00db1ULL, 0xe1daf8d49a27696aULL, 0x2effd5d3dc8986e7ULL,
0xd153a51f2b1a2e81ULL, 0x18caa0ebd690adfbULL, 0x390e3134b243c51aULL,
0x2778b92cdff70416ULL, 0x029f1851691c24a6ULL, 0x5e7cafeacc133575ULL,
0xfa4e4cc89fa5f264ULL, 0x5a5f9f481e2b7d24ULL, 0x484c47ab18d764dbULL,
0x400a27f2a1a7f479ULL, 0xaeeb9b2a83da7315ULL, 0x721c626879869734ULL,
0x042330a2d2384851ULL, 0x85f672fd3765aff0ULL, 0xba446b3a3e02061dULL,
0x73dd6ecec3888567ULL, 0xffac70ccf793a866ULL, 0xdfa9edb5294ed2d4ULL,
0x6c6aea7014325638ULL, 0x834a5a0e8c41c307ULL, 0xcdba35562fb2cb2bULL,
0x0ad97808d06cb404ULL, 0x0f3b440cb85aee06ULL, 0xe5f9c876481f213bULL,
0x98deee1289c35809ULL, 0x59018bbfcd394bd1ULL, 0xe01bf47220297b39ULL,
0xde68e1139340c087ULL, 0x9fa3ca4788e926adULL, 0xbb85679c840c144eULL,
0x53d8f3b71d55ffd5ULL, 0x0da45c5dd146caa0ULL, 0x6f34fe87c72060cdULL,
0x57fbc315cf6db784ULL, 0xcee421a1fca0fddeULL, 0x3d2d0196607b8d4bULL,
0x642c8a29ad42c69aULL, 0x14aff010bdd87508ULL, 0xac74837beac657b3ULL,
0x3216459ad821634dULL, 0x3fb219c70967a9edULL, 0x06bc28f3bb246cf7ULL,
0xf2082c9126d562c6ULL, 0x66b39278c45ee23cULL, 0xbd394f6f3f2878b9ULL,
0xfd33689d9e8f8cc0ULL, 0x37f4799eb017394fULL, 0x108cc0b26fe03d59ULL,
0xda4bd1b1417888d6ULL, 0xb09d1332ee6eb219ULL, 0x2f3ed975668794b4ULL,
0x58c0871977375982ULL, 0x7561463d78ace990ULL, 0x09876cff037e82f1ULL,
0x7fb83e35a8c05d94ULL, 0x26b9b58a65f91645ULL, 0xef20b07e9873953fULL,
0x3148516d0b3355b8ULL, 0x41cb2b541ba9e62aULL, 0x790416c613e43163ULL,
0xa011d380818e8f40ULL, 0x3a5025c36151f3efULL, 0xd57095bdf92266d0ULL,
0x498d4b0da2d97688ULL, 0x8b0c3a57353153a5ULL, 0x21c491df64d368e1ULL,
0x8f2f0af5e7091bf4ULL, 0x2da1c1240f9bb012ULL, 0xc43d59a92ccc49daULL,
0xbfa6573e56345c1fULL, 0x828b56a8364fd154ULL, 0x9a41f643e0df7cafULL,
0xbcf843c985266aeaULL, 0x2b1de9d7b4bfdce5ULL, 0x20059d79dedd7ab2ULL,
0x6dabe6d6ae3c446bULL, 0x45e81bf6c991ae7bULL, 0x6351ae7cac68b83eULL,
0xa432e32253b6c711ULL, 0xd092a9b991143cd2ULL, 0xcac711032e98b58fULL,
0xd8d4c9e02864ac70ULL, 0xc5fc550f96c25b89ULL, 0xd7ef8dec903e4276ULL,
0x67729ede7e50f06fULL, 0xeac28c7af045cf3dULL, 0xb15c1f945460a04aULL,
0x9cfddeb05bfb1058ULL, 0x93c69abce3a1fe5eULL, 0xeb0380dc4a4bdd6eULL,
0xd20db1e8f8081874ULL, 0x229a8528b7c15e14ULL, 0x44291750739fbc28ULL,
0xd3ccbd4e42060a27ULL, 0xf62b1c33f4ed2a97ULL, 0x86a8660ae4779905ULL,
0xd62e814a2a305025ULL, 0x477703a7a08d8addULL, 0x7b9b0e977af815c5ULL,
0x78c51a60a9ea2330ULL, 0xa6adfb733aaae3b7ULL, 0x97e5aa1e3199b60fULL,
0x0000000000000000ULL, 0xf4b404629df10e31ULL, 0x5564db44a6719322ULL,
0x9207961a59afec0dULL, 0x9624a6b88b97a45cULL, 0x363575380a192b1cULL,
0x2c60cd82b595a241ULL, 0x7d272664c1dc7932ULL, 0x7142769faa94a1c1ULL,
0xa1d0df263b809d13ULL, 0x1630e841d4c451aeULL, 0xc1df65ad44fa13d8ULL,
0x13d2d445bcf20bacULL, 0xd915c546926abe23ULL, 0x38cf3d92084dd749ULL,
0xe766d0272103059dULL, 0xc7634d5effde7f2fULL, 0x077d2455012a7ea4ULL,
0xedbfa82ff16fb199ULL, 0xaf2a978c39d46146ULL, 0x42953fa3c8bbd0dfULL,
0xcb061da59496a7dcULL, 0x25e7a17db6eb20b0ULL, 0x34aa6d6963050fbaULL,
0xa76cf7d580a4f1e4ULL, 0xf7ea10954ee338c4ULL, 0xfcf2643b24819e93ULL,
0xcf252d0746aeef8dULL, 0x4ef06f58a3f3082cULL, 0x563acfb37563a5d7ULL,
0x5086e740ce47c920ULL, 0x2982f186dda3f843ULL, 0x87696aac5e798b56ULL,
0x5d22bb1d1f010380ULL, 0x035e14f7d31236f5ULL, 0x3cec0d30da759f18ULL,
0xf3c920379cdb7095ULL, 0xb8db736b571e22bbULL, 0xdd36f5e44052f672ULL,
0xaac8ab8851e23b44ULL, 0xa857b3d938fe1fe2ULL, 0x17f1e4e76eca43fdULL,
0xec7ea4894b61a3caULL, 0x9e62c6e132e734feULL, 0xd4b1991b432c7483ULL,
0x6ad6c283af163acfULL, 0x1ce9904904a8e5aaULL, 0x5fbda34c761d2726ULL,
0xf910583f4cb7c491ULL, 0xc6a241f845d06d7cULL, 0x4f3163fe19fd1a7fULL,
0xe99c988d2357f9c8ULL, 0x8eee06535d0709a7ULL, 0x0efa48aa0254fc55ULL,
0xb4be23903c56fa48ULL, 0x763f52caabbedf65ULL, 0xeee1bcd8227d876cULL,
0xe345e085f33b4dccULL, 0x3e731561b369bbbeULL, 0x2843fd2067adea10ULL,
0x2adce5710eb1ceb6ULL, 0xb7e03767ef44ccbdULL, 0x8db012a48e153f52ULL,
0x61ceb62dc5749c98ULL, 0xe85d942b9959eb9bULL, 0x4c6f7709caef2c8aULL,
0x84377e5b8d6bbda3ULL, 0x30895dcbb13d47ebULL, 0x74a04a9bc2a2fbc3ULL,
0x6b17ce251518289cULL, 0xe438c4d0f2113368ULL, 0x1fb784bed7bad35fULL,
0x9b80fae55ad16efcULL, 0x77fe5e6c11b0cd36ULL, 0xc858095247849129ULL,
0x08466059b97090a2ULL, 0x01c10ca6ba0e1253ULL, 0x6988d6747c040c3aULL,
0x6849dad2c60a1e69ULL, 0x5147ebe67449db73ULL, 0xc99905f4fd8a837aULL,
0x991fe2b433cd4a5aULL, 0xf09734c04fc94660ULL, 0xa28ecbd1e892abe6ULL,
0xf1563866f5c75433ULL, 0x4dae7baf70e13ed9ULL, 0x7ce62ac27bd26b61ULL,
0x70837a39109ab392ULL, 0x90988e4b30b3c8abULL, 0xb2020b63877296bfULL,
0x156efcb607d6675bULL
}, {
0xe63f55ce97c331d0ULL, 0x25b506b0015bba16ULL, 0xc8706e29e6ad9ba8ULL,
0x5b43d3775d521f6aULL, 0x0bfa3d577035106eULL, 0xab95fc172afb0e66ULL,
0xf64b63979e7a3276ULL, 0xf58b4562649dad4bULL, 0x48f7c3dbae0c83f1ULL,
0xff31916642f5c8c5ULL, 0xcbb048dc1c4a0495ULL, 0x66b8f83cdf622989ULL,
0x35c130e908e2b9b0ULL, 0x7c761a61f0b34fa1ULL, 0x3601161cf205268dULL,
0x9e54ccfe2219b7d6ULL, 0x8b7d90a538940837ULL, 0x9cd403588ea35d0bULL,
0xbc3c6fea9ccc5b5aULL, 0xe5ff733b6d24aeedULL, 0xceed22de0f7eb8d2ULL,
0xec8581cab1ab545eULL, 0xb96105e88ff8e71dULL, 0x8ca03501871a5eadULL,
0x76ccce65d6db2a2fULL, 0x5883f582a7b58057ULL, 0x3f7be4ed2e8adc3eULL,
0x0fe7be06355cd9c9ULL, 0xee054e6c1d11be83ULL, 0x1074365909b903a6ULL,
0x5dde9f80b4813c10ULL, 0x4a770c7d02b6692cULL, 0x5379c8d5d7809039ULL,
0xb4067448161ed409ULL, 0x5f5e5026183bd6cdULL, 0xe898029bf4c29df9ULL,
0x7fb63c940a54d09cULL, 0xc5171f897f4ba8bcULL, 0xa6f28db7b31d3d72ULL,
0x2e4f3be7716eaa78ULL, 0x0d6771a099e63314ULL, 0x82076254e41bf284ULL,
0x2f0fd2b42733df98ULL, 0x5c9e76d3e2dc49f0ULL, 0x7aeb569619606cdbULL,
0x83478b07b2468764ULL, 0xcfadcb8d5923cd32ULL, 0x85dac7f05b95a41eULL,
0xb5469d1b4043a1e9ULL, 0xb821ecbbd9a592fdULL, 0x1b8e0b0e798c13c8ULL,
0x62a57b6d9a0be02eULL, 0xfcf1b793b81257f8ULL, 0x9d94ea0bd8fe28ebULL,
0x4cea408aeb654a56ULL, 0x23284a47e888996cULL, 0x2d8f1d128b893545ULL,
0xf4cbac3132c0d8abULL, 0xbd7c86b9ca912ebaULL, 0x3a268eef3dbe6079ULL,
0xf0d62f6077a9110cULL, 0x2735c916ade150cbULL, 0x89fd5f03942ee2eaULL,
0x1acee25d2fd16628ULL, 0x90f39bab41181bffULL, 0x430dfe8cde39939fULL,
0xf70b8ac4c8274796ULL, 0x1c53aeaac6024552ULL, 0x13b410acf35e9c9bULL,
0xa532ab4249faa24fULL, 0x2b1251e5625a163fULL, 0xd7e3e676da4841c7ULL,
0xa7b264e4e5404892ULL, 0xda8497d643ae72d3ULL, 0x861ae105a1723b23ULL,
0x38a6414991048aa4ULL, 0x6578dec92585b6b4ULL, 0x0280cfa6acbaeaddULL,
0x88bdb650c273970aULL, 0x9333bd5ebbff84c2ULL, 0x4e6a8f2c47dfa08bULL,
0x321c954db76cef2aULL, 0x418d312a72837942ULL, 0xb29b38bfffcdf773ULL,
0x6c022c38f90a4c07ULL, 0x5a033a240b0f6a8aULL, 0x1f93885f3ce5da6fULL,
0xc38a537e96988bc6ULL, 0x39e6a81ac759ff44ULL, 0x29929e43cee0fce2ULL,
0x40cdd87924de0ca2ULL, 0xe9d8ebc8a29fe819ULL, 0x0c2798f3cfbb46f4ULL,
0x55e484223e53b343ULL, 0x4650948ecd0d2fd8ULL, 0x20e86cb2126f0651ULL,
0x6d42c56baf5739e7ULL, 0xa06fc1405ace1e08ULL, 0x7babbfc54f3d193bULL,
0x424d17df8864e67fULL, 0xd8045870ef14980eULL, 0xc6d7397c85ac3781ULL,
0x21a885e1443273b1ULL, 0x67f8116f893f5c69ULL, 0x24f5efe35706cff6ULL,
0xd56329d076f2ab1aULL, 0x5e1eb9754e66a32dULL, 0x28d2771098bd8902ULL,
0x8f6013f47dfdc190ULL, 0x17a993fdb637553cULL, 0xe0a219397e1012aaULL,
0x786b9930b5da8606ULL, 0x6e82e39e55b0a6daULL, 0x875a0856f72f4ec3ULL,
0x3741ff4fa458536dULL, 0xac4859b3957558fcULL, 0x7ef6d5c75c09a57cULL,
0xc04a758b6c7f14fbULL, 0xf9acdd91ab26ebbfULL, 0x7391a467c5ef9668ULL,
0x335c7c1ee1319acaULL, 0xa91533b18641e4bbULL, 0xe4bf9a683b79db0dULL,
0x8e20faa72ba0b470ULL, 0x51f907737b3a7ae4ULL, 0x2268a314bed5ec8cULL,
0xd944b123b949edeeULL, 0x31dcb3b84d8b7017ULL, 0xd3fe65279f218860ULL,
0x097af2f1dc8ffab3ULL, 0x9b09a6fc312d0b91ULL, 0xcc6ded78a3c4520fULL,
0x3481d9ba5ebfcc50ULL, 0x4f2a667f1182d56bULL, 0xdfd9fdd4509ace94ULL,
0x26752045fbbc252bULL, 0xbffc491f662bc467ULL, 0xdd593272fc202449ULL,
0x3cbbc218d46d4303ULL, 0x91b372f817456e1fULL, 0x681faf69bc6385a0ULL,
0xb686bbeebaa43ed4ULL, 0x1469b5084cd0ca01ULL, 0x98c98009cbca94acULL,
0x6438379a73d8c354ULL, 0xc2caba2dc0c5fe26ULL, 0x3e3b0dbe78d7a9deULL,
0x50b9ee202d670f04ULL, 0x4590b27b37eab0e5ULL, 0x6025b4cb36b10af3ULL,
0xfb2c1237079c0162ULL, 0xa12f28130c936be8ULL, 0x4b37e52e54eb1cccULL,
0x083a1ba28ad28f53ULL, 0xc10a9cd83a22611bULL, 0x9f1425ad7444c236ULL,
0x069d4cf7e9d3237aULL, 0xedc56899e7f621beULL, 0x778c273680865fcfULL,
0x309c5aeb1bd605f7ULL, 0x8de0dc52d1472b4dULL, 0xf8ec34c2fd7b9e5fULL,
0xea18cd3d58787724ULL, 0xaad515447ca67b86ULL, 0x9989695a9d97e14cULL,
0x0000000000000000ULL, 0xf196c63321f464ecULL, 0x71116bc169557cb5ULL,
0xaf887f466f92c7c1ULL, 0x972e3e0ffe964d65ULL, 0x190ec4a8d536f915ULL,
0x95aef1a9522ca7b8ULL, 0xdc19db21aa7d51a9ULL, 0x94ee18fa0471d258ULL,
0x8087adf248a11859ULL, 0xc457f6da2916dd5cULL, 0xfa6cfb6451c17482ULL,
0xf256e0c6db13fbd1ULL, 0x6a9f60cf10d96f7dULL, 0x4daaa9d9bd383fb6ULL,
0x03c026f5fae79f3dULL, 0xde99148706c7bb74ULL, 0x2a52b8b6340763dfULL,
0x6fc20acd03edd33aULL, 0xd423c08320afdefaULL, 0xbbe1ca4e23420dc0ULL,
0x966ed75ca8cb3885ULL, 0xeb58246e0e2502c4ULL, 0x055d6a021334bc47ULL,
0xa47242111fa7d7afULL, 0xe3623fcc84f78d97ULL, 0x81c744a11efc6db9ULL,
0xaec8961539cfb221ULL, 0xf31609958d4e8e31ULL, 0x63e5923ecc5695ceULL,
0x47107ddd9b505a38ULL, 0xa3afe7b5a0298135ULL, 0x792b7063e387f3e6ULL,
0x0140e953565d75e0ULL, 0x12f4f9ffa503e97bULL, 0x750ce8902c3cb512ULL,
0xdbc47e8515f30733ULL, 0x1ed3610c6ab8af8fULL, 0x5239218681dde5d9ULL,
0xe222d69fd2aaf877ULL, 0xfe71783514a8bd25ULL, 0xcaf0a18f4a177175ULL,
0x61655d9860ec7f13ULL, 0xe77fbc9dc19e4430ULL, 0x2ccff441ddd440a5ULL,
0x16e97aaee06a20dcULL, 0xa855dae2d01c915bULL, 0x1d1347f9905f30b2ULL,
0xb7c652bdecf94b34ULL, 0xd03e43d265c6175dULL, 0xfdb15ec0ee4f2218ULL,
0x57644b8492e9599eULL, 0x07dda5a4bf8e569aULL, 0x54a46d71680ec6a3ULL,
0x5624a2d7c4b42c7eULL, 0xbebca04c3076b187ULL, 0x7d36f332a6ee3a41ULL,
0x3b6667bc6be31599ULL, 0x695f463aea3ef040ULL, 0xad08b0e0c3282d1cULL,
0xb15b1e4a052a684eULL, 0x44d05b2861b7c505ULL, 0x15295c5b1a8dbfe1ULL,
0x744c01c37a61c0f2ULL, 0x59c31cd1f1e8f5b7ULL, 0xef45a73f4b4ccb63ULL,
0x6bdf899c46841a9dULL, 0x3dfb2b4b823036e3ULL, 0xa2ef0ee6f674f4d5ULL,
0x184e2dfb836b8cf5ULL, 0x1134df0a5fe47646ULL, 0xbaa1231d751f7820ULL,
0xd17eaa81339b62bdULL, 0xb01bf71953771daeULL, 0x849a2ea30dc8d1feULL,
0x705182923f080955ULL, 0x0ea757556301ac29ULL, 0x041d83514569c9a7ULL,
0x0abad4042668658eULL, 0x49b72a88f851f611ULL, 0x8a3d79f66ec97dd7ULL,
0xcd2d042bf59927efULL, 0xc930877ab0f0ee48ULL, 0x9273540deda2f122ULL,
0xc797d02fd3f14261ULL, 0xe1e2f06a284d674aULL, 0xd2be8c74c97cfd80ULL,
0x9a494faf67707e71ULL, 0xb3dbd1eca9908293ULL, 0x72d14d3493b2e388ULL,
0xd6a30f258c153427ULL
}
}; /* Ax */
static void streebog_xor(const struct streebog_uint512 *x,
const struct streebog_uint512 *y,
struct streebog_uint512 *z)
{
z->qword[0] = x->qword[0] ^ y->qword[0];
z->qword[1] = x->qword[1] ^ y->qword[1];
z->qword[2] = x->qword[2] ^ y->qword[2];
z->qword[3] = x->qword[3] ^ y->qword[3];
z->qword[4] = x->qword[4] ^ y->qword[4];
z->qword[5] = x->qword[5] ^ y->qword[5];
z->qword[6] = x->qword[6] ^ y->qword[6];
z->qword[7] = x->qword[7] ^ y->qword[7];
}
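/*
 * XLPS is the composed X (xor with key), S (Pi substitution), P (byte
 * transposition) and L (linear transform) step of GOST R 34.11-2012.
 * The S, P and L operations are fused into the eight precomputed
 * 256-entry tables Ax[] above, so output qword i is the xor of one
 * lookup per input qword, each indexed by that qword's i-th byte.
 */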
static void streebog_xlps(const struct streebog_uint512 *x,
const struct streebog_uint512 *y,
struct streebog_uint512 *data)
{
u64 r0, r1, r2, r3, r4, r5, r6, r7;
int i;
r0 = le64_to_cpu(x->qword[0] ^ y->qword[0]);
r1 = le64_to_cpu(x->qword[1] ^ y->qword[1]);
r2 = le64_to_cpu(x->qword[2] ^ y->qword[2]);
r3 = le64_to_cpu(x->qword[3] ^ y->qword[3]);
r4 = le64_to_cpu(x->qword[4] ^ y->qword[4]);
r5 = le64_to_cpu(x->qword[5] ^ y->qword[5]);
r6 = le64_to_cpu(x->qword[6] ^ y->qword[6]);
r7 = le64_to_cpu(x->qword[7] ^ y->qword[7]);
for (i = 0; i <= 7; i++) {
data->qword[i] = cpu_to_le64(Ax[0][r0 & 0xFF]);
data->qword[i] ^= cpu_to_le64(Ax[1][r1 & 0xFF]);
data->qword[i] ^= cpu_to_le64(Ax[2][r2 & 0xFF]);
data->qword[i] ^= cpu_to_le64(Ax[3][r3 & 0xFF]);
data->qword[i] ^= cpu_to_le64(Ax[4][r4 & 0xFF]);
data->qword[i] ^= cpu_to_le64(Ax[5][r5 & 0xFF]);
data->qword[i] ^= cpu_to_le64(Ax[6][r6 & 0xFF]);
data->qword[i] ^= cpu_to_le64(Ax[7][r7 & 0xFF]);
r0 >>= 8;
r1 >>= 8;
r2 >>= 8;
r3 >>= 8;
r4 >>= 8;
r5 >>= 8;
r6 >>= 8;
r7 >>= 8;
}
}
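/*
 * One round of the internal cipher E(): advance the round key,
 * Ki = LPS(Ki ^ C[i]), then apply it to the state,
 * data = LPS(Ki ^ data).
 */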
static void streebog_round(int i, struct streebog_uint512 *Ki,
struct streebog_uint512 *data)
{
streebog_xlps(Ki, &C[i], Ki);
streebog_xlps(Ki, data, data);
}
static int streebog_init(struct shash_desc *desc)
{
struct streebog_state *ctx = shash_desc_ctx(desc);
unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
unsigned int i;
memset(ctx, 0, sizeof(struct streebog_state));
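/*
 * Standard IVs: an all-zero state for streebog512 (already set by the
 * memset above), all 0x01 bytes for streebog256.
 */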
if (digest_size == STREEBOG256_DIGEST_SIZE)
for (i = 0; i < 8; i++)
ctx->h.qword[i] = cpu_to_le64(0x0101010101010101ULL);
return 0;
}
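/*
 * Pad the last partial block: a single 0x01 marker byte followed by
 * zeroes up to the end of the 512-bit block.
 */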
static void streebog_pad(struct streebog_state *ctx)
{
if (ctx->fillsize >= STREEBOG_BLOCK_SIZE)
return;
memset(ctx->buffer + ctx->fillsize, 0,
sizeof(ctx->buffer) - ctx->fillsize);
ctx->buffer[ctx->fillsize] = 1;
}
static void streebog_add512(const struct streebog_uint512 *x,
const struct streebog_uint512 *y,
struct streebog_uint512 *r)
{
u64 carry = 0;
int i;
for (i = 0; i < 8; i++) {
const u64 left = le64_to_cpu(x->qword[i]);
u64 sum;
sum = left + le64_to_cpu(y->qword[i]) + carry;
if (sum != left)
carry = (sum < left);
r->qword[i] = cpu_to_le64(sum);
}
}
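/*
 * Compression function in Miyaguchi-Preneel-like form:
 * g(N, h, m) = E(LPS(h ^ N), m) ^ h ^ m, where E() is the 12-round
 * key-alternating cipher driven by streebog_round() and the round
 * constants C[].
 */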
static void streebog_g(struct streebog_uint512 *h,
const struct streebog_uint512 *N,
const struct streebog_uint512 *m)
{
struct streebog_uint512 Ki, data;
unsigned int i;
streebog_xlps(h, N, &data);
/* Starting E() */
Ki = data;
streebog_xlps(&Ki, m, &data);
for (i = 0; i < 11; i++)
streebog_round(i, &Ki, &data);
streebog_xlps(&Ki, &C[11], &Ki);
streebog_xor(&Ki, &data, &data);
/* E() done */
streebog_xor(&data, h, &data);
streebog_xor(&data, m, h);
}
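/*
 * Absorb one full 512-bit block: chain it through g(), add 512 to the
 * bit counter N (buffer512 is the little-endian constant 512) and add
 * the block into the running checksum Sigma.
 */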
static void streebog_stage2(struct streebog_state *ctx, const u8 *data)
{
struct streebog_uint512 m;
memcpy(&m, data, sizeof(m));
streebog_g(&ctx->h, &ctx->N, &m);
streebog_add512(&ctx->N, &buffer512, &ctx->N);
streebog_add512(&ctx->Sigma, &m, &ctx->Sigma);
}
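/*
 * Finalization: pad and absorb the remaining partial block, then mix
 * the total bit length N and the checksum Sigma into the state with a
 * zeroed counter (buffer0 is the all-zero constant).
 */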
static void streebog_stage3(struct streebog_state *ctx)
{
struct streebog_uint512 buf = { { 0 } };
buf.qword[0] = cpu_to_le64(ctx->fillsize << 3);
streebog_pad(ctx);
streebog_g(&ctx->h, &ctx->N, &ctx->m);
streebog_add512(&ctx->N, &buf, &ctx->N);
streebog_add512(&ctx->Sigma, &ctx->m, &ctx->Sigma);
streebog_g(&ctx->h, &buffer0, &ctx->N);
streebog_g(&ctx->h, &buffer0, &ctx->Sigma);
memcpy(&ctx->hash, &ctx->h, sizeof(struct streebog_uint512));
}
static int streebog_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct streebog_state *ctx = shash_desc_ctx(desc);
size_t chunksize;
if (ctx->fillsize) {
chunksize = STREEBOG_BLOCK_SIZE - ctx->fillsize;
if (chunksize > len)
chunksize = len;
memcpy(&ctx->buffer[ctx->fillsize], data, chunksize);
ctx->fillsize += chunksize;
len -= chunksize;
data += chunksize;
if (ctx->fillsize == STREEBOG_BLOCK_SIZE) {
streebog_stage2(ctx, ctx->buffer);
ctx->fillsize = 0;
}
}
while (len >= STREEBOG_BLOCK_SIZE) {
streebog_stage2(ctx, data);
data += STREEBOG_BLOCK_SIZE;
len -= STREEBOG_BLOCK_SIZE;
}
if (len) {
memcpy(&ctx->buffer, data, len);
ctx->fillsize = len;
}
return 0;
}
static int streebog_final(struct shash_desc *desc, u8 *digest)
{
struct streebog_state *ctx = shash_desc_ctx(desc);
streebog_stage3(ctx);
ctx->fillsize = 0;
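/* streebog256 is the upper half (qwords 4..7) of the 512-bit state */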
if (crypto_shash_digestsize(desc->tfm) == STREEBOG256_DIGEST_SIZE)
memcpy(digest, &ctx->hash.qword[4], STREEBOG256_DIGEST_SIZE);
else
memcpy(digest, &ctx->hash.qword[0], STREEBOG512_DIGEST_SIZE);
return 0;
}
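/*
 * Example (a minimal sketch, not part of this module): a one-shot
 * Streebog-256 digest through the synchronous hash API. "buf" and
 * "len" stand for caller-supplied data; error handling is abbreviated.
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("streebog256", 0, 0);
 *	u8 out[STREEBOG256_DIGEST_SIZE];
 *
 *	if (!IS_ERR(tfm)) {
 *		SHASH_DESC_ON_STACK(desc, tfm);
 *
 *		desc->tfm = tfm;
 *		crypto_shash_digest(desc, buf, len, out);
 *		crypto_free_shash(tfm);
 *	}
 */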
static struct shash_alg algs[2] = { {
.digestsize = STREEBOG256_DIGEST_SIZE,
.init = streebog_init,
.update = streebog_update,
.final = streebog_final,
.descsize = sizeof(struct streebog_state),
.base = {
.cra_name = "streebog256",
.cra_driver_name = "streebog256-generic",
.cra_blocksize = STREEBOG_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
}, {
.digestsize = STREEBOG512_DIGEST_SIZE,
.init = streebog_init,
.update = streebog_update,
.final = streebog_final,
.descsize = sizeof(struct streebog_state),
.base = {
.cra_name = "streebog512",
.cra_driver_name = "streebog512-generic",
.cra_blocksize = STREEBOG_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
} };
static int __init streebog_mod_init(void)
{
return crypto_register_shashes(algs, ARRAY_SIZE(algs));
}
static void __exit streebog_mod_fini(void)
{
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
subsys_initcall(streebog_mod_init);
module_exit(streebog_mod_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Chikunov <[email protected]>");
MODULE_DESCRIPTION("Streebog Hash Function");
MODULE_ALIAS_CRYPTO("streebog256");
MODULE_ALIAS_CRYPTO("streebog256-generic");
MODULE_ALIAS_CRYPTO("streebog512");
MODULE_ALIAS_CRYPTO("streebog512-generic");
| linux-master | crypto/streebog_generic.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Cryptographic API.
*
* Copyright (c) 2013 Chanho Min <[email protected]>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/vmalloc.h>
#include <linux/lz4.h>
#include <crypto/internal/scompress.h>
struct lz4_ctx {
void *lz4_comp_mem;
};
static void *lz4_alloc_ctx(struct crypto_scomp *tfm)
{
void *ctx;
ctx = vmalloc(LZ4_MEM_COMPRESS);
if (!ctx)
return ERR_PTR(-ENOMEM);
return ctx;
}
static int lz4_init(struct crypto_tfm *tfm)
{
struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->lz4_comp_mem = lz4_alloc_ctx(NULL);
if (IS_ERR(ctx->lz4_comp_mem))
return -ENOMEM;
return 0;
}
static void lz4_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
vfree(ctx);
}
static void lz4_exit(struct crypto_tfm *tfm)
{
struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
lz4_free_ctx(NULL, ctx->lz4_comp_mem);
}
static int __lz4_compress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
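/*
 * LZ4_compress_default() returns the compressed size, or 0 when dst
 * (of capacity *dlen) is too small to hold the result.
 */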
int out_len = LZ4_compress_default(src, dst,
slen, *dlen, ctx);
if (!out_len)
return -EINVAL;
*dlen = out_len;
return 0;
}
static int lz4_scompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
{
return __lz4_compress_crypto(src, slen, dst, dlen, ctx);
}
static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
return __lz4_compress_crypto(src, slen, dst, dlen, ctx->lz4_comp_mem);
}
static int __lz4_decompress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
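/*
 * LZ4_decompress_safe() returns the number of bytes produced, or a
 * negative value on malformed input. Decompression needs no
 * workspace, so ctx is unused and callers may pass NULL.
 */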
int out_len = LZ4_decompress_safe(src, dst, slen, *dlen);
if (out_len < 0)
return -EINVAL;
*dlen = out_len;
return 0;
}
static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
{
return __lz4_decompress_crypto(src, slen, dst, dlen, NULL);
}
static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst,
unsigned int *dlen)
{
return __lz4_decompress_crypto(src, slen, dst, dlen, NULL);
}
static struct crypto_alg alg_lz4 = {
.cra_name = "lz4",
.cra_driver_name = "lz4-generic",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct lz4_ctx),
.cra_module = THIS_MODULE,
.cra_init = lz4_init,
.cra_exit = lz4_exit,
.cra_u = { .compress = {
.coa_compress = lz4_compress_crypto,
.coa_decompress = lz4_decompress_crypto } }
};
static struct scomp_alg scomp = {
.alloc_ctx = lz4_alloc_ctx,
.free_ctx = lz4_free_ctx,
.compress = lz4_scompress,
.decompress = lz4_sdecompress,
.base = {
.cra_name = "lz4",
.cra_driver_name = "lz4-scomp",
.cra_module = THIS_MODULE,
}
};
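/*
 * Example (a minimal sketch, not part of this module): one-shot
 * compression through the legacy comp interface registered above.
 * "src", "slen", "dst" and "dst_size" stand for caller-supplied
 * buffers and sizes.
 *
 *	struct crypto_comp *tfm = crypto_alloc_comp("lz4", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		unsigned int dlen = dst_size;
 *		int err = crypto_comp_compress(tfm, src, slen, dst, &dlen);
 *
 *		\/\* on success (err == 0), dst holds dlen compressed bytes \*\/
 *		crypto_free_comp(tfm);
 *	}
 */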
static int __init lz4_mod_init(void)
{
int ret;
ret = crypto_register_alg(&alg_lz4);
if (ret)
return ret;
ret = crypto_register_scomp(&scomp);
if (ret) {
crypto_unregister_alg(&alg_lz4);
return ret;
}
return ret;
}
static void __exit lz4_mod_fini(void)
{
crypto_unregister_alg(&alg_lz4);
crypto_unregister_scomp(&scomp);
}
subsys_initcall(lz4_mod_init);
module_exit(lz4_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZ4 Compression Algorithm");
MODULE_ALIAS_CRYPTO("lz4");
| linux-master | crypto/lz4.c |
/*
* Cryptographic API.
*
* Whirlpool hashing Algorithm
*
* The Whirlpool algorithm was developed by Paulo S. L. M. Barreto and
* Vincent Rijmen. It was selected as one of the cryptographic
* primitives of the NESSIE project (http://www.cryptonessie.org/).
*
* The original authors have disclaimed all copyright interest in this
* code and thus put it in the public domain. The subsequent authors
* have put this under the GNU General Public License.
*
* By Aaron Grothe [email protected], August 23, 2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/types.h>
#define WP512_DIGEST_SIZE 64
#define WP384_DIGEST_SIZE 48
#define WP256_DIGEST_SIZE 32
#define WP512_BLOCK_SIZE 64
#define WP512_LENGTHBYTES 32
#define WHIRLPOOL_ROUNDS 10
struct wp512_ctx {
u8 bitLength[WP512_LENGTHBYTES];
u8 buffer[WP512_BLOCK_SIZE];
int bufferBits;
int bufferPos;
u64 hash[WP512_DIGEST_SIZE/8];
};
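/*
 * bitLength is a 256-bit (WP512_LENGTHBYTES) big-endian counter of the
 * total number of message bits hashed so far; buffer collects input
 * until a full 512-bit block is available.
 */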
/*
* Though Whirlpool is endianness-neutral, the encryption tables are listed
* in BIG-ENDIAN format, which is adopted throughout this implementation
* (but little-endian notation would be equally suitable if consistently
* employed).
*/
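/*
 * Only C0 carries independent data: each table below satisfies
 * Ci[x] == ror64(C0[x], 8 * i), i.e. C1..C7 are byte-rotated copies
 * of C0 implementing the cipher's circulant diffusion matrix.
 */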
static const u64 C0[256] = {
0x18186018c07830d8ULL, 0x23238c2305af4626ULL, 0xc6c63fc67ef991b8ULL,
0xe8e887e8136fcdfbULL, 0x878726874ca113cbULL, 0xb8b8dab8a9626d11ULL,
0x0101040108050209ULL, 0x4f4f214f426e9e0dULL, 0x3636d836adee6c9bULL,
0xa6a6a2a6590451ffULL, 0xd2d26fd2debdb90cULL, 0xf5f5f3f5fb06f70eULL,
0x7979f979ef80f296ULL, 0x6f6fa16f5fcede30ULL, 0x91917e91fcef3f6dULL,
0x52525552aa07a4f8ULL, 0x60609d6027fdc047ULL, 0xbcbccabc89766535ULL,
0x9b9b569baccd2b37ULL, 0x8e8e028e048c018aULL, 0xa3a3b6a371155bd2ULL,
0x0c0c300c603c186cULL, 0x7b7bf17bff8af684ULL, 0x3535d435b5e16a80ULL,
0x1d1d741de8693af5ULL, 0xe0e0a7e05347ddb3ULL, 0xd7d77bd7f6acb321ULL,
0xc2c22fc25eed999cULL, 0x2e2eb82e6d965c43ULL, 0x4b4b314b627a9629ULL,
0xfefedffea321e15dULL, 0x575741578216aed5ULL, 0x15155415a8412abdULL,
0x7777c1779fb6eee8ULL, 0x3737dc37a5eb6e92ULL, 0xe5e5b3e57b56d79eULL,
0x9f9f469f8cd92313ULL, 0xf0f0e7f0d317fd23ULL, 0x4a4a354a6a7f9420ULL,
0xdada4fda9e95a944ULL, 0x58587d58fa25b0a2ULL, 0xc9c903c906ca8fcfULL,
0x2929a429558d527cULL, 0x0a0a280a5022145aULL, 0xb1b1feb1e14f7f50ULL,
0xa0a0baa0691a5dc9ULL, 0x6b6bb16b7fdad614ULL, 0x85852e855cab17d9ULL,
0xbdbdcebd8173673cULL, 0x5d5d695dd234ba8fULL, 0x1010401080502090ULL,
0xf4f4f7f4f303f507ULL, 0xcbcb0bcb16c08bddULL, 0x3e3ef83eedc67cd3ULL,
0x0505140528110a2dULL, 0x676781671fe6ce78ULL, 0xe4e4b7e47353d597ULL,
0x27279c2725bb4e02ULL, 0x4141194132588273ULL, 0x8b8b168b2c9d0ba7ULL,
0xa7a7a6a7510153f6ULL, 0x7d7de97dcf94fab2ULL, 0x95956e95dcfb3749ULL,
0xd8d847d88e9fad56ULL, 0xfbfbcbfb8b30eb70ULL, 0xeeee9fee2371c1cdULL,
0x7c7ced7cc791f8bbULL, 0x6666856617e3cc71ULL, 0xdddd53dda68ea77bULL,
0x17175c17b84b2eafULL, 0x4747014702468e45ULL, 0x9e9e429e84dc211aULL,
0xcaca0fca1ec589d4ULL, 0x2d2db42d75995a58ULL, 0xbfbfc6bf9179632eULL,
0x07071c07381b0e3fULL, 0xadad8ead012347acULL, 0x5a5a755aea2fb4b0ULL,
0x838336836cb51befULL, 0x3333cc3385ff66b6ULL, 0x636391633ff2c65cULL,
0x02020802100a0412ULL, 0xaaaa92aa39384993ULL, 0x7171d971afa8e2deULL,
0xc8c807c80ecf8dc6ULL, 0x19196419c87d32d1ULL, 0x494939497270923bULL,
0xd9d943d9869aaf5fULL, 0xf2f2eff2c31df931ULL, 0xe3e3abe34b48dba8ULL,
0x5b5b715be22ab6b9ULL, 0x88881a8834920dbcULL, 0x9a9a529aa4c8293eULL,
0x262698262dbe4c0bULL, 0x3232c8328dfa64bfULL, 0xb0b0fab0e94a7d59ULL,
0xe9e983e91b6acff2ULL, 0x0f0f3c0f78331e77ULL, 0xd5d573d5e6a6b733ULL,
0x80803a8074ba1df4ULL, 0xbebec2be997c6127ULL, 0xcdcd13cd26de87ebULL,
0x3434d034bde46889ULL, 0x48483d487a759032ULL, 0xffffdbffab24e354ULL,
0x7a7af57af78ff48dULL, 0x90907a90f4ea3d64ULL, 0x5f5f615fc23ebe9dULL,
0x202080201da0403dULL, 0x6868bd6867d5d00fULL, 0x1a1a681ad07234caULL,
0xaeae82ae192c41b7ULL, 0xb4b4eab4c95e757dULL, 0x54544d549a19a8ceULL,
0x93937693ece53b7fULL, 0x222288220daa442fULL, 0x64648d6407e9c863ULL,
0xf1f1e3f1db12ff2aULL, 0x7373d173bfa2e6ccULL, 0x12124812905a2482ULL,
0x40401d403a5d807aULL, 0x0808200840281048ULL, 0xc3c32bc356e89b95ULL,
0xecec97ec337bc5dfULL, 0xdbdb4bdb9690ab4dULL, 0xa1a1bea1611f5fc0ULL,
0x8d8d0e8d1c830791ULL, 0x3d3df43df5c97ac8ULL, 0x97976697ccf1335bULL,
0x0000000000000000ULL, 0xcfcf1bcf36d483f9ULL, 0x2b2bac2b4587566eULL,
0x7676c57697b3ece1ULL, 0x8282328264b019e6ULL, 0xd6d67fd6fea9b128ULL,
0x1b1b6c1bd87736c3ULL, 0xb5b5eeb5c15b7774ULL, 0xafaf86af112943beULL,
0x6a6ab56a77dfd41dULL, 0x50505d50ba0da0eaULL, 0x45450945124c8a57ULL,
0xf3f3ebf3cb18fb38ULL, 0x3030c0309df060adULL, 0xefef9bef2b74c3c4ULL,
0x3f3ffc3fe5c37edaULL, 0x55554955921caac7ULL, 0xa2a2b2a2791059dbULL,
0xeaea8fea0365c9e9ULL, 0x656589650fecca6aULL, 0xbabad2bab9686903ULL,
0x2f2fbc2f65935e4aULL, 0xc0c027c04ee79d8eULL, 0xdede5fdebe81a160ULL,
0x1c1c701ce06c38fcULL, 0xfdfdd3fdbb2ee746ULL, 0x4d4d294d52649a1fULL,
0x92927292e4e03976ULL, 0x7575c9758fbceafaULL, 0x06061806301e0c36ULL,
0x8a8a128a249809aeULL, 0xb2b2f2b2f940794bULL, 0xe6e6bfe66359d185ULL,
0x0e0e380e70361c7eULL, 0x1f1f7c1ff8633ee7ULL, 0x6262956237f7c455ULL,
0xd4d477d4eea3b53aULL, 0xa8a89aa829324d81ULL, 0x96966296c4f43152ULL,
0xf9f9c3f99b3aef62ULL, 0xc5c533c566f697a3ULL, 0x2525942535b14a10ULL,
0x59597959f220b2abULL, 0x84842a8454ae15d0ULL, 0x7272d572b7a7e4c5ULL,
0x3939e439d5dd72ecULL, 0x4c4c2d4c5a619816ULL, 0x5e5e655eca3bbc94ULL,
0x7878fd78e785f09fULL, 0x3838e038ddd870e5ULL, 0x8c8c0a8c14860598ULL,
0xd1d163d1c6b2bf17ULL, 0xa5a5aea5410b57e4ULL, 0xe2e2afe2434dd9a1ULL,
0x616199612ff8c24eULL, 0xb3b3f6b3f1457b42ULL, 0x2121842115a54234ULL,
0x9c9c4a9c94d62508ULL, 0x1e1e781ef0663ceeULL, 0x4343114322528661ULL,
0xc7c73bc776fc93b1ULL, 0xfcfcd7fcb32be54fULL, 0x0404100420140824ULL,
0x51515951b208a2e3ULL, 0x99995e99bcc72f25ULL, 0x6d6da96d4fc4da22ULL,
0x0d0d340d68391a65ULL, 0xfafacffa8335e979ULL, 0xdfdf5bdfb684a369ULL,
0x7e7ee57ed79bfca9ULL, 0x242490243db44819ULL, 0x3b3bec3bc5d776feULL,
0xabab96ab313d4b9aULL, 0xcece1fce3ed181f0ULL, 0x1111441188552299ULL,
0x8f8f068f0c890383ULL, 0x4e4e254e4a6b9c04ULL, 0xb7b7e6b7d1517366ULL,
0xebeb8beb0b60cbe0ULL, 0x3c3cf03cfdcc78c1ULL, 0x81813e817cbf1ffdULL,
0x94946a94d4fe3540ULL, 0xf7f7fbf7eb0cf31cULL, 0xb9b9deb9a1676f18ULL,
0x13134c13985f268bULL, 0x2c2cb02c7d9c5851ULL, 0xd3d36bd3d6b8bb05ULL,
0xe7e7bbe76b5cd38cULL, 0x6e6ea56e57cbdc39ULL, 0xc4c437c46ef395aaULL,
0x03030c03180f061bULL, 0x565645568a13acdcULL, 0x44440d441a49885eULL,
0x7f7fe17fdf9efea0ULL, 0xa9a99ea921374f88ULL, 0x2a2aa82a4d825467ULL,
0xbbbbd6bbb16d6b0aULL, 0xc1c123c146e29f87ULL, 0x53535153a202a6f1ULL,
0xdcdc57dcae8ba572ULL, 0x0b0b2c0b58271653ULL, 0x9d9d4e9d9cd32701ULL,
0x6c6cad6c47c1d82bULL, 0x3131c43195f562a4ULL, 0x7474cd7487b9e8f3ULL,
0xf6f6fff6e309f115ULL, 0x464605460a438c4cULL, 0xacac8aac092645a5ULL,
0x89891e893c970fb5ULL, 0x14145014a04428b4ULL, 0xe1e1a3e15b42dfbaULL,
0x16165816b04e2ca6ULL, 0x3a3ae83acdd274f7ULL, 0x6969b9696fd0d206ULL,
0x09092409482d1241ULL, 0x7070dd70a7ade0d7ULL, 0xb6b6e2b6d954716fULL,
0xd0d067d0ceb7bd1eULL, 0xeded93ed3b7ec7d6ULL, 0xcccc17cc2edb85e2ULL,
0x424215422a578468ULL, 0x98985a98b4c22d2cULL, 0xa4a4aaa4490e55edULL,
0x2828a0285d885075ULL, 0x5c5c6d5cda31b886ULL, 0xf8f8c7f8933fed6bULL,
0x8686228644a411c2ULL,
};
static const u64 C1[256] = {
0xd818186018c07830ULL, 0x2623238c2305af46ULL, 0xb8c6c63fc67ef991ULL,
0xfbe8e887e8136fcdULL, 0xcb878726874ca113ULL, 0x11b8b8dab8a9626dULL,
0x0901010401080502ULL, 0x0d4f4f214f426e9eULL, 0x9b3636d836adee6cULL,
0xffa6a6a2a6590451ULL, 0x0cd2d26fd2debdb9ULL, 0x0ef5f5f3f5fb06f7ULL,
0x967979f979ef80f2ULL, 0x306f6fa16f5fcedeULL, 0x6d91917e91fcef3fULL,
0xf852525552aa07a4ULL, 0x4760609d6027fdc0ULL, 0x35bcbccabc897665ULL,
0x379b9b569baccd2bULL, 0x8a8e8e028e048c01ULL, 0xd2a3a3b6a371155bULL,
0x6c0c0c300c603c18ULL, 0x847b7bf17bff8af6ULL, 0x803535d435b5e16aULL,
0xf51d1d741de8693aULL, 0xb3e0e0a7e05347ddULL, 0x21d7d77bd7f6acb3ULL,
0x9cc2c22fc25eed99ULL, 0x432e2eb82e6d965cULL, 0x294b4b314b627a96ULL,
0x5dfefedffea321e1ULL, 0xd5575741578216aeULL, 0xbd15155415a8412aULL,
0xe87777c1779fb6eeULL, 0x923737dc37a5eb6eULL, 0x9ee5e5b3e57b56d7ULL,
0x139f9f469f8cd923ULL, 0x23f0f0e7f0d317fdULL, 0x204a4a354a6a7f94ULL,
0x44dada4fda9e95a9ULL, 0xa258587d58fa25b0ULL, 0xcfc9c903c906ca8fULL,
0x7c2929a429558d52ULL, 0x5a0a0a280a502214ULL, 0x50b1b1feb1e14f7fULL,
0xc9a0a0baa0691a5dULL, 0x146b6bb16b7fdad6ULL, 0xd985852e855cab17ULL,
0x3cbdbdcebd817367ULL, 0x8f5d5d695dd234baULL, 0x9010104010805020ULL,
0x07f4f4f7f4f303f5ULL, 0xddcbcb0bcb16c08bULL, 0xd33e3ef83eedc67cULL,
0x2d0505140528110aULL, 0x78676781671fe6ceULL, 0x97e4e4b7e47353d5ULL,
0x0227279c2725bb4eULL, 0x7341411941325882ULL, 0xa78b8b168b2c9d0bULL,
0xf6a7a7a6a7510153ULL, 0xb27d7de97dcf94faULL, 0x4995956e95dcfb37ULL,
0x56d8d847d88e9fadULL, 0x70fbfbcbfb8b30ebULL, 0xcdeeee9fee2371c1ULL,
0xbb7c7ced7cc791f8ULL, 0x716666856617e3ccULL, 0x7bdddd53dda68ea7ULL,
0xaf17175c17b84b2eULL, 0x454747014702468eULL, 0x1a9e9e429e84dc21ULL,
0xd4caca0fca1ec589ULL, 0x582d2db42d75995aULL, 0x2ebfbfc6bf917963ULL,
0x3f07071c07381b0eULL, 0xacadad8ead012347ULL, 0xb05a5a755aea2fb4ULL,
0xef838336836cb51bULL, 0xb63333cc3385ff66ULL, 0x5c636391633ff2c6ULL,
0x1202020802100a04ULL, 0x93aaaa92aa393849ULL, 0xde7171d971afa8e2ULL,
0xc6c8c807c80ecf8dULL, 0xd119196419c87d32ULL, 0x3b49493949727092ULL,
0x5fd9d943d9869aafULL, 0x31f2f2eff2c31df9ULL, 0xa8e3e3abe34b48dbULL,
0xb95b5b715be22ab6ULL, 0xbc88881a8834920dULL, 0x3e9a9a529aa4c829ULL,
0x0b262698262dbe4cULL, 0xbf3232c8328dfa64ULL, 0x59b0b0fab0e94a7dULL,
0xf2e9e983e91b6acfULL, 0x770f0f3c0f78331eULL, 0x33d5d573d5e6a6b7ULL,
0xf480803a8074ba1dULL, 0x27bebec2be997c61ULL, 0xebcdcd13cd26de87ULL,
0x893434d034bde468ULL, 0x3248483d487a7590ULL, 0x54ffffdbffab24e3ULL,
0x8d7a7af57af78ff4ULL, 0x6490907a90f4ea3dULL, 0x9d5f5f615fc23ebeULL,
0x3d202080201da040ULL, 0x0f6868bd6867d5d0ULL, 0xca1a1a681ad07234ULL,
0xb7aeae82ae192c41ULL, 0x7db4b4eab4c95e75ULL, 0xce54544d549a19a8ULL,
0x7f93937693ece53bULL, 0x2f222288220daa44ULL, 0x6364648d6407e9c8ULL,
0x2af1f1e3f1db12ffULL, 0xcc7373d173bfa2e6ULL, 0x8212124812905a24ULL,
0x7a40401d403a5d80ULL, 0x4808082008402810ULL, 0x95c3c32bc356e89bULL,
0xdfecec97ec337bc5ULL, 0x4ddbdb4bdb9690abULL, 0xc0a1a1bea1611f5fULL,
0x918d8d0e8d1c8307ULL, 0xc83d3df43df5c97aULL, 0x5b97976697ccf133ULL,
0x0000000000000000ULL, 0xf9cfcf1bcf36d483ULL, 0x6e2b2bac2b458756ULL,
0xe17676c57697b3ecULL, 0xe68282328264b019ULL, 0x28d6d67fd6fea9b1ULL,
0xc31b1b6c1bd87736ULL, 0x74b5b5eeb5c15b77ULL, 0xbeafaf86af112943ULL,
0x1d6a6ab56a77dfd4ULL, 0xea50505d50ba0da0ULL, 0x5745450945124c8aULL,
0x38f3f3ebf3cb18fbULL, 0xad3030c0309df060ULL, 0xc4efef9bef2b74c3ULL,
0xda3f3ffc3fe5c37eULL, 0xc755554955921caaULL, 0xdba2a2b2a2791059ULL,
0xe9eaea8fea0365c9ULL, 0x6a656589650feccaULL, 0x03babad2bab96869ULL,
0x4a2f2fbc2f65935eULL, 0x8ec0c027c04ee79dULL, 0x60dede5fdebe81a1ULL,
0xfc1c1c701ce06c38ULL, 0x46fdfdd3fdbb2ee7ULL, 0x1f4d4d294d52649aULL,
0x7692927292e4e039ULL, 0xfa7575c9758fbceaULL, 0x3606061806301e0cULL,
0xae8a8a128a249809ULL, 0x4bb2b2f2b2f94079ULL, 0x85e6e6bfe66359d1ULL,
0x7e0e0e380e70361cULL, 0xe71f1f7c1ff8633eULL, 0x556262956237f7c4ULL,
0x3ad4d477d4eea3b5ULL, 0x81a8a89aa829324dULL, 0x5296966296c4f431ULL,
0x62f9f9c3f99b3aefULL, 0xa3c5c533c566f697ULL, 0x102525942535b14aULL,
0xab59597959f220b2ULL, 0xd084842a8454ae15ULL, 0xc57272d572b7a7e4ULL,
0xec3939e439d5dd72ULL, 0x164c4c2d4c5a6198ULL, 0x945e5e655eca3bbcULL,
0x9f7878fd78e785f0ULL, 0xe53838e038ddd870ULL, 0x988c8c0a8c148605ULL,
0x17d1d163d1c6b2bfULL, 0xe4a5a5aea5410b57ULL, 0xa1e2e2afe2434dd9ULL,
0x4e616199612ff8c2ULL, 0x42b3b3f6b3f1457bULL, 0x342121842115a542ULL,
0x089c9c4a9c94d625ULL, 0xee1e1e781ef0663cULL, 0x6143431143225286ULL,
0xb1c7c73bc776fc93ULL, 0x4ffcfcd7fcb32be5ULL, 0x2404041004201408ULL,
0xe351515951b208a2ULL, 0x2599995e99bcc72fULL, 0x226d6da96d4fc4daULL,
0x650d0d340d68391aULL, 0x79fafacffa8335e9ULL, 0x69dfdf5bdfb684a3ULL,
0xa97e7ee57ed79bfcULL, 0x19242490243db448ULL, 0xfe3b3bec3bc5d776ULL,
0x9aabab96ab313d4bULL, 0xf0cece1fce3ed181ULL, 0x9911114411885522ULL,
0x838f8f068f0c8903ULL, 0x044e4e254e4a6b9cULL, 0x66b7b7e6b7d15173ULL,
0xe0ebeb8beb0b60cbULL, 0xc13c3cf03cfdcc78ULL, 0xfd81813e817cbf1fULL,
0x4094946a94d4fe35ULL, 0x1cf7f7fbf7eb0cf3ULL, 0x18b9b9deb9a1676fULL,
0x8b13134c13985f26ULL, 0x512c2cb02c7d9c58ULL, 0x05d3d36bd3d6b8bbULL,
0x8ce7e7bbe76b5cd3ULL, 0x396e6ea56e57cbdcULL, 0xaac4c437c46ef395ULL,
0x1b03030c03180f06ULL, 0xdc565645568a13acULL, 0x5e44440d441a4988ULL,
0xa07f7fe17fdf9efeULL, 0x88a9a99ea921374fULL, 0x672a2aa82a4d8254ULL,
0x0abbbbd6bbb16d6bULL, 0x87c1c123c146e29fULL, 0xf153535153a202a6ULL,
0x72dcdc57dcae8ba5ULL, 0x530b0b2c0b582716ULL, 0x019d9d4e9d9cd327ULL,
0x2b6c6cad6c47c1d8ULL, 0xa43131c43195f562ULL, 0xf37474cd7487b9e8ULL,
0x15f6f6fff6e309f1ULL, 0x4c464605460a438cULL, 0xa5acac8aac092645ULL,
0xb589891e893c970fULL, 0xb414145014a04428ULL, 0xbae1e1a3e15b42dfULL,
0xa616165816b04e2cULL, 0xf73a3ae83acdd274ULL, 0x066969b9696fd0d2ULL,
0x4109092409482d12ULL, 0xd77070dd70a7ade0ULL, 0x6fb6b6e2b6d95471ULL,
0x1ed0d067d0ceb7bdULL, 0xd6eded93ed3b7ec7ULL, 0xe2cccc17cc2edb85ULL,
0x68424215422a5784ULL, 0x2c98985a98b4c22dULL, 0xeda4a4aaa4490e55ULL,
0x752828a0285d8850ULL, 0x865c5c6d5cda31b8ULL, 0x6bf8f8c7f8933fedULL,
0xc28686228644a411ULL,
};
static const u64 C2[256] = {
0x30d818186018c078ULL, 0x462623238c2305afULL, 0x91b8c6c63fc67ef9ULL,
0xcdfbe8e887e8136fULL, 0x13cb878726874ca1ULL, 0x6d11b8b8dab8a962ULL,
0x0209010104010805ULL, 0x9e0d4f4f214f426eULL, 0x6c9b3636d836adeeULL,
0x51ffa6a6a2a65904ULL, 0xb90cd2d26fd2debdULL, 0xf70ef5f5f3f5fb06ULL,
0xf2967979f979ef80ULL, 0xde306f6fa16f5fceULL, 0x3f6d91917e91fcefULL,
0xa4f852525552aa07ULL, 0xc04760609d6027fdULL, 0x6535bcbccabc8976ULL,
0x2b379b9b569baccdULL, 0x018a8e8e028e048cULL, 0x5bd2a3a3b6a37115ULL,
0x186c0c0c300c603cULL, 0xf6847b7bf17bff8aULL, 0x6a803535d435b5e1ULL,
0x3af51d1d741de869ULL, 0xddb3e0e0a7e05347ULL, 0xb321d7d77bd7f6acULL,
0x999cc2c22fc25eedULL, 0x5c432e2eb82e6d96ULL, 0x96294b4b314b627aULL,
0xe15dfefedffea321ULL, 0xaed5575741578216ULL, 0x2abd15155415a841ULL,
0xeee87777c1779fb6ULL, 0x6e923737dc37a5ebULL, 0xd79ee5e5b3e57b56ULL,
0x23139f9f469f8cd9ULL, 0xfd23f0f0e7f0d317ULL, 0x94204a4a354a6a7fULL,
0xa944dada4fda9e95ULL, 0xb0a258587d58fa25ULL, 0x8fcfc9c903c906caULL,
0x527c2929a429558dULL, 0x145a0a0a280a5022ULL, 0x7f50b1b1feb1e14fULL,
0x5dc9a0a0baa0691aULL, 0xd6146b6bb16b7fdaULL, 0x17d985852e855cabULL,
0x673cbdbdcebd8173ULL, 0xba8f5d5d695dd234ULL, 0x2090101040108050ULL,
0xf507f4f4f7f4f303ULL, 0x8bddcbcb0bcb16c0ULL, 0x7cd33e3ef83eedc6ULL,
0x0a2d050514052811ULL, 0xce78676781671fe6ULL, 0xd597e4e4b7e47353ULL,
0x4e0227279c2725bbULL, 0x8273414119413258ULL, 0x0ba78b8b168b2c9dULL,
0x53f6a7a7a6a75101ULL, 0xfab27d7de97dcf94ULL, 0x374995956e95dcfbULL,
0xad56d8d847d88e9fULL, 0xeb70fbfbcbfb8b30ULL, 0xc1cdeeee9fee2371ULL,
0xf8bb7c7ced7cc791ULL, 0xcc716666856617e3ULL, 0xa77bdddd53dda68eULL,
0x2eaf17175c17b84bULL, 0x8e45474701470246ULL, 0x211a9e9e429e84dcULL,
0x89d4caca0fca1ec5ULL, 0x5a582d2db42d7599ULL, 0x632ebfbfc6bf9179ULL,
0x0e3f07071c07381bULL, 0x47acadad8ead0123ULL, 0xb4b05a5a755aea2fULL,
0x1bef838336836cb5ULL, 0x66b63333cc3385ffULL, 0xc65c636391633ff2ULL,
0x041202020802100aULL, 0x4993aaaa92aa3938ULL, 0xe2de7171d971afa8ULL,
0x8dc6c8c807c80ecfULL, 0x32d119196419c87dULL, 0x923b494939497270ULL,
0xaf5fd9d943d9869aULL, 0xf931f2f2eff2c31dULL, 0xdba8e3e3abe34b48ULL,
0xb6b95b5b715be22aULL, 0x0dbc88881a883492ULL, 0x293e9a9a529aa4c8ULL,
0x4c0b262698262dbeULL, 0x64bf3232c8328dfaULL, 0x7d59b0b0fab0e94aULL,
0xcff2e9e983e91b6aULL, 0x1e770f0f3c0f7833ULL, 0xb733d5d573d5e6a6ULL,
0x1df480803a8074baULL, 0x6127bebec2be997cULL, 0x87ebcdcd13cd26deULL,
0x68893434d034bde4ULL, 0x903248483d487a75ULL, 0xe354ffffdbffab24ULL,
0xf48d7a7af57af78fULL, 0x3d6490907a90f4eaULL, 0xbe9d5f5f615fc23eULL,
0x403d202080201da0ULL, 0xd00f6868bd6867d5ULL, 0x34ca1a1a681ad072ULL,
0x41b7aeae82ae192cULL, 0x757db4b4eab4c95eULL, 0xa8ce54544d549a19ULL,
0x3b7f93937693ece5ULL, 0x442f222288220daaULL, 0xc86364648d6407e9ULL,
0xff2af1f1e3f1db12ULL, 0xe6cc7373d173bfa2ULL, 0x248212124812905aULL,
0x807a40401d403a5dULL, 0x1048080820084028ULL, 0x9b95c3c32bc356e8ULL,
0xc5dfecec97ec337bULL, 0xab4ddbdb4bdb9690ULL, 0x5fc0a1a1bea1611fULL,
0x07918d8d0e8d1c83ULL, 0x7ac83d3df43df5c9ULL, 0x335b97976697ccf1ULL,
0x0000000000000000ULL, 0x83f9cfcf1bcf36d4ULL, 0x566e2b2bac2b4587ULL,
0xece17676c57697b3ULL, 0x19e68282328264b0ULL, 0xb128d6d67fd6fea9ULL,
0x36c31b1b6c1bd877ULL, 0x7774b5b5eeb5c15bULL, 0x43beafaf86af1129ULL,
0xd41d6a6ab56a77dfULL, 0xa0ea50505d50ba0dULL, 0x8a5745450945124cULL,
0xfb38f3f3ebf3cb18ULL, 0x60ad3030c0309df0ULL, 0xc3c4efef9bef2b74ULL,
0x7eda3f3ffc3fe5c3ULL, 0xaac755554955921cULL, 0x59dba2a2b2a27910ULL,
0xc9e9eaea8fea0365ULL, 0xca6a656589650fecULL, 0x6903babad2bab968ULL,
0x5e4a2f2fbc2f6593ULL, 0x9d8ec0c027c04ee7ULL, 0xa160dede5fdebe81ULL,
0x38fc1c1c701ce06cULL, 0xe746fdfdd3fdbb2eULL, 0x9a1f4d4d294d5264ULL,
0x397692927292e4e0ULL, 0xeafa7575c9758fbcULL, 0x0c3606061806301eULL,
0x09ae8a8a128a2498ULL, 0x794bb2b2f2b2f940ULL, 0xd185e6e6bfe66359ULL,
0x1c7e0e0e380e7036ULL, 0x3ee71f1f7c1ff863ULL, 0xc4556262956237f7ULL,
0xb53ad4d477d4eea3ULL, 0x4d81a8a89aa82932ULL, 0x315296966296c4f4ULL,
0xef62f9f9c3f99b3aULL, 0x97a3c5c533c566f6ULL, 0x4a102525942535b1ULL,
0xb2ab59597959f220ULL, 0x15d084842a8454aeULL, 0xe4c57272d572b7a7ULL,
0x72ec3939e439d5ddULL, 0x98164c4c2d4c5a61ULL, 0xbc945e5e655eca3bULL,
0xf09f7878fd78e785ULL, 0x70e53838e038ddd8ULL, 0x05988c8c0a8c1486ULL,
0xbf17d1d163d1c6b2ULL, 0x57e4a5a5aea5410bULL, 0xd9a1e2e2afe2434dULL,
0xc24e616199612ff8ULL, 0x7b42b3b3f6b3f145ULL, 0x42342121842115a5ULL,
0x25089c9c4a9c94d6ULL, 0x3cee1e1e781ef066ULL, 0x8661434311432252ULL,
0x93b1c7c73bc776fcULL, 0xe54ffcfcd7fcb32bULL, 0x0824040410042014ULL,
0xa2e351515951b208ULL, 0x2f2599995e99bcc7ULL, 0xda226d6da96d4fc4ULL,
0x1a650d0d340d6839ULL, 0xe979fafacffa8335ULL, 0xa369dfdf5bdfb684ULL,
0xfca97e7ee57ed79bULL, 0x4819242490243db4ULL, 0x76fe3b3bec3bc5d7ULL,
0x4b9aabab96ab313dULL, 0x81f0cece1fce3ed1ULL, 0x2299111144118855ULL,
0x03838f8f068f0c89ULL, 0x9c044e4e254e4a6bULL, 0x7366b7b7e6b7d151ULL,
0xcbe0ebeb8beb0b60ULL, 0x78c13c3cf03cfdccULL, 0x1ffd81813e817cbfULL,
0x354094946a94d4feULL, 0xf31cf7f7fbf7eb0cULL, 0x6f18b9b9deb9a167ULL,
0x268b13134c13985fULL, 0x58512c2cb02c7d9cULL, 0xbb05d3d36bd3d6b8ULL,
0xd38ce7e7bbe76b5cULL, 0xdc396e6ea56e57cbULL, 0x95aac4c437c46ef3ULL,
0x061b03030c03180fULL, 0xacdc565645568a13ULL, 0x885e44440d441a49ULL,
0xfea07f7fe17fdf9eULL, 0x4f88a9a99ea92137ULL, 0x54672a2aa82a4d82ULL,
0x6b0abbbbd6bbb16dULL, 0x9f87c1c123c146e2ULL, 0xa6f153535153a202ULL,
0xa572dcdc57dcae8bULL, 0x16530b0b2c0b5827ULL, 0x27019d9d4e9d9cd3ULL,
0xd82b6c6cad6c47c1ULL, 0x62a43131c43195f5ULL, 0xe8f37474cd7487b9ULL,
0xf115f6f6fff6e309ULL, 0x8c4c464605460a43ULL, 0x45a5acac8aac0926ULL,
0x0fb589891e893c97ULL, 0x28b414145014a044ULL, 0xdfbae1e1a3e15b42ULL,
0x2ca616165816b04eULL, 0x74f73a3ae83acdd2ULL, 0xd2066969b9696fd0ULL,
0x124109092409482dULL, 0xe0d77070dd70a7adULL, 0x716fb6b6e2b6d954ULL,
0xbd1ed0d067d0ceb7ULL, 0xc7d6eded93ed3b7eULL, 0x85e2cccc17cc2edbULL,
0x8468424215422a57ULL, 0x2d2c98985a98b4c2ULL, 0x55eda4a4aaa4490eULL,
0x50752828a0285d88ULL, 0xb8865c5c6d5cda31ULL, 0xed6bf8f8c7f8933fULL,
0x11c28686228644a4ULL,
};
static const u64 C3[256] = {
0x7830d818186018c0ULL, 0xaf462623238c2305ULL, 0xf991b8c6c63fc67eULL,
0x6fcdfbe8e887e813ULL, 0xa113cb878726874cULL, 0x626d11b8b8dab8a9ULL,
0x0502090101040108ULL, 0x6e9e0d4f4f214f42ULL, 0xee6c9b3636d836adULL,
0x0451ffa6a6a2a659ULL, 0xbdb90cd2d26fd2deULL, 0x06f70ef5f5f3f5fbULL,
0x80f2967979f979efULL, 0xcede306f6fa16f5fULL, 0xef3f6d91917e91fcULL,
0x07a4f852525552aaULL, 0xfdc04760609d6027ULL, 0x766535bcbccabc89ULL,
0xcd2b379b9b569bacULL, 0x8c018a8e8e028e04ULL, 0x155bd2a3a3b6a371ULL,
0x3c186c0c0c300c60ULL, 0x8af6847b7bf17bffULL, 0xe16a803535d435b5ULL,
0x693af51d1d741de8ULL, 0x47ddb3e0e0a7e053ULL, 0xacb321d7d77bd7f6ULL,
0xed999cc2c22fc25eULL, 0x965c432e2eb82e6dULL, 0x7a96294b4b314b62ULL,
0x21e15dfefedffea3ULL, 0x16aed55757415782ULL, 0x412abd15155415a8ULL,
0xb6eee87777c1779fULL, 0xeb6e923737dc37a5ULL, 0x56d79ee5e5b3e57bULL,
0xd923139f9f469f8cULL, 0x17fd23f0f0e7f0d3ULL, 0x7f94204a4a354a6aULL,
0x95a944dada4fda9eULL, 0x25b0a258587d58faULL, 0xca8fcfc9c903c906ULL,
0x8d527c2929a42955ULL, 0x22145a0a0a280a50ULL, 0x4f7f50b1b1feb1e1ULL,
0x1a5dc9a0a0baa069ULL, 0xdad6146b6bb16b7fULL, 0xab17d985852e855cULL,
0x73673cbdbdcebd81ULL, 0x34ba8f5d5d695dd2ULL, 0x5020901010401080ULL,
0x03f507f4f4f7f4f3ULL, 0xc08bddcbcb0bcb16ULL, 0xc67cd33e3ef83eedULL,
0x110a2d0505140528ULL, 0xe6ce78676781671fULL, 0x53d597e4e4b7e473ULL,
0xbb4e0227279c2725ULL, 0x5882734141194132ULL, 0x9d0ba78b8b168b2cULL,
0x0153f6a7a7a6a751ULL, 0x94fab27d7de97dcfULL, 0xfb374995956e95dcULL,
0x9fad56d8d847d88eULL, 0x30eb70fbfbcbfb8bULL, 0x71c1cdeeee9fee23ULL,
0x91f8bb7c7ced7cc7ULL, 0xe3cc716666856617ULL, 0x8ea77bdddd53dda6ULL,
0x4b2eaf17175c17b8ULL, 0x468e454747014702ULL, 0xdc211a9e9e429e84ULL,
0xc589d4caca0fca1eULL, 0x995a582d2db42d75ULL, 0x79632ebfbfc6bf91ULL,
0x1b0e3f07071c0738ULL, 0x2347acadad8ead01ULL, 0x2fb4b05a5a755aeaULL,
0xb51bef838336836cULL, 0xff66b63333cc3385ULL, 0xf2c65c636391633fULL,
0x0a04120202080210ULL, 0x384993aaaa92aa39ULL, 0xa8e2de7171d971afULL,
0xcf8dc6c8c807c80eULL, 0x7d32d119196419c8ULL, 0x70923b4949394972ULL,
0x9aaf5fd9d943d986ULL, 0x1df931f2f2eff2c3ULL, 0x48dba8e3e3abe34bULL,
0x2ab6b95b5b715be2ULL, 0x920dbc88881a8834ULL, 0xc8293e9a9a529aa4ULL,
0xbe4c0b262698262dULL, 0xfa64bf3232c8328dULL, 0x4a7d59b0b0fab0e9ULL,
0x6acff2e9e983e91bULL, 0x331e770f0f3c0f78ULL, 0xa6b733d5d573d5e6ULL,
0xba1df480803a8074ULL, 0x7c6127bebec2be99ULL, 0xde87ebcdcd13cd26ULL,
0xe468893434d034bdULL, 0x75903248483d487aULL, 0x24e354ffffdbffabULL,
0x8ff48d7a7af57af7ULL, 0xea3d6490907a90f4ULL, 0x3ebe9d5f5f615fc2ULL,
0xa0403d202080201dULL, 0xd5d00f6868bd6867ULL, 0x7234ca1a1a681ad0ULL,
0x2c41b7aeae82ae19ULL, 0x5e757db4b4eab4c9ULL, 0x19a8ce54544d549aULL,
0xe53b7f93937693ecULL, 0xaa442f222288220dULL, 0xe9c86364648d6407ULL,
0x12ff2af1f1e3f1dbULL, 0xa2e6cc7373d173bfULL, 0x5a24821212481290ULL,
0x5d807a40401d403aULL, 0x2810480808200840ULL, 0xe89b95c3c32bc356ULL,
0x7bc5dfecec97ec33ULL, 0x90ab4ddbdb4bdb96ULL, 0x1f5fc0a1a1bea161ULL,
0x8307918d8d0e8d1cULL, 0xc97ac83d3df43df5ULL, 0xf1335b97976697ccULL,
0x0000000000000000ULL, 0xd483f9cfcf1bcf36ULL, 0x87566e2b2bac2b45ULL,
0xb3ece17676c57697ULL, 0xb019e68282328264ULL, 0xa9b128d6d67fd6feULL,
0x7736c31b1b6c1bd8ULL, 0x5b7774b5b5eeb5c1ULL, 0x2943beafaf86af11ULL,
0xdfd41d6a6ab56a77ULL, 0x0da0ea50505d50baULL, 0x4c8a574545094512ULL,
0x18fb38f3f3ebf3cbULL, 0xf060ad3030c0309dULL, 0x74c3c4efef9bef2bULL,
0xc37eda3f3ffc3fe5ULL, 0x1caac75555495592ULL, 0x1059dba2a2b2a279ULL,
0x65c9e9eaea8fea03ULL, 0xecca6a656589650fULL, 0x686903babad2bab9ULL,
0x935e4a2f2fbc2f65ULL, 0xe79d8ec0c027c04eULL, 0x81a160dede5fdebeULL,
0x6c38fc1c1c701ce0ULL, 0x2ee746fdfdd3fdbbULL, 0x649a1f4d4d294d52ULL,
0xe0397692927292e4ULL, 0xbceafa7575c9758fULL, 0x1e0c360606180630ULL,
0x9809ae8a8a128a24ULL, 0x40794bb2b2f2b2f9ULL, 0x59d185e6e6bfe663ULL,
0x361c7e0e0e380e70ULL, 0x633ee71f1f7c1ff8ULL, 0xf7c4556262956237ULL,
0xa3b53ad4d477d4eeULL, 0x324d81a8a89aa829ULL, 0xf4315296966296c4ULL,
0x3aef62f9f9c3f99bULL, 0xf697a3c5c533c566ULL, 0xb14a102525942535ULL,
0x20b2ab59597959f2ULL, 0xae15d084842a8454ULL, 0xa7e4c57272d572b7ULL,
0xdd72ec3939e439d5ULL, 0x6198164c4c2d4c5aULL, 0x3bbc945e5e655ecaULL,
0x85f09f7878fd78e7ULL, 0xd870e53838e038ddULL, 0x8605988c8c0a8c14ULL,
0xb2bf17d1d163d1c6ULL, 0x0b57e4a5a5aea541ULL, 0x4dd9a1e2e2afe243ULL,
0xf8c24e616199612fULL, 0x457b42b3b3f6b3f1ULL, 0xa542342121842115ULL,
0xd625089c9c4a9c94ULL, 0x663cee1e1e781ef0ULL, 0x5286614343114322ULL,
0xfc93b1c7c73bc776ULL, 0x2be54ffcfcd7fcb3ULL, 0x1408240404100420ULL,
0x08a2e351515951b2ULL, 0xc72f2599995e99bcULL, 0xc4da226d6da96d4fULL,
0x391a650d0d340d68ULL, 0x35e979fafacffa83ULL, 0x84a369dfdf5bdfb6ULL,
0x9bfca97e7ee57ed7ULL, 0xb44819242490243dULL, 0xd776fe3b3bec3bc5ULL,
0x3d4b9aabab96ab31ULL, 0xd181f0cece1fce3eULL, 0x5522991111441188ULL,
0x8903838f8f068f0cULL, 0x6b9c044e4e254e4aULL, 0x517366b7b7e6b7d1ULL,
0x60cbe0ebeb8beb0bULL, 0xcc78c13c3cf03cfdULL, 0xbf1ffd81813e817cULL,
0xfe354094946a94d4ULL, 0x0cf31cf7f7fbf7ebULL, 0x676f18b9b9deb9a1ULL,
0x5f268b13134c1398ULL, 0x9c58512c2cb02c7dULL, 0xb8bb05d3d36bd3d6ULL,
0x5cd38ce7e7bbe76bULL, 0xcbdc396e6ea56e57ULL, 0xf395aac4c437c46eULL,
0x0f061b03030c0318ULL, 0x13acdc565645568aULL, 0x49885e44440d441aULL,
0x9efea07f7fe17fdfULL, 0x374f88a9a99ea921ULL, 0x8254672a2aa82a4dULL,
0x6d6b0abbbbd6bbb1ULL, 0xe29f87c1c123c146ULL, 0x02a6f153535153a2ULL,
0x8ba572dcdc57dcaeULL, 0x2716530b0b2c0b58ULL, 0xd327019d9d4e9d9cULL,
0xc1d82b6c6cad6c47ULL, 0xf562a43131c43195ULL, 0xb9e8f37474cd7487ULL,
0x09f115f6f6fff6e3ULL, 0x438c4c464605460aULL, 0x2645a5acac8aac09ULL,
0x970fb589891e893cULL, 0x4428b414145014a0ULL, 0x42dfbae1e1a3e15bULL,
0x4e2ca616165816b0ULL, 0xd274f73a3ae83acdULL, 0xd0d2066969b9696fULL,
0x2d12410909240948ULL, 0xade0d77070dd70a7ULL, 0x54716fb6b6e2b6d9ULL,
0xb7bd1ed0d067d0ceULL, 0x7ec7d6eded93ed3bULL, 0xdb85e2cccc17cc2eULL,
0x578468424215422aULL, 0xc22d2c98985a98b4ULL, 0x0e55eda4a4aaa449ULL,
0x8850752828a0285dULL, 0x31b8865c5c6d5cdaULL, 0x3fed6bf8f8c7f893ULL,
0xa411c28686228644ULL,
};
static const u64 C4[256] = {
0xc07830d818186018ULL, 0x05af462623238c23ULL, 0x7ef991b8c6c63fc6ULL,
0x136fcdfbe8e887e8ULL, 0x4ca113cb87872687ULL, 0xa9626d11b8b8dab8ULL,
0x0805020901010401ULL, 0x426e9e0d4f4f214fULL, 0xadee6c9b3636d836ULL,
0x590451ffa6a6a2a6ULL, 0xdebdb90cd2d26fd2ULL, 0xfb06f70ef5f5f3f5ULL,
0xef80f2967979f979ULL, 0x5fcede306f6fa16fULL, 0xfcef3f6d91917e91ULL,
0xaa07a4f852525552ULL, 0x27fdc04760609d60ULL, 0x89766535bcbccabcULL,
0xaccd2b379b9b569bULL, 0x048c018a8e8e028eULL, 0x71155bd2a3a3b6a3ULL,
0x603c186c0c0c300cULL, 0xff8af6847b7bf17bULL, 0xb5e16a803535d435ULL,
0xe8693af51d1d741dULL, 0x5347ddb3e0e0a7e0ULL, 0xf6acb321d7d77bd7ULL,
0x5eed999cc2c22fc2ULL, 0x6d965c432e2eb82eULL, 0x627a96294b4b314bULL,
0xa321e15dfefedffeULL, 0x8216aed557574157ULL, 0xa8412abd15155415ULL,
0x9fb6eee87777c177ULL, 0xa5eb6e923737dc37ULL, 0x7b56d79ee5e5b3e5ULL,
0x8cd923139f9f469fULL, 0xd317fd23f0f0e7f0ULL, 0x6a7f94204a4a354aULL,
0x9e95a944dada4fdaULL, 0xfa25b0a258587d58ULL, 0x06ca8fcfc9c903c9ULL,
0x558d527c2929a429ULL, 0x5022145a0a0a280aULL, 0xe14f7f50b1b1feb1ULL,
0x691a5dc9a0a0baa0ULL, 0x7fdad6146b6bb16bULL, 0x5cab17d985852e85ULL,
0x8173673cbdbdcebdULL, 0xd234ba8f5d5d695dULL, 0x8050209010104010ULL,
0xf303f507f4f4f7f4ULL, 0x16c08bddcbcb0bcbULL, 0xedc67cd33e3ef83eULL,
0x28110a2d05051405ULL, 0x1fe6ce7867678167ULL, 0x7353d597e4e4b7e4ULL,
0x25bb4e0227279c27ULL, 0x3258827341411941ULL, 0x2c9d0ba78b8b168bULL,
0x510153f6a7a7a6a7ULL, 0xcf94fab27d7de97dULL, 0xdcfb374995956e95ULL,
0x8e9fad56d8d847d8ULL, 0x8b30eb70fbfbcbfbULL, 0x2371c1cdeeee9feeULL,
0xc791f8bb7c7ced7cULL, 0x17e3cc7166668566ULL, 0xa68ea77bdddd53ddULL,
0xb84b2eaf17175c17ULL, 0x02468e4547470147ULL, 0x84dc211a9e9e429eULL,
0x1ec589d4caca0fcaULL, 0x75995a582d2db42dULL, 0x9179632ebfbfc6bfULL,
0x381b0e3f07071c07ULL, 0x012347acadad8eadULL, 0xea2fb4b05a5a755aULL,
0x6cb51bef83833683ULL, 0x85ff66b63333cc33ULL, 0x3ff2c65c63639163ULL,
0x100a041202020802ULL, 0x39384993aaaa92aaULL, 0xafa8e2de7171d971ULL,
0x0ecf8dc6c8c807c8ULL, 0xc87d32d119196419ULL, 0x7270923b49493949ULL,
0x869aaf5fd9d943d9ULL, 0xc31df931f2f2eff2ULL, 0x4b48dba8e3e3abe3ULL,
0xe22ab6b95b5b715bULL, 0x34920dbc88881a88ULL, 0xa4c8293e9a9a529aULL,
0x2dbe4c0b26269826ULL, 0x8dfa64bf3232c832ULL, 0xe94a7d59b0b0fab0ULL,
0x1b6acff2e9e983e9ULL, 0x78331e770f0f3c0fULL, 0xe6a6b733d5d573d5ULL,
0x74ba1df480803a80ULL, 0x997c6127bebec2beULL, 0x26de87ebcdcd13cdULL,
0xbde468893434d034ULL, 0x7a75903248483d48ULL, 0xab24e354ffffdbffULL,
0xf78ff48d7a7af57aULL, 0xf4ea3d6490907a90ULL, 0xc23ebe9d5f5f615fULL,
0x1da0403d20208020ULL, 0x67d5d00f6868bd68ULL, 0xd07234ca1a1a681aULL,
0x192c41b7aeae82aeULL, 0xc95e757db4b4eab4ULL, 0x9a19a8ce54544d54ULL,
0xece53b7f93937693ULL, 0x0daa442f22228822ULL, 0x07e9c86364648d64ULL,
0xdb12ff2af1f1e3f1ULL, 0xbfa2e6cc7373d173ULL, 0x905a248212124812ULL,
0x3a5d807a40401d40ULL, 0x4028104808082008ULL, 0x56e89b95c3c32bc3ULL,
0x337bc5dfecec97ecULL, 0x9690ab4ddbdb4bdbULL, 0x611f5fc0a1a1bea1ULL,
0x1c8307918d8d0e8dULL, 0xf5c97ac83d3df43dULL, 0xccf1335b97976697ULL,
0x0000000000000000ULL, 0x36d483f9cfcf1bcfULL, 0x4587566e2b2bac2bULL,
0x97b3ece17676c576ULL, 0x64b019e682823282ULL, 0xfea9b128d6d67fd6ULL,
0xd87736c31b1b6c1bULL, 0xc15b7774b5b5eeb5ULL, 0x112943beafaf86afULL,
0x77dfd41d6a6ab56aULL, 0xba0da0ea50505d50ULL, 0x124c8a5745450945ULL,
0xcb18fb38f3f3ebf3ULL, 0x9df060ad3030c030ULL, 0x2b74c3c4efef9befULL,
0xe5c37eda3f3ffc3fULL, 0x921caac755554955ULL, 0x791059dba2a2b2a2ULL,
0x0365c9e9eaea8feaULL, 0x0fecca6a65658965ULL, 0xb9686903babad2baULL,
0x65935e4a2f2fbc2fULL, 0x4ee79d8ec0c027c0ULL, 0xbe81a160dede5fdeULL,
0xe06c38fc1c1c701cULL, 0xbb2ee746fdfdd3fdULL, 0x52649a1f4d4d294dULL,
0xe4e0397692927292ULL, 0x8fbceafa7575c975ULL, 0x301e0c3606061806ULL,
0x249809ae8a8a128aULL, 0xf940794bb2b2f2b2ULL, 0x6359d185e6e6bfe6ULL,
0x70361c7e0e0e380eULL, 0xf8633ee71f1f7c1fULL, 0x37f7c45562629562ULL,
0xeea3b53ad4d477d4ULL, 0x29324d81a8a89aa8ULL, 0xc4f4315296966296ULL,
0x9b3aef62f9f9c3f9ULL, 0x66f697a3c5c533c5ULL, 0x35b14a1025259425ULL,
0xf220b2ab59597959ULL, 0x54ae15d084842a84ULL, 0xb7a7e4c57272d572ULL,
0xd5dd72ec3939e439ULL, 0x5a6198164c4c2d4cULL, 0xca3bbc945e5e655eULL,
0xe785f09f7878fd78ULL, 0xddd870e53838e038ULL, 0x148605988c8c0a8cULL,
0xc6b2bf17d1d163d1ULL, 0x410b57e4a5a5aea5ULL, 0x434dd9a1e2e2afe2ULL,
0x2ff8c24e61619961ULL, 0xf1457b42b3b3f6b3ULL, 0x15a5423421218421ULL,
0x94d625089c9c4a9cULL, 0xf0663cee1e1e781eULL, 0x2252866143431143ULL,
0x76fc93b1c7c73bc7ULL, 0xb32be54ffcfcd7fcULL, 0x2014082404041004ULL,
0xb208a2e351515951ULL, 0xbcc72f2599995e99ULL, 0x4fc4da226d6da96dULL,
0x68391a650d0d340dULL, 0x8335e979fafacffaULL, 0xb684a369dfdf5bdfULL,
0xd79bfca97e7ee57eULL, 0x3db4481924249024ULL, 0xc5d776fe3b3bec3bULL,
0x313d4b9aabab96abULL, 0x3ed181f0cece1fceULL, 0x8855229911114411ULL,
0x0c8903838f8f068fULL, 0x4a6b9c044e4e254eULL, 0xd1517366b7b7e6b7ULL,
0x0b60cbe0ebeb8bebULL, 0xfdcc78c13c3cf03cULL, 0x7cbf1ffd81813e81ULL,
0xd4fe354094946a94ULL, 0xeb0cf31cf7f7fbf7ULL, 0xa1676f18b9b9deb9ULL,
0x985f268b13134c13ULL, 0x7d9c58512c2cb02cULL, 0xd6b8bb05d3d36bd3ULL,
0x6b5cd38ce7e7bbe7ULL, 0x57cbdc396e6ea56eULL, 0x6ef395aac4c437c4ULL,
0x180f061b03030c03ULL, 0x8a13acdc56564556ULL, 0x1a49885e44440d44ULL,
0xdf9efea07f7fe17fULL, 0x21374f88a9a99ea9ULL, 0x4d8254672a2aa82aULL,
0xb16d6b0abbbbd6bbULL, 0x46e29f87c1c123c1ULL, 0xa202a6f153535153ULL,
0xae8ba572dcdc57dcULL, 0x582716530b0b2c0bULL, 0x9cd327019d9d4e9dULL,
0x47c1d82b6c6cad6cULL, 0x95f562a43131c431ULL, 0x87b9e8f37474cd74ULL,
0xe309f115f6f6fff6ULL, 0x0a438c4c46460546ULL, 0x092645a5acac8aacULL,
0x3c970fb589891e89ULL, 0xa04428b414145014ULL, 0x5b42dfbae1e1a3e1ULL,
0xb04e2ca616165816ULL, 0xcdd274f73a3ae83aULL, 0x6fd0d2066969b969ULL,
0x482d124109092409ULL, 0xa7ade0d77070dd70ULL, 0xd954716fb6b6e2b6ULL,
0xceb7bd1ed0d067d0ULL, 0x3b7ec7d6eded93edULL, 0x2edb85e2cccc17ccULL,
0x2a57846842421542ULL, 0xb4c22d2c98985a98ULL, 0x490e55eda4a4aaa4ULL,
0x5d8850752828a028ULL, 0xda31b8865c5c6d5cULL, 0x933fed6bf8f8c7f8ULL,
0x44a411c286862286ULL,
};
static const u64 C5[256] = {
0x18c07830d8181860ULL, 0x2305af462623238cULL, 0xc67ef991b8c6c63fULL,
0xe8136fcdfbe8e887ULL, 0x874ca113cb878726ULL, 0xb8a9626d11b8b8daULL,
0x0108050209010104ULL, 0x4f426e9e0d4f4f21ULL, 0x36adee6c9b3636d8ULL,
0xa6590451ffa6a6a2ULL, 0xd2debdb90cd2d26fULL, 0xf5fb06f70ef5f5f3ULL,
0x79ef80f2967979f9ULL, 0x6f5fcede306f6fa1ULL, 0x91fcef3f6d91917eULL,
0x52aa07a4f8525255ULL, 0x6027fdc04760609dULL, 0xbc89766535bcbccaULL,
0x9baccd2b379b9b56ULL, 0x8e048c018a8e8e02ULL, 0xa371155bd2a3a3b6ULL,
0x0c603c186c0c0c30ULL, 0x7bff8af6847b7bf1ULL, 0x35b5e16a803535d4ULL,
0x1de8693af51d1d74ULL, 0xe05347ddb3e0e0a7ULL, 0xd7f6acb321d7d77bULL,
0xc25eed999cc2c22fULL, 0x2e6d965c432e2eb8ULL, 0x4b627a96294b4b31ULL,
0xfea321e15dfefedfULL, 0x578216aed5575741ULL, 0x15a8412abd151554ULL,
0x779fb6eee87777c1ULL, 0x37a5eb6e923737dcULL, 0xe57b56d79ee5e5b3ULL,
0x9f8cd923139f9f46ULL, 0xf0d317fd23f0f0e7ULL, 0x4a6a7f94204a4a35ULL,
0xda9e95a944dada4fULL, 0x58fa25b0a258587dULL, 0xc906ca8fcfc9c903ULL,
0x29558d527c2929a4ULL, 0x0a5022145a0a0a28ULL, 0xb1e14f7f50b1b1feULL,
0xa0691a5dc9a0a0baULL, 0x6b7fdad6146b6bb1ULL, 0x855cab17d985852eULL,
0xbd8173673cbdbdceULL, 0x5dd234ba8f5d5d69ULL, 0x1080502090101040ULL,
0xf4f303f507f4f4f7ULL, 0xcb16c08bddcbcb0bULL, 0x3eedc67cd33e3ef8ULL,
0x0528110a2d050514ULL, 0x671fe6ce78676781ULL, 0xe47353d597e4e4b7ULL,
0x2725bb4e0227279cULL, 0x4132588273414119ULL, 0x8b2c9d0ba78b8b16ULL,
0xa7510153f6a7a7a6ULL, 0x7dcf94fab27d7de9ULL, 0x95dcfb374995956eULL,
0xd88e9fad56d8d847ULL, 0xfb8b30eb70fbfbcbULL, 0xee2371c1cdeeee9fULL,
0x7cc791f8bb7c7cedULL, 0x6617e3cc71666685ULL, 0xdda68ea77bdddd53ULL,
0x17b84b2eaf17175cULL, 0x4702468e45474701ULL, 0x9e84dc211a9e9e42ULL,
0xca1ec589d4caca0fULL, 0x2d75995a582d2db4ULL, 0xbf9179632ebfbfc6ULL,
0x07381b0e3f07071cULL, 0xad012347acadad8eULL, 0x5aea2fb4b05a5a75ULL,
0x836cb51bef838336ULL, 0x3385ff66b63333ccULL, 0x633ff2c65c636391ULL,
0x02100a0412020208ULL, 0xaa39384993aaaa92ULL, 0x71afa8e2de7171d9ULL,
0xc80ecf8dc6c8c807ULL, 0x19c87d32d1191964ULL, 0x497270923b494939ULL,
0xd9869aaf5fd9d943ULL, 0xf2c31df931f2f2efULL, 0xe34b48dba8e3e3abULL,
0x5be22ab6b95b5b71ULL, 0x8834920dbc88881aULL, 0x9aa4c8293e9a9a52ULL,
0x262dbe4c0b262698ULL, 0x328dfa64bf3232c8ULL, 0xb0e94a7d59b0b0faULL,
0xe91b6acff2e9e983ULL, 0x0f78331e770f0f3cULL, 0xd5e6a6b733d5d573ULL,
0x8074ba1df480803aULL, 0xbe997c6127bebec2ULL, 0xcd26de87ebcdcd13ULL,
0x34bde468893434d0ULL, 0x487a75903248483dULL, 0xffab24e354ffffdbULL,
0x7af78ff48d7a7af5ULL, 0x90f4ea3d6490907aULL, 0x5fc23ebe9d5f5f61ULL,
0x201da0403d202080ULL, 0x6867d5d00f6868bdULL, 0x1ad07234ca1a1a68ULL,
0xae192c41b7aeae82ULL, 0xb4c95e757db4b4eaULL, 0x549a19a8ce54544dULL,
0x93ece53b7f939376ULL, 0x220daa442f222288ULL, 0x6407e9c86364648dULL,
0xf1db12ff2af1f1e3ULL, 0x73bfa2e6cc7373d1ULL, 0x12905a2482121248ULL,
0x403a5d807a40401dULL, 0x0840281048080820ULL, 0xc356e89b95c3c32bULL,
0xec337bc5dfecec97ULL, 0xdb9690ab4ddbdb4bULL, 0xa1611f5fc0a1a1beULL,
0x8d1c8307918d8d0eULL, 0x3df5c97ac83d3df4ULL, 0x97ccf1335b979766ULL,
0x0000000000000000ULL, 0xcf36d483f9cfcf1bULL, 0x2b4587566e2b2bacULL,
0x7697b3ece17676c5ULL, 0x8264b019e6828232ULL, 0xd6fea9b128d6d67fULL,
0x1bd87736c31b1b6cULL, 0xb5c15b7774b5b5eeULL, 0xaf112943beafaf86ULL,
0x6a77dfd41d6a6ab5ULL, 0x50ba0da0ea50505dULL, 0x45124c8a57454509ULL,
0xf3cb18fb38f3f3ebULL, 0x309df060ad3030c0ULL, 0xef2b74c3c4efef9bULL,
0x3fe5c37eda3f3ffcULL, 0x55921caac7555549ULL, 0xa2791059dba2a2b2ULL,
0xea0365c9e9eaea8fULL, 0x650fecca6a656589ULL, 0xbab9686903babad2ULL,
0x2f65935e4a2f2fbcULL, 0xc04ee79d8ec0c027ULL, 0xdebe81a160dede5fULL,
0x1ce06c38fc1c1c70ULL, 0xfdbb2ee746fdfdd3ULL, 0x4d52649a1f4d4d29ULL,
0x92e4e03976929272ULL, 0x758fbceafa7575c9ULL, 0x06301e0c36060618ULL,
0x8a249809ae8a8a12ULL, 0xb2f940794bb2b2f2ULL, 0xe66359d185e6e6bfULL,
0x0e70361c7e0e0e38ULL, 0x1ff8633ee71f1f7cULL, 0x6237f7c455626295ULL,
0xd4eea3b53ad4d477ULL, 0xa829324d81a8a89aULL, 0x96c4f43152969662ULL,
0xf99b3aef62f9f9c3ULL, 0xc566f697a3c5c533ULL, 0x2535b14a10252594ULL,
0x59f220b2ab595979ULL, 0x8454ae15d084842aULL, 0x72b7a7e4c57272d5ULL,
0x39d5dd72ec3939e4ULL, 0x4c5a6198164c4c2dULL, 0x5eca3bbc945e5e65ULL,
0x78e785f09f7878fdULL, 0x38ddd870e53838e0ULL, 0x8c148605988c8c0aULL,
0xd1c6b2bf17d1d163ULL, 0xa5410b57e4a5a5aeULL, 0xe2434dd9a1e2e2afULL,
0x612ff8c24e616199ULL, 0xb3f1457b42b3b3f6ULL, 0x2115a54234212184ULL,
0x9c94d625089c9c4aULL, 0x1ef0663cee1e1e78ULL, 0x4322528661434311ULL,
0xc776fc93b1c7c73bULL, 0xfcb32be54ffcfcd7ULL, 0x0420140824040410ULL,
0x51b208a2e3515159ULL, 0x99bcc72f2599995eULL, 0x6d4fc4da226d6da9ULL,
0x0d68391a650d0d34ULL, 0xfa8335e979fafacfULL, 0xdfb684a369dfdf5bULL,
0x7ed79bfca97e7ee5ULL, 0x243db44819242490ULL, 0x3bc5d776fe3b3becULL,
0xab313d4b9aabab96ULL, 0xce3ed181f0cece1fULL, 0x1188552299111144ULL,
0x8f0c8903838f8f06ULL, 0x4e4a6b9c044e4e25ULL, 0xb7d1517366b7b7e6ULL,
0xeb0b60cbe0ebeb8bULL, 0x3cfdcc78c13c3cf0ULL, 0x817cbf1ffd81813eULL,
0x94d4fe354094946aULL, 0xf7eb0cf31cf7f7fbULL, 0xb9a1676f18b9b9deULL,
0x13985f268b13134cULL, 0x2c7d9c58512c2cb0ULL, 0xd3d6b8bb05d3d36bULL,
0xe76b5cd38ce7e7bbULL, 0x6e57cbdc396e6ea5ULL, 0xc46ef395aac4c437ULL,
0x03180f061b03030cULL, 0x568a13acdc565645ULL, 0x441a49885e44440dULL,
0x7fdf9efea07f7fe1ULL, 0xa921374f88a9a99eULL, 0x2a4d8254672a2aa8ULL,
0xbbb16d6b0abbbbd6ULL, 0xc146e29f87c1c123ULL, 0x53a202a6f1535351ULL,
0xdcae8ba572dcdc57ULL, 0x0b582716530b0b2cULL, 0x9d9cd327019d9d4eULL,
0x6c47c1d82b6c6cadULL, 0x3195f562a43131c4ULL, 0x7487b9e8f37474cdULL,
0xf6e309f115f6f6ffULL, 0x460a438c4c464605ULL, 0xac092645a5acac8aULL,
0x893c970fb589891eULL, 0x14a04428b4141450ULL, 0xe15b42dfbae1e1a3ULL,
0x16b04e2ca6161658ULL, 0x3acdd274f73a3ae8ULL, 0x696fd0d2066969b9ULL,
0x09482d1241090924ULL, 0x70a7ade0d77070ddULL, 0xb6d954716fb6b6e2ULL,
0xd0ceb7bd1ed0d067ULL, 0xed3b7ec7d6eded93ULL, 0xcc2edb85e2cccc17ULL,
0x422a578468424215ULL, 0x98b4c22d2c98985aULL, 0xa4490e55eda4a4aaULL,
0x285d8850752828a0ULL, 0x5cda31b8865c5c6dULL, 0xf8933fed6bf8f8c7ULL,
0x8644a411c2868622ULL,
};
static const u64 C6[256] = {
0x6018c07830d81818ULL, 0x8c2305af46262323ULL, 0x3fc67ef991b8c6c6ULL,
0x87e8136fcdfbe8e8ULL, 0x26874ca113cb8787ULL, 0xdab8a9626d11b8b8ULL,
0x0401080502090101ULL, 0x214f426e9e0d4f4fULL, 0xd836adee6c9b3636ULL,
0xa2a6590451ffa6a6ULL, 0x6fd2debdb90cd2d2ULL, 0xf3f5fb06f70ef5f5ULL,
0xf979ef80f2967979ULL, 0xa16f5fcede306f6fULL, 0x7e91fcef3f6d9191ULL,
0x5552aa07a4f85252ULL, 0x9d6027fdc0476060ULL, 0xcabc89766535bcbcULL,
0x569baccd2b379b9bULL, 0x028e048c018a8e8eULL, 0xb6a371155bd2a3a3ULL,
0x300c603c186c0c0cULL, 0xf17bff8af6847b7bULL, 0xd435b5e16a803535ULL,
0x741de8693af51d1dULL, 0xa7e05347ddb3e0e0ULL, 0x7bd7f6acb321d7d7ULL,
0x2fc25eed999cc2c2ULL, 0xb82e6d965c432e2eULL, 0x314b627a96294b4bULL,
0xdffea321e15dfefeULL, 0x41578216aed55757ULL, 0x5415a8412abd1515ULL,
0xc1779fb6eee87777ULL, 0xdc37a5eb6e923737ULL, 0xb3e57b56d79ee5e5ULL,
0x469f8cd923139f9fULL, 0xe7f0d317fd23f0f0ULL, 0x354a6a7f94204a4aULL,
0x4fda9e95a944dadaULL, 0x7d58fa25b0a25858ULL, 0x03c906ca8fcfc9c9ULL,
0xa429558d527c2929ULL, 0x280a5022145a0a0aULL, 0xfeb1e14f7f50b1b1ULL,
0xbaa0691a5dc9a0a0ULL, 0xb16b7fdad6146b6bULL, 0x2e855cab17d98585ULL,
0xcebd8173673cbdbdULL, 0x695dd234ba8f5d5dULL, 0x4010805020901010ULL,
0xf7f4f303f507f4f4ULL, 0x0bcb16c08bddcbcbULL, 0xf83eedc67cd33e3eULL,
0x140528110a2d0505ULL, 0x81671fe6ce786767ULL, 0xb7e47353d597e4e4ULL,
0x9c2725bb4e022727ULL, 0x1941325882734141ULL, 0x168b2c9d0ba78b8bULL,
0xa6a7510153f6a7a7ULL, 0xe97dcf94fab27d7dULL, 0x6e95dcfb37499595ULL,
0x47d88e9fad56d8d8ULL, 0xcbfb8b30eb70fbfbULL, 0x9fee2371c1cdeeeeULL,
0xed7cc791f8bb7c7cULL, 0x856617e3cc716666ULL, 0x53dda68ea77bddddULL,
0x5c17b84b2eaf1717ULL, 0x014702468e454747ULL, 0x429e84dc211a9e9eULL,
0x0fca1ec589d4cacaULL, 0xb42d75995a582d2dULL, 0xc6bf9179632ebfbfULL,
0x1c07381b0e3f0707ULL, 0x8ead012347acadadULL, 0x755aea2fb4b05a5aULL,
0x36836cb51bef8383ULL, 0xcc3385ff66b63333ULL, 0x91633ff2c65c6363ULL,
0x0802100a04120202ULL, 0x92aa39384993aaaaULL, 0xd971afa8e2de7171ULL,
0x07c80ecf8dc6c8c8ULL, 0x6419c87d32d11919ULL, 0x39497270923b4949ULL,
0x43d9869aaf5fd9d9ULL, 0xeff2c31df931f2f2ULL, 0xabe34b48dba8e3e3ULL,
0x715be22ab6b95b5bULL, 0x1a8834920dbc8888ULL, 0x529aa4c8293e9a9aULL,
0x98262dbe4c0b2626ULL, 0xc8328dfa64bf3232ULL, 0xfab0e94a7d59b0b0ULL,
0x83e91b6acff2e9e9ULL, 0x3c0f78331e770f0fULL, 0x73d5e6a6b733d5d5ULL,
0x3a8074ba1df48080ULL, 0xc2be997c6127bebeULL, 0x13cd26de87ebcdcdULL,
0xd034bde468893434ULL, 0x3d487a7590324848ULL, 0xdbffab24e354ffffULL,
0xf57af78ff48d7a7aULL, 0x7a90f4ea3d649090ULL, 0x615fc23ebe9d5f5fULL,
0x80201da0403d2020ULL, 0xbd6867d5d00f6868ULL, 0x681ad07234ca1a1aULL,
0x82ae192c41b7aeaeULL, 0xeab4c95e757db4b4ULL, 0x4d549a19a8ce5454ULL,
0x7693ece53b7f9393ULL, 0x88220daa442f2222ULL, 0x8d6407e9c8636464ULL,
0xe3f1db12ff2af1f1ULL, 0xd173bfa2e6cc7373ULL, 0x4812905a24821212ULL,
0x1d403a5d807a4040ULL, 0x2008402810480808ULL, 0x2bc356e89b95c3c3ULL,
0x97ec337bc5dfececULL, 0x4bdb9690ab4ddbdbULL, 0xbea1611f5fc0a1a1ULL,
0x0e8d1c8307918d8dULL, 0xf43df5c97ac83d3dULL, 0x6697ccf1335b9797ULL,
0x0000000000000000ULL, 0x1bcf36d483f9cfcfULL, 0xac2b4587566e2b2bULL,
0xc57697b3ece17676ULL, 0x328264b019e68282ULL, 0x7fd6fea9b128d6d6ULL,
0x6c1bd87736c31b1bULL, 0xeeb5c15b7774b5b5ULL, 0x86af112943beafafULL,
0xb56a77dfd41d6a6aULL, 0x5d50ba0da0ea5050ULL, 0x0945124c8a574545ULL,
0xebf3cb18fb38f3f3ULL, 0xc0309df060ad3030ULL, 0x9bef2b74c3c4efefULL,
0xfc3fe5c37eda3f3fULL, 0x4955921caac75555ULL, 0xb2a2791059dba2a2ULL,
0x8fea0365c9e9eaeaULL, 0x89650fecca6a6565ULL, 0xd2bab9686903babaULL,
0xbc2f65935e4a2f2fULL, 0x27c04ee79d8ec0c0ULL, 0x5fdebe81a160dedeULL,
0x701ce06c38fc1c1cULL, 0xd3fdbb2ee746fdfdULL, 0x294d52649a1f4d4dULL,
0x7292e4e039769292ULL, 0xc9758fbceafa7575ULL, 0x1806301e0c360606ULL,
0x128a249809ae8a8aULL, 0xf2b2f940794bb2b2ULL, 0xbfe66359d185e6e6ULL,
0x380e70361c7e0e0eULL, 0x7c1ff8633ee71f1fULL, 0x956237f7c4556262ULL,
0x77d4eea3b53ad4d4ULL, 0x9aa829324d81a8a8ULL, 0x6296c4f431529696ULL,
0xc3f99b3aef62f9f9ULL, 0x33c566f697a3c5c5ULL, 0x942535b14a102525ULL,
0x7959f220b2ab5959ULL, 0x2a8454ae15d08484ULL, 0xd572b7a7e4c57272ULL,
0xe439d5dd72ec3939ULL, 0x2d4c5a6198164c4cULL, 0x655eca3bbc945e5eULL,
0xfd78e785f09f7878ULL, 0xe038ddd870e53838ULL, 0x0a8c148605988c8cULL,
0x63d1c6b2bf17d1d1ULL, 0xaea5410b57e4a5a5ULL, 0xafe2434dd9a1e2e2ULL,
0x99612ff8c24e6161ULL, 0xf6b3f1457b42b3b3ULL, 0x842115a542342121ULL,
0x4a9c94d625089c9cULL, 0x781ef0663cee1e1eULL, 0x1143225286614343ULL,
0x3bc776fc93b1c7c7ULL, 0xd7fcb32be54ffcfcULL, 0x1004201408240404ULL,
0x5951b208a2e35151ULL, 0x5e99bcc72f259999ULL, 0xa96d4fc4da226d6dULL,
0x340d68391a650d0dULL, 0xcffa8335e979fafaULL, 0x5bdfb684a369dfdfULL,
0xe57ed79bfca97e7eULL, 0x90243db448192424ULL, 0xec3bc5d776fe3b3bULL,
0x96ab313d4b9aababULL, 0x1fce3ed181f0ceceULL, 0x4411885522991111ULL,
0x068f0c8903838f8fULL, 0x254e4a6b9c044e4eULL, 0xe6b7d1517366b7b7ULL,
0x8beb0b60cbe0ebebULL, 0xf03cfdcc78c13c3cULL, 0x3e817cbf1ffd8181ULL,
0x6a94d4fe35409494ULL, 0xfbf7eb0cf31cf7f7ULL, 0xdeb9a1676f18b9b9ULL,
0x4c13985f268b1313ULL, 0xb02c7d9c58512c2cULL, 0x6bd3d6b8bb05d3d3ULL,
0xbbe76b5cd38ce7e7ULL, 0xa56e57cbdc396e6eULL, 0x37c46ef395aac4c4ULL,
0x0c03180f061b0303ULL, 0x45568a13acdc5656ULL, 0x0d441a49885e4444ULL,
0xe17fdf9efea07f7fULL, 0x9ea921374f88a9a9ULL, 0xa82a4d8254672a2aULL,
0xd6bbb16d6b0abbbbULL, 0x23c146e29f87c1c1ULL, 0x5153a202a6f15353ULL,
0x57dcae8ba572dcdcULL, 0x2c0b582716530b0bULL, 0x4e9d9cd327019d9dULL,
0xad6c47c1d82b6c6cULL, 0xc43195f562a43131ULL, 0xcd7487b9e8f37474ULL,
0xfff6e309f115f6f6ULL, 0x05460a438c4c4646ULL, 0x8aac092645a5acacULL,
0x1e893c970fb58989ULL, 0x5014a04428b41414ULL, 0xa3e15b42dfbae1e1ULL,
0x5816b04e2ca61616ULL, 0xe83acdd274f73a3aULL, 0xb9696fd0d2066969ULL,
0x2409482d12410909ULL, 0xdd70a7ade0d77070ULL, 0xe2b6d954716fb6b6ULL,
0x67d0ceb7bd1ed0d0ULL, 0x93ed3b7ec7d6ededULL, 0x17cc2edb85e2ccccULL,
0x15422a5784684242ULL, 0x5a98b4c22d2c9898ULL, 0xaaa4490e55eda4a4ULL,
0xa0285d8850752828ULL, 0x6d5cda31b8865c5cULL, 0xc7f8933fed6bf8f8ULL,
0x228644a411c28686ULL,
};
static const u64 C7[256] = {
0x186018c07830d818ULL, 0x238c2305af462623ULL, 0xc63fc67ef991b8c6ULL,
0xe887e8136fcdfbe8ULL, 0x8726874ca113cb87ULL, 0xb8dab8a9626d11b8ULL,
0x0104010805020901ULL, 0x4f214f426e9e0d4fULL, 0x36d836adee6c9b36ULL,
0xa6a2a6590451ffa6ULL, 0xd26fd2debdb90cd2ULL, 0xf5f3f5fb06f70ef5ULL,
0x79f979ef80f29679ULL, 0x6fa16f5fcede306fULL, 0x917e91fcef3f6d91ULL,
0x525552aa07a4f852ULL, 0x609d6027fdc04760ULL, 0xbccabc89766535bcULL,
0x9b569baccd2b379bULL, 0x8e028e048c018a8eULL, 0xa3b6a371155bd2a3ULL,
0x0c300c603c186c0cULL, 0x7bf17bff8af6847bULL, 0x35d435b5e16a8035ULL,
0x1d741de8693af51dULL, 0xe0a7e05347ddb3e0ULL, 0xd77bd7f6acb321d7ULL,
0xc22fc25eed999cc2ULL, 0x2eb82e6d965c432eULL, 0x4b314b627a96294bULL,
0xfedffea321e15dfeULL, 0x5741578216aed557ULL, 0x155415a8412abd15ULL,
0x77c1779fb6eee877ULL, 0x37dc37a5eb6e9237ULL, 0xe5b3e57b56d79ee5ULL,
0x9f469f8cd923139fULL, 0xf0e7f0d317fd23f0ULL, 0x4a354a6a7f94204aULL,
0xda4fda9e95a944daULL, 0x587d58fa25b0a258ULL, 0xc903c906ca8fcfc9ULL,
0x29a429558d527c29ULL, 0x0a280a5022145a0aULL, 0xb1feb1e14f7f50b1ULL,
0xa0baa0691a5dc9a0ULL, 0x6bb16b7fdad6146bULL, 0x852e855cab17d985ULL,
0xbdcebd8173673cbdULL, 0x5d695dd234ba8f5dULL, 0x1040108050209010ULL,
0xf4f7f4f303f507f4ULL, 0xcb0bcb16c08bddcbULL, 0x3ef83eedc67cd33eULL,
0x05140528110a2d05ULL, 0x6781671fe6ce7867ULL, 0xe4b7e47353d597e4ULL,
0x279c2725bb4e0227ULL, 0x4119413258827341ULL, 0x8b168b2c9d0ba78bULL,
0xa7a6a7510153f6a7ULL, 0x7de97dcf94fab27dULL, 0x956e95dcfb374995ULL,
0xd847d88e9fad56d8ULL, 0xfbcbfb8b30eb70fbULL, 0xee9fee2371c1cdeeULL,
0x7ced7cc791f8bb7cULL, 0x66856617e3cc7166ULL, 0xdd53dda68ea77bddULL,
0x175c17b84b2eaf17ULL, 0x47014702468e4547ULL, 0x9e429e84dc211a9eULL,
0xca0fca1ec589d4caULL, 0x2db42d75995a582dULL, 0xbfc6bf9179632ebfULL,
0x071c07381b0e3f07ULL, 0xad8ead012347acadULL, 0x5a755aea2fb4b05aULL,
0x8336836cb51bef83ULL, 0x33cc3385ff66b633ULL, 0x6391633ff2c65c63ULL,
0x020802100a041202ULL, 0xaa92aa39384993aaULL, 0x71d971afa8e2de71ULL,
0xc807c80ecf8dc6c8ULL, 0x196419c87d32d119ULL, 0x4939497270923b49ULL,
0xd943d9869aaf5fd9ULL, 0xf2eff2c31df931f2ULL, 0xe3abe34b48dba8e3ULL,
0x5b715be22ab6b95bULL, 0x881a8834920dbc88ULL, 0x9a529aa4c8293e9aULL,
0x2698262dbe4c0b26ULL, 0x32c8328dfa64bf32ULL, 0xb0fab0e94a7d59b0ULL,
0xe983e91b6acff2e9ULL, 0x0f3c0f78331e770fULL, 0xd573d5e6a6b733d5ULL,
0x803a8074ba1df480ULL, 0xbec2be997c6127beULL, 0xcd13cd26de87ebcdULL,
0x34d034bde4688934ULL, 0x483d487a75903248ULL, 0xffdbffab24e354ffULL,
0x7af57af78ff48d7aULL, 0x907a90f4ea3d6490ULL, 0x5f615fc23ebe9d5fULL,
0x2080201da0403d20ULL, 0x68bd6867d5d00f68ULL, 0x1a681ad07234ca1aULL,
0xae82ae192c41b7aeULL, 0xb4eab4c95e757db4ULL, 0x544d549a19a8ce54ULL,
0x937693ece53b7f93ULL, 0x2288220daa442f22ULL, 0x648d6407e9c86364ULL,
0xf1e3f1db12ff2af1ULL, 0x73d173bfa2e6cc73ULL, 0x124812905a248212ULL,
0x401d403a5d807a40ULL, 0x0820084028104808ULL, 0xc32bc356e89b95c3ULL,
0xec97ec337bc5dfecULL, 0xdb4bdb9690ab4ddbULL, 0xa1bea1611f5fc0a1ULL,
0x8d0e8d1c8307918dULL, 0x3df43df5c97ac83dULL, 0x976697ccf1335b97ULL,
0x0000000000000000ULL, 0xcf1bcf36d483f9cfULL, 0x2bac2b4587566e2bULL,
0x76c57697b3ece176ULL, 0x82328264b019e682ULL, 0xd67fd6fea9b128d6ULL,
0x1b6c1bd87736c31bULL, 0xb5eeb5c15b7774b5ULL, 0xaf86af112943beafULL,
0x6ab56a77dfd41d6aULL, 0x505d50ba0da0ea50ULL, 0x450945124c8a5745ULL,
0xf3ebf3cb18fb38f3ULL, 0x30c0309df060ad30ULL, 0xef9bef2b74c3c4efULL,
0x3ffc3fe5c37eda3fULL, 0x554955921caac755ULL, 0xa2b2a2791059dba2ULL,
0xea8fea0365c9e9eaULL, 0x6589650fecca6a65ULL, 0xbad2bab9686903baULL,
0x2fbc2f65935e4a2fULL, 0xc027c04ee79d8ec0ULL, 0xde5fdebe81a160deULL,
0x1c701ce06c38fc1cULL, 0xfdd3fdbb2ee746fdULL, 0x4d294d52649a1f4dULL,
0x927292e4e0397692ULL, 0x75c9758fbceafa75ULL, 0x061806301e0c3606ULL,
0x8a128a249809ae8aULL, 0xb2f2b2f940794bb2ULL, 0xe6bfe66359d185e6ULL,
0x0e380e70361c7e0eULL, 0x1f7c1ff8633ee71fULL, 0x62956237f7c45562ULL,
0xd477d4eea3b53ad4ULL, 0xa89aa829324d81a8ULL, 0x966296c4f4315296ULL,
0xf9c3f99b3aef62f9ULL, 0xc533c566f697a3c5ULL, 0x25942535b14a1025ULL,
0x597959f220b2ab59ULL, 0x842a8454ae15d084ULL, 0x72d572b7a7e4c572ULL,
0x39e439d5dd72ec39ULL, 0x4c2d4c5a6198164cULL, 0x5e655eca3bbc945eULL,
0x78fd78e785f09f78ULL, 0x38e038ddd870e538ULL, 0x8c0a8c148605988cULL,
0xd163d1c6b2bf17d1ULL, 0xa5aea5410b57e4a5ULL, 0xe2afe2434dd9a1e2ULL,
0x6199612ff8c24e61ULL, 0xb3f6b3f1457b42b3ULL, 0x21842115a5423421ULL,
0x9c4a9c94d625089cULL, 0x1e781ef0663cee1eULL, 0x4311432252866143ULL,
0xc73bc776fc93b1c7ULL, 0xfcd7fcb32be54ffcULL, 0x0410042014082404ULL,
0x515951b208a2e351ULL, 0x995e99bcc72f2599ULL, 0x6da96d4fc4da226dULL,
0x0d340d68391a650dULL, 0xfacffa8335e979faULL, 0xdf5bdfb684a369dfULL,
0x7ee57ed79bfca97eULL, 0x2490243db4481924ULL, 0x3bec3bc5d776fe3bULL,
0xab96ab313d4b9aabULL, 0xce1fce3ed181f0ceULL, 0x1144118855229911ULL,
0x8f068f0c8903838fULL, 0x4e254e4a6b9c044eULL, 0xb7e6b7d1517366b7ULL,
0xeb8beb0b60cbe0ebULL, 0x3cf03cfdcc78c13cULL, 0x813e817cbf1ffd81ULL,
0x946a94d4fe354094ULL, 0xf7fbf7eb0cf31cf7ULL, 0xb9deb9a1676f18b9ULL,
0x134c13985f268b13ULL, 0x2cb02c7d9c58512cULL, 0xd36bd3d6b8bb05d3ULL,
0xe7bbe76b5cd38ce7ULL, 0x6ea56e57cbdc396eULL, 0xc437c46ef395aac4ULL,
0x030c03180f061b03ULL, 0x5645568a13acdc56ULL, 0x440d441a49885e44ULL,
0x7fe17fdf9efea07fULL, 0xa99ea921374f88a9ULL, 0x2aa82a4d8254672aULL,
0xbbd6bbb16d6b0abbULL, 0xc123c146e29f87c1ULL, 0x535153a202a6f153ULL,
0xdc57dcae8ba572dcULL, 0x0b2c0b582716530bULL, 0x9d4e9d9cd327019dULL,
0x6cad6c47c1d82b6cULL, 0x31c43195f562a431ULL, 0x74cd7487b9e8f374ULL,
0xf6fff6e309f115f6ULL, 0x4605460a438c4c46ULL, 0xac8aac092645a5acULL,
0x891e893c970fb589ULL, 0x145014a04428b414ULL, 0xe1a3e15b42dfbae1ULL,
0x165816b04e2ca616ULL, 0x3ae83acdd274f73aULL, 0x69b9696fd0d20669ULL,
0x092409482d124109ULL, 0x70dd70a7ade0d770ULL, 0xb6e2b6d954716fb6ULL,
0xd067d0ceb7bd1ed0ULL, 0xed93ed3b7ec7d6edULL, 0xcc17cc2edb85e2ccULL,
0x4215422a57846842ULL, 0x985a98b4c22d2c98ULL, 0xa4aaa4490e55eda4ULL,
0x28a0285d88507528ULL, 0x5c6d5cda31b8865cULL, 0xf8c7f8933fed6bf8ULL,
0x86228644a411c286ULL,
};
static const u64 rc[WHIRLPOOL_ROUNDS] = {
0x1823c6e887b8014fULL,
0x36a6d2f5796f9152ULL,
0x60bc9b8ea30c7b35ULL,
0x1de0d7c22e4bfe57ULL,
0x157737e59ff04adaULL,
0x58c9290ab1a06b85ULL,
0xbd5d10f4cb3e0567ULL,
0xe427418ba77d95d8ULL,
0xfbee7c66dd17479eULL,
0xca2dbf07ad5a8333ULL,
};
/*
* The core Whirlpool transform.
*/
static __no_kmsan_checks void wp512_process_buffer(struct wp512_ctx *wctx)
{
int i, r;
u64 K[8]; /* the round key */
u64 block[8]; /* mu(buffer) */
u64 state[8]; /* the cipher state */
u64 L[8];
const __be64 *buffer = (const __be64 *)wctx->buffer;
for (i = 0; i < 8; i++)
block[i] = be64_to_cpu(buffer[i]);
state[0] = block[0] ^ (K[0] = wctx->hash[0]);
state[1] = block[1] ^ (K[1] = wctx->hash[1]);
state[2] = block[2] ^ (K[2] = wctx->hash[2]);
state[3] = block[3] ^ (K[3] = wctx->hash[3]);
state[4] = block[4] ^ (K[4] = wctx->hash[4]);
state[5] = block[5] ^ (K[5] = wctx->hash[5]);
state[6] = block[6] ^ (K[6] = wctx->hash[6]);
state[7] = block[7] ^ (K[7] = wctx->hash[7]);
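/*
* Each round applies the combined S-box/shift-columns/mix-rows
* lookup tables C0..C7 twice: first to evolve the key schedule K
* (with the round constant rc[r] folded into L[0]), then to the
* cipher state itself, keyed with the freshly derived round key.
*/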
for (r = 0; r < WHIRLPOOL_ROUNDS; r++) {
L[0] = C0[(int)(K[0] >> 56) ] ^
C1[(int)(K[7] >> 48) & 0xff] ^
C2[(int)(K[6] >> 40) & 0xff] ^
C3[(int)(K[5] >> 32) & 0xff] ^
C4[(int)(K[4] >> 24) & 0xff] ^
C5[(int)(K[3] >> 16) & 0xff] ^
C6[(int)(K[2] >> 8) & 0xff] ^
C7[(int)(K[1] ) & 0xff] ^
rc[r];
L[1] = C0[(int)(K[1] >> 56) ] ^
C1[(int)(K[0] >> 48) & 0xff] ^
C2[(int)(K[7] >> 40) & 0xff] ^
C3[(int)(K[6] >> 32) & 0xff] ^
C4[(int)(K[5] >> 24) & 0xff] ^
C5[(int)(K[4] >> 16) & 0xff] ^
C6[(int)(K[3] >> 8) & 0xff] ^
C7[(int)(K[2] ) & 0xff];
L[2] = C0[(int)(K[2] >> 56) ] ^
C1[(int)(K[1] >> 48) & 0xff] ^
C2[(int)(K[0] >> 40) & 0xff] ^
C3[(int)(K[7] >> 32) & 0xff] ^
C4[(int)(K[6] >> 24) & 0xff] ^
C5[(int)(K[5] >> 16) & 0xff] ^
C6[(int)(K[4] >> 8) & 0xff] ^
C7[(int)(K[3] ) & 0xff];
L[3] = C0[(int)(K[3] >> 56) ] ^
C1[(int)(K[2] >> 48) & 0xff] ^
C2[(int)(K[1] >> 40) & 0xff] ^
C3[(int)(K[0] >> 32) & 0xff] ^
C4[(int)(K[7] >> 24) & 0xff] ^
C5[(int)(K[6] >> 16) & 0xff] ^
C6[(int)(K[5] >> 8) & 0xff] ^
C7[(int)(K[4] ) & 0xff];
L[4] = C0[(int)(K[4] >> 56) ] ^
C1[(int)(K[3] >> 48) & 0xff] ^
C2[(int)(K[2] >> 40) & 0xff] ^
C3[(int)(K[1] >> 32) & 0xff] ^
C4[(int)(K[0] >> 24) & 0xff] ^
C5[(int)(K[7] >> 16) & 0xff] ^
C6[(int)(K[6] >> 8) & 0xff] ^
C7[(int)(K[5] ) & 0xff];
L[5] = C0[(int)(K[5] >> 56) ] ^
C1[(int)(K[4] >> 48) & 0xff] ^
C2[(int)(K[3] >> 40) & 0xff] ^
C3[(int)(K[2] >> 32) & 0xff] ^
C4[(int)(K[1] >> 24) & 0xff] ^
C5[(int)(K[0] >> 16) & 0xff] ^
C6[(int)(K[7] >> 8) & 0xff] ^
C7[(int)(K[6] ) & 0xff];
L[6] = C0[(int)(K[6] >> 56) ] ^
C1[(int)(K[5] >> 48) & 0xff] ^
C2[(int)(K[4] >> 40) & 0xff] ^
C3[(int)(K[3] >> 32) & 0xff] ^
C4[(int)(K[2] >> 24) & 0xff] ^
C5[(int)(K[1] >> 16) & 0xff] ^
C6[(int)(K[0] >> 8) & 0xff] ^
C7[(int)(K[7] ) & 0xff];
L[7] = C0[(int)(K[7] >> 56) ] ^
C1[(int)(K[6] >> 48) & 0xff] ^
C2[(int)(K[5] >> 40) & 0xff] ^
C3[(int)(K[4] >> 32) & 0xff] ^
C4[(int)(K[3] >> 24) & 0xff] ^
C5[(int)(K[2] >> 16) & 0xff] ^
C6[(int)(K[1] >> 8) & 0xff] ^
C7[(int)(K[0] ) & 0xff];
K[0] = L[0];
K[1] = L[1];
K[2] = L[2];
K[3] = L[3];
K[4] = L[4];
K[5] = L[5];
K[6] = L[6];
K[7] = L[7];
L[0] = C0[(int)(state[0] >> 56) ] ^
C1[(int)(state[7] >> 48) & 0xff] ^
C2[(int)(state[6] >> 40) & 0xff] ^
C3[(int)(state[5] >> 32) & 0xff] ^
C4[(int)(state[4] >> 24) & 0xff] ^
C5[(int)(state[3] >> 16) & 0xff] ^
C6[(int)(state[2] >> 8) & 0xff] ^
C7[(int)(state[1] ) & 0xff] ^
K[0];
L[1] = C0[(int)(state[1] >> 56) ] ^
C1[(int)(state[0] >> 48) & 0xff] ^
C2[(int)(state[7] >> 40) & 0xff] ^
C3[(int)(state[6] >> 32) & 0xff] ^
C4[(int)(state[5] >> 24) & 0xff] ^
C5[(int)(state[4] >> 16) & 0xff] ^
C6[(int)(state[3] >> 8) & 0xff] ^
C7[(int)(state[2] ) & 0xff] ^
K[1];
L[2] = C0[(int)(state[2] >> 56) ] ^
C1[(int)(state[1] >> 48) & 0xff] ^
C2[(int)(state[0] >> 40) & 0xff] ^
C3[(int)(state[7] >> 32) & 0xff] ^
C4[(int)(state[6] >> 24) & 0xff] ^
C5[(int)(state[5] >> 16) & 0xff] ^
C6[(int)(state[4] >> 8) & 0xff] ^
C7[(int)(state[3] ) & 0xff] ^
K[2];
L[3] = C0[(int)(state[3] >> 56) ] ^
C1[(int)(state[2] >> 48) & 0xff] ^
C2[(int)(state[1] >> 40) & 0xff] ^
C3[(int)(state[0] >> 32) & 0xff] ^
C4[(int)(state[7] >> 24) & 0xff] ^
C5[(int)(state[6] >> 16) & 0xff] ^
C6[(int)(state[5] >> 8) & 0xff] ^
C7[(int)(state[4] ) & 0xff] ^
K[3];
L[4] = C0[(int)(state[4] >> 56) ] ^
C1[(int)(state[3] >> 48) & 0xff] ^
C2[(int)(state[2] >> 40) & 0xff] ^
C3[(int)(state[1] >> 32) & 0xff] ^
C4[(int)(state[0] >> 24) & 0xff] ^
C5[(int)(state[7] >> 16) & 0xff] ^
C6[(int)(state[6] >> 8) & 0xff] ^
C7[(int)(state[5] ) & 0xff] ^
K[4];
L[5] = C0[(int)(state[5] >> 56) ] ^
C1[(int)(state[4] >> 48) & 0xff] ^
C2[(int)(state[3] >> 40) & 0xff] ^
C3[(int)(state[2] >> 32) & 0xff] ^
C4[(int)(state[1] >> 24) & 0xff] ^
C5[(int)(state[0] >> 16) & 0xff] ^
C6[(int)(state[7] >> 8) & 0xff] ^
C7[(int)(state[6] ) & 0xff] ^
K[5];
L[6] = C0[(int)(state[6] >> 56) ] ^
C1[(int)(state[5] >> 48) & 0xff] ^
C2[(int)(state[4] >> 40) & 0xff] ^
C3[(int)(state[3] >> 32) & 0xff] ^
C4[(int)(state[2] >> 24) & 0xff] ^
C5[(int)(state[1] >> 16) & 0xff] ^
C6[(int)(state[0] >> 8) & 0xff] ^
C7[(int)(state[7] ) & 0xff] ^
K[6];
L[7] = C0[(int)(state[7] >> 56) ] ^
C1[(int)(state[6] >> 48) & 0xff] ^
C2[(int)(state[5] >> 40) & 0xff] ^
C3[(int)(state[4] >> 32) & 0xff] ^
C4[(int)(state[3] >> 24) & 0xff] ^
C5[(int)(state[2] >> 16) & 0xff] ^
C6[(int)(state[1] >> 8) & 0xff] ^
C7[(int)(state[0] ) & 0xff] ^
K[7];
state[0] = L[0];
state[1] = L[1];
state[2] = L[2];
state[3] = L[3];
state[4] = L[4];
state[5] = L[5];
state[6] = L[6];
state[7] = L[7];
}
/*
* apply the Miyaguchi-Preneel compression function:
*/
wctx->hash[0] ^= state[0] ^ block[0];
wctx->hash[1] ^= state[1] ^ block[1];
wctx->hash[2] ^= state[2] ^ block[2];
wctx->hash[3] ^= state[3] ^ block[3];
wctx->hash[4] ^= state[4] ^ block[4];
wctx->hash[5] ^= state[5] ^ block[5];
wctx->hash[6] ^= state[6] ^ block[6];
wctx->hash[7] ^= state[7] ^ block[7];
}
static int wp512_init(struct shash_desc *desc)
{
struct wp512_ctx *wctx = shash_desc_ctx(desc);
int i;
memset(wctx->bitLength, 0, 32);
wctx->bufferBits = wctx->bufferPos = 0;
wctx->buffer[0] = 0;
for (i = 0; i < 8; i++) {
wctx->hash[i] = 0L;
}
return 0;
}
static int wp512_update(struct shash_desc *desc, const u8 *source,
unsigned int len)
{
struct wp512_ctx *wctx = shash_desc_ctx(desc);
int sourcePos = 0;
unsigned int bits_len = len * 8; /* convert to number of bits */
int sourceGap = (8 - ((int)bits_len & 7)) & 7;
int bufferRem = wctx->bufferBits & 7;
int i;
u32 b, carry;
u8 *buffer = wctx->buffer;
u8 *bitLength = wctx->bitLength;
int bufferBits = wctx->bufferBits;
int bufferPos = wctx->bufferPos;
u64 value = bits_len;
for (i = 31, carry = 0; i >= 0 && (carry != 0 || value != 0ULL); i--) {
carry += bitLength[i] + ((u32)value & 0xff);
bitLength[i] = (u8)carry;
carry >>= 8;
value >>= 8;
}
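/*
* The loop above added the message length in bits into the 256-bit
* big-endian bitLength counter. Below, source bytes are re-aligned
* across the sourceGap/bufferRem bit offsets and folded into the
* block buffer, hashing a full block whenever it fills up.
*/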
while (bits_len > 8) {
b = ((source[sourcePos] << sourceGap) & 0xff) |
((source[sourcePos + 1] & 0xff) >> (8 - sourceGap));
buffer[bufferPos++] |= (u8)(b >> bufferRem);
bufferBits += 8 - bufferRem;
if (bufferBits == WP512_BLOCK_SIZE * 8) {
wp512_process_buffer(wctx);
bufferBits = bufferPos = 0;
}
buffer[bufferPos] = b << (8 - bufferRem);
bufferBits += bufferRem;
bits_len -= 8;
sourcePos++;
}
if (bits_len > 0) {
b = (source[sourcePos] << sourceGap) & 0xff;
buffer[bufferPos] |= b >> bufferRem;
} else {
b = 0;
}
if (bufferRem + bits_len < 8) {
bufferBits += bits_len;
} else {
bufferPos++;
bufferBits += 8 - bufferRem;
bits_len -= 8 - bufferRem;
if (bufferBits == WP512_BLOCK_SIZE * 8) {
wp512_process_buffer(wctx);
bufferBits = bufferPos = 0;
}
buffer[bufferPos] = b << (8 - bufferRem);
bufferBits += (int)bits_len;
}
wctx->bufferBits = bufferBits;
wctx->bufferPos = bufferPos;
return 0;
}
static int wp512_final(struct shash_desc *desc, u8 *out)
{
struct wp512_ctx *wctx = shash_desc_ctx(desc);
int i;
u8 *buffer = wctx->buffer;
u8 *bitLength = wctx->bitLength;
int bufferBits = wctx->bufferBits;
int bufferPos = wctx->bufferPos;
__be64 *digest = (__be64 *)out;
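/*
* Pad with a single 1 bit, zero-fill up to the 32-byte length field
* (flushing an extra block first if the padding does not fit), then
* hash the final block carrying the 256-bit message bit length.
*/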
buffer[bufferPos] |= 0x80U >> (bufferBits & 7);
bufferPos++;
if (bufferPos > WP512_BLOCK_SIZE - WP512_LENGTHBYTES) {
if (bufferPos < WP512_BLOCK_SIZE)
memset(&buffer[bufferPos], 0, WP512_BLOCK_SIZE - bufferPos);
wp512_process_buffer(wctx);
bufferPos = 0;
}
if (bufferPos < WP512_BLOCK_SIZE - WP512_LENGTHBYTES)
memset(&buffer[bufferPos], 0,
(WP512_BLOCK_SIZE - WP512_LENGTHBYTES) - bufferPos);
bufferPos = WP512_BLOCK_SIZE - WP512_LENGTHBYTES;
memcpy(&buffer[WP512_BLOCK_SIZE - WP512_LENGTHBYTES],
bitLength, WP512_LENGTHBYTES);
wp512_process_buffer(wctx);
for (i = 0; i < WP512_DIGEST_SIZE/8; i++)
digest[i] = cpu_to_be64(wctx->hash[i]);
wctx->bufferBits = bufferBits;
wctx->bufferPos = bufferPos;
return 0;
}
static int wp384_final(struct shash_desc *desc, u8 *out)
{
u8 D[64];
wp512_final(desc, D);
memcpy(out, D, WP384_DIGEST_SIZE);
memzero_explicit(D, WP512_DIGEST_SIZE);
return 0;
}
static int wp256_final(struct shash_desc *desc, u8 *out)
{
u8 D[64];
wp512_final(desc, D);
memcpy(out, D, WP256_DIGEST_SIZE);
memzero_explicit(D, WP512_DIGEST_SIZE);
return 0;
}
static struct shash_alg wp_algs[3] = { {
.digestsize = WP512_DIGEST_SIZE,
.init = wp512_init,
.update = wp512_update,
.final = wp512_final,
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp512",
.cra_driver_name = "wp512-generic",
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
}, {
.digestsize = WP384_DIGEST_SIZE,
.init = wp512_init,
.update = wp512_update,
.final = wp384_final,
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp384",
.cra_driver_name = "wp384-generic",
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
}, {
.digestsize = WP256_DIGEST_SIZE,
.init = wp512_init,
.update = wp512_update,
.final = wp256_final,
.descsize = sizeof(struct wp512_ctx),
.base = {
.cra_name = "wp256",
.cra_driver_name = "wp256-generic",
.cra_blocksize = WP512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
} };
static int __init wp512_mod_init(void)
{
return crypto_register_shashes(wp_algs, ARRAY_SIZE(wp_algs));
}
static void __exit wp512_mod_fini(void)
{
crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs));
}
MODULE_ALIAS_CRYPTO("wp512");
MODULE_ALIAS_CRYPTO("wp384");
MODULE_ALIAS_CRYPTO("wp256");
subsys_initcall(wp512_mod_init);
module_exit(wp512_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Whirlpool Message Digest Algorithm");
| linux-master | crypto/wp512.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2016, Intel Corporation
* Authors: Salvatore Benedetto <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/string.h>
#include <crypto/dh.h>
#include <crypto/kpp.h>
#define DH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 3 * sizeof(int))
static inline u8 *dh_pack_data(u8 *dst, u8 *end, const void *src, size_t size)
{
if (!dst || size > end - dst)
return NULL;
memcpy(dst, src, size);
return dst + size;
}
static inline const u8 *dh_unpack_data(void *dst, const void *src, size_t size)
{
memcpy(dst, src, size);
return src + size;
}
static inline unsigned int dh_data_size(const struct dh *p)
{
return p->key_size + p->p_size + p->g_size;
}
unsigned int crypto_dh_key_len(const struct dh *p)
{
return DH_KPP_SECRET_MIN_SIZE + dh_data_size(p);
}
EXPORT_SYMBOL_GPL(crypto_dh_key_len);
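/*
* The encoded key blob produced by crypto_dh_encode_key() is laid
* out as:
*
*	struct kpp_secret { .type = CRYPTO_KPP_SECRET_TYPE_DH, .len }
*	int key_size, p_size, g_size
*	u8 key[key_size] || p[p_size] || g[g_size]
*
* crypto_dh_key_len() above returns exactly this total size, and
* __crypto_dh_decode_key() below walks the same layout in reverse.
*/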
int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params)
{
u8 *ptr = buf;
u8 * const end = ptr + len;
struct kpp_secret secret = {
.type = CRYPTO_KPP_SECRET_TYPE_DH,
.len = len
};
if (unlikely(!len))
return -EINVAL;
ptr = dh_pack_data(ptr, end, &secret, sizeof(secret));
ptr = dh_pack_data(ptr, end, ¶ms->key_size,
sizeof(params->key_size));
ptr = dh_pack_data(ptr, end, ¶ms->p_size, sizeof(params->p_size));
ptr = dh_pack_data(ptr, end, ¶ms->g_size, sizeof(params->g_size));
ptr = dh_pack_data(ptr, end, params->key, params->key_size);
ptr = dh_pack_data(ptr, end, params->p, params->p_size);
ptr = dh_pack_data(ptr, end, params->g, params->g_size);
if (ptr != end)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(crypto_dh_encode_key);
int __crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
{
const u8 *ptr = buf;
struct kpp_secret secret;
if (unlikely(!buf || len < DH_KPP_SECRET_MIN_SIZE))
return -EINVAL;
ptr = dh_unpack_data(&secret, ptr, sizeof(secret));
if (secret.type != CRYPTO_KPP_SECRET_TYPE_DH)
return -EINVAL;
ptr = dh_unpack_data(¶ms->key_size, ptr, sizeof(params->key_size));
ptr = dh_unpack_data(¶ms->p_size, ptr, sizeof(params->p_size));
ptr = dh_unpack_data(¶ms->g_size, ptr, sizeof(params->g_size));
if (secret.len != crypto_dh_key_len(params))
return -EINVAL;
/* Don't allocate memory. Set pointers to data within
* the given buffer
*/
params->key = (void *)ptr;
params->p = (void *)(ptr + params->key_size);
params->g = (void *)(ptr + params->key_size + params->p_size);
return 0;
}
int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params)
{
int err;
err = __crypto_dh_decode_key(buf, len, params);
if (err)
return err;
/*
* Don't permit the buffer for 'key' or 'g' to be larger than 'p', since
* some drivers assume otherwise.
*/
if (params->key_size > params->p_size ||
params->g_size > params->p_size)
return -EINVAL;
/*
* Don't permit 'p' to be 0. It's not a prime number, and it's subject
* to corner cases such as 'mod 0' being undefined or
* crypto_kpp_maxsize() returning 0.
*/
if (memchr_inv(params->p, 0, params->p_size) == NULL)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(crypto_dh_decode_key);
| linux-master | crypto/dh_helper.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PRNG: Pseudo Random Number Generator
* Based on NIST Recommended PRNG From ANSI X9.31 Appendix A.2.4 using
* AES 128 cipher
*
* (C) Neil Horman <[email protected]>
*/
#include <crypto/internal/cipher.h>
#include <crypto/internal/rng.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#define DEFAULT_PRNG_KEY "0123456789abcdef"
#define DEFAULT_PRNG_KSZ 16
#define DEFAULT_BLK_SZ 16
#define DEFAULT_V_SEED "zaybxcwdveuftgsh"
/*
* Flags for the prng_context flags field
*/
#define PRNG_FIXED_SIZE 0x1
#define PRNG_NEED_RESET 0x2
/*
* Note: DT is our counter value
* I is our intermediate value
* V is our seed vector
* See http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf
* for implementation details
*/
struct prng_context {
spinlock_t prng_lock;
unsigned char rand_data[DEFAULT_BLK_SZ];
unsigned char last_rand_data[DEFAULT_BLK_SZ];
unsigned char DT[DEFAULT_BLK_SZ];
unsigned char I[DEFAULT_BLK_SZ];
unsigned char V[DEFAULT_BLK_SZ];
u32 rand_data_valid;
struct crypto_cipher *tfm;
u32 flags;
};
static int dbg;
static void hexdump(char *note, unsigned char *buf, unsigned int len)
{
if (dbg) {
printk(KERN_CRIT "%s", note);
print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
16, 1,
buf, len, false);
}
}
#define dbgprint(format, args...) do {\
if (dbg)\
printk(format, ##args);\
} while (0)
static void xor_vectors(unsigned char *in1, unsigned char *in2,
unsigned char *out, unsigned int size)
{
int i;
for (i = 0; i < size; i++)
out[i] = in1[i] ^ in2[i];
}
/*
* Generates DEFAULT_BLK_SZ bytes of random data per call;
* returns 0 if generation succeeded, <0 if something went wrong
*/
static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test)
{
int i;
unsigned char tmp[DEFAULT_BLK_SZ];
unsigned char *output = NULL;
dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n",
ctx);
hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ);
hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ);
hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ);
/*
* This algorithm is a 3 stage state machine
*/
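/*
* Per X9.31 A.2.4, with E the AES transform under the fixed key:
*	I = E(DT)	(stage 0)
*	R = E(I ^ V)	(stage 1, the returned rand_data)
*	V = E(R ^ I)	(stage 2, the next seed vector)
*/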
for (i = 0; i < 3; i++) {
switch (i) {
case 0:
/*
* Start by encrypting the counter value
* This gives us an intermediate value I
*/
memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ);
output = ctx->I;
hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ);
break;
case 1:
/*
* Next xor I with our secret vector V
* encrypt that result to obtain our
* pseudo random data which we output
*/
xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ);
hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ);
output = ctx->rand_data;
break;
case 2:
/*
* First check that we didn't produce the same
* random data that we did last time around through this loop
*/
if (!memcmp(ctx->rand_data, ctx->last_rand_data,
DEFAULT_BLK_SZ)) {
if (cont_test) {
panic("cprng %p Failed repetition check!\n",
ctx);
}
printk(KERN_ERR
"ctx %p Failed repetition check!\n",
ctx);
ctx->flags |= PRNG_NEED_RESET;
return -EINVAL;
}
memcpy(ctx->last_rand_data, ctx->rand_data,
DEFAULT_BLK_SZ);
/*
* Lastly xor the random data with I
* and encrypt that to obtain a new secret vector V
*/
xor_vectors(ctx->rand_data, ctx->I, tmp,
DEFAULT_BLK_SZ);
output = ctx->V;
hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ);
break;
}
/* do the encryption */
crypto_cipher_encrypt_one(ctx->tfm, output, tmp);
}
/*
* Now update our DT value
*/
for (i = DEFAULT_BLK_SZ - 1; i >= 0; i--) {
ctx->DT[i] += 1;
if (ctx->DT[i] != 0)
break;
}
dbgprint("Returning new block for context %p\n", ctx);
ctx->rand_data_valid = 0;
hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ);
hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ);
hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ);
hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ);
return 0;
}
/* Our exported functions */
static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx,
int do_cont_test)
{
unsigned char *ptr = buf;
unsigned int byte_count = (unsigned int)nbytes;
int err;
spin_lock_bh(&ctx->prng_lock);
err = -EINVAL;
if (ctx->flags & PRNG_NEED_RESET)
goto done;
/*
* If the FIXED_SIZE flag is on, only return whole blocks of
* pseudo random data
*/
err = -EINVAL;
if (ctx->flags & PRNG_FIXED_SIZE) {
if (nbytes < DEFAULT_BLK_SZ)
goto done;
byte_count = DEFAULT_BLK_SZ;
}
/*
* Return 0 in case of success as mandated by the kernel
* crypto API interface definition.
*/
err = 0;
dbgprint(KERN_CRIT "getting %d random bytes for context %p\n",
byte_count, ctx);
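/*
* rand_data_valid counts how many bytes of the current rand_data
* block have already been handed out; DEFAULT_BLK_SZ means the
* block is exhausted and a fresh one must be generated before any
* more bytes can be copied out.
*/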
remainder:
if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
memset(buf, 0, nbytes);
err = -EINVAL;
goto done;
}
}
/*
* Copy any data less than an entire block
*/
if (byte_count < DEFAULT_BLK_SZ) {
empty_rbuf:
while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {
*ptr = ctx->rand_data[ctx->rand_data_valid];
ptr++;
byte_count--;
ctx->rand_data_valid++;
if (byte_count == 0)
goto done;
}
}
/*
* Now copy whole blocks
*/
for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
memset(buf, 0, nbytes);
err = -EINVAL;
goto done;
}
}
if (ctx->rand_data_valid > 0)
goto empty_rbuf;
memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ);
ctx->rand_data_valid += DEFAULT_BLK_SZ;
ptr += DEFAULT_BLK_SZ;
}
/*
* Now go back and get any remaining partial block
*/
if (byte_count)
goto remainder;
done:
spin_unlock_bh(&ctx->prng_lock);
dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",
err, ctx);
return err;
}
static void free_prng_context(struct prng_context *ctx)
{
crypto_free_cipher(ctx->tfm);
}
static int reset_prng_context(struct prng_context *ctx,
const unsigned char *key, size_t klen,
const unsigned char *V, const unsigned char *DT)
{
int ret;
const unsigned char *prng_key;
spin_lock_bh(&ctx->prng_lock);
ctx->flags |= PRNG_NEED_RESET;
prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY;
if (!key)
klen = DEFAULT_PRNG_KSZ;
if (V)
memcpy(ctx->V, V, DEFAULT_BLK_SZ);
else
memcpy(ctx->V, DEFAULT_V_SEED, DEFAULT_BLK_SZ);
if (DT)
memcpy(ctx->DT, DT, DEFAULT_BLK_SZ);
else
memset(ctx->DT, 0, DEFAULT_BLK_SZ);
memset(ctx->rand_data, 0, DEFAULT_BLK_SZ);
memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ);
ctx->rand_data_valid = DEFAULT_BLK_SZ;
ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen);
if (ret) {
dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
crypto_cipher_get_flags(ctx->tfm));
goto out;
}
ret = 0;
ctx->flags &= ~PRNG_NEED_RESET;
out:
spin_unlock_bh(&ctx->prng_lock);
return ret;
}
static int cprng_init(struct crypto_tfm *tfm)
{
struct prng_context *ctx = crypto_tfm_ctx(tfm);
spin_lock_init(&ctx->prng_lock);
ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
if (IS_ERR(ctx->tfm)) {
dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
ctx);
return PTR_ERR(ctx->tfm);
}
if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0)
return -EINVAL;
/*
* after allocation, we should always force the user to reset
* so they don't inadvertently use the insecure default values
* without specifying them intentionally
*/
ctx->flags |= PRNG_NEED_RESET;
return 0;
}
static void cprng_exit(struct crypto_tfm *tfm)
{
free_prng_context(crypto_tfm_ctx(tfm));
}
static int cprng_get_random(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *rdata, unsigned int dlen)
{
struct prng_context *prng = crypto_rng_ctx(tfm);
return get_prng_bytes(rdata, dlen, prng, 0);
}
/*
* This is the reset method for the registered cprng; the seed value
* is interpreted as the tuple { V, KEY, DT }.
* V and KEY are required during reset, and DT is optional, detected
* as being present by testing the length of the seed.
*/
static int cprng_reset(struct crypto_rng *tfm,
const u8 *seed, unsigned int slen)
{
struct prng_context *prng = crypto_rng_ctx(tfm);
const u8 *key = seed + DEFAULT_BLK_SZ;
const u8 *dt = NULL;
if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ)
return -EINVAL;
if (slen >= (2 * DEFAULT_BLK_SZ + DEFAULT_PRNG_KSZ))
dt = key + DEFAULT_PRNG_KSZ;
reset_prng_context(prng, key, DEFAULT_PRNG_KSZ, seed, dt);
if (prng->flags & PRNG_NEED_RESET)
return -EINVAL;
return 0;
}
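/*
* Minimal seeding/usage sketch for the "ansi_cprng" rng registered
* below, via the generic crypto_rng API (seed = V || KEY with an
* optional trailing DT block):
*
*	struct crypto_rng *rng = crypto_alloc_rng("ansi_cprng", 0, 0);
*	u8 seed[DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ];
*	u8 out[DEFAULT_BLK_SZ];
*
*	if (!IS_ERR(rng)) {
*		get_random_bytes(seed, sizeof(seed));
*		crypto_rng_reset(rng, seed, sizeof(seed));
*		crypto_rng_get_bytes(rng, out, sizeof(out));
*		crypto_free_rng(rng);
*	}
*/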
#ifdef CONFIG_CRYPTO_FIPS
static int fips_cprng_get_random(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *rdata, unsigned int dlen)
{
struct prng_context *prng = crypto_rng_ctx(tfm);
return get_prng_bytes(rdata, dlen, prng, 1);
}
static int fips_cprng_reset(struct crypto_rng *tfm,
const u8 *seed, unsigned int slen)
{
u8 rdata[DEFAULT_BLK_SZ];
const u8 *key = seed + DEFAULT_BLK_SZ;
int rc;
struct prng_context *prng = crypto_rng_ctx(tfm);
if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ)
return -EINVAL;
/* fips strictly requires seed != key */
if (!memcmp(seed, key, DEFAULT_PRNG_KSZ))
return -EINVAL;
rc = cprng_reset(tfm, seed, slen);
if (!rc)
goto out;
/* this primes our continuity test */
rc = get_prng_bytes(rdata, DEFAULT_BLK_SZ, prng, 0);
prng->rand_data_valid = DEFAULT_BLK_SZ;
out:
return rc;
}
#endif
static struct rng_alg rng_algs[] = { {
.generate = cprng_get_random,
.seed = cprng_reset,
.seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ,
.base = {
.cra_name = "stdrng",
.cra_driver_name = "ansi_cprng",
.cra_priority = 100,
.cra_ctxsize = sizeof(struct prng_context),
.cra_module = THIS_MODULE,
.cra_init = cprng_init,
.cra_exit = cprng_exit,
}
#ifdef CONFIG_CRYPTO_FIPS
}, {
.generate = fips_cprng_get_random,
.seed = fips_cprng_reset,
.seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ,
.base = {
.cra_name = "fips(ansi_cprng)",
.cra_driver_name = "fips_ansi_cprng",
.cra_priority = 300,
.cra_ctxsize = sizeof(struct prng_context),
.cra_module = THIS_MODULE,
.cra_init = cprng_init,
.cra_exit = cprng_exit,
}
#endif
} };
/* Module initialization */
static int __init prng_mod_init(void)
{
return crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs));
}
static void __exit prng_mod_fini(void)
{
crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs));
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
MODULE_AUTHOR("Neil Horman <[email protected]>");
module_param(dbg, int, 0);
MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
subsys_initcall(prng_mod_init);
module_exit(prng_mod_fini);
MODULE_ALIAS_CRYPTO("stdrng");
MODULE_ALIAS_CRYPTO("ansi_cprng");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
| linux-master | crypto/ansi_cprng.c |
// SPDX-License-Identifier: GPL-2.0
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/xxhash.h>
#include <asm/unaligned.h>
#define XXHASH64_BLOCK_SIZE 32
#define XXHASH64_DIGEST_SIZE 8
struct xxhash64_tfm_ctx {
u64 seed;
};
struct xxhash64_desc_ctx {
struct xxh64_state xxhstate;
};
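/*
* The tfm context carries the optional 8-byte seed set via setkey(),
* while the per-request desc context holds the streaming xxh64 state.
*/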
static int xxhash64_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(tfm);
if (keylen != sizeof(tctx->seed))
return -EINVAL;
tctx->seed = get_unaligned_le64(key);
return 0;
}
static int xxhash64_init(struct shash_desc *desc)
{
struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc);
xxh64_reset(&dctx->xxhstate, tctx->seed);
return 0;
}
static int xxhash64_update(struct shash_desc *desc, const u8 *data,
unsigned int length)
{
struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc);
xxh64_update(&dctx->xxhstate, data, length);
return 0;
}
static int xxhash64_final(struct shash_desc *desc, u8 *out)
{
struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc);
put_unaligned_le64(xxh64_digest(&dctx->xxhstate), out);
return 0;
}
static int xxhash64_digest(struct shash_desc *desc, const u8 *data,
unsigned int length, u8 *out)
{
struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
put_unaligned_le64(xxh64(data, length, tctx->seed), out);
return 0;
}
static struct shash_alg alg = {
.digestsize = XXHASH64_DIGEST_SIZE,
.setkey = xxhash64_setkey,
.init = xxhash64_init,
.update = xxhash64_update,
.final = xxhash64_final,
.digest = xxhash64_digest,
.descsize = sizeof(struct xxhash64_desc_ctx),
.base = {
.cra_name = "xxhash64",
.cra_driver_name = "xxhash64-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = XXHASH64_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct xxhash64_tfm_ctx),
.cra_module = THIS_MODULE,
}
};
static int __init xxhash_mod_init(void)
{
return crypto_register_shash(&alg);
}
static void __exit xxhash_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
subsys_initcall(xxhash_mod_init);
module_exit(xxhash_mod_fini);
MODULE_AUTHOR("Nikolay Borisov <[email protected]>");
MODULE_DESCRIPTION("xxhash calculations wrapper for lib/xxhash.c");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("xxhash64");
MODULE_ALIAS_CRYPTO("xxhash64-generic");
| linux-master | crypto/xxhash_generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Authenc: Simple AEAD wrapper for IPsec
*
* Copyright (c) 2007-2015 Herbert Xu <[email protected]>
*/
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
struct authenc_instance_ctx {
struct crypto_ahash_spawn auth;
struct crypto_skcipher_spawn enc;
unsigned int reqoff;
};
struct crypto_authenc_ctx {
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
struct crypto_sync_skcipher *null;
};
struct authenc_request_ctx {
struct scatterlist src[2];
struct scatterlist dst[2];
char tail[];
};
static void authenc_request_complete(struct aead_request *req, int err)
{
if (err != -EINPROGRESS)
aead_request_complete(req, err);
}
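/*
* The authenc key blob parsed below is an rtattr of type
* CRYPTO_AUTHENC_KEYA_PARAM whose payload is a single
* struct crypto_authenc_key_param carrying the big-endian
* enckeylen, immediately followed by authkey || enckey.
*/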
int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
unsigned int keylen)
{
struct rtattr *rta = (struct rtattr *)key;
struct crypto_authenc_key_param *param;
if (!RTA_OK(rta, keylen))
return -EINVAL;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
return -EINVAL;
/*
* RTA_OK() didn't align the rtattr's payload when validating that it
* fits in the buffer. Yet, the keys should start on the next 4-byte
* aligned boundary. To avoid confusion, require that the rtattr
* payload be exactly the param struct, which has a 4-byte aligned size.
*/
if (RTA_PAYLOAD(rta) != sizeof(*param))
return -EINVAL;
BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
param = RTA_DATA(rta);
keys->enckeylen = be32_to_cpu(param->enckeylen);
key += rta->rta_len;
keylen -= rta->rta_len;
if (keylen < keys->enckeylen)
return -EINVAL;
keys->authkeylen = keylen - keys->enckeylen;
keys->authkey = key;
keys->enckey = key + keys->authkeylen;
return 0;
}
EXPORT_SYMBOL_GPL(crypto_authenc_extractkeys);
static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
unsigned int keylen)
{
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct crypto_ahash *auth = ctx->auth;
struct crypto_skcipher *enc = ctx->enc;
struct crypto_authenc_keys keys;
int err = -EINVAL;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto out;
crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK);
err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
if (err)
goto out;
crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
out:
memzero_explicit(&keys, sizeof(keys));
return err;
}
static void authenc_geniv_ahash_done(void *data, int err)
{
struct aead_request *req = data;
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct aead_instance *inst = aead_alg_instance(authenc);
struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
if (err)
goto out;
scatterwalk_map_and_copy(ahreq->result, req->dst,
req->assoclen + req->cryptlen,
crypto_aead_authsize(authenc), 1);
out:
aead_request_complete(req, err);
}
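/*
* Compute the ICV over the associated data and ciphertext in
* req->dst and append it behind the ciphertext; the copy happens
* either inline below or, if the ahash went asynchronous, from
* authenc_geniv_ahash_done() above.
*/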
static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct aead_instance *inst = aead_alg_instance(authenc);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
struct crypto_ahash *auth = ctx->auth;
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
u8 *hash = areq_ctx->tail;
int err;
hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
crypto_ahash_alignmask(auth) + 1);
ahash_request_set_tfm(ahreq, auth);
ahash_request_set_crypt(ahreq, req->dst, hash,
req->assoclen + req->cryptlen);
ahash_request_set_callback(ahreq, flags,
authenc_geniv_ahash_done, req);
err = crypto_ahash_digest(ahreq);
if (err)
return err;
scatterwalk_map_and_copy(hash, req->dst, req->assoclen + req->cryptlen,
crypto_aead_authsize(authenc), 1);
return 0;
}
static void crypto_authenc_encrypt_done(void *data, int err)
{
struct aead_request *areq = data;
if (err)
goto out;
err = crypto_authenc_genicv(areq, 0);
out:
authenc_request_complete(areq, err);
}
static int crypto_authenc_copy_assoc(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
skcipher_request_set_sync_tfm(skreq, ctx->null);
skcipher_request_set_callback(skreq, aead_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
NULL);
return crypto_skcipher_encrypt(skreq);
}
static int crypto_authenc_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct aead_instance *inst = aead_alg_instance(authenc);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_skcipher *enc = ctx->enc;
unsigned int cryptlen = req->cryptlen;
struct skcipher_request *skreq = (void *)(areq_ctx->tail +
ictx->reqoff);
struct scatterlist *src, *dst;
int err;
src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
dst = src;
if (req->src != req->dst) {
err = crypto_authenc_copy_assoc(req);
if (err)
return err;
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
}
skcipher_request_set_tfm(skreq, enc);
skcipher_request_set_callback(skreq, aead_request_flags(req),
crypto_authenc_encrypt_done, req);
skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
err = crypto_skcipher_encrypt(skreq);
if (err)
return err;
return crypto_authenc_genicv(req, aead_request_flags(req));
}
static int crypto_authenc_decrypt_tail(struct aead_request *req,
unsigned int flags)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct aead_instance *inst = aead_alg_instance(authenc);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
struct skcipher_request *skreq = (void *)(areq_ctx->tail +
ictx->reqoff);
unsigned int authsize = crypto_aead_authsize(authenc);
u8 *ihash = ahreq->result + authsize;
struct scatterlist *src, *dst;
scatterwalk_map_and_copy(ihash, req->src, ahreq->nbytes, authsize, 0);
if (crypto_memneq(ihash, ahreq->result, authsize))
return -EBADMSG;
src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
dst = src;
if (req->src != req->dst)
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
skcipher_request_set_tfm(skreq, ctx->enc);
skcipher_request_set_callback(skreq, flags,
req->base.complete, req->base.data);
skcipher_request_set_crypt(skreq, src, dst,
req->cryptlen - authsize, req->iv);
return crypto_skcipher_decrypt(skreq);
}
static void authenc_verify_ahash_done(void *data, int err)
{
struct aead_request *req = data;
if (err)
goto out;
err = crypto_authenc_decrypt_tail(req, 0);
out:
authenc_request_complete(req, err);
}
static int crypto_authenc_decrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(authenc);
struct aead_instance *inst = aead_alg_instance(authenc);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
struct crypto_ahash *auth = ctx->auth;
struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
u8 *hash = areq_ctx->tail;
int err;
hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
crypto_ahash_alignmask(auth) + 1);
ahash_request_set_tfm(ahreq, auth);
ahash_request_set_crypt(ahreq, req->src, hash,
req->assoclen + req->cryptlen - authsize);
ahash_request_set_callback(ahreq, aead_request_flags(req),
authenc_verify_ahash_done, req);
err = crypto_ahash_digest(ahreq);
if (err)
return err;
return crypto_authenc_decrypt_tail(req, aead_request_flags(req));
}
static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
{
struct aead_instance *inst = aead_alg_instance(tfm);
struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
struct crypto_sync_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
if (IS_ERR(auth))
return PTR_ERR(auth);
enc = crypto_spawn_skcipher(&ictx->enc);
err = PTR_ERR(enc);
if (IS_ERR(enc))
goto err_free_ahash;
null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_skcipher;
ctx->auth = auth;
ctx->enc = enc;
ctx->null = null;
crypto_aead_set_reqsize(
tfm,
sizeof(struct authenc_request_ctx) +
ictx->reqoff +
max_t(unsigned int,
crypto_ahash_reqsize(auth) +
sizeof(struct ahash_request),
sizeof(struct skcipher_request) +
crypto_skcipher_reqsize(enc)));
return 0;
err_free_skcipher:
crypto_free_skcipher(enc);
err_free_ahash:
crypto_free_ahash(auth);
return err;
}
static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
{
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
crypto_put_default_null_skcipher();
}
static void crypto_authenc_free(struct aead_instance *inst)
{
struct authenc_instance_ctx *ctx = aead_instance_ctx(inst);
crypto_drop_skcipher(&ctx->enc);
crypto_drop_ahash(&ctx->auth);
kfree(inst);
}
static int crypto_authenc_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
u32 mask;
struct aead_instance *inst;
struct authenc_instance_ctx *ctx;
struct hash_alg_common *auth;
struct crypto_alg *auth_base;
struct skcipher_alg *enc;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
ctx = aead_instance_ctx(inst);
err = crypto_grab_ahash(&ctx->auth, aead_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
auth = crypto_spawn_ahash_alg(&ctx->auth);
auth_base = &auth->base;
err = crypto_grab_skcipher(&ctx->enc, aead_crypto_instance(inst),
crypto_attr_alg_name(tb[2]), 0, mask);
if (err)
goto err_free_inst;
enc = crypto_spawn_skcipher_alg(&ctx->enc);
ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask,
auth_base->cra_alignmask + 1);
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"authenc(%s,%s)", auth_base->cra_name,
enc->base.cra_name) >=
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"authenc(%s,%s)", auth_base->cra_driver_name,
enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
auth_base->cra_priority;
inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
enc->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
inst->alg.maxauthsize = auth->digestsize;
inst->alg.init = crypto_authenc_init_tfm;
inst->alg.exit = crypto_authenc_exit_tfm;
inst->alg.setkey = crypto_authenc_setkey;
inst->alg.encrypt = crypto_authenc_encrypt;
inst->alg.decrypt = crypto_authenc_decrypt;
inst->free = crypto_authenc_free;
err = aead_register_instance(tmpl, inst);
if (err) {
err_free_inst:
crypto_authenc_free(inst);
}
return err;
}
static struct crypto_template crypto_authenc_tmpl = {
.name = "authenc",
.create = crypto_authenc_create,
.module = THIS_MODULE,
};
static int __init crypto_authenc_module_init(void)
{
return crypto_register_template(&crypto_authenc_tmpl);
}
static void __exit crypto_authenc_module_exit(void)
{
crypto_unregister_template(&crypto_authenc_tmpl);
}
subsys_initcall(crypto_authenc_module_init);
module_exit(crypto_authenc_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Simple AEAD wrapper for IPsec");
MODULE_ALIAS_CRYPTO("authenc");
| linux-master | crypto/authenc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Diffie-Hellman Key Agreement Method [RFC2631]
*
* Copyright (c) 2016, Intel Corporation
* Authors: Salvatore Benedetto <[email protected]>
*/
#include <linux/fips.h>
#include <linux/module.h>
#include <crypto/internal/kpp.h>
#include <crypto/kpp.h>
#include <crypto/dh.h>
#include <crypto/rng.h>
#include <linux/mpi.h>
struct dh_ctx {
MPI p; /* Value is guaranteed to be set. */
MPI g; /* Value is guaranteed to be set. */
MPI xa; /* Value is guaranteed to be set. */
};
static void dh_clear_ctx(struct dh_ctx *ctx)
{
mpi_free(ctx->p);
mpi_free(ctx->g);
mpi_free(ctx->xa);
memset(ctx, 0, sizeof(*ctx));
}
/*
* If base is g we compute the public key
* ya = g^xa mod p; [RFC2631 sec 2.1.1]
* else if base is the counterpart's public key we compute the shared secret
* ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
*/
static int _compute_val(const struct dh_ctx *ctx, MPI base, MPI val)
{
/* val = base^xa mod p */
return mpi_powm(val, base, ctx->xa, ctx->p);
}
static inline struct dh_ctx *dh_get_ctx(struct crypto_kpp *tfm)
{
return kpp_tfm_ctx(tfm);
}
static int dh_check_params_length(unsigned int p_len)
{
if (fips_enabled)
return (p_len < 2048) ? -EINVAL : 0;
return (p_len < 1536) ? -EINVAL : 0;
}
static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
{
if (dh_check_params_length(params->p_size << 3))
return -EINVAL;
ctx->p = mpi_read_raw_data(params->p, params->p_size);
if (!ctx->p)
return -EINVAL;
ctx->g = mpi_read_raw_data(params->g, params->g_size);
if (!ctx->g)
return -EINVAL;
return 0;
}
static int dh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct dh_ctx *ctx = dh_get_ctx(tfm);
struct dh params;
/* Free the old MPI key if any */
dh_clear_ctx(ctx);
if (crypto_dh_decode_key(buf, len, ¶ms) < 0)
goto err_clear_ctx;
if (dh_set_params(ctx, ¶ms) < 0)
goto err_clear_ctx;
ctx->xa = mpi_read_raw_data(params.key, params.key_size);
if (!ctx->xa)
goto err_clear_ctx;
return 0;
err_clear_ctx:
dh_clear_ctx(ctx);
return -EINVAL;
}
/*
* SP800-56A public key verification:
*
* * For the safe-prime groups in FIPS mode, Q can be computed
* trivially from P and a full validation according to SP800-56A
* section 5.6.2.3.1 is performed.
*
* * For all other sets of group parameters, only a partial validation
* according to SP800-56A section 5.6.2.3.2 is performed.
*/
static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y)
{
if (unlikely(!ctx->p))
return -EINVAL;
/*
* Step 1: Verify that 2 <= y <= p - 2.
*
* The upper limit check is actually y < p instead of y < p - 1
* in order to save one mpi_sub_ui() invocation here. Note that
* p - 1 is the non-trivial element of the subgroup of order 2 and
* thus, the check on y^q below would fail if y == p - 1.
*/
if (mpi_cmp_ui(y, 1) < 1 || mpi_cmp(y, ctx->p) >= 0)
return -EINVAL;
/*
* Step 2: Verify that 1 = y^q mod p
*
* For the safe-prime groups q = (p - 1)/2.
*/
if (fips_enabled) {
MPI val, q;
int ret;
val = mpi_alloc(0);
if (!val)
return -ENOMEM;
q = mpi_alloc(mpi_get_nlimbs(ctx->p));
if (!q) {
mpi_free(val);
return -ENOMEM;
}
/*
* ->p is odd, so no need to explicitly subtract one
* from it before shifting to the right.
*/
mpi_rshift(q, ctx->p, 1);
ret = mpi_powm(val, y, q, ctx->p);
mpi_free(q);
if (ret) {
mpi_free(val);
return ret;
}
ret = mpi_cmp_ui(val, 1);
mpi_free(val);
if (ret != 0)
return -EINVAL;
}
return 0;
}
static int dh_compute_value(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct dh_ctx *ctx = dh_get_ctx(tfm);
MPI base, val = mpi_alloc(0);
int ret = 0;
int sign;
if (!val)
return -ENOMEM;
if (unlikely(!ctx->xa)) {
ret = -EINVAL;
goto err_free_val;
}
if (req->src) {
base = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!base) {
ret = -EINVAL;
goto err_free_val;
}
ret = dh_is_pubkey_valid(ctx, base);
if (ret)
goto err_free_base;
} else {
base = ctx->g;
}
ret = _compute_val(ctx, base, val);
if (ret)
goto err_free_base;
if (fips_enabled) {
/* SP800-56A rev3 5.7.1.1 check: Validation of shared secret */
if (req->src) {
MPI pone;
/* z <= 1 */
if (mpi_cmp_ui(val, 1) < 1) {
ret = -EBADMSG;
goto err_free_base;
}
/* z == p - 1 */
pone = mpi_alloc(0);
if (!pone) {
ret = -ENOMEM;
goto err_free_base;
}
ret = mpi_sub_ui(pone, ctx->p, 1);
if (!ret && !mpi_cmp(pone, val))
ret = -EBADMSG;
mpi_free(pone);
if (ret)
goto err_free_base;
/* SP800-56A rev 3 5.6.2.1.3 key check */
} else {
if (dh_is_pubkey_valid(ctx, val)) {
ret = -EAGAIN;
goto err_free_val;
}
}
}
ret = mpi_write_to_sgl(val, req->dst, req->dst_len, &sign);
if (ret)
goto err_free_base;
if (sign < 0)
ret = -EBADMSG;
err_free_base:
if (req->src)
mpi_free(base);
err_free_val:
mpi_free(val);
return ret;
}
static unsigned int dh_max_size(struct crypto_kpp *tfm)
{
struct dh_ctx *ctx = dh_get_ctx(tfm);
return mpi_get_size(ctx->p);
}
static void dh_exit_tfm(struct crypto_kpp *tfm)
{
struct dh_ctx *ctx = dh_get_ctx(tfm);
dh_clear_ctx(ctx);
}
static struct kpp_alg dh = {
.set_secret = dh_set_secret,
.generate_public_key = dh_compute_value,
.compute_shared_secret = dh_compute_value,
.max_size = dh_max_size,
.exit = dh_exit_tfm,
.base = {
.cra_name = "dh",
.cra_driver_name = "dh-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct dh_ctx),
},
};
struct dh_safe_prime {
unsigned int max_strength;
unsigned int p_size;
const char *p;
};
static const char safe_prime_g[] = { 2 };
struct dh_safe_prime_instance_ctx {
struct crypto_kpp_spawn dh_spawn;
const struct dh_safe_prime *safe_prime;
};
struct dh_safe_prime_tfm_ctx {
struct crypto_kpp *dh_tfm;
};
static void dh_safe_prime_free_instance(struct kpp_instance *inst)
{
struct dh_safe_prime_instance_ctx *ctx = kpp_instance_ctx(inst);
crypto_drop_kpp(&ctx->dh_spawn);
kfree(inst);
}
static inline struct dh_safe_prime_instance_ctx *dh_safe_prime_instance_ctx(
struct crypto_kpp *tfm)
{
return kpp_instance_ctx(kpp_alg_instance(tfm));
}
static int dh_safe_prime_init_tfm(struct crypto_kpp *tfm)
{
struct dh_safe_prime_instance_ctx *inst_ctx =
dh_safe_prime_instance_ctx(tfm);
struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm);
tfm_ctx->dh_tfm = crypto_spawn_kpp(&inst_ctx->dh_spawn);
if (IS_ERR(tfm_ctx->dh_tfm))
return PTR_ERR(tfm_ctx->dh_tfm);
kpp_set_reqsize(tfm, sizeof(struct kpp_request) +
crypto_kpp_reqsize(tfm_ctx->dh_tfm));
return 0;
}
static void dh_safe_prime_exit_tfm(struct crypto_kpp *tfm)
{
struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm);
crypto_free_kpp(tfm_ctx->dh_tfm);
}
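/*
* Add @val to the n-word big-endian integer at @dst, in place,
* propagating the carry from the least significant word upward;
* returns the carry out of the most significant word.
*/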
static u64 __add_u64_to_be(__be64 *dst, unsigned int n, u64 val)
{
unsigned int i;
for (i = n; val && i > 0; --i) {
u64 tmp = be64_to_cpu(dst[i - 1]);
tmp += val;
val = tmp >= val ? 0 : 1;
dst[i - 1] = cpu_to_be64(tmp);
}
return val;
}
static void *dh_safe_prime_gen_privkey(const struct dh_safe_prime *safe_prime,
unsigned int *key_size)
{
unsigned int n, oversampling_size;
__be64 *key;
int err;
u64 h, o;
/*
* Generate a private key following NIST SP800-56Ar3,
* sec. 5.6.1.1.1 and 5.6.1.1.3, respectively.
*
* 5.6.1.1.1: choose key length N such that
* 2 * ->max_strength <= N <= log2(q) + 1 = ->p_size * 8 - 1
* with q = (p - 1) / 2 for the safe-prime groups.
* Choose the lower bound's next power of two for N in order to
* avoid excessively large private keys while still
* maintaining some extra reserve beyond the bare minimum in
* most cases. Note that for each entry in safe_prime_groups[],
* the following holds for such N:
* - N >= 256, in particular it is a multiple of 2^6 = 64
* bits and
* - N < log2(q) + 1, i.e. N respects the upper bound.
*/
n = roundup_pow_of_two(2 * safe_prime->max_strength);
WARN_ON_ONCE(n & ((1u << 6) - 1));
n >>= 6; /* Convert N into units of u64. */
/*
* Reserve one extra u64 to hold the extra random bits
* required as per 5.6.1.1.3.
*/
oversampling_size = (n + 1) * sizeof(__be64);
key = kmalloc(oversampling_size, GFP_KERNEL);
if (!key)
return ERR_PTR(-ENOMEM);
/*
* 5.6.1.1.3, step 3 (and implicitly step 4): obtain N + 64
* random bits and interpret them as a big endian integer.
*/
err = -EFAULT;
if (crypto_get_default_rng())
goto out_err;
err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)key,
oversampling_size);
crypto_put_default_rng();
if (err)
goto out_err;
/*
* 5.6.1.1.3, step 5 is implicit: 2^N < q and thus,
* M = min(2^N, q) = 2^N.
*
* For step 6, calculate
* key = (key[] mod (M - 1)) + 1 = (key[] mod (2^N - 1)) + 1.
*
* In order to avoid expensive divisions, note that
* 2^N mod (2^N - 1) = 1 and thus, for any integer h,
* 2^N * h mod (2^N - 1) = h mod (2^N - 1) always holds.
* The big endian integer key[] composed of n + 1 64bit words
* may be written as key[] = h * 2^N + l, with h = key[0]
* representing the 64 most significant bits and l
* corresponding to the remaining 2^N bits. With the remark
* from above,
* h * 2^N + l mod (2^N - 1) = l + h mod (2^N - 1).
* As both, l and h are less than 2^N, their sum after
* this first reduction is guaranteed to be <= 2^(N + 1) - 2.
* Or equivalently, that their sum can again be written as
* h' * 2^N + l' with h' now either zero or one and if one,
* then l' <= 2^N - 2. Thus, all bits at positions >= N will
* be zero after a second reduction:
* h' * 2^N + l' mod (2^N - 1) = l' + h' mod (2^N - 1).
* At this point, it is still possible that
* l' + h' = 2^N - 1, i.e. that l' + h' mod (2^N - 1)
* is zero. This condition will be detected below by means of
* the final increment overflowing in this case.
*/
h = be64_to_cpu(key[0]);
h = __add_u64_to_be(key + 1, n, h);
h = __add_u64_to_be(key + 1, n, h);
WARN_ON_ONCE(h);
/* Increment to obtain the final result. */
o = __add_u64_to_be(key + 1, n, 1);
/*
* The overflow bit o from the increment is either zero or
* one. If zero, key[1:n] holds the final result in big-endian
* order. If one, key[1:n] is zero now, but needs to be set to
* one, c.f. above.
*/
if (o)
key[n] = cpu_to_be64(1);
/* n is in units of u64, convert to bytes. */
*key_size = n << 3;
/* Strip the leading extra __be64, which is (virtually) zero by now. */
memmove(key, &key[1], *key_size);
return key;
out_err:
kfree_sensitive(key);
return ERR_PTR(err);
}
static int dh_safe_prime_set_secret(struct crypto_kpp *tfm, const void *buffer,
unsigned int len)
{
struct dh_safe_prime_instance_ctx *inst_ctx =
dh_safe_prime_instance_ctx(tfm);
struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm);
struct dh params = {};
void *buf = NULL, *key = NULL;
unsigned int buf_size;
int err;
if (buffer) {
err = __crypto_dh_decode_key(buffer, len, ¶ms);
if (err)
return err;
if (params.p_size || params.g_size)
return -EINVAL;
}
params.p = inst_ctx->safe_prime->p;
params.p_size = inst_ctx->safe_prime->p_size;
params.g = safe_prime_g;
params.g_size = sizeof(safe_prime_g);
if (!params.key_size) {
key = dh_safe_prime_gen_privkey(inst_ctx->safe_prime,
¶ms.key_size);
if (IS_ERR(key))
return PTR_ERR(key);
params.key = key;
}
buf_size = crypto_dh_key_len(¶ms);
buf = kmalloc(buf_size, GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto out;
}
err = crypto_dh_encode_key(buf, buf_size, ¶ms);
if (err)
goto out;
err = crypto_kpp_set_secret(tfm_ctx->dh_tfm, buf, buf_size);
out:
kfree_sensitive(buf);
kfree_sensitive(key);
return err;
}
static void dh_safe_prime_complete_req(void *data, int err)
{
struct kpp_request *req = data;
kpp_request_complete(req, err);
}
static struct kpp_request *dh_safe_prime_prepare_dh_req(struct kpp_request *req)
{
struct dh_safe_prime_tfm_ctx *tfm_ctx =
kpp_tfm_ctx(crypto_kpp_reqtfm(req));
struct kpp_request *dh_req = kpp_request_ctx(req);
kpp_request_set_tfm(dh_req, tfm_ctx->dh_tfm);
kpp_request_set_callback(dh_req, req->base.flags,
dh_safe_prime_complete_req, req);
kpp_request_set_input(dh_req, req->src, req->src_len);
kpp_request_set_output(dh_req, req->dst, req->dst_len);
return dh_req;
}
static int dh_safe_prime_generate_public_key(struct kpp_request *req)
{
struct kpp_request *dh_req = dh_safe_prime_prepare_dh_req(req);
return crypto_kpp_generate_public_key(dh_req);
}
static int dh_safe_prime_compute_shared_secret(struct kpp_request *req)
{
struct kpp_request *dh_req = dh_safe_prime_prepare_dh_req(req);
return crypto_kpp_compute_shared_secret(dh_req);
}
static unsigned int dh_safe_prime_max_size(struct crypto_kpp *tfm)
{
struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm);
return crypto_kpp_maxsize(tfm_ctx->dh_tfm);
}
static int __maybe_unused __dh_safe_prime_create(
struct crypto_template *tmpl, struct rtattr **tb,
const struct dh_safe_prime *safe_prime)
{
struct kpp_instance *inst;
struct dh_safe_prime_instance_ctx *ctx;
const char *dh_name;
struct kpp_alg *dh_alg;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_KPP, &mask);
if (err)
return err;
dh_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(dh_name))
return PTR_ERR(dh_name);
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
ctx = kpp_instance_ctx(inst);
err = crypto_grab_kpp(&ctx->dh_spawn, kpp_crypto_instance(inst),
dh_name, 0, mask);
if (err)
goto err_free_inst;
err = -EINVAL;
dh_alg = crypto_spawn_kpp_alg(&ctx->dh_spawn);
if (strcmp(dh_alg->base.cra_name, "dh"))
goto err_free_inst;
ctx->safe_prime = safe_prime;
err = crypto_inst_setname(kpp_crypto_instance(inst),
tmpl->name, &dh_alg->base);
if (err)
goto err_free_inst;
inst->alg.set_secret = dh_safe_prime_set_secret;
inst->alg.generate_public_key = dh_safe_prime_generate_public_key;
inst->alg.compute_shared_secret = dh_safe_prime_compute_shared_secret;
inst->alg.max_size = dh_safe_prime_max_size;
inst->alg.init = dh_safe_prime_init_tfm;
inst->alg.exit = dh_safe_prime_exit_tfm;
inst->alg.base.cra_priority = dh_alg->base.cra_priority;
inst->alg.base.cra_module = THIS_MODULE;
inst->alg.base.cra_ctxsize = sizeof(struct dh_safe_prime_tfm_ctx);
inst->free = dh_safe_prime_free_instance;
err = kpp_register_instance(tmpl, inst);
if (err)
goto err_free_inst;
return 0;
err_free_inst:
dh_safe_prime_free_instance(inst);
return err;
}
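/*
 * Instantiation sketch (illustrative, not part of this file's build):
 * once one of the templates below is registered, a user wraps the
 * generic "dh" implementation like so:
 *
 *	struct crypto_kpp *tfm = crypto_alloc_kpp("ffdhe2048(dh)", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_kpp(tfm);
 */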
#ifdef CONFIG_CRYPTO_DH_RFC7919_GROUPS
static const struct dh_safe_prime ffdhe2048_prime = {
.max_strength = 112,
.p_size = 256,
.p =
"\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a"
"\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95"
"\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9"
"\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a"
"\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0"
"\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35"
"\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72"
"\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a"
"\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb"
"\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4"
"\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70"
"\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61"
"\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83"
"\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05"
"\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa"
"\x88\x6b\x42\x38\x61\x28\x5c\x97\xff\xff\xff\xff\xff\xff\xff\xff",
};
static const struct dh_safe_prime ffdhe3072_prime = {
.max_strength = 128,
.p_size = 384,
.p =
"\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a"
"\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95"
"\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9"
"\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a"
"\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0"
"\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35"
"\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72"
"\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a"
"\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb"
"\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4"
"\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70"
"\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61"
"\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83"
"\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05"
"\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa"
"\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b"
"\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07"
"\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c"
"\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44"
"\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff"
"\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d"
"\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e"
"\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c"
"\x25\xe4\x1d\x2b\x66\xc6\x2e\x37\xff\xff\xff\xff\xff\xff\xff\xff",
};
static const struct dh_safe_prime ffdhe4096_prime = {
.max_strength = 152,
.p_size = 512,
.p =
"\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a"
"\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95"
"\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9"
"\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a"
"\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0"
"\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35"
"\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72"
"\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a"
"\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb"
"\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4"
"\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70"
"\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61"
"\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83"
"\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05"
"\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa"
"\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b"
"\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07"
"\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c"
"\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44"
"\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff"
"\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d"
"\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e"
"\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c"
"\x25\xe4\x1d\x2b\x66\x9e\x1e\xf1\x6e\x6f\x52\xc3\x16\x4d\xf4\xfb"
"\x79\x30\xe9\xe4\xe5\x88\x57\xb6\xac\x7d\x5f\x42\xd6\x9f\x6d\x18"
"\x77\x63\xcf\x1d\x55\x03\x40\x04\x87\xf5\x5b\xa5\x7e\x31\xcc\x7a"
"\x71\x35\xc8\x86\xef\xb4\x31\x8a\xed\x6a\x1e\x01\x2d\x9e\x68\x32"
"\xa9\x07\x60\x0a\x91\x81\x30\xc4\x6d\xc7\x78\xf9\x71\xad\x00\x38"
"\x09\x29\x99\xa3\x33\xcb\x8b\x7a\x1a\x1d\xb9\x3d\x71\x40\x00\x3c"
"\x2a\x4e\xce\xa9\xf9\x8d\x0a\xcc\x0a\x82\x91\xcd\xce\xc9\x7d\xcf"
"\x8e\xc9\xb5\x5a\x7f\x88\xa4\x6b\x4d\xb5\xa8\x51\xf4\x41\x82\xe1"
"\xc6\x8a\x00\x7e\x5e\x65\x5f\x6a\xff\xff\xff\xff\xff\xff\xff\xff",
};
static const struct dh_safe_prime ffdhe6144_prime = {
.max_strength = 176,
.p_size = 768,
.p =
"\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a"
"\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95"
"\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9"
"\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a"
"\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0"
"\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35"
"\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72"
"\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a"
"\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb"
"\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4"
"\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70"
"\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61"
"\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83"
"\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05"
"\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa"
"\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b"
"\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07"
"\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c"
"\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44"
"\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff"
"\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d"
"\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e"
"\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c"
"\x25\xe4\x1d\x2b\x66\x9e\x1e\xf1\x6e\x6f\x52\xc3\x16\x4d\xf4\xfb"
"\x79\x30\xe9\xe4\xe5\x88\x57\xb6\xac\x7d\x5f\x42\xd6\x9f\x6d\x18"
"\x77\x63\xcf\x1d\x55\x03\x40\x04\x87\xf5\x5b\xa5\x7e\x31\xcc\x7a"
"\x71\x35\xc8\x86\xef\xb4\x31\x8a\xed\x6a\x1e\x01\x2d\x9e\x68\x32"
"\xa9\x07\x60\x0a\x91\x81\x30\xc4\x6d\xc7\x78\xf9\x71\xad\x00\x38"
"\x09\x29\x99\xa3\x33\xcb\x8b\x7a\x1a\x1d\xb9\x3d\x71\x40\x00\x3c"
"\x2a\x4e\xce\xa9\xf9\x8d\x0a\xcc\x0a\x82\x91\xcd\xce\xc9\x7d\xcf"
"\x8e\xc9\xb5\x5a\x7f\x88\xa4\x6b\x4d\xb5\xa8\x51\xf4\x41\x82\xe1"
"\xc6\x8a\x00\x7e\x5e\x0d\xd9\x02\x0b\xfd\x64\xb6\x45\x03\x6c\x7a"
"\x4e\x67\x7d\x2c\x38\x53\x2a\x3a\x23\xba\x44\x42\xca\xf5\x3e\xa6"
"\x3b\xb4\x54\x32\x9b\x76\x24\xc8\x91\x7b\xdd\x64\xb1\xc0\xfd\x4c"
"\xb3\x8e\x8c\x33\x4c\x70\x1c\x3a\xcd\xad\x06\x57\xfc\xcf\xec\x71"
"\x9b\x1f\x5c\x3e\x4e\x46\x04\x1f\x38\x81\x47\xfb\x4c\xfd\xb4\x77"
"\xa5\x24\x71\xf7\xa9\xa9\x69\x10\xb8\x55\x32\x2e\xdb\x63\x40\xd8"
"\xa0\x0e\xf0\x92\x35\x05\x11\xe3\x0a\xbe\xc1\xff\xf9\xe3\xa2\x6e"
"\x7f\xb2\x9f\x8c\x18\x30\x23\xc3\x58\x7e\x38\xda\x00\x77\xd9\xb4"
"\x76\x3e\x4e\x4b\x94\xb2\xbb\xc1\x94\xc6\x65\x1e\x77\xca\xf9\x92"
"\xee\xaa\xc0\x23\x2a\x28\x1b\xf6\xb3\xa7\x39\xc1\x22\x61\x16\x82"
"\x0a\xe8\xdb\x58\x47\xa6\x7c\xbe\xf9\xc9\x09\x1b\x46\x2d\x53\x8c"
"\xd7\x2b\x03\x74\x6a\xe7\x7f\x5e\x62\x29\x2c\x31\x15\x62\xa8\x46"
"\x50\x5d\xc8\x2d\xb8\x54\x33\x8a\xe4\x9f\x52\x35\xc9\x5b\x91\x17"
"\x8c\xcf\x2d\xd5\xca\xce\xf4\x03\xec\x9d\x18\x10\xc6\x27\x2b\x04"
"\x5b\x3b\x71\xf9\xdc\x6b\x80\xd6\x3f\xdd\x4a\x8e\x9a\xdb\x1e\x69"
"\x62\xa6\x95\x26\xd4\x31\x61\xc1\xa4\x1d\x57\x0d\x79\x38\xda\xd4"
"\xa4\x0e\x32\x9c\xd0\xe4\x0e\x65\xff\xff\xff\xff\xff\xff\xff\xff",
};
static const struct dh_safe_prime ffdhe8192_prime = {
.max_strength = 200,
.p_size = 1024,
.p =
"\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a"
"\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95"
"\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9"
"\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a"
"\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0"
"\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35"
"\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72"
"\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a"
"\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb"
"\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4"
"\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70"
"\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61"
"\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83"
"\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05"
"\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa"
"\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b"
"\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07"
"\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c"
"\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44"
"\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff"
"\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d"
"\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e"
"\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c"
"\x25\xe4\x1d\x2b\x66\x9e\x1e\xf1\x6e\x6f\x52\xc3\x16\x4d\xf4\xfb"
"\x79\x30\xe9\xe4\xe5\x88\x57\xb6\xac\x7d\x5f\x42\xd6\x9f\x6d\x18"
"\x77\x63\xcf\x1d\x55\x03\x40\x04\x87\xf5\x5b\xa5\x7e\x31\xcc\x7a"
"\x71\x35\xc8\x86\xef\xb4\x31\x8a\xed\x6a\x1e\x01\x2d\x9e\x68\x32"
"\xa9\x07\x60\x0a\x91\x81\x30\xc4\x6d\xc7\x78\xf9\x71\xad\x00\x38"
"\x09\x29\x99\xa3\x33\xcb\x8b\x7a\x1a\x1d\xb9\x3d\x71\x40\x00\x3c"
"\x2a\x4e\xce\xa9\xf9\x8d\x0a\xcc\x0a\x82\x91\xcd\xce\xc9\x7d\xcf"
"\x8e\xc9\xb5\x5a\x7f\x88\xa4\x6b\x4d\xb5\xa8\x51\xf4\x41\x82\xe1"
"\xc6\x8a\x00\x7e\x5e\x0d\xd9\x02\x0b\xfd\x64\xb6\x45\x03\x6c\x7a"
"\x4e\x67\x7d\x2c\x38\x53\x2a\x3a\x23\xba\x44\x42\xca\xf5\x3e\xa6"
"\x3b\xb4\x54\x32\x9b\x76\x24\xc8\x91\x7b\xdd\x64\xb1\xc0\xfd\x4c"
"\xb3\x8e\x8c\x33\x4c\x70\x1c\x3a\xcd\xad\x06\x57\xfc\xcf\xec\x71"
"\x9b\x1f\x5c\x3e\x4e\x46\x04\x1f\x38\x81\x47\xfb\x4c\xfd\xb4\x77"
"\xa5\x24\x71\xf7\xa9\xa9\x69\x10\xb8\x55\x32\x2e\xdb\x63\x40\xd8"
"\xa0\x0e\xf0\x92\x35\x05\x11\xe3\x0a\xbe\xc1\xff\xf9\xe3\xa2\x6e"
"\x7f\xb2\x9f\x8c\x18\x30\x23\xc3\x58\x7e\x38\xda\x00\x77\xd9\xb4"
"\x76\x3e\x4e\x4b\x94\xb2\xbb\xc1\x94\xc6\x65\x1e\x77\xca\xf9\x92"
"\xee\xaa\xc0\x23\x2a\x28\x1b\xf6\xb3\xa7\x39\xc1\x22\x61\x16\x82"
"\x0a\xe8\xdb\x58\x47\xa6\x7c\xbe\xf9\xc9\x09\x1b\x46\x2d\x53\x8c"
"\xd7\x2b\x03\x74\x6a\xe7\x7f\x5e\x62\x29\x2c\x31\x15\x62\xa8\x46"
"\x50\x5d\xc8\x2d\xb8\x54\x33\x8a\xe4\x9f\x52\x35\xc9\x5b\x91\x17"
"\x8c\xcf\x2d\xd5\xca\xce\xf4\x03\xec\x9d\x18\x10\xc6\x27\x2b\x04"
"\x5b\x3b\x71\xf9\xdc\x6b\x80\xd6\x3f\xdd\x4a\x8e\x9a\xdb\x1e\x69"
"\x62\xa6\x95\x26\xd4\x31\x61\xc1\xa4\x1d\x57\x0d\x79\x38\xda\xd4"
"\xa4\x0e\x32\x9c\xcf\xf4\x6a\xaa\x36\xad\x00\x4c\xf6\x00\xc8\x38"
"\x1e\x42\x5a\x31\xd9\x51\xae\x64\xfd\xb2\x3f\xce\xc9\x50\x9d\x43"
"\x68\x7f\xeb\x69\xed\xd1\xcc\x5e\x0b\x8c\xc3\xbd\xf6\x4b\x10\xef"
"\x86\xb6\x31\x42\xa3\xab\x88\x29\x55\x5b\x2f\x74\x7c\x93\x26\x65"
"\xcb\x2c\x0f\x1c\xc0\x1b\xd7\x02\x29\x38\x88\x39\xd2\xaf\x05\xe4"
"\x54\x50\x4a\xc7\x8b\x75\x82\x82\x28\x46\xc0\xba\x35\xc3\x5f\x5c"
"\x59\x16\x0c\xc0\x46\xfd\x82\x51\x54\x1f\xc6\x8c\x9c\x86\xb0\x22"
"\xbb\x70\x99\x87\x6a\x46\x0e\x74\x51\xa8\xa9\x31\x09\x70\x3f\xee"
"\x1c\x21\x7e\x6c\x38\x26\xe5\x2c\x51\xaa\x69\x1e\x0e\x42\x3c\xfc"
"\x99\xe9\xe3\x16\x50\xc1\x21\x7b\x62\x48\x16\xcd\xad\x9a\x95\xf9"
"\xd5\xb8\x01\x94\x88\xd9\xc0\xa0\xa1\xfe\x30\x75\xa5\x77\xe2\x31"
"\x83\xf8\x1d\x4a\x3f\x2f\xa4\x57\x1e\xfc\x8c\xe0\xba\x8a\x4f\xe8"
"\xb6\x85\x5d\xfe\x72\xb0\xa6\x6e\xde\xd2\xfb\xab\xfb\xe5\x8a\x30"
"\xfa\xfa\xbe\x1c\x5d\x71\xa8\x7e\x2f\x74\x1e\xf8\xc1\xfe\x86\xfe"
"\xa6\xbb\xfd\xe5\x30\x67\x7f\x0d\x97\xd1\x1d\x49\xf7\xa8\x44\x3d"
"\x08\x22\xe5\x06\xa9\xf4\x61\x4e\x01\x1e\x2a\x94\x83\x8f\xf8\x8c"
"\xd6\x8c\x8b\xb7\xc5\xc6\x42\x4c\xff\xff\xff\xff\xff\xff\xff\xff",
};
static int dh_ffdhe2048_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
return __dh_safe_prime_create(tmpl, tb, &ffdhe2048_prime);
}
static int dh_ffdhe3072_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
return __dh_safe_prime_create(tmpl, tb, &ffdhe3072_prime);
}
static int dh_ffdhe4096_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
return __dh_safe_prime_create(tmpl, tb, &ffdhe4096_prime);
}
static int dh_ffdhe6144_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
return __dh_safe_prime_create(tmpl, tb, &ffdhe6144_prime);
}
static int dh_ffdhe8192_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
return __dh_safe_prime_create(tmpl, tb, &ffdhe8192_prime);
}
static struct crypto_template crypto_ffdhe_templates[] = {
{
.name = "ffdhe2048",
.create = dh_ffdhe2048_create,
.module = THIS_MODULE,
},
{
.name = "ffdhe3072",
.create = dh_ffdhe3072_create,
.module = THIS_MODULE,
},
{
.name = "ffdhe4096",
.create = dh_ffdhe4096_create,
.module = THIS_MODULE,
},
{
.name = "ffdhe6144",
.create = dh_ffdhe6144_create,
.module = THIS_MODULE,
},
{
.name = "ffdhe8192",
.create = dh_ffdhe8192_create,
.module = THIS_MODULE,
},
};
#else /* ! CONFIG_CRYPTO_DH_RFC7919_GROUPS */
static struct crypto_template crypto_ffdhe_templates[] = {};
#endif /* CONFIG_CRYPTO_DH_RFC7919_GROUPS */
static int __init dh_init(void)
{
int err;
err = crypto_register_kpp(&dh);
if (err)
return err;
err = crypto_register_templates(crypto_ffdhe_templates,
ARRAY_SIZE(crypto_ffdhe_templates));
if (err) {
crypto_unregister_kpp(&dh);
return err;
}
return 0;
}
static void __exit dh_exit(void)
{
crypto_unregister_templates(crypto_ffdhe_templates,
ARRAY_SIZE(crypto_ffdhe_templates));
crypto_unregister_kpp(&dh);
}
subsys_initcall(dh_init);
module_exit(dh_exit);
MODULE_ALIAS_CRYPTO("dh");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DH generic algorithm");
| linux-master | crypto/dh.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Handle async block request by crypto hardware engine.
*
* Copyright (C) 2016 Linaro, Inc.
*
* Author: Baolin Wang <[email protected]>
*/
#include <crypto/internal/aead.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/engine.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"
#define CRYPTO_ENGINE_MAX_QLEN 10
/* Temporary algorithm flag used to indicate an updated driver. */
#define CRYPTO_ALG_ENGINE 0x200
struct crypto_engine_alg {
struct crypto_alg base;
struct crypto_engine_op op;
};
/**
* crypto_finalize_request - finalize one request if the request is done
* @engine: the hardware engine
* @req: the request to be finalized
* @err: error number
*/
static void crypto_finalize_request(struct crypto_engine *engine,
struct crypto_async_request *req, int err)
{
unsigned long flags;
/*
 * If the hardware cannot enqueue more requests and the retry
 * mechanism is not supported, make sure the current request is
 * no longer tracked as in flight.
 */
if (!engine->retry_support) {
spin_lock_irqsave(&engine->queue_lock, flags);
if (engine->cur_req == req) {
engine->cur_req = NULL;
}
spin_unlock_irqrestore(&engine->queue_lock, flags);
}
lockdep_assert_in_softirq();
crypto_request_complete(req, err);
kthread_queue_work(engine->kworker, &engine->pump_requests);
}
/**
* crypto_pump_requests - dequeue one request from engine queue to process
* @engine: the hardware engine
* @in_kthread: true if we are in the context of the request pump thread
*
* This function checks if there is any request in the engine queue that
* needs processing and, if so, calls out to the driver to initialize the
* hardware and handle each request.
*/
static void crypto_pump_requests(struct crypto_engine *engine,
bool in_kthread)
{
struct crypto_async_request *async_req, *backlog;
struct crypto_engine_alg *alg;
struct crypto_engine_op *op;
unsigned long flags;
bool was_busy = false;
int ret;
spin_lock_irqsave(&engine->queue_lock, flags);
/* Make sure we are not already running a request */
if (!engine->retry_support && engine->cur_req)
goto out;
/* If another context is idling then defer */
if (engine->idling) {
kthread_queue_work(engine->kworker, &engine->pump_requests);
goto out;
}
/* Check if the engine queue is empty or the engine has been stopped */
if (!crypto_queue_len(&engine->queue) || !engine->running) {
if (!engine->busy)
goto out;
/* Only do teardown in the thread */
if (!in_kthread) {
kthread_queue_work(engine->kworker,
&engine->pump_requests);
goto out;
}
engine->busy = false;
engine->idling = true;
spin_unlock_irqrestore(&engine->queue_lock, flags);
if (engine->unprepare_crypt_hardware &&
engine->unprepare_crypt_hardware(engine))
dev_err(engine->dev, "failed to unprepare crypt hardware\n");
spin_lock_irqsave(&engine->queue_lock, flags);
engine->idling = false;
goto out;
}
start_request:
/* Get the first request from the engine queue to handle */
backlog = crypto_get_backlog(&engine->queue);
async_req = crypto_dequeue_request(&engine->queue);
if (!async_req)
goto out;
/*
* If hardware doesn't support the retry mechanism,
* keep track of the request we are processing now.
* We'll need it on completion (crypto_finalize_request).
*/
if (!engine->retry_support)
engine->cur_req = async_req;
if (engine->busy)
was_busy = true;
else
engine->busy = true;
spin_unlock_irqrestore(&engine->queue_lock, flags);
/* At this point we have successfully dequeued a request to process */
if (!was_busy && engine->prepare_crypt_hardware) {
ret = engine->prepare_crypt_hardware(engine);
if (ret) {
dev_err(engine->dev, "failed to prepare crypt hardware\n");
goto req_err_1;
}
}
if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) {
alg = container_of(async_req->tfm->__crt_alg,
struct crypto_engine_alg, base);
op = &alg->op;
} else {
dev_err(engine->dev, "failed to do request\n");
ret = -EINVAL;
goto req_err_1;
}
ret = op->do_one_request(engine, async_req);
/* The request was not executed successfully by the hardware */
if (ret < 0) {
/*
 * If the hardware supports retries and its queue is full
 * (-ENOSPC), requeue the request regardless of the backlog
 * flag. Otherwise, complete the request with the error.
 */
if (!engine->retry_support ||
(ret != -ENOSPC)) {
dev_err(engine->dev,
"Failed to do one request from queue: %d\n",
ret);
goto req_err_1;
}
spin_lock_irqsave(&engine->queue_lock, flags);
/*
* If hardware was unable to execute request, enqueue it
* back in front of crypto-engine queue, to keep the order
* of requests.
*/
crypto_enqueue_request_head(&engine->queue, async_req);
kthread_queue_work(engine->kworker, &engine->pump_requests);
goto out;
}
goto retry;
req_err_1:
crypto_request_complete(async_req, ret);
retry:
if (backlog)
crypto_request_complete(backlog, -EINPROGRESS);
/* If the retry mechanism is supported, send new requests to the engine */
if (engine->retry_support) {
spin_lock_irqsave(&engine->queue_lock, flags);
goto start_request;
}
return;
out:
spin_unlock_irqrestore(&engine->queue_lock, flags);
/*
 * Batching requests is only possible if the hardware can
 * enqueue multiple requests.
 */
if (engine->do_batch_requests) {
ret = engine->do_batch_requests(engine);
if (ret)
dev_err(engine->dev, "failed to do batch requests: %d\n",
ret);
}
return;
}
static void crypto_pump_work(struct kthread_work *work)
{
struct crypto_engine *engine =
container_of(work, struct crypto_engine, pump_requests);
crypto_pump_requests(engine, true);
}
/**
* crypto_transfer_request - transfer the new request into the engine queue
* @engine: the hardware engine
* @req: the request to be queued
* @need_pump: whether to queue the request pump as kthread work
*/
static int crypto_transfer_request(struct crypto_engine *engine,
struct crypto_async_request *req,
bool need_pump)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&engine->queue_lock, flags);
if (!engine->running) {
spin_unlock_irqrestore(&engine->queue_lock, flags);
return -ESHUTDOWN;
}
ret = crypto_enqueue_request(&engine->queue, req);
if (!engine->busy && need_pump)
kthread_queue_work(engine->kworker, &engine->pump_requests);
spin_unlock_irqrestore(&engine->queue_lock, flags);
return ret;
}
/**
* crypto_transfer_request_to_engine - transfer one request onto the
* engine queue
* @engine: the hardware engine
* @req: the request to be queued
*/
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
struct crypto_async_request *req)
{
return crypto_transfer_request(engine, req, true);
}
/**
* crypto_transfer_aead_request_to_engine - transfer one aead_request
* onto the engine queue
* @engine: the hardware engine
* @req: the request to be queued
*/
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
struct aead_request *req)
{
return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);
/**
* crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
* onto the engine queue
* @engine: the hardware engine
* @req: the request to be queued
*/
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
struct akcipher_request *req)
{
return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);
/**
* crypto_transfer_hash_request_to_engine - transfer one ahash_request
* onto the engine queue
* @engine: the hardware engine
* @req: the request to be queued
*/
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
struct ahash_request *req)
{
return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
/**
* crypto_transfer_kpp_request_to_engine - transfer one kpp_request onto
* the engine queue
* @engine: the hardware engine
* @req: the request to be queued
*/
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
struct kpp_request *req)
{
return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);
/**
* crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
* onto the engine queue
* @engine: the hardware engine
* @req: the request to be queued
*/
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
struct skcipher_request *req)
{
return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
/**
* crypto_finalize_aead_request - finalize one aead_request if
* the request is done
* @engine: the hardware engine
* @req: the request to be finalized
* @err: error number
*/
void crypto_finalize_aead_request(struct crypto_engine *engine,
struct aead_request *req, int err)
{
return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);
/**
* crypto_finalize_akcipher_request - finalize one akcipher_request if
* the request is done
* @engine: the hardware engine
* @req: the request to be finalized
* @err: error number
*/
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
struct akcipher_request *req, int err)
{
return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);
/**
* crypto_finalize_hash_request - finalize one ahash_request if
* the request is done
* @engine: the hardware engine
* @req: the request to be finalized
* @err: error number
*/
void crypto_finalize_hash_request(struct crypto_engine *engine,
struct ahash_request *req, int err)
{
return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
/**
* crypto_finalize_kpp_request - finalize one kpp_request if the request is done
* @engine: the hardware engine
* @req: the request to be finalized
* @err: error number
*/
void crypto_finalize_kpp_request(struct crypto_engine *engine,
struct kpp_request *req, int err)
{
return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);
/**
* crypto_finalize_skcipher_request - finalize one skcipher_request if
* the request is done
* @engine: the hardware engine
* @req: the request to be finalized
* @err: error number
*/
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
struct skcipher_request *req, int err)
{
return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
/**
* crypto_engine_start - start the hardware engine
* @engine: the hardware engine to be started
*
* Return 0 on success, else an error code.
*/
int crypto_engine_start(struct crypto_engine *engine)
{
unsigned long flags;
spin_lock_irqsave(&engine->queue_lock, flags);
if (engine->running || engine->busy) {
spin_unlock_irqrestore(&engine->queue_lock, flags);
return -EBUSY;
}
engine->running = true;
spin_unlock_irqrestore(&engine->queue_lock, flags);
kthread_queue_work(engine->kworker, &engine->pump_requests);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);
/**
* crypto_engine_stop - stop the hardware engine
* @engine: the hardware engine to be stopped
*
* Return 0 on success, else an error code.
*/
int crypto_engine_stop(struct crypto_engine *engine)
{
unsigned long flags;
unsigned int limit = 500;
int ret = 0;
spin_lock_irqsave(&engine->queue_lock, flags);
/*
 * If the engine queue is not empty or the engine is busy, wait
 * for a while so the queued requests can be pumped through.
 */
while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
spin_unlock_irqrestore(&engine->queue_lock, flags);
msleep(20);
spin_lock_irqsave(&engine->queue_lock, flags);
}
if (crypto_queue_len(&engine->queue) || engine->busy)
ret = -EBUSY;
else
engine->running = false;
spin_unlock_irqrestore(&engine->queue_lock, flags);
if (ret)
dev_warn(engine->dev, "could not stop engine\n");
return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);
/**
* crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
* and initialize it by setting the maximum number of entries in the software
* crypto-engine queue.
* @dev: the device attached with one hardware engine
* @retry_support: whether hardware has support for retry mechanism
* @cbk_do_batch: pointer to a callback function to be invoked when executing
* a batch of requests.
* This has the form:
* callback(struct crypto_engine *engine)
* where:
* engine: the crypto engine structure.
* @rt: whether this queue is set to run as a realtime task
* @qlen: maximum size of the crypto-engine queue
*
* This must be called from context that can sleep.
* Return: the crypto engine structure on success, else NULL.
*/
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
bool retry_support,
int (*cbk_do_batch)(struct crypto_engine *engine),
bool rt, int qlen)
{
struct crypto_engine *engine;
if (!dev)
return NULL;
engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
if (!engine)
return NULL;
engine->dev = dev;
engine->rt = rt;
engine->running = false;
engine->busy = false;
engine->idling = false;
engine->retry_support = retry_support;
engine->priv_data = dev;
/*
 * Batching requests is only possible if the hardware supports
 * the retry mechanism.
 */
engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;
snprintf(engine->name, sizeof(engine->name),
"%s-engine", dev_name(dev));
crypto_init_queue(&engine->queue, qlen);
spin_lock_init(&engine->queue_lock);
engine->kworker = kthread_create_worker(0, "%s", engine->name);
if (IS_ERR(engine->kworker)) {
dev_err(dev, "failed to create crypto request pump task\n");
return NULL;
}
kthread_init_work(&engine->pump_requests, crypto_pump_work);
if (engine->rt) {
dev_info(dev, "will run requests pump with realtime priority\n");
sched_set_fifo(engine->kworker->task);
}
return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
/**
* crypto_engine_alloc_init - allocate crypto hardware engine structure and
* initialize it.
* @dev: the device attached with one hardware engine
* @rt: whether this queue is set to run as a realtime task
*
* This must be called from context that can sleep.
* Return: the crypto engine structure on success, else NULL.
*/
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
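/*
 * Typical driver-side lifecycle (illustrative sketch; the "foo" names
 * are hypothetical driver code, not part of this file):
 *
 *	foo->engine = crypto_engine_alloc_init(dev, true);
 *	if (!foo->engine)
 *		return -ENOMEM;
 *
 *	err = crypto_engine_start(foo->engine);
 *	if (err) {
 *		crypto_engine_exit(foo->engine);
 *		return err;
 *	}
 */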
/**
* crypto_engine_exit - free the hardware engine's resources on exit
* @engine: the hardware engine to be freed
*
* Return 0 for success.
*/
int crypto_engine_exit(struct crypto_engine *engine)
{
int ret;
ret = crypto_engine_stop(engine);
if (ret)
return ret;
kthread_destroy_worker(engine->kworker);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
int crypto_engine_register_aead(struct aead_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
return crypto_register_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aead);
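/*
 * Registration sketch (illustrative; all "foo" symbols are hypothetical
 * driver code):
 *
 *	static struct aead_engine_alg foo_gcm_alg = {
 *		.base = { ... usual struct aead_alg fields ... },
 *		.op.do_one_request = foo_gcm_do_one_request,
 *	};
 *
 *	err = crypto_engine_register_aead(&foo_gcm_alg);
 */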
void crypto_engine_unregister_aead(struct aead_engine_alg *alg)
{
crypto_unregister_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_aead);
int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_engine_register_aead(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
crypto_engine_unregister_aeads(algs, i);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aeads);
void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_engine_unregister_aead(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_aeads);
int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE;
return crypto_register_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);
void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg)
{
crypto_unregister_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahash);
int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_engine_register_ahash(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
crypto_engine_unregister_ahashes(algs, i);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes);
void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_engine_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahashes);
int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
return crypto_register_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);
void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg)
{
crypto_unregister_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_akcipher);
int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
return crypto_register_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);
void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg)
{
crypto_unregister_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_kpp);
int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
return crypto_register_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);
void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg)
{
return crypto_unregister_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_skcipher);
int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_engine_register_skcipher(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
crypto_engine_unregister_skciphers(algs, i);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers);
void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_engine_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_skciphers);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");
| linux-master | crypto/crypto_engine.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ChaCha20-Poly1305 AEAD, RFC7539
*
* Copyright (C) 2015 Martin Willi
*/
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/chacha.h>
#include <crypto/poly1305.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
struct chachapoly_instance_ctx {
struct crypto_skcipher_spawn chacha;
struct crypto_ahash_spawn poly;
unsigned int saltlen;
};
struct chachapoly_ctx {
struct crypto_skcipher *chacha;
struct crypto_ahash *poly;
/* key bytes we use for the ChaCha20 IV */
unsigned int saltlen;
u8 salt[];
};
struct poly_req {
/* zero byte padding for AD/ciphertext, as needed */
u8 pad[POLY1305_BLOCK_SIZE];
/* tail data with AD/ciphertext lengths */
struct {
__le64 assoclen;
__le64 cryptlen;
} tail;
struct scatterlist src[1];
struct ahash_request req; /* must be last member */
};
struct chacha_req {
u8 iv[CHACHA_IV_SIZE];
struct scatterlist src[1];
struct skcipher_request req; /* must be last member */
};
struct chachapoly_req_ctx {
struct scatterlist src[2];
struct scatterlist dst[2];
/* the key we generate for Poly1305 using ChaCha20 */
u8 key[POLY1305_KEY_SIZE];
/* calculated Poly1305 tag */
u8 tag[POLY1305_DIGEST_SIZE];
/* length of data to en/decrypt, without ICV */
unsigned int cryptlen;
/* Actual AD, excluding IV */
unsigned int assoclen;
/* request flags, with MAY_SLEEP cleared if needed */
u32 flags;
union {
struct poly_req poly;
struct chacha_req chacha;
} u;
};
static inline void async_done_continue(struct aead_request *req, int err,
int (*cont)(struct aead_request *))
{
if (!err) {
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
rctx->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
err = cont(req);
}
if (err != -EINPROGRESS && err != -EBUSY)
aead_request_complete(req, err);
}
static void chacha_iv(u8 *iv, struct aead_request *req, u32 icb)
{
struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
__le32 leicb = cpu_to_le32(icb);
memcpy(iv, &leicb, sizeof(leicb));
memcpy(iv + sizeof(leicb), ctx->salt, ctx->saltlen);
memcpy(iv + sizeof(leicb) + ctx->saltlen, req->iv,
CHACHA_IV_SIZE - sizeof(leicb) - ctx->saltlen);
}
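/*
 * Resulting IV layout (illustrative; assumes CHACHAPOLY_IV_SIZE is 12):
 * for "rfc7539" (ivsize 12, saltlen 0) it is counter(4) || req->iv(12),
 * and for "rfc7539esp" (ivsize 8, saltlen 4) it is
 * counter(4) || salt(4) || req->iv(8), 16 bytes in total either way.
 */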
static int poly_verify_tag(struct aead_request *req)
{
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
u8 tag[sizeof(rctx->tag)];
scatterwalk_map_and_copy(tag, req->src,
req->assoclen + rctx->cryptlen,
sizeof(tag), 0);
if (crypto_memneq(tag, rctx->tag, sizeof(tag)))
return -EBADMSG;
return 0;
}
static int poly_copy_tag(struct aead_request *req)
{
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
scatterwalk_map_and_copy(rctx->tag, req->dst,
req->assoclen + rctx->cryptlen,
sizeof(rctx->tag), 1);
return 0;
}
static void chacha_decrypt_done(void *data, int err)
{
async_done_continue(data, err, poly_verify_tag);
}
static int chacha_decrypt(struct aead_request *req)
{
struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
struct chacha_req *creq = &rctx->u.chacha;
struct scatterlist *src, *dst;
int err;
if (rctx->cryptlen == 0)
goto skip;
chacha_iv(creq->iv, req, 1);
src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
dst = src;
if (req->src != req->dst)
dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
skcipher_request_set_callback(&creq->req, rctx->flags,
chacha_decrypt_done, req);
skcipher_request_set_tfm(&creq->req, ctx->chacha);
skcipher_request_set_crypt(&creq->req, src, dst,
rctx->cryptlen, creq->iv);
err = crypto_skcipher_decrypt(&creq->req);
if (err)
return err;
skip:
return poly_verify_tag(req);
}
static int poly_tail_continue(struct aead_request *req)
{
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
if (rctx->cryptlen == req->cryptlen) /* encrypting */
return poly_copy_tag(req);
return chacha_decrypt(req);
}
static void poly_tail_done(void *data, int err)
{
async_done_continue(data, err, poly_tail_continue);
}
static int poly_tail(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
struct poly_req *preq = &rctx->u.poly;
int err;
preq->tail.assoclen = cpu_to_le64(rctx->assoclen);
preq->tail.cryptlen = cpu_to_le64(rctx->cryptlen);
sg_init_one(preq->src, &preq->tail, sizeof(preq->tail));
ahash_request_set_callback(&preq->req, rctx->flags,
poly_tail_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, preq->src,
rctx->tag, sizeof(preq->tail));
err = crypto_ahash_finup(&preq->req);
if (err)
return err;
return poly_tail_continue(req);
}
static void poly_cipherpad_done(void *data, int err)
{
async_done_continue(data, err, poly_tail);
}
static int poly_cipherpad(struct aead_request *req)
{
struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
struct poly_req *preq = &rctx->u.poly;
unsigned int padlen;
int err;
padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE;
memset(preq->pad, 0, sizeof(preq->pad));
sg_init_one(preq->src, preq->pad, padlen);
ahash_request_set_callback(&preq->req, rctx->flags,
poly_cipherpad_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
err = crypto_ahash_update(&preq->req);
if (err)
return err;
return poly_tail(req);
}
static void poly_cipher_done(void *data, int err)
{
async_done_continue(data, err, poly_cipherpad);
}
static int poly_cipher(struct aead_request *req)
{
struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
struct poly_req *preq = &rctx->u.poly;
struct scatterlist *crypt = req->src;
int err;
if (rctx->cryptlen == req->cryptlen) /* encrypting */
crypt = req->dst;
crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
ahash_request_set_callback(&preq->req, rctx->flags,
poly_cipher_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
err = crypto_ahash_update(&preq->req);
if (err)
return err;
return poly_cipherpad(req);
}
static void poly_adpad_done(void *data, int err)
{
async_done_continue(data, err, poly_cipher);
}
static int poly_adpad(struct aead_request *req)
{
struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
struct poly_req *preq = &rctx->u.poly;
unsigned int padlen;
int err;
padlen = -rctx->assoclen % POLY1305_BLOCK_SIZE;
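/* e.g. assoclen == 13 yields padlen == 3 for the 16-byte Poly1305 block */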
memset(preq->pad, 0, sizeof(preq->pad));
sg_init_one(preq->src, preq->pad, padlen);
ahash_request_set_callback(&preq->req, rctx->flags,
poly_adpad_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
err = crypto_ahash_update(&preq->req);
if (err)
return err;
return poly_cipher(req);
}
static void poly_ad_done(void *data, int err)
{
async_done_continue(data, err, poly_adpad);
}
static int poly_ad(struct aead_request *req)
{
struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
struct poly_req *preq = &rctx->u.poly;
int err;
ahash_request_set_callback(&preq->req, rctx->flags,
poly_ad_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
err = crypto_ahash_update(&preq->req);
if (err)
return err;
return poly_adpad(req);
}
static void poly_setkey_done(void *data, int err)
{
async_done_continue(data, err, poly_ad);
}
static int poly_setkey(struct aead_request *req)
{
struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
struct poly_req *preq = &rctx->u.poly;
int err;
sg_init_one(preq->src, rctx->key, sizeof(rctx->key));
ahash_request_set_callback(&preq->req, rctx->flags,
poly_setkey_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
err = crypto_ahash_update(&preq->req);
if (err)
return err;
return poly_ad(req);
}
static void poly_init_done(void *data, int err)
{
async_done_continue(data, err, poly_setkey);
}
static int poly_init(struct aead_request *req)
{
struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
struct poly_req *preq = &rctx->u.poly;
int err;
ahash_request_set_callback(&preq->req, rctx->flags,
poly_init_done, req);
ahash_request_set_tfm(&preq->req, ctx->poly);
err = crypto_ahash_init(&preq->req);
if (err)
return err;
return poly_setkey(req);
}
static void poly_genkey_done(void *data, int err)
{
async_done_continue(data, err, poly_init);
}
static int poly_genkey(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
struct chacha_req *creq = &rctx->u.chacha;
int err;
rctx->assoclen = req->assoclen;
if (crypto_aead_ivsize(tfm) == 8) {
if (rctx->assoclen < 8)
return -EINVAL;
rctx->assoclen -= 8;
}
memset(rctx->key, 0, sizeof(rctx->key));
sg_init_one(creq->src, rctx->key, sizeof(rctx->key));
chacha_iv(creq->iv, req, 0);
skcipher_request_set_callback(&creq->req, rctx->flags,
poly_genkey_done, req);
skcipher_request_set_tfm(&creq->req, ctx->chacha);
skcipher_request_set_crypt(&creq->req, creq->src, creq->src,
POLY1305_KEY_SIZE, creq->iv);
err = crypto_skcipher_decrypt(&creq->req);
if (err)
return err;
return poly_init(req);
}
static void chacha_encrypt_done(void *data, int err)
{
async_done_continue(data, err, poly_genkey);
}
static int chacha_encrypt(struct aead_request *req)
{
struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
struct chacha_req *creq = &rctx->u.chacha;
struct scatterlist *src, *dst;
int err;
if (req->cryptlen == 0)
goto skip;
chacha_iv(creq->iv, req, 1);
src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
dst = src;
if (req->src != req->dst)
dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
skcipher_request_set_callback(&creq->req, rctx->flags,
chacha_encrypt_done, req);
skcipher_request_set_tfm(&creq->req, ctx->chacha);
skcipher_request_set_crypt(&creq->req, src, dst,
req->cryptlen, creq->iv);
err = crypto_skcipher_encrypt(&creq->req);
if (err)
return err;
skip:
return poly_genkey(req);
}
static int chachapoly_encrypt(struct aead_request *req)
{
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
rctx->cryptlen = req->cryptlen;
rctx->flags = aead_request_flags(req);
/* encrypt call chain:
* - chacha_encrypt/done()
* - poly_genkey/done()
* - poly_init/done()
* - poly_setkey/done()
* - poly_ad/done()
* - poly_adpad/done()
* - poly_cipher/done()
* - poly_cipherpad/done()
* - poly_tail/done/continue()
* - poly_copy_tag()
*/
return chacha_encrypt(req);
}
static int chachapoly_decrypt(struct aead_request *req)
{
struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
rctx->flags = aead_request_flags(req);
/* decrypt call chain:
* - poly_genkey/done()
* - poly_init/done()
* - poly_setkey/done()
* - poly_ad/done()
* - poly_adpad/done()
* - poly_cipher/done()
* - poly_cipherpad/done()
* - poly_tail/done/continue()
* - chacha_decrypt/done()
* - poly_verify_tag()
*/
return poly_genkey(req);
}
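/*
 * Key layout consumed by chachapoly_setkey() below (illustrative,
 * assuming a 4-byte salt for the ESP variant): the 32-byte ChaCha20 key
 * followed by ->saltlen salt bytes, i.e. 32 bytes for "rfc7539" and
 * 36 bytes for "rfc7539esp".
 */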
static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct chachapoly_ctx *ctx = crypto_aead_ctx(aead);
if (keylen != ctx->saltlen + CHACHA_KEY_SIZE)
return -EINVAL;
keylen -= ctx->saltlen;
memcpy(ctx->salt, key + keylen, ctx->saltlen);
crypto_skcipher_clear_flags(ctx->chacha, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(ctx->chacha, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
return crypto_skcipher_setkey(ctx->chacha, key, keylen);
}
static int chachapoly_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
if (authsize != POLY1305_DIGEST_SIZE)
return -EINVAL;
return 0;
}
static int chachapoly_init(struct crypto_aead *tfm)
{
struct aead_instance *inst = aead_alg_instance(tfm);
struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst);
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_skcipher *chacha;
struct crypto_ahash *poly;
unsigned long align;
poly = crypto_spawn_ahash(&ictx->poly);
if (IS_ERR(poly))
return PTR_ERR(poly);
chacha = crypto_spawn_skcipher(&ictx->chacha);
if (IS_ERR(chacha)) {
crypto_free_ahash(poly);
return PTR_ERR(chacha);
}
ctx->chacha = chacha;
ctx->poly = poly;
ctx->saltlen = ictx->saltlen;
align = crypto_aead_alignmask(tfm);
align &= ~(crypto_tfm_ctx_alignment() - 1);
crypto_aead_set_reqsize(
tfm,
align + offsetof(struct chachapoly_req_ctx, u) +
max(offsetof(struct chacha_req, req) +
sizeof(struct skcipher_request) +
crypto_skcipher_reqsize(chacha),
offsetof(struct poly_req, req) +
sizeof(struct ahash_request) +
crypto_ahash_reqsize(poly)));
return 0;
}
static void chachapoly_exit(struct crypto_aead *tfm)
{
struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_ahash(ctx->poly);
crypto_free_skcipher(ctx->chacha);
}
static void chachapoly_free(struct aead_instance *inst)
{
struct chachapoly_instance_ctx *ctx = aead_instance_ctx(inst);
crypto_drop_skcipher(&ctx->chacha);
crypto_drop_ahash(&ctx->poly);
kfree(inst);
}
static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
const char *name, unsigned int ivsize)
{
u32 mask;
struct aead_instance *inst;
struct chachapoly_instance_ctx *ctx;
struct skcipher_alg *chacha;
struct hash_alg_common *poly;
int err;
if (ivsize > CHACHAPOLY_IV_SIZE)
return -EINVAL;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
ctx = aead_instance_ctx(inst);
ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
err = crypto_grab_skcipher(&ctx->chacha, aead_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
chacha = crypto_spawn_skcipher_alg(&ctx->chacha);
err = crypto_grab_ahash(&ctx->poly, aead_crypto_instance(inst),
crypto_attr_alg_name(tb[2]), 0, mask);
if (err)
goto err_free_inst;
poly = crypto_spawn_ahash_alg(&ctx->poly);
err = -EINVAL;
if (poly->digestsize != POLY1305_DIGEST_SIZE)
goto err_free_inst;
/* Need 16-byte IV size, including Initial Block Counter value */
if (crypto_skcipher_alg_ivsize(chacha) != CHACHA_IV_SIZE)
goto err_free_inst;
/* Not a stream cipher? */
if (chacha->base.cra_blocksize != 1)
goto err_free_inst;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"%s(%s,%s)", name, chacha->base.cra_name,
poly->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"%s(%s,%s)", name, chacha->base.cra_driver_name,
poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
inst->alg.base.cra_priority = (chacha->base.cra_priority +
poly->base.cra_priority) / 2;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = chacha->base.cra_alignmask |
poly->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) +
ctx->saltlen;
inst->alg.ivsize = ivsize;
inst->alg.chunksize = crypto_skcipher_alg_chunksize(chacha);
inst->alg.maxauthsize = POLY1305_DIGEST_SIZE;
inst->alg.init = chachapoly_init;
inst->alg.exit = chachapoly_exit;
inst->alg.encrypt = chachapoly_encrypt;
inst->alg.decrypt = chachapoly_decrypt;
inst->alg.setkey = chachapoly_setkey;
inst->alg.setauthsize = chachapoly_setauthsize;
inst->free = chachapoly_free;
err = aead_register_instance(tmpl, inst);
if (err) {
err_free_inst:
chachapoly_free(inst);
}
return err;
}
static int rfc7539_create(struct crypto_template *tmpl, struct rtattr **tb)
{
return chachapoly_create(tmpl, tb, "rfc7539", 12);
}
static int rfc7539esp_create(struct crypto_template *tmpl, struct rtattr **tb)
{
return chachapoly_create(tmpl, tb, "rfc7539esp", 8);
}
static struct crypto_template rfc7539_tmpls[] = {
{
.name = "rfc7539",
.create = rfc7539_create,
.module = THIS_MODULE,
}, {
.name = "rfc7539esp",
.create = rfc7539esp_create,
.module = THIS_MODULE,
},
};
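/*
 * Allocation sketch (illustrative): with these templates registered, an
 * AEAD user instantiates the construction as e.g.
 *
 *	aead = crypto_alloc_aead("rfc7539(chacha20,poly1305)", 0, 0);
 */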
static int __init chacha20poly1305_module_init(void)
{
return crypto_register_templates(rfc7539_tmpls,
ARRAY_SIZE(rfc7539_tmpls));
}
static void __exit chacha20poly1305_module_exit(void)
{
crypto_unregister_templates(rfc7539_tmpls,
ARRAY_SIZE(rfc7539_tmpls));
}
subsys_initcall(chacha20poly1305_module_init);
module_exit(chacha20poly1305_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <[email protected]>");
MODULE_DESCRIPTION("ChaCha20-Poly1305 AEAD");
MODULE_ALIAS_CRYPTO("rfc7539");
MODULE_ALIAS_CRYPTO("rfc7539esp");
| linux-master | crypto/chacha20poly1305.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* FIPS 200 support.
*
* Copyright (c) 2008 Neil Horman <[email protected]>
*/
#include <linux/export.h>
#include <linux/fips.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <linux/notifier.h>
#include <generated/utsrelease.h>
int fips_enabled;
EXPORT_SYMBOL_GPL(fips_enabled);
ATOMIC_NOTIFIER_HEAD(fips_fail_notif_chain);
EXPORT_SYMBOL_GPL(fips_fail_notif_chain);
/* Process kernel command-line parameter at boot time. fips=0 or fips=1 */
static int fips_enable(char *str)
{
fips_enabled = !!simple_strtol(str, NULL, 0);
printk(KERN_INFO "fips mode: %s\n",
fips_enabled ? "enabled" : "disabled");
return 1;
}
__setup("fips=", fips_enable);
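/*
 * Example: booting with "fips=1" on the kernel command line sets
 * fips_enabled to 1 and logs "fips mode: enabled".
 */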
#define FIPS_MODULE_NAME CONFIG_CRYPTO_FIPS_NAME
#ifdef CONFIG_CRYPTO_FIPS_CUSTOM_VERSION
#define FIPS_MODULE_VERSION CONFIG_CRYPTO_FIPS_VERSION
#else
#define FIPS_MODULE_VERSION UTS_RELEASE
#endif
static char fips_name[] = FIPS_MODULE_NAME;
static char fips_version[] = FIPS_MODULE_VERSION;
static struct ctl_table crypto_sysctl_table[] = {
{
.procname = "fips_enabled",
.data = &fips_enabled,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = proc_dointvec
},
{
.procname = "fips_name",
.data = &fips_name,
.maxlen = 64,
.mode = 0444,
.proc_handler = proc_dostring
},
{
.procname = "fips_version",
.data = &fips_version,
.maxlen = 64,
.mode = 0444,
.proc_handler = proc_dostring
},
{}
};
static struct ctl_table_header *crypto_sysctls;
static void crypto_proc_fips_init(void)
{
crypto_sysctls = register_sysctl("crypto", crypto_sysctl_table);
}
static void crypto_proc_fips_exit(void)
{
unregister_sysctl_table(crypto_sysctls);
}
void fips_fail_notify(void)
{
if (fips_enabled)
atomic_notifier_call_chain(&fips_fail_notif_chain, 0, NULL);
}
EXPORT_SYMBOL_GPL(fips_fail_notify);
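/*
 * Subscription sketch (illustrative; the "foo" symbols are hypothetical):
 *
 *	static int foo_fips_fail(struct notifier_block *nb,
 *				 unsigned long action, void *data)
 *	{
 *		...
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_fips_fail,
 *	};
 *
 *	atomic_notifier_chain_register(&fips_fail_notif_chain, &foo_nb);
 */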
static int __init fips_init(void)
{
crypto_proc_fips_init();
return 0;
}
static void __exit fips_exit(void)
{
crypto_proc_fips_exit();
}
subsys_initcall(fips_init);
module_exit(fips_exit);
| linux-master | crypto/fips.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* SHA-3, as specified in
* https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
*
* SHA-3 code by Jeff Garzik <[email protected]>
* Ard Biesheuvel <[email protected]>
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <crypto/sha3.h>
#include <asm/unaligned.h>
/*
* On some 32-bit architectures (h8300), GCC ends up using
* over 1 KB of stack if we inline the round calculation into the loop
* in keccakf(). On the other hand, on 64-bit architectures with plenty
* of [64-bit wide] general purpose registers, not inlining it severely
* hurts performance. So let's use 64-bitness as a heuristic to decide
* whether to inline or not.
*/
#ifdef CONFIG_64BIT
#define SHA3_INLINE inline
#else
#define SHA3_INLINE noinline
#endif
#define KECCAK_ROUNDS 24
static const u64 keccakf_rndc[24] = {
0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL,
0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL,
0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008aULL,
0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL,
0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL,
0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL,
0x000000000000800aULL, 0x800000008000000aULL, 0x8000000080008081ULL,
0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
};
/* one Keccak round (Theta, Rho, Pi, Chi); the Iota step is applied by the caller */
static SHA3_INLINE void keccakf_round(u64 st[25])
{
u64 t[5], tt, bc[5];
/* Theta */
bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20];
bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21];
bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22];
bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23];
bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24];
t[0] = bc[4] ^ rol64(bc[1], 1);
t[1] = bc[0] ^ rol64(bc[2], 1);
t[2] = bc[1] ^ rol64(bc[3], 1);
t[3] = bc[2] ^ rol64(bc[4], 1);
t[4] = bc[3] ^ rol64(bc[0], 1);
st[0] ^= t[0];
/* Rho Pi */
tt = st[1];
st[ 1] = rol64(st[ 6] ^ t[1], 44);
st[ 6] = rol64(st[ 9] ^ t[4], 20);
st[ 9] = rol64(st[22] ^ t[2], 61);
st[22] = rol64(st[14] ^ t[4], 39);
st[14] = rol64(st[20] ^ t[0], 18);
st[20] = rol64(st[ 2] ^ t[2], 62);
st[ 2] = rol64(st[12] ^ t[2], 43);
st[12] = rol64(st[13] ^ t[3], 25);
st[13] = rol64(st[19] ^ t[4], 8);
st[19] = rol64(st[23] ^ t[3], 56);
st[23] = rol64(st[15] ^ t[0], 41);
st[15] = rol64(st[ 4] ^ t[4], 27);
st[ 4] = rol64(st[24] ^ t[4], 14);
st[24] = rol64(st[21] ^ t[1], 2);
st[21] = rol64(st[ 8] ^ t[3], 55);
st[ 8] = rol64(st[16] ^ t[1], 45);
st[16] = rol64(st[ 5] ^ t[0], 36);
st[ 5] = rol64(st[ 3] ^ t[3], 28);
st[ 3] = rol64(st[18] ^ t[3], 21);
st[18] = rol64(st[17] ^ t[2], 15);
st[17] = rol64(st[11] ^ t[1], 10);
st[11] = rol64(st[ 7] ^ t[2], 6);
st[ 7] = rol64(st[10] ^ t[0], 3);
st[10] = rol64( tt ^ t[1], 1);
/* Chi */
bc[ 0] = ~st[ 1] & st[ 2];
bc[ 1] = ~st[ 2] & st[ 3];
bc[ 2] = ~st[ 3] & st[ 4];
bc[ 3] = ~st[ 4] & st[ 0];
bc[ 4] = ~st[ 0] & st[ 1];
st[ 0] ^= bc[ 0];
st[ 1] ^= bc[ 1];
st[ 2] ^= bc[ 2];
st[ 3] ^= bc[ 3];
st[ 4] ^= bc[ 4];
bc[ 0] = ~st[ 6] & st[ 7];
bc[ 1] = ~st[ 7] & st[ 8];
bc[ 2] = ~st[ 8] & st[ 9];
bc[ 3] = ~st[ 9] & st[ 5];
bc[ 4] = ~st[ 5] & st[ 6];
st[ 5] ^= bc[ 0];
st[ 6] ^= bc[ 1];
st[ 7] ^= bc[ 2];
st[ 8] ^= bc[ 3];
st[ 9] ^= bc[ 4];
bc[ 0] = ~st[11] & st[12];
bc[ 1] = ~st[12] & st[13];
bc[ 2] = ~st[13] & st[14];
bc[ 3] = ~st[14] & st[10];
bc[ 4] = ~st[10] & st[11];
st[10] ^= bc[ 0];
st[11] ^= bc[ 1];
st[12] ^= bc[ 2];
st[13] ^= bc[ 3];
st[14] ^= bc[ 4];
bc[ 0] = ~st[16] & st[17];
bc[ 1] = ~st[17] & st[18];
bc[ 2] = ~st[18] & st[19];
bc[ 3] = ~st[19] & st[15];
bc[ 4] = ~st[15] & st[16];
st[15] ^= bc[ 0];
st[16] ^= bc[ 1];
st[17] ^= bc[ 2];
st[18] ^= bc[ 3];
st[19] ^= bc[ 4];
bc[ 0] = ~st[21] & st[22];
bc[ 1] = ~st[22] & st[23];
bc[ 2] = ~st[23] & st[24];
bc[ 3] = ~st[24] & st[20];
bc[ 4] = ~st[20] & st[21];
st[20] ^= bc[ 0];
st[21] ^= bc[ 1];
st[22] ^= bc[ 2];
st[23] ^= bc[ 3];
st[24] ^= bc[ 4];
}
static void keccakf(u64 st[25])
{
int round;
for (round = 0; round < KECCAK_ROUNDS; round++) {
keccakf_round(st);
/* Iota */
st[0] ^= keccakf_rndc[round];
}
}
int crypto_sha3_init(struct shash_desc *desc)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
sctx->rsiz = 200 - 2 * digest_size;
sctx->rsizw = sctx->rsiz / 8;
sctx->partial = 0;
memset(sctx->st, 0, sizeof(sctx->st));
return 0;
}
EXPORT_SYMBOL(crypto_sha3_init);
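/*
 * A standalone userspace sketch (not part of this file, and only an
 * illustration) of the sponge geometry set up above: Keccak-f[1600] has a
 * 200-byte state, SHA-3 fixes the capacity at twice the digest size, and
 * the rate ("rsiz") is the remainder that update()/final() absorb per
 * permutation call.
 */
#include <stdio.h>

int main(void)
{
	static const unsigned int digest_bytes[] = { 28, 32, 48, 64 };
	unsigned int i;

	for (i = 0; i < 4; i++) {
		unsigned int rate = 200 - 2 * digest_bytes[i];

		/* e.g. SHA3-256: rate = 136 bytes, i.e. rsizw = 17 words */
		printf("SHA3-%u: rate %u bytes (%u words), capacity %u bytes\n",
		       8 * digest_bytes[i], rate, rate / 8,
		       2 * digest_bytes[i]);
	}
	return 0;
}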
int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
unsigned int done;
const u8 *src;
done = 0;
src = data;
if ((sctx->partial + len) > (sctx->rsiz - 1)) {
if (sctx->partial) {
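/*
 * Note: "done" is intentionally negative here, so that
 * "done + sctx->rsiz" below is the number of bytes borrowed
 * from @data to top up the buffered partial block.
 */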
done = -sctx->partial;
memcpy(sctx->buf + sctx->partial, data,
done + sctx->rsiz);
src = sctx->buf;
}
do {
unsigned int i;
for (i = 0; i < sctx->rsizw; i++)
sctx->st[i] ^= get_unaligned_le64(src + 8 * i);
keccakf(sctx->st);
done += sctx->rsiz;
src = data + done;
} while (done + (sctx->rsiz - 1) < len);
sctx->partial = 0;
}
memcpy(sctx->buf + sctx->partial, src, len - done);
sctx->partial += (len - done);
return 0;
}
EXPORT_SYMBOL(crypto_sha3_update);
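/*
 * Worked example of the buffering above: for SHA3-256 (rsiz = 136),
 * feeding 200 bytes into an empty state absorbs one full 136-byte block
 * through keccakf() and leaves the remaining 64 bytes in sctx->buf with
 * sctx->partial = 64; a later final() pads and absorbs them.
 */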
int crypto_sha3_final(struct shash_desc *desc, u8 *out)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
unsigned int i, inlen = sctx->partial;
unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
__le64 *digest = (__le64 *)out;
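	/* append the SHA-3 suffix bits "01" plus the first pad "1" bit: 0x06 */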
sctx->buf[inlen++] = 0x06;
memset(sctx->buf + inlen, 0, sctx->rsiz - inlen);
sctx->buf[sctx->rsiz - 1] |= 0x80;
for (i = 0; i < sctx->rsizw; i++)
sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i);
keccakf(sctx->st);
for (i = 0; i < digest_size / 8; i++)
put_unaligned_le64(sctx->st[i], digest++);
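	/* SHA3-224's 28-byte digest ends with a lone 32-bit word */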
if (digest_size & 4)
put_unaligned_le32(sctx->st[i], (__le32 *)digest);
memset(sctx, 0, sizeof(*sctx));
return 0;
}
EXPORT_SYMBOL(crypto_sha3_final);
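/*
 * Worked padding example: for SHA3-256 of the empty message, the single
 * absorbed block is buf[0] = 0x06, buf[1..134] = 0x00, buf[135] = 0x80,
 * i.e. the "01" suffix followed by the pad10*1 bits spanning the whole
 * 136-byte rate.
 */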
static struct shash_alg algs[] = { {
.digestsize = SHA3_224_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = crypto_sha3_update,
.final = crypto_sha3_final,
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-224",
.base.cra_driver_name = "sha3-224-generic",
.base.cra_blocksize = SHA3_224_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
.digestsize = SHA3_256_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = crypto_sha3_update,
.final = crypto_sha3_final,
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-256",
.base.cra_driver_name = "sha3-256-generic",
.base.cra_blocksize = SHA3_256_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
.digestsize = SHA3_384_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = crypto_sha3_update,
.final = crypto_sha3_final,
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-384",
.base.cra_driver_name = "sha3-384-generic",
.base.cra_blocksize = SHA3_384_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
.digestsize = SHA3_512_DIGEST_SIZE,
.init = crypto_sha3_init,
.update = crypto_sha3_update,
.final = crypto_sha3_final,
.descsize = sizeof(struct sha3_state),
.base.cra_name = "sha3-512",
.base.cra_driver_name = "sha3-512-generic",
.base.cra_blocksize = SHA3_512_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
} };
static int __init sha3_generic_mod_init(void)
{
return crypto_register_shashes(algs, ARRAY_SIZE(algs));
}
static void __exit sha3_generic_mod_fini(void)
{
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
subsys_initcall(sha3_generic_mod_init);
module_exit(sha3_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-3 Secure Hash Algorithm");
MODULE_ALIAS_CRYPTO("sha3-224");
MODULE_ALIAS_CRYPTO("sha3-224-generic");
MODULE_ALIAS_CRYPTO("sha3-256");
MODULE_ALIAS_CRYPTO("sha3-256-generic");
MODULE_ALIAS_CRYPTO("sha3-384");
MODULE_ALIAS_CRYPTO("sha3-384-generic");
MODULE_ALIAS_CRYPTO("sha3-512");
MODULE_ALIAS_CRYPTO("sha3-512-generic");
| linux-master | crypto/sha3_generic.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Elliptic Curve (Russian) Digital Signature Algorithm for Cryptographic API
*
* Copyright (c) 2019 Vitaly Chikunov <[email protected]>
*
* References:
* GOST 34.10-2018, GOST R 34.10-2012, RFC 7091, ISO/IEC 14888-3:2018.
*
* Historical references:
* GOST R 34.10-2001, RFC 4357, ISO/IEC 14888-3:2006/Amd 1:2010.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/module.h>
#include <linux/crypto.h>
#include <crypto/streebog.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/ecc.h>
#include <crypto/akcipher.h>
#include <linux/oid_registry.h>
#include <linux/scatterlist.h>
#include "ecrdsa_params.asn1.h"
#include "ecrdsa_pub_key.asn1.h"
#include "ecrdsa_defs.h"
#define ECRDSA_MAX_SIG_SIZE (2 * 512 / 8)
#define ECRDSA_MAX_DIGITS (512 / 64)
struct ecrdsa_ctx {
enum OID algo_oid; /* overall public key oid */
enum OID curve_oid; /* parameter */
enum OID digest_oid; /* parameter */
const struct ecc_curve *curve; /* curve from oid */
unsigned int digest_len; /* parameter (bytes) */
const char *digest; /* digest name from oid */
unsigned int key_len; /* @key length (bytes) */
const char *key; /* raw public key */
struct ecc_point pub_key;
u64 _pubp[2][ECRDSA_MAX_DIGITS]; /* point storage for @pub_key */
};
static const struct ecc_curve *get_curve_by_oid(enum OID oid)
{
switch (oid) {
case OID_gostCPSignA:
case OID_gostTC26Sign256B:
return &gost_cp256a;
case OID_gostCPSignB:
case OID_gostTC26Sign256C:
return &gost_cp256b;
case OID_gostCPSignC:
case OID_gostTC26Sign256D:
return &gost_cp256c;
case OID_gostTC26Sign512A:
return &gost_tc512a;
case OID_gostTC26Sign512B:
return &gost_tc512b;
/* The following two aren't implemented: */
case OID_gostTC26Sign256A:
case OID_gostTC26Sign512C:
default:
return NULL;
}
}
static int ecrdsa_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
unsigned char sig[ECRDSA_MAX_SIG_SIZE];
unsigned char digest[STREEBOG512_DIGEST_SIZE];
unsigned int ndigits = req->dst_len / sizeof(u64);
u64 r[ECRDSA_MAX_DIGITS]; /* witness (r) */
u64 _r[ECRDSA_MAX_DIGITS]; /* -r */
u64 s[ECRDSA_MAX_DIGITS]; /* second part of sig (s) */
u64 e[ECRDSA_MAX_DIGITS]; /* h \mod q */
u64 *v = e; /* e^{-1} \mod q */
u64 z1[ECRDSA_MAX_DIGITS];
u64 *z2 = _r;
struct ecc_point cc = ECC_POINT_INIT(s, e, ndigits); /* reuse s, e */
/*
 * The digest value, the digest algorithm, and the curve (modulus) must
 * all have the same length (256 or 512 bits); the public key and the
 * signature must be twice as long.
 */
if (!ctx->curve ||
!ctx->digest ||
!req->src ||
!ctx->pub_key.x ||
req->dst_len != ctx->digest_len ||
req->dst_len != ctx->curve->g.ndigits * sizeof(u64) ||
ctx->pub_key.ndigits != ctx->curve->g.ndigits ||
req->dst_len * 2 != req->src_len ||
WARN_ON(req->src_len > sizeof(sig)) ||
WARN_ON(req->dst_len > sizeof(digest)))
return -EBADMSG;
sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, req->src_len),
sig, req->src_len);
sg_pcopy_to_buffer(req->src,
sg_nents_for_len(req->src,
req->src_len + req->dst_len),
digest, req->dst_len, req->src_len);
vli_from_be64(s, sig, ndigits);
vli_from_be64(r, sig + ndigits * sizeof(u64), ndigits);
/* Step 1: verify that 0 < r < q, 0 < s < q */
if (vli_is_zero(r, ndigits) ||
vli_cmp(r, ctx->curve->n, ndigits) >= 0 ||
vli_is_zero(s, ndigits) ||
vli_cmp(s, ctx->curve->n, ndigits) >= 0)
return -EKEYREJECTED;
/* Step 2: calculate hash (h) of the message (passed as input) */
/* Step 3: calculate e = h \mod q */
vli_from_le64(e, digest, ndigits);
if (vli_cmp(e, ctx->curve->n, ndigits) >= 0)
vli_sub(e, e, ctx->curve->n, ndigits);
if (vli_is_zero(e, ndigits))
e[0] = 1;
/* Step 4: calculate v = e^{-1} \mod q */
vli_mod_inv(v, e, ctx->curve->n, ndigits);
/* Step 5: calculate z_1 = sv \mod q, z_2 = -rv \mod q */
vli_mod_mult_slow(z1, s, v, ctx->curve->n, ndigits);
vli_sub(_r, ctx->curve->n, r, ndigits);
vli_mod_mult_slow(z2, _r, v, ctx->curve->n, ndigits);
/* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */
ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key,
ctx->curve);
if (vli_cmp(cc.x, ctx->curve->n, ndigits) >= 0)
vli_sub(cc.x, cc.x, ctx->curve->n, ndigits);
/* Step 7: if R == r signature is valid */
if (!vli_cmp(cc.x, r, ndigits))
return 0;
else
return -EKEYREJECTED;
}
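/*
 * Why step 7 works (a sketch, following the GOST R 34.10-2012 signing
 * equation s = (r*d + k*e) mod q, with private key d, ephemeral scalar k,
 * and public key Q = d*P):
 *
 *	z1*P + z2*Q = (s*v)*P - (r*v)*(d*P)
 *	            = v*(s - r*d)*P
 *	            = v*(k*e)*P = k*P	(since v = e^{-1} mod q)
 *
 * so for a valid signature, C recomputes the signer's ephemeral point and
 * x_C mod q equals r.
 */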
int ecrdsa_param_curve(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct ecrdsa_ctx *ctx = context;
ctx->curve_oid = look_up_OID(value, vlen);
if (!ctx->curve_oid)
return -EINVAL;
ctx->curve = get_curve_by_oid(ctx->curve_oid);
return 0;
}
/* Optional. If present, it must match the expected digest algorithm OID. */
int ecrdsa_param_digest(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct ecrdsa_ctx *ctx = context;
int digest_oid = look_up_OID(value, vlen);
if (digest_oid != ctx->digest_oid)
return -EINVAL;
return 0;
}
int ecrdsa_parse_pub_key(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct ecrdsa_ctx *ctx = context;
ctx->key = value;
ctx->key_len = vlen;
return 0;
}
static u8 *ecrdsa_unpack_u32(u32 *dst, void *src)
{
memcpy(dst, src, sizeof(u32));
return src + sizeof(u32);
}
/* Parse BER encoded subjectPublicKey. */
static int ecrdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
unsigned int ndigits;
u32 algo, paramlen;
u8 *params;
int err;
err = asn1_ber_decoder(&ecrdsa_pub_key_decoder, ctx, key, keylen);
if (err < 0)
return err;
/* The key parameters are appended to the key, after keylen bytes. */
params = ecrdsa_unpack_u32(¶mlen,
ecrdsa_unpack_u32(&algo, (u8 *)key + keylen));
if (algo == OID_gost2012PKey256) {
ctx->digest = "streebog256";
ctx->digest_oid = OID_gost2012Digest256;
ctx->digest_len = 256 / 8;
} else if (algo == OID_gost2012PKey512) {
ctx->digest = "streebog512";
ctx->digest_oid = OID_gost2012Digest512;
ctx->digest_len = 512 / 8;
} else
return -ENOPKG;
ctx->algo_oid = algo;
/* Parse SubjectPublicKeyInfo.AlgorithmIdentifier.parameters. */
err = asn1_ber_decoder(&ecrdsa_params_decoder, ctx, params, paramlen);
if (err < 0)
return err;
/*
* Sizes of algo (set in digest_len) and curve should match
* each other.
*/
if (!ctx->curve ||
ctx->curve->g.ndigits * sizeof(u64) != ctx->digest_len)
return -ENOPKG;
/*
* Key is two 256- or 512-bit coordinates which should match
* curve size.
*/
if ((ctx->key_len != (2 * 256 / 8) &&
ctx->key_len != (2 * 512 / 8)) ||
ctx->key_len != ctx->curve->g.ndigits * sizeof(u64) * 2)
return -ENOPKG;
ndigits = ctx->key_len / sizeof(u64) / 2;
ctx->pub_key = ECC_POINT_INIT(ctx->_pubp[0], ctx->_pubp[1], ndigits);
vli_from_le64(ctx->pub_key.x, ctx->key, ndigits);
vli_from_le64(ctx->pub_key.y, ctx->key + ndigits * sizeof(u64),
ndigits);
if (ecc_is_pubkey_valid_partial(ctx->curve, &ctx->pub_key))
return -EKEYREJECTED;
return 0;
}
static unsigned int ecrdsa_max_size(struct crypto_akcipher *tfm)
{
struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
/*
* Verify doesn't need any output, so it's just informational
* for keyctl to determine the key bit size.
*/
return ctx->pub_key.ndigits * sizeof(u64);
}
static void ecrdsa_exit_tfm(struct crypto_akcipher *tfm)
{
}
static struct akcipher_alg ecrdsa_alg = {
.verify = ecrdsa_verify,
.set_pub_key = ecrdsa_set_pub_key,
.max_size = ecrdsa_max_size,
.exit = ecrdsa_exit_tfm,
.base = {
.cra_name = "ecrdsa",
.cra_driver_name = "ecrdsa-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ecrdsa_ctx),
},
};
static int __init ecrdsa_mod_init(void)
{
return crypto_register_akcipher(&ecrdsa_alg);
}
static void __exit ecrdsa_mod_fini(void)
{
crypto_unregister_akcipher(&ecrdsa_alg);
}
module_init(ecrdsa_mod_init);
module_exit(ecrdsa_mod_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Chikunov <[email protected]>");
MODULE_DESCRIPTION("EC-RDSA generic algorithm");
MODULE_ALIAS_CRYPTO("ecrdsa-generic");
| linux-master | crypto/ecrdsa.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/crc64.h>
#include <linux/module.h>
#include <crypto/internal/hash.h>
#include <asm/unaligned.h>
static int chksum_init(struct shash_desc *desc)
{
u64 *crc = shash_desc_ctx(desc);
*crc = 0;
return 0;
}
static int chksum_update(struct shash_desc *desc, const u8 *data,
unsigned int length)
{
u64 *crc = shash_desc_ctx(desc);
*crc = crc64_rocksoft_generic(*crc, data, length);
return 0;
}
static int chksum_final(struct shash_desc *desc, u8 *out)
{
u64 *crc = shash_desc_ctx(desc);
put_unaligned_le64(*crc, out);
return 0;
}
static int __chksum_finup(u64 crc, const u8 *data, unsigned int len, u8 *out)
{
crc = crc64_rocksoft_generic(crc, data, len);
put_unaligned_le64(crc, out);
return 0;
}
static int chksum_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
u64 *crc = shash_desc_ctx(desc);
return __chksum_finup(*crc, data, len, out);
}
static int chksum_digest(struct shash_desc *desc, const u8 *data,
unsigned int length, u8 *out)
{
return __chksum_finup(0, data, length, out);
}
static struct shash_alg alg = {
.digestsize = sizeof(u64),
.init = chksum_init,
.update = chksum_update,
.final = chksum_final,
.finup = chksum_finup,
.digest = chksum_digest,
.descsize = sizeof(u64),
.base = {
.cra_name = CRC64_ROCKSOFT_STRING,
.cra_driver_name = "crc64-rocksoft-generic",
.cra_priority = 200,
.cra_blocksize = 1,
.cra_module = THIS_MODULE,
}
};
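/*
 * A hedged usage sketch (kernel context assumed; not part of this file):
 * driving the algorithm above through the generic shash API.  The one-shot
 * crypto_shash_digest() call is equivalent to init + update + final.
 */
#include <crypto/hash.h>
#include <linux/err.h>

static int crc64_rocksoft_demo(const u8 *data, unsigned int len, u8 out[8])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("crc64-rocksoft", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		/* compute the CRC of the whole buffer in one call */
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return err;
}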
static int __init crc64_rocksoft_init(void)
{
return crypto_register_shash(&alg);
}
static void __exit crc64_rocksoft_exit(void)
{
crypto_unregister_shash(&alg);
}
module_init(crc64_rocksoft_init);
module_exit(crc64_rocksoft_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Rocksoft model CRC64 calculation.");
MODULE_ALIAS_CRYPTO("crc64-rocksoft");
MODULE_ALIAS_CRYPTO("crc64-rocksoft-generic");
| linux-master | crypto/crc64_rocksoft_generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2006
* NTT (Nippon Telegraph and Telephone Corporation).
*/
/*
* Algorithm Specification
* https://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html
*/
#include <crypto/algapi.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <asm/unaligned.h>
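/*
 * A reading of the tables below: each one fuses a Camellia s-box with the
 * P-function, replicating the s-box output into exactly the byte lanes
 * that P diffuses it to, so CAMELLIA_F() reduces to table lookups and
 * XORs.  The digits in the names give the lane pattern: camellia_sp1110
 * holds s1 in the top three bytes, camellia_sp0222 holds s2 (= rol8(s1, 1))
 * in the low three bytes, and so on for s3 and s4.
 */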
static const u32 camellia_sp1110[256] = {
0x70707000, 0x82828200, 0x2c2c2c00, 0xececec00,
0xb3b3b300, 0x27272700, 0xc0c0c000, 0xe5e5e500,
0xe4e4e400, 0x85858500, 0x57575700, 0x35353500,
0xeaeaea00, 0x0c0c0c00, 0xaeaeae00, 0x41414100,
0x23232300, 0xefefef00, 0x6b6b6b00, 0x93939300,
0x45454500, 0x19191900, 0xa5a5a500, 0x21212100,
0xededed00, 0x0e0e0e00, 0x4f4f4f00, 0x4e4e4e00,
0x1d1d1d00, 0x65656500, 0x92929200, 0xbdbdbd00,
0x86868600, 0xb8b8b800, 0xafafaf00, 0x8f8f8f00,
0x7c7c7c00, 0xebebeb00, 0x1f1f1f00, 0xcecece00,
0x3e3e3e00, 0x30303000, 0xdcdcdc00, 0x5f5f5f00,
0x5e5e5e00, 0xc5c5c500, 0x0b0b0b00, 0x1a1a1a00,
0xa6a6a600, 0xe1e1e100, 0x39393900, 0xcacaca00,
0xd5d5d500, 0x47474700, 0x5d5d5d00, 0x3d3d3d00,
0xd9d9d900, 0x01010100, 0x5a5a5a00, 0xd6d6d600,
0x51515100, 0x56565600, 0x6c6c6c00, 0x4d4d4d00,
0x8b8b8b00, 0x0d0d0d00, 0x9a9a9a00, 0x66666600,
0xfbfbfb00, 0xcccccc00, 0xb0b0b000, 0x2d2d2d00,
0x74747400, 0x12121200, 0x2b2b2b00, 0x20202000,
0xf0f0f000, 0xb1b1b100, 0x84848400, 0x99999900,
0xdfdfdf00, 0x4c4c4c00, 0xcbcbcb00, 0xc2c2c200,
0x34343400, 0x7e7e7e00, 0x76767600, 0x05050500,
0x6d6d6d00, 0xb7b7b700, 0xa9a9a900, 0x31313100,
0xd1d1d100, 0x17171700, 0x04040400, 0xd7d7d700,
0x14141400, 0x58585800, 0x3a3a3a00, 0x61616100,
0xdedede00, 0x1b1b1b00, 0x11111100, 0x1c1c1c00,
0x32323200, 0x0f0f0f00, 0x9c9c9c00, 0x16161600,
0x53535300, 0x18181800, 0xf2f2f200, 0x22222200,
0xfefefe00, 0x44444400, 0xcfcfcf00, 0xb2b2b200,
0xc3c3c300, 0xb5b5b500, 0x7a7a7a00, 0x91919100,
0x24242400, 0x08080800, 0xe8e8e800, 0xa8a8a800,
0x60606000, 0xfcfcfc00, 0x69696900, 0x50505000,
0xaaaaaa00, 0xd0d0d000, 0xa0a0a000, 0x7d7d7d00,
0xa1a1a100, 0x89898900, 0x62626200, 0x97979700,
0x54545400, 0x5b5b5b00, 0x1e1e1e00, 0x95959500,
0xe0e0e000, 0xffffff00, 0x64646400, 0xd2d2d200,
0x10101000, 0xc4c4c400, 0x00000000, 0x48484800,
0xa3a3a300, 0xf7f7f700, 0x75757500, 0xdbdbdb00,
0x8a8a8a00, 0x03030300, 0xe6e6e600, 0xdadada00,
0x09090900, 0x3f3f3f00, 0xdddddd00, 0x94949400,
0x87878700, 0x5c5c5c00, 0x83838300, 0x02020200,
0xcdcdcd00, 0x4a4a4a00, 0x90909000, 0x33333300,
0x73737300, 0x67676700, 0xf6f6f600, 0xf3f3f300,
0x9d9d9d00, 0x7f7f7f00, 0xbfbfbf00, 0xe2e2e200,
0x52525200, 0x9b9b9b00, 0xd8d8d800, 0x26262600,
0xc8c8c800, 0x37373700, 0xc6c6c600, 0x3b3b3b00,
0x81818100, 0x96969600, 0x6f6f6f00, 0x4b4b4b00,
0x13131300, 0xbebebe00, 0x63636300, 0x2e2e2e00,
0xe9e9e900, 0x79797900, 0xa7a7a700, 0x8c8c8c00,
0x9f9f9f00, 0x6e6e6e00, 0xbcbcbc00, 0x8e8e8e00,
0x29292900, 0xf5f5f500, 0xf9f9f900, 0xb6b6b600,
0x2f2f2f00, 0xfdfdfd00, 0xb4b4b400, 0x59595900,
0x78787800, 0x98989800, 0x06060600, 0x6a6a6a00,
0xe7e7e700, 0x46464600, 0x71717100, 0xbababa00,
0xd4d4d400, 0x25252500, 0xababab00, 0x42424200,
0x88888800, 0xa2a2a200, 0x8d8d8d00, 0xfafafa00,
0x72727200, 0x07070700, 0xb9b9b900, 0x55555500,
0xf8f8f800, 0xeeeeee00, 0xacacac00, 0x0a0a0a00,
0x36363600, 0x49494900, 0x2a2a2a00, 0x68686800,
0x3c3c3c00, 0x38383800, 0xf1f1f100, 0xa4a4a400,
0x40404000, 0x28282800, 0xd3d3d300, 0x7b7b7b00,
0xbbbbbb00, 0xc9c9c900, 0x43434300, 0xc1c1c100,
0x15151500, 0xe3e3e300, 0xadadad00, 0xf4f4f400,
0x77777700, 0xc7c7c700, 0x80808000, 0x9e9e9e00,
};
static const u32 camellia_sp0222[256] = {
0x00e0e0e0, 0x00050505, 0x00585858, 0x00d9d9d9,
0x00676767, 0x004e4e4e, 0x00818181, 0x00cbcbcb,
0x00c9c9c9, 0x000b0b0b, 0x00aeaeae, 0x006a6a6a,
0x00d5d5d5, 0x00181818, 0x005d5d5d, 0x00828282,
0x00464646, 0x00dfdfdf, 0x00d6d6d6, 0x00272727,
0x008a8a8a, 0x00323232, 0x004b4b4b, 0x00424242,
0x00dbdbdb, 0x001c1c1c, 0x009e9e9e, 0x009c9c9c,
0x003a3a3a, 0x00cacaca, 0x00252525, 0x007b7b7b,
0x000d0d0d, 0x00717171, 0x005f5f5f, 0x001f1f1f,
0x00f8f8f8, 0x00d7d7d7, 0x003e3e3e, 0x009d9d9d,
0x007c7c7c, 0x00606060, 0x00b9b9b9, 0x00bebebe,
0x00bcbcbc, 0x008b8b8b, 0x00161616, 0x00343434,
0x004d4d4d, 0x00c3c3c3, 0x00727272, 0x00959595,
0x00ababab, 0x008e8e8e, 0x00bababa, 0x007a7a7a,
0x00b3b3b3, 0x00020202, 0x00b4b4b4, 0x00adadad,
0x00a2a2a2, 0x00acacac, 0x00d8d8d8, 0x009a9a9a,
0x00171717, 0x001a1a1a, 0x00353535, 0x00cccccc,
0x00f7f7f7, 0x00999999, 0x00616161, 0x005a5a5a,
0x00e8e8e8, 0x00242424, 0x00565656, 0x00404040,
0x00e1e1e1, 0x00636363, 0x00090909, 0x00333333,
0x00bfbfbf, 0x00989898, 0x00979797, 0x00858585,
0x00686868, 0x00fcfcfc, 0x00ececec, 0x000a0a0a,
0x00dadada, 0x006f6f6f, 0x00535353, 0x00626262,
0x00a3a3a3, 0x002e2e2e, 0x00080808, 0x00afafaf,
0x00282828, 0x00b0b0b0, 0x00747474, 0x00c2c2c2,
0x00bdbdbd, 0x00363636, 0x00222222, 0x00383838,
0x00646464, 0x001e1e1e, 0x00393939, 0x002c2c2c,
0x00a6a6a6, 0x00303030, 0x00e5e5e5, 0x00444444,
0x00fdfdfd, 0x00888888, 0x009f9f9f, 0x00656565,
0x00878787, 0x006b6b6b, 0x00f4f4f4, 0x00232323,
0x00484848, 0x00101010, 0x00d1d1d1, 0x00515151,
0x00c0c0c0, 0x00f9f9f9, 0x00d2d2d2, 0x00a0a0a0,
0x00555555, 0x00a1a1a1, 0x00414141, 0x00fafafa,
0x00434343, 0x00131313, 0x00c4c4c4, 0x002f2f2f,
0x00a8a8a8, 0x00b6b6b6, 0x003c3c3c, 0x002b2b2b,
0x00c1c1c1, 0x00ffffff, 0x00c8c8c8, 0x00a5a5a5,
0x00202020, 0x00898989, 0x00000000, 0x00909090,
0x00474747, 0x00efefef, 0x00eaeaea, 0x00b7b7b7,
0x00151515, 0x00060606, 0x00cdcdcd, 0x00b5b5b5,
0x00121212, 0x007e7e7e, 0x00bbbbbb, 0x00292929,
0x000f0f0f, 0x00b8b8b8, 0x00070707, 0x00040404,
0x009b9b9b, 0x00949494, 0x00212121, 0x00666666,
0x00e6e6e6, 0x00cecece, 0x00ededed, 0x00e7e7e7,
0x003b3b3b, 0x00fefefe, 0x007f7f7f, 0x00c5c5c5,
0x00a4a4a4, 0x00373737, 0x00b1b1b1, 0x004c4c4c,
0x00919191, 0x006e6e6e, 0x008d8d8d, 0x00767676,
0x00030303, 0x002d2d2d, 0x00dedede, 0x00969696,
0x00262626, 0x007d7d7d, 0x00c6c6c6, 0x005c5c5c,
0x00d3d3d3, 0x00f2f2f2, 0x004f4f4f, 0x00191919,
0x003f3f3f, 0x00dcdcdc, 0x00797979, 0x001d1d1d,
0x00525252, 0x00ebebeb, 0x00f3f3f3, 0x006d6d6d,
0x005e5e5e, 0x00fbfbfb, 0x00696969, 0x00b2b2b2,
0x00f0f0f0, 0x00313131, 0x000c0c0c, 0x00d4d4d4,
0x00cfcfcf, 0x008c8c8c, 0x00e2e2e2, 0x00757575,
0x00a9a9a9, 0x004a4a4a, 0x00575757, 0x00848484,
0x00111111, 0x00454545, 0x001b1b1b, 0x00f5f5f5,
0x00e4e4e4, 0x000e0e0e, 0x00737373, 0x00aaaaaa,
0x00f1f1f1, 0x00dddddd, 0x00595959, 0x00141414,
0x006c6c6c, 0x00929292, 0x00545454, 0x00d0d0d0,
0x00787878, 0x00707070, 0x00e3e3e3, 0x00494949,
0x00808080, 0x00505050, 0x00a7a7a7, 0x00f6f6f6,
0x00777777, 0x00939393, 0x00868686, 0x00838383,
0x002a2a2a, 0x00c7c7c7, 0x005b5b5b, 0x00e9e9e9,
0x00eeeeee, 0x008f8f8f, 0x00010101, 0x003d3d3d,
};
static const u32 camellia_sp3033[256] = {
0x38003838, 0x41004141, 0x16001616, 0x76007676,
0xd900d9d9, 0x93009393, 0x60006060, 0xf200f2f2,
0x72007272, 0xc200c2c2, 0xab00abab, 0x9a009a9a,
0x75007575, 0x06000606, 0x57005757, 0xa000a0a0,
0x91009191, 0xf700f7f7, 0xb500b5b5, 0xc900c9c9,
0xa200a2a2, 0x8c008c8c, 0xd200d2d2, 0x90009090,
0xf600f6f6, 0x07000707, 0xa700a7a7, 0x27002727,
0x8e008e8e, 0xb200b2b2, 0x49004949, 0xde00dede,
0x43004343, 0x5c005c5c, 0xd700d7d7, 0xc700c7c7,
0x3e003e3e, 0xf500f5f5, 0x8f008f8f, 0x67006767,
0x1f001f1f, 0x18001818, 0x6e006e6e, 0xaf00afaf,
0x2f002f2f, 0xe200e2e2, 0x85008585, 0x0d000d0d,
0x53005353, 0xf000f0f0, 0x9c009c9c, 0x65006565,
0xea00eaea, 0xa300a3a3, 0xae00aeae, 0x9e009e9e,
0xec00ecec, 0x80008080, 0x2d002d2d, 0x6b006b6b,
0xa800a8a8, 0x2b002b2b, 0x36003636, 0xa600a6a6,
0xc500c5c5, 0x86008686, 0x4d004d4d, 0x33003333,
0xfd00fdfd, 0x66006666, 0x58005858, 0x96009696,
0x3a003a3a, 0x09000909, 0x95009595, 0x10001010,
0x78007878, 0xd800d8d8, 0x42004242, 0xcc00cccc,
0xef00efef, 0x26002626, 0xe500e5e5, 0x61006161,
0x1a001a1a, 0x3f003f3f, 0x3b003b3b, 0x82008282,
0xb600b6b6, 0xdb00dbdb, 0xd400d4d4, 0x98009898,
0xe800e8e8, 0x8b008b8b, 0x02000202, 0xeb00ebeb,
0x0a000a0a, 0x2c002c2c, 0x1d001d1d, 0xb000b0b0,
0x6f006f6f, 0x8d008d8d, 0x88008888, 0x0e000e0e,
0x19001919, 0x87008787, 0x4e004e4e, 0x0b000b0b,
0xa900a9a9, 0x0c000c0c, 0x79007979, 0x11001111,
0x7f007f7f, 0x22002222, 0xe700e7e7, 0x59005959,
0xe100e1e1, 0xda00dada, 0x3d003d3d, 0xc800c8c8,
0x12001212, 0x04000404, 0x74007474, 0x54005454,
0x30003030, 0x7e007e7e, 0xb400b4b4, 0x28002828,
0x55005555, 0x68006868, 0x50005050, 0xbe00bebe,
0xd000d0d0, 0xc400c4c4, 0x31003131, 0xcb00cbcb,
0x2a002a2a, 0xad00adad, 0x0f000f0f, 0xca00caca,
0x70007070, 0xff00ffff, 0x32003232, 0x69006969,
0x08000808, 0x62006262, 0x00000000, 0x24002424,
0xd100d1d1, 0xfb00fbfb, 0xba00baba, 0xed00eded,
0x45004545, 0x81008181, 0x73007373, 0x6d006d6d,
0x84008484, 0x9f009f9f, 0xee00eeee, 0x4a004a4a,
0xc300c3c3, 0x2e002e2e, 0xc100c1c1, 0x01000101,
0xe600e6e6, 0x25002525, 0x48004848, 0x99009999,
0xb900b9b9, 0xb300b3b3, 0x7b007b7b, 0xf900f9f9,
0xce00cece, 0xbf00bfbf, 0xdf00dfdf, 0x71007171,
0x29002929, 0xcd00cdcd, 0x6c006c6c, 0x13001313,
0x64006464, 0x9b009b9b, 0x63006363, 0x9d009d9d,
0xc000c0c0, 0x4b004b4b, 0xb700b7b7, 0xa500a5a5,
0x89008989, 0x5f005f5f, 0xb100b1b1, 0x17001717,
0xf400f4f4, 0xbc00bcbc, 0xd300d3d3, 0x46004646,
0xcf00cfcf, 0x37003737, 0x5e005e5e, 0x47004747,
0x94009494, 0xfa00fafa, 0xfc00fcfc, 0x5b005b5b,
0x97009797, 0xfe00fefe, 0x5a005a5a, 0xac00acac,
0x3c003c3c, 0x4c004c4c, 0x03000303, 0x35003535,
0xf300f3f3, 0x23002323, 0xb800b8b8, 0x5d005d5d,
0x6a006a6a, 0x92009292, 0xd500d5d5, 0x21002121,
0x44004444, 0x51005151, 0xc600c6c6, 0x7d007d7d,
0x39003939, 0x83008383, 0xdc00dcdc, 0xaa00aaaa,
0x7c007c7c, 0x77007777, 0x56005656, 0x05000505,
0x1b001b1b, 0xa400a4a4, 0x15001515, 0x34003434,
0x1e001e1e, 0x1c001c1c, 0xf800f8f8, 0x52005252,
0x20002020, 0x14001414, 0xe900e9e9, 0xbd00bdbd,
0xdd00dddd, 0xe400e4e4, 0xa100a1a1, 0xe000e0e0,
0x8a008a8a, 0xf100f1f1, 0xd600d6d6, 0x7a007a7a,
0xbb00bbbb, 0xe300e3e3, 0x40004040, 0x4f004f4f,
};
static const u32 camellia_sp4404[256] = {
0x70700070, 0x2c2c002c, 0xb3b300b3, 0xc0c000c0,
0xe4e400e4, 0x57570057, 0xeaea00ea, 0xaeae00ae,
0x23230023, 0x6b6b006b, 0x45450045, 0xa5a500a5,
0xeded00ed, 0x4f4f004f, 0x1d1d001d, 0x92920092,
0x86860086, 0xafaf00af, 0x7c7c007c, 0x1f1f001f,
0x3e3e003e, 0xdcdc00dc, 0x5e5e005e, 0x0b0b000b,
0xa6a600a6, 0x39390039, 0xd5d500d5, 0x5d5d005d,
0xd9d900d9, 0x5a5a005a, 0x51510051, 0x6c6c006c,
0x8b8b008b, 0x9a9a009a, 0xfbfb00fb, 0xb0b000b0,
0x74740074, 0x2b2b002b, 0xf0f000f0, 0x84840084,
0xdfdf00df, 0xcbcb00cb, 0x34340034, 0x76760076,
0x6d6d006d, 0xa9a900a9, 0xd1d100d1, 0x04040004,
0x14140014, 0x3a3a003a, 0xdede00de, 0x11110011,
0x32320032, 0x9c9c009c, 0x53530053, 0xf2f200f2,
0xfefe00fe, 0xcfcf00cf, 0xc3c300c3, 0x7a7a007a,
0x24240024, 0xe8e800e8, 0x60600060, 0x69690069,
0xaaaa00aa, 0xa0a000a0, 0xa1a100a1, 0x62620062,
0x54540054, 0x1e1e001e, 0xe0e000e0, 0x64640064,
0x10100010, 0x00000000, 0xa3a300a3, 0x75750075,
0x8a8a008a, 0xe6e600e6, 0x09090009, 0xdddd00dd,
0x87870087, 0x83830083, 0xcdcd00cd, 0x90900090,
0x73730073, 0xf6f600f6, 0x9d9d009d, 0xbfbf00bf,
0x52520052, 0xd8d800d8, 0xc8c800c8, 0xc6c600c6,
0x81810081, 0x6f6f006f, 0x13130013, 0x63630063,
0xe9e900e9, 0xa7a700a7, 0x9f9f009f, 0xbcbc00bc,
0x29290029, 0xf9f900f9, 0x2f2f002f, 0xb4b400b4,
0x78780078, 0x06060006, 0xe7e700e7, 0x71710071,
0xd4d400d4, 0xabab00ab, 0x88880088, 0x8d8d008d,
0x72720072, 0xb9b900b9, 0xf8f800f8, 0xacac00ac,
0x36360036, 0x2a2a002a, 0x3c3c003c, 0xf1f100f1,
0x40400040, 0xd3d300d3, 0xbbbb00bb, 0x43430043,
0x15150015, 0xadad00ad, 0x77770077, 0x80800080,
0x82820082, 0xecec00ec, 0x27270027, 0xe5e500e5,
0x85850085, 0x35350035, 0x0c0c000c, 0x41410041,
0xefef00ef, 0x93930093, 0x19190019, 0x21210021,
0x0e0e000e, 0x4e4e004e, 0x65650065, 0xbdbd00bd,
0xb8b800b8, 0x8f8f008f, 0xebeb00eb, 0xcece00ce,
0x30300030, 0x5f5f005f, 0xc5c500c5, 0x1a1a001a,
0xe1e100e1, 0xcaca00ca, 0x47470047, 0x3d3d003d,
0x01010001, 0xd6d600d6, 0x56560056, 0x4d4d004d,
0x0d0d000d, 0x66660066, 0xcccc00cc, 0x2d2d002d,
0x12120012, 0x20200020, 0xb1b100b1, 0x99990099,
0x4c4c004c, 0xc2c200c2, 0x7e7e007e, 0x05050005,
0xb7b700b7, 0x31310031, 0x17170017, 0xd7d700d7,
0x58580058, 0x61610061, 0x1b1b001b, 0x1c1c001c,
0x0f0f000f, 0x16160016, 0x18180018, 0x22220022,
0x44440044, 0xb2b200b2, 0xb5b500b5, 0x91910091,
0x08080008, 0xa8a800a8, 0xfcfc00fc, 0x50500050,
0xd0d000d0, 0x7d7d007d, 0x89890089, 0x97970097,
0x5b5b005b, 0x95950095, 0xffff00ff, 0xd2d200d2,
0xc4c400c4, 0x48480048, 0xf7f700f7, 0xdbdb00db,
0x03030003, 0xdada00da, 0x3f3f003f, 0x94940094,
0x5c5c005c, 0x02020002, 0x4a4a004a, 0x33330033,
0x67670067, 0xf3f300f3, 0x7f7f007f, 0xe2e200e2,
0x9b9b009b, 0x26260026, 0x37370037, 0x3b3b003b,
0x96960096, 0x4b4b004b, 0xbebe00be, 0x2e2e002e,
0x79790079, 0x8c8c008c, 0x6e6e006e, 0x8e8e008e,
0xf5f500f5, 0xb6b600b6, 0xfdfd00fd, 0x59590059,
0x98980098, 0x6a6a006a, 0x46460046, 0xbaba00ba,
0x25250025, 0x42420042, 0xa2a200a2, 0xfafa00fa,
0x07070007, 0x55550055, 0xeeee00ee, 0x0a0a000a,
0x49490049, 0x68680068, 0x38380038, 0xa4a400a4,
0x28280028, 0x7b7b007b, 0xc9c900c9, 0xc1c100c1,
0xe3e300e3, 0xf4f400f4, 0xc7c700c7, 0x9e9e009e,
};
#define CAMELLIA_MIN_KEY_SIZE 16
#define CAMELLIA_MAX_KEY_SIZE 32
#define CAMELLIA_BLOCK_SIZE 16
#define CAMELLIA_TABLE_BYTE_LEN 272
/*
* NB: L and R below stand for 'left' and 'right' as in written numbers.
* That is, in (xxxL,xxxR) pair xxxL holds most significant digits,
* _not_ least significant ones!
*/
/* key constants */
#define CAMELLIA_SIGMA1L (0xA09E667FL)
#define CAMELLIA_SIGMA1R (0x3BCC908BL)
#define CAMELLIA_SIGMA2L (0xB67AE858L)
#define CAMELLIA_SIGMA2R (0x4CAA73B2L)
#define CAMELLIA_SIGMA3L (0xC6EF372FL)
#define CAMELLIA_SIGMA3R (0xE94F82BEL)
#define CAMELLIA_SIGMA4L (0x54FF53A5L)
#define CAMELLIA_SIGMA4R (0xF1D36F1CL)
#define CAMELLIA_SIGMA5L (0x10E527FAL)
#define CAMELLIA_SIGMA5R (0xDE682D1DL)
#define CAMELLIA_SIGMA6L (0xB05688C2L)
#define CAMELLIA_SIGMA6R (0xB3E6C1FDL)
/*
* macros
*/
#define ROLDQ(ll, lr, rl, rr, w0, w1, bits) ({ \
w0 = ll; \
ll = (ll << bits) + (lr >> (32 - bits)); \
lr = (lr << bits) + (rl >> (32 - bits)); \
rl = (rl << bits) + (rr >> (32 - bits)); \
rr = (rr << bits) + (w0 >> (32 - bits)); \
})
#define ROLDQo32(ll, lr, rl, rr, w0, w1, bits) ({ \
w0 = ll; \
w1 = lr; \
ll = (lr << (bits - 32)) + (rl >> (64 - bits)); \
lr = (rl << (bits - 32)) + (rr >> (64 - bits)); \
rl = (rr << (bits - 32)) + (w0 >> (64 - bits)); \
rr = (w0 << (bits - 32)) + (w1 >> (64 - bits)); \
})
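/*
 * ROLDQ rotates the 128-bit value (ll || lr || rl || rr) left by "bits"
 * (0 < bits < 32); ROLDQo32 covers rotations of 32 < bits < 64 by first
 * shifting the words over by one position.  Both are used to derive the
 * per-round subkeys from KL/KR/KA/KB.
 */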
#define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) ({ \
il = xl ^ kl; \
ir = xr ^ kr; \
t0 = il >> 16; \
t1 = ir >> 16; \
yl = camellia_sp1110[(u8)(ir)] \
^ camellia_sp0222[(u8)(t1 >> 8)] \
^ camellia_sp3033[(u8)(t1)] \
^ camellia_sp4404[(u8)(ir >> 8)]; \
yr = camellia_sp1110[(u8)(t0 >> 8)] \
^ camellia_sp0222[(u8)(t0)] \
^ camellia_sp3033[(u8)(il >> 8)] \
^ camellia_sp4404[(u8)(il)]; \
yl ^= yr; \
yr = ror32(yr, 8); \
yr ^= yl; \
})
#define SUBKEY_L(INDEX) (subkey[(INDEX)*2])
#define SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1])
static void camellia_setup_tail(u32 *subkey, u32 *subL, u32 *subR, int max)
{
u32 dw, tl, tr;
u32 kw4l, kw4r;
/* absorb kw2 to other subkeys */
/* round 2 */
subL[3] ^= subL[1]; subR[3] ^= subR[1];
/* round 4 */
subL[5] ^= subL[1]; subR[5] ^= subR[1];
/* round 6 */
subL[7] ^= subL[1]; subR[7] ^= subR[1];
subL[1] ^= subR[1] & ~subR[9];
dw = subL[1] & subL[9];
subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */
/* round 8 */
subL[11] ^= subL[1]; subR[11] ^= subR[1];
/* round 10 */
subL[13] ^= subL[1]; subR[13] ^= subR[1];
/* round 12 */
subL[15] ^= subL[1]; subR[15] ^= subR[1];
subL[1] ^= subR[1] & ~subR[17];
dw = subL[1] & subL[17];
subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */
/* round 14 */
subL[19] ^= subL[1]; subR[19] ^= subR[1];
/* round 16 */
subL[21] ^= subL[1]; subR[21] ^= subR[1];
/* round 18 */
subL[23] ^= subL[1]; subR[23] ^= subR[1];
if (max == 24) {
/* kw3 */
subL[24] ^= subL[1]; subR[24] ^= subR[1];
/* absorb kw4 to other subkeys */
kw4l = subL[25]; kw4r = subR[25];
} else {
subL[1] ^= subR[1] & ~subR[25];
dw = subL[1] & subL[25];
subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */
/* round 20 */
subL[27] ^= subL[1]; subR[27] ^= subR[1];
/* round 22 */
subL[29] ^= subL[1]; subR[29] ^= subR[1];
/* round 24 */
subL[31] ^= subL[1]; subR[31] ^= subR[1];
/* kw3 */
subL[32] ^= subL[1]; subR[32] ^= subR[1];
/* absorb kw4 to other subkeys */
kw4l = subL[33]; kw4r = subR[33];
/* round 23 */
subL[30] ^= kw4l; subR[30] ^= kw4r;
/* round 21 */
subL[28] ^= kw4l; subR[28] ^= kw4r;
/* round 19 */
subL[26] ^= kw4l; subR[26] ^= kw4r;
kw4l ^= kw4r & ~subR[24];
dw = kw4l & subL[24];
kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */
}
/* round 17 */
subL[22] ^= kw4l; subR[22] ^= kw4r;
/* round 15 */
subL[20] ^= kw4l; subR[20] ^= kw4r;
/* round 13 */
subL[18] ^= kw4l; subR[18] ^= kw4r;
kw4l ^= kw4r & ~subR[16];
dw = kw4l & subL[16];
kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */
/* round 11 */
subL[14] ^= kw4l; subR[14] ^= kw4r;
/* round 9 */
subL[12] ^= kw4l; subR[12] ^= kw4r;
/* round 7 */
subL[10] ^= kw4l; subR[10] ^= kw4r;
kw4l ^= kw4r & ~subR[8];
dw = kw4l & subL[8];
kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */
/* round 5 */
subL[6] ^= kw4l; subR[6] ^= kw4r;
/* round 3 */
subL[4] ^= kw4l; subR[4] ^= kw4r;
/* round 1 */
subL[2] ^= kw4l; subR[2] ^= kw4r;
/* kw1 */
subL[0] ^= kw4l; subR[0] ^= kw4r;
/* key XOR is end of F-function */
SUBKEY_L(0) = subL[0] ^ subL[2];/* kw1 */
SUBKEY_R(0) = subR[0] ^ subR[2];
SUBKEY_L(2) = subL[3]; /* round 1 */
SUBKEY_R(2) = subR[3];
SUBKEY_L(3) = subL[2] ^ subL[4]; /* round 2 */
SUBKEY_R(3) = subR[2] ^ subR[4];
SUBKEY_L(4) = subL[3] ^ subL[5]; /* round 3 */
SUBKEY_R(4) = subR[3] ^ subR[5];
SUBKEY_L(5) = subL[4] ^ subL[6]; /* round 4 */
SUBKEY_R(5) = subR[4] ^ subR[6];
SUBKEY_L(6) = subL[5] ^ subL[7]; /* round 5 */
SUBKEY_R(6) = subR[5] ^ subR[7];
tl = subL[10] ^ (subR[10] & ~subR[8]);
dw = tl & subL[8]; /* FL(kl1) */
tr = subR[10] ^ rol32(dw, 1);
SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */
SUBKEY_R(7) = subR[6] ^ tr;
SUBKEY_L(8) = subL[8]; /* FL(kl1) */
SUBKEY_R(8) = subR[8];
SUBKEY_L(9) = subL[9]; /* FLinv(kl2) */
SUBKEY_R(9) = subR[9];
tl = subL[7] ^ (subR[7] & ~subR[9]);
dw = tl & subL[9]; /* FLinv(kl2) */
tr = subR[7] ^ rol32(dw, 1);
SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */
SUBKEY_R(10) = tr ^ subR[11];
SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */
SUBKEY_R(11) = subR[10] ^ subR[12];
SUBKEY_L(12) = subL[11] ^ subL[13]; /* round 9 */
SUBKEY_R(12) = subR[11] ^ subR[13];
SUBKEY_L(13) = subL[12] ^ subL[14]; /* round 10 */
SUBKEY_R(13) = subR[12] ^ subR[14];
SUBKEY_L(14) = subL[13] ^ subL[15]; /* round 11 */
SUBKEY_R(14) = subR[13] ^ subR[15];
tl = subL[18] ^ (subR[18] & ~subR[16]);
dw = tl & subL[16]; /* FL(kl3) */
tr = subR[18] ^ rol32(dw, 1);
SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */
SUBKEY_R(15) = subR[14] ^ tr;
SUBKEY_L(16) = subL[16]; /* FL(kl3) */
SUBKEY_R(16) = subR[16];
SUBKEY_L(17) = subL[17]; /* FLinv(kl4) */
SUBKEY_R(17) = subR[17];
tl = subL[15] ^ (subR[15] & ~subR[17]);
dw = tl & subL[17]; /* FLinv(kl4) */
tr = subR[15] ^ rol32(dw, 1);
SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */
SUBKEY_R(18) = tr ^ subR[19];
SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */
SUBKEY_R(19) = subR[18] ^ subR[20];
SUBKEY_L(20) = subL[19] ^ subL[21]; /* round 15 */
SUBKEY_R(20) = subR[19] ^ subR[21];
SUBKEY_L(21) = subL[20] ^ subL[22]; /* round 16 */
SUBKEY_R(21) = subR[20] ^ subR[22];
SUBKEY_L(22) = subL[21] ^ subL[23]; /* round 17 */
SUBKEY_R(22) = subR[21] ^ subR[23];
if (max == 24) {
SUBKEY_L(23) = subL[22]; /* round 18 */
SUBKEY_R(23) = subR[22];
SUBKEY_L(24) = subL[24] ^ subL[23]; /* kw3 */
SUBKEY_R(24) = subR[24] ^ subR[23];
} else {
tl = subL[26] ^ (subR[26] & ~subR[24]);
dw = tl & subL[24]; /* FL(kl5) */
tr = subR[26] ^ rol32(dw, 1);
SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */
SUBKEY_R(23) = subR[22] ^ tr;
SUBKEY_L(24) = subL[24]; /* FL(kl5) */
SUBKEY_R(24) = subR[24];
SUBKEY_L(25) = subL[25]; /* FLinv(kl6) */
SUBKEY_R(25) = subR[25];
tl = subL[23] ^ (subR[23] & ~subR[25]);
dw = tl & subL[25]; /* FLinv(kl6) */
tr = subR[23] ^ rol32(dw, 1);
SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */
SUBKEY_R(26) = tr ^ subR[27];
SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */
SUBKEY_R(27) = subR[26] ^ subR[28];
SUBKEY_L(28) = subL[27] ^ subL[29]; /* round 21 */
SUBKEY_R(28) = subR[27] ^ subR[29];
SUBKEY_L(29) = subL[28] ^ subL[30]; /* round 22 */
SUBKEY_R(29) = subR[28] ^ subR[30];
SUBKEY_L(30) = subL[29] ^ subL[31]; /* round 23 */
SUBKEY_R(30) = subR[29] ^ subR[31];
SUBKEY_L(31) = subL[30]; /* round 24 */
SUBKEY_R(31) = subR[30];
SUBKEY_L(32) = subL[32] ^ subL[31]; /* kw3 */
SUBKEY_R(32) = subR[32] ^ subR[31];
}
}
static void camellia_setup128(const unsigned char *key, u32 *subkey)
{
u32 kll, klr, krl, krr;
u32 il, ir, t0, t1, w0, w1;
u32 subL[26];
u32 subR[26];
/*
* k == kll || klr || krl || krr (|| is concatenation)
*/
kll = get_unaligned_be32(key);
klr = get_unaligned_be32(key + 4);
krl = get_unaligned_be32(key + 8);
krr = get_unaligned_be32(key + 12);
/* generate KL dependent subkeys */
/* kw1 */
subL[0] = kll; subR[0] = klr;
/* kw2 */
subL[1] = krl; subR[1] = krr;
/* rotation left shift 15bit */
ROLDQ(kll, klr, krl, krr, w0, w1, 15);
/* k3 */
subL[4] = kll; subR[4] = klr;
/* k4 */
subL[5] = krl; subR[5] = krr;
/* rotation left shift 15+30bit */
ROLDQ(kll, klr, krl, krr, w0, w1, 30);
/* k7 */
subL[10] = kll; subR[10] = klr;
/* k8 */
subL[11] = krl; subR[11] = krr;
/* rotation left shift 15+30+15bit */
ROLDQ(kll, klr, krl, krr, w0, w1, 15);
/* k10 */
subL[13] = krl; subR[13] = krr;
/* rotation left shift 15+30+15+17 bit */
ROLDQ(kll, klr, krl, krr, w0, w1, 17);
/* kl3 */
subL[16] = kll; subR[16] = klr;
/* kl4 */
subL[17] = krl; subR[17] = krr;
/* rotation left shift 15+30+15+17+17 bit */
ROLDQ(kll, klr, krl, krr, w0, w1, 17);
/* k13 */
subL[18] = kll; subR[18] = klr;
/* k14 */
subL[19] = krl; subR[19] = krr;
/* rotation left shift 15+30+15+17+17+17 bit */
ROLDQ(kll, klr, krl, krr, w0, w1, 17);
/* k17 */
subL[22] = kll; subR[22] = klr;
/* k18 */
subL[23] = krl; subR[23] = krr;
/* generate KA */
kll = subL[0]; klr = subR[0];
krl = subL[1]; krr = subR[1];
CAMELLIA_F(kll, klr,
CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R,
w0, w1, il, ir, t0, t1);
krl ^= w0; krr ^= w1;
CAMELLIA_F(krl, krr,
CAMELLIA_SIGMA2L, CAMELLIA_SIGMA2R,
kll, klr, il, ir, t0, t1);
/* current status == (kll, klr, w0, w1) */
CAMELLIA_F(kll, klr,
CAMELLIA_SIGMA3L, CAMELLIA_SIGMA3R,
krl, krr, il, ir, t0, t1);
krl ^= w0; krr ^= w1;
CAMELLIA_F(krl, krr,
CAMELLIA_SIGMA4L, CAMELLIA_SIGMA4R,
w0, w1, il, ir, t0, t1);
kll ^= w0; klr ^= w1;
/* generate KA dependent subkeys */
/* k1, k2 */
subL[2] = kll; subR[2] = klr;
subL[3] = krl; subR[3] = krr;
ROLDQ(kll, klr, krl, krr, w0, w1, 15);
/* k5,k6 */
subL[6] = kll; subR[6] = klr;
subL[7] = krl; subR[7] = krr;
ROLDQ(kll, klr, krl, krr, w0, w1, 15);
/* kl1, kl2 */
subL[8] = kll; subR[8] = klr;
subL[9] = krl; subR[9] = krr;
ROLDQ(kll, klr, krl, krr, w0, w1, 15);
/* k9 */
subL[12] = kll; subR[12] = klr;
ROLDQ(kll, klr, krl, krr, w0, w1, 15);
/* k11, k12 */
subL[14] = kll; subR[14] = klr;
subL[15] = krl; subR[15] = krr;
ROLDQo32(kll, klr, krl, krr, w0, w1, 34);
/* k15, k16 */
subL[20] = kll; subR[20] = klr;
subL[21] = krl; subR[21] = krr;
ROLDQ(kll, klr, krl, krr, w0, w1, 17);
/* kw3, kw4 */
subL[24] = kll; subR[24] = klr;
subL[25] = krl; subR[25] = krr;
camellia_setup_tail(subkey, subL, subR, 24);
}
static void camellia_setup256(const unsigned char *key, u32 *subkey)
{
u32 kll, klr, krl, krr; /* left half of key */
u32 krll, krlr, krrl, krrr; /* right half of key */
u32 il, ir, t0, t1, w0, w1; /* temporary variables */
u32 subL[34];
u32 subR[34];
/*
* key = (kll || klr || krl || krr || krll || krlr || krrl || krrr)
* (|| is concatenation)
*/
kll = get_unaligned_be32(key);
klr = get_unaligned_be32(key + 4);
krl = get_unaligned_be32(key + 8);
krr = get_unaligned_be32(key + 12);
krll = get_unaligned_be32(key + 16);
krlr = get_unaligned_be32(key + 20);
krrl = get_unaligned_be32(key + 24);
krrr = get_unaligned_be32(key + 28);
/* generate KL dependent subkeys */
/* kw1 */
subL[0] = kll; subR[0] = klr;
/* kw2 */
subL[1] = krl; subR[1] = krr;
ROLDQo32(kll, klr, krl, krr, w0, w1, 45);
/* k9 */
subL[12] = kll; subR[12] = klr;
/* k10 */
subL[13] = krl; subR[13] = krr;
ROLDQ(kll, klr, krl, krr, w0, w1, 15);
/* kl3 */
subL[16] = kll; subR[16] = klr;
/* kl4 */
subL[17] = krl; subR[17] = krr;
ROLDQ(kll, klr, krl, krr, w0, w1, 17);
/* k17 */
subL[22] = kll; subR[22] = klr;
/* k18 */
subL[23] = krl; subR[23] = krr;
ROLDQo32(kll, klr, krl, krr, w0, w1, 34);
/* k23 */
subL[30] = kll; subR[30] = klr;
/* k24 */
subL[31] = krl; subR[31] = krr;
/* generate KR dependent subkeys */
ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15);
/* k3 */
subL[4] = krll; subR[4] = krlr;
/* k4 */
subL[5] = krrl; subR[5] = krrr;
ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15);
/* kl1 */
subL[8] = krll; subR[8] = krlr;
/* kl2 */
subL[9] = krrl; subR[9] = krrr;
ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
/* k13 */
subL[18] = krll; subR[18] = krlr;
/* k14 */
subL[19] = krrl; subR[19] = krrr;
ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34);
/* k19 */
subL[26] = krll; subR[26] = krlr;
/* k20 */
subL[27] = krrl; subR[27] = krrr;
ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34);
/* generate KA */
kll = subL[0] ^ krll; klr = subR[0] ^ krlr;
krl = subL[1] ^ krrl; krr = subR[1] ^ krrr;
CAMELLIA_F(kll, klr,
CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R,
w0, w1, il, ir, t0, t1);
krl ^= w0; krr ^= w1;
CAMELLIA_F(krl, krr,
CAMELLIA_SIGMA2L, CAMELLIA_SIGMA2R,
kll, klr, il, ir, t0, t1);
kll ^= krll; klr ^= krlr;
CAMELLIA_F(kll, klr,
CAMELLIA_SIGMA3L, CAMELLIA_SIGMA3R,
krl, krr, il, ir, t0, t1);
krl ^= w0 ^ krrl; krr ^= w1 ^ krrr;
CAMELLIA_F(krl, krr,
CAMELLIA_SIGMA4L, CAMELLIA_SIGMA4R,
w0, w1, il, ir, t0, t1);
kll ^= w0; klr ^= w1;
/* generate KB */
krll ^= kll; krlr ^= klr;
krrl ^= krl; krrr ^= krr;
CAMELLIA_F(krll, krlr,
CAMELLIA_SIGMA5L, CAMELLIA_SIGMA5R,
w0, w1, il, ir, t0, t1);
krrl ^= w0; krrr ^= w1;
CAMELLIA_F(krrl, krrr,
CAMELLIA_SIGMA6L, CAMELLIA_SIGMA6R,
w0, w1, il, ir, t0, t1);
krll ^= w0; krlr ^= w1;
/* generate KA dependent subkeys */
ROLDQ(kll, klr, krl, krr, w0, w1, 15);
/* k5 */
subL[6] = kll; subR[6] = klr;
/* k6 */
subL[7] = krl; subR[7] = krr;
ROLDQ(kll, klr, krl, krr, w0, w1, 30);
/* k11 */
subL[14] = kll; subR[14] = klr;
/* k12 */
subL[15] = krl; subR[15] = krr;
/* rotation left shift 32bit */
/* kl5 */
subL[24] = klr; subR[24] = krl;
/* kl6 */
subL[25] = krr; subR[25] = kll;
/* rotation left shift 49 from k11,k12 -> k21,k22 */
ROLDQo32(kll, klr, krl, krr, w0, w1, 49);
/* k21 */
subL[28] = kll; subR[28] = klr;
/* k22 */
subL[29] = krl; subR[29] = krr;
/* generate KB dependent subkeys */
/* k1 */
subL[2] = krll; subR[2] = krlr;
/* k2 */
subL[3] = krrl; subR[3] = krrr;
ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
/* k7 */
subL[10] = krll; subR[10] = krlr;
/* k8 */
subL[11] = krrl; subR[11] = krrr;
ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
/* k15 */
subL[20] = krll; subR[20] = krlr;
/* k16 */
subL[21] = krrl; subR[21] = krrr;
ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 51);
/* kw3 */
subL[32] = krll; subR[32] = krlr;
/* kw4 */
subL[33] = krrl; subR[33] = krrr;
camellia_setup_tail(subkey, subL, subR, 32);
}
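/*
 * Per the Camellia specification, a 192-bit key is extended to 256 bits
 * by appending the bitwise complement of its final 64 bits, then run
 * through the 256-bit key schedule.
 */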
static void camellia_setup192(const unsigned char *key, u32 *subkey)
{
unsigned char kk[32];
u32 krll, krlr, krrl, krrr;
memcpy(kk, key, 24);
memcpy((unsigned char *)&krll, key+16, 4);
memcpy((unsigned char *)&krlr, key+20, 4);
krrl = ~krll;
krrr = ~krlr;
memcpy(kk+24, (unsigned char *)&krrl, 4);
memcpy(kk+28, (unsigned char *)&krrr, 4);
camellia_setup256(kk, subkey);
}
/*
* Encrypt/decrypt
*/
#define CAMELLIA_FLS(ll, lr, rl, rr, kll, klr, krl, krr, t0, t1, t2, t3) ({ \
t0 = kll; \
t2 = krr; \
t0 &= ll; \
t2 |= rr; \
rl ^= t2; \
lr ^= rol32(t0, 1); \
t3 = krl; \
t1 = klr; \
t3 &= rl; \
t1 |= lr; \
ll ^= t1; \
rr ^= rol32(t3, 1); \
})
#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) ({ \
yl ^= kl; \
yr ^= kr; \
ir = camellia_sp1110[(u8)xr]; \
il = camellia_sp1110[(u8)(xl >> 24)]; \
ir ^= camellia_sp0222[(u8)(xr >> 24)]; \
il ^= camellia_sp0222[(u8)(xl >> 16)]; \
ir ^= camellia_sp3033[(u8)(xr >> 16)]; \
il ^= camellia_sp3033[(u8)(xl >> 8)]; \
ir ^= camellia_sp4404[(u8)(xr >> 8)]; \
il ^= camellia_sp4404[(u8)xl]; \
ir ^= il; \
yl ^= ir; \
yr ^= ror32(il, 8) ^ ir; \
})
/* max = 24: 128bit encrypt, max = 32: 256bit encrypt */
static void camellia_do_encrypt(const u32 *subkey, u32 *io, unsigned max)
{
u32 il, ir, t0, t1; /* temporary variables */
/* pre whitening but absorb kw2 */
io[0] ^= SUBKEY_L(0);
io[1] ^= SUBKEY_R(0);
/* main iteration */
#define ROUNDS(i) ({ \
CAMELLIA_ROUNDSM(io[0], io[1], \
SUBKEY_L(i + 2), SUBKEY_R(i + 2), \
io[2], io[3], il, ir); \
CAMELLIA_ROUNDSM(io[2], io[3], \
SUBKEY_L(i + 3), SUBKEY_R(i + 3), \
io[0], io[1], il, ir); \
CAMELLIA_ROUNDSM(io[0], io[1], \
SUBKEY_L(i + 4), SUBKEY_R(i + 4), \
io[2], io[3], il, ir); \
CAMELLIA_ROUNDSM(io[2], io[3], \
SUBKEY_L(i + 5), SUBKEY_R(i + 5), \
io[0], io[1], il, ir); \
CAMELLIA_ROUNDSM(io[0], io[1], \
SUBKEY_L(i + 6), SUBKEY_R(i + 6), \
io[2], io[3], il, ir); \
CAMELLIA_ROUNDSM(io[2], io[3], \
SUBKEY_L(i + 7), SUBKEY_R(i + 7), \
io[0], io[1], il, ir); \
})
#define FLS(i) ({ \
CAMELLIA_FLS(io[0], io[1], io[2], io[3], \
SUBKEY_L(i + 0), SUBKEY_R(i + 0), \
SUBKEY_L(i + 1), SUBKEY_R(i + 1), \
t0, t1, il, ir); \
})
ROUNDS(0);
FLS(8);
ROUNDS(8);
FLS(16);
ROUNDS(16);
if (max == 32) {
FLS(24);
ROUNDS(24);
}
#undef ROUNDS
#undef FLS
/* post whitening but kw4 */
io[2] ^= SUBKEY_L(max);
io[3] ^= SUBKEY_R(max);
/* NB: io[0],[1] should be swapped with [2],[3] by caller! */
}
static void camellia_do_decrypt(const u32 *subkey, u32 *io, unsigned i)
{
u32 il, ir, t0, t1; /* temporary variables */
/* pre whitening but absorb kw2 */
io[0] ^= SUBKEY_L(i);
io[1] ^= SUBKEY_R(i);
/* main iteration */
#define ROUNDS(i) ({ \
CAMELLIA_ROUNDSM(io[0], io[1], \
SUBKEY_L(i + 7), SUBKEY_R(i + 7), \
io[2], io[3], il, ir); \
CAMELLIA_ROUNDSM(io[2], io[3], \
SUBKEY_L(i + 6), SUBKEY_R(i + 6), \
io[0], io[1], il, ir); \
CAMELLIA_ROUNDSM(io[0], io[1], \
SUBKEY_L(i + 5), SUBKEY_R(i + 5), \
io[2], io[3], il, ir); \
CAMELLIA_ROUNDSM(io[2], io[3], \
SUBKEY_L(i + 4), SUBKEY_R(i + 4), \
io[0], io[1], il, ir); \
CAMELLIA_ROUNDSM(io[0], io[1], \
SUBKEY_L(i + 3), SUBKEY_R(i + 3), \
io[2], io[3], il, ir); \
CAMELLIA_ROUNDSM(io[2], io[3], \
SUBKEY_L(i + 2), SUBKEY_R(i + 2), \
io[0], io[1], il, ir); \
})
#define FLS(i) ({ \
CAMELLIA_FLS(io[0], io[1], io[2], io[3], \
SUBKEY_L(i + 1), SUBKEY_R(i + 1), \
SUBKEY_L(i + 0), SUBKEY_R(i + 0), \
t0, t1, il, ir); \
})
if (i == 32) {
ROUNDS(24);
FLS(24);
}
ROUNDS(16);
FLS(16);
ROUNDS(8);
FLS(8);
ROUNDS(0);
#undef ROUNDS
#undef FLS
/* post whitening but kw4 */
io[2] ^= SUBKEY_L(0);
io[3] ^= SUBKEY_R(0);
/* NB: 0,1 should be swapped with 2,3 by caller! */
}
struct camellia_ctx {
int key_length;
u32 key_table[CAMELLIA_TABLE_BYTE_LEN / sizeof(u32)];
};
static int
camellia_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct camellia_ctx *cctx = crypto_tfm_ctx(tfm);
const unsigned char *key = (const unsigned char *)in_key;
if (key_len != 16 && key_len != 24 && key_len != 32)
return -EINVAL;
cctx->key_length = key_len;
switch (key_len) {
case 16:
camellia_setup128(key, cctx->key_table);
break;
case 24:
camellia_setup192(key, cctx->key_table);
break;
case 32:
camellia_setup256(key, cctx->key_table);
break;
}
return 0;
}
static void camellia_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm);
unsigned int max;
u32 tmp[4];
tmp[0] = get_unaligned_be32(in);
tmp[1] = get_unaligned_be32(in + 4);
tmp[2] = get_unaligned_be32(in + 8);
tmp[3] = get_unaligned_be32(in + 12);
if (cctx->key_length == 16)
max = 24;
else
max = 32; /* for key lengths of 24 and 32 */
camellia_do_encrypt(cctx->key_table, tmp, max);
/* do_encrypt returns 0,1 swapped with 2,3 */
put_unaligned_be32(tmp[2], out);
put_unaligned_be32(tmp[3], out + 4);
put_unaligned_be32(tmp[0], out + 8);
put_unaligned_be32(tmp[1], out + 12);
}
static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm);
unsigned int max;
u32 tmp[4];
tmp[0] = get_unaligned_be32(in);
tmp[1] = get_unaligned_be32(in + 4);
tmp[2] = get_unaligned_be32(in + 8);
tmp[3] = get_unaligned_be32(in + 12);
if (cctx->key_length == 16)
max = 24;
else
max = 32; /* for key lengths of 24 and 32 */
camellia_do_decrypt(cctx->key_table, tmp, max);
/* do_decrypt returns 0,1 swapped with 2,3 */
put_unaligned_be32(tmp[2], out);
put_unaligned_be32(tmp[3], out + 4);
put_unaligned_be32(tmp[0], out + 8);
put_unaligned_be32(tmp[1], out + 12);
}
static struct crypto_alg camellia_alg = {
.cra_name = "camellia",
.cra_driver_name = "camellia-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct camellia_ctx),
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = CAMELLIA_MIN_KEY_SIZE,
.cia_max_keysize = CAMELLIA_MAX_KEY_SIZE,
.cia_setkey = camellia_set_key,
.cia_encrypt = camellia_encrypt,
.cia_decrypt = camellia_decrypt
}
}
};
static int __init camellia_init(void)
{
return crypto_register_alg(&camellia_alg);
}
static void __exit camellia_fini(void)
{
crypto_unregister_alg(&camellia_alg);
}
subsys_initcall(camellia_init);
module_exit(camellia_fini);
MODULE_DESCRIPTION("Camellia Cipher Algorithm");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("camellia");
MODULE_ALIAS_CRYPTO("camellia-generic");
| linux-master | crypto/camellia_generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* Single-block cipher operations.
*
* Copyright (c) 2002 James Morris <[email protected]>
* Copyright (c) 2005 Herbert Xu <[email protected]>
*/
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "internal.h"
static int setkey_unaligned(struct crypto_cipher *tfm, const u8 *key,
unsigned int keylen)
{
struct cipher_alg *cia = crypto_cipher_alg(tfm);
unsigned long alignmask = crypto_cipher_alignmask(tfm);
int ret;
u8 *buffer, *alignbuffer;
unsigned long absize;
absize = keylen + alignmask;
buffer = kmalloc(absize, GFP_ATOMIC);
if (!buffer)
return -ENOMEM;
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
ret = cia->cia_setkey(crypto_cipher_tfm(tfm), alignbuffer, keylen);
memset(alignbuffer, 0, keylen);
kfree(buffer);
return ret;
}
int crypto_cipher_setkey(struct crypto_cipher *tfm,
const u8 *key, unsigned int keylen)
{
struct cipher_alg *cia = crypto_cipher_alg(tfm);
unsigned long alignmask = crypto_cipher_alignmask(tfm);
if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize)
return -EINVAL;
if ((unsigned long)key & alignmask)
return setkey_unaligned(tfm, key, keylen);
return cia->cia_setkey(crypto_cipher_tfm(tfm), key, keylen);
}
EXPORT_SYMBOL_NS_GPL(crypto_cipher_setkey, CRYPTO_INTERNAL);
static inline void cipher_crypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src, bool enc)
{
unsigned long alignmask = crypto_cipher_alignmask(tfm);
struct cipher_alg *cia = crypto_cipher_alg(tfm);
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
enc ? cia->cia_encrypt : cia->cia_decrypt;
if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
unsigned int bs = crypto_cipher_blocksize(tfm);
u8 buffer[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(tmp, src, bs);
fn(crypto_cipher_tfm(tfm), tmp, tmp);
memcpy(dst, tmp, bs);
} else {
fn(crypto_cipher_tfm(tfm), dst, src);
}
}
void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src)
{
cipher_crypt_one(tfm, dst, src, true);
}
EXPORT_SYMBOL_NS_GPL(crypto_cipher_encrypt_one, CRYPTO_INTERNAL);
void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src)
{
cipher_crypt_one(tfm, dst, src, false);
}
EXPORT_SYMBOL_NS_GPL(crypto_cipher_decrypt_one, CRYPTO_INTERNAL);
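/*
 * A hedged usage sketch (not part of this file): one AES block through the
 * single-block cipher API above.  Code outside the crypto subsystem would
 * also need MODULE_IMPORT_NS(CRYPTO_INTERNAL); error handling is trimmed.
 */
#include <crypto/internal/cipher.h>
#include <linux/err.h>

static int cipher_demo(const u8 key[16], const u8 in[16], u8 out[16])
{
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, 16);
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);

	crypto_free_cipher(tfm);
	return err;
}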
struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher)
{
struct crypto_tfm *tfm = crypto_cipher_tfm(cipher);
struct crypto_alg *alg = tfm->__crt_alg;
struct crypto_cipher *ncipher;
struct crypto_tfm *ntfm;
if (alg->cra_init)
return ERR_PTR(-ENOSYS);
if (unlikely(!crypto_mod_get(alg)))
return ERR_PTR(-ESTALE);
ntfm = __crypto_alloc_tfmgfp(alg, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK, GFP_ATOMIC);
if (IS_ERR(ntfm)) {
crypto_mod_put(alg);
return ERR_CAST(ntfm);
}
ntfm->crt_flags = tfm->crt_flags;
ncipher = __crypto_cipher_cast(ntfm);
return ncipher;
}
EXPORT_SYMBOL_GPL(crypto_clone_cipher);
| linux-master | crypto/cipher.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* CRC32C chksum
*
*@Article{castagnoli-crc,
* author = { Guy Castagnoli and Stefan Braeuer and Martin Herrman},
* title = {{Optimization of Cyclic Redundancy-Check Codes with 24
* and 32 Parity Bits}},
* journal = IEEE Transactions on Communication,
* year = {1993},
* volume = {41},
* number = {6},
* pages = {},
* month = {June},
*}
* Used by the iSCSI driver, possibly others, and derived from
* the iscsi-crc.c module of the linux-iscsi driver at
* http://linux-iscsi.sourceforge.net.
*
* Following the example of lib/crc32, this function is intended to be
* flexible and useful for all users. Modules that currently have their
* own crc32c, but hopefully may be able to use this one are:
* net/sctp (please add all your doco to here if you change to
* use this one!)
* <endoflist>
*
* Copyright (c) 2004 Cisco Systems, Inc.
* Copyright (c) 2008 Herbert Xu <[email protected]>
*/
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/crc32.h>
#define CHKSUM_BLOCK_SIZE 1
#define CHKSUM_DIGEST_SIZE 4
struct chksum_ctx {
u32 key;
};
struct chksum_desc_ctx {
u32 crc;
};
/*
 * Steps through the buffer one byte at a time, calculating the
 * reflected CRC using a lookup table.
 */
static int chksum_init(struct shash_desc *desc)
{
struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
ctx->crc = mctx->key;
return 0;
}
/*
 * Setting the seed allows arbitrary accumulators and a flexible XOR
 * policy.  If your algorithm starts with ~0, then XOR with ~0 before
 * you set the seed.
 */
static int chksum_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
struct chksum_ctx *mctx = crypto_shash_ctx(tfm);
if (keylen != sizeof(mctx->key))
return -EINVAL;
mctx->key = get_unaligned_le32(key);
return 0;
}
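/*
 * Example of the convention above (a sketch): chksum_final() emits the
 * bitwise NOT of the running state, so to resume from a previously
 * emitted CRC value, undo that inversion before seeding:
 *
 *	__le32 seed = cpu_to_le32(~prev_crc);
 *	crypto_shash_setkey(tfm, (u8 *)&seed, 4);
 *
 * after which further update() calls continue the original CRC stream.
 */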
static int chksum_update(struct shash_desc *desc, const u8 *data,
unsigned int length)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
ctx->crc = __crc32c_le(ctx->crc, data, length);
return 0;
}
static int chksum_final(struct shash_desc *desc, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
put_unaligned_le32(~ctx->crc, out);
return 0;
}
static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out)
{
put_unaligned_le32(~__crc32c_le(*crcp, data, len), out);
return 0;
}
static int chksum_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
return __chksum_finup(&ctx->crc, data, len, out);
}
static int chksum_digest(struct shash_desc *desc, const u8 *data,
unsigned int length, u8 *out)
{
struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
return __chksum_finup(&mctx->key, data, length, out);
}
static int crc32c_cra_init(struct crypto_tfm *tfm)
{
struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = ~0;
return 0;
}
static struct shash_alg alg = {
.digestsize = CHKSUM_DIGEST_SIZE,
.setkey = chksum_setkey,
.init = chksum_init,
.update = chksum_update,
.final = chksum_final,
.finup = chksum_finup,
.digest = chksum_digest,
.descsize = sizeof(struct chksum_desc_ctx),
.base = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct chksum_ctx),
.cra_module = THIS_MODULE,
.cra_init = crc32c_cra_init,
}
};
static int __init crc32c_mod_init(void)
{
return crypto_register_shash(&alg);
}
static void __exit crc32c_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
subsys_initcall(crc32c_mod_init);
module_exit(crc32c_mod_fini);
MODULE_AUTHOR("Clay Haapala <[email protected]>");
MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crc32c");
MODULE_ALIAS_CRYPTO("crc32c-generic");
| linux-master | crypto/crc32c_generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* RIPEMD-160 - RACE Integrity Primitives Evaluation Message Digest.
*
* Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
*
* Copyright (c) 2008 Adrian-Ken Rueegsegger <[email protected]>
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include "ripemd.h"
struct rmd160_ctx {
u64 byte_count;
u32 state[5];
__le32 buffer[16];
};
#define K1 RMD_K1
#define K2 RMD_K2
#define K3 RMD_K3
#define K4 RMD_K4
#define K5 RMD_K5
#define KK1 RMD_K6
#define KK2 RMD_K7
#define KK3 RMD_K8
#define KK4 RMD_K9
#define KK5 RMD_K1
#define F1(x, y, z) (x ^ y ^ z) /* XOR */
#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
#define F3(x, y, z) ((x | ~y) ^ z)
#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
#define F5(x, y, z) (x ^ (y | ~z))
#define ROUND(a, b, c, d, e, f, k, x, s) { \
(a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
(a) = rol32((a), (s)) + (e); \
(c) = rol32((c), 10); \
}
static void rmd160_transform(u32 *state, const __le32 *in)
{
u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee;
/* Initialize left lane */
aa = state[0];
bb = state[1];
cc = state[2];
dd = state[3];
ee = state[4];
/* Initialize right lane */
aaa = state[0];
bbb = state[1];
ccc = state[2];
ddd = state[3];
eee = state[4];
/* round 1: left lane */
ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11);
ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14);
ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15);
ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12);
ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5);
ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8);
ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7);
ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9);
ROUND(cc, dd, ee, aa, bb, F1, K1, in[8], 11);
ROUND(bb, cc, dd, ee, aa, F1, K1, in[9], 13);
ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14);
ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15);
ROUND(dd, ee, aa, bb, cc, F1, K1, in[12], 6);
ROUND(cc, dd, ee, aa, bb, F1, K1, in[13], 7);
ROUND(bb, cc, dd, ee, aa, F1, K1, in[14], 9);
ROUND(aa, bb, cc, dd, ee, F1, K1, in[15], 8);
/* round 2: left lane" */
ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7);
ROUND(dd, ee, aa, bb, cc, F2, K2, in[4], 6);
ROUND(cc, dd, ee, aa, bb, F2, K2, in[13], 8);
ROUND(bb, cc, dd, ee, aa, F2, K2, in[1], 13);
ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11);
ROUND(ee, aa, bb, cc, dd, F2, K2, in[6], 9);
ROUND(dd, ee, aa, bb, cc, F2, K2, in[15], 7);
ROUND(cc, dd, ee, aa, bb, F2, K2, in[3], 15);
ROUND(bb, cc, dd, ee, aa, F2, K2, in[12], 7);
ROUND(aa, bb, cc, dd, ee, F2, K2, in[0], 12);
ROUND(ee, aa, bb, cc, dd, F2, K2, in[9], 15);
ROUND(dd, ee, aa, bb, cc, F2, K2, in[5], 9);
ROUND(cc, dd, ee, aa, bb, F2, K2, in[2], 11);
ROUND(bb, cc, dd, ee, aa, F2, K2, in[14], 7);
ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13);
ROUND(ee, aa, bb, cc, dd, F2, K2, in[8], 12);
/* round 3: left lane */
ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11);
ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13);
ROUND(bb, cc, dd, ee, aa, F3, K3, in[14], 6);
ROUND(aa, bb, cc, dd, ee, F3, K3, in[4], 7);
ROUND(ee, aa, bb, cc, dd, F3, K3, in[9], 14);
ROUND(dd, ee, aa, bb, cc, F3, K3, in[15], 9);
ROUND(cc, dd, ee, aa, bb, F3, K3, in[8], 13);
ROUND(bb, cc, dd, ee, aa, F3, K3, in[1], 15);
ROUND(aa, bb, cc, dd, ee, F3, K3, in[2], 14);
ROUND(ee, aa, bb, cc, dd, F3, K3, in[7], 8);
ROUND(dd, ee, aa, bb, cc, F3, K3, in[0], 13);
ROUND(cc, dd, ee, aa, bb, F3, K3, in[6], 6);
ROUND(bb, cc, dd, ee, aa, F3, K3, in[13], 5);
ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12);
ROUND(ee, aa, bb, cc, dd, F3, K3, in[5], 7);
ROUND(dd, ee, aa, bb, cc, F3, K3, in[12], 5);
/* round 4: left lane */
ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11);
ROUND(bb, cc, dd, ee, aa, F4, K4, in[9], 12);
ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14);
ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15);
ROUND(dd, ee, aa, bb, cc, F4, K4, in[0], 14);
ROUND(cc, dd, ee, aa, bb, F4, K4, in[8], 15);
ROUND(bb, cc, dd, ee, aa, F4, K4, in[12], 9);
ROUND(aa, bb, cc, dd, ee, F4, K4, in[4], 8);
ROUND(ee, aa, bb, cc, dd, F4, K4, in[13], 9);
ROUND(dd, ee, aa, bb, cc, F4, K4, in[3], 14);
ROUND(cc, dd, ee, aa, bb, F4, K4, in[7], 5);
ROUND(bb, cc, dd, ee, aa, F4, K4, in[15], 6);
ROUND(aa, bb, cc, dd, ee, F4, K4, in[14], 8);
ROUND(ee, aa, bb, cc, dd, F4, K4, in[5], 6);
ROUND(dd, ee, aa, bb, cc, F4, K4, in[6], 5);
ROUND(cc, dd, ee, aa, bb, F4, K4, in[2], 12);
/* round 5: left lane */
ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9);
ROUND(aa, bb, cc, dd, ee, F5, K5, in[0], 15);
ROUND(ee, aa, bb, cc, dd, F5, K5, in[5], 5);
ROUND(dd, ee, aa, bb, cc, F5, K5, in[9], 11);
ROUND(cc, dd, ee, aa, bb, F5, K5, in[7], 6);
ROUND(bb, cc, dd, ee, aa, F5, K5, in[12], 8);
ROUND(aa, bb, cc, dd, ee, F5, K5, in[2], 13);
ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12);
ROUND(dd, ee, aa, bb, cc, F5, K5, in[14], 5);
ROUND(cc, dd, ee, aa, bb, F5, K5, in[1], 12);
ROUND(bb, cc, dd, ee, aa, F5, K5, in[3], 13);
ROUND(aa, bb, cc, dd, ee, F5, K5, in[8], 14);
ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11);
ROUND(dd, ee, aa, bb, cc, F5, K5, in[6], 8);
ROUND(cc, dd, ee, aa, bb, F5, K5, in[15], 5);
ROUND(bb, cc, dd, ee, aa, F5, K5, in[13], 6);
/* round 1: right lane */
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[5], 8);
ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[14], 9);
ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[7], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[0], 11);
ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[9], 13);
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[2], 15);
ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[11], 15);
ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[4], 5);
ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[13], 7);
ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[6], 7);
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[15], 8);
ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[8], 11);
ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[1], 14);
ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[10], 14);
ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[3], 12);
ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6);
/* round 2: right lane */
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[6], 9);
ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[11], 13);
ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[3], 15);
ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[7], 7);
ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[0], 12);
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[13], 8);
ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[5], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[10], 11);
ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[14], 7);
ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[15], 7);
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[8], 12);
ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[12], 7);
ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[4], 6);
ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[9], 15);
ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[1], 13);
ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11);
/* round 3: right lane */
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[15], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[5], 7);
ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[1], 15);
ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[3], 11);
ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[7], 8);
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[14], 6);
ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[6], 6);
ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[9], 14);
ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[11], 12);
ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[8], 13);
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[12], 5);
ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[2], 14);
ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[10], 13);
ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[0], 13);
ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[4], 7);
ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5);
/* round 4: right lane */
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[8], 15);
ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[6], 5);
ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[4], 8);
ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[1], 11);
ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[3], 14);
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[11], 14);
ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[15], 6);
ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[0], 14);
ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[5], 6);
ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[12], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[2], 12);
ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[13], 9);
ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[9], 12);
ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[7], 5);
ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[10], 15);
ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8);
/* round 5: right lane */
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[12], 8);
ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[15], 5);
ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[10], 12);
ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[4], 9);
ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[1], 12);
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[5], 5);
ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[8], 14);
ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[7], 6);
ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[6], 8);
ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[2], 13);
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[13], 6);
ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[14], 5);
ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[0], 15);
ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[3], 13);
ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[9], 11);
ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11);
/* combine results */
ddd += cc + state[1]; /* final result for state[0] */
state[1] = state[2] + dd + eee;
state[2] = state[3] + ee + aaa;
state[3] = state[4] + aa + bbb;
state[4] = state[0] + bb + ccc;
state[0] = ddd;
}
static int rmd160_init(struct shash_desc *desc)
{
struct rmd160_ctx *rctx = shash_desc_ctx(desc);
rctx->byte_count = 0;
rctx->state[0] = RMD_H0;
rctx->state[1] = RMD_H1;
rctx->state[2] = RMD_H2;
rctx->state[3] = RMD_H3;
rctx->state[4] = RMD_H4;
memset(rctx->buffer, 0, sizeof(rctx->buffer));
return 0;
}
static int rmd160_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct rmd160_ctx *rctx = shash_desc_ctx(desc);
const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
rctx->byte_count += len;
/* Enough space in buffer? If so copy and we're done */
if (avail > len) {
memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
data, len);
goto out;
}
memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
data, avail);
rmd160_transform(rctx->state, rctx->buffer);
data += avail;
len -= avail;
while (len >= sizeof(rctx->buffer)) {
memcpy(rctx->buffer, data, sizeof(rctx->buffer));
rmd160_transform(rctx->state, rctx->buffer);
data += sizeof(rctx->buffer);
len -= sizeof(rctx->buffer);
}
memcpy(rctx->buffer, data, len);
out:
return 0;
}
/* Add padding and return the message digest. */
static int rmd160_final(struct shash_desc *desc, u8 *out)
{
struct rmd160_ctx *rctx = shash_desc_ctx(desc);
u32 i, index, padlen;
__le64 bits;
__le32 *dst = (__le32 *)out;
static const u8 padding[64] = { 0x80, };
bits = cpu_to_le64(rctx->byte_count << 3);
/* Pad out to 56 mod 64 */
index = rctx->byte_count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
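/*
 * Worked example (added note): after hashing 120 bytes, index is
 * 120 & 0x3f = 56, so padlen = (64 + 56) - 56 = 64; the 0x80 byte plus
 * 63 zero bytes push the 8-byte length field into the next block. For
 * an empty message, index = 0 and padlen = 56.
 */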
rmd160_update(desc, padding, padlen);
/* Append length */
rmd160_update(desc, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 5; i++)
dst[i] = cpu_to_le32p(&rctx->state[i]);
/* Wipe context */
memset(rctx, 0, sizeof(*rctx));
return 0;
}
static struct shash_alg alg = {
.digestsize = RMD160_DIGEST_SIZE,
.init = rmd160_init,
.update = rmd160_update,
.final = rmd160_final,
.descsize = sizeof(struct rmd160_ctx),
.base = {
.cra_name = "rmd160",
.cra_driver_name = "rmd160-generic",
.cra_blocksize = RMD160_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init rmd160_mod_init(void)
{
return crypto_register_shash(&alg);
}
static void __exit rmd160_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
subsys_initcall(rmd160_mod_init);
module_exit(rmd160_mod_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adrian-Ken Rueegsegger <[email protected]>");
MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
MODULE_ALIAS_CRYPTO("rmd160");
| linux-master | crypto/rmd160.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2021 IBM Corporation
*/
#include <linux/module.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/ecc.h>
#include <crypto/akcipher.h>
#include <crypto/ecdh.h>
#include <linux/asn1_decoder.h>
#include <linux/scatterlist.h>
#include "ecdsasignature.asn1.h"
struct ecc_ctx {
unsigned int curve_id;
const struct ecc_curve *curve;
bool pub_key_set;
u64 x[ECC_MAX_DIGITS]; /* pub key x and y coordinates */
u64 y[ECC_MAX_DIGITS];
struct ecc_point pub_key;
};
struct ecdsa_signature_ctx {
const struct ecc_curve *curve;
u64 r[ECC_MAX_DIGITS];
u64 s[ECC_MAX_DIGITS];
};
/*
* Get the r and s components of a signature from the X509 certificate.
*/
static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen, unsigned int ndigits)
{
size_t keylen = ndigits * sizeof(u64);
ssize_t diff = vlen - keylen;
const char *d = value;
u8 rs[ECC_MAX_BYTES];
if (!value || !vlen)
return -EINVAL;
/* diff = 0: 'value' has exactly the right size
* diff > 0: 'value' has too many bytes; one leading zero is allowed that
* makes the value a positive integer; error on more
* diff < 0: 'value' is missing leading zeros, which we add
*/
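/*
 * Worked example (added note, assuming NIST P-256 so keylen = 32): a
 * 33-byte 'value' whose first byte is 0x00 (DER padding that keeps the
 * integer positive) is accepted once that zero is skipped; a 31-byte
 * 'value' (the top byte of r or s happened to be zero) gets one zero
 * byte prepended, so rs[] always holds exactly 32 big-endian bytes
 * before ecc_swap_digits() converts them into u64 digits.
 */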
if (diff > 0) {
/* skip over leading zeros that make 'value' a positive int */
if (*d == 0) {
vlen -= 1;
diff--;
d++;
}
if (diff)
return -EINVAL;
}
if (-diff >= keylen)
return -EINVAL;
if (diff) {
/* leading zeros not given in 'value' */
memset(rs, 0, -diff);
}
memcpy(&rs[-diff], d, vlen);
ecc_swap_digits((u64 *)rs, dest, ndigits);
return 0;
}
int ecdsa_get_signature_r(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct ecdsa_signature_ctx *sig = context;
return ecdsa_get_signature_rs(sig->r, hdrlen, tag, value, vlen,
sig->curve->g.ndigits);
}
int ecdsa_get_signature_s(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct ecdsa_signature_ctx *sig = context;
return ecdsa_get_signature_rs(sig->s, hdrlen, tag, value, vlen,
sig->curve->g.ndigits);
}
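/*
 * Summary of the computation below (added note; this is standard ECDSA
 * verification, see FIPS 186-4): given hash e, compute u1 = e * s^-1
 * mod n and u2 = r * s^-1 mod n, then the point (x1, y1) = u1*G + u2*Q,
 * where Q is the public key. The signature is valid iff x1 mod n
 * equals r.
 */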
static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, const u64 *s)
{
const struct ecc_curve *curve = ctx->curve;
unsigned int ndigits = curve->g.ndigits;
u64 s1[ECC_MAX_DIGITS];
u64 u1[ECC_MAX_DIGITS];
u64 u2[ECC_MAX_DIGITS];
u64 x1[ECC_MAX_DIGITS];
u64 y1[ECC_MAX_DIGITS];
struct ecc_point res = ECC_POINT_INIT(x1, y1, ndigits);
/* 0 < r < n and 0 < s < n */
if (vli_is_zero(r, ndigits) || vli_cmp(r, curve->n, ndigits) >= 0 ||
vli_is_zero(s, ndigits) || vli_cmp(s, curve->n, ndigits) >= 0)
return -EBADMSG;
/* hash is given */
pr_devel("hash : %016llx %016llx ... %016llx\n",
hash[ndigits - 1], hash[ndigits - 2], hash[0]);
/* s1 = (s^-1) mod n */
vli_mod_inv(s1, s, curve->n, ndigits);
/* u1 = (hash * s1) mod n */
vli_mod_mult_slow(u1, hash, s1, curve->n, ndigits);
/* u2 = (r * s1) mod n */
vli_mod_mult_slow(u2, r, s1, curve->n, ndigits);
/* res = u1*G + u2 * pub_key */
ecc_point_mult_shamir(&res, u1, &curve->g, u2, &ctx->pub_key, curve);
/* res.x = res.x mod n (if res.x > order) */
if (unlikely(vli_cmp(res.x, curve->n, ndigits) == 1))
/* faster alternative for NIST p384, p256 & p192 */
vli_sub(res.x, res.x, curve->n, ndigits);
if (!vli_cmp(res.x, r, ndigits))
return 0;
return -EKEYREJECTED;
}
/*
* Verify an ECDSA signature.
*/
static int ecdsa_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
size_t keylen = ctx->curve->g.ndigits * sizeof(u64);
struct ecdsa_signature_ctx sig_ctx = {
.curve = ctx->curve,
};
u8 rawhash[ECC_MAX_BYTES];
u64 hash[ECC_MAX_DIGITS];
unsigned char *buffer;
ssize_t diff;
int ret;
if (unlikely(!ctx->pub_key_set))
return -EINVAL;
buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
sg_pcopy_to_buffer(req->src,
sg_nents_for_len(req->src, req->src_len + req->dst_len),
buffer, req->src_len + req->dst_len, 0);
ret = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx,
buffer, req->src_len);
if (ret < 0)
goto error;
/* if the hash is shorter, add leading zeros so it fits ndigits digits */
diff = keylen - req->dst_len;
if (diff >= 0) {
if (diff)
memset(rawhash, 0, diff);
memcpy(&rawhash[diff], buffer + req->src_len, req->dst_len);
} else {
/* the given hash is longer; take only the left-most keylen bytes */
memcpy(rawhash, buffer + req->src_len, keylen);
}
ecc_swap_digits((u64 *)rawhash, hash, ctx->curve->g.ndigits);
ret = _ecdsa_verify(ctx, hash, sig_ctx.r, sig_ctx.s);
error:
kfree(buffer);
return ret;
}
static int ecdsa_ecc_ctx_init(struct ecc_ctx *ctx, unsigned int curve_id)
{
ctx->curve_id = curve_id;
ctx->curve = ecc_get_curve(curve_id);
if (!ctx->curve)
return -EINVAL;
return 0;
}
static void ecdsa_ecc_ctx_deinit(struct ecc_ctx *ctx)
{
ctx->pub_key_set = false;
}
static int ecdsa_ecc_ctx_reset(struct ecc_ctx *ctx)
{
unsigned int curve_id = ctx->curve_id;
int ret;
ecdsa_ecc_ctx_deinit(ctx);
ret = ecdsa_ecc_ctx_init(ctx, curve_id);
if (ret == 0)
ctx->pub_key = ECC_POINT_INIT(ctx->x, ctx->y,
ctx->curve->g.ndigits);
return ret;
}
/*
* Set the public key given the raw uncompressed key data from an X509
* certificate. The key data contain the concatenated X and Y coordinates of
* the public key.
*/
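/*
 * Worked example (added note, assuming NIST P-256): keylen is 65, that
 * is one 0x04 format byte followed by the 32-byte X and 32-byte Y
 * coordinates, so ndigits = ((65 - 1) >> 1) / 8 = 4 below.
 */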
static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
const unsigned char *d = key;
const u64 *digits = (const u64 *)&d[1];
unsigned int ndigits;
int ret;
ret = ecdsa_ecc_ctx_reset(ctx);
if (ret < 0)
return ret;
if (keylen < 1 || (((keylen - 1) >> 1) % sizeof(u64)) != 0)
return -EINVAL;
/* we only accept the uncompressed format, indicated by the value 0x04 */
if (d[0] != 4)
return -EINVAL;
keylen--;
ndigits = (keylen >> 1) / sizeof(u64);
if (ndigits != ctx->curve->g.ndigits)
return -EINVAL;
ecc_swap_digits(digits, ctx->pub_key.x, ndigits);
ecc_swap_digits(&digits[ndigits], ctx->pub_key.y, ndigits);
ret = ecc_is_pubkey_valid_full(ctx->curve, &ctx->pub_key);
ctx->pub_key_set = ret == 0;
return ret;
}
static void ecdsa_exit_tfm(struct crypto_akcipher *tfm)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
ecdsa_ecc_ctx_deinit(ctx);
}
static unsigned int ecdsa_max_size(struct crypto_akcipher *tfm)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
return ctx->pub_key.ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
}
static int ecdsa_nist_p384_init_tfm(struct crypto_akcipher *tfm)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P384);
}
static struct akcipher_alg ecdsa_nist_p384 = {
.verify = ecdsa_verify,
.set_pub_key = ecdsa_set_pub_key,
.max_size = ecdsa_max_size,
.init = ecdsa_nist_p384_init_tfm,
.exit = ecdsa_exit_tfm,
.base = {
.cra_name = "ecdsa-nist-p384",
.cra_driver_name = "ecdsa-nist-p384-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ecc_ctx),
},
};
static int ecdsa_nist_p256_init_tfm(struct crypto_akcipher *tfm)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P256);
}
static struct akcipher_alg ecdsa_nist_p256 = {
.verify = ecdsa_verify,
.set_pub_key = ecdsa_set_pub_key,
.max_size = ecdsa_max_size,
.init = ecdsa_nist_p256_init_tfm,
.exit = ecdsa_exit_tfm,
.base = {
.cra_name = "ecdsa-nist-p256",
.cra_driver_name = "ecdsa-nist-p256-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ecc_ctx),
},
};
static int ecdsa_nist_p192_init_tfm(struct crypto_akcipher *tfm)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P192);
}
static struct akcipher_alg ecdsa_nist_p192 = {
.verify = ecdsa_verify,
.set_pub_key = ecdsa_set_pub_key,
.max_size = ecdsa_max_size,
.init = ecdsa_nist_p192_init_tfm,
.exit = ecdsa_exit_tfm,
.base = {
.cra_name = "ecdsa-nist-p192",
.cra_driver_name = "ecdsa-nist-p192-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ecc_ctx),
},
};
static bool ecdsa_nist_p192_registered;
static int __init ecdsa_init(void)
{
int ret;
/* NIST p192 may not be available in FIPS mode */
ret = crypto_register_akcipher(&ecdsa_nist_p192);
ecdsa_nist_p192_registered = ret == 0;
ret = crypto_register_akcipher(&ecdsa_nist_p256);
if (ret)
goto nist_p256_error;
ret = crypto_register_akcipher(&ecdsa_nist_p384);
if (ret)
goto nist_p384_error;
return 0;
nist_p384_error:
crypto_unregister_akcipher(&ecdsa_nist_p256);
nist_p256_error:
if (ecdsa_nist_p192_registered)
crypto_unregister_akcipher(&ecdsa_nist_p192);
return ret;
}
static void __exit ecdsa_exit(void)
{
if (ecdsa_nist_p192_registered)
crypto_unregister_akcipher(&ecdsa_nist_p192);
crypto_unregister_akcipher(&ecdsa_nist_p256);
crypto_unregister_akcipher(&ecdsa_nist_p384);
}
subsys_initcall(ecdsa_init);
module_exit(ecdsa_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefan Berger <[email protected]>");
MODULE_DESCRIPTION("ECDSA generic algorithm");
MODULE_ALIAS_CRYPTO("ecdsa-generic");
| linux-master | crypto/ecdsa.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* algif_skcipher: User-space interface for skcipher algorithms
*
* This file provides the user-space API for symmetric key ciphers.
*
* Copyright (c) 2010 Herbert Xu <[email protected]>
*
* The following concept of the memory management is used:
*
* The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
* filled by user space with the data submitted via sendmsg. Filling up the TX
* SGL does not cause a crypto operation -- the data will only be tracked by
* the kernel. Upon receipt of one recvmsg call, the caller must provide a
* buffer which is tracked with the RX SGL.
*
* During the processing of the recvmsg operation, the cipher request is
* allocated and prepared. As part of the recvmsg operation, the processed
* TX buffers are extracted from the TX SGL into a separate SGL.
*
* After the completion of the crypto operation, the RX SGL and the cipher
* request is released. The extracted TX SGL parts are released together with
* the RX SGL release.
*/
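/*
 * Illustrative user-space usage (added sketch; the algorithm name and
 * 16-byte key length are examples only, see
 * Documentation/crypto/userspace-if.rst for the authoritative
 * interface description):
 *
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "skcipher",
 *		.salg_name   = "cbc(aes)",
 *	};
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	int opfd = accept(tfmfd, NULL, 0);
 *
 * A sendmsg() on opfd carrying ALG_SET_OP and ALG_SET_IV control
 * messages fills the TX SGL described above; a subsequent
 * read()/recvmsg() supplies the RX buffer and triggers the cipher
 * operation.
 */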
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
size_t size)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct crypto_skcipher *tfm = pask->private;
unsigned ivsize = crypto_skcipher_ivsize(tfm);
return af_alg_sendmsg(sock, msg, size, ivsize);
}
static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
struct crypto_skcipher *tfm = pask->private;
unsigned int bs = crypto_skcipher_chunksize(tfm);
struct af_alg_async_req *areq;
int err = 0;
size_t len = 0;
if (!ctx->init || (ctx->more && ctx->used < bs)) {
err = af_alg_wait_for_data(sk, flags, bs);
if (err)
return err;
}
/* Allocate cipher request for current operation. */
areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
crypto_skcipher_reqsize(tfm));
if (IS_ERR(areq))
return PTR_ERR(areq);
/* convert iovecs of output buffers into RX SGL */
err = af_alg_get_rsgl(sk, msg, flags, areq, ctx->used, &len);
if (err)
goto free;
/*
* If more input data is expected, process only full block-size
* chunks and leave the remainder for a later call.
*/
if (ctx->more || len < ctx->used)
len -= len % bs;
/*
* Create a per request TX SGL for this request which tracks the
* SG entries from the global TX SGL.
*/
areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
if (!areq->tsgl_entries)
areq->tsgl_entries = 1;
areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
areq->tsgl_entries),
GFP_KERNEL);
if (!areq->tsgl) {
err = -ENOMEM;
goto free;
}
sg_init_table(areq->tsgl, areq->tsgl_entries);
af_alg_pull_tsgl(sk, len, areq->tsgl, 0);
/* Initialize the crypto operation */
skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl,
areq->first_rsgl.sgl.sgt.sgl, len, ctx->iv);
if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
/* AIO operation */
sock_hold(sk);
areq->iocb = msg->msg_iocb;
/* Remember output size that will be generated. */
areq->outlen = len;
skcipher_request_set_callback(&areq->cra_u.skcipher_req,
CRYPTO_TFM_REQ_MAY_SLEEP,
af_alg_async_cb, areq);
err = ctx->enc ?
crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
/* AIO operation in progress */
if (err == -EINPROGRESS)
return -EIOCBQUEUED;
sock_put(sk);
} else {
/* Synchronous operation */
skcipher_request_set_callback(&areq->cra_u.skcipher_req,
CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &ctx->wait);
err = crypto_wait_req(ctx->enc ?
crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
&ctx->wait);
}
free:
af_alg_free_resources(areq);
return err ? err : len;
}
static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
struct sock *sk = sock->sk;
int ret = 0;
lock_sock(sk);
while (msg_data_left(msg)) {
int err = _skcipher_recvmsg(sock, msg, ignored, flags);
/*
* This error covers -EIOCBQUEUED which implies that we can
* only handle one AIO request. If the caller wants to have
* multiple AIO requests in parallel, it must issue multiple
* separate AIO calls.
*
* Also return the error if no data has been processed so far.
*/
if (err <= 0) {
if (err == -EIOCBQUEUED || !ret)
ret = err;
goto out;
}
ret += err;
}
out:
af_alg_wmem_wakeup(sk);
release_sock(sk);
return ret;
}
static struct proto_ops algif_skcipher_ops = {
.family = PF_ALG,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.accept = sock_no_accept,
.release = af_alg_release,
.sendmsg = skcipher_sendmsg,
.recvmsg = skcipher_recvmsg,
.poll = af_alg_poll,
};
static int skcipher_check_key(struct socket *sock)
{
int err = 0;
struct sock *psk;
struct alg_sock *pask;
struct crypto_skcipher *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
lock_sock(sk);
if (!atomic_read(&ask->nokey_refcnt))
goto unlock_child;
psk = ask->parent;
pask = alg_sk(ask->parent);
tfm = pask->private;
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
atomic_dec(&pask->nokey_refcnt);
atomic_set(&ask->nokey_refcnt, 0);
err = 0;
unlock:
release_sock(psk);
unlock_child:
release_sock(sk);
return err;
}
static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t size)
{
int err;
err = skcipher_check_key(sock);
if (err)
return err;
return skcipher_sendmsg(sock, msg, size);
}
static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
int err;
err = skcipher_check_key(sock);
if (err)
return err;
return skcipher_recvmsg(sock, msg, ignored, flags);
}
static struct proto_ops algif_skcipher_ops_nokey = {
.family = PF_ALG,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.accept = sock_no_accept,
.release = af_alg_release,
.sendmsg = skcipher_sendmsg_nokey,
.recvmsg = skcipher_recvmsg_nokey,
.poll = af_alg_poll,
};
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
return crypto_alloc_skcipher(name, type, mask);
}
static void skcipher_release(void *private)
{
crypto_free_skcipher(private);
}
static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
return crypto_skcipher_setkey(private, key, keylen);
}
static void skcipher_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct crypto_skcipher *tfm = pask->private;
af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}
static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
struct af_alg_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
struct crypto_skcipher *tfm = private;
unsigned int len = sizeof(*ctx);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
memset(ctx, 0, len);
ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm),
GFP_KERNEL);
if (!ctx->iv) {
sock_kfree_s(sk, ctx, len);
return -ENOMEM;
}
memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm));
INIT_LIST_HEAD(&ctx->tsgl_list);
ctx->len = len;
crypto_init_wait(&ctx->wait);
ask->private = ctx;
sk->sk_destruct = skcipher_sock_destruct;
return 0;
}
static int skcipher_accept_parent(void *private, struct sock *sk)
{
struct crypto_skcipher *tfm = private;
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return skcipher_accept_parent_nokey(private, sk);
}
static const struct af_alg_type algif_type_skcipher = {
.bind = skcipher_bind,
.release = skcipher_release,
.setkey = skcipher_setkey,
.accept = skcipher_accept_parent,
.accept_nokey = skcipher_accept_parent_nokey,
.ops = &algif_skcipher_ops,
.ops_nokey = &algif_skcipher_ops_nokey,
.name = "skcipher",
.owner = THIS_MODULE
};
static int __init algif_skcipher_init(void)
{
return af_alg_register_type(&algif_type_skcipher);
}
static void __exit algif_skcipher_exit(void)
{
int err = af_alg_unregister_type(&algif_type_skcipher);
BUG_ON(err);
}
module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");
| linux-master | crypto/algif_skcipher.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* RNG operations.
*
* Copyright (c) 2008 Neil Horman <[email protected]>
* Copyright (c) 2015 Herbert Xu <[email protected]>
*/
#include <crypto/internal/rng.h>
#include <linux/atomic.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "internal.h"
static DEFINE_MUTEX(crypto_default_rng_lock);
struct crypto_rng *crypto_default_rng;
EXPORT_SYMBOL_GPL(crypto_default_rng);
static int crypto_default_rng_refcnt;
int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
{
struct rng_alg *alg = crypto_rng_alg(tfm);
u8 *buf = NULL;
int err;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
atomic64_inc(&rng_get_stat(alg)->seed_cnt);
if (!seed && slen) {
buf = kmalloc(slen, GFP_KERNEL);
err = -ENOMEM;
if (!buf)
goto out;
err = get_random_bytes_wait(buf, slen);
if (err)
goto free_buf;
seed = buf;
}
err = alg->seed(tfm, seed, slen);
free_buf:
kfree_sensitive(buf);
out:
return crypto_rng_errstat(alg, err);
}
EXPORT_SYMBOL_GPL(crypto_rng_reset);
static int crypto_rng_init_tfm(struct crypto_tfm *tfm)
{
return 0;
}
static unsigned int seedsize(struct crypto_alg *alg)
{
struct rng_alg *ralg = container_of(alg, struct rng_alg, base);
return ralg->seedsize;
}
static int __maybe_unused crypto_rng_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_rng rrng;
memset(&rrng, 0, sizeof(rrng));
strscpy(rrng.type, "rng", sizeof(rrng.type));
rrng.seedsize = seedsize(alg);
return nla_put(skb, CRYPTOCFGA_REPORT_RNG, sizeof(rrng), &rrng);
}
static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_printf(m, "type : rng\n");
seq_printf(m, "seedsize : %u\n", seedsize(alg));
}
static int __maybe_unused crypto_rng_report_stat(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct rng_alg *rng = __crypto_rng_alg(alg);
struct crypto_istat_rng *istat;
struct crypto_stat_rng rrng;
istat = rng_get_stat(rng);
memset(&rrng, 0, sizeof(rrng));
strscpy(rrng.type, "rng", sizeof(rrng.type));
rrng.stat_generate_cnt = atomic64_read(&istat->generate_cnt);
rrng.stat_generate_tlen = atomic64_read(&istat->generate_tlen);
rrng.stat_seed_cnt = atomic64_read(&istat->seed_cnt);
rrng.stat_err_cnt = atomic64_read(&istat->err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng);
}
static const struct crypto_type crypto_rng_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_rng_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_rng_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_rng_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_rng_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_RNG,
.tfmsize = offsetof(struct crypto_rng, base),
};
struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask)
{
return crypto_alloc_tfm(alg_name, &crypto_rng_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_rng);
int crypto_get_default_rng(void)
{
struct crypto_rng *rng;
int err;
mutex_lock(&crypto_default_rng_lock);
if (!crypto_default_rng) {
rng = crypto_alloc_rng("stdrng", 0, 0);
err = PTR_ERR(rng);
if (IS_ERR(rng))
goto unlock;
err = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
if (err) {
crypto_free_rng(rng);
goto unlock;
}
crypto_default_rng = rng;
}
crypto_default_rng_refcnt++;
err = 0;
unlock:
mutex_unlock(&crypto_default_rng_lock);
return err;
}
EXPORT_SYMBOL_GPL(crypto_get_default_rng);
void crypto_put_default_rng(void)
{
mutex_lock(&crypto_default_rng_lock);
crypto_default_rng_refcnt--;
mutex_unlock(&crypto_default_rng_lock);
}
EXPORT_SYMBOL_GPL(crypto_put_default_rng);
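/*
 * Typical in-kernel usage of the default RNG (added sketch, error
 * handling elided for brevity):
 *
 *	err = crypto_get_default_rng();
 *	if (!err) {
 *		err = crypto_rng_get_bytes(crypto_default_rng, buf, len);
 *		crypto_put_default_rng();
 *	}
 */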
#if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE)
int crypto_del_default_rng(void)
{
int err = -EBUSY;
mutex_lock(&crypto_default_rng_lock);
if (crypto_default_rng_refcnt)
goto out;
crypto_free_rng(crypto_default_rng);
crypto_default_rng = NULL;
err = 0;
out:
mutex_unlock(&crypto_default_rng_lock);
return err;
}
EXPORT_SYMBOL_GPL(crypto_del_default_rng);
#endif
int crypto_register_rng(struct rng_alg *alg)
{
struct crypto_istat_rng *istat = rng_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (alg->seedsize > PAGE_SIZE / 8)
return -EINVAL;
base->cra_type = &crypto_rng_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_RNG;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
memset(istat, 0, sizeof(*istat));
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_rng);
void crypto_unregister_rng(struct rng_alg *alg)
{
crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_rng);
int crypto_register_rngs(struct rng_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_register_rng(algs + i);
if (ret)
goto err;
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_rng(algs + i);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_rngs);
void crypto_unregister_rngs(struct rng_alg *algs, int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_unregister_rng(algs + i);
}
EXPORT_SYMBOL_GPL(crypto_unregister_rngs);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Random Number Generator");
| linux-master | crypto/rng.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SM2 asymmetric public-key algorithm
* as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and
* described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02
*
* Copyright (c) 2020, Alibaba Group.
* Authors: Tianjia Zhang <[email protected]>
*/
#include <linux/module.h>
#include <linux/mpi.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/hash.h>
#include <crypto/rng.h>
#include <crypto/sm2.h>
#include "sm2signature.asn1.h"
/* The default user id as specified in GM/T 0009-2012 */
#define SM2_DEFAULT_USERID "1234567812345678"
#define SM2_DEFAULT_USERID_LEN 16
#define MPI_NBYTES(m) ((mpi_get_nbits(m) + 7) / 8)
struct ecc_domain_parms {
const char *desc; /* Description of the curve. */
unsigned int nbits; /* Number of bits. */
unsigned int fips:1; /* True if this is a FIPS140-2 approved curve */
/* The model describing this curve. This is mainly used to select
* the group equation.
*/
enum gcry_mpi_ec_models model;
/* The actual ECC dialect used. This is used for curve specific
* optimizations and to select encodings etc.
*/
enum ecc_dialects dialect;
const char *p; /* The prime defining the field. */
const char *a, *b; /* The coefficients. For Twisted Edwards
* curves, b is used for d. For Montgomery
* curves, (a,b) holds ((A-2)/4, B^-1).
*/
const char *n; /* The order of the base point. */
const char *g_x, *g_y; /* Base point. */
unsigned int h; /* Cofactor. */
};
static const struct ecc_domain_parms sm2_ecp = {
.desc = "sm2p256v1",
.nbits = 256,
.fips = 0,
.model = MPI_EC_WEIERSTRASS,
.dialect = ECC_DIALECT_STANDARD,
.p = "0xfffffffeffffffffffffffffffffffffffffffff00000000ffffffffffffffff",
.a = "0xfffffffeffffffffffffffffffffffffffffffff00000000fffffffffffffffc",
.b = "0x28e9fa9e9d9f5e344d5a9e4bcf6509a7f39789f515ab8f92ddbcbd414d940e93",
.n = "0xfffffffeffffffffffffffffffffffff7203df6b21c6052b53bbf40939d54123",
.g_x = "0x32c4ae2c1f1981195f9904466a39c9948fe30bbff2660be1715a4589334c74c7",
.g_y = "0xbc3736a2f4f6779c59bdcee36b692153d0a9877cc62a474002df32e52139f0a0",
.h = 1
};
static int __sm2_set_pub_key(struct mpi_ec_ctx *ec,
const void *key, unsigned int keylen);
static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec)
{
const struct ecc_domain_parms *ecp = &sm2_ecp;
MPI p, a, b;
MPI x, y;
int rc = -EINVAL;
p = mpi_scanval(ecp->p);
a = mpi_scanval(ecp->a);
b = mpi_scanval(ecp->b);
if (!p || !a || !b)
goto free_p;
x = mpi_scanval(ecp->g_x);
y = mpi_scanval(ecp->g_y);
if (!x || !y)
goto free;
rc = -ENOMEM;
ec->Q = mpi_point_new(0);
if (!ec->Q)
goto free;
/* mpi_ec_setup_elliptic_curve */
ec->G = mpi_point_new(0);
if (!ec->G) {
mpi_point_release(ec->Q);
goto free;
}
mpi_set(ec->G->x, x);
mpi_set(ec->G->y, y);
mpi_set_ui(ec->G->z, 1);
rc = -EINVAL;
ec->n = mpi_scanval(ecp->n);
if (!ec->n) {
mpi_point_release(ec->Q);
mpi_point_release(ec->G);
goto free;
}
ec->h = ecp->h;
ec->name = ecp->desc;
mpi_ec_init(ec, ecp->model, ecp->dialect, 0, p, a, b);
rc = 0;
free:
mpi_free(x);
mpi_free(y);
free_p:
mpi_free(p);
mpi_free(a);
mpi_free(b);
return rc;
}
static void sm2_ec_ctx_deinit(struct mpi_ec_ctx *ec)
{
mpi_ec_deinit(ec);
memset(ec, 0, sizeof(*ec));
}
/* RESULT must have been initialized and is set on success to the
* point given by VALUE.
*/
static int sm2_ecc_os2ec(MPI_POINT result, MPI value)
{
int rc;
size_t n;
unsigned char *buf;
MPI x, y;
n = MPI_NBYTES(value);
buf = kmalloc(n, GFP_KERNEL);
if (!buf)
return -ENOMEM;
rc = mpi_print(GCRYMPI_FMT_USG, buf, n, &n, value);
if (rc)
goto err_freebuf;
rc = -EINVAL;
if (n < 1 || ((n - 1) % 2))
goto err_freebuf;
/* No support for point compression */
if (*buf != 0x4)
goto err_freebuf;
rc = -ENOMEM;
n = (n - 1) / 2;
x = mpi_read_raw_data(buf + 1, n);
if (!x)
goto err_freebuf;
y = mpi_read_raw_data(buf + 1 + n, n);
if (!y)
goto err_freex;
mpi_normalize(x);
mpi_normalize(y);
mpi_set(result->x, x);
mpi_set(result->y, y);
mpi_set_ui(result->z, 1);
rc = 0;
mpi_free(y);
err_freex:
mpi_free(x);
err_freebuf:
kfree(buf);
return rc;
}
struct sm2_signature_ctx {
MPI sig_r;
MPI sig_s;
};
int sm2_get_signature_r(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct sm2_signature_ctx *sig = context;
if (!value || !vlen)
return -EINVAL;
sig->sig_r = mpi_read_raw_data(value, vlen);
if (!sig->sig_r)
return -ENOMEM;
return 0;
}
int sm2_get_signature_s(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct sm2_signature_ctx *sig = context;
if (!value || !vlen)
return -EINVAL;
sig->sig_s = mpi_read_raw_data(value, vlen);
if (!sig->sig_s)
return -ENOMEM;
return 0;
}
static int sm2_z_digest_update(struct shash_desc *desc,
MPI m, unsigned int pbytes)
{
static const unsigned char zero[32];
unsigned char *in;
unsigned int inlen;
int err;
in = mpi_get_buffer(m, &inlen, NULL);
if (!in)
return -EINVAL;
if (inlen < pbytes) {
/* padding with zero */
err = crypto_shash_update(desc, zero, pbytes - inlen) ?:
crypto_shash_update(desc, in, inlen);
} else if (inlen > pbytes) {
/* skip the starting zero */
err = crypto_shash_update(desc, in + inlen - pbytes, pbytes);
} else {
err = crypto_shash_update(desc, in, inlen);
}
kfree(in);
return err;
}
static int sm2_z_digest_update_point(struct shash_desc *desc,
MPI_POINT point, struct mpi_ec_ctx *ec,
unsigned int pbytes)
{
MPI x, y;
int ret = -EINVAL;
x = mpi_new(0);
y = mpi_new(0);
ret = mpi_ec_get_affine(x, y, point, ec) ? -EINVAL :
sm2_z_digest_update(desc, x, pbytes) ?:
sm2_z_digest_update(desc, y, pbytes);
mpi_free(x);
mpi_free(y);
return ret;
}
int sm2_compute_z_digest(struct shash_desc *desc,
const void *key, unsigned int keylen, void *dgst)
{
struct mpi_ec_ctx *ec;
unsigned int bits_len;
unsigned int pbytes;
u8 entl[2];
int err;
ec = kmalloc(sizeof(*ec), GFP_KERNEL);
if (!ec)
return -ENOMEM;
err = sm2_ec_ctx_init(ec);
if (err)
goto out_free_ec;
err = __sm2_set_pub_key(ec, key, keylen);
if (err)
goto out_deinit_ec;
bits_len = SM2_DEFAULT_USERID_LEN * 8;
entl[0] = bits_len >> 8;
entl[1] = bits_len & 0xff;
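/* (added note) the default 16-byte id gives bits_len = 128, i.e. entl = { 0x00, 0x80 } */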
pbytes = MPI_NBYTES(ec->p);
/* ZA = H256(ENTLA | IDA | a | b | xG | yG | xA | yA) */
err = crypto_shash_init(desc);
if (err)
goto out_deinit_ec;
err = crypto_shash_update(desc, entl, 2);
if (err)
goto out_deinit_ec;
err = crypto_shash_update(desc, SM2_DEFAULT_USERID,
SM2_DEFAULT_USERID_LEN);
if (err)
goto out_deinit_ec;
err = sm2_z_digest_update(desc, ec->a, pbytes) ?:
sm2_z_digest_update(desc, ec->b, pbytes) ?:
sm2_z_digest_update_point(desc, ec->G, ec, pbytes) ?:
sm2_z_digest_update_point(desc, ec->Q, ec, pbytes);
if (err)
goto out_deinit_ec;
err = crypto_shash_final(desc, dgst);
out_deinit_ec:
sm2_ec_ctx_deinit(ec);
out_free_ec:
kfree(ec);
return err;
}
EXPORT_SYMBOL_GPL(sm2_compute_z_digest);
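/*
 * Summary of the computation below (added note; SM2 verification as
 * specified in GM/T 0003.2-2012): compute t = (r + s) mod n, the point
 * (x1, y1) = [s]G + [t]P, and R = (e + x1) mod n; the signature is
 * valid iff R equals r.
 */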
static int _sm2_verify(struct mpi_ec_ctx *ec, MPI hash, MPI sig_r, MPI sig_s)
{
int rc = -EINVAL;
struct gcry_mpi_point sG, tP;
MPI t = NULL;
MPI x1 = NULL, y1 = NULL;
mpi_point_init(&sG);
mpi_point_init(&tP);
x1 = mpi_new(0);
y1 = mpi_new(0);
t = mpi_new(0);
/* r, s in [1, n-1] */
if (mpi_cmp_ui(sig_r, 1) < 0 || mpi_cmp(sig_r, ec->n) > 0 ||
mpi_cmp_ui(sig_s, 1) < 0 || mpi_cmp(sig_s, ec->n) > 0) {
goto leave;
}
/* t = (r + s) % n; reject the signature if t == 0 */
mpi_addm(t, sig_r, sig_s, ec->n);
if (mpi_cmp_ui(t, 0) == 0)
goto leave;
/* sG + tP = (x1, y1) */
rc = -EBADMSG;
mpi_ec_mul_point(&sG, sig_s, ec->G, ec);
mpi_ec_mul_point(&tP, t, ec->Q, ec);
mpi_ec_add_points(&sG, &sG, &tP, ec);
if (mpi_ec_get_affine(x1, y1, &sG, ec))
goto leave;
/* R = (e + x1) % n */
mpi_addm(t, hash, x1, ec->n);
/* check R == r */
rc = -EKEYREJECTED;
if (mpi_cmp(t, sig_r))
goto leave;
rc = 0;
leave:
mpi_point_free_parts(&sG);
mpi_point_free_parts(&tP);
mpi_free(x1);
mpi_free(y1);
mpi_free(t);
return rc;
}
static int sm2_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
unsigned char *buffer;
struct sm2_signature_ctx sig;
MPI hash;
int ret;
if (unlikely(!ec->Q))
return -EINVAL;
buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
sg_pcopy_to_buffer(req->src,
sg_nents_for_len(req->src, req->src_len + req->dst_len),
buffer, req->src_len + req->dst_len, 0);
sig.sig_r = NULL;
sig.sig_s = NULL;
ret = asn1_ber_decoder(&sm2signature_decoder, &sig,
buffer, req->src_len);
if (ret)
goto error;
ret = -ENOMEM;
hash = mpi_read_raw_data(buffer + req->src_len, req->dst_len);
if (!hash)
goto error;
ret = _sm2_verify(ec, hash, sig.sig_r, sig.sig_s);
mpi_free(hash);
error:
mpi_free(sig.sig_r);
mpi_free(sig.sig_s);
kfree(buffer);
return ret;
}
static int sm2_set_pub_key(struct crypto_akcipher *tfm,
const void *key, unsigned int keylen)
{
struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
return __sm2_set_pub_key(ec, key, keylen);
}
static int __sm2_set_pub_key(struct mpi_ec_ctx *ec,
const void *key, unsigned int keylen)
{
MPI a;
int rc;
/* include the uncompressed flag '0x04' */
a = mpi_read_raw_data(key, keylen);
if (!a)
return -ENOMEM;
mpi_normalize(a);
rc = sm2_ecc_os2ec(ec->Q, a);
mpi_free(a);
return rc;
}
static unsigned int sm2_max_size(struct crypto_akcipher *tfm)
{
/* No inherent limit applies; report PAGE_SIZE as a practical bound */
return PAGE_SIZE;
}
static int sm2_init_tfm(struct crypto_akcipher *tfm)
{
struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
return sm2_ec_ctx_init(ec);
}
static void sm2_exit_tfm(struct crypto_akcipher *tfm)
{
struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
sm2_ec_ctx_deinit(ec);
}
static struct akcipher_alg sm2 = {
.verify = sm2_verify,
.set_pub_key = sm2_set_pub_key,
.max_size = sm2_max_size,
.init = sm2_init_tfm,
.exit = sm2_exit_tfm,
.base = {
.cra_name = "sm2",
.cra_driver_name = "sm2-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct mpi_ec_ctx),
},
};
static int __init sm2_init(void)
{
return crypto_register_akcipher(&sm2);
}
static void __exit sm2_exit(void)
{
crypto_unregister_akcipher(&sm2);
}
subsys_initcall(sm2_init);
module_exit(sm2_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tianjia Zhang <[email protected]>");
MODULE_DESCRIPTION("SM2 generic algorithm");
MODULE_ALIAS_CRYPTO("sm2-generic");
| linux-master | crypto/sm2.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* X.509 certificate parser
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) "X.509: "fmt
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/oid_registry.h>
#include <crypto/public_key.h>
#include "x509_parser.h"
#include "x509.asn1.h"
#include "x509_akid.asn1.h"
struct x509_parse_context {
struct x509_certificate *cert; /* Certificate being constructed */
unsigned long data; /* Start of data */
const void *key; /* Key data */
size_t key_size; /* Size of key data */
const void *params; /* Key parameters */
size_t params_size; /* Size of key parameters */
enum OID key_algo; /* Algorithm used by the cert's key */
enum OID last_oid; /* Last OID encountered */
enum OID sig_algo; /* Algorithm used to sign the cert */
u8 o_size; /* Size of organizationName (O) */
u8 cn_size; /* Size of commonName (CN) */
u8 email_size; /* Size of emailAddress */
u16 o_offset; /* Offset of organizationName (O) */
u16 cn_offset; /* Offset of commonName (CN) */
u16 email_offset; /* Offset of emailAddress */
unsigned raw_akid_size;
const void *raw_akid; /* Raw authorityKeyId in ASN.1 */
const void *akid_raw_issuer; /* Raw directoryName in authorityKeyId */
unsigned akid_raw_issuer_size;
};
/*
* Free an X.509 certificate
*/
void x509_free_certificate(struct x509_certificate *cert)
{
if (cert) {
public_key_free(cert->pub);
public_key_signature_free(cert->sig);
kfree(cert->issuer);
kfree(cert->subject);
kfree(cert->id);
kfree(cert->skid);
kfree(cert);
}
}
EXPORT_SYMBOL_GPL(x509_free_certificate);
/*
* Parse an X.509 certificate
*/
struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
{
struct x509_certificate *cert;
struct x509_parse_context *ctx;
struct asymmetric_key_id *kid;
long ret;
ret = -ENOMEM;
cert = kzalloc(sizeof(struct x509_certificate), GFP_KERNEL);
if (!cert)
goto error_no_cert;
cert->pub = kzalloc(sizeof(struct public_key), GFP_KERNEL);
if (!cert->pub)
goto error_no_ctx;
cert->sig = kzalloc(sizeof(struct public_key_signature), GFP_KERNEL);
if (!cert->sig)
goto error_no_ctx;
ctx = kzalloc(sizeof(struct x509_parse_context), GFP_KERNEL);
if (!ctx)
goto error_no_ctx;
ctx->cert = cert;
ctx->data = (unsigned long)data;
/* Attempt to decode the certificate */
ret = asn1_ber_decoder(&x509_decoder, ctx, data, datalen);
if (ret < 0)
goto error_decode;
/* Decode the AuthorityKeyIdentifier */
if (ctx->raw_akid) {
pr_devel("AKID: %u %*phN\n",
ctx->raw_akid_size, ctx->raw_akid_size, ctx->raw_akid);
ret = asn1_ber_decoder(&x509_akid_decoder, ctx,
ctx->raw_akid, ctx->raw_akid_size);
if (ret < 0) {
pr_warn("Couldn't decode AuthKeyIdentifier\n");
goto error_decode;
}
}
ret = -ENOMEM;
cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL);
if (!cert->pub->key)
goto error_decode;
cert->pub->keylen = ctx->key_size;
cert->pub->params = kmemdup(ctx->params, ctx->params_size, GFP_KERNEL);
if (!cert->pub->params)
goto error_decode;
cert->pub->paramlen = ctx->params_size;
cert->pub->algo = ctx->key_algo;
/* Grab the signature bits */
ret = x509_get_sig_params(cert);
if (ret < 0)
goto error_decode;
/* Generate cert issuer + serial number key ID */
kid = asymmetric_key_generate_id(cert->raw_serial,
cert->raw_serial_size,
cert->raw_issuer,
cert->raw_issuer_size);
if (IS_ERR(kid)) {
ret = PTR_ERR(kid);
goto error_decode;
}
cert->id = kid;
/* Detect self-signed certificates */
ret = x509_check_for_self_signed(cert);
if (ret < 0)
goto error_decode;
kfree(ctx);
return cert;
error_decode:
kfree(ctx);
error_no_ctx:
x509_free_certificate(cert);
error_no_cert:
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(x509_cert_parse);
/*
* Note an OID when we find one for later processing when we know how
* to interpret it.
*/
int x509_note_OID(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
ctx->last_oid = look_up_OID(value, vlen);
if (ctx->last_oid == OID__NR) {
char buffer[50];
sprint_oid(value, vlen, buffer, sizeof(buffer));
pr_debug("Unknown OID: [%lu] %s\n",
(unsigned long)value - ctx->data, buffer);
}
return 0;
}
/*
* Save the position of the TBS data so that we can check the signature over it
* later.
*/
int x509_note_tbs_certificate(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
pr_debug("x509_note_tbs_certificate(,%zu,%02x,%ld,%zu)!\n",
hdrlen, tag, (unsigned long)value - ctx->data, vlen);
ctx->cert->tbs = value - hdrlen;
ctx->cert->tbs_size = vlen + hdrlen;
return 0;
}
/*
* Record the algorithm that was used to sign this certificate.
*/
int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
pr_debug("PubKey Algo: %u\n", ctx->last_oid);
switch (ctx->last_oid) {
case OID_md2WithRSAEncryption:
case OID_md3WithRSAEncryption:
default:
return -ENOPKG; /* Unsupported combination */
case OID_md4WithRSAEncryption:
ctx->cert->sig->hash_algo = "md4";
goto rsa_pkcs1;
case OID_sha1WithRSAEncryption:
ctx->cert->sig->hash_algo = "sha1";
goto rsa_pkcs1;
case OID_sha256WithRSAEncryption:
ctx->cert->sig->hash_algo = "sha256";
goto rsa_pkcs1;
case OID_sha384WithRSAEncryption:
ctx->cert->sig->hash_algo = "sha384";
goto rsa_pkcs1;
case OID_sha512WithRSAEncryption:
ctx->cert->sig->hash_algo = "sha512";
goto rsa_pkcs1;
case OID_sha224WithRSAEncryption:
ctx->cert->sig->hash_algo = "sha224";
goto rsa_pkcs1;
case OID_id_ecdsa_with_sha1:
ctx->cert->sig->hash_algo = "sha1";
goto ecdsa;
case OID_id_ecdsa_with_sha224:
ctx->cert->sig->hash_algo = "sha224";
goto ecdsa;
case OID_id_ecdsa_with_sha256:
ctx->cert->sig->hash_algo = "sha256";
goto ecdsa;
case OID_id_ecdsa_with_sha384:
ctx->cert->sig->hash_algo = "sha384";
goto ecdsa;
case OID_id_ecdsa_with_sha512:
ctx->cert->sig->hash_algo = "sha512";
goto ecdsa;
case OID_gost2012Signature256:
ctx->cert->sig->hash_algo = "streebog256";
goto ecrdsa;
case OID_gost2012Signature512:
ctx->cert->sig->hash_algo = "streebog512";
goto ecrdsa;
case OID_SM2_with_SM3:
ctx->cert->sig->hash_algo = "sm3";
goto sm2;
}
rsa_pkcs1:
ctx->cert->sig->pkey_algo = "rsa";
ctx->cert->sig->encoding = "pkcs1";
ctx->sig_algo = ctx->last_oid;
return 0;
ecrdsa:
ctx->cert->sig->pkey_algo = "ecrdsa";
ctx->cert->sig->encoding = "raw";
ctx->sig_algo = ctx->last_oid;
return 0;
sm2:
ctx->cert->sig->pkey_algo = "sm2";
ctx->cert->sig->encoding = "raw";
ctx->sig_algo = ctx->last_oid;
return 0;
ecdsa:
ctx->cert->sig->pkey_algo = "ecdsa";
ctx->cert->sig->encoding = "x962";
ctx->sig_algo = ctx->last_oid;
return 0;
}
/*
* Note the whereabouts and type of the signature.
*/
int x509_note_signature(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
pr_debug("Signature: alg=%u, size=%zu\n", ctx->last_oid, vlen);
/*
* In X.509 certificates, the signature's algorithm is stored in two
* places: inside the TBSCertificate (the data that is signed), and
* alongside the signature. These *must* match.
*/
if (ctx->last_oid != ctx->sig_algo) {
pr_warn("signatureAlgorithm (%u) differs from tbsCertificate.signature (%u)\n",
ctx->last_oid, ctx->sig_algo);
return -EINVAL;
}
if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0 ||
strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0 ||
strcmp(ctx->cert->sig->pkey_algo, "sm2") == 0 ||
strcmp(ctx->cert->sig->pkey_algo, "ecdsa") == 0) {
/* Discard the BIT STRING metadata */
if (vlen < 1 || *(const u8 *)value != 0)
return -EBADMSG;
value++;
vlen--;
}
ctx->cert->raw_sig = value;
ctx->cert->raw_sig_size = vlen;
return 0;
}
/*
* Note the certificate serial number
*/
int x509_note_serial(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
ctx->cert->raw_serial = value;
ctx->cert->raw_serial_size = vlen;
return 0;
}
/*
* Note some of the name segments from which we'll fabricate a name.
*/
int x509_extract_name_segment(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
switch (ctx->last_oid) {
case OID_commonName:
ctx->cn_size = vlen;
ctx->cn_offset = (unsigned long)value - ctx->data;
break;
case OID_organizationName:
ctx->o_size = vlen;
ctx->o_offset = (unsigned long)value - ctx->data;
break;
case OID_email_address:
ctx->email_size = vlen;
ctx->email_offset = (unsigned long)value - ctx->data;
break;
default:
break;
}
return 0;
}
/*
* Fabricate and save the issuer and subject names
*/
static int x509_fabricate_name(struct x509_parse_context *ctx, size_t hdrlen,
unsigned char tag,
char **_name, size_t vlen)
{
const void *name, *data = (const void *)ctx->data;
size_t namesize;
char *buffer;
if (*_name)
return -EINVAL;
/* Empty name string if no material */
if (!ctx->cn_size && !ctx->o_size && !ctx->email_size) {
buffer = kmalloc(1, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
buffer[0] = 0;
goto done;
}
if (ctx->cn_size && ctx->o_size) {
/* Consider combining O and CN, but use only the CN if it is
* prefixed by the O, or a significant portion thereof.
*/
namesize = ctx->cn_size;
name = data + ctx->cn_offset;
if (ctx->cn_size >= ctx->o_size &&
memcmp(data + ctx->cn_offset, data + ctx->o_offset,
ctx->o_size) == 0)
goto single_component;
if (ctx->cn_size >= 7 &&
ctx->o_size >= 7 &&
memcmp(data + ctx->cn_offset, data + ctx->o_offset, 7) == 0)
goto single_component;
buffer = kmalloc(ctx->o_size + 2 + ctx->cn_size + 1,
GFP_KERNEL);
if (!buffer)
return -ENOMEM;
memcpy(buffer,
data + ctx->o_offset, ctx->o_size);
buffer[ctx->o_size + 0] = ':';
buffer[ctx->o_size + 1] = ' ';
memcpy(buffer + ctx->o_size + 2,
data + ctx->cn_offset, ctx->cn_size);
buffer[ctx->o_size + 2 + ctx->cn_size] = 0;
goto done;
} else if (ctx->cn_size) {
namesize = ctx->cn_size;
name = data + ctx->cn_offset;
} else if (ctx->o_size) {
namesize = ctx->o_size;
name = data + ctx->o_offset;
} else {
namesize = ctx->email_size;
name = data + ctx->email_offset;
}
single_component:
buffer = kmalloc(namesize + 1, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
memcpy(buffer, name, namesize);
buffer[namesize] = 0;
done:
*_name = buffer;
ctx->cn_size = 0;
ctx->o_size = 0;
ctx->email_size = 0;
return 0;
}
int x509_note_issuer(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
struct asymmetric_key_id *kid;
ctx->cert->raw_issuer = value;
ctx->cert->raw_issuer_size = vlen;
if (!ctx->cert->sig->auth_ids[2]) {
kid = asymmetric_key_generate_id(value, vlen, "", 0);
if (IS_ERR(kid))
return PTR_ERR(kid);
ctx->cert->sig->auth_ids[2] = kid;
}
return x509_fabricate_name(ctx, hdrlen, tag, &ctx->cert->issuer, vlen);
}
int x509_note_subject(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
ctx->cert->raw_subject = value;
ctx->cert->raw_subject_size = vlen;
return x509_fabricate_name(ctx, hdrlen, tag, &ctx->cert->subject, vlen);
}
/*
* Extract the parameters for the public key
*/
int x509_note_params(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
/*
* AlgorithmIdentifier occurs three times in an X.509 certificate. Skip
* the first and ignore the third; use the second one, which sits after
* the subject and before subjectPublicKey.
*/
if (!ctx->cert->raw_subject || ctx->key)
return 0;
ctx->params = value - hdrlen;
ctx->params_size = vlen + hdrlen;
return 0;
}
/*
* Extract the data for the public key algorithm
*/
int x509_extract_key_data(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
enum OID oid;
ctx->key_algo = ctx->last_oid;
switch (ctx->last_oid) {
case OID_rsaEncryption:
ctx->cert->pub->pkey_algo = "rsa";
break;
case OID_gost2012PKey256:
case OID_gost2012PKey512:
ctx->cert->pub->pkey_algo = "ecrdsa";
break;
case OID_sm2:
ctx->cert->pub->pkey_algo = "sm2";
break;
case OID_id_ecPublicKey:
if (parse_OID(ctx->params, ctx->params_size, &oid) != 0)
return -EBADMSG;
switch (oid) {
case OID_sm2:
ctx->cert->pub->pkey_algo = "sm2";
break;
case OID_id_prime192v1:
ctx->cert->pub->pkey_algo = "ecdsa-nist-p192";
break;
case OID_id_prime256v1:
ctx->cert->pub->pkey_algo = "ecdsa-nist-p256";
break;
case OID_id_ansip384r1:
ctx->cert->pub->pkey_algo = "ecdsa-nist-p384";
break;
default:
return -ENOPKG;
}
break;
default:
return -ENOPKG;
}
/* Discard the BIT STRING metadata */
if (vlen < 1 || *(const u8 *)value != 0)
return -EBADMSG;
ctx->key = value + 1;
ctx->key_size = vlen - 1;
return 0;
}
/* The keyIdentifier in AuthorityKeyIdentifier SEQUENCE is tag(CONT,PRIM,0) */
#define SEQ_TAG_KEYID (ASN1_CONT << 6)
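/* (added note) ASN1_CONT << 6 == 0x80: context-specific class, primitive, tag number 0 */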
/*
* Process certificate extensions that are used to qualify the certificate.
*/
int x509_process_extension(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
struct asymmetric_key_id *kid;
const unsigned char *v = value;
pr_debug("Extension: %u\n", ctx->last_oid);
if (ctx->last_oid == OID_subjectKeyIdentifier) {
/* Get hold of the key fingerprint */
if (ctx->cert->skid || vlen < 3)
return -EBADMSG;
if (v[0] != ASN1_OTS || v[1] != vlen - 2)
return -EBADMSG;
v += 2;
vlen -= 2;
ctx->cert->raw_skid_size = vlen;
ctx->cert->raw_skid = v;
kid = asymmetric_key_generate_id(v, vlen, "", 0);
if (IS_ERR(kid))
return PTR_ERR(kid);
ctx->cert->skid = kid;
pr_debug("subjkeyid %*phN\n", kid->len, kid->data);
return 0;
}
if (ctx->last_oid == OID_keyUsage) {
/*
* Get hold of the keyUsage bit string
* v[1] is the encoding size
* (Expect either 0x02 or 0x03, making it 1 or 2 bytes)
* v[2] is the number of unused bits in the bit string
* (If v[2] >= 3 while v[1] == 0x02, keyCertSign cannot be set)
* v[3] and possibly v[4] contain the bit string
*
* From RFC 5280 4.2.1.3:
* 0x04 is where keyCertSign lands in this bit string
* 0x80 is where digitalSignature lands in this bit string
*/
if (v[0] != ASN1_BTS)
return -EBADMSG;
if (vlen < 4)
return -EBADMSG;
if (v[2] >= 8)
return -EBADMSG;
if (v[3] & 0x80)
ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_DIGITALSIG;
if (v[1] == 0x02 && v[2] <= 2 && (v[3] & 0x04))
ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_KEYCERTSIGN;
else if (vlen > 4 && v[1] == 0x03 && (v[3] & 0x04))
ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_KEYCERTSIGN;
return 0;
}
if (ctx->last_oid == OID_authorityKeyIdentifier) {
/* Get hold of the CA key fingerprint */
ctx->raw_akid = v;
ctx->raw_akid_size = vlen;
return 0;
}
if (ctx->last_oid == OID_basicConstraints) {
/*
* Get hold of the basicConstraints
* v[1] is the encoding size
* (Expect 0x2 or greater, making it 1 or more bytes)
* v[2] is the encoding type
* (Expect an ASN1_BOOL for the CA)
* v[3] is the contents of the ASN1_BOOL
* (Expect 1 if the CA is TRUE)
* vlen should match the entire extension size
*/
if (v[0] != (ASN1_CONS_BIT | ASN1_SEQ))
return -EBADMSG;
if (vlen < 2)
return -EBADMSG;
if (v[1] != vlen - 2)
return -EBADMSG;
if (vlen >= 4 && v[1] != 0 && v[2] == ASN1_BOOL && v[3] == 1)
ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_CA;
return 0;
}
return 0;
}
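/*
 * Illustrative sketch (compiled out; ASYMKEYS_DOC_EXAMPLES is a
 * hypothetical guard, never defined): a concrete keyUsage encoding as
 * consumed by the checks above.  BIT STRING { digitalSignature,
 * keyCertSign } encodes as 03 02 02 84: tag, length 2, two unused
 * bits, then one content byte with digitalSignature at 0x80 and
 * keyCertSign at 0x04.
 */
#ifdef ASYMKEYS_DOC_EXAMPLES
static void x509_key_usage_example(void)
{
	static const u8 v[] = { 0x03, 0x02, 0x02, 0x84 };
	bool digitalsig = v[3] & 0x80;				/* true */
	bool keycertsign = v[1] == 0x02 && v[2] <= 2 &&
			   (v[3] & 0x04);			/* true */

	(void)digitalsig;
	(void)keycertsign;
}
#endif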
/**
* x509_decode_time - Decode an X.509 time ASN.1 object
* @_t: The time to fill in
* @hdrlen: The length of the object header
* @tag: The object tag
* @value: The object value
* @vlen: The size of the object value
*
* Decode an ASN.1 universal time or generalised time field into a struct the
* kernel can handle and check it for validity. The time is decoded thus:
*
* [RFC5280 §4.1.2.5]
* CAs conforming to this profile MUST always encode certificate validity
* dates through the year 2049 as UTCTime; certificate validity dates in
* 2050 or later MUST be encoded as GeneralizedTime. Conforming
* applications MUST be able to process validity dates that are encoded in
* either UTCTime or GeneralizedTime.
*/
int x509_decode_time(time64_t *_t, size_t hdrlen,
unsigned char tag,
const unsigned char *value, size_t vlen)
{
static const unsigned char month_lengths[] = { 31, 28, 31, 30, 31, 30,
31, 31, 30, 31, 30, 31 };
const unsigned char *p = value;
unsigned year, mon, day, hour, min, sec, mon_len;
#define dec2bin(X) ({ unsigned char x = (X) - '0'; if (x > 9) goto invalid_time; x; })
#define DD2bin(P) ({ unsigned x = dec2bin(P[0]) * 10 + dec2bin(P[1]); P += 2; x; })
if (tag == ASN1_UNITIM) {
/* UTCTime: YYMMDDHHMMSSZ */
if (vlen != 13)
goto unsupported_time;
year = DD2bin(p);
if (year >= 50)
year += 1900;
else
year += 2000;
} else if (tag == ASN1_GENTIM) {
/* GenTime: YYYYMMDDHHMMSSZ */
if (vlen != 15)
goto unsupported_time;
year = DD2bin(p) * 100;
year += DD2bin(p); /* split to sequence the two DD2bin() side effects */
if (year >= 1950 && year <= 2049)
goto invalid_time;
} else {
goto unsupported_time;
}
mon = DD2bin(p);
day = DD2bin(p);
hour = DD2bin(p);
min = DD2bin(p);
sec = DD2bin(p);
if (*p != 'Z')
goto unsupported_time;
if (year < 1970 ||
mon < 1 || mon > 12)
goto invalid_time;
mon_len = month_lengths[mon - 1];
if (mon == 2) {
if (year % 4 == 0) {
mon_len = 29;
if (year % 100 == 0) {
mon_len = 28;
if (year % 400 == 0)
mon_len = 29;
}
}
}
if (day < 1 || day > mon_len ||
hour > 24 || /* ISO 8601 permits 24:00:00 as midnight tomorrow */
min > 59 ||
sec > 60) /* ISO 8601 permits leap seconds [X.680 46.3] */
goto invalid_time;
*_t = mktime64(year, mon, day, hour, min, sec);
return 0;
unsupported_time:
pr_debug("Got unsupported time [tag %02x]: '%*phN'\n",
tag, (int)vlen, value);
return -EBADMSG;
invalid_time:
pr_debug("Got invalid time [tag %02x]: '%*phN'\n",
tag, (int)vlen, value);
return -EBADMSG;
}
EXPORT_SYMBOL_GPL(x509_decode_time);
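/*
 * Illustrative sketch (compiled out; ASYMKEYS_DOC_EXAMPLES is a
 * hypothetical guard, never defined): the UTCTime two-digit year pivot
 * and the Gregorian leap-year rule applied by x509_decode_time() above,
 * e.g. "49..." maps to 2049 while "50..." maps to 1950, and 2000 is a
 * leap year while 1900 is not.
 */
#ifdef ASYMKEYS_DOC_EXAMPLES
static unsigned int utctime_year_example(unsigned int yy)
{
	return yy >= 50 ? yy + 1900 : yy + 2000;
}

static bool leap_year_example(unsigned int year)
{
	return (year % 4 == 0 && year % 100 != 0) || year % 400 == 0;
}
#endif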
int x509_note_not_before(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
return x509_decode_time(&ctx->cert->valid_from, hdrlen, tag, value, vlen);
}
int x509_note_not_after(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
return x509_decode_time(&ctx->cert->valid_to, hdrlen, tag, value, vlen);
}
/*
* Note a key identifier-based AuthorityKeyIdentifier
*/
int x509_akid_note_kid(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
struct asymmetric_key_id *kid;
pr_debug("AKID: keyid: %*phN\n", (int)vlen, value);
if (ctx->cert->sig->auth_ids[1])
return 0;
kid = asymmetric_key_generate_id(value, vlen, "", 0);
if (IS_ERR(kid))
return PTR_ERR(kid);
pr_debug("authkeyid %*phN\n", kid->len, kid->data);
ctx->cert->sig->auth_ids[1] = kid;
return 0;
}
/*
* Note a directoryName in an AuthorityKeyIdentifier
*/
int x509_akid_note_name(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
pr_debug("AKID: name: %*phN\n", (int)vlen, value);
ctx->akid_raw_issuer = value;
ctx->akid_raw_issuer_size = vlen;
return 0;
}
/*
* Note a serial number in an AuthorityKeyIdentifier
*/
int x509_akid_note_serial(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct x509_parse_context *ctx = context;
struct asymmetric_key_id *kid;
pr_debug("AKID: serial: %*phN\n", (int)vlen, value);
if (!ctx->akid_raw_issuer || ctx->cert->sig->auth_ids[0])
return 0;
kid = asymmetric_key_generate_id(value,
vlen,
ctx->akid_raw_issuer,
ctx->akid_raw_issuer_size);
if (IS_ERR(kid))
return PTR_ERR(kid);
pr_debug("authkeyid %*phN\n", kid->len, kid->data);
ctx->cert->sig->auth_ids[0] = kid;
return 0;
}
| linux-master | crypto/asymmetric_keys/x509_cert_parser.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* In-software asymmetric public-key crypto subtype
*
* See Documentation/crypto/asymmetric-keys.rst
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) "PKEY: "fmt
#include <crypto/akcipher.h>
#include <crypto/public_key.h>
#include <crypto/sig.h>
#include <keys/asymmetric-subtype.h>
#include <linux/asn1.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
MODULE_DESCRIPTION("In-software asymmetric public-key subtype");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
/*
* Provide a part of a description of the key for /proc/keys.
*/
static void public_key_describe(const struct key *asymmetric_key,
struct seq_file *m)
{
struct public_key *key = asymmetric_key->payload.data[asym_crypto];
if (key)
seq_printf(m, "%s.%s", key->id_type, key->pkey_algo);
}
/*
* Destroy a public key algorithm key.
*/
void public_key_free(struct public_key *key)
{
if (key) {
kfree_sensitive(key->key);
kfree(key->params);
kfree(key);
}
}
EXPORT_SYMBOL_GPL(public_key_free);
/*
* Destroy a public key algorithm key.
*/
static void public_key_destroy(void *payload0, void *payload3)
{
public_key_free(payload0);
public_key_signature_free(payload3);
}
/*
* Given a public_key, and an encoding and hash_algo to be used for signing
* and/or verification with that key, determine the name of the corresponding
* akcipher algorithm. Also check that encoding and hash_algo are allowed.
*/
static int
software_key_determine_akcipher(const struct public_key *pkey,
const char *encoding, const char *hash_algo,
char alg_name[CRYPTO_MAX_ALG_NAME], bool *sig,
enum kernel_pkey_operation op)
{
int n;
*sig = true;
if (!encoding)
return -EINVAL;
if (strcmp(pkey->pkey_algo, "rsa") == 0) {
/*
* RSA signatures usually use EMSA-PKCS1-1_5 [RFC3447 sec 8.2].
*/
if (strcmp(encoding, "pkcs1") == 0) {
if (!hash_algo) {
*sig = false;
n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
"pkcs1pad(%s)",
pkey->pkey_algo);
} else {
*sig = op == kernel_pkey_sign ||
op == kernel_pkey_verify;
n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
"pkcs1pad(%s,%s)",
pkey->pkey_algo, hash_algo);
}
return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0;
}
if (strcmp(encoding, "raw") != 0)
return -EINVAL;
/*
* Raw RSA cannot differentiate between different hash
* algorithms.
*/
if (hash_algo)
return -EINVAL;
*sig = false;
} else if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) {
if (strcmp(encoding, "x962") != 0)
return -EINVAL;
/*
* ECDSA signatures are taken over a raw hash, so they don't
* differentiate between different hash algorithms. That means
* that the verifier should hard-code a specific hash algorithm.
* Unfortunately, in practice ECDSA is used with multiple SHAs,
* so we have to allow all of them and not just one.
*/
if (!hash_algo)
return -EINVAL;
if (strcmp(hash_algo, "sha1") != 0 &&
strcmp(hash_algo, "sha224") != 0 &&
strcmp(hash_algo, "sha256") != 0 &&
strcmp(hash_algo, "sha384") != 0 &&
strcmp(hash_algo, "sha512") != 0)
return -EINVAL;
} else if (strcmp(pkey->pkey_algo, "sm2") == 0) {
if (strcmp(encoding, "raw") != 0)
return -EINVAL;
if (!hash_algo)
return -EINVAL;
if (strcmp(hash_algo, "sm3") != 0)
return -EINVAL;
} else if (strcmp(pkey->pkey_algo, "ecrdsa") == 0) {
if (strcmp(encoding, "raw") != 0)
return -EINVAL;
if (!hash_algo)
return -EINVAL;
if (strcmp(hash_algo, "streebog256") != 0 &&
strcmp(hash_algo, "streebog512") != 0)
return -EINVAL;
} else {
/* Unknown public key algorithm */
return -ENOPKG;
}
if (strscpy(alg_name, pkey->pkey_algo, CRYPTO_MAX_ALG_NAME) < 0)
return -EINVAL;
return 0;
}
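/*
 * Illustrative sketch (compiled out; ASYMKEYS_DOC_EXAMPLES is a
 * hypothetical guard, never defined): the algorithm names produced
 * above.  An RSA key with "pkcs1" encoding and a "sha256" hash yields
 * "pkcs1pad(rsa,sha256)", while the ECDSA/SM2/EC-RDSA branches fall
 * through to the bare pkey_algo string via strscpy().
 */
#ifdef ASYMKEYS_DOC_EXAMPLES
static void akcipher_name_example(void)
{
	char alg_name[CRYPTO_MAX_ALG_NAME];

	snprintf(alg_name, sizeof(alg_name), "pkcs1pad(%s,%s)",
		 "rsa", "sha256");	/* -> "pkcs1pad(rsa,sha256)" */
}
#endif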
static u8 *pkey_pack_u32(u8 *dst, u32 val)
{
memcpy(dst, &val, sizeof(val));
return dst + sizeof(val);
}
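/*
 * Illustrative sketch (compiled out; ASYMKEYS_DOC_EXAMPLES is a
 * hypothetical guard, never defined): the setkey blob that the callers
 * of pkey_pack_u32() assemble is laid out as
 *
 *	[keylen bytes of key] [u32 algo] [u32 paramlen] [paramlen bytes]
 *
 * which is why they allocate keylen + sizeof(u32) * 2 + paramlen bytes.
 */
#ifdef ASYMKEYS_DOC_EXAMPLES
static size_t packed_key_size_example(const struct public_key *pkey)
{
	return pkey->keylen + sizeof(u32) * 2 + pkey->paramlen;
}
#endif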
/*
* Query information about a key.
*/
static int software_key_query(const struct kernel_pkey_params *params,
struct kernel_pkey_query *info)
{
struct crypto_akcipher *tfm;
struct public_key *pkey = params->key->payload.data[asym_crypto];
char alg_name[CRYPTO_MAX_ALG_NAME];
struct crypto_sig *sig;
u8 *key, *ptr;
int ret, len;
bool issig;
ret = software_key_determine_akcipher(pkey, params->encoding,
params->hash_algo, alg_name,
&issig, kernel_pkey_sign);
if (ret < 0)
return ret;
key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
GFP_KERNEL);
if (!key)
return -ENOMEM;
memcpy(key, pkey->key, pkey->keylen);
ptr = key + pkey->keylen;
ptr = pkey_pack_u32(ptr, pkey->algo);
ptr = pkey_pack_u32(ptr, pkey->paramlen);
memcpy(ptr, pkey->params, pkey->paramlen);
if (issig) {
sig = crypto_alloc_sig(alg_name, 0, 0);
if (IS_ERR(sig)) {
ret = PTR_ERR(sig);
goto error_free_key;
}
if (pkey->key_is_private)
ret = crypto_sig_set_privkey(sig, key, pkey->keylen);
else
ret = crypto_sig_set_pubkey(sig, key, pkey->keylen);
if (ret < 0)
goto error_free_tfm;
len = crypto_sig_maxsize(sig);
info->supported_ops = KEYCTL_SUPPORTS_VERIFY;
if (pkey->key_is_private)
info->supported_ops |= KEYCTL_SUPPORTS_SIGN;
if (strcmp(params->encoding, "pkcs1") == 0) {
info->supported_ops |= KEYCTL_SUPPORTS_ENCRYPT;
if (pkey->key_is_private)
info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT;
}
} else {
tfm = crypto_alloc_akcipher(alg_name, 0, 0);
if (IS_ERR(tfm)) {
ret = PTR_ERR(tfm);
goto error_free_key;
}
if (pkey->key_is_private)
ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
else
ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
if (ret < 0)
goto error_free_tfm;
len = crypto_akcipher_maxsize(tfm);
info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT;
if (pkey->key_is_private)
info->supported_ops |= KEYCTL_SUPPORTS_DECRYPT;
}
info->key_size = len * 8;
if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) {
/*
* ECDSA key sizes are much smaller than RSA ones, so the key could
* operate on (hashed) inputs that are larger than the key size;
* for example, SHA384-hashed input used with secp256r1-based
* keys. Set max_data_size to be at least as large as the largest
* supported hash size (SHA512).
*/
info->max_data_size = 64;
/*
* Verify takes ECDSA-Sig (described in RFC 5480) as input,
* which is actually 2 'key_size'-bit integers encoded in
* ASN.1. Account for the ASN.1 encoding overhead here.
*/
info->max_sig_size = 2 * (len + 3) + 2;
} else {
info->max_data_size = len;
info->max_sig_size = len;
}
info->max_enc_size = len;
info->max_dec_size = len;
ret = 0;
error_free_tfm:
if (issig)
crypto_free_sig(sig);
else
crypto_free_akcipher(tfm);
error_free_key:
kfree_sensitive(key);
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
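/*
 * Illustrative sketch (compiled out; ASYMKEYS_DOC_EXAMPLES is a
 * hypothetical guard, never defined): the ECDSA signature size bound
 * computed above.  Each of the two INTEGERs needs at most len + 3 bytes
 * (tag, length and a possible leading zero octet on top of len value
 * bytes) and the outer SEQUENCE header adds 2 more, so for a P-256 key
 * with len = 32 the bound is 2 * (32 + 3) + 2 = 72 bytes.
 */
#ifdef ASYMKEYS_DOC_EXAMPLES
static unsigned int ecdsa_max_sig_size_example(unsigned int len)
{
	return 2 * (len + 3) + 2;	/* len = 32 -> 72 */
}
#endif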
/*
* Do encryption, decryption and signing ops.
*/
static int software_key_eds_op(struct kernel_pkey_params *params,
const void *in, void *out)
{
const struct public_key *pkey = params->key->payload.data[asym_crypto];
char alg_name[CRYPTO_MAX_ALG_NAME];
struct crypto_akcipher *tfm;
struct crypto_sig *sig;
char *key, *ptr;
bool issig;
int ksz;
int ret;
pr_devel("==>%s()\n", __func__);
ret = software_key_determine_akcipher(pkey, params->encoding,
params->hash_algo, alg_name,
&issig, params->op);
if (ret < 0)
return ret;
key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
GFP_KERNEL);
if (!key)
return -ENOMEM;
memcpy(key, pkey->key, pkey->keylen);
ptr = key + pkey->keylen;
ptr = pkey_pack_u32(ptr, pkey->algo);
ptr = pkey_pack_u32(ptr, pkey->paramlen);
memcpy(ptr, pkey->params, pkey->paramlen);
if (issig) {
sig = crypto_alloc_sig(alg_name, 0, 0);
if (IS_ERR(sig)) {
ret = PTR_ERR(sig);
goto error_free_key;
}
if (pkey->key_is_private)
ret = crypto_sig_set_privkey(sig, key, pkey->keylen);
else
ret = crypto_sig_set_pubkey(sig, key, pkey->keylen);
if (ret)
goto error_free_tfm;
ksz = crypto_sig_maxsize(sig);
} else {
tfm = crypto_alloc_akcipher(alg_name, 0, 0);
if (IS_ERR(tfm)) {
ret = PTR_ERR(tfm);
goto error_free_key;
}
if (pkey->key_is_private)
ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
else
ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
if (ret)
goto error_free_tfm;
ksz = crypto_akcipher_maxsize(tfm);
}
ret = -EINVAL;
/* Perform the encryption calculation. */
switch (params->op) {
case kernel_pkey_encrypt:
if (issig)
break;
ret = crypto_akcipher_sync_encrypt(tfm, in, params->in_len,
out, params->out_len);
break;
case kernel_pkey_decrypt:
if (issig)
break;
ret = crypto_akcipher_sync_decrypt(tfm, in, params->in_len,
out, params->out_len);
break;
case kernel_pkey_sign:
if (!issig)
break;
ret = crypto_sig_sign(sig, in, params->in_len,
out, params->out_len);
break;
default:
BUG();
}
if (ret == 0)
ret = ksz;
error_free_tfm:
if (issig)
crypto_free_sig(sig);
else
crypto_free_akcipher(tfm);
error_free_key:
kfree_sensitive(key);
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
/*
* Verify a signature using a public key.
*/
int public_key_verify_signature(const struct public_key *pkey,
const struct public_key_signature *sig)
{
char alg_name[CRYPTO_MAX_ALG_NAME];
struct crypto_sig *tfm;
char *key, *ptr;
bool issig;
int ret;
pr_devel("==>%s()\n", __func__);
BUG_ON(!pkey);
BUG_ON(!sig);
BUG_ON(!sig->s);
/*
* If the signature specifies a public key algorithm, it *must* match
* the key's actual public key algorithm.
*
* Small exception: ECDSA signatures don't specify the curve, but ECDSA
* keys do. So the strings can mismatch slightly in that case:
* "ecdsa-nist-*" for the key, but "ecdsa" for the signature.
*/
if (sig->pkey_algo) {
if (strcmp(pkey->pkey_algo, sig->pkey_algo) != 0 &&
(strncmp(pkey->pkey_algo, "ecdsa-", 6) != 0 ||
strcmp(sig->pkey_algo, "ecdsa") != 0))
return -EKEYREJECTED;
}
ret = software_key_determine_akcipher(pkey, sig->encoding,
sig->hash_algo, alg_name,
&issig, kernel_pkey_verify);
if (ret < 0)
return ret;
tfm = crypto_alloc_sig(alg_name, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
GFP_KERNEL);
if (!key) {
ret = -ENOMEM;
goto error_free_tfm;
}
memcpy(key, pkey->key, pkey->keylen);
ptr = key + pkey->keylen;
ptr = pkey_pack_u32(ptr, pkey->algo);
ptr = pkey_pack_u32(ptr, pkey->paramlen);
memcpy(ptr, pkey->params, pkey->paramlen);
if (pkey->key_is_private)
ret = crypto_sig_set_privkey(tfm, key, pkey->keylen);
else
ret = crypto_sig_set_pubkey(tfm, key, pkey->keylen);
if (ret)
goto error_free_key;
ret = crypto_sig_verify(tfm, sig->s, sig->s_size,
sig->digest, sig->digest_size);
error_free_key:
kfree_sensitive(key);
error_free_tfm:
crypto_free_sig(tfm);
pr_devel("<==%s() = %d\n", __func__, ret);
if (WARN_ON_ONCE(ret > 0))
ret = -EINVAL;
return ret;
}
EXPORT_SYMBOL_GPL(public_key_verify_signature);
static int public_key_verify_signature_2(const struct key *key,
const struct public_key_signature *sig)
{
const struct public_key *pk = key->payload.data[asym_crypto];
return public_key_verify_signature(pk, sig);
}
/*
* Public key algorithm asymmetric key subtype
*/
struct asymmetric_key_subtype public_key_subtype = {
.owner = THIS_MODULE,
.name = "public_key",
.name_len = sizeof("public_key") - 1,
.describe = public_key_describe,
.destroy = public_key_destroy,
.query = software_key_query,
.eds_op = software_key_eds_op,
.verify_signature = public_key_verify_signature_2,
};
EXPORT_SYMBOL_GPL(public_key_subtype);
| linux-master | crypto/asymmetric_keys/public_key.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* PKCS#7 parser
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) "PKCS7: "fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/oid_registry.h>
#include <crypto/public_key.h>
#include "pkcs7_parser.h"
#include "pkcs7.asn1.h"
MODULE_DESCRIPTION("PKCS#7 parser");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
struct pkcs7_parse_context {
struct pkcs7_message *msg; /* Message being constructed */
struct pkcs7_signed_info *sinfo; /* SignedInfo being constructed */
struct pkcs7_signed_info **ppsinfo;
struct x509_certificate *certs; /* Certificate cache */
struct x509_certificate **ppcerts;
unsigned long data; /* Start of data */
enum OID last_oid; /* Last OID encountered */
unsigned x509_index;
unsigned sinfo_index;
const void *raw_serial;
unsigned raw_serial_size;
unsigned raw_issuer_size;
const void *raw_issuer;
const void *raw_skid;
unsigned raw_skid_size;
bool expect_skid;
};
/*
* Free a signed information block.
*/
static void pkcs7_free_signed_info(struct pkcs7_signed_info *sinfo)
{
if (sinfo) {
public_key_signature_free(sinfo->sig);
kfree(sinfo);
}
}
/**
* pkcs7_free_message - Free a PKCS#7 message
* @pkcs7: The PKCS#7 message to free
*/
void pkcs7_free_message(struct pkcs7_message *pkcs7)
{
struct x509_certificate *cert;
struct pkcs7_signed_info *sinfo;
if (pkcs7) {
while (pkcs7->certs) {
cert = pkcs7->certs;
pkcs7->certs = cert->next;
x509_free_certificate(cert);
}
while (pkcs7->crl) {
cert = pkcs7->crl;
pkcs7->crl = cert->next;
x509_free_certificate(cert);
}
while (pkcs7->signed_infos) {
sinfo = pkcs7->signed_infos;
pkcs7->signed_infos = sinfo->next;
pkcs7_free_signed_info(sinfo);
}
kfree(pkcs7);
}
}
EXPORT_SYMBOL_GPL(pkcs7_free_message);
/*
* Check authenticatedAttributes are provided or not provided consistently.
*/
static int pkcs7_check_authattrs(struct pkcs7_message *msg)
{
struct pkcs7_signed_info *sinfo;
bool want = false;
sinfo = msg->signed_infos;
if (!sinfo)
goto inconsistent;
if (sinfo->authattrs) {
want = true;
msg->have_authattrs = true;
}
for (sinfo = sinfo->next; sinfo; sinfo = sinfo->next)
if (!!sinfo->authattrs != want)
goto inconsistent;
return 0;
inconsistent:
pr_warn("Inconsistently supplied authAttrs\n");
return -EINVAL;
}
/**
* pkcs7_parse_message - Parse a PKCS#7 message
* @data: The raw binary ASN.1 encoded message to be parsed
* @datalen: The size of the encoded message
*/
struct pkcs7_message *pkcs7_parse_message(const void *data, size_t datalen)
{
struct pkcs7_parse_context *ctx;
struct pkcs7_message *msg = ERR_PTR(-ENOMEM);
int ret;
ctx = kzalloc(sizeof(struct pkcs7_parse_context), GFP_KERNEL);
if (!ctx)
goto out_no_ctx;
ctx->msg = kzalloc(sizeof(struct pkcs7_message), GFP_KERNEL);
if (!ctx->msg)
goto out_no_msg;
ctx->sinfo = kzalloc(sizeof(struct pkcs7_signed_info), GFP_KERNEL);
if (!ctx->sinfo)
goto out_no_sinfo;
ctx->sinfo->sig = kzalloc(sizeof(struct public_key_signature),
GFP_KERNEL);
if (!ctx->sinfo->sig)
goto out_no_sig;
ctx->data = (unsigned long)data;
ctx->ppcerts = &ctx->certs;
ctx->ppsinfo = &ctx->msg->signed_infos;
/* Attempt to decode the signature */
ret = asn1_ber_decoder(&pkcs7_decoder, ctx, data, datalen);
if (ret < 0) {
msg = ERR_PTR(ret);
goto out;
}
ret = pkcs7_check_authattrs(ctx->msg);
if (ret < 0) {
msg = ERR_PTR(ret);
goto out;
}
msg = ctx->msg;
ctx->msg = NULL;
out:
while (ctx->certs) {
struct x509_certificate *cert = ctx->certs;
ctx->certs = cert->next;
x509_free_certificate(cert);
}
out_no_sig:
pkcs7_free_signed_info(ctx->sinfo);
out_no_sinfo:
pkcs7_free_message(ctx->msg);
out_no_msg:
kfree(ctx);
out_no_ctx:
return msg;
}
EXPORT_SYMBOL_GPL(pkcs7_parse_message);
/**
* pkcs7_get_content_data - Get access to the PKCS#7 content
* @pkcs7: The preparsed PKCS#7 message to access
* @_data: Place to return a pointer to the data
* @_data_len: Place to return the data length
* @_headerlen: Size of ASN.1 header not included in _data
*
* Get access to the data content of the PKCS#7 message. The size of the
* header of the ASN.1 object that contains it is also provided and can be used
* to adjust *_data and *_data_len to get the entire object.
*
* Returns -ENODATA if the data object was missing from the message.
*/
int pkcs7_get_content_data(const struct pkcs7_message *pkcs7,
const void **_data, size_t *_data_len,
size_t *_headerlen)
{
if (!pkcs7->data)
return -ENODATA;
*_data = pkcs7->data;
*_data_len = pkcs7->data_len;
if (_headerlen)
*_headerlen = pkcs7->data_hdrlen;
return 0;
}
EXPORT_SYMBOL_GPL(pkcs7_get_content_data);
/*
* Note an OID when we find one for later processing when we know how
* to interpret it.
*/
int pkcs7_note_OID(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
ctx->last_oid = look_up_OID(value, vlen);
if (ctx->last_oid == OID__NR) {
char buffer[50];
sprint_oid(value, vlen, buffer, sizeof(buffer));
printk("PKCS7: Unknown OID: [%lu] %s\n",
(unsigned long)value - ctx->data, buffer);
}
return 0;
}
/*
* Note the digest algorithm for the signature.
*/
int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
switch (ctx->last_oid) {
case OID_md4:
ctx->sinfo->sig->hash_algo = "md4";
break;
case OID_md5:
ctx->sinfo->sig->hash_algo = "md5";
break;
case OID_sha1:
ctx->sinfo->sig->hash_algo = "sha1";
break;
case OID_sha256:
ctx->sinfo->sig->hash_algo = "sha256";
break;
case OID_sha384:
ctx->sinfo->sig->hash_algo = "sha384";
break;
case OID_sha512:
ctx->sinfo->sig->hash_algo = "sha512";
break;
case OID_sha224:
ctx->sinfo->sig->hash_algo = "sha224";
break;
case OID_sm3:
ctx->sinfo->sig->hash_algo = "sm3";
break;
case OID_gost2012Digest256:
ctx->sinfo->sig->hash_algo = "streebog256";
break;
case OID_gost2012Digest512:
ctx->sinfo->sig->hash_algo = "streebog512";
break;
default:
printk("Unsupported digest algo: %u\n", ctx->last_oid);
return -ENOPKG;
}
return 0;
}
/*
* Note the public key algorithm for the signature.
*/
int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
switch (ctx->last_oid) {
case OID_rsaEncryption:
ctx->sinfo->sig->pkey_algo = "rsa";
ctx->sinfo->sig->encoding = "pkcs1";
break;
case OID_id_ecdsa_with_sha1:
case OID_id_ecdsa_with_sha224:
case OID_id_ecdsa_with_sha256:
case OID_id_ecdsa_with_sha384:
case OID_id_ecdsa_with_sha512:
ctx->sinfo->sig->pkey_algo = "ecdsa";
ctx->sinfo->sig->encoding = "x962";
break;
case OID_SM2_with_SM3:
ctx->sinfo->sig->pkey_algo = "sm2";
ctx->sinfo->sig->encoding = "raw";
break;
case OID_gost2012PKey256:
case OID_gost2012PKey512:
ctx->sinfo->sig->pkey_algo = "ecrdsa";
ctx->sinfo->sig->encoding = "raw";
break;
default:
printk("Unsupported pkey algo: %u\n", ctx->last_oid);
return -ENOPKG;
}
return 0;
}
/*
* We only support signed data [RFC2315 sec 9].
*/
int pkcs7_check_content_type(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
if (ctx->last_oid != OID_signed_data) {
pr_warn("Only support pkcs7_signedData type\n");
return -EINVAL;
}
return 0;
}
/*
* Note the SignedData version
*/
int pkcs7_note_signeddata_version(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
unsigned version;
if (vlen != 1)
goto unsupported;
ctx->msg->version = version = *(const u8 *)value;
switch (version) {
case 1:
/* PKCS#7 SignedData [RFC2315 sec 9.1]
* CMS ver 1 SignedData [RFC5652 sec 5.1]
*/
break;
case 3:
/* CMS ver 3 SignedData [RFC5652 sec 5.1] */
break;
default:
goto unsupported;
}
return 0;
unsupported:
pr_warn("Unsupported SignedData version\n");
return -EINVAL;
}
/*
* Note the SignerInfo version
*/
int pkcs7_note_signerinfo_version(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
unsigned version;
if (vlen != 1)
goto unsupported;
version = *(const u8 *)value;
switch (version) {
case 1:
/* PKCS#7 SignerInfo [RFC2315 sec 9.2]
* CMS ver 1 SignerInfo [RFC5652 sec 5.3]
*/
if (ctx->msg->version != 1)
goto version_mismatch;
ctx->expect_skid = false;
break;
case 3:
/* CMS ver 3 SignerInfo [RFC5652 sec 5.3] */
if (ctx->msg->version == 1)
goto version_mismatch;
ctx->expect_skid = true;
break;
default:
goto unsupported;
}
return 0;
unsupported:
pr_warn("Unsupported SignerInfo version\n");
return -EINVAL;
version_mismatch:
pr_warn("SignedData-SignerInfo version mismatch\n");
return -EBADMSG;
}
/*
* Extract a certificate and store it in the context.
*/
int pkcs7_extract_cert(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
struct x509_certificate *x509;
if (tag != ((ASN1_UNIV << 6) | ASN1_CONS_BIT | ASN1_SEQ)) {
pr_debug("Cert began with tag %02x at %lu\n",
tag, (unsigned long)ctx - ctx->data);
return -EBADMSG;
}
/* We have to correct for the header so that the X.509 parser can start
* from the beginning. Note that since X.509 stipulates DER, there
* probably shouldn't be an EOC trailer - but it is in PKCS#7 (which
* stipulates BER).
*/
value -= hdrlen;
vlen += hdrlen;
if (((u8 *)value)[1] == 0x80)
vlen += 2; /* Indefinite length - there should be an EOC */
x509 = x509_cert_parse(value, vlen);
if (IS_ERR(x509))
return PTR_ERR(x509);
x509->index = ++ctx->x509_index;
pr_debug("Got cert %u for %s\n", x509->index, x509->subject);
pr_debug("- fingerprint %*phN\n", x509->id->len, x509->id->data);
*ctx->ppcerts = x509;
ctx->ppcerts = &x509->next;
return 0;
}
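/*
 * Illustrative sketch (compiled out; ASYMKEYS_DOC_EXAMPLES is a
 * hypothetical guard, never defined): the header fix-up above.  A BER
 * indefinite-length certificate wrapper begins 30 80 and is closed by
 * two end-of-contents octets (00 00), so after backing value up over
 * the 2-byte header, vlen grows by 2 to cover the EOC trailer.
 */
#ifdef ASYMKEYS_DOC_EXAMPLES
static bool ber_indefinite_example(const u8 *hdr)
{
	return hdr[0] == 0x30 && hdr[1] == 0x80;
}
#endif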
/*
* Save the certificate list
*/
int pkcs7_note_certificate_list(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
pr_devel("Got cert list (%02x)\n", tag);
*ctx->ppcerts = ctx->msg->certs;
ctx->msg->certs = ctx->certs;
ctx->certs = NULL;
ctx->ppcerts = &ctx->certs;
return 0;
}
/*
* Note the content type.
*/
int pkcs7_note_content(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
if (ctx->last_oid != OID_data &&
ctx->last_oid != OID_msIndirectData) {
pr_warn("Unsupported data type %d\n", ctx->last_oid);
return -EINVAL;
}
ctx->msg->data_type = ctx->last_oid;
return 0;
}
/*
* Extract the data from the message and store that and its content type OID in
* the context.
*/
int pkcs7_note_data(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
pr_debug("Got data\n");
ctx->msg->data = value;
ctx->msg->data_len = vlen;
ctx->msg->data_hdrlen = hdrlen;
return 0;
}
/*
* Parse authenticated attributes.
*/
int pkcs7_sig_note_authenticated_attr(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
struct pkcs7_signed_info *sinfo = ctx->sinfo;
enum OID content_type;
pr_devel("AuthAttr: %02x %zu [%*ph]\n", tag, vlen, (unsigned)vlen, value);
switch (ctx->last_oid) {
case OID_contentType:
if (__test_and_set_bit(sinfo_has_content_type, &sinfo->aa_set))
goto repeated;
content_type = look_up_OID(value, vlen);
if (content_type != ctx->msg->data_type) {
pr_warn("Mismatch between global data type (%d) and sinfo %u (%d)\n",
ctx->msg->data_type, sinfo->index,
content_type);
return -EBADMSG;
}
return 0;
case OID_signingTime:
if (__test_and_set_bit(sinfo_has_signing_time, &sinfo->aa_set))
goto repeated;
/* Should we check that the signing time is consistent
* with the signer's X.509 cert?
*/
return x509_decode_time(&sinfo->signing_time,
hdrlen, tag, value, vlen);
case OID_messageDigest:
if (__test_and_set_bit(sinfo_has_message_digest, &sinfo->aa_set))
goto repeated;
if (tag != ASN1_OTS)
return -EBADMSG;
sinfo->msgdigest = value;
sinfo->msgdigest_len = vlen;
return 0;
case OID_smimeCapabilites:
if (__test_and_set_bit(sinfo_has_smime_caps, &sinfo->aa_set))
goto repeated;
if (ctx->msg->data_type != OID_msIndirectData) {
pr_warn("S/MIME Caps only allowed with Authenticode\n");
return -EKEYREJECTED;
}
return 0;
/* Microsoft SpOpusInfo seems to contain cont[0] 16-bit BE
* char URLs and cont[1] 8-bit char URLs.
*
* Microsoft StatementType seems to contain a list of OIDs that
* are also used as extendedKeyUsage types in X.509 certs.
*/
case OID_msSpOpusInfo:
if (__test_and_set_bit(sinfo_has_ms_opus_info, &sinfo->aa_set))
goto repeated;
goto authenticode_check;
case OID_msStatementType:
if (__test_and_set_bit(sinfo_has_ms_statement_type, &sinfo->aa_set))
goto repeated;
authenticode_check:
if (ctx->msg->data_type != OID_msIndirectData) {
pr_warn("Authenticode AuthAttrs only allowed with Authenticode\n");
return -EKEYREJECTED;
}
/* I'm not sure how to validate these */
return 0;
default:
return 0;
}
repeated:
/* We permit max one item per AuthenticatedAttribute and no repeats */
pr_warn("Repeated/multivalue AuthAttrs not permitted\n");
return -EKEYREJECTED;
}
/*
* Note the set of auth attributes for digestion purposes [RFC2315 sec 9.3]
*/
int pkcs7_sig_note_set_of_authattrs(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
struct pkcs7_signed_info *sinfo = ctx->sinfo;
if (!test_bit(sinfo_has_content_type, &sinfo->aa_set) ||
!test_bit(sinfo_has_message_digest, &sinfo->aa_set)) {
pr_warn("Missing required AuthAttr\n");
return -EBADMSG;
}
if (ctx->msg->data_type != OID_msIndirectData &&
test_bit(sinfo_has_ms_opus_info, &sinfo->aa_set)) {
pr_warn("Unexpected Authenticode AuthAttr\n");
return -EBADMSG;
}
/* We need to switch the 'CONT 0' to a 'SET OF' when we digest */
sinfo->authattrs = value - (hdrlen - 1);
sinfo->authattrs_len = vlen + (hdrlen - 1);
return 0;
}
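/*
 * Illustrative sketch (compiled out; ASYMKEYS_DOC_EXAMPLES is a
 * hypothetical guard, never defined): why authattrs is backed up by
 * hdrlen - 1 above.  The attributes arrive tagged [0] IMPLICIT (0xa0),
 * but must be digested as a SET OF (0x31), so the saved region keeps
 * the length bytes while excluding the original tag; the digester can
 * then hash a substitute SET OF tag followed by this region.
 */
#ifdef ASYMKEYS_DOC_EXAMPLES
static void authattr_digest_example(const u8 *authattrs, size_t authattrs_len)
{
	u8 tag = ASN1_CONS_BIT | ASN1_SET;	/* 0x31 */

	/* hash 'tag', then the saved length bytes + contents */
	(void)tag;
	(void)authattrs;
	(void)authattrs_len;
}
#endif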
/*
* Note the issuing certificate serial number
*/
int pkcs7_sig_note_serial(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
ctx->raw_serial = value;
ctx->raw_serial_size = vlen;
return 0;
}
/*
* Note the issuer's name
*/
int pkcs7_sig_note_issuer(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
ctx->raw_issuer = value;
ctx->raw_issuer_size = vlen;
return 0;
}
/*
* Note the issuing cert's subjectKeyIdentifier
*/
int pkcs7_sig_note_skid(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
pr_devel("SKID: %02x %zu [%*ph]\n", tag, vlen, (unsigned)vlen, value);
ctx->raw_skid = value;
ctx->raw_skid_size = vlen;
return 0;
}
/*
* Note the signature data
*/
int pkcs7_sig_note_signature(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
ctx->sinfo->sig->s = kmemdup(value, vlen, GFP_KERNEL);
if (!ctx->sinfo->sig->s)
return -ENOMEM;
ctx->sinfo->sig->s_size = vlen;
return 0;
}
/*
* Note a signature information block
*/
int pkcs7_note_signed_info(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs7_parse_context *ctx = context;
struct pkcs7_signed_info *sinfo = ctx->sinfo;
struct asymmetric_key_id *kid;
if (ctx->msg->data_type == OID_msIndirectData && !sinfo->authattrs) {
pr_warn("Authenticode requires AuthAttrs\n");
return -EBADMSG;
}
/* Generate cert issuer + serial number key ID */
if (!ctx->expect_skid) {
kid = asymmetric_key_generate_id(ctx->raw_serial,
ctx->raw_serial_size,
ctx->raw_issuer,
ctx->raw_issuer_size);
} else {
kid = asymmetric_key_generate_id(ctx->raw_skid,
ctx->raw_skid_size,
"", 0);
}
if (IS_ERR(kid))
return PTR_ERR(kid);
pr_devel("SINFO KID: %u [%*phN]\n", kid->len, kid->len, kid->data);
sinfo->sig->auth_ids[0] = kid;
sinfo->index = ++ctx->sinfo_index;
*ctx->ppsinfo = sinfo;
ctx->ppsinfo = &sinfo->next;
ctx->sinfo = kzalloc(sizeof(struct pkcs7_signed_info), GFP_KERNEL);
if (!ctx->sinfo)
return -ENOMEM;
ctx->sinfo->sig = kzalloc(sizeof(struct public_key_signature),
GFP_KERNEL);
if (!ctx->sinfo->sig)
return -ENOMEM;
return 0;
}
| linux-master | crypto/asymmetric_keys/pkcs7_parser.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Testing module to load key from trusted PKCS#7 message
*
* Copyright (C) 2014 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) "PKCS7key: "fmt
#include <linux/key.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/verification.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PKCS#7 testing key type");
MODULE_AUTHOR("Red Hat, Inc.");
static unsigned pkcs7_usage;
module_param_named(usage, pkcs7_usage, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(pkcs7_usage,
"Usage to specify when verifying the PKCS#7 message");
/*
* Retrieve the PKCS#7 message content.
*/
static int pkcs7_view_content(void *ctx, const void *data, size_t len,
size_t asn1hdrlen)
{
struct key_preparsed_payload *prep = ctx;
const void *saved_prep_data;
size_t saved_prep_datalen;
int ret;
saved_prep_data = prep->data;
saved_prep_datalen = prep->datalen;
prep->data = data;
prep->datalen = len;
ret = user_preparse(prep);
prep->data = saved_prep_data;
prep->datalen = saved_prep_datalen;
return ret;
}
/*
* Preparse a PKCS#7 wrapped and validated data blob.
*/
static int pkcs7_preparse(struct key_preparsed_payload *prep)
{
enum key_being_used_for usage = pkcs7_usage;
if (usage >= NR__KEY_BEING_USED_FOR) {
pr_err("Invalid usage type %d\n", usage);
return -EINVAL;
}
return verify_pkcs7_signature(NULL, 0,
prep->data, prep->datalen,
VERIFY_USE_SECONDARY_KEYRING, usage,
pkcs7_view_content, prep);
}
/*
* user defined keys take an arbitrary string as the description and an
* arbitrary blob of data as the payload
*/
static struct key_type key_type_pkcs7 = {
.name = "pkcs7_test",
.preparse = pkcs7_preparse,
.free_preparse = user_free_preparse,
.instantiate = generic_key_instantiate,
.revoke = user_revoke,
.destroy = user_destroy,
.describe = user_describe,
.read = user_read,
};
/*
* Module stuff
*/
static int __init pkcs7_key_init(void)
{
return register_key_type(&key_type_pkcs7);
}
static void __exit pkcs7_key_cleanup(void)
{
unregister_key_type(&key_type_pkcs7);
}
module_init(pkcs7_key_init);
module_exit(pkcs7_key_cleanup);
| linux-master | crypto/asymmetric_keys/pkcs7_key_type.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Validate the trust chain of a PKCS#7 message.
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) "PKCS7: "fmt
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/asn1.h>
#include <linux/key.h>
#include <keys/asymmetric-type.h>
#include <crypto/public_key.h>
#include "pkcs7_parser.h"
/*
* Check the trust on one PKCS#7 SignedInfo block.
*/
static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
struct pkcs7_signed_info *sinfo,
struct key *trust_keyring)
{
struct public_key_signature *sig = sinfo->sig;
struct x509_certificate *x509, *last = NULL, *p;
struct key *key;
int ret;
kenter(",%u,", sinfo->index);
if (sinfo->unsupported_crypto) {
kleave(" = -ENOPKG [cached]");
return -ENOPKG;
}
for (x509 = sinfo->signer; x509; x509 = x509->signer) {
if (x509->seen) {
if (x509->verified)
goto verified;
kleave(" = -ENOKEY [cached]");
return -ENOKEY;
}
x509->seen = true;
/* Look to see if this certificate is present in the trusted
* keys.
*/
key = find_asymmetric_key(trust_keyring,
x509->id, x509->skid, NULL, false);
if (!IS_ERR(key)) {
/* One of the X.509 certificates in the PKCS#7 message
* is apparently the same as one we already trust.
* Verify that the trusted variant can also validate
* the signature on the descendant.
*/
pr_devel("sinfo %u: Cert %u as key %x\n",
sinfo->index, x509->index, key_serial(key));
goto matched;
}
if (key == ERR_PTR(-ENOMEM))
return -ENOMEM;
/* Self-signed certificates form roots of their own, and if we
* don't know them, then we can't accept them.
*/
if (x509->signer == x509) {
kleave(" = -ENOKEY [unknown self-signed]");
return -ENOKEY;
}
might_sleep();
last = x509;
sig = last->sig;
}
/* No match - see if the root certificate has a signer amongst the
* trusted keys.
*/
if (last && (last->sig->auth_ids[0] || last->sig->auth_ids[1])) {
key = find_asymmetric_key(trust_keyring,
last->sig->auth_ids[0],
last->sig->auth_ids[1],
NULL, false);
if (!IS_ERR(key)) {
x509 = last;
pr_devel("sinfo %u: Root cert %u signer is key %x\n",
sinfo->index, x509->index, key_serial(key));
goto matched;
}
if (PTR_ERR(key) != -ENOKEY)
return PTR_ERR(key);
}
/* As a last resort, see if we have a trusted public key that matches
* the signed info directly.
*/
key = find_asymmetric_key(trust_keyring,
sinfo->sig->auth_ids[0], NULL, NULL, false);
if (!IS_ERR(key)) {
pr_devel("sinfo %u: Direct signer is key %x\n",
sinfo->index, key_serial(key));
x509 = NULL;
sig = sinfo->sig;
goto matched;
}
if (PTR_ERR(key) != -ENOKEY)
return PTR_ERR(key);
kleave(" = -ENOKEY [no backref]");
return -ENOKEY;
matched:
ret = verify_signature(key, sig);
key_put(key);
if (ret < 0) {
if (ret == -ENOMEM)
return ret;
kleave(" = -EKEYREJECTED [verify %d]", ret);
return -EKEYREJECTED;
}
verified:
if (x509) {
x509->verified = true;
for (p = sinfo->signer; p != x509; p = p->signer)
p->verified = true;
}
kleave(" = 0");
return 0;
}
/**
* pkcs7_validate_trust - Validate PKCS#7 trust chain
* @pkcs7: The PKCS#7 certificate to validate
* @trust_keyring: Signing certificates to use as starting points
*
* Validate that the certificate chain inside the PKCS#7 message intersects
* keys we already know and trust.
*
* Returns, in order of descending priority:
*
* (*) -EKEYREJECTED if a signature failed to match for which we have a valid
* key, or:
*
* (*) 0 if at least one signature chain intersects with the keys in the trust
* keyring, or:
*
* (*) -ENOPKG if a suitable crypto module couldn't be found for a check on a
* chain.
*
* (*) -ENOKEY if we couldn't find a match for any of the signature chains in
* the message.
*
* May also return -ENOMEM.
*/
int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
struct key *trust_keyring)
{
struct pkcs7_signed_info *sinfo;
struct x509_certificate *p;
int cached_ret = -ENOKEY;
int ret;
for (p = pkcs7->certs; p; p = p->next)
p->seen = false;
for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) {
ret = pkcs7_validate_trust_one(pkcs7, sinfo, trust_keyring);
switch (ret) {
case -ENOKEY:
continue;
case -ENOPKG:
if (cached_ret == -ENOKEY)
cached_ret = -ENOPKG;
continue;
case 0:
cached_ret = 0;
continue;
default:
return ret;
}
}
return cached_ret;
}
EXPORT_SYMBOL_GPL(pkcs7_validate_trust);
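/*
 * Illustrative sketch (compiled out; ASYMKEYS_DOC_EXAMPLES is a
 * hypothetical guard, never defined): the result folding performed by
 * the loop above, which implements the documented priority
 * 0 > -ENOPKG > -ENOKEY across all SignedInfo blocks (other errors
 * abort the walk immediately).
 */
#ifdef ASYMKEYS_DOC_EXAMPLES
static int trust_result_fold_example(int cached_ret, int ret)
{
	if (ret == 0 || cached_ret == 0)
		return 0;
	if (ret == -ENOPKG || cached_ret == -ENOPKG)
		return -ENOPKG;
	return -ENOKEY;
}
#endif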
| linux-master | crypto/asymmetric_keys/pkcs7_trust.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Instantiate a public key crypto key from an X.509 Certificate
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) "X.509: "fmt
#include <crypto/hash.h>
#include <crypto/sm2.h>
#include <keys/asymmetric-parser.h>
#include <keys/asymmetric-subtype.h>
#include <keys/system_keyring.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "asymmetric_keys.h"
#include "x509_parser.h"
/*
* Set up the signature parameters in an X.509 certificate. This involves
* digesting the signed data and extracting the signature.
*/
int x509_get_sig_params(struct x509_certificate *cert)
{
struct public_key_signature *sig = cert->sig;
struct crypto_shash *tfm;
struct shash_desc *desc;
size_t desc_size;
int ret;
pr_devel("==>%s()\n", __func__);
sig->s = kmemdup(cert->raw_sig, cert->raw_sig_size, GFP_KERNEL);
if (!sig->s)
return -ENOMEM;
sig->s_size = cert->raw_sig_size;
/* Allocate the hashing algorithm we're going to need and find out how
* big the hash operational data will be.
*/
tfm = crypto_alloc_shash(sig->hash_algo, 0, 0);
if (IS_ERR(tfm)) {
if (PTR_ERR(tfm) == -ENOENT) {
cert->unsupported_sig = true;
return 0;
}
return PTR_ERR(tfm);
}
desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
sig->digest_size = crypto_shash_digestsize(tfm);
ret = -ENOMEM;
sig->digest = kmalloc(sig->digest_size, GFP_KERNEL);
if (!sig->digest)
goto error;
desc = kzalloc(desc_size, GFP_KERNEL);
if (!desc)
goto error;
desc->tfm = tfm;
if (strcmp(cert->pub->pkey_algo, "sm2") == 0) {
ret = strcmp(sig->hash_algo, "sm3") != 0 ? -EINVAL :
crypto_shash_init(desc) ?:
sm2_compute_z_digest(desc, cert->pub->key,
cert->pub->keylen, sig->digest) ?:
crypto_shash_init(desc) ?:
crypto_shash_update(desc, sig->digest,
sig->digest_size) ?:
crypto_shash_finup(desc, cert->tbs, cert->tbs_size,
sig->digest);
} else {
ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size,
sig->digest);
}
if (ret < 0)
goto error_2;
ret = is_hash_blacklisted(sig->digest, sig->digest_size,
BLACKLIST_HASH_X509_TBS);
if (ret == -EKEYREJECTED) {
pr_err("Cert %*phN is blacklisted\n",
sig->digest_size, sig->digest);
cert->blacklisted = true;
ret = 0;
}
error_2:
kfree(desc);
error:
crypto_free_shash(tfm);
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
/*
* Check for self-signedness in an X.509 cert and if found, check the signature
* immediately if we can.
*/
int x509_check_for_self_signed(struct x509_certificate *cert)
{
int ret = 0;
pr_devel("==>%s()\n", __func__);
if (cert->raw_subject_size != cert->raw_issuer_size ||
memcmp(cert->raw_subject, cert->raw_issuer,
cert->raw_issuer_size) != 0)
goto not_self_signed;
if (cert->sig->auth_ids[0] || cert->sig->auth_ids[1]) {
/* If the AKID is present it may have one or two parts. If
* both are supplied, both must match.
*/
bool a = asymmetric_key_id_same(cert->skid, cert->sig->auth_ids[1]);
bool b = asymmetric_key_id_same(cert->id, cert->sig->auth_ids[0]);
if (!a && !b)
goto not_self_signed;
ret = -EKEYREJECTED;
if (((a && !b) || (b && !a)) &&
cert->sig->auth_ids[0] && cert->sig->auth_ids[1])
goto out;
}
if (cert->unsupported_sig) {
ret = 0;
goto out;
}
ret = public_key_verify_signature(cert->pub, cert->sig);
if (ret < 0) {
if (ret == -ENOPKG) {
cert->unsupported_sig = true;
ret = 0;
}
goto out;
}
pr_devel("Cert Self-signature verified");
cert->self_signed = true;
out:
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
not_self_signed:
pr_devel("<==%s() = 0 [not]\n", __func__);
return 0;
}
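/*
 * Illustrative sketch (compiled out; ASYMKEYS_DOC_EXAMPLES is a
 * hypothetical guard, never defined): the AKID cross-check above as a
 * small decision helper.  Matching neither part means the cert is not
 * self-signed; matching exactly one part while the AKID supplies both
 * gets the cert rejected; anything else proceeds to signature
 * verification.
 */
#ifdef ASYMKEYS_DOC_EXAMPLES
static int self_signed_akid_example(bool skid_match, bool id_match,
				    bool have_serial_id, bool have_key_id)
{
	if (!skid_match && !id_match)
		return 1;			/* not self-signed */
	if (skid_match != id_match && have_serial_id && have_key_id)
		return -EKEYREJECTED;		/* half-matched AKID pair */
	return 0;				/* go on to verify the sig */
}
#endif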
/*
* Attempt to parse a data blob for a key as an X509 certificate.
*/
static int x509_key_preparse(struct key_preparsed_payload *prep)
{
struct asymmetric_key_ids *kids;
struct x509_certificate *cert;
const char *q;
size_t srlen, sulen;
char *desc = NULL, *p;
int ret;
cert = x509_cert_parse(prep->data, prep->datalen);
if (IS_ERR(cert))
return PTR_ERR(cert);
pr_devel("Cert Issuer: %s\n", cert->issuer);
pr_devel("Cert Subject: %s\n", cert->subject);
pr_devel("Cert Key Algo: %s\n", cert->pub->pkey_algo);
pr_devel("Cert Valid period: %lld-%lld\n", cert->valid_from, cert->valid_to);
cert->pub->id_type = "X509";
if (cert->unsupported_sig) {
public_key_signature_free(cert->sig);
cert->sig = NULL;
} else {
pr_devel("Cert Signature: %s + %s\n",
cert->sig->pkey_algo, cert->sig->hash_algo);
}
/* Don't permit addition of blacklisted keys */
ret = -EKEYREJECTED;
if (cert->blacklisted)
goto error_free_cert;
/* Propose a description */
sulen = strlen(cert->subject);
if (cert->raw_skid) {
srlen = cert->raw_skid_size;
q = cert->raw_skid;
} else {
srlen = cert->raw_serial_size;
q = cert->raw_serial;
}
ret = -ENOMEM;
desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL);
if (!desc)
goto error_free_cert;
p = memcpy(desc, cert->subject, sulen);
p += sulen;
*p++ = ':';
*p++ = ' ';
p = bin2hex(p, q, srlen);
*p = 0;
kids = kmalloc(sizeof(struct asymmetric_key_ids), GFP_KERNEL);
if (!kids)
goto error_free_desc;
kids->id[0] = cert->id;
kids->id[1] = cert->skid;
kids->id[2] = asymmetric_key_generate_id(cert->raw_subject,
cert->raw_subject_size,
"", 0);
if (IS_ERR(kids->id[2])) {
ret = PTR_ERR(kids->id[2]);
goto error_free_kids;
}
/* We're pinning the module by being linked against it */
__module_get(public_key_subtype.owner);
prep->payload.data[asym_subtype] = &public_key_subtype;
prep->payload.data[asym_key_ids] = kids;
prep->payload.data[asym_crypto] = cert->pub;
prep->payload.data[asym_auth] = cert->sig;
prep->description = desc;
prep->quotalen = 100;
/* We've finished with the certificate */
cert->pub = NULL;
cert->id = NULL;
cert->skid = NULL;
cert->sig = NULL;
desc = NULL;
kids = NULL;
ret = 0;
error_free_kids:
kfree(kids);
error_free_desc:
kfree(desc);
error_free_cert:
x509_free_certificate(cert);
return ret;
}
static struct asymmetric_key_parser x509_key_parser = {
.owner = THIS_MODULE,
.name = "x509",
.parse = x509_key_preparse,
};
/*
* Module stuff
*/
extern int __init fips_signature_selftest(void);
static int __init x509_key_init(void)
{
int ret;
ret = register_asymmetric_key_parser(&x509_key_parser);
if (ret < 0)
return ret;
return fips_signature_selftest();
}
static void __exit x509_key_exit(void)
{
unregister_asymmetric_key_parser(&x509_key_parser);
}
module_init(x509_key_init);
module_exit(x509_key_exit);
MODULE_DESCRIPTION("X.509 certificate parser");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
| linux-master | crypto/asymmetric_keys/x509_public_key.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/kernel.h>
#include <linux/key.h>
#include <keys/asymmetric-type.h>
int x509_load_certificate_list(const u8 cert_list[],
const unsigned long list_size,
const struct key *keyring)
{
key_ref_t key;
const u8 *p, *end;
size_t plen;
p = cert_list;
end = p + list_size;
while (p < end) {
/* Each cert begins with an ASN.1 SEQUENCE tag and must be more
* than 256 bytes in size.
*/
if (end - p < 4)
goto dodgy_cert;
if (p[0] != 0x30 ||
p[1] != 0x82)
goto dodgy_cert;
plen = (p[2] << 8) | p[3];
plen += 4;
if (plen > end - p)
goto dodgy_cert;
key = key_create_or_update(make_key_ref(keyring, 1),
"asymmetric",
NULL,
p,
plen,
((KEY_POS_ALL & ~KEY_POS_SETATTR) |
KEY_USR_VIEW | KEY_USR_READ),
KEY_ALLOC_NOT_IN_QUOTA |
KEY_ALLOC_BUILT_IN |
KEY_ALLOC_BYPASS_RESTRICTION);
if (IS_ERR(key)) {
pr_err("Problem loading in-kernel X.509 certificate (%ld)\n",
PTR_ERR(key));
} else {
pr_notice("Loaded X.509 cert '%s'\n",
key_ref_to_ptr(key)->description);
key_ref_put(key);
}
p += plen;
}
return 0;
dodgy_cert:
pr_err("Problem parsing in-kernel X.509 certificate list\n");
return 0;
}
EXPORT_SYMBOL_GPL(x509_load_certificate_list);
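/*
 * Illustrative sketch (compiled out; ASYMKEYS_DOC_EXAMPLES is a
 * hypothetical guard, never defined): the per-entry size computation
 * above.  Each list entry starts 30 82 HH LL, a SEQUENCE with a
 * two-byte long-form length, so the on-the-wire size of the entry is
 * ((HH << 8) | LL) plus the 4 header bytes.
 */
#ifdef ASYMKEYS_DOC_EXAMPLES
static size_t cert_entry_size_example(const u8 *p)
{
	return (size_t)(((p[2] << 8) | p[3]) + 4);
}
#endif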
| linux-master | crypto/asymmetric_keys/x509_loader.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* PKCS#8 Private Key parser [RFC 5208].
*
* Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) "PKCS8: "fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/oid_registry.h>
#include <keys/asymmetric-subtype.h>
#include <keys/asymmetric-parser.h>
#include <crypto/public_key.h>
#include "pkcs8.asn1.h"
struct pkcs8_parse_context {
struct public_key *pub;
unsigned long data; /* Start of data */
enum OID last_oid; /* Last OID encountered */
enum OID algo_oid; /* Algorithm OID */
u32 key_size;
const void *key;
};
/*
* Note an OID when we find one for later processing when we know how to
* interpret it.
*/
int pkcs8_note_OID(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs8_parse_context *ctx = context;
ctx->last_oid = look_up_OID(value, vlen);
if (ctx->last_oid == OID__NR) {
char buffer[50];
sprint_oid(value, vlen, buffer, sizeof(buffer));
pr_info("Unknown OID: [%lu] %s\n",
(unsigned long)value - ctx->data, buffer);
}
return 0;
}
/*
* Note the version number of the ASN.1 blob.
*/
int pkcs8_note_version(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
if (vlen != 1 || ((const u8 *)value)[0] != 0) {
pr_warn("Unsupported PKCS#8 version\n");
return -EBADMSG;
}
return 0;
}
/*
* Note the public algorithm.
*/
int pkcs8_note_algo(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs8_parse_context *ctx = context;
if (ctx->last_oid != OID_rsaEncryption)
return -ENOPKG;
ctx->pub->pkey_algo = "rsa";
return 0;
}
/*
* Note the key data of the ASN.1 blob.
*/
int pkcs8_note_key(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pkcs8_parse_context *ctx = context;
ctx->key = value;
ctx->key_size = vlen;
return 0;
}
/*
* Parse a PKCS#8 private key blob.
*/
static struct public_key *pkcs8_parse(const void *data, size_t datalen)
{
struct pkcs8_parse_context ctx;
struct public_key *pub;
long ret;
memset(&ctx, 0, sizeof(ctx));
ret = -ENOMEM;
ctx.pub = kzalloc(sizeof(struct public_key), GFP_KERNEL);
if (!ctx.pub)
goto error;
ctx.data = (unsigned long)data;
/* Attempt to decode the private key */
ret = asn1_ber_decoder(&pkcs8_decoder, &ctx, data, datalen);
if (ret < 0)
goto error_decode;
ret = -ENOMEM;
pub = ctx.pub;
pub->key = kmemdup(ctx.key, ctx.key_size, GFP_KERNEL);
if (!pub->key)
goto error_decode;
pub->keylen = ctx.key_size;
pub->key_is_private = true;
return pub;
error_decode:
kfree(ctx.pub);
error:
return ERR_PTR(ret);
}
/*
* Attempt to parse a data blob for a key as a PKCS#8 private key.
*/
static int pkcs8_key_preparse(struct key_preparsed_payload *prep)
{
struct public_key *pub;
pub = pkcs8_parse(prep->data, prep->datalen);
if (IS_ERR(pub))
return PTR_ERR(pub);
pr_devel("Cert Key Algo: %s\n", pub->pkey_algo);
pub->id_type = "PKCS8";
/* We're pinning the module by being linked against it */
__module_get(public_key_subtype.owner);
prep->payload.data[asym_subtype] = &public_key_subtype;
prep->payload.data[asym_key_ids] = NULL;
prep->payload.data[asym_crypto] = pub;
prep->payload.data[asym_auth] = NULL;
prep->quotalen = 100;
return 0;
}
static struct asymmetric_key_parser pkcs8_key_parser = {
.owner = THIS_MODULE,
.name = "pkcs8",
.parse = pkcs8_key_preparse,
};
/*
* Module stuff
*/
static int __init pkcs8_key_init(void)
{
return register_asymmetric_key_parser(&pkcs8_key_parser);
}
static void __exit pkcs8_key_exit(void)
{
unregister_asymmetric_key_parser(&pkcs8_key_parser);
}
module_init(pkcs8_key_init);
module_exit(pkcs8_key_exit);
MODULE_DESCRIPTION("PKCS#8 certificate parser");
MODULE_LICENSE("GPL");
| linux-master | crypto/asymmetric_keys/pkcs8_parser.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Signature verification with an asymmetric key
*
* See Documentation/crypto/asymmetric-keys.rst
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) "SIG: "fmt
#include <keys/asymmetric-subtype.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/keyctl.h>
#include <crypto/public_key.h>
#include <keys/user-type.h>
#include "asymmetric_keys.h"
/*
* Destroy a public key signature.
*/
void public_key_signature_free(struct public_key_signature *sig)
{
int i;
if (sig) {
for (i = 0; i < ARRAY_SIZE(sig->auth_ids); i++)
kfree(sig->auth_ids[i]);
kfree(sig->s);
kfree(sig->digest);
kfree(sig);
}
}
EXPORT_SYMBOL_GPL(public_key_signature_free);
/**
* query_asymmetric_key - Get information about an asymmetric key.
* @params: Various parameters.
* @info: Where to put the information.
*/
int query_asymmetric_key(const struct kernel_pkey_params *params,
struct kernel_pkey_query *info)
{
const struct asymmetric_key_subtype *subtype;
struct key *key = params->key;
int ret;
pr_devel("==>%s()\n", __func__);
if (key->type != &key_type_asymmetric)
return -EINVAL;
subtype = asymmetric_key_subtype(key);
if (!subtype ||
!key->payload.data[0])
return -EINVAL;
if (!subtype->query)
return -ENOTSUPP;
ret = subtype->query(params, info);
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
EXPORT_SYMBOL_GPL(query_asymmetric_key);
/**
* encrypt_blob - Encrypt data using an asymmetric key
* @params: Various parameters
* @data: Data blob to be encrypted, length params->data_len
* @enc: Encrypted data buffer, length params->enc_len
*
* Encrypt the specified data blob using the private key specified by
* params->key. The encrypted data is wrapped in an encoding if
* params->encoding is specified (eg. "pkcs1").
*
* Returns the length of the data placed in the encrypted data buffer or an
* error.
*/
int encrypt_blob(struct kernel_pkey_params *params,
const void *data, void *enc)
{
params->op = kernel_pkey_encrypt;
return asymmetric_key_eds_op(params, data, enc);
}
EXPORT_SYMBOL_GPL(encrypt_blob);
/**
* decrypt_blob - Decrypt data using an asymmetric key
* @params: Various parameters
* @enc: Encrypted data to be decrypted, length params->enc_len
* @data: Decrypted data buffer, length params->data_len
*
* Decrypt the specified data blob using the private key specified by
* params->key. The decrypted data is wrapped in an encoding if
* params->encoding is specified (eg. "pkcs1").
*
* Returns the length of the data placed in the decrypted data buffer or an
* error.
*/
int decrypt_blob(struct kernel_pkey_params *params,
const void *enc, void *data)
{
params->op = kernel_pkey_decrypt;
return asymmetric_key_eds_op(params, enc, data);
}
EXPORT_SYMBOL_GPL(decrypt_blob);
/**
* create_signature - Sign some data using an asymmetric key
* @params: Various parameters
* @data: Data blob to be signed, length params->data_len
* @enc: Signature buffer, length params->enc_len
*
* Sign the specified data blob using the private key specified by params->key.
* The signature is wrapped in an encoding if params->encoding is specified
* (eg. "pkcs1"). If the encoding needs to know the digest type, this can be
* passed through params->hash_algo (eg. "sha1").
*
* Returns the length of the data placed in the signature buffer or an error.
*/
int create_signature(struct kernel_pkey_params *params,
const void *data, void *enc)
{
params->op = kernel_pkey_sign;
return asymmetric_key_eds_op(params, data, enc);
}
EXPORT_SYMBOL_GPL(create_signature);
/**
* verify_signature - Initiate the use of an asymmetric key to verify a signature
* @key: The asymmetric key to verify against
* @sig: The signature to check
*
* Returns 0 if successful or else an error.
*/
int verify_signature(const struct key *key,
const struct public_key_signature *sig)
{
const struct asymmetric_key_subtype *subtype;
int ret;
pr_devel("==>%s()\n", __func__);
if (key->type != &key_type_asymmetric)
return -EINVAL;
subtype = asymmetric_key_subtype(key);
if (!subtype ||
!key->payload.data[0])
return -EINVAL;
if (!subtype->verify_signature)
return -ENOTSUPP;
ret = subtype->verify_signature(key, sig);
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
EXPORT_SYMBOL_GPL(verify_signature);
| linux-master | crypto/asymmetric_keys/signature.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Self-testing for signature checking.
*
* Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <linux/kernel.h>
#include <linux/cred.h>
#include <linux/key.h>
#include <crypto/pkcs7.h>
#include "x509_parser.h"
struct certs_test {
const u8 *data;
size_t data_len;
const u8 *pkcs7;
size_t pkcs7_len;
};
/*
* Set of X.509 certificates to provide public keys for the tests. These will
* be loaded into a temporary keyring for the duration of the testing.
*/
static const __initconst u8 certs_selftest_keys[] = {
"\x30\x82\x05\x55\x30\x82\x03\x3d\xa0\x03\x02\x01\x02\x02\x14\x73"
"\x98\xea\x98\x2d\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a"
"\xfc\x8c\x0a\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01\x0b"
"\x05\x00\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29\x43"
"\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66"
"\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65\x73"
"\x74\x69\x6e\x67\x20\x6b\x65\x79\x30\x20\x17\x0d\x32\x32\x30\x35"
"\x31\x38\x32\x32\x33\x32\x34\x31\x5a\x18\x0f\x32\x31\x32\x32\x30"
"\x34\x32\x34\x32\x32\x33\x32\x34\x31\x5a\x30\x34\x31\x32\x30\x30"
"\x06\x03\x55\x04\x03\x0c\x29\x43\x65\x72\x74\x69\x66\x69\x63\x61"
"\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x20"
"\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x6b\x65\x79"
"\x30\x82\x02\x22\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x01"
"\x01\x05\x00\x03\x82\x02\x0f\x00\x30\x82\x02\x0a\x02\x82\x02\x01"
"\x00\xcc\xac\x49\xdd\x3b\xca\xb0\x15\x7e\x84\x6a\xb2\x0a\x69\x5f"
"\x1c\x0a\x61\x82\x3b\x4f\x2c\xa3\x95\x2c\x08\x58\x4b\xb1\x5d\x99"
"\xe0\xc3\xc1\x79\xc2\xb3\xeb\xc0\x1e\x6d\x3e\x54\x1d\xbd\xb7\x92"
"\x7b\x4d\xb5\x95\x58\xb2\x52\x2e\xc6\x24\x4b\x71\x63\x80\x32\x77"
"\xa7\x38\x5e\xdb\x72\xae\x6e\x0d\xec\xfb\xb6\x6d\x01\x7f\xe9\x55"
"\x66\xdf\xbf\x1d\x76\x78\x02\x31\xe8\xe5\x07\xf8\xb7\x82\x5c\x0d"
"\xd4\xbb\xfb\xa2\x59\x0d\x2e\x3a\x78\x95\x3a\x8b\x46\x06\x47\x44"
"\x46\xd7\xcd\x06\x6a\x41\x13\xe3\x19\xf6\xbb\x6e\x38\xf4\x83\x01"
"\xa3\xbf\x4a\x39\x4f\xd7\x0a\xe9\x38\xb3\xf5\x94\x14\x4e\xdd\xf7"
"\x43\xfd\x24\xb2\x49\x3c\xa5\xf7\x7a\x7c\xd4\x45\x3d\x97\x75\x68"
"\xf1\xed\x4c\x42\x0b\x70\xca\x85\xf3\xde\xe5\x88\x2c\xc5\xbe\xb6"
"\x97\x34\xba\x24\x02\xcd\x8b\x86\x9f\xa9\x73\xca\x73\xcf\x92\x81"
"\xee\x75\x55\xbb\x18\x67\x5c\xff\x3f\xb5\xdd\x33\x1b\x0c\xe9\x78"
"\xdb\x5c\xcf\xaa\x5c\x43\x42\xdf\x5e\xa9\x6d\xec\xd7\xd7\xff\xe6"
"\xa1\x3a\x92\x1a\xda\xae\xf6\x8c\x6f\x7b\xd5\xb4\x6e\x06\xe9\x8f"
"\xe8\xde\x09\x31\x89\xed\x0e\x11\xa1\xfa\x8a\xe9\xe9\x64\x59\x62"
"\x53\xda\xd1\x70\xbe\x11\xd4\x99\x97\x11\xcf\x99\xde\x0b\x9d\x94"
"\x7e\xaa\xb8\x52\xea\x37\xdb\x90\x7e\x35\xbd\xd9\xfe\x6d\x0a\x48"
"\x70\x28\xdd\xd5\x0d\x7f\x03\x80\x93\x14\x23\x8f\xb9\x22\xcd\x7c"
"\x29\xfe\xf1\x72\xb5\x5c\x0b\x12\xcf\x9c\x15\xf6\x11\x4c\x7a\x45"
"\x25\x8c\x45\x0a\x34\xac\x2d\x9a\x81\xca\x0b\x13\x22\xcd\xeb\x1a"
"\x38\x88\x18\x97\x96\x08\x81\xaa\xcc\x8f\x0f\x8a\x32\x7b\x76\x68"
"\x03\x68\x43\xbf\x11\xba\x55\x60\xfd\x80\x1c\x0d\x9b\x69\xb6\x09"
"\x72\xbc\x0f\x41\x2f\x07\x82\xc6\xe3\xb2\x13\x91\xc4\x6d\x14\x95"
"\x31\xbe\x19\xbd\xbc\xed\xe1\x4c\x74\xa2\xe0\x78\x0b\xbb\x94\xec"
"\x4c\x53\x3a\xa2\xb5\x84\x1d\x4b\x65\x7e\xdc\xf7\xdb\x36\x7d\xbe"
"\x9e\x3b\x36\x66\x42\x66\x76\x35\xbf\xbe\xf0\xc1\x3c\x7c\xe9\x42"
"\x5c\x24\x53\x03\x05\xa8\x67\x24\x50\x02\x75\xff\x24\x46\x3b\x35"
"\x89\x76\xe6\x70\xda\xc5\x51\x8c\x9a\xe5\x05\xb0\x0b\xd0\x2d\xd4"
"\x7d\x57\x75\x94\x6b\xf9\x0a\xad\x0e\x41\x00\x15\xd0\x4f\xc0\x7f"
"\x90\x2d\x18\x48\x8f\x28\xfe\x5d\xa7\xcd\x99\x9e\xbd\x02\x6c\x8a"
"\x31\xf3\x1c\xc7\x4b\xe6\x93\xcd\x42\xa2\xe4\x68\x10\x47\x9d\xfc"
"\x21\x02\x03\x01\x00\x01\xa3\x5d\x30\x5b\x30\x0c\x06\x03\x55\x1d"
"\x13\x01\x01\xff\x04\x02\x30\x00\x30\x0b\x06\x03\x55\x1d\x0f\x04"
"\x04\x03\x02\x07\x80\x30\x1d\x06\x03\x55\x1d\x0e\x04\x16\x04\x14"
"\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88\x17"
"\x51\x8f\xe3\xdb\x30\x1f\x06\x03\x55\x1d\x23\x04\x18\x30\x16\x80"
"\x14\xf5\x87\x03\xbb\x33\xce\x1b\x73\xee\x02\xec\xcd\xee\x5b\x88"
"\x17\x51\x8f\xe3\xdb\x30\x0d\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01"
"\x01\x0b\x05\x00\x03\x82\x02\x01\x00\xc0\x2e\x12\x41\x7b\x73\x85"
"\x16\xc8\xdb\x86\x79\xe8\xf5\xcd\x44\xf4\xc6\xe2\x81\x23\x5e\x47"
"\xcb\xab\x25\xf1\x1e\x58\x3e\x31\x7f\x78\xad\x85\xeb\xfe\x14\x88"
"\x60\xf7\x7f\xd2\x26\xa2\xf4\x98\x2a\xfd\xba\x05\x0c\x20\x33\x12"
"\xcc\x4d\x14\x61\x64\x81\x93\xd3\x33\xed\xc8\xff\xf1\x78\xcc\x5f"
"\x51\x9f\x09\xd7\xbe\x0d\x5c\x74\xfd\x9b\xdf\x52\x4a\xc9\xa8\x71"
"\x25\x33\x04\x10\x67\x36\xd0\xb3\x0b\xc9\xa1\x40\x72\xae\x41\x7b"
"\x68\xe6\xe4\x7b\xd0\x28\xf7\x6d\xe7\x3f\x50\xfc\x91\x7c\x91\x56"
"\xd4\xdf\xa6\xbb\xe8\x4d\x1b\x58\xaa\x28\xfa\xc1\x19\xeb\x11\x2f"
"\x24\x8b\x7c\xc5\xa9\x86\x26\xaa\x6e\xb7\x9b\xd5\xf8\x06\xfb\x02"
"\x52\x7b\x9c\x9e\xa1\xe0\x07\x8b\x5e\xe4\xb8\x55\x29\xf6\x48\x52"
"\x1c\x1b\x54\x2d\x46\xd8\xe5\x71\xb9\x60\xd1\x45\xb5\x92\x89\x8a"
"\x63\x58\x2a\xb3\xc6\xb2\x76\xe2\x3c\x82\x59\x04\xae\x5a\xc4\x99"
"\x7b\x2e\x4b\x46\x57\xb8\x29\x24\xb2\xfd\xee\x2c\x0d\xa4\x83\xfa"
"\x65\x2a\x07\x35\x8b\x97\xcf\xbd\x96\x2e\xd1\x7e\x6c\xc2\x1e\x87"
"\xb6\x6c\x76\x65\xb5\xb2\x62\xda\x8b\xe9\x73\xe3\xdb\x33\xdd\x13"
"\x3a\x17\x63\x6a\x76\xde\x8d\x8f\xe0\x47\x61\x28\x3a\x83\xff\x8f"
"\xe7\xc7\xe0\x4a\xa3\xe5\x07\xcf\xe9\x8c\x35\x35\x2e\xe7\x80\x66"
"\x31\xbf\x91\x58\x0a\xe1\x25\x3d\x38\xd3\xa4\xf0\x59\x34\x47\x07"
"\x62\x0f\xbe\x30\xdd\x81\x88\x58\xf0\x28\xb0\x96\xe5\x82\xf8\x05"
"\xb7\x13\x01\xbc\xfa\xc6\x1f\x86\x72\xcc\xf9\xee\x8e\xd9\xd6\x04"
"\x8c\x24\x6c\xbf\x0f\x5d\x37\x39\xcf\x45\xc1\x93\x3a\xd2\xed\x5c"
"\x58\x79\x74\x86\x62\x30\x7e\x8e\xbb\xdd\x7a\xa9\xed\xca\x40\xcb"
"\x62\x47\xf4\xb4\x9f\x52\x7f\x72\x63\xa8\xf0\x2b\xaf\x45\x2a\x48"
"\x19\x6d\xe3\xfb\xf9\x19\x66\x69\xc8\xcc\x62\x87\x6c\x53\x2b\x2d"
"\x6e\x90\x6c\x54\x3a\x82\x25\x41\xcb\x18\x6a\xa4\x22\xa8\xa1\xc4"
"\x47\xd7\x81\x00\x1c\x15\x51\x0f\x1a\xaf\xef\x9f\xa6\x61\x8c\xbd"
"\x6b\x8b\xed\xe6\xac\x0e\xb6\x3a\x4c\x92\xe6\x0f\x91\x0a\x0f\x71"
"\xc7\xa0\xb9\x0d\x3a\x17\x5a\x6f\x35\xc8\xe7\x50\x4f\x46\xe8\x70"
"\x60\x48\x06\x82\x8b\x66\x58\xe6\x73\x91\x9c\x12\x3d\x35\x8e\x46"
"\xad\x5a\xf5\xb3\xdb\x69\x21\x04\xfd\xd3\x1c\xdf\x94\x9d\x56\xb0"
"\x0a\xd1\x95\x76\x8d\xec\x9e\xdd\x0b\x15\x97\x64\xad\xe5\xf2\x62"
"\x02\xfc\x9e\x5f\x56\x42\x39\x05\xb3"
};
/*
* Signed data and detached signature blobs that form the verification tests.
*/
static const __initconst u8 certs_selftest_1_data[] = {
"\x54\x68\x69\x73\x20\x69\x73\x20\x73\x6f\x6d\x65\x20\x74\x65\x73"
"\x74\x20\x64\x61\x74\x61\x20\x75\x73\x65\x64\x20\x66\x6f\x72\x20"
"\x73\x65\x6c\x66\x2d\x74\x65\x73\x74\x69\x6e\x67\x20\x63\x65\x72"
"\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69\x66\x69\x63"
"\x61\x74\x69\x6f\x6e\x2e\x0a"
};
static const __initconst u8 certs_selftest_1_pkcs7[] = {
"\x30\x82\x02\xab\x06\x09\x2a\x86\x48\x86\xf7\x0d\x01\x07\x02\xa0"
"\x82\x02\x9c\x30\x82\x02\x98\x02\x01\x01\x31\x0d\x30\x0b\x06\x09"
"\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0b\x06\x09\x2a\x86\x48"
"\x86\xf7\x0d\x01\x07\x01\x31\x82\x02\x75\x30\x82\x02\x71\x02\x01"
"\x01\x30\x4c\x30\x34\x31\x32\x30\x30\x06\x03\x55\x04\x03\x0c\x29"
"\x43\x65\x72\x74\x69\x66\x69\x63\x61\x74\x65\x20\x76\x65\x72\x69"
"\x66\x69\x63\x61\x74\x69\x6f\x6e\x20\x73\x65\x6c\x66\x2d\x74\x65"
"\x73\x74\x69\x6e\x67\x20\x6b\x65\x79\x02\x14\x73\x98\xea\x98\x2d"
"\xd0\x2e\xa8\xb1\xcf\x57\xc7\xf2\x97\xb3\xe6\x1a\xfc\x8c\x0a\x30"
"\x0b\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x30\x0d\x06\x09"
"\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x04\x82\x02\x00\xac"
"\xb0\xf2\x07\xd6\x99\x6d\xc0\xc0\xd9\x8d\x31\x0d\x7e\x04\xeb\xc3"
"\x88\x90\xc4\x58\x46\xd4\xe2\xa0\xa3\x25\xe3\x04\x50\x37\x85\x8c"
"\x91\xc6\xfc\xc5\xd4\x92\xfd\x05\xd8\xb8\xa3\xb8\xba\x89\x13\x00"
"\x88\x79\x99\x51\x6b\x5b\x28\x31\xc0\xb3\x1b\x7a\x68\x2c\x00\xdb"
"\x4b\x46\x11\xf3\xfa\x50\x8e\x19\x89\xa2\x4c\xda\x4c\x89\x01\x11"
"\x89\xee\xd3\xc8\xc1\xe7\xa7\xf6\xb2\xa2\xf8\x65\xb8\x35\x20\x33"
"\xba\x12\x62\xd5\xbd\xaa\x71\xe5\x5b\xc0\x6a\x32\xff\x6a\x2e\x23"
"\xef\x2b\xb6\x58\xb1\xfb\x5f\x82\x34\x40\x6d\x9f\xbc\x27\xac\x37"
"\x23\x99\xcf\x7d\x20\xb2\x39\x01\xc0\x12\xce\xd7\x5d\x2f\xb6\xab"
"\xb5\x56\x4f\xef\xf4\x72\x07\x58\x65\xa9\xeb\x1f\x75\x1c\x5f\x0c"
"\x88\xe0\xa4\xe2\xcd\x73\x2b\x9e\xb2\x05\x7e\x12\xf8\xd0\x66\x41"
"\xcc\x12\x63\xd4\xd6\xac\x9b\x1d\x14\x77\x8d\x1c\x57\xd5\x27\xc6"
"\x49\xa2\x41\x43\xf3\x59\x29\xe5\xcb\xd1\x75\xbc\x3a\x97\x2a\x72"
"\x22\x66\xc5\x3b\xc1\xba\xfc\x53\x18\x98\xe2\x21\x64\xc6\x52\x87"
"\x13\xd5\x7c\x42\xe8\xfb\x9c\x9a\x45\x32\xd5\xa5\x22\x62\x9d\xd4"
"\xcb\xa4\xfa\x77\xbb\x50\x24\x0b\x8b\x88\x99\x15\x56\xa9\x1e\x92"
"\xbf\x5d\x94\x77\xb6\xf1\x67\x01\x60\x06\x58\x5c\xdf\x18\x52\x79"
"\x37\x30\x93\x7d\x87\x04\xf1\xe0\x55\x59\x52\xf3\xc2\xb1\x1c\x5b"
"\x12\x7c\x49\x87\xfb\xf7\xed\xdd\x95\x71\xec\x4b\x1a\x85\x08\xb0"
"\xa0\x36\xc4\x7b\xab\x40\xe0\xf1\x98\xcc\xaf\x19\x40\x8f\x47\x6f"
"\xf0\x6c\x84\x29\x7f\x7f\x04\x46\xcb\x08\x0f\xe0\xc1\xc9\x70\x6e"
"\x95\x3b\xa4\xbc\x29\x2b\x53\x67\x45\x1b\x0d\xbc\x13\xa5\x76\x31"
"\xaf\xb9\xd0\xe0\x60\x12\xd2\xf4\xb7\x7c\x58\x7e\xf6\x2d\xbb\x24"
"\x14\x5a\x20\x24\xa8\x12\xdf\x25\xbd\x42\xce\x96\x7c\x2e\xba\x14"
"\x1b\x81\x9f\x18\x45\xa4\xc6\x70\x3e\x0e\xf0\xd3\x7b\x9c\x10\xbe"
"\xb8\x7a\x89\xc5\x9e\xd9\x97\xdf\xd7\xe7\xc6\x1d\xc0\x20\x6c\xb8"
"\x1e\x3a\x63\xb8\x39\x8e\x8e\x62\xd5\xd2\xb4\xcd\xff\x46\xfc\x8e"
"\xec\x07\x35\x0c\xff\xb0\x05\xe6\xf4\xe5\xfe\xa2\xe3\x0a\xe6\x36"
"\xa7\x4a\x7e\x62\x1d\xc4\x50\x39\x35\x4e\x28\xcb\x4a\xfb\x9d\xdb"
"\xdd\x23\xd6\x53\xb1\x74\x77\x12\xf7\x9c\xf0\x9a\x6b\xf7\xa9\x64"
"\x2d\x86\x21\x2a\xcf\xc6\x54\xf5\xc9\xad\xfa\xb5\x12\xb4\xf3\x51"
"\x77\x55\x3c\x6f\x0c\x32\xd3\x8c\x44\x39\x71\x25\xfe\x96\xd2"
};
/*
* List of tests to be run.
*/
#define TEST(data, pkcs7) { data, sizeof(data) - 1, pkcs7, sizeof(pkcs7) - 1 }
static const struct certs_test certs_tests[] __initconst = {
TEST(certs_selftest_1_data, certs_selftest_1_pkcs7),
};
int __init fips_signature_selftest(void)
{
struct key *keyring;
int ret, i;
pr_notice("Running certificate verification selftests\n");
keyring = keyring_alloc(".certs_selftest",
GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
(KEY_POS_ALL & ~KEY_POS_SETATTR) |
KEY_USR_VIEW | KEY_USR_READ |
KEY_USR_SEARCH,
KEY_ALLOC_NOT_IN_QUOTA,
NULL, NULL);
if (IS_ERR(keyring))
panic("Can't allocate certs selftest keyring: %ld\n",
PTR_ERR(keyring));
ret = x509_load_certificate_list(certs_selftest_keys,
sizeof(certs_selftest_keys) - 1, keyring);
if (ret < 0)
panic("Can't allocate certs selftest keyring: %d\n", ret);
for (i = 0; i < ARRAY_SIZE(certs_tests); i++) {
const struct certs_test *test = &certs_tests[i];
struct pkcs7_message *pkcs7;
pkcs7 = pkcs7_parse_message(test->pkcs7, test->pkcs7_len);
if (IS_ERR(pkcs7))
panic("Certs selftest %d: pkcs7_parse_message() = %d\n", i, ret);
pkcs7_supply_detached_data(pkcs7, test->data, test->data_len);
ret = pkcs7_verify(pkcs7, VERIFYING_MODULE_SIGNATURE);
if (ret < 0)
panic("Certs selftest %d: pkcs7_verify() = %d\n", i, ret);
ret = pkcs7_validate_trust(pkcs7, keyring);
if (ret < 0)
panic("Certs selftest %d: pkcs7_validate_trust() = %d\n", i, ret);
pkcs7_free_message(pkcs7);
}
key_put(keyring);
return 0;
}
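/*
 * Illustrative sketch (not part of this file): the selftest above drives
 * the PKCS#7 machinery step by step for test purposes.  An ordinary
 * in-kernel consumer with a detached signature can usually use the
 * one-shot wrapper from <linux/verification.h> instead; the trusted
 * keyring argument here is hypothetical.
 */
#include <linux/verification.h>

static int example_check_detached_sig(const void *data, size_t data_len,
				      const void *pkcs7, size_t pkcs7_len,
				      struct key *trusted)
{
	/*
	 * Parses the message, supplies the detached data, verifies the
	 * signature chain and checks trust against @trusted in one call.
	 */
	return verify_pkcs7_signature(data, data_len, pkcs7, pkcs7_len,
				      trusted,
				      VERIFYING_UNSPECIFIED_SIGNATURE,
				      NULL, NULL);
}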
| linux-master | crypto/asymmetric_keys/selftest.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Parse a Microsoft Individual Code Signing blob
*
* Copyright (C) 2014 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) "MSCODE: "fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/oid_registry.h>
#include <crypto/pkcs7.h>
#include "verify_pefile.h"
#include "mscode.asn1.h"
/*
* Parse a Microsoft Individual Code Signing blob
*/
int mscode_parse(void *_ctx, const void *content_data, size_t data_len,
size_t asn1hdrlen)
{
struct pefile_context *ctx = _ctx;
content_data -= asn1hdrlen;
data_len += asn1hdrlen;
pr_devel("Data: %zu [%*ph]\n", data_len, (unsigned)(data_len),
content_data);
return asn1_ber_decoder(&mscode_decoder, ctx, content_data, data_len);
}
/*
* Check the content type OID
*/
int mscode_note_content_type(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
enum OID oid;
oid = look_up_OID(value, vlen);
if (oid == OID__NR) {
char buffer[50];
sprint_oid(value, vlen, buffer, sizeof(buffer));
pr_err("Unknown OID: %s\n", buffer);
return -EBADMSG;
}
/*
	 * The pesign utility had a bug where it was putting
	 * OID_msIndividualSPKeyPurpose instead of OID_msPeImageDataObjId,
	 * so allow both OIDs.
*/
if (oid != OID_msPeImageDataObjId &&
oid != OID_msIndividualSPKeyPurpose) {
pr_err("Unexpected content type OID %u\n", oid);
return -EBADMSG;
}
return 0;
}
/*
* Note the digest algorithm OID
*/
int mscode_note_digest_algo(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pefile_context *ctx = context;
char buffer[50];
enum OID oid;
oid = look_up_OID(value, vlen);
switch (oid) {
case OID_md4:
ctx->digest_algo = "md4";
break;
case OID_md5:
ctx->digest_algo = "md5";
break;
case OID_sha1:
ctx->digest_algo = "sha1";
break;
case OID_sha256:
ctx->digest_algo = "sha256";
break;
case OID_sha384:
ctx->digest_algo = "sha384";
break;
case OID_sha512:
ctx->digest_algo = "sha512";
break;
case OID_sha224:
ctx->digest_algo = "sha224";
break;
case OID__NR:
sprint_oid(value, vlen, buffer, sizeof(buffer));
pr_err("Unknown OID: %s\n", buffer);
return -EBADMSG;
default:
pr_err("Unsupported content type: %u\n", oid);
return -ENOPKG;
}
return 0;
}
/*
* Note the digest we're guaranteeing with this certificate
*/
int mscode_note_digest(void *context, size_t hdrlen,
unsigned char tag,
const void *value, size_t vlen)
{
struct pefile_context *ctx = context;
ctx->digest = kmemdup(value, vlen, GFP_KERNEL);
if (!ctx->digest)
return -ENOMEM;
ctx->digest_len = vlen;
return 0;
}
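/*
 * Illustrative sketch (not part of this file): the OID registry helpers
 * used throughout this parser.  The check below is hypothetical.
 */
#include <linux/kernel.h>
#include <linux/oid_registry.h>

static bool example_oid_is_sha256(const void *value, size_t vlen)
{
	char buffer[50];

	if (look_up_OID(value, vlen) == OID_sha256)
		return true;

	/* sprint_oid() renders the raw DER bytes in dotted notation. */
	sprint_oid(value, vlen, buffer, sizeof(buffer));
	pr_debug("not sha256: %s\n", buffer);
	return false;
}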
| linux-master | crypto/asymmetric_keys/mscode_parser.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Instantiate a public key crypto key from an X.509 Certificate
*
* Copyright (C) 2012, 2016 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) "ASYM: "fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <crypto/public_key.h>
#include "asymmetric_keys.h"
static bool use_builtin_keys;
static struct asymmetric_key_id *ca_keyid;
#ifndef MODULE
static struct {
struct asymmetric_key_id id;
unsigned char data[10];
} cakey;
static int __init ca_keys_setup(char *str)
{
if (!str) /* default system keyring */
return 1;
if (strncmp(str, "id:", 3) == 0) {
struct asymmetric_key_id *p = &cakey.id;
size_t hexlen = (strlen(str) - 3) / 2;
int ret;
if (hexlen == 0 || hexlen > sizeof(cakey.data)) {
pr_err("Missing or invalid ca_keys id\n");
return 1;
}
ret = __asymmetric_key_hex_to_key_id(str + 3, p, hexlen);
if (ret < 0)
pr_err("Unparsable ca_keys id hex string\n");
else
ca_keyid = p; /* owner key 'id:xxxxxx' */
} else if (strcmp(str, "builtin") == 0) {
use_builtin_keys = true;
}
return 1;
}
__setup("ca_keys=", ca_keys_setup);
#endif
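/*
 * Example kernel command-line usage for ca_keys_setup() above (the key ID
 * value is hypothetical):
 *
 *	ca_keys=builtin			only built-in keys may vouch
 *	ca_keys=id:0123456789abcdef	partial ID the CA key must carry
 */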
/**
* restrict_link_by_signature - Restrict additions to a ring of public keys
* @dest_keyring: Keyring being linked to.
* @type: The type of key being added.
* @payload: The payload of the new key.
* @trust_keyring: A ring of keys that can be used to vouch for the new cert.
*
* Check the new certificate against the ones in the trust keyring. If one of
* those is the signing key and validates the new certificate, then mark the
* new certificate as being trusted.
*
* Returns 0 if the new certificate was accepted, -ENOKEY if we couldn't find a
* matching parent certificate in the trusted list, -EKEYREJECTED if the
* signature check fails or the key is blacklisted, -ENOPKG if the signature
* uses unsupported crypto, or some other error if there is a matching
* certificate but the signature check cannot be performed.
*/
int restrict_link_by_signature(struct key *dest_keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *trust_keyring)
{
const struct public_key_signature *sig;
struct key *key;
int ret;
pr_devel("==>%s()\n", __func__);
if (!trust_keyring)
return -ENOKEY;
if (type != &key_type_asymmetric)
return -EOPNOTSUPP;
sig = payload->data[asym_auth];
if (!sig)
return -ENOPKG;
if (!sig->auth_ids[0] && !sig->auth_ids[1] && !sig->auth_ids[2])
return -ENOKEY;
if (ca_keyid && !asymmetric_key_id_partial(sig->auth_ids[1], ca_keyid))
return -EPERM;
/* See if we have a key that signed this one. */
key = find_asymmetric_key(trust_keyring,
sig->auth_ids[0], sig->auth_ids[1],
sig->auth_ids[2], false);
if (IS_ERR(key))
return -ENOKEY;
if (use_builtin_keys && !test_bit(KEY_FLAG_BUILTIN, &key->flags))
ret = -ENOKEY;
else
ret = verify_signature(key, sig);
key_put(key);
return ret;
}
/**
* restrict_link_by_ca - Restrict additions to a ring of CA keys
* @dest_keyring: Keyring being linked to.
* @type: The type of key being added.
* @payload: The payload of the new key.
* @trust_keyring: Unused.
*
* Check if the new certificate is a CA. If it is a CA, then mark the new
* certificate as being ok to link.
*
* Returns 0 if the new certificate was accepted, -ENOKEY if the
 * certificate is not a CA, -ENOPKG if the signature uses unsupported
* crypto, or some other error if there is a matching certificate but
* the signature check cannot be performed.
*/
int restrict_link_by_ca(struct key *dest_keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *trust_keyring)
{
const struct public_key *pkey;
if (type != &key_type_asymmetric)
return -EOPNOTSUPP;
pkey = payload->data[asym_crypto];
if (!pkey)
return -ENOPKG;
if (!test_bit(KEY_EFLAG_CA, &pkey->key_eflags))
return -ENOKEY;
if (!test_bit(KEY_EFLAG_KEYCERTSIGN, &pkey->key_eflags))
return -ENOKEY;
if (!IS_ENABLED(CONFIG_INTEGRITY_CA_MACHINE_KEYRING_MAX))
return 0;
if (test_bit(KEY_EFLAG_DIGITALSIG, &pkey->key_eflags))
return -ENOKEY;
return 0;
}
/**
* restrict_link_by_digsig - Restrict additions to a ring of digsig keys
* @dest_keyring: Keyring being linked to.
* @type: The type of key being added.
* @payload: The payload of the new key.
* @trust_keyring: A ring of keys that can be used to vouch for the new cert.
*
 * Check if the new certificate has digitalSignature usage set. If it does,
* then mark the new certificate as being ok to link. Afterwards verify
* the new certificate against the ones in the trust_keyring.
*
* Returns 0 if the new certificate was accepted, -ENOKEY if the
 * certificate is not a digsig, -ENOPKG if the signature uses unsupported
* crypto, or some other error if there is a matching certificate but
* the signature check cannot be performed.
*/
int restrict_link_by_digsig(struct key *dest_keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *trust_keyring)
{
const struct public_key *pkey;
if (type != &key_type_asymmetric)
return -EOPNOTSUPP;
pkey = payload->data[asym_crypto];
if (!pkey)
return -ENOPKG;
if (!test_bit(KEY_EFLAG_DIGITALSIG, &pkey->key_eflags))
return -ENOKEY;
if (test_bit(KEY_EFLAG_CA, &pkey->key_eflags))
return -ENOKEY;
if (test_bit(KEY_EFLAG_KEYCERTSIGN, &pkey->key_eflags))
return -ENOKEY;
return restrict_link_by_signature(dest_keyring, type, payload,
trust_keyring);
}
static bool match_either_id(const struct asymmetric_key_id **pair,
const struct asymmetric_key_id *single)
{
return (asymmetric_key_id_same(pair[0], single) ||
asymmetric_key_id_same(pair[1], single));
}
static int key_or_keyring_common(struct key *dest_keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *trusted, bool check_dest)
{
const struct public_key_signature *sig;
struct key *key = NULL;
int ret;
pr_devel("==>%s()\n", __func__);
if (!dest_keyring)
return -ENOKEY;
else if (dest_keyring->type != &key_type_keyring)
return -EOPNOTSUPP;
if (!trusted && !check_dest)
return -ENOKEY;
if (type != &key_type_asymmetric)
return -EOPNOTSUPP;
sig = payload->data[asym_auth];
if (!sig)
return -ENOPKG;
if (!sig->auth_ids[0] && !sig->auth_ids[1] && !sig->auth_ids[2])
return -ENOKEY;
if (trusted) {
if (trusted->type == &key_type_keyring) {
/* See if we have a key that signed this one. */
key = find_asymmetric_key(trusted, sig->auth_ids[0],
sig->auth_ids[1],
sig->auth_ids[2], false);
if (IS_ERR(key))
key = NULL;
} else if (trusted->type == &key_type_asymmetric) {
const struct asymmetric_key_id **signer_ids;
signer_ids = (const struct asymmetric_key_id **)
asymmetric_key_ids(trusted)->id;
/*
* The auth_ids come from the candidate key (the
* one that is being considered for addition to
* dest_keyring) and identify the key that was
* used to sign.
*
* The signer_ids are identifiers for the
* signing key specified for dest_keyring.
*
* The first auth_id is the preferred id, 2nd and
* 3rd are the fallbacks. If exactly one of
* auth_ids[0] and auth_ids[1] is present, it may
			 * match either signer_ids[0] or signer_ids[1].
			 * If both are present, the first one may match
			 * either signer_id but the second one must match
* the second signer_id. If neither of them is
* available, auth_ids[2] is matched against
* signer_ids[2] as a fallback.
*/
if (!sig->auth_ids[0] && !sig->auth_ids[1]) {
if (asymmetric_key_id_same(signer_ids[2],
sig->auth_ids[2]))
key = __key_get(trusted);
} else if (!sig->auth_ids[0] || !sig->auth_ids[1]) {
const struct asymmetric_key_id *auth_id;
auth_id = sig->auth_ids[0] ?: sig->auth_ids[1];
if (match_either_id(signer_ids, auth_id))
key = __key_get(trusted);
} else if (asymmetric_key_id_same(signer_ids[1],
sig->auth_ids[1]) &&
match_either_id(signer_ids,
sig->auth_ids[0])) {
key = __key_get(trusted);
}
} else {
return -EOPNOTSUPP;
}
}
if (check_dest && !key) {
/* See if the destination has a key that signed this one. */
key = find_asymmetric_key(dest_keyring, sig->auth_ids[0],
sig->auth_ids[1], sig->auth_ids[2],
false);
if (IS_ERR(key))
key = NULL;
}
if (!key)
return -ENOKEY;
ret = key_validate(key);
if (ret == 0)
ret = verify_signature(key, sig);
key_put(key);
return ret;
}
/**
* restrict_link_by_key_or_keyring - Restrict additions to a ring of public
* keys using the restrict_key information stored in the ring.
* @dest_keyring: Keyring being linked to.
* @type: The type of key being added.
* @payload: The payload of the new key.
* @trusted: A key or ring of keys that can be used to vouch for the new cert.
*
* Check the new certificate only against the key or keys passed in the data
* parameter. If one of those is the signing key and validates the new
* certificate, then mark the new certificate as being ok to link.
*
* Returns 0 if the new certificate was accepted, -ENOKEY if we
* couldn't find a matching parent certificate in the trusted list,
* -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses
* unsupported crypto, or some other error if there is a matching certificate
* but the signature check cannot be performed.
*/
int restrict_link_by_key_or_keyring(struct key *dest_keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *trusted)
{
return key_or_keyring_common(dest_keyring, type, payload, trusted,
false);
}
/**
* restrict_link_by_key_or_keyring_chain - Restrict additions to a ring of
* public keys using the restrict_key information stored in the ring.
* @dest_keyring: Keyring being linked to.
* @type: The type of key being added.
* @payload: The payload of the new key.
* @trusted: A key or ring of keys that can be used to vouch for the new cert.
*
* Check the new certificate against the key or keys passed in the data
* parameter and against the keys already linked to the destination keyring. If
* one of those is the signing key and validates the new certificate, then mark
* the new certificate as being ok to link.
*
* Returns 0 if the new certificate was accepted, -ENOKEY if we
* couldn't find a matching parent certificate in the trusted list,
* -EKEYREJECTED if the signature check fails, -ENOPKG if the signature uses
* unsupported crypto, or some other error if there is a matching certificate
* but the signature check cannot be performed.
*/
int restrict_link_by_key_or_keyring_chain(struct key *dest_keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *trusted)
{
return key_or_keyring_common(dest_keyring, type, payload, trusted,
true);
}
| linux-master | crypto/asymmetric_keys/restrict.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Parse a signed PE binary
*
* Copyright (C) 2014 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) "PEFILE: "fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/pe.h>
#include <linux/asn1.h>
#include <linux/verification.h>
#include <crypto/hash.h>
#include "verify_pefile.h"
/*
* Parse a PE binary.
*/
static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
struct pefile_context *ctx)
{
const struct mz_hdr *mz = pebuf;
const struct pe_hdr *pe;
const struct pe32_opt_hdr *pe32;
const struct pe32plus_opt_hdr *pe64;
const struct data_directory *ddir;
const struct data_dirent *dde;
const struct section_header *secs, *sec;
size_t cursor, datalen = pelen;
kenter("");
#define chkaddr(base, x, s) \
do { \
if ((x) < base || (s) >= datalen || (x) > datalen - (s)) \
return -ELIBBAD; \
} while (0)
chkaddr(0, 0, sizeof(*mz));
if (mz->magic != MZ_MAGIC)
return -ELIBBAD;
cursor = sizeof(*mz);
chkaddr(cursor, mz->peaddr, sizeof(*pe));
pe = pebuf + mz->peaddr;
if (pe->magic != PE_MAGIC)
return -ELIBBAD;
cursor = mz->peaddr + sizeof(*pe);
chkaddr(0, cursor, sizeof(pe32->magic));
pe32 = pebuf + cursor;
pe64 = pebuf + cursor;
switch (pe32->magic) {
case PE_OPT_MAGIC_PE32:
chkaddr(0, cursor, sizeof(*pe32));
ctx->image_checksum_offset =
(unsigned long)&pe32->csum - (unsigned long)pebuf;
ctx->header_size = pe32->header_size;
cursor += sizeof(*pe32);
ctx->n_data_dirents = pe32->data_dirs;
break;
case PE_OPT_MAGIC_PE32PLUS:
chkaddr(0, cursor, sizeof(*pe64));
ctx->image_checksum_offset =
(unsigned long)&pe64->csum - (unsigned long)pebuf;
ctx->header_size = pe64->header_size;
cursor += sizeof(*pe64);
ctx->n_data_dirents = pe64->data_dirs;
break;
default:
pr_warn("Unknown PEOPT magic = %04hx\n", pe32->magic);
return -ELIBBAD;
}
pr_debug("checksum @ %x\n", ctx->image_checksum_offset);
pr_debug("header size = %x\n", ctx->header_size);
if (cursor >= ctx->header_size || ctx->header_size >= datalen)
return -ELIBBAD;
if (ctx->n_data_dirents > (ctx->header_size - cursor) / sizeof(*dde))
return -ELIBBAD;
ddir = pebuf + cursor;
cursor += sizeof(*dde) * ctx->n_data_dirents;
ctx->cert_dirent_offset =
(unsigned long)&ddir->certs - (unsigned long)pebuf;
ctx->certs_size = ddir->certs.size;
if (!ddir->certs.virtual_address || !ddir->certs.size) {
pr_warn("Unsigned PE binary\n");
return -ENODATA;
}
chkaddr(ctx->header_size, ddir->certs.virtual_address,
ddir->certs.size);
ctx->sig_offset = ddir->certs.virtual_address;
ctx->sig_len = ddir->certs.size;
pr_debug("cert = %x @%x [%*ph]\n",
ctx->sig_len, ctx->sig_offset,
ctx->sig_len, pebuf + ctx->sig_offset);
ctx->n_sections = pe->sections;
if (ctx->n_sections > (ctx->header_size - cursor) / sizeof(*sec))
return -ELIBBAD;
ctx->secs = secs = pebuf + cursor;
return 0;
}
/*
* Check and strip the PE wrapper from around the signature and check that the
* remnant looks something like PKCS#7.
*/
static int pefile_strip_sig_wrapper(const void *pebuf,
struct pefile_context *ctx)
{
struct win_certificate wrapper;
const u8 *pkcs7;
unsigned len;
if (ctx->sig_len < sizeof(wrapper)) {
pr_warn("Signature wrapper too short\n");
return -ELIBBAD;
}
memcpy(&wrapper, pebuf + ctx->sig_offset, sizeof(wrapper));
pr_debug("sig wrapper = { %x, %x, %x }\n",
wrapper.length, wrapper.revision, wrapper.cert_type);
/* sbsign rounds up the length of certificate table (in optional
* header data directories) to 8 byte alignment. However, the PE
* specification states that while entries are 8-byte aligned, this is
* not included in their length, and as a result, pesign has not
* rounded up since 0.110.
*/
if (wrapper.length > ctx->sig_len) {
pr_warn("Signature wrapper bigger than sig len (%x > %x)\n",
ctx->sig_len, wrapper.length);
return -ELIBBAD;
}
if (wrapper.revision != WIN_CERT_REVISION_2_0) {
pr_warn("Signature is not revision 2.0\n");
return -ENOTSUPP;
}
if (wrapper.cert_type != WIN_CERT_TYPE_PKCS_SIGNED_DATA) {
pr_warn("Signature certificate type is not PKCS\n");
return -ENOTSUPP;
}
	/* It looks like the PKCS#7 signature length in wrapper->length and the
* size obtained from the data dir entries, which lists the total size
* of certificate table, are both aligned to an octaword boundary, so
* we may have to deal with some padding.
*/
ctx->sig_len = wrapper.length;
ctx->sig_offset += sizeof(wrapper);
ctx->sig_len -= sizeof(wrapper);
if (ctx->sig_len < 4) {
pr_warn("Signature data missing\n");
return -EKEYREJECTED;
}
/* What's left should be a PKCS#7 cert */
pkcs7 = pebuf + ctx->sig_offset;
if (pkcs7[0] != (ASN1_CONS_BIT | ASN1_SEQ))
goto not_pkcs7;
switch (pkcs7[1]) {
case 0 ... 0x7f:
len = pkcs7[1] + 2;
goto check_len;
case ASN1_INDEFINITE_LENGTH:
return 0;
case 0x81:
len = pkcs7[2] + 3;
goto check_len;
case 0x82:
len = ((pkcs7[2] << 8) | pkcs7[3]) + 4;
goto check_len;
case 0x83 ... 0xff:
return -EMSGSIZE;
default:
goto not_pkcs7;
}
check_len:
if (len <= ctx->sig_len) {
/* There may be padding */
ctx->sig_len = len;
return 0;
}
not_pkcs7:
pr_warn("Signature data not PKCS#7\n");
return -ELIBBAD;
}
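/*
 * For reference, the BER/DER length forms handled above (first octet
 * after the SEQUENCE tag):
 *
 *	0x00-0x7f  short form: the octet is the content length itself
 *	0x80       indefinite length (ASN1_INDEFINITE_LENGTH)
 *	0x81       long form: length in the next 1 octet
 *	0x82       long form: length in the next 2 octets, big-endian
 *	0x83-0xff  longer forms, rejected above as -EMSGSIZE
 */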
/*
* Compare two sections for canonicalisation.
*/
static int pefile_compare_shdrs(const void *a, const void *b)
{
const struct section_header *shdra = a;
const struct section_header *shdrb = b;
int rc;
if (shdra->data_addr > shdrb->data_addr)
return 1;
if (shdrb->data_addr > shdra->data_addr)
return -1;
if (shdra->virtual_address > shdrb->virtual_address)
return 1;
if (shdrb->virtual_address > shdra->virtual_address)
return -1;
rc = strcmp(shdra->name, shdrb->name);
if (rc != 0)
return rc;
if (shdra->virtual_size > shdrb->virtual_size)
return 1;
if (shdrb->virtual_size > shdra->virtual_size)
return -1;
if (shdra->raw_data_size > shdrb->raw_data_size)
return 1;
if (shdrb->raw_data_size > shdra->raw_data_size)
return -1;
return 0;
}
/*
* Load the contents of the PE binary into the digest, leaving out the image
* checksum and the certificate data block.
*/
static int pefile_digest_pe_contents(const void *pebuf, unsigned int pelen,
struct pefile_context *ctx,
struct shash_desc *desc)
{
unsigned *canon, tmp, loop, i, hashed_bytes;
int ret;
/* Digest the header and data directory, but leave out the image
* checksum and the data dirent for the signature.
*/
ret = crypto_shash_update(desc, pebuf, ctx->image_checksum_offset);
if (ret < 0)
return ret;
tmp = ctx->image_checksum_offset + sizeof(uint32_t);
ret = crypto_shash_update(desc, pebuf + tmp,
ctx->cert_dirent_offset - tmp);
if (ret < 0)
return ret;
tmp = ctx->cert_dirent_offset + sizeof(struct data_dirent);
ret = crypto_shash_update(desc, pebuf + tmp, ctx->header_size - tmp);
if (ret < 0)
return ret;
canon = kcalloc(ctx->n_sections, sizeof(unsigned), GFP_KERNEL);
if (!canon)
return -ENOMEM;
/* We have to canonicalise the section table, so we perform an
* insertion sort.
*/
canon[0] = 0;
for (loop = 1; loop < ctx->n_sections; loop++) {
for (i = 0; i < loop; i++) {
if (pefile_compare_shdrs(&ctx->secs[canon[i]],
&ctx->secs[loop]) > 0) {
memmove(&canon[i + 1], &canon[i],
(loop - i) * sizeof(canon[0]));
break;
}
}
canon[i] = loop;
}
hashed_bytes = ctx->header_size;
for (loop = 0; loop < ctx->n_sections; loop++) {
i = canon[loop];
if (ctx->secs[i].raw_data_size == 0)
continue;
ret = crypto_shash_update(desc,
pebuf + ctx->secs[i].data_addr,
ctx->secs[i].raw_data_size);
if (ret < 0) {
kfree(canon);
return ret;
}
hashed_bytes += ctx->secs[i].raw_data_size;
}
kfree(canon);
if (pelen > hashed_bytes) {
tmp = hashed_bytes + ctx->certs_size;
ret = crypto_shash_update(desc,
pebuf + hashed_bytes,
pelen - tmp);
if (ret < 0)
return ret;
}
return 0;
}
/*
* Digest the contents of the PE binary, leaving out the image checksum and the
* certificate data block.
*/
static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
struct pefile_context *ctx)
{
struct crypto_shash *tfm;
struct shash_desc *desc;
size_t digest_size, desc_size;
void *digest;
int ret;
kenter(",%s", ctx->digest_algo);
/* Allocate the hashing algorithm we're going to need and find out how
* big the hash operational data will be.
*/
tfm = crypto_alloc_shash(ctx->digest_algo, 0, 0);
if (IS_ERR(tfm))
return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);
desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
digest_size = crypto_shash_digestsize(tfm);
if (digest_size != ctx->digest_len) {
pr_warn("Digest size mismatch (%zx != %x)\n",
digest_size, ctx->digest_len);
ret = -EBADMSG;
goto error_no_desc;
}
pr_debug("Digest: desc=%zu size=%zu\n", desc_size, digest_size);
ret = -ENOMEM;
desc = kzalloc(desc_size + digest_size, GFP_KERNEL);
if (!desc)
goto error_no_desc;
desc->tfm = tfm;
ret = crypto_shash_init(desc);
if (ret < 0)
goto error;
ret = pefile_digest_pe_contents(pebuf, pelen, ctx, desc);
if (ret < 0)
goto error;
digest = (void *)desc + desc_size;
ret = crypto_shash_final(desc, digest);
if (ret < 0)
goto error;
pr_debug("Digest calc = [%*ph]\n", ctx->digest_len, digest);
/* Check that the PE file digest matches that in the MSCODE part of the
* PKCS#7 certificate.
*/
if (memcmp(digest, ctx->digest, ctx->digest_len) != 0) {
pr_warn("Digest mismatch\n");
ret = -EKEYREJECTED;
} else {
pr_debug("The digests match!\n");
}
error:
kfree_sensitive(desc);
error_no_desc:
crypto_free_shash(tfm);
kleave(" = %d", ret);
return ret;
}
/**
* verify_pefile_signature - Verify the signature on a PE binary image
* @pebuf: Buffer containing the PE binary image
* @pelen: Length of the binary image
* @trusted_keys: Signing certificate(s) to use as starting points
* @usage: The use to which the key is being put.
*
* Validate that the certificate chain inside the PKCS#7 message inside the PE
* binary image intersects keys we already know and trust.
*
* Returns, in order of descending priority:
*
* (*) -ELIBBAD if the image cannot be parsed, or:
*
* (*) -EKEYREJECTED if a signature failed to match for which we have a valid
* key, or:
*
* (*) 0 if at least one signature chain intersects with the keys in the trust
* keyring, or:
*
* (*) -ENODATA if there is no signature present.
*
* (*) -ENOPKG if a suitable crypto module couldn't be found for a check on a
* chain.
*
* (*) -ENOKEY if we couldn't find a match for any of the signature chains in
* the message.
*
* May also return -ENOMEM.
*/
int verify_pefile_signature(const void *pebuf, unsigned pelen,
struct key *trusted_keys,
enum key_being_used_for usage)
{
struct pefile_context ctx;
int ret;
kenter("");
memset(&ctx, 0, sizeof(ctx));
ret = pefile_parse_binary(pebuf, pelen, &ctx);
if (ret < 0)
return ret;
ret = pefile_strip_sig_wrapper(pebuf, &ctx);
if (ret < 0)
return ret;
ret = verify_pkcs7_signature(NULL, 0,
pebuf + ctx.sig_offset, ctx.sig_len,
trusted_keys, usage,
mscode_parse, &ctx);
if (ret < 0)
goto error;
pr_debug("Digest: %u [%*ph]\n",
ctx.digest_len, ctx.digest_len, ctx.digest);
/* Generate the digest and check against the PKCS7 certificate
* contents.
*/
ret = pefile_digest_pe(pebuf, pelen, &ctx);
error:
kfree_sensitive(ctx.digest);
return ret;
}
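/*
 * Illustrative sketch (not part of this file): how a kexec-style caller
 * might invoke the verifier above.  The trusted keyring is hypothetical.
 */
static int example_verify_pe_image(const void *image, unsigned long len,
				   struct key *trusted)
{
	if (len > UINT_MAX)
		return -EINVAL;

	return verify_pefile_signature(image, len, trusted,
				       VERIFYING_KEXEC_PE_SIGNATURE);
}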
| linux-master | crypto/asymmetric_keys/verify_pefile.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Asymmetric public-key cryptography key type
*
* See Documentation/crypto/asymmetric-keys.rst
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#include <keys/asymmetric-subtype.h>
#include <keys/asymmetric-parser.h>
#include <crypto/public_key.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <keys/system_keyring.h>
#include <keys/user-type.h>
#include "asymmetric_keys.h"
const char *const key_being_used_for[NR__KEY_BEING_USED_FOR] = {
[VERIFYING_MODULE_SIGNATURE] = "mod sig",
[VERIFYING_FIRMWARE_SIGNATURE] = "firmware sig",
[VERIFYING_KEXEC_PE_SIGNATURE] = "kexec PE sig",
[VERIFYING_KEY_SIGNATURE] = "key sig",
[VERIFYING_KEY_SELF_SIGNATURE] = "key self sig",
[VERIFYING_UNSPECIFIED_SIGNATURE] = "unspec sig",
};
EXPORT_SYMBOL_GPL(key_being_used_for);
static LIST_HEAD(asymmetric_key_parsers);
static DECLARE_RWSEM(asymmetric_key_parsers_sem);
/**
* find_asymmetric_key - Find a key by ID.
* @keyring: The keys to search.
* @id_0: The first ID to look for or NULL.
* @id_1: The second ID to look for or NULL, matched together with @id_0
* against @keyring keys' id[0] and id[1].
* @id_2: The fallback ID to match against @keyring keys' id[2] if both of the
* other IDs are NULL.
* @partial: Use partial match for @id_0 and @id_1 if true, exact if false.
*
* Find a key in the given keyring by identifier. The preferred identifier is
* the id_0 and the fallback identifier is the id_1. If both are given, the
* former is matched (exactly or partially) against either of the sought key's
* identifiers and the latter must match the found key's second identifier
* exactly. If both are missing, id_2 must match the sought key's third
* identifier exactly.
*/
struct key *find_asymmetric_key(struct key *keyring,
const struct asymmetric_key_id *id_0,
const struct asymmetric_key_id *id_1,
const struct asymmetric_key_id *id_2,
bool partial)
{
struct key *key;
key_ref_t ref;
const char *lookup;
char *req, *p;
int len;
WARN_ON(!id_0 && !id_1 && !id_2);
if (id_0) {
lookup = id_0->data;
len = id_0->len;
} else if (id_1) {
lookup = id_1->data;
len = id_1->len;
} else {
lookup = id_2->data;
len = id_2->len;
}
/* Construct an identifier "id:<keyid>". */
p = req = kmalloc(2 + 1 + len * 2 + 1, GFP_KERNEL);
if (!req)
return ERR_PTR(-ENOMEM);
if (!id_0 && !id_1) {
*p++ = 'd';
*p++ = 'n';
} else if (partial) {
*p++ = 'i';
*p++ = 'd';
} else {
*p++ = 'e';
*p++ = 'x';
}
*p++ = ':';
p = bin2hex(p, lookup, len);
*p = 0;
pr_debug("Look up: \"%s\"\n", req);
ref = keyring_search(make_key_ref(keyring, 1),
&key_type_asymmetric, req, true);
if (IS_ERR(ref))
pr_debug("Request for key '%s' err %ld\n", req, PTR_ERR(ref));
kfree(req);
if (IS_ERR(ref)) {
switch (PTR_ERR(ref)) {
/* Hide some search errors */
case -EACCES:
case -ENOTDIR:
case -EAGAIN:
return ERR_PTR(-ENOKEY);
default:
return ERR_CAST(ref);
}
}
key = key_ref_to_ptr(ref);
if (id_0 && id_1) {
const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
if (!kids->id[1]) {
pr_debug("First ID matches, but second is missing\n");
goto reject;
}
if (!asymmetric_key_id_same(id_1, kids->id[1])) {
pr_debug("First ID matches, but second does not\n");
goto reject;
}
}
pr_devel("<==%s() = 0 [%x]\n", __func__, key_serial(key));
return key;
reject:
key_put(key);
return ERR_PTR(-EKEYREJECTED);
}
EXPORT_SYMBOL_GPL(find_asymmetric_key);
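/*
 * Illustrative sketch (not part of this file): looking a certificate up
 * by a raw subjectKeyIdentifier blob.  The keyring and SKID bytes are
 * hypothetical.
 */
static struct key *example_find_by_skid(struct key *keyring,
					const u8 *skid, size_t skid_len)
{
	struct asymmetric_key_id *kid;
	struct key *key;

	/* A key ID may be built from one blob plus an empty second blob. */
	kid = asymmetric_key_generate_id(skid, skid_len, "", 0);
	if (IS_ERR(kid))
		return ERR_CAST(kid);

	/* Partial match against the sought key's first two IDs. */
	key = find_asymmetric_key(keyring, kid, NULL, NULL, true);
	kfree(kid);
	return key;
}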
/**
 * asymmetric_key_generate_id - Construct an asymmetric key ID
* @val_1: First binary blob
* @len_1: Length of first binary blob
* @val_2: Second binary blob
* @len_2: Length of second binary blob
*
* Construct an asymmetric key ID from a pair of binary blobs.
*/
struct asymmetric_key_id *asymmetric_key_generate_id(const void *val_1,
size_t len_1,
const void *val_2,
size_t len_2)
{
struct asymmetric_key_id *kid;
kid = kmalloc(sizeof(struct asymmetric_key_id) + len_1 + len_2,
GFP_KERNEL);
if (!kid)
return ERR_PTR(-ENOMEM);
kid->len = len_1 + len_2;
memcpy(kid->data, val_1, len_1);
memcpy(kid->data + len_1, val_2, len_2);
return kid;
}
EXPORT_SYMBOL_GPL(asymmetric_key_generate_id);
/**
 * asymmetric_key_id_same - Return true if two asymmetric key IDs are the same.
* @kid1: The key ID to compare
* @kid2: The key ID to compare
*/
bool asymmetric_key_id_same(const struct asymmetric_key_id *kid1,
const struct asymmetric_key_id *kid2)
{
if (!kid1 || !kid2)
return false;
if (kid1->len != kid2->len)
return false;
return memcmp(kid1->data, kid2->data, kid1->len) == 0;
}
EXPORT_SYMBOL_GPL(asymmetric_key_id_same);
/**
 * asymmetric_key_id_partial - Return true if two asymmetric key IDs
* partially match
* @kid1: The key ID to compare
* @kid2: The key ID to compare
*/
bool asymmetric_key_id_partial(const struct asymmetric_key_id *kid1,
const struct asymmetric_key_id *kid2)
{
if (!kid1 || !kid2)
return false;
if (kid1->len < kid2->len)
return false;
return memcmp(kid1->data + (kid1->len - kid2->len),
kid2->data, kid2->len) == 0;
}
EXPORT_SYMBOL_GPL(asymmetric_key_id_partial);
/**
* asymmetric_match_key_ids - Search asymmetric key IDs 1 & 2
* @kids: The pair of key IDs to check
* @match_id: The key ID we're looking for
* @match: The match function to use
*/
static bool asymmetric_match_key_ids(
const struct asymmetric_key_ids *kids,
const struct asymmetric_key_id *match_id,
bool (*match)(const struct asymmetric_key_id *kid1,
const struct asymmetric_key_id *kid2))
{
int i;
if (!kids || !match_id)
return false;
for (i = 0; i < 2; i++)
if (match(kids->id[i], match_id))
return true;
return false;
}
/* helper function can be called directly with pre-allocated memory */
inline int __asymmetric_key_hex_to_key_id(const char *id,
struct asymmetric_key_id *match_id,
size_t hexlen)
{
match_id->len = hexlen;
return hex2bin(match_id->data, id, hexlen);
}
/**
* asymmetric_key_hex_to_key_id - Convert a hex string into a key ID.
* @id: The ID as a hex string.
*/
struct asymmetric_key_id *asymmetric_key_hex_to_key_id(const char *id)
{
struct asymmetric_key_id *match_id;
size_t asciihexlen;
int ret;
if (!*id)
return ERR_PTR(-EINVAL);
asciihexlen = strlen(id);
if (asciihexlen & 1)
return ERR_PTR(-EINVAL);
match_id = kmalloc(sizeof(struct asymmetric_key_id) + asciihexlen / 2,
GFP_KERNEL);
if (!match_id)
return ERR_PTR(-ENOMEM);
ret = __asymmetric_key_hex_to_key_id(id, match_id, asciihexlen / 2);
if (ret < 0) {
kfree(match_id);
return ERR_PTR(-EINVAL);
}
return match_id;
}
/*
* Match asymmetric keys by an exact match on one of the first two IDs.
*/
static bool asymmetric_key_cmp(const struct key *key,
const struct key_match_data *match_data)
{
const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
const struct asymmetric_key_id *match_id = match_data->preparsed;
return asymmetric_match_key_ids(kids, match_id,
asymmetric_key_id_same);
}
/*
* Match asymmetric keys by a partial match on one of the first two IDs.
*/
static bool asymmetric_key_cmp_partial(const struct key *key,
const struct key_match_data *match_data)
{
const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
const struct asymmetric_key_id *match_id = match_data->preparsed;
return asymmetric_match_key_ids(kids, match_id,
asymmetric_key_id_partial);
}
/*
* Match asymmetric keys by an exact match on the third IDs.
*/
static bool asymmetric_key_cmp_name(const struct key *key,
const struct key_match_data *match_data)
{
const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
const struct asymmetric_key_id *match_id = match_data->preparsed;
return kids && asymmetric_key_id_same(kids->id[2], match_id);
}
/*
* Preparse the match criterion. If we don't set lookup_type and cmp,
* the default will be an exact match on the key description.
*
* There are some specifiers for matching key IDs rather than by the key
* description:
*
* "id:<id>" - find a key by partial match on one of the first two IDs
* "ex:<id>" - find a key by exact match on one of the first two IDs
* "dn:<id>" - find a key by exact match on the third ID
*
* These have to be searched by iteration rather than by direct lookup because
* the key is hashed according to its description.
*/
static int asymmetric_key_match_preparse(struct key_match_data *match_data)
{
struct asymmetric_key_id *match_id;
const char *spec = match_data->raw_data;
const char *id;
bool (*cmp)(const struct key *, const struct key_match_data *) =
asymmetric_key_cmp;
if (!spec || !*spec)
return -EINVAL;
if (spec[0] == 'i' &&
spec[1] == 'd' &&
spec[2] == ':') {
id = spec + 3;
cmp = asymmetric_key_cmp_partial;
} else if (spec[0] == 'e' &&
spec[1] == 'x' &&
spec[2] == ':') {
id = spec + 3;
} else if (spec[0] == 'd' &&
spec[1] == 'n' &&
spec[2] == ':') {
id = spec + 3;
cmp = asymmetric_key_cmp_name;
} else {
goto default_match;
}
match_id = asymmetric_key_hex_to_key_id(id);
if (IS_ERR(match_id))
return PTR_ERR(match_id);
match_data->preparsed = match_id;
match_data->cmp = cmp;
match_data->lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE;
return 0;
default_match:
return 0;
}
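/*
 * Example of the specifiers above as seen from userspace (the hex values
 * are hypothetical):
 *
 *	keyctl search @us asymmetric "id:b35f0dc6"	partial ID match
 *	keyctl search @us asymmetric "ex:0123456789ab"	exact ID match
 *	keyctl search @us asymmetric "dn:30450a0b"	match on id[2]
 */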
/*
 * Free the preparsed match criterion.
*/
static void asymmetric_key_match_free(struct key_match_data *match_data)
{
kfree(match_data->preparsed);
}
/*
* Describe the asymmetric key
*/
static void asymmetric_key_describe(const struct key *key, struct seq_file *m)
{
const struct asymmetric_key_subtype *subtype = asymmetric_key_subtype(key);
const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
const struct asymmetric_key_id *kid;
const unsigned char *p;
int n;
seq_puts(m, key->description);
if (subtype) {
seq_puts(m, ": ");
subtype->describe(key, m);
if (kids && kids->id[1]) {
kid = kids->id[1];
seq_putc(m, ' ');
n = kid->len;
p = kid->data;
if (n > 4) {
p += n - 4;
n = 4;
}
seq_printf(m, "%*phN", n, p);
}
seq_puts(m, " [");
/* put something here to indicate the key's capabilities */
seq_putc(m, ']');
}
}
/*
 * Preparse an asymmetric payload to format the contents appropriately for the
 * internal payload, cutting down on the number of scans of the data performed.
*
* We also generate a proposed description from the contents of the key that
* can be used to name the key if the user doesn't want to provide one.
*/
static int asymmetric_key_preparse(struct key_preparsed_payload *prep)
{
struct asymmetric_key_parser *parser;
int ret;
pr_devel("==>%s()\n", __func__);
if (prep->datalen == 0)
return -EINVAL;
down_read(&asymmetric_key_parsers_sem);
ret = -EBADMSG;
list_for_each_entry(parser, &asymmetric_key_parsers, link) {
pr_debug("Trying parser '%s'\n", parser->name);
ret = parser->parse(prep);
if (ret != -EBADMSG) {
pr_debug("Parser recognised the format (ret %d)\n",
ret);
break;
}
}
up_read(&asymmetric_key_parsers_sem);
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
/*
* Clean up the key ID list
*/
static void asymmetric_key_free_kids(struct asymmetric_key_ids *kids)
{
int i;
if (kids) {
for (i = 0; i < ARRAY_SIZE(kids->id); i++)
kfree(kids->id[i]);
kfree(kids);
}
}
/*
* Clean up the preparse data
*/
static void asymmetric_key_free_preparse(struct key_preparsed_payload *prep)
{
struct asymmetric_key_subtype *subtype = prep->payload.data[asym_subtype];
struct asymmetric_key_ids *kids = prep->payload.data[asym_key_ids];
pr_devel("==>%s()\n", __func__);
if (subtype) {
subtype->destroy(prep->payload.data[asym_crypto],
prep->payload.data[asym_auth]);
module_put(subtype->owner);
}
asymmetric_key_free_kids(kids);
kfree(prep->description);
}
/*
 * dispose of the data dangling from the corpse of an asymmetric key
*/
static void asymmetric_key_destroy(struct key *key)
{
struct asymmetric_key_subtype *subtype = asymmetric_key_subtype(key);
struct asymmetric_key_ids *kids = key->payload.data[asym_key_ids];
void *data = key->payload.data[asym_crypto];
void *auth = key->payload.data[asym_auth];
key->payload.data[asym_crypto] = NULL;
key->payload.data[asym_subtype] = NULL;
key->payload.data[asym_key_ids] = NULL;
key->payload.data[asym_auth] = NULL;
if (subtype) {
subtype->destroy(data, auth);
module_put(subtype->owner);
}
asymmetric_key_free_kids(kids);
}
static struct key_restriction *asymmetric_restriction_alloc(
key_restrict_link_func_t check,
struct key *key)
{
struct key_restriction *keyres =
kzalloc(sizeof(struct key_restriction), GFP_KERNEL);
if (!keyres)
return ERR_PTR(-ENOMEM);
keyres->check = check;
keyres->key = key;
keyres->keytype = &key_type_asymmetric;
return keyres;
}
/*
* look up keyring restrict functions for asymmetric keys
*/
static struct key_restriction *asymmetric_lookup_restriction(
const char *restriction)
{
char *restrict_method;
char *parse_buf;
char *next;
struct key_restriction *ret = ERR_PTR(-EINVAL);
if (strcmp("builtin_trusted", restriction) == 0)
return asymmetric_restriction_alloc(
restrict_link_by_builtin_trusted, NULL);
if (strcmp("builtin_and_secondary_trusted", restriction) == 0)
return asymmetric_restriction_alloc(
restrict_link_by_builtin_and_secondary_trusted, NULL);
parse_buf = kstrndup(restriction, PAGE_SIZE, GFP_KERNEL);
if (!parse_buf)
return ERR_PTR(-ENOMEM);
next = parse_buf;
restrict_method = strsep(&next, ":");
if ((strcmp(restrict_method, "key_or_keyring") == 0) && next) {
char *key_text;
key_serial_t serial;
struct key *key;
key_restrict_link_func_t link_fn =
restrict_link_by_key_or_keyring;
bool allow_null_key = false;
key_text = strsep(&next, ":");
if (next) {
if (strcmp(next, "chain") != 0)
goto out;
link_fn = restrict_link_by_key_or_keyring_chain;
allow_null_key = true;
}
if (kstrtos32(key_text, 0, &serial) < 0)
goto out;
if ((serial == 0) && allow_null_key) {
key = NULL;
} else {
key = key_lookup(serial);
if (IS_ERR(key)) {
ret = ERR_CAST(key);
goto out;
}
}
ret = asymmetric_restriction_alloc(link_fn, key);
if (IS_ERR(ret))
key_put(key);
}
out:
kfree(parse_buf);
return ret;
}
int asymmetric_key_eds_op(struct kernel_pkey_params *params,
const void *in, void *out)
{
const struct asymmetric_key_subtype *subtype;
struct key *key = params->key;
int ret;
pr_devel("==>%s()\n", __func__);
if (key->type != &key_type_asymmetric)
return -EINVAL;
subtype = asymmetric_key_subtype(key);
if (!subtype ||
!key->payload.data[0])
return -EINVAL;
if (!subtype->eds_op)
return -ENOTSUPP;
ret = subtype->eds_op(params, in, out);
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
static int asymmetric_key_verify_signature(struct kernel_pkey_params *params,
const void *in, const void *in2)
{
struct public_key_signature sig = {
.s_size = params->in2_len,
.digest_size = params->in_len,
.encoding = params->encoding,
.hash_algo = params->hash_algo,
.digest = (void *)in,
.s = (void *)in2,
};
return verify_signature(params->key, &sig);
}
struct key_type key_type_asymmetric = {
.name = "asymmetric",
.preparse = asymmetric_key_preparse,
.free_preparse = asymmetric_key_free_preparse,
.instantiate = generic_key_instantiate,
.match_preparse = asymmetric_key_match_preparse,
.match_free = asymmetric_key_match_free,
.destroy = asymmetric_key_destroy,
.describe = asymmetric_key_describe,
.lookup_restriction = asymmetric_lookup_restriction,
.asym_query = query_asymmetric_key,
.asym_eds_op = asymmetric_key_eds_op,
.asym_verify_signature = asymmetric_key_verify_signature,
};
EXPORT_SYMBOL_GPL(key_type_asymmetric);
/**
 * register_asymmetric_key_parser - Register an asymmetric key blob parser
* @parser: The parser to register
*/
int register_asymmetric_key_parser(struct asymmetric_key_parser *parser)
{
struct asymmetric_key_parser *cursor;
int ret;
down_write(&asymmetric_key_parsers_sem);
list_for_each_entry(cursor, &asymmetric_key_parsers, link) {
if (strcmp(cursor->name, parser->name) == 0) {
pr_err("Asymmetric key parser '%s' already registered\n",
parser->name);
ret = -EEXIST;
goto out;
}
}
list_add_tail(&parser->link, &asymmetric_key_parsers);
pr_notice("Asymmetric key parser '%s' registered\n", parser->name);
ret = 0;
out:
up_write(&asymmetric_key_parsers_sem);
return ret;
}
EXPORT_SYMBOL_GPL(register_asymmetric_key_parser);
/**
 * unregister_asymmetric_key_parser - Unregister an asymmetric key blob parser
* @parser: The parser to unregister
*/
void unregister_asymmetric_key_parser(struct asymmetric_key_parser *parser)
{
down_write(&asymmetric_key_parsers_sem);
list_del(&parser->link);
up_write(&asymmetric_key_parsers_sem);
pr_notice("Asymmetric key parser '%s' unregistered\n", parser->name);
}
EXPORT_SYMBOL_GPL(unregister_asymmetric_key_parser);
/*
* Module stuff
*/
static int __init asymmetric_key_init(void)
{
return register_key_type(&key_type_asymmetric);
}
static void __exit asymmetric_key_cleanup(void)
{
unregister_key_type(&key_type_asymmetric);
}
module_init(asymmetric_key_init);
module_exit(asymmetric_key_cleanup);
| linux-master | crypto/asymmetric_keys/asymmetric_type.c |
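/*
 * Illustrative sketch (not part of the tree): the shape of a minimal
 * blob parser module built on the registration API above.  The "EXMP"
 * magic and format name are hypothetical.
 */
#include <keys/asymmetric-parser.h>
#include <linux/module.h>
#include <linux/string.h>

static int example_key_parse(struct key_preparsed_payload *prep)
{
	/* Return -EBADMSG when the blob isn't ours so other parsers run. */
	if (prep->datalen < 4 || memcmp(prep->data, "EXMP", 4) != 0)
		return -EBADMSG;

	/*
	 * A real parser would now fill in prep->payload (subtype, key IDs,
	 * crypto material) and prep->description.
	 */
	return -ENOPKG;
}

static struct asymmetric_key_parser example_key_parser = {
	.owner	= THIS_MODULE,
	.name	= "example",
	.parse	= example_key_parse,
};

static int __init example_key_mod_init(void)
{
	return register_asymmetric_key_parser(&example_key_parser);
}

static void __exit example_key_mod_exit(void)
{
	unregister_asymmetric_key_parser(&example_key_parser);
}

module_init(example_key_mod_init);
module_exit(example_key_mod_exit);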
// SPDX-License-Identifier: GPL-2.0-or-later
/* Verify the signature on a PKCS#7 message.
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*/
#define pr_fmt(fmt) "PKCS7: "fmt
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/asn1.h>
#include <crypto/hash.h>
#include <crypto/hash_info.h>
#include <crypto/public_key.h>
#include "pkcs7_parser.h"
/*
* Digest the relevant parts of the PKCS#7 data
*/
static int pkcs7_digest(struct pkcs7_message *pkcs7,
struct pkcs7_signed_info *sinfo)
{
struct public_key_signature *sig = sinfo->sig;
struct crypto_shash *tfm;
struct shash_desc *desc;
size_t desc_size;
int ret;
kenter(",%u,%s", sinfo->index, sinfo->sig->hash_algo);
/* The digest was calculated already. */
if (sig->digest)
return 0;
if (!sinfo->sig->hash_algo)
return -ENOPKG;
/* Allocate the hashing algorithm we're going to need and find out how
* big the hash operational data will be.
*/
tfm = crypto_alloc_shash(sinfo->sig->hash_algo, 0, 0);
if (IS_ERR(tfm))
return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm);
desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
sig->digest_size = crypto_shash_digestsize(tfm);
ret = -ENOMEM;
sig->digest = kmalloc(sig->digest_size, GFP_KERNEL);
if (!sig->digest)
goto error_no_desc;
desc = kzalloc(desc_size, GFP_KERNEL);
if (!desc)
goto error_no_desc;
desc->tfm = tfm;
/* Digest the message [RFC2315 9.3] */
ret = crypto_shash_digest(desc, pkcs7->data, pkcs7->data_len,
sig->digest);
if (ret < 0)
goto error;
pr_devel("MsgDigest = [%*ph]\n", 8, sig->digest);
/* However, if there are authenticated attributes, there must be a
* message digest attribute amongst them which corresponds to the
* digest we just calculated.
*/
if (sinfo->authattrs) {
u8 tag;
if (!sinfo->msgdigest) {
pr_warn("Sig %u: No messageDigest\n", sinfo->index);
ret = -EKEYREJECTED;
goto error;
}
if (sinfo->msgdigest_len != sig->digest_size) {
pr_warn("Sig %u: Invalid digest size (%u)\n",
sinfo->index, sinfo->msgdigest_len);
ret = -EBADMSG;
goto error;
}
if (memcmp(sig->digest, sinfo->msgdigest,
sinfo->msgdigest_len) != 0) {
pr_warn("Sig %u: Message digest doesn't match\n",
sinfo->index);
ret = -EKEYREJECTED;
goto error;
}
/* We then calculate anew, using the authenticated attributes
* as the contents of the digest instead. Note that we need to
* convert the attributes from a CONT.0 into a SET before we
* hash it.
*/
memset(sig->digest, 0, sig->digest_size);
ret = crypto_shash_init(desc);
if (ret < 0)
goto error;
tag = ASN1_CONS_BIT | ASN1_SET;
ret = crypto_shash_update(desc, &tag, 1);
if (ret < 0)
goto error;
ret = crypto_shash_finup(desc, sinfo->authattrs,
sinfo->authattrs_len, sig->digest);
if (ret < 0)
goto error;
pr_devel("AADigest = [%*ph]\n", 8, sig->digest);
}
error:
kfree(desc);
error_no_desc:
crypto_free_shash(tfm);
kleave(" = %d", ret);
return ret;
}
int pkcs7_get_digest(struct pkcs7_message *pkcs7, const u8 **buf, u32 *len,
enum hash_algo *hash_algo)
{
struct pkcs7_signed_info *sinfo = pkcs7->signed_infos;
int i, ret;
/*
* This function doesn't support messages with more than one signature.
*/
if (sinfo == NULL || sinfo->next != NULL)
return -EBADMSG;
ret = pkcs7_digest(pkcs7, sinfo);
if (ret)
return ret;
*buf = sinfo->sig->digest;
*len = sinfo->sig->digest_size;
i = match_string(hash_algo_name, HASH_ALGO__LAST,
sinfo->sig->hash_algo);
if (i >= 0)
*hash_algo = i;
return 0;
}
/*
* Find the key (X.509 certificate) to use to verify a PKCS#7 message. PKCS#7
* uses the issuer's name and the issuing certificate serial number for
* matching purposes. These must match the certificate issuer's name (not
* subject's name) and the certificate serial number [RFC 2315 6.7].
*/
static int pkcs7_find_key(struct pkcs7_message *pkcs7,
struct pkcs7_signed_info *sinfo)
{
struct x509_certificate *x509;
unsigned certix = 1;
kenter("%u", sinfo->index);
for (x509 = pkcs7->certs; x509; x509 = x509->next, certix++) {
/* I'm _assuming_ that the generator of the PKCS#7 message will
* encode the fields from the X.509 cert in the same way in the
* PKCS#7 message - but I can't be 100% sure of that. It's
* possible this will need element-by-element comparison.
*/
if (!asymmetric_key_id_same(x509->id, sinfo->sig->auth_ids[0]))
continue;
pr_devel("Sig %u: Found cert serial match X.509[%u]\n",
sinfo->index, certix);
sinfo->signer = x509;
return 0;
}
/* The relevant X.509 cert isn't found here, but it might be found in
* the trust keyring.
*/
pr_debug("Sig %u: Issuing X.509 cert not found (#%*phN)\n",
sinfo->index,
sinfo->sig->auth_ids[0]->len, sinfo->sig->auth_ids[0]->data);
return 0;
}
/*
* Verify the internal certificate chain as best we can.
*/
static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
struct pkcs7_signed_info *sinfo)
{
struct public_key_signature *sig;
struct x509_certificate *x509 = sinfo->signer, *p;
struct asymmetric_key_id *auth;
int ret;
kenter("");
for (p = pkcs7->certs; p; p = p->next)
p->seen = false;
for (;;) {
pr_debug("verify %s: %*phN\n",
x509->subject,
x509->raw_serial_size, x509->raw_serial);
x509->seen = true;
if (x509->blacklisted) {
/* If this cert is blacklisted, then mark everything
* that depends on this as blacklisted too.
*/
sinfo->blacklisted = true;
for (p = sinfo->signer; p != x509; p = p->signer)
p->blacklisted = true;
pr_debug("- blacklisted\n");
return 0;
}
pr_debug("- issuer %s\n", x509->issuer);
sig = x509->sig;
if (sig->auth_ids[0])
pr_debug("- authkeyid.id %*phN\n",
sig->auth_ids[0]->len, sig->auth_ids[0]->data);
if (sig->auth_ids[1])
pr_debug("- authkeyid.skid %*phN\n",
sig->auth_ids[1]->len, sig->auth_ids[1]->data);
if (x509->self_signed) {
/* If there's no authority certificate specified, then
* the certificate must be self-signed and is the root
* of the chain. Likewise if the cert is its own
* authority.
*/
if (x509->unsupported_sig)
goto unsupported_sig_in_x509;
x509->signer = x509;
pr_debug("- self-signed\n");
return 0;
}
/* Look through the X.509 certificates in the PKCS#7 message's
* list to see if the next one is there.
*/
auth = sig->auth_ids[0];
if (auth) {
pr_debug("- want %*phN\n", auth->len, auth->data);
for (p = pkcs7->certs; p; p = p->next) {
pr_debug("- cmp [%u] %*phN\n",
p->index, p->id->len, p->id->data);
if (asymmetric_key_id_same(p->id, auth))
goto found_issuer_check_skid;
}
} else if (sig->auth_ids[1]) {
auth = sig->auth_ids[1];
pr_debug("- want %*phN\n", auth->len, auth->data);
for (p = pkcs7->certs; p; p = p->next) {
if (!p->skid)
continue;
pr_debug("- cmp [%u] %*phN\n",
p->index, p->skid->len, p->skid->data);
if (asymmetric_key_id_same(p->skid, auth))
goto found_issuer;
}
}
/* We didn't find the root of this chain */
pr_debug("- top\n");
return 0;
found_issuer_check_skid:
/* We matched issuer + serialNumber, but if there's an
* authKeyId.keyId, that must match the CA subjKeyId also.
*/
if (sig->auth_ids[1] &&
!asymmetric_key_id_same(p->skid, sig->auth_ids[1])) {
pr_warn("Sig %u: X.509 chain contains auth-skid nonmatch (%u->%u)\n",
sinfo->index, x509->index, p->index);
return -EKEYREJECTED;
}
found_issuer:
pr_debug("- subject %s\n", p->subject);
if (p->seen) {
pr_warn("Sig %u: X.509 chain contains loop\n",
sinfo->index);
return 0;
}
ret = public_key_verify_signature(p->pub, x509->sig);
if (ret < 0)
return ret;
x509->signer = p;
if (x509 == p) {
pr_debug("- self-signed\n");
return 0;
}
x509 = p;
might_sleep();
}
unsupported_sig_in_x509:
/* Just prune the certificate chain at this point if we lack some
* crypto module to go further. Note, however, we don't want to set
* sinfo->unsupported_crypto as the signed info block may still be
* validatable against an X.509 cert lower in the chain that we have a
* trusted copy of.
*/
return 0;
}
/*
* Verify one signed information block from a PKCS#7 message.
*/
static int pkcs7_verify_one(struct pkcs7_message *pkcs7,
struct pkcs7_signed_info *sinfo)
{
int ret;
kenter(",%u", sinfo->index);
/* First of all, digest the data in the PKCS#7 message and the
* signed information block
*/
ret = pkcs7_digest(pkcs7, sinfo);
if (ret < 0)
return ret;
/* Find the key for the signature if there is one */
ret = pkcs7_find_key(pkcs7, sinfo);
if (ret < 0)
return ret;
if (!sinfo->signer)
return 0;
pr_devel("Using X.509[%u] for sig %u\n",
sinfo->signer->index, sinfo->index);
/* Check that the PKCS#7 signing time is valid according to the X.509
* certificate. We can't, however, check against the system clock
* since that may not have been set yet and may be wrong.
*/
if (test_bit(sinfo_has_signing_time, &sinfo->aa_set)) {
if (sinfo->signing_time < sinfo->signer->valid_from ||
sinfo->signing_time > sinfo->signer->valid_to) {
pr_warn("Message signed outside of X.509 validity window\n");
return -EKEYREJECTED;
}
}
/* Verify the PKCS#7 binary against the key */
ret = public_key_verify_signature(sinfo->signer->pub, sinfo->sig);
if (ret < 0)
return ret;
pr_devel("Verified signature %u\n", sinfo->index);
/* Verify the internal certificate chain */
return pkcs7_verify_sig_chain(pkcs7, sinfo);
}
/**
* pkcs7_verify - Verify a PKCS#7 message
* @pkcs7: The PKCS#7 message to be verified
* @usage: The use to which the key is being put
*
 * Verify that a PKCS#7 message is internally consistent - that is, that the
 * data digest matches the digest in the AuthAttrs and that any signature in
 * the message, or in one of the X.509 certificates it carries that matches
 * another X.509 cert in the message, can be verified.
 *
 * This does not attempt to match the contents of the PKCS#7 message against
 * any external public keys.
*
* Returns, in order of descending priority:
*
* (*) -EKEYREJECTED if a key was selected that had a usage restriction at
* odds with the specified usage, or:
*
* (*) -EKEYREJECTED if a signature failed to match for which we found an
* appropriate X.509 certificate, or:
*
* (*) -EBADMSG if some part of the message was invalid, or:
*
* (*) 0 if a signature chain passed verification, or:
*
* (*) -EKEYREJECTED if a blacklisted key was encountered, or:
*
* (*) -ENOPKG if none of the signature chains are verifiable because suitable
* crypto modules couldn't be found.
*/
int pkcs7_verify(struct pkcs7_message *pkcs7,
enum key_being_used_for usage)
{
struct pkcs7_signed_info *sinfo;
int actual_ret = -ENOPKG;
int ret;
kenter("");
switch (usage) {
case VERIFYING_MODULE_SIGNATURE:
if (pkcs7->data_type != OID_data) {
pr_warn("Invalid module sig (not pkcs7-data)\n");
return -EKEYREJECTED;
}
if (pkcs7->have_authattrs) {
pr_warn("Invalid module sig (has authattrs)\n");
return -EKEYREJECTED;
}
break;
case VERIFYING_FIRMWARE_SIGNATURE:
if (pkcs7->data_type != OID_data) {
pr_warn("Invalid firmware sig (not pkcs7-data)\n");
return -EKEYREJECTED;
}
if (!pkcs7->have_authattrs) {
pr_warn("Invalid firmware sig (missing authattrs)\n");
return -EKEYREJECTED;
}
break;
case VERIFYING_KEXEC_PE_SIGNATURE:
if (pkcs7->data_type != OID_msIndirectData) {
pr_warn("Invalid kexec sig (not Authenticode)\n");
return -EKEYREJECTED;
}
/* Authattr presence checked in parser */
break;
case VERIFYING_UNSPECIFIED_SIGNATURE:
if (pkcs7->data_type != OID_data) {
pr_warn("Invalid unspecified sig (not pkcs7-data)\n");
return -EKEYREJECTED;
}
break;
default:
return -EINVAL;
}
for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) {
ret = pkcs7_verify_one(pkcs7, sinfo);
if (sinfo->blacklisted) {
if (actual_ret == -ENOPKG)
actual_ret = -EKEYREJECTED;
continue;
}
if (ret < 0) {
if (ret == -ENOPKG) {
sinfo->unsupported_crypto = true;
continue;
}
kleave(" = %d", ret);
return ret;
}
actual_ret = 0;
}
kleave(" = %d", actual_ret);
return actual_ret;
}
EXPORT_SYMBOL_GPL(pkcs7_verify);
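/*
 * Usage sketch (illustrative only, not from the original source; buffer
 * names are hypothetical and error handling is abbreviated): a caller
 * verifying a message that carries its signed data inline parses the blob
 * and hands it straight to pkcs7_verify().
 *
 *	struct pkcs7_message *pkcs7;
 *	int ret;
 *
 *	pkcs7 = pkcs7_parse_message(sig_buf, sig_len);
 *	if (IS_ERR(pkcs7))
 *		return PTR_ERR(pkcs7);
 *	ret = pkcs7_verify(pkcs7, VERIFYING_UNSPECIFIED_SIGNATURE);
 *	pkcs7_free_message(pkcs7);
 *	return ret;
 */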
/**
* pkcs7_supply_detached_data - Supply the data needed to verify a PKCS#7 message
* @pkcs7: The PKCS#7 message
* @data: The data to be verified
* @datalen: The amount of data
*
* Supply the detached data needed to verify a PKCS#7 message. Note that no
* attempt to retain/pin the data is made. That is left to the caller. The
* data will not be modified by pkcs7_verify() and will not be freed when the
* PKCS#7 message is freed.
*
* Returns -EINVAL if data is already supplied in the message, 0 otherwise.
*/
int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7,
const void *data, size_t datalen)
{
if (pkcs7->data) {
pr_warn("Data already supplied\n");
return -EINVAL;
}
pkcs7->data = data;
pkcs7->data_len = datalen;
return 0;
}
EXPORT_SYMBOL_GPL(pkcs7_supply_detached_data);
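/*
 * Usage sketch for the detached-data case (illustrative only; fw_buf and
 * fw_len are hypothetical names): when the signed content travels
 * separately from the PKCS#7 blob, as with firmware, supply it before
 * verifying.
 *
 *	pkcs7 = pkcs7_parse_message(sig_buf, sig_len);
 *	if (IS_ERR(pkcs7))
 *		return PTR_ERR(pkcs7);
 *	ret = pkcs7_supply_detached_data(pkcs7, fw_buf, fw_len);
 *	if (ret == 0)
 *		ret = pkcs7_verify(pkcs7, VERIFYING_FIRMWARE_SIGNATURE);
 *	pkcs7_free_message(pkcs7);
 */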
| linux-master | crypto/asymmetric_keys/pkcs7_verify.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* asynchronous raid6 recovery self test
* Copyright (c) 2009, Intel Corporation.
*
* based on drivers/md/raid6test/test.c:
* Copyright 2002-2007 H. Peter Anvin
*/
#include <linux/async_tx.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/module.h>
#undef pr
#define pr(fmt, args...) pr_info("raid6test: " fmt, ##args)
#define NDISKS 64 /* Including P and Q */
static struct page *dataptrs[NDISKS];
static unsigned int dataoffs[NDISKS];
static addr_conv_t addr_conv[NDISKS];
static struct page *data[NDISKS+3];
static struct page *spare;
static struct page *recovi;
static struct page *recovj;
static void callback(void *param)
{
struct completion *cmp = param;
complete(cmp);
}
static void makedata(int disks)
{
int i;
for (i = 0; i < disks; i++) {
get_random_bytes(page_address(data[i]), PAGE_SIZE);
dataptrs[i] = data[i];
dataoffs[i] = 0;
}
}
static char disk_type(int d, int disks)
{
if (d == disks - 2)
return 'P';
else if (d == disks - 1)
return 'Q';
else
return 'D';
}
/* Recover two failed blocks. */
static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
struct page **ptrs, unsigned int *offs)
{
struct async_submit_ctl submit;
struct completion cmp;
struct dma_async_tx_descriptor *tx = NULL;
enum sum_check_flags result = ~0;
if (faila > failb)
swap(faila, failb);
if (failb == disks-1) {
if (faila == disks-2) {
/* P+Q failure. Just rebuild the syndrome. */
init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
tx = async_gen_syndrome(ptrs, offs,
disks, bytes, &submit);
} else {
struct page *blocks[NDISKS];
struct page *dest;
int count = 0;
int i;
BUG_ON(disks > NDISKS);
/* data+Q failure. Reconstruct data from P,
* then rebuild syndrome
*/
for (i = disks; i-- ; ) {
if (i == faila || i == failb)
continue;
blocks[count++] = ptrs[i];
}
dest = ptrs[faila];
init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
NULL, NULL, addr_conv);
tx = async_xor(dest, blocks, 0, count, bytes, &submit);
init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv);
tx = async_gen_syndrome(ptrs, offs,
disks, bytes, &submit);
}
} else {
if (failb == disks-2) {
/* data+P failure. */
init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
tx = async_raid6_datap_recov(disks, bytes,
faila, ptrs, offs, &submit);
} else {
/* data+data failure. */
init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
tx = async_raid6_2data_recov(disks, bytes,
faila, failb, ptrs, offs, &submit);
}
}
init_completion(&cmp);
init_async_submit(&submit, ASYNC_TX_ACK, tx, callback, &cmp, addr_conv);
tx = async_syndrome_val(ptrs, offs,
disks, bytes, &result, spare, 0, &submit);
async_tx_issue_pending(tx);
if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0)
pr("%s: timeout! (faila: %d failb: %d disks: %d)\n",
__func__, faila, failb, disks);
if (result != 0)
pr("%s: validation failure! faila: %d failb: %d sum_check_flags: %x\n",
__func__, faila, failb, result);
}
static int test_disks(int i, int j, int disks)
{
int erra, errb;
memset(page_address(recovi), 0xf0, PAGE_SIZE);
memset(page_address(recovj), 0xba, PAGE_SIZE);
dataptrs[i] = recovi;
dataptrs[j] = recovj;
raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs, dataoffs);
erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE);
errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE);
pr("%s(%d, %d): faila=%3d(%c) failb=%3d(%c) %s\n",
__func__, i, j, i, disk_type(i, disks), j, disk_type(j, disks),
(!erra && !errb) ? "OK" : !erra ? "ERRB" : !errb ? "ERRA" : "ERRAB");
dataptrs[i] = data[i];
dataptrs[j] = data[j];
return erra || errb;
}
static int test(int disks, int *tests)
{
struct dma_async_tx_descriptor *tx;
struct async_submit_ctl submit;
struct completion cmp;
int err = 0;
int i, j;
recovi = data[disks];
recovj = data[disks+1];
spare = data[disks+2];
makedata(disks);
/* Nuke syndromes */
memset(page_address(data[disks-2]), 0xee, PAGE_SIZE);
memset(page_address(data[disks-1]), 0xee, PAGE_SIZE);
/* Generate assumed good syndrome */
init_completion(&cmp);
init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp, addr_conv);
tx = async_gen_syndrome(dataptrs, dataoffs, disks, PAGE_SIZE, &submit);
async_tx_issue_pending(tx);
if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) {
pr("error: initial gen_syndrome(%d) timed out\n", disks);
return 1;
}
pr("testing the %d-disk case...\n", disks);
for (i = 0; i < disks-1; i++)
for (j = i+1; j < disks; j++) {
(*tests)++;
err += test_disks(i, j, disks);
}
return err;
}
static int __init raid6_test(void)
{
int err = 0;
int tests = 0;
int i;
for (i = 0; i < NDISKS+3; i++) {
data[i] = alloc_page(GFP_KERNEL);
if (!data[i]) {
while (i--)
put_page(data[i]);
return -ENOMEM;
}
}
/* the 4-disk and 5-disk cases are special for the recovery code */
if (NDISKS > 4)
err += test(4, &tests);
if (NDISKS > 5)
err += test(5, &tests);
/* the 11 and 12 disk cases are special for ioatdma (p-disabled
* q-continuation without extended descriptor)
*/
if (NDISKS > 12) {
err += test(11, &tests);
err += test(12, &tests);
}
/* the 24 disk case is special for ioatdma as it is the boundary point
* at which it needs to switch from 8-source ops to 16-source
* ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set)
*/
if (NDISKS > 24)
err += test(24, &tests);
err += test(NDISKS, &tests);
pr("\n");
pr("complete (%d tests, %d failure%s)\n",
tests, err, err == 1 ? "" : "s");
for (i = 0; i < NDISKS+3; i++)
put_page(data[i]);
return 0;
}
static void __exit raid6_test_exit(void)
{
}
/* when compiled in, wait for drivers to load first (assumes dma drivers
 * are also compiled in)
*/
late_initcall(raid6_test);
module_exit(raid6_test_exit);
MODULE_AUTHOR("Dan Williams <[email protected]>");
MODULE_DESCRIPTION("asynchronous RAID-6 recovery self tests");
MODULE_LICENSE("GPL");
| linux-master | crypto/async_tx/raid6test.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright(c) 2007 Yuri Tikhonov <[email protected]>
* Copyright(c) 2009 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>
/*
 * pq_scribble_page - space to hold a throwaway P or Q buffer for
* synchronous gen_syndrome
*/
static struct page *pq_scribble_page;
/* the struct page *blocks[] parameter passed to async_gen_syndrome()
* and async_syndrome_val() contains the 'P' destination address at
* blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
*
* note: these are macros as they are used as lvalues
*/
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])
#define MAX_DISKS 255
/*
* do_async_gen_syndrome - asynchronously calculate P and/or Q
*/
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan,
const unsigned char *scfs, int disks,
struct dmaengine_unmap_data *unmap,
enum dma_ctrl_flags dma_flags,
struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *tx = NULL;
struct dma_device *dma = chan->device;
enum async_tx_flags flags_orig = submit->flags;
dma_async_tx_callback cb_fn_orig = submit->cb_fn;
dma_async_tx_callback cb_param_orig = submit->cb_param;
int src_cnt = disks - 2;
unsigned short pq_src_cnt;
dma_addr_t dma_dest[2];
int src_off = 0;
while (src_cnt > 0) {
submit->flags = flags_orig;
pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
/* if we are submitting additional pqs, leave the chain open,
* clear the callback parameters, and leave the destination
* buffers mapped
*/
if (src_cnt > pq_src_cnt) {
submit->flags &= ~ASYNC_TX_ACK;
submit->flags |= ASYNC_TX_FENCE;
submit->cb_fn = NULL;
submit->cb_param = NULL;
} else {
submit->cb_fn = cb_fn_orig;
submit->cb_param = cb_param_orig;
if (cb_fn_orig)
dma_flags |= DMA_PREP_INTERRUPT;
}
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
/* Drivers force forward progress in case they can not provide
* a descriptor
*/
for (;;) {
dma_dest[0] = unmap->addr[disks - 2];
dma_dest[1] = unmap->addr[disks - 1];
tx = dma->device_prep_dma_pq(chan, dma_dest,
&unmap->addr[src_off],
pq_src_cnt,
&scfs[src_off], unmap->len,
dma_flags);
if (likely(tx))
break;
async_tx_quiesce(&submit->depend_tx);
dma_async_issue_pending(chan);
}
dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
submit->depend_tx = tx;
/* drop completed sources */
src_cnt -= pq_src_cnt;
src_off += pq_src_cnt;
dma_flags |= DMA_PREP_CONTINUE;
}
return tx;
}
/*
* do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
*/
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
size_t len, struct async_submit_ctl *submit)
{
void **srcs;
int i;
int start = -1, stop = disks - 3;
if (submit->scribble)
srcs = submit->scribble;
else
srcs = (void **) blocks;
for (i = 0; i < disks; i++) {
if (blocks[i] == NULL) {
BUG_ON(i > disks - 3); /* P or Q can't be zero */
srcs[i] = (void*)raid6_empty_zero_page;
} else {
srcs[i] = page_address(blocks[i]) + offsets[i];
if (i < disks - 2) {
stop = i;
if (start == -1)
start = i;
}
}
}
if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
BUG_ON(!raid6_call.xor_syndrome);
if (start >= 0)
raid6_call.xor_syndrome(disks, start, stop, len, srcs);
} else
raid6_call.gen_syndrome(disks, len, srcs);
async_tx_sync_epilog(submit);
}
static inline bool
is_dma_pq_aligned_offs(struct dma_device *dev, unsigned int *offs,
int src_cnt, size_t len)
{
int i;
for (i = 0; i < src_cnt; i++) {
if (!is_dma_pq_aligned(dev, offs[i], 0, len))
return false;
}
return true;
}
/**
* async_gen_syndrome - asynchronously calculate a raid6 syndrome
* @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
* @offsets: offset array into each block (src and dest) to start transaction
* @disks: number of blocks (including missing P or Q, see below)
* @len: length of operation in bytes
* @submit: submission/completion modifiers
*
* General note: This routine assumes a field of GF(2^8) with a
* primitive polynomial of 0x11d and a generator of {02}.
*
* 'disks' note: callers can optionally omit either P or Q (but not
* both) from the calculation by setting blocks[disks-2] or
* blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <=
* PAGE_SIZE as a temporary buffer of this size is used in the
* synchronous path. 'disks' always accounts for both destination
* buffers. If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL those buffers will be replaced with the raid6_empty_zero_page
* in the synchronous path and omitted in the hardware-asynchronous
* path.
*/
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
size_t len, struct async_submit_ctl *submit)
{
int src_cnt = disks - 2;
struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
&P(blocks, disks), 2,
blocks, src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dmaengine_unmap_data *unmap = NULL;
BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks)));
if (device)
unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
/* XORing P/Q is only implemented in software */
if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
(src_cnt <= dma_maxpq(device, 0) ||
dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
is_dma_pq_aligned_offs(device, offsets, disks, len)) {
struct dma_async_tx_descriptor *tx;
enum dma_ctrl_flags dma_flags = 0;
unsigned char coefs[MAX_DISKS];
int i, j;
/* run the p+q asynchronously */
pr_debug("%s: (async) disks: %d len: %zu\n",
__func__, disks, len);
/* convert source addresses being careful to collapse 'empty'
* sources and update the coefficients accordingly
*/
unmap->len = len;
for (i = 0, j = 0; i < src_cnt; i++) {
if (blocks[i] == NULL)
continue;
unmap->addr[j] = dma_map_page(device->dev, blocks[i],
offsets[i], len, DMA_TO_DEVICE);
coefs[j] = raid6_gfexp[i];
unmap->to_cnt++;
j++;
}
/*
* DMAs use destinations as sources,
* so use BIDIRECTIONAL mapping
*/
unmap->bidi_cnt++;
if (P(blocks, disks))
unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
P(offsets, disks),
len, DMA_BIDIRECTIONAL);
else {
unmap->addr[j++] = 0;
dma_flags |= DMA_PREP_PQ_DISABLE_P;
}
unmap->bidi_cnt++;
if (Q(blocks, disks))
unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
Q(offsets, disks),
len, DMA_BIDIRECTIONAL);
else {
unmap->addr[j++] = 0;
dma_flags |= DMA_PREP_PQ_DISABLE_Q;
}
tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
dmaengine_unmap_put(unmap);
return tx;
}
dmaengine_unmap_put(unmap);
/* run the pq synchronously */
pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);
/* wait for any prerequisite operations */
async_tx_quiesce(&submit->depend_tx);
if (!P(blocks, disks)) {
P(blocks, disks) = pq_scribble_page;
P(offsets, disks) = 0;
}
if (!Q(blocks, disks)) {
Q(blocks, disks) = pq_scribble_page;
Q(offsets, disks) = 0;
}
do_sync_gen_syndrome(blocks, offsets, disks, len, submit);
return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
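/*
 * Usage sketch (illustrative only, modelled on the raid6test self-test in
 * this tree): generate P/Q for a stripe of 'disks' pages with the P and Q
 * destinations in the last two slots of the block array. 'callback' and
 * 'cmp' are hypothetical completion plumbing.
 *
 *	struct async_submit_ctl submit;
 *	addr_conv_t addr_conv[NDISKS];
 *	struct dma_async_tx_descriptor *tx;
 *	struct completion cmp;
 *
 *	init_completion(&cmp);
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp,
 *			  addr_conv);
 *	tx = async_gen_syndrome(blocks, offsets, disks, PAGE_SIZE, &submit);
 *	async_tx_issue_pending(tx);
 *	wait_for_completion(&cmp);
 */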
static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
return NULL;
#endif
return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
disks, len);
}
/**
* async_syndrome_val - asynchronously validate a raid6 syndrome
* @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
* @offsets: common offset into each block (src and dest) to start transaction
* @disks: number of blocks (including missing P or Q, see below)
* @len: length of operation in bytes
* @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
* @spare: temporary result buffer for the synchronous case
* @s_off: spare buffer page offset
* @submit: submission / completion modifiers
*
* The same notes from async_gen_syndrome apply to the 'blocks',
* and 'disks' parameters of this routine. The synchronous path
* requires a temporary result buffer and submit->scribble to be
* specified.
*/
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks,
size_t len, enum sum_check_flags *pqres, struct page *spare,
unsigned int s_off, struct async_submit_ctl *submit)
{
struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx;
unsigned char coefs[MAX_DISKS];
enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
struct dmaengine_unmap_data *unmap = NULL;
BUG_ON(disks < 4 || disks > MAX_DISKS);
if (device)
unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
if (unmap && disks <= dma_maxpq(device, 0) &&
is_dma_pq_aligned_offs(device, offsets, disks, len)) {
struct device *dev = device->dev;
dma_addr_t pq[2];
int i, j = 0, src_cnt = 0;
pr_debug("%s: (async) disks: %d len: %zu\n",
__func__, disks, len);
unmap->len = len;
for (i = 0; i < disks-2; i++)
if (likely(blocks[i])) {
unmap->addr[j] = dma_map_page(dev, blocks[i],
offsets[i], len,
DMA_TO_DEVICE);
coefs[j] = raid6_gfexp[i];
unmap->to_cnt++;
src_cnt++;
j++;
}
if (!P(blocks, disks)) {
pq[0] = 0;
dma_flags |= DMA_PREP_PQ_DISABLE_P;
} else {
pq[0] = dma_map_page(dev, P(blocks, disks),
P(offsets, disks), len,
DMA_TO_DEVICE);
unmap->addr[j++] = pq[0];
unmap->to_cnt++;
}
if (!Q(blocks, disks)) {
pq[1] = 0;
dma_flags |= DMA_PREP_PQ_DISABLE_Q;
} else {
pq[1] = dma_map_page(dev, Q(blocks, disks),
Q(offsets, disks), len,
DMA_TO_DEVICE);
unmap->addr[j++] = pq[1];
unmap->to_cnt++;
}
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
for (;;) {
tx = device->device_prep_dma_pq_val(chan, pq,
unmap->addr,
src_cnt,
coefs,
len, pqres,
dma_flags);
if (likely(tx))
break;
async_tx_quiesce(&submit->depend_tx);
dma_async_issue_pending(chan);
}
dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
} else {
struct page *p_src = P(blocks, disks);
unsigned int p_off = P(offsets, disks);
struct page *q_src = Q(blocks, disks);
unsigned int q_off = Q(offsets, disks);
enum async_tx_flags flags_orig = submit->flags;
dma_async_tx_callback cb_fn_orig = submit->cb_fn;
void *scribble = submit->scribble;
void *cb_param_orig = submit->cb_param;
void *p, *q, *s;
pr_debug("%s: (sync) disks: %d len: %zu\n",
__func__, disks, len);
/* caller must provide a temporary result buffer and
* allow the input parameters to be preserved
*/
BUG_ON(!spare || !scribble);
/* wait for any prerequisite operations */
async_tx_quiesce(&submit->depend_tx);
/* recompute p and/or q into the temporary buffer and then
* check to see the result matches the current value
*/
tx = NULL;
*pqres = 0;
if (p_src) {
init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
NULL, NULL, scribble);
tx = async_xor_offs(spare, s_off,
blocks, offsets, disks-2, len, submit);
async_tx_quiesce(&tx);
p = page_address(p_src) + p_off;
s = page_address(spare) + s_off;
*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
}
if (q_src) {
P(blocks, disks) = NULL;
Q(blocks, disks) = spare;
Q(offsets, disks) = s_off;
init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
tx = async_gen_syndrome(blocks, offsets, disks,
len, submit);
async_tx_quiesce(&tx);
q = page_address(q_src) + q_off;
s = page_address(spare) + s_off;
*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
}
/* restore P, Q and submit */
P(blocks, disks) = p_src;
P(offsets, disks) = p_off;
Q(blocks, disks) = q_src;
Q(offsets, disks) = q_off;
submit->cb_fn = cb_fn_orig;
submit->cb_param = cb_param_orig;
submit->flags = flags_orig;
async_tx_sync_epilog(submit);
tx = NULL;
}
dmaengine_unmap_put(unmap);
return tx;
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
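/*
 * Usage sketch (illustrative only; declarations as in the gen_syndrome
 * sketch above): validate an existing P/Q pair against the data blocks.
 * On mismatch 'result' carries SUM_CHECK_P_RESULT and/or
 * SUM_CHECK_Q_RESULT; 'spare' is only consumed on the synchronous path.
 *
 *	enum sum_check_flags result = ~0;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp,
 *			  addr_conv);
 *	tx = async_syndrome_val(blocks, offsets, disks, PAGE_SIZE, &result,
 *				spare, 0, &submit);
 *	async_tx_issue_pending(tx);
 *	wait_for_completion(&cmp);
 */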
static int __init async_pq_init(void)
{
pq_scribble_page = alloc_page(GFP_KERNEL);
if (pq_scribble_page)
return 0;
pr_err("%s: failed to allocate required spare page\n", __func__);
return -ENOMEM;
}
static void __exit async_pq_exit(void)
{
__free_page(pq_scribble_page);
}
module_init(async_pq_init);
module_exit(async_pq_exit);
MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");
| linux-master | crypto/async_tx/async_pq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* copy offload engine support
*
* Copyright © 2006, Intel Corporation.
*
* Dan Williams <[email protected]>
*
* with architecture considerations by:
* Neil Brown <[email protected]>
* Jeff Garzik <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/async_tx.h>
/**
* async_memcpy - attempt to copy memory with a dma engine.
* @dest: destination page
* @src: src page
* @dest_offset: offset into 'dest' to start transaction
* @src_offset: offset into 'src' to start transaction
* @len: length in bytes
* @submit: submission / completion modifiers
*
* honored flags: ASYNC_TX_ACK
*/
struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
unsigned int src_offset, size_t len,
struct async_submit_ctl *submit)
{
struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
&dest, 1, &src, 1, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
struct dmaengine_unmap_data *unmap = NULL;
if (device)
unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
unsigned long dma_prep_flags = 0;
if (submit->cb_fn)
dma_prep_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_prep_flags |= DMA_PREP_FENCE;
unmap->to_cnt = 1;
unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len,
DMA_TO_DEVICE);
unmap->from_cnt = 1;
unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len,
DMA_FROM_DEVICE);
unmap->len = len;
tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
unmap->addr[0], len,
dma_prep_flags);
}
if (tx) {
pr_debug("%s: (async) len: %zu\n", __func__, len);
dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
} else {
void *dest_buf, *src_buf;
pr_debug("%s: (sync) len: %zu\n", __func__, len);
/* wait for any prerequisite operations */
async_tx_quiesce(&submit->depend_tx);
dest_buf = kmap_atomic(dest) + dest_offset;
src_buf = kmap_atomic(src) + src_offset;
memcpy(dest_buf, src_buf, len);
kunmap_atomic(src_buf);
kunmap_atomic(dest_buf);
async_tx_sync_epilog(submit);
}
dmaengine_unmap_put(unmap);
return tx;
}
EXPORT_SYMBOL_GPL(async_memcpy);
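/*
 * Usage sketch (illustrative only; page names are hypothetical): copy one
 * page to another and spin on completion with async_tx_quiesce(). No
 * scribble buffer is needed for a memcpy.
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, NULL);
 *	tx = async_memcpy(dest_page, src_page, 0, 0, PAGE_SIZE, &submit);
 *	async_tx_quiesce(&tx);
 */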
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous memcpy api");
MODULE_LICENSE("GPL");
| linux-master | crypto/async_tx/async_memcpy.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* xor offload engine api
*
* Copyright © 2006, Intel Corporation.
*
* Dan Williams <[email protected]>
*
* with architecture considerations by:
* Neil Brown <[email protected]>
* Jeff Garzik <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/raid/xor.h>
#include <linux/async_tx.h>
/* do_async_xor - dma map the pages and perform the xor with an engine */
static __async_inline struct dma_async_tx_descriptor *
do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
struct async_submit_ctl *submit)
{
struct dma_device *dma = chan->device;
struct dma_async_tx_descriptor *tx = NULL;
dma_async_tx_callback cb_fn_orig = submit->cb_fn;
void *cb_param_orig = submit->cb_param;
enum async_tx_flags flags_orig = submit->flags;
enum dma_ctrl_flags dma_flags = 0;
int src_cnt = unmap->to_cnt;
int xor_src_cnt;
dma_addr_t dma_dest = unmap->addr[unmap->to_cnt];
dma_addr_t *src_list = unmap->addr;
while (src_cnt) {
dma_addr_t tmp;
submit->flags = flags_orig;
xor_src_cnt = min(src_cnt, (int)dma->max_xor);
/* if we are submitting additional xors, leave the chain open
* and clear the callback parameters
*/
if (src_cnt > xor_src_cnt) {
submit->flags &= ~ASYNC_TX_ACK;
submit->flags |= ASYNC_TX_FENCE;
submit->cb_fn = NULL;
submit->cb_param = NULL;
} else {
submit->cb_fn = cb_fn_orig;
submit->cb_param = cb_param_orig;
}
if (submit->cb_fn)
dma_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
/* Drivers force forward progress in case they can not provide a
* descriptor
*/
tmp = src_list[0];
if (src_list > unmap->addr)
src_list[0] = dma_dest;
tx = dma->device_prep_dma_xor(chan, dma_dest, src_list,
xor_src_cnt, unmap->len,
dma_flags);
if (unlikely(!tx))
async_tx_quiesce(&submit->depend_tx);
/* spin wait for the preceding transactions to complete */
while (unlikely(!tx)) {
dma_async_issue_pending(chan);
tx = dma->device_prep_dma_xor(chan, dma_dest,
src_list,
xor_src_cnt, unmap->len,
dma_flags);
}
src_list[0] = tmp;
dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
submit->depend_tx = tx;
if (src_cnt > xor_src_cnt) {
/* drop completed sources */
src_cnt -= xor_src_cnt;
			/* use the intermediate result as a source */
src_cnt++;
src_list += xor_src_cnt - 1;
} else
break;
}
return tx;
}
static void
do_sync_xor_offs(struct page *dest, unsigned int offset,
struct page **src_list, unsigned int *src_offs,
int src_cnt, size_t len, struct async_submit_ctl *submit)
{
int i;
int xor_src_cnt = 0;
int src_off = 0;
void *dest_buf;
void **srcs;
if (submit->scribble)
srcs = submit->scribble;
else
srcs = (void **) src_list;
/* convert to buffer pointers */
for (i = 0; i < src_cnt; i++)
if (src_list[i])
srcs[xor_src_cnt++] = page_address(src_list[i]) +
(src_offs ? src_offs[i] : offset);
src_cnt = xor_src_cnt;
/* set destination address */
dest_buf = page_address(dest) + offset;
if (submit->flags & ASYNC_TX_XOR_ZERO_DST)
memset(dest_buf, 0, len);
while (src_cnt > 0) {
/* process up to 'MAX_XOR_BLOCKS' sources */
xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
xor_blocks(xor_src_cnt, len, dest_buf, &srcs[src_off]);
/* drop completed sources */
src_cnt -= xor_src_cnt;
src_off += xor_src_cnt;
}
async_tx_sync_epilog(submit);
}
static inline bool
dma_xor_aligned_offsets(struct dma_device *device, unsigned int offset,
unsigned int *src_offs, int src_cnt, int len)
{
int i;
if (!is_dma_xor_aligned(device, offset, 0, len))
return false;
if (!src_offs)
return true;
for (i = 0; i < src_cnt; i++) {
if (!is_dma_xor_aligned(device, src_offs[i], 0, len))
return false;
}
return true;
}
/**
* async_xor_offs - attempt to xor a set of blocks with a dma engine.
* @dest: destination page
* @offset: dst offset to start transaction
* @src_list: array of source pages
 * @src_offs: array of source page offsets; NULL means a common src/dst offset
* @src_cnt: number of source pages
* @len: length in bytes
* @submit: submission / completion modifiers
*
* honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST
*
* xor_blocks always uses the dest as a source so the
* ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in
* the calculation. The assumption with dma engines is that they only
* use the destination buffer as a source when it is explicitly specified
* in the source list.
*
* src_list note: if the dest is also a source it must be at index zero.
* The contents of this array will be overwritten if a scribble region
* is not specified.
*/
struct dma_async_tx_descriptor *
async_xor_offs(struct page *dest, unsigned int offset,
struct page **src_list, unsigned int *src_offs,
int src_cnt, size_t len, struct async_submit_ctl *submit)
{
struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
&dest, 1, src_list,
src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dmaengine_unmap_data *unmap = NULL;
BUG_ON(src_cnt <= 1);
if (device)
unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);
if (unmap && dma_xor_aligned_offsets(device, offset,
src_offs, src_cnt, len)) {
struct dma_async_tx_descriptor *tx;
int i, j;
/* run the xor asynchronously */
pr_debug("%s (async): len: %zu\n", __func__, len);
unmap->len = len;
for (i = 0, j = 0; i < src_cnt; i++) {
if (!src_list[i])
continue;
unmap->to_cnt++;
unmap->addr[j++] = dma_map_page(device->dev, src_list[i],
src_offs ? src_offs[i] : offset,
len, DMA_TO_DEVICE);
}
/* map it bidirectional as it may be re-used as a source */
unmap->addr[j] = dma_map_page(device->dev, dest, offset, len,
DMA_BIDIRECTIONAL);
unmap->bidi_cnt = 1;
tx = do_async_xor(chan, unmap, submit);
dmaengine_unmap_put(unmap);
return tx;
} else {
dmaengine_unmap_put(unmap);
/* run the xor synchronously */
pr_debug("%s (sync): len: %zu\n", __func__, len);
WARN_ONCE(chan, "%s: no space for dma address conversion\n",
__func__);
/* in the sync case the dest is an implied source
* (assumes the dest is the first source)
*/
if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
src_cnt--;
src_list++;
if (src_offs)
src_offs++;
}
/* wait for any prerequisite operations */
async_tx_quiesce(&submit->depend_tx);
do_sync_xor_offs(dest, offset, src_list, src_offs,
src_cnt, len, submit);
return NULL;
}
}
EXPORT_SYMBOL_GPL(async_xor_offs);
/**
* async_xor - attempt to xor a set of blocks with a dma engine.
* @dest: destination page
* @src_list: array of source pages
* @offset: common src/dst offset to start transaction
* @src_cnt: number of source pages
* @len: length in bytes
* @submit: submission / completion modifiers
*
* honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST
*
* xor_blocks always uses the dest as a source so the
* ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in
* the calculation. The assumption with dma engines is that they only
* use the destination buffer as a source when it is explicitly specified
* in the source list.
*
* src_list note: if the dest is also a source it must be at index zero.
* The contents of this array will be overwritten if a scribble region
* is not specified.
*/
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
int src_cnt, size_t len, struct async_submit_ctl *submit)
{
return async_xor_offs(dest, offset, src_list, NULL,
src_cnt, len, submit);
}
EXPORT_SYMBOL_GPL(async_xor);
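/*
 * Usage sketch (illustrative only; 'scribble' is a caller-provided
 * address-conversion region as described above): xor-sum src_cnt pages
 * into 'dest'. ASYNC_TX_XOR_ZERO_DST zeroes the destination first so it
 * does not contribute to the result; without it, dest must sit at
 * src_list[0].
 *
 *	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
 *			  NULL, NULL, NULL, scribble);
 *	tx = async_xor(dest, srcs, 0, src_cnt, PAGE_SIZE, &submit);
 *	async_tx_quiesce(&tx);
 */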
static int page_is_zero(struct page *p, unsigned int offset, size_t len)
{
return !memchr_inv(page_address(p) + offset, 0, len);
}
static inline struct dma_chan *
xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
struct page **src_list, int src_cnt, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
return NULL;
#endif
return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list,
src_cnt, len);
}
/**
* async_xor_val_offs - attempt a xor parity check with a dma engine.
* @dest: destination page used if the xor is performed synchronously
 * @offset: dest offset to start transaction
 * @src_list: array of source pages
 * @src_offs: array of source page offsets; NULL means a common src/dst offset
* @src_cnt: number of source pages
* @len: length in bytes
* @result: 0 if sum == 0 else non-zero
* @submit: submission / completion modifiers
*
* honored flags: ASYNC_TX_ACK
*
* src_list note: if the dest is also a source it must be at index zero.
* The contents of this array will be overwritten if a scribble region
* is not specified.
*/
struct dma_async_tx_descriptor *
async_xor_val_offs(struct page *dest, unsigned int offset,
struct page **src_list, unsigned int *src_offs,
int src_cnt, size_t len, enum sum_check_flags *result,
struct async_submit_ctl *submit)
{
struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
struct dmaengine_unmap_data *unmap = NULL;
BUG_ON(src_cnt <= 1);
if (device)
unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);
if (unmap && src_cnt <= device->max_xor &&
dma_xor_aligned_offsets(device, offset, src_offs, src_cnt, len)) {
unsigned long dma_prep_flags = 0;
int i;
pr_debug("%s: (async) len: %zu\n", __func__, len);
if (submit->cb_fn)
dma_prep_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_prep_flags |= DMA_PREP_FENCE;
for (i = 0; i < src_cnt; i++) {
unmap->addr[i] = dma_map_page(device->dev, src_list[i],
src_offs ? src_offs[i] : offset,
len, DMA_TO_DEVICE);
unmap->to_cnt++;
}
unmap->len = len;
tx = device->device_prep_dma_xor_val(chan, unmap->addr, src_cnt,
len, result,
dma_prep_flags);
if (unlikely(!tx)) {
async_tx_quiesce(&submit->depend_tx);
while (!tx) {
dma_async_issue_pending(chan);
tx = device->device_prep_dma_xor_val(chan,
unmap->addr, src_cnt, len, result,
dma_prep_flags);
}
}
dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
} else {
enum async_tx_flags flags_orig = submit->flags;
pr_debug("%s: (sync) len: %zu\n", __func__, len);
WARN_ONCE(device && src_cnt <= device->max_xor,
"%s: no space for dma address conversion\n",
__func__);
submit->flags |= ASYNC_TX_XOR_DROP_DST;
submit->flags &= ~ASYNC_TX_ACK;
tx = async_xor_offs(dest, offset, src_list, src_offs,
src_cnt, len, submit);
async_tx_quiesce(&tx);
*result = !page_is_zero(dest, offset, len) << SUM_CHECK_P;
async_tx_sync_epilog(submit);
submit->flags = flags_orig;
}
dmaengine_unmap_put(unmap);
return tx;
}
EXPORT_SYMBOL_GPL(async_xor_val_offs);
/**
* async_xor_val - attempt a xor parity check with a dma engine.
* @dest: destination page used if the xor is performed synchronously
* @src_list: array of source pages
 * @offset: common src/dst offset to start transaction
* @src_cnt: number of source pages
* @len: length in bytes
* @result: 0 if sum == 0 else non-zero
* @submit: submission / completion modifiers
*
* honored flags: ASYNC_TX_ACK
*
* src_list note: if the dest is also a source it must be at index zero.
* The contents of this array will be overwritten if a scribble region
* is not specified.
*/
struct dma_async_tx_descriptor *
async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
int src_cnt, size_t len, enum sum_check_flags *result,
struct async_submit_ctl *submit)
{
return async_xor_val_offs(dest, offset, src_list, NULL, src_cnt,
len, result, submit);
}
EXPORT_SYMBOL_GPL(async_xor_val);
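/*
 * Usage sketch (illustrative only): check that a set of blocks xors to
 * zero, e.g. a raid5-style parity check over data plus parity. 'result'
 * is non-zero on mismatch; 'dest' is only written on the synchronous
 * fallback.
 *
 *	enum sum_check_flags result;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
 *	tx = async_xor_val(dest, srcs, 0, src_cnt, PAGE_SIZE, &result,
 *			   &submit);
 *	async_tx_quiesce(&tx);
 */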
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api");
MODULE_LICENSE("GPL");
| linux-master | crypto/async_tx/async_xor.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Asynchronous RAID-6 recovery calculations ASYNC_TX API.
* Copyright(c) 2009 Intel Corporation
*
* based on raid6recov.c:
* Copyright 2002 H. Peter Anvin
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/dmaengine.h>
static struct dma_async_tx_descriptor *
async_sum_product(struct page *dest, unsigned int d_off,
struct page **srcs, unsigned int *src_offs, unsigned char *coef,
size_t len, struct async_submit_ctl *submit)
{
struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
&dest, 1, srcs, 2, len);
struct dma_device *dma = chan ? chan->device : NULL;
struct dmaengine_unmap_data *unmap = NULL;
const u8 *amul, *bmul;
u8 ax, bx;
u8 *a, *b, *c;
if (dma)
unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
if (unmap) {
struct device *dev = dma->dev;
dma_addr_t pq[2];
struct dma_async_tx_descriptor *tx;
enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
unmap->addr[0] = dma_map_page(dev, srcs[0], src_offs[0],
len, DMA_TO_DEVICE);
unmap->addr[1] = dma_map_page(dev, srcs[1], src_offs[1],
len, DMA_TO_DEVICE);
unmap->to_cnt = 2;
unmap->addr[2] = dma_map_page(dev, dest, d_off,
len, DMA_BIDIRECTIONAL);
unmap->bidi_cnt = 1;
/* engine only looks at Q, but expects it to follow P */
pq[1] = unmap->addr[2];
unmap->len = len;
tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef,
len, dma_flags);
if (tx) {
dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
dmaengine_unmap_put(unmap);
return tx;
}
/* could not get a descriptor, unmap and fall through to
* the synchronous path
*/
dmaengine_unmap_put(unmap);
}
/* run the operation synchronously */
async_tx_quiesce(&submit->depend_tx);
amul = raid6_gfmul[coef[0]];
bmul = raid6_gfmul[coef[1]];
a = page_address(srcs[0]) + src_offs[0];
b = page_address(srcs[1]) + src_offs[1];
c = page_address(dest) + d_off;
while (len--) {
ax = amul[*a++];
bx = bmul[*b++];
*c++ = ax ^ bx;
}
return NULL;
}
static struct dma_async_tx_descriptor *
async_mult(struct page *dest, unsigned int d_off, struct page *src,
unsigned int s_off, u8 coef, size_t len,
struct async_submit_ctl *submit)
{
struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
&dest, 1, &src, 1, len);
struct dma_device *dma = chan ? chan->device : NULL;
struct dmaengine_unmap_data *unmap = NULL;
const u8 *qmul; /* Q multiplier table */
u8 *d, *s;
if (dma)
unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
if (unmap) {
dma_addr_t dma_dest[2];
struct device *dev = dma->dev;
struct dma_async_tx_descriptor *tx;
enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
unmap->addr[0] = dma_map_page(dev, src, s_off,
len, DMA_TO_DEVICE);
unmap->to_cnt++;
unmap->addr[1] = dma_map_page(dev, dest, d_off,
len, DMA_BIDIRECTIONAL);
dma_dest[1] = unmap->addr[1];
unmap->bidi_cnt++;
unmap->len = len;
/* this looks funny, but the engine looks for Q at
* dma_dest[1] and ignores dma_dest[0] as a dest
* due to DMA_PREP_PQ_DISABLE_P
*/
tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr,
1, &coef, len, dma_flags);
if (tx) {
dma_set_unmap(tx, unmap);
dmaengine_unmap_put(unmap);
async_tx_submit(chan, tx, submit);
return tx;
}
/* could not get a descriptor, unmap and fall through to
* the synchronous path
*/
dmaengine_unmap_put(unmap);
}
/* no channel available, or failed to allocate a descriptor, so
* perform the operation synchronously
*/
async_tx_quiesce(&submit->depend_tx);
qmul = raid6_gfmul[coef];
d = page_address(dest) + d_off;
s = page_address(src) + s_off;
while (len--)
*d++ = qmul[*s++];
return NULL;
}
static struct dma_async_tx_descriptor *
__2data_recov_4(int disks, size_t bytes, int faila, int failb,
struct page **blocks, unsigned int *offs,
struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *tx = NULL;
struct page *p, *q, *a, *b;
unsigned int p_off, q_off, a_off, b_off;
struct page *srcs[2];
unsigned int src_offs[2];
unsigned char coef[2];
enum async_tx_flags flags = submit->flags;
dma_async_tx_callback cb_fn = submit->cb_fn;
void *cb_param = submit->cb_param;
void *scribble = submit->scribble;
p = blocks[disks-2];
p_off = offs[disks-2];
q = blocks[disks-1];
q_off = offs[disks-1];
a = blocks[faila];
a_off = offs[faila];
b = blocks[failb];
b_off = offs[failb];
/* in the 4 disk case P + Pxy == P and Q + Qxy == Q */
/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
srcs[0] = p;
src_offs[0] = p_off;
srcs[1] = q;
src_offs[1] = q_off;
coef[0] = raid6_gfexi[failb-faila];
coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
tx = async_sum_product(b, b_off, srcs, src_offs, coef, bytes, submit);
/* Dy = P+Pxy+Dx */
srcs[0] = p;
src_offs[0] = p_off;
srcs[1] = b;
src_offs[1] = b_off;
init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn,
cb_param, scribble);
tx = async_xor_offs(a, a_off, srcs, src_offs, 2, bytes, submit);
return tx;
}
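/*
 * Editor's note on the algebra behind the coefficients used above (not in
 * the original source; notation follows lib/raid6): with data blocks D_i,
 * P = sum(D_i) and Q = sum(g^i * D_i) over GF(2^8). If blocks a < b fail,
 * the partial syndromes Pxy/Qxy computed with the failed blocks zeroed
 * satisfy
 *
 *	P + Pxy = D_a + D_b
 *	Q + Qxy = g^a * D_a + g^b * D_b
 *
 * Since g^a + g^b = g^a * (1 + g^(b-a)) in characteristic 2, choosing
 *
 *	A = 1 / (g^(b-a) + 1)	(raid6_gfexi[failb-faila])
 *	B = 1 / (g^a + g^b)	(raid6_gfinv[raid6_gfexp[faila] ^
 *				 raid6_gfexp[failb]])
 *
 * gives A*(P+Pxy) + B*(Q+Qxy) = D_b, and D_a then follows as the xor
 * (P+Pxy) ^ D_b - exactly the async_sum_product() plus async_xor_offs()
 * sequence performed by the recovery routines here.
 */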
static struct dma_async_tx_descriptor *
__2data_recov_5(int disks, size_t bytes, int faila, int failb,
struct page **blocks, unsigned int *offs,
struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *tx = NULL;
struct page *p, *q, *g, *dp, *dq;
unsigned int p_off, q_off, g_off, dp_off, dq_off;
struct page *srcs[2];
unsigned int src_offs[2];
unsigned char coef[2];
enum async_tx_flags flags = submit->flags;
dma_async_tx_callback cb_fn = submit->cb_fn;
void *cb_param = submit->cb_param;
void *scribble = submit->scribble;
int good_srcs, good, i;
good_srcs = 0;
good = -1;
for (i = 0; i < disks-2; i++) {
if (blocks[i] == NULL)
continue;
if (i == faila || i == failb)
continue;
good = i;
good_srcs++;
}
BUG_ON(good_srcs > 1);
p = blocks[disks-2];
p_off = offs[disks-2];
q = blocks[disks-1];
q_off = offs[disks-1];
g = blocks[good];
g_off = offs[good];
/* Compute syndrome with zero for the missing data pages
* Use the dead data pages as temporary storage for delta p and
* delta q
*/
dp = blocks[faila];
dp_off = offs[faila];
dq = blocks[failb];
dq_off = offs[failb];
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
tx = async_memcpy(dp, g, dp_off, g_off, bytes, submit);
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
tx = async_mult(dq, dq_off, g, g_off,
raid6_gfexp[good], bytes, submit);
/* compute P + Pxy */
srcs[0] = dp;
src_offs[0] = dp_off;
srcs[1] = p;
src_offs[1] = p_off;
init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
NULL, NULL, scribble);
tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit);
/* compute Q + Qxy */
srcs[0] = dq;
src_offs[0] = dq_off;
srcs[1] = q;
src_offs[1] = q_off;
init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
NULL, NULL, scribble);
tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit);
/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
srcs[0] = dp;
src_offs[0] = dp_off;
srcs[1] = dq;
src_offs[1] = dq_off;
coef[0] = raid6_gfexi[failb-faila];
coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
tx = async_sum_product(dq, dq_off, srcs, src_offs, coef, bytes, submit);
/* Dy = P+Pxy+Dx */
srcs[0] = dp;
src_offs[0] = dp_off;
srcs[1] = dq;
src_offs[1] = dq_off;
init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
cb_param, scribble);
tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit);
return tx;
}
static struct dma_async_tx_descriptor *
__2data_recov_n(int disks, size_t bytes, int faila, int failb,
struct page **blocks, unsigned int *offs,
struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *tx = NULL;
struct page *p, *q, *dp, *dq;
unsigned int p_off, q_off, dp_off, dq_off;
struct page *srcs[2];
unsigned int src_offs[2];
unsigned char coef[2];
enum async_tx_flags flags = submit->flags;
dma_async_tx_callback cb_fn = submit->cb_fn;
void *cb_param = submit->cb_param;
void *scribble = submit->scribble;
p = blocks[disks-2];
p_off = offs[disks-2];
q = blocks[disks-1];
q_off = offs[disks-1];
/* Compute syndrome with zero for the missing data pages
* Use the dead data pages as temporary storage for
* delta p and delta q
*/
dp = blocks[faila];
dp_off = offs[faila];
blocks[faila] = NULL;
blocks[disks-2] = dp;
offs[disks-2] = dp_off;
dq = blocks[failb];
dq_off = offs[failb];
blocks[failb] = NULL;
blocks[disks-1] = dq;
offs[disks-1] = dq_off;
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
tx = async_gen_syndrome(blocks, offs, disks, bytes, submit);
/* Restore pointer table */
blocks[faila] = dp;
offs[faila] = dp_off;
blocks[failb] = dq;
offs[failb] = dq_off;
blocks[disks-2] = p;
offs[disks-2] = p_off;
blocks[disks-1] = q;
offs[disks-1] = q_off;
/* compute P + Pxy */
srcs[0] = dp;
src_offs[0] = dp_off;
srcs[1] = p;
src_offs[1] = p_off;
init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
NULL, NULL, scribble);
tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit);
/* compute Q + Qxy */
srcs[0] = dq;
src_offs[0] = dq_off;
srcs[1] = q;
src_offs[1] = q_off;
init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
NULL, NULL, scribble);
tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit);
/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
srcs[0] = dp;
src_offs[0] = dp_off;
srcs[1] = dq;
src_offs[1] = dq_off;
coef[0] = raid6_gfexi[failb-faila];
coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
tx = async_sum_product(dq, dq_off, srcs, src_offs, coef, bytes, submit);
/* Dy = P+Pxy+Dx */
srcs[0] = dp;
src_offs[0] = dp_off;
srcs[1] = dq;
src_offs[1] = dq_off;
init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
cb_param, scribble);
tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit);
return tx;
}
/**
* async_raid6_2data_recov - asynchronously calculate two missing data blocks
* @disks: number of disks in the RAID-6 array
* @bytes: block size
* @faila: first failed drive index
* @failb: second failed drive index
* @blocks: array of source pointers where the last two entries are p and q
* @offs: array of offset for pages in blocks
* @submit: submission/completion modifiers
*/
struct dma_async_tx_descriptor *
async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
struct page **blocks, unsigned int *offs,
struct async_submit_ctl *submit)
{
void *scribble = submit->scribble;
int non_zero_srcs, i;
BUG_ON(faila == failb);
if (failb < faila)
swap(faila, failb);
pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
/* if a dma resource is not available or a scribble buffer is not
* available punt to the synchronous path. In the 'dma not
* available' case be sure to use the scribble buffer to
* preserve the content of 'blocks' as the caller intended.
*/
if (!async_dma_find_channel(DMA_PQ) || !scribble) {
void **ptrs = scribble ? scribble : (void **) blocks;
async_tx_quiesce(&submit->depend_tx);
for (i = 0; i < disks; i++)
if (blocks[i] == NULL)
ptrs[i] = (void *) raid6_empty_zero_page;
else
ptrs[i] = page_address(blocks[i]) + offs[i];
raid6_2data_recov(disks, bytes, faila, failb, ptrs);
async_tx_sync_epilog(submit);
return NULL;
}
non_zero_srcs = 0;
for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
if (blocks[i])
non_zero_srcs++;
switch (non_zero_srcs) {
case 0:
case 1:
/* There must be at least 2 sources - the failed devices. */
BUG();
case 2:
/* dma devices do not uniformly understand a zero source pq
* operation (in contrast to the synchronous case), so
* explicitly handle the special case of a 4 disk array with
* both data disks missing.
*/
return __2data_recov_4(disks, bytes, faila, failb,
blocks, offs, submit);
case 3:
/* dma devices do not uniformly understand a single
* source pq operation (in contrast to the synchronous
* case), so explicitly handle the special case of a 5 disk
* array with 2 of 3 data disks missing.
*/
return __2data_recov_5(disks, bytes, faila, failb,
blocks, offs, submit);
default:
return __2data_recov_n(disks, bytes, faila, failb,
blocks, offs, submit);
}
}
EXPORT_SYMBOL_GPL(async_raid6_2data_recov);
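/*
 * Usage sketch (illustrative only, mirroring the raid6test self-test):
 * rebuild two failed data blocks in place; blocks[faila] and
 * blocks[failb] point at the pages to be reconstructed.
 *
 *	init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
 *	tx = async_raid6_2data_recov(disks, PAGE_SIZE, faila, failb,
 *				     blocks, offs, &submit);
 *	async_tx_quiesce(&tx);
 */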
/**
* async_raid6_datap_recov - asynchronously calculate a data and the 'p' block
* @disks: number of disks in the RAID-6 array
* @bytes: block size
* @faila: failed drive index
* @blocks: array of source pointers where the last two entries are p and q
* @offs: array of offset for pages in blocks
* @submit: submission/completion modifiers
*/
struct dma_async_tx_descriptor *
async_raid6_datap_recov(int disks, size_t bytes, int faila,
struct page **blocks, unsigned int *offs,
struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *tx = NULL;
struct page *p, *q, *dq;
unsigned int p_off, q_off, dq_off;
u8 coef;
enum async_tx_flags flags = submit->flags;
dma_async_tx_callback cb_fn = submit->cb_fn;
void *cb_param = submit->cb_param;
void *scribble = submit->scribble;
int good_srcs, good, i;
struct page *srcs[2];
unsigned int src_offs[2];
pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
/* if a dma resource is not available or a scribble buffer is not
* available punt to the synchronous path. In the 'dma not
* available' case be sure to use the scribble buffer to
* preserve the content of 'blocks' as the caller intended.
*/
if (!async_dma_find_channel(DMA_PQ) || !scribble) {
void **ptrs = scribble ? scribble : (void **) blocks;
async_tx_quiesce(&submit->depend_tx);
for (i = 0; i < disks; i++)
if (blocks[i] == NULL)
ptrs[i] = (void*)raid6_empty_zero_page;
else
ptrs[i] = page_address(blocks[i]) + offs[i];
raid6_datap_recov(disks, bytes, faila, ptrs);
async_tx_sync_epilog(submit);
return NULL;
}
good_srcs = 0;
good = -1;
for (i = 0; i < disks-2; i++) {
if (i == faila)
continue;
if (blocks[i]) {
good = i;
good_srcs++;
if (good_srcs > 1)
break;
}
}
BUG_ON(good_srcs == 0);
p = blocks[disks-2];
p_off = offs[disks-2];
q = blocks[disks-1];
q_off = offs[disks-1];
/* Compute syndrome with zero for the missing data page
* Use the dead data page as temporary storage for delta q
*/
dq = blocks[faila];
dq_off = offs[faila];
blocks[faila] = NULL;
blocks[disks-1] = dq;
offs[disks-1] = dq_off;
/* in the 4-disk case we only need to perform a single source
* multiplication with the one good data block.
*/
if (good_srcs == 1) {
struct page *g = blocks[good];
unsigned int g_off = offs[good];
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
scribble);
tx = async_memcpy(p, g, p_off, g_off, bytes, submit);
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
scribble);
tx = async_mult(dq, dq_off, g, g_off,
raid6_gfexp[good], bytes, submit);
} else {
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
scribble);
tx = async_gen_syndrome(blocks, offs, disks, bytes, submit);
}
/* Restore pointer table */
blocks[faila] = dq;
offs[faila] = dq_off;
blocks[disks-1] = q;
offs[disks-1] = q_off;
/* calculate g^{-faila} */
coef = raid6_gfinv[raid6_gfexp[faila]];
srcs[0] = dq;
src_offs[0] = dq_off;
srcs[1] = q;
src_offs[1] = q_off;
init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
NULL, NULL, scribble);
tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit);
init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
tx = async_mult(dq, dq_off, dq, dq_off, coef, bytes, submit);
srcs[0] = p;
src_offs[0] = p_off;
srcs[1] = dq;
src_offs[1] = dq_off;
init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
cb_param, scribble);
tx = async_xor_offs(p, p_off, srcs, src_offs, 2, bytes, submit);
return tx;
}
EXPORT_SYMBOL_GPL(async_raid6_datap_recov);
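/*
 * Usage sketch (illustrative only): rebuild one failed data block and the
 * P block from the surviving data and Q.
 *
 *	init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
 *	tx = async_raid6_datap_recov(disks, PAGE_SIZE, faila, blocks, offs,
 *				     &submit);
 *	async_tx_quiesce(&tx);
 */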
MODULE_AUTHOR("Dan Williams <[email protected]>");
MODULE_DESCRIPTION("asynchronous RAID-6 recovery api");
MODULE_LICENSE("GPL");
| linux-master | crypto/async_tx/async_raid6_recov.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* core routines for the asynchronous memory transfer/transform api
*
* Copyright © 2006, Intel Corporation.
*
* Dan Williams <[email protected]>
*
* with architecture considerations by:
* Neil Brown <[email protected]>
* Jeff Garzik <[email protected]>
*/
#include <linux/rculist.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/async_tx.h>
#ifdef CONFIG_DMA_ENGINE
static int __init async_tx_init(void)
{
async_dmaengine_get();
printk(KERN_INFO "async_tx: api initialized (async)\n");
return 0;
}
static void __exit async_tx_exit(void)
{
async_dmaengine_put();
}
module_init(async_tx_init);
module_exit(async_tx_exit);
/**
* __async_tx_find_channel - find a channel to carry out the operation or let
* the transaction execute synchronously
* @submit: transaction dependency and submission modifiers
* @tx_type: transaction type
*/
struct dma_chan *
__async_tx_find_channel(struct async_submit_ctl *submit,
enum dma_transaction_type tx_type)
{
struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
/* see if we can keep the chain on one channel */
if (depend_tx &&
dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
return depend_tx->chan;
return async_dma_find_channel(tx_type);
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
#endif
/**
* async_tx_channel_switch - queue an interrupt descriptor with a dependency
* pre-attached.
* @depend_tx: the operation that must finish before the new operation runs
* @tx: the new operation
*/
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
struct dma_async_tx_descriptor *tx)
{
struct dma_chan *chan = depend_tx->chan;
struct dma_device *device = chan->device;
struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
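	/*
	 * intr_tx starts out as a poison value; it is replaced with NULL
	 * once the new operation has been chained under the lock, or when
	 * no interrupt descriptor is available, steering the code below to
	 * a plain flush or to the synchronous polling fallback.
	 */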
/* first check to see if we can still append to depend_tx */
txd_lock(depend_tx);
if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
txd_chain(depend_tx, tx);
intr_tx = NULL;
}
txd_unlock(depend_tx);
/* attached dependency, flush the parent channel */
if (!intr_tx) {
device->device_issue_pending(chan);
return;
}
/* see if we can schedule an interrupt
* otherwise poll for completion
*/
if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
intr_tx = device->device_prep_dma_interrupt(chan, 0);
else
intr_tx = NULL;
if (intr_tx) {
intr_tx->callback = NULL;
intr_tx->callback_param = NULL;
/* safe to chain outside the lock since we know we are
* not submitted yet
*/
txd_chain(intr_tx, tx);
/* check if we need to append */
txd_lock(depend_tx);
if (txd_parent(depend_tx)) {
txd_chain(depend_tx, intr_tx);
async_tx_ack(intr_tx);
intr_tx = NULL;
}
txd_unlock(depend_tx);
if (intr_tx) {
txd_clear_parent(intr_tx);
intr_tx->tx_submit(intr_tx);
async_tx_ack(intr_tx);
}
device->device_issue_pending(chan);
} else {
if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
panic("%s: DMA error waiting for depend_tx\n",
__func__);
tx->tx_submit(tx);
}
}
/**
* enum submit_disposition - flags for routing an incoming operation
* @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
* @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
* @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
*
* while holding depend_tx->lock we must avoid submitting new operations
* to prevent a circular locking dependency with drivers that already
* hold a channel lock when calling async_tx_run_dependencies.
*/
enum submit_disposition {
ASYNC_TX_SUBMITTED,
ASYNC_TX_CHANNEL_SWITCH,
ASYNC_TX_DIRECT_SUBMIT,
};
void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
tx->callback = submit->cb_fn;
tx->callback_param = submit->cb_param;
if (depend_tx) {
enum submit_disposition s;
/* sanity check the dependency chain:
* 1/ if ack is already set then we cannot be sure
* we are referring to the correct operation
* 2/ dependencies are 1:1 i.e. two transactions can
* not depend on the same parent
*/
BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
txd_parent(tx));
/* the lock prevents async_tx_run_dependencies from missing
* the setting of ->next when ->parent != NULL
*/
txd_lock(depend_tx);
if (txd_parent(depend_tx)) {
/* we have a parent so we can not submit directly
* if we are staying on the same channel: append
* else: channel switch
*/
if (depend_tx->chan == chan) {
txd_chain(depend_tx, tx);
s = ASYNC_TX_SUBMITTED;
} else
s = ASYNC_TX_CHANNEL_SWITCH;
} else {
/* we do not have a parent so we may be able to submit
* directly if we are staying on the same channel
*/
if (depend_tx->chan == chan)
s = ASYNC_TX_DIRECT_SUBMIT;
else
s = ASYNC_TX_CHANNEL_SWITCH;
}
txd_unlock(depend_tx);
switch (s) {
case ASYNC_TX_SUBMITTED:
break;
case ASYNC_TX_CHANNEL_SWITCH:
async_tx_channel_switch(depend_tx, tx);
break;
case ASYNC_TX_DIRECT_SUBMIT:
txd_clear_parent(tx);
tx->tx_submit(tx);
break;
}
} else {
txd_clear_parent(tx);
tx->tx_submit(tx);
}
if (submit->flags & ASYNC_TX_ACK)
async_tx_ack(tx);
if (depend_tx)
async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);
/**
* async_trigger_callback - schedules the callback function to be run
* @submit: submission and completion parameters
*
* honored flags: ASYNC_TX_ACK
*
* The callback is run after any dependent operations have completed.
*/
struct dma_async_tx_descriptor *
async_trigger_callback(struct async_submit_ctl *submit)
{
struct dma_chan *chan;
struct dma_device *device;
struct dma_async_tx_descriptor *tx;
struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
if (depend_tx) {
chan = depend_tx->chan;
device = chan->device;
/* see if we can schedule an interrupt
* otherwise poll for completion
*/
if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
device = NULL;
tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
} else
tx = NULL;
if (tx) {
pr_debug("%s: (async)\n", __func__);
async_tx_submit(chan, tx, submit);
} else {
pr_debug("%s: (sync)\n", __func__);
/* wait for any prerequisite operations */
async_tx_quiesce(&submit->depend_tx);
async_tx_sync_epilog(submit);
}
return tx;
}
EXPORT_SYMBOL_GPL(async_trigger_callback);
/**
* async_tx_quiesce - ensure tx is complete and freeable upon return
* @tx: transaction to quiesce
*/
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
if (*tx) {
/* if ack is already set then we cannot be sure
* we are referring to the correct operation
*/
BUG_ON(async_tx_test_ack(*tx));
if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
panic("%s: DMA error waiting for transaction\n",
__func__);
async_tx_ack(*tx);
*tx = NULL;
}
}
EXPORT_SYMBOL_GPL(async_tx_quiesce);
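/*
 * Illustrative usage sketch (not part of this file; buffer and callback
 * names are placeholders): clients thread the descriptor returned by one
 * submission into the next submission as its dependency:
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx = NULL;
 *
 *	init_async_submit(&submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
 *	tx = async_xor(dest, srcs, 0, src_cnt, len, &submit);
 *	init_async_submit(&submit, ASYNC_TX_ACK, tx, done_cb, cb_arg, scribble);
 *	async_trigger_callback(&submit);
 */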
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");
| linux-master | crypto/async_tx/async_tx.c |
/*
* The following program is used to generate the constants for
* computing sched averages.
*
* ==============================================================
* C program (compile with -lm)
* ==============================================================
*/
#include <math.h>
#include <stdio.h>
#define HALFLIFE 32
#define SHIFT 32
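/*
 * The generated tables encode y = 0.5^(1/HALFLIFE), the per-period decay
 * factor whose HALFLIFE-th power is exactly one half. For example,
 * runnable_avg_yN_inv[i] = floor((2^32 - 1) * y^i), so entry 0 is
 * 0xffffffff and a hypothetical entry 32 would be roughly 0x7fffffff.
 */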
double y;
void calc_runnable_avg_yN_inv(void)
{
int i;
unsigned int x;
/* To silence -Wunused-but-set-variable warnings. */
printf("static const u32 runnable_avg_yN_inv[] __maybe_unused = {");
for (i = 0; i < HALFLIFE; i++) {
x = ((1UL<<32)-1)*pow(y, i);
if (i % 6 == 0) printf("\n\t");
printf("0x%8x, ", x);
}
printf("\n};\n\n");
}
int sum = 1024;
void calc_runnable_avg_yN_sum(void)
{
int i;
printf("static const u32 runnable_avg_yN_sum[] = {\n\t 0,");
for (i = 1; i <= HALFLIFE; i++) {
if (i == 1)
sum *= y;
else
sum = sum*y + 1024*y;
if (i % 11 == 0)
printf("\n\t");
printf("%5d,", sum);
}
printf("\n};\n\n");
}
int n = -1;
/* first period */
long max = 1024;
void calc_converged_max(void)
{
long last = 0, y_inv = ((1UL<<32)-1)*y;
for (; ; n++) {
if (n > -1)
max = ((max*y_inv)>>SHIFT) + 1024;
/*
* This is the same as:
* max = max*y + 1024;
*/
if (last == max)
break;
last = max;
}
n--;
printf("#define LOAD_AVG_PERIOD %d\n", HALFLIFE);
printf("#define LOAD_AVG_MAX %ld\n", max);
// printf("#define LOAD_AVG_MAX_N %d\n\n", n);
}
void calc_accumulated_sum_32(void)
{
int i, x = sum;
printf("static const u32 __accumulated_sum_N32[] = {\n\t 0,");
for (i = 1; i <= n/HALFLIFE+1; i++) {
if (i > 1)
x = x/2 + sum;
if (i % 6 == 0)
printf("\n\t");
printf("%6d,", x);
}
printf("\n};\n\n");
}
int main(void)
{
printf("/* Generated by Documentation/scheduler/sched-pelt; do not modify. */\n\n");
y = pow(0.5, 1/(double)HALFLIFE);
calc_runnable_avg_yN_inv();
// calc_runnable_avg_yN_sum();
calc_converged_max();
// calc_accumulated_sum_32();
	return 0;
}
| linux-master | Documentation/scheduler/sched-pelt.c |
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <inttypes.h>
#include <unistd.h>
#include <linux/usbdevice_fs.h>
/* For building without an updated set of headers */
#ifndef USBDEVFS_DROP_PRIVILEGES
#define USBDEVFS_DROP_PRIVILEGES _IOW('U', 30, __u32)
#define USBDEVFS_CAP_DROP_PRIVILEGES 0x40
#endif
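/*
 * Example invocation (the device path is illustrative; pass the usbfs
 * node of the device under test):
 *
 *	$ gcc usbdevfs-drop-permissions.c -o usbdevfs-drop-permissions
 *	$ ./usbdevfs-drop-permissions /dev/bus/usb/001/004
 */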
void drop_privileges(int fd, uint32_t mask)
{
int res;
res = ioctl(fd, USBDEVFS_DROP_PRIVILEGES, &mask);
if (res)
printf("ERROR: USBDEVFS_DROP_PRIVILEGES returned %d\n", res);
else
printf("OK: privileges dropped!\n");
}
void reset_device(int fd)
{
int res;
res = ioctl(fd, USBDEVFS_RESET);
if (!res)
printf("OK: USBDEVFS_RESET succeeded\n");
else
printf("ERROR: reset failed! (%d - %s)\n",
-res, strerror(-res));
}
void claim_some_intf(int fd)
{
int i, res;
for (i = 0; i < 4; i++) {
res = ioctl(fd, USBDEVFS_CLAIMINTERFACE, &i);
if (!res)
printf("OK: claimed if %d\n", i);
else
printf("ERROR claiming if %d (%d - %s)\n",
i, -res, strerror(-res));
}
}
int main(int argc, char *argv[])
{
	uint32_t mask, caps = 0;	/* zeroed in case GET_CAPABILITIES fails */
int c, fd;
	if (argc < 2) {
		printf("Usage: %s <usbfs device node>\n", argv[0]);
		goto err_fd;
	}

	fd = open(argv[1], O_RDWR);
if (fd < 0) {
printf("Failed to open file\n");
goto err_fd;
}
/*
* check if dropping privileges is supported,
* bail on systems where the capability is not present
*/
ioctl(fd, USBDEVFS_GET_CAPABILITIES, &caps);
if (!(caps & USBDEVFS_CAP_DROP_PRIVILEGES)) {
printf("DROP_PRIVILEGES not supported\n");
goto err;
}
/*
* Drop privileges but keep the ability to claim all
* free interfaces (i.e., those not used by kernel drivers)
*/
drop_privileges(fd, -1U);
printf("Available options:\n"
"[0] Exit now\n"
"[1] Reset device. Should fail if device is in use\n"
"[2] Claim 4 interfaces. Should succeed where not in use\n"
"[3] Narrow interface permission mask\n"
"Which option shall I run?: ");
while (scanf("%d", &c) == 1) {
switch (c) {
case 0:
goto exit;
case 1:
reset_device(fd);
break;
case 2:
claim_some_intf(fd);
break;
case 3:
printf("Insert new mask: ");
scanf("%x", &mask);
drop_privileges(fd, mask);
break;
default:
printf("I don't recognize that\n");
}
printf("Which test shall I run next?: ");
}
exit:
close(fd);
return 0;
err:
close(fd);
err_fd:
return 1;
}
| linux-master | Documentation/usb/usbdevfs-drop-permissions.c |
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <limits.h>
/*
* Original work by Jeff Garzik
*
* External file lists, symlink, pipe and fifo support by Thayne Harbaugh
* Hard link support by Luciano Rocha
*/
#define xstr(s) #s
#define str(s) xstr(s)
#define MIN(a, b) ((a) < (b) ? (a) : (b))
static unsigned int offset;
static unsigned int ino = 721;
static time_t default_mtime;
static bool do_csum = false;
struct file_handler {
const char *type;
int (*handler)(const char *line);
};
static void push_string(const char *name)
{
unsigned int name_len = strlen(name) + 1;
fputs(name, stdout);
putchar(0);
offset += name_len;
}
static void push_pad(void)
{
while (offset & 3) {
putchar(0);
offset++;
}
}
static void push_rest(const char *name)
{
unsigned int name_len = strlen(name) + 1;
unsigned int tmp_ofs;
fputs(name, stdout);
putchar(0);
offset += name_len;
tmp_ofs = name_len + 110;
while (tmp_ofs & 3) {
putchar(0);
offset++;
tmp_ofs++;
}
}
static void push_hdr(const char *s)
{
fputs(s, stdout);
offset += 110;
}
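/*
 * A "newc" (070701/070702) cpio header is always 110 bytes: a 6-character
 * magic followed by 13 fields of 8 hex digits each (6 + 13 * 8 = 110),
 * which is the constant push_hdr() and push_rest() account for.
 */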
static void cpio_trailer(void)
{
char s[256];
const char name[] = "TRAILER!!!";
sprintf(s, "%s%08X%08X%08lX%08lX%08X%08lX"
"%08X%08X%08X%08X%08X%08X%08X",
do_csum ? "070702" : "070701", /* magic */
0, /* ino */
0, /* mode */
(long) 0, /* uid */
(long) 0, /* gid */
1, /* nlink */
(long) 0, /* mtime */
0, /* filesize */
0, /* major */
0, /* minor */
0, /* rmajor */
0, /* rminor */
(unsigned)strlen(name)+1, /* namesize */
0); /* chksum */
push_hdr(s);
push_rest(name);
while (offset % 512) {
putchar(0);
offset++;
}
}
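/*
 * Note the trailer pads the archive out to a 512-byte boundary, the
 * block size cpio extractors traditionally expect.
 */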
static int cpio_mkslink(const char *name, const char *target,
unsigned int mode, uid_t uid, gid_t gid)
{
char s[256];
if (name[0] == '/')
name++;
sprintf(s,"%s%08X%08X%08lX%08lX%08X%08lX"
"%08X%08X%08X%08X%08X%08X%08X",
do_csum ? "070702" : "070701", /* magic */
ino++, /* ino */
S_IFLNK | mode, /* mode */
(long) uid, /* uid */
(long) gid, /* gid */
1, /* nlink */
(long) default_mtime, /* mtime */
(unsigned)strlen(target)+1, /* filesize */
3, /* major */
1, /* minor */
0, /* rmajor */
0, /* rminor */
(unsigned)strlen(name) + 1,/* namesize */
0); /* chksum */
push_hdr(s);
push_string(name);
push_pad();
push_string(target);
push_pad();
return 0;
}
static int cpio_mkslink_line(const char *line)
{
char name[PATH_MAX + 1];
char target[PATH_MAX + 1];
unsigned int mode;
int uid;
int gid;
int rc = -1;
if (5 != sscanf(line, "%" str(PATH_MAX) "s %" str(PATH_MAX) "s %o %d %d", name, target, &mode, &uid, &gid)) {
fprintf(stderr, "Unrecognized dir format '%s'", line);
goto fail;
}
rc = cpio_mkslink(name, target, mode, uid, gid);
fail:
return rc;
}
static int cpio_mkgeneric(const char *name, unsigned int mode,
uid_t uid, gid_t gid)
{
char s[256];
if (name[0] == '/')
name++;
sprintf(s,"%s%08X%08X%08lX%08lX%08X%08lX"
"%08X%08X%08X%08X%08X%08X%08X",
do_csum ? "070702" : "070701", /* magic */
ino++, /* ino */
mode, /* mode */
(long) uid, /* uid */
(long) gid, /* gid */
2, /* nlink */
(long) default_mtime, /* mtime */
0, /* filesize */
3, /* major */
1, /* minor */
0, /* rmajor */
0, /* rminor */
(unsigned)strlen(name) + 1,/* namesize */
0); /* chksum */
push_hdr(s);
push_rest(name);
return 0;
}
enum generic_types {
GT_DIR,
GT_PIPE,
GT_SOCK
};
struct generic_type {
const char *type;
mode_t mode;
};
static const struct generic_type generic_type_table[] = {
[GT_DIR] = {
.type = "dir",
.mode = S_IFDIR
},
[GT_PIPE] = {
.type = "pipe",
.mode = S_IFIFO
},
[GT_SOCK] = {
.type = "sock",
.mode = S_IFSOCK
}
};
static int cpio_mkgeneric_line(const char *line, enum generic_types gt)
{
char name[PATH_MAX + 1];
unsigned int mode;
int uid;
int gid;
int rc = -1;
if (4 != sscanf(line, "%" str(PATH_MAX) "s %o %d %d", name, &mode, &uid, &gid)) {
fprintf(stderr, "Unrecognized %s format '%s'",
line, generic_type_table[gt].type);
goto fail;
}
mode |= generic_type_table[gt].mode;
rc = cpio_mkgeneric(name, mode, uid, gid);
fail:
return rc;
}
static int cpio_mkdir_line(const char *line)
{
return cpio_mkgeneric_line(line, GT_DIR);
}
static int cpio_mkpipe_line(const char *line)
{
return cpio_mkgeneric_line(line, GT_PIPE);
}
static int cpio_mksock_line(const char *line)
{
return cpio_mkgeneric_line(line, GT_SOCK);
}
static int cpio_mknod(const char *name, unsigned int mode,
uid_t uid, gid_t gid, char dev_type,
unsigned int maj, unsigned int min)
{
char s[256];
if (dev_type == 'b')
mode |= S_IFBLK;
else
mode |= S_IFCHR;
if (name[0] == '/')
name++;
sprintf(s,"%s%08X%08X%08lX%08lX%08X%08lX"
"%08X%08X%08X%08X%08X%08X%08X",
do_csum ? "070702" : "070701", /* magic */
ino++, /* ino */
mode, /* mode */
(long) uid, /* uid */
(long) gid, /* gid */
1, /* nlink */
(long) default_mtime, /* mtime */
0, /* filesize */
3, /* major */
1, /* minor */
maj, /* rmajor */
min, /* rminor */
(unsigned)strlen(name) + 1,/* namesize */
0); /* chksum */
push_hdr(s);
push_rest(name);
return 0;
}
static int cpio_mknod_line(const char *line)
{
char name[PATH_MAX + 1];
unsigned int mode;
int uid;
int gid;
char dev_type;
unsigned int maj;
unsigned int min;
int rc = -1;
if (7 != sscanf(line, "%" str(PATH_MAX) "s %o %d %d %c %u %u",
name, &mode, &uid, &gid, &dev_type, &maj, &min)) {
fprintf(stderr, "Unrecognized nod format '%s'", line);
goto fail;
}
rc = cpio_mknod(name, mode, uid, gid, dev_type, maj, min);
fail:
return rc;
}
static int cpio_mkfile_csum(int fd, unsigned long size, uint32_t *csum)
{
while (size) {
unsigned char filebuf[65536];
ssize_t this_read;
size_t i, this_size = MIN(size, sizeof(filebuf));
this_read = read(fd, filebuf, this_size);
if (this_read <= 0 || this_read > this_size)
return -1;
for (i = 0; i < this_read; i++)
*csum += filebuf[i];
size -= this_read;
}
/* seek back to the start for data segment I/O */
if (lseek(fd, 0, SEEK_SET) < 0)
return -1;
return 0;
}
static int cpio_mkfile(const char *name, const char *location,
unsigned int mode, uid_t uid, gid_t gid,
unsigned int nlinks)
{
char s[256];
struct stat buf;
unsigned long size;
int file;
int retval;
int rc = -1;
int namesize;
unsigned int i;
uint32_t csum = 0;
mode |= S_IFREG;
file = open (location, O_RDONLY);
if (file < 0) {
fprintf (stderr, "File %s could not be opened for reading\n", location);
goto error;
}
retval = fstat(file, &buf);
if (retval) {
fprintf(stderr, "File %s could not be stat()'ed\n", location);
goto error;
}
if (buf.st_mtime > 0xffffffff) {
fprintf(stderr, "%s: Timestamp exceeds maximum cpio timestamp, clipping.\n",
location);
buf.st_mtime = 0xffffffff;
}
if (buf.st_mtime < 0) {
fprintf(stderr, "%s: Timestamp negative, clipping.\n",
location);
buf.st_mtime = 0;
}
if (buf.st_size > 0xffffffff) {
fprintf(stderr, "%s: Size exceeds maximum cpio file size\n",
location);
goto error;
}
if (do_csum && cpio_mkfile_csum(file, buf.st_size, &csum) < 0) {
fprintf(stderr, "Failed to checksum file %s\n", location);
goto error;
}
size = 0;
for (i = 1; i <= nlinks; i++) {
/* data goes on last link */
if (i == nlinks)
size = buf.st_size;
if (name[0] == '/')
name++;
namesize = strlen(name) + 1;
sprintf(s,"%s%08X%08X%08lX%08lX%08X%08lX"
"%08lX%08X%08X%08X%08X%08X%08X",
do_csum ? "070702" : "070701", /* magic */
ino, /* ino */
mode, /* mode */
(long) uid, /* uid */
(long) gid, /* gid */
nlinks, /* nlink */
(long) buf.st_mtime, /* mtime */
size, /* filesize */
3, /* major */
1, /* minor */
0, /* rmajor */
0, /* rminor */
namesize, /* namesize */
size ? csum : 0); /* chksum */
push_hdr(s);
push_string(name);
push_pad();
while (size) {
unsigned char filebuf[65536];
ssize_t this_read;
size_t this_size = MIN(size, sizeof(filebuf));
this_read = read(file, filebuf, this_size);
if (this_read <= 0 || this_read > this_size) {
fprintf(stderr, "Can not read %s file\n", location);
goto error;
}
if (fwrite(filebuf, this_read, 1, stdout) != 1) {
fprintf(stderr, "writing filebuf failed\n");
goto error;
}
offset += this_read;
size -= this_read;
}
push_pad();
name += namesize;
}
ino++;
rc = 0;
error:
if (file >= 0)
close(file);
return rc;
}
static char *cpio_replace_env(char *new_location)
{
char expanded[PATH_MAX + 1];
char *start, *end, *var;
while ((start = strstr(new_location, "${")) &&
(end = strchr(start + 2, '}'))) {
*start = *end = 0;
var = getenv(start + 2);
snprintf(expanded, sizeof expanded, "%s%s%s",
new_location, var ? var : "", end + 1);
strcpy(new_location, expanded);
}
return new_location;
}
static int cpio_mkfile_line(const char *line)
{
char name[PATH_MAX + 1];
char *dname = NULL; /* malloc'ed buffer for hard links */
char location[PATH_MAX + 1];
unsigned int mode;
int uid;
int gid;
int nlinks = 1;
int end = 0, dname_len = 0;
int rc = -1;
if (5 > sscanf(line, "%" str(PATH_MAX) "s %" str(PATH_MAX)
"s %o %d %d %n",
name, location, &mode, &uid, &gid, &end)) {
fprintf(stderr, "Unrecognized file format '%s'", line);
goto fail;
}
if (end && isgraph(line[end])) {
int len;
int nend;
dname = malloc(strlen(line));
if (!dname) {
fprintf (stderr, "out of memory (%d)\n", dname_len);
goto fail;
}
dname_len = strlen(name) + 1;
memcpy(dname, name, dname_len);
do {
nend = 0;
if (sscanf(line + end, "%" str(PATH_MAX) "s %n",
name, &nend) < 1)
break;
len = strlen(name) + 1;
memcpy(dname + dname_len, name, len);
dname_len += len;
nlinks++;
end += nend;
} while (isgraph(line[end]));
} else {
dname = name;
}
rc = cpio_mkfile(dname, cpio_replace_env(location),
mode, uid, gid, nlinks);
fail:
if (dname_len) free(dname);
return rc;
}
static void usage(const char *prog)
{
fprintf(stderr, "Usage:\n"
"\t%s [-t <timestamp>] [-c] <cpio_list>\n"
"\n"
"<cpio_list> is a file containing newline separated entries that\n"
"describe the files to be included in the initramfs archive:\n"
"\n"
"# a comment\n"
"file <name> <location> <mode> <uid> <gid> [<hard links>]\n"
"dir <name> <mode> <uid> <gid>\n"
"nod <name> <mode> <uid> <gid> <dev_type> <maj> <min>\n"
"slink <name> <target> <mode> <uid> <gid>\n"
"pipe <name> <mode> <uid> <gid>\n"
"sock <name> <mode> <uid> <gid>\n"
"\n"
"<name> name of the file/dir/nod/etc in the archive\n"
"<location> location of the file in the current filesystem\n"
" expands shell variables quoted with ${}\n"
"<target> link target\n"
"<mode> mode/permissions of the file\n"
"<uid> user id (0=root)\n"
"<gid> group id (0=root)\n"
"<dev_type> device type (b=block, c=character)\n"
"<maj> major number of nod\n"
"<min> minor number of nod\n"
"<hard links> space separated list of other links to file\n"
"\n"
"example:\n"
"# A simple initramfs\n"
"dir /dev 0755 0 0\n"
"nod /dev/console 0600 0 0 c 5 1\n"
"dir /root 0700 0 0\n"
"dir /sbin 0755 0 0\n"
"file /sbin/kinit /usr/src/klibc/kinit/kinit 0755 0 0\n"
"\n"
"<timestamp> is time in seconds since Epoch that will be used\n"
"as mtime for symlinks, special files and directories. The default\n"
"is to use the current time for these entries.\n"
"-c: calculate and store 32-bit checksums for file data.\n",
prog);
}
static const struct file_handler file_handler_table[] = {
{
.type = "file",
.handler = cpio_mkfile_line,
}, {
.type = "nod",
.handler = cpio_mknod_line,
}, {
.type = "dir",
.handler = cpio_mkdir_line,
}, {
.type = "slink",
.handler = cpio_mkslink_line,
}, {
.type = "pipe",
.handler = cpio_mkpipe_line,
}, {
.type = "sock",
.handler = cpio_mksock_line,
}, {
.type = NULL,
.handler = NULL,
}
};
#define LINE_SIZE (2 * PATH_MAX + 50)
int main (int argc, char *argv[])
{
FILE *cpio_list;
char line[LINE_SIZE];
char *args, *type;
int ec = 0;
int line_nr = 0;
const char *filename;
default_mtime = time(NULL);
while (1) {
int opt = getopt(argc, argv, "t:ch");
char *invalid;
if (opt == -1)
break;
switch (opt) {
case 't':
default_mtime = strtol(optarg, &invalid, 10);
if (!*optarg || *invalid) {
fprintf(stderr, "Invalid timestamp: %s\n",
optarg);
usage(argv[0]);
exit(1);
}
break;
case 'c':
do_csum = true;
break;
case 'h':
case '?':
usage(argv[0]);
exit(opt == 'h' ? 0 : 1);
}
}
/*
* Timestamps after 2106-02-07 06:28:15 UTC have an ascii hex time_t
* representation that exceeds 8 chars and breaks the cpio header
* specification. Negative timestamps similarly exceed 8 chars.
*/
if (default_mtime > 0xffffffff || default_mtime < 0) {
fprintf(stderr, "ERROR: Timestamp out of range for cpio format\n");
exit(1);
}
if (argc - optind != 1) {
usage(argv[0]);
exit(1);
}
filename = argv[optind];
if (!strcmp(filename, "-"))
cpio_list = stdin;
else if (!(cpio_list = fopen(filename, "r"))) {
fprintf(stderr, "ERROR: unable to open '%s': %s\n\n",
filename, strerror(errno));
usage(argv[0]);
exit(1);
}
while (fgets(line, LINE_SIZE, cpio_list)) {
int type_idx;
size_t slen = strlen(line);
line_nr++;
if ('#' == *line) {
/* comment - skip to next line */
continue;
}
if (! (type = strtok(line, " \t"))) {
fprintf(stderr,
"ERROR: incorrect format, could not locate file type line %d: '%s'\n",
line_nr, line);
ec = -1;
break;
}
if ('\n' == *type) {
/* a blank line */
continue;
}
if (slen == strlen(type)) {
/* must be an empty line */
continue;
}
if (! (args = strtok(NULL, "\n"))) {
fprintf(stderr,
"ERROR: incorrect format, newline required line %d: '%s'\n",
line_nr, line);
			ec = -1;
			break;
		}
for (type_idx = 0; file_handler_table[type_idx].type; type_idx++) {
int rc;
if (! strcmp(line, file_handler_table[type_idx].type)) {
if ((rc = file_handler_table[type_idx].handler(args))) {
ec = rc;
fprintf(stderr, " line %d\n", line_nr);
}
break;
}
}
if (NULL == file_handler_table[type_idx].type) {
fprintf(stderr, "unknown file type line %d: '%s'\n",
line_nr, line);
}
}
if (ec == 0)
cpio_trailer();
exit(ec);
}
| linux-master | usr/gen_init_cpio.c |
// SPDX-License-Identifier: GPL-2.0
/* procacct.c
*
* Demonstrator of fetching resource data on task exit, as a way
* to accumulate accurate program resource usage statistics, without
* prior identification of the programs. For that, the fields for
* device and inode of the program executable binary file are also
* extracted in addition to the command string.
*
* The TGID together with the PID and the AGROUP flag allow
* identification of threads in a process and single-threaded processes.
* The ac_tgetime field gives proper whole-process walltime.
*
* Written (changed) by Thomas Orgis, University of Hamburg in 2022
*
* This is a cheap derivation (inheriting the style) of getdelays.c:
*
* Utility to get per-pid and per-tgid delay accounting statistics
* Also illustrates usage of the taskstats interface
*
* Copyright (C) Shailabh Nagar, IBM Corp. 2005
* Copyright (C) Balbir Singh, IBM Corp. 2006
* Copyright (c) Jay Lan, SGI. 2006
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <poll.h>
#include <string.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <signal.h>
#include <linux/genetlink.h>
#include <linux/acct.h>
#include <linux/taskstats.h>
#include <linux/kdev_t.h>
/*
* Generic macros for dealing with netlink sockets. Might be duplicated
* elsewhere. It is recommended that commercial grade applications use
* libnl or libnetlink and use the interfaces provided by the library
*/
#define GENLMSG_DATA(glh) ((void *)(NLMSG_DATA(glh) + GENL_HDRLEN))
#define GENLMSG_PAYLOAD(glh) (NLMSG_PAYLOAD(glh, 0) - GENL_HDRLEN)
#define NLA_DATA(na) ((void *)((char *)(na) + NLA_HDRLEN))
#define NLA_PAYLOAD(len) (len - NLA_HDRLEN)
#define err(code, fmt, arg...) \
do { \
fprintf(stderr, fmt, ##arg); \
exit(code); \
} while (0)
int rcvbufsz;
char name[100];
int dbg;
int print_delays;
int print_io_accounting;
int print_task_context_switch_counts;
#define PRINTF(fmt, arg...) { \
if (dbg) { \
printf(fmt, ##arg); \
} \
}
/* Maximum size of response requested or message sent */
#define MAX_MSG_SIZE 1024
/* Maximum number of cpus expected to be specified in a cpumask */
#define MAX_CPUS 32
struct msgtemplate {
struct nlmsghdr n;
struct genlmsghdr g;
char buf[MAX_MSG_SIZE];
};
char cpumask[100+6*MAX_CPUS];
static void usage(void)
{
fprintf(stderr, "procacct [-v] [-w logfile] [-r bufsize] [-m cpumask]\n");
fprintf(stderr, " -v: debug on\n");
}
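/*
 * Example session (illustrative): register for exit records on CPU 0 and
 * print one line per exiting task:
 *
 *	# ./procacct -m 0
 */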
/*
* Create a raw netlink socket and bind
*/
static int create_nl_socket(int protocol)
{
int fd;
struct sockaddr_nl local;
fd = socket(AF_NETLINK, SOCK_RAW, protocol);
if (fd < 0)
return -1;
if (rcvbufsz)
if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
&rcvbufsz, sizeof(rcvbufsz)) < 0) {
fprintf(stderr, "Unable to set socket rcv buf size to %d\n",
rcvbufsz);
goto error;
}
memset(&local, 0, sizeof(local));
local.nl_family = AF_NETLINK;
if (bind(fd, (struct sockaddr *) &local, sizeof(local)) < 0)
goto error;
return fd;
error:
close(fd);
return -1;
}
static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
__u8 genl_cmd, __u16 nla_type,
void *nla_data, int nla_len)
{
struct nlattr *na;
struct sockaddr_nl nladdr;
int r, buflen;
char *buf;
struct msgtemplate msg;
msg.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
msg.n.nlmsg_type = nlmsg_type;
msg.n.nlmsg_flags = NLM_F_REQUEST;
msg.n.nlmsg_seq = 0;
msg.n.nlmsg_pid = nlmsg_pid;
msg.g.cmd = genl_cmd;
msg.g.version = 0x1;
na = (struct nlattr *) GENLMSG_DATA(&msg);
na->nla_type = nla_type;
na->nla_len = nla_len + 1 + NLA_HDRLEN;
memcpy(NLA_DATA(na), nla_data, nla_len);
msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
buf = (char *) &msg;
buflen = msg.n.nlmsg_len;
memset(&nladdr, 0, sizeof(nladdr));
nladdr.nl_family = AF_NETLINK;
while ((r = sendto(sd, buf, buflen, 0, (struct sockaddr *) &nladdr,
sizeof(nladdr))) < buflen) {
if (r > 0) {
buf += r;
buflen -= r;
} else if (errno != EAGAIN)
return -1;
}
return 0;
}
/*
* Probe the controller in genetlink to find the family id
* for the TASKSTATS family
*/
static int get_family_id(int sd)
{
struct {
struct nlmsghdr n;
struct genlmsghdr g;
char buf[256];
} ans;
int id = 0, rc;
struct nlattr *na;
int rep_len;
strcpy(name, TASKSTATS_GENL_NAME);
rc = send_cmd(sd, GENL_ID_CTRL, getpid(), CTRL_CMD_GETFAMILY,
CTRL_ATTR_FAMILY_NAME, (void *)name,
strlen(TASKSTATS_GENL_NAME)+1);
if (rc < 0)
return 0; /* sendto() failure? */
rep_len = recv(sd, &ans, sizeof(ans), 0);
if (ans.n.nlmsg_type == NLMSG_ERROR ||
(rep_len < 0) || !NLMSG_OK((&ans.n), rep_len))
return 0;
na = (struct nlattr *) GENLMSG_DATA(&ans);
na = (struct nlattr *) ((char *) na + NLA_ALIGN(na->nla_len));
if (na->nla_type == CTRL_ATTR_FAMILY_ID)
id = *(__u16 *) NLA_DATA(na);
return id;
}
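/*
 * The exchange above is the standard genetlink bootstrap: ask the
 * controller family (GENL_ID_CTRL) to resolve "taskstats" into its
 * dynamically assigned family id, then use that id as the nlmsg_type of
 * every subsequent request.
 */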
#define average_ms(t, c) (t / 1000000ULL / (c ? c : 1))
static void print_procacct(struct taskstats *t)
{
	/* First letter: P is the last thread of a process (AGROUP set),
	 * T a mere thread, ? a taskstats version too old to tell. */
printf(
"%c pid=%lu tgid=%lu uid=%lu wall=%llu gwall=%llu cpu=%llu vmpeak=%llu rsspeak=%llu dev=%lu:%lu inode=%llu comm=%s\n"
, t->version >= 12 ? (t->ac_flag & AGROUP ? 'P' : 'T') : '?'
, (unsigned long)t->ac_pid
, (unsigned long)(t->version >= 12 ? t->ac_tgid : 0)
, (unsigned long)t->ac_uid
, (unsigned long long)t->ac_etime
, (unsigned long long)(t->version >= 12 ? t->ac_tgetime : 0)
, (unsigned long long)(t->ac_utime+t->ac_stime)
, (unsigned long long)t->hiwater_vm
, (unsigned long long)t->hiwater_rss
, (unsigned long)(t->version >= 12 ? MAJOR(t->ac_exe_dev) : 0)
, (unsigned long)(t->version >= 12 ? MINOR(t->ac_exe_dev) : 0)
, (unsigned long long)(t->version >= 12 ? t->ac_exe_inode : 0)
, t->ac_comm
);
}
void handle_aggr(int mother, struct nlattr *na, int fd)
{
int aggr_len = NLA_PAYLOAD(na->nla_len);
int len2 = 0;
pid_t rtid = 0;
na = (struct nlattr *) NLA_DATA(na);
while (len2 < aggr_len) {
switch (na->nla_type) {
case TASKSTATS_TYPE_PID:
rtid = *(int *) NLA_DATA(na);
PRINTF("PID\t%d\n", rtid);
break;
case TASKSTATS_TYPE_TGID:
rtid = *(int *) NLA_DATA(na);
PRINTF("TGID\t%d\n", rtid);
break;
case TASKSTATS_TYPE_STATS:
if (mother == TASKSTATS_TYPE_AGGR_PID)
print_procacct((struct taskstats *) NLA_DATA(na));
if (fd) {
if (write(fd, NLA_DATA(na), na->nla_len) < 0)
err(1, "write error\n");
}
break;
case TASKSTATS_TYPE_NULL:
break;
default:
fprintf(stderr, "Unknown nested nla_type %d\n",
na->nla_type);
break;
}
len2 += NLA_ALIGN(na->nla_len);
na = (struct nlattr *)((char *)na +
NLA_ALIGN(na->nla_len));
}
}
int main(int argc, char *argv[])
{
int c, rc, rep_len;
__u16 id;
__u32 mypid;
struct nlattr *na;
int nl_sd = -1;
int len = 0;
int fd = 0;
int write_file = 0;
int maskset = 0;
char *logfile = NULL;
int cfd = 0;
int forking = 0;
struct msgtemplate msg;
while (!forking) {
c = getopt(argc, argv, "m:vr:");
if (c < 0)
break;
switch (c) {
case 'w':
logfile = strdup(optarg);
printf("write to file %s\n", logfile);
write_file = 1;
break;
case 'r':
rcvbufsz = atoi(optarg);
printf("receive buf size %d\n", rcvbufsz);
if (rcvbufsz < 0)
err(1, "Invalid rcv buf size\n");
break;
case 'm':
strncpy(cpumask, optarg, sizeof(cpumask));
cpumask[sizeof(cpumask) - 1] = '\0';
maskset = 1;
break;
case 'v':
printf("debug on\n");
dbg = 1;
break;
default:
usage();
exit(-1);
}
}
if (!maskset) {
maskset = 1;
strncpy(cpumask, "1", sizeof(cpumask));
cpumask[sizeof(cpumask) - 1] = '\0';
}
printf("cpumask %s maskset %d\n", cpumask, maskset);
if (write_file) {
fd = open(logfile, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (fd == -1) {
perror("Cannot open output file\n");
exit(1);
}
}
nl_sd = create_nl_socket(NETLINK_GENERIC);
if (nl_sd < 0)
err(1, "error creating Netlink socket\n");
mypid = getpid();
id = get_family_id(nl_sd);
if (!id) {
fprintf(stderr, "Error getting family id, errno %d\n", errno);
goto err;
}
PRINTF("family id %d\n", id);
if (maskset) {
rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
TASKSTATS_CMD_ATTR_REGISTER_CPUMASK,
&cpumask, strlen(cpumask) + 1);
PRINTF("Sent register cpumask, retval %d\n", rc);
if (rc < 0) {
fprintf(stderr, "error sending register cpumask\n");
goto err;
}
}
do {
rep_len = recv(nl_sd, &msg, sizeof(msg), 0);
PRINTF("received %d bytes\n", rep_len);
if (rep_len < 0) {
fprintf(stderr, "nonfatal reply error: errno %d\n",
errno);
continue;
}
if (msg.n.nlmsg_type == NLMSG_ERROR ||
!NLMSG_OK((&msg.n), rep_len)) {
struct nlmsgerr *err = NLMSG_DATA(&msg);
fprintf(stderr, "fatal reply error, errno %d\n",
err->error);
goto done;
}
PRINTF("nlmsghdr size=%zu, nlmsg_len=%d, rep_len=%d\n",
sizeof(struct nlmsghdr), msg.n.nlmsg_len, rep_len);
rep_len = GENLMSG_PAYLOAD(&msg.n);
na = (struct nlattr *) GENLMSG_DATA(&msg);
len = 0;
while (len < rep_len) {
len += NLA_ALIGN(na->nla_len);
int mother = na->nla_type;
PRINTF("mother=%i\n", mother);
switch (na->nla_type) {
case TASKSTATS_TYPE_AGGR_PID:
case TASKSTATS_TYPE_AGGR_TGID:
/* For nested attributes, na follows */
handle_aggr(mother, na, fd);
break;
default:
fprintf(stderr, "Unexpected nla_type %d\n",
na->nla_type);
case TASKSTATS_TYPE_NULL:
break;
}
na = (struct nlattr *) (GENLMSG_DATA(&msg) + len);
}
} while (1);
done:
if (maskset) {
rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK,
&cpumask, strlen(cpumask) + 1);
printf("Sent deregister mask, retval %d\n", rc);
if (rc < 0)
err(rc, "error sending deregister cpumask\n");
}
err:
close(nl_sd);
if (fd)
close(fd);
if (cfd)
close(cfd);
return 0;
}
| linux-master | tools/accounting/procacct.c |
// SPDX-License-Identifier: GPL-2.0
/* getdelays.c
*
* Utility to get per-pid and per-tgid delay accounting statistics
* Also illustrates usage of the taskstats interface
*
* Copyright (C) Shailabh Nagar, IBM Corp. 2005
* Copyright (C) Balbir Singh, IBM Corp. 2006
* Copyright (c) Jay Lan, SGI. 2006
*
* Compile with
* gcc -I/usr/src/linux/include getdelays.c -o getdelays
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <poll.h>
#include <string.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <signal.h>
#include <linux/genetlink.h>
#include <linux/taskstats.h>
#include <linux/cgroupstats.h>
/*
* Generic macros for dealing with netlink sockets. Might be duplicated
* elsewhere. It is recommended that commercial grade applications use
* libnl or libnetlink and use the interfaces provided by the library
*/
#define GENLMSG_DATA(glh) ((void *)(NLMSG_DATA(glh) + GENL_HDRLEN))
#define GENLMSG_PAYLOAD(glh) (NLMSG_PAYLOAD(glh, 0) - GENL_HDRLEN)
#define NLA_DATA(na) ((void *)((char*)(na) + NLA_HDRLEN))
#define NLA_PAYLOAD(len) (len - NLA_HDRLEN)
#define err(code, fmt, arg...) \
do { \
fprintf(stderr, fmt, ##arg); \
exit(code); \
} while (0)
int rcvbufsz;
char name[100];
int dbg;
int print_delays;
int print_io_accounting;
int print_task_context_switch_counts;
#define PRINTF(fmt, arg...) { \
if (dbg) { \
printf(fmt, ##arg); \
} \
}
/* Maximum size of response requested or message sent */
#define MAX_MSG_SIZE 1024
/* Maximum number of cpus expected to be specified in a cpumask */
#define MAX_CPUS 32
struct msgtemplate {
struct nlmsghdr n;
struct genlmsghdr g;
char buf[MAX_MSG_SIZE];
};
char cpumask[100+6*MAX_CPUS];
static void usage(void)
{
fprintf(stderr, "getdelays [-dilv] [-w logfile] [-r bufsize] "
"[-m cpumask] [-t tgid] [-p pid]\n");
fprintf(stderr, " -d: print delayacct stats\n");
fprintf(stderr, " -i: print IO accounting (works only with -p)\n");
fprintf(stderr, " -l: listen forever\n");
fprintf(stderr, " -v: debug on\n");
fprintf(stderr, " -C: container path\n");
}
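/*
 * Example invocations (illustrative):
 *
 *	# ./getdelays -d -p 1234	delay stats for pid 1234
 *	# ./getdelays -d -c ls /tmp	fork "ls /tmp", report on its exit
 *	# ./getdelays -d -m 0-3 -l	listen for exits on CPUs 0-3
 */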
/*
* Create a raw netlink socket and bind
*/
static int create_nl_socket(int protocol)
{
int fd;
struct sockaddr_nl local;
fd = socket(AF_NETLINK, SOCK_RAW, protocol);
if (fd < 0)
return -1;
if (rcvbufsz)
if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
&rcvbufsz, sizeof(rcvbufsz)) < 0) {
fprintf(stderr, "Unable to set socket rcv buf size to %d\n",
rcvbufsz);
goto error;
}
memset(&local, 0, sizeof(local));
local.nl_family = AF_NETLINK;
if (bind(fd, (struct sockaddr *) &local, sizeof(local)) < 0)
goto error;
return fd;
error:
close(fd);
return -1;
}
static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
__u8 genl_cmd, __u16 nla_type,
void *nla_data, int nla_len)
{
struct nlattr *na;
struct sockaddr_nl nladdr;
int r, buflen;
char *buf;
struct msgtemplate msg;
msg.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
msg.n.nlmsg_type = nlmsg_type;
msg.n.nlmsg_flags = NLM_F_REQUEST;
msg.n.nlmsg_seq = 0;
msg.n.nlmsg_pid = nlmsg_pid;
msg.g.cmd = genl_cmd;
msg.g.version = 0x1;
na = (struct nlattr *) GENLMSG_DATA(&msg);
na->nla_type = nla_type;
na->nla_len = nla_len + NLA_HDRLEN;
memcpy(NLA_DATA(na), nla_data, nla_len);
msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
buf = (char *) &msg;
buflen = msg.n.nlmsg_len ;
memset(&nladdr, 0, sizeof(nladdr));
nladdr.nl_family = AF_NETLINK;
while ((r = sendto(sd, buf, buflen, 0, (struct sockaddr *) &nladdr,
sizeof(nladdr))) < buflen) {
if (r > 0) {
buf += r;
buflen -= r;
} else if (errno != EAGAIN)
return -1;
}
return 0;
}
/*
* Probe the controller in genetlink to find the family id
* for the TASKSTATS family
*/
static int get_family_id(int sd)
{
struct {
struct nlmsghdr n;
struct genlmsghdr g;
char buf[256];
} ans;
int id = 0, rc;
struct nlattr *na;
int rep_len;
strcpy(name, TASKSTATS_GENL_NAME);
rc = send_cmd(sd, GENL_ID_CTRL, getpid(), CTRL_CMD_GETFAMILY,
CTRL_ATTR_FAMILY_NAME, (void *)name,
strlen(TASKSTATS_GENL_NAME)+1);
if (rc < 0)
return 0; /* sendto() failure? */
rep_len = recv(sd, &ans, sizeof(ans), 0);
if (ans.n.nlmsg_type == NLMSG_ERROR ||
(rep_len < 0) || !NLMSG_OK((&ans.n), rep_len))
return 0;
na = (struct nlattr *) GENLMSG_DATA(&ans);
na = (struct nlattr *) ((char *) na + NLA_ALIGN(na->nla_len));
if (na->nla_type == CTRL_ATTR_FAMILY_ID) {
id = *(__u16 *) NLA_DATA(na);
}
return id;
}
#define average_ms(t, c) (t / 1000000ULL / (c ? c : 1))
static void print_delayacct(struct taskstats *t)
{
printf("\n\nCPU %15s%15s%15s%15s%15s\n"
" %15llu%15llu%15llu%15llu%15.3fms\n"
"IO %15s%15s%15s\n"
" %15llu%15llu%15.3fms\n"
"SWAP %15s%15s%15s\n"
" %15llu%15llu%15.3fms\n"
"RECLAIM %12s%15s%15s\n"
" %15llu%15llu%15.3fms\n"
"THRASHING%12s%15s%15s\n"
" %15llu%15llu%15.3fms\n"
"COMPACT %12s%15s%15s\n"
" %15llu%15llu%15.3fms\n"
"WPCOPY %12s%15s%15s\n"
" %15llu%15llu%15.3fms\n"
"IRQ %15s%15s%15s\n"
" %15llu%15llu%15.3fms\n",
"count", "real total", "virtual total",
"delay total", "delay average",
(unsigned long long)t->cpu_count,
(unsigned long long)t->cpu_run_real_total,
(unsigned long long)t->cpu_run_virtual_total,
(unsigned long long)t->cpu_delay_total,
average_ms((double)t->cpu_delay_total, t->cpu_count),
"count", "delay total", "delay average",
(unsigned long long)t->blkio_count,
(unsigned long long)t->blkio_delay_total,
average_ms((double)t->blkio_delay_total, t->blkio_count),
"count", "delay total", "delay average",
(unsigned long long)t->swapin_count,
(unsigned long long)t->swapin_delay_total,
average_ms((double)t->swapin_delay_total, t->swapin_count),
"count", "delay total", "delay average",
(unsigned long long)t->freepages_count,
(unsigned long long)t->freepages_delay_total,
average_ms((double)t->freepages_delay_total, t->freepages_count),
"count", "delay total", "delay average",
(unsigned long long)t->thrashing_count,
(unsigned long long)t->thrashing_delay_total,
average_ms((double)t->thrashing_delay_total, t->thrashing_count),
"count", "delay total", "delay average",
(unsigned long long)t->compact_count,
(unsigned long long)t->compact_delay_total,
average_ms((double)t->compact_delay_total, t->compact_count),
"count", "delay total", "delay average",
(unsigned long long)t->wpcopy_count,
(unsigned long long)t->wpcopy_delay_total,
average_ms((double)t->wpcopy_delay_total, t->wpcopy_count),
"count", "delay total", "delay average",
(unsigned long long)t->irq_count,
(unsigned long long)t->irq_delay_total,
average_ms((double)t->irq_delay_total, t->irq_count));
}
static void task_context_switch_counts(struct taskstats *t)
{
printf("\n\nTask %15s%15s\n"
" %15llu%15llu\n",
"voluntary", "nonvoluntary",
(unsigned long long)t->nvcsw, (unsigned long long)t->nivcsw);
}
static void print_cgroupstats(struct cgroupstats *c)
{
printf("sleeping %llu, blocked %llu, running %llu, stopped %llu, "
"uninterruptible %llu\n", (unsigned long long)c->nr_sleeping,
(unsigned long long)c->nr_io_wait,
(unsigned long long)c->nr_running,
(unsigned long long)c->nr_stopped,
(unsigned long long)c->nr_uninterruptible);
}
static void print_ioacct(struct taskstats *t)
{
printf("%s: read=%llu, write=%llu, cancelled_write=%llu\n",
t->ac_comm,
(unsigned long long)t->read_bytes,
(unsigned long long)t->write_bytes,
(unsigned long long)t->cancelled_write_bytes);
}
int main(int argc, char *argv[])
{
int c, rc, rep_len, aggr_len, len2;
int cmd_type = TASKSTATS_CMD_ATTR_UNSPEC;
__u16 id;
__u32 mypid;
struct nlattr *na;
int nl_sd = -1;
int len = 0;
pid_t tid = 0;
pid_t rtid = 0;
int fd = 0;
int write_file = 0;
int maskset = 0;
char *logfile = NULL;
int loop = 0;
int containerset = 0;
char *containerpath = NULL;
int cfd = 0;
int forking = 0;
sigset_t sigset;
struct msgtemplate msg;
while (!forking) {
c = getopt(argc, argv, "qdiw:r:m:t:p:vlC:c:");
if (c < 0)
break;
switch (c) {
case 'd':
printf("print delayacct stats ON\n");
print_delays = 1;
break;
case 'i':
printf("printing IO accounting\n");
print_io_accounting = 1;
break;
case 'q':
printf("printing task/process context switch rates\n");
print_task_context_switch_counts = 1;
break;
case 'C':
containerset = 1;
containerpath = optarg;
break;
case 'w':
logfile = strdup(optarg);
printf("write to file %s\n", logfile);
write_file = 1;
break;
case 'r':
rcvbufsz = atoi(optarg);
printf("receive buf size %d\n", rcvbufsz);
if (rcvbufsz < 0)
err(1, "Invalid rcv buf size\n");
break;
case 'm':
strncpy(cpumask, optarg, sizeof(cpumask));
cpumask[sizeof(cpumask) - 1] = '\0';
maskset = 1;
printf("cpumask %s maskset %d\n", cpumask, maskset);
break;
case 't':
tid = atoi(optarg);
if (!tid)
err(1, "Invalid tgid\n");
cmd_type = TASKSTATS_CMD_ATTR_TGID;
break;
case 'p':
tid = atoi(optarg);
if (!tid)
err(1, "Invalid pid\n");
cmd_type = TASKSTATS_CMD_ATTR_PID;
break;
case 'c':
/* Block SIGCHLD for sigwait() later */
if (sigemptyset(&sigset) == -1)
err(1, "Failed to empty sigset");
if (sigaddset(&sigset, SIGCHLD))
err(1, "Failed to set sigchld in sigset");
sigprocmask(SIG_BLOCK, &sigset, NULL);
/* fork/exec a child */
tid = fork();
if (tid < 0)
err(1, "Fork failed\n");
if (tid == 0)
if (execvp(argv[optind - 1],
&argv[optind - 1]) < 0)
exit(-1);
/* Set the command type and avoid further processing */
cmd_type = TASKSTATS_CMD_ATTR_PID;
forking = 1;
break;
case 'v':
printf("debug on\n");
dbg = 1;
break;
case 'l':
printf("listen forever\n");
loop = 1;
break;
default:
usage();
exit(-1);
}
}
if (write_file) {
fd = open(logfile, O_WRONLY | O_CREAT | O_TRUNC,
S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (fd == -1) {
perror("Cannot open output file\n");
exit(1);
}
}
nl_sd = create_nl_socket(NETLINK_GENERIC);
if (nl_sd < 0)
err(1, "error creating Netlink socket\n");
mypid = getpid();
id = get_family_id(nl_sd);
if (!id) {
fprintf(stderr, "Error getting family id, errno %d\n", errno);
goto err;
}
PRINTF("family id %d\n", id);
if (maskset) {
rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
TASKSTATS_CMD_ATTR_REGISTER_CPUMASK,
&cpumask, strlen(cpumask) + 1);
PRINTF("Sent register cpumask, retval %d\n", rc);
if (rc < 0) {
fprintf(stderr, "error sending register cpumask\n");
goto err;
}
}
if (tid && containerset) {
fprintf(stderr, "Select either -t or -C, not both\n");
goto err;
}
/*
* If we forked a child, wait for it to exit. Cannot use waitpid()
* as all the delicious data would be reaped as part of the wait
*/
if (tid && forking) {
int sig_received;
sigwait(&sigset, &sig_received);
}
if (tid) {
rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
cmd_type, &tid, sizeof(__u32));
PRINTF("Sent pid/tgid, retval %d\n", rc);
if (rc < 0) {
fprintf(stderr, "error sending tid/tgid cmd\n");
goto done;
}
}
if (containerset) {
cfd = open(containerpath, O_RDONLY);
if (cfd < 0) {
perror("error opening container file");
goto err;
}
rc = send_cmd(nl_sd, id, mypid, CGROUPSTATS_CMD_GET,
CGROUPSTATS_CMD_ATTR_FD, &cfd, sizeof(__u32));
if (rc < 0) {
perror("error sending cgroupstats command");
goto err;
}
}
if (!maskset && !tid && !containerset) {
usage();
goto err;
}
do {
rep_len = recv(nl_sd, &msg, sizeof(msg), 0);
PRINTF("received %d bytes\n", rep_len);
if (rep_len < 0) {
fprintf(stderr, "nonfatal reply error: errno %d\n",
errno);
continue;
}
if (msg.n.nlmsg_type == NLMSG_ERROR ||
!NLMSG_OK((&msg.n), rep_len)) {
struct nlmsgerr *err = NLMSG_DATA(&msg);
fprintf(stderr, "fatal reply error, errno %d\n",
err->error);
goto done;
}
PRINTF("nlmsghdr size=%zu, nlmsg_len=%d, rep_len=%d\n",
sizeof(struct nlmsghdr), msg.n.nlmsg_len, rep_len);
rep_len = GENLMSG_PAYLOAD(&msg.n);
na = (struct nlattr *) GENLMSG_DATA(&msg);
len = 0;
while (len < rep_len) {
len += NLA_ALIGN(na->nla_len);
switch (na->nla_type) {
case TASKSTATS_TYPE_AGGR_TGID:
/* Fall through */
case TASKSTATS_TYPE_AGGR_PID:
aggr_len = NLA_PAYLOAD(na->nla_len);
len2 = 0;
/* For nested attributes, na follows */
na = (struct nlattr *) NLA_DATA(na);
while (len2 < aggr_len) {
switch (na->nla_type) {
case TASKSTATS_TYPE_PID:
rtid = *(int *) NLA_DATA(na);
if (print_delays)
printf("PID\t%d\n", rtid);
break;
case TASKSTATS_TYPE_TGID:
rtid = *(int *) NLA_DATA(na);
if (print_delays)
printf("TGID\t%d\n", rtid);
break;
case TASKSTATS_TYPE_STATS:
if (print_delays)
print_delayacct((struct taskstats *) NLA_DATA(na));
if (print_io_accounting)
print_ioacct((struct taskstats *) NLA_DATA(na));
if (print_task_context_switch_counts)
task_context_switch_counts((struct taskstats *) NLA_DATA(na));
if (fd) {
if (write(fd, NLA_DATA(na), na->nla_len) < 0) {
err(1,"write error\n");
}
}
if (!loop)
goto done;
break;
case TASKSTATS_TYPE_NULL:
break;
default:
fprintf(stderr, "Unknown nested"
" nla_type %d\n",
na->nla_type);
break;
}
len2 += NLA_ALIGN(na->nla_len);
na = (struct nlattr *)((char *)na +
NLA_ALIGN(na->nla_len));
}
break;
case CGROUPSTATS_TYPE_CGROUP_STATS:
print_cgroupstats(NLA_DATA(na));
break;
default:
fprintf(stderr, "Unknown nla_type %d\n",
na->nla_type);
case TASKSTATS_TYPE_NULL:
break;
}
na = (struct nlattr *) (GENLMSG_DATA(&msg) + len);
}
} while (loop);
done:
if (maskset) {
rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK,
&cpumask, strlen(cpumask) + 1);
printf("Sent deregister mask, retval %d\n", rc);
if (rc < 0)
err(rc, "error sending deregister cpumask\n");
}
err:
close(nl_sd);
if (fd)
close(fd);
if (cfd)
close(cfd);
return 0;
}
| linux-master | tools/accounting/getdelays.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ec_access.c
*
* Copyright (C) 2010 SUSE Linux Products GmbH
* Author:
* Thomas Renninger <[email protected]>
*/
#include <fcntl.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <libgen.h>
#include <unistd.h>
#include <getopt.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/stat.h>
#define EC_SPACE_SIZE 256
#define SYSFS_PATH "/sys/kernel/debug/ec/ec0/io"
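/*
 * The io file above is provided by the kernel's EC debugfs interface
 * (CONFIG_ACPI_EC_DEBUGFS) and requires debugfs to be mounted at
 * /sys/kernel/debug.
 */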
/* TBD/Enhancements:
- Provide param for accessing different ECs (not supported by kernel yet)
*/
static int read_mode = -1;
static int sleep_time;
static int write_byte_offset = -1;
static int read_byte_offset = -1;
static int write_value = -1;	/* int so the -1 "unset" sentinel works */
void usage(char progname[], int exit_status)
{
printf("Usage:\n");
printf("1) %s -r [-s sleep]\n", basename(progname));
printf("2) %s -b byte_offset\n", basename(progname));
printf("3) %s -w byte_offset -v value\n\n", basename(progname));
puts("\t-r [-s sleep] : Dump EC registers");
puts("\t If sleep is given, sleep x seconds,");
puts("\t re-read EC registers and show changes");
puts("\t-b offset : Read value at byte_offset (in hex)");
puts("\t-w offset -v value : Write value at byte_offset");
puts("\t-h : Print this help\n\n");
puts("Offsets and values are in hexadecimal number system.");
puts("The offset and value must be between 0 and 0xff.");
exit(exit_status);
}
void parse_opts(int argc, char *argv[])
{
int c;
while ((c = getopt(argc, argv, "rs:b:w:v:h")) != -1) {
switch (c) {
case 'r':
if (read_mode != -1)
usage(argv[0], EXIT_FAILURE);
read_mode = 1;
break;
case 's':
if (read_mode != -1 && read_mode != 1)
usage(argv[0], EXIT_FAILURE);
sleep_time = atoi(optarg);
if (sleep_time <= 0) {
				sleep_time = 0;
				fprintf(stderr, "Bad sleep time: %s\n", optarg);
				usage(argv[0], EXIT_FAILURE);
			}
break;
case 'b':
if (read_mode != -1)
usage(argv[0], EXIT_FAILURE);
read_mode = 1;
read_byte_offset = strtoul(optarg, NULL, 16);
break;
case 'w':
if (read_mode != -1)
usage(argv[0], EXIT_FAILURE);
read_mode = 0;
write_byte_offset = strtoul(optarg, NULL, 16);
break;
case 'v':
write_value = strtoul(optarg, NULL, 16);
break;
case 'h':
usage(argv[0], EXIT_SUCCESS);
default:
fprintf(stderr, "Unknown option!\n");
usage(argv[0], EXIT_FAILURE);
}
}
if (read_mode == 0) {
if (write_byte_offset < 0 ||
write_byte_offset >= EC_SPACE_SIZE) {
fprintf(stderr, "Wrong byte offset 0x%.2x, valid: "
"[0-0x%.2x]\n",
write_byte_offset, EC_SPACE_SIZE - 1);
usage(argv[0], EXIT_FAILURE);
}
		if (write_value < 0 ||
		    write_value > 0xff) {
			fprintf(stderr, "Wrong value 0x%.2x, valid: "
				"[0-0xff]\n", write_value);
usage(argv[0], EXIT_FAILURE);
}
}
if (read_mode == 1 && read_byte_offset != -1) {
		if (read_byte_offset < 0 ||
read_byte_offset >= EC_SPACE_SIZE) {
fprintf(stderr, "Wrong byte offset 0x%.2x, valid: "
"[0-0x%.2x]\n",
read_byte_offset, EC_SPACE_SIZE - 1);
usage(argv[0], EXIT_FAILURE);
}
}
/* Add additional parameter checks here */
}
void dump_ec(int fd)
{
char buf[EC_SPACE_SIZE];
char buf2[EC_SPACE_SIZE];
int byte_off, bytes_read;
bytes_read = read(fd, buf, EC_SPACE_SIZE);
if (bytes_read == -1)
err(EXIT_FAILURE, "Could not read from %s\n", SYSFS_PATH);
if (bytes_read != EC_SPACE_SIZE)
fprintf(stderr, "Could only read %d bytes\n", bytes_read);
printf(" 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F");
for (byte_off = 0; byte_off < bytes_read; byte_off++) {
if ((byte_off % 16) == 0)
printf("\n%.2X: ", byte_off);
printf(" %.2x ", (uint8_t)buf[byte_off]);
}
printf("\n");
if (!sleep_time)
return;
printf("\n");
lseek(fd, 0, SEEK_SET);
sleep(sleep_time);
bytes_read = read(fd, buf2, EC_SPACE_SIZE);
if (bytes_read == -1)
err(EXIT_FAILURE, "Could not read from %s\n", SYSFS_PATH);
if (bytes_read != EC_SPACE_SIZE)
fprintf(stderr, "Could only read %d bytes\n", bytes_read);
printf(" 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F");
for (byte_off = 0; byte_off < bytes_read; byte_off++) {
if ((byte_off % 16) == 0)
printf("\n%.2X: ", byte_off);
if (buf[byte_off] == buf2[byte_off])
printf(" %.2x ", (uint8_t)buf2[byte_off]);
else
printf("*%.2x ", (uint8_t)buf2[byte_off]);
}
printf("\n");
}
void read_ec_val(int fd, int byte_offset)
{
uint8_t buf;
int error;
error = lseek(fd, byte_offset, SEEK_SET);
if (error != byte_offset)
err(EXIT_FAILURE, "Cannot set offset to 0x%.2x", byte_offset);
error = read(fd, &buf, 1);
if (error != 1)
err(EXIT_FAILURE, "Could not read byte 0x%.2x from %s\n",
byte_offset, SYSFS_PATH);
printf("0x%.2x\n", buf);
return;
}
void write_ec_val(int fd, int byte_offset, uint8_t value)
{
int error;
error = lseek(fd, byte_offset, SEEK_SET);
if (error != byte_offset)
err(EXIT_FAILURE, "Cannot set offset to 0x%.2x", byte_offset);
error = write(fd, &value, 1);
if (error != 1)
err(EXIT_FAILURE, "Cannot write value 0x%.2x to offset 0x%.2x",
value, byte_offset);
}
int main(int argc, char *argv[])
{
int file_mode = O_RDONLY;
int fd;
parse_opts(argc, argv);
if (read_mode == 0)
file_mode = O_WRONLY;
else if (read_mode == 1)
file_mode = O_RDONLY;
else
usage(argv[0], EXIT_FAILURE);
fd = open(SYSFS_PATH, file_mode);
if (fd == -1)
err(EXIT_FAILURE, "%s", SYSFS_PATH);
if (read_mode)
if (read_byte_offset == -1)
dump_ec(fd);
else if (read_byte_offset < 0 ||
read_byte_offset >= EC_SPACE_SIZE)
usage(argv[0], EXIT_FAILURE);
else
read_ec_val(fd, read_byte_offset);
else
write_ec_val(fd, write_byte_offset, write_value);
close(fd);
exit(EXIT_SUCCESS);
}
| linux-master | tools/power/acpi/tools/ec/ec_access.c |
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
*
* Module Name: apmain - Main module for the acpidump utility
*
* Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
#define _DECLARE_GLOBALS
#include "acpidump.h"
/*
* acpidump - A portable utility for obtaining system ACPI tables and dumping
* them in an ASCII hex format suitable for binary extraction via acpixtract.
*
* Obtaining the system ACPI tables is an OS-specific operation.
*
* This utility can be ported to any host operating system by providing a
* module containing system-specific versions of these interfaces:
*
* acpi_os_get_table_by_address
* acpi_os_get_table_by_index
* acpi_os_get_table_by_name
*
* See the ACPICA Reference Guide for the exact definitions of these
* interfaces. Also, see these ACPICA source code modules for example
* implementations:
*
* source/os_specific/service_layers/oswintbl.c
* source/os_specific/service_layers/oslinuxtbl.c
*/
/* Local prototypes */
static void ap_display_usage(void);
static int ap_do_options(int argc, char **argv);
static int ap_insert_action(char *argument, u32 to_be_done);
/* Table for deferred actions from command line options */
struct ap_dump_action action_table[AP_MAX_ACTIONS];
u32 current_action = 0;
#define AP_UTILITY_NAME "ACPI Binary Table Dump Utility"
#define AP_SUPPORTED_OPTIONS "?a:bc:f:hn:o:r:sv^xz"
/******************************************************************************
*
* FUNCTION: ap_display_usage
*
* DESCRIPTION: Usage message for the acpi_dump utility
*
******************************************************************************/
static void ap_display_usage(void)
{
ACPI_USAGE_HEADER("acpidump [options]");
ACPI_OPTION("-b", "Dump tables to binary files");
ACPI_OPTION("-h -?", "This help message");
ACPI_OPTION("-o <File>", "Redirect output to file");
ACPI_OPTION("-r <Address>", "Dump tables from specified RSDP");
ACPI_OPTION("-s", "Print table summaries only");
ACPI_OPTION("-v", "Display version information");
ACPI_OPTION("-vd", "Display build date and time");
ACPI_OPTION("-z", "Verbose mode");
ACPI_USAGE_TEXT("\nTable Options:\n");
ACPI_OPTION("-a <Address>", "Get table via a physical address");
ACPI_OPTION("-c <on|off>", "Turning on/off customized table dumping");
ACPI_OPTION("-f <BinaryFile>", "Get table via a binary file");
ACPI_OPTION("-n <Signature>", "Get table via a name/signature");
ACPI_OPTION("-x", "Do not use but dump XSDT");
ACPI_OPTION("-x -x", "Do not use or dump XSDT");
ACPI_USAGE_TEXT("\n"
"Invocation without parameters dumps all available tables\n"
"Multiple mixed instances of -a, -f, and -n are supported\n\n");
}
/******************************************************************************
*
* FUNCTION: ap_insert_action
*
* PARAMETERS: argument - Pointer to the argument for this action
* to_be_done - What to do to process this action
*
* RETURN: Status
*
* DESCRIPTION: Add an action item to the action table
*
******************************************************************************/
static int ap_insert_action(char *argument, u32 to_be_done)
{
	/* Check for table overflow before inserting the action */

	if (current_action >= AP_MAX_ACTIONS) {
		fprintf(stderr, "Too many table options (max %d)\n",
			AP_MAX_ACTIONS);
		return (-1);
	}

	action_table[current_action].argument = argument;
	action_table[current_action].to_be_done = to_be_done;
	current_action++;

	return (0);
}
/******************************************************************************
*
* FUNCTION: ap_do_options
*
* PARAMETERS: argc/argv - Standard argc/argv
*
* RETURN: Status
*
* DESCRIPTION: Command line option processing. The main actions for getting
* and dumping tables are deferred via the action table.
*
*****************************************************************************/
static int ap_do_options(int argc, char **argv)
{
int j;
acpi_status status;
/* Command line options */
while ((j =
acpi_getopt(argc, argv, AP_SUPPORTED_OPTIONS)) != ACPI_OPT_END)
switch (j) {
/*
* Global options
*/
case 'b': /* Dump all input tables to binary files */
gbl_binary_mode = TRUE;
continue;
case 'c': /* Dump customized tables */
if (!strcmp(acpi_gbl_optarg, "on")) {
gbl_dump_customized_tables = TRUE;
} else if (!strcmp(acpi_gbl_optarg, "off")) {
gbl_dump_customized_tables = FALSE;
} else {
fprintf(stderr,
"%s: Cannot handle this switch, please use on|off\n",
acpi_gbl_optarg);
return (-1);
}
continue;
case 'h':
case '?':
ap_display_usage();
return (1);
case 'o': /* Redirect output to a single file */
if (ap_open_output_file(acpi_gbl_optarg)) {
return (-1);
}
continue;
case 'r': /* Dump tables from specified RSDP */
status =
acpi_ut_strtoul64(acpi_gbl_optarg, &gbl_rsdp_base);
if (ACPI_FAILURE(status)) {
fprintf(stderr,
"%s: Could not convert to a physical address\n",
acpi_gbl_optarg);
return (-1);
}
continue;
case 's': /* Print table summaries only */
gbl_summary_mode = TRUE;
continue;
case 'x': /* Do not use XSDT */
if (!acpi_gbl_do_not_use_xsdt) {
acpi_gbl_do_not_use_xsdt = TRUE;
} else {
gbl_do_not_dump_xsdt = TRUE;
}
continue;
case 'v': /* -v: (Version): signon already emitted, just exit */
switch (acpi_gbl_optarg[0]) {
case '^': /* -v: (Version) */
fprintf(stderr,
ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
return (1);
case 'd':
fprintf(stderr,
ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
printf(ACPI_COMMON_BUILD_TIME);
return (1);
default:
printf("Unknown option: -v%s\n",
acpi_gbl_optarg);
return (-1);
}
break;
case 'z': /* Verbose mode */
gbl_verbose_mode = TRUE;
fprintf(stderr, ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
continue;
/*
* Table options
*/
case 'a': /* Get table by physical address */
if (ap_insert_action
(acpi_gbl_optarg, AP_DUMP_TABLE_BY_ADDRESS)) {
return (-1);
}
break;
case 'f': /* Get table from a file */
if (ap_insert_action
(acpi_gbl_optarg, AP_DUMP_TABLE_BY_FILE)) {
return (-1);
}
break;
case 'n': /* Get table by input name (signature) */
if (ap_insert_action
(acpi_gbl_optarg, AP_DUMP_TABLE_BY_NAME)) {
return (-1);
}
break;
default:
ap_display_usage();
return (-1);
}
/* If there are no actions, this means "get/dump all tables" */
if (current_action == 0) {
if (ap_insert_action(NULL, AP_DUMP_ALL_TABLES)) {
return (-1);
}
}
return (0);
}
/******************************************************************************
*
* FUNCTION: main
*
* PARAMETERS: argc/argv - Standard argc/argv
*
* RETURN: Status
*
* DESCRIPTION: C main function for acpidump utility
*
******************************************************************************/
#if !defined(_GNU_EFI) && !defined(_EDK2_EFI)
int ACPI_SYSTEM_XFACE main(int argc, char *argv[])
#else
int ACPI_SYSTEM_XFACE acpi_main(int argc, char *argv[])
#endif
{
int status = 0;
struct ap_dump_action *action;
u32 file_size;
u32 i;
ACPI_DEBUG_INITIALIZE(); /* For debug version only */
acpi_os_initialize();
gbl_output_file = ACPI_FILE_OUT;
acpi_gbl_integer_byte_width = 8;
/* Process command line options */
status = ap_do_options(argc, argv);
if (status > 0) {
return (0);
}
if (status < 0) {
return (status);
}
/* Get/dump ACPI table(s) as requested */
for (i = 0; i < current_action; i++) {
action = &action_table[i];
switch (action->to_be_done) {
case AP_DUMP_ALL_TABLES:
status = ap_dump_all_tables();
break;
case AP_DUMP_TABLE_BY_ADDRESS:
status = ap_dump_table_by_address(action->argument);
break;
case AP_DUMP_TABLE_BY_NAME:
status = ap_dump_table_by_name(action->argument);
break;
case AP_DUMP_TABLE_BY_FILE:
status = ap_dump_table_from_file(action->argument);
break;
default:
fprintf(stderr,
"Internal error, invalid action: 0x%X\n",
action->to_be_done);
return (-1);
}
if (status) {
return (status);
}
}
if (gbl_output_filename) {
if (gbl_verbose_mode) {
/* Summary for the output file */
file_size = cm_get_file_size(gbl_output_file);
fprintf(stderr,
"Output file %s contains 0x%X (%u) bytes\n\n",
gbl_output_filename, file_size, file_size);
}
fclose(gbl_output_file);
}
return (status);
}
| linux-master | tools/power/acpi/tools/acpidump/apmain.c |
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
*
* Module Name: apdump - Dump routines for ACPI tables (acpidump)
*
* Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
#include "acpidump.h"
/* Local prototypes */
static int
ap_dump_table_buffer(struct acpi_table_header *table,
u32 instance, acpi_physical_address address);
/******************************************************************************
*
* FUNCTION: ap_is_valid_header
*
* PARAMETERS: table - Pointer to table to be validated
*
* RETURN: TRUE if the header appears to be valid. FALSE otherwise
*
* DESCRIPTION: Check for a valid ACPI table header
*
******************************************************************************/
u8 ap_is_valid_header(struct acpi_table_header *table)
{
if (!ACPI_VALIDATE_RSDP_SIG(table->signature)) {
/* Make sure signature is all ASCII and a valid ACPI name */
if (!acpi_ut_valid_nameseg(table->signature)) {
fprintf(stderr,
"Table signature (0x%8.8X) is invalid\n",
*(u32 *)table->signature);
return (FALSE);
}
/* Check for minimum table length */
if (table->length < sizeof(struct acpi_table_header)) {
fprintf(stderr, "Table length (0x%8.8X) is invalid\n",
table->length);
return (FALSE);
}
}
return (TRUE);
}
/******************************************************************************
*
* FUNCTION: ap_is_valid_checksum
*
* PARAMETERS: table - Pointer to table to be validated
*
* RETURN: Always AE_OK. A checksum mismatch is reported as a warning only.
*
* DESCRIPTION: Check for a valid ACPI table checksum.
*
******************************************************************************/
u8 ap_is_valid_checksum(struct acpi_table_header *table)
{
acpi_status status;
struct acpi_table_rsdp *rsdp;
if (ACPI_VALIDATE_RSDP_SIG(table->signature)) {
/*
* Checksum for RSDP.
* Note: Other checksums are computed during the table dump.
*/
rsdp = ACPI_CAST_PTR(struct acpi_table_rsdp, table);
status = acpi_tb_validate_rsdp(rsdp);
} else {
/* We don't have to check for a CDAT here, since CDAT is not in the RSDT/XSDT */
status = acpi_ut_verify_checksum(table, table->length);
}
if (ACPI_FAILURE(status)) {
fprintf(stderr, "%4.4s: Warning: wrong checksum in table\n",
table->signature);
}
return (AE_OK);
}
/******************************************************************************
*
* FUNCTION: ap_get_table_length
*
* PARAMETERS: table - Pointer to the table
*
* RETURN: Table length
*
* DESCRIPTION: Obtain table length according to table signature.
*
******************************************************************************/
u32 ap_get_table_length(struct acpi_table_header *table)
{
struct acpi_table_rsdp *rsdp;
/* Check if table is valid */
if (!ap_is_valid_header(table)) {
return (0);
}
if (ACPI_VALIDATE_RSDP_SIG(table->signature)) {
rsdp = ACPI_CAST_PTR(struct acpi_table_rsdp, table);
return (acpi_tb_get_rsdp_length(rsdp));
}
/* Normal ACPI table */
return (table->length);
}
/******************************************************************************
*
* FUNCTION: ap_dump_table_buffer
*
* PARAMETERS: table - ACPI table to be dumped
* instance - ACPI table instance no. to be dumped
* address - Physical address of the table
*
* RETURN: None
*
* DESCRIPTION: Dump an ACPI table in standard ASCII hex format, with a
* header that is compatible with the acpixtract utility.
*
******************************************************************************/
static int
ap_dump_table_buffer(struct acpi_table_header *table,
u32 instance, acpi_physical_address address)
{
u32 table_length;
table_length = ap_get_table_length(table);
/* Print only the header if requested */
if (gbl_summary_mode) {
acpi_tb_print_table_header(address, table);
return (0);
}
/* Dump to binary file if requested */
if (gbl_binary_mode) {
return (ap_write_to_binary_file(table, instance));
}
/*
* Dump the table with header for use with acpixtract utility.
* Note: simplest to just always emit a 64-bit address; the acpixtract
* utility can handle this.
*/
fprintf(gbl_output_file, "%4.4s @ 0x%8.8X%8.8X\n",
table->signature, ACPI_FORMAT_UINT64(address));
acpi_ut_dump_buffer_to_file(gbl_output_file,
ACPI_CAST_PTR(u8, table), table_length,
DB_BYTE_DISPLAY, 0);
fprintf(gbl_output_file, "\n");
return (0);
}
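/*
 * Illustrative output of the hex-dump path above (signature, address and
 * bytes are made up):
 *   SSDT @ 0x00000000BFE8A000
 *     0000: 53 53 44 54 24 00 00 00 ...
 */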
/******************************************************************************
*
* FUNCTION: ap_dump_all_tables
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Get all tables from the RSDT/XSDT (or at least all of the
* tables that we can possibly get).
*
******************************************************************************/
int ap_dump_all_tables(void)
{
struct acpi_table_header *table;
u32 instance = 0;
acpi_physical_address address;
acpi_status status;
int table_status;
u32 i;
/* Get and dump all available ACPI tables */
for (i = 0; i < AP_MAX_ACPI_FILES; i++) {
status =
acpi_os_get_table_by_index(i, &table, &instance, &address);
if (ACPI_FAILURE(status)) {
/* AE_LIMIT means that no more tables are available */
if (status == AE_LIMIT) {
return (0);
} else if (i == 0) {
fprintf(stderr,
"Could not get ACPI tables, %s\n",
acpi_format_exception(status));
return (-1);
} else {
fprintf(stderr,
"Could not get ACPI table at index %u, %s\n",
i, acpi_format_exception(status));
continue;
}
}
table_status = ap_dump_table_buffer(table, instance, address);
ACPI_FREE(table);
if (table_status) {
break;
}
}
/* Something seriously bad happened if the loop terminates here */
return (-1);
}
/******************************************************************************
*
* FUNCTION: ap_dump_table_by_address
*
* PARAMETERS: ascii_address - Address for requested ACPI table
*
* RETURN: Status
*
* DESCRIPTION: Get an ACPI table via a physical address and dump it.
*
******************************************************************************/
int ap_dump_table_by_address(char *ascii_address)
{
acpi_physical_address address;
struct acpi_table_header *table;
acpi_status status;
int table_status;
u64 long_address;
/* Convert argument to an integer physical address */
status = acpi_ut_strtoul64(ascii_address, &long_address);
if (ACPI_FAILURE(status)) {
fprintf(stderr, "%s: Could not convert to a physical address\n",
ascii_address);
return (-1);
}
address = (acpi_physical_address)long_address;
status = acpi_os_get_table_by_address(address, &table);
if (ACPI_FAILURE(status)) {
fprintf(stderr, "Could not get table at 0x%8.8X%8.8X, %s\n",
ACPI_FORMAT_UINT64(address),
acpi_format_exception(status));
return (-1);
}
table_status = ap_dump_table_buffer(table, 0, address);
ACPI_FREE(table);
return (table_status);
}
/******************************************************************************
*
* FUNCTION: ap_dump_table_by_name
*
* PARAMETERS: signature - Requested ACPI table signature
*
* RETURN: Status
*
* DESCRIPTION: Get an ACPI table via a signature and dump it. Handles
* multiple tables with the same signature (SSDTs).
*
******************************************************************************/
int ap_dump_table_by_name(char *signature)
{
char local_signature[ACPI_NAMESEG_SIZE + 1];
u32 instance;
struct acpi_table_header *table;
acpi_physical_address address;
acpi_status status;
int table_status;
if (strlen(signature) != ACPI_NAMESEG_SIZE) {
fprintf(stderr,
"Invalid table signature [%s]: must be exactly 4 characters\n",
signature);
return (-1);
}
/* Table signatures are expected to be uppercase */
strcpy(local_signature, signature);
acpi_ut_strupr(local_signature);
/* To be friendly, handle tables whose signatures do not match the name */
if (ACPI_COMPARE_NAMESEG(local_signature, "FADT")) {
strcpy(local_signature, ACPI_SIG_FADT);
} else if (ACPI_COMPARE_NAMESEG(local_signature, "MADT")) {
strcpy(local_signature, ACPI_SIG_MADT);
}
/* Dump all instances of this signature (to handle multiple SSDTs) */
for (instance = 0; instance < AP_MAX_ACPI_FILES; instance++) {
status = acpi_os_get_table_by_name(local_signature, instance,
&table, &address);
if (ACPI_FAILURE(status)) {
/* AE_LIMIT means that no more tables are available */
if (status == AE_LIMIT) {
return (0);
}
fprintf(stderr,
"Could not get ACPI table with signature [%s], %s\n",
local_signature, acpi_format_exception(status));
return (-1);
}
table_status = ap_dump_table_buffer(table, instance, address);
ACPI_FREE(table);
if (table_status) {
break;
}
}
/* Something seriously bad happened if the loop terminates here */
return (-1);
}
/******************************************************************************
*
* FUNCTION: ap_dump_table_from_file
*
* PARAMETERS: pathname - File containing the binary ACPI table
*
* RETURN: Status
*
* DESCRIPTION: Dump an ACPI table from a binary file
*
******************************************************************************/
int ap_dump_table_from_file(char *pathname)
{
struct acpi_table_header *table;
u32 file_size = 0;
int table_status = -1;
/* Get the entire ACPI table from the file */
table = ap_get_table_from_file(pathname, &file_size);
if (!table) {
return (-1);
}
if (!acpi_ut_valid_nameseg(table->signature)) {
fprintf(stderr,
"No valid ACPI signature was found in input file %s\n",
pathname);
}
/* File must be at least as long as the table length */
if (table->length > file_size) {
fprintf(stderr,
"Table length (0x%X) is too large for input file (0x%X) %s\n",
table->length, file_size, pathname);
goto exit;
}
if (gbl_verbose_mode) {
fprintf(stderr,
"Input file: %s contains table [%4.4s], 0x%X (%u) bytes\n",
pathname, table->signature, file_size, file_size);
}
table_status = ap_dump_table_buffer(table, 0, 0);
exit:
ACPI_FREE(table);
return (table_status);
}
| linux-master | tools/power/acpi/tools/acpidump/apdump.c |
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
*
* Module Name: apfiles - File-related functions for acpidump utility
*
* Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
#include "acpidump.h"
/* Local prototypes */
static int ap_is_existing_file(char *pathname);
/******************************************************************************
*
* FUNCTION: ap_is_existing_file
*
* PARAMETERS: pathname - Output filename
*
* RETURN: 0 on success
*
* DESCRIPTION: Query for file overwrite if it already exists.
*
******************************************************************************/
static int ap_is_existing_file(char *pathname)
{
#if !defined(_GNU_EFI) && !defined(_EDK2_EFI)
struct stat stat_info;
int in_char;
if (!stat(pathname, &stat_info)) {
fprintf(stderr,
"Target path already exists, overwrite? [y|n] ");
in_char = fgetc(stdin);
if (in_char == '\n') {
in_char = fgetc(stdin);
}
if (in_char != 'y' && in_char != 'Y') {
return (-1);
}
}
#endif
return (0);
}
/******************************************************************************
*
* FUNCTION: ap_open_output_file
*
* PARAMETERS: pathname - Output filename
*
* RETURN: Open file handle
*
* DESCRIPTION: Open a text output file for acpidump. Checks if file already
* exists.
*
******************************************************************************/
int ap_open_output_file(char *pathname)
{
ACPI_FILE file;
/* If file exists, prompt for overwrite */
if (ap_is_existing_file(pathname) != 0) {
return (-1);
}
/* Point stdout to the file */
file = fopen(pathname, "w");
if (!file) {
fprintf(stderr, "Could not open output file: %s\n", pathname);
return (-1);
}
/* Save the file and path */
gbl_output_file = file;
gbl_output_filename = pathname;
return (0);
}
/******************************************************************************
*
* FUNCTION: ap_write_to_binary_file
*
* PARAMETERS: table - ACPI table to be written
* instance - ACPI table instance no. to be written
*
* RETURN: Status
*
* DESCRIPTION: Write an ACPI table to a binary file. Builds the output
* filename from the table signature.
*
******************************************************************************/
int ap_write_to_binary_file(struct acpi_table_header *table, u32 instance)
{
char filename[ACPI_NAMESEG_SIZE + 16];
char instance_str[16];
ACPI_FILE file;
acpi_size actual;
u32 table_length;
/* Obtain table length */
table_length = ap_get_table_length(table);
/* Construct lower-case filename from the table local signature */
if (ACPI_VALIDATE_RSDP_SIG(table->signature)) {
ACPI_COPY_NAMESEG(filename, ACPI_RSDP_NAME);
} else {
ACPI_COPY_NAMESEG(filename, table->signature);
}
filename[0] = (char)tolower((int)filename[0]);
filename[1] = (char)tolower((int)filename[1]);
filename[2] = (char)tolower((int)filename[2]);
filename[3] = (char)tolower((int)filename[3]);
filename[ACPI_NAMESEG_SIZE] = 0;
/* Handle multiple SSDTs - create a different filename for each instance */
if (instance > 0) {
snprintf(instance_str, sizeof(instance_str), "%u", instance);
strcat(filename, instance_str);
}
strcat(filename, FILE_SUFFIX_BINARY_TABLE);
if (gbl_verbose_mode) {
fprintf(stderr,
"Writing [%4.4s] to binary file: %s 0x%X (%u) bytes\n",
table->signature, filename, table->length,
table->length);
}
/* Open the file and dump the entire table in binary mode */
file = fopen(filename, "wb");
if (!file) {
fprintf(stderr, "Could not open output file: %s\n", filename);
return (-1);
}
actual = fwrite(table, 1, table_length, file);
if (actual != table_length) {
fprintf(stderr, "Error writing binary output file: %s\n",
filename);
fclose(file);
return (-1);
}
fclose(file);
return (0);
}
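/*
 * Example of the filename construction above (assuming
 * FILE_SUFFIX_BINARY_TABLE is ".dat"): the first SSDT instance is written
 * to "ssdt.dat", the second (instance == 1) to "ssdt1.dat", and so on.
 */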
/******************************************************************************
*
* FUNCTION: ap_get_table_from_file
*
* PARAMETERS: pathname - File containing the binary ACPI table
* out_file_size - Where the file size is returned
*
* RETURN: Buffer containing the ACPI table. NULL on error.
*
* DESCRIPTION: Open a file and read it entirely into a new buffer
*
******************************************************************************/
struct acpi_table_header *ap_get_table_from_file(char *pathname,
u32 *out_file_size)
{
struct acpi_table_header *buffer = NULL;
ACPI_FILE file;
u32 file_size;
acpi_size actual;
/* Must use binary mode */
file = fopen(pathname, "rb");
if (!file) {
fprintf(stderr, "Could not open input file: %s\n", pathname);
return (NULL);
}
/* Need file size to allocate a buffer */
file_size = cm_get_file_size(file);
if (file_size == ACPI_UINT32_MAX) {
fprintf(stderr,
"Could not get input file size: %s\n", pathname);
goto cleanup;
}
/* Allocate a buffer for the entire file */
buffer = ACPI_ALLOCATE_ZEROED(file_size);
if (!buffer) {
fprintf(stderr,
"Could not allocate file buffer of size: %u\n",
file_size);
goto cleanup;
}
/* Read the entire file */
actual = fread(buffer, 1, file_size, file);
if (actual != file_size) {
fprintf(stderr, "Could not read input file: %s\n", pathname);
ACPI_FREE(buffer);
buffer = NULL;
goto cleanup;
}
*out_file_size = file_size;
cleanup:
fclose(file);
return (buffer);
}
| linux-master | tools/power/acpi/tools/acpidump/apfiles.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Platform Firmware Runtime Update tool to do Management
* Mode code injection/driver update and telemetry retrieval.
*
* This tool uses the interfaces provided by pfr_update and
* pfr_telemetry drivers. These interfaces are exposed via
* /dev/pfr_update and /dev/pfr_telemetry. Write operation
* on the /dev/pfr_update is to load the EFI capsule into
* kernel space. Mmap/read operations on /dev/pfr_telemetry
* could be used to read the telemetry data to user space.
*/
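/*
 * Illustrative invocations (option letters match the parser below; the
 * capsule filename is hypothetical):
 *   pfrut -q                query the update capability
 *   pfrut -l code.cap -u    load a capsule, then stage and activate it
 *   pfrut -G                print telemetry log information
 *   pfrut -T 1 -L 2 -R      select the history log at info level, read it
 */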
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <getopt.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <uuid/uuid.h>
#include PFRUT_HEADER
char *capsule_name;
int action, query_cap, log_type, log_level, log_read, log_getinfo,
revid, log_revid;
int set_log_level, set_log_type,
set_revid, set_log_revid;
char *progname;
#define LOG_ERR 0
#define LOG_WARN 1
#define LOG_INFO 2
#define LOG_VERB 4
#define LOG_EXEC_IDX 0
#define LOG_HISTORY_IDX 1
#define REVID_1 1
#define REVID_2 2
static int valid_log_level(int level)
{
return level == LOG_ERR || level == LOG_WARN ||
level == LOG_INFO || level == LOG_VERB;
}
static int valid_log_type(int type)
{
return type == LOG_EXEC_IDX || type == LOG_HISTORY_IDX;
}
static inline int valid_log_revid(int id)
{
return id == REVID_1 || id == REVID_2;
}
static void help(void)
{
fprintf(stderr,
"usage: %s [OPTIONS]\n"
" code injection:\n"
" -l, --load\n"
" -s, --stage\n"
" -a, --activate\n"
" -u, --update [stage and activate]\n"
" -q, --query\n"
" -d, --revid update\n"
" telemetry:\n"
" -G, --getloginfo\n"
" -T, --type(0:execution, 1:history)\n"
" -L, --level(0, 1, 2, 4)\n"
" -R, --read\n"
" -D, --revid log\n",
progname);
}
char *option_string = "l:sauqd:GT:L:RD:h";
static struct option long_options[] = {
{"load", required_argument, 0, 'l'},
{"stage", no_argument, 0, 's'},
{"activate", no_argument, 0, 'a'},
{"update", no_argument, 0, 'u'},
{"query", no_argument, 0, 'q'},
{"getloginfo", no_argument, 0, 'G'},
{"type", required_argument, 0, 'T'},
{"level", required_argument, 0, 'L'},
{"read", no_argument, 0, 'R'},
{"setrev", required_argument, 0, 'd'},
{"setrevlog", required_argument, 0, 'D'},
{"help", no_argument, 0, 'h'},
{}
};
static void parse_options(int argc, char **argv)
{
int option_index = 0;
char *pathname, *endptr;
int opt;
pathname = strdup(argv[0]);
progname = basename(pathname);
while ((opt = getopt_long_only(argc, argv, option_string,
long_options, &option_index)) != -1) {
switch (opt) {
case 'l':
capsule_name = optarg;
break;
case 's':
action = 1;
break;
case 'a':
action = 2;
break;
case 'u':
action = 3;
break;
case 'q':
query_cap = 1;
break;
case 'G':
log_getinfo = 1;
break;
case 'T':
log_type = strtol(optarg, &endptr, 0);
if (*endptr || (log_type != 0 && log_type != 1)) {
printf("Number expected: type(0:execution, 1:history) - Quit.\n");
exit(1);
}
set_log_type = 1;
break;
case 'L':
log_level = strtol(optarg, &endptr, 0);
if (*endptr ||
(log_level != 0 && log_level != 1 &&
log_level != 2 && log_level != 4)) {
printf("Number expected: level(0, 1, 2, 4) - Quit.\n");
exit(1);
}
set_log_level = 1;
break;
case 'R':
log_read = 1;
break;
case 'd':
revid = atoi(optarg);
set_revid = 1;
break;
case 'D':
log_revid = atoi(optarg);
set_log_revid = 1;
break;
case 'h':
help();
exit(0);
default:
break;
}
}
}
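/* Print the update capability info returned by the PFRU_IOC_QUERY_CAP ioctl */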
void print_cap(struct pfru_update_cap_info *cap)
{
char *uuid;
uuid = malloc(37);
if (!uuid) {
perror("Can not allocate uuid buffer\n");
exit(1);
}
uuid_unparse(cap->code_type, uuid);
printf("code injection image type:%s\n", uuid);
printf("fw_version:%d\n", cap->fw_version);
printf("code_rt_version:%d\n", cap->code_rt_version);
uuid_unparse(cap->drv_type, uuid);
printf("driver update image type:%s\n", uuid);
printf("drv_rt_version:%d\n", cap->drv_rt_version);
printf("drv_svn:%d\n", cap->drv_svn);
uuid_unparse(cap->platform_id, uuid);
printf("platform id:%s\n", uuid);
uuid_unparse(cap->oem_id, uuid);
printf("oem id:%s\n", uuid);
printf("oem information length:%d\n", cap->oem_info_len);
free(uuid);
}
int main(int argc, char *argv[])
{
int fd_update, fd_update_log, fd_capsule;
struct pfrt_log_data_info data_info;
struct pfrt_log_info info;
struct pfru_update_cap_info cap;
void *addr_map_capsule;
struct stat st;
char *log_buf;
int ret;
if (getuid() != 0) {
printf("Please run the tool as root - Exiting.\n");
return 1;
}
parse_options(argc, argv);
fd_update = open("/dev/acpi_pfr_update0", O_RDWR);
if (fd_update < 0) {
printf("PFRU device not supported - Quit...\n");
return 1;
}
fd_update_log = open("/dev/acpi_pfr_telemetry0", O_RDWR);
if (fd_update_log < 0) {
printf("PFRT device not supported - Quit...\n");
return 1;
}
if (query_cap) {
ret = ioctl(fd_update, PFRU_IOC_QUERY_CAP, &cap);
if (ret)
perror("Query Update Capability info failed.");
else
print_cap(&cap);
close(fd_update);
close(fd_update_log);
return ret;
}
if (log_getinfo) {
ret = ioctl(fd_update_log, PFRT_LOG_IOC_GET_DATA_INFO, &data_info);
if (ret) {
perror("Get telemetry data info failed.");
close(fd_update);
close(fd_update_log);
return 1;
}
ret = ioctl(fd_update_log, PFRT_LOG_IOC_GET_INFO, &info);
if (ret) {
perror("Get telemetry info failed.");
close(fd_update);
close(fd_update_log);
return 1;
}
printf("log_level:%d\n", info.log_level);
printf("log_type:%d\n", info.log_type);
printf("log_revid:%d\n", info.log_revid);
printf("max_data_size:%d\n", data_info.max_data_size);
printf("chunk1_size:%d\n", data_info.chunk1_size);
printf("chunk2_size:%d\n", data_info.chunk2_size);
printf("rollover_cnt:%d\n", data_info.rollover_cnt);
printf("reset_cnt:%d\n", data_info.reset_cnt);
		close(fd_update);
		close(fd_update_log);
		return 0;
}
info.log_level = -1;
info.log_type = -1;
info.log_revid = -1;
if (set_log_level) {
if (!valid_log_level(log_level)) {
printf("Invalid log level %d\n",
log_level);
} else {
info.log_level = log_level;
}
}
if (set_log_type) {
if (!valid_log_type(log_type)) {
printf("Invalid log type %d\n",
log_type);
} else {
info.log_type = log_type;
}
}
if (set_log_revid) {
if (!valid_log_revid(log_revid)) {
printf("Invalid log revid %d, unchanged.\n",
log_revid);
} else {
info.log_revid = log_revid;
}
}
ret = ioctl(fd_update_log, PFRT_LOG_IOC_SET_INFO, &info);
if (ret) {
perror("Log information set failed.(log_level, log_type, log_revid)");
close(fd_update);
close(fd_update_log);
return 1;
}
if (set_revid) {
ret = ioctl(fd_update, PFRU_IOC_SET_REV, &revid);
if (ret) {
perror("pfru update revid set failed");
close(fd_update);
close(fd_update_log);
return 1;
}
printf("pfru update revid set to %d\n", revid);
}
if (capsule_name) {
fd_capsule = open(capsule_name, O_RDONLY);
if (fd_capsule < 0) {
perror("Can not open capsule file...");
close(fd_update);
close(fd_update_log);
return 1;
}
if (fstat(fd_capsule, &st) < 0) {
perror("Can not fstat capsule file...");
close(fd_capsule);
close(fd_update);
close(fd_update_log);
return 1;
}
addr_map_capsule = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED,
fd_capsule, 0);
if (addr_map_capsule == MAP_FAILED) {
perror("Failed to mmap capsule file.");
close(fd_capsule);
close(fd_update);
close(fd_update_log);
return 1;
}
ret = write(fd_update, (char *)addr_map_capsule, st.st_size);
printf("Load %d bytes of capsule file into the system\n",
ret);
if (ret == -1) {
perror("Failed to load capsule file");
close(fd_capsule);
close(fd_update);
close(fd_update_log);
return 1;
}
munmap(addr_map_capsule, st.st_size);
close(fd_capsule);
printf("Load done.\n");
}
if (action) {
if (action == 1) {
ret = ioctl(fd_update, PFRU_IOC_STAGE, NULL);
} else if (action == 2) {
ret = ioctl(fd_update, PFRU_IOC_ACTIVATE, NULL);
} else if (action == 3) {
ret = ioctl(fd_update, PFRU_IOC_STAGE_ACTIVATE, NULL);
} else {
close(fd_update);
close(fd_update_log);
return 1;
}
printf("Update finished, return %d\n", ret);
}
close(fd_update);
if (log_read) {
void *p_mmap;
int max_data_sz;
ret = ioctl(fd_update_log, PFRT_LOG_IOC_GET_DATA_INFO, &data_info);
if (ret) {
perror("Get telemetry data info failed.");
close(fd_update_log);
return 1;
}
max_data_sz = data_info.max_data_size;
if (!max_data_sz) {
printf("No telemetry data available.\n");
close(fd_update_log);
return 1;
}
log_buf = malloc(max_data_sz + 1);
if (!log_buf) {
perror("log_buf allocate failed.");
close(fd_update_log);
return 1;
}
p_mmap = mmap(NULL, max_data_sz, PROT_READ, MAP_SHARED, fd_update_log, 0);
if (p_mmap == MAP_FAILED) {
perror("mmap error.");
close(fd_update_log);
return 1;
}
memcpy(log_buf, p_mmap, max_data_sz);
log_buf[max_data_sz] = '\0';
printf("%s\n", log_buf);
free(log_buf);
munmap(p_mmap, max_data_sz);
}
close(fd_update_log);
return 0;
}
| linux-master | tools/power/acpi/tools/pfrut/pfrut.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ACPI AML interfacing userspace utility
*
* Copyright (C) 2015, Intel Corporation
* Authors: Lv Zheng <[email protected]>
*/
#include <acpi/acpi.h>
/* Headers not included by include/acpi/platform/aclinux.h */
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <error.h>
#include <stdbool.h>
#include <fcntl.h>
#include <assert.h>
#include <sys/select.h>
#include "../../../../../include/linux/circ_buf.h"
#define ACPI_AML_FILE "/sys/kernel/debug/acpi/acpidbg"
#define ACPI_AML_SEC_TICK 1
#define ACPI_AML_USEC_PEEK 200
#define ACPI_AML_BUF_SIZE 4096
#define ACPI_AML_BATCH_WRITE_CMD 0x00 /* Write command to kernel */
#define ACPI_AML_BATCH_READ_LOG 0x01 /* Read log from kernel */
#define ACPI_AML_BATCH_WRITE_LOG 0x02 /* Write log to console */
#define ACPI_AML_LOG_START 0x00
#define ACPI_AML_PROMPT_START 0x01
#define ACPI_AML_PROMPT_STOP 0x02
#define ACPI_AML_LOG_STOP 0x03
#define ACPI_AML_PROMPT_ROLL 0x04
#define ACPI_AML_INTERACTIVE 0x00
#define ACPI_AML_BATCH 0x01
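/*
 * Sketch of the batch log state machine implemented in
 * acpi_aml_read_batch_log() below: LOG_START copies characters until a
 * newline arrives, PROMPT_START checks whether the next character is a
 * debugger prompt, PROMPT_STOP waits for the trailing space that confirms
 * the prompt (and ends the batch), and PROMPT_ROLL replays a character
 * that turned out not to be part of a prompt.
 */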
#define circ_count(circ) \
(CIRC_CNT((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_count_to_end(circ) \
(CIRC_CNT_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_space(circ) \
(CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_space_to_end(circ) \
(CIRC_SPACE_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define acpi_aml_cmd_count() circ_count(&acpi_aml_cmd_crc)
#define acpi_aml_log_count() circ_count(&acpi_aml_log_crc)
#define acpi_aml_cmd_space() circ_space(&acpi_aml_cmd_crc)
#define acpi_aml_log_space() circ_space(&acpi_aml_log_crc)
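/*
 * These wrappers rely on ACPI_AML_BUF_SIZE being a power of two, so the
 * CIRC_* helpers reduce to masked pointer arithmetic. Worked example:
 * with head == 10 and tail == 4090, circ_count() is
 * (10 - 4090) & 4095 == 16 bytes queued, and circ_space() is
 * (4090 - 10 - 1) & 4095 == 4079 bytes free.
 */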
#define ACPI_AML_DO(_fd, _op, _buf, _ret) \
do { \
_ret = acpi_aml_##_op(_fd, &acpi_aml_##_buf##_crc); \
if (_ret == 0) { \
fprintf(stderr, \
"%s %s pipe closed.\n", #_buf, #_op); \
return; \
} \
} while (0)
#define ACPI_AML_BATCH_DO(_fd, _op, _buf, _ret) \
do { \
_ret = acpi_aml_##_op##_batch_##_buf(_fd, \
&acpi_aml_##_buf##_crc); \
if (_ret == 0) \
return; \
} while (0)
static char acpi_aml_cmd_buf[ACPI_AML_BUF_SIZE];
static char acpi_aml_log_buf[ACPI_AML_BUF_SIZE];
static struct circ_buf acpi_aml_cmd_crc = {
.buf = acpi_aml_cmd_buf,
.head = 0,
.tail = 0,
};
static struct circ_buf acpi_aml_log_crc = {
.buf = acpi_aml_log_buf,
.head = 0,
.tail = 0,
};
static const char *acpi_aml_file_path = ACPI_AML_FILE;
static unsigned long acpi_aml_mode = ACPI_AML_INTERACTIVE;
static bool acpi_aml_exit;
static bool acpi_aml_batch_drain;
static unsigned long acpi_aml_batch_state;
static char acpi_aml_batch_prompt;
static char acpi_aml_batch_roll;
static unsigned long acpi_aml_log_state;
static char *acpi_aml_batch_cmd = NULL;
static char *acpi_aml_batch_pos = NULL;
static int acpi_aml_set_fl(int fd, int flags)
{
int ret;
ret = fcntl(fd, F_GETFL, 0);
if (ret < 0) {
perror("fcntl(F_GETFL)");
return ret;
}
flags |= ret;
ret = fcntl(fd, F_SETFL, flags);
if (ret < 0) {
perror("fcntl(F_SETFL)");
return ret;
}
return ret;
}
static int acpi_aml_set_fd(int fd, int maxfd, fd_set *set)
{
if (fd > maxfd)
maxfd = fd;
FD_SET(fd, set);
return maxfd;
}
static int acpi_aml_read(int fd, struct circ_buf *crc)
{
char *p;
int len;
p = &crc->buf[crc->head];
len = circ_space_to_end(crc);
len = read(fd, p, len);
if (len < 0)
perror("read");
else if (len > 0)
crc->head = (crc->head + len) & (ACPI_AML_BUF_SIZE - 1);
return len;
}
static int acpi_aml_read_batch_cmd(int unused, struct circ_buf *crc)
{
char *p;
int len;
int remained = strlen(acpi_aml_batch_pos);
p = &crc->buf[crc->head];
len = circ_space_to_end(crc);
if (len > remained) {
memcpy(p, acpi_aml_batch_pos, remained);
acpi_aml_batch_pos += remained;
len = remained;
} else {
memcpy(p, acpi_aml_batch_pos, len);
acpi_aml_batch_pos += len;
}
if (len > 0)
crc->head = (crc->head + len) & (ACPI_AML_BUF_SIZE - 1);
return len;
}
static int acpi_aml_read_batch_log(int fd, struct circ_buf *crc)
{
char *p;
int len;
int ret = 0;
p = &crc->buf[crc->head];
len = circ_space_to_end(crc);
while (ret < len && acpi_aml_log_state != ACPI_AML_LOG_STOP) {
if (acpi_aml_log_state == ACPI_AML_PROMPT_ROLL) {
*p = acpi_aml_batch_roll;
len = 1;
crc->head = (crc->head + 1) & (ACPI_AML_BUF_SIZE - 1);
ret += 1;
acpi_aml_log_state = ACPI_AML_LOG_START;
} else {
len = read(fd, p, 1);
if (len <= 0) {
if (len < 0)
perror("read");
ret = len;
break;
}
}
switch (acpi_aml_log_state) {
case ACPI_AML_LOG_START:
if (*p == '\n')
acpi_aml_log_state = ACPI_AML_PROMPT_START;
crc->head = (crc->head + 1) & (ACPI_AML_BUF_SIZE - 1);
ret += 1;
break;
case ACPI_AML_PROMPT_START:
if (*p == ACPI_DEBUGGER_COMMAND_PROMPT ||
*p == ACPI_DEBUGGER_EXECUTE_PROMPT) {
acpi_aml_batch_prompt = *p;
acpi_aml_log_state = ACPI_AML_PROMPT_STOP;
} else {
if (*p != '\n')
acpi_aml_log_state = ACPI_AML_LOG_START;
crc->head = (crc->head + 1) & (ACPI_AML_BUF_SIZE - 1);
ret += 1;
}
break;
case ACPI_AML_PROMPT_STOP:
if (*p == ' ') {
acpi_aml_log_state = ACPI_AML_LOG_STOP;
acpi_aml_exit = true;
} else {
/* Roll back */
acpi_aml_log_state = ACPI_AML_PROMPT_ROLL;
acpi_aml_batch_roll = *p;
*p = acpi_aml_batch_prompt;
crc->head = (crc->head + 1) & (ACPI_AML_BUF_SIZE - 1);
ret += 1;
}
break;
default:
assert(0);
break;
}
}
return ret;
}
static int acpi_aml_write(int fd, struct circ_buf *crc)
{
char *p;
int len;
p = &crc->buf[crc->tail];
len = circ_count_to_end(crc);
len = write(fd, p, len);
if (len < 0)
perror("write");
else if (len > 0)
crc->tail = (crc->tail + len) & (ACPI_AML_BUF_SIZE - 1);
return len;
}
static int acpi_aml_write_batch_log(int fd, struct circ_buf *crc)
{
char *p;
int len;
p = &crc->buf[crc->tail];
len = circ_count_to_end(crc);
if (!acpi_aml_batch_drain) {
len = write(fd, p, len);
if (len < 0)
perror("write");
}
if (len > 0)
crc->tail = (crc->tail + len) & (ACPI_AML_BUF_SIZE - 1);
return len;
}
static int acpi_aml_write_batch_cmd(int fd, struct circ_buf *crc)
{
int len;
len = acpi_aml_write(fd, crc);
if (circ_count_to_end(crc) == 0)
acpi_aml_batch_state = ACPI_AML_BATCH_READ_LOG;
return len;
}
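/*
 * Main event loop: select() on stdin, stdout and the acpidbg file,
 * shuttling commands and log output through the two circular buffers
 * until the session (or the batch command) completes.
 */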
static void acpi_aml_loop(int fd)
{
fd_set rfds;
fd_set wfds;
struct timeval tv;
int ret;
int maxfd = 0;
if (acpi_aml_mode == ACPI_AML_BATCH) {
acpi_aml_log_state = ACPI_AML_LOG_START;
acpi_aml_batch_pos = acpi_aml_batch_cmd;
if (acpi_aml_batch_drain)
acpi_aml_batch_state = ACPI_AML_BATCH_READ_LOG;
else
acpi_aml_batch_state = ACPI_AML_BATCH_WRITE_CMD;
}
acpi_aml_exit = false;
while (!acpi_aml_exit) {
tv.tv_sec = ACPI_AML_SEC_TICK;
tv.tv_usec = 0;
FD_ZERO(&rfds);
FD_ZERO(&wfds);
if (acpi_aml_cmd_space()) {
if (acpi_aml_mode == ACPI_AML_INTERACTIVE)
maxfd = acpi_aml_set_fd(STDIN_FILENO, maxfd, &rfds);
else if (strlen(acpi_aml_batch_pos) &&
acpi_aml_batch_state == ACPI_AML_BATCH_WRITE_CMD)
ACPI_AML_BATCH_DO(STDIN_FILENO, read, cmd, ret);
}
if (acpi_aml_cmd_count() &&
(acpi_aml_mode == ACPI_AML_INTERACTIVE ||
acpi_aml_batch_state == ACPI_AML_BATCH_WRITE_CMD))
maxfd = acpi_aml_set_fd(fd, maxfd, &wfds);
if (acpi_aml_log_space() &&
(acpi_aml_mode == ACPI_AML_INTERACTIVE ||
acpi_aml_batch_state == ACPI_AML_BATCH_READ_LOG))
maxfd = acpi_aml_set_fd(fd, maxfd, &rfds);
if (acpi_aml_log_count())
maxfd = acpi_aml_set_fd(STDOUT_FILENO, maxfd, &wfds);
ret = select(maxfd+1, &rfds, &wfds, NULL, &tv);
if (ret < 0) {
perror("select");
break;
}
if (ret > 0) {
if (FD_ISSET(STDIN_FILENO, &rfds))
ACPI_AML_DO(STDIN_FILENO, read, cmd, ret);
if (FD_ISSET(fd, &wfds)) {
if (acpi_aml_mode == ACPI_AML_BATCH)
ACPI_AML_BATCH_DO(fd, write, cmd, ret);
else
ACPI_AML_DO(fd, write, cmd, ret);
}
if (FD_ISSET(fd, &rfds)) {
if (acpi_aml_mode == ACPI_AML_BATCH)
ACPI_AML_BATCH_DO(fd, read, log, ret);
else
ACPI_AML_DO(fd, read, log, ret);
}
if (FD_ISSET(STDOUT_FILENO, &wfds)) {
if (acpi_aml_mode == ACPI_AML_BATCH)
ACPI_AML_BATCH_DO(STDOUT_FILENO, write, log, ret);
else
ACPI_AML_DO(STDOUT_FILENO, write, log, ret);
}
}
}
}
static bool acpi_aml_readable(int fd)
{
fd_set rfds;
struct timeval tv;
int ret;
int maxfd = 0;
tv.tv_sec = 0;
tv.tv_usec = ACPI_AML_USEC_PEEK;
FD_ZERO(&rfds);
maxfd = acpi_aml_set_fd(fd, maxfd, &rfds);
ret = select(maxfd+1, &rfds, NULL, NULL, &tv);
if (ret < 0)
perror("select");
if (ret > 0 && FD_ISSET(fd, &rfds))
return true;
return false;
}
/*
 * This is a userspace IO flush implementation, relying on the prompt
 * characters. It can be turned into a flush() call once the kernel
 * implements the .flush() filesystem operation.
 */
static void acpi_aml_flush(int fd)
{
while (acpi_aml_readable(fd)) {
acpi_aml_batch_drain = true;
acpi_aml_loop(fd);
acpi_aml_batch_drain = false;
}
}
void usage(FILE *file, char *progname)
{
fprintf(file, "usage: %s [-b cmd] [-f file] [-h]\n", progname);
fprintf(file, "\nOptions:\n");
fprintf(file, " -b Specify command to be executed in batch mode\n");
fprintf(file, " -f Specify interface file other than");
fprintf(file, " /sys/kernel/debug/acpi/acpidbg\n");
fprintf(file, " -h Print this help message\n");
}
int main(int argc, char **argv)
{
int fd = -1;
int ch;
int len;
int ret = EXIT_SUCCESS;
while ((ch = getopt(argc, argv, "b:f:h")) != -1) {
switch (ch) {
case 'b':
if (acpi_aml_batch_cmd) {
fprintf(stderr, "Already specify %s\n",
acpi_aml_batch_cmd);
ret = EXIT_FAILURE;
goto exit;
}
len = strlen(optarg);
acpi_aml_batch_cmd = calloc(len + 2, 1);
if (!acpi_aml_batch_cmd) {
perror("calloc");
ret = EXIT_FAILURE;
goto exit;
}
memcpy(acpi_aml_batch_cmd, optarg, len);
acpi_aml_batch_cmd[len] = '\n';
acpi_aml_mode = ACPI_AML_BATCH;
break;
case 'f':
acpi_aml_file_path = optarg;
break;
case 'h':
usage(stdout, argv[0]);
goto exit;
break;
case '?':
default:
usage(stderr, argv[0]);
ret = EXIT_FAILURE;
goto exit;
break;
}
}
fd = open(acpi_aml_file_path, O_RDWR | O_NONBLOCK);
if (fd < 0) {
perror("open");
ret = EXIT_FAILURE;
goto exit;
}
acpi_aml_set_fl(STDIN_FILENO, O_NONBLOCK);
acpi_aml_set_fl(STDOUT_FILENO, O_NONBLOCK);
if (acpi_aml_mode == ACPI_AML_BATCH)
acpi_aml_flush(fd);
acpi_aml_loop(fd);
exit:
if (fd >= 0)
close(fd);
if (acpi_aml_batch_cmd)
free(acpi_aml_batch_cmd);
return ret;
}
| linux-master | tools/power/acpi/tools/acpidbg/acpidbg.c |
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
*
* Module Name: cmfsize - Common get file size function
*
* Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acapps.h"
#define _COMPONENT ACPI_TOOLS
ACPI_MODULE_NAME("cmfsize")
/*******************************************************************************
*
* FUNCTION: cm_get_file_size
*
* PARAMETERS: file - Open file descriptor
*
* RETURN: File Size. On error, -1 (ACPI_UINT32_MAX)
*
* DESCRIPTION: Get the size of a file. Uses seek-to-EOF. File must be open.
* Does not disturb the current file pointer.
*
******************************************************************************/
u32 cm_get_file_size(ACPI_FILE file)
{
long file_size;
long current_offset;
acpi_status status;
/* Save the current file pointer, seek to EOF to obtain file size */
current_offset = ftell(file);
if (current_offset < 0) {
goto offset_error;
}
status = fseek(file, 0, SEEK_END);
if (ACPI_FAILURE(status)) {
goto seek_error;
}
file_size = ftell(file);
if (file_size < 0) {
goto offset_error;
}
/* Restore original file pointer */
status = fseek(file, current_offset, SEEK_SET);
if (ACPI_FAILURE(status)) {
goto seek_error;
}
return ((u32)file_size);
offset_error:
fprintf(stderr, "Could not get file offset\n");
return (ACPI_UINT32_MAX);
seek_error:
fprintf(stderr, "Could not set file offset\n");
return (ACPI_UINT32_MAX);
}
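/*
 * Minimal usage sketch (the filename is hypothetical):
 *   ACPI_FILE file = fopen("ssdt.dat", "rb");
 *   u32 size = cm_get_file_size(file);
 *   if (size != ACPI_UINT32_MAX) {
 *       ... allocate a "size"-byte buffer and read the table ...
 *   }
 */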
| linux-master | tools/power/acpi/common/cmfsize.c |
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
*
* Module Name: getopt
*
* Copyright (C) 2000 - 2023, Intel Corp.
*
*****************************************************************************/
/*
* ACPICA getopt() implementation
*
* Option strings:
* "f" - Option has no arguments
* "f:" - Option requires an argument
* "f+" - Option has an optional argument
* "f^" - Option has optional single-char sub-options
* "f|" - Option has required single-char sub-options
*/
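/*
 * Illustrative (hypothetical) option string: with opts == "b:sv^", "-b"
 * requires an argument, "-s" takes none, and "-v" accepts an optional
 * single-character sub-option, so "-vd" returns 'v' with
 * acpi_gbl_sub_opt_char == 'd', while a bare "-v" returns 'v' with
 * acpi_gbl_sub_opt_char == '^'.
 */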
#include <acpi/acpi.h>
#include "accommon.h"
#include "acapps.h"
#define ACPI_OPTION_ERROR(msg, badchar) \
if (acpi_gbl_opterr) {fprintf (stderr, "%s%c\n", msg, badchar);}
int acpi_gbl_opterr = 1;
int acpi_gbl_optind = 1;
int acpi_gbl_sub_opt_char = 0;
char *acpi_gbl_optarg;
static int current_char_ptr = 1;
/*******************************************************************************
*
* FUNCTION: acpi_getopt_argument
*
* PARAMETERS: argc, argv - from main
*
* RETURN: 0 if an argument was found, -1 otherwise. Sets acpi_gbl_optarg
* to point to the next argument.
*
* DESCRIPTION: Get the next argument. Used to obtain arguments for the
* two-character options after the original call to acpi_getopt.
* Note: Either the argument starts at the next character after
* the option, or it is pointed to by the next argv entry.
* (After call to acpi_getopt, we need to backup to the previous
* argv entry).
*
******************************************************************************/
int acpi_getopt_argument(int argc, char **argv)
{
acpi_gbl_optind--;
current_char_ptr++;
if (argv[acpi_gbl_optind][(int)(current_char_ptr + 1)] != '\0') {
acpi_gbl_optarg =
&argv[acpi_gbl_optind++][(int)(current_char_ptr + 1)];
} else if (++acpi_gbl_optind >= argc) {
ACPI_OPTION_ERROR("\nOption requires an argument", 0);
current_char_ptr = 1;
return (-1);
} else {
acpi_gbl_optarg = argv[acpi_gbl_optind++];
}
current_char_ptr = 1;
return (0);
}
/*******************************************************************************
*
* FUNCTION: acpi_getopt
*
* PARAMETERS: argc, argv - from main
* opts - options info list
*
* RETURN: Option character or ACPI_OPT_END
*
* DESCRIPTION: Get the next option
*
******************************************************************************/
int acpi_getopt(int argc, char **argv, char *opts)
{
int current_char;
char *opts_ptr;
if (current_char_ptr == 1) {
if (acpi_gbl_optind >= argc ||
argv[acpi_gbl_optind][0] != '-' ||
argv[acpi_gbl_optind][1] == '\0') {
return (ACPI_OPT_END);
} else if (strcmp(argv[acpi_gbl_optind], "--") == 0) {
acpi_gbl_optind++;
return (ACPI_OPT_END);
}
}
/* Get the option */
current_char = argv[acpi_gbl_optind][current_char_ptr];
/* Make sure that the option is legal */
if (current_char == ':' ||
(opts_ptr = strchr(opts, current_char)) == NULL) {
ACPI_OPTION_ERROR("Illegal option: -", current_char);
if (argv[acpi_gbl_optind][++current_char_ptr] == '\0') {
acpi_gbl_optind++;
current_char_ptr = 1;
}
return ('?');
}
/* Option requires an argument? */
if (*++opts_ptr == ':') {
if (argv[acpi_gbl_optind][(int)(current_char_ptr + 1)] != '\0') {
acpi_gbl_optarg =
&argv[acpi_gbl_optind++][(int)
(current_char_ptr + 1)];
} else if (++acpi_gbl_optind >= argc) {
ACPI_OPTION_ERROR("Option requires an argument: -",
current_char);
current_char_ptr = 1;
return ('?');
} else {
acpi_gbl_optarg = argv[acpi_gbl_optind++];
}
current_char_ptr = 1;
}
/* Option has an optional argument? */
else if (*opts_ptr == '+') {
if (argv[acpi_gbl_optind][(int)(current_char_ptr + 1)] != '\0') {
acpi_gbl_optarg =
&argv[acpi_gbl_optind++][(int)
(current_char_ptr + 1)];
} else if (++acpi_gbl_optind >= argc) {
acpi_gbl_optarg = NULL;
} else {
acpi_gbl_optarg = argv[acpi_gbl_optind++];
}
current_char_ptr = 1;
}
/* Option has optional single-char arguments? */
else if (*opts_ptr == '^') {
if (argv[acpi_gbl_optind][(int)(current_char_ptr + 1)] != '\0') {
acpi_gbl_optarg =
&argv[acpi_gbl_optind][(int)(current_char_ptr + 1)];
} else {
acpi_gbl_optarg = "^";
}
acpi_gbl_sub_opt_char = acpi_gbl_optarg[0];
acpi_gbl_optind++;
current_char_ptr = 1;
}
/* Option has a required single-char argument? */
else if (*opts_ptr == '|') {
if (argv[acpi_gbl_optind][(int)(current_char_ptr + 1)] != '\0') {
acpi_gbl_optarg =
&argv[acpi_gbl_optind][(int)(current_char_ptr + 1)];
} else {
ACPI_OPTION_ERROR
("Option requires a single-character suboption: -",
current_char);
current_char_ptr = 1;
return ('?');
}
acpi_gbl_sub_opt_char = acpi_gbl_optarg[0];
acpi_gbl_optind++;
current_char_ptr = 1;
}
/* Option with no arguments */
else {
if (argv[acpi_gbl_optind][++current_char_ptr] == '\0') {
current_char_ptr = 1;
acpi_gbl_optind++;
}
acpi_gbl_optarg = NULL;
}
return (current_char);
}
| linux-master | tools/power/acpi/common/getopt.c |